seaweedfs/weed/operation/chunked_file.go

package operation

import (
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"net/http"
	"sort"
	"sync"

	"google.golang.org/grpc"

	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/pb"
	"github.com/chrislusf/seaweedfs/weed/util"
)

var (
	// when the remote server does not allow range requests (Accept-Ranges was not set)
	ErrRangeRequestsNotSupported = errors.New("Range requests are not supported by the remote server")

	// ErrInvalidRange is returned by Read when trying to read past the end of the file
	ErrInvalidRange = errors.New("Invalid range")
)
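
// ChunkInfo describes one stored chunk: its file id on a volume server, its
// byte offset inside the logical file, and its size in bytes.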
type ChunkInfo struct {
	Fid    string `json:"fid"`
	Offset int64  `json:"offset"`
	Size   int64  `json:"size"`
}

type ChunkList []*ChunkInfo

type ChunkManifest struct {
	Name   string    `json:"name,omitempty"`
	Mime   string    `json:"mime,omitempty"`
	Size   int64     `json:"size,omitempty"`
	Chunks ChunkList `json:"chunks,omitempty"`
}

// seekable chunked file reader
type ChunkedFileReader struct {
	totalSize      int64
	chunkList      []*ChunkInfo
	master         pb.ServerAddress
	pos            int64
	pr             *io.PipeReader
	pw             *io.PipeWriter
	mutex          sync.Mutex
	grpcDialOption grpc.DialOption
}

func (s ChunkList) Len() int { return len(s) }
func (s ChunkList) Less(i, j int) bool { return s[i].Offset < s[j].Offset }
func (s ChunkList) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
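
// LoadChunkManifest parses a chunk manifest from buffer, decompressing it
// first when isCompressed is set, and returns the manifest with its chunks
// sorted by offset.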
func LoadChunkManifest(buffer []byte, isCompressed bool) (*ChunkManifest, error) {
	if isCompressed {
		var err error
		if buffer, err = util.DecompressData(buffer); err != nil {
			glog.V(0).Infof("fail to decompress chunk manifest: %v", err)
		}
	}
	cm := ChunkManifest{}
	if e := json.Unmarshal(buffer, &cm); e != nil {
		return nil, e
	}
	sort.Sort(cm.Chunks)
	return &cm, nil
}
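
// Marshal encodes the chunk manifest as JSON.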
func (cm *ChunkManifest) Marshal() ([]byte, error) {
	return json.Marshal(cm)
}
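
// DeleteChunks deletes every chunk referenced by the manifest via a batch
// delete against the master, and returns an error if any single delete fails.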
func (cm *ChunkManifest) DeleteChunks(masterFn GetMasterFn, usePublicUrl bool, grpcDialOption grpc.DialOption) error {
	var fileIds []string
	for _, ci := range cm.Chunks {
		fileIds = append(fileIds, ci.Fid)
	}
	results, err := DeleteFiles(masterFn, usePublicUrl, grpcDialOption, fileIds)
	if err != nil {
		glog.V(0).Infof("delete %+v: %v", fileIds, err)
		return fmt.Errorf("chunk delete: %v", err)
	}
	for _, result := range results {
		if result.Error != "" {
			glog.V(0).Infof("delete file %+v: %v", result.FileId, result.Error)
			return fmt.Errorf("chunk delete %v: %v", result.FileId, result.Error)
		}
	}
	return nil
}
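
// readChunkNeedle streams a single chunk from fileUrl into w. A non-zero
// offset is requested with an HTTP Range header; if the server ignores the
// range, ErrRangeRequestsNotSupported is returned.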
func readChunkNeedle(fileUrl string, w io.Writer, offset int64, jwt string) (written int64, e error) {
	req, err := http.NewRequest("GET", fileUrl, nil)
	if err != nil {
		return written, err
	}
	if offset > 0 {
		req.Header.Set("Range", fmt.Sprintf("bytes=%d-", offset))
	}

	resp, err := util.Do(req)
	if err != nil {
		return written, err
	}
	defer func() {
		io.Copy(io.Discard, resp.Body)
		resp.Body.Close()
	}()

	switch resp.StatusCode {
	case http.StatusRequestedRangeNotSatisfiable:
		return written, ErrInvalidRange
	case http.StatusOK:
		if offset > 0 {
			return written, ErrRangeRequestsNotSupported
		}
	case http.StatusPartialContent:
		break
	default:
		return written, fmt.Errorf("Read chunk needle error: [%d] %s", resp.StatusCode, fileUrl)
	}
	return io.Copy(w, resp.Body)
}
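
// NewChunkedFileReader builds a seekable reader over the given chunks, sorted
// by offset, with the total file size precomputed from the chunk sizes.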
func NewChunkedFileReader(chunkList []*ChunkInfo, master pb.ServerAddress, grpcDialOption grpc.DialOption) *ChunkedFileReader {
	var totalSize int64
	for _, chunk := range chunkList {
		totalSize += chunk.Size
	}
	sort.Sort(ChunkList(chunkList))
	return &ChunkedFileReader{
		totalSize:      totalSize,
		chunkList:      chunkList,
		master:         master,
		grpcDialOption: grpcDialOption,
	}
}
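
// Seek implements io.Seeker. Moving to a new position closes any in-flight
// pipe so that the next Read starts streaming from the new offset.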
func (cf *ChunkedFileReader) Seek(offset int64, whence int) (int64, error) {
	var err error
	switch whence {
	case io.SeekStart:
	case io.SeekCurrent:
		offset += cf.pos
	case io.SeekEnd:
		offset = cf.totalSize + offset
	}
	if offset > cf.totalSize {
		err = ErrInvalidRange
	}
	if cf.pos != offset {
		cf.Close()
	}
	cf.pos = offset
	return cf.pos, err
}
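
// WriteTo streams the file from the current position to the end, looking up
// each chunk's volume location via the master and copying it into w.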
func (cf *ChunkedFileReader) WriteTo(w io.Writer) (n int64, err error) {
	chunkIndex := -1
	chunkStartOffset := int64(0)
	for i, ci := range cf.chunkList {
		if cf.pos >= ci.Offset && cf.pos < ci.Offset+ci.Size {
			chunkIndex = i
			chunkStartOffset = cf.pos - ci.Offset
			break
		}
	}
	if chunkIndex < 0 {
		return n, ErrInvalidRange
	}
	for ; chunkIndex < len(cf.chunkList); chunkIndex++ {
		ci := cf.chunkList[chunkIndex]
		// TODO: should we read the data from the local volume server first?
		fileUrl, jwt, lookupError := LookupFileId(func() pb.ServerAddress {
			return cf.master
		}, cf.grpcDialOption, ci.Fid)
		if lookupError != nil {
			return n, lookupError
		}
		if wn, e := readChunkNeedle(fileUrl, w, chunkStartOffset, jwt); e != nil {
			return n, e
		} else {
			n += wn
			cf.pos += wn
		}
		chunkStartOffset = 0
	}
	return n, nil
}
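
// ReadAt seeks to off and then reads from that position into p.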
func (cf *ChunkedFileReader) ReadAt(p []byte, off int64) (n int, err error) {
	cf.Seek(off, io.SeekStart)
	return cf.Read(p)
}

func (cf *ChunkedFileReader) Read(p []byte) (int, error) {
	return cf.getPipeReader().Read(p)
}

func (cf *ChunkedFileReader) Close() (e error) {
	cf.mutex.Lock()
	defer cf.mutex.Unlock()
	return cf.closePipe()
}

func (cf *ChunkedFileReader) closePipe() (e error) {
	if cf.pr != nil {
		if err := cf.pr.Close(); err != nil {
			e = err
		}
	}
	cf.pr = nil
	if cf.pw != nil {
		if err := cf.pw.Close(); err != nil {
			e = err
		}
	}
	cf.pw = nil
	return e
}
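
// getPipeReader lazily creates an io.Pipe and starts a goroutine that streams
// the remaining chunks into it via WriteTo; the same pipe reader is reused
// across successive Read calls until it is closed.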
func (cf *ChunkedFileReader) getPipeReader() io.Reader {
	cf.mutex.Lock()
	defer cf.mutex.Unlock()
	if cf.pr != nil && cf.pw != nil {
		return cf.pr
	}
	cf.closePipe()
	cf.pr, cf.pw = io.Pipe()
	go func(pw *io.PipeWriter) {
		_, e := cf.WriteTo(pw)
		pw.CloseWithError(e)
	}(cf.pw)
	return cf.pr
}