package weed_server

import (
	"crypto/md5"
	"hash"
	"io"
	"io/ioutil"
	"net/http"
	"strings"
	"time"

	"github.com/chrislusf/seaweedfs/weed/filer"
	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/operation"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/chrislusf/seaweedfs/weed/security"
	"github.com/chrislusf/seaweedfs/weed/stats"
	"github.com/chrislusf/seaweedfs/weed/util"
)
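
// uploadReaderToChunks reads the request body in chunkSize pieces, uploads each
// piece to a volume server as one chunk, and returns the chunk list, the running
// MD5 hash, the number of bytes consumed, and, for small enough uploads, the raw
// content as smallContent so the caller can keep it with the entry instead of as chunks.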
func (fs *FilerServer) uploadReaderToChunks(w http.ResponseWriter, r *http.Request, reader io.Reader, chunkSize int32, fileName, contentType string, contentLength int64, so *operation.StorageOption) ([]*filer_pb.FileChunk, hash.Hash, int64, error, []byte) {
	var fileChunks []*filer_pb.FileChunk
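
	// the TeeReader copies every byte read from the request into md5Hash,
	// so the MD5 of the whole upload is accumulated while the chunks are read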
	md5Hash := md5.New()
	var partReader = ioutil.NopCloser(io.TeeReader(reader, md5Hash))

	chunkOffset := int64(0)
	var smallContent []byte
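
	// read the stream one chunk at a time; each full chunk is uploaded
	// to a volume server before the next chunk is read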
	for {
		limitedReader := io.LimitReader(partReader, int64(chunkSize))

		data, err := ioutil.ReadAll(limitedReader)
		if err != nil {
			return nil, nil, 0, err, nil
		}
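
		// a small first chunk (below SaveToFilerLimit, or under 4KB for paths under
		// filer.DirectoryEtcRoot) is returned as smallContent rather than uploaded,
		// so the caller can store it directly with the entry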
		if chunkOffset == 0 && !isAppend(r) {
			if len(data) < int(fs.option.SaveToFilerLimit) || strings.HasPrefix(r.URL.Path, filer.DirectoryEtcRoot) && len(data) < 4*1024 {
				smallContent = data
				chunkOffset += int64(len(data))
				break
			}
		}
		dataReader := util.NewBytesReader(data)

		// retry to assign a different file id
		var fileId, urlLocation string
		var auth security.EncodedJwt
		var assignErr, uploadErr error
		var uploadResult *operation.UploadResult
		for i := 0; i < 3; i++ {
			// assign one file id for one chunk
			fileId, urlLocation, auth, assignErr = fs.assignNewFileInfo(so)
			if assignErr != nil {
				return nil, nil, 0, assignErr, nil
			}

			// upload the chunk to the volume server
			uploadResult, uploadErr, _ = fs.doUpload(urlLocation, w, r, dataReader, fileName, contentType, nil, auth)
			if uploadErr != nil {
				time.Sleep(251 * time.Millisecond)
				continue
			}
			break
		}
		if uploadErr != nil {
			return nil, nil, 0, uploadErr, nil
		}

		// if the last chunk exhausted the reader exactly at the chunk boundary
		if uploadResult.Size == 0 {
			break
		}

		// save the chunk into the chunk manifest structure
		fileChunks = append(fileChunks, uploadResult.ToPbFileChunk(fileId, chunkOffset))

		glog.V(4).Infof("uploaded %s chunk %d to %s [%d,%d)", fileName, len(fileChunks), fileId, chunkOffset, chunkOffset+int64(uploadResult.Size))

		// advance the offset for the next chunk
		chunkOffset = chunkOffset + int64(uploadResult.Size)

		// if the last chunk was smaller than a full chunk, the reader is already exhausted
		if int64(uploadResult.Size) < int64(chunkSize) {
			break
		}
	}

	return fileChunks, md5Hash, chunkOffset, nil, smallContent
}
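
// doUpload uploads a single chunk to the volume server at urlLocation via
// operation.Upload, recording chunk upload counts, retries, and latency in the
// filer metrics.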
func (fs *FilerServer) doUpload(urlLocation string, w http.ResponseWriter, r *http.Request, limitedReader io.Reader, fileName string, contentType string, pairMap map[string]string, auth security.EncodedJwt) (*operation.UploadResult, error, []byte) {
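
	// count this chunk upload and time it for the filer metrics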
	stats.FilerRequestCounter.WithLabelValues("chunkUpload").Inc()
	start := time.Now()
	defer func() {
		stats.FilerRequestHistogram.WithLabelValues("chunkUpload").Observe(time.Since(start).Seconds())
	}()

	uploadResult, err, data := operation.Upload(urlLocation, fileName, fs.option.Cipher, limitedReader, false, contentType, pairMap, auth)
	if uploadResult != nil && uploadResult.RetryCount > 0 {
		stats.FilerRequestCounter.WithLabelValues("chunkUploadRetry").Add(float64(uploadResult.RetryCount))
	}
	return uploadResult, err, data
}