package weed_server

import (
	"bytes"
	"context"
	"fmt"
	"net/http"
	"strings"
	"time"

	"github.com/seaweedfs/seaweedfs/weed/filer"
	"github.com/seaweedfs/seaweedfs/weed/glog"
	"github.com/seaweedfs/seaweedfs/weed/operation"
	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
	"github.com/seaweedfs/seaweedfs/weed/storage/needle"
	"github.com/seaweedfs/seaweedfs/weed/util"
)

// encrypt handles a single-chunk POST or PUT upload: the content is encrypted
// before it reaches the volume server, and a filer entry pointing at the
// encrypted chunk is created.
func (fs *FilerServer) encrypt(ctx context.Context, w http.ResponseWriter, r *http.Request, so *operation.StorageOption) (filerResult *FilerPostResult, err error) {

	fileId, urlLocation, auth, err := fs.assignNewFileInfo(so)

	if err != nil || fileId == "" || urlLocation == "" {
		return nil, fmt.Errorf("failed to allocate volume for %s, collection:%s, datacenter:%s", r.URL.Path, so.Collection, so.DataCenter)
	}

	glog.V(4).Infof("write %s to %v", r.URL.Path, urlLocation)

	// Note: data is stored as encrypt(gzip(data)), i.e. gzip first, then encrypt
	sizeLimit := int64(fs.option.MaxMB) * 1024 * 1024
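
	// bufPool is a package-level sync.Pool of *bytes.Buffer shared by the
	// write handlers; reusing pooled buffers avoids allocating a fresh
	// buffer for every upload body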
	bytesBuffer := bufPool.Get().(*bytes.Buffer)
	defer bufPool.Put(bytesBuffer)

	pu, err := needle.ParseUpload(r, sizeLimit, bytesBuffer)
	if err != nil {
		return nil, err
	}
	uncompressedData := pu.Data
	if pu.IsGzipped {
		uncompressedData = pu.UncompressedData
	}
	if pu.MimeType == "" {
		// sniff the content type when the client did not supply one
		pu.MimeType = http.DetectContentType(uncompressedData)
	}

	uploadOption := &operation.UploadOption{
		UploadUrl:         urlLocation,
		Filename:          pu.FileName,
		Cipher:            true,
		IsInputCompressed: false,
		MimeType:          pu.MimeType,
		PairMap:           pu.PairMap,
		Jwt:               auth,
	}
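
	// with Cipher set, the uploader encrypts the data locally with a randomly
	// generated key before sending it to the volume server; the key is returned
	// in the upload result and saved on the file chunk so reads can decrypt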
	uploader, uploaderErr := operation.NewUploader()
	if uploaderErr != nil {
		return nil, fmt.Errorf("uploader initialization error: %v", uploaderErr)
	}

	uploadResult, uploadError := uploader.UploadData(uncompressedData, uploadOption)
	if uploadError != nil {
		return nil, fmt.Errorf("upload to volume server: %v", uploadError)
	}

	// save the single uploaded chunk (offset 0) as the entry's chunk list
	fileChunks := []*filer_pb.FileChunk{uploadResult.ToPbFileChunk(fileId, 0, time.Now().UnixNano())}
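
	// for uploads targeting a directory (trailing "/"), append the uploaded
	// file name to form the entry's full path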
	path := r.URL.Path
	if strings.HasSuffix(path, "/") {
		if pu.FileName != "" {
			path += pu.FileName
		}
	}
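
	// ownership defaults to the filer process owner (OS_UID/OS_GID) with mode
	// 0660, since this upload path carries no explicit permission metadata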
	entry := &filer.Entry{
		FullPath: util.FullPath(path),
		Attr: filer.Attr{
			Mtime:  time.Now(),
			Crtime: time.Now(),
			Mode:   0660,
			Uid:    OS_UID,
			Gid:    OS_GID,
			TtlSec: so.TtlSeconds,
			Mime:   pu.MimeType,
			Md5:    util.Base64Md5ToBytes(pu.ContentMd5),
		},
		Chunks: fileChunks,
	}

	filerResult = &FilerPostResult{
		Name: pu.FileName,
		Size: int64(pu.OriginalDataSize),
	}
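
	// persist the entry; if the metadata write fails, delete the uploaded
	// chunks so they are not left orphaned on the volume servers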
	if dbErr := fs.filer.CreateEntry(ctx, entry, false, false, nil, false, so.MaxFileNameLength); dbErr != nil {
		fs.filer.DeleteUncommittedChunks(entry.GetChunks())
		err = dbErr
		filerResult.Error = dbErr.Error()
		return
	}

	return
}