seaweedfs/weed/server/filer_server_handlers_write_cipher.go

package weed_server

import (
"bytes"
"context"
"fmt"
"net/http"
"strings"
"time"
"github.com/seaweedfs/seaweedfs/weed/filer"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/operation"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/storage/needle"
"github.com/seaweedfs/seaweedfs/weed/util"
)

// encrypt handles a single-chunk POST or PUT upload: it assigns a new file id,
// uploads the request body to a volume server with cipher enabled, and then
// creates a filer entry referencing the encrypted chunk.
func (fs *FilerServer) encrypt(ctx context.Context, w http.ResponseWriter, r *http.Request, so *operation.StorageOption) (filerResult *FilerPostResult, err error) {
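
	// ask the master for a new file id and a volume server location to upload
	// to; auth is the JWT authorizing the upload to that volume server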
	fileId, urlLocation, auth, err := fs.assignNewFileInfo(so)
	if err != nil || fileId == "" || urlLocation == "" {
return nil, fmt.Errorf("fail to allocate volume for %s, collection:%s, datacenter:%s", r.URL.Path, so.Collection, so.DataCenter)
}
glog.V(4).Infof("write %s to %v", r.URL.Path, urlLocation)

	// Note: the stored bytes are encrypt(gzip(data)): compressible data is
	// gzipped first, then encrypted, since encrypted bytes do not compress

	sizeLimit := int64(fs.option.MaxMB) * 1024 * 1024

	bytesBuffer := bufPool.Get().(*bytes.Buffer)
	defer bufPool.Put(bytesBuffer)
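
	// ParseUpload reads the request body into the pooled buffer and enforces
	// the size limit computed above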
	pu, err := needle.ParseUpload(r, sizeLimit, bytesBuffer)
	if err != nil {
		return nil, fmt.Errorf("parse upload: %v", err)
	}
	uncompressedData := pu.Data
	if pu.IsGzipped {
		uncompressedData = pu.UncompressedData
	}
	if pu.MimeType == "" {
		pu.MimeType = http.DetectContentType(uncompressedData)
		// println("detect2 mimetype to", pu.MimeType)
	}
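
	// Cipher: true makes operation.UploadData generate a random cipher key,
	// encrypt the (possibly gzipped) data with it, and return the key in
	// uploadResult.CipherKey so it can be stored in the chunk metadata below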
	uploadOption := &operation.UploadOption{
		UploadUrl:         urlLocation,
		Filename:          pu.FileName,
		Cipher:            true,
		IsInputCompressed: false,
		MimeType:          pu.MimeType,
		PairMap:           pu.PairMap,
		Jwt:               auth,
	}
	uploadResult, uploadError := operation.UploadData(uncompressedData, uploadOption)
	if uploadError != nil {
		return nil, fmt.Errorf("upload to volume server: %v", uploadError)
	}

	// Save to chunk manifest structure
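	// a single chunk at offset 0 covers the whole file; ToPbFileChunk records
	// the file id, the chunk's cipher key, and the modification timestamp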
	fileChunks := []*filer_pb.FileChunk{uploadResult.ToPbFileChunk(fileId, 0, time.Now().UnixNano())}
	// fmt.Printf("uploaded: %+v\n", uploadResult)
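
	// when posting to a directory, append the uploaded file name to form the
	// full path of the new entry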
	path := r.URL.Path
	if strings.HasSuffix(path, "/") {
		if pu.FileName != "" {
			path += pu.FileName
		}
	}
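
	// build the filer entry; the Content-MD5 header from the request, if
	// present, is stored with the entry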
entry := &filer.Entry{
2020-03-23 15:01:34 +08:00
FullPath: util.FullPath(path),
2020-09-01 15:21:19 +08:00
Attr: filer.Attr{
Mtime: time.Now(),
Crtime: time.Now(),
Mode: 0660,
Uid: OS_UID,
Gid: OS_GID,
TtlSec: so.TtlSeconds,
Mime: pu.MimeType,
Md5: util.Base64Md5ToBytes(pu.ContentMd5),
},
Chunks: fileChunks,
}
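
	// report the file name and the original (pre-compression, pre-encryption)
	// size back to the client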
filerResult = &FilerPostResult{
Name: pu.FileName,
Size: int64(pu.OriginalDataSize),
}

	if dbErr := fs.filer.CreateEntry(ctx, entry, false, false, nil, false, so.MaxFileNameLength); dbErr != nil {
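		// the chunk was already written to a volume server; if the metadata
		// write fails, delete it so it does not linger as an orphan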
		fs.filer.DeleteUncommittedChunks(entry.GetChunks())
		err = dbErr
		filerResult.Error = dbErr.Error()
		return
	}

	return
}
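
// A minimal usage sketch (assumptions: a filer listening on localhost:8888,
// started with encryption enabled, e.g. `weed filer -encryptVolumeData`):
//
//	curl -X PUT -T photo.jpg "http://localhost:8888/buckets/demo/photo.jpg"
//
// A small single-chunk upload like this is routed through encrypt() above:
// the body is encrypted once, stored as one chunk on a volume server, and the
// cipher key is kept only in the filer entry's chunk metadata, never on the
// volume server itself.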