Added TLS for HTTP clients (#5766)

* Added global HTTP client

* Added Do func for global HTTP client

* Changed the code to use the global HTTP client

* Fixed HTTP client in volume uploader

* Fixed pkg name

* Fixed HTTP util funcs

* Fixed HTTP client for bench_filer_upload

* Fixed HTTP client for stress_filer_upload

* Fixed HTTP client for filer_server_handlers_proxy

* Fixed HTTP client for command_fs_merge_volumes

* Fixed HTTP client for command_fs_merge_volumes and command_volume_fsck

* Fixed HTTP client for s3api_server

* Added global client initialization for main funcs

* Renamed global_client to client

* Changed:
  - fixed NewHttpClient
  - added CheckIsHttpsClientEnabled func
  - updated security.toml in scaffold

* Reduced the visibility of some functions in the util/http/client pkg

* Added the loadSecurityConfig function

* Use util.LoadSecurityConfiguration() in NewHttpClient func
vadimartynov 2024-07-17 09:14:09 +03:00 committed by GitHub
parent c6dec11ea5
commit 86d92a42b4
66 changed files with 646 additions and 198 deletions
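The change replaces per-call-site http.Client instances with one shared, TLS-aware client in weed/util/http. A minimal sketch of the resulting calling pattern, assuming only the helper signatures visible in the diffs below (the URL is illustrative, not part of the commit):

package main

import (
	"log"
	"net/http"

	util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
)

func main() {
	// Build the shared client once, before any util_http helper is used.
	// With [https.client] enabled in security.toml it is constructed with TLS.
	util_http.InitGlobalHttpClient()

	// Convenience helpers route through the global client.
	body, _, err := util_http.Get("http://localhost:8888/path") // illustrative URL
	if err != nil {
		log.Fatalf("get: %v", err)
	}
	log.Printf("read %d bytes", len(body))

	// Hand-built requests can reuse the same client.
	req, err := http.NewRequest(http.MethodGet, "http://localhost:8888/path", nil)
	if err != nil {
		log.Fatalf("request: %v", err)
	}
	resp, err := util_http.GetGlobalHttpClient().Do(req)
	if err != nil {
		log.Fatalf("do: %v", err)
	}
	defer util_http.CloseResponse(resp)
}

This is why most files below only gain an import and a one-line init call in main, while their existing util.Get/Delete/DownloadFile calls move to the util_http package.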

View File

@@ -11,6 +11,7 @@ import (
 	"github.com/seaweedfs/seaweedfs/weed/storage/backend"
 	"github.com/seaweedfs/seaweedfs/weed/storage/needle"
 	"github.com/seaweedfs/seaweedfs/weed/storage/super_block"
+	util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
 )

 var (
@@ -40,6 +41,8 @@ that has those volumes.
 */
 func main() {
 	flag.Parse()
+	util_http.NewGlobalHttpClient()
+
 	fileName := strconv.Itoa(*fixVolumeId)
 	if *fixVolumeCollection != "" {
 		fileName = *fixVolumeCollection + "_" + fileName

View File

@@ -20,6 +20,7 @@ import (
 	"github.com/seaweedfs/seaweedfs/weed/storage/types"
 	"github.com/seaweedfs/seaweedfs/weed/util"
 	"google.golang.org/grpc"
+	util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
 )

 var (
@@ -40,7 +41,8 @@ var (
 */
 func main() {
 	flag.Parse()
+	util_http.InitGlobalHttpClient()

 	util.LoadSecurityConfiguration()
 	grpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client")

View File

@@ -14,6 +14,7 @@ import (
 	"github.com/seaweedfs/seaweedfs/weed/storage/super_block"
 	"github.com/seaweedfs/seaweedfs/weed/storage/types"
 	"github.com/seaweedfs/seaweedfs/weed/util"
+	util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
 )

 var (
@@ -36,6 +37,8 @@ The .idx has all correct offsets.
 */
 func main() {
 	flag.Parse()
+	util_http.InitGlobalHttpClient()
+
 	fileName := strconv.Itoa(*fixVolumeId)
 	if *fixVolumeCollection != "" {
 		fileName = *fixVolumeCollection + "_" + fileName

View File

@@ -12,6 +12,7 @@ import (
 	"strconv"
 	"strings"
 	"time"
+	util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
 )

 var (
@@ -23,8 +24,8 @@ var (
 )

 func main() {
 	flag.Parse()
-
+	util_http.InitGlobalHttpClient()

 	if *isWrite {
 		startGenerateMetadata()

View File

@@ -11,6 +11,7 @@ import (
 	"github.com/seaweedfs/seaweedfs/weed/storage/backend"
 	"github.com/seaweedfs/seaweedfs/weed/storage/needle"
 	"github.com/seaweedfs/seaweedfs/weed/storage/super_block"
+	util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
 )

 var (
@@ -71,6 +72,7 @@ func (scanner *VolumeFileScanner4SeeDat) VisitNeedle(n *needle.Needle, offset in
 func main() {
 	flag.Parse()
+	util_http.InitGlobalHttpClient()

 	vid := needle.VolumeId(*volumeId)

View File

@@ -14,6 +14,7 @@ import (
 	"github.com/seaweedfs/seaweedfs/weed/operation"
 	"github.com/seaweedfs/seaweedfs/weed/security"
 	"github.com/seaweedfs/seaweedfs/weed/util"
+	util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
 )

 var (
@@ -25,6 +26,7 @@ var (
 func main() {
 	flag.Parse()
+	util_http.InitGlobalHttpClient()

 	util.LoadSecurityConfiguration()
 	grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
@@ -34,7 +36,7 @@ func main() {
 	go func() {
 		for {
 			println("vacuum threshold", *garbageThreshold)
-			_, _, err := util.Get(fmt.Sprintf("http://%s/vol/vacuum?garbageThreshold=%f", pb.ServerAddress(*master).ToHttpAddress(), *garbageThreshold))
+			_, _, err := util_http.Get(fmt.Sprintf("http://%s/vol/vacuum?garbageThreshold=%f", pb.ServerAddress(*master).ToHttpAddress(), *garbageThreshold))
 			if err != nil {
 				log.Fatalf("vacuum: %v", err)
 			}
@@ -47,7 +49,7 @@ func main() {
 			assignResult, targetUrl := genFile(grpcDialOption, i)

-			util.Delete(targetUrl, string(assignResult.Auth))
+			util_http.Delete(targetUrl, string(assignResult.Auth))
 		}
@@ -76,7 +78,13 @@ func genFile(grpcDialOption grpc.DialOption, i int) (*operation.AssignResult, st
 		PairMap:           nil,
 		Jwt:               assignResult.Auth,
 	}
-	_, err = operation.UploadData(data, uploadOption)
+
+	uploader, err := operation.NewUploader()
+	if err != nil {
+		log.Fatalf("upload: %v", err)
+	}
+
+	_, err = uploader.UploadData(data, uploadOption)
 	if err != nil {
 		log.Fatalf("upload: %v", err)
 	}

View File

@@ -7,10 +7,10 @@ import (
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/aws/session"
 	"github.com/aws/aws-sdk-go/service/s3"
-	"github.com/seaweedfs/seaweedfs/weed/util"
 	"net/http"
 	"strings"
 	"time"
+	util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
 )

 // Downloads an item from an S3 Bucket in the region configured in the shared config
@@ -21,6 +21,8 @@ import (
 // For this exampl to work, the domainName is needd
 // weed s3 -domainName=localhost
 func main() {
+	util_http.InitGlobalHttpClient()
+
 	h := md5.New()
 	content := strings.NewReader(stringContent)
 	content.WriteTo(h)
@@ -64,7 +66,7 @@ func main() {
 		fmt.Printf("error put request: %v\n", err)
 		return
 	}
-	defer util.CloseResponse(resp)
+	defer util_http.CloseResponse(resp)
 	fmt.Printf("response: %+v\n", resp)
 }

View File

@@ -10,6 +10,7 @@ import (
 	"github.com/seaweedfs/seaweedfs/weed/storage"
 	"github.com/seaweedfs/seaweedfs/weed/storage/needle"
 	"github.com/seaweedfs/seaweedfs/weed/storage/super_block"
+	util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
 )

 var (
@@ -40,6 +41,7 @@ func (scanner *VolumeFileScanner4SeeDat) VisitNeedle(n *needle.Needle, offset in
 func main() {
 	flag.Parse()
+	util_http.InitGlobalHttpClient()

 	vid := needle.VolumeId(*volumeId)

View File

@@ -12,6 +12,7 @@ import (
 	"github.com/seaweedfs/seaweedfs/weed/glog"
 	"github.com/seaweedfs/seaweedfs/weed/storage/idx"
 	"github.com/seaweedfs/seaweedfs/weed/storage/types"
+	util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
 )

 var (
@@ -27,6 +28,8 @@ This is to see content in .idx files.
 */
 func main() {
 	flag.Parse()
+	util_http.InitGlobalHttpClient()
+
 	fileName := strconv.Itoa(*fixVolumeId)
 	if *fixVolumeCollection != "" {
 		fileName = *fixVolumeCollection + "_" + fileName

View File

@@ -12,6 +12,7 @@ import (
 	"github.com/seaweedfs/seaweedfs/weed/filer"
 	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
 	"github.com/seaweedfs/seaweedfs/weed/util"
+	util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
 )

 var (
@@ -20,6 +21,7 @@ var (
 func main() {
 	flag.Parse()
+	util_http.InitGlobalHttpClient()

 	dst, err := os.OpenFile(*logdataFile, os.O_RDONLY, 0644)
 	if err != nil {

View File

@@ -11,6 +11,7 @@ import (
 	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
 	"github.com/seaweedfs/seaweedfs/weed/util"
+	util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
 )

 var (
@@ -19,6 +20,7 @@ var (
 func main() {
 	flag.Parse()
+	util_http.InitGlobalHttpClient()

 	dst, err := os.OpenFile(*metaFile, os.O_RDONLY, 0644)
 	if err != nil {

View File

@@ -13,6 +13,7 @@ import (
 	"github.com/seaweedfs/seaweedfs/weed/security"
 	"github.com/seaweedfs/seaweedfs/weed/util"
 	"google.golang.org/grpc"
+	util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
 )

 var (
@@ -23,6 +24,7 @@ var (
 func main() {
 	flag.Parse()
+	util_http.InitGlobalHttpClient()

 	util.LoadSecurityConfiguration()
 	grpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client")

View File

@@ -13,6 +13,7 @@ import (
 	"strings"
 	"sync"
 	"time"
+	util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
 )

 var (
@@ -30,8 +31,8 @@ type stat struct {
 }

 func main() {
 	flag.Parse()
-
+	util_http.InitGlobalHttpClient()

 	data := make([]byte, *size)
 	println("data len", len(data))
@@ -43,16 +44,12 @@ func main() {
 		go func(x int) {
 			defer wg.Done()
-			client := &http.Client{Transport: &http.Transport{
-				MaxIdleConns:        1024,
-				MaxIdleConnsPerHost: 1024,
-			}}
 			r := rand.New(rand.NewSource(time.Now().UnixNano() + int64(x)))
 			for t := 0; t < *times; t++ {
 				for f := 0; f < *fileCount; f++ {
 					fn := r.Intn(*fileCount)
-					if size, err := uploadFileToFiler(client, data, fmt.Sprintf("file%04d", fn), *destination); err == nil {
+					if size, err := uploadFileToFiler(data, fmt.Sprintf("file%04d", fn), *destination); err == nil {
 						statsChan <- stat{
 							size: size,
 						}
@@ -93,7 +90,7 @@ func main() {
 }

-func uploadFileToFiler(client *http.Client, data []byte, filename, destination string) (size int64, err error) {
+func uploadFileToFiler(data []byte, filename, destination string) (size int64, err error) {
 	if !strings.HasSuffix(destination, "/") {
 		destination = destination + "/"
@@ -116,10 +113,13 @@ func uploadFileToFiler(client *http.Client, data []byte, filename, destination s
 	uri := destination + filename

 	request, err := http.NewRequest(http.MethodPost, uri, body)
+	if err != nil {
+		return 0, fmt.Errorf("http POST %s: %v", uri, err)
+	}
 	request.Header.Set("Content-Type", writer.FormDataContentType())
 	// request.Close = true // can not use this, which do not reuse http connection, impacting filer->volume also.

-	resp, err := client.Do(request)
+	resp, err := util_http.GetGlobalHttpClient().Do(request)
 	if err != nil {
 		return 0, fmt.Errorf("http POST %s: %v", uri, err)
 	} else {

View File

@@ -14,6 +14,7 @@ import (
 	"strings"
 	"sync"
 	"time"
+	util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
 )

 var (
@@ -30,8 +31,8 @@ type stat struct {
 }

 func main() {
 	flag.Parse()
-
+	util_http.InitGlobalHttpClient()

 	var fileNames []string
@@ -51,8 +52,6 @@ func main() {
 	for x := 0; x < *concurrency; x++ {
 		wg.Add(1)

-		client := &http.Client{}
-
 		go func() {
 			defer wg.Done()
 			rand.Shuffle(len(fileNames), func(i, j int) {
@@ -60,7 +59,7 @@ func main() {
 			})
 			for t := 0; t < *times; t++ {
 				for _, filename := range fileNames {
-					if size, err := uploadFileToFiler(client, filename, *destination); err == nil {
+					if size, err := uploadFileToFiler(filename, *destination); err == nil {
 						statsChan <- stat{
 							size: size,
 						}
@@ -99,7 +98,7 @@ func main() {
 }

-func uploadFileToFiler(client *http.Client, filename, destination string) (size int64, err error) {
+func uploadFileToFiler(filename, destination string) (size int64, err error) {
 	file, err := os.Open(filename)
 	if err != nil {
 		panic(err)
@@ -131,9 +130,13 @@ func uploadFileToFiler(client *http.Client, filename, destination string) (size
 	uri := destination + file.Name()

 	request, err := http.NewRequest(http.MethodPost, uri, body)
+	if err != nil {
+		return 0, fmt.Errorf("http POST %s: %v", uri, err)
+	}
 	request.Header.Set("Content-Type", writer.FormDataContentType())

-	resp, err := client.Do(request)
+	resp, err := util_http.GetGlobalHttpClient().Do(request)
 	if err != nil {
 		return 0, fmt.Errorf("http POST %s: %v", uri, err)
 	} else {

View File

@@ -12,6 +12,7 @@ import (
 	"github.com/seaweedfs/seaweedfs/weed/storage/needle"
 	util2 "github.com/seaweedfs/seaweedfs/weed/util"
 	"golang.org/x/tools/godoc/util"
+	util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
 )

 var (
@@ -24,6 +25,7 @@ var (
 func main() {
 	flag.Parse()
+	util_http.InitGlobalHttpClient()

 	util2.LoadSecurityConfiguration()
 	grpcDialOption := security.LoadClientTLS(util2.GetViper(), "grpc.client")

View File

@@ -22,6 +22,7 @@ import (
 	"github.com/seaweedfs/seaweedfs/weed/security"
 	"github.com/seaweedfs/seaweedfs/weed/util"
 	"github.com/seaweedfs/seaweedfs/weed/wdclient"
+	util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
 )

 type BenchmarkOptions struct {
@@ -214,7 +215,7 @@ func writeFiles(idChan chan int, fileIdLineChan chan string, s *stat) {
 				if isSecure {
 					jwtAuthorization = operation.LookupJwt(b.masterClient.GetMaster(context.Background()), b.grpcDialOption, df.fp.Fid)
 				}
-				if e := util.Delete(fmt.Sprintf("http://%s/%s", df.fp.Server, df.fp.Fid), string(jwtAuthorization)); e == nil {
+				if e := util_http.Delete(fmt.Sprintf("http://%s/%s", df.fp.Server, df.fp.Fid), string(jwtAuthorization)); e == nil {
 					s.completed++
 				} else {
 					s.failed++
@@ -295,7 +296,7 @@ func readFiles(fileIdLineChan chan string, s *stat) {
 			}
 			var bytes []byte
 			for _, url := range urls {
-				bytes, _, err = util.Get(url)
+				bytes, _, err = util_http.Get(url)
 				if err == nil {
 					break
 				}

View File

@@ -15,6 +15,7 @@ import (
 	"github.com/seaweedfs/seaweedfs/weed/pb"
 	"github.com/seaweedfs/seaweedfs/weed/security"
 	"github.com/seaweedfs/seaweedfs/weed/util"
+	util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
 )

 var (
@@ -63,11 +64,11 @@ func downloadToFile(masterFn operation.GetMasterFn, grpcDialOption grpc.DialOpti
 	if lookupError != nil {
 		return lookupError
 	}
-	filename, _, rc, err := util.DownloadFile(fileUrl, jwt)
+	filename, _, rc, err := util_http.DownloadFile(fileUrl, jwt)
 	if err != nil {
 		return err
 	}
-	defer util.CloseResponse(rc)
+	defer util_http.CloseResponse(rc)
 	if filename == "" {
 		filename = fileId
 	}
@@ -116,10 +117,10 @@ func fetchContent(masterFn operation.GetMasterFn, grpcDialOption grpc.DialOption
 		return "", nil, lookupError
 	}
 	var rc *http.Response
-	if filename, _, rc, e = util.DownloadFile(fileUrl, jwt); e != nil {
+	if filename, _, rc, e = util_http.DownloadFile(fileUrl, jwt); e != nil {
 		return "", nil, e
 	}
-	defer util.CloseResponse(rc)
+	defer util_http.CloseResponse(rc)
 	content, e = io.ReadAll(rc.Body)
 	return
 }

View File

@@ -344,7 +344,12 @@ func (worker *FileCopyWorker) uploadFileAsOne(task FileCopyTask, f *os.File) err
 		return err
 	}

-	finalFileId, uploadResult, flushErr, _ := operation.UploadWithRetry(
+	uploader, uploaderErr := operation.NewUploader()
+	if uploaderErr != nil {
+		return uploaderErr
+	}
+
+	finalFileId, uploadResult, flushErr, _ := uploader.UploadWithRetry(
 		worker,
 		&filer_pb.AssignVolumeRequest{
 			Count: 1,
@@ -423,7 +428,13 @@ func (worker *FileCopyWorker) uploadFileInChunks(task FileCopyTask, f *os.File,
 				<-concurrentChunks
 			}()

-			fileId, uploadResult, err, _ := operation.UploadWithRetry(
+			uploader, err := operation.NewUploader()
+			if err != nil {
+				uploadError = fmt.Errorf("upload data %v: %v\n", fileName, err)
+				return
+			}
+
+			fileId, uploadResult, err, _ := uploader.UploadWithRetry(
 				worker,
 				&filer_pb.AssignVolumeRequest{
 					Count: 1,
@@ -535,8 +546,12 @@ func detectMimeType(f *os.File) string {
 }

 func (worker *FileCopyWorker) saveDataAsChunk(reader io.Reader, name string, offset int64, tsNs int64) (chunk *filer_pb.FileChunk, err error) {
+	uploader, uploaderErr := operation.NewUploader()
+	if uploaderErr != nil {
+		return nil, fmt.Errorf("upload data: %v", uploaderErr)
+	}

-	finalFileId, uploadResult, flushErr, _ := operation.UploadWithRetry(
+	finalFileId, uploadResult, flushErr, _ := uploader.UploadWithRetry(
 		worker,
 		&filer_pb.AssignVolumeRequest{
 			Count: 1,

View File

@@ -94,10 +94,14 @@ allowed_commonNames = "" # comma-separated SSL certificate common names
 [grpc.client]
 cert = ""
 key = ""
+
-# Note: work in progress!
-# this does not work with other clients, e.g., "weed filer|mount" etc, yet.
+# https client for master|volume|filer|etc connection
+# It is necessary that the parameters [https.volume]|[https.master]|[https.filer] are set
 [https.client]
 enabled = true
+cert = ""
+key = ""
+ca = ""

 # volume server https options
 [https.volume]
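For reference, the new [https.client] keys can be read with the same viper helpers the other diffs here already use for [grpc.client]. A hedged sketch only: the key paths mirror the scaffold above, but the exact wiring inside NewHttpClient and CheckIsHttpsClientEnabled is not shown in this excerpt and may differ.

package main

import (
	"fmt"

	"github.com/seaweedfs/seaweedfs/weed/util"
)

func main() {
	// Load security.toml via the existing helper, then inspect the new section.
	util.LoadSecurityConfiguration()
	v := util.GetViper()
	if v.GetBool("https.client.enabled") {
		fmt.Println("cert:", v.GetString("https.client.cert"))
		fmt.Println("key:", v.GetString("https.client.key"))
		fmt.Println("ca:", v.GetString("https.client.ca"))
	}
}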

View File

@@ -21,6 +21,7 @@ import (
 	"github.com/seaweedfs/seaweedfs/weed/glog"
 	"github.com/seaweedfs/seaweedfs/weed/util"
 	"golang.org/x/net/context/ctxhttp"
+	util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
 )

 //copied from https://github.com/restic/restic/tree/master/internal/selfupdate
@@ -198,7 +199,7 @@ func GitHubLatestRelease(ctx context.Context, ver string, owner, repo string) (R
 	if err != nil {
 		return Release{}, err
 	}
-	defer util.CloseResponse(res)
+	defer util_http.CloseResponse(res)

 	if res.StatusCode != http.StatusOK {
 		content := res.Header.Get("Content-Type")
@@ -258,7 +259,7 @@ func getGithubData(ctx context.Context, url string) ([]byte, error) {
 	if err != nil {
 		return nil, err
 	}
-	defer util.CloseResponse(res)
+	defer util_http.CloseResponse(res)

 	if res.StatusCode != http.StatusOK {
 		return nil, fmt.Errorf("unexpected status %v (%v) returned", res.StatusCode, res.Status)

View File

@@ -15,6 +15,7 @@ import (
 	"github.com/seaweedfs/seaweedfs/weed/glog"
 	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
 	"github.com/seaweedfs/seaweedfs/weed/util"
+	util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
 )

 const (
@@ -120,7 +121,7 @@ func fetchChunkRange(buffer []byte, lookupFileIdFn wdclient.LookupFileIdFunction
 		glog.Errorf("operation LookupFileId %s failed, err: %v", fileId, err)
 		return 0, err
 	}
-	return util.RetriedFetchChunkData(buffer, urlStrings, cipherKey, isGzipped, false, offset)
+	return util_http.RetriedFetchChunkData(buffer, urlStrings, cipherKey, isGzipped, false, offset)
 }

 func retriedStreamFetchChunkData(writer io.Writer, urlStrings []string, jwt string, cipherKey []byte, isGzipped bool, isFullChunk bool, offset int64, size int) (err error) {
@@ -132,7 +133,7 @@ func retriedStreamFetchChunkData(writer io.Writer, urlStrings []string, jwt stri
 		for _, urlString := range urlStrings {
 			var localProcessed int
 			var writeErr error
-			shouldRetry, err = util.ReadUrlAsStreamAuthenticated(urlString+"?readDeleted=true", jwt, cipherKey, isGzipped, isFullChunk, offset, size, func(data []byte) {
+			shouldRetry, err = util_http.ReadUrlAsStreamAuthenticated(urlString+"?readDeleted=true", jwt, cipherKey, isGzipped, isFullChunk, offset, size, func(data []byte) {
 				if totalWritten > localProcessed {
 					toBeSkipped := totalWritten - localProcessed
 					if len(data) <= toBeSkipped {

View File

@@ -77,7 +77,13 @@ func (f *Filer) assignAndUpload(targetFile string, data []byte) (*operation.Assi
 		PairMap:           nil,
 		Jwt:               assignResult.Auth,
 	}
-	uploadResult, err := operation.UploadData(data, uploadOption)
+
+	uploader, err := operation.NewUploader()
+	if err != nil {
+		return nil, nil, fmt.Errorf("upload data %s: %v", targetUrl, err)
+	}
+
+	uploadResult, err := uploader.UploadData(data, uploadOption)
 	if err != nil {
 		return nil, nil, fmt.Errorf("upload data %s: %v", targetUrl, err)
 	}

View File

@@ -2,7 +2,6 @@ package filer

 import (
 	"fmt"
-	"github.com/seaweedfs/seaweedfs/weed/util"
 	"sync"
 	"sync/atomic"
 	"time"
@@ -10,6 +9,7 @@ import (
 	"github.com/seaweedfs/seaweedfs/weed/util/chunk_cache"
 	"github.com/seaweedfs/seaweedfs/weed/util/mem"
 	"github.com/seaweedfs/seaweedfs/weed/wdclient"
+	util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
 )

 type ReaderCache struct {
@@ -171,7 +171,7 @@ func (s *SingleChunkCacher) startCaching() {
 	s.data = mem.Allocate(s.chunkSize)

-	_, s.err = util.RetriedFetchChunkData(s.data, urlStrings, s.cipherKey, s.isGzipped, true, 0)
+	_, s.err = util_http.RetriedFetchChunkData(s.data, urlStrings, s.cipherKey, s.isGzipped, true, 0)
 	if s.err != nil {
 		mem.Free(s.data)
 		s.data = nil

View File

@@ -16,6 +16,7 @@ import (
 	"github.com/seaweedfs/seaweedfs/weed/stats"
 	"github.com/seaweedfs/seaweedfs/weed/util"
 	"github.com/seaweedfs/seaweedfs/weed/wdclient"
+	util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
 )

 var getLookupFileIdBackoffSchedule = []time.Duration{
@@ -194,7 +195,7 @@ func ReadAll(buffer []byte, masterClient *wdclient.MasterClient, chunks []*filer
 			return err
 		}

-		n, err := util.RetriedFetchChunkData(buffer[idx:idx+int(chunkView.ViewSize)], urlStrings, chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.OffsetInChunk)
+		n, err := util_http.RetriedFetchChunkData(buffer[idx:idx+int(chunkView.ViewSize)], urlStrings, chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.OffsetInChunk)
 		if err != nil {
 			return err
 		}
@@ -350,7 +351,7 @@ func (c *ChunkStreamReader) fetchChunkToBuffer(chunkView *ChunkView) error {
 	var buffer bytes.Buffer
 	var shouldRetry bool
 	for _, urlString := range urlStrings {
-		shouldRetry, err = util.ReadUrlAsStream(urlString+"?readDeleted=true", chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.OffsetInChunk, int(chunkView.ViewSize), func(data []byte) {
+		shouldRetry, err = util_http.ReadUrlAsStream(urlString+"?readDeleted=true", chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.OffsetInChunk, int(chunkView.ViewSize), func(data []byte) {
 			buffer.Write(data)
 		})
 		if !shouldRetry {

View File

@@ -14,8 +14,12 @@ import (
 func (wfs *WFS) saveDataAsChunk(fullPath util.FullPath) filer.SaveDataAsChunkFunctionType {

 	return func(reader io.Reader, filename string, offset int64, tsNs int64) (chunk *filer_pb.FileChunk, err error) {
+		uploader, err := operation.NewUploader()
+		if err != nil {
+			return
+		}

-		fileId, uploadResult, err, data := operation.UploadWithRetry(
+		fileId, uploadResult, err, data := uploader.UploadWithRetry(
 			wfs,
 			&filer_pb.AssignVolumeRequest{
 				Count: 1,

View File

@@ -13,6 +13,7 @@ import (
 	"math"
 	"sync/atomic"
 	"time"
+	util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
 )

 func (b *MessageQueueBroker) genLogFlushFunc(t topic.Topic, partition *mq_pb.Partition) log_buffer.LogFlushFuncType {
@@ -131,7 +132,7 @@ func (b *MessageQueueBroker) genLogOnDiskReadFunc(t topic.Topic, partition *mq_p
 		for _, urlString := range urlStrings {
 			// TODO optimization opportunity: reuse the buffer
 			var data []byte
-			if data, _, err = util.Get(urlString); err == nil {
+			if data, _, err = util_http.Get(urlString); err == nil {
 				processed = true
 				if processedTsNs, err = eachChunkFn(data, eachLogEntryFn, starTsNs, stopTsNs); err != nil {
 					return

View File

@@ -55,7 +55,13 @@ func (b *MessageQueueBroker) appendToFile(targetFile string, data []byte) error
 func (b *MessageQueueBroker) assignAndUpload(targetFile string, data []byte) (fileId string, uploadResult *operation.UploadResult, err error) {

 	reader := util.NewBytesReader(data)
-	fileId, uploadResult, err, _ = operation.UploadWithRetry(
+
+	uploader, err := operation.NewUploader()
+	if err != nil {
+		return
+	}
+
+	fileId, uploadResult, err, _ = uploader.UploadWithRetry(
 		b,
 		&filer_pb.AssignVolumeRequest{
 			Count: 1,

View File

@@ -9,6 +9,7 @@ import (
 	"strings"
 	"sync"
 	"time"
+	util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
 )

 var (
@@ -45,6 +46,7 @@ func doPublish(publisher *pub_client.TopicPublisher, id int) {
 func main() {
 	flag.Parse()
+	util_http.InitGlobalHttpClient()

 	config := &pub_client.PublisherConfiguration{
 		Topic: topic.NewTopic(*namespace, *t),

View File

@@ -11,6 +11,7 @@ import (
 	"strings"
 	"sync"
 	"time"
+	util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
 )

 var (
@@ -86,6 +87,7 @@ func (r *MyRecord) ToRecordValue() *schema_pb.RecordValue {
 func main() {
 	flag.Parse()
+	util_http.InitGlobalHttpClient()

 	recordType := schema.RecordTypeBegin().
 		WithField("key", schema.TypeBytes).

View File

@@ -11,6 +11,7 @@ import (
 	"google.golang.org/grpc/credentials/insecure"
 	"strings"
 	"time"
+	util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
 )

 var (
@@ -23,6 +24,7 @@ var (
 func main() {
 	flag.Parse()
+	util_http.InitGlobalHttpClient()

 	subscriberConfig := &sub_client.SubscriberConfiguration{
 		ClientId: fmt.Sprintf("client-%d", *clientId),

View File

@@ -13,6 +13,7 @@ import (
 	"google.golang.org/protobuf/proto"
 	"strings"
 	"time"
+	util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
 )

 var (
@@ -49,6 +50,7 @@ func FromSchemaRecordValue(recordValue *schema_pb.RecordValue) *MyRecord {
 func main() {
 	flag.Parse()
+	util_http.InitGlobalHttpClient()

 	subscriberConfig := &sub_client.SubscriberConfiguration{
 		ClientId: fmt.Sprintf("client-%d", *clientId),

View File

@@ -15,6 +15,7 @@ import (
 	"github.com/seaweedfs/seaweedfs/weed/glog"
 	"github.com/seaweedfs/seaweedfs/weed/pb"
 	"github.com/seaweedfs/seaweedfs/weed/util"
+	util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
 )

 var (
@@ -103,11 +104,11 @@ func readChunkNeedle(fileUrl string, w io.Writer, offset int64, jwt string) (wri
 		req.Header.Set("Range", fmt.Sprintf("bytes=%d-", offset))
 	}

-	resp, err := util.Do(req)
+	resp, err := util_http.Do(req)
 	if err != nil {
 		return written, err
 	}
-	defer util.CloseResponse(resp)
+	defer util_http.CloseResponse(resp)

 	switch resp.StatusCode {
 	case http.StatusRequestedRangeNotSatisfiable:

View File

@@ -38,15 +38,11 @@ If the content is already compressed, need to know the content size.
 */

 func TestCreateNeedleFromRequest(t *testing.T) {
-	mc := &MockClient{}
-	tmp := HttpClient
-	HttpClient = mc
-	defer func() {
-		HttpClient = tmp
-	}()
+	mockClient := &MockClient{}
+	uploader := newUploader(mockClient)

 	{
-		mc.needleHandling = func(n *needle.Needle, originalSize int, err error) {
+		mockClient.needleHandling = func(n *needle.Needle, originalSize int, err error) {
 			assert.Equal(t, nil, err, "upload: %v", err)
 			assert.Equal(t, "", string(n.Mime), "mime detection failed: %v", string(n.Mime))
 			assert.Equal(t, true, n.IsCompressed(), "this should be compressed")
@@ -62,7 +58,7 @@ func TestCreateNeedleFromRequest(t *testing.T) {
 			PairMap:       nil,
 			Jwt:           "",
 		}
-		uploadResult, err, data := Upload(bytes.NewReader([]byte(textContent)), uploadOption)
+		uploadResult, err, data := uploader.Upload(bytes.NewReader([]byte(textContent)), uploadOption)
 		if len(data) != len(textContent) {
 			t.Errorf("data actual %d expected %d", len(data), len(textContent))
 		}
@@ -73,7 +69,7 @@ func TestCreateNeedleFromRequest(t *testing.T) {
 	}

 	{
-		mc.needleHandling = func(n *needle.Needle, originalSize int, err error) {
+		mockClient.needleHandling = func(n *needle.Needle, originalSize int, err error) {
 			assert.Equal(t, nil, err, "upload: %v", err)
 			assert.Equal(t, "text/plain", string(n.Mime), "mime detection failed: %v", string(n.Mime))
 			assert.Equal(t, true, n.IsCompressed(), "this should be compressed")
@@ -90,7 +86,7 @@ func TestCreateNeedleFromRequest(t *testing.T) {
 			PairMap:       nil,
 			Jwt:           "",
 		}
-		Upload(bytes.NewReader(gzippedData), uploadOption)
+		uploader.Upload(bytes.NewReader(gzippedData), uploadOption)
 	}

 /*

View File

@@ -217,7 +217,13 @@ func (fi FilePart) Upload(maxMB int, masterFn GetMasterFn, usePublicUrl bool, jw
 				PairMap:           nil,
 				Jwt:               jwt,
 			}
-			ret, e, _ := Upload(fi.Reader, uploadOption)
+
+			uploader, e := NewUploader()
+			if e != nil {
+				return 0, e
+			}
+
+			ret, e, _ := uploader.Upload(fi.Reader, uploadOption)
 			if e != nil {
 				return 0, e
 			}
@@ -239,7 +245,13 @@ func upload_one_chunk(filename string, reader io.Reader, masterFn GetMasterFn,
 		PairMap:           nil,
 		Jwt:               jwt,
 	}
-	uploadResult, uploadError, _ := Upload(reader, uploadOption)
+
+	uploader, uploaderError := NewUploader()
+	if uploaderError != nil {
+		return 0, uploaderError
+	}
+
+	uploadResult, uploadError, _ := uploader.Upload(reader, uploadOption)
 	if uploadError != nil {
 		return 0, uploadError
 	}
@@ -265,6 +277,12 @@ func upload_chunked_file_manifest(fileUrl string, manifest *ChunkManifest, jwt s
 		PairMap:           nil,
 		Jwt:               jwt,
 	}
-	_, e = UploadData(buf, uploadOption)
+
+	uploader, e := NewUploader()
+	if e != nil {
+		return e
+	}
+
+	_, e = uploader.UploadData(buf, uploadOption)
 	return e
 }

View File

@@ -9,7 +9,7 @@ import (
 	"io"
 	"mime"
 	"mime/multipart"
-	"net"
+	"sync"
 	"net/http"
 	"net/textproto"
 	"path/filepath"
@@ -21,6 +21,8 @@ import (
 	"github.com/seaweedfs/seaweedfs/weed/security"
 	"github.com/seaweedfs/seaweedfs/weed/stats"
 	"github.com/seaweedfs/seaweedfs/weed/util"
+	util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
+	util_http_client "github.com/seaweedfs/seaweedfs/weed/util/http/client"
 )

 type UploadOption struct {
@@ -62,29 +64,47 @@ func (uploadResult *UploadResult) ToPbFileChunk(fileId string, offset int64, tsN
 	}
 }

+var (
+	fileNameEscaper = strings.NewReplacer(`\`, `\\`, `"`, `\"`, "\n", "")
+	uploader        *Uploader
+	uploaderErr     error
+	once            sync.Once
+)
+
 // HTTPClient interface for testing
 type HTTPClient interface {
 	Do(req *http.Request) (*http.Response, error)
 }

-var (
-	HttpClient HTTPClient
-)
+// Uploader
+type Uploader struct {
+	httpClient HTTPClient
+}

-func init() {
-	HttpClient = &http.Client{Transport: &http.Transport{
-		DialContext: (&net.Dialer{
-			Timeout:   10 * time.Second,
-			KeepAlive: 10 * time.Second,
-		}).DialContext,
-		MaxIdleConns:        1024,
-		MaxIdleConnsPerHost: 1024,
-	}}
+func NewUploader() (*Uploader, error) {
+	once.Do(func () {
+		// With Dial context
+		var httpClient *util_http_client.HTTPClient
+		httpClient, uploaderErr = util_http.NewGlobalHttpClient(util_http_client.AddDialContext)
+		if uploaderErr != nil {
+			uploaderErr = fmt.Errorf("error initializing the loader: %s", uploaderErr)
+		}
+		if httpClient != nil {
+			uploader = newUploader(httpClient)
+		}
+	})
+	return uploader, uploaderErr
+}
+
+func newUploader(httpClient HTTPClient) (*Uploader) {
+	return &Uploader{
+		httpClient: httpClient,
+	}
 }

 // UploadWithRetry will retry both assigning volume request and uploading content
 // The option parameter does not need to specify UploadUrl and Jwt, which will come from assigning volume.
-func UploadWithRetry(filerClient filer_pb.FilerClient, assignRequest *filer_pb.AssignVolumeRequest, uploadOption *UploadOption, genFileUrlFn func(host, fileId string) string, reader io.Reader) (fileId string, uploadResult *UploadResult, err error, data []byte) {
+func (uploader *Uploader) UploadWithRetry(filerClient filer_pb.FilerClient, assignRequest *filer_pb.AssignVolumeRequest, uploadOption *UploadOption, genFileUrlFn func(host, fileId string) string, reader io.Reader) (fileId string, uploadResult *UploadResult, err error, data []byte) {
 	doUploadFunc := func() error {

 		var host string
@@ -114,7 +134,7 @@ func UploadWithRetry(filerClient filer_pb.FilerClient, assignRequest *filer_pb.A
 		uploadOption.Jwt = auth

 		var uploadErr error
-		uploadResult, uploadErr, data = doUpload(reader, uploadOption)
+		uploadResult, uploadErr, data = uploader.doUpload(reader, uploadOption)
 		return uploadErr
 	}
 	if uploadOption.RetryForever {
@@ -130,21 +150,19 @@ func UploadWithRetry(filerClient filer_pb.FilerClient, assignRequest *filer_pb.A
 	return
 }

-var fileNameEscaper = strings.NewReplacer(`\`, `\\`, `"`, `\"`, "\n", "")
-
 // Upload sends a POST request to a volume server to upload the content with adjustable compression level
-func UploadData(data []byte, option *UploadOption) (uploadResult *UploadResult, err error) {
-	uploadResult, err = retriedUploadData(data, option)
+func (uploader *Uploader) UploadData(data []byte, option *UploadOption) (uploadResult *UploadResult, err error) {
+	uploadResult, err = uploader.retriedUploadData(data, option)
 	return
 }

 // Upload sends a POST request to a volume server to upload the content with fast compression
-func Upload(reader io.Reader, option *UploadOption) (uploadResult *UploadResult, err error, data []byte) {
-	uploadResult, err, data = doUpload(reader, option)
+func (uploader *Uploader) Upload(reader io.Reader, option *UploadOption) (uploadResult *UploadResult, err error, data []byte) {
+	uploadResult, err, data = uploader.doUpload(reader, option)
 	return
 }

-func doUpload(reader io.Reader, option *UploadOption) (uploadResult *UploadResult, err error, data []byte) {
+func (uploader *Uploader) doUpload(reader io.Reader, option *UploadOption) (uploadResult *UploadResult, err error, data []byte) {
 	bytesReader, ok := reader.(*util.BytesReader)
 	if ok {
 		data = bytesReader.Bytes
@@ -155,16 +173,16 @@ func doUpload(reader io.Reader, option *UploadOption) (uploadResult *UploadResul
 			return
 		}
 	}
-	uploadResult, uploadErr := retriedUploadData(data, option)
+	uploadResult, uploadErr := uploader.retriedUploadData(data, option)
 	return uploadResult, uploadErr, data
 }

-func retriedUploadData(data []byte, option *UploadOption) (uploadResult *UploadResult, err error) {
+func (uploader *Uploader) retriedUploadData(data []byte, option *UploadOption) (uploadResult *UploadResult, err error) {
 	for i := 0; i < 3; i++ {
 		if i > 0 {
 			time.Sleep(time.Millisecond * time.Duration(237*(i+1)))
 		}
-		uploadResult, err = doUploadData(data, option)
+		uploadResult, err = uploader.doUploadData(data, option)
 		if err == nil {
 			uploadResult.RetryCount = i
 			return
@@ -174,7 +192,7 @@ func retriedUploadData(data []byte, option *UploadOption) (uploadResult *UploadR
 	return
 }

-func doUploadData(data []byte, option *UploadOption) (uploadResult *UploadResult, err error) {
+func (uploader *Uploader) doUploadData(data []byte, option *UploadOption) (uploadResult *UploadResult, err error) {
 	contentIsGzipped := option.IsInputCompressed
 	shouldGzipNow := false
 	if !option.IsInputCompressed {
@@ -230,7 +248,7 @@ func doUploadData(data []byte, option *UploadOption) (uploadResult *UploadResult
 		}

 		// upload data
-		uploadResult, err = upload_content(func(w io.Writer) (err error) {
+		uploadResult, err = uploader.upload_content(func(w io.Writer) (err error) {
 			_, err = w.Write(encryptedData)
 			return
 		}, len(encryptedData), &UploadOption{
@@ -251,7 +269,7 @@ func doUploadData(data []byte, option *UploadOption) (uploadResult *UploadResult
 		uploadResult.Size = uint32(clearDataLen)
 	} else {
 		// upload data
-		uploadResult, err = upload_content(func(w io.Writer) (err error) {
+		uploadResult, err = uploader.upload_content(func(w io.Writer) (err error) {
 			_, err = w.Write(data)
 			return
 		}, len(data), &UploadOption{
@@ -277,7 +295,7 @@ func doUploadData(data []byte, option *UploadOption) (uploadResult *UploadResult
 	return uploadResult, err
 }

-func upload_content(fillBufferFunction func(w io.Writer) error, originalDataSize int, option *UploadOption) (*UploadResult, error) {
+func (uploader *Uploader) upload_content(fillBufferFunction func(w io.Writer) error, originalDataSize int, option *UploadOption) (*UploadResult, error) {
 	var body_writer *multipart.Writer
 	var reqReader *bytes.Reader
 	var buf *bytebufferpool.ByteBuffer
@@ -338,15 +356,15 @@ func upload_content(fillBufferFunction func(w io.Writer) error, originalDataSize
 		req.Header.Set("Authorization", "BEARER "+string(option.Jwt))
 	}
 	// print("+")
-	resp, post_err := HttpClient.Do(req)
-	defer util.CloseResponse(resp)
+	resp, post_err := uploader.httpClient.Do(req)
+	defer util_http.CloseResponse(resp)
 	if post_err != nil {
 		if strings.Contains(post_err.Error(), "connection reset by peer") ||
 			strings.Contains(post_err.Error(), "use of closed network connection") {
 			glog.V(1).Infof("repeat error upload request %s: %v", option.UploadUrl, postErr)
 			stats.FilerHandlerCounter.WithLabelValues(stats.RepeatErrorUploadContent).Inc()
-			resp, post_err = HttpClient.Do(req)
-			defer util.CloseResponse(resp)
+			resp, post_err = uploader.httpClient.Do(req)
+			defer util_http.CloseResponse(resp)
 		}
 	}
 	if post_err != nil {
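Every former call to the package-level operation.Upload/UploadData/UploadWithRetry now goes through an Uploader obtained from NewUploader(), which builds its HTTP client once via sync.Once and can therefore fail. A hedged usage sketch, assuming only the signatures in the hunk above (the UploadUrl value is illustrative; other UploadOption fields keep their zero values):

package main

import (
	"log"

	"github.com/seaweedfs/seaweedfs/weed/operation"
)

func main() {
	// NewUploader may return an error now that the client is built lazily,
	// so every call site checks it instead of relying on a package-level default.
	uploader, err := operation.NewUploader()
	if err != nil {
		log.Fatalf("new uploader: %v", err)
	}

	uploadOption := &operation.UploadOption{
		UploadUrl: "http://localhost:8080/3,01637037d6", // illustrative volume URL + fid
	}
	result, err := uploader.UploadData([]byte("hello"), uploadOption)
	if err != nil {
		log.Fatalf("upload: %v", err)
	}
	log.Printf("uploaded %d bytes, retries: %d", result.Size, result.RetryCount)
}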

View File

@@ -4,7 +4,7 @@ import (
 	"github.com/seaweedfs/seaweedfs/weed/filer"
 	"github.com/seaweedfs/seaweedfs/weed/glog"
 	"github.com/seaweedfs/seaweedfs/weed/replication/source"
-	"github.com/seaweedfs/seaweedfs/weed/util"
+	util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
 )

 func CopyFromChunkViews(chunkViews *filer.IntervalList[*filer.ChunkView], filerSource *source.FilerSource, writeFunc func(data []byte) error) error {
@@ -21,7 +21,7 @@ func CopyFromChunkViews(chunkViews *filer.IntervalList[*filer.ChunkView], filerS
 		var shouldRetry bool

 		for _, fileUrl := range fileUrls {
-			shouldRetry, err = util.ReadUrlAsStream(fileUrl, chunk.CipherKey, chunk.IsGzipped, chunk.IsFullChunk(), chunk.OffsetInChunk, int(chunk.ViewSize), func(data []byte) {
+			shouldRetry, err = util_http.ReadUrlAsStream(fileUrl, chunk.CipherKey, chunk.IsGzipped, chunk.IsFullChunk(), chunk.OffsetInChunk, int(chunk.ViewSize), func(data []byte) {
 				writeErr = writeFunc(data)
 			})
 			if err != nil {

View File

@@ -14,6 +14,7 @@ import (
 	"github.com/seaweedfs/seaweedfs/weed/operation"
 	"github.com/seaweedfs/seaweedfs/weed/pb"
 	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
+	util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
 )

 func (fs *FilerSink) replicateChunks(sourceChunks []*filer_pb.FileChunk, path string) (replicatedChunks []*filer_pb.FileChunk, err error) {
@@ -88,9 +89,15 @@ func (fs *FilerSink) fetchAndWrite(sourceChunk *filer_pb.FileChunk, path string)
 	if err != nil {
 		return "", fmt.Errorf("read part %s: %v", sourceChunk.GetFileIdString(), err)
 	}
-	defer util.CloseResponse(resp)
+	defer util_http.CloseResponse(resp)

-	fileId, uploadResult, err, _ := operation.UploadWithRetry(
+	uploader, err := operation.NewUploader()
+	if err != nil {
+		glog.V(0).Infof("upload source data %v: %v", sourceChunk.GetFileIdString(), err)
+		return "", fmt.Errorf("upload data: %v", err)
+	}
+
+	fileId, uploadResult, err, _ := uploader.UploadWithRetry(
 		fs,
 		&filer_pb.AssignVolumeRequest{
 			Count: 1,

View File

@@ -15,6 +15,7 @@ import (
 	"github.com/seaweedfs/seaweedfs/weed/glog"
 	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
 	"github.com/seaweedfs/seaweedfs/weed/util"
+	util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
 )

 type ReplicationSource interface {
@@ -106,7 +107,7 @@ func (fs *FilerSource) LookupFileId(part string) (fileUrls []string, err error)
 func (fs *FilerSource) ReadPart(fileId string) (filename string, header http.Header, resp *http.Response, err error) {

 	if fs.proxyByFiler {
-		return util.DownloadFile("http://"+fs.address+"/?proxyChunkId="+fileId, "")
+		return util_http.DownloadFile("http://"+fs.address+"/?proxyChunkId="+fileId, "")
 	}

 	fileUrls, err := fs.LookupFileId(fileId)
@@ -115,7 +116,7 @@ func (fs *FilerSource) ReadPart(fileId string) (filename string, header http.Hea
 	}

 	for _, fileUrl := range fileUrls {
-		filename, header, resp, err = util.DownloadFile(fileUrl, "")
+		filename, header, resp, err = util_http.DownloadFile(fileUrl, "")
 		if err != nil {
 			glog.V(1).Infof("fail to read from %s: %v", fileUrl, err)
 		} else {

View File

@@ -9,9 +9,9 @@ import (
 	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
 	"github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants"
 	"github.com/seaweedfs/seaweedfs/weed/s3api/s3err"
-	"github.com/seaweedfs/seaweedfs/weed/util"
 	"net/http"
 	"strings"
+	util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
 )

 type AccountManager interface {
@@ -32,7 +32,7 @@ func GetAccountId(r *http.Request) string {
 // ExtractAcl extracts the acl from the request body, or from the header if request body is empty
 func ExtractAcl(r *http.Request, accountManager AccountManager, ownership, bucketOwnerId, ownerId, accountId string) (grants []*s3.Grant, errCode s3err.ErrorCode) {
 	if r.Body != nil && r.Body != http.NoBody {
-		defer util.CloseRequest(r)
+		defer util_http.CloseRequest(r)

 		var acp s3.AccessControlPolicy
 		err := xmlutil.UnmarshalXML(&acp, xml.NewDecoder(r.Body), "")

View File

@@ -13,7 +13,6 @@ import (
 	"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
 	"github.com/seaweedfs/seaweedfs/weed/s3api/s3bucket"
-	"github.com/seaweedfs/seaweedfs/weed/util"

 	"github.com/seaweedfs/seaweedfs/weed/filer"
 	"github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants"
@@ -26,6 +25,7 @@ import (
 	"github.com/seaweedfs/seaweedfs/weed/glog"
 	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
+	util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
 )

 func (s3a *S3ApiServer) ListBucketsHandler(w http.ResponseWriter, r *http.Request) {
@@ -507,7 +507,7 @@ func (s3a *S3ApiServer) PutBucketOwnershipControls(w http.ResponseWriter, r *htt
 	}

 	var v s3.OwnershipControls
-	defer util.CloseRequest(r)
+	defer util_http.CloseRequest(r)

 	err := xmlutil.UnmarshalXML(&v, xml.NewDecoder(r.Body), "")
 	if err != nil {

View File

@@ -16,7 +16,7 @@ import (
 	"github.com/seaweedfs/seaweedfs/weed/util/mem"

 	"github.com/seaweedfs/seaweedfs/weed/glog"
-	"github.com/seaweedfs/seaweedfs/weed/util"
+	util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
 )

 func mimeDetect(r *http.Request, dataReader io.Reader) io.ReadCloser {
@@ -171,7 +171,7 @@ func (s3a *S3ApiServer) proxyToFiler(w http.ResponseWriter, r *http.Request, des
 		s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
 		return
 	}
-	defer util.CloseResponse(resp)
+	defer util_http.CloseResponse(resp)

 	if resp.StatusCode == http.StatusPreconditionFailed {
 		s3err.WriteErrorResponse(w, r, s3err.ErrPreconditionFailed)

View File

@@ -14,6 +14,7 @@ import (
 	"github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants"
 	"github.com/seaweedfs/seaweedfs/weed/s3api/s3err"
 	"github.com/seaweedfs/seaweedfs/weed/util"
+	util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
 )

 const (
@@ -87,12 +88,12 @@ func (s3a *S3ApiServer) CopyObjectHandler(w http.ResponseWriter, r *http.Request
 	srcUrl := fmt.Sprintf("http://%s%s/%s%s",
 		s3a.option.Filer.ToHttpAddress(), s3a.option.BucketsPath, srcBucket, urlEscapeObject(srcObject))

-	_, _, resp, err := util.DownloadFile(srcUrl, s3a.maybeGetFilerJwtAuthorizationToken(false))
+	_, _, resp, err := util_http.DownloadFile(srcUrl, s3a.maybeGetFilerJwtAuthorizationToken(false))
 	if err != nil {
 		s3err.WriteErrorResponse(w, r, s3err.ErrInvalidCopySource)
 		return
 	}
-	defer util.CloseResponse(resp)
+	defer util_http.CloseResponse(resp)

 	tagErr := processMetadata(r.Header, resp.Header, replaceMeta, replaceTagging, s3a.getTags, dir, name)
 	if tagErr != nil {
@@ -175,12 +176,12 @@ func (s3a *S3ApiServer) CopyObjectPartHandler(w http.ResponseWriter, r *http.Req
 	srcUrl := fmt.Sprintf("http://%s%s/%s%s",
 		s3a.option.Filer.ToHttpAddress(), s3a.option.BucketsPath, srcBucket, urlEscapeObject(srcObject))

-	resp, dataReader, err := util.ReadUrlAsReaderCloser(srcUrl, s3a.maybeGetFilerJwtAuthorizationToken(false), rangeHeader)
+	resp, dataReader, err := util_http.ReadUrlAsReaderCloser(srcUrl, s3a.maybeGetFilerJwtAuthorizationToken(false), rangeHeader)
 	if err != nil {
 		s3err.WriteErrorResponse(w, r, s3err.ErrInvalidCopySource)
 		return
 	}
-	defer util.CloseResponse(resp)
+	defer util_http.CloseResponse(resp)
 	defer dataReader.Close()

 	glog.V(2).Infof("copy from %s to %s", srcUrl, dstUrl)

View File

@ -20,6 +20,8 @@ import (
"github.com/seaweedfs/seaweedfs/weed/security" "github.com/seaweedfs/seaweedfs/weed/security"
"github.com/seaweedfs/seaweedfs/weed/util" "github.com/seaweedfs/seaweedfs/weed/util"
"google.golang.org/grpc" "google.golang.org/grpc"
util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
util_http_client "github.com/seaweedfs/seaweedfs/weed/util/http/client"
) )
type S3ApiServerOption struct { type S3ApiServerOption struct {
@ -44,7 +46,7 @@ type S3ApiServer struct {
cb *CircuitBreaker cb *CircuitBreaker
randomClientId int32 randomClientId int32
filerGuard *security.Guard filerGuard *security.Guard
client *http.Client client util_http_client.HTTPClientInterface
bucketRegistry *BucketRegistry bucketRegistry *BucketRegistry
} }
@ -84,10 +86,9 @@ func NewS3ApiServer(router *mux.Router, option *S3ApiServerOption) (s3ApiServer
} }
s3ApiServer.bucketRegistry = NewBucketRegistry(s3ApiServer) s3ApiServer.bucketRegistry = NewBucketRegistry(s3ApiServer)
if option.LocalFilerSocket == "" { if option.LocalFilerSocket == "" {
s3ApiServer.client = &http.Client{Transport: &http.Transport{ if s3ApiServer.client, err = util_http.NewGlobalHttpClient(); err != nil {
MaxIdleConns: 1024, return nil, err
MaxIdleConnsPerHost: 1024, }
}}
} else { } else {
s3ApiServer.client = &http.Client{ s3ApiServer.client = &http.Client{
Transport: &http.Transport{ Transport: &http.Transport{

View File

@ -181,7 +181,12 @@ func submitForClientHandler(w http.ResponseWriter, r *http.Request, masterFn ope
PairMap: pu.PairMap, PairMap: pu.PairMap,
Jwt: assignResult.Auth, Jwt: assignResult.Auth,
} }
uploadResult, err := operation.UploadData(pu.Data, uploadOption) uploader, err := operation.NewUploader()
if err != nil {
writeJsonError(w, r, http.StatusInternalServerError, err)
return
}
uploadResult, err := uploader.UploadData(pu.Data, uploadOption)
if err != nil { if err != nil {
writeJsonError(w, r, http.StatusInternalServerError, err) writeJsonError(w, r, http.StatusInternalServerError, err)
return return
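
Every converted call site in this commit follows the same shape: operation.UploadData and operation.Upload are no longer called as package-level helpers, so callers first construct an Uploader, which can now fail while building its TLS-aware HTTP client. A minimal sketch of the new flow, assuming the operation API as shown in this diff (the uploadBytes wrapper is illustrative, not part of the change):

package example

import (
	"fmt"

	"github.com/seaweedfs/seaweedfs/weed/operation"
)

// uploadBytes shows the two-step pattern: create the uploader, then
// call UploadData on the instance instead of the old package helper.
func uploadBytes(data []byte, opt *operation.UploadOption) (*operation.UploadResult, error) {
	uploader, err := operation.NewUploader()
	if err != nil {
		return nil, fmt.Errorf("create uploader: %v", err)
	}
	result, err := uploader.UploadData(data, opt)
	if err != nil {
		return nil, fmt.Errorf("upload data: %v", err)
	}
	return result, nil
}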

View File

@ -3,24 +3,13 @@ package weed_server
import ( import (
"github.com/seaweedfs/seaweedfs/weed/glog" "github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/security" "github.com/seaweedfs/seaweedfs/weed/security"
"github.com/seaweedfs/seaweedfs/weed/util"
"github.com/seaweedfs/seaweedfs/weed/util/mem" "github.com/seaweedfs/seaweedfs/weed/util/mem"
"io" "io"
"math/rand" "math/rand"
"net/http" "net/http"
util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
) )
var (
client *http.Client
)
func init() {
client = &http.Client{Transport: &http.Transport{
MaxIdleConns: 1024,
MaxIdleConnsPerHost: 1024,
}}
}
func (fs *FilerServer) maybeAddVolumeJwtAuthorization(r *http.Request, fileId string, isWrite bool) { func (fs *FilerServer) maybeAddVolumeJwtAuthorization(r *http.Request, fileId string, isWrite bool) {
encodedJwt := fs.maybeGetVolumeJwtAuthorizationToken(fileId, isWrite) encodedJwt := fs.maybeGetVolumeJwtAuthorizationToken(fileId, isWrite)
@ -71,14 +60,14 @@ func (fs *FilerServer) proxyToVolumeServer(w http.ResponseWriter, r *http.Reques
} }
} }
proxyResponse, postErr := client.Do(proxyReq) proxyResponse, postErr := util_http.GetGlobalHttpClient().Do(proxyReq)
if postErr != nil { if postErr != nil {
glog.Errorf("post to filer: %v", postErr) glog.Errorf("post to filer: %v", postErr)
w.WriteHeader(http.StatusInternalServerError) w.WriteHeader(http.StatusInternalServerError)
return return
} }
defer util.CloseResponse(proxyResponse) defer util_http.CloseResponse(proxyResponse)
for k, v := range proxyResponse.Header { for k, v := range proxyResponse.Header {
w.Header()[k] = v w.Header()[k] = v

View File

@ -18,6 +18,7 @@ import (
"github.com/seaweedfs/seaweedfs/weed/stats" "github.com/seaweedfs/seaweedfs/weed/stats"
"github.com/seaweedfs/seaweedfs/weed/storage/needle" "github.com/seaweedfs/seaweedfs/weed/storage/needle"
"github.com/seaweedfs/seaweedfs/weed/util" "github.com/seaweedfs/seaweedfs/weed/util"
util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
) )
var ( var (
@ -120,7 +121,7 @@ func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request, conte
fs.autoChunk(ctx, w, r, contentLength, so) fs.autoChunk(ctx, w, r, contentLength, so)
} }
util.CloseRequest(r) util_http.CloseRequest(r)
} }

View File

@ -308,8 +308,14 @@ func (fs *FilerServer) saveAsChunk(so *operation.StorageOption) filer.SaveDataAs
PairMap: nil, PairMap: nil,
Jwt: auth, Jwt: auth,
} }
uploader, uploaderErr := operation.NewUploader()
if uploaderErr != nil {
return uploaderErr
}
var uploadErr error var uploadErr error
uploadResult, uploadErr, _ = operation.Upload(reader, uploadOption) uploadResult, uploadErr, _ = uploader.Upload(reader, uploadOption)
if uploadErr != nil { if uploadErr != nil {
return uploadErr return uploadErr
} }

View File

@ -53,7 +53,13 @@ func (fs *FilerServer) encrypt(ctx context.Context, w http.ResponseWriter, r *ht
PairMap: pu.PairMap, PairMap: pu.PairMap,
Jwt: auth, Jwt: auth,
} }
uploadResult, uploadError := operation.UploadData(uncompressedData, uploadOption)
uploader, uploaderErr := operation.NewUploader()
if uploaderErr != nil {
return nil, fmt.Errorf("uploader initialization error: %v", uploaderErr)
}
uploadResult, uploadError := uploader.UploadData(uncompressedData, uploadOption)
if uploadError != nil { if uploadError != nil {
return nil, fmt.Errorf("upload to volume server: %v", uploadError) return nil, fmt.Errorf("upload to volume server: %v", uploadError)
} }

View File

@ -158,7 +158,13 @@ func (fs *FilerServer) doUpload(urlLocation string, limitedReader io.Reader, fil
PairMap: pairMap, PairMap: pairMap,
Jwt: auth, Jwt: auth,
} }
uploadResult, err, data := operation.Upload(limitedReader, uploadOption)
uploader, err := operation.NewUploader()
if err != nil {
return nil, err, []byte{}
}
uploadResult, err, data := uploader.Upload(limitedReader, uploadOption)
if uploadResult != nil && uploadResult.RetryCount > 0 { if uploadResult != nil && uploadResult.RetryCount > 0 {
stats.FilerHandlerCounter.WithLabelValues(stats.ChunkUploadRetry).Add(float64(uploadResult.RetryCount)) stats.FilerHandlerCounter.WithLabelValues(stats.ChunkUploadRetry).Add(float64(uploadResult.RetryCount))
} }

View File

@ -30,6 +30,7 @@ import (
"github.com/seaweedfs/seaweedfs/weed/topology" "github.com/seaweedfs/seaweedfs/weed/topology"
"github.com/seaweedfs/seaweedfs/weed/util" "github.com/seaweedfs/seaweedfs/weed/util"
"github.com/seaweedfs/seaweedfs/weed/wdclient" "github.com/seaweedfs/seaweedfs/weed/wdclient"
util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
) )
const ( const (
@ -256,7 +257,7 @@ func (ms *MasterServer) proxyToLeader(f http.HandlerFunc) http.HandlerFunc {
} }
director(req) director(req)
} }
proxy.Transport = util.Transport proxy.Transport = util_http.GetGlobalHttpClient().GetClientTransport()
proxy.ServeHTTP(w, r) proxy.ServeHTTP(w, r)
} }
} }
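
The leader proxy above now borrows the transport from the shared client instead of the removed package-level util.Transport, so proxied requests reuse the same connection pool and TLS material. A sketch, assuming only the accessors shown in this diff:

package example

import (
	"net/http"
	"net/http/httputil"

	util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
)

// leaderProxy wires a ReverseProxy to the global client's transport.
func leaderProxy(proxy *httputil.ReverseProxy) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		proxy.Transport = util_http.GetGlobalHttpClient().GetClientTransport()
		proxy.ServeHTTP(w, r)
	}
}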

View File

@ -18,6 +18,7 @@ import (
"github.com/seaweedfs/seaweedfs/weed/storage/types" "github.com/seaweedfs/seaweedfs/weed/storage/types"
"github.com/seaweedfs/seaweedfs/weed/topology" "github.com/seaweedfs/seaweedfs/weed/topology"
"github.com/seaweedfs/seaweedfs/weed/util" "github.com/seaweedfs/seaweedfs/weed/util"
util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
) )
func (ms *MasterServer) collectionDeleteHandler(w http.ResponseWriter, r *http.Request) { func (ms *MasterServer) collectionDeleteHandler(w http.ResponseWriter, r *http.Request) {
@ -113,11 +114,11 @@ func (ms *MasterServer) redirectHandler(w http.ResponseWriter, r *http.Request)
location := ms.findVolumeLocation(collection, vid) location := ms.findVolumeLocation(collection, vid)
if location.Error == "" { if location.Error == "" {
loc := location.Locations[rand.Intn(len(location.Locations))] loc := location.Locations[rand.Intn(len(location.Locations))]
var url string url, _ := util_http.NormalizeUrl(loc.PublicUrl)
if r.URL.RawQuery != "" { if r.URL.RawQuery != "" {
url = util.NormalizeUrl(loc.PublicUrl) + r.URL.Path + "?" + r.URL.RawQuery url = url + r.URL.Path + "?" + r.URL.RawQuery
} else { } else {
url = util.NormalizeUrl(loc.PublicUrl) + r.URL.Path url = url + r.URL.Path
} }
http.Redirect(w, r, url, http.StatusPermanentRedirect) http.Redirect(w, r, url, http.StatusPermanentRedirect)
} else { } else {

View File

@ -70,10 +70,15 @@ func (vs *VolumeServer) FetchAndWriteNeedle(ctx context.Context, req *volume_ser
PairMap: nil, PairMap: nil,
Jwt: security.EncodedJwt(req.Auth), Jwt: security.EncodedJwt(req.Auth),
} }
if _, replicaWriteErr := operation.UploadData(data, uploadOption); replicaWriteErr != nil {
if err == nil { uploader, uploaderErr := operation.NewUploader()
err = fmt.Errorf("remote write needle %d size %d: %v", req.NeedleId, req.Size, err) if uploaderErr != nil && err == nil {
} err = fmt.Errorf("remote write needle %d size %d: %v", req.NeedleId, req.Size, uploaderErr)
return
}
if _, replicaWriteErr := uploader.UploadData(data, uploadOption); replicaWriteErr != nil && err == nil {
err = fmt.Errorf("remote write needle %d size %d: %v", req.NeedleId, req.Size, replicaWriteErr)
} }
}(replica.Url) }(replica.Url)
} }

View File

@ -27,6 +27,7 @@ import (
"github.com/seaweedfs/seaweedfs/weed/storage" "github.com/seaweedfs/seaweedfs/weed/storage"
"github.com/seaweedfs/seaweedfs/weed/storage/needle" "github.com/seaweedfs/seaweedfs/weed/storage/needle"
"github.com/seaweedfs/seaweedfs/weed/util" "github.com/seaweedfs/seaweedfs/weed/util"
util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
) )
var fileNameEscaper = strings.NewReplacer(`\`, `\\`, `"`, `\"`) var fileNameEscaper = strings.NewReplacer(`\`, `\\`, `"`, `\"`)
@ -81,7 +82,8 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
} }
if vs.ReadMode == "proxy" { if vs.ReadMode == "proxy" {
// proxy client request to target server // proxy client request to target server
u, _ := url.Parse(util.NormalizeUrl(lookupResult.Locations[0].Url)) rawURL, _ := util_http.NormalizeUrl(lookupResult.Locations[0].Url)
u, _ := url.Parse(rawURL)
r.URL.Host = u.Host r.URL.Host = u.Host
r.URL.Scheme = u.Scheme r.URL.Scheme = u.Scheme
request, err := http.NewRequest(http.MethodGet, r.URL.String(), nil) request, err := http.NewRequest(http.MethodGet, r.URL.String(), nil)
@ -96,13 +98,13 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
} }
} }
response, err := client.Do(request) response, err := util_http.GetGlobalHttpClient().Do(request)
if err != nil { if err != nil {
glog.V(0).Infof("request remote url %s: %v", r.URL.String(), err) glog.V(0).Infof("request remote url %s: %v", r.URL.String(), err)
InternalError(w) InternalError(w)
return return
} }
defer util.CloseResponse(response) defer util_http.CloseResponse(response)
// proxy target response to client // proxy target response to client
for k, vv := range response.Header { for k, vv := range response.Header {
for _, v := range vv { for _, v := range vv {
@ -116,7 +118,8 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
return return
} else { } else {
// redirect // redirect
u, _ := url.Parse(util.NormalizeUrl(lookupResult.Locations[0].PublicUrl)) rawURL, _ := util_http.NormalizeUrl(lookupResult.Locations[0].PublicUrl)
u, _ := url.Parse(rawURL)
u.Path = fmt.Sprintf("%s/%s,%s", u.Path, vid, fid) u.Path = fmt.Sprintf("%s/%s,%s", u.Path, vid, fid)
arg := url.Values{} arg := url.Values{}
if c := r.FormValue("collection"); c != "" { if c := r.FormValue("collection"); c != "" {

View File

@ -392,8 +392,13 @@ func (fs *WebDavFileSystem) Stat(ctx context.Context, name string) (os.FileInfo,
} }
func (f *WebDavFile) saveDataAsChunk(reader io.Reader, name string, offset int64, tsNs int64) (chunk *filer_pb.FileChunk, err error) { func (f *WebDavFile) saveDataAsChunk(reader io.Reader, name string, offset int64, tsNs int64) (chunk *filer_pb.FileChunk, err error) {
uploader, uploaderErr := operation.NewUploader()
if uploaderErr != nil {
glog.V(0).Infof("upload data %v: %v", f.name, uploaderErr)
return nil, fmt.Errorf("upload data: %v", uploaderErr)
}
fileId, uploadResult, flushErr, _ := operation.UploadWithRetry( fileId, uploadResult, flushErr, _ := uploader.UploadWithRetry(
f.fs, f.fs,
&filer_pb.AssignVolumeRequest{ &filer_pb.AssignVolumeRequest{
Count: 1, Count: 1,

View File

@ -19,14 +19,10 @@ import (
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/pb/master_pb" "github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
"github.com/seaweedfs/seaweedfs/weed/util" "github.com/seaweedfs/seaweedfs/weed/util"
) util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
var (
client *http.Client
) )
func init() { func init() {
client = &http.Client{}
Commands = append(Commands, &commandFsMergeVolumes{}) Commands = append(Commands, &commandFsMergeVolumes{})
} }
@ -104,7 +100,7 @@ func (c *commandFsMergeVolumes) Do(args []string, commandEnv *CommandEnv, writer
return nil return nil
} }
defer client.CloseIdleConnections() defer util_http.GetGlobalHttpClient().CloseIdleConnections()
return commandEnv.WithFilerClient(false, func(filerClient filer_pb.SeaweedFilerClient) error { return commandEnv.WithFilerClient(false, func(filerClient filer_pb.SeaweedFilerClient) error {
return filer_pb.TraverseBfs(commandEnv, util.FullPath(dir), func(parentPath util.FullPath, entry *filer_pb.Entry) { return filer_pb.TraverseBfs(commandEnv, util.FullPath(dir), func(parentPath util.FullPath, entry *filer_pb.Entry) {
@ -304,7 +300,7 @@ func moveChunk(chunk *filer_pb.FileChunk, toVolumeId needle.VolumeId, masterClie
if err != nil { if err != nil {
return err return err
} }
defer util.CloseResponse(resp) defer util_http.CloseResponse(resp)
defer reader.Close() defer reader.Close()
var filename string var filename string
@ -322,7 +318,12 @@ func moveChunk(chunk *filer_pb.FileChunk, toVolumeId needle.VolumeId, masterClie
isCompressed := resp.Header.Get("Content-Encoding") == "gzip" isCompressed := resp.Header.Get("Content-Encoding") == "gzip"
md5 := resp.Header.Get("Content-MD5") md5 := resp.Header.Get("Content-MD5")
_, err, _ = operation.Upload(reader, &operation.UploadOption{ uploader, err := operation.NewUploader()
if err != nil {
return err
}
_, err, _ = uploader.Upload(reader, &operation.UploadOption{
UploadUrl: uploadURL, UploadUrl: uploadURL,
Filename: filename, Filename: filename,
IsInputCompressed: isCompressed, IsInputCompressed: isCompressed,
@ -348,12 +349,12 @@ func readUrl(fileUrl string) (*http.Response, io.ReadCloser, error) {
} }
req.Header.Add("Accept-Encoding", "gzip") req.Header.Add("Accept-Encoding", "gzip")
r, err := client.Do(req) r, err := util_http.GetGlobalHttpClient().Do(req)
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
} }
if r.StatusCode >= 400 { if r.StatusCode >= 400 {
util.CloseResponse(r) util_http.CloseResponse(r)
return nil, nil, fmt.Errorf("%s: %s", fileUrl, r.Status) return nil, nil, fmt.Errorf("%s: %s", fileUrl, r.Status)
} }

View File

@ -12,6 +12,7 @@ import (
"github.com/seaweedfs/seaweedfs/weed/util" "github.com/seaweedfs/seaweedfs/weed/util"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
) )
func init() { func init() {
@ -90,7 +91,7 @@ func (c *commandS3CleanUploads) cleanupUploads(commandEnv *CommandEnv, writer io
deleteUrl := fmt.Sprintf("http://%s%s/%s?recursive=true&ignoreRecursiveError=true", commandEnv.option.FilerAddress.ToHttpAddress(), uploadsDir, staleUpload) deleteUrl := fmt.Sprintf("http://%s%s/%s?recursive=true&ignoreRecursiveError=true", commandEnv.option.FilerAddress.ToHttpAddress(), uploadsDir, staleUpload)
fmt.Fprintf(writer, "purge %s\n", deleteUrl) fmt.Fprintf(writer, "purge %s\n", deleteUrl)
err = util.Delete(deleteUrl, string(encodedJwt)) err = util_http.Delete(deleteUrl, string(encodedJwt))
if err != nil && err.Error() != "" { if err != nil && err.Error() != "" {
return fmt.Errorf("purge %s/%s: %v", uploadsDir, staleUpload, err) return fmt.Errorf("purge %s/%s: %v", uploadsDir, staleUpload, err)
} }

View File

@ -31,6 +31,7 @@ import (
"strings" "strings"
"sync" "sync"
"time" "time"
util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
) )
func init() { func init() {
@ -552,9 +553,7 @@ func (c *commandVolumeFsck) httpDelete(path util.FullPath) {
fmt.Fprintf(c.writer, "HTTP delete request error: %v\n", err) fmt.Fprintf(c.writer, "HTTP delete request error: %v\n", err)
} }
client := &http.Client{} resp, err := util_http.GetGlobalHttpClient().Do(req)
resp, err := client.Do(req)
if err != nil { if err != nil {
fmt.Fprintf(c.writer, "DELETE fetch error: %v\n", err) fmt.Fprintf(c.writer, "DELETE fetch error: %v\n", err)
} }

View File

@ -20,6 +20,7 @@ import (
"github.com/seaweedfs/seaweedfs/weed/storage/types" "github.com/seaweedfs/seaweedfs/weed/storage/types"
"github.com/seaweedfs/seaweedfs/weed/util" "github.com/seaweedfs/seaweedfs/weed/util"
"github.com/seaweedfs/seaweedfs/weed/util/buffer_pool" "github.com/seaweedfs/seaweedfs/weed/util/buffer_pool"
util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
) )
func ReplicatedWrite(masterFn operation.GetMasterFn, grpcDialOption grpc.DialOption, s *storage.Store, volumeId needle.VolumeId, n *needle.Needle, r *http.Request, contentMd5 string) (isUnchanged bool, err error) { func ReplicatedWrite(masterFn operation.GetMasterFn, grpcDialOption grpc.DialOption, s *storage.Store, volumeId needle.VolumeId, n *needle.Needle, r *http.Request, contentMd5 string) (isUnchanged bool, err error) {
@ -105,7 +106,12 @@ func ReplicatedWrite(masterFn operation.GetMasterFn, grpcDialOption grpc.DialOpt
BytesBuffer: bytesBuffer, BytesBuffer: bytesBuffer,
} }
_, err := operation.UploadData(n.Data, uploadOption) uploader, err := operation.NewUploader()
if err != nil {
glog.Errorf("replication-UploadData, err:%v, url:%s", err, u.String())
return err
}
_, err = uploader.UploadData(n.Data, uploadOption)
if err != nil { if err != nil {
glog.Errorf("replication-UploadData, err:%v, url:%s", err, u.String()) glog.Errorf("replication-UploadData, err:%v, url:%s", err, u.String())
} }
@ -144,7 +150,7 @@ func ReplicatedDelete(masterFn operation.GetMasterFn, grpcDialOption grpc.DialOp
if len(remoteLocations) > 0 { //send to other replica locations if len(remoteLocations) > 0 { //send to other replica locations
if err = DistributedOperation(remoteLocations, func(location operation.Location) error { if err = DistributedOperation(remoteLocations, func(location operation.Location) error {
return util.Delete("http://"+location.Url+r.URL.Path+"?type=replicate", string(jwt)) return util_http.Delete("http://"+location.Url+r.URL.Path+"?type=replicate", string(jwt))
}); err != nil { }); err != nil {
size = 0 size = 0
} }

View File

@ -0,0 +1,201 @@
package client
import (
"crypto/tls"
"crypto/x509"
"fmt"
util "github.com/seaweedfs/seaweedfs/weed/util"
"github.com/spf13/viper"
"io"
"net/http"
"net/url"
"os"
"strings"
"sync"
)
var (
loadSecurityConfigOnce sync.Once
)
type HTTPClient struct {
Client *http.Client
Transport *http.Transport
expectHttpsScheme bool
}
func (httpClient *HTTPClient) Do(req *http.Request) (*http.Response, error) {
req.URL.Scheme = httpClient.GetHttpScheme()
return httpClient.Client.Do(req)
}
func (httpClient *HTTPClient) Get(url string) (resp *http.Response, err error) {
url, err = httpClient.NormalizeHttpScheme(url)
if err != nil {
return nil, err
}
return httpClient.Client.Get(url)
}
func (httpClient *HTTPClient) Post(url, contentType string, body io.Reader) (resp *http.Response, err error) {
url, err = httpClient.NormalizeHttpScheme(url)
if err != nil {
return nil, err
}
return httpClient.Client.Post(url, contentType, body)
}
func (httpClient *HTTPClient) PostForm(url string, data url.Values) (resp *http.Response, err error) {
url, err = httpClient.NormalizeHttpScheme(url)
if err != nil {
return nil, err
}
return httpClient.Client.PostForm(url, data)
}
func (httpClient *HTTPClient) Head(url string) (resp *http.Response, err error) {
url, err = httpClient.NormalizeHttpScheme(url)
if err != nil {
return nil, err
}
return httpClient.Client.Head(url)
}
func (httpClient *HTTPClient) CloseIdleConnections() {
httpClient.Client.CloseIdleConnections()
}
func (httpClient *HTTPClient) GetClientTransport() *http.Transport {
return httpClient.Transport
}
func (httpClient *HTTPClient) GetHttpScheme() string {
if httpClient.expectHttpsScheme {
return "https"
}
return "http"
}
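// NormalizeHttpScheme rewrites rawURL to match the client's expected scheme:
// a bare host[:port] gets the scheme prepended, and an existing http/https
// prefix is swapped when it disagrees with the security.toml setting.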
func (httpClient *HTTPClient) NormalizeHttpScheme(rawURL string) (string, error) {
expectedScheme := httpClient.GetHttpScheme()
if !(strings.HasPrefix(rawURL, "http://") || strings.HasPrefix(rawURL, "https://")) {
return expectedScheme + "://" + rawURL, nil
}
parsedURL, err := url.Parse(rawURL)
if err != nil {
return "", err
}
if expectedScheme != parsedURL.Scheme {
parsedURL.Scheme = expectedScheme
}
return parsedURL.String(), nil
}
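// NewHttpClient assembles the client for one named section of security.toml.
// When https.<name>.enabled is true, any CA bundle and client cert/key pair
// configured there are attached to the transport's TLS config.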
func NewHttpClient(clientName ClientName, opts ...HttpClientOpt) (*HTTPClient, error) {
httpClient := HTTPClient{}
httpClient.expectHttpsScheme = checkIsHttpsClientEnabled(clientName)
var tlsConfig *tls.Config = nil
if httpClient.expectHttpsScheme {
clientCertPair, err := getClientCertPair(clientName)
if err != nil {
return nil, err
}
clientCaCert, clientCaCertName, err := getClientCaCert(clientName)
if err != nil {
return nil, err
}
if clientCertPair != nil || len(clientCaCert) != 0 {
caCertPool, err := createHTTPClientCertPool(clientCaCert, clientCaCertName)
if err != nil {
return nil, err
}
tlsConfig = &tls.Config{
Certificates: []tls.Certificate{},
RootCAs: caCertPool,
InsecureSkipVerify: false,
}
if clientCertPair != nil {
tlsConfig.Certificates = append(tlsConfig.Certificates, *clientCertPair)
}
}
}
httpClient.Transport = &http.Transport{
MaxIdleConns: 1024,
MaxIdleConnsPerHost: 1024,
TLSClientConfig: tlsConfig,
}
httpClient.Client = &http.Client{
Transport: httpClient.Transport,
}
for _, opt := range opts {
opt(&httpClient)
}
return &httpClient, nil
}
func getStringOptionFromSecurityConfiguration(clientName ClientName, stringOptionName string) string {
util.LoadSecurityConfiguration()
return viper.GetString(fmt.Sprintf("https.%s.%s", clientName.LowerCaseString(), stringOptionName))
}
func getBoolOptionFromSecurityConfiguration(clientName ClientName, boolOptionName string) bool {
util.LoadSecurityConfiguration()
return viper.GetBool(fmt.Sprintf("https.%s.%s", clientName.LowerCaseString(), boolOptionName))
}
func checkIsHttpsClientEnabled(clientName ClientName) bool {
return getBoolOptionFromSecurityConfiguration(clientName, "enabled")
}
func getFileContentFromSecurityConfiguration(clientName ClientName, fileType string) ([]byte, string, error) {
if fileName := getStringOptionFromSecurityConfiguration(clientName, fileType); fileName != "" {
fileContent, err := os.ReadFile(fileName)
if err != nil {
return nil, fileName, err
}
return fileContent, fileName, err
}
return nil, "", nil
}
func getClientCertPair(clientName ClientName) (*tls.Certificate, error) {
certFileName := getStringOptionFromSecurityConfiguration(clientName, "cert")
keyFileName := getStringOptionFromSecurityConfiguration(clientName, "key")
if certFileName == "" && keyFileName == "" {
return nil, nil
}
if certFileName != "" && keyFileName != "" {
clientCert, err := tls.LoadX509KeyPair(certFileName, keyFileName)
if err != nil {
return nil, fmt.Errorf("error loading client certificate and key: %s", err)
}
return &clientCert, nil
}
return nil, fmt.Errorf("error loading key pair: key `%s` and certificate `%s`", keyFileName, certFileName)
}
func getClientCaCert(clientName ClientName) ([]byte, string, error) {
return getFileContentFromSecurityConfiguration(clientName, "ca")
}
func createHTTPClientCertPool(certContent []byte, fileName string) (*x509.CertPool, error) {
certPool := x509.NewCertPool()
if len(certContent) == 0 {
return certPool, nil
}
ok := certPool.AppendCertsFromPEM(certContent)
if !ok {
return nil, fmt.Errorf("error processing certificate in %s", fileName)
}
return certPool, nil
}
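
For callers that need their own instance rather than the global one introduced below, construction is a single call; a minimal sketch (newClient is illustrative):

package example

import (
	util_http_client "github.com/seaweedfs/seaweedfs/weed/util/http/client"
)

// newClient builds a TLS-aware client for the "client" section of
// security.toml; AddDialContext (defined later in this diff) is an
// optional construction hook.
func newClient() (*util_http_client.HTTPClient, error) {
	return util_http_client.NewHttpClient(util_http_client.Client, util_http_client.AddDialContext)
}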

View File

@ -0,0 +1,16 @@
package client
import (
"io"
"net/http"
"net/url"
)
type HTTPClientInterface interface {
Do(req *http.Request) (*http.Response, error)
Get(url string) (resp *http.Response, err error)
Post(url, contentType string, body io.Reader) (resp *http.Response, err error)
PostForm(url string, data url.Values) (resp *http.Response, err error)
Head(url string) (resp *http.Response, err error)
CloseIdleConnections()
}
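
Both *http.Client and the HTTPClient above satisfy this interface, which is what lets NewS3ApiServer keep a plain net/http client on the unix-socket path while using the TLS-aware client otherwise. A sketch of code written against the interface (readAll is illustrative):

package example

import (
	"io"

	util_http_client "github.com/seaweedfs/seaweedfs/weed/util/http/client"
)

// readAll accepts either client implementation.
func readAll(c util_http_client.HTTPClientInterface, url string) ([]byte, error) {
	resp, err := c.Get(url)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	return io.ReadAll(resp.Body)
}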

View File

@ -0,0 +1,14 @@
package client
import "strings"
type ClientName int
//go:generate stringer -type=ClientName -output=http_client_name_string.go
const (
Client ClientName = iota
)
func (name *ClientName) LowerCaseString() string {
return strings.ToLower(name.String())
}
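
Each ClientName selects its settings through the https.<lower-cased name>.<option> viper keys read in http_client.go (enabled, cert, key, ca). A sketch of that key layout; the helper name is illustrative:

package example

import (
	"fmt"

	util_http_client "github.com/seaweedfs/seaweedfs/weed/util/http/client"
)

// configKey reproduces the lookup used by the option getters, e.g.
// https.client.enabled or https.client.ca for the Client name above.
func configKey(name util_http_client.ClientName, option string) string {
	return fmt.Sprintf("https.%s.%s", name.LowerCaseString(), option)
}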

View File

@ -0,0 +1,23 @@
// Code generated by "stringer -type=ClientName -output=http_client_name_string.go"; DO NOT EDIT.
package client
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[Client-0]
}
const _ClientName_name = "Client"
var _ClientName_index = [...]uint8{0, 6}
func (i ClientName) String() string {
if i < 0 || i >= ClientName(len(_ClientName_index)-1) {
return "ClientName(" + strconv.FormatInt(int64(i), 10) + ")"
}
return _ClientName_name[_ClientName_index[i]:_ClientName_index[i+1]]
}

View File

@ -0,0 +1,18 @@
package client
import (
"net"
"time"
)
type HttpClientOpt = func(clientCfg *HTTPClient)
func AddDialContext(httpClient *HTTPClient) {
dialContext := (&net.Dialer{
Timeout: 10 * time.Second,
KeepAlive: 10 * time.Second,
}).DialContext
httpClient.Transport.DialContext = dialContext
httpClient.Client.Transport = httpClient.Transport
}
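
An opt is just a function over the assembled client, so callers can define their own in the same style; a hypothetical example (withTimeout is not part of this change):

package example

import (
	"time"

	util_http_client "github.com/seaweedfs/seaweedfs/weed/util/http/client"
)

// withTimeout caps each request, mirroring how AddDialContext mutates
// the client after NewHttpClient has built the transport.
func withTimeout(httpClient *util_http_client.HTTPClient) {
	httpClient.Client.Timeout = 30 * time.Second
}

It would be passed the same way: util_http_client.NewHttpClient(util_http_client.Client, withTimeout).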

View File

@ -0,0 +1,27 @@
package http
import (
"github.com/seaweedfs/seaweedfs/weed/glog"
util_http_client "github.com/seaweedfs/seaweedfs/weed/util/http/client"
)
var (
globalHttpClient *util_http_client.HTTPClient
)
func NewGlobalHttpClient(opt ...util_http_client.HttpClientOpt) (*util_http_client.HTTPClient, error) {
return util_http_client.NewHttpClient(util_http_client.Client, opt...)
}
func GetGlobalHttpClient() *util_http_client.HTTPClient {
return globalHttpClient
}
func InitGlobalHttpClient() {
var err error
globalHttpClient, err = NewGlobalHttpClient()
if err != nil {
glog.Fatalf("error init global http client: %v", err)
}
}
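
This is the wiring every entry point in this commit performs: initialize once, then route all HTTP traffic through the shared client. Sketched as a standalone main (the URL is illustrative):

package main

import (
	"log"

	util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
)

func main() {
	// Fails fast (glog.Fatalf) if the TLS configuration cannot be loaded.
	util_http.InitGlobalHttpClient()

	// Scheme-less URLs are normalized to http or https per security.toml.
	resp, err := util_http.GetGlobalHttpClient().Get("localhost:9333/dir/status")
	if err != nil {
		log.Fatal(err)
	}
	defer util_http.CloseResponse(resp)
}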

View File

@ -1,4 +1,4 @@
package util package http
import ( import (
"compress/gzip" "compress/gzip"
@ -6,6 +6,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"github.com/seaweedfs/seaweedfs/weed/util/mem" "github.com/seaweedfs/seaweedfs/weed/util/mem"
"github.com/seaweedfs/seaweedfs/weed/util"
"io" "io"
"net/http" "net/http"
"net/url" "net/url"
@ -15,23 +16,8 @@ import (
"github.com/seaweedfs/seaweedfs/weed/glog" "github.com/seaweedfs/seaweedfs/weed/glog"
) )
var (
client *http.Client
Transport *http.Transport
)
func init() {
Transport = &http.Transport{
MaxIdleConns: 1024,
MaxIdleConnsPerHost: 1024,
}
client = &http.Client{
Transport: Transport,
}
}
func Post(url string, values url.Values) ([]byte, error) { func Post(url string, values url.Values) ([]byte, error) {
r, err := client.PostForm(url, values) r, err := GetGlobalHttpClient().PostForm(url, values)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -64,7 +50,7 @@ func GetAuthenticated(url, jwt string) ([]byte, bool, error) {
maybeAddAuth(request, jwt) maybeAddAuth(request, jwt)
request.Header.Add("Accept-Encoding", "gzip") request.Header.Add("Accept-Encoding", "gzip")
response, err := client.Do(request) response, err := GetGlobalHttpClient().Do(request)
if err != nil { if err != nil {
return nil, true, err return nil, true, err
} }
@ -94,7 +80,7 @@ func GetAuthenticated(url, jwt string) ([]byte, bool, error) {
} }
func Head(url string) (http.Header, error) { func Head(url string) (http.Header, error) {
r, err := client.Head(url) r, err := GetGlobalHttpClient().Head(url)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -117,7 +103,7 @@ func Delete(url string, jwt string) error {
if err != nil { if err != nil {
return err return err
} }
resp, e := client.Do(req) resp, e := GetGlobalHttpClient().Do(req)
if e != nil { if e != nil {
return e return e
} }
@ -145,7 +131,7 @@ func DeleteProxied(url string, jwt string) (body []byte, httpStatus int, err err
if err != nil { if err != nil {
return return
} }
resp, err := client.Do(req) resp, err := GetGlobalHttpClient().Do(req)
if err != nil { if err != nil {
return return
} }
@ -159,7 +145,7 @@ func DeleteProxied(url string, jwt string) (body []byte, httpStatus int, err err
} }
func GetBufferStream(url string, values url.Values, allocatedBytes []byte, eachBuffer func([]byte)) error { func GetBufferStream(url string, values url.Values, allocatedBytes []byte, eachBuffer func([]byte)) error {
r, err := client.PostForm(url, values) r, err := GetGlobalHttpClient().PostForm(url, values)
if err != nil { if err != nil {
return err return err
} }
@ -182,7 +168,7 @@ func GetBufferStream(url string, values url.Values, allocatedBytes []byte, eachB
} }
func GetUrlStream(url string, values url.Values, readFn func(io.Reader) error) error { func GetUrlStream(url string, values url.Values, readFn func(io.Reader) error) error {
r, err := client.PostForm(url, values) r, err := GetGlobalHttpClient().PostForm(url, values)
if err != nil { if err != nil {
return err return err
} }
@ -201,7 +187,7 @@ func DownloadFile(fileUrl string, jwt string) (filename string, header http.Head
maybeAddAuth(req, jwt) maybeAddAuth(req, jwt)
response, err := client.Do(req) response, err := GetGlobalHttpClient().Do(req)
if err != nil { if err != nil {
return "", nil, nil, err return "", nil, nil, err
} }
@ -219,14 +205,11 @@ func DownloadFile(fileUrl string, jwt string) (filename string, header http.Head
} }
func Do(req *http.Request) (resp *http.Response, err error) { func Do(req *http.Request) (resp *http.Response, err error) {
return client.Do(req) return GetGlobalHttpClient().Do(req)
} }
func NormalizeUrl(url string) string { func NormalizeUrl(url string) (string, error) {
if strings.HasPrefix(url, "http://") || strings.HasPrefix(url, "https://") { return GetGlobalHttpClient().NormalizeHttpScheme(url)
return url
}
return "http://" + url
} }
func ReadUrl(fileUrl string, cipherKey []byte, isContentCompressed bool, isFullChunk bool, offset int64, size int, buf []byte) (int64, error) { func ReadUrl(fileUrl string, cipherKey []byte, isContentCompressed bool, isFullChunk bool, offset int64, size int, buf []byte) (int64, error) {
@ -249,7 +232,7 @@ func ReadUrl(fileUrl string, cipherKey []byte, isContentCompressed bool, isFullC
req.Header.Set("Accept-Encoding", "gzip") req.Header.Set("Accept-Encoding", "gzip")
} }
r, err := client.Do(req) r, err := GetGlobalHttpClient().Do(req)
if err != nil { if err != nil {
return 0, err return 0, err
} }
@ -322,7 +305,7 @@ func ReadUrlAsStreamAuthenticated(fileUrl, jwt string, cipherKey []byte, isConte
req.Header.Add("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+int64(size)-1)) req.Header.Add("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+int64(size)-1))
} }
r, err := client.Do(req) r, err := GetGlobalHttpClient().Do(req)
if err != nil { if err != nil {
return true, err return true, err
} }
@ -368,12 +351,12 @@ func readEncryptedUrl(fileUrl, jwt string, cipherKey []byte, isContentCompressed
if err != nil { if err != nil {
return retryable, fmt.Errorf("fetch %s: %v", fileUrl, err) return retryable, fmt.Errorf("fetch %s: %v", fileUrl, err)
} }
decryptedData, err := Decrypt(encryptedData, CipherKey(cipherKey)) decryptedData, err := util.Decrypt(encryptedData, util.CipherKey(cipherKey))
if err != nil { if err != nil {
return false, fmt.Errorf("decrypt %s: %v", fileUrl, err) return false, fmt.Errorf("decrypt %s: %v", fileUrl, err)
} }
if isContentCompressed { if isContentCompressed {
decryptedData, err = DecompressData(decryptedData) decryptedData, err = util.DecompressData(decryptedData)
if err != nil { if err != nil {
glog.V(0).Infof("unzip decrypt %s: %v", fileUrl, err) glog.V(0).Infof("unzip decrypt %s: %v", fileUrl, err)
} }
@ -403,7 +386,7 @@ func ReadUrlAsReaderCloser(fileUrl string, jwt string, rangeHeader string) (*htt
maybeAddAuth(req, jwt) maybeAddAuth(req, jwt)
r, err := client.Do(req) r, err := GetGlobalHttpClient().Do(req)
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
} }
@ -463,7 +446,7 @@ func RetriedFetchChunkData(buffer []byte, urlStrings []string, cipherKey []byte,
var shouldRetry bool var shouldRetry bool
for waitTime := time.Second; waitTime < RetryWaitTime; waitTime += waitTime / 2 { for waitTime := time.Second; waitTime < util.RetryWaitTime; waitTime += waitTime / 2 {
for _, urlString := range urlStrings { for _, urlString := range urlStrings {
n = 0 n = 0
if strings.Contains(urlString, "%") { if strings.Contains(urlString, "%") {
@ -494,4 +477,4 @@ func RetriedFetchChunkData(buffer []byte, urlStrings []string, cipherKey []byte,
return n, err return n, err
} }
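
Note the signature change to NormalizeUrl above: it now delegates to the client's scheme handling and returns an error for unparsable input, so callers take two values where they previously concatenated the result directly. A minimal migration sketch (publicURL is illustrative):

package example

import (
	util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
)

// publicURL shows the new call shape; the master handlers above choose
// to ignore the error, but callers can also propagate it.
func publicURL(host, path string) (string, error) {
	base, err := util_http.NormalizeUrl(host)
	if err != nil {
		return "", err
	}
	return base + path, nil
}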

View File

@ -20,6 +20,7 @@ import (
"github.com/getsentry/sentry-go" "github.com/getsentry/sentry-go"
"github.com/seaweedfs/seaweedfs/weed/command" "github.com/seaweedfs/seaweedfs/weed/command"
"github.com/seaweedfs/seaweedfs/weed/glog" "github.com/seaweedfs/seaweedfs/weed/glog"
util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
) )
var IsDebug *bool var IsDebug *bool
@ -86,6 +87,7 @@ func main() {
return return
} }
util_http.InitGlobalHttpClient()
for _, cmd := range commands { for _, cmd := range commands {
if cmd.Name() == args[0] && cmd.Run != nil { if cmd.Name() == args[0] && cmd.Run != nil {
cmd.Flag.Usage = func() { cmd.Usage() } cmd.Flag.Usage = func() { cmd.Usage() }