mirror of https://github.com/seaweedfs/seaweedfs.git
synced 2025-06-07 09:25:23 +08:00

commit 0d62be4484 (parent d8c574a5ef)

    WIP

go.mod (2 changed lines)
@@ -97,6 +97,7 @@ require (
 	go.etcd.io/etcd/client/v3 v3.5.18
 	go.mongodb.org/mongo-driver v1.17.3
 	go.opencensus.io v0.24.0 // indirect
+	go.uber.org/zap v1.27.0
 	gocloud.dev v0.41.0
 	gocloud.dev/pubsub/natspubsub v0.41.0
 	gocloud.dev/pubsub/rabbitpubsub v0.41.0
@@ -356,7 +357,6 @@ require (
 	go.opentelemetry.io/otel/sdk/metric v1.35.0 // indirect
 	go.opentelemetry.io/otel/trace v1.35.0 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
-	go.uber.org/zap v1.27.0 // indirect
 	golang.org/x/term v0.32.0 // indirect
 	golang.org/x/time v0.11.0 // indirect
 	google.golang.org/genproto/googleapis/api v0.0.0-20250324211829-b45e905df463 // indirect

replace_glog.sh (new executable file, 32 lines)
@@ -0,0 +1,32 @@
+#!/bin/bash
+
+# Find all Go files containing glog calls
+files=$(grep -l "glog\." --include="*.go" -r .)
+
+# Check if any files were found
+if [ -z "$files" ]; then
+    echo "No files found containing glog calls"
+    exit 0
+fi
+
+# Print the files that will be modified
+echo "The following files will be modified:"
+echo "$files"
+echo
+
+# Ask for confirmation
+read -p "Do you want to proceed with the replacement? (y/n) " -n 1 -r
+echo
+if [[ ! $REPLY =~ ^[Yy]$ ]]; then
+    echo "Operation cancelled"
+    exit 1
+fi
+
+# Make the replacements
+for file in $files; do
+    echo "Processing $file"
+    # Replace all glog function calls with log
+    sed -i '' 's/glog\./log\./g' "$file"
+done
+
+echo "Replacement complete!"
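For a blanket glog. -> log. rename like the one above to compile, the new weed/util/log package has to expose glog-style entry points; the call sites touched in this diff exercise V(...).Infof, V(...).Infoln, Infof, Errorf, Warningf, Fatal, and Fatalf. A minimal hypothetical facade along those lines — an illustrative sketch only, not the actual util/log implementation, with a placeholder verbosity gate:

    package log

    import stdlog "log"

    // Verbose gates leveled output, mirroring glog's V(...).Infof pattern.
    type Verbose bool

    // V is a placeholder gate; the real package presumably compares the
    // requested level against a configured verbosity threshold.
    func V(level int) Verbose { return level <= 0 }

    func (v Verbose) Infof(format string, args ...interface{}) {
        if v {
            stdlog.Printf(format, args...)
        }
    }

    func (v Verbose) Infoln(args ...interface{}) {
        if v {
            stdlog.Println(args...)
        }
    }

    func Infof(format string, args ...interface{})    { stdlog.Printf(format, args...) }
    func Errorf(format string, args ...interface{})   { stdlog.Printf("ERROR: "+format, args...) }
    func Warningf(format string, args ...interface{}) { stdlog.Printf("WARNING: "+format, args...) }
    func Fatal(args ...interface{})                   { stdlog.Fatal(args...) }
    func Fatalf(format string, args ...interface{})   { stdlog.Fatalf(format, args...) }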

replace_glog_calls.sh (new executable file, 37 lines)
@@ -0,0 +1,37 @@
+#!/bin/bash
+
+# Find all Go files containing glog.V calls
+files=$(grep -l "glog.V" --include="*.go" -r .)
+
+# Check if any files were found
+if [ -z "$files" ]; then
+    echo "No files found containing glog.V calls"
+    exit 0
+fi
+
+# Print the files that will be modified
+echo "The following files will be modified:"
+echo "$files"
+echo
+
+# Ask for confirmation
+read -p "Do you want to proceed with the replacement? (y/n) " -n 1 -r
+echo
+if [[ ! $REPLY =~ ^[Yy]$ ]]; then
+    echo "Operation cancelled"
+    exit 1
+fi
+
+# Make the replacements
+for file in $files; do
+    echo "Processing $file"
+    # Replace glog.V(n).Info with log.V(n).Info for n=0-4
+    for level in {0..4}; do
+        # Replace Info calls
+        sed -i '' "s/glog.V($level).Info/log.V($level).Info/g" "$file"
+        # Replace Infof calls
+        sed -i '' "s/glog.V($level).Infof/log.V($level).Infof/g" "$file"
+    done
+done
+
+echo "Replacement complete!"
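A quirk worth noting in the loop above: because "glog.V(n).Info" is a literal prefix of "glog.V(n).Infof", the first sed substitution already rewrites the Infof call sites, so the second one never finds a match. (The scripts also use BSD sed's `-i ''` in-place syntax, which suggests a macOS environment; GNU sed would take `-i` alone — an assumption about the author's setup.) A small self-contained Go sketch of that prefix behavior, illustrative only:

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        line := `glog.V(2).Infof("copied %d bytes", n)`
        // Mirrors the first sed: s/glog.V(2).Info/log.V(2).Info/g
        after := strings.ReplaceAll(line, "glog.V(2).Info", "log.V(2).Info")
        fmt.Println(after) // log.V(2).Infof(...) — the Infof call is already rewritten
    }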

replace_imports.sh (new executable file, 32 lines)
@@ -0,0 +1,32 @@
+#!/bin/bash
+
+# Find all Go files containing the old import path
+files=$(grep -l "github.com/seaweedfs/seaweedfs/weed/glog" --include="*.go" -r .)
+
+# Check if any files were found
+if [ -z "$files" ]; then
+    echo "No files found containing the old import path"
+    exit 0
+fi
+
+# Print the files that will be modified
+echo "The following files will be modified:"
+echo "$files"
+echo
+
+# Ask for confirmation
+read -p "Do you want to proceed with the replacement? (y/n) " -n 1 -r
+echo
+if [[ ! $REPLY =~ ^[Yy]$ ]]; then
+    echo "Operation cancelled"
+    exit 1
+fi
+
+# Make the replacements
+for file in $files; do
+    echo "Processing $file"
+    # Use sed to replace the import path
+    sed -i '' 's|github.com/seaweedfs/seaweedfs/weed/glog|github.com/seaweedfs/seaweedfs/weed/util/log|g' "$file"
+done
+
+echo "Replacement complete!"

swap_log_levels.sh (new executable file, 54 lines)
@@ -0,0 +1,54 @@
+#!/bin/bash
+
+# Find all Go files containing log.V calls
+files=$(grep -l "log.V" --include="*.go" -r .)
+
+if [ -z "$files" ]; then
+    echo "No files found containing log.V calls"
+    exit 0
+fi
+
+# Create a temporary file for sed operations
+temp_file=$(mktemp)
+
+# Process each file
+for file in $files; do
+    echo "Processing $file"
+
+    # First, replace log.V(-1) with a temporary placeholder
+    sed 's/log\.V(-1)/__TEMP_NEG_ONE__/g' "$file" > "$temp_file"
+    mv "$temp_file" "$file"
+
+    # Replace log.V(4) with log.V(-1)
+    sed 's/log\.V(4)/log.V(-1)/g' "$file" > "$temp_file"
+    mv "$temp_file" "$file"
+
+    # Replace the temporary placeholder with log.V(4)
+    sed 's/__TEMP_NEG_ONE__/log.V(4)/g' "$file" > "$temp_file"
+    mv "$temp_file" "$file"
+
+    # Swap log.V(0) and log.V(3)
+    sed 's/log\.V(0)/__TEMP_ZERO__/g' "$file" > "$temp_file"
+    mv "$temp_file" "$file"
+    sed 's/log\.V(3)/log.V(0)/g' "$file" > "$temp_file"
+    mv "$temp_file" "$file"
+    sed 's/__TEMP_ZERO__/log.V(3)/g' "$file" > "$temp_file"
+    mv "$temp_file" "$file"
+
+    # Swap log.V(1) and log.V(2)
+    sed 's/log\.V(1)/__TEMP_ONE__/g' "$file" > "$temp_file"
+    mv "$temp_file" "$file"
+    sed 's/log\.V(2)/log.V(1)/g' "$file" > "$temp_file"
+    mv "$temp_file" "$file"
+    sed 's/__TEMP_ONE__/log.V(2)/g' "$file" > "$temp_file"
+    mv "$temp_file" "$file"
+
+    # Replace any log.V(n) with n >= 5 with log.V(-1)
+    sed -E 's/log\.V\([5-9][0-9]*\)/log.V(-1)/g' "$file" > "$temp_file"
+    mv "$temp_file" "$file"
+done
+
+# Clean up
+rm -f "$temp_file"
+
+echo "Log level swapping completed!"
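Taken together, the swaps above invert the glog verbosity scale: glog counts upward with chattiness (V(4) is the most verbose), while the new scheme appears to count downward (V(-1) is the most verbose, V(3) the most prominent). A self-contained sketch of the resulting mapping — the function name is illustrative, not part of the commit:

    package main

    import "fmt"

    // mapGlogLevel mirrors the sed swaps in swap_log_levels.sh:
    // 0<->3, 1<->2, 4->-1, and anything 5 or above collapses to -1.
    func mapGlogLevel(glogV int) int {
        switch glogV {
        case 0:
            return 3
        case 1:
            return 2
        case 2:
            return 1
        case 3:
            return 0
        default:
            return -1
        }
    }

    func main() {
        for v := 0; v <= 5; v++ {
            fmt.Printf("glog.V(%d) -> log.V(%d)\n", v, mapGlogLevel(v))
        }
    }

This matches the call-site rewrites visible in the Go diffs below: glog.V(0) becomes log.V(3), glog.V(2) becomes log.V(1), glog.V(3) becomes log.V(0), and glog.V(4) becomes log.V(-1).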
@@ -7,7 +7,7 @@ import (
 	"path"
 	"strconv"

-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"github.com/seaweedfs/seaweedfs/weed/storage/backend"
 	"github.com/seaweedfs/seaweedfs/weed/storage/needle"
 	"github.com/seaweedfs/seaweedfs/weed/storage/super_block"
@@ -49,7 +49,7 @@ func main() {
 	}
 	datFile, err := os.OpenFile(path.Join(*fixVolumePath, fileName+".dat"), os.O_RDWR, 0644)
 	if err != nil {
-		glog.Fatalf("Open Volume Data File [ERROR]: %v", err)
+		log.Fatalf("Open Volume Data File [ERROR]: %v", err)
 	}
 	datBackend := backend.NewDiskFile(datFile)
 	defer datBackend.Close()
@@ -57,7 +57,7 @@ func main() {
 	superBlock, err := super_block.ReadSuperBlock(datBackend)

 	if err != nil {
-		glog.Fatalf("cannot parse existing super block: %v", err)
+		log.Fatalf("cannot parse existing super block: %v", err)
 	}

 	fmt.Printf("Current Volume Replication: %s\n", superBlock.ReplicaPlacement)
@@ -69,7 +69,7 @@ func main() {
 	replica, err := super_block.NewReplicaPlacementFromString(*targetReplica)

 	if err != nil {
-		glog.Fatalf("cannot parse target replica %s: %v", *targetReplica, err)
+		log.Fatalf("cannot parse target replica %s: %v", *targetReplica, err)
 	}

 	fmt.Printf("Changing replication to: %s\n", replica)
@@ -82,7 +82,7 @@ func main() {
 	ttl, err := needle.ReadTTL(*targetTTL)

 	if err != nil {
-		glog.Fatalf("cannot parse target ttl %s: %v", *targetTTL, err)
+		log.Fatalf("cannot parse target ttl %s: %v", *targetTTL, err)
 	}

 	fmt.Printf("Changing ttl to: %s\n", ttl)
@@ -96,7 +96,7 @@ func main() {
 	header := superBlock.Bytes()

 	if n, e := datBackend.WriteAt(header, 0); n == 0 || e != nil {
-		glog.Fatalf("cannot write super block: %v", e)
+		log.Fatalf("cannot write super block: %v", e)
 	}

 	fmt.Println("Change Applied.")
@@ -10,7 +10,7 @@ import (
 	"math"
 	"os"

-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"github.com/seaweedfs/seaweedfs/weed/operation"
 	"github.com/seaweedfs/seaweedfs/weed/pb"
 	"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
@@ -49,7 +49,7 @@ func main() {
 	vid := uint32(*volumeId)
 	servers := pb.ServerAddresses(*serversStr).ToAddresses()
 	if len(servers) < 2 {
-		glog.Fatalf("You must specify more than 1 server\n")
+		log.Fatalf("You must specify more than 1 server\n")
 	}
 	var referenceServer pb.ServerAddress
 	var maxOffset int64
@@ -57,7 +57,7 @@ func main() {
 	for _, addr := range servers {
 		files, offset, err := getVolumeFiles(vid, addr)
 		if err != nil {
-			glog.Fatalf("Failed to copy idx from volume server %s\n", err)
+			log.Fatalf("Failed to copy idx from volume server %s\n", err)
 		}
 		allFiles[addr] = files
 		if offset > maxOffset {
@@ -103,7 +103,7 @@ func main() {
 			id, err = getNeedleFileId(vid, nid, addr)
 		}
 		if err != nil {
-			glog.Fatalf("Failed to get needle info %d from volume server %s\n", nid, err)
+			log.Fatalf("Failed to get needle info %d from volume server %s\n", nid, err)
 		}
 		fmt.Println(id, addr, diffMsg)
 	}
@@ -8,7 +8,7 @@ import (
 	"path"
 	"strconv"

-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"github.com/seaweedfs/seaweedfs/weed/storage/backend"
 	"github.com/seaweedfs/seaweedfs/weed/storage/needle"
 	"github.com/seaweedfs/seaweedfs/weed/storage/super_block"
@@ -45,26 +45,26 @@ func main() {
 	}
 	indexFile, err := os.OpenFile(path.Join(*fixVolumePath, fileName+".idx"), os.O_RDONLY, 0644)
 	if err != nil {
-		glog.Fatalf("Read Volume Index %v", err)
+		log.Fatalf("Read Volume Index %v", err)
 	}
 	defer indexFile.Close()
 	datFileName := path.Join(*fixVolumePath, fileName+".dat")
 	datFile, err := os.OpenFile(datFileName, os.O_RDONLY, 0644)
 	if err != nil {
-		glog.Fatalf("Read Volume Data %v", err)
+		log.Fatalf("Read Volume Data %v", err)
 	}
 	datBackend := backend.NewDiskFile(datFile)
 	defer datBackend.Close()

 	newDatFile, err := os.Create(path.Join(*fixVolumePath, fileName+".dat_fixed"))
 	if err != nil {
-		glog.Fatalf("Write New Volume Data %v", err)
+		log.Fatalf("Write New Volume Data %v", err)
 	}
 	defer newDatFile.Close()

 	superBlock, err := super_block.ReadSuperBlock(datBackend)
 	if err != nil {
-		glog.Fatalf("Read Volume Data superblock %v", err)
+		log.Fatalf("Read Volume Data superblock %v", err)
 	}
 	newDatFile.Write(superBlock.Bytes())
@@ -3,7 +3,7 @@ package main
 import (
 	"flag"
 	"fmt"
-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"github.com/seaweedfs/seaweedfs/weed/pb"
 	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
 	"github.com/seaweedfs/seaweedfs/weed/util"
@@ -38,7 +38,7 @@ func main() {
 			return nil
 		}
 		name := event.EventNotification.NewEntry.Name
-		glog.V(0).Infof("=> %s ts:%+v", name, time.Unix(0, event.TsNs))
+		log.V(3).Infof("=> %s ts:%+v", name, time.Unix(0, event.TsNs))
 		id := name[4:]
 		if x, err := strconv.Atoi(id); err == nil {
 			if x != expected {
@@ -59,7 +59,7 @@ func startGenerateMetadata() {

 	for i := 0; i < *n; i++ {
 		name := fmt.Sprintf("file%d", i)
-		glog.V(0).Infof("write %s/%s", *dir, name)
+		log.V(3).Infof("write %s/%s", *dir, name)
 		if err := filer_pb.CreateEntry(client, &filer_pb.CreateEntryRequest{
 			Directory: *dir,
 			Entry: &filer_pb.Entry{
@@ -6,7 +6,7 @@ import (
 	"os"
 	"path/filepath"

-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"github.com/seaweedfs/seaweedfs/weed/storage"
 	"github.com/seaweedfs/seaweedfs/weed/storage/backend"
 	"github.com/seaweedfs/seaweedfs/weed/storage/needle"
@@ -50,7 +50,7 @@ func (scanner *VolumeFileScanner4SeeDat) VisitNeedle(n *needle.Needle, offset in
 		newFileName := filepath.Join(*volumePath, "dat_fixed")
 		newDatFile, err := os.Create(newFileName)
 		if err != nil {
-			glog.Fatalf("Write New Volume Data %v", err)
+			log.Fatalf("Write New Volume Data %v", err)
 		}
 		scanner.datBackend = backend.NewDiskFile(newDatFile)
 		scanner.datBackend.WriteAt(scanner.block.Bytes(), 0)
@@ -59,7 +59,7 @@ func (scanner *VolumeFileScanner4SeeDat) VisitNeedle(n *needle.Needle, offset in
 	checksum := Checksum(n)

 	if scanner.hashes[checksum] {
-		glog.V(0).Infof("duplicate checksum:%s fid:%d,%s%x @ offset:%d", checksum, *volumeId, n.Id, n.Cookie, offset)
+		log.V(3).Infof("duplicate checksum:%s fid:%d,%s%x @ offset:%d", checksum, *volumeId, n.Id, n.Cookie, offset)
 		return nil
 	}
 	scanner.hashes[checksum] = true
@@ -85,13 +85,13 @@ func main() {

 	if _, err := os.Stat(scanner.dir); err != nil {
 		if err := os.MkdirAll(scanner.dir, os.ModePerm); err != nil {
-			glog.Fatalf("could not create output dir : %s", err)
+			log.Fatalf("could not create output dir : %s", err)
 		}
 	}

 	err := storage.ScanVolumeFile(*volumePath, *volumeCollection, vid, storage.NeedleMapInMemory, scanner)
 	if err != nil {
-		glog.Fatalf("Reading Volume File [ERROR] %s\n", err)
+		log.Fatalf("Reading Volume File [ERROR] %s\n", err)
 	}

 }
@@ -6,7 +6,7 @@ import (

 	"github.com/seaweedfs/seaweedfs/weed/util"

-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"github.com/seaweedfs/seaweedfs/weed/storage"
 	"github.com/seaweedfs/seaweedfs/weed/storage/needle"
 	"github.com/seaweedfs/seaweedfs/weed/storage/super_block"
@@ -34,7 +34,7 @@ func (scanner *VolumeFileScanner4SeeDat) ReadNeedleBody() bool {

 func (scanner *VolumeFileScanner4SeeDat) VisitNeedle(n *needle.Needle, offset int64, needleHeader, needleBody []byte) error {
 	t := time.Unix(int64(n.AppendAtNs)/int64(time.Second), int64(n.AppendAtNs)%int64(time.Second))
-	glog.V(0).Infof("%d,%s%08x offset %d size %d(%s) cookie %08x appendedAt %v name %s",
+	log.V(3).Infof("%d,%s%08x offset %d size %d(%s) cookie %08x appendedAt %v name %s",
 		*volumeId, n.Id, n.Cookie, offset, n.Size, util.BytesToHumanReadable(uint64(n.Size)), n.Cookie, t, n.Name)
 	return nil
 }
@@ -48,6 +48,6 @@ func main() {
 	scanner := &VolumeFileScanner4SeeDat{}
 	err := storage.ScanVolumeFile(*volumePath, *volumeCollection, vid, storage.NeedleMapInMemory, scanner)
 	if err != nil {
-		glog.Fatalf("Reading Volume File [ERROR] %s\n", err)
+		log.Fatalf("Reading Volume File [ERROR] %s\n", err)
 	}
 }
@@ -9,7 +9,7 @@ import (

 	"github.com/seaweedfs/seaweedfs/weed/util"

-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"github.com/seaweedfs/seaweedfs/weed/storage/idx"
 	"github.com/seaweedfs/seaweedfs/weed/storage/types"
 	util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
@@ -36,7 +36,7 @@ func main() {
 	}
 	indexFile, err := os.OpenFile(path.Join(*fixVolumePath, fileName+".idx"), os.O_RDONLY, 0644)
 	if err != nil {
-		glog.Fatalf("Create Volume Index [ERROR] %s\n", err)
+		log.Fatalf("Create Volume Index [ERROR] %s\n", err)
 	}
 	defer indexFile.Close()
@@ -4,7 +4,7 @@ import (
 	"context"
 	"fmt"
 	"github.com/seaweedfs/seaweedfs/weed/cluster/lock_manager"
-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"github.com/seaweedfs/seaweedfs/weed/pb"
 	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
 	"github.com/seaweedfs/seaweedfs/weed/util"
@@ -73,7 +73,7 @@ func (lc *LockClient) StartLongLivedLock(key string, owner string, onLockOwnerCh
 		for {
 			if isLocked {
 				if err := lock.AttemptToLock(lock_manager.LiveLockTTL); err != nil {
-					glog.V(0).Infof("Lost lock %s: %v", key, err)
+					log.V(3).Infof("Lost lock %s: %v", key, err)
 					isLocked = false
 				}
 			} else {
@@ -82,7 +82,7 @@ func (lc *LockClient) StartLongLivedLock(key string, owner string, onLockOwnerCh
 				}
 			}
 			if lockOwner != lock.LockOwner() && lock.LockOwner() != "" {
-				glog.V(0).Infof("Lock owner changed from %s to %s", lockOwner, lock.LockOwner())
+				log.V(3).Infof("Lock owner changed from %s to %s", lockOwner, lock.LockOwner())
 				onLockOwnerChange(lock.LockOwner())
 				lockOwner = lock.LockOwner()
 			}
@@ -102,7 +102,7 @@ func (lock *LiveLock) retryUntilLocked(lockDuration time.Duration) {
 		return lock.AttemptToLock(lockDuration)
 	}, func(err error) (shouldContinue bool) {
 		if err != nil {
-			glog.Warningf("create lock %s: %s", lock.key, err)
+			log.Warningf("create lock %s: %s", lock.key, err)
 		}
 		return lock.renewToken == ""
 	})
@@ -2,7 +2,7 @@ package lock_manager

 import (
 	"fmt"
-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"github.com/seaweedfs/seaweedfs/weed/pb"
 	"time"
 )
@@ -56,7 +56,7 @@ func (dlm *DistributedLockManager) FindLockOwner(key string) (owner string, move
 	}
 	if movedTo != dlm.Host {
 		servers := dlm.LockRing.GetSnapshot()
-		glog.V(0).Infof("lock %s not on current %s but on %s from %v", key, dlm.Host, movedTo, servers)
+		log.V(3).Infof("lock %s not on current %s but on %s from %v", key, dlm.Host, movedTo, servers)
 		return
 	}
 	owner, err = dlm.lockManager.GetLockOwner(key)
@@ -3,7 +3,7 @@ package lock_manager
 import (
 	"fmt"
 	"github.com/google/uuid"
-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"sync"
 	"time"
 )
@@ -38,19 +38,19 @@ func (lm *LockManager) Lock(path string, expiredAtNs int64, token string, owner
 	lm.accessLock.Lock()
 	defer lm.accessLock.Unlock()

-	glog.V(4).Infof("lock %s %v %v %v", path, time.Unix(0, expiredAtNs), token, owner)
+	log.V(-1).Infof("lock %s %v %v %v", path, time.Unix(0, expiredAtNs), token, owner)

 	if oldValue, found := lm.locks[path]; found {
 		if oldValue.ExpiredAtNs > 0 && oldValue.ExpiredAtNs < time.Now().UnixNano() {
 			// lock is expired, set to a new lock
 			if token != "" {
-				glog.V(4).Infof("lock expired key %s non-empty token %v owner %v ts %s", path, token, owner, time.Unix(0, oldValue.ExpiredAtNs))
+				log.V(-1).Infof("lock expired key %s non-empty token %v owner %v ts %s", path, token, owner, time.Unix(0, oldValue.ExpiredAtNs))
 				err = LockErrorNonEmptyTokenOnExpiredLock
 				return
 			} else {
 				// new lock
 				renewToken = uuid.New().String()
-				glog.V(4).Infof("key %s new token %v owner %v", path, renewToken, owner)
+				log.V(-1).Infof("key %s new token %v owner %v", path, renewToken, owner)
 				lm.locks[path] = &Lock{Token: renewToken, ExpiredAtNs: expiredAtNs, Owner: owner}
 				return
 			}
@@ -60,30 +60,30 @@ func (lm *LockManager) Lock(path string, expiredAtNs int64, token string, owner
 		if oldValue.Token == token {
 			// token matches, renew the lock
 			renewToken = uuid.New().String()
-			glog.V(4).Infof("key %s old token %v owner %v => %v owner %v", path, oldValue.Token, oldValue.Owner, renewToken, owner)
+			log.V(-1).Infof("key %s old token %v owner %v => %v owner %v", path, oldValue.Token, oldValue.Owner, renewToken, owner)
 			lm.locks[path] = &Lock{Token: renewToken, ExpiredAtNs: expiredAtNs, Owner: owner}
 			return
 		} else {
 			if token == "" {
 				// new lock
-				glog.V(4).Infof("key %s locked by %v", path, oldValue.Owner)
+				log.V(-1).Infof("key %s locked by %v", path, oldValue.Owner)
 				err = fmt.Errorf("lock already owned by %v", oldValue.Owner)
 				return
 			}
-			glog.V(4).Infof("key %s expected token %v owner %v received %v from %v", path, oldValue.Token, oldValue.Owner, token, owner)
+			log.V(-1).Infof("key %s expected token %v owner %v received %v from %v", path, oldValue.Token, oldValue.Owner, token, owner)
 			err = fmt.Errorf("lock: token mismatch")
 			return
 		}
 	} else {
-		glog.V(4).Infof("key %s no lock owner %v", path, owner)
+		log.V(-1).Infof("key %s no lock owner %v", path, owner)
 		if token == "" {
 			// new lock
-			glog.V(4).Infof("key %s new token %v owner %v", path, token, owner)
+			log.V(-1).Infof("key %s new token %v owner %v", path, token, owner)
 			renewToken = uuid.New().String()
 			lm.locks[path] = &Lock{Token: renewToken, ExpiredAtNs: expiredAtNs, Owner: owner}
 			return
 		} else {
-			glog.V(4).Infof("key %s non-empty token %v owner %v", path, token, owner)
+			log.V(-1).Infof("key %s non-empty token %v owner %v", path, token, owner)
 			err = LockErrorNonEmptyTokenOnNewLock
 			return
 		}
@@ -99,13 +99,13 @@ func (lm *LockManager) Unlock(path string, token string) (isUnlocked bool, err e
 		if oldValue.ExpiredAtNs > 0 && oldValue.ExpiredAtNs < now.UnixNano() {
 			// lock is expired, delete it
 			isUnlocked = true
-			glog.V(4).Infof("key %s expired at %v", path, time.Unix(0, oldValue.ExpiredAtNs))
+			log.V(-1).Infof("key %s expired at %v", path, time.Unix(0, oldValue.ExpiredAtNs))
 			delete(lm.locks, path)
 			return
 		}
 		if oldValue.Token == token {
 			isUnlocked = true
-			glog.V(4).Infof("key %s unlocked with %v", path, token)
+			log.V(-1).Infof("key %s unlocked with %v", path, token)
 			delete(lm.locks, path)
 			return
 		} else {
@@ -130,7 +130,7 @@ func (lm *LockManager) CleanUp() {
 			continue
 		}
 		if now > value.ExpiredAtNs {
-			glog.V(4).Infof("key %s expired at %v", key, time.Unix(0, value.ExpiredAtNs))
+			log.V(-1).Infof("key %s expired at %v", key, time.Unix(0, value.ExpiredAtNs))
 			delete(lm.locks, key)
 		}
 	}
@@ -148,12 +148,12 @@ func (lm *LockManager) SelectLocks(selectFn func(key string) bool) (locks []*Loc

 	for key, lock := range lm.locks {
 		if now > lock.ExpiredAtNs {
-			glog.V(4).Infof("key %s expired at %v", key, time.Unix(0, lock.ExpiredAtNs))
+			log.V(-1).Infof("key %s expired at %v", key, time.Unix(0, lock.ExpiredAtNs))
 			delete(lm.locks, key)
 			continue
 		}
 		if selectFn(key) {
-			glog.V(4).Infof("key %s selected and deleted", key)
+			log.V(-1).Infof("key %s selected and deleted", key)
 			delete(lm.locks, key)
 			lock.Key = key
 			locks = append(locks, lock)
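The LockManager.Lock hunks above implement a token-renewal protocol: an empty token mints a fresh renew token, a matching token renews the lock, and a mismatch is rejected. A self-contained toy version of just that core logic — illustrative only; it drops the expired-lock error cases and the UUID tokens of the real code:

    package main

    import (
        "errors"
        "fmt"
        "sync"
        "time"
    )

    type toyLock struct {
        token     string
        expiresAt time.Time
        owner     string
    }

    type toyLockManager struct {
        mu    sync.Mutex
        locks map[string]*toyLock
        seq   int
    }

    func (lm *toyLockManager) Lock(path, token, owner string, ttl time.Duration) (string, error) {
        lm.mu.Lock()
        defer lm.mu.Unlock()
        old, found := lm.locks[path]
        // A live lock can only be renewed by presenting its current token.
        if found && time.Now().Before(old.expiresAt) && old.token != token {
            return "", errors.New("lock already owned by " + old.owner)
        }
        lm.seq++ // stand-in for uuid.New().String()
        renew := fmt.Sprintf("token-%d", lm.seq)
        lm.locks[path] = &toyLock{token: renew, expiresAt: time.Now().Add(ttl), owner: owner}
        return renew, nil
    }

    func main() {
        lm := &toyLockManager{locks: map[string]*toyLock{}}
        t1, _ := lm.Lock("/a", "", "worker-1", time.Second)       // mint
        t2, _ := lm.Lock("/a", t1, "worker-1", time.Second)       // renew
        _, err := lm.Lock("/a", "stale", "worker-2", time.Second) // mismatch
        fmt.Println(t1, t2, err)
    }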
@@ -1,7 +1,7 @@
 package lock_manager

 import (
-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"github.com/seaweedfs/seaweedfs/weed/pb"
 	"github.com/seaweedfs/seaweedfs/weed/util"
 	"sort"
@@ -40,11 +40,11 @@ func (r *LockRing) SetTakeSnapshotCallback(onTakeSnapshot func(snapshot []pb.Ser
 // AddServer adds a server to the ring
 // if the previous snapshot passed the snapshot interval, create a new snapshot
 func (r *LockRing) AddServer(server pb.ServerAddress) {
-	glog.V(0).Infof("add server %v", server)
+	log.V(3).Infof("add server %v", server)
 	r.Lock()

 	if _, found := r.candidateServers[server]; found {
-		glog.V(0).Infof("add server: already exists %v", server)
+		log.V(3).Infof("add server: already exists %v", server)
 		r.Unlock()
 		return
 	}
@@ -56,7 +56,7 @@ func (r *LockRing) AddServer(server pb.ServerAddress) {
 }

 func (r *LockRing) RemoveServer(server pb.ServerAddress) {
-	glog.V(0).Infof("remove server %v", server)
+	log.V(3).Infof("remove server %v", server)

 	r.Lock()
@@ -3,7 +3,7 @@ package cluster
 import (
 	"context"

-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"github.com/seaweedfs/seaweedfs/weed/pb"
 	"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
 	"google.golang.org/grpc"
@@ -17,7 +17,7 @@ func ListExistingPeerUpdates(master pb.ServerAddress, grpcDialOption grpc.DialOp
 			FilerGroup: filerGroup,
 		})

-		glog.V(0).Infof("the cluster has %d %s\n", len(resp.ClusterNodes), clientType)
+		log.V(3).Infof("the cluster has %d %s\n", len(resp.ClusterNodes), clientType)
 		for _, node := range resp.ClusterNodes {
 			existingNodes = append(existingNodes, &master_pb.ClusterNodeUpdate{
 				NodeType: FilerType,
@@ -28,7 +28,7 @@ func ListExistingPeerUpdates(master pb.ServerAddress, grpcDialOption grpc.DialOp
 		}
 		return err
 	}); grpcErr != nil {
-		glog.V(0).Infof("connect to %s: %v", master, grpcErr)
+		log.V(3).Infof("connect to %s: %v", master, grpcErr)
 	}
 	return
 }
@@ -17,7 +17,7 @@ import (

 	"google.golang.org/grpc"

-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"github.com/seaweedfs/seaweedfs/weed/operation"
 	"github.com/seaweedfs/seaweedfs/weed/security"
 	"github.com/seaweedfs/seaweedfs/weed/util"
@@ -123,7 +123,7 @@ func runBenchmark(cmd *Command, args []string) bool {
 	if *b.cpuprofile != "" {
 		f, err := os.Create(*b.cpuprofile)
 		if err != nil {
-			glog.Fatal(err)
+			log.Fatal(err)
 		}
 		pprof.StartCPUProfile(f)
 		defer pprof.StopCPUProfile()
@@ -316,7 +316,7 @@ func readFiles(fileIdLineChan chan string, s *stat) {
 func writeFileIds(fileName string, fileIdLineChan chan string, finishChan chan bool) {
 	file, err := os.OpenFile(fileName, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
 	if err != nil {
-		glog.Fatalf("File to create file %s: %s\n", fileName, err)
+		log.Fatalf("File to create file %s: %s\n", fileName, err)
 	}
 	defer file.Close()

@@ -335,7 +335,7 @@ func writeFileIds(fileName string, fileIdLineChan chan string, finishChan chan b
 func readFileIds(fileName string, fileIdLineChan chan string) {
 	file, err := os.Open(fileName) // For read access.
 	if err != nil {
-		glog.Fatalf("File to read file %s: %s\n", fileName, err)
+		log.Fatalf("File to read file %s: %s\n", fileName, err)
 	}
 	defer file.Close()
@@ -1,7 +1,7 @@
 package command

 import (
-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"github.com/seaweedfs/seaweedfs/weed/storage"
 	"github.com/seaweedfs/seaweedfs/weed/storage/needle"
 	"github.com/seaweedfs/seaweedfs/weed/util"
@@ -43,15 +43,15 @@ func runCompact(cmd *Command, args []string) bool {
 	vid := needle.VolumeId(*compactVolumeId)
 	v, err := storage.NewVolume(util.ResolvePath(*compactVolumePath), util.ResolvePath(*compactVolumePath), *compactVolumeCollection, vid, storage.NeedleMapInMemory, nil, nil, preallocate, 0, 0)
 	if err != nil {
-		glog.Fatalf("Load Volume [ERROR] %s\n", err)
+		log.Fatalf("Load Volume [ERROR] %s\n", err)
 	}
 	if *compactMethod == 0 {
 		if err = v.Compact(preallocate, 0); err != nil {
-			glog.Fatalf("Compact Volume [ERROR] %s\n", err)
+			log.Fatalf("Compact Volume [ERROR] %s\n", err)
 		}
 	} else {
 		if err = v.Compact2(preallocate, 0, nil); err != nil {
-			glog.Fatalf("Compact Volume [ERROR] %s\n", err)
+			log.Fatalf("Compact Volume [ERROR] %s\n", err)
 		}
 	}
@@ -13,7 +13,7 @@ import (
 	"text/template"
 	"time"

-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"github.com/seaweedfs/seaweedfs/weed/storage"
 	"github.com/seaweedfs/seaweedfs/weed/storage/needle"
 	"github.com/seaweedfs/seaweedfs/weed/storage/needle_map"
@@ -111,11 +111,11 @@ func (scanner *VolumeFileScanner4Export) VisitNeedle(n *needle.Needle, offset in
 	vid := scanner.vid

 	nv, ok := needleMap.Get(n.Id)
-	glog.V(3).Infof("key %d offset %d size %d disk_size %d compressed %v ok %v nv %+v",
+	log.V(0).Infof("key %d offset %d size %d disk_size %d compressed %v ok %v nv %+v",
 		n.Id, offset, n.Size, n.DiskSize(scanner.version), n.IsCompressed(), ok, nv)
 	if *showDeleted && n.Size > 0 || ok && nv.Size.IsValid() && nv.Offset.ToActualOffset() == offset {
 		if newerThanUnix >= 0 && n.HasLastModifiedDate() && n.LastModified < uint64(newerThanUnix) {
-			glog.V(3).Infof("Skipping this file, as it's old enough: LastModified %d vs %d",
+			log.V(0).Infof("Skipping this file, as it's old enough: LastModified %d vs %d",
 				n.LastModified, newerThanUnix)
 			return nil
 		}
@@ -139,9 +139,9 @@ func (scanner *VolumeFileScanner4Export) VisitNeedle(n *needle.Needle, offset in
 				printNeedle(vid, n, scanner.version, true, offset, n.DiskSize(scanner.version))
 			}
 		}
-		glog.V(2).Infof("This seems deleted %d size %d", n.Id, n.Size)
+		log.V(1).Infof("This seems deleted %d size %d", n.Id, n.Size)
 	} else {
-		glog.V(2).Infof("Skipping later-updated Id %d size %d", n.Id, n.Size)
+		log.V(1).Infof("Skipping later-updated Id %d size %d", n.Id, n.Size)
 	}
 	return nil
 }
@@ -178,7 +178,7 @@ func runExport(cmd *Command, args []string) bool {
 		outputFile = os.Stdout
 	} else {
 		if outputFile, err = os.Create(*output); err != nil {
-			glog.Fatalf("cannot open output tar %s: %s", *output, err)
+			log.Fatalf("cannot open output tar %s: %s", *output, err)
 		}
 	}
 	defer outputFile.Close()
@@ -201,7 +201,7 @@ func runExport(cmd *Command, args []string) bool {
 	defer needleMap.Close()

 	if err := needleMap.LoadFromIdx(path.Join(util.ResolvePath(*export.dir), fileName+".idx")); err != nil {
-		glog.Fatalf("cannot load needle map from %s.idx: %s", fileName, err)
+		log.Fatalf("cannot load needle map from %s.idx: %s", fileName, err)
 	}

 	volumeFileScanner := &VolumeFileScanner4Export{
@@ -215,7 +215,7 @@ func runExport(cmd *Command, args []string) bool {

 	err = storage.ScanVolumeFile(util.ResolvePath(*export.dir), *export.collection, vid, storage.NeedleMapInMemory, volumeFileScanner)
 	if err != nil && err != io.EOF {
-		glog.Errorf("Export Volume File [ERROR] %s\n", err)
+		log.Errorf("Export Volume File [ERROR] %s\n", err)
 	}
 	return true
 }
@@ -14,7 +14,7 @@ import (
 	"time"

 	"github.com/seaweedfs/seaweedfs/weed/filer"
-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"github.com/seaweedfs/seaweedfs/weed/pb"
 	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
 	"github.com/seaweedfs/seaweedfs/weed/security"
@@ -324,44 +324,44 @@ func (fo *FilerOptions) startFiler() {
 		AllowedOrigins: strings.Split(*fo.allowedOrigins, ","),
 	})
 	if nfs_err != nil {
-		glog.Fatalf("Filer startup error: %v", nfs_err)
+		log.Fatalf("Filer startup error: %v", nfs_err)
 	}

 	if *fo.publicPort != 0 {
 		publicListeningAddress := util.JoinHostPort(*fo.bindIp, *fo.publicPort)
-		glog.V(0).Infoln("Start Seaweed filer server", util.Version(), "public at", publicListeningAddress)
+		log.V(3).Infoln("Start Seaweed filer server", util.Version(), "public at", publicListeningAddress)
 		publicListener, localPublicListener, e := util.NewIpAndLocalListeners(*fo.bindIp, *fo.publicPort, 0)
 		if e != nil {
-			glog.Fatalf("Filer server public listener error on port %d:%v", *fo.publicPort, e)
+			log.Fatalf("Filer server public listener error on port %d:%v", *fo.publicPort, e)
 		}
 		go func() {
 			if e := http.Serve(publicListener, publicVolumeMux); e != nil {
-				glog.Fatalf("Volume server fail to serve public: %v", e)
+				log.Fatalf("Volume server fail to serve public: %v", e)
 			}
 		}()
 		if localPublicListener != nil {
 			go func() {
 				if e := http.Serve(localPublicListener, publicVolumeMux); e != nil {
-					glog.Errorf("Volume server fail to serve public: %v", e)
+					log.Errorf("Volume server fail to serve public: %v", e)
 				}
 			}()
 		}
 	}

-	glog.V(0).Infof("Start Seaweed Filer %s at %s:%d", util.Version(), *fo.ip, *fo.port)
+	log.V(3).Infof("Start Seaweed Filer %s at %s:%d", util.Version(), *fo.ip, *fo.port)
 	filerListener, filerLocalListener, e := util.NewIpAndLocalListeners(
 		*fo.bindIp, *fo.port,
 		time.Duration(10)*time.Second,
 	)
 	if e != nil {
-		glog.Fatalf("Filer listener error: %v", e)
+		log.Fatalf("Filer listener error: %v", e)
 	}

 	// starting grpc server
 	grpcPort := *fo.portGrpc
 	grpcL, grpcLocalL, err := util.NewIpAndLocalListeners(*fo.bindIp, grpcPort, 0)
 	if err != nil {
-		glog.Fatalf("failed to listen on grpc port %d: %v", grpcPort, err)
+		log.Fatalf("failed to listen on grpc port %d: %v", grpcPort, err)
 	}
 	grpcS := pb.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.filer"))
 	filer_pb.RegisterSeaweedFilerServer(grpcS, fs)
@@ -378,13 +378,13 @@ func (fo *FilerOptions) startFiler() {
 		localSocket = fmt.Sprintf("/tmp/seaweedfs-filer-%d.sock", *fo.port)
 	}
 	if err := os.Remove(localSocket); err != nil && !os.IsNotExist(err) {
-		glog.Fatalf("Failed to remove %s, error: %s", localSocket, err.Error())
+		log.Fatalf("Failed to remove %s, error: %s", localSocket, err.Error())
 	}
 	go func() {
 		// start on local unix socket
 		filerSocketListener, err := net.Listen("unix", localSocket)
 		if err != nil {
-			glog.Fatalf("Failed to listen on %s: %v", localSocket, err)
+			log.Fatalf("Failed to listen on %s: %v", localSocket, err)
 		}
 		httpS.Serve(filerSocketListener)
 	}()
@@ -402,14 +402,14 @@ func (fo *FilerOptions) startFiler() {
 		RefreshDuration: security.CredRefreshingInterval,
 	}
 	if fo.certProvider, err = pemfile.NewProvider(pemfileOptions); err != nil {
-		glog.Fatalf("pemfile.NewProvider(%v) failed: %v", pemfileOptions, err)
+		log.Fatalf("pemfile.NewProvider(%v) failed: %v", pemfileOptions, err)
 	}

 	caCertPool := x509.NewCertPool()
 	if caCertFile != "" {
 		caCertFile, err := os.ReadFile(caCertFile)
 		if err != nil {
-			glog.Fatalf("error reading CA certificate: %v", err)
+			log.Fatalf("error reading CA certificate: %v", err)
 		}
 		caCertPool.AppendCertsFromPEM(caCertFile)
 	}
@@ -428,23 +428,23 @@ func (fo *FilerOptions) startFiler() {
 		if filerLocalListener != nil {
 			go func() {
 				if err := httpS.ServeTLS(filerLocalListener, "", ""); err != nil {
-					glog.Errorf("Filer Fail to serve: %v", e)
+					log.Errorf("Filer Fail to serve: %v", e)
 				}
 			}()
 		}
 		if err := httpS.ServeTLS(filerListener, "", ""); err != nil {
-			glog.Fatalf("Filer Fail to serve: %v", e)
+			log.Fatalf("Filer Fail to serve: %v", e)
 		}
 	} else {
 		if filerLocalListener != nil {
 			go func() {
 				if err := httpS.Serve(filerLocalListener); err != nil {
-					glog.Errorf("Filer Fail to serve: %v", e)
+					log.Errorf("Filer Fail to serve: %v", e)
 				}
 			}()
 		}
 		if err := httpS.Serve(filerListener); err != nil {
-			glog.Fatalf("Filer Fail to serve: %v", e)
+			log.Fatalf("Filer Fail to serve: %v", e)
 		}
 	}
 }
@@ -3,7 +3,7 @@ package command
 import (
 	"errors"
 	"fmt"
-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"github.com/seaweedfs/seaweedfs/weed/pb"
 	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
 	"github.com/seaweedfs/seaweedfs/weed/replication/source"
@@ -78,7 +78,7 @@ func runFilerBackup(cmd *Command, args []string) bool {
 		clientEpoch++
 		err := doFilerBackup(grpcDialOption, &filerBackupOptions, clientId, clientEpoch)
 		if err != nil {
-			glog.Errorf("backup from %s: %v", *filerBackupOptions.filer, err)
+			log.Errorf("backup from %s: %v", *filerBackupOptions.filer, err)
 			time.Sleep(1747 * time.Millisecond)
 		}
 	}
@@ -118,14 +118,14 @@ func doFilerBackup(grpcDialOption grpc.DialOption, backupOption *FilerBackupOpti
 	if timeAgo.Milliseconds() == 0 {
 		lastOffsetTsNs, err := getOffset(grpcDialOption, sourceFiler, BackupKeyPrefix, int32(sinkId))
 		if err != nil {
-			glog.V(0).Infof("starting from %v", startFrom)
+			log.V(3).Infof("starting from %v", startFrom)
 		} else {
 			startFrom = time.Unix(0, lastOffsetTsNs)
-			glog.V(0).Infof("resuming from %v", startFrom)
+			log.V(3).Infof("resuming from %v", startFrom)
 		}
 	} else {
 		startFrom = time.Now().Add(-timeAgo)
-		glog.V(0).Infof("start time is set to %v", startFrom)
+		log.V(3).Infof("start time is set to %v", startFrom)
 	}

 	// create filer sink
@@ -146,7 +146,7 @@ func doFilerBackup(grpcDialOption grpc.DialOption, backupOption *FilerBackupOpti
 			return nil
 		}
 		if errors.Is(err, http.ErrNotFound) {
-			glog.V(0).Infof("got 404 error, ignore it: %s", err.Error())
+			log.V(3).Infof("got 404 error, ignore it: %s", err.Error())
 			return nil
 		}
 		return err
@@ -156,7 +156,7 @@ func doFilerBackup(grpcDialOption grpc.DialOption, backupOption *FilerBackupOpti
 	}

 	processEventFnWithOffset := pb.AddOffsetFunc(processEventFn, 3*time.Second, func(counter int64, lastTsNs int64) error {
-		glog.V(0).Infof("backup %s progressed to %v %0.2f/sec", sourceFiler, time.Unix(0, lastTsNs), float64(counter)/float64(3))
+		log.V(3).Infof("backup %s progressed to %v %0.2f/sec", sourceFiler, time.Unix(0, lastTsNs), float64(counter)/float64(3))
 		return setOffset(grpcDialOption, sourceFiler, BackupKeyPrefix, int32(sinkId), lastTsNs)
 	})

@@ -167,7 +167,7 @@ func doFilerBackup(grpcDialOption grpc.DialOption, backupOption *FilerBackupOpti
 			time.Sleep(time.Hour * 24)
 			key := util.Join(targetPath, now.Add(-1*time.Hour*24*time.Duration(*filerBackupOptions.retentionDays)).Format("2006-01-02"))
 			_ = dataSink.DeleteEntry(util.Join(targetPath, key), true, true, nil)
-			glog.V(0).Infof("incremental backup delete directory:%s", key)
+			log.V(3).Infof("incremental backup delete directory:%s", key)
 		}
 	}()
 }
@@ -4,7 +4,7 @@ import (
 	"context"
 	"fmt"
 	"github.com/seaweedfs/seaweedfs/weed/filer"
-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"github.com/spf13/viper"
 	"google.golang.org/grpc"
 	"reflect"
@@ -64,13 +64,13 @@ func runFilerMetaBackup(cmd *Command, args []string) bool {
 	v.SetConfigFile(*metaBackup.backupFilerConfig)

 	if err := v.ReadInConfig(); err != nil { // Handle errors reading the config file
-		glog.Fatalf("Failed to load %s file: %v\nPlease use this command to generate the a %s.toml file\n"+
+		log.Fatalf("Failed to load %s file: %v\nPlease use this command to generate the a %s.toml file\n"+
 			" weed scaffold -config=%s -output=.\n\n\n",
 			*metaBackup.backupFilerConfig, err, "backup_filer", "filer")
 	}

 	if err := metaBackup.initStore(v); err != nil {
-		glog.V(0).Infof("init backup filer store: %v", err)
+		log.V(3).Infof("init backup filer store: %v", err)
 		return true
 	}

@@ -81,13 +81,13 @@ func runFilerMetaBackup(cmd *Command, args []string) bool {
 	}

 	if *metaBackup.restart || missingPreviousBackup {
-		glog.V(0).Infof("traversing metadata tree...")
+		log.V(3).Infof("traversing metadata tree...")
 		startTime := time.Now()
 		if err := metaBackup.traverseMetadata(); err != nil {
-			glog.Errorf("traverse meta data: %v", err)
+			log.Errorf("traverse meta data: %v", err)
 			return true
 		}
-		glog.V(0).Infof("metadata copied up to %v", startTime)
+		log.V(3).Infof("metadata copied up to %v", startTime)
 		if err := metaBackup.setOffset(startTime); err != nil {
 			startTime = time.Now()
 		}
@@ -96,7 +96,7 @@ func runFilerMetaBackup(cmd *Command, args []string) bool {
 	for {
 		err := metaBackup.streamMetadataBackup()
 		if err != nil {
-			glog.Errorf("filer meta backup from %s: %v", *metaBackup.filerAddress, err)
+			log.Errorf("filer meta backup from %s: %v", *metaBackup.filerAddress, err)
 			time.Sleep(1747 * time.Millisecond)
 		}
 	}
@@ -111,9 +111,9 @@ func (metaBackup *FilerMetaBackupOptions) initStore(v *viper.Viper) error {
 		if v.GetBool(store.GetName() + ".enabled") {
 			store = reflect.New(reflect.ValueOf(store).Elem().Type()).Interface().(filer.FilerStore)
 			if err := store.Initialize(v, store.GetName()+"."); err != nil {
-				glog.Fatalf("failed to initialize store for %s: %+v", store.GetName(), err)
+				log.Fatalf("failed to initialize store for %s: %+v", store.GetName(), err)
 			}
-			glog.V(0).Infof("configured filer store to %s", store.GetName())
+			log.V(3).Infof("configured filer store to %s", store.GetName())
 			hasDefaultStoreConfigured = true
 			metaBackup.store = filer.NewFilerStoreWrapper(store)
 			break
@@ -155,7 +155,7 @@ func (metaBackup *FilerMetaBackupOptions) streamMetadataBackup() error {
 	if err != nil {
 		startTime = time.Now()
 	}
-	glog.V(0).Infof("streaming from %v", startTime)
+	log.V(3).Infof("streaming from %v", startTime)

 	store := metaBackup.store

@@ -192,7 +192,7 @@ func (metaBackup *FilerMetaBackupOptions) streamMetadataBackup() error {

 	processEventFnWithOffset := pb.AddOffsetFunc(eachEntryFunc, 3*time.Second, func(counter int64, lastTsNs int64) error {
 		lastTime := time.Unix(0, lastTsNs)
-		glog.V(0).Infof("meta backup %s progressed to %v %0.2f/sec", *metaBackup.filerAddress, lastTime, float64(counter)/float64(3))
+		log.V(3).Infof("meta backup %s progressed to %v %0.2f/sec", *metaBackup.filerAddress, lastTime, float64(counter)/float64(3))
 		return metaBackup.setOffset(lastTime)
 	})
@@ -3,7 +3,7 @@ package command
 import (
 	"context"
 	"fmt"
-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"github.com/seaweedfs/seaweedfs/weed/pb"
 	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
 	"github.com/seaweedfs/seaweedfs/weed/pb/remote_pb"
@@ -115,7 +115,7 @@ func runFilerRemoteGateway(cmd *Command, args []string) bool {
 		return remoteGatewayOptions.followBucketUpdatesAndUploadToRemote(filerSource)
 	}, func(err error) bool {
 		if err != nil {
-			glog.Errorf("synchronize %s: %v", remoteGatewayOptions.bucketsDir, err)
+			log.Errorf("synchronize %s: %v", remoteGatewayOptions.bucketsDir, err)
 		}
 		return true
 	})
@@ -3,7 +3,7 @@ package command
 import (
 	"fmt"
 	"github.com/seaweedfs/seaweedfs/weed/filer"
-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"github.com/seaweedfs/seaweedfs/weed/pb"
 	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
 	"github.com/seaweedfs/seaweedfs/weed/pb/remote_pb"
@@ -43,7 +43,7 @@ func (option *RemoteGatewayOptions) followBucketUpdatesAndUploadToRemote(filerSo
 			return nil
 		}
 		now := time.Now().UnixNano()
-		glog.V(0).Infof("remote sync %s progressed to %v %0.2f/sec", *option.filerAddress, time.Unix(0, offsetTsNs), float64(counter)/(float64(now-lastLogTsNs)/1e9))
+		log.V(3).Infof("remote sync %s progressed to %v %0.2f/sec", *option.filerAddress, time.Unix(0, offsetTsNs), float64(counter)/(float64(now-lastLogTsNs)/1e9))
 		lastLogTsNs = now
 		return remote_storage.SetSyncOffset(option.grpcDialOption, pb.ServerAddress(*option.filerAddress), option.bucketsDir, offsetTsNs)
 	})
@@ -78,12 +78,12 @@ func (option *RemoteGatewayOptions) makeBucketedEventProcessor(filerSource *sour
 	}
 	if option.mappings.PrimaryBucketStorageName != "" && *option.createBucketAt == "" {
 		*option.createBucketAt = option.mappings.PrimaryBucketStorageName
-		glog.V(0).Infof("%s is set as the primary remote storage", *option.createBucketAt)
+		log.V(3).Infof("%s is set as the primary remote storage", *option.createBucketAt)
 	}
 	if len(option.mappings.Mappings) == 1 && *option.createBucketAt == "" {
 		for k := range option.mappings.Mappings {
 			*option.createBucketAt = k
-			glog.V(0).Infof("%s is set as the only remote storage", *option.createBucketAt)
+			log.V(3).Infof("%s is set as the only remote storage", *option.createBucketAt)
 		}
 	}
 	if *option.createBucketAt == "" {
@@ -132,7 +132,7 @@ func (option *RemoteGatewayOptions) makeBucketedEventProcessor(filerSource *sour
 			bucketName = remoteLocation.Bucket
 		}

-		glog.V(0).Infof("create bucket %s", bucketName)
+		log.V(3).Infof("create bucket %s", bucketName)
 		if err := client.CreateBucket(bucketName); err != nil {
 			return fmt.Errorf("create bucket %s in %s: %v", bucketName, remoteConf.Name, err)
 		}
@@ -150,7 +150,7 @@ func (option *RemoteGatewayOptions) makeBucketedEventProcessor(filerSource *sour
 			return fmt.Errorf("findRemoteStorageClient %s: %v", entry.Name, err)
 		}

-		glog.V(0).Infof("delete remote bucket %s", remoteStorageMountLocation.Bucket)
+		log.V(3).Infof("delete remote bucket %s", remoteStorageMountLocation.Bucket)
 		if err := client.DeleteBucket(remoteStorageMountLocation.Bucket); err != nil {
 			return fmt.Errorf("delete remote bucket %s: %v", remoteStorageMountLocation.Bucket, err)
 		}
@@ -219,17 +219,17 @@ func (option *RemoteGatewayOptions) makeBucketedEventProcessor(filerSource *sour
 			if err != nil {
 				return err
 			}
-			glog.V(2).Infof("create: %+v", resp)
+			log.V(1).Infof("create: %+v", resp)
 			if !shouldSendToRemote(message.NewEntry) {
-				glog.V(2).Infof("skipping creating: %+v", resp)
+				log.V(1).Infof("skipping creating: %+v", resp)
 				return nil
 			}
 			dest := toRemoteStorageLocation(bucket, util.NewFullPath(message.NewParentPath, message.NewEntry.Name), remoteStorageMountLocation)
 			if message.NewEntry.IsDirectory {
-				glog.V(0).Infof("mkdir %s", remote_storage.FormatLocation(dest))
+				log.V(3).Infof("mkdir %s", remote_storage.FormatLocation(dest))
 				return client.WriteDirectory(dest, message.NewEntry)
 			}
-			glog.V(0).Infof("create %s", remote_storage.FormatLocation(dest))
+			log.V(3).Infof("create %s", remote_storage.FormatLocation(dest))
 			remoteEntry, writeErr := retriedWriteFile(client, filerSource, message.NewEntry, dest)
 			if writeErr != nil {
 				return writeErr
@@ -248,13 +248,13 @@ func (option *RemoteGatewayOptions) makeBucketedEventProcessor(filerSource *sour
 			if err != nil {
 				return err
 			}
-			glog.V(2).Infof("delete: %+v", resp)
+			log.V(1).Infof("delete: %+v", resp)
 			dest := toRemoteStorageLocation(bucket, util.NewFullPath(resp.Directory, message.OldEntry.Name), remoteStorageMountLocation)
 			if message.OldEntry.IsDirectory {
-				glog.V(0).Infof("rmdir %s", remote_storage.FormatLocation(dest))
+				log.V(3).Infof("rmdir %s", remote_storage.FormatLocation(dest))
 				return client.RemoveDirectory(dest)
 			}
-			glog.V(0).Infof("delete %s", remote_storage.FormatLocation(dest))
+			log.V(3).Infof("delete %s", remote_storage.FormatLocation(dest))
 			return client.DeleteFile(dest)
 		}
 		if message.OldEntry != nil && message.NewEntry != nil {
@@ -278,7 +278,7 @@ func (option *RemoteGatewayOptions) makeBucketedEventProcessor(filerSource *sour
 			newBucket, newRemoteStorageMountLocation, newRemoteStorage, newOk := option.detectBucketInfo(message.NewParentPath)
 			if oldOk && newOk {
 				if !shouldSendToRemote(message.NewEntry) {
-					glog.V(2).Infof("skipping updating: %+v", resp)
+					log.V(1).Infof("skipping updating: %+v", resp)
 					return nil
 				}
 				client, err := remote_storage.GetRemoteStorage(oldRemoteStorage)
@@ -292,7 +292,7 @@ func (option *RemoteGatewayOptions) makeBucketedEventProcessor(filerSource *sour
 					return nil
 				}
 				if message.OldEntry.RemoteEntry != nil && filer.IsSameData(message.OldEntry, message.NewEntry) {
-					glog.V(2).Infof("update meta: %+v", resp)
+					log.V(1).Infof("update meta: %+v", resp)
 					oldDest := toRemoteStorageLocation(oldBucket, util.NewFullPath(resp.Directory, message.OldEntry.Name), oldRemoteStorageMountLocation)
 					return client.UpdateFileMetadata(oldDest, message.OldEntry, message.NewEntry)
 				} else {
@@ -316,14 +316,14 @@ func (option *RemoteGatewayOptions) makeBucketedEventProcessor(filerSource *sour
 				if message.OldEntry.IsDirectory {
 					return client.RemoveDirectory(oldDest)
 				}
-				glog.V(0).Infof("delete %s", remote_storage.FormatLocation(oldDest))
+				log.V(3).Infof("delete %s", remote_storage.FormatLocation(oldDest))
 				if err := client.DeleteFile(oldDest); err != nil {
 					return err
 				}
 			}
 			if newOk {
 				if !shouldSendToRemote(message.NewEntry) {
-					glog.V(2).Infof("skipping updating: %+v", resp)
+					log.V(1).Infof("skipping updating: %+v", resp)
 					return nil
 				}
 				client, err := remote_storage.GetRemoteStorage(newRemoteStorage)
@@ -375,13 +375,13 @@ func (option *RemoteGatewayOptions) detectBucketInfo(actualDir string) (bucket u
 	var isMounted bool
 	remoteStorageMountLocation, isMounted = option.mappings.Mappings[string(bucket)]
 	if !isMounted {
-		glog.Warningf("%s is not mounted", bucket)
+		log.Warningf("%s is not mounted", bucket)
 		return "", nil, nil, false
 	}
 	var hasClient bool
 	remoteConf, hasClient = option.remoteConfs[remoteStorageMountLocation.Name]
 	if !hasClient {
-		glog.Warningf("%s mounted to un-configured %+v", bucket, remoteStorageMountLocation)
+		log.Warningf("%s mounted to un-configured %+v", bucket, remoteStorageMountLocation)
 		return "", nil, nil, false
 	}
 	return bucket, remoteStorageMountLocation, remoteConf, true
@@ -422,7 +422,7 @@ func (option *RemoteGatewayOptions) collectRemoteStorageConf() (err error) {
 	}, "", false, math.MaxUint32)

 	if option.mappings.PrimaryBucketStorageName == "" && len(option.remoteConfs) == 1 {
-		glog.V(0).Infof("%s is set to the default remote storage", lastConfName)
+		log.V(3).Infof("%s is set to the default remote storage", lastConfName)
 		option.mappings.PrimaryBucketStorageName = lastConfName
 	}
@@ -2,7 +2,7 @@ package command

 import (
 	"fmt"
-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"github.com/seaweedfs/seaweedfs/weed/pb"
 	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
 	"github.com/seaweedfs/seaweedfs/weed/replication/source"
@@ -94,7 +94,7 @@ func runFilerRemoteSynchronize(cmd *Command, args []string) bool {
 			return followUpdatesAndUploadToRemote(&remoteSyncOptions, filerSource, dir)
 		}, func(err error) bool {
 			if err != nil {
-				glog.Errorf("synchronize %s: %v", dir, err)
+				log.Errorf("synchronize %s: %v", dir, err)
 			}
 			return true
 		})
@ -9,7 +9,7 @@ import (
"time"

"github.com/seaweedfs/seaweedfs/weed/filer"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/util/log"
"github.com/seaweedfs/seaweedfs/weed/pb"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/pb/remote_pb"
@ -57,7 +57,7 @@ func followUpdatesAndUploadToRemote(option *RemoteSyncOptions, filerSource *sour
}
// use processor.processedTsWatermark instead of the lastTsNs from the most recent job
now := time.Now().UnixNano()
glog.V(0).Infof("remote sync %s progressed to %v %0.2f/sec", *option.filerAddress, time.Unix(0, offsetTsNs), float64(counter)/(float64(now-lastLogTsNs)/1e9))
log.V(3).Infof("remote sync %s progressed to %v %0.2f/sec", *option.filerAddress, time.Unix(0, offsetTsNs), float64(counter)/(float64(now-lastLogTsNs)/1e9))
lastLogTsNs = now
return remote_storage.SetSyncOffset(option.grpcDialOption, pb.ServerAddress(*option.filerAddress), mountedDir, offsetTsNs)
})
@ -103,10 +103,10 @@ func (option *RemoteSyncOptions) makeEventProcessor(remoteStorage *remote_pb.Rem
}
if remoteLoc, found := mappings.Mappings[mountedDir]; found {
if remoteStorageMountLocation.Bucket != remoteLoc.Bucket || remoteStorageMountLocation.Path != remoteLoc.Path {
glog.Fatalf("Unexpected mount changes %+v => %+v", remoteStorageMountLocation, remoteLoc)
log.Fatalf("Unexpected mount changes %+v => %+v", remoteStorageMountLocation, remoteLoc)
}
} else {
glog.V(0).Infof("unmounted %s exiting ...", mountedDir)
log.V(3).Infof("unmounted %s exiting ...", mountedDir)
os.Exit(0)
}
}
@ -142,17 +142,17 @@ func (option *RemoteSyncOptions) makeEventProcessor(remoteStorage *remote_pb.Rem
if !filer.HasData(message.NewEntry) {
return nil
}
glog.V(2).Infof("create: %+v", resp)
log.V(1).Infof("create: %+v", resp)
if !shouldSendToRemote(message.NewEntry) {
glog.V(2).Infof("skipping creating: %+v", resp)
log.V(1).Infof("skipping creating: %+v", resp)
return nil
}
dest := toRemoteStorageLocation(util.FullPath(mountedDir), util.NewFullPath(message.NewParentPath, message.NewEntry.Name), remoteStorageMountLocation)
if message.NewEntry.IsDirectory {
glog.V(0).Infof("mkdir %s", remote_storage.FormatLocation(dest))
log.V(3).Infof("mkdir %s", remote_storage.FormatLocation(dest))
return client.WriteDirectory(dest, message.NewEntry)
}
glog.V(0).Infof("create %s", remote_storage.FormatLocation(dest))
log.V(3).Infof("create %s", remote_storage.FormatLocation(dest))
remoteEntry, writeErr := retriedWriteFile(client, filerSource, message.NewEntry, dest)
if writeErr != nil {
return writeErr
@ -160,13 +160,13 @@ func (option *RemoteSyncOptions) makeEventProcessor(remoteStorage *remote_pb.Rem
return updateLocalEntry(option, message.NewParentPath, message.NewEntry, remoteEntry)
}
if filer_pb.IsDelete(resp) {
glog.V(2).Infof("delete: %+v", resp)
log.V(1).Infof("delete: %+v", resp)
dest := toRemoteStorageLocation(util.FullPath(mountedDir), util.NewFullPath(resp.Directory, message.OldEntry.Name), remoteStorageMountLocation)
if message.OldEntry.IsDirectory {
glog.V(0).Infof("rmdir %s", remote_storage.FormatLocation(dest))
log.V(3).Infof("rmdir %s", remote_storage.FormatLocation(dest))
return client.RemoveDirectory(dest)
}
glog.V(0).Infof("delete %s", remote_storage.FormatLocation(dest))
log.V(3).Infof("delete %s", remote_storage.FormatLocation(dest))
return client.DeleteFile(dest)
}
if message.OldEntry != nil && message.NewEntry != nil {
@ -176,7 +176,7 @@ func (option *RemoteSyncOptions) makeEventProcessor(remoteStorage *remote_pb.Rem
oldDest := toRemoteStorageLocation(util.FullPath(mountedDir), util.NewFullPath(resp.Directory, message.OldEntry.Name), remoteStorageMountLocation)
dest := toRemoteStorageLocation(util.FullPath(mountedDir), util.NewFullPath(message.NewParentPath, message.NewEntry.Name), remoteStorageMountLocation)
if !shouldSendToRemote(message.NewEntry) {
glog.V(2).Infof("skipping updating: %+v", resp)
log.V(1).Infof("skipping updating: %+v", resp)
return nil
}
if message.NewEntry.IsDirectory {
@ -184,12 +184,12 @@ func (option *RemoteSyncOptions) makeEventProcessor(remoteStorage *remote_pb.Rem
}
if resp.Directory == message.NewParentPath && message.OldEntry.Name == message.NewEntry.Name {
if filer.IsSameData(message.OldEntry, message.NewEntry) {
glog.V(2).Infof("update meta: %+v", resp)
log.V(1).Infof("update meta: %+v", resp)
return client.UpdateFileMetadata(dest, message.OldEntry, message.NewEntry)
}
}
glog.V(2).Infof("update: %+v", resp)
glog.V(0).Infof("delete %s", remote_storage.FormatLocation(oldDest))
log.V(1).Infof("update: %+v", resp)
log.V(3).Infof("delete %s", remote_storage.FormatLocation(oldDest))
if err := client.DeleteFile(oldDest); err != nil {
if isMultipartUploadFile(resp.Directory, message.OldEntry.Name) {
return nil
@ -211,7 +211,7 @@ func retriedWriteFile(client remote_storage.RemoteStorageClient, filerSource *so
var writeErr error
err = util.Retry("writeFile", func() error {
reader := filer.NewFileReader(filerSource, newEntry)
glog.V(0).Infof("create %s", remote_storage.FormatLocation(dest))
log.V(3).Infof("create %s", remote_storage.FormatLocation(dest))
remoteEntry, writeErr = client.WriteFile(dest, newEntry, reader)
if writeErr != nil {
return writeErr
@ -219,7 +219,7 @@ func retriedWriteFile(client remote_storage.RemoteStorageClient, filerSource *so
return nil
})
if err != nil {
glog.Errorf("write to %s: %v", dest, err)
log.Errorf("write to %s: %v", dest, err)
}
return
}
@ -232,7 +232,7 @@ func collectLastSyncOffset(filerClient filer_pb.FilerClient, grpcDialOption grpc
if timeAgo == 0 {
mountedDirEntry, err := filer_pb.GetEntry(filerClient, util.FullPath(mountedDir))
if err != nil {
glog.V(0).Infof("get mounted directory %s: %v", mountedDir, err)
log.V(3).Infof("get mounted directory %s: %v", mountedDir, err)
return time.Now()
}

@ -240,7 +240,7 @@ func collectLastSyncOffset(filerClient filer_pb.FilerClient, grpcDialOption grpc
if mountedDirEntry != nil {
if err == nil && mountedDirEntry.Attributes.Crtime < lastOffsetTsNs/1000000 {
lastOffsetTs = time.Unix(0, lastOffsetTsNs)
glog.V(0).Infof("resume from %v", lastOffsetTs)
log.V(3).Infof("resume from %v", lastOffsetTs)
} else {
lastOffsetTs = time.Unix(mountedDirEntry.Attributes.Crtime, 0)
}

@ -4,7 +4,7 @@ import (
"context"
"strings"

"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/util/log"
"github.com/seaweedfs/seaweedfs/weed/replication"
"github.com/seaweedfs/seaweedfs/weed/replication/sink"
"github.com/seaweedfs/seaweedfs/weed/replication/sub"
@ -42,10 +42,10 @@ func runFilerReplicate(cmd *Command, args []string) bool {
for _, input := range sub.NotificationInputs {
if config.GetBool("notification." + input.GetName() + ".enabled") {
if err := input.Initialize(config, "notification."+input.GetName()+"."); err != nil {
glog.Fatalf("Failed to initialize notification input for %s: %+v",
log.Fatalf("Failed to initialize notification input for %s: %+v",
input.GetName(), err)
}
glog.V(0).Infof("Configure notification input to %s", input.GetName())
log.V(3).Infof("Configure notification input to %s", input.GetName())
notificationInput = input
break
}
@ -63,7 +63,7 @@ func runFilerReplicate(cmd *Command, args []string) bool {
fromDir := config.GetString("source.filer.directory")
toDir := config.GetString("sink.filer.directory")
if strings.HasPrefix(toDir, fromDir) {
glog.Fatalf("recursive replication! source directory %s includes the sink directory %s", fromDir, toDir)
log.Fatalf("recursive replication! source directory %s includes the sink directory %s", fromDir, toDir)
}
}
}
@ -83,7 +83,7 @@ func runFilerReplicate(cmd *Command, args []string) bool {
for {
key, m, onSuccessFn, onFailureFn, err := notificationInput.ReceiveMessage()
if err != nil {
glog.Errorf("receive %s: %+v", key, err)
log.Errorf("receive %s: %+v", key, err)
if onFailureFn != nil {
onFailureFn()
}
@ -97,19 +97,19 @@ func runFilerReplicate(cmd *Command, args []string) bool {
continue
}
if m.OldEntry != nil && m.NewEntry == nil {
glog.V(1).Infof("delete: %s", key)
log.V(2).Infof("delete: %s", key)
} else if m.OldEntry == nil && m.NewEntry != nil {
glog.V(1).Infof("add: %s", key)
log.V(2).Infof("add: %s", key)
} else {
glog.V(1).Infof("modify: %s", key)
log.V(2).Infof("modify: %s", key)
}
if err = replicator.Replicate(context.Background(), key, m); err != nil {
glog.Errorf("replicate %s: %+v", key, err)
log.Errorf("replicate %s: %+v", key, err)
if onFailureFn != nil {
onFailureFn()
}
} else {
glog.V(1).Infof("replicated %s", key)
log.V(2).Infof("replicated %s", key)
if onSuccessFn != nil {
onSuccessFn()
}
@ -123,10 +123,10 @@ func findSink(config *util.ViperProxy) sink.ReplicationSink {
for _, sk := range sink.Sinks {
if config.GetBool("sink." + sk.GetName() + ".enabled") {
if err := sk.Initialize(config, "sink."+sk.GetName()+"."); err != nil {
glog.Fatalf("Failed to initialize sink for %s: %+v",
log.Fatalf("Failed to initialize sink for %s: %+v",
sk.GetName(), err)
}
glog.V(0).Infof("Configure sink to %s", sk.GetName())
log.V(3).Infof("Configure sink to %s", sk.GetName())
dataSink = sk
break
}
@ -141,7 +141,7 @@ func validateOneEnabledInput(config *util.ViperProxy) {
if enabledInput == "" {
enabledInput = input.GetName()
} else {
glog.Fatalf("Notification input is enabled for both %s and %s", enabledInput, input.GetName())
log.Fatalf("Notification input is enabled for both %s and %s", enabledInput, input.GetName())
}
}
}

@ -10,7 +10,7 @@ import (
"sync/atomic"
"time"

"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/util/log"
"github.com/seaweedfs/seaweedfs/weed/pb"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/replication"
@ -133,13 +133,13 @@ func runFilerSynchronize(cmd *Command, args []string) bool {
// read a filer signature
aFilerSignature, aFilerErr := replication.ReadFilerSignature(grpcDialOption, filerA)
if aFilerErr != nil {
glog.Errorf("get filer 'a' signature %d error from %s to %s: %v", aFilerSignature, *syncOptions.filerA, *syncOptions.filerB, aFilerErr)
log.Errorf("get filer 'a' signature %d error from %s to %s: %v", aFilerSignature, *syncOptions.filerA, *syncOptions.filerB, aFilerErr)
return true
}
// read b filer signature
bFilerSignature, bFilerErr := replication.ReadFilerSignature(grpcDialOption, filerB)
if bFilerErr != nil {
glog.Errorf("get filer 'b' signature %d error from %s to %s: %v", bFilerSignature, *syncOptions.filerA, *syncOptions.filerB, bFilerErr)
log.Errorf("get filer 'b' signature %d error from %s to %s: %v", bFilerSignature, *syncOptions.filerA, *syncOptions.filerB, bFilerErr)
return true
}

@ -148,7 +148,7 @@ func runFilerSynchronize(cmd *Command, args []string) bool {
// set synchronization start timestamp to offset
initOffsetError := initOffsetFromTsMs(grpcDialOption, filerB, aFilerSignature, *syncOptions.bFromTsMs, getSignaturePrefixByPath(*syncOptions.aPath))
if initOffsetError != nil {
glog.Errorf("init offset from timestamp %d error from %s to %s: %v", *syncOptions.bFromTsMs, *syncOptions.filerA, *syncOptions.filerB, initOffsetError)
log.Errorf("init offset from timestamp %d error from %s to %s: %v", *syncOptions.bFromTsMs, *syncOptions.filerA, *syncOptions.filerB, initOffsetError)
os.Exit(2)
}
for {
@ -174,7 +174,7 @@ func runFilerSynchronize(cmd *Command, args []string) bool {
aFilerSignature,
bFilerSignature)
if err != nil {
glog.Errorf("sync from %s to %s: %v", *syncOptions.filerA, *syncOptions.filerB, err)
log.Errorf("sync from %s to %s: %v", *syncOptions.filerA, *syncOptions.filerB, err)
time.Sleep(1747 * time.Millisecond)
}
}
@ -185,7 +185,7 @@ func runFilerSynchronize(cmd *Command, args []string) bool {
// set synchronization start timestamp to offset
initOffsetError := initOffsetFromTsMs(grpcDialOption, filerA, bFilerSignature, *syncOptions.aFromTsMs, getSignaturePrefixByPath(*syncOptions.bPath))
if initOffsetError != nil {
glog.Errorf("init offset from timestamp %d error from %s to %s: %v", *syncOptions.aFromTsMs, *syncOptions.filerB, *syncOptions.filerA, initOffsetError)
log.Errorf("init offset from timestamp %d error from %s to %s: %v", *syncOptions.aFromTsMs, *syncOptions.filerB, *syncOptions.filerA, initOffsetError)
os.Exit(2)
}
go func() {
@ -212,7 +212,7 @@ func runFilerSynchronize(cmd *Command, args []string) bool {
bFilerSignature,
aFilerSignature)
if err != nil {
glog.Errorf("sync from %s to %s: %v", *syncOptions.filerB, *syncOptions.filerA, err)
log.Errorf("sync from %s to %s: %v", *syncOptions.filerB, *syncOptions.filerA, err)
time.Sleep(2147 * time.Millisecond)
}
}
@ -236,7 +236,7 @@ func initOffsetFromTsMs(grpcDialOption grpc.DialOption, targetFiler pb.ServerAdd
if setOffsetErr != nil {
return setOffsetErr
}
glog.Infof("setOffset from timestamp ms success! start offset: %d from %s to %s", fromTsNs, *syncOptions.filerA, *syncOptions.filerB)
log.Infof("setOffset from timestamp ms success! start offset: %d from %s to %s", fromTsNs, *syncOptions.filerA, *syncOptions.filerB)
return nil
}

@ -250,7 +250,7 @@ func doSubscribeFilerMetaChanges(clientId int32, clientEpoch int32, grpcDialOpti
return err
}

glog.V(0).Infof("start sync %s(%d) => %s(%d) from %v(%d)", sourceFiler, sourceFilerSignature, targetFiler, targetFilerSignature, time.Unix(0, sourceFilerOffsetTsNs), sourceFilerOffsetTsNs)
log.V(3).Infof("start sync %s(%d) => %s(%d) from %v(%d)", sourceFiler, sourceFilerSignature, targetFiler, targetFilerSignature, time.Unix(0, sourceFilerOffsetTsNs), sourceFilerOffsetTsNs)

// create filer sink
filerSource := &source.FilerSource{}
@ -273,7 +273,7 @@ func doSubscribeFilerMetaChanges(clientId int32, clientEpoch int32, grpcDialOpti
}

if concurrency < 0 || concurrency > 1024 {
glog.Warningf("invalid concurrency value, using default: %d", DefaultConcurrencyLimit)
log.Warningf("invalid concurrency value, using default: %d", DefaultConcurrencyLimit)
concurrency = DefaultConcurrencyLimit
}
processor := NewMetadataProcessor(processEventFn, concurrency, sourceFilerOffsetTsNs)
@ -290,7 +290,7 @@ func doSubscribeFilerMetaChanges(clientId int32, clientEpoch int32, grpcDialOpti
}
// use processor.processedTsWatermark instead of the lastTsNs from the most recent job
now := time.Now().UnixNano()
glog.V(0).Infof("sync %s to %s progressed to %v %0.2f/sec", sourceFiler, targetFiler, time.Unix(0, offsetTsNs), float64(counter)/(float64(now-lastLogTsNs)/1e9))
log.V(3).Infof("sync %s to %s progressed to %v %0.2f/sec", sourceFiler, targetFiler, time.Unix(0, offsetTsNs), float64(counter)/(float64(now-lastLogTsNs)/1e9))
lastLogTsNs = now
// collect synchronous offset
statsCollect.FilerSyncOffsetGauge.WithLabelValues(sourceFiler.String(), targetFiler.String(), clientName, sourcePath).Set(float64(offsetTsNs))
@ -397,7 +397,7 @@ func genProcessFunction(sourcePath string, targetPath string, excludePaths []str
}

if debug {
glog.V(0).Infof("received %v", resp)
log.V(3).Infof("received %v", resp)
}

if isMultipartUploadDir(resp.Directory + "/") {

@ -1,7 +1,7 @@
package command

import (
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/util/log"
"github.com/seaweedfs/seaweedfs/weed/pb"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/util"
@ -46,7 +46,7 @@ func (t *MetadataProcessor) AddSyncJob(resp *filer_pb.SubscribeMetadataResponse)
if err := util.Retry("metadata processor", func() error {
return t.fn(resp)
}); err != nil {
glog.Errorf("process %v: %v", resp, err)
log.Errorf("process %v: %v", resp, err)
}

t.activeJobsLock.Lock()

@ -8,7 +8,7 @@ import (
"strconv"
"strings"

"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/util/log"
"github.com/seaweedfs/seaweedfs/weed/storage"
"github.com/seaweedfs/seaweedfs/weed/storage/needle"
"github.com/seaweedfs/seaweedfs/weed/storage/needle_map"
@ -53,7 +53,7 @@ func (scanner *VolumeFileScanner4Fix) ReadNeedleBody() bool {
}

func (scanner *VolumeFileScanner4Fix) VisitNeedle(n *needle.Needle, offset int64, needleHeader, needleBody []byte) error {
glog.V(2).Infof("key %v offset %d size %d disk_size %d compressed %v", n.Id, offset, n.Size, n.DiskSize(scanner.version), n.IsCompressed())
log.V(1).Infof("key %v offset %d size %d disk_size %d compressed %v", n.Id, offset, n.Size, n.DiskSize(scanner.version), n.IsCompressed())
if n.Size.IsValid() {
if pe := scanner.nm.Set(n.Id, types.ToOffset(offset), n.Size); pe != nil {
return fmt.Errorf("saved %d with error %v", n.Size, pe)
@ -64,7 +64,7 @@ func (scanner *VolumeFileScanner4Fix) VisitNeedle(n *needle.Needle, offset int64
return fmt.Errorf("saved deleted %d with error %v", n.Size, pe)
}
} else {
glog.V(2).Infof("skipping deleted file ...")
log.V(1).Infof("skipping deleted file ...")
return scanner.nm.Delete(n.Id)
}
}
@ -163,19 +163,19 @@ func doFixOneVolume(basepath string, baseFileName string, collection string, vol
if err := storage.ScanVolumeFile(basepath, collection, vid, storage.NeedleMapInMemory, scanner); err != nil {
err := fmt.Errorf("scan .dat File: %v", err)
if *fixIgnoreError {
glog.Error(err)
log.Error(err)
} else {
glog.Fatal(err)
log.Fatal(err)
}
}

if err := SaveToIdx(scanner, indexFileName); err != nil {
err := fmt.Errorf("save to .idx File: %v", err)
if *fixIgnoreError {
glog.Error(err)
log.Error(err)
} else {
os.Remove(indexFileName)
glog.Fatal(err)
log.Fatal(err)
}
}
}

@ -8,7 +8,7 @@ import (
"time"

"github.com/gorilla/mux"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/util/log"
"github.com/seaweedfs/seaweedfs/weed/iamapi"
"github.com/seaweedfs/seaweedfs/weed/pb"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
@ -56,14 +56,14 @@ func (iamopt *IamOptions) startIamServer() bool {
if err != nil {
return fmt.Errorf("get filer %s configuration: %v", filerAddress, err)
}
glog.V(0).Infof("IAM read filer configuration: %s", resp)
log.V(3).Infof("IAM read filer configuration: %s", resp)
return nil
})
if err != nil {
glog.V(0).Infof("wait to connect to filer %s grpc address %s", *iamopt.filer, filerAddress.ToGrpcAddress())
log.V(3).Infof("wait to connect to filer %s grpc address %s", *iamopt.filer, filerAddress.ToGrpcAddress())
time.Sleep(time.Second)
} else {
glog.V(0).Infof("connected to filer %s grpc address %s", *iamopt.filer, filerAddress.ToGrpcAddress())
log.V(3).Infof("connected to filer %s grpc address %s", *iamopt.filer, filerAddress.ToGrpcAddress())
break
}
}
@ -76,9 +76,9 @@ func (iamopt *IamOptions) startIamServer() bool {
Port: *iamopt.port,
GrpcDialOption: grpcDialOption,
})
glog.V(0).Info("NewIamApiServer created")
log.V(3).Info("NewIamApiServer created")
if iamApiServer_err != nil {
glog.Fatalf("IAM API Server startup error: %v", iamApiServer_err)
log.Fatalf("IAM API Server startup error: %v", iamApiServer_err)
}

httpS := &http.Server{Handler: router}
@ -86,19 +86,19 @@ func (iamopt *IamOptions) startIamServer() bool {
listenAddress := fmt.Sprintf(":%d", *iamopt.port)
iamApiListener, iamApiLocalListener, err := util.NewIpAndLocalListeners(*iamopt.ip, *iamopt.port, time.Duration(10)*time.Second)
if err != nil {
glog.Fatalf("IAM API Server listener on %s error: %v", listenAddress, err)
log.Fatalf("IAM API Server listener on %s error: %v", listenAddress, err)
}

glog.V(0).Infof("Start Seaweed IAM API Server %s at http port %d", util.Version(), *iamopt.port)
log.V(3).Infof("Start Seaweed IAM API Server %s at http port %d", util.Version(), *iamopt.port)
if iamApiLocalListener != nil {
go func() {
if err = httpS.Serve(iamApiLocalListener); err != nil {
glog.Errorf("IAM API Server Fail to serve: %v", err)
log.Errorf("IAM API Server Fail to serve: %v", err)
}
}()
}
if err = httpS.Serve(iamApiListener); err != nil {
glog.Fatalf("IAM API Server Fail to serve: %v", err)
log.Fatalf("IAM API Server Fail to serve: %v", err)
}

return true

@ -22,7 +22,7 @@ import (

"github.com/seaweedfs/seaweedfs/weed/util/grace"

"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/util/log"
"github.com/seaweedfs/seaweedfs/weed/pb"
"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
"github.com/seaweedfs/seaweedfs/weed/security"
@ -117,12 +117,12 @@ func runMaster(cmd *Command, args []string) bool {
os.MkdirAll(*m.metaFolder, 0755)
}
if err := util.TestFolderWritable(util.ResolvePath(*m.metaFolder)); err != nil {
glog.Fatalf("Check Meta Folder (-mdir) Writable %s : %s", *m.metaFolder, err)
log.Fatalf("Check Meta Folder (-mdir) Writable %s : %s", *m.metaFolder, err)
}

masterWhiteList := util.StringSplit(*m.whiteList, ",")
if *m.volumeSizeLimitMB > util.VolumeSizeLimitGB*1000 {
glog.Fatalf("volumeSizeLimitMB should be smaller than 30000")
log.Fatalf("volumeSizeLimitMB should be smaller than 30000")
}

switch {
@ -160,10 +160,10 @@ func startMaster(masterOption MasterOptions, masterWhiteList []string) {
r := mux.NewRouter()
ms := weed_server.NewMasterServer(r, masterOption.toMasterOption(masterWhiteList), masterPeers)
listeningAddress := util.JoinHostPort(*masterOption.ipBind, *masterOption.port)
glog.V(0).Infof("Start Seaweed Master %s at %s", util.Version(), listeningAddress)
log.V(3).Infof("Start Seaweed Master %s at %s", util.Version(), listeningAddress)
masterListener, masterLocalListener, e := util.NewIpAndLocalListeners(*masterOption.ipBind, *masterOption.port, 0)
if e != nil {
glog.Fatalf("Master startup error: %v", e)
log.Fatalf("Master startup error: %v", e)
}

// start raftServer
@ -183,12 +183,12 @@ func startMaster(masterOption MasterOptions, masterWhiteList []string) {
var err error
if *masterOption.raftHashicorp {
if raftServer, err = weed_server.NewHashicorpRaftServer(raftServerOption); err != nil {
glog.Fatalf("NewHashicorpRaftServer: %s", err)
log.Fatalf("NewHashicorpRaftServer: %s", err)
}
} else {
raftServer, err = weed_server.NewRaftServer(raftServerOption)
if raftServer == nil {
glog.Fatalf("please verify %s is writable, see https://github.com/seaweedfs/seaweedfs/issues/717: %s", *masterOption.metaFolder, err)
log.Fatalf("please verify %s is writable, see https://github.com/seaweedfs/seaweedfs/issues/717: %s", *masterOption.metaFolder, err)
}
}
ms.SetRaftServer(raftServer)
@ -201,7 +201,7 @@ func startMaster(masterOption MasterOptions, masterWhiteList []string) {
grpcPort := *masterOption.portGrpc
grpcL, grpcLocalL, err := util.NewIpAndLocalListeners(*masterOption.ipBind, grpcPort, 0)
if err != nil {
glog.Fatalf("master failed to listen on grpc port %d: %v", grpcPort, err)
log.Fatalf("master failed to listen on grpc port %d: %v", grpcPort, err)
}
grpcS := pb.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.master"))
master_pb.RegisterSeaweedServer(grpcS, ms)
@ -211,7 +211,7 @@ func startMaster(masterOption MasterOptions, masterWhiteList []string) {
protobuf.RegisterRaftServer(grpcS, raftServer)
}
reflection.Register(grpcS)
glog.V(0).Infof("Start Seaweed Master %s grpc server at %s:%d", util.Version(), *masterOption.ipBind, grpcPort)
log.V(3).Infof("Start Seaweed Master %s grpc server at %s:%d", util.Version(), *masterOption.ipBind, grpcPort)
if grpcLocalL != nil {
go grpcS.Serve(grpcLocalL)
}
@ -279,7 +279,7 @@ func startMaster(masterOption MasterOptions, masterWhiteList []string) {
}

func checkPeers(masterIp string, masterPort int, masterGrpcPort int, peers string) (masterAddress pb.ServerAddress, cleanedPeers []pb.ServerAddress) {
glog.V(0).Infof("current: %s:%d peers:%s", masterIp, masterPort, peers)
log.V(3).Infof("current: %s:%d peers:%s", masterIp, masterPort, peers)
masterAddress = pb.NewServerAddress(masterIp, masterPort, masterGrpcPort)
cleanedPeers = pb.ServerAddresses(peers).ToAddresses()

@ -295,7 +295,7 @@ func checkPeers(masterIp string, masterPort int, masterGrpcPort int, peers strin
cleanedPeers = append(cleanedPeers, masterAddress)
}
if len(cleanedPeers)%2 == 0 {
glog.Fatalf("Only odd number of masters are supported: %+v", cleanedPeers)
log.Fatalf("Only odd number of masters are supported: %+v", cleanedPeers)
}
return
}

@ -8,7 +8,7 @@ import (

"github.com/aws/aws-sdk-go/aws"
"github.com/gorilla/mux"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/util/log"
"github.com/seaweedfs/seaweedfs/weed/pb"
"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
"github.com/seaweedfs/seaweedfs/weed/security"
@ -99,13 +99,13 @@ func startMasterFollower(masterOptions MasterOptions) {
return nil
})
if err != nil {
glog.V(0).Infof("failed to talk to filer %v: %v", masters, err)
glog.V(0).Infof("wait for %d seconds ...", i+1)
log.V(3).Infof("failed to talk to filer %v: %v", masters, err)
log.V(3).Infof("wait for %d seconds ...", i+1)
time.Sleep(time.Duration(i+1) * time.Second)
}
}
if err != nil {
glog.Errorf("failed to talk to filer %v: %v", masters, err)
log.Errorf("failed to talk to filer %v: %v", masters, err)
return
}

@ -119,22 +119,22 @@ func startMasterFollower(masterOptions MasterOptions) {
r := mux.NewRouter()
ms := weed_server.NewMasterServer(r, option, masters)
listeningAddress := util.JoinHostPort(*masterOptions.ipBind, *masterOptions.port)
glog.V(0).Infof("Start Seaweed Master %s at %s", util.Version(), listeningAddress)
log.V(3).Infof("Start Seaweed Master %s at %s", util.Version(), listeningAddress)
masterListener, masterLocalListener, e := util.NewIpAndLocalListeners(*masterOptions.ipBind, *masterOptions.port, 0)
if e != nil {
glog.Fatalf("Master startup error: %v", e)
log.Fatalf("Master startup error: %v", e)
}

// starting grpc server
grpcPort := *masterOptions.portGrpc
grpcL, grpcLocalL, err := util.NewIpAndLocalListeners(*masterOptions.ipBind, grpcPort, 0)
if err != nil {
glog.Fatalf("master failed to listen on grpc port %d: %v", grpcPort, err)
log.Fatalf("master failed to listen on grpc port %d: %v", grpcPort, err)
}
grpcS := pb.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.master"))
master_pb.RegisterSeaweedServer(grpcS, ms)
reflection.Register(grpcS)
glog.V(0).Infof("Start Seaweed Master %s grpc server at %s:%d", util.Version(), *masterOptions.ip, grpcPort)
log.V(3).Infof("Start Seaweed Master %s grpc server at %s:%d", util.Version(), *masterOptions.ip, grpcPort)
if grpcLocalL != nil {
go grpcS.Serve(grpcLocalL)
}

@ -3,7 +3,7 @@ package command
import (
"bufio"
"fmt"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/util/log"
"io"
"os"
"strings"
@ -144,7 +144,7 @@ func checkMountPointAvailable(dir string) bool {

if mounted, err := mounted(mountPoint); err != nil || mounted {
if err != nil {
glog.Errorf("check %s: %v", mountPoint, err)
log.Errorf("check %s: %v", mountPoint, err)
}
return false
}

@ -16,7 +16,7 @@ import (
"time"

"github.com/hanwen/go-fuse/v2/fuse"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/util/log"
"github.com/seaweedfs/seaweedfs/weed/mount"
"github.com/seaweedfs/seaweedfs/weed/mount/meta_cache"
"github.com/seaweedfs/seaweedfs/weed/mount/unmount"
@ -81,13 +81,13 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
return nil
})
if err != nil {
glog.V(0).Infof("failed to talk to filer %v: %v", filerAddresses, err)
glog.V(0).Infof("wait for %d seconds ...", i+1)
log.V(3).Infof("failed to talk to filer %v: %v", filerAddresses, err)
log.V(3).Infof("wait for %d seconds ...", i+1)
time.Sleep(time.Duration(i+1) * time.Second)
}
}
if err != nil {
glog.Errorf("failed to talk to filer %v: %v", filerAddresses, err)
log.Errorf("failed to talk to filer %v: %v", filerAddresses, err)
return true
}

@ -111,11 +111,11 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
*option.localSocket = fmt.Sprintf("/tmp/seaweedfs-mount-%d.sock", mountDirHash)
}
if err := os.Remove(*option.localSocket); err != nil && !os.IsNotExist(err) {
glog.Fatalf("Failed to remove %s, error: %s", *option.localSocket, err.Error())
log.Fatalf("Failed to remove %s, error: %s", *option.localSocket, err.Error())
}
montSocketListener, err := net.Listen("unix", *option.localSocket)
if err != nil {
glog.Fatalf("Failed to listen on %s: %v", *option.localSocket, err)
log.Fatalf("Failed to listen on %s: %v", *option.localSocket, err)
}

// detect mount folder mode
@ -158,7 +158,7 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {

// Ensure target mount point availability
if isValid := checkMountPointAvailable(dir); !isValid {
glog.Fatalf("Target mount point is not available: %s, please check!", dir)
log.Fatalf("Target mount point is not available: %s, please check!", dir)
return true
}

@ -262,7 +262,7 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {

server, err := fuse.NewServer(seaweedFileSystem, dir, fuseMountOptions)
if err != nil {
glog.Fatalf("Mount fail: %v", err)
log.Fatalf("Mount fail: %v", err)
}
grace.OnInterrupt(func() {
unmount.Unmount(dir)
@ -279,8 +279,8 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
return false
}

glog.V(0).Infof("mounted %s%s to %v", *option.filer, mountRoot, dir)
glog.V(0).Infof("This is SeaweedFS version %s %s %s", util.Version(), runtime.GOOS, runtime.GOARCH)
log.V(3).Infof("mounted %s%s to %v", *option.filer, mountRoot, dir)
log.V(3).Infof("This is SeaweedFS version %s %s %s", util.Version(), runtime.GOOS, runtime.GOARCH)

server.Serve()

@ -5,7 +5,7 @@ import (
"github.com/seaweedfs/seaweedfs/weed/pb/mq_agent_pb"
"google.golang.org/grpc/reflection"

"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/util/log"
"github.com/seaweedfs/seaweedfs/weed/pb"
"github.com/seaweedfs/seaweedfs/weed/security"
"github.com/seaweedfs/seaweedfs/weed/util"
@ -62,9 +62,9 @@ func (mqAgentOpt *MessageQueueAgentOptions) startQueueAgent() bool {
// start grpc listener
grpcL, _, err := util.NewIpAndLocalListeners(*mqAgentOpt.ip, *mqAgentOpt.port, 0)
if err != nil {
glog.Fatalf("failed to listen on grpc port %d: %v", *mqAgentOpt.port, err)
log.Fatalf("failed to listen on grpc port %d: %v", *mqAgentOpt.port, err)
}
glog.Infof("Start Seaweed Message Queue Agent on %s:%d", *mqAgentOpt.ip, *mqAgentOpt.port)
log.Infof("Start Seaweed Message Queue Agent on %s:%d", *mqAgentOpt.ip, *mqAgentOpt.port)
grpcS := pb.NewGrpcServer()
mq_agent_pb.RegisterSeaweedMessagingAgentServer(grpcS, agentServer)
reflection.Register(grpcS)

@ -5,7 +5,7 @@ import (

"github.com/seaweedfs/seaweedfs/weed/util/grace"

"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/util/log"
"github.com/seaweedfs/seaweedfs/weed/mq/broker"
"github.com/seaweedfs/seaweedfs/weed/pb"
"github.com/seaweedfs/seaweedfs/weed/pb/mq_pb"
@ -79,13 +79,13 @@ func (mqBrokerOpt *MessageQueueBrokerOptions) startQueueServer() bool {
Port: *mqBrokerOpt.port,
}, grpcDialOption)
if err != nil {
glog.Fatalf("failed to create new message broker for queue server: %v", err)
log.Fatalf("failed to create new message broker for queue server: %v", err)
}

// start grpc listener
grpcL, _, err := util.NewIpAndLocalListeners("", *mqBrokerOpt.port, 0)
if err != nil {
glog.Fatalf("failed to listen on grpc port %d: %v", *mqBrokerOpt.port, err)
log.Fatalf("failed to listen on grpc port %d: %v", *mqBrokerOpt.port, err)
}
grpcS := pb.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.msg_broker"))
mq_pb.RegisterSeaweedMessagingServer(grpcS, qs)

@ -25,7 +25,7 @@ import (

"github.com/gorilla/mux"

"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/util/log"
"github.com/seaweedfs/seaweedfs/weed/s3api"
stats_collect "github.com/seaweedfs/seaweedfs/weed/stats"
"github.com/seaweedfs/seaweedfs/weed/util"
@ -212,14 +212,14 @@ func (s3opt *S3Options) startS3Server() bool {
filerBucketsPath = resp.DirBuckets
filerGroup = resp.FilerGroup
metricsAddress, metricsIntervalSec = resp.MetricsAddress, int(resp.MetricsIntervalSec)
glog.V(0).Infof("S3 read filer buckets dir: %s", filerBucketsPath)
log.V(3).Infof("S3 read filer buckets dir: %s", filerBucketsPath)
return nil
})
if err != nil {
glog.V(0).Infof("wait to connect to filer %s grpc address %s", *s3opt.filer, filerAddress.ToGrpcAddress())
log.V(3).Infof("wait to connect to filer %s grpc address %s", *s3opt.filer, filerAddress.ToGrpcAddress())
time.Sleep(time.Second)
} else {
glog.V(0).Infof("connected to filer %s grpc address %s", *s3opt.filer, filerAddress.ToGrpcAddress())
log.V(3).Infof("connected to filer %s grpc address %s", *s3opt.filer, filerAddress.ToGrpcAddress())
break
}
}
@ -246,7 +246,7 @@ func (s3opt *S3Options) startS3Server() bool {
FilerGroup: filerGroup,
})
if s3ApiServer_err != nil {
glog.Fatalf("S3 API Server startup error: %v", s3ApiServer_err)
log.Fatalf("S3 API Server startup error: %v", s3ApiServer_err)
}

httpS := &http.Server{Handler: router}
@ -264,13 +264,13 @@ func (s3opt *S3Options) startS3Server() bool {
localSocket = fmt.Sprintf("/tmp/seaweedfs-s3-%d.sock", *s3opt.port)
}
if err := os.Remove(localSocket); err != nil && !os.IsNotExist(err) {
glog.Fatalf("Failed to remove %s, error: %s", localSocket, err.Error())
log.Fatalf("Failed to remove %s, error: %s", localSocket, err.Error())
}
go func() {
// start on local unix socket
s3SocketListener, err := net.Listen("unix", localSocket)
if err != nil {
glog.Fatalf("Failed to listen on %s: %v", localSocket, err)
log.Fatalf("Failed to listen on %s: %v", localSocket, err)
}
httpS.Serve(s3SocketListener)
}()
@ -280,7 +280,7 @@ func (s3opt *S3Options) startS3Server() bool {
s3ApiListener, s3ApiLocalListener, err := util.NewIpAndLocalListeners(
*s3opt.bindIp, *s3opt.port, time.Duration(*s3opt.idleTimeout)*time.Second)
if err != nil {
glog.Fatalf("S3 API Server listener on %s error: %v", listenAddress, err)
log.Fatalf("S3 API Server listener on %s error: %v", listenAddress, err)
}

if len(*s3opt.auditLogConfig) > 0 {
@ -294,7 +294,7 @@ func (s3opt *S3Options) startS3Server() bool {
grpcPort := *s3opt.portGrpc
grpcL, grpcLocalL, err := util.NewIpAndLocalListeners(*s3opt.bindIp, grpcPort, 0)
if err != nil {
glog.Fatalf("s3 failed to listen on grpc port %d: %v", grpcPort, err)
log.Fatalf("s3 failed to listen on grpc port %d: %v", grpcPort, err)
}
grpcS := pb.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.s3"))
s3_pb.RegisterSeaweedS3Server(grpcS, s3ApiServer)
@ -311,7 +311,7 @@ func (s3opt *S3Options) startS3Server() bool {
RefreshDuration: security.CredRefreshingInterval,
}
if s3opt.certProvider, err = pemfile.NewProvider(pemfileOptions); err != nil {
glog.Fatalf("pemfile.NewProvider(%v) failed: %v", pemfileOptions, err)
log.Fatalf("pemfile.NewProvider(%v) failed: %v", pemfileOptions, err)
}

caCertPool := x509.NewCertPool()
@ -319,7 +319,7 @@ func (s3opt *S3Options) startS3Server() bool {
// load CA certificate file and add it to list of client CAs
caCertFile, err := ioutil.ReadFile(*s3opt.tlsCACertificate)
if err != nil {
glog.Fatalf("error reading CA certificate: %v", err)
log.Fatalf("error reading CA certificate: %v", err)
}
caCertPool.AppendCertsFromPEM(caCertFile)
}
@ -336,49 +336,49 @@ func (s3opt *S3Options) startS3Server() bool {
}
err = security.FixTlsConfig(util.GetViper(), httpS.TLSConfig)
if err != nil {
glog.Fatalf("error with tls config: %v", err)
log.Fatalf("error with tls config: %v", err)
}
if *s3opt.portHttps == 0 {
glog.V(0).Infof("Start Seaweed S3 API Server %s at https port %d", util.Version(), *s3opt.port)
log.V(3).Infof("Start Seaweed S3 API Server %s at https port %d", util.Version(), *s3opt.port)
if s3ApiLocalListener != nil {
go func() {
if err = httpS.ServeTLS(s3ApiLocalListener, "", ""); err != nil {
glog.Fatalf("S3 API Server Fail to serve: %v", err)
log.Fatalf("S3 API Server Fail to serve: %v", err)
}
}()
}
if err = httpS.ServeTLS(s3ApiListener, "", ""); err != nil {
glog.Fatalf("S3 API Server Fail to serve: %v", err)
log.Fatalf("S3 API Server Fail to serve: %v", err)
}
} else {
glog.V(0).Infof("Start Seaweed S3 API Server %s at https port %d", util.Version(), *s3opt.portHttps)
log.V(3).Infof("Start Seaweed S3 API Server %s at https port %d", util.Version(), *s3opt.portHttps)
s3ApiListenerHttps, s3ApiLocalListenerHttps, _ := util.NewIpAndLocalListeners(
*s3opt.bindIp, *s3opt.portHttps, time.Duration(*s3opt.idleTimeout)*time.Second)
if s3ApiLocalListenerHttps != nil {
go func() {
if err = httpS.ServeTLS(s3ApiLocalListenerHttps, "", ""); err != nil {
glog.Fatalf("S3 API Server Fail to serve: %v", err)
log.Fatalf("S3 API Server Fail to serve: %v", err)
}
}()
}
go func() {
if err = httpS.ServeTLS(s3ApiListenerHttps, "", ""); err != nil {
glog.Fatalf("S3 API Server Fail to serve: %v", err)
log.Fatalf("S3 API Server Fail to serve: %v", err)
}
}()
}
}
if *s3opt.tlsPrivateKey == "" || *s3opt.portHttps > 0 {
glog.V(0).Infof("Start Seaweed S3 API Server %s at http port %d", util.Version(), *s3opt.port)
log.V(3).Infof("Start Seaweed S3 API Server %s at http port %d", util.Version(), *s3opt.port)
if s3ApiLocalListener != nil {
go func() {
if err = httpS.Serve(s3ApiLocalListener); err != nil {
glog.Fatalf("S3 API Server Fail to serve: %v", err)
log.Fatalf("S3 API Server Fail to serve: %v", err)
}
}()
}
if err = httpS.Serve(s3ApiListener); err != nil {
glog.Fatalf("S3 API Server Fail to serve: %v", err)
log.Fatalf("S3 API Server Fail to serve: %v", err)
}
}

@ -9,7 +9,7 @@ import (

stats_collect "github.com/seaweedfs/seaweedfs/weed/stats"

"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/util/log"
"github.com/seaweedfs/seaweedfs/weed/pb"
"github.com/seaweedfs/seaweedfs/weed/util"
"github.com/seaweedfs/seaweedfs/weed/util/grace"
@ -281,14 +281,14 @@ func runServer(cmd *Command, args []string) bool {
folders := strings.Split(*volumeDataFolders, ",")

if *masterOptions.volumeSizeLimitMB > util.VolumeSizeLimitGB*1000 {
glog.Fatalf("masterVolumeSizeLimitMB should be less than 30000")
log.Fatalf("masterVolumeSizeLimitMB should be less than 30000")
}

if *masterOptions.metaFolder == "" {
*masterOptions.metaFolder = folders[0]
}
if err := util.TestFolderWritable(util.ResolvePath(*masterOptions.metaFolder)); err != nil {
glog.Fatalf("Check Meta Folder (-mdir=\"%s\") Writable: %s", *masterOptions.metaFolder, err)
log.Fatalf("Check Meta Folder (-mdir=\"%s\") Writable: %s", *masterOptions.metaFolder, err)
}
filerOptions.defaultLevelDbDirectory = masterOptions.metaFolder

@ -8,7 +8,7 @@ import (
"runtime"
"time"

"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/util/log"
"github.com/seaweedfs/seaweedfs/weed/pb"
filer_pb "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/security"
@ -110,14 +110,14 @@ func (sftpOpt *SftpOptions) startSftpServer() bool {
}
metricsAddress, metricsIntervalSec = resp.MetricsAddress, int(resp.MetricsIntervalSec)
filerGroup = resp.FilerGroup
glog.V(0).Infof("SFTP read filer configuration, using filer at: %s", filerAddress)
log.V(3).Infof("SFTP read filer configuration, using filer at: %s", filerAddress)
return nil
})
if err != nil {
glog.V(0).Infof("Waiting to connect to filer %s grpc address %s...", *sftpOpt.filer, filerAddress.ToGrpcAddress())
log.V(3).Infof("Waiting to connect to filer %s grpc address %s...", *sftpOpt.filer, filerAddress.ToGrpcAddress())
time.Sleep(time.Second)
} else {
glog.V(0).Infof("Connected to filer %s grpc address %s", *sftpOpt.filer, filerAddress.ToGrpcAddress())
log.V(3).Infof("Connected to filer %s grpc address %s", *sftpOpt.filer, filerAddress.ToGrpcAddress())
break
}
}
@ -154,16 +154,16 @@ func (sftpOpt *SftpOptions) startSftpServer() bool {
localSocket = fmt.Sprintf("/tmp/seaweedfs-sftp-%d.sock", *sftpOpt.port)
}
if err := os.Remove(localSocket); err != nil && !os.IsNotExist(err) {
glog.Fatalf("Failed to remove %s, error: %s", localSocket, err.Error())
log.Fatalf("Failed to remove %s, error: %s", localSocket, err.Error())
}
go func() {
// start on local unix socket
sftpSocketListener, err := net.Listen("unix", localSocket)
if err != nil {
glog.Fatalf("Failed to listen on %s: %v", localSocket, err)
log.Fatalf("Failed to listen on %s: %v", localSocket, err)
}
if err := service.Serve(sftpSocketListener); err != nil {
glog.Fatalf("Failed to serve SFTP on socket %s: %v", localSocket, err)
log.Fatalf("Failed to serve SFTP on socket %s: %v", localSocket, err)
}
}()
}
@ -172,21 +172,21 @@ func (sftpOpt *SftpOptions) startSftpServer() bool {
listenAddress := fmt.Sprintf("%s:%d", *sftpOpt.bindIp, *sftpOpt.port)
sftpListener, sftpLocalListener, err := util.NewIpAndLocalListeners(*sftpOpt.bindIp, *sftpOpt.port, time.Duration(10)*time.Second)
if err != nil {
glog.Fatalf("SFTP server listener on %s error: %v", listenAddress, err)
log.Fatalf("SFTP server listener on %s error: %v", listenAddress, err)
}

glog.V(0).Infof("Start Seaweed SFTP Server %s at %s", util.Version(), listenAddress)
log.V(3).Infof("Start Seaweed SFTP Server %s at %s", util.Version(), listenAddress)

if sftpLocalListener != nil {
go func() {
if err := service.Serve(sftpLocalListener); err != nil {
glog.Fatalf("SFTP Server failed to serve on local listener: %v", err)
log.Fatalf("SFTP Server failed to serve on local listener: %v", err)
}
}()
}

if err := service.Serve(sftpListener); err != nil {
glog.Fatalf("SFTP Server failed to serve: %v", err)
log.Fatalf("SFTP Server failed to serve: %v", err)
}

return true

@ -18,7 +18,7 @@ import (
"strings"
"time"

"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/util/log"
"github.com/seaweedfs/seaweedfs/weed/util"
util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
"golang.org/x/net/context/ctxhttp"
@ -86,7 +86,7 @@ func runUpdate(cmd *Command, args []string) bool {

if *updateOpt.dir != "" {
if err := util.TestFolderWritable(util.ResolvePath(*updateOpt.dir)); err != nil {
glog.Fatalf("Check Folder(-dir) Writable %s : %s", *updateOpt.dir, err)
log.Fatalf("Check Folder(-dir) Writable %s : %s", *updateOpt.dir, err)
return false
}
} else {
@ -101,16 +101,16 @@ func runUpdate(cmd *Command, args []string) bool {

if runtime.GOOS == "windows" {
if target == path {
glog.Fatalf("On windows, name of the new weed shouldn't be same to the original name.")
log.Fatalf("On windows, name of the new weed shouldn't be same to the original name.")
return false
}
}

glog.V(0).Infof("new weed will be saved to %s", target)
log.V(3).Infof("new weed will be saved to %s", target)

_, err := downloadRelease(context.Background(), target, *updateOpt.Version)
if err != nil {
glog.Errorf("unable to download weed: %v", err)
log.Errorf("unable to download weed: %v", err)
return false
}
return true
@ -125,14 +125,14 @@ func downloadRelease(ctx context.Context, target string, ver string) (version st

if rel.Version == currentVersion {
if ver == "0" {
glog.V(0).Infof("weed is up to date")
log.V(3).Infof("weed is up to date")
} else {
glog.V(0).Infof("no need to download the same version of weed ")
log.V(3).Infof("no need to download the same version of weed ")
}
return currentVersion, nil
}

glog.V(0).Infof("download version: %s", rel.Version)
log.V(3).Infof("download version: %s", rel.Version)

largeDiskSuffix := ""
if util.VolumeSizeLimitGB == 8000 {
@ -165,7 +165,7 @@ func downloadRelease(ctx context.Context, target string, ver string) (version st
md5Ctx.Write(buf)
binaryMd5 := md5Ctx.Sum(nil)
if hex.EncodeToString(binaryMd5) != string(md5Val[0:32]) {
glog.Errorf("md5:'%s' '%s'", hex.EncodeToString(binaryMd5), string(md5Val[0:32]))
log.Errorf("md5:'%s' '%s'", hex.EncodeToString(binaryMd5), string(md5Val[0:32]))
err = fmt.Errorf("binary md5sum doesn't match")
return "", err
}
@ -174,7 +174,7 @@ func downloadRelease(ctx context.Context, target string, ver string) (version st
if err != nil {
return "", err
} else {
glog.V(0).Infof("successfully updated weed to version %v\n", rel.Version)
log.V(3).Infof("successfully updated weed to version %v\n", rel.Version)
}

return rel.Version, nil
@ -228,7 +228,7 @@ func GitHubLatestRelease(ctx context.Context, ver string, owner, repo string) (R
}
if ver == "0" {
release = releaseList[0]
glog.V(0).Infof("latest version is %v\n", release.TagName)
log.V(3).Infof("latest version is %v\n", release.TagName)
} else {
for _, r := range releaseList {
if r.TagName == ver {
@ -287,7 +287,7 @@ func getGithubDataFile(ctx context.Context, assets []Asset, suffix string) (file
return "", nil, fmt.Errorf("unable to find file with suffix %v", suffix)
}

glog.V(0).Infof("download %v\n", filename)
log.V(3).Infof("download %v\n", filename)
data, err = getGithubData(ctx, url)
if err != nil {
return "", nil, err
@ -310,9 +310,9 @@ func extractToFile(buf []byte, filename, target string) error {
hdr, terr := trd.Next()
if terr != nil {
if hdr != nil {
glog.Errorf("uncompress file(%s) failed:%s", hdr.Name, terr)
log.Errorf("uncompress file(%s) failed:%s", hdr.Name, terr)
} else {
glog.Errorf("uncompress file is nil, failed:%s", terr)
log.Errorf("uncompress file is nil, failed:%s", terr)
}

return terr
@ -371,6 +371,6 @@ func extractToFile(buf []byte, filename, target string) error {
return err
}

glog.V(0).Infof("saved %d bytes in %v\n", n, target)
log.V(3).Infof("saved %d bytes in %v\n", n, target)
return os.Chmod(target, mode)
}

@ -24,7 +24,7 @@ import (
|
||||
|
||||
"google.golang.org/grpc/reflection"
|
||||
|
||||
"github.com/seaweedfs/seaweedfs/weed/glog"
|
||||
"github.com/seaweedfs/seaweedfs/weed/util/log"
|
||||
"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
|
||||
weed_server "github.com/seaweedfs/seaweedfs/weed/server"
|
||||
stats_collect "github.com/seaweedfs/seaweedfs/weed/stats"
|
||||
@ -156,7 +156,7 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
|
||||
v.folders = strings.Split(volumeFolders, ",")
|
||||
for _, folder := range v.folders {
|
||||
if err := util.TestFolderWritable(util.ResolvePath(folder)); err != nil {
|
||||
glog.Fatalf("Check Data Folder(-dir) Writable %s : %s", folder, err)
|
||||
log.Fatalf("Check Data Folder(-dir) Writable %s : %s", folder, err)
|
||||
}
|
||||
}
|
||||
|
||||
@ -166,7 +166,7 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
|
||||
if max, e := strconv.ParseInt(maxString, 10, 64); e == nil {
|
||||
v.folderMaxLimits = append(v.folderMaxLimits, int32(max))
|
||||
} else {
|
||||
glog.Fatalf("The max specified in -max not a valid number %s", maxString)
|
||||
log.Fatalf("The max specified in -max not a valid number %s", maxString)
|
||||
}
|
||||
}
|
||||
if len(v.folderMaxLimits) == 1 && len(v.folders) > 1 {
|
||||
@ -175,7 +175,7 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
|
||||
}
|
||||
}
|
||||
if len(v.folders) != len(v.folderMaxLimits) {
|
||||
glog.Fatalf("%d directories by -dir, but only %d max is set by -max", len(v.folders), len(v.folderMaxLimits))
|
||||
log.Fatalf("%d directories by -dir, but only %d max is set by -max", len(v.folders), len(v.folderMaxLimits))
|
||||
}
|
||||
|
||||
if len(minFreeSpaces) == 1 && len(v.folders) > 1 {
|
||||
@ -184,7 +184,7 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
|
||||
}
|
||||
}
|
||||
if len(v.folders) != len(minFreeSpaces) {
|
||||
glog.Fatalf("%d directories by -dir, but only %d minFreeSpacePercent is set by -minFreeSpacePercent", len(v.folders), len(minFreeSpaces))
|
||||
log.Fatalf("%d directories by -dir, but only %d minFreeSpacePercent is set by -minFreeSpacePercent", len(v.folders), len(minFreeSpaces))
|
||||
}
|
||||
|
||||
// set disk types
|
||||
@ -199,7 +199,7 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
|
||||
}
|
||||
}
|
||||
if len(v.folders) != len(diskTypes) {
|
||||
glog.Fatalf("%d directories by -dir, but only %d disk types is set by -disk", len(v.folders), len(diskTypes))
|
||||
log.Fatalf("%d directories by -dir, but only %d disk types is set by -disk", len(v.folders), len(diskTypes))
|
||||
}
|
||||
|
||||
// security related white list configuration
|
||||
@ -207,7 +207,7 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
|
||||
|
||||
if *v.ip == "" {
|
||||
*v.ip = util.DetectedHostAddress()
|
||||
glog.V(0).Infof("detected volume server ip address: %v", *v.ip)
|
||||
log.V(3).Infof("detected volume server ip address: %v", *v.ip)
|
||||
}
|
||||
if *v.bindIp == "" {
|
||||
*v.bindIp = *v.ip
|
||||
@ -272,7 +272,7 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
|
||||
if v.isSeparatedPublicPort() {
|
||||
publicHttpDown = v.startPublicHttpService(publicVolumeMux)
|
||||
if nil == publicHttpDown {
|
||||
glog.Fatalf("start public http service failed")
|
||||
log.Fatalf("start public http service failed")
|
||||
}
|
||||
}
|
||||
|
||||
@ -289,7 +289,7 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
|
||||
// Stop heartbeats
|
||||
if !volumeServer.StopHeartbeat() {
|
||||
volumeServer.SetStopping()
|
||||
glog.V(0).Infof("stop send heartbeat and wait %d seconds until shutdown ...", *v.preStopSeconds)
|
||||
log.V(3).Infof("stop send heartbeat and wait %d seconds until shutdown ...", *v.preStopSeconds)
|
||||
time.Sleep(time.Duration(*v.preStopSeconds) * time.Second)
|
||||
}
|
||||
|
||||
@ -307,18 +307,18 @@ func shutdown(publicHttpDown httpdown.Server, clusterHttpServer httpdown.Server,
|
||||
|
||||
// firstly, stop the public http service to prevent from receiving new user request
|
||||
if nil != publicHttpDown {
|
||||
glog.V(0).Infof("stop public http server ... ")
|
||||
log.V(3).Infof("stop public http server ... ")
|
||||
if err := publicHttpDown.Stop(); err != nil {
|
||||
glog.Warningf("stop the public http server failed, %v", err)
|
||||
log.Warningf("stop the public http server failed, %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
glog.V(0).Infof("graceful stop cluster http server ... ")
|
||||
log.V(3).Infof("graceful stop cluster http server ... ")
|
||||
if err := clusterHttpServer.Stop(); err != nil {
|
||||
glog.Warningf("stop the cluster http server failed, %v", err)
|
||||
log.Warningf("stop the cluster http server failed, %v", err)
|
||||
}
|
||||
|
||||
glog.V(0).Infof("graceful stop gRPC ...")
|
||||
log.V(3).Infof("graceful stop gRPC ...")
|
||||
grpcS.GracefulStop()
|
||||
|
||||
volumeServer.Shutdown()
|
||||
@ -336,14 +336,14 @@ func (v VolumeServerOptions) startGrpcService(vs volume_server_pb.VolumeServerSe
|
||||
grpcPort := *v.portGrpc
|
||||
grpcL, err := util.NewListener(util.JoinHostPort(*v.bindIp, grpcPort), 0)
|
||||
if err != nil {
|
||||
glog.Fatalf("failed to listen on grpc port %d: %v", grpcPort, err)
|
||||
log.Fatalf("failed to listen on grpc port %d: %v", grpcPort, err)
|
||||
}
|
||||
grpcS := pb.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.volume"))
|
||||
volume_server_pb.RegisterVolumeServerServer(grpcS, vs)
|
||||
reflection.Register(grpcS)
|
||||
go func() {
|
||||
if err := grpcS.Serve(grpcL); err != nil {
|
||||
glog.Fatalf("start gRPC service failed, %s", err)
|
||||
log.Fatalf("start gRPC service failed, %s", err)
|
||||
}
|
||||
}()
|
||||
return grpcS
|
||||
@ -351,17 +351,17 @@ func (v VolumeServerOptions) startGrpcService(vs volume_server_pb.VolumeServerSe
|
||||
|
||||
func (v VolumeServerOptions) startPublicHttpService(handler http.Handler) httpdown.Server {
|
||||
publicListeningAddress := util.JoinHostPort(*v.bindIp, *v.publicPort)
|
||||
glog.V(0).Infoln("Start Seaweed volume server", util.Version(), "public at", publicListeningAddress)
|
||||
log.V(3).Infoln("Start Seaweed volume server", util.Version(), "public at", publicListeningAddress)
|
||||
publicListener, e := util.NewListener(publicListeningAddress, time.Duration(*v.idleConnectionTimeout)*time.Second)
|
||||
if e != nil {
|
||||
glog.Fatalf("Volume server listener error:%v", e)
|
||||
log.Fatalf("Volume server listener error:%v", e)
|
||||
}
|
||||
|
||||
pubHttp := httpdown.HTTP{StopTimeout: 5 * time.Minute, KillTimeout: 5 * time.Minute}
|
||||
publicHttpDown := pubHttp.Serve(&http.Server{Handler: handler}, publicListener)
|
||||
go func() {
|
||||
if err := publicHttpDown.Wait(); err != nil {
|
||||
glog.Errorf("public http down wait failed, %v", err)
|
||||
log.Errorf("public http down wait failed, %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
@ -378,10 +378,10 @@ func (v VolumeServerOptions) startClusterHttpService(handler http.Handler) httpd
|
||||
}
|
||||
|
||||
listeningAddress := util.JoinHostPort(*v.bindIp, *v.port)
|
||||
glog.V(0).Infof("Start Seaweed volume server %s at %s", util.Version(), listeningAddress)
|
||||
log.V(3).Infof("Start Seaweed volume server %s at %s", util.Version(), listeningAddress)
|
||||
listener, e := util.NewListener(listeningAddress, time.Duration(*v.idleConnectionTimeout)*time.Second)
|
||||
if e != nil {
|
||||
glog.Fatalf("Volume server listener error:%v", e)
|
||||
log.Fatalf("Volume server listener error:%v", e)
|
||||
}
|
||||
|
||||
httpDown := httpdown.HTTP{
|
||||
@ -399,7 +399,7 @@ func (v VolumeServerOptions) startClusterHttpService(handler http.Handler) httpd
|
||||
clusterHttpServer := httpDown.Serve(httpS, listener)
|
||||
go func() {
|
||||
if e := clusterHttpServer.Wait(); e != nil {
|
||||
glog.Fatalf("Volume server fail to serve: %v", e)
|
||||
log.Fatalf("Volume server fail to serve: %v", e)
|
||||
}
|
||||
}()
|
||||
return clusterHttpServer
|
||||
|
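Note on the pattern in the hunks above: every call site swaps the weed/glog import for weed/util/log and keeps the same call shapes (log.V(n).Infof, log.Warningf, log.Fatalf). Below is a minimal sketch of what such a glog-compatible facade could look like, assuming a zap-backed implementation; the package internals, names, and verbosity threshold are assumptions for illustration, not the actual weed/util/log code.

// Hypothetical sketch of a glog-style logging facade over zap, matching
// the call shapes used in this diff. The threshold in V() and the use of
// zap.NewProduction() are assumed defaults, not taken from the repository.
package log

import "go.uber.org/zap"

var sugar = zap.Must(zap.NewProduction()).Sugar()

// Verbose gates V-style logging the way glog does.
type Verbose bool

// V returns a Verbose gate; a threshold of 0 is an assumed default.
func V(level int) Verbose { return Verbose(level >= 0) }

func (v Verbose) Infof(format string, args ...interface{}) {
	if v {
		sugar.Infof(format, args...)
	}
}

func (v Verbose) Infoln(args ...interface{}) {
	if v {
		sugar.Infoln(args...)
	}
}

func Infof(format string, args ...interface{})    { sugar.Infof(format, args...) }
func Warningf(format string, args ...interface{}) { sugar.Warnf(format, args...) }
func Errorf(format string, args ...interface{})   { sugar.Errorf(format, args...) }
func Error(args ...interface{})                   { sugar.Error(args...) }
func Fatalf(format string, args ...interface{})   { sugar.Fatalf(format, args...) }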
@@ -5,9 +5,9 @@ import (
"testing"
"time"

- "github.com/seaweedfs/seaweedfs/weed/glog"
+ "github.com/seaweedfs/seaweedfs/weed/util/log"
)

func TestXYZ(t *testing.T) {
- glog.V(0).Infoln("Last-Modified", time.Unix(int64(1373273596), 0).UTC().Format(http.TimeFormat))
+ log.V(3).Infoln("Last-Modified", time.Unix(int64(1373273596), 0).UTC().Format(http.TimeFormat))
}

@@ -9,7 +9,7 @@ import (
"strconv"
"time"

- "github.com/seaweedfs/seaweedfs/weed/glog"
+ "github.com/seaweedfs/seaweedfs/weed/util/log"
"github.com/seaweedfs/seaweedfs/weed/pb"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/security"
@@ -65,7 +65,7 @@ func runWebDav(cmd *Command, args []string) bool {
util.LoadSecurityConfiguration()

listenAddress := fmt.Sprintf("%s:%d", *webDavStandaloneOptions.ipBind, *webDavStandaloneOptions.port)
- glog.V(0).Infof("Starting Seaweed WebDav Server %s at %s", util.Version(), listenAddress)
+ log.V(3).Infof("Starting Seaweed WebDav Server %s at %s", util.Version(), listenAddress)

return webDavStandaloneOptions.startWebDav()

@@ -101,10 +101,10 @@ func (wo *WebDavOption) startWebDav() bool {
return nil
})
if err != nil {
- glog.V(0).Infof("wait to connect to filer %s grpc address %s", *wo.filer, filerAddress.ToGrpcAddress())
+ log.V(3).Infof("wait to connect to filer %s grpc address %s", *wo.filer, filerAddress.ToGrpcAddress())
time.Sleep(time.Second)
} else {
- glog.V(0).Infof("connected to filer %s grpc address %s", *wo.filer, filerAddress.ToGrpcAddress())
+ log.V(3).Infof("connected to filer %s grpc address %s", *wo.filer, filerAddress.ToGrpcAddress())
break
}
}
@@ -124,7 +124,7 @@ func (wo *WebDavOption) startWebDav() bool {
MaxMB: *wo.maxMB,
})
if webdavServer_err != nil {
- glog.Fatalf("WebDav Server startup error: %v", webdavServer_err)
+ log.Fatalf("WebDav Server startup error: %v", webdavServer_err)
}

httpS := &http.Server{Handler: ws.Handler}
@@ -132,18 +132,18 @@ func (wo *WebDavOption) startWebDav() bool {
listenAddress := fmt.Sprintf("%s:%d", *wo.ipBind, *wo.port)
webDavListener, err := util.NewListener(listenAddress, time.Duration(10)*time.Second)
if err != nil {
- glog.Fatalf("WebDav Server listener on %s error: %v", listenAddress, err)
+ log.Fatalf("WebDav Server listener on %s error: %v", listenAddress, err)
}

if *wo.tlsPrivateKey != "" {
- glog.V(0).Infof("Start Seaweed WebDav Server %s at https %s", util.Version(), listenAddress)
+ log.V(3).Infof("Start Seaweed WebDav Server %s at https %s", util.Version(), listenAddress)
if err = httpS.ServeTLS(webDavListener, *wo.tlsCertificate, *wo.tlsPrivateKey); err != nil {
- glog.Fatalf("WebDav Server Fail to serve: %v", err)
+ log.Fatalf("WebDav Server Fail to serve: %v", err)
}
} else {
- glog.V(0).Infof("Start Seaweed WebDav Server %s at http %s", util.Version(), listenAddress)
+ log.V(3).Infof("Start Seaweed WebDav Server %s at http %s", util.Version(), listenAddress)
if err = httpS.Serve(webDavListener); err != nil {
- glog.Fatalf("WebDav Server Fail to serve: %v", err)
+ log.Fatalf("WebDav Server Fail to serve: %v", err)
}
}

@@ -5,7 +5,7 @@ import (
"database/sql"
"fmt"
"github.com/seaweedfs/seaweedfs/weed/filer"
- "github.com/seaweedfs/seaweedfs/weed/glog"
+ "github.com/seaweedfs/seaweedfs/weed/util/log"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/s3api/s3bucket"
"github.com/seaweedfs/seaweedfs/weed/util"
@@ -169,7 +169,7 @@ func (store *AbstractSqlStore) InsertEntry(ctx context.Context, entry *filer.Ent
if err != nil && strings.Contains(strings.ToLower(err.Error()), "duplicate entry") {
// now the insert failed possibly due to duplication constraints
sqlInsert = "falls back to update"
- glog.V(1).Infof("insert %s %s: %v", entry.FullPath, sqlInsert, err)
+ log.V(2).Infof("insert %s %s: %v", entry.FullPath, sqlInsert, err)
res, err = db.ExecContext(ctx, store.GetSqlUpdate(bucket), meta, util.HashStringToLong(dir), name, dir)
}
if err != nil {
@@ -277,7 +277,7 @@ func (store *AbstractSqlStore) DeleteFolderChildren(ctx context.Context, fullpat
}
}

- glog.V(4).Infof("delete %s SQL %s %d", string(shortPath), store.GetSqlDeleteFolderChildren(bucket), util.HashStringToLong(string(shortPath)))
+ log.V(-1).Infof("delete %s SQL %s %d", string(shortPath), store.GetSqlDeleteFolderChildren(bucket), util.HashStringToLong(string(shortPath)))
res, err := db.ExecContext(ctx, store.GetSqlDeleteFolderChildren(bucket), util.HashStringToLong(string(shortPath)), string(shortPath))
if err != nil {
return fmt.Errorf("deleteFolderChildren %s: %s", fullpath, err)
@@ -312,7 +312,7 @@ func (store *AbstractSqlStore) ListDirectoryPrefixedEntries(ctx context.Context,
var name string
var data []byte
if err = rows.Scan(&name, &data); err != nil {
- glog.V(0).Infof("scan %s : %v", dirPath, err)
+ log.V(3).Infof("scan %s : %v", dirPath, err)
return lastFileName, fmt.Errorf("scan %s: %v", dirPath, err)
}
lastFileName = name
@@ -321,7 +321,7 @@ func (store *AbstractSqlStore) ListDirectoryPrefixedEntries(ctx context.Context,
FullPath: util.NewFullPath(string(dirPath), name),
}
if err = entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data)); err != nil {
- glog.V(0).Infof("scan decode %s : %v", entry.FullPath, err)
+ log.V(3).Infof("scan decode %s : %v", entry.FullPath, err)
return lastFileName, fmt.Errorf("scan decode %s : %v", entry.FullPath, err)
}

@@ -6,7 +6,7 @@ import (
"encoding/base64"
"fmt"
"github.com/seaweedfs/seaweedfs/weed/filer"
- "github.com/seaweedfs/seaweedfs/weed/glog"
+ "github.com/seaweedfs/seaweedfs/weed/util/log"
"github.com/seaweedfs/seaweedfs/weed/util"
"strings"
)
@@ -31,7 +31,7 @@ func (store *AbstractSqlStore) KvPut(ctx context.Context, key []byte, value []by
}

// now the insert failed possibly due to duplication constraints
- glog.V(1).Infof("kv insert falls back to update: %s", err)
+ log.V(2).Infof("kv insert falls back to update: %s", err)

res, err = db.ExecContext(ctx, store.GetSqlUpdate(DEFAULT_TABLE), value, dirHash, name, dirStr)
if err != nil {

@@ -12,7 +12,7 @@ import (
"github.com/arangodb/go-driver"
"github.com/arangodb/go-driver/http"
"github.com/seaweedfs/seaweedfs/weed/filer"
- "github.com/seaweedfs/seaweedfs/weed/glog"
+ "github.com/seaweedfs/seaweedfs/weed/util/log"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/util"
)
@@ -233,7 +233,7 @@ func (store *ArangodbStore) FindEntry(ctx context.Context, fullpath util.FullPat
if driver.IsNotFound(err) {
return nil, filer_pb.ErrNotFound
}
- glog.Errorf("find %s: %v", fullpath, err)
+ log.Errorf("find %s: %v", fullpath, err)
return nil, filer_pb.ErrNotFound
}
if len(data.Meta) == 0 {
@@ -257,7 +257,7 @@ func (store *ArangodbStore) DeleteEntry(ctx context.Context, fullpath util.FullP
}
_, err = targetCollection.RemoveDocument(ctx, hashString(string(fullpath)))
if err != nil && !driver.IsNotFound(err) {
- glog.Errorf("find %s: %v", fullpath, err)
+ log.Errorf("find %s: %v", fullpath, err)
return fmt.Errorf("delete %s : %v", fullpath, err)
}
return nil
@@ -331,7 +331,7 @@ sort d.name asc
converted := arrayToBytes(data.Meta)
if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(converted)); decodeErr != nil {
err = decodeErr
- glog.V(0).Infof("list %s : %v", entry.FullPath, err)
+ log.V(3).Infof("list %s : %v", entry.FullPath, err)
break
}

@@ -7,7 +7,7 @@ import (

"github.com/seaweedfs/seaweedfs/weed/filer"

- "github.com/seaweedfs/seaweedfs/weed/glog"
+ "github.com/seaweedfs/seaweedfs/weed/util/log"
)

var _ filer.BucketAware = (*ArangodbStore)(nil)
@@ -18,7 +18,7 @@ func (store *ArangodbStore) OnBucketCreation(bucket string) {
// create the collection && add to cache
_, err := store.ensureBucket(timeout, bucket)
if err != nil {
- glog.Errorf("bucket create %s: %v", bucket, err)
+ log.Errorf("bucket create %s: %v", bucket, err)
}
}
func (store *ArangodbStore) OnBucketDeletion(bucket string) {
@@ -26,12 +26,12 @@ func (store *ArangodbStore) OnBucketDeletion(bucket string) {
defer cancel()
collection, err := store.ensureBucket(timeout, bucket)
if err != nil {
- glog.Errorf("bucket delete %s: %v", bucket, err)
+ log.Errorf("bucket delete %s: %v", bucket, err)
return
}
err = collection.Remove(timeout)
if err != nil && !driver.IsNotFound(err) {
- glog.Errorf("bucket delete %s: %v", bucket, err)
+ log.Errorf("bucket delete %s: %v", bucket, err)
return
}
store.mu.Lock()

@@ -6,7 +6,7 @@ import (

"github.com/arangodb/go-driver"
"github.com/seaweedfs/seaweedfs/weed/filer"
- "github.com/seaweedfs/seaweedfs/weed/glog"
+ "github.com/seaweedfs/seaweedfs/weed/util/log"
)

func (store *ArangodbStore) KvPut(ctx context.Context, key []byte, value []byte) (err error) {
@@ -38,7 +38,7 @@ func (store *ArangodbStore) KvGet(ctx context.Context, key []byte) (value []byte
return nil, filer.ErrKvNotFound
}
if err != nil {
- glog.Errorf("kv get: %s %v", string(key), err)
+ log.Errorf("kv get: %s %v", string(key), err)
return nil, filer.ErrKvNotFound
}
return arrayToBytes(model.Meta), nil
@@ -47,7 +47,7 @@ func (store *ArangodbStore) KvGet(ctx context.Context, key []byte) (value []byte
func (store *ArangodbStore) KvDelete(ctx context.Context, key []byte) (err error) {
_, err = store.kvCollection.RemoveDocument(ctx, hashString(".kvstore."+string(key)))
if err != nil {
- glog.Errorf("kv del: %v", err)
+ log.Errorf("kv del: %v", err)
return filer.ErrKvNotFound
}
return nil

@@ -8,7 +8,7 @@ import (
"time"

"github.com/seaweedfs/seaweedfs/weed/filer"
- "github.com/seaweedfs/seaweedfs/weed/glog"
+ "github.com/seaweedfs/seaweedfs/weed/util/log"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/util"
)
@@ -51,7 +51,7 @@ func (store *CassandraStore) initialize(keyspace string, hosts []string, usernam
}
store.cluster.Keyspace = keyspace
store.cluster.Timeout = time.Duration(timeout) * time.Millisecond
- glog.V(0).Infof("timeout = %d", timeout)
+ log.V(3).Infof("timeout = %d", timeout)
fallback := gocql.RoundRobinHostPolicy()
if localDC != "" {
fallback = gocql.DCAwareRoundRobinPolicy(localDC)
@@ -61,7 +61,7 @@ func (store *CassandraStore) initialize(keyspace string, hosts []string, usernam

store.session, err = store.cluster.CreateSession()
if err != nil {
- glog.V(0).Infof("Failed to open cassandra store, hosts %v, keyspace %s", hosts, keyspace)
+ log.V(3).Infof("Failed to open cassandra store, hosts %v, keyspace %s", hosts, keyspace)
}

// set directory hash
@@ -72,7 +72,7 @@ func (store *CassandraStore) initialize(keyspace string, hosts []string, usernam
dirHash := util.Md5String([]byte(dir))[:4]
store.superLargeDirectoryHash[dir] = dirHash
if existingDir, found := existingHash[dirHash]; found {
- glog.Fatalf("directory %s has the same hash as %s", dir, existingDir)
+ log.Fatalf("directory %s has the same hash as %s", dir, existingDir)
}
existingHash[dirHash] = dir
}
@@ -202,7 +202,7 @@ func (store *CassandraStore) ListDirectoryEntries(ctx context.Context, dirPath u
lastFileName = name
if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data)); decodeErr != nil {
err = decodeErr
- glog.V(0).Infof("list %s : %v", entry.FullPath, err)
+ log.V(3).Infof("list %s : %v", entry.FullPath, err)
break
}
if !eachEntryFunc(entry) {
@@ -210,7 +210,7 @@ func (store *CassandraStore) ListDirectoryEntries(ctx context.Context, dirPath u
}
}
if err = iter.Close(); err != nil {
- glog.V(0).Infof("list iterator close: %v", err)
+ log.V(3).Infof("list iterator close: %v", err)
}

return lastFileName, err

@@ -8,7 +8,7 @@ import (
"time"

"github.com/seaweedfs/seaweedfs/weed/filer"
- "github.com/seaweedfs/seaweedfs/weed/glog"
+ "github.com/seaweedfs/seaweedfs/weed/util/log"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/util"
)
@@ -51,7 +51,7 @@ func (store *Cassandra2Store) initialize(keyspace string, hosts []string, userna
}
store.cluster.Keyspace = keyspace
store.cluster.Timeout = time.Duration(timeout) * time.Millisecond
- glog.V(0).Infof("timeout = %d", timeout)
+ log.V(3).Infof("timeout = %d", timeout)
fallback := gocql.RoundRobinHostPolicy()
if localDC != "" {
fallback = gocql.DCAwareRoundRobinPolicy(localDC)
@@ -61,7 +61,7 @@ func (store *Cassandra2Store) initialize(keyspace string, hosts []string, userna

store.session, err = store.cluster.CreateSession()
if err != nil {
- glog.V(0).Infof("Failed to open cassandra2 store, hosts %v, keyspace %s", hosts, keyspace)
+ log.V(3).Infof("Failed to open cassandra2 store, hosts %v, keyspace %s", hosts, keyspace)
}

// set directory hash
@@ -72,7 +72,7 @@ func (store *Cassandra2Store) initialize(keyspace string, hosts []string, userna
dirHash := util.Md5String([]byte(dir))[:4]
store.superLargeDirectoryHash[dir] = dirHash
if existingDir, found := existingHash[dirHash]; found {
- glog.Fatalf("directory %s has the same hash as %s", dir, existingDir)
+ log.Fatalf("directory %s has the same hash as %s", dir, existingDir)
}
existingHash[dirHash] = dir
}
@@ -202,7 +202,7 @@ func (store *Cassandra2Store) ListDirectoryEntries(ctx context.Context, dirPath
lastFileName = name
if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data)); decodeErr != nil {
err = decodeErr
- glog.V(0).Infof("list %s : %v", entry.FullPath, err)
+ log.V(3).Infof("list %s : %v", entry.FullPath, err)
break
}
if !eachEntryFunc(entry) {
@@ -210,7 +210,7 @@ func (store *Cassandra2Store) ListDirectoryEntries(ctx context.Context, dirPath
}
}
if err = iter.Close(); err != nil {
- glog.V(0).Infof("list iterator close: %v", err)
+ log.V(3).Infof("list iterator close: %v", err)
}

return lastFileName, err

@@ -1,7 +1,7 @@
package filer

import (
- "github.com/seaweedfs/seaweedfs/weed/glog"
+ "github.com/seaweedfs/seaweedfs/weed/util/log"
"github.com/seaweedfs/seaweedfs/weed/util"
"os"
"reflect"
@@ -22,10 +22,10 @@ func (f *Filer) LoadConfiguration(config *util.ViperProxy) (isFresh bool) {
if config.GetBool(store.GetName() + ".enabled") {
store = reflect.New(reflect.ValueOf(store).Elem().Type()).Interface().(FilerStore)
if err := store.Initialize(config, store.GetName()+"."); err != nil {
- glog.Fatalf("failed to initialize store for %s: %+v", store.GetName(), err)
+ log.Fatalf("failed to initialize store for %s: %+v", store.GetName(), err)
}
isFresh = f.SetStore(store)
- glog.V(0).Infof("configured filer store to %s", store.GetName())
+ log.V(3).Infof("configured filer store to %s", store.GetName())
hasDefaultStoreConfigured = true
break
}
@@ -70,16 +70,16 @@ func (f *Filer) LoadConfiguration(config *util.ViperProxy) (isFresh bool) {

store = reflect.New(reflect.ValueOf(store).Elem().Type()).Interface().(FilerStore)
if err := store.Initialize(config, key+"."); err != nil {
- glog.Fatalf("Failed to initialize store for %s: %+v", key, err)
+ log.Fatalf("Failed to initialize store for %s: %+v", key, err)
}
location := config.GetString(key + ".location")
if location == "" {
- glog.Errorf("path-specific filer store needs %s", key+".location")
+ log.Errorf("path-specific filer store needs %s", key+".location")
os.Exit(-1)
}
f.Store.AddPathSpecificStore(location, storeId, store)

- glog.V(0).Infof("configure filer %s for %s", store.GetName(), location)
+ log.V(3).Infof("configure filer %s for %s", store.GetName(), location)
}

return
@@ -92,7 +92,7 @@ func validateOneEnabledStore(config *util.ViperProxy) {
if enabledStore == "" {
enabledStore = store.GetName()
} else {
- glog.Fatalf("Filer store is enabled for both %s and %s", enabledStore, store.GetName())
+ log.Fatalf("Filer store is enabled for both %s and %s", enabledStore, store.GetName())
}
}
}

@@ -12,7 +12,7 @@ import (
jsoniter "github.com/json-iterator/go"
elastic "github.com/olivere/elastic/v7"
"github.com/seaweedfs/seaweedfs/weed/filer"
- "github.com/seaweedfs/seaweedfs/weed/glog"
+ "github.com/seaweedfs/seaweedfs/weed/util/log"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
weed_util "github.com/seaweedfs/seaweedfs/weed/util"
)
@@ -70,7 +70,7 @@ func (store *ElasticStore) Initialize(configuration weed_util.Configuration, pre
if store.maxPageSize <= 0 {
store.maxPageSize = 10000
}
- glog.Infof("filer store elastic endpoints: %v.", servers)
+ log.Infof("filer store elastic endpoints: %v.", servers)
return store.initialize(options)
}

@@ -113,7 +113,7 @@ func (store *ElasticStore) InsertEntry(ctx context.Context, entry *filer.Entry)
}
value, err := jsoniter.Marshal(esEntry)
if err != nil {
- glog.Errorf("insert entry(%s) %v.", string(entry.FullPath), err)
+ log.Errorf("insert entry(%s) %v.", string(entry.FullPath), err)
return fmt.Errorf("insert entry marshal %v", err)
}
_, err = store.client.Index().
@@ -123,7 +123,7 @@ func (store *ElasticStore) InsertEntry(ctx context.Context, entry *filer.Entry)
BodyJson(string(value)).
Do(ctx)
if err != nil {
- glog.Errorf("insert entry(%s) %v.", string(entry.FullPath), err)
+ log.Errorf("insert entry(%s) %v.", string(entry.FullPath), err)
return fmt.Errorf("insert entry %v", err)
}
return nil
@@ -152,7 +152,7 @@ func (store *ElasticStore) FindEntry(ctx context.Context, fullpath weed_util.Ful
err := jsoniter.Unmarshal(searchResult.Source, esEntry)
return esEntry.Entry, err
}
- glog.Errorf("find entry(%s),%v.", string(fullpath), err)
+ log.Errorf("find entry(%s),%v.", string(fullpath), err)
return nil, filer_pb.ErrNotFound
}

@@ -178,7 +178,7 @@ func (store *ElasticStore) deleteIndex(ctx context.Context, index string) (err e
if elastic.IsNotFound(err) || (err == nil && deleteResult.Acknowledged) {
return nil
}
- glog.Errorf("delete index(%s) %v.", index, err)
+ log.Errorf("delete index(%s) %v.", index, err)
return err
}

@@ -193,14 +193,14 @@ func (store *ElasticStore) deleteEntry(ctx context.Context, index, id string) (e
return nil
}
}
- glog.Errorf("delete entry(index:%s,_id:%s) %v.", index, id, err)
+ log.Errorf("delete entry(index:%s,_id:%s) %v.", index, id, err)
return fmt.Errorf("delete entry %v", err)
}

func (store *ElasticStore) DeleteFolderChildren(ctx context.Context, fullpath weed_util.FullPath) (err error) {
_, err = store.ListDirectoryEntries(ctx, fullpath, "", false, math.MaxInt32, func(entry *filer.Entry) bool {
if err := store.DeleteEntry(ctx, entry.FullPath); err != nil {
- glog.Errorf("elastic delete %s: %v.", entry.FullPath, err)
+ log.Errorf("elastic delete %s: %v.", entry.FullPath, err)
return false
}
return true
@@ -228,7 +228,7 @@ func (store *ElasticStore) listDirectoryEntries(
result := &elastic.SearchResult{}
if (startFileName == "" && first) || inclusive {
if result, err = store.search(ctx, index, parentId); err != nil {
- glog.Errorf("search (%s,%s,%t,%d) %v.", string(fullpath), startFileName, inclusive, limit, err)
+ log.Errorf("search (%s,%s,%t,%d) %v.", string(fullpath), startFileName, inclusive, limit, err)
return
}
} else {
@@ -238,7 +238,7 @@ func (store *ElasticStore) listDirectoryEntries(
}
after := weed_util.Md5String([]byte(fullPath))
if result, err = store.searchAfter(ctx, index, parentId, after); err != nil {
- glog.Errorf("searchAfter (%s,%s,%t,%d) %v.", string(fullpath), startFileName, inclusive, limit, err)
+ log.Errorf("searchAfter (%s,%s,%t,%d) %v.", string(fullpath), startFileName, inclusive, limit, err)
return
}
}

@@ -11,7 +11,7 @@ import (

jsoniter "github.com/json-iterator/go"
elastic "github.com/olivere/elastic/v7"
- "github.com/seaweedfs/seaweedfs/weed/glog"
+ "github.com/seaweedfs/seaweedfs/weed/util/log"
)

func (store *ElasticStore) KvDelete(ctx context.Context, key []byte) (err error) {
@@ -25,7 +25,7 @@ func (store *ElasticStore) KvDelete(ctx context.Context, key []byte) (err error)
return nil
}
}
- glog.Errorf("delete key(id:%s) %v.", string(key), err)
+ log.Errorf("delete key(id:%s) %v.", string(key), err)
return fmt.Errorf("delete key %v", err)
}

@@ -44,7 +44,7 @@ func (store *ElasticStore) KvGet(ctx context.Context, key []byte) (value []byte,
return esEntry.Value, nil
}
}
- glog.Errorf("find key(%s),%v.", string(key), err)
+ log.Errorf("find key(%s),%v.", string(key), err)
return value, filer.ErrKvNotFound
}

@@ -52,7 +52,7 @@ func (store *ElasticStore) KvPut(ctx context.Context, key []byte, value []byte)
esEntry := &ESKVEntry{value}
val, err := jsoniter.Marshal(esEntry)
if err != nil {
- glog.Errorf("insert key(%s) %v.", string(key), err)
+ log.Errorf("insert key(%s) %v.", string(key), err)
return fmt.Errorf("insert key %v", err)
}
_, err = store.client.Index().

@@ -11,7 +11,7 @@ import (
"go.etcd.io/etcd/client/v3"

"github.com/seaweedfs/seaweedfs/weed/filer"
- "github.com/seaweedfs/seaweedfs/weed/glog"
+ "github.com/seaweedfs/seaweedfs/weed/util/log"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
weed_util "github.com/seaweedfs/seaweedfs/weed/util"
)
@@ -73,7 +73,7 @@ func (store *EtcdStore) Initialize(configuration weed_util.Configuration, prefix
}

func (store *EtcdStore) initialize(servers, username, password string, timeout time.Duration, tlsConfig *tls.Config) error {
- glog.Infof("filer store etcd: %s", servers)
+ log.Infof("filer store etcd: %s", servers)

client, err := clientv3.New(clientv3.Config{
Endpoints: strings.Split(servers, ","),
@@ -95,7 +95,7 @@ func (store *EtcdStore) initialize(servers, username, password string, timeout t
return fmt.Errorf("error checking etcd connection: %s", err)
}

- glog.V(0).Infof("сonnection to etcd has been successfully verified. etcd version: %s", resp.Version)
+ log.V(3).Infof("сonnection to etcd has been successfully verified. etcd version: %s", resp.Version)
store.client = client

return nil
@@ -208,7 +208,7 @@ func (store *EtcdStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPat
}
if decodeErr := entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(kv.Value)); decodeErr != nil {
err = decodeErr
- glog.V(0).Infof("list %s : %v", entry.FullPath, err)
+ log.V(3).Infof("list %s : %v", entry.FullPath, err)
break
}
if !eachEntryFunc(entry) {

@@ -12,7 +12,7 @@ import (

"google.golang.org/protobuf/proto"

- "github.com/seaweedfs/seaweedfs/weed/glog"
+ "github.com/seaweedfs/seaweedfs/weed/util/log"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/util"
util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
@@ -105,7 +105,7 @@ func ResolveOneChunkManifest(lookupFileIdFn wdclient.LookupFileIdFunctionType, c
func fetchWholeChunk(bytesBuffer *bytes.Buffer, lookupFileIdFn wdclient.LookupFileIdFunctionType, fileId string, cipherKey []byte, isGzipped bool) error {
urlStrings, err := lookupFileIdFn(fileId)
if err != nil {
- glog.Errorf("operation LookupFileId %s failed, err: %v", fileId, err)
+ log.Errorf("operation LookupFileId %s failed, err: %v", fileId, err)
return err
}
err = retriedStreamFetchChunkData(bytesBuffer, urlStrings, "", cipherKey, isGzipped, true, 0, 0)
@@ -118,7 +118,7 @@ func fetchWholeChunk(bytesBuffer *bytes.Buffer, lookupFileIdFn wdclient.LookupFi
func fetchChunkRange(buffer []byte, lookupFileIdFn wdclient.LookupFileIdFunctionType, fileId string, cipherKey []byte, isGzipped bool, offset int64) (int, error) {
urlStrings, err := lookupFileIdFn(fileId)
if err != nil {
- glog.Errorf("operation LookupFileId %s failed, err: %v", fileId, err)
+ log.Errorf("operation LookupFileId %s failed, err: %v", fileId, err)
return 0, err
}
return util_http.RetriedFetchChunkData(buffer, urlStrings, cipherKey, isGzipped, false, offset)
@@ -158,7 +158,7 @@ func retriedStreamFetchChunkData(writer io.Writer, urlStrings []string, jwt stri
break
}
if err != nil {
- glog.V(0).Infof("read %s failed, err: %v", urlString, err)
+ log.V(3).Infof("read %s failed, err: %v", urlString, err)
} else {
break
}
@@ -168,7 +168,7 @@ func retriedStreamFetchChunkData(writer io.Writer, urlStrings []string, jwt stri
break
}
if err != nil && shouldRetry {
- glog.V(0).Infof("retry reading in %v", waitTime)
+ log.V(3).Infof("retry reading in %v", waitTime)
time.Sleep(waitTime)
} else {
break

@@ -6,7 +6,7 @@ import (
"slices"
"testing"

- "github.com/seaweedfs/seaweedfs/weed/glog"
+ "github.com/seaweedfs/seaweedfs/weed/util/log"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
)

@@ -80,6 +80,6 @@ func printChunks(name string, chunks []*filer_pb.FileChunk) {
return int(a.Offset - b.Offset)
})
for _, chunk := range chunks {
- glog.V(0).Infof("%s chunk %s [%10d,%10d)", name, chunk.GetFileIdString(), chunk.Offset, chunk.Offset+int64(chunk.Size))
+ log.V(3).Infof("%s chunk %s [%10d,%10d)", name, chunk.GetFileIdString(), chunk.Offset, chunk.Offset+int64(chunk.Size))
}
}

@@ -18,7 +18,7 @@ import (

"google.golang.org/grpc"

- "github.com/seaweedfs/seaweedfs/weed/glog"
+ "github.com/seaweedfs/seaweedfs/weed/util/log"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/util"
"github.com/seaweedfs/seaweedfs/weed/util/log_buffer"
@@ -92,7 +92,7 @@ func (f *Filer) MaybeBootstrapFromOnePeer(self pb.ServerAddress, existingNodes [
return
}

- glog.V(0).Infof("bootstrap from %v clientId:%d", earliestNode.Address, f.UniqueFilerId)
+ log.V(3).Infof("bootstrap from %v clientId:%d", earliestNode.Address, f.UniqueFilerId)

return pb.WithFilerClient(false, f.UniqueFilerId, pb.ServerAddress(earliestNode.Address), f.GrpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
return filer_pb.StreamBfs(client, "/", snapshotTime.UnixNano(), func(parentPath util.FullPath, entry *filer_pb.Entry) error {
@@ -110,7 +110,7 @@ func (f *Filer) AggregateFromPeers(self pb.ServerAddress, existingNodes []*maste
snapshot = append(snapshot, address)
}
f.Dlm.LockRing.SetSnapshot(snapshot)
- glog.V(0).Infof("%s aggregate from peers %+v", self, snapshot)
+ log.V(3).Infof("%s aggregate from peers %+v", self, snapshot)

f.MetaAggregator = NewMetaAggregator(f, self, f.GrpcDialOption)
f.MasterClient.SetOnPeerUpdateFn(func(update *master_pb.ClusterNodeUpdate, startFrom time.Time) {
@@ -150,15 +150,15 @@ func (f *Filer) setOrLoadFilerStoreSignature(store FilerStore) (isFresh bool) {
storeIdBytes = make([]byte, 4)
util.Uint32toBytes(storeIdBytes, uint32(f.Signature))
if err = store.KvPut(context.Background(), []byte(FilerStoreId), storeIdBytes); err != nil {
- glog.Fatalf("set %s=%d : %v", FilerStoreId, f.Signature, err)
+ log.Fatalf("set %s=%d : %v", FilerStoreId, f.Signature, err)
}
- glog.V(0).Infof("create %s to %d", FilerStoreId, f.Signature)
+ log.V(3).Infof("create %s to %d", FilerStoreId, f.Signature)
return true
} else if err == nil && len(storeIdBytes) == 4 {
f.Signature = int32(util.BytesToUint32(storeIdBytes))
- glog.V(0).Infof("existing %s = %d", FilerStoreId, f.Signature)
+ log.V(3).Infof("existing %s = %d", FilerStoreId, f.Signature)
} else {
- glog.Fatalf("read %v=%v : %v", FilerStoreId, string(storeIdBytes), err)
+ log.Fatalf("read %v=%v : %v", FilerStoreId, string(storeIdBytes), err)
}
return false
}
@@ -201,7 +201,7 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool, isFr

/*
if !hasWritePermission(lastDirectoryEntry, entry) {
- glog.V(0).Infof("directory %s: %v, entry: uid=%d gid=%d",
+ log.V(3).Infof("directory %s: %v, entry: uid=%d gid=%d",
lastDirectoryEntry.FullPath, lastDirectoryEntry.Attr, entry.Uid, entry.Gid)
return fmt.Errorf("no write permission in folder %v", lastDirectoryEntry.FullPath)
}
@@ -216,19 +216,19 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool, isFr
}
}

- glog.V(4).Infof("InsertEntry %s: new entry: %v", entry.FullPath, entry.Name())
+ log.V(-1).Infof("InsertEntry %s: new entry: %v", entry.FullPath, entry.Name())
if err := f.Store.InsertEntry(ctx, entry); err != nil {
- glog.Errorf("insert entry %s: %v", entry.FullPath, err)
+ log.Errorf("insert entry %s: %v", entry.FullPath, err)
return fmt.Errorf("insert entry %s: %v", entry.FullPath, err)
}
} else {
if o_excl {
- glog.V(3).Infof("EEXIST: entry %s already exists", entry.FullPath)
+ log.V(0).Infof("EEXIST: entry %s already exists", entry.FullPath)
return fmt.Errorf("EEXIST: entry %s already exists", entry.FullPath)
}
- glog.V(4).Infof("UpdateEntry %s: old entry: %v", entry.FullPath, oldEntry.Name())
+ log.V(-1).Infof("UpdateEntry %s: old entry: %v", entry.FullPath, oldEntry.Name())
if err := f.UpdateEntry(ctx, oldEntry, entry); err != nil {
- glog.Errorf("update entry %s: %v", entry.FullPath, err)
+ log.Errorf("update entry %s: %v", entry.FullPath, err)
return fmt.Errorf("update entry %s: %v", entry.FullPath, err)
}
}
@@ -237,7 +237,7 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool, isFr

f.deleteChunksIfNotNew(oldEntry, entry)

- glog.V(4).Infof("CreateEntry %s: created", entry.FullPath)
+ log.V(-1).Infof("CreateEntry %s: created", entry.FullPath)

return nil
}
@@ -252,7 +252,7 @@ func (f *Filer) ensureParentDirectoryEntry(ctx context.Context, entry *Entry, di
// fmt.Printf("%d dirPath: %+v\n", level, dirPath)

// check the store directly
- glog.V(4).Infof("find uncached directory: %s", dirPath)
+ log.V(-1).Infof("find uncached directory: %s", dirPath)
dirEntry, _ := f.FindEntry(ctx, util.FullPath(dirPath))

// no such existing directory
@@ -287,11 +287,11 @@ func (f *Filer) ensureParentDirectoryEntry(ctx context.Context, entry *Entry, di
},
}

- glog.V(2).Infof("create directory: %s %v", dirPath, dirEntry.Mode)
+ log.V(1).Infof("create directory: %s %v", dirPath, dirEntry.Mode)
mkdirErr := f.Store.InsertEntry(ctx, dirEntry)
if mkdirErr != nil {
if fEntry, err := f.FindEntry(ctx, util.FullPath(dirPath)); err == filer_pb.ErrNotFound || fEntry == nil {
- glog.V(3).Infof("mkdir %s: %v", dirPath, mkdirErr)
+ log.V(0).Infof("mkdir %s: %v", dirPath, mkdirErr)
return fmt.Errorf("mkdir %s: %v", dirPath, mkdirErr)
}
} else {
@@ -301,7 +301,7 @@ func (f *Filer) ensureParentDirectoryEntry(ctx context.Context, entry *Entry, di
}

} else if !dirEntry.IsDirectory() {
- glog.Errorf("CreateEntry %s: %s should be a directory", entry.FullPath, dirPath)
+ log.Errorf("CreateEntry %s: %s should be a directory", entry.FullPath, dirPath)
return fmt.Errorf("%s is a file", dirPath)
}

@@ -312,11 +312,11 @@ func (f *Filer) UpdateEntry(ctx context.Context, oldEntry, entry *Entry) (err er
if oldEntry != nil {
entry.Attr.Crtime = oldEntry.Attr.Crtime
if oldEntry.IsDirectory() && !entry.IsDirectory() {
- glog.Errorf("existing %s is a directory", oldEntry.FullPath)
+ log.Errorf("existing %s is a directory", oldEntry.FullPath)
return fmt.Errorf("existing %s is a directory", oldEntry.FullPath)
}
if !oldEntry.IsDirectory() && entry.IsDirectory() {
- glog.Errorf("existing %s is a file", oldEntry.FullPath)
+ log.Errorf("existing %s is a file", oldEntry.FullPath)
return fmt.Errorf("existing %s is a file", oldEntry.FullPath)
}
}

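Across these hunks the verbosity levels are not copied one-to-one: glog.V(0) consistently becomes log.V(3), glog.V(2) becomes log.V(1), and glog.V(4) becomes log.V(-1). In other words the scale appears inverted, consistent with V(3-n) throughout. A tiny helper capturing that observed mapping, useful when reading the remaining hunks; it is inferred from the diff itself, not taken from the repository's migration tooling:

// remapVerbosity captures the glog -> util/log verbosity mapping observed
// in this commit's hunks: V(0)->V(3), V(1)->V(2), V(2)->V(1), V(3)->V(0),
// V(4)->V(-1). Inferred from the diff; not part of the repository.
func remapVerbosity(glogLevel int) int {
	return 3 - glogLevel
}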
@@ -10,7 +10,7 @@ import (
"github.com/seaweedfs/seaweedfs/weed/wdclient"
"google.golang.org/grpc"

- "github.com/seaweedfs/seaweedfs/weed/glog"
+ "github.com/seaweedfs/seaweedfs/weed/util/log"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/util"
"github.com/viant/ptrie"
@@ -68,7 +68,7 @@ func (fc *FilerConf) loadFromFiler(filer *Filer) (err error) {
if err == filer_pb.ErrNotFound {
return nil
}
- glog.Errorf("read filer conf entry %s: %v", filerConfPath, err)
+ log.Errorf("read filer conf entry %s: %v", filerConfPath, err)
return
}

@@ -83,7 +83,7 @@ func (fc *FilerConf) loadFromChunks(filer *Filer, content []byte, chunks []*file
if len(content) == 0 {
content, err = filer.readEntry(chunks, size)
if err != nil {
- glog.Errorf("read filer conf content: %v", err)
+ log.Errorf("read filer conf content: %v", err)
return
}
}
@@ -119,7 +119,7 @@ func (fc *FilerConf) GetLocationConf(locationPrefix string) (locConf *filer_pb.F
func (fc *FilerConf) SetLocationConf(locConf *filer_pb.FilerConf_PathConf) (err error) {
err = fc.rules.Put([]byte(locConf.LocationPrefix), locConf)
if err != nil {
- glog.Errorf("put location prefix: %v", err)
+ log.Errorf("put location prefix: %v", err)
}
return
}
@@ -132,7 +132,7 @@ func (fc *FilerConf) AddLocationConf(locConf *filer_pb.FilerConf_PathConf) (err
}
err = fc.rules.Put([]byte(locConf.LocationPrefix), locConf)
if err != nil {
- glog.Errorf("put location prefix: %v", err)
+ log.Errorf("put location prefix: %v", err)
}
return
}

@@ -4,7 +4,7 @@ import (
"context"
"fmt"

- "github.com/seaweedfs/seaweedfs/weed/glog"
+ "github.com/seaweedfs/seaweedfs/weed/util/log"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
"github.com/seaweedfs/seaweedfs/weed/util"
@@ -41,7 +41,7 @@ func (f *Filer) DeleteEntryMetaAndData(ctx context.Context, p util.FullPath, isR
return nil
})
if err != nil {
- glog.V(2).Infof("delete directory %s: %v", p, err)
+ log.V(1).Infof("delete directory %s: %v", p, err)
return fmt.Errorf("delete directory %s: %v", p, err)
}
}
@@ -74,12 +74,12 @@ func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry
for {
entries, _, err := f.ListDirectoryEntries(ctx, entry.FullPath, lastFileName, includeLastFile, PaginationSize, "", "", "")
if err != nil {
- glog.Errorf("list folder %s: %v", entry.FullPath, err)
+ log.Errorf("list folder %s: %v", entry.FullPath, err)
return fmt.Errorf("list folder %s: %v", entry.FullPath, err)
}
if lastFileName == "" && !isRecursive && len(entries) > 0 {
// only for first iteration in the loop
- glog.V(2).Infof("deleting a folder %s has children: %+v ...", entry.FullPath, entries[0].Name())
+ log.V(1).Infof("deleting a folder %s has children: %+v ...", entry.FullPath, entries[0].Name())
return fmt.Errorf("%s: %s", MsgFailDelNonEmptyFolder, entry.FullPath)
}

@@ -110,7 +110,7 @@ func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry
}
}

- glog.V(3).Infof("deleting directory %v delete chunks: %v", entry.FullPath, shouldDeleteChunks)
+ log.V(0).Infof("deleting directory %v delete chunks: %v", entry.FullPath, shouldDeleteChunks)

if storeDeletionErr := f.Store.DeleteFolderChildren(ctx, entry.FullPath); storeDeletionErr != nil {
return fmt.Errorf("filer store delete: %v", storeDeletionErr)
@@ -124,7 +124,7 @@ func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry

func (f *Filer) doDeleteEntryMetaAndData(ctx context.Context, entry *Entry, shouldDeleteChunks bool, isFromOtherCluster bool, signatures []int32) (err error) {

- glog.V(3).Infof("deleting entry %v, delete chunks: %v", entry.FullPath, shouldDeleteChunks)
+ log.V(0).Infof("deleting entry %v, delete chunks: %v", entry.FullPath, shouldDeleteChunks)

if storeDeletionErr := f.Store.DeleteOneEntry(ctx, entry); storeDeletionErr != nil {
return fmt.Errorf("filer store delete: %v", storeDeletionErr)
@@ -143,7 +143,7 @@ func (f *Filer) DoDeleteCollection(collectionName string) (err error) {
Name: collectionName,
})
if err != nil {
- glog.Infof("delete collection %s: %v", collectionName, err)
+ log.Infof("delete collection %s: %v", collectionName, err)
}
return err
})
@@ -153,7 +153,7 @@ func (f *Filer) DoDeleteCollection(collectionName string) (err error) {
func (f *Filer) maybeDeleteHardLinks(hardLinkIds []HardLinkId) {
for _, hardLinkId := range hardLinkIds {
if err := f.Store.DeleteHardLink(context.Background(), hardLinkId); err != nil {
- glog.Errorf("delete hard link id %d : %v", hardLinkId, err)
+ log.Errorf("delete hard link id %d : %v", hardLinkId, err)
}
}
}

@@ -7,7 +7,7 @@ import (
"github.com/seaweedfs/seaweedfs/weed/storage"
"github.com/seaweedfs/seaweedfs/weed/util"

- "github.com/seaweedfs/seaweedfs/weed/glog"
+ "github.com/seaweedfs/seaweedfs/weed/util/log"
"github.com/seaweedfs/seaweedfs/weed/operation"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/wdclient"
@@ -58,10 +58,10 @@ func (f *Filer) loopProcessingDeletion() {
_, err := operation.DeleteFileIdsWithLookupVolumeId(f.GrpcDialOption, toDeleteFileIds, lookupFunc)
if err != nil {
if !strings.Contains(err.Error(), storage.ErrorDeleted.Error()) {
- glog.V(0).Infof("deleting fileIds len=%d error: %v", deletionCount, err)
+ log.V(3).Infof("deleting fileIds len=%d error: %v", deletionCount, err)
}
} else {
- glog.V(2).Infof("deleting fileIds %+v", toDeleteFileIds)
+ log.V(1).Infof("deleting fileIds %+v", toDeleteFileIds)
}
}
})
@@ -92,7 +92,7 @@ func (f *Filer) doDeleteChunks(chunks []*filer_pb.FileChunk) {
}
dataChunks, manifestResolveErr := ResolveOneChunkManifest(f.MasterClient.LookupFileId, chunk)
if manifestResolveErr != nil {
- glog.V(0).Infof("failed to resolve manifest %s: %v", chunk.FileId, manifestResolveErr)
+ log.V(3).Infof("failed to resolve manifest %s: %v", chunk.FileId, manifestResolveErr)
}
for _, dChunk := range dataChunks {
f.fileIdDeletionQueue.EnQueue(dChunk.GetFileIdString())
@@ -118,7 +118,7 @@ func (f *Filer) deleteChunksIfNotNew(oldEntry, newEntry *Entry) {

toDelete, err := MinusChunks(f.MasterClient.GetLookupFileIdFunction(), oldChunks, newChunks)
if err != nil {
- glog.Errorf("Failed to resolve old entry chunks when delete old entry chunks. new: %s, old: %s", newChunks, oldChunks)
+ log.Errorf("Failed to resolve old entry chunks when delete old entry chunks. new: %s, old: %s", newChunks, oldChunks)
return
}
f.DeleteChunksNotRecursive(toDelete)

@@ -11,7 +11,7 @@ import (

"google.golang.org/protobuf/proto"

- "github.com/seaweedfs/seaweedfs/weed/glog"
+ "github.com/seaweedfs/seaweedfs/weed/util/log"
"github.com/seaweedfs/seaweedfs/weed/notification"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/util"
@@ -56,10 +56,10 @@ func (f *Filer) NotifyUpdateEvent(ctx context.Context, oldEntry, newEntry *Entry
}

if notification.Queue != nil {
- glog.V(3).Infof("notifying entry update %v", fullpath)
+ log.V(0).Infof("notifying entry update %v", fullpath)
if err := notification.Queue.SendMessage(fullpath, eventNotification); err != nil {
// throw message
- glog.Error(err)
+ log.Error(err)
}
}

@@ -78,7 +78,7 @@ func (f *Filer) logMetaEvent(ctx context.Context, fullpath string, eventNotifica
}
data, err := proto.Marshal(event)
if err != nil {
- glog.Errorf("failed to marshal filer_pb.SubscribeMetadataResponse %+v: %v", event, err)
+ log.Errorf("failed to marshal filer_pb.SubscribeMetadataResponse %+v: %v", event, err)
return
}

@@ -101,7 +101,7 @@ func (f *Filer) logFlushFunc(logBuffer *log_buffer.LogBuffer, startTime, stopTim

for {
if err := f.appendToFile(targetFile, buf); err != nil {
- glog.V(0).Infof("metadata log write failed %s: %v", targetFile, err)
+ log.V(3).Infof("metadata log write failed %s: %v", targetFile, err)
time.Sleep(737 * time.Millisecond)
} else {
break

@@ -14,7 +14,7 @@ import (
"github.com/seaweedfs/seaweedfs/weed/wdclient"
"google.golang.org/protobuf/proto"

- "github.com/seaweedfs/seaweedfs/weed/glog"
+ "github.com/seaweedfs/seaweedfs/weed/util/log"
"github.com/seaweedfs/seaweedfs/weed/util"
)

@@ -217,7 +217,7 @@ func (c *LogFileEntryCollector) collectMore(v *OrderedLogVisitor) (err error) {
// println(" enqueue", tsMinute)
t, parseErr := time.Parse("2006-01-02-15-04", tsMinute)
if parseErr != nil {
- glog.Errorf("failed to parse %s: %v", tsMinute, parseErr)
+ log.Errorf("failed to parse %s: %v", tsMinute, parseErr)
continue
}
filerId := getFilerId(hourMinuteEntry.Name())
@@ -237,7 +237,7 @@ func (c *LogFileEntryCollector) collectMore(v *OrderedLogVisitor) (err error) {
for filerId, entryName := range freshFilerIds {
iter, found := v.perFilerIteratorMap[filerId]
if !found {
- glog.Errorf("Unexpected! failed to find iterator for filer %s", filerId)
+ log.Errorf("Unexpected! failed to find iterator for filer %s", filerId)
continue
}
next, nextErr := iter.getNext(v)

@@ -2,7 +2,7 @@ package filer

import (
"bytes"
- "github.com/seaweedfs/seaweedfs/weed/glog"
+ "github.com/seaweedfs/seaweedfs/weed/util/log"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/util"
)
@@ -43,7 +43,7 @@ func (f *Filer) maybeReloadFilerConfiguration(event *filer_pb.SubscribeMetadataR
return
}

- glog.V(0).Infof("procesing %v", event)
+ log.V(3).Infof("procesing %v", event)
if entry.Name == FilerConfName {
f.reloadFilerConfiguration(entry)
}
@@ -62,7 +62,7 @@ func (f *Filer) reloadFilerConfiguration(entry *filer_pb.Entry) {
fc := NewFilerConf()
err := fc.loadFromChunks(f, entry.Content, entry.GetChunks(), FileSize(entry))
if err != nil {
- glog.Errorf("read filer conf chunks: %v", err)
+ log.Errorf("read filer conf chunks: %v", err)
return
}
f.FilerConf = fc
@@ -74,7 +74,7 @@ func (f *Filer) LoadFilerConf() {
return fc.loadFromFiler(f)
})
if err != nil {
- glog.Errorf("read filer conf: %v", err)
+ log.Errorf("read filer conf: %v", err)
return
}
f.FilerConf = fc
@@ -85,7 +85,7 @@ func (f *Filer) LoadFilerConf() {
// //////////////////////////////////
func (f *Filer) LoadRemoteStorageConfAndMapping() {
if err := f.RemoteStorage.LoadRemoteStorageConfigurationsAndMapping(f); err != nil {
- glog.Errorf("read remote conf and mapping: %v", err)
+ log.Errorf("read remote conf and mapping: %v", err)
return
}
}

@@ -4,7 +4,7 @@ import (
"bytes"
"context"
"fmt"
- "github.com/seaweedfs/seaweedfs/weed/glog"
+ "github.com/seaweedfs/seaweedfs/weed/util/log"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
)

@@ -22,7 +22,7 @@ func (fsw *FilerStoreWrapper) handleUpdateToHardLinks(ctx context.Context, entry
}

// check what is existing entry
- // glog.V(4).Infof("handleUpdateToHardLinks FindEntry %s", entry.FullPath)
+ // log.V(-1).Infof("handleUpdateToHardLinks FindEntry %s", entry.FullPath)
actualStore := fsw.getActualStore(entry.FullPath)
existingEntry, err := actualStore.FindEntry(ctx, entry.FullPath)
if err != nil && err != filer_pb.ErrNotFound {
@@ -31,7 +31,7 @@ func (fsw *FilerStoreWrapper) handleUpdateToHardLinks(ctx context.Context, entry

// remove old hard link
if err == nil && len(existingEntry.HardLinkId) != 0 && bytes.Compare(existingEntry.HardLinkId, entry.HardLinkId) != 0 {
- glog.V(4).Infof("handleUpdateToHardLinks DeleteHardLink %s", entry.FullPath)
+ log.V(-1).Infof("handleUpdateToHardLinks DeleteHardLink %s", entry.FullPath)
if err = fsw.DeleteHardLink(ctx, existingEntry.HardLinkId); err != nil {
return err
}
@@ -50,7 +50,7 @@ func (fsw *FilerStoreWrapper) setHardLink(ctx context.Context, entry *Entry) err
return encodeErr
}

- glog.V(4).Infof("setHardLink %v nlink:%d", entry.FullPath, entry.HardLinkCounter)
+ log.V(-1).Infof("setHardLink %v nlink:%d", entry.FullPath, entry.HardLinkCounter)

return fsw.KvPut(ctx, key, newBlob)
}
@@ -63,16 +63,16 @@ func (fsw *FilerStoreWrapper) maybeReadHardLink(ctx context.Context, entry *Entr

value, err := fsw.KvGet(ctx, key)
if err != nil {
- glog.Errorf("read %s hardlink %d: %v", entry.FullPath, entry.HardLinkId, err)
+ log.Errorf("read %s hardlink %d: %v", entry.FullPath, entry.HardLinkId, err)
return err
}

if err = entry.DecodeAttributesAndChunks(value); err != nil {
- glog.Errorf("decode %s hardlink %d: %v", entry.FullPath, entry.HardLinkId, err)
+ log.Errorf("decode %s hardlink %d: %v", entry.FullPath, entry.HardLinkId, err)
return err
}

- glog.V(4).Infof("maybeReadHardLink %v nlink:%d", entry.FullPath, entry.HardLinkCounter)
+ log.V(-1).Infof("maybeReadHardLink %v nlink:%d", entry.FullPath, entry.HardLinkCounter)

return nil
}
@@ -94,7 +94,7 @@ func (fsw *FilerStoreWrapper) DeleteHardLink(ctx context.Context, hardLinkId Har

entry.HardLinkCounter--
if entry.HardLinkCounter <= 0 {
- glog.V(4).Infof("DeleteHardLink KvDelete %v", key)
+ log.V(-1).Infof("DeleteHardLink KvDelete %v", key)
return fsw.KvDelete(ctx, key)
}

@@ -103,7 +103,7 @@ func (fsw *FilerStoreWrapper) DeleteHardLink(ctx context.Context, hardLinkId Har
return encodeErr
}

- glog.V(4).Infof("DeleteHardLink KvPut %v", key)
+ log.V(-1).Infof("DeleteHardLink KvPut %v", key)
return fsw.KvPut(ctx, key, newBlob)

}

@@ -7,7 +7,7 @@ import (
"strings"
"time"

- "github.com/seaweedfs/seaweedfs/weed/glog"
+ "github.com/seaweedfs/seaweedfs/weed/util/log"
"github.com/viant/ptrie"

"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
@@ -79,7 +79,7 @@ func (fsw *FilerStoreWrapper) AddPathSpecificStore(path string, storeId string,
fsw.storeIdToStore[storeId] = NewFilerStorePathTranslator(path, store)
err := fsw.pathToStore.Put([]byte(path), storeId)
if err != nil {
- glog.Fatalf("put path specific store: %v", err)
+ log.Fatalf("put path specific store: %v", err)
}
}

@@ -128,7 +128,7 @@ func (fsw *FilerStoreWrapper) InsertEntry(ctx context.Context, entry *Entry) err
return err
}

- // glog.V(4).Infof("InsertEntry %s", entry.FullPath)
+ // log.V(-1).Infof("InsertEntry %s", entry.FullPath)
return actualStore.InsertEntry(ctx, entry)
}

@@ -149,7 +149,7 @@ func (fsw *FilerStoreWrapper) UpdateEntry(ctx context.Context, entry *Entry) err
return err
}

- // glog.V(4).Infof("UpdateEntry %s", entry.FullPath)
+ // log.V(-1).Infof("UpdateEntry %s", entry.FullPath)
return actualStore.UpdateEntry(ctx, entry)
}

@@ -162,7 +162,7 @@ func (fsw *FilerStoreWrapper) FindEntry(ctx context.Context, fp util.FullPath) (
}()

entry, err = actualStore.FindEntry(ctx, fp)
- // glog.V(4).Infof("FindEntry %s: %v", fp, err)
+ // log.V(-1).Infof("FindEntry %s: %v", fp, err)
if err != nil {
if fsw.CanDropWholeBucket() && strings.Contains(err.Error(), "Table") && strings.Contains(err.Error(), "doesn't exist") {
err = filer_pb.ErrNotFound
@@ -192,14 +192,14 @@ func (fsw *FilerStoreWrapper) DeleteEntry(ctx context.Context, fp util.FullPath)
// remove hard link
op := ctx.Value("OP")
if op != "MV" {
- glog.V(4).Infof("DeleteHardLink %s", existingEntry.FullPath)
+ log.V(-1).Infof("DeleteHardLink %s", existingEntry.FullPath)
if err = fsw.DeleteHardLink(ctx, existingEntry.HardLinkId); err != nil {
return err
}
}
}

- // glog.V(4).Infof("DeleteEntry %s", fp)
+ // log.V(-1).Infof("DeleteEntry %s", fp)
return actualStore.DeleteEntry(ctx, fp)
}

@@ -215,14 +215,14 @@ func (fsw *FilerStoreWrapper) DeleteOneEntry(ctx context.Context, existingEntry
// remove hard link
op := ctx.Value("OP")
if op != "MV" {
- glog.V(4).Infof("DeleteHardLink %s", existingEntry.FullPath)
+ log.V(-1).Infof("DeleteHardLink %s", existingEntry.FullPath)
if err = fsw.DeleteHardLink(ctx, existingEntry.HardLinkId); err != nil {
return err
}
}
}

- // glog.V(4).Infof("DeleteOneEntry %s", existingEntry.FullPath)
+ // log.V(-1).Infof("DeleteOneEntry %s", existingEntry.FullPath)
return actualStore.DeleteEntry(ctx, existingEntry.FullPath)
}

@@ -234,7 +234,7 @@ func (fsw *FilerStoreWrapper) DeleteFolderChildren(ctx context.Context, fp util.
stats.FilerStoreHistogram.WithLabelValues(actualStore.GetName(), "deleteFolderChildren").Observe(time.Since(start).Seconds())
}()

- // glog.V(4).Infof("DeleteFolderChildren %s", fp)
+ // log.V(-1).Infof("DeleteFolderChildren %s", fp)
return actualStore.DeleteFolderChildren(ctx, fp)
}

@@ -246,7 +246,7 @@ func (fsw *FilerStoreWrapper) ListDirectoryEntries(ctx context.Context, dirPath
stats.FilerStoreHistogram.WithLabelValues(actualStore.GetName(), "list").Observe(time.Since(start).Seconds())
}()

- // glog.V(4).Infof("ListDirectoryEntries %s from %s limit %d", dirPath, startFileName, limit)
+ // log.V(-1).Infof("ListDirectoryEntries %s from %s limit %d", dirPath, startFileName, limit)
return actualStore.ListDirectoryEntries(ctx, dirPath, startFileName, includeStartFile, limit, func(entry *Entry) bool {
fsw.maybeReadHardLink(ctx, entry)
filer_pb.AfterEntryDeserialization(entry.GetChunks())
@@ -264,7 +264,7 @@ func (fsw *FilerStoreWrapper) ListDirectoryPrefixedEntries(ctx context.Context,
if limit > math.MaxInt32-1 {
limit = math.MaxInt32 - 1
}
- // glog.V(4).Infof("ListDirectoryPrefixedEntries %s from %s prefix %s limit %d", dirPath, startFileName, prefix, limit)
+ // log.V(-1).Infof("ListDirectoryPrefixedEntries %s from %s prefix %s limit %d", dirPath, startFileName, prefix, limit)
adjustedEntryFunc := func(entry *Entry) bool {
fsw.maybeReadHardLink(ctx, entry)
filer_pb.AfterEntryDeserialization(entry.GetChunks())

@@ -5,7 +5,7 @@ import (
"context"
"fmt"
"github.com/seaweedfs/seaweedfs/weed/filer"
- "github.com/seaweedfs/seaweedfs/weed/glog"
+ "github.com/seaweedfs/seaweedfs/weed/util/log"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/util"
"github.com/tsuna/gohbase"
@@ -203,7 +203,7 @@ func (store *HbaseStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPa
}
if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(value)); decodeErr != nil {
err = decodeErr
- glog.V(0).Infof("list %s : %v", entry.FullPath, err)
+ log.V(3).Infof("list %s : %v", entry.FullPath, err)
break
}
if !eachEntryFunc(entry) {

@@ -13,7 +13,7 @@ import (
"os"

"github.com/seaweedfs/seaweedfs/weed/filer"
- "github.com/seaweedfs/seaweedfs/weed/glog"
+ "github.com/seaweedfs/seaweedfs/weed/util/log"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
weed_util "github.com/seaweedfs/seaweedfs/weed/util"
)
@@ -44,7 +44,7 @@ func (store *LevelDBStore) Initialize(configuration weed_util.Configuration, pre
}

func (store *LevelDBStore) initialize(dir string) (err error) {
- glog.V(0).Infof("filer store dir: %s", dir)
+ log.V(3).Infof("filer store dir: %s", dir)
os.MkdirAll(dir, 0755)
if err := weed_util.TestFolderWritable(dir); err != nil {
return fmt.Errorf("Check Level Folder %s Writable: %s", dir, err)
@@ -61,7 +61,7 @@ func (store *LevelDBStore) initialize(dir string) (err error) {
store.db, err = leveldb.RecoverFile(dir, opts)
}
if err != nil {
- glog.Infof("filer store open dir %s: %v", dir, err)
+ log.Infof("filer store open dir %s: %v", dir, err)
return
}
}
@@ -205,7 +205,7 @@ func (store *LevelDBStore) ListDirectoryPrefixedEntries(ctx context.Context, dir
}
if decodeErr := entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(iter.Value())); decodeErr != nil {
err = decodeErr
- glog.V(0).Infof("list %s : %v", entry.FullPath, err)
+ log.V(3).Infof("list %s : %v", entry.FullPath, err)
break
}
if !eachEntryFunc(entry) {

@ -15,7 +15,7 @@ import (
|
||||
leveldb_util "github.com/syndtr/goleveldb/leveldb/util"
|
||||
|
||||
"github.com/seaweedfs/seaweedfs/weed/filer"
|
||||
"github.com/seaweedfs/seaweedfs/weed/glog"
|
||||
"github.com/seaweedfs/seaweedfs/weed/util/log"
|
||||
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
|
||||
weed_util "github.com/seaweedfs/seaweedfs/weed/util"
|
||||
)
|
||||
@ -40,7 +40,7 @@ func (store *LevelDB2Store) Initialize(configuration weed_util.Configuration, pr
|
||||
}
|
||||
|
||||
func (store *LevelDB2Store) initialize(dir string, dbCount int) (err error) {
|
||||
glog.Infof("filer store leveldb2 dir: %s", dir)
|
||||
log.Infof("filer store leveldb2 dir: %s", dir)
|
||||
os.MkdirAll(dir, 0755)
|
||||
if err := weed_util.TestFolderWritable(dir); err != nil {
|
||||
return fmt.Errorf("Check Level Folder %s Writable: %s", dir, err)
|
||||
@ -61,7 +61,7 @@ func (store *LevelDB2Store) initialize(dir string, dbCount int) (err error) {
|
||||
db, dbErr = leveldb.RecoverFile(dbFolder, opts)
|
||||
}
|
||||
if dbErr != nil {
|
||||
glog.Errorf("filer store open dir %s: %v", dbFolder, dbErr)
|
||||
log.Errorf("filer store open dir %s: %v", dbFolder, dbErr)
|
||||
return dbErr
|
||||
}
|
||||
store.dbs = append(store.dbs, db)
|
||||
@ -213,7 +213,7 @@ func (store *LevelDB2Store) ListDirectoryPrefixedEntries(ctx context.Context, di
|
||||
// println("list", entry.FullPath, "chunks", len(entry.GetChunks()))
|
||||
if decodeErr := entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(iter.Value())); decodeErr != nil {
|
||||
err = decodeErr
|
||||
glog.V(0).Infof("list %s : %v", entry.FullPath, err)
|
||||
log.V(3).Infof("list %s : %v", entry.FullPath, err)
|
||||
break
|
||||
}
|
||||
if !eachEntryFunc(entry) {
|
||||
|
@ -17,7 +17,7 @@ import (
|
||||
leveldb_util "github.com/syndtr/goleveldb/leveldb/util"
|
||||
|
||||
"github.com/seaweedfs/seaweedfs/weed/filer"
|
||||
"github.com/seaweedfs/seaweedfs/weed/glog"
|
||||
"github.com/seaweedfs/seaweedfs/weed/util/log"
|
||||
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
|
||||
weed_util "github.com/seaweedfs/seaweedfs/weed/util"
|
||||
)
|
||||
@ -47,7 +47,7 @@ func (store *LevelDB3Store) Initialize(configuration weed_util.Configuration, pr
|
||||
}
|
||||
|
||||
func (store *LevelDB3Store) initialize(dir string) (err error) {
|
||||
glog.Infof("filer store leveldb3 dir: %s", dir)
|
||||
log.Infof("filer store leveldb3 dir: %s", dir)
|
||||
os.MkdirAll(dir, 0755)
|
||||
if err := weed_util.TestFolderWritable(dir); err != nil {
|
||||
return fmt.Errorf("Check Level Folder %s Writable: %s", dir, err)
|
||||
@ -88,7 +88,7 @@ func (store *LevelDB3Store) loadDB(name string) (*leveldb.DB, error) {
|
||||
db, dbErr = leveldb.RecoverFile(dbFolder, opts)
|
||||
}
|
||||
if dbErr != nil {
|
||||
glog.Errorf("filer store open dir %s: %v", dbFolder, dbErr)
|
||||
log.Errorf("filer store open dir %s: %v", dbFolder, dbErr)
|
||||
return nil, dbErr
|
||||
}
|
||||
return db, nil
|
||||
@ -342,7 +342,7 @@ func (store *LevelDB3Store) ListDirectoryPrefixedEntries(ctx context.Context, di
|
||||
// println("list", entry.FullPath, "chunks", len(entry.GetChunks()))
|
||||
if decodeErr := entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(iter.Value())); decodeErr != nil {
|
||||
err = decodeErr
|
||||
glog.V(0).Infof("list %s : %v", entry.FullPath, err)
|
||||
log.V(3).Infof("list %s : %v", entry.FullPath, err)
|
||||
break
|
||||
}
|
||||
if !eachEntryFunc(entry) {
|
||||
|
@ -14,7 +14,7 @@ import (
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/protobuf/proto"
|
||||
|
||||
"github.com/seaweedfs/seaweedfs/weed/glog"
|
||||
"github.com/seaweedfs/seaweedfs/weed/util/log"
|
||||
"github.com/seaweedfs/seaweedfs/weed/pb"
|
||||
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
|
||||
"github.com/seaweedfs/seaweedfs/weed/util/log_buffer"
|
||||
@ -73,23 +73,23 @@ func (ma *MetaAggregator) OnPeerUpdate(update *master_pb.ClusterNodeUpdate, star
|
||||
func (ma *MetaAggregator) loopSubscribeToOneFiler(f *Filer, self pb.ServerAddress, peer pb.ServerAddress, startFrom time.Time, stopChan chan struct{}) {
|
||||
lastTsNs := startFrom.UnixNano()
|
||||
for {
|
||||
glog.V(0).Infof("loopSubscribeToOneFiler read %s start from %v %d", peer, time.Unix(0, lastTsNs), lastTsNs)
|
||||
log.V(3).Infof("loopSubscribeToOneFiler read %s start from %v %d", peer, time.Unix(0, lastTsNs), lastTsNs)
|
||||
nextLastTsNs, err := ma.doSubscribeToOneFiler(f, self, peer, lastTsNs)
|
||||
|
||||
// check stopChan to see if we should stop
|
||||
select {
|
||||
case <-stopChan:
|
||||
glog.V(0).Infof("stop subscribing peer %s meta change", peer)
|
||||
log.V(3).Infof("stop subscribing peer %s meta change", peer)
|
||||
return
|
||||
default:
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
errLvl := glog.Level(0)
|
||||
errLvl := log.Level(0)
|
||||
if strings.Contains(err.Error(), "duplicated local subscription detected") {
|
||||
errLvl = glog.Level(4)
|
||||
errLvl = log.Level(4)
|
||||
}
|
||||
glog.V(errLvl).Infof("subscribing remote %s meta change: %v", peer, err)
|
||||
log.V(errLvl).Infof("subscribing remote %s meta change: %v", peer, err)
|
||||
}
|
||||
if lastTsNs < nextLastTsNs {
|
||||
lastTsNs = nextLastTsNs
|
||||
@ -126,35 +126,35 @@ func (ma *MetaAggregator) doSubscribeToOneFiler(f *Filer, self pb.ServerAddress,
|
||||
defer func(prevTsNs int64) {
|
||||
if lastTsNs != prevTsNs && lastTsNs != lastPersistTime.UnixNano() {
|
||||
if err := ma.updateOffset(f, peer, peerSignature, lastTsNs); err == nil {
|
||||
glog.V(0).Infof("last sync time with %s at %v (%d)", peer, time.Unix(0, lastTsNs), lastTsNs)
|
||||
log.V(3).Infof("last sync time with %s at %v (%d)", peer, time.Unix(0, lastTsNs), lastTsNs)
|
||||
} else {
|
||||
glog.Errorf("failed to save last sync time with %s at %v (%d)", peer, time.Unix(0, lastTsNs), lastTsNs)
|
||||
log.Errorf("failed to save last sync time with %s at %v (%d)", peer, time.Unix(0, lastTsNs), lastTsNs)
|
||||
}
|
||||
}
|
||||
}(prevTsNs)
|
||||
}
|
||||
|
||||
glog.V(0).Infof("follow peer: %v, last %v (%d)", peer, time.Unix(0, lastTsNs), lastTsNs)
|
||||
log.V(3).Infof("follow peer: %v, last %v (%d)", peer, time.Unix(0, lastTsNs), lastTsNs)
|
||||
var counter int64
|
||||
var synced bool
|
||||
maybeReplicateMetadataChange = func(event *filer_pb.SubscribeMetadataResponse) {
|
||||
if err := Replay(f.Store, event); err != nil {
|
||||
glog.Errorf("failed to reply metadata change from %v: %v", peer, err)
|
||||
log.Errorf("failed to reply metadata change from %v: %v", peer, err)
|
||||
return
|
||||
}
|
||||
counter++
|
||||
if lastPersistTime.Add(time.Minute).Before(time.Now()) {
|
||||
if err := ma.updateOffset(f, peer, peerSignature, event.TsNs); err == nil {
|
||||
if event.TsNs < time.Now().Add(-2*time.Minute).UnixNano() {
|
||||
glog.V(0).Infof("sync with %s progressed to: %v %0.2f/sec", peer, time.Unix(0, event.TsNs), float64(counter)/60.0)
|
||||
log.V(3).Infof("sync with %s progressed to: %v %0.2f/sec", peer, time.Unix(0, event.TsNs), float64(counter)/60.0)
|
||||
} else if !synced {
|
||||
synced = true
|
||||
glog.V(0).Infof("synced with %s", peer)
|
||||
log.V(3).Infof("synced with %s", peer)
|
||||
}
|
||||
lastPersistTime = time.Now()
|
||||
counter = 0
|
||||
} else {
|
||||
glog.V(0).Infof("failed to update offset for %v: %v", peer, err)
|
||||
log.V(3).Infof("failed to update offset for %v: %v", peer, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -163,7 +163,7 @@ func (ma *MetaAggregator) doSubscribeToOneFiler(f *Filer, self pb.ServerAddress,
|
||||
processEventFn := func(event *filer_pb.SubscribeMetadataResponse) error {
|
||||
data, err := proto.Marshal(event)
|
||||
if err != nil {
|
||||
glog.Errorf("failed to marshal subscribed filer_pb.SubscribeMetadataResponse %+v: %v", event, err)
|
||||
log.Errorf("failed to marshal subscribed filer_pb.SubscribeMetadataResponse %+v: %v", event, err)
|
||||
return err
|
||||
}
|
||||
dir := event.Directory
|
||||
@ -175,7 +175,7 @@ func (ma *MetaAggregator) doSubscribeToOneFiler(f *Filer, self pb.ServerAddress,
|
||||
return nil
|
||||
}
|
||||
|
||||
glog.V(0).Infof("subscribing remote %s meta change: %v, clientId:%d", peer, time.Unix(0, lastTsNs), ma.filer.UniqueFilerId)
|
||||
log.V(3).Infof("subscribing remote %s meta change: %v, clientId:%d", peer, time.Unix(0, lastTsNs), ma.filer.UniqueFilerId)
|
||||
err = pb.WithFilerClient(true, 0, peer, ma.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
@ -188,7 +188,7 @@ func (ma *MetaAggregator) doSubscribeToOneFiler(f *Filer, self pb.ServerAddress,
|
||||
ClientEpoch: atomic.LoadInt32(&ma.filer.UniqueFilerEpoch),
|
||||
})
|
||||
if err != nil {
|
||||
glog.V(0).Infof("SubscribeLocalMetadata %v: %v", peer, err)
|
||||
log.V(3).Infof("SubscribeLocalMetadata %v: %v", peer, err)
|
||||
return fmt.Errorf("subscribe: %v", err)
|
||||
}
|
||||
|
||||
@ -198,12 +198,12 @@ func (ma *MetaAggregator) doSubscribeToOneFiler(f *Filer, self pb.ServerAddress,
|
||||
return nil
|
||||
}
|
||||
if listenErr != nil {
|
||||
glog.V(0).Infof("SubscribeLocalMetadata stream %v: %v", peer, listenErr)
|
||||
log.V(3).Infof("SubscribeLocalMetadata stream %v: %v", peer, listenErr)
|
||||
return listenErr
|
||||
}
|
||||
|
||||
if err := processEventFn(resp); err != nil {
|
||||
glog.V(0).Infof("SubscribeLocalMetadata process %v: %v", resp, err)
|
||||
log.V(3).Infof("SubscribeLocalMetadata process %v: %v", resp, err)
|
||||
return fmt.Errorf("process %v: %v", resp, err)
|
||||
}
|
||||
|
||||
@ -248,7 +248,7 @@ func (ma *MetaAggregator) readOffset(f *Filer, peer pb.ServerAddress, peerSignat
|
||||
|
||||
lastTsNs = int64(util.BytesToUint64(value))
|
||||
|
||||
glog.V(0).Infof("readOffset %s : %d", peer, lastTsNs)
|
||||
log.V(3).Infof("readOffset %s : %d", peer, lastTsNs)
|
||||
|
||||
return
|
||||
}
|
||||
@ -266,7 +266,7 @@ func (ma *MetaAggregator) updateOffset(f *Filer, peer pb.ServerAddress, peerSign
|
||||
return fmt.Errorf("updateOffset %s : %v", peer, err)
|
||||
}
|
||||
|
||||
glog.V(4).Infof("updateOffset %s : %d", peer, lastTsNs)
|
||||
log.V(-1).Infof("updateOffset %s : %d", peer, lastTsNs)
|
||||
|
||||
return
|
||||
}
|
||||
|
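The errLvl switch in the MetaAggregator hunk above demotes a known, expected error to a quieter verbosity so reconnect noise does not flood the logs. A standalone sketch of that pattern, using plain glog-style level numbers; `levelFor` is an illustrative helper, not code from this repository:

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

// levelFor picks a verbosity for an error: known/expected errors are logged
// only at high verbosity, everything else at the default level.
func levelFor(err error) int {
	if strings.Contains(err.Error(), "duplicated local subscription detected") {
		return 4 // expected during peer reconnects; keep quiet by default
	}
	return 0
}

func main() {
	err := errors.New("duplicated local subscription detected")
	fmt.Printf("log at V(%d): %v\n", levelFor(err), err)
}
```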
@@ -4,7 +4,7 @@ import (
"context"
"sync"

-"github.com/seaweedfs/seaweedfs/weed/glog"
+"github.com/seaweedfs/seaweedfs/weed/util/log"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/util"
)
@@ -15,7 +15,7 @@ func Replay(filerStore FilerStore, resp *filer_pb.SubscribeMetadataResponse) err
var newEntry *Entry
if message.OldEntry != nil {
oldPath = util.NewFullPath(resp.Directory, message.OldEntry.Name)
-glog.V(4).Infof("deleting %v", oldPath)
+log.V(-1).Infof("deleting %v", oldPath)
if err := filerStore.DeleteEntry(context.Background(), oldPath); err != nil {
return err
}
@@ -27,7 +27,7 @@ func Replay(filerStore FilerStore, resp *filer_pb.SubscribeMetadataResponse) err
dir = message.NewParentPath
}
key := util.NewFullPath(dir, message.NewEntry.Name)
-glog.V(4).Infof("creating %v", key)
+log.V(-1).Infof("creating %v", key)
newEntry = FromPbEntry(dir, message.NewEntry)
if err := filerStore.InsertEntry(context.Background(), newEntry); err != nil {
return err

@@ -10,7 +10,7 @@ import (
"time"

"github.com/seaweedfs/seaweedfs/weed/filer"
-"github.com/seaweedfs/seaweedfs/weed/glog"
+"github.com/seaweedfs/seaweedfs/weed/util/log"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/util"
"go.mongodb.org/mongo-driver/bson"
@@ -187,7 +187,7 @@ func (store *MongodbStore) FindEntry(ctx context.Context, fullpath util.FullPath
var where = bson.M{"directory": dir, "name": name}
err = store.connect.Database(store.database).Collection(store.collectionName).FindOne(ctx, where).Decode(&data)
if err != mongo.ErrNoDocuments && err != nil {
-glog.Errorf("find %s: %v", fullpath, err)
+log.Errorf("find %s: %v", fullpath, err)
return nil, filer_pb.ErrNotFound
}

@@ -264,7 +264,7 @@ func (store *MongodbStore) ListDirectoryPrefixedEntries(ctx context.Context, dir
lastFileName = data.Name
if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data.Meta)); decodeErr != nil {
err = decodeErr
-glog.V(0).Infof("list %s : %v", entry.FullPath, err)
+log.V(3).Infof("list %s : %v", entry.FullPath, err)
break
}

@@ -275,7 +275,7 @@ func (store *MongodbStore) ListDirectoryPrefixedEntries(ctx context.Context, dir
}

if err := cur.Close(ctx); err != nil {
-glog.V(0).Infof("list iterator close: %v", err)
+log.V(3).Infof("list iterator close: %v", err)
}

return lastFileName, err

@@ -4,7 +4,7 @@ import (
"context"
"fmt"
"github.com/seaweedfs/seaweedfs/weed/filer"
-"github.com/seaweedfs/seaweedfs/weed/glog"
+"github.com/seaweedfs/seaweedfs/weed/util/log"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
@@ -37,7 +37,7 @@ func (store *MongodbStore) KvGet(ctx context.Context, key []byte) (value []byte,
var where = bson.M{"directory": dir, "name": name}
err = store.connect.Database(store.database).Collection(store.collectionName).FindOne(ctx, where).Decode(&data)
if err != mongo.ErrNoDocuments && err != nil {
-glog.Errorf("kv get: %v", err)
+log.Errorf("kv get: %v", err)
return nil, filer.ErrKvNotFound
}

@@ -7,7 +7,7 @@ import (
"math/rand"
"sync"

-"github.com/seaweedfs/seaweedfs/weed/glog"
+"github.com/seaweedfs/seaweedfs/weed/util/log"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/util"
"github.com/seaweedfs/seaweedfs/weed/wdclient"
@@ -47,7 +47,7 @@ func LookupFn(filerClient filer_pb.FilerClient) wdclient.LookupFileIdFunctionTyp

locations = resp.LocationsMap[vid]
if locations == nil || len(locations.Locations) == 0 {
-glog.V(0).Infof("failed to locate %s", fileId)
+log.V(3).Infof("failed to locate %s", fileId)
return fmt.Errorf("failed to locate %s", fileId)
}
vicCacheLock.Lock()
@@ -113,7 +113,7 @@ func (c *ChunkReadAt) ReadAt(p []byte, offset int64) (n int, err error) {
c.chunkViews.Lock.RLock()
defer c.chunkViews.Lock.RUnlock()

-// glog.V(4).Infof("ReadAt [%d,%d) of total file size %d bytes %d chunk views", offset, offset+int64(len(p)), c.fileSize, len(c.chunkViews))
+// log.V(-1).Infof("ReadAt [%d,%d) of total file size %d bytes %d chunk views", offset, offset+int64(len(p)), c.fileSize, len(c.chunkViews))
n, _, err = c.doReadAt(p, offset)
return
}
@@ -125,7 +125,7 @@ func (c *ChunkReadAt) ReadAtWithTime(p []byte, offset int64) (n int, ts int64, e
c.chunkViews.Lock.RLock()
defer c.chunkViews.Lock.RUnlock()

-// glog.V(4).Infof("ReadAt [%d,%d) of total file size %d bytes %d chunk views", offset, offset+int64(len(p)), c.fileSize, len(c.chunkViews))
+// log.V(-1).Infof("ReadAt [%d,%d) of total file size %d bytes %d chunk views", offset, offset+int64(len(p)), c.fileSize, len(c.chunkViews))
return c.doReadAt(p, offset)
}

@@ -143,7 +143,7 @@ func (c *ChunkReadAt) doReadAt(p []byte, offset int64) (n int, ts int64, err err
}
if startOffset < chunk.ViewOffset {
gap := chunk.ViewOffset - startOffset
-glog.V(4).Infof("zero [%d,%d)", startOffset, chunk.ViewOffset)
+log.V(-1).Infof("zero [%d,%d)", startOffset, chunk.ViewOffset)
n += zero(p, startOffset-offset, gap)
startOffset, remaining = chunk.ViewOffset, remaining-gap
if remaining <= 0 {
@@ -155,12 +155,12 @@ func (c *ChunkReadAt) doReadAt(p []byte, offset int64) (n int, ts int64, err err
if chunkStart >= chunkStop {
continue
}
-// glog.V(4).Infof("read [%d,%d), %d/%d chunk %s [%d,%d)", chunkStart, chunkStop, i, len(c.chunkViews), chunk.FileId, chunk.ViewOffset-chunk.Offset, chunk.ViewOffset-chunk.Offset+int64(chunk.ViewSize))
+// log.V(-1).Infof("read [%d,%d), %d/%d chunk %s [%d,%d)", chunkStart, chunkStop, i, len(c.chunkViews), chunk.FileId, chunk.ViewOffset-chunk.Offset, chunk.ViewOffset-chunk.Offset+int64(chunk.ViewSize))
bufferOffset := chunkStart - chunk.ViewOffset + chunk.OffsetInChunk
ts = chunk.ModifiedTsNs
copied, err := c.readChunkSliceAt(p[startOffset-offset:chunkStop-chunkStart+startOffset-offset], chunk, nextChunks, uint64(bufferOffset))
if err != nil {
-glog.Errorf("fetching chunk %+v: %v\n", chunk, err)
+log.Errorf("fetching chunk %+v: %v\n", chunk, err)
return copied, ts, err
}

@@ -168,7 +168,7 @@ func (c *ChunkReadAt) doReadAt(p []byte, offset int64) (n int, ts int64, err err
startOffset, remaining = startOffset+int64(copied), remaining-int64(copied)
}

-// glog.V(4).Infof("doReadAt [%d,%d), n:%v, err:%v", offset, offset+int64(len(p)), n, err)
+// log.V(-1).Infof("doReadAt [%d,%d), n:%v, err:%v", offset, offset+int64(len(p)), n, err)

// zero the remaining bytes if a gap exists at the end of the last chunk (or a fully sparse file)
if err == nil && remaining > 0 {
@@ -178,7 +178,7 @@ func (c *ChunkReadAt) doReadAt(p []byte, offset int64) (n int, ts int64, err err
startOffset -= offset
}
if delta > 0 {
-glog.V(4).Infof("zero2 [%d,%d) of file size %d bytes", startOffset, startOffset+delta, c.fileSize)
+log.V(-1).Infof("zero2 [%d,%d) of file size %d bytes", startOffset, startOffset+delta, c.fileSize)
n += zero(p, startOffset, delta)
}
}
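The doReadAt hunks above zero-fill gaps between chunk views so sparse files read back as zeros. A simplified, self-contained sketch of that zero-fill step (the real `zero` helper in reader_at.go may differ; this only illustrates the idea):

```go
package main

import "fmt"

// zeroFill writes zeros into p for a gap of the given length starting at off,
// clamped to the buffer, and returns how many bytes were zeroed.
func zeroFill(p []byte, off, gap int64) int {
	end := off + gap
	if end > int64(len(p)) {
		end = int64(len(p))
	}
	for i := off; i < end; i++ {
		p[i] = 0
	}
	return int(end - off)
}

func main() {
	buf := []byte{1, 2, 3, 4, 5, 6}
	n := zeroFill(buf, 2, 3) // zero the sparse region [2,5)
	fmt.Println(n, buf)      // 3 [1 2 0 0 0 6]
}
```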
@@ -6,7 +6,7 @@ import (
"sync/atomic"
"time"

-"github.com/seaweedfs/seaweedfs/weed/glog"
+"github.com/seaweedfs/seaweedfs/weed/util/log"
"github.com/seaweedfs/seaweedfs/weed/util/chunk_cache"
util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
"github.com/seaweedfs/seaweedfs/weed/util/mem"
@@ -63,7 +63,7 @@ func (rc *ReaderCache) MaybeCache(chunkViews *Interval[*ChunkView]) {
continue
}
if rc.chunkCache.IsInCache(chunkView.FileId, true) {
-glog.V(4).Infof("%s is in cache", chunkView.FileId)
+log.V(-1).Infof("%s is in cache", chunkView.FileId)
continue
}

@@ -72,7 +72,7 @@ func (rc *ReaderCache) MaybeCache(chunkViews *Interval[*ChunkView]) {
return
}

-// glog.V(4).Infof("prefetch %s offset %d", chunkView.FileId, chunkView.ViewOffset)
+// log.V(-1).Infof("prefetch %s offset %d", chunkView.FileId, chunkView.ViewOffset)
// cache this chunk if not yet
shouldCache := (uint64(chunkView.ViewOffset) + chunkView.ChunkSize) <= rc.chunkCache.GetMaxFilePartSizeInCache()
cacher := newSingleChunkCacher(rc, chunkView.FileId, chunkView.CipherKey, chunkView.IsGzipped, int(chunkView.ChunkSize), shouldCache)
@@ -118,7 +118,7 @@ func (rc *ReaderCache) ReadChunkAt(buffer []byte, fileId string, cipherKey []byt
}
}

-// glog.V(4).Infof("cache1 %s", fileId)
+// log.V(-1).Infof("cache1 %s", fileId)

cacher := newSingleChunkCacher(rc, fileId, cipherKey, isGzipped, chunkSize, shouldCache)
go cacher.startCaching()
@@ -132,7 +132,7 @@ func (rc *ReaderCache) ReadChunkAt(buffer []byte, fileId string, cipherKey []byt
func (rc *ReaderCache) UnCache(fileId string) {
rc.Lock()
defer rc.Unlock()
-// glog.V(4).Infof("uncache %s", fileId)
+// log.V(-1).Infof("uncache %s", fileId)
if downloader, found := rc.downloaders[fileId]; found {
downloader.destroy()
delete(rc.downloaders, fileId)

@@ -10,7 +10,7 @@ import (
"github.com/redis/go-redis/v9"

"github.com/seaweedfs/seaweedfs/weed/filer"
-"github.com/seaweedfs/seaweedfs/weed/glog"
+"github.com/seaweedfs/seaweedfs/weed/util/log"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/util"
)
@@ -179,7 +179,7 @@ func (store *UniversalRedisStore) ListDirectoryEntries(ctx context.Context, dirP
entry, err := store.FindEntry(ctx, path)
lastFileName = fileName
if err != nil {
-glog.V(0).Infof("list %s : %v", path, err)
+log.V(3).Infof("list %s : %v", path, err)
if err == filer_pb.ErrNotFound {
continue
}

@@ -8,7 +8,7 @@ import (

"github.com/redis/go-redis/v9"
"github.com/seaweedfs/seaweedfs/weed/filer"
-"github.com/seaweedfs/seaweedfs/weed/glog"
+"github.com/seaweedfs/seaweedfs/weed/util/log"
"github.com/seaweedfs/seaweedfs/weed/util"
)

@@ -41,22 +41,22 @@ func (store *Redis2Store) initialize(hostPort string, password string, database
if enableMtls {
clientCert, err := tls.LoadX509KeyPair(clientCertPath, clientKeyPath)
if err != nil {
-glog.Fatalf("Error loading client certificate and key pair: %v", err)
+log.Fatalf("Error loading client certificate and key pair: %v", err)
}

caCertBytes, err := os.ReadFile(caCertPath)
if err != nil {
-glog.Fatalf("Error reading CA certificate file: %v", err)
+log.Fatalf("Error reading CA certificate file: %v", err)
}

caCertPool := x509.NewCertPool()
if ok := caCertPool.AppendCertsFromPEM(caCertBytes); !ok {
-glog.Fatalf("Error appending CA certificate to pool")
+log.Fatalf("Error appending CA certificate to pool")
}

redisHost, _, err := net.SplitHostPort(hostPort)
if err != nil {
-glog.Fatalf("Error parsing redis host and port from %s: %v", hostPort, err)
+log.Fatalf("Error parsing redis host and port from %s: %v", hostPort, err)
}

tlsConfig := &tls.Config{
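The Redis2 hunk above builds a mutual-TLS configuration before dialing. A compact, standard-library-only sketch of that setup: load a client keypair, trust a CA bundle, and derive the server name from host:port. All paths are placeholders and errors are fatal for brevity, matching the original code's behavior:

```go
package redisutil

import (
	"crypto/tls"
	"crypto/x509"
	"log"
	"net"
	"os"
)

// buildTLSConfig assembles a tls.Config for mutual TLS against a Redis server.
func buildTLSConfig(hostPort, certPath, keyPath, caPath string) *tls.Config {
	clientCert, err := tls.LoadX509KeyPair(certPath, keyPath)
	if err != nil {
		log.Fatalf("load client certificate and key pair: %v", err)
	}
	caBytes, err := os.ReadFile(caPath)
	if err != nil {
		log.Fatalf("read CA certificate file: %v", err)
	}
	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(caBytes) {
		log.Fatalf("append CA certificate to pool")
	}
	host, _, err := net.SplitHostPort(hostPort)
	if err != nil {
		log.Fatalf("parse host and port from %s: %v", hostPort, err)
	}
	return &tls.Config{
		Certificates: []tls.Certificate{clientCert},
		RootCAs:      pool,
		ServerName:   host,
	}
}
```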
@@ -8,7 +8,7 @@ import (
"github.com/redis/go-redis/v9"

"github.com/seaweedfs/seaweedfs/weed/filer"
-"github.com/seaweedfs/seaweedfs/weed/glog"
+"github.com/seaweedfs/seaweedfs/weed/util/log"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/util"
)
@@ -194,7 +194,7 @@ func (store *UniversalRedis2Store) ListDirectoryEntries(ctx context.Context, dir
entry, err := store.FindEntry(ctx, path)
lastFileName = fileName
if err != nil {
-glog.V(0).Infof("list %s : %v", path, err)
+log.V(3).Infof("list %s : %v", path, err)
if err == filer_pb.ErrNotFound {
continue
}

@@ -2,7 +2,7 @@ package redis3

import (
"github.com/redis/go-redis/v9"
-"github.com/seaweedfs/seaweedfs/weed/glog"
+"github.com/seaweedfs/seaweedfs/weed/util/log"
"github.com/seaweedfs/seaweedfs/weed/util/skiplist"
"google.golang.org/protobuf/proto"
)
@@ -22,7 +22,7 @@ func LoadItemList(data []byte, prefix string, client redis.UniversalClient, stor

message := &skiplist.SkipListProto{}
if err := proto.Unmarshal(data, message); err != nil {
-glog.Errorf("loading skiplist: %v", err)
+log.Errorf("loading skiplist: %v", err)
}
nl.skipList.MaxNewLevel = int(message.MaxNewLevel)
nl.skipList.MaxLevel = int(message.MaxLevel)
@@ -69,7 +69,7 @@ func (nl *ItemList) ToBytes() []byte {
}
data, err := proto.Marshal(message)
if err != nil {
-glog.Errorf("marshal skiplist: %v", err)
+log.Errorf("marshal skiplist: %v", err)
}
return data
}

@@ -4,7 +4,7 @@ import (
"context"
"fmt"
"github.com/redis/go-redis/v9"
-"github.com/seaweedfs/seaweedfs/weed/glog"
+"github.com/seaweedfs/seaweedfs/weed/util/log"
)

const maxNameBatchSizeLimit = 1000000
@@ -31,7 +31,7 @@ func insertChild(ctx context.Context, redisStore *UniversalRedis3Store, key stri
nameList := LoadItemList([]byte(data), key, client, store, maxNameBatchSizeLimit)

if err := nameList.WriteName(name); err != nil {
-glog.Errorf("add %s %s: %v", key, name, err)
+log.Errorf("add %s %s: %v", key, name, err)
return err
}

@@ -100,7 +100,7 @@ func removeChildren(ctx context.Context, redisStore *UniversalRedis3Store, key s

if err = nameList.ListNames("", func(name string) bool {
if err := onDeleteFn(name); err != nil {
-glog.Errorf("delete %s child %s: %v", key, name, err)
+log.Errorf("delete %s child %s: %v", key, name, err)
return false
}
return true
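The redis3 store above persists its directory name list as a protobuf-encoded skip list, logging (rather than failing) on marshal errors. A minimal sketch of that round trip against the real protobuf API; the package and function names here are illustrative only:

```go
package skiplistdemo

import (
	"log"

	"google.golang.org/protobuf/proto"
)

// Save serializes a proto message for storage in a Redis value, logging on
// failure the way ToBytes does above.
func Save(m proto.Message) []byte {
	data, err := proto.Marshal(m)
	if err != nil {
		log.Printf("marshal skiplist: %v", err)
	}
	return data
}

// Load restores the message; a failed unmarshal leaves m partially filled,
// which is why the original code logs rather than aborts.
func Load(data []byte, m proto.Message) {
	if err := proto.Unmarshal(data, m); err != nil {
		log.Printf("loading skiplist: %v", err)
	}
}
```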
@@ -10,7 +10,7 @@ import (
"github.com/go-redsync/redsync/v4/redis/goredis/v9"
"github.com/redis/go-redis/v9"
"github.com/seaweedfs/seaweedfs/weed/filer"
-"github.com/seaweedfs/seaweedfs/weed/glog"
+"github.com/seaweedfs/seaweedfs/weed/util/log"
"github.com/seaweedfs/seaweedfs/weed/util"
)

@@ -42,22 +42,22 @@ func (store *Redis3Store) initialize(hostPort string, password string, database
if enableMtls {
clientCert, err := tls.LoadX509KeyPair(clientCertPath, clientKeyPath)
if err != nil {
-glog.Fatalf("Error loading client certificate and key pair: %v", err)
+log.Fatalf("Error loading client certificate and key pair: %v", err)
}

caCertBytes, err := os.ReadFile(caCertPath)
if err != nil {
-glog.Fatalf("Error reading CA certificate file: %v", err)
+log.Fatalf("Error reading CA certificate file: %v", err)
}

caCertPool := x509.NewCertPool()
if ok := caCertPool.AppendCertsFromPEM(caCertBytes); !ok {
-glog.Fatalf("Error appending CA certificate to pool")
+log.Fatalf("Error appending CA certificate to pool")
}

redisHost, _, err := net.SplitHostPort(hostPort)
if err != nil {
-glog.Fatalf("Error parsing redis host and port from %s: %v", hostPort, err)
+log.Fatalf("Error parsing redis host and port from %s: %v", hostPort, err)
}

tlsConfig := &tls.Config{

@@ -4,7 +4,7 @@ import (
"context"
"fmt"
"github.com/redis/go-redis/v9"
-"github.com/seaweedfs/seaweedfs/weed/glog"
+"github.com/seaweedfs/seaweedfs/weed/util/log"
"github.com/seaweedfs/seaweedfs/weed/util/skiplist"
"google.golang.org/protobuf/proto"
)
@@ -27,7 +27,7 @@ func (m *SkipListElementStore) SaveElement(id int64, element *skiplist.SkipListE
key := fmt.Sprintf("%s%d", m.Prefix, id)
data, err := proto.Marshal(element)
if err != nil {
-glog.Errorf("marshal %s: %v", key, err)
+log.Errorf("marshal %s: %v", key, err)
}
return m.client.Set(context.Background(), key, data, 0).Err()
}

@@ -9,7 +9,7 @@ import (

redsync "github.com/go-redsync/redsync/v4"
"github.com/seaweedfs/seaweedfs/weed/filer"
-"github.com/seaweedfs/seaweedfs/weed/glog"
+"github.com/seaweedfs/seaweedfs/weed/util/log"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/util"
)
@@ -151,7 +151,7 @@ func (store *UniversalRedis3Store) ListDirectoryEntries(ctx context.Context, dir
entry, err := store.FindEntry(ctx, path)
lastFileName = fileName
if err != nil {
-glog.V(0).Infof("list %s : %v", path, err)
+log.V(3).Infof("list %s : %v", path, err)
if err == filer_pb.ErrNotFound {
return true
}

@@ -9,7 +9,7 @@ import (

"github.com/seaweedfs/seaweedfs/weed/filer"
"github.com/seaweedfs/seaweedfs/weed/filer/redis_lua/stored_procedure"
-"github.com/seaweedfs/seaweedfs/weed/glog"
+"github.com/seaweedfs/seaweedfs/weed/util/log"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/util"
)
@@ -162,7 +162,7 @@ func (store *UniversalRedisLuaStore) ListDirectoryEntries(ctx context.Context, d
entry, err := store.FindEntry(ctx, path)
lastFileName = fileName
if err != nil {
-glog.V(0).Infof("list %s : %v", path, err)
+log.V(3).Infof("list %s : %v", path, err)
if err == filer_pb.ErrNotFound {
continue
}

@@ -12,7 +12,7 @@ import (
"math"
"strings"

-"github.com/seaweedfs/seaweedfs/weed/glog"
+"github.com/seaweedfs/seaweedfs/weed/util/log"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/viant/ptrie"
)
@@ -43,7 +43,7 @@ func (rs *FilerRemoteStorage) LoadRemoteStorageConfigurationsAndMapping(filer *F
if err == filer_pb.ErrNotFound {
return nil
}
-glog.Errorf("read remote storage %s: %v", DirectoryEtcRemote, err)
+log.Errorf("read remote storage %s: %v", DirectoryEtcRemote, err)
return
}

@@ -125,7 +125,7 @@ func UnmarshalRemoteStorageMappings(oldContent []byte) (mappings *remote_pb.Remo
}
if len(oldContent) > 0 {
if err = proto.Unmarshal(oldContent, mappings); err != nil {
-glog.Warningf("unmarshal existing mappings: %v", err)
+log.Warningf("unmarshal existing mappings: %v", err)
}
}
return

@@ -14,7 +14,7 @@ import (
gorocksdb "github.com/linxGnu/grocksdb"

"github.com/seaweedfs/seaweedfs/weed/filer"
-"github.com/seaweedfs/seaweedfs/weed/glog"
+"github.com/seaweedfs/seaweedfs/weed/util/log"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
weed_util "github.com/seaweedfs/seaweedfs/weed/util"
)
@@ -60,7 +60,7 @@ func (store *RocksDBStore) Initialize(configuration weed_util.Configuration, pre
}

func (store *RocksDBStore) initialize(dir string) (err error) {
-glog.Infof("filer store rocksdb dir: %s", dir)
+log.Infof("filer store rocksdb dir: %s", dir)
os.MkdirAll(dir, 0755)
if err := weed_util.TestFolderWritable(dir); err != nil {
return fmt.Errorf("Check Level Folder %s Writable: %s", dir, err)
@@ -262,7 +262,7 @@ func (store *RocksDBStore) ListDirectoryPrefixedEntries(ctx context.Context, dir
// println("list", entry.FullPath, "chunks", len(entry.GetChunks()))
if decodeErr := entry.DecodeAttributesAndChunks(value); decodeErr != nil {
err = decodeErr
-glog.V(0).Infof("list %s : %v", entry.FullPath, err)
+log.V(3).Infof("list %s : %v", entry.FullPath, err)
return false
}
if !eachEntryFunc(entry) {

@@ -11,7 +11,7 @@ import (

"slices"

-"github.com/seaweedfs/seaweedfs/weed/glog"
+"github.com/seaweedfs/seaweedfs/weed/util/log"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/stats"
"github.com/seaweedfs/seaweedfs/weed/util"
@@ -81,7 +81,7 @@ func noJwtFunc(string) string {
}

func PrepareStreamContentWithThrottler(masterClient wdclient.HasLookupFileIdFunction, jwtFunc VolumeServerJwtFunction, chunks []*filer_pb.FileChunk, offset int64, size int64, downloadMaxBytesPs int64) (DoStreamContent, error) {
-glog.V(4).Infof("prepare to stream content for chunks: %d", len(chunks))
+log.V(-1).Infof("prepare to stream content for chunks: %d", len(chunks))
chunkViews := ViewFromChunks(masterClient.GetLookupFileIdFunction(), chunks, offset, size)

fileId2Url := make(map[string][]string)
@@ -95,15 +95,15 @@ func PrepareStreamContentWithThrottler(masterClient wdclient.HasLookupFileIdFunc
if err == nil && len(urlStrings) > 0 {
break
}
-glog.V(4).Infof("waiting for chunk: %s", chunkView.FileId)
+log.V(-1).Infof("waiting for chunk: %s", chunkView.FileId)
time.Sleep(backoff)
}
if err != nil {
-glog.V(1).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err)
+log.V(2).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err)
return nil, err
} else if len(urlStrings) == 0 {
errUrlNotFound := fmt.Errorf("operation LookupFileId %s failed, err: urls not found", chunkView.FileId)
-glog.Error(errUrlNotFound)
+log.Error(errUrlNotFound)
return nil, errUrlNotFound
}
fileId2Url[chunkView.FileId] = urlStrings
@@ -117,7 +117,7 @@ func PrepareStreamContentWithThrottler(masterClient wdclient.HasLookupFileIdFunc
if offset < chunkView.ViewOffset {
gap := chunkView.ViewOffset - offset
remaining -= gap
-glog.V(4).Infof("zero [%d,%d)", offset, chunkView.ViewOffset)
+log.V(-1).Infof("zero [%d,%d)", offset, chunkView.ViewOffset)
err := writeZero(writer, gap)
if err != nil {
return fmt.Errorf("write zero [%d,%d)", offset, chunkView.ViewOffset)
@@ -139,7 +139,7 @@ func PrepareStreamContentWithThrottler(masterClient wdclient.HasLookupFileIdFunc
downloadThrottler.MaybeSlowdown(int64(chunkView.ViewSize))
}
if remaining > 0 {
-glog.V(4).Infof("zero [%d,%d)", offset, offset+remaining)
+log.V(-1).Infof("zero [%d,%d)", offset, offset+remaining)
err := writeZero(writer, remaining)
if err != nil {
return fmt.Errorf("write zero [%d,%d)", offset, offset+remaining)
@@ -191,7 +191,7 @@ func ReadAll(buffer []byte, masterClient *wdclient.MasterClient, chunks []*filer
chunkView := x.Value
urlStrings, err := lookupFileIdFn(chunkView.FileId)
if err != nil {
-glog.V(1).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err)
+log.V(2).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err)
return err
}

@@ -319,13 +319,13 @@ func (c *ChunkStreamReader) prepareBufferFor(offset int64) (err error) {
if c.bufferOffset <= offset && offset < c.bufferOffset+int64(len(c.buffer)) {
return nil
}
-// glog.V(2).Infof("c.chunkView: %v buffer:[%d,%d) offset:%d totalSize:%d", c.chunkView, c.bufferOffset, c.bufferOffset+int64(len(c.buffer)), offset, c.totalSize)
+// log.V(1).Infof("c.chunkView: %v buffer:[%d,%d) offset:%d totalSize:%d", c.chunkView, c.bufferOffset, c.bufferOffset+int64(len(c.buffer)), offset, c.totalSize)

// find a possible chunk view
p := c.chunkView
for p != nil {
chunk := p.Value
-// glog.V(2).Infof("prepareBufferFor check chunk:[%d,%d)", chunk.ViewOffset, chunk.ViewOffset+int64(chunk.ViewSize))
+// log.V(1).Infof("prepareBufferFor check chunk:[%d,%d)", chunk.ViewOffset, chunk.ViewOffset+int64(chunk.ViewSize))
if insideChunk(offset, chunk) {
if c.isBufferEmpty() || c.bufferOffset != chunk.ViewOffset {
c.chunkView = p
@@ -345,7 +345,7 @@ func (c *ChunkStreamReader) prepareBufferFor(offset int64) (err error) {
func (c *ChunkStreamReader) fetchChunkToBuffer(chunkView *ChunkView) error {
urlStrings, err := c.lookupFileId(chunkView.FileId)
if err != nil {
-glog.V(1).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err)
+log.V(2).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err)
return err
}
var buffer bytes.Buffer
@@ -358,7 +358,7 @@ func (c *ChunkStreamReader) fetchChunkToBuffer(chunkView *ChunkView) error {
break
}
if err != nil {
-glog.V(1).Infof("read %s failed, err: %v", chunkView.FileId, err)
+log.V(2).Infof("read %s failed, err: %v", chunkView.FileId, err)
buffer.Reset()
} else {
break
@@ -371,7 +371,7 @@ func (c *ChunkStreamReader) fetchChunkToBuffer(chunkView *ChunkView) error {
c.bufferOffset = chunkView.ViewOffset
c.chunk = chunkView.FileId

-// glog.V(0).Infof("fetched %s [%d,%d)", chunkView.FileId, chunkView.ViewOffset, chunkView.ViewOffset+int64(chunkView.ViewSize))
+// log.V(3).Infof("fetched %s [%d,%d)", chunkView.FileId, chunkView.ViewOffset, chunkView.ViewOffset+int64(chunkView.ViewSize))

return nil
}
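The stream.go hunks above wait for a chunk lookup with a sleep-and-retry loop before giving up. A self-contained sketch of that lookup-with-backoff shape; the retry budget and backoff values here are illustrative, not the ones the original code uses:

```go
package streamdemo

import (
	"fmt"
	"time"
)

// lookupWithRetry retries a file-id lookup a few times, sleeping between
// attempts, and distinguishes "lookup errored" from "lookup returned no urls".
func lookupWithRetry(lookup func(string) ([]string, error), fileId string) ([]string, error) {
	var urls []string
	var err error
	backoff := 100 * time.Millisecond
	for i := 0; i < 5; i++ {
		urls, err = lookup(fileId)
		if err == nil && len(urls) > 0 {
			return urls, nil
		}
		time.Sleep(backoff)
		backoff *= 2 // simple exponential backoff
	}
	if err != nil {
		return nil, err
	}
	return nil, fmt.Errorf("operation LookupFileId %s failed, err: urls not found", fileId)
}
```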
@@ -11,7 +11,7 @@ import (
"time"

"github.com/seaweedfs/seaweedfs/weed/filer"
-"github.com/seaweedfs/seaweedfs/weed/glog"
+"github.com/seaweedfs/seaweedfs/weed/util/log"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/util"
weed_util "github.com/seaweedfs/seaweedfs/weed/util"
@@ -260,39 +260,39 @@ func (store *TarantoolStore) ListDirectoryEntries(ctx context.Context, dirPath w
}

if len(results) < 1 {
-glog.Errorf("Can't find results, data is empty")
+log.Errorf("Can't find results, data is empty")
return
}

rows, ok := results[0].([]interface{})
if !ok {
-glog.Errorf("Can't convert results[0] to list")
+log.Errorf("Can't convert results[0] to list")
return
}

for _, result := range rows {
row, ok := result.([]interface{})
if !ok {
-glog.Errorf("Can't convert result to list")
+log.Errorf("Can't convert result to list")
return
}

if len(row) < 5 {
-glog.Errorf("Length of result is less than needed: %v", len(row))
+log.Errorf("Length of result is less than needed: %v", len(row))
return
}

nameRaw := row[2]
name, ok := nameRaw.(string)
if !ok {
-glog.Errorf("Can't convert name field to string. Actual type: %v, value: %v", reflect.TypeOf(nameRaw), nameRaw)
+log.Errorf("Can't convert name field to string. Actual type: %v, value: %v", reflect.TypeOf(nameRaw), nameRaw)
return
}

dataRaw := row[4]
data, ok := dataRaw.(string)
if !ok {
-glog.Errorf("Can't convert data field to string. Actual type: %v, value: %v", reflect.TypeOf(dataRaw), dataRaw)
+log.Errorf("Can't convert data field to string. Actual type: %v, value: %v", reflect.TypeOf(dataRaw), dataRaw)
return
}

@@ -302,7 +302,7 @@ func (store *TarantoolStore) ListDirectoryEntries(ctx context.Context, dirPath w
lastFileName = name
if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData([]byte(data))); decodeErr != nil {
err = decodeErr
-glog.V(0).Infof("list %s : %v", entry.FullPath, err)
+log.V(3).Infof("list %s : %v", entry.FullPath, err)
break
}
if !eachEntryFunc(entry) {

@@ -12,7 +12,7 @@ import (
"strings"

"github.com/seaweedfs/seaweedfs/weed/filer"
-"github.com/seaweedfs/seaweedfs/weed/glog"
+"github.com/seaweedfs/seaweedfs/weed/util/log"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/util"
"github.com/tikv/client-go/v2/config"
@@ -66,7 +66,7 @@ func (store *TikvStore) initialize(ca, cert, key string, verify_cn, pdAddrs []st
func (store *TikvStore) Shutdown() {
err := store.client.Close()
if err != nil {
-glog.V(0).Infof("Shutdown TiKV client got error: %v", err)
+log.V(3).Infof("Shutdown TiKV client got error: %v", err)
}
}

@@ -249,7 +249,7 @@ func (store *TikvStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPat
// println("list", entry.FullPath, "chunks", len(entry.GetChunks()))
if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(iter.Value())); decodeErr != nil {
err = decodeErr
-glog.V(0).Infof("list %s : %v", entry.FullPath, err)
+log.V(3).Infof("list %s : %v", entry.FullPath, err)
break
}
if err := iter.Next(); !eachEntryFunc(entry) || err != nil {

@@ -14,7 +14,7 @@ import (

"github.com/seaweedfs/seaweedfs/weed/filer"
"github.com/seaweedfs/seaweedfs/weed/filer/abstract_sql"
-"github.com/seaweedfs/seaweedfs/weed/glog"
+"github.com/seaweedfs/seaweedfs/weed/util/log"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/util"
environ "github.com/ydb-platform/ydb-go-sdk-auth-environ"
@@ -69,7 +69,7 @@ func (store *YdbStore) initialize(dirBuckets string, dsn string, tablePathPrefix
store.dirBuckets = dirBuckets
store.SupportBucketTable = useBucketPrefix
if store.SupportBucketTable {
-glog.V(0).Infof("enabled BucketPrefix")
+log.V(3).Infof("enabled BucketPrefix")
}
store.dbs = make(map[string]bool)
ctx, cancel := context.WithCancel(context.Background())
@@ -203,7 +203,7 @@ func (store *YdbStore) DeleteEntry(ctx context.Context, fullpath util.FullPath)
dir, name := fullpath.DirAndName()
tablePathPrefix, shortDir := store.getPrefix(ctx, &dir)
query := withPragma(tablePathPrefix, deleteQuery)
-glog.V(4).Infof("DeleteEntry %s, tablePathPrefix %s, shortDir %s", fullpath, *tablePathPrefix, *shortDir)
+log.V(-1).Infof("DeleteEntry %s, tablePathPrefix %s, shortDir %s", fullpath, *tablePathPrefix, *shortDir)
queryParams := table.NewQueryParameters(
table.ValueParam("$dir_hash", types.Int64Value(util.HashStringToLong(*shortDir))),
table.ValueParam("$name", types.UTF8Value(name)))
@@ -251,7 +251,7 @@ func (store *YdbStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPath
if chunkLimit > maxChunk {
chunkLimit = maxChunk
}
-glog.V(4).Infof("startFileName %s, restLimit %d, chunkLimit %d", startFileName, restLimit, chunkLimit)
+log.V(-1).Infof("startFileName %s, restLimit %d, chunkLimit %d", startFileName, restLimit, chunkLimit)

queryParams := table.NewQueryParameters(
table.ValueParam("$dir_hash", types.Int64Value(util.HashStringToLong(*shortDir))),
@@ -268,14 +268,14 @@ func (store *YdbStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPath
return nil
}
truncated = res.CurrentResultSet().Truncated()
-glog.V(4).Infof("truncated %v, entryCount %d", truncated, entryCount)
+log.V(-1).Infof("truncated %v, entryCount %d", truncated, entryCount)
for res.NextRow() {
if err := res.ScanNamed(
named.OptionalWithDefault("name", &name),
named.OptionalWithDefault("meta", &data)); err != nil {
return fmt.Errorf("list scanNamed %s : %v", dir, err)
}
-glog.V(8).Infof("name %s, fullpath %s", name, util.NewFullPath(dir, name))
+log.V(-1).Infof("name %s, fullpath %s", name, util.NewFullPath(dir, name))
lastFileName = name
entry := &filer.Entry{
FullPath: util.NewFullPath(dir, name),
@@ -345,7 +345,7 @@ func (store *YdbStore) OnBucketCreation(bucket string) {
defer store.dbsLock.Unlock()

if err := store.createTable(context.Background(), prefix); err != nil {
-glog.Errorf("createTable %s: %v", prefix, err)
+log.Errorf("createTable %s: %v", prefix, err)
}

if store.dbs == nil {
@@ -362,14 +362,14 @@ func (store *YdbStore) OnBucketDeletion(bucket string) {
defer store.dbsLock.Unlock()

prefix := path.Join(store.tablePathPrefix, bucket)
-glog.V(4).Infof("deleting table %s", prefix)
+log.V(-1).Infof("deleting table %s", prefix)

if err := store.deleteTable(context.Background(), prefix); err != nil {
-glog.Errorf("deleteTable %s: %v", prefix, err)
+log.Errorf("deleteTable %s: %v", prefix, err)
}

if err := store.DB.Scheme().RemoveDirectory(context.Background(), prefix); err != nil {
-glog.Errorf("remove directory %s: %v", prefix, err)
+log.Errorf("remove directory %s: %v", prefix, err)
}

if store.dbs == nil {
@@ -393,7 +393,7 @@ func (store *YdbStore) deleteTable(ctx context.Context, prefix string) error {
}); err != nil {
return err
}
-glog.V(4).Infof("deleted table %s", prefix)
+log.V(-1).Infof("deleted table %s", prefix)

return nil
}
@@ -406,11 +406,11 @@ func (store *YdbStore) getPrefix(ctx context.Context, dir *string) (tablePathPre
}

prefixBuckets := store.dirBuckets + "/"
-glog.V(4).Infof("dir: %s, prefixBuckets: %s", *dir, prefixBuckets)
+log.V(-1).Infof("dir: %s, prefixBuckets: %s", *dir, prefixBuckets)
if strings.HasPrefix(*dir, prefixBuckets) {
// detect bucket
bucketAndDir := (*dir)[len(prefixBuckets):]
-glog.V(4).Infof("bucketAndDir: %s", bucketAndDir)
+log.V(-1).Infof("bucketAndDir: %s", bucketAndDir)
var bucket string
if t := strings.Index(bucketAndDir, "/"); t > 0 {
bucket = bucketAndDir[:t]
@@ -428,9 +428,9 @@ func (store *YdbStore) getPrefix(ctx context.Context, dir *string) (tablePathPre
if _, found := store.dbs[bucket]; !found {
if err := store.createTable(ctx, tablePathPrefixWithBucket); err == nil {
store.dbs[bucket] = true
-glog.V(4).Infof("created table %s", tablePathPrefixWithBucket)
+log.V(-1).Infof("created table %s", tablePathPrefixWithBucket)
} else {
-glog.Errorf("createTable %s: %v", tablePathPrefixWithBucket, err)
+log.Errorf("createTable %s: %v", tablePathPrefixWithBucket, err)
}
}
tablePathPrefix = &tablePathPrefixWithBucket
@@ -441,7 +441,7 @@ func (store *YdbStore) getPrefix(ctx context.Context, dir *string) (tablePathPre
func (store *YdbStore) ensureTables(ctx context.Context) error {
prefixFull := store.tablePathPrefix

-glog.V(4).Infof("creating base table %s", prefixFull)
+log.V(-1).Infof("creating base table %s", prefixFull)
baseTable := path.Join(prefixFull, abstract_sql.DEFAULT_TABLE)
if err := store.DB.Table().Do(ctx, func(ctx context.Context, s table.Session) error {
return s.CreateTable(ctx, baseTable, createTableOptions()...)
@@ -449,17 +449,17 @@ func (store *YdbStore) ensureTables(ctx context.Context) error {
return fmt.Errorf("failed to create base table %s: %v", baseTable, err)
}

-glog.V(4).Infof("creating bucket tables")
+log.V(-1).Infof("creating bucket tables")
if store.SupportBucketTable {
store.dbsLock.Lock()
defer store.dbsLock.Unlock()
for bucket := range store.dbs {
-glog.V(4).Infof("creating bucket table %s", bucket)
+log.V(-1).Infof("creating bucket table %s", bucket)
bucketTable := path.Join(prefixFull, bucket, abstract_sql.DEFAULT_TABLE)
if err := store.DB.Table().Do(ctx, func(ctx context.Context, s table.Session) error {
return s.CreateTable(ctx, bucketTable, createTableOptions()...)
}); err != nil {
-glog.Errorf("failed to create bucket table %s: %v", bucketTable, err)
+log.Errorf("failed to create bucket table %s: %v", bucketTable, err)
}
}
}
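The YdbStore above creates one table per bucket lazily, tracking which buckets already have tables in a mutex-guarded map. A minimal sketch of that create-once pattern; `createTable` stands in for the real YDB call and the type names are illustrative:

```go
package ydbdemo

import "sync"

type store struct {
	mu  sync.Mutex
	dbs map[string]bool // buckets whose tables already exist
}

// ensureBucket creates the bucket's table at most once, even under
// concurrent callers, mirroring getPrefix/OnBucketCreation above.
func (s *store) ensureBucket(bucket string, createTable func(string) error) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.dbs == nil {
		s.dbs = make(map[string]bool)
	}
	if s.dbs[bucket] {
		return nil // already created
	}
	if err := createTable(bucket); err != nil {
		return err
	}
	s.dbs[bucket] = true
	return nil
}
```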
@@ -1,7 +1,7 @@
package filer_client

import (
-"github.com/seaweedfs/seaweedfs/weed/glog"
+"github.com/seaweedfs/seaweedfs/weed/util/log"
"github.com/seaweedfs/seaweedfs/weed/mq/topic"
"github.com/seaweedfs/seaweedfs/weed/pb"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
@@ -20,7 +20,7 @@ func (fca *FilerClientAccessor) WithFilerClient(streamingMode bool, fn func(file

func (fca *FilerClientAccessor) SaveTopicConfToFiler(t topic.Topic, conf *mq_pb.ConfigureTopicResponse) error {

-glog.V(0).Infof("save conf for topic %v to filer", t)
+log.V(3).Infof("save conf for topic %v to filer", t)

// save the topic configuration on filer
return fca.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
@@ -30,7 +30,7 @@ func (fca *FilerClientAccessor) SaveTopicConfToFiler(t topic.Topic, conf *mq_pb.

func (fca *FilerClientAccessor) ReadTopicConfFromFiler(t topic.Topic) (conf *mq_pb.ConfigureTopicResponse, err error) {

-glog.V(1).Infof("load conf for topic %v from filer", t)
+log.V(2).Infof("load conf for topic %v from filer", t)

if err = fca.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
conf, err = t.ReadConfFile(client)

@@ -20,17 +20,17 @@
//
// Basic examples:
//
-// glog.Info("Prepare to repel boarders")
+// log.Info("Prepare to repel boarders")
//
-// glog.Fatalf("Initialization failed: %s", err)
+// log.Fatalf("Initialization failed: %s", err)
//
// See the documentation for the V function for an explanation of these examples:
//
-// if glog.V(2) {
-// glog.Info("Starting transaction...")
+// if log.V(1) {
+// log.Info("Starting transaction...")
// }
//
-// glog.V(2).Infoln("Processed", nItems, "elements")
+// log.V(1).Infoln("Processed", nItems, "elements")
//
// Log output is buffered and written periodically using Flush. Programs
// should call Flush before exiting to guarantee all log output is written.
@@ -740,7 +740,7 @@ func (l *loggingT) output(s severity, buf *buffer, file string, line int, alsoTo

// timeoutFlush calls Flush and returns when it completes or after timeout
// elapses, whichever happens first. This is needed because the hooks invoked
-// by Flush may deadlock when glog.Fatal is called from a hook that holds
+// by Flush may deadlock when log.Fatal is called from a hook that holds
// a lock.
func timeoutFlush(timeout time.Duration) {
done := make(chan bool, 1)
@@ -989,11 +989,11 @@ type Verbose bool
// and Infof. These methods will write to the Info log if called.
// Thus, one may write either
//
-// if glog.V(2) { glog.Info("log this") }
+// if log.V(1) { log.Info("log this") }
//
// or
//
-// glog.V(2).Info("log this")
+// log.V(1).Info("log this")
//
// The second form is shorter but the first is cheaper if logging is off because it does
// not evaluate its arguments.
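The package doc above notes that the guarded form is cheaper when logging is off because the arguments are never evaluated. A toy re-implementation of that V/Verbose mechanism makes the point concrete; this is a sketch of the glog design, not the actual util/log implementation:

```go
package glogdemo

import "fmt"

// Verbose mimics the glog-style V() guard: a bool with logging methods, so
// "if V(n) { ... }" can skip argument evaluation entirely when disabled.
type Verbose bool

func (v Verbose) Infoln(args ...interface{}) {
	if v {
		fmt.Println(args...)
	}
}

var verbosity = 1

// V reports whether messages at the given level should be logged.
func V(level int) Verbose { return Verbose(level <= verbosity) }

func Example() {
	nItems := 42
	// Cheap when disabled: nothing inside the guard runs unless V(2) is on.
	if V(2) {
		V(2).Infoln("Processed", nItems, "elements")
	}
	// Shorter form: arguments are always evaluated, output only if enabled.
	V(1).Infoln("Processed", nItems, "elements")
}
```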
@@ -4,7 +4,7 @@ import (
"net/http"

"github.com/aws/aws-sdk-go/service/iam"
-"github.com/seaweedfs/seaweedfs/weed/glog"
+"github.com/seaweedfs/seaweedfs/weed/util/log"
"github.com/seaweedfs/seaweedfs/weed/s3api/s3err"
)

@@ -20,13 +20,13 @@ func writeIamErrorResponse(w http.ResponseWriter, r *http.Request, iamError *Iam

if iamError == nil {
// Do nothing if there is no error
-glog.Errorf("No error found")
+log.Errorf("No error found")
return
}

errCode := iamError.Code
errMsg := iamError.Error.Error()
-glog.Errorf("Response %+v", errMsg)
+log.Errorf("Response %+v", errMsg)

errorResp := newErrorResponse(errCode, errMsg)
internalErrorResponse := newErrorResponse(iam.ErrCodeServiceFailureException, "Internal server error")

@@ -13,7 +13,7 @@ import (
"sync"
"time"

-"github.com/seaweedfs/seaweedfs/weed/glog"
+"github.com/seaweedfs/seaweedfs/weed/util/log"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/pb/iam_pb"
"github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants"
@@ -245,7 +245,7 @@ func (iama *IamApiServer) PutUserPolicy(s3cfg *iam_pb.S3ApiConfiguration, values
return PutUserPolicyResponse{}, &IamError{Code: iam.ErrCodeMalformedPolicyDocumentException, Error: err}
}
// Log the actions
-glog.V(3).Infof("PutUserPolicy: actions=%v", actions)
+log.V(0).Infof("PutUserPolicy: actions=%v", actions)
for _, ident := range s3cfg.Identities {
if userName != ident.Name {
continue
@@ -332,14 +332,14 @@ func GetActions(policy *PolicyDocument) ([]string, error) {
// Parse "arn:aws:s3:::my-bucket/shared/*"
res := strings.Split(resource, ":")
if len(res) != 6 || res[0] != "arn" || res[1] != "aws" || res[2] != "s3" {
-glog.Infof("not a valid resource: %s", res)
+log.Infof("not a valid resource: %s", res)
continue
}
for _, action := range statement.Action {
// Parse "s3:Get*"
act := strings.Split(action, ":")
if len(act) != 2 || act[0] != "s3" {
-glog.Infof("not a valid action: %s", act)
+log.Infof("not a valid action: %s", act)
continue
}
statementAction := MapToStatementAction(act[1])
@@ -423,7 +423,7 @@ func handleImplicitUsername(r *http.Request, values url.Values) {
// "AWS4-HMAC-SHA256 Credential=197FSAQ7HHTA48X64O3A/20220420/test1/iam/aws4_request, SignedHeaders=content-type;
// host;x-amz-date, Signature=6757dc6b3d7534d67e17842760310e99ee695408497f6edc4fdb84770c252dc8",
// the "test1" will be extracted as the username
-glog.V(4).Infof("Authorization field: %v", r.Header["Authorization"][0])
+log.V(-1).Infof("Authorization field: %v", r.Header["Authorization"][0])
s := strings.Split(r.Header["Authorization"][0], "Credential=")
if len(s) < 2 {
return
@@ -452,7 +452,7 @@ func (iama *IamApiServer) DoActions(w http.ResponseWriter, r *http.Request) {
return
}

-glog.V(4).Infof("DoActions: %+v", values)
+log.V(-1).Infof("DoActions: %+v", values)
var response interface{}
var iamError *IamError
changed := true
@@ -477,7 +477,7 @@ func (iama *IamApiServer) DoActions(w http.ResponseWriter, r *http.Request) {
case "UpdateUser":
response, iamError = iama.UpdateUser(s3cfg, values)
if iamError != nil {
-glog.Errorf("UpdateUser: %+v", iamError.Error)
+log.Errorf("UpdateUser: %+v", iamError.Error)
s3err.WriteErrorResponse(w, r, s3err.ErrInvalidRequest)
return
}
@@ -497,7 +497,7 @@ func (iama *IamApiServer) DoActions(w http.ResponseWriter, r *http.Request) {
case "CreatePolicy":
response, iamError = iama.CreatePolicy(s3cfg, values)
if iamError != nil {
-glog.Errorf("CreatePolicy: %+v", iamError.Error)
+log.Errorf("CreatePolicy: %+v", iamError.Error)
s3err.WriteErrorResponse(w, r, s3err.ErrInvalidRequest)
return
}
@@ -505,7 +505,7 @@ func (iama *IamApiServer) DoActions(w http.ResponseWriter, r *http.Request) {
var iamError *IamError
response, iamError = iama.PutUserPolicy(s3cfg, values)
if iamError != nil {
-glog.Errorf("PutUserPolicy: %+v", iamError.Error)
+log.Errorf("PutUserPolicy: %+v", iamError.Error)

writeIamErrorResponse(w, r, iamError)
return
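GetActions above validates resources like "arn:aws:s3:::my-bucket/shared/*" by splitting on ":" and checking the fixed prefix fields. A standalone sketch of that check; `bucketFromResource` is an illustrative helper, not code from the repository:

```go
package iamdemo

import "strings"

// bucketFromResource returns the bucket path portion of an S3 ARN, or
// ok=false if the string is not a six-field arn:aws:s3 resource.
func bucketFromResource(resource string) (string, bool) {
	res := strings.Split(resource, ":")
	if len(res) != 6 || res[0] != "arn" || res[1] != "aws" || res[2] != "s3" {
		return "", false
	}
	return res[5], true // e.g. "my-bucket/shared/*"
}
```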
@@ -10,13 +10,13 @@ import (

 	"github.com/cognusion/imaging"

-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 )

 func Cropped(ext string, read io.ReadSeeker, x1, y1, x2, y2 int) (cropped io.ReadSeeker, err error) {
 	srcImage, _, err := image.Decode(read)
 	if err != nil {
-		glog.Error(err)
+		log.Error(err)
 		return read, err
 	}

@@ -32,15 +32,15 @@ func Cropped(ext string, read io.ReadSeeker, x1, y1, x2, y2 int) (cropped io.ReadSeeker, err error) {
 	switch ext {
 	case ".jpg", ".jpeg":
 		if err = jpeg.Encode(&buf, dstImage, nil); err != nil {
-			glog.Error(err)
+			log.Error(err)
 		}
 	case ".png":
 		if err = png.Encode(&buf, dstImage); err != nil {
-			glog.Error(err)
+			log.Error(err)
 		}
 	case ".gif":
 		if err = gif.Encode(&buf, dstImage, nil); err != nil {
-			glog.Error(err)
+			log.Error(err)
 		}
 	}
 	return bytes.NewReader(buf.Bytes()), err

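For reference, a hypothetical caller of the function above. The images package name and the input path are assumptions, not taken from this diff; the fallback behavior, though, is visible in the hunks: on error, Cropped returns the original reader so the caller can serve the full image.

package main

import (
	"fmt"
	"os"

	"github.com/seaweedfs/seaweedfs/weed/images"
)

func main() {
	f, err := os.Open("photo.jpg") // hypothetical input file
	if err != nil {
		fmt.Println(err)
		return
	}
	defer f.Close()

	// Crop the top-left 100x100 region and re-encode per the ".jpg" hint.
	cropped, err := images.Cropped(".jpg", f, 0, 0, 100, 100)
	if err != nil {
		fmt.Println("crop failed, serving original:", err)
	}
	_ = cropped
}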
@@ -10,7 +10,7 @@ import (

 	"github.com/cognusion/imaging"

-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"

 	_ "golang.org/x/image/webp"
 )
@@ -55,7 +55,7 @@ func Resized(ext string, read io.ReadSeeker, width, height int, mode string) (resized io.ReadSeeker, w int, h int) {
 		}
 		return bytes.NewReader(buf.Bytes()), dstImage.Bounds().Dx(), dstImage.Bounds().Dy()
 	} else {
-		glog.Error(err)
+		log.Error(err)
 	}
 	return read, 0, 0
 }

@@ -5,7 +5,7 @@ import (
 	"io"
 	"sync"

-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"github.com/seaweedfs/seaweedfs/weed/mount/page_writer"
 	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
 )
@@ -41,7 +41,7 @@ func newMemoryChunkPages(fh *FileHandle, chunkSize int64) *ChunkedDirtyPages {
 func (pages *ChunkedDirtyPages) AddPage(offset int64, data []byte, isSequential bool, tsNs int64) {
 	pages.hasWrites = true

-	glog.V(4).Infof("%v memory AddPage [%d, %d)", pages.fh.fh, offset, offset+int64(len(data)))
+	log.V(-1).Infof("%v memory AddPage [%d, %d)", pages.fh.fh, offset, offset+int64(len(data)))
 	pages.uploadPipeline.SaveDataAt(data, offset, isSequential, tsNs)

 	return
@@ -73,13 +73,13 @@ func (pages *ChunkedDirtyPages) saveChunkedFileIntervalToStorage(reader io.Reade
 	fileName := fileFullPath.Name()
 	chunk, err := pages.fh.wfs.saveDataAsChunk(fileFullPath)(reader, fileName, offset, modifiedTsNs)
 	if err != nil {
-		glog.V(0).Infof("%v saveToStorage [%d,%d): %v", fileFullPath, offset, offset+size, err)
+		log.V(3).Infof("%v saveToStorage [%d,%d): %v", fileFullPath, offset, offset+size, err)
 		pages.lastErr = err
 		return
 	}
 	pages.fh.AddChunks([]*filer_pb.FileChunk{chunk})
 	pages.fh.entryChunkGroup.AddChunk(chunk)
-	glog.V(3).Infof("%v saveToStorage %s [%d,%d)", fileFullPath, chunk.FileId, offset, offset+size)
+	log.V(0).Infof("%v saveToStorage %s [%d,%d)", fileFullPath, chunk.FileId, offset, offset+size)

 }

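One pattern worth noting across these hunks: glog.V(0) became log.V(3), glog.V(3) became log.V(0), and glog.V(4) became log.V(-1). That is consistent with zap's level scale (Debug is -1, Info is 0, larger numbers are more severe), i.e. roughly zapLevel = 3 - glogVerbosity. A small sketch of that apparent convention, inferred from the substitutions in this diff rather than taken from the commit:

package main

import (
	"fmt"

	"go.uber.org/zap/zapcore"
)

// glogToZapLevel maps a glog verbosity (0 = always shown, higher = more
// verbose) onto zap's scale, matching the substitutions visible in this
// diff. Assumption: the pattern holds beyond the levels actually seen here.
func glogToZapLevel(glogV int) zapcore.Level {
	return zapcore.Level(3 - glogV)
}

func main() {
	for _, v := range []int{0, 3, 4} {
		fmt.Printf("glog.V(%d) -> log.V(%d)\n", v, glogToZapLevel(v))
	}
}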
@@ -2,7 +2,7 @@ package mount

 import (
 	"github.com/seaweedfs/seaweedfs/weed/filer"
-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
 	"github.com/seaweedfs/seaweedfs/weed/util"
 	"os"
@@ -77,10 +77,10 @@ func (fh *FileHandle) SetEntry(entry *filer_pb.Entry) {
 		var resolveManifestErr error
 		fh.entryChunkGroup, resolveManifestErr = filer.NewChunkGroup(fh.wfs.LookupFn(), fh.wfs.chunkCache, entry.Chunks)
 		if resolveManifestErr != nil {
-			glog.Warningf("failed to resolve manifest chunks in %+v", entry)
+			log.Warningf("failed to resolve manifest chunks in %+v", entry)
 		}
 	} else {
-		glog.Fatalf("setting file handle entry to nil")
+		log.Fatalf("setting file handle entry to nil")
 	}
 	fh.entry.SetEntry(entry)
 }

Some files were not shown because too many files have changed in this diff.