diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
index 111f9aa6e..53423f4b7 100644
--- a/.github/ISSUE_TEMPLATE/bug_report.md
+++ b/.github/ISSUE_TEMPLATE/bug_report.md
@@ -8,6 +8,7 @@ assignees: ''
---
Sponsors SeaweedFS via Patreon https://www.patreon.com/seaweedfs
+Report issues here. Ask questions at https://stackoverflow.com/questions/tagged/seaweedfs
example of a good issue report:
https://github.com/chrislusf/seaweedfs/issues/1005
diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml
new file mode 100644
index 000000000..00781fcf6
--- /dev/null
+++ b/.github/workflows/go.yml
@@ -0,0 +1,37 @@
+name: Go
+
+on:
+ push:
+ branches: [ master ]
+ pull_request:
+ branches: [ master ]
+
+jobs:
+
+ build:
+ name: Build
+ runs-on: ubuntu-latest
+ steps:
+
+ - name: Set up Go 1.x
+ uses: actions/setup-go@v2
+ with:
+ go-version: ^1.13
+ id: go
+
+ - name: Check out code into the Go module directory
+ uses: actions/checkout@v2
+
+ - name: Get dependencies
+ run: |
+ cd weed; go get -v -t -d ./...
+ if [ -f Gopkg.toml ]; then
+ curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh
+ dep ensure
+ fi
+
+ - name: Build
+ run: cd weed; go build -v .
+
+ - name: Test
+ run: cd weed; go test -v .
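
The new workflow can be sanity-checked without pushing by running the same steps locally; this is a sketch mirroring the job above, not part of the patch:

```bash
# mirror the CI job defined in go.yml locally
cd weed
go get -v -t -d ./...   # fetch build and test dependencies
go build -v .           # same as the workflow's Build step
go test -v .            # same as the workflow's Test step
```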
diff --git a/.travis.yml b/.travis.yml
index b7467ab8a..bad4a77f1 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,9 +1,9 @@
sudo: false
language: go
go:
- - 1.11.x
- 1.12.x
- 1.13.x
+ - 1.14.x
before_install:
- export PATH=/home/travis/gopath/bin:$PATH
@@ -45,4 +45,4 @@ deploy:
on:
tags: true
repo: chrislusf/seaweedfs
- go: 1.13.x
+ go: 1.14.x
diff --git a/Makefile b/Makefile
index ce20a482b..59a3c46a2 100644
--- a/Makefile
+++ b/Makefile
@@ -8,11 +8,14 @@ appname := weed
sources := $(wildcard *.go)
-build = CGO_ENABLED=0 GOOS=$(1) GOARCH=$(2) go build -ldflags "-extldflags -static" -o build/$(appname)$(3) $(SOURCE_DIR)
+COMMIT ?= $(shell git rev-parse --short HEAD)
+LDFLAGS ?= -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${COMMIT}
+
+build = CGO_ENABLED=0 GOOS=$(1) GOARCH=$(2) go build -ldflags "-extldflags -static $(LDFLAGS)" -o build/$(appname)$(3) $(SOURCE_DIR)
tar = cd build && tar -cvzf $(1)_$(2).tar.gz $(appname)$(3) && rm $(appname)$(3)
zip = cd build && zip $(1)_$(2).zip $(appname)$(3) && rm $(appname)$(3)
-build_large = CGO_ENABLED=0 GOOS=$(1) GOARCH=$(2) go build -tags 5BytesOffset -ldflags "-extldflags -static" -o build/$(appname)$(3) $(SOURCE_DIR)
+build_large = CGO_ENABLED=0 GOOS=$(1) GOARCH=$(2) go build -tags 5BytesOffset -ldflags "-extldflags -static $(LDFLAGS)" -o build/$(appname)$(3) $(SOURCE_DIR)
tar_large = cd build && tar -cvzf $(1)_$(2)_large_disk.tar.gz $(appname)$(3) && rm $(appname)$(3)
zip_large = cd build && zip $(1)_$(2)_large_disk.zip $(appname)$(3) && rm $(appname)$(3)
@@ -31,11 +34,11 @@ deps:
rm -rf /home/travis/gopath/src/go.etcd.io/etcd/vendor/golang.org/x/net/trace
build: deps
- go build $(GO_FLAGS) -o $(BINARY) $(SOURCE_DIR)
+ go build $(GO_FLAGS) -ldflags "$(LDFLAGS)" -o $(BINARY) $(SOURCE_DIR)
linux: deps
mkdir -p linux
- GOOS=linux GOARCH=amd64 go build $(GO_FLAGS) -o linux/$(BINARY) $(SOURCE_DIR)
+ GOOS=linux GOARCH=amd64 go build $(GO_FLAGS) -ldflags "$(LDFLAGS)" -o linux/$(BINARY) $(SOURCE_DIR)
release: deps windows_build darwin_build linux_build bsd_build 5_byte_linux_build 5_byte_darwin_build 5_byte_windows_build
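
The `-X` linker flag above overwrites a package-level string variable at link time. A minimal sketch of the mechanism, assuming `weed/util` declares `var COMMIT string` as the flag's target (that variable name is taken from the LDFLAGS line above; the `weed version` behavior is an assumption):

```bash
# stamp the current commit into the binary at link time
COMMIT=$(git rev-parse --short HEAD)
cd weed
go build -ldflags "-X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${COMMIT}" -o weed .
./weed version   # assumption: the version output includes the embedded commit
```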
diff --git a/README.md b/README.md
index 8c32a4e58..25ace96a8 100644
--- a/README.md
+++ b/README.md
@@ -50,6 +50,7 @@ Your support will be really appreciated by me and other supporters!
- [Download Binaries for different platforms](https://github.com/chrislusf/seaweedfs/releases/latest)
- [SeaweedFS on Slack](https://join.slack.com/t/seaweedfs/shared_invite/enQtMzI4MTMwMjU2MzA3LTEyYzZmZWYzOGQ3MDJlZWMzYmI0OTE4OTJiZjJjODBmMzUxNmYwODg0YjY3MTNlMjBmZDQ1NzQ5NDJhZWI2ZmY)
+- [SeaweedFS on Twitter](https://twitter.com/SeaweedFS)
- [SeaweedFS Mailing List](https://groups.google.com/d/forum/seaweedfs)
- [Wiki Documentation](https://github.com/chrislusf/seaweedfs/wiki)
- [SeaweedFS Introduction Slides](https://www.slideshare.net/chrislusf/seaweedfs-introduction)
@@ -89,7 +90,7 @@ There is only 40 bytes of disk storage overhead for each file's metadata. It is
SeaweedFS started by implementing [Facebook's Haystack design paper](http://www.usenix.org/event/osdi10/tech/full_papers/Beaver.pdf). Also, SeaweedFS implements erasure coding with ideas from [f4: Facebook's Warm BLOB Storage System](https://www.usenix.org/system/files/conference/osdi14/osdi14-paper-muralidhar.pdf)
-On top of the object store, optional [Filer] can support directories and POSIX attributes. Filer is a separate linearly-scalable stateless server with customizable metadata stores, e.g., MySql, Postgres, Redis, Etcd, Cassandra, LevelDB, MemSql, TiDB, TiKV, CockroachDB, etc.
+On top of the object store, optional [Filer] can support directories and POSIX attributes. Filer is a separate linearly-scalable stateless server with customizable metadata stores, e.g., MySql, Postgres, Mongodb, Redis, Etcd, Cassandra, LevelDB, MemSql, TiDB, TiKV, CockroachDB, etc.
[Back to TOC](#table-of-contents)
@@ -98,9 +99,10 @@ On top of the object store, optional [Filer] can support directories and POSIX a
* Automatic master servers failover - no single point of failure (SPOF).
* Automatic Gzip compression depending on file mime type.
* Automatic compaction to reclaim disk space after deletion or update.
-* Servers in the same cluster can have different disk spaces, file systems, OS etc.
+* [Automatic entry TTL expiration][VolumeServerTTL].
+* Any server with some spare disk space can be added to the total storage space.
* Adding/Removing servers does **not** cause any data re-balancing.
-* Optionally fix the orientation for jpeg pictures.
+* Optional picture resizing.
* Support ETag, Accept-Range, Last-Modified, etc.
* Support in-memory/leveldb/readonly mode tuning for memory/performance balance.
* Support rebalancing the writable and readonly volumes.
@@ -116,6 +118,8 @@ On top of the object store, optional [Filer] can support directories and POSIX a
* [Hadoop Compatible File System][Hadoop] to access files from Hadoop/Spark/Flink/etc jobs.
* [Async Backup To Cloud][BackupToCloud] has extremely fast local access and backups to Amazon S3, Google Cloud Storage, Azure, BackBlaze.
* [WebDAV] access as a mapped drive on Mac and Windows, or from mobile devices.
+* [AES256-GCM Encrypted Storage][FilerDataEncryption] safely stores the encrypted data.
+* [File TTL][FilerTTL] automatically purges file metadata and the actual file data.
[Filer]: https://github.com/chrislusf/seaweedfs/wiki/Directories-and-Files
[Mount]: https://github.com/chrislusf/seaweedfs/wiki/Mount
@@ -125,6 +129,9 @@ On top of the object store, optional [Filer] can support directories and POSIX a
[WebDAV]: https://github.com/chrislusf/seaweedfs/wiki/WebDAV
[ErasureCoding]: https://github.com/chrislusf/seaweedfs/wiki/Erasure-coding-for-warm-storage
[CloudTier]: https://github.com/chrislusf/seaweedfs/wiki/Cloud-Tier
+[FilerDataEncryption]: https://github.com/chrislusf/seaweedfs/wiki/Filer-Data-Encryption
+[FilerTTL]: https://github.com/chrislusf/seaweedfs/wiki/Filer-Stores
+[VolumeServerTTL]: https://github.com/chrislusf/seaweedfs/wiki/Store-file-with-a-Time-To-Live
[Back to TOC](#table-of-contents)
@@ -354,7 +361,7 @@ The architectures are mostly the same. SeaweedFS aims to store and read files fa
* SeaweedFS optimizes for small files, ensuring O(1) disk seek operation, and can also handle large files.
* SeaweedFS statically assigns a volume id for a file. Locating file content becomes just a lookup of the volume id, which can be easily cached.
-* SeaweedFS Filer metadata store can be any well-known and proven data stores, e.g., Cassandra, Redis, Etcd, MySql, Postgres, MemSql, TiDB, CockroachDB, etc, and is easy to customized.
+* SeaweedFS Filer metadata store can be any well-known and proven data store, e.g., Cassandra, Mongodb, Redis, Etcd, MySql, Postgres, MemSql, TiDB, CockroachDB, etc., and is easy to customize.
* SeaweedFS Volume server also communicates directly with clients via HTTP, supporting range queries, direct uploads, etc.
| System | File Meta | File Content Read| POSIX | REST API | Optimized for small files |
@@ -363,6 +370,7 @@ The architectures are mostly the same. SeaweedFS aims to store and read files fa
| SeaweedFS Filer| Linearly Scalable, Customizable | O(1) disk seek | FUSE | Yes | Yes |
| GlusterFS | hashing | | FUSE, NFS | | |
| Ceph | hashing + rules | | FUSE | Yes | |
+| MooseFS | in memory | | FUSE | | No |
[Back to TOC](#table-of-contents)
@@ -374,6 +382,14 @@ GlusterFS hashes the path and filename into ids, and assigned to virtual volumes
[Back to TOC](#table-of-contents)
+### Compared to MooseFS ###
+
+MooseFS chooses to neglect the small file issue. From the MooseFS 3.0 manual, "even a small file will occupy 64KiB plus additionally 4KiB of checksums and 1KiB for the header", because it "was initially designed for keeping large amounts (like several thousands) of very big files".
+
+MooseFS Master Server keeps all metadata in memory, the same issue as the HDFS namenode.
+
+[Back to TOC](#table-of-contents)
+
### Compared to Ceph ###
Ceph can be setup similar to SeaweedFS as a key->blob store. It is much more complicated, with the need to support layers on top of it. [Here is a more detailed comparison](https://github.com/chrislusf/seaweedfs/issues/120)
@@ -386,7 +402,7 @@ Ceph uses CRUSH hashing to automatically manage the data placement. SeaweedFS pl
SeaweedFS is optimized for small files. Small files are stored as one continuous block of content, with at most 8 unused bytes between files. Small file access is O(1) disk read.
-SeaweedFS Filer uses off-the-shelf stores, such as MySql, Postgres, Redis, Etcd, Cassandra, MemSql, TiDB, CockroachCB, to manage file directories. There are proven, scalable, and easier to manage.
+SeaweedFS Filer uses off-the-shelf stores, such as MySql, Postgres, Mongodb, Redis, Etcd, Cassandra, MemSql, TiDB, CockroachDB, to manage file directories. These stores are proven, scalable, and easier to manage.
| SeaweedFS | comparable to Ceph | advantage |
| ------------- | ------------- | ---------------- |
@@ -434,6 +450,12 @@ go get github.com/chrislusf/seaweedfs/weed
Once this is done, you will find the executable "weed" in your `$GOPATH/bin` directory
+Note:
+* If you run into the following panic, try `rm -Rf $GOPATH/src/go.etcd.io/etcd/vendor/golang.org/x/net/trace` and build again.
+```
+panic: /debug/requests is already registered. You may have two independent copies of golang.org/x/net/trace in your binary, trying to maintain separate state. This may involve a vendored copy of golang.org/x/net/trace.
+```
+
Step 4: after you modify your code locally, you could start a local build by calling `go install` under
```
@@ -461,50 +483,49 @@ My Own Unscientific Single Machine Results on Mac Book with Solid State Disk, CP
Write 1 million 1KB file:
```
Concurrency Level: 16
-Time taken for tests: 88.796 seconds
+Time taken for tests: 66.753 seconds
Complete requests: 1048576
Failed requests: 0
-Total transferred: 1106764659 bytes
-Requests per second: 11808.87 [#/sec]
-Transfer rate: 12172.05 [Kbytes/sec]
+Total transferred: 1106789009 bytes
+Requests per second: 15708.23 [#/sec]
+Transfer rate: 16191.69 [Kbytes/sec]
Connection Times (ms)
min avg max std
-Total: 0.2 1.3 44.8 0.9
+Total: 0.3 1.0 84.3 0.9
Percentage of the requests served within a certain time (ms)
- 50% 1.1 ms
- 66% 1.3 ms
- 75% 1.5 ms
- 80% 1.7 ms
- 90% 2.1 ms
- 95% 2.6 ms
- 98% 3.7 ms
- 99% 4.6 ms
- 100% 44.8 ms
+ 50% 0.8 ms
+ 66% 1.0 ms
+ 75% 1.1 ms
+ 80% 1.2 ms
+ 90% 1.4 ms
+ 95% 1.7 ms
+ 98% 2.1 ms
+ 99% 2.6 ms
+ 100% 84.3 ms
```
Randomly read 1 million files:
```
Concurrency Level: 16
-Time taken for tests: 34.263 seconds
+Time taken for tests: 22.301 seconds
Complete requests: 1048576
Failed requests: 0
-Total transferred: 1106762945 bytes
-Requests per second: 30603.34 [#/sec]
-Transfer rate: 31544.49 [Kbytes/sec]
+Total transferred: 1106812873 bytes
+Requests per second: 47019.38 [#/sec]
+Transfer rate: 48467.57 [Kbytes/sec]
Connection Times (ms)
min avg max std
-Total: 0.0 0.5 20.7 0.7
+Total: 0.0 0.3 54.1 0.2
Percentage of the requests served within a certain time (ms)
- 50% 0.4 ms
- 75% 0.5 ms
- 95% 0.6 ms
- 98% 0.8 ms
- 99% 1.2 ms
- 100% 20.7 ms
+ 50% 0.3 ms
+ 90% 0.4 ms
+ 98% 0.6 ms
+ 99% 0.7 ms
+ 100% 54.1 ms
```
[Back to TOC](#table-of-contents)
diff --git a/backers.md b/backers.md
index 97a5e9081..725615237 100644
--- a/backers.md
+++ b/backers.md
@@ -6,6 +6,7 @@
- [4Sight Imaging](https://www.4sightimaging.com/)
- [Evercam Camera Management Software](https://evercam.io/)
+- [Admiral](https://getadmiral.com)
Backers
diff --git a/docker/Dockerfile.local b/docker/Dockerfile.local
new file mode 100644
index 000000000..693d8a952
--- /dev/null
+++ b/docker/Dockerfile.local
@@ -0,0 +1,29 @@
+FROM alpine AS final
+LABEL author="Chris Lu"
+COPY ./weed /usr/bin/
+RUN mkdir -p /etc/seaweedfs
+COPY ./filer.toml /etc/seaweedfs/filer.toml
+COPY ./entrypoint.sh /entrypoint.sh
+
+# volume server grpc port
+EXPOSE 18080
+# volume server http port
+EXPOSE 8080
+# filer server grpc port
+EXPOSE 18888
+# filer server http port
+EXPOSE 8888
+# master server shared grpc port
+EXPOSE 19333
+# master server shared http port
+EXPOSE 9333
+# s3 server http port
+EXPOSE 8333
+
+RUN mkdir -p /data/filerldb2
+
+VOLUME /data
+
+RUN chmod +x /entrypoint.sh
+
+ENTRYPOINT ["/entrypoint.sh"]
diff --git a/docker/Makefile b/docker/Makefile
new file mode 100644
index 000000000..166188bc3
--- /dev/null
+++ b/docker/Makefile
@@ -0,0 +1,19 @@
+all: gen
+
+.PHONY : gen
+
+gen: dev
+
+build:
+ cd ../weed; GOOS=linux go build; mv weed ../docker/
+ docker build --no-cache -t chrislusf/seaweedfs:local -f Dockerfile.local .
+ rm ./weed
+
+dev: build
+ docker-compose -f local-dev-compose.yml -p seaweedfs up
+
+cluster: build
+ docker-compose -f local-cluster-compose.yml -p seaweedfs up
+
+clean:
+ rm ./weed
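
Typical usage of the new Makefile targets, grounded in the recipes above:

```bash
cd $GOPATH/src/github.com/chrislusf/seaweedfs/docker
make          # build chrislusf/seaweedfs:local and bring up the single-node dev stack
make cluster  # bring up the 3-master cluster from local-cluster-compose.yml
```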
diff --git a/docker/README.md b/docker/README.md
index 1a2833c7e..65241b517 100644
--- a/docker/README.md
+++ b/docker/README.md
@@ -25,7 +25,5 @@ docker-compose -f seaweedfs-dev-compose.yml -p seaweedfs up
```bash
cd $GOPATH/src/github.com/chrislusf/seaweedfs/docker
-
-docker-compose -f local-dev-compose.yml -p seaweedfs up
-
+make
```
diff --git a/docker/entrypoint.sh b/docker/entrypoint.sh
index 791527d3a..05db7a672 100755
--- a/docker/entrypoint.sh
+++ b/docker/entrypoint.sh
@@ -3,44 +3,33 @@
case "$1" in
'master')
- ARGS="-mdir /data"
- # Is this instance linked with an other master? (Docker commandline "--link master1:master")
- if [ -n "$MASTER_PORT_9333_TCP_ADDR" ] ; then
- ARGS="$ARGS -peers=$MASTER_PORT_9333_TCP_ADDR:$MASTER_PORT_9333_TCP_PORT"
- fi
+ ARGS="-mdir=/data -volumePreallocate -volumeSizeLimitMB=1024"
exec /usr/bin/weed $@ $ARGS
;;
'volume')
- ARGS="-ip `hostname -i` -dir /data"
- # Is this instance linked with a master? (Docker commandline "--link master1:master")
- if [ -n "$MASTER_PORT_9333_TCP_ADDR" ] ; then
- ARGS="$ARGS -mserver=$MASTER_PORT_9333_TCP_ADDR:$MASTER_PORT_9333_TCP_PORT"
- fi
+ ARGS="-dir=/data -max=0"
+ if [[ $@ == *"-max="* ]]; then
+ ARGS="-dir=/data"
+ fi
exec /usr/bin/weed $@ $ARGS
;;
'server')
- ARGS="-ip `hostname -i` -dir /data"
- if [ -n "$MASTER_PORT_9333_TCP_ADDR" ] ; then
- ARGS="$ARGS -master.peers=$MASTER_PORT_9333_TCP_ADDR:$MASTER_PORT_9333_TCP_PORT"
- fi
+ ARGS="-dir=/data -volume.max=0 -master.volumePreallocate -master.volumeSizeLimitMB=1024"
+ if [[ $@ == *"-volume.max="* ]]; then
+ ARGS="-dir=/data -master.volumePreallocate -master.volumeSizeLimitMB=1024"
+ fi
exec /usr/bin/weed $@ $ARGS
;;
'filer')
ARGS=""
- if [ -n "$MASTER_PORT_9333_TCP_ADDR" ] ; then
- ARGS="$ARGS -master=$MASTER_PORT_9333_TCP_ADDR:$MASTER_PORT_9333_TCP_PORT"
- fi
exec /usr/bin/weed $@ $ARGS
;;
's3')
ARGS="-domainName=$S3_DOMAIN_NAME -key.file=$S3_KEY_FILE -cert.file=$S3_CERT_FILE"
- if [ -n "$FILER_PORT_8888_TCP_ADDR" ] ; then
- ARGS="$ARGS -filer=$FILER_PORT_8888_TCP_ADDR:$FILER_PORT_8888_TCP_PORT"
- fi
exec /usr/bin/weed $@ $ARGS
;;
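
The new volume/server branches only supply the default `-max=0` (unlimited volume count) when the caller has not passed `-max=` themselves. A usage sketch of that behavior (the image tag matches the local build above):

```bash
# default: entrypoint appends -dir=/data -max=0 (unlimited volumes)
docker run chrislusf/seaweedfs:local volume -mserver=master:9333 -port=8080

# caller sets -max explicitly: the entrypoint detects "-max=" in "$@"
# and appends only -dir=/data, leaving the caller's limit in effect
docker run chrislusf/seaweedfs:local volume -max=10 -mserver=master:9333 -port=8080
```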
diff --git a/docker/local-cluster-compose.yml b/docker/local-cluster-compose.yml
new file mode 100644
index 000000000..a1ac824e7
--- /dev/null
+++ b/docker/local-cluster-compose.yml
@@ -0,0 +1,53 @@
+version: '2'
+
+services:
+ master0:
+ image: chrislusf/seaweedfs:local
+ ports:
+ - 9333:9333
+ - 19333:19333
+ command: "master -ip=master0 -port=9333 -peers=master0:9333,master1:9334,master2:9335"
+ master1:
+ image: chrislusf/seaweedfs:local
+ ports:
+ - 9334:9334
+ - 19334:19334
+ command: "master -ip=master1 -port=9334 -peers=master0:9333,master1:9334,master2:9335"
+ master2:
+ image: chrislusf/seaweedfs:local
+ ports:
+ - 9335:9335
+ - 19335:19335
+ command: "master -ip=master2 -port=9335 -peers=master0:9333,master1:9334,master2:9335"
+ volume:
+ image: chrislusf/seaweedfs:local
+ ports:
+ - 8080:8080
+ - 18080:18080
+ command: 'volume -mserver="master0:9333,master1:9334,master2:9335" -port=8080 -ip=volume'
+ depends_on:
+ - master0
+ - master1
+ - master2
+ filer:
+ image: chrislusf/seaweedfs:local
+ ports:
+ - 8888:8888
+ - 18888:18888
+ command: 'filer -master="master0:9333,master1:9334,master2:9335"'
+ depends_on:
+ - master0
+ - master1
+ - master2
+ - volume
+ s3:
+ image: chrislusf/seaweedfs:local
+ ports:
+ - 8333:8333
+ command: 's3 -filer="filer:8888"'
+ depends_on:
+ - master0
+ - master1
+ - master2
+ - volume
+ - filer
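
Once the cluster is up (e.g. via `make cluster`), leader election across the three masters can be checked from the host through the master's `/cluster/status` endpoint; a quick sketch:

```bash
# query each master's view of the cluster; one should report itself as leader
curl http://localhost:9333/cluster/status
curl http://localhost:9334/cluster/status
curl http://localhost:9335/cluster/status
```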
diff --git a/docker/local-dev-compose.yml b/docker/local-dev-compose.yml
index c2f588a60..f6fd0f4ce 100644
--- a/docker/local-dev-compose.yml
+++ b/docker/local-dev-compose.yml
@@ -2,42 +2,34 @@ version: '2'
services:
master:
- build:
- context: .
- dockerfile: Dockerfile.go_build
+ image: chrislusf/seaweedfs:local
ports:
- - 9333:9333
- - 19333:19333
+ - 9333:9333
+ - 19333:19333
command: "master -ip=master"
volume:
- build:
- context: .
- dockerfile: Dockerfile.go_build
+ image: chrislusf/seaweedfs:local
ports:
- - 8080:8080
- - 18080:18080
- command: '-v=2 volume -max=5 -mserver="master:9333" -port=8080 -ip=volume'
+ - 8080:8080
+ - 18080:18080
+ command: "volume -mserver=master:9333 -port=8080 -ip=volume"
depends_on:
- - master
+ - master
filer:
- build:
- context: .
- dockerfile: Dockerfile.go_build
+ image: chrislusf/seaweedfs:local
ports:
- - 8888:8888
- - 18888:18888
- command: '-v=4 filer -master="master:9333"'
+ - 8888:8888
+ - 18888:18888
+ command: 'filer -master="master:9333"'
depends_on:
- - master
- - volume
+ - master
+ - volume
s3:
- build:
- context: .
- dockerfile: Dockerfile.go_build
+ image: chrislusf/seaweedfs:local
ports:
- - 8333:8333
- command: '-v=4 s3 -filer="filer:8888"'
+ - 8333:8333
+ command: 's3 -filer="filer:8888"'
depends_on:
- - master
- - volume
- - filer
+ - master
+ - volume
+ - filer
diff --git a/docker/seaweedfs-compose.yml b/docker/seaweedfs-compose.yml
index 7f0cbc6f9..70d005017 100644
--- a/docker/seaweedfs-compose.yml
+++ b/docker/seaweedfs-compose.yml
@@ -4,28 +4,28 @@ services:
master:
image: chrislusf/seaweedfs # use a remote image
ports:
- - 9333:9333
- - 19333:19333
+ - 9333:9333
+ - 19333:19333
command: "master -ip=master"
volume:
image: chrislusf/seaweedfs # use a remote image
ports:
- - 8080:8080
- - 18080:18080
- command: 'volume -max=15 -mserver="master:9333" -port=8080'
+ - 8080:8080
+ - 18080:18080
+ command: 'volume -mserver="master:9333" -port=8080'
depends_on:
- - master
+ - master
filer:
image: chrislusf/seaweedfs # use a remote image
ports:
- - 8888:8888
- - 18888:18888
+ - 8888:8888
+ - 18888:18888
command: 'filer -master="master:9333"'
tty: true
stdin_open: true
depends_on:
- - master
- - volume
+ - master
+ - volume
cronjob:
image: chrislusf/seaweedfs # use a remote image
command: 'cronjob'
@@ -34,14 +34,14 @@ services:
CRON_SCHEDULE: '*/2 * * * * *' # Default: '*/5 * * * * *'
WEED_MASTER: master:9333 # Default: localhost:9333
depends_on:
- - master
- - volume
+ - master
+ - volume
s3:
image: chrislusf/seaweedfs # use a remote image
ports:
- - 8333:8333
+ - 8333:8333
command: 's3 -filer="filer:8888"'
depends_on:
- - master
- - volume
- - filer
+ - master
+ - volume
+ - filer
diff --git a/docker/seaweedfs-dev-compose.yml b/docker/seaweedfs-dev-compose.yml
index 765770084..75801102e 100644
--- a/docker/seaweedfs-dev-compose.yml
+++ b/docker/seaweedfs-dev-compose.yml
@@ -4,32 +4,32 @@ services:
master:
image: chrislusf/seaweedfs:dev # use a remote dev image
ports:
- - 9333:9333
- - 19333:19333
+ - 9333:9333
+ - 19333:19333
command: "master -ip=master"
volume:
image: chrislusf/seaweedfs:dev # use a remote dev image
ports:
- - 8080:8080
- - 18080:18080
- command: '-v=2 volume -max=5 -mserver="master:9333" -port=8080 -ip=volume'
+ - 8080:8080
+ - 18080:18080
+ command: 'volume -mserver="master:9333" -port=8080 -ip=volume'
depends_on:
- - master
+ - master
filer:
image: chrislusf/seaweedfs:dev # use a remote dev image
ports:
- - 8888:8888
- - 18888:18888
- command: '-v=4 filer -master="master:9333"'
+ - 8888:8888
+ - 18888:18888
+ command: 'filer -master="master:9333"'
depends_on:
- - master
- - volume
+ - master
+ - volume
s3:
image: chrislusf/seaweedfs:dev # use a remote dev image
ports:
- - 8333:8333
- command: '-v=4 s3 -filer="filer:8888"'
+ - 8333:8333
+ command: 's3 -filer="filer:8888"'
depends_on:
- - master
- - volume
- - filer
+ - master
+ - volume
+ - filer
diff --git a/go.mod b/go.mod
index 48879fd8c..2ba8658d3 100644
--- a/go.mod
+++ b/go.mod
@@ -7,39 +7,45 @@ require (
github.com/Azure/azure-pipeline-go v0.2.2 // indirect
github.com/Azure/azure-storage-blob-go v0.8.0
github.com/DataDog/zstd v1.4.1 // indirect
+ github.com/OneOfOne/xxhash v1.2.2
github.com/Shopify/sarama v1.23.1
github.com/aws/aws-sdk-go v1.23.13
+ github.com/buraksezer/consistent v0.0.0-20191006190839-693edf70fd72
+ github.com/cespare/xxhash v1.1.0
github.com/chrislusf/raft v0.0.0-20190225081310-10d6e2182d92
+ github.com/coreos/bbolt v1.3.3 // indirect
github.com/coreos/etcd v3.3.15+incompatible // indirect
github.com/coreos/go-semver v0.3.0 // indirect
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f // indirect
github.com/dgrijalva/jwt-go v3.2.0+incompatible
- github.com/disintegration/imaging v1.6.1
+ github.com/disintegration/imaging v1.6.2
github.com/dustin/go-humanize v1.0.0
github.com/eapache/go-resiliency v1.2.0 // indirect
github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a
+ github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c // indirect
+ github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 // indirect
github.com/facebookgo/stats v0.0.0-20151006221625-1b76add642e4
+ github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 // indirect
github.com/frankban/quicktest v1.7.2 // indirect
- github.com/gabriel-vasile/mimetype v1.0.0
- github.com/go-redis/redis v6.15.2+incompatible
+ github.com/go-redis/redis v6.15.7+incompatible
github.com/go-sql-driver/mysql v1.4.1
github.com/gocql/gocql v0.0.0-20190829130954-e163eff7a8c6
github.com/gogo/protobuf v1.2.2-0.20190730201129-28a6bbf47e48 // indirect
+ github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 // indirect
github.com/golang/protobuf v1.3.2
github.com/google/btree v1.0.0
github.com/google/uuid v1.1.1
github.com/gorilla/mux v1.7.3
github.com/gorilla/websocket v1.4.1 // indirect
+ github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4 // indirect
github.com/grpc-ecosystem/grpc-gateway v1.11.0 // indirect
github.com/hashicorp/golang-lru v0.5.3 // indirect
- github.com/jacobsa/daemonize v0.0.0-20160101105449-e460293e890f
github.com/jcmturner/gofork v1.0.0 // indirect
github.com/karlseguin/ccache v2.0.3+incompatible
github.com/karlseguin/expect v1.0.1 // indirect
github.com/klauspost/cpuid v1.2.1 // indirect
github.com/klauspost/crc32 v1.2.0
github.com/klauspost/reedsolomon v1.9.2
- github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect
github.com/kurin/blazer v0.5.3
github.com/lib/pq v1.2.0
github.com/magiconair/properties v1.8.1 // indirect
@@ -48,48 +54,43 @@ require (
github.com/nats-io/nats-server/v2 v2.0.4 // indirect
github.com/onsi/ginkgo v1.10.1 // indirect
github.com/onsi/gomega v1.7.0 // indirect
- github.com/opentracing/opentracing-go v1.1.0 // indirect
- github.com/pelletier/go-toml v1.4.0 // indirect
github.com/peterh/liner v1.1.0
github.com/pierrec/lz4 v2.2.7+incompatible // indirect
- github.com/pingcap/kvproto v0.0.0-20191022073741-81b2a2d9e986 // indirect
- github.com/pingcap/tidb v1.1.0-beta.0.20191023070859-58fc7d44f73b
- github.com/pingcap/tipb v0.0.0-20191022094114-a2e8c3fa634b // indirect
github.com/prometheus/client_golang v1.1.0
- github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 // indirect
github.com/prometheus/procfs v0.0.4 // indirect
- github.com/rakyll/statik v0.1.6
+ github.com/rakyll/statik v0.1.7
github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563 // indirect
- github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237 // indirect
- github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd
github.com/seaweedfs/fuse v0.0.0-20190510212405-310228904eff
- github.com/sirupsen/logrus v1.4.2 // indirect
+ github.com/seaweedfs/goexif v1.0.2
github.com/spaolacci/murmur3 v1.1.0 // indirect
github.com/spf13/afero v1.2.2 // indirect
github.com/spf13/jwalterweatherman v1.1.0 // indirect
github.com/spf13/viper v1.4.0
github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271 // indirect
- github.com/stretchr/testify v1.4.0
+ github.com/stretchr/testify v1.3.0
github.com/syndtr/goleveldb v1.0.0
github.com/tidwall/gjson v1.3.2
github.com/tidwall/match v1.0.1
- github.com/uber-go/atomic v1.4.0 // indirect
- github.com/uber/jaeger-client-go v2.17.0+incompatible // indirect
- github.com/uber/jaeger-lib v2.0.0+incompatible // indirect
github.com/willf/bitset v1.1.10 // indirect
github.com/willf/bloom v2.0.3+incompatible
github.com/wsxiaoys/terminal v0.0.0-20160513160801-0940f3fc43a0 // indirect
+ go.etcd.io/bbolt v1.3.3 // indirect
go.etcd.io/etcd v3.3.15+incompatible
+ go.mongodb.org/mongo-driver v1.3.2
+ go.uber.org/multierr v1.2.0 // indirect
gocloud.dev v0.16.0
gocloud.dev/pubsub/natspubsub v0.16.0
gocloud.dev/pubsub/rabbitpubsub v0.16.0
- golang.org/x/image v0.0.0-20190829233526-b3c06291d021 // indirect
+ golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7 // indirect
+ golang.org/x/image v0.0.0-20200119044424-58c23975cae1 // indirect
golang.org/x/net v0.0.0-20190909003024-a7b16738d86b
golang.org/x/sys v0.0.0-20190910064555-bbd175535a8b
golang.org/x/tools v0.0.0-20190911022129-16c5e0f7d110
google.golang.org/api v0.9.0
google.golang.org/appengine v1.6.2 // indirect
- google.golang.org/grpc v1.23.0
+ google.golang.org/genproto v0.0.0-20190905072037-92dd089d5514 // indirect
+ google.golang.org/grpc v1.26.0
+ gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect
gopkg.in/jcmturner/goidentity.v3 v3.0.0 // indirect
gopkg.in/jcmturner/gokrb5.v7 v7.3.0 // indirect
gopkg.in/karlseguin/expect.v1 v1.0.1 // indirect
diff --git a/go.sum b/go.sum
index d16280568..00329907e 100644
--- a/go.sum
+++ b/go.sum
@@ -35,13 +35,12 @@ github.com/DataDog/zstd v1.3.6-0.20190409195224-796139022798/go.mod h1:1jcaCB/uf
github.com/DataDog/zstd v1.4.1 h1:3oxKN3wbHibqx897utPC2LTQU4J+IHWWJO+glkAkpFM=
github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
github.com/GoogleCloudPlatform/cloudsql-proxy v0.0.0-20190605020000-c4ba1fdf4d36/go.mod h1:aJ4qN3TfrelA6NZ6AXsXRfmEVaYin3EDbSPJrKS8OXo=
+github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/Shopify/sarama v1.23.1 h1:XxJBCZEoWJtoWjf/xRbmGUpAmTZGnuuF0ON0EvxxBrs=
github.com/Shopify/sarama v1.23.1/go.mod h1:XLH1GYJnLVE0XCr6KdJGVJRTwY30moWNJ4sERjXX6fs=
github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc=
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
-github.com/StackExchange/wmi v0.0.0-20180725035823-b12b22c5341f h1:5ZfJxyXo8KyX8DgGXC5B7ILL8y51fci/qYz2B4j8iLY=
-github.com/StackExchange/wmi v0.0.0-20180725035823-b12b22c5341f/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
@@ -55,63 +54,49 @@ github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
-github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932 h1:mXoPYz/Ul5HYEDvkta6I8/rnYM5gSdSV2tJ6XbZuEtY=
github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k=
-github.com/blacktear23/go-proxyprotocol v0.0.0-20180807104634-af7a81e8dd0d/go.mod h1:VKt7CNAQxpFpSDz3sXyj9hY/GbVsQCr0sB3w59nE7lU=
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY=
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
+github.com/buraksezer/consistent v0.0.0-20191006190839-693edf70fd72 h1:fUmDBbSvv1uOzo/t8WaxZMVb7BxJ8JECo5lGoR9c5bA=
+github.com/buraksezer/consistent v0.0.0-20191006190839-693edf70fd72/go.mod h1:OEE5igu/CDjGegM1Jn6ZMo7R6LlV/JChAkjfQQIRLpg=
github.com/census-instrumentation/opencensus-proto v0.2.0 h1:LzQXZOgg4CQfE6bFvXGM30YZL1WW/M337pXml+GrcZ4=
github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk=
+github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/chrislusf/raft v0.0.0-20190225081310-10d6e2182d92 h1:lM9SFsh0EPXkyJyrTJqLZPAIJBtNFP6LNkYXu2MnSZI=
github.com/chrislusf/raft v0.0.0-20190225081310-10d6e2182d92/go.mod h1:4jyiUCD5y548+yKW+oiHtccBiMaLCCbFBpK2t7X4eUo=
-github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
-github.com/chzyer/readline v0.0.0-20171208011716-f6d7a1f6fbf3/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
-github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd h1:qMd81Ts1T2OTKmB4acZcyKaMtRnY5Y44NuXGX2GFJ1w=
-github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
github.com/coreos/bbolt v1.3.2 h1:wZwiHHUieZCquLkDL0B8UhzreNWsPHooDAG3q34zk0s=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/bbolt v1.3.3 h1:n6AiVyVRKQFNb6mJlwESEvvLoDyiTzXX7ORAUlkeBdY=
github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible h1:jFneRYjIvLMLhDLCzuTuU4rSJUjRplcJQ7pD7MnhC04=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
-github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/etcd v3.3.15+incompatible h1:+9RjdC18gMxNQVvSiXvObLu29mOFmkgdsB4cRTlV+EE=
github.com/coreos/etcd v3.3.15+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-semver v0.2.0 h1:3Jm3tLmsgAYcjC+4Up7hJrFBPr+n7rAqYeSw/SZazuY=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/go-systemd v0.0.0-20181031085051-9002847aa142/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f h1:JOrtw2xFKzlg+cbHpyrpLDmnN1HqhBfnX7WDiW7eG2c=
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 h1:iwZdTE0PVqJCos1vaoKsclOGD3ADKpshg3SRtYBbwso=
-github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM=
-github.com/cznic/sortutil v0.0.0-20150617083342-4c7342852e65 h1:hxuZop6tSoOi0sxFzoGGYdRqNrPubyaIf9KoBG9tPiE=
-github.com/cznic/sortutil v0.0.0-20150617083342-4c7342852e65/go.mod h1:q2w6Bg5jeox1B+QkJ6Wp/+Vn0G/bo3f1uY7Fn3vivIQ=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/devigned/tab v0.1.1/go.mod h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mzjeJY=
github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
-github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f h1:dDxpBYafY/GYpcl+LS4Bn3ziLPuEdGRkRjYAbSlWxSA=
-github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
-github.com/disintegration/imaging v1.6.1 h1:JnBbK6ECIZb1NsWIikP9pd8gIlTIRx7fuDNpU9fsxOE=
-github.com/disintegration/imaging v1.6.1/go.mod h1:xuIt+sRxDFrHS0drzXUlCJthkJ8k7lkkUojDSR247MQ=
-github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
-github.com/dustin/go-humanize v0.0.0-20180421182945-02af3965c54e/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/disintegration/imaging v1.6.2 h1:w1LecBlG2Lnp8B3jk5zSuNqd7b4DXhcjwek1ei82L+c=
+github.com/disintegration/imaging v1.6.2/go.mod h1:44/5580QXChDfwIclfc/PCwrr44amcmDAg8hxG0Ewe4=
github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/eapache/go-resiliency v1.1.0 h1:1NtRmCAqadE2FN4ZcN6g90TP3uk8cg9rn9eNK2197aU=
@@ -122,12 +107,18 @@ github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc=
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
-github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385 h1:clC1lXBpe2kTj2VHdaIu9ajZQe4kcEY9j0NsnDDBZ3o=
-github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a h1:yDWHCSQ40h88yih2JAcL6Ls/kVkSE8GFACTGVnMPruw=
github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a/go.mod h1:7Ga40egUymuWXxAe151lTNnCv97MddSOVsjpPPkityA=
+github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c h1:8ISkoahWXwZR41ois5lSJBSVw4D0OV19Ht/JSTzvSv0=
+github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c/go.mod h1:Yg+htXGokKKdzcwhuNDwVvN+uBxDGXJ7G/VN1d8fa64=
+github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 h1:JWuenKqqX8nojtoVVWjGfOF9635RETekkoH6Cc9SX0A=
+github.com/facebookgo/stack v0.0.0-20160209184415-751773369052/go.mod h1:UbMTZqLaRiH3MsBH8va0n7s1pQYcu3uTb8G4tygF4Zg=
github.com/facebookgo/stats v0.0.0-20151006221625-1b76add642e4 h1:0YtRCqIZs2+Tz49QuH6cJVw/IFqzo39gEqZ0iYLxD2M=
github.com/facebookgo/stats v0.0.0-20151006221625-1b76add642e4/go.mod h1:vsJz7uE339KUCpBXx3JAJzSRH7Uk4iGGyJzR529qDIA=
+github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 h1:7HZCaLC5+BZpmbhCOZJ293Lz68O7PYrF2EzeiFMwCLk=
+github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fortytw2/leaktest v1.2.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
@@ -135,40 +126,51 @@ github.com/frankban/quicktest v1.7.2 h1:2QxQoC1TS09S7fhCPsrvqYdvP1H5M1P1ih5ABm3B
github.com/frankban/quicktest v1.7.2/go.mod h1:jaStnuzAqU1AJdCO0l53JDCJrVDKcS03DbaAcR7Ks/o=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
-github.com/gabriel-vasile/mimetype v0.3.17 h1:NGWgggJJqTofUcTV1E7hkk2zVjZ54EfJa1z5O3z6By4=
-github.com/gabriel-vasile/mimetype v0.3.17/go.mod h1:kMJbg3SlWZCsj4R73F1WDzbT9AyGCOVmUtIxxwO5pmI=
-github.com/gabriel-vasile/mimetype v1.0.0 h1:0QKnAQQhG6oOsb4GK7iPlet7RtjHi9us8RF/nXoTxhI=
-github.com/gabriel-vasile/mimetype v1.0.0/go.mod h1:6CDPel/o/3/s4+bp6kIbsWATq8pmgOisOPG40CJa6To=
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32/go.mod h1:GIjDIg/heH5DOkXY3YJ/wNhfHsQHoXGjl8G8amsYQ1I=
github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
-github.com/go-ole/go-ole v1.2.1 h1:2lOsA72HgjxAuMlKpFiCbHTvu44PIVkZ5hqm3RSdI/E=
-github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8=
-github.com/go-playground/overalls v0.0.0-20180201144345-22ec1a223b7c/go.mod h1:UqxAgEOt89sCiXlrc/ycnx00LVvUO/eS8tMUkWX4R7w=
-github.com/go-redis/redis v6.15.2+incompatible h1:9SpNVG76gr6InJGxoZ6IuuxaCOQwDAhzyXg+Bs+0Sb4=
-github.com/go-redis/redis v6.15.2+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
-github.com/go-sql-driver/mysql v0.0.0-20170715192408-3955978caca4/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
+github.com/go-redis/redis v6.15.7+incompatible h1:3skhDh95XQMpnqeqNftPkQD9jL9e5e36z/1SUm6dy1U=
+github.com/go-redis/redis v6.15.7+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA=
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
+github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0=
+github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY=
+github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg=
+github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI=
+github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI=
+github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs=
+github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI=
+github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI=
+github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk=
+github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28=
+github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo=
+github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk=
+github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw=
+github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360=
+github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg=
+github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE=
+github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8=
+github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc=
+github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc=
+github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4=
+github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4=
+github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ=
+github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0=
+github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw=
github.com/gocql/gocql v0.0.0-20190829130954-e163eff7a8c6 h1:P66kRWyEoIx6URKgAC3ijx9jo9gEid7bEhLQ/Z0G65A=
github.com/gocql/gocql v0.0.0-20190829130954-e163eff7a8c6/go.mod h1:Q7Sru5153KG8D9zwueuQJB3ccJf9/bIwF/x8b3oKgT8=
-github.com/gogo/protobuf v0.0.0-20180717141946-636bf0302bc9/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-github.com/gogo/protobuf v1.0.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/gogo/protobuf v1.2.2-0.20190730201129-28a6bbf47e48 h1:X+zN6RZXsvnrSJaAIQhZezPfAfvsqihKKR8oiLHid34=
github.com/gogo/protobuf v1.2.2-0.20190730201129-28a6bbf47e48/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20181024230925-c65c006176ff/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef h1:veQD95Isof8w9/WXiA+pa3tz3fJXkt5B7QaRBrM62gk=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM39fZuwSd1LwSqqSW0hOdXCYYDX0R3I=
@@ -176,7 +178,6 @@ github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4er
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
-github.com/golang/protobuf v0.0.0-20180814211427-aa810b61a9c7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
@@ -185,7 +186,6 @@ github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8l
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/google/btree v0.0.0-20180124185431-e89373fe6b4a/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
@@ -204,9 +204,7 @@ github.com/google/martian v2.1.1-0.20190517191504-25dcb96d9e51+incompatible h1:x
github.com/google/martian v2.1.1-0.20190517191504-25dcb96d9e51+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/shlex v0.0.0-20181106134648-c34317bd91bf/go.mod h1:RpwtwJQFrIEPstU94h88MWPXP2ektJZ8cZ0YntAmXiE=
github.com/google/subcommands v1.0.1/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=
-github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/wire v0.3.0 h1:imGQZGEVEHpje5056+K+cgdO72p0LQv2xIIFXNGUf60=
@@ -216,14 +214,8 @@ github.com/googleapis/gax-go v2.0.2+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
-github.com/gorilla/context v0.0.0-20160226214623-1ea25387ff6f/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
-github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
-github.com/gorilla/mux v1.6.1/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
-github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw=
github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
-github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
-github.com/gorilla/websocket v1.2.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q=
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.1 h1:q7AeDBpnBk8AogcD4DSag/Ukw/KV+YhzLj2bP5HvKCM=
@@ -234,8 +226,6 @@ github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
-github.com/grpc-ecosystem/grpc-gateway v1.4.1/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
-github.com/grpc-ecosystem/grpc-gateway v1.5.1/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.9.0 h1:bM6ZAFZmc/wPFaRDi0d5L7hGEZEx/2u+Tmr2evNHDiI=
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
@@ -256,8 +246,6 @@ github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
-github.com/jacobsa/daemonize v0.0.0-20160101105449-e460293e890f h1:X+tnaqoCcBgAwSTJtoYW6p0qKiuPyMfofEHEFUf2kdU=
-github.com/jacobsa/daemonize v0.0.0-20160101105449-e460293e890f/go.mod h1:Ip4fOwzCrnDVuluHBd7FXIMb7SHOKfkt9/UDrYSZvqI=
github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03 h1:FUwcHNlEqkqLjLBdCp5PRlCFijNjvcYANOZXzCfXwCM=
github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o=
github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8=
@@ -272,17 +260,18 @@ github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCV
github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo=
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
-github.com/juju/ratelimit v1.0.1 h1:+7AIFJVQ0EQgq/K9+0Krm7m530Du7tIz0METWzN0RgY=
-github.com/juju/ratelimit v1.0.1/go.mod h1:qapgC/Gy+xNh9UxzV13HGGl/6UXNN+ct+vwSgWNm/qk=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/karlseguin/ccache v2.0.3+incompatible h1:j68C9tWOROiOLWTS/kCGg9IcJG+ACqn5+0+t8Oh83UU=
github.com/karlseguin/ccache v2.0.3+incompatible/go.mod h1:CM9tNPzT6EdRh14+jiW8mEF9mkNZuuE51qmgGYUB93w=
github.com/karlseguin/expect v1.0.1 h1:z4wy4npwwHSWKjGWH85WNJO42VQhovxTCZDSzhjo8hY=
github.com/karlseguin/expect v1.0.1/go.mod h1:zNBxMY8P21owkeogJELCLeHIt+voOSduHYTFUbwRAV8=
+github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4=
+github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
+github.com/klauspost/compress v1.9.5 h1:U+CaK85mrNNb4k8BNOfgJtJ/gr6kswUCFj6miSzVC6M=
+github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/cpuid v1.2.1 h1:vJi+O/nMdFt0vqm8NZBI6wzALWdA2X+egi0ogNyrC/w=
github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/crc32 v1.2.0 h1:0VuyqOCruD33/lJ/ojXNvzVyl8Zr5zdTmj9l9qLZ86I=
@@ -296,7 +285,6 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxv
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pty v1.0.0/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
@@ -309,22 +297,19 @@ github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDe
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
-github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
+github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE=
+github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0=
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149 h1:HfxbT6/JcvIljmERptWhwa8XzP7H3T+Z2N26gTsaDaA=
github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
github.com/mattn/go-ieproxy v0.0.0-20190805055040-f9202b1cfdeb h1:hXqqXzQtJbENrsb+rsIqkVqcg4FUJL0SQFGw08Dgivw=
github.com/mattn/go-ieproxy v0.0.0-20190805055040-f9202b1cfdeb/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
-github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
-github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.3 h1:a+kO+98RDGEfo6asOGMmpodZq4FNtnGP54yps8BzLR4=
github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.4 h1:2BvfKmzob6Bmd4YsL0zygOqfdFnK7GR4QL06Do4/p7Y=
github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
-github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
-github.com/matttproud/golang_protobuf_extensions v1.0.0/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
@@ -336,9 +321,7 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/montanaflynn/stats v0.0.0-20151014174947-eeaced052adb/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
-github.com/montanaflynn/stats v0.0.0-20180911141734-db72e6cae808 h1:pmpDGKLw4n82EtrNiLqB+xSz/JQwFOaZuMALYUHwX5s=
-github.com/montanaflynn/stats v0.0.0-20180911141734-db72e6cae808/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
+github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/nats-io/jwt v0.2.6/go.mod h1:mQxQ0uHQ9FhEVPIcTSKwx2lqZEpXWWcCgA7R6NrWvvY=
github.com/nats-io/jwt v0.2.14 h1:wA50KvFz/JXGXMHRygTWsRGh/ixxgC5E3kHvmtGLNf4=
@@ -354,31 +337,18 @@ github.com/nats-io/nkeys v0.1.0 h1:qMd4+pRHgdr1nAClu+2h/2a5F2TmKcCzjCDazVgRoX4=
github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
-github.com/ngaut/pools v0.0.0-20180318154953-b7bc8c42aac7 h1:7KAv7KMGTTqSmYZtNdcNTgsos+vFzULLwyElndwn+5c=
-github.com/ngaut/pools v0.0.0-20180318154953-b7bc8c42aac7/go.mod h1:iWMfgwqYW+e8n5lC/jjNEhwcjbRDpl5NT7n2h+4UNcI=
-github.com/ngaut/sync2 v0.0.0-20141008032647-7a24ed77b2ef h1:K0Fn+DoFqNqktdZtdV3bPQ/0cuYh2H4rkg0tytX/07k=
-github.com/ngaut/sync2 v0.0.0-20141008032647-7a24ed77b2ef/go.mod h1:7WjlapSfwQyo6LNmIvEWzsW1hbBQfpUO4JWnuQRmva8=
-github.com/nicksnyder/go-i18n v1.10.0/go.mod h1:HrK7VCrbOvQoUAQ7Vpy7i87N7JZZZ7R2xBGjv0j365Q=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
-github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo=
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
-github.com/opentracing/basictracer-go v1.0.0 h1:YyUAhaEfjoWXclZVJ9sGoNct7j4TVk7lZWlQw5UXuoo=
-github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74=
-github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
-github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU=
-github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
-github.com/pelletier/go-toml v1.3.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo=
github.com/pelletier/go-toml v1.4.0 h1:u3Z1r+oOXJIkxqw34zVhyPgjBsm6X2wn21NWs/HfSeg=
github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo=
github.com/peterh/liner v1.1.0 h1:f+aAedNJA6uk7+6rXsYBnhdo4Xux7ESLe+kcuVUF5os=
@@ -386,65 +356,29 @@ github.com/peterh/liner v1.1.0/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf
github.com/pierrec/lz4 v0.0.0-20190327172049-315a67e90e41/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
github.com/pierrec/lz4 v2.2.7+incompatible h1:Eerk9aiqeZo2QzsbWOAsELUf9ddvAxEdMY9LYze/DEc=
github.com/pierrec/lz4 v2.2.7+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
-github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8 h1:USx2/E1bX46VG32FIw034Au6seQ2fY9NEILmNh/UlQg=
-github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8/go.mod h1:B1+S9LNcuMyLH/4HMTViQOJevkGiik3wW2AN9zb2fNQ=
-github.com/pingcap/errcode v0.0.0-20180921232412-a1a7271709d9 h1:KH4f4Si9XK6/IW50HtoaiLIFHGkapOM6w83za47UYik=
-github.com/pingcap/errcode v0.0.0-20180921232412-a1a7271709d9/go.mod h1:4b2X8xSqxIroj/IZ9MX/VGZhAwc11wB9wRIzHvz6SeM=
-github.com/pingcap/errors v0.11.0/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8=
-github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4=
-github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8=
-github.com/pingcap/failpoint v0.0.0-20190512135322-30cc7431d99c h1:hvQd3aOLKLF7xvRV6DzvPkKY4QXzfVbjU1BhW0d9yL8=
-github.com/pingcap/failpoint v0.0.0-20190512135322-30cc7431d99c/go.mod h1:DNS3Qg7bEDhU6EXNHF+XSv/PGznQaMJ5FWvctpm6pQI=
-github.com/pingcap/goleveldb v0.0.0-20171020122428-b9ff6c35079e h1:P73/4dPCL96rGrobssy1nVy2VaVpNCuLpCbr+FEaTA8=
-github.com/pingcap/goleveldb v0.0.0-20171020122428-b9ff6c35079e/go.mod h1:O17XtbryoCJhkKGbT62+L2OlrniwqiGLSqrmdHCMzZw=
-github.com/pingcap/kvproto v0.0.0-20190822090350-11ea838aedf7/go.mod h1:QMdbTAXCHzzygQzqcG9uVUgU2fKeSN1GmfMiykdSzzY=
-github.com/pingcap/kvproto v0.0.0-20190910074005-0e61b6f435c1/go.mod h1:QMdbTAXCHzzygQzqcG9uVUgU2fKeSN1GmfMiykdSzzY=
-github.com/pingcap/kvproto v0.0.0-20191022073741-81b2a2d9e986 h1:XFh7n8Cheo00pakfhpUofnlptHCuz9lkp4p/jXPb8lM=
-github.com/pingcap/kvproto v0.0.0-20191022073741-81b2a2d9e986/go.mod h1:QMdbTAXCHzzygQzqcG9uVUgU2fKeSN1GmfMiykdSzzY=
-github.com/pingcap/log v0.0.0-20190715063458-479153f07ebd/go.mod h1:WpHUKhNZ18v116SvGrmjkA9CBhYmuUTKL+p8JC9ANEw=
-github.com/pingcap/log v0.0.0-20191012051959-b742a5d432e9 h1:AJD9pZYm72vMgPcQDww9rkZ1DnWfl0pXV3BOWlkYIjA=
-github.com/pingcap/log v0.0.0-20191012051959-b742a5d432e9/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8=
-github.com/pingcap/parser v0.0.0-20191021083151-7c64f78a5100 h1:TRyps2d+2TsJv1Vk4S2D+5COMDVKClnAO5aNmGGVyj0=
-github.com/pingcap/parser v0.0.0-20191021083151-7c64f78a5100/go.mod h1:1FNvfp9+J0wvc4kl8eGNh7Rqrxveg15jJoWo/a0uHwA=
-github.com/pingcap/pd v1.1.0-beta.0.20190923032047-5c648dc365e0 h1:GIEq+wZfrl2bcJxpuSrEH4H7/nlf5YdmpS+dU9lNIt8=
-github.com/pingcap/pd v1.1.0-beta.0.20190923032047-5c648dc365e0/go.mod h1:G/6rJpnYwM0LKMec2rI82/5Kg6GaZMvlfB+e6/tvYmI=
-github.com/pingcap/tidb v1.1.0-beta.0.20191023070859-58fc7d44f73b h1:6GfcYOX9/CCxPnNOivVxiDYXbZrCHU1mRp691iw9EYs=
-github.com/pingcap/tidb v1.1.0-beta.0.20191023070859-58fc7d44f73b/go.mod h1:YfrHdQ613A+E2FSugyXOdJmeZQbXNjpXX2doNe8MGj8=
-github.com/pingcap/tidb-tools v2.1.3-0.20190321065848-1e8b48f5c168+incompatible h1:MkWCxgZpJBgY2f4HtwWMMFzSBb3+JPzeJgF3VrXE/bU=
-github.com/pingcap/tidb-tools v2.1.3-0.20190321065848-1e8b48f5c168+incompatible/go.mod h1:XGdcy9+yqlDSEMTpOXnwf3hiTeqrV6MN/u1se9N8yIM=
-github.com/pingcap/tipb v0.0.0-20191015023537-709b39e7f8bb/go.mod h1:RtkHW8WbcNxj8lsbzjaILci01CtYnYbIkQhjyZWrWVI=
-github.com/pingcap/tipb v0.0.0-20191022094114-a2e8c3fa634b h1:DZ0cTsn4lGMNaRjkUFKBtHn4s2F8KFMm83lWvSo+x7c=
-github.com/pingcap/tipb v0.0.0-20191022094114-a2e8c3fa634b/go.mod h1:RtkHW8WbcNxj8lsbzjaILci01CtYnYbIkQhjyZWrWVI=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_golang v0.9.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.1.0 h1:BQ53HtBmfOitExawJ6LokA4x8ov/z0SYYb0+HxJfRI8=
github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
-github.com/prometheus/client_model v0.0.0-20170216185247-6f3806018612/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/common v0.0.0-20180518154759-7600349dcfe1/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
-github.com/prometheus/common v0.0.0-20181020173914-7e9e6cabbd39/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.6.0 h1:kRhiuYSXR3+uv2IbVbZhUxK5zVD/2pp3Gd2PpvPkpEo=
github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
-github.com/prometheus/procfs v0.0.0-20180612222113-7d6f385de8be/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs=
@@ -453,28 +387,24 @@ github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDa
github.com/prometheus/procfs v0.0.4 h1:w8DjqFMJDjuVwdZBQoOozr4MVWOnwF7RcL/7uxBjY78=
github.com/prometheus/procfs v0.0.4/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
-github.com/rakyll/statik v0.1.6 h1:uICcfUXpgqtw2VopbIncslhAmE5hwc4g20TEyEENBNs=
-github.com/rakyll/statik v0.1.6/go.mod h1:OEi9wJV/fMUAGx1eNjq75DKDsJVuEv1U0oYdX6GX8Zs=
+github.com/rakyll/statik v0.1.7 h1:OF3QCZUuyPxuGEP7B4ypUa7sB/iHtqOTDYZXGM8KOdQ=
+github.com/rakyll/statik v0.1.7/go.mod h1:AlZONWzMtEnMs7W4e/1LURLiI49pIMmp6V9Unghqrcc=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563 h1:dY6ETXrvDG7Sa4vE8ZQG4yqWg6UnOcbqTAahkV813vQ=
github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
-github.com/remyoudompheng/bigfft v0.0.0-20190512091148-babf20351dd7/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
-github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237 h1:HQagqIiBmr8YXawX/le3+O26N+vPPC1PtjaF3mwnook=
-github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
-github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd h1:CmH9+J6ZSsIjUK3dcGsnCnO41eRBOnY12zwkn5qVwgc=
-github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd/go.mod h1:hPqNNc0+uJM6H+SuU8sEs5K5IQeKccPqeSjfgcKGgPk=
+github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/seaweedfs/fuse v0.0.0-20190510212405-310228904eff h1:uLd5zBvf5OA67wcVRePHrFt60bR4LSskaVhgVwyk0Jg=
github.com/seaweedfs/fuse v0.0.0-20190510212405-310228904eff/go.mod h1:cubdLmQFqEUZ9vNJrznhgc3m3VMAJi/nY2Ix2axXkG0=
-github.com/sergi/go-diff v1.0.1-0.20180205163309-da645544ed44/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
-github.com/shirou/gopsutil v2.18.10+incompatible h1:cy84jW6EVRPa5g9HAHrlbxMSIjBhDSX0OFYyMYminYs=
-github.com/shirou/gopsutil v2.18.10+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
-github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
-github.com/shurcooL/vfsgen v0.0.0-20181020040650-a97a25d856ca/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw=
-github.com/sirupsen/logrus v1.0.5/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
+github.com/seaweedfs/goexif v1.0.2 h1:p+rTXYdQ2mgxd+1JaTrQ9N8DvYuw9UH9xgYmJ+Bb29E=
+github.com/seaweedfs/goexif v1.0.2/go.mod h1:MrKs5LK0HXdffrdCZrW3OIMegL2xXpC6ThLyXMyjdrk=
github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E=
@@ -493,7 +423,6 @@ github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
-github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/viper v1.4.0 h1:yXHLWeravcrgGyFSyCgdYpXQ9dR9c/WED3pg1RhxqEU=
@@ -510,72 +439,50 @@ github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoH
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
-github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
-github.com/struCoder/pidusage v0.1.2/go.mod h1:pWBlW3YuSwRl6h7R5KbvA4N8oOqe9LjaKW5CwT1SPjI=
-github.com/syndtr/goleveldb v0.0.0-20180815032940-ae2bd5eed72d/go.mod h1:Z4AUp2Km+PwemOoO/VB5AOx9XSsIItzFjoJlOSiYmn0=
github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE=
github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ=
-github.com/tiancaiamao/appdash v0.0.0-20181126055449-889f96f722a2/go.mod h1:2PfKggNGDuadAa0LElHrByyrz4JPZ9fFx6Gs7nx7ZZU=
github.com/tidwall/gjson v1.3.2 h1:+7p3qQFaH3fOMXAJSrdZwGKcOO/lYdGS0HqGhPqDdTI=
github.com/tidwall/gjson v1.3.2/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls=
github.com/tidwall/match v1.0.1 h1:PnKP62LPNxHKTwvHHZZzdOAOCtsJTjo6dZLCwpKm5xc=
github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E=
github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4=
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
-github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
-github.com/tmc/grpc-websocket-proxy v0.0.0-20171017195756-830351dc03c6/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
-github.com/uber-go/atomic v1.3.2/go.mod h1:/Ct5t2lcmbJ4OSe/waGBoaVvVqtO0bmtfVNex1PFV8g=
-github.com/uber-go/atomic v1.4.0 h1:yOuPqEq4ovnhEjpHmfFwsqBXDYbQeT6Nb0bwD6XnD5o=
-github.com/uber-go/atomic v1.4.0/go.mod h1:/Ct5t2lcmbJ4OSe/waGBoaVvVqtO0bmtfVNex1PFV8g=
-github.com/uber/jaeger-client-go v2.15.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
-github.com/uber/jaeger-client-go v2.17.0+incompatible h1:35tpDuT3k0oBiN/aGoSWuiFaqKgKZSciSMnWrazhSHE=
-github.com/uber/jaeger-client-go v2.17.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
-github.com/uber/jaeger-lib v1.5.0/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
-github.com/uber/jaeger-lib v2.0.0+incompatible h1:iMSCV0rmXEogjNWPh2D0xk9YVKvrtGoHJNe9ebLu/pw=
-github.com/uber/jaeger-lib v2.0.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
-github.com/ugorji/go v1.1.2/go.mod h1:hnLbHMwcvSihnDhEfx2/BzKp2xb0Y+ErdfYcrs9tkJQ=
github.com/ugorji/go v1.1.4 h1:j4s+tAvLfL3bZyefP2SEWmhBzmuIlH/eqNuPdFPgngw=
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
-github.com/ugorji/go/codec v0.0.0-20190204201341-e444a5086c43/go.mod h1:iT03XoTwV7xq/+UGwKO3UbC1nNNlopQiY61beSdrtOA=
-github.com/unrolled/render v0.0.0-20171102162132-65450fb6b2d3/go.mod h1:tu82oB5W2ykJRVioYsB+IQKcft7ryBr7w12qMBUPyXg=
-github.com/unrolled/render v0.0.0-20180914162206-b9786414de4d h1:ggUgChAeyge4NZ4QUw6lhHsVymzwSDJOZcE0s2X8S20=
-github.com/unrolled/render v0.0.0-20180914162206-b9786414de4d/go.mod h1:tu82oB5W2ykJRVioYsB+IQKcft7ryBr7w12qMBUPyXg=
-github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
-github.com/urfave/negroni v0.3.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4=
github.com/willf/bitset v1.1.10 h1:NotGKqX0KwQ72NUzqrjZq5ipPNDQex9lo3WpaS8L2sc=
github.com/willf/bitset v1.1.10/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
github.com/willf/bloom v2.0.3+incompatible h1:QDacWdqcAUI1MPOwIQZRy9kOR7yxfyEmxX8Wdm2/JPA=
github.com/willf/bloom v2.0.3+incompatible/go.mod h1:MmAltL9pDMNTrvUkxdg0k0q5I0suxmuwp3KbyrZLOZ8=
github.com/wsxiaoys/terminal v0.0.0-20160513160801-0940f3fc43a0 h1:3UeQBvD0TFrlVjOeLOBz+CPAI8dnbqNSVwUwRrkp7vQ=
github.com/wsxiaoys/terminal v0.0.0-20160513160801-0940f3fc43a0/go.mod h1:IXCdmsXIht47RaVFLEdVnh1t+pgYtTAhQGj73kz+2DM=
+github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c h1:u40Z8hqBAAQyv+vATcGgV0YCnDjqSL7/q/JyPhhJSPk=
github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
+github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
+github.com/xdg/stringprep v1.0.0 h1:d9X0esnoa3dFsV0FG35rAT0RIhYFlPq7MiP+DW89La0=
github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
-github.com/yookoala/realpath v1.0.0/go.mod h1:gJJMA9wuX7AcqLy1+ffPatSCySA1FQ2S8Ya9AIoYBpE=
go.etcd.io/bbolt v1.3.2 h1:Z/90sZLPOeCy2PwprqkFa25PdkusRzaj9P8zm/KNyvk=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
-go.etcd.io/etcd v0.0.0-20190320044326-77d4b742cdbf/go.mod h1:KSGwdbiFchh5KIC9My2+ZVl5/3ANcwohw50dpPwa2cw=
go.etcd.io/etcd v3.3.15+incompatible h1:0VpOVCF6EFnJptt8Jh0EWEHO4j2fepyV1fpu9xz/UoQ=
go.etcd.io/etcd v3.3.15+incompatible/go.mod h1:yaeTdrJi5lOmYerz05bd8+V7KubZs8YSFZfzsF9A6aI=
+go.mongodb.org/mongo-driver v1.3.2 h1:IYppNjEV/C+/3VPbhHVxQ4t04eVW0cLp0/pNdW++6Ug=
+go.mongodb.org/mongo-driver v1.3.2/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE=
go.opencensus.io v0.15.0/go.mod h1:UffZAU+4sDEINUGP/B7UfBBkq4fqLu9zXAX7ke6CHW0=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
-go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.2.0 h1:6I+W7f5VwC5SV9dNrZ3qXrDB9mD0dyGOi/ZJmYw03T4=
go.uber.org/multierr v1.2.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
-go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
gocloud.dev v0.16.0 h1:hWeaQWxamGerwsU7B9xSWvUjx0p7TwG8fcHro2TzbbM=
@@ -584,25 +491,24 @@ gocloud.dev/pubsub/natspubsub v0.16.0 h1:MoBGXULDzb1fVaZsGWO5cUCgr6yoI/DHhau8OPG
gocloud.dev/pubsub/natspubsub v0.16.0/go.mod h1:0n7pT7PkLMClBUHDrOkHfOFVr/o/6kawNMwsyAbwadI=
gocloud.dev/pubsub/rabbitpubsub v0.16.0 h1:Bkv2njMSl2tmT3tGbvbwpiIDAXBIpqzP9dmts+rhD4E=
gocloud.dev/pubsub/rabbitpubsub v0.16.0/go.mod h1:JJVdUUIqwgaaMJg/1xHQza0g4sI/4KHHSNiGE+pn4JM=
-golang.org/x/crypto v0.0.0-20180608092829-8ac0e0d97ce4/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
+golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5 h1:58fnuSXlxZmFdJyvtTFVmVhcMLU6v5fEb/ok4wyqtNU=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190909091759-094676da4a83/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7 h1:0hQKqeLdqlt5iIwVOBErRisrHJAN57yOiPRQItI20fU=
golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
-golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067 h1:KYGJGHOQy8oSi1fDlSpcZF0+juKwk/hEMv5SiwHogR0=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
-golang.org/x/image v0.0.0-20190829233526-b3c06291d021 h1:j6QOxNFMpEL1wIQX6TUdBPNfGZKmBOJS/vfSm8a7tdM=
-golang.org/x/image v0.0.0-20190829233526-b3c06291d021/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20200119044424-58c23975cae1 h1:5h3ngYt7+vXCDZCup/HkCQgW5XwmSvR/nA2JmJ0RErg=
+golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -612,7 +518,6 @@ golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -637,6 +542,7 @@ golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -649,23 +555,23 @@ golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190620070143-6f217b454f45/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0 h1:HyfiK1WMnHj5FXFXatD+Qs1A/xC2Run6RzeW1SyHxpc=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190909082730-f460065e899a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190910064555-bbd175535a8b h1:3S2h5FadpNr0zUUCVZjlKIEYF+KaX/OBplTGo89CYHI=
golang.org/x/sys v0.0.0-20190910064555-bbd175535a8b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
-golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -677,10 +583,14 @@ golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190422233926-fe54fb35175b/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190911022129-16c5e0f7d110 h1:6S6bidS7O4yAwA5ORRbRIjvNQ9tGbLd5e+LRIaTeVDQ=
@@ -703,9 +613,7 @@ google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/appengine v1.6.2 h1:j8RI1yW0SkI+paT6uGwMlrMI/6zwYA6/CFil8rxOzGI=
google.golang.org/appengine v1.6.2/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
-google.golang.org/genproto v0.0.0-20180608181217-32ee49c4dd80/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto v0.0.0-20181004005441-af9cb2a35e7f/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
@@ -714,30 +622,27 @@ google.golang.org/genproto v0.0.0-20190508193815-b515fa19cec8/go.mod h1:VzzqZJRn
google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s=
google.golang.org/genproto v0.0.0-20190620144150-6af8c5fc6601/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s=
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190905072037-92dd089d5514 h1:oFSK4421fpCKRrpzIpybyBVWyht05NegY9+L/3TLAZs=
google.golang.org/genproto v0.0.0-20190905072037-92dd089d5514/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/grpc v0.0.0-20180607172857-7a6a684ca69e/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
-google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.23.0 h1:AzbTB6ux+okLTzP8Ru1Xs41C303zdcfEht7MQnYJt5A=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
-gopkg.in/alecthomas/gometalinter.v2 v2.0.12/go.mod h1:NDRytsqEZyolNuAgTzJkZMkSQM7FIKyzVzGhjB/qfYo=
+google.golang.org/grpc v1.26.0 h1:2dTRdpdFEEhJYQD8EMLB61nnrzSCTbG38PhqdhvOltg=
+google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
-gopkg.in/alecthomas/kingpin.v3-unstable v3.0.0-20180810215634-df19058c872c/go.mod h1:3HH7i1SgMqlzxCcBmUHW657sD4Kvv9sC3HpL3YukzwA=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
+gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
-gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/jcmturner/aescts.v1 v1.0.1 h1:cVVZBK2b1zY26haWB4vbBiZrfFQnfbTVrE3xZq6hrEw=
@@ -754,8 +659,6 @@ gopkg.in/jcmturner/rpc.v1 v1.1.0 h1:QHIUxTX1ISuAv9dD2wJ9HWQVuWDX/Zc0PfeC2tjc4rU=
gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8=
gopkg.in/karlseguin/expect.v1 v1.0.1 h1:9u0iUltnhFbJTHaSIH0EP+cuTU5rafIgmcsEsg2JQFw=
gopkg.in/karlseguin/expect.v1 v1.0.1/go.mod h1:uB7QIJBcclvYbwlUDkSCsGjAOMis3fP280LyhuDEf2I=
-gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=
-gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
@@ -771,6 +674,3 @@ pack.ag/amqp v0.11.2/go.mod h1:4/cbmt4EJXSKlG6LCfWHoqmN0uFdy5i/+YFz+fTfhV4=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
-sourcegraph.com/sourcegraph/appdash v0.0.0-20180531100431-4c381bd170b4 h1:VO9oZbbkvTwqLimlQt15QNdOOBArT2dw/bvzsMZBiqQ=
-sourcegraph.com/sourcegraph/appdash v0.0.0-20180531100431-4c381bd170b4/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU=
-sourcegraph.com/sourcegraph/appdash-data v0.0.0-20151005221446-73f23eafcf67/go.mod h1:L5q+DGLGOQFpo1snNEkLOJT2d1YTW66rWNzatr3He1k=
diff --git a/k8s/README.md b/k8s/README.md
new file mode 100644
index 000000000..5ec3ab407
--- /dev/null
+++ b/k8s/README.md
@@ -0,0 +1,23 @@
+## SEAWEEDFS - helm chart (2.x)
+
+### info:
+* master/filer/volume are stateful sets with anti-affinity on the hostname,
+so your deployment will be spread across nodes and highly available.
+* the chart uses memsql (a MySQL-compatible database) as the filer backend to enable HA (multiple filer instances)
+and the backup/HA capabilities memsql provides.
+* the mysql user/password are created in a k8s secret (secret-seaweedfs-db.yaml) and injected into the filer
+via environment variables.
+* a cert config exists and can be enabled, but it has not been tested yet.
+
+### current instances config (AIO):
+1 instance for each type (master/filer/volume/s3)
+
+instances need node labels:
+* sw-volume: true (for volume instances, a dedicated tag)
+* sw-backend: true (for all others, as they are less resource demanding)
+
+you can update the replicas count for each node type in values.yaml;
+if you increase it, you also need to add more nodes with the corresponding label.
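+
+for example, labeling nodes with kubectl (the node names below are placeholders):
+
+```shell
+# tag a node that should run a volume server
+kubectl label node my-volume-node sw-volume=true
+# tag a node for the other, less resource-demanding components
+kubectl label node my-backend-node sw-backend=true
+```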
+
+most of the configuration is available through values.yaml
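+
+as a sketch, installing the chart with custom values might look like this (Helm 2.x syntax; the release name and values file are placeholders):
+
+```shell
+# from the k8s/ directory, install the chart with overrides
+helm install --name seaweedfs ./seaweedfs -f my-values.yaml
+```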
+
diff --git a/k8s/seaweedfs/.helmignore b/k8s/seaweedfs/.helmignore
new file mode 100644
index 000000000..50af03172
--- /dev/null
+++ b/k8s/seaweedfs/.helmignore
@@ -0,0 +1,22 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/k8s/seaweedfs/Chart.yaml b/k8s/seaweedfs/Chart.yaml
new file mode 100644
index 000000000..7ebee122b
--- /dev/null
+++ b/k8s/seaweedfs/Chart.yaml
@@ -0,0 +1,4 @@
+apiVersion: v1
+description: SeaweedFS
+name: seaweedfs
+version: 1.79
diff --git a/k8s/seaweedfs/templates/_helpers.tpl b/k8s/seaweedfs/templates/_helpers.tpl
new file mode 100644
index 000000000..04a782f8b
--- /dev/null
+++ b/k8s/seaweedfs/templates/_helpers.tpl
@@ -0,0 +1,114 @@
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to
+this (by the DNS naming spec). If release name contains chart name it will
+be used as a full name.
+*/}}
+{{- define "seaweedfs.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "seaweedfs.chart" -}}
+{{- printf "%s-helm" .Chart.Name | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "seaweedfs.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Inject extra environment vars in the format key:value, if populated
+*/}}
+{{- define "seaweedfs.extraEnvironmentVars" -}}
+{{- if .extraEnvironmentVars -}}
+{{- range $key, $value := .extraEnvironmentVars }}
+- name: {{ $key }}
+ value: {{ $value | quote }}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/* Return the proper filer image */}}
+{{- define "filer.image" -}}
+{{- if .Values.filer.imageOverride -}}
+{{- $imageOverride := .Values.filer.imageOverride -}}
+{{- printf "%s" $imageOverride -}}
+{{- else -}}
+{{- $registryName := default .Values.image.registry .Values.global.localRegistry | toString -}}
+{{- $repositoryName := .Values.image.repository | toString -}}
+{{- $name := .Values.global.imageName | toString -}}
+{{- $tag := .Values.global.imageTag | toString -}}
+{{- printf "%s%s%s:%s" $registryName $repositoryName $name $tag -}}
+{{- end -}}
+{{- end -}}
+
+{{/* Return the proper postgresqlSchema image */}}
+{{- define "filer.dbSchema.image" -}}
+{{- if .Values.filer.dbSchema.imageOverride -}}
+{{- $imageOverride := .Values.filer.dbSchema.imageOverride -}}
+{{- printf "%s" $imageOverride -}}
+{{- else -}}
+{{- $registryName := default .Values.global.registry .Values.global.localRegistry | toString -}}
+{{- $repositoryName := .Values.global.repository | toString -}}
+{{- $name := .Values.filer.dbSchema.imageName | toString -}}
+{{- $tag := .Values.filer.dbSchema.imageTag | toString -}}
+{{- printf "%s%s%s:%s" $registryName $repositoryName $name $tag -}}
+{{- end -}}
+{{- end -}}
+
+{{/* Return the proper master image */}}
+{{- define "master.image" -}}
+{{- if .Values.master.imageOverride -}}
+{{- $imageOverride := .Values.master.imageOverride -}}
+{{- printf "%s" $imageOverride -}}
+{{- else -}}
+{{- $registryName := default .Values.image.registry .Values.global.localRegistry | toString -}}
+{{- $repositoryName := .Values.image.repository | toString -}}
+{{- $name := .Values.global.imageName | toString -}}
+{{- $tag := .Values.global.imageTag | toString -}}
+{{- printf "%s%s%s:%s" $registryName $repositoryName $name $tag -}}
+{{- end -}}
+{{- end -}}
+
+{{/* Return the proper s3 image */}}
+{{- define "s3.image" -}}
+{{- if .Values.s3.imageOverride -}}
+{{- $imageOverride := .Values.s3.imageOverride -}}
+{{- printf "%s" $imageOverride -}}
+{{- else -}}
+{{- $registryName := default .Values.image.registry .Values.global.localRegistry | toString -}}
+{{- $repositoryName := .Values.image.repository | toString -}}
+{{- $name := .Values.global.imageName | toString -}}
+{{- $tag := .Values.global.imageTag | toString -}}
+{{- printf "%s%s%s:%s" $registryName $repositoryName $name $tag -}}
+{{- end -}}
+{{- end -}}
+
+{{/* Return the proper volume image */}}
+{{- define "volume.image" -}}
+{{- if .Values.volume.imageOverride -}}
+{{- $imageOverride := .Values.volume.imageOverride -}}
+{{- printf "%s" $imageOverride -}}
+{{- else -}}
+{{- $registryName := default .Values.image.registry .Values.global.localRegistry | toString -}}
+{{- $repositoryName := .Values.image.repository | toString -}}
+{{- $name := .Values.global.imageName | toString -}}
+{{- $tag := .Values.global.imageTag | toString -}}
+{{- printf "%s%s%s:%s" $registryName $repositoryName $name $tag -}}
+{{- end -}}
+{{- end -}}
\ No newline at end of file
diff --git a/k8s/seaweedfs/templates/ca-cert.yaml b/k8s/seaweedfs/templates/ca-cert.yaml
new file mode 100644
index 000000000..056f01502
--- /dev/null
+++ b/k8s/seaweedfs/templates/ca-cert.yaml
@@ -0,0 +1,14 @@
+{{- if .Values.global.enableSecurity }}
+apiVersion: certmanager.k8s.io/v1alpha1
+kind: Certificate
+metadata:
+ name: {{ template "seaweedfs.name" . }}-ca-cert
+ namespace: {{ .Release.Namespace }}
+spec:
+ secretName: {{ template "seaweedfs.name" . }}-ca-cert
+ commonName: "{{ template "seaweedfs.name" . }}-root-ca"
+ isCA: true
+ issuerRef:
+ name: {{ template "seaweedfs.name" . }}-clusterissuer
+ kind: ClusterIssuer
+{{- end }}
diff --git a/k8s/seaweedfs/templates/cert-clusterissuer.yaml b/k8s/seaweedfs/templates/cert-clusterissuer.yaml
new file mode 100644
index 000000000..d0bd42593
--- /dev/null
+++ b/k8s/seaweedfs/templates/cert-clusterissuer.yaml
@@ -0,0 +1,8 @@
+{{- if .Values.global.enableSecurity }}
+apiVersion: certmanager.k8s.io/v1alpha1
+kind: ClusterIssuer
+metadata:
+ name: {{ template "seaweedfs.name" . }}-clusterissuer
+spec:
+ selfSigned: {}
+{{- end }}
diff --git a/k8s/seaweedfs/templates/client-cert.yaml b/k8s/seaweedfs/templates/client-cert.yaml
new file mode 100644
index 000000000..4d27b5659
--- /dev/null
+++ b/k8s/seaweedfs/templates/client-cert.yaml
@@ -0,0 +1,33 @@
+{{- if .Values.global.enableSecurity }}
+apiVersion: certmanager.k8s.io/v1alpha1
+kind: Certificate
+metadata:
+ name: {{ template "seaweedfs.name" . }}-client-cert
+ namespace: {{ .Release.Namespace }}
+spec:
+ secretName: {{ template "seaweedfs.name" . }}-client-cert
+ issuerRef:
+ name: {{ template "seaweedfs.name" . }}-clusterissuer
+ kind: ClusterIssuer
+ commonName: {{ .Values.certificates.commonName }}
+ organization:
+ - "SeaweedFS CA"
+ dnsNames:
+ - '*.{{ .Release.Namespace }}'
+ - '*.{{ .Release.Namespace }}.svc'
+ - '*.{{ .Release.Namespace }}.svc.cluster.local'
+ - '*.{{ template "seaweedfs.name" . }}-master'
+ - '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}'
+ - '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}.svc'
+ - '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}.svc.cluster.local'
+{{- if .Values.certificates.ipAddresses }}
+ ipAddresses:
+ {{- range .Values.certificates.ipAddresses }}
+ - {{ . }}
+ {{- end }}
+{{- end }}
+ keyAlgorithm: {{ .Values.certificates.keyAlgorithm }}
+ keySize: {{ .Values.certificates.keySize }}
+ duration: {{ .Values.certificates.duration }}
+ renewBefore: {{ .Values.certificates.renewBefore }}
+{{- end }}
diff --git a/k8s/seaweedfs/templates/filer-cert.yaml b/k8s/seaweedfs/templates/filer-cert.yaml
new file mode 100644
index 000000000..855183c54
--- /dev/null
+++ b/k8s/seaweedfs/templates/filer-cert.yaml
@@ -0,0 +1,33 @@
+{{- if .Values.global.enableSecurity }}
+apiVersion: certmanager.k8s.io/v1alpha1
+kind: Certificate
+metadata:
+ name: {{ template "seaweedfs.name" . }}-filer-cert
+ namespace: {{ .Release.Namespace }}
+spec:
+ secretName: {{ template "seaweedfs.name" . }}-filer-cert
+ issuerRef:
+ name: {{ template "seaweedfs.name" . }}-clusterissuer
+ kind: ClusterIssuer
+ commonName: {{ .Values.certificates.commonName }}
+ organization:
+ - "SeaweedFS CA"
+ dnsNames:
+ - '*.{{ .Release.Namespace }}'
+ - '*.{{ .Release.Namespace }}.svc'
+ - '*.{{ .Release.Namespace }}.svc.cluster.local'
+ - '*.{{ template "seaweedfs.name" . }}-master'
+ - '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}'
+ - '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}.svc'
+ - '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}.svc.cluster.local'
+{{- if .Values.certificates.ipAddresses }}
+ ipAddresses:
+ {{- range .Values.certificates.ipAddresses }}
+ - {{ . }}
+ {{- end }}
+{{- end }}
+ keyAlgorithm: {{ .Values.certificates.keyAlgorithm }}
+ keySize: {{ .Values.certificates.keySize }}
+ duration: {{ .Values.certificates.duration }}
+ renewBefore: {{ .Values.certificates.renewBefore }}
+{{- end }}
diff --git a/k8s/seaweedfs/templates/filer-service.yaml b/k8s/seaweedfs/templates/filer-service.yaml
new file mode 100644
index 000000000..493859e36
--- /dev/null
+++ b/k8s/seaweedfs/templates/filer-service.yaml
@@ -0,0 +1,22 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ template "seaweedfs.name" . }}-filer
+ namespace: {{ .Release.Namespace }}
+ labels:
+ app: {{ template "seaweedfs.name" . }}
+ component: filer
+spec:
+ clusterIP: None
+ ports:
+ - name: "swfs-filer"
+ port: {{ .Values.filer.port }}
+ targetPort: {{ .Values.filer.port }}
+ protocol: TCP
+ - name: "swfs-filer-grpc"
+ port: {{ .Values.filer.grpcPort }}
+ targetPort: {{ .Values.filer.grpcPort }}
+ protocol: TCP
+ selector:
+ app: {{ template "seaweedfs.name" . }}
+ component: filer
\ No newline at end of file
diff --git a/k8s/seaweedfs/templates/filer-statefulset.yaml b/k8s/seaweedfs/templates/filer-statefulset.yaml
new file mode 100644
index 000000000..43da74c43
--- /dev/null
+++ b/k8s/seaweedfs/templates/filer-statefulset.yaml
@@ -0,0 +1,207 @@
+{{- if .Values.filer.enabled }}
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: {{ template "seaweedfs.name" . }}-filer
+ namespace: {{ .Release.Namespace }}
+ labels:
+ app: {{ template "seaweedfs.name" . }}
+ chart: {{ template "seaweedfs.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+spec:
+ serviceName: {{ template "seaweedfs.name" . }}-filer
+ podManagementPolicy: Parallel
+ replicas: {{ .Values.filer.replicas }}
+ {{- if (gt (int .Values.filer.updatePartition) 0) }}
+ updateStrategy:
+ type: RollingUpdate
+ rollingUpdate:
+ partition: {{ .Values.filer.updatePartition }}
+ {{- end }}
+ selector:
+ matchLabels:
+ app: {{ template "seaweedfs.name" . }}
+ chart: {{ template "seaweedfs.chart" . }}
+ release: {{ .Release.Name }}
+ component: filer
+ template:
+ metadata:
+ labels:
+ app: {{ template "seaweedfs.name" . }}
+ chart: {{ template "seaweedfs.chart" . }}
+ release: {{ .Release.Name }}
+ component: filer
+ spec:
+ restartPolicy: {{ default .Values.global.restartPolicy .Values.filer.restartPolicy }}
+ {{- if .Values.filer.affinity }}
+ affinity:
+ {{ tpl .Values.filer.affinity . | nindent 8 | trim }}
+ {{- end }}
+ {{- if .Values.filer.tolerations }}
+ tolerations:
+ {{ tpl .Values.filer.tolerations . | nindent 8 | trim }}
+ {{- end }}
+ {{- if .Values.global.imagePullSecrets }}
+ imagePullSecrets:
+ - name: {{ .Values.global.imagePullSecrets }}
+ {{- end }}
+      serviceAccountName: seaweefds-rw-sa # hack to allow deleting the master pod after migration
+ terminationGracePeriodSeconds: 60
+ {{- if .Values.filer.priorityClassName }}
+ priorityClassName: {{ .Values.filer.priorityClassName | quote }}
+ {{- end }}
+ enableServiceLinks: false
+ containers:
+ - name: seaweedfs
+ image: {{ template "filer.image" . }}
+ imagePullPolicy: {{ default "IfNotPresent" .Values.global.imagePullPolicy }}
+ env:
+ - name: POD_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: WEED_MYSQL_USERNAME
+ valueFrom:
+ secretKeyRef:
+ name: secret-seaweedfs-db
+ key: user
+ - name: WEED_MYSQL_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: secret-seaweedfs-db
+ key: password
+ - name: SEAWEEDFS_FULLNAME
+ value: "{{ template "seaweedfs.name" . }}"
+ {{- if .Values.filer.extraEnvironmentVars }}
+ {{- range $key, $value := .Values.filer.extraEnvironmentVars }}
+ - name: {{ $key }}
+ value: {{ $value | quote }}
+ {{- end }}
+ {{- end }}
+ command:
+ - "/bin/sh"
+ - "-ec"
+ - |
+ exec /usr/bin/weed -logdir=/logs \
+ {{- if .Values.filer.loggingOverrideLevel }}
+ -v={{ .Values.filer.loggingOverrideLevel }} \
+ {{- else }}
+ -v={{ .Values.global.loggingLevel }} \
+ {{- end }}
+ filer \
+ -port={{ .Values.filer.port }} \
+ {{- if .Values.filer.disableHttp }}
+ -disableHttp \
+ {{- end }}
+ {{- if .Values.filer.disableDirListing }}
+ -disableDirListing \
+ {{- end }}
+ -dirListLimit={{ .Values.filer.dirListLimit }} \
+ -ip=${POD_IP} \
+ -master={{ range $index := until (.Values.master.replicas | int) }}${SEAWEEDFS_FULLNAME}-master-{{ $index }}.${SEAWEEDFS_FULLNAME}-master:{{ $.Values.master.port }}{{ if lt $index (sub ($.Values.master.replicas | int) 1) }},{{ end }}{{ end }}
+ {{- if or (.Values.global.enableSecurity) (.Values.filer.extraVolumeMounts) }}
+ volumeMounts:
+ - name: seaweedfs-filer-log-volume
+ mountPath: "/logs/"
+ {{- if .Values.global.enableSecurity }}
+ - name: security-config
+ readOnly: true
+ mountPath: /etc/seaweedfs/security.toml
+ subPath: security.toml
+ - name: ca-cert
+ readOnly: true
+ mountPath: /usr/local/share/ca-certificates/ca/
+ - name: master-cert
+ readOnly: true
+ mountPath: /usr/local/share/ca-certificates/master/
+ - name: volume-cert
+ readOnly: true
+ mountPath: /usr/local/share/ca-certificates/volume/
+ - name: filer-cert
+ readOnly: true
+ mountPath: /usr/local/share/ca-certificates/filer/
+ - name: client-cert
+ readOnly: true
+ mountPath: /usr/local/share/ca-certificates/client/
+ {{- end }}
+ {{ tpl .Values.filer.extraVolumeMounts . | nindent 12 | trim }}
+ {{- end }}
+ ports:
+ - containerPort: {{ .Values.filer.port }}
+ name: swfs-filer
+ - containerPort: {{ .Values.filer.grpcPort }}
+ #name: swfs-filer-grpc
+ readinessProbe:
+ httpGet:
+ path: /
+ port: {{ .Values.filer.port }}
+ scheme: HTTP
+ initialDelaySeconds: 10
+ periodSeconds: 15
+ successThreshold: 1
+ failureThreshold: 100
+ livenessProbe:
+ httpGet:
+ path: /
+ port: {{ .Values.filer.port }}
+ scheme: HTTP
+ initialDelaySeconds: 20
+ periodSeconds: 30
+ successThreshold: 1
+ failureThreshold: 5
+ {{- if .Values.filer.resources }}
+ resources:
+ {{ tpl .Values.filer.resources . | nindent 12 | trim }}
+ {{- end }}
+ volumes:
+ - name: seaweedfs-filer-log-volume
+ hostPath:
+ path: /storage/logs/seaweedfs/filer
+ type: DirectoryOrCreate
+ {{- if .Values.global.enableSecurity }}
+ - name: security-config
+ configMap:
+ name: {{ template "seaweedfs.name" . }}-security-config
+ - name: ca-cert
+ secret:
+ secretName: {{ template "seaweedfs.name" . }}-ca-cert
+ - name: master-cert
+ secret:
+ secretName: {{ template "seaweedfs.name" . }}-master-cert
+ - name: volume-cert
+ secret:
+ secretName: {{ template "seaweedfs.name" . }}-volume-cert
+ - name: filer-cert
+ secret:
+ secretName: {{ template "seaweedfs.name" . }}-filer-cert
+ - name: client-cert
+ secret:
+ secretName: {{ template "seaweedfs.name" . }}-client-cert
+ {{- end }}
+ {{ tpl .Values.filer.extraVolumes . | indent 8 | trim }}
+ {{- if .Values.filer.nodeSelector }}
+ nodeSelector:
+ {{ tpl .Values.filer.nodeSelector . | indent 8 | trim }}
+ {{- end }}
+{{/* volumeClaimTemplates:*/}}
+{{/* - metadata:*/}}
+{{/* name: data-{{ .Release.Namespace }}*/}}
+{{/* spec:*/}}
+{{/* accessModes:*/}}
+{{/* - ReadWriteOnce*/}}
+{{/* resources:*/}}
+{{/* requests:*/}}
+{{/* storage: {{ .Values.filer.storage }}*/}}
+{{/* {{- if .Values.filer.storageClass }}*/}}
+{{/* storageClassName: {{ .Values.filer.storageClass }}*/}}
+{{/* {{- end }}*/}}
+{{- end }}
diff --git a/k8s/seaweedfs/templates/ingress.yaml b/k8s/seaweedfs/templates/ingress.yaml
new file mode 100644
index 000000000..dcd52c138
--- /dev/null
+++ b/k8s/seaweedfs/templates/ingress.yaml
@@ -0,0 +1,59 @@
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+ name: ingress-{{ template "seaweedfs.name" . }}-filer
+ annotations:
+ kubernetes.io/ingress.class: "nginx"
+ nginx.ingress.kubernetes.io/auth-type: "basic"
+ nginx.ingress.kubernetes.io/auth-secret: "default/ingress-basic-auth-secret"
+ nginx.ingress.kubernetes.io/auth-realm: 'Authentication Required - SW-Filer'
+ nginx.ingress.kubernetes.io/service-upstream: "true"
+ nginx.ingress.kubernetes.io/rewrite-target: /$1
+ nginx.ingress.kubernetes.io/use-regex: "true"
+ nginx.ingress.kubernetes.io/enable-rewrite-log: "true"
+ nginx.ingress.kubernetes.io/ssl-redirect: "false"
+ nginx.ingress.kubernetes.io/force-ssl-redirect: "false"
+ nginx.ingress.kubernetes.io/configuration-snippet: |
+      sub_filter '<head>' '<head> <base href="/sw-filer/">'; #add base url
+ sub_filter '="/' '="./'; #make absolute paths to relative
+ sub_filter '=/' '=./';
+ sub_filter '/seaweedfsstatic' './seaweedfsstatic';
+ sub_filter_once off;
+spec:
+ rules:
+ - http:
+ paths:
+ - path: /sw-filer/?(.*)
+ backend:
+ serviceName: {{ template "seaweedfs.name" . }}-filer
+ servicePort: {{ .Values.filer.port }}
+---
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+ name: ingress-{{ template "seaweedfs.name" . }}-master
+ annotations:
+ kubernetes.io/ingress.class: "nginx"
+ nginx.ingress.kubernetes.io/auth-type: "basic"
+ nginx.ingress.kubernetes.io/auth-secret: "default/ingress-basic-auth-secret"
+ nginx.ingress.kubernetes.io/auth-realm: 'Authentication Required - SW-Master'
+ nginx.ingress.kubernetes.io/service-upstream: "true"
+ nginx.ingress.kubernetes.io/rewrite-target: /$1
+ nginx.ingress.kubernetes.io/use-regex: "true"
+ nginx.ingress.kubernetes.io/enable-rewrite-log: "true"
+ nginx.ingress.kubernetes.io/ssl-redirect: "false"
+ nginx.ingress.kubernetes.io/force-ssl-redirect: "false"
+ nginx.ingress.kubernetes.io/configuration-snippet: |
+      sub_filter '<head>' '<head> <base href="/sw-master/">'; #add base url
+ sub_filter '="/' '="./'; #make absolute paths to relative
+ sub_filter '=/' '=./';
+ sub_filter '/seaweedfsstatic' './seaweedfsstatic';
+ sub_filter_once off;
+spec:
+ rules:
+ - http:
+ paths:
+ - path: /sw-master/?(.*)
+ backend:
+ serviceName: {{ template "seaweedfs.name" . }}-master
+ servicePort: {{ .Values.master.port }}
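
Note: both ingresses point `auth-secret` at `default/ingress-basic-auth-secret`, which this chart does not create. Following the usual ingress-nginx basic-auth convention (an Opaque secret carrying an htpasswd entry under the `auth` key), a sketch of what must exist beforehand — the credentials below are placeholders:

    apiVersion: v1
    kind: Secret
    metadata:
      name: ingress-basic-auth-secret
      namespace: default
    type: Opaque
    stringData:
      # placeholder; generate a real entry with: htpasswd -nb <user> <password>
      auth: "admin:$apr1$REPLACE_ME"
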
diff --git a/k8s/seaweedfs/templates/master-cert.yaml b/k8s/seaweedfs/templates/master-cert.yaml
new file mode 100644
index 000000000..a8b0fc1d1
--- /dev/null
+++ b/k8s/seaweedfs/templates/master-cert.yaml
@@ -0,0 +1,33 @@
+{{- if .Values.global.enableSecurity }}
+apiVersion: certmanager.k8s.io/v1alpha1
+kind: Certificate
+metadata:
+ name: {{ template "seaweedfs.name" . }}-master-cert
+ namespace: {{ .Release.Namespace }}
+spec:
+ secretName: {{ template "seaweedfs.name" . }}-master-cert
+ issuerRef:
+ name: {{ template "seaweedfs.name" . }}-clusterissuer
+ kind: ClusterIssuer
+ commonName: {{ .Values.certificates.commonName }}
+ organization:
+ - "SeaweedFS CA"
+ dnsNames:
+ - '*.{{ .Release.Namespace }}'
+ - '*.{{ .Release.Namespace }}.svc'
+ - '*.{{ .Release.Namespace }}.svc.cluster.local'
+ - '*.{{ template "seaweedfs.name" . }}-master'
+ - '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}'
+ - '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}.svc'
+ - '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}.svc.cluster.local'
+{{- if .Values.certificates.ipAddresses }}
+ ipAddresses:
+ {{- range .Values.certificates.ipAddresses }}
+ - {{ . }}
+ {{- end }}
+{{- end }}
+ keyAlgorithm: {{ .Values.certificates.keyAlgorithm }}
+ keySize: {{ .Values.certificates.keySize }}
+ duration: {{ .Values.certificates.duration }}
+ renewBefore: {{ .Values.certificates.renewBefore }}
+{{- end }}
diff --git a/k8s/seaweedfs/templates/master-service.yaml b/k8s/seaweedfs/templates/master-service.yaml
new file mode 100644
index 000000000..f7603bd91
--- /dev/null
+++ b/k8s/seaweedfs/templates/master-service.yaml
@@ -0,0 +1,24 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ template "seaweedfs.name" . }}-master
+ namespace: {{ .Release.Namespace }}
+ labels:
+ app: {{ template "seaweedfs.name" . }}
+ component: master
+ annotations:
+ service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
+spec:
+ clusterIP: None
+ ports:
+ - name: "swfs-master"
+ port: {{ .Values.master.port }}
+ targetPort: {{ .Values.master.port }}
+ protocol: TCP
+ - name: "swfs-master-grpc"
+ port: {{ .Values.master.grpcPort }}
+ targetPort: {{ .Values.master.grpcPort }}
+ protocol: TCP
+ selector:
+ app: {{ template "seaweedfs.name" . }}
+ component: master
\ No newline at end of file
diff --git a/k8s/seaweedfs/templates/master-statefulset.yaml b/k8s/seaweedfs/templates/master-statefulset.yaml
new file mode 100644
index 000000000..87050534f
--- /dev/null
+++ b/k8s/seaweedfs/templates/master-statefulset.yaml
@@ -0,0 +1,200 @@
+{{- if .Values.master.enabled }}
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: {{ template "seaweedfs.name" . }}-master
+ namespace: {{ .Release.Namespace }}
+ labels:
+ app: {{ template "seaweedfs.name" . }}
+ chart: {{ template "seaweedfs.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+spec:
+ serviceName: {{ template "seaweedfs.name" . }}-master
+ podManagementPolicy: Parallel
+ replicas: {{ .Values.master.replicas }}
+ {{- if (gt (int .Values.master.updatePartition) 0) }}
+ updateStrategy:
+ type: RollingUpdate
+ rollingUpdate:
+ partition: {{ .Values.master.updatePartition }}
+ {{- end }}
+ selector:
+ matchLabels:
+ app: {{ template "seaweedfs.name" . }}
+ chart: {{ template "seaweedfs.chart" . }}
+ release: {{ .Release.Name }}
+ component: master
+ template:
+ metadata:
+ labels:
+ app: {{ template "seaweedfs.name" . }}
+ chart: {{ template "seaweedfs.chart" . }}
+ release: {{ .Release.Name }}
+ component: master
+ spec:
+ restartPolicy: {{ default .Values.global.restartPolicy .Values.master.restartPolicy }}
+ {{- if .Values.master.affinity }}
+ affinity:
+ {{ tpl .Values.master.affinity . | nindent 8 | trim }}
+ {{- end }}
+ {{- if .Values.master.tolerations }}
+ tolerations:
+ {{ tpl .Values.master.tolerations . | nindent 8 | trim }}
+ {{- end }}
+ {{- if .Values.global.imagePullSecrets }}
+ imagePullSecrets:
+ - name: {{ .Values.global.imagePullSecrets }}
+ {{- end }}
+ terminationGracePeriodSeconds: 60
+ {{- if .Values.master.priorityClassName }}
+ priorityClassName: {{ .Values.master.priorityClassName | quote }}
+ {{- end }}
+ enableServiceLinks: false
+ containers:
+ - name: seaweedfs
+ image: {{ template "master.image" . }}
+ imagePullPolicy: {{ default "IfNotPresent" .Values.global.imagePullPolicy }}
+ env:
+ - name: POD_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: SEAWEEDFS_FULLNAME
+ value: "{{ template "seaweedfs.name" . }}"
+ command:
+ - "/bin/sh"
+ - "-ec"
+ - |
+ exec /usr/bin/weed -logdir=/logs \
+ {{- if .Values.master.loggingOverrideLevel }}
+ -v={{ .Values.master.loggingOverrideLevel }} \
+ {{- else }}
+ -v={{ .Values.global.loggingLevel }} \
+ {{- end }}
+ master \
+ -port={{ .Values.master.port }} \
+ -mdir=/data \
+ -ip.bind={{ .Values.master.ipBind }} \
+ {{- if .Values.master.volumePreallocate }}
+ -volumePreallocate \
+ {{- end }}
+ {{- if .Values.global.monitoring.enabled }}
+ -metrics.address="{{ .Values.global.monitoring.gatewayHost }}:{{ .Values.global.monitoring.gatewayPort }}" \
+ {{- end }}
+ -volumeSizeLimitMB={{ .Values.master.volumeSizeLimitMB }} \
+ {{- if .Values.master.disableHttp }}
+ -disableHttp \
+ {{- end }}
+ -ip=${POD_NAME}.${SEAWEEDFS_FULLNAME}-master \
+ -peers={{ range $index := until (.Values.master.replicas | int) }}${SEAWEEDFS_FULLNAME}-master-{{ $index }}.${SEAWEEDFS_FULLNAME}-master:{{ $.Values.master.port }}{{ if lt $index (sub ($.Values.master.replicas | int) 1) }},{{ end }}{{ end }}
+ volumeMounts:
+ - name : data-{{ .Release.Namespace }}
+ mountPath: /data
+ - name: seaweedfs-master-log-volume
+ mountPath: "/logs/"
+ {{- if .Values.global.enableSecurity }}
+ - name: security-config
+ readOnly: true
+ mountPath: /etc/seaweedfs/security.toml
+ subPath: security.toml
+ - name: ca-cert
+ readOnly: true
+ mountPath: /usr/local/share/ca-certificates/ca/
+ - name: master-cert
+ readOnly: true
+ mountPath: /usr/local/share/ca-certificates/master/
+ - name: volume-cert
+ readOnly: true
+ mountPath: /usr/local/share/ca-certificates/volume/
+ - name: filer-cert
+ readOnly: true
+ mountPath: /usr/local/share/ca-certificates/filer/
+ - name: client-cert
+ readOnly: true
+ mountPath: /usr/local/share/ca-certificates/client/
+ {{- end }}
+ {{ tpl .Values.master.extraVolumeMounts . | nindent 12 | trim }}
+ ports:
+ - containerPort: {{ .Values.master.port }}
+ name: swfs-master
+ - containerPort: {{ .Values.master.grpcPort }}
+ #name: swfs-master-grpc
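+            # container port names are limited to 15 characters; "swfs-master-grpc" is 16 and would fail validation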
+ readinessProbe:
+ httpGet:
+ path: /cluster/status
+ port: {{ .Values.master.port }}
+ scheme: HTTP
+ initialDelaySeconds: 5
+ periodSeconds: 15
+ successThreshold: 2
+ failureThreshold: 100
+ livenessProbe:
+ httpGet:
+ path: /cluster/status
+ port: {{ .Values.master.port }}
+ scheme: HTTP
+ initialDelaySeconds: 20
+ periodSeconds: 10
+ successThreshold: 1
+ failureThreshold: 6
+ {{- if .Values.master.resources }}
+ resources:
+ {{ tpl .Values.master.resources . | nindent 12 | trim }}
+ {{- end }}
+ volumes:
+ - name: seaweedfs-master-log-volume
+ hostPath:
+ path: /storage/logs/seaweedfs/master
+ type: DirectoryOrCreate
+ - name: data-{{ .Release.Namespace }}
+ hostPath:
+ path: /ssd/seaweed-master/
+ type: DirectoryOrCreate
+ {{- if .Values.global.enableSecurity }}
+ - name: security-config
+ configMap:
+ name: {{ template "seaweedfs.name" . }}-security-config
+ - name: ca-cert
+ secret:
+ secretName: {{ template "seaweedfs.name" . }}-ca-cert
+ - name: master-cert
+ secret:
+ secretName: {{ template "seaweedfs.name" . }}-master-cert
+ - name: volume-cert
+ secret:
+ secretName: {{ template "seaweedfs.name" . }}-volume-cert
+ - name: filer-cert
+ secret:
+ secretName: {{ template "seaweedfs.name" . }}-filer-cert
+ - name: client-cert
+ secret:
+ secretName: {{ template "seaweedfs.name" . }}-client-cert
+ {{- end }}
+ {{ tpl .Values.master.extraVolumes . | indent 8 | trim }}
+ {{- if .Values.master.nodeSelector }}
+ nodeSelector:
+ {{ tpl .Values.master.nodeSelector . | indent 8 | trim }}
+ {{- end }}
+{{/* volumeClaimTemplates:*/}}
+{{/* - metadata:*/}}
+{{/* name: data-{{ .Release.Namespace }}*/}}
+{{/* spec:*/}}
+{{/* accessModes:*/}}
+{{/* - ReadWriteOnce*/}}
+{{/* resources:*/}}
+{{/* requests:*/}}
+{{/* storage: {{ .Values.master.storage }}*/}}
+{{/* {{- if .Values.master.storageClass }}*/}}
+{{/* storageClassName: {{ .Values.master.storageClass }}*/}}
+{{/* {{- end }}*/}}
+{{- end }}
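
For reference, with the default chart name (seaweedfs), master.port 9333, and master.replicas raised to 3, the templated command above renders roughly as follows — a sketch assuming the default logging level and no monitoring or security; the headless master service is what makes the per-pod `...-master-N` DNS names resolvable:

    command:
      - "/bin/sh"
      - "-ec"
      - |
        exec /usr/bin/weed -logdir=/logs \
          -v=1 \
          master \
          -port=9333 \
          -mdir=/data \
          -ip.bind=0.0.0.0 \
          -volumeSizeLimitMB=30000 \
          -ip=${POD_NAME}.${SEAWEEDFS_FULLNAME}-master \
          -peers=${SEAWEEDFS_FULLNAME}-master-0.${SEAWEEDFS_FULLNAME}-master:9333,${SEAWEEDFS_FULLNAME}-master-1.${SEAWEEDFS_FULLNAME}-master:9333,${SEAWEEDFS_FULLNAME}-master-2.${SEAWEEDFS_FULLNAME}-master:9333
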
diff --git a/k8s/seaweedfs/templates/s3-deployment.yaml b/k8s/seaweedfs/templates/s3-deployment.yaml
new file mode 100644
index 000000000..1bb3283f1
--- /dev/null
+++ b/k8s/seaweedfs/templates/s3-deployment.yaml
@@ -0,0 +1,157 @@
+{{- if .Values.s3.enabled }}
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ template "seaweedfs.name" . }}-s3
+ namespace: {{ .Release.Namespace }}
+ labels:
+ app: {{ template "seaweedfs.name" . }}
+ chart: {{ template "seaweedfs.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+spec:
+ replicas: {{ .Values.s3.replicas }}
+ selector:
+ matchLabels:
+ app: {{ template "seaweedfs.name" . }}
+ chart: {{ template "seaweedfs.chart" . }}
+ release: {{ .Release.Name }}
+ component: s3
+ template:
+ metadata:
+ labels:
+ app: {{ template "seaweedfs.name" . }}
+ chart: {{ template "seaweedfs.chart" . }}
+ release: {{ .Release.Name }}
+ component: s3
+ spec:
+ restartPolicy: {{ default .Values.global.restartPolicy .Values.s3.restartPolicy }}
+ {{- if .Values.s3.tolerations }}
+ tolerations:
+ {{ tpl .Values.s3.tolerations . | nindent 8 | trim }}
+ {{- end }}
+ {{- if .Values.global.imagePullSecrets }}
+ imagePullSecrets:
+ - name: {{ .Values.global.imagePullSecrets }}
+ {{- end }}
+ terminationGracePeriodSeconds: 10
+ {{- if .Values.s3.priorityClassName }}
+ priorityClassName: {{ .Values.s3.priorityClassName | quote }}
+ {{- end }}
+ enableServiceLinks: false
+ containers:
+ - name: seaweedfs
+ image: {{ template "s3.image" . }}
+ imagePullPolicy: {{ default "IfNotPresent" .Values.global.imagePullPolicy }}
+ env:
+ - name: POD_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: SEAWEEDFS_FULLNAME
+ value: "{{ template "seaweedfs.name" . }}"
+ command:
+ - "/bin/sh"
+ - "-ec"
+ - |
+ exec /usr/bin/weed \
+ {{- if .Values.s3.loggingOverrideLevel }}
+ -v={{ .Values.s3.loggingOverrideLevel }} \
+ {{- else }}
+ -v={{ .Values.global.loggingLevel }} \
+ {{- end }}
+ s3 \
+ -port={{ .Values.s3.port }} \
+ {{- if .Values.global.enableSecurity }}
+ -cert.file=/usr/local/share/ca-certificates/client/tls.crt \
+ -key.file=/usr/local/share/ca-certificates/client/tls.key \
+ {{- end }}
+ {{- if .Values.s3.domainName }}
+ -domainName={{ .Values.s3.domainName }} \
+ {{- end }}
+ -filer={{ template "seaweedfs.name" . }}-filer:{{ .Values.filer.port }}
+ {{- if or (.Values.global.enableSecurity) (.Values.s3.extraVolumeMounts) }}
+ volumeMounts:
+ {{- if .Values.global.enableSecurity }}
+ - name: security-config
+ readOnly: true
+ mountPath: /etc/seaweedfs/security.toml
+ subPath: security.toml
+ - name: ca-cert
+ readOnly: true
+ mountPath: /usr/local/share/ca-certificates/ca/
+ - name: master-cert
+ readOnly: true
+ mountPath: /usr/local/share/ca-certificates/master/
+ - name: volume-cert
+ readOnly: true
+ mountPath: /usr/local/share/ca-certificates/volume/
+ - name: filer-cert
+ readOnly: true
+ mountPath: /usr/local/share/ca-certificates/filer/
+ - name: client-cert
+ readOnly: true
+ mountPath: /usr/local/share/ca-certificates/client/
+ {{- end }}
+ {{ tpl .Values.s3.extraVolumeMounts . | nindent 12 | trim }}
+ {{- end }}
+ ports:
+ - containerPort: {{ .Values.s3.port }}
+ name: swfs-s3
+ readinessProbe:
+ httpGet:
+ path: /
+ port: {{ .Values.s3.port }}
+ scheme: HTTP
+ initialDelaySeconds: 15
+ periodSeconds: 15
+ successThreshold: 1
+ failureThreshold: 100
+ livenessProbe:
+ httpGet:
+ path: /
+ port: {{ .Values.s3.port }}
+ scheme: HTTP
+ initialDelaySeconds: 20
+ periodSeconds: 60
+ successThreshold: 1
+ failureThreshold: 20
+ {{- if .Values.s3.resources }}
+ resources:
+ {{ tpl .Values.s3.resources . | nindent 12 | trim }}
+ {{- end }}
+ volumes:
+ {{- if .Values.global.enableSecurity }}
+ - name: security-config
+ configMap:
+ name: {{ template "seaweedfs.name" . }}-security-config
+ - name: ca-cert
+ secret:
+ secretName: {{ template "seaweedfs.name" . }}-ca-cert
+ - name: master-cert
+ secret:
+ secretName: {{ template "seaweedfs.name" . }}-master-cert
+ - name: volume-cert
+ secret:
+ secretName: {{ template "seaweedfs.name" . }}-volume-cert
+ - name: filer-cert
+ secret:
+ secretName: {{ template "seaweedfs.name" . }}-filer-cert
+ - name: client-cert
+ secret:
+ secretName: {{ template "seaweedfs.name" . }}-client-cert
+ {{- end }}
+ {{ tpl .Values.s3.extraVolumes . | indent 8 | trim }}
+ {{- if .Values.s3.nodeSelector }}
+ nodeSelector:
+ {{ tpl .Values.s3.nodeSelector . | indent 8 | trim }}
+ {{- end }}
+{{- end }}
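
The S3 gateway above only needs to reach the filer; everything else is driven from values. A small override sketch (the domain is an example, not a chart default):

    s3:
      enabled: true
      port: 8333
      # buckets are then served as {bucket}.s3.example.com
      domainName: "s3.example.com"
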
diff --git a/k8s/seaweedfs/templates/s3-service.yaml b/k8s/seaweedfs/templates/s3-service.yaml
new file mode 100644
index 000000000..b088e25fa
--- /dev/null
+++ b/k8s/seaweedfs/templates/s3-service.yaml
@@ -0,0 +1,17 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ template "seaweedfs.name" . }}-s3
+ namespace: {{ .Release.Namespace }}
+ labels:
+ app: {{ template "seaweedfs.name" . }}
+ component: s3
+spec:
+ ports:
+ - name: "swfs-s3"
+ port: {{ .Values.s3.port }}
+ targetPort: {{ .Values.s3.port }}
+ protocol: TCP
+ selector:
+ app: {{ template "seaweedfs.name" . }}
+ component: s3
\ No newline at end of file
diff --git a/k8s/seaweedfs/templates/seaweefs-grafana-dashboard.yaml b/k8s/seaweedfs/templates/seaweefs-grafana-dashboard.yaml
new file mode 100644
index 000000000..c943ea50f
--- /dev/null
+++ b/k8s/seaweedfs/templates/seaweefs-grafana-dashboard.yaml
@@ -0,0 +1,1352 @@
+{{- if .Values.global.monitoring.enabled }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: seaweedfs-grafana-dashboard
+ labels:
+ grafana_dashboard: "1"
+data:
+ seaweedfs.json: |-
+ {
+ "annotations": {
+ "list": [
+ {
+ "builtIn": 1,
+ "datasource": "Prometheus",
+ "enable": true,
+ "hide": true,
+ "iconColor": "rgba(0, 211, 255, 1)",
+ "limit": 100,
+ "name": "Annotations & Alerts",
+ "showIn": 0,
+ "type": "dashboard"
+ }
+ ]
+ },
+ "editable": true,
+ "gnetId": 10423,
+ "graphTooltip": 0,
+ "hideControls": false,
+ "id": null,
+ "links": [],
+ "refresh": "30s",
+ "rows": [
+ {
+ "collapse": false,
+ "height": 251,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "Prometheus",
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {},
+ "id": 46,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 4,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "histogram_quantile(0.80, sum(rate(SeaweedFS_filer_request_seconds_bucket[1m])) by (le))",
+ "format": "time_series",
+ "hide": false,
+ "intervalFactor": 2,
+ "legendFormat": "average",
+ "refId": "A",
+ "step": 60
+ },
+ {
+ "expr": "histogram_quantile(0.80, sum(rate(SeaweedFS_filer_request_seconds_bucket[1m])) by (le, type))",
+ "format": "time_series",
+ "hide": false,
+ "intervalFactor": 2,
+ "legendFormat": "{{`{{type}}`}}",
+ "refId": "B",
+ "step": 60
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+        "title": "Filer Request Duration 80th percentile",
+ "tooltip": {
+ "msResolution": true,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "s",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "Prometheus",
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {},
+ "id": 49,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 4,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "histogram_quantile(0.95, sum(rate(SeaweedFS_filer_request_seconds_bucket[1m])) by (le))",
+ "format": "time_series",
+ "hide": false,
+ "intervalFactor": 2,
+ "legendFormat": "average",
+ "refId": "A",
+ "step": 60
+ },
+ {
+ "expr": "histogram_quantile(0.95, sum(rate(SeaweedFS_filer_request_seconds_bucket[1m])) by (le, type))",
+ "format": "time_series",
+ "hide": false,
+ "intervalFactor": 2,
+ "legendFormat": "{{`{{type}}`}}",
+ "refId": "B",
+ "step": 60
+ },
+ {
+ "expr": "",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "C"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Filer Request Duration 95th percentile",
+ "tooltip": {
+ "msResolution": true,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "s",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "Prometheus",
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {},
+ "id": 45,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 4,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_filer_request_seconds_bucket[1m])) by (le))",
+ "format": "time_series",
+ "hide": false,
+ "intervalFactor": 2,
+ "legendFormat": "average",
+ "refId": "A",
+ "step": 60
+ },
+ {
+ "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_filer_request_seconds_bucket[1m])) by (le, type))",
+ "format": "time_series",
+ "hide": false,
+ "intervalFactor": 2,
+ "legendFormat": "{{`{{type}}`}}",
+ "refId": "B",
+ "step": 60
+ },
+ {
+ "expr": "",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "C"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Filer Request Duration 99th percentile",
+ "tooltip": {
+ "msResolution": true,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "s",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "Prometheus",
+ "editable": true,
+ "error": false,
+ "fill": 0,
+ "grid": {},
+ "id": 2,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": true,
+ "hideEmpty": true,
+ "hideZero": true,
+ "max": true,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sideWidth": 250,
+ "sort": "max",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "minSpan": 12,
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "total",
+ "lines": false
+ }
+ ],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rate(SeaweedFS_filer_request_total[1m]) * 5",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{`{{type}}`}}",
+ "refId": "A",
+ "step": 30
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Filer QPS",
+ "tooltip": {
+ "msResolution": true,
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "Filer",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": false,
+ "height": 252,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "Prometheus",
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {},
+ "id": 47,
+ "legend": {
+ "alignAsTable": false,
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_volumeServer_request_seconds_bucket[1m])) by (le, exported_instance))",
+ "format": "time_series",
+ "hide": false,
+ "intervalFactor": 2,
+ "legendFormat": "{{`{{exported_instance}}`}}",
+ "refId": "B"
+ },
+ {
+ "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_volumeServer_request_seconds_bucket[1m])) by (le))",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "average",
+ "refId": "C"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Volume Server Request Duration 99th percentile",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "s",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "Prometheus",
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {},
+ "id": 40,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": false,
+ "hideEmpty": true,
+ "hideZero": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sort": "total",
+ "sortDesc": true,
+ "total": true,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(rate(SeaweedFS_volumeServer_request_total[1m])) by (type)",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{`{{type}}`}}",
+ "refId": "A",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Volume Server QPS",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "Prometheus",
+ "fill": 1,
+ "id": 48,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(SeaweedFS_volumeServer_volumes) by (collection, type)",
+ "format": "time_series",
+ "hide": false,
+ "intervalFactor": 2,
+ "legendFormat": "{{`{{collection}}`}} {{`{{type}}`}}",
+ "refId": "A"
+ },
+ {
+ "expr": "sum(SeaweedFS_volumeServer_max_volumes)",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "Total",
+ "refId": "B"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Volume Count",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "Prometheus",
+ "fill": 1,
+ "id": 50,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(SeaweedFS_volumeServer_total_disk_size) by (collection, type)",
+ "format": "time_series",
+ "hide": false,
+ "intervalFactor": 2,
+ "legendFormat": "{{`{{collection}}`}} {{`{{type}}`}}",
+ "refId": "A"
+ },
+ {
+ "expr": "sum(SeaweedFS_volumeServer_total_disk_size)",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "Total",
+ "refId": "B"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Used Disk Space by Collection and Type",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "bytes",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "Prometheus",
+ "fill": 1,
+ "id": 51,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(SeaweedFS_volumeServer_total_disk_size) by (exported_instance)",
+ "format": "time_series",
+ "hide": false,
+ "intervalFactor": 2,
+ "legendFormat": "{{`{{exported_instance}}`}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Used Disk Space by Host",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "bytes",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "Volume Server",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": false,
+ "height": 251,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "Prometheus",
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {},
+ "id": 12,
+ "legend": {
+ "alignAsTable": false,
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_filerStore_request_seconds_bucket[1m])) by (le, type))",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{`{{type}}`}}",
+ "refId": "B"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Filer Store Request Duration 99th percentile",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "s",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "Prometheus",
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {},
+ "id": 14,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "hideEmpty": false,
+ "hideZero": false,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(rate(SeaweedFS_filerStore_request_total [1m])) by (type)",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{`{{type}}`}}",
+ "refId": "B"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Filer Store QPS",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "Filer Store",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": false,
+ "height": 242,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "Prometheus",
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {},
+ "id": 52,
+ "legend": {
+ "alignAsTable": false,
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "go_memstats_alloc_bytes{exported_job=\"filer\"}",
+ "format": "time_series",
+ "hide": false,
+ "intervalFactor": 2,
+ "legendFormat": "bytes allocated",
+ "refId": "B"
+ },
+ {
+ "expr": "rate(go_memstats_alloc_bytes_total{exported_job=\"filer\"}[30s])",
+ "format": "time_series",
+ "hide": false,
+ "intervalFactor": 2,
+ "legendFormat": "alloc rate",
+ "refId": "A"
+ },
+ {
+ "expr": "go_memstats_stack_inuse_bytes{exported_job=\"filer\"}",
+ "format": "time_series",
+ "hide": false,
+ "intervalFactor": 2,
+ "legendFormat": "stack inuse",
+ "refId": "C"
+ },
+ {
+ "expr": "go_memstats_heap_inuse_bytes{exported_job=\"filer\"}",
+ "format": "time_series",
+ "hide": false,
+ "intervalFactor": 2,
+ "legendFormat": "heap inuse",
+ "refId": "D"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Filer Go Memory Stats",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "bytes",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "Bps",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "Prometheus",
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {},
+ "id": 54,
+ "legend": {
+ "alignAsTable": false,
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "go_gc_duration_seconds{exported_job=\"filer\"}",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{`{{quantile}}`}}",
+ "refId": "B"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Filer Go GC duration quantiles",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "s",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "Bps",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "Prometheus",
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {},
+ "id": 53,
+ "legend": {
+ "alignAsTable": false,
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "go_goroutines{exported_job=\"filer\"}",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{`{{exported_instance}}`}}",
+ "refId": "B"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Filer Go Routines",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "Filer Instances",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": 250,
+ "panels": [],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "Dashboard Row",
+ "titleSize": "h6"
+ }
+ ],
+ "schemaVersion": 14,
+ "style": "dark",
+ "tags": [],
+ "templating": {
+ "list": []
+ },
+ "time": {
+ "from": "now-15m",
+ "to": "now"
+ },
+ "timepicker": {
+ "refresh_intervals": [
+ "5s",
+ "10s",
+ "30s",
+ "1m",
+ "5m",
+ "15m",
+ "30m",
+ "1h",
+ "2h",
+ "1d"
+ ],
+ "time_options": [
+ "5m",
+ "15m",
+ "1h",
+ "6h",
+ "12h",
+ "24h",
+ "2d",
+ "7d",
+ "30d"
+ ]
+ },
+ "timezone": "browser",
+ "title": "SeaweedFS",
+ "version": 3
+ }
+{{- end }}
\ No newline at end of file
diff --git a/k8s/seaweedfs/templates/secret-seaweedfs-db.yaml b/k8s/seaweedfs/templates/secret-seaweedfs-db.yaml
new file mode 100644
index 000000000..c6132c9ea
--- /dev/null
+++ b/k8s/seaweedfs/templates/secret-seaweedfs-db.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Secret
+type: Opaque
+metadata:
+ name: secret-seaweedfs-db
+ namespace: {{ .Release.Namespace }}
+ annotations:
+ "helm.sh/resource-policy": keep
+ "helm.sh/hook": "pre-install"
+stringData:
+ user: "YourSWUser"
+ password: "HardCodedPassword"
+  # better to randomly generate the password and create the matching user in the DB, e.g.:
+ # password: {{ randAlphaNum 10 | sha256sum | b64enc | trunc 32 }}
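
A plausible way to consume this secret from the filer pod is via secretKeyRef env entries; the WEED_MYSQL_USERNAME/WEED_MYSQL_PASSWORD names below assume SeaweedFS's WEED_<SECTION>_<KEY> env-to-filer.toml mapping, the same convention used by extraEnvironmentVars in values.yaml:

    env:
      - name: WEED_MYSQL_USERNAME
        valueFrom:
          secretKeyRef:
            name: secret-seaweedfs-db
            key: user
      - name: WEED_MYSQL_PASSWORD
        valueFrom:
          secretKeyRef:
            name: secret-seaweedfs-db
            key: password
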
diff --git a/k8s/seaweedfs/templates/security-configmap.yaml b/k8s/seaweedfs/templates/security-configmap.yaml
new file mode 100644
index 000000000..7d06614ec
--- /dev/null
+++ b/k8s/seaweedfs/templates/security-configmap.yaml
@@ -0,0 +1,52 @@
+{{- if .Values.global.enableSecurity }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ template "seaweedfs.name" . }}-security-config
+ namespace: {{ .Release.Namespace }}
+ labels:
+ app: {{ template "seaweedfs.name" . }}
+ chart: {{ template "seaweedfs.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+data:
+ security.toml: |-
+ # this file is read by master, volume server, and filer
+
+ # the jwt signing key is read by master and volume server
+ # a jwt expires in 10 seconds
+ [jwt.signing]
+ key = "{{ randAlphaNum 10 | b64enc }}"
+
+    # all gRPC TLS authentication is mutual
+    # the following ca, cert, and key values are paths to the PEM files
+ [grpc]
+ ca = "/usr/local/share/ca-certificates/ca/tls.crt"
+
+ [grpc.volume]
+ cert = "/usr/local/share/ca-certificates/volume/tls.crt"
+ key = "/usr/local/share/ca-certificates/volume/tls.key"
+
+ [grpc.master]
+ cert = "/usr/local/share/ca-certificates/master/tls.crt"
+ key = "/usr/local/share/ca-certificates/master/tls.key"
+
+ [grpc.filer]
+ cert = "/usr/local/share/ca-certificates/filer/tls.crt"
+ key = "/usr/local/share/ca-certificates/filer/tls.key"
+
+    # use this for anything that needs a gRPC client
+ # i.e., "weed backup|benchmark|filer.copy|filer.replicate|mount|s3|upload"
+ [grpc.client]
+ cert = "/usr/local/share/ca-certificates/client/tls.crt"
+ key = "/usr/local/share/ca-certificates/client/tls.key"
+
+ # volume server https options
+ # Note: work in progress!
+    # this does not yet work with other clients, e.g., "weed filer|mount"
+ [https.client]
+ enabled = false
+ [https.volume]
+ cert = ""
+ key = ""
+{{- end }}
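
All of the mTLS plumbing above hangs off a single switch plus the certificate knobs already defined in values.yaml; a minimal override sketch (this assumes cert-manager with the certmanager.k8s.io/v1alpha1 API is installed in the cluster, which the *-cert templates require):

    global:
      enableSecurity: true
    certificates:
      commonName: "SeaweedFS CA"
      ipAddresses: []
      keyAlgorithm: rsa
      keySize: 2048
      duration: 2160h   # 90d
      renewBefore: 360h # 15d
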
diff --git a/k8s/seaweedfs/templates/service-account.yaml b/k8s/seaweedfs/templates/service-account.yaml
new file mode 100644
index 000000000..e82ef7d62
--- /dev/null
+++ b/k8s/seaweedfs/templates/service-account.yaml
@@ -0,0 +1,29 @@
+# hack: grants the permissions needed to delete master pods after a migration
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: seaweedfs-rw-cr
+rules:
+ - apiGroups: [""]
+ resources: ["pods"]
+ verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: seaweedfs-rw-sa
+ namespace: {{ .Release.Namespace }}
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: system:serviceaccount:seaweedfs-rw-sa:default
+subjects:
+- kind: ServiceAccount
+  name: seaweedfs-rw-sa
+ namespace: {{ .Release.Namespace }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+  name: seaweedfs-rw-cr
\ No newline at end of file
diff --git a/k8s/seaweedfs/templates/volume-cert.yaml b/k8s/seaweedfs/templates/volume-cert.yaml
new file mode 100644
index 000000000..72c62a0f5
--- /dev/null
+++ b/k8s/seaweedfs/templates/volume-cert.yaml
@@ -0,0 +1,33 @@
+{{- if .Values.global.enableSecurity }}
+apiVersion: certmanager.k8s.io/v1alpha1
+kind: Certificate
+metadata:
+ name: {{ template "seaweedfs.name" . }}-volume-cert
+ namespace: {{ .Release.Namespace }}
+spec:
+ secretName: {{ template "seaweedfs.name" . }}-volume-cert
+ issuerRef:
+ name: {{ template "seaweedfs.name" . }}-clusterissuer
+ kind: ClusterIssuer
+ commonName: {{ .Values.certificates.commonName }}
+ organization:
+ - "SeaweedFS CA"
+ dnsNames:
+ - '*.{{ .Release.Namespace }}'
+ - '*.{{ .Release.Namespace }}.svc'
+ - '*.{{ .Release.Namespace }}.svc.cluster.local'
+  - '*.{{ template "seaweedfs.name" . }}-volume'
+  - '*.{{ template "seaweedfs.name" . }}-volume.{{ .Release.Namespace }}'
+  - '*.{{ template "seaweedfs.name" . }}-volume.{{ .Release.Namespace }}.svc'
+  - '*.{{ template "seaweedfs.name" . }}-volume.{{ .Release.Namespace }}.svc.cluster.local'
+{{- if .Values.certificates.ipAddresses }}
+ ipAddresses:
+ {{- range .Values.certificates.ipAddresses }}
+ - {{ . }}
+ {{- end }}
+{{- end }}
+ keyAlgorithm: {{ .Values.certificates.keyAlgorithm }}
+ keySize: {{ .Values.certificates.keySize }}
+ duration: {{ .Values.certificates.duration }}
+ renewBefore: {{ .Values.certificates.renewBefore }}
+{{- end }}
diff --git a/k8s/seaweedfs/templates/volume-service.yaml b/k8s/seaweedfs/templates/volume-service.yaml
new file mode 100644
index 000000000..fc7716681
--- /dev/null
+++ b/k8s/seaweedfs/templates/volume-service.yaml
@@ -0,0 +1,22 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ template "seaweedfs.name" . }}-volume
+ namespace: {{ .Release.Namespace }}
+ labels:
+ app: {{ template "seaweedfs.name" . }}
+ component: volume
+spec:
+ clusterIP: None
+ ports:
+ - name: "swfs-volume"
+ port: {{ .Values.volume.port }}
+ targetPort: {{ .Values.volume.port }}
+ protocol: TCP
+ - name: "swfs-volume-18080"
+ port: {{ .Values.volume.grpcPort }}
+ targetPort: {{ .Values.volume.grpcPort }}
+ protocol: TCP
+ selector:
+ app: {{ template "seaweedfs.name" . }}
+ component: volume
\ No newline at end of file
diff --git a/k8s/seaweedfs/templates/volume-statefulset.yaml b/k8s/seaweedfs/templates/volume-statefulset.yaml
new file mode 100644
index 000000000..9c6ddcd9f
--- /dev/null
+++ b/k8s/seaweedfs/templates/volume-statefulset.yaml
@@ -0,0 +1,187 @@
+{{- if .Values.volume.enabled }}
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: {{ template "seaweedfs.name" . }}-volume
+ namespace: {{ .Release.Namespace }}
+ labels:
+ app: {{ template "seaweedfs.name" . }}
+ chart: {{ template "seaweedfs.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+spec:
+ serviceName: {{ template "seaweedfs.name" . }}-volume
+ replicas: {{ .Values.volume.replicas }}
+ selector:
+ matchLabels:
+ app: {{ template "seaweedfs.name" . }}
+ chart: {{ template "seaweedfs.chart" . }}
+ release: {{ .Release.Name }}
+ component: volume
+ template:
+ metadata:
+ labels:
+ app: {{ template "seaweedfs.name" . }}
+ chart: {{ template "seaweedfs.chart" . }}
+ release: {{ .Release.Name }}
+ component: volume
+ spec:
+ {{- if .Values.volume.affinity }}
+ affinity:
+ {{ tpl .Values.volume.affinity . | nindent 8 | trim }}
+ {{- end }}
+ restartPolicy: {{ default .Values.global.restartPolicy .Values.volume.restartPolicy }}
+ {{- if .Values.volume.tolerations }}
+ tolerations:
+ {{ tpl .Values.volume.tolerations . | nindent 8 | trim }}
+ {{- end }}
+ {{- if .Values.global.imagePullSecrets }}
+ imagePullSecrets:
+ - name: {{ .Values.global.imagePullSecrets }}
+ {{- end }}
+ terminationGracePeriodSeconds: 10
+ {{- if .Values.volume.priorityClassName }}
+ priorityClassName: {{ .Values.volume.priorityClassName | quote }}
+ {{- end }}
+ enableServiceLinks: false
+ containers:
+ - name: seaweedfs
+ image: {{ template "volume.image" . }}
+ imagePullPolicy: {{ default "IfNotPresent" .Values.global.imagePullPolicy }}
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: HOST_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.hostIP
+ - name: SEAWEEDFS_FULLNAME
+ value: "{{ template "seaweedfs.name" . }}"
+ command:
+ - "/bin/sh"
+ - "-ec"
+ - |
+ exec /usr/bin/weed -logdir=/logs \
+ {{- if .Values.volume.loggingOverrideLevel }}
+ -v={{ .Values.volume.loggingOverrideLevel }} \
+ {{- else }}
+ -v={{ .Values.global.loggingLevel }} \
+ {{- end }}
+ volume \
+ -port={{ .Values.volume.port }} \
+ -dir={{ .Values.volume.dir }} \
+ -max={{ .Values.volume.maxVolumes }} \
+ {{- if .Values.volume.rack }}
+ -rack={{ .Values.volume.rack }} \
+ {{- end }}
+ {{- if .Values.volume.dataCenter }}
+ -dataCenter={{ .Values.volume.dataCenter }} \
+ {{- end }}
+ -ip.bind={{ .Values.volume.ipBind }} \
+ -read.redirect={{ .Values.volume.readRedirect }} \
+ {{- if .Values.volume.whiteList }}
+ -whiteList={{ .Values.volume.whiteList }} \
+ {{- end }}
+ {{- if .Values.volume.imagesFixOrientation }}
+ -images.fix.orientation \
+ {{- end }}
+ -ip=${POD_NAME}.${SEAWEEDFS_FULLNAME}-volume \
+ -compactionMBps={{ .Values.volume.compactionMBps }} \
+ -mserver={{ range $index := until (.Values.master.replicas | int) }}${SEAWEEDFS_FULLNAME}-master-{{ $index }}.${SEAWEEDFS_FULLNAME}-master:{{ $.Values.master.port }}{{ if lt $index (sub ($.Values.master.replicas | int) 1) }},{{ end }}{{ end }}
+ volumeMounts:
+ - name: seaweedfs-volume-storage
+ mountPath: "/data/"
+ - name: seaweedfs-volume-log-volume
+ mountPath: "/logs/"
+ {{- if .Values.global.enableSecurity }}
+ - name: security-config
+ readOnly: true
+ mountPath: /etc/seaweedfs/security.toml
+ subPath: security.toml
+ - name: ca-cert
+ readOnly: true
+ mountPath: /usr/local/share/ca-certificates/ca/
+ - name: master-cert
+ readOnly: true
+ mountPath: /usr/local/share/ca-certificates/master/
+ - name: volume-cert
+ readOnly: true
+ mountPath: /usr/local/share/ca-certificates/volume/
+ - name: filer-cert
+ readOnly: true
+ mountPath: /usr/local/share/ca-certificates/filer/
+ - name: client-cert
+ readOnly: true
+ mountPath: /usr/local/share/ca-certificates/client/
+ {{- end }}
+ {{ tpl .Values.volume.extraVolumeMounts . | nindent 12 | trim }}
+ ports:
+ - containerPort: {{ .Values.volume.port }}
+ name: swfs-vol
+ - containerPort: {{ .Values.volume.grpcPort }}
+ #name: swfs-vol-grpc
+ readinessProbe:
+ httpGet:
+ path: /status
+ port: {{ .Values.volume.port }}
+ scheme: HTTP
+ initialDelaySeconds: 5
+ periodSeconds: 15
+ successThreshold: 1
+ failureThreshold: 100
+ livenessProbe:
+ httpGet:
+ path: /status
+ port: {{ .Values.volume.port }}
+ scheme: HTTP
+ initialDelaySeconds: 20
+ periodSeconds: 30
+ successThreshold: 1
+ failureThreshold: 10
+ {{- if .Values.volume.resources }}
+ resources:
+ {{ tpl .Values.volume.resources . | nindent 12 | trim }}
+ {{- end }}
+ volumes:
+ - name: seaweedfs-volume-log-volume
+ hostPath:
+ path: /storage/logs/seaweedfs/volume
+ type: DirectoryOrCreate
+ - name: seaweedfs-volume-storage
+ hostPath:
+ path: /storage/object_store/
+ type: DirectoryOrCreate
+ {{- if .Values.global.enableSecurity }}
+ - name: security-config
+ configMap:
+ name: {{ template "seaweedfs.name" . }}-security-config
+ - name: ca-cert
+ secret:
+ secretName: {{ template "seaweedfs.name" . }}-ca-cert
+ - name: master-cert
+ secret:
+ secretName: {{ template "seaweedfs.name" . }}-master-cert
+ - name: volume-cert
+ secret:
+ secretName: {{ template "seaweedfs.name" . }}-volume-cert
+ - name: filer-cert
+ secret:
+ secretName: {{ template "seaweedfs.name" . }}-filer-cert
+ - name: client-cert
+ secret:
+ secretName: {{ template "seaweedfs.name" . }}-client-cert
+ {{- end }}
+ {{- if .Values.volume.extraVolumes }}
+ {{ tpl .Values.volume.extraVolumes . | indent 8 | trim }}
+ {{- end }}
+ {{- if .Values.volume.nodeSelector }}
+ nodeSelector:
+ {{ tpl .Values.volume.nodeSelector . | indent 8 | trim }}
+ {{- end }}
+{{- end }}
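
The flags rendered above map one-for-one onto the volume block of values.yaml; a sketch of a topology-aware override (the rack and data-center names are examples):

    volume:
      replicas: 2
      maxVolumes: "100"
      rack: "rack1"
      dataCenter: "dc1"
      # the chart's default nodeSelector (sw-volume: "true") expects nodes
      # labeled beforehand, e.g.: kubectl label node <node> sw-volume="true"
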
diff --git a/k8s/seaweedfs/values.yaml b/k8s/seaweedfs/values.yaml
new file mode 100644
index 000000000..cfc48d828
--- /dev/null
+++ b/k8s/seaweedfs/values.yaml
@@ -0,0 +1,309 @@
+# Available parameters and their default values for the SeaweedFS chart.
+
+global:
+ registry: ""
+ repository: ""
+ imageName: chrislusf/seaweedfs
+ imageTag: "1.79"
+ imagePullPolicy: IfNotPresent
+ imagePullSecrets: imagepullsecret
+ restartPolicy: Always
+ loggingLevel: 1
+ enableSecurity: false
+ monitoring:
+ enabled: false
+ gatewayHost: null
+ gatewayPort: null
+
+image:
+ registry: ""
+ repository: ""
+
+master:
+ enabled: true
+ repository: null
+ imageName: null
+ imageTag: null
+ imageOverride: null
+ restartPolicy: null
+ replicas: 1
+ port: 9333
+ grpcPort: 19333
+ ipBind: "0.0.0.0"
+ volumePreallocate: false
+ volumeSizeLimitMB: 30000
+ loggingOverrideLevel: null
+
+  # Disable HTTP requests; only gRPC operations are allowed
+ disableHttp: false
+
+ extraVolumes: ""
+ extraVolumeMounts: ""
+
+ # storage and storageClass are the settings for configuring stateful
+ # storage for the master pods. storage should be set to the disk size of
+ # the attached volume. storageClass is the class of storage which defaults
+ # to null (the Kube cluster will pick the default).
+ storage: 25Gi
+ storageClass: null
+
+ # Resource requests, limits, etc. for the master cluster placement. This
+ # should map directly to the value of the resources field for a PodSpec,
+ # formatted as a multi-line string. By default no direct resource request
+ # is made.
+ resources: null
+
+ # updatePartition is used to control a careful rolling update of SeaweedFS
+ # masters.
+ updatePartition: 0
+
+ # Affinity Settings
+  # Commenting out the affinity variable or setting it to an empty value
+  # allows deployment to single-node clusters such as Minikube
+ affinity: |
+ podAntiAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - labelSelector:
+ matchLabels:
+ app: {{ template "seaweedfs.name" . }}
+ release: "{{ .Release.Name }}"
+ component: master
+ topologyKey: kubernetes.io/hostname
+
+ # Toleration Settings for master pods
+ # This should be a multi-line string matching the Toleration array
+ # in a PodSpec.
+ tolerations: ""
+
+  # nodeSelector labels for master pod assignment, formatted as a multi-line string.
+ # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
+ # Example:
+ # nodeSelector: |
+ # beta.kubernetes.io/arch: amd64
+ nodeSelector: |
+ sw-backend: "true"
+
+ # used to assign priority to master pods
+ # ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
+ priorityClassName: ""
+
+
+volume:
+ enabled: true
+ repository: null
+ imageName: null
+ imageTag: null
+ imageOverride: null
+ restartPolicy: null
+ port: 8080
+ grpcPort: 18080
+ ipBind: "0.0.0.0"
+ replicas: 1
+ loggingOverrideLevel: null
+
+  # limit background compaction or copying speed in megabytes per second
+ compactionMBps: "40"
+
+ # Directories to store data files. dir[,dir]... (default "/tmp")
+ dir: "/data"
+
+ # Maximum numbers of volumes, count[,count]...
+  # If set to zero on a non-Windows OS, the limit will be auto-configured. (default "7")
+ maxVolumes: "0"
+
+ # Volume server's rack name
+ rack: null
+
+ # Volume server's data center name
+ dataCenter: null
+
+ # Redirect moved or non-local volumes. (default true)
+ readRedirect: true
+
+  # Comma-separated IP addresses that have write permission. No limit if empty.
+ whiteList: null
+
+ # Adjust jpg orientation when uploading.
+ imagesFixOrientation: false
+
+ extraVolumes: ""
+ extraVolumeMounts: ""
+
+ # Affinity Settings
+  # Commenting out the affinity variable or setting it to an empty value
+  # allows deployment to single-node clusters such as Minikube
+ affinity: |
+ podAntiAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - labelSelector:
+ matchLabels:
+ app: {{ template "seaweedfs.name" . }}
+ release: "{{ .Release.Name }}"
+ component: volume
+ topologyKey: kubernetes.io/hostname
+
+ # Resource requests, limits, etc. for the server cluster placement. This
+ # should map directly to the value of the resources field for a PodSpec,
+ # formatted as a multi-line string. By default no direct resource request
+ # is made.
+ resources: null
+
+ # Toleration Settings for server pods
+ # This should be a multi-line string matching the Toleration array
+ # in a PodSpec.
+ tolerations: ""
+
+  # nodeSelector labels for server pod assignment, formatted as a multi-line string.
+ # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
+ # Example:
+ # nodeSelector: |
+ # beta.kubernetes.io/arch: amd64
+ nodeSelector: |
+ sw-volume: "true"
+
+ # used to assign priority to server pods
+ # ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
+ priorityClassName: ""
+
+
+filer:
+ enabled: true
+ repository: null
+ imageName: null
+ imageTag: null
+ imageOverride: null
+ restartPolicy: null
+ replicas: 1
+ port: 8888
+ grpcPort: 18888
+ loggingOverrideLevel: null
+
+  # Limit sub-directory listing size (default 100000)
+ dirListLimit: 100000
+
+ # Turn off directory listing
+ disableDirListing: false
+
+  # Disable HTTP requests; only gRPC operations are allowed
+ disableHttp: false
+
+  # storage and storageClass are the settings for configuring stateful
+  # storage for the filer pods. storage should be set to the disk size of
+  # the attached volume. storageClass is the class of storage which defaults
+  # to null (the Kube cluster will pick the default).
+ storage: 25Gi
+ storageClass: null
+
+ extraVolumes: ""
+ extraVolumeMounts: ""
+
+ # Affinity Settings
+  # Commenting out the affinity variable or setting it to an empty value
+  # allows deployment to single-node clusters such as Minikube
+ affinity: |
+ podAntiAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - labelSelector:
+ matchLabels:
+ app: {{ template "seaweedfs.name" . }}
+ release: "{{ .Release.Name }}"
+ component: filer
+ topologyKey: kubernetes.io/hostname
+
+ # updatePartition is used to control a careful rolling update of SeaweedFS
+  # filers.
+ updatePartition: 0
+
+ # Resource requests, limits, etc. for the server cluster placement. This
+ # should map directly to the value of the resources field for a PodSpec,
+ # formatted as a multi-line string. By default no direct resource request
+ # is made.
+ resources: null
+
+ # Toleration Settings for server pods
+ # This should be a multi-line string matching the Toleration array
+ # in a PodSpec.
+ tolerations: ""
+
+  # nodeSelector labels for server pod assignment, formatted as a multi-line string.
+ # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
+ # Example:
+ # nodeSelector: |
+ # beta.kubernetes.io/arch: amd64
+ nodeSelector: |
+ sw-backend: "true"
+
+ # used to assign priority to server pods
+ # ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
+ priorityClassName: ""
+
+ dbSchema:
+ imageName: db-schema
+ imageTag: "development"
+ imageOverride: ""
+
+ # extraEnvironmentVars is a list of extra environment variables to set on the stateful set.
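+ # Each WEED_<SECTION>_<OPTION> entry overrides the corresponding [section] option
+ # in filer.toml, e.g. WEED_MYSQL_HOSTNAME sets hostname under [mysql].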
+ extraEnvironmentVars:
+ WEED_MYSQL_ENABLED: "true"
+ WEED_MYSQL_HOSTNAME: "mysql-db-host"
+ WEED_MYSQL_PORT: "3306"
+ WEED_MYSQL_DATABASE: "sw_database"
+ WEED_MYSQL_CONNECTION_MAX_IDLE: "10"
+ WEED_MYSQL_CONNECTION_MAX_OPEN: "150"
+ # enable usage of MemSQL as the filer backend
+ WEED_MYSQL_INTERPOLATEPARAMS: "true"
+ WEED_LEVELDB2_ENABLED: "false"
+ # with http DELETE, by default the filer would check whether a folder is empty.
+ # recursive_delete will delete all sub folders and files, similar to "rm -Rf"
+ WEED_FILER_OPTIONS_RECURSIVE_DELETE: "false"
+ # each directory under this folder will automatically become a separate bucket
+ WEED_FILER_BUCKETS_FOLDER: "/buckets"
+ # directories under this folder will store message queue data
+ WEED_FILER_QUEUES_FOLDER: "/queues"
+
+s3:
+ enabled: true
+ repository: null
+ imageName: null
+ imageTag: null
+ restartPolicy: null
+ replicas: 1
+ port: 8333
+ loggingOverrideLevel: null
+
+ # Suffix of the host name, {bucket}.{domainName}
+ domainName: ""
+
+ extraVolumes: ""
+ extraVolumeMounts: ""
+
+ # Resource requests, limits, etc. for the server cluster placement. This
+ # should map directly to the value of the resources field for a PodSpec,
+ # formatted as a multi-line string. By default no direct resource request
+ # is made.
+ resources: null
+
+ # Toleration Settings for server pods
+ # This should be a multi-line string matching the Toleration array
+ # in a PodSpec.
+ tolerations: ""
+
+ # nodeSelector labels for server pod assignment, formatted as a multi-line string.
+ # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
+ # Example:
+ # nodeSelector: |
+ # beta.kubernetes.io/arch: amd64
+ nodeSelector: |
+ sw-backend: "true"
+
+ # used to assign priority to server pods
+ # ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
+ priorityClassName: ""
+
+certificates:
+ commonName: "SeaweedFS CA"
+ ipAddresses: []
+ keyAlgorithm: rsa
+ keySize: 2048
+ duration: 2160h # 90d
+ renewBefore: 360h # 15d
diff --git a/other/java/client/pom.xml b/other/java/client/pom.xml
index 0c585a941..a8b561251 100644
--- a/other/java/client/pom.xml
+++ b/other/java/client/pom.xml
@@ -1,10 +1,11 @@
-
+
4.0.0
com.github.chrislusf
seaweedfs-client
- <version>1.2.4</version>
+ <version>1.2.8</version>
org.sonatype.oss
@@ -88,8 +89,8 @@
org.apache.maven.plugins
maven-compiler-plugin
- <source>7</source>
- <target>7</target>
+ <source>8</source>
+ <target>8</target>
@@ -97,9 +98,11 @@
protobuf-maven-plugin
0.6.1
- com.google.protobuf:protoc:${protobuf.version}:exe:${os.detected.classifier}
+ com.google.protobuf:protoc:${protobuf.version}:exe:${os.detected.classifier}
+
grpc-java
- io.grpc:protoc-gen-grpc-java:${grpc.version}:exe:${os.detected.classifier}
+ io.grpc:protoc-gen-grpc-java:${grpc.version}:exe:${os.detected.classifier}
+
diff --git a/other/java/client/pom_debug.xml b/other/java/client/pom_debug.xml
new file mode 100644
index 000000000..88447f7e7
--- /dev/null
+++ b/other/java/client/pom_debug.xml
@@ -0,0 +1,139 @@
+
+
+ 4.0.0
+
+ com.github.chrislusf
+ seaweedfs-client
+ 1.2.8
+
+
+ org.sonatype.oss
+ oss-parent
+ 9
+
+
+
+ 3.9.1
+
+ 1.23.0
+ 28.0-jre
+
+
+
+
+ com.moandjiezana.toml
+ toml4j
+ 0.7.2
+
+
+
+ com.google.protobuf
+ protobuf-java
+ ${protobuf.version}
+
+
+ com.google.guava
+ guava
+ ${guava.version}
+
+
+ io.grpc
+ grpc-netty-shaded
+ ${grpc.version}
+
+
+ io.grpc
+ grpc-protobuf
+ ${grpc.version}
+
+
+ io.grpc
+ grpc-stub
+ ${grpc.version}
+
+
+ org.slf4j
+ slf4j-api
+ 1.7.25
+
+
+ org.apache.httpcomponents
+ httpmime
+ 4.5.6
+
+
+ junit
+ junit
+ 4.12
+ test
+
+
+
+
+
+
+ kr.motd.maven
+ os-maven-plugin
+ 1.6.2
+
+
+
+
+ org.apache.maven.plugins
+ maven-compiler-plugin
+
+ 8
+ 8
+
+
+
+ org.xolstice.maven.plugins
+ protobuf-maven-plugin
+ 0.6.1
+
+ com.google.protobuf:protoc:${protobuf.version}:exe:${os.detected.classifier}
+
+ grpc-java
+ io.grpc:protoc-gen-grpc-java:${grpc.version}:exe:${os.detected.classifier}
+
+
+
+
+
+ compile
+ compile-custom
+
+
+
+
+
+ org.apache.maven.plugins
+ maven-source-plugin
+ 2.2.1
+
+
+ attach-sources
+
+ jar-no-fork
+
+
+
+
+
+ org.apache.maven.plugins
+ maven-javadoc-plugin
+ 2.9.1
+
+
+ attach-javadocs
+
+ jar
+
+
+
+
+
+
+
+
diff --git a/other/java/client/src/main/java/seaweedfs/client/ChunkCache.java b/other/java/client/src/main/java/seaweedfs/client/ChunkCache.java
new file mode 100644
index 000000000..e249d4524
--- /dev/null
+++ b/other/java/client/src/main/java/seaweedfs/client/ChunkCache.java
@@ -0,0 +1,27 @@
+package seaweedfs.client;
+
+import com.google.common.cache.Cache;
+import com.google.common.cache.CacheBuilder;
+
+import java.util.concurrent.TimeUnit;
+
+public class ChunkCache {
+
+ private final Cache<String, byte[]> cache;
+
+ public ChunkCache(int maxEntries) {
+ this.cache = CacheBuilder.newBuilder()
+ .maximumSize(maxEntries)
+ .expireAfterAccess(1, TimeUnit.HOURS)
+ .build();
+ }
+
+ public byte[] getChunk(String fileId) {
+ return this.cache.getIfPresent(fileId);
+ }
+
+ public void setChunk(String fileId, byte[] data) {
+ this.cache.put(fileId, data);
+ }
+
+}
diff --git a/other/java/client/src/main/java/seaweedfs/client/FilerClient.java b/other/java/client/src/main/java/seaweedfs/client/FilerClient.java
index 84aa26ad9..ef32c7e9a 100644
--- a/other/java/client/src/main/java/seaweedfs/client/FilerClient.java
+++ b/other/java/client/src/main/java/seaweedfs/client/FilerClient.java
@@ -14,7 +14,7 @@ public class FilerClient {
private static final Logger LOG = LoggerFactory.getLogger(FilerClient.class);
- private FilerGrpcClient filerGrpcClient;
+ private final FilerGrpcClient filerGrpcClient;
public FilerClient(String host, int grpcPort) {
filerGrpcClient = new FilerGrpcClient(host, grpcPort);
@@ -181,7 +181,7 @@ public class FilerClient {
.setLimit(limit)
.build());
List<FilerProto.Entry> entries = new ArrayList<>();
- while (iter.hasNext()){
+ while (iter.hasNext()) {
FilerProto.ListEntriesResponse resp = iter.next();
entries.add(fixEntryAfterReading(resp.getEntry()));
}
@@ -195,9 +195,12 @@ public class FilerClient {
.setDirectory(directory)
.setName(entryName)
.build()).getEntry();
+ if (entry == null) {
+ return null;
+ }
return fixEntryAfterReading(entry);
} catch (Exception e) {
- if (e.getMessage().indexOf("filer: no entry is found in filer store")>0){
+ if (e.getMessage().indexOf("filer: no entry is found in filer store") > 0) {
return null;
}
LOG.warn("lookupEntry {}/{}: {}", directory, entryName, e);
diff --git a/other/java/client/src/main/java/seaweedfs/client/FilerGrpcClient.java b/other/java/client/src/main/java/seaweedfs/client/FilerGrpcClient.java
index 3626c76de..3f5d1e8e9 100644
--- a/other/java/client/src/main/java/seaweedfs/client/FilerGrpcClient.java
+++ b/other/java/client/src/main/java/seaweedfs/client/FilerGrpcClient.java
@@ -14,12 +14,6 @@ import java.util.concurrent.TimeUnit;
public class FilerGrpcClient {
private static final Logger logger = LoggerFactory.getLogger(FilerGrpcClient.class);
-
- private final ManagedChannel channel;
- private final SeaweedFilerGrpc.SeaweedFilerBlockingStub blockingStub;
- private final SeaweedFilerGrpc.SeaweedFilerStub asyncStub;
- private final SeaweedFilerGrpc.SeaweedFilerFutureStub futureStub;
-
static SslContext sslContext;
static {
@@ -30,6 +24,14 @@ public class FilerGrpcClient {
}
}
+ private final ManagedChannel channel;
+ private final SeaweedFilerGrpc.SeaweedFilerBlockingStub blockingStub;
+ private final SeaweedFilerGrpc.SeaweedFilerStub asyncStub;
+ private final SeaweedFilerGrpc.SeaweedFilerFutureStub futureStub;
+ private boolean cipher = false;
+ private String collection = "";
+ private String replication = "";
+
public FilerGrpcClient(String host, int grpcPort) {
this(host, grpcPort, sslContext);
}
@@ -42,6 +44,13 @@ public class FilerGrpcClient {
.negotiationType(NegotiationType.TLS)
.sslContext(sslContext));
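+ // eagerly fetch the filer configuration; note this performs a blocking RPC inside the constructor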
+ FilerProto.GetFilerConfigurationResponse filerConfigurationResponse =
+ this.getBlockingStub().getFilerConfiguration(
+ FilerProto.GetFilerConfigurationRequest.newBuilder().build());
+ cipher = filerConfigurationResponse.getCipher();
+ collection = filerConfigurationResponse.getCollection();
+ replication = filerConfigurationResponse.getReplication();
+
}
public FilerGrpcClient(ManagedChannelBuilder<?> channelBuilder) {
@@ -51,6 +60,18 @@ public class FilerGrpcClient {
futureStub = SeaweedFilerGrpc.newFutureStub(channel);
}
+ public boolean isCipher() {
+ return cipher;
+ }
+
+ public String getCollection() {
+ return collection;
+ }
+
+ public String getReplication() {
+ return replication;
+ }
+
public void shutdown() throws InterruptedException {
channel.shutdown().awaitTermination(5, TimeUnit.SECONDS);
}
diff --git a/other/java/client/src/main/java/seaweedfs/client/Gzip.java b/other/java/client/src/main/java/seaweedfs/client/Gzip.java
new file mode 100644
index 000000000..248285dd3
--- /dev/null
+++ b/other/java/client/src/main/java/seaweedfs/client/Gzip.java
@@ -0,0 +1,37 @@
+package seaweedfs.client;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.zip.GZIPInputStream;
+import java.util.zip.GZIPOutputStream;
+
+public class Gzip {
+ public static byte[] compress(byte[] data) throws IOException {
+ ByteArrayOutputStream bos = new ByteArrayOutputStream(data.length);
+ GZIPOutputStream gzip = new GZIPOutputStream(bos);
+ gzip.write(data);
+ gzip.close();
+ byte[] compressed = bos.toByteArray();
+ bos.close();
+ return compressed;
+ }
+
+ public static byte[] decompress(byte[] compressed) throws IOException {
+ ByteArrayInputStream bis = new ByteArrayInputStream(compressed);
+ GZIPInputStream gis = new GZIPInputStream(bis);
+ return readAll(gis);
+ }
+
+ private static byte[] readAll(InputStream input) throws IOException {
+ try (ByteArrayOutputStream output = new ByteArrayOutputStream()) {
+ byte[] buffer = new byte[4096];
+ int n;
+ while (-1 != (n = input.read(buffer))) {
+ output.write(buffer, 0, n);
+ }
+ return output.toByteArray();
+ }
+ }
+}
diff --git a/other/java/client/src/main/java/seaweedfs/client/SeaweedCipher.java b/other/java/client/src/main/java/seaweedfs/client/SeaweedCipher.java
new file mode 100644
index 000000000..8d0ebd755
--- /dev/null
+++ b/other/java/client/src/main/java/seaweedfs/client/SeaweedCipher.java
@@ -0,0 +1,55 @@
+package seaweedfs.client;
+
+import javax.crypto.Cipher;
+import javax.crypto.spec.GCMParameterSpec;
+import javax.crypto.spec.SecretKeySpec;
+import java.security.SecureRandom;
+
+public class SeaweedCipher {
+ // AES-GCM parameters
+ public static final int AES_KEY_SIZE = 256; // in bits
+ public static final int GCM_NONCE_LENGTH = 12; // in bytes
+ public static final int GCM_TAG_LENGTH = 16; // in bytes
+
+ private static SecureRandom random = new SecureRandom();
+
+ public static byte[] genCipherKey() throws Exception {
+ byte[] key = new byte[AES_KEY_SIZE / 8];
+ random.nextBytes(key);
+ return key;
+ }
+
+ public static byte[] encrypt(byte[] clearTextbytes, byte[] cipherKey) throws Exception {
+ return encrypt(clearTextbytes, 0, clearTextbytes.length, cipherKey);
+ }
+
+ public static byte[] encrypt(byte[] clearTextbytes, int offset, int length, byte[] cipherKey) throws Exception {
+
+ final byte[] nonce = new byte[GCM_NONCE_LENGTH];
+ random.nextBytes(nonce);
+ GCMParameterSpec spec = new GCMParameterSpec(GCM_TAG_LENGTH * 8, nonce);
+ SecretKeySpec keySpec = new SecretKeySpec(cipherKey, "AES");
+
+ Cipher AES_cipherInstance = Cipher.getInstance("AES/GCM/NoPadding");
+ AES_cipherInstance.init(Cipher.ENCRYPT_MODE, keySpec, spec);
+
+ byte[] encryptedText = AES_cipherInstance.doFinal(clearTextbytes, offset, length);
+
+ byte[] iv = AES_cipherInstance.getIV();
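+ // message layout: 12-byte nonce || ciphertext (doFinal already appended the 16-byte GCM tag)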
+ byte[] message = new byte[GCM_NONCE_LENGTH + encryptedText.length];
+ System.arraycopy(iv, 0, message, 0, GCM_NONCE_LENGTH);
+ System.arraycopy(encryptedText, 0, message, GCM_NONCE_LENGTH, encryptedText.length);
+
+ return message;
+ }
+
+ public static byte[] decrypt(byte[] encryptedText, byte[] cipherKey) throws Exception {
+ final Cipher AES_cipherInstance = Cipher.getInstance("AES/GCM/NoPadding");
+ GCMParameterSpec params = new GCMParameterSpec(GCM_TAG_LENGTH * 8, encryptedText, 0, GCM_NONCE_LENGTH);
+ SecretKeySpec keySpec = new SecretKeySpec(cipherKey, "AES");
+ AES_cipherInstance.init(Cipher.DECRYPT_MODE, keySpec, params);
+ byte[] decryptedText = AES_cipherInstance.doFinal(encryptedText, GCM_NONCE_LENGTH, encryptedText.length - GCM_NONCE_LENGTH);
+ return decryptedText;
+ }
+
+}
diff --git a/other/java/client/src/main/java/seaweedfs/client/SeaweedRead.java b/other/java/client/src/main/java/seaweedfs/client/SeaweedRead.java
index b08c14467..7be39da53 100644
--- a/other/java/client/src/main/java/seaweedfs/client/SeaweedRead.java
+++ b/other/java/client/src/main/java/seaweedfs/client/SeaweedRead.java
@@ -6,16 +6,19 @@ import org.apache.http.HttpResponse;
import org.apache.http.client.HttpClient;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.DefaultHttpClient;
+import org.apache.http.util.EntityUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import java.io.Closeable;
import java.io.IOException;
-import java.io.OutputStream;
-import java.nio.ByteBuffer;
import java.util.*;
public class SeaweedRead {
- // private static final Logger LOG = LoggerFactory.getLogger(SeaweedRead.class);
+ private static final Logger LOG = LoggerFactory.getLogger(SeaweedRead.class);
+
+ static ChunkCache chunkCache = new ChunkCache(1000);
// returns bytesRead
public static long read(FilerGrpcClient filerGrpcClient, List<VisibleInterval> visibleIntervals,
@@ -31,7 +34,7 @@ public class SeaweedRead {
}
FilerProto.LookupVolumeResponse lookupResponse = filerGrpcClient
- .getBlockingStub().lookupVolume(lookupRequest.build());
+ .getBlockingStub().lookupVolume(lookupRequest.build());
Map<String, FilerProto.Locations> vid2Locations = lookupResponse.getLocationsMapMap();
@@ -56,26 +59,38 @@ public class SeaweedRead {
}
private static int readChunkView(long position, byte[] buffer, int startOffset, ChunkView chunkView, FilerProto.Locations locations) throws IOException {
+
+ byte[] chunkData = chunkCache.getChunk(chunkView.fileId);
+
+ if (chunkData == null) {
+ chunkData = doFetchFullChunkData(chunkView, locations);
+ }
+
+ int len = (int) chunkView.size;
+ LOG.debug("readChunkView fid:{} chunkData.length:{} chunkView.offset:{} buffer.length:{} startOffset:{} len:{}",
+ chunkView.fileId, chunkData.length, chunkView.offset, buffer.length, startOffset, len);
+ System.arraycopy(chunkData, (int) chunkView.offset, buffer, startOffset, len);
+
+ chunkCache.setChunk(chunkView.fileId, chunkData);
+
+ return len;
+ }
+
+ private static byte[] doFetchFullChunkData(ChunkView chunkView, FilerProto.Locations locations) throws IOException {
+
HttpClient client = new DefaultHttpClient();
HttpGet request = new HttpGet(
- String.format("http://%s/%s", locations.getLocations(0).getUrl(), chunkView.fileId));
+ String.format("http://%s/%s", locations.getLocations(0).getUrl(), chunkView.fileId));
- if (!chunkView.isFullChunk) {
- request.setHeader(HttpHeaders.ACCEPT_ENCODING, "");
- request.setHeader(HttpHeaders.RANGE,
- String.format("bytes=%d-%d", chunkView.offset, chunkView.offset + chunkView.size - 1));
- }
+ request.setHeader(HttpHeaders.ACCEPT_ENCODING, "");
+
+ byte[] data = null;
try {
HttpResponse response = client.execute(request);
HttpEntity entity = response.getEntity();
- int len = (int) (chunkView.logicOffset - position + chunkView.size);
- OutputStream outputStream = new ByteBufferOutputStream(ByteBuffer.wrap(buffer, startOffset, len));
- entity.writeTo(outputStream);
- // LOG.debug("* read chunkView:{} startOffset:{} length:{}", chunkView, startOffset, len);
-
- return len;
+ data = EntityUtils.toByteArray(entity);
} finally {
if (client instanceof Closeable) {
@@ -83,6 +98,21 @@ public class SeaweedRead {
t.close();
}
}
+
+ if (chunkView.isGzipped) {
+ data = Gzip.decompress(data);
+ }
+
+ if (chunkView.cipherKey != null && chunkView.cipherKey.length != 0) {
+ try {
+ data = SeaweedCipher.decrypt(data, chunkView.cipherKey);
+ } catch (Exception e) {
+ throw new IOException("fail to decrypt", e);
+ }
+ }
+
+ return data;
+
}
protected static List<ChunkView> viewFromVisibles(List<VisibleInterval> visibleIntervals, long offset, long size) {
@@ -93,11 +123,13 @@ public class SeaweedRead {
if (chunk.start <= offset && offset < chunk.stop && offset < stop) {
boolean isFullChunk = chunk.isFullChunk && chunk.start == offset && chunk.stop <= stop;
views.add(new ChunkView(
- chunk.fileId,
- offset - chunk.start,
- Math.min(chunk.stop, stop) - offset,
- offset,
- isFullChunk
+ chunk.fileId,
+ offset - chunk.start,
+ Math.min(chunk.stop, stop) - offset,
+ offset,
+ isFullChunk,
+ chunk.cipherKey,
+ chunk.isGzipped
));
offset = Math.min(chunk.stop, stop);
}
@@ -127,11 +159,13 @@ public class SeaweedRead {
List<VisibleInterval> newVisibles,
FilerProto.FileChunk chunk) {
VisibleInterval newV = new VisibleInterval(
- chunk.getOffset(),
- chunk.getOffset() + chunk.getSize(),
- chunk.getFileId(),
- chunk.getMtime(),
- true
+ chunk.getOffset(),
+ chunk.getOffset() + chunk.getSize(),
+ chunk.getFileId(),
+ chunk.getMtime(),
+ true,
+ chunk.getCipherKey().toByteArray(),
+ chunk.getIsGzipped()
);
// easy cases to speed up
@@ -147,21 +181,25 @@ public class SeaweedRead {
for (VisibleInterval v : visibles) {
if (v.start < chunk.getOffset() && chunk.getOffset() < v.stop) {
newVisibles.add(new VisibleInterval(
- v.start,
- chunk.getOffset(),
- v.fileId,
- v.modifiedTime,
- false
+ v.start,
+ chunk.getOffset(),
+ v.fileId,
+ v.modifiedTime,
+ false,
+ v.cipherKey,
+ v.isGzipped
));
}
long chunkStop = chunk.getOffset() + chunk.getSize();
if (v.start < chunkStop && chunkStop < v.stop) {
newVisibles.add(new VisibleInterval(
- chunkStop,
- v.stop,
- v.fileId,
- v.modifiedTime,
- false
+ chunkStop,
+ v.stop,
+ v.fileId,
+ v.modifiedTime,
+ false,
+ v.cipherKey,
+ v.isGzipped
));
}
if (chunkStop <= v.start || v.stop <= chunk.getOffset()) {
@@ -208,24 +246,30 @@ public class SeaweedRead {
public final long modifiedTime;
public final String fileId;
public final boolean isFullChunk;
+ public final byte[] cipherKey;
+ public final boolean isGzipped;
- public VisibleInterval(long start, long stop, String fileId, long modifiedTime, boolean isFullChunk) {
+ public VisibleInterval(long start, long stop, String fileId, long modifiedTime, boolean isFullChunk, byte[] cipherKey, boolean isGzipped) {
this.start = start;
this.stop = stop;
this.modifiedTime = modifiedTime;
this.fileId = fileId;
this.isFullChunk = isFullChunk;
+ this.cipherKey = cipherKey;
+ this.isGzipped = isGzipped;
}
@Override
public String toString() {
return "VisibleInterval{" +
- "start=" + start +
- ", stop=" + stop +
- ", modifiedTime=" + modifiedTime +
- ", fileId='" + fileId + '\'' +
- ", isFullChunk=" + isFullChunk +
- '}';
+ "start=" + start +
+ ", stop=" + stop +
+ ", modifiedTime=" + modifiedTime +
+ ", fileId='" + fileId + '\'' +
+ ", isFullChunk=" + isFullChunk +
+ ", cipherKey=" + Arrays.toString(cipherKey) +
+ ", isGzipped=" + isGzipped +
+ '}';
}
}
@@ -235,24 +279,30 @@ public class SeaweedRead {
public final long size;
public final long logicOffset;
public final boolean isFullChunk;
+ public final byte[] cipherKey;
+ public final boolean isGzipped;
- public ChunkView(String fileId, long offset, long size, long logicOffset, boolean isFullChunk) {
+ public ChunkView(String fileId, long offset, long size, long logicOffset, boolean isFullChunk, byte[] cipherKey, boolean isGzipped) {
this.fileId = fileId;
this.offset = offset;
this.size = size;
this.logicOffset = logicOffset;
this.isFullChunk = isFullChunk;
+ this.cipherKey = cipherKey;
+ this.isGzipped = isGzipped;
}
@Override
public String toString() {
return "ChunkView{" +
- "fileId='" + fileId + '\'' +
- ", offset=" + offset +
- ", size=" + size +
- ", logicOffset=" + logicOffset +
- ", isFullChunk=" + isFullChunk +
- '}';
+ "fileId='" + fileId + '\'' +
+ ", offset=" + offset +
+ ", size=" + size +
+ ", logicOffset=" + logicOffset +
+ ", isFullChunk=" + isFullChunk +
+ ", cipherKey=" + Arrays.toString(cipherKey) +
+ ", isGzipped=" + isGzipped +
+ '}';
}
}
diff --git a/other/java/client/src/main/java/seaweedfs/client/SeaweedWrite.java b/other/java/client/src/main/java/seaweedfs/client/SeaweedWrite.java
index 0663e8d98..dc6203e52 100644
--- a/other/java/client/src/main/java/seaweedfs/client/SeaweedWrite.java
+++ b/other/java/client/src/main/java/seaweedfs/client/SeaweedWrite.java
@@ -1,5 +1,6 @@
package seaweedfs.client;
+import com.google.protobuf.ByteString;
import org.apache.http.HttpResponse;
import org.apache.http.client.HttpClient;
import org.apache.http.client.methods.HttpPost;
@@ -11,9 +12,12 @@ import java.io.ByteArrayInputStream;
import java.io.Closeable;
import java.io.IOException;
import java.io.InputStream;
+import java.security.SecureRandom;
+import java.util.Arrays;
public class SeaweedWrite {
+ private static SecureRandom random = new SecureRandom();
+
public static void writeData(FilerProto.Entry.Builder entry,
final String replication,
final FilerGrpcClient filerGrpcClient,
@@ -22,10 +26,9 @@ public class SeaweedWrite {
final long bytesOffset, final long bytesLength) throws IOException {
FilerProto.AssignVolumeResponse response = filerGrpcClient.getBlockingStub().assignVolume(
FilerProto.AssignVolumeRequest.newBuilder()
- .setCollection("")
- .setReplication(replication)
+ .setCollection(filerGrpcClient.getCollection())
+ .setReplication(replication == null ? filerGrpcClient.getReplication() : replication)
.setDataCenter("")
- .setReplication("")
.setTtlSec(0)
.build());
String fileId = response.getFileId();
@@ -33,7 +36,17 @@ public class SeaweedWrite {
String auth = response.getAuth();
String targetUrl = String.format("http://%s/%s", url, fileId);
- String etag = multipartUpload(targetUrl, auth, bytes, bytesOffset, bytesLength);
+ ByteString cipherKeyString = com.google.protobuf.ByteString.EMPTY;
+ byte[] cipherKey = null;
+ if (filerGrpcClient.isCipher()) {
+ cipherKey = genCipherKey();
+ cipherKeyString = ByteString.copyFrom(cipherKey);
+ }
+
+ String etag = multipartUpload(targetUrl, auth, bytes, bytesOffset, bytesLength, cipherKey);
+
+ // cache fileId ~ the chunk-local bytes just written
+ SeaweedRead.chunkCache.setChunk(fileId, Arrays.copyOfRange(bytes, (int) bytesOffset, (int) (bytesOffset + bytesLength)));
entry.addChunks(FilerProto.FileChunk.newBuilder()
.setFileId(fileId)
@@ -41,6 +54,7 @@ public class SeaweedWrite {
.setSize(bytesLength)
.setMtime(System.currentTimeMillis() / 10000L)
.setETag(etag)
+ .setCipherKey(cipherKeyString)
);
}
@@ -58,11 +72,22 @@ public class SeaweedWrite {
private static String multipartUpload(String targetUrl,
String auth,
final byte[] bytes,
- final long bytesOffset, final long bytesLength) throws IOException {
+ final long bytesOffset, final long bytesLength,
+ byte[] cipherKey) throws IOException {
HttpClient client = new DefaultHttpClient();
- InputStream inputStream = new ByteArrayInputStream(bytes, (int) bytesOffset, (int) bytesLength);
+ InputStream inputStream = null;
+ if (cipherKey == null || cipherKey.length == 0) {
+ inputStream = new ByteArrayInputStream(bytes, (int) bytesOffset, (int) bytesLength);
+ } else {
+ try {
+ byte[] encryptedBytes = SeaweedCipher.encrypt(bytes, (int) bytesOffset, (int) bytesLength, cipherKey);
+ inputStream = new ByteArrayInputStream(encryptedBytes, 0, encryptedBytes.length);
+ } catch (Exception e) {
+ throw new IOException("fail to encrypt data", e);
+ }
+ }
HttpPost post = new HttpPost(targetUrl);
if (auth != null && auth.length() != 0) {
@@ -92,4 +117,10 @@ public class SeaweedWrite {
}
}
+
+ private static byte[] genCipherKey() {
+ byte[] b = new byte[32];
+ random.nextBytes(b);
+ return b;
+ }
}
diff --git a/other/java/client/src/main/proto/filer.proto b/other/java/client/src/main/proto/filer.proto
index 6357d971f..1fc8ef63d 100644
--- a/other/java/client/src/main/proto/filer.proto
+++ b/other/java/client/src/main/proto/filer.proto
@@ -21,6 +21,9 @@ service SeaweedFiler {
rpc UpdateEntry (UpdateEntryRequest) returns (UpdateEntryResponse) {
}
+ rpc AppendToEntry (AppendToEntryRequest) returns (AppendToEntryResponse) {
+ }
+
rpc DeleteEntry (DeleteEntryRequest) returns (DeleteEntryResponse) {
}
@@ -42,6 +45,15 @@ service SeaweedFiler {
rpc GetFilerConfiguration (GetFilerConfigurationRequest) returns (GetFilerConfigurationResponse) {
}
+ rpc SubscribeMetadata (SubscribeMetadataRequest) returns (stream SubscribeMetadataResponse) {
+ }
+
+ rpc KeepConnected (stream KeepConnectedRequest) returns (stream KeepConnectedResponse) {
+ }
+
+ rpc LocateBroker (LocateBrokerRequest) returns (LocateBrokerResponse) {
+ }
+
}
//////////////////////////////////////////////////
@@ -96,6 +108,8 @@ message FileChunk {
string source_file_id = 6; // to be deprecated
FileId fid = 7;
FileId source_fid = 8;
+ bytes cipher_key = 9;
+ bool is_gzipped = 10;
}
message FileId {
@@ -118,6 +132,7 @@ message FuseAttributes {
string user_name = 11; // for hdfs
repeated string group_name = 12; // for hdfs
string symlink_target = 13;
+ bytes md5 = 14;
}
message CreateEntryRequest {
@@ -137,6 +152,14 @@ message UpdateEntryRequest {
message UpdateEntryResponse {
}
+message AppendToEntryRequest {
+ string directory = 1;
+ string entry_name = 2;
+ repeated FileChunk chunks = 3;
+}
+message AppendToEntryResponse {
+}
+
message DeleteEntryRequest {
string directory = 1;
string name = 2;
@@ -147,6 +170,7 @@ message DeleteEntryRequest {
}
message DeleteEntryResponse {
+ string error = 1;
}
message AtomicRenameEntryRequest {
@@ -165,6 +189,7 @@ message AssignVolumeRequest {
string replication = 3;
int32 ttl_sec = 4;
string data_center = 5;
+ string parent_path = 6;
}
message AssignVolumeResponse {
@@ -173,6 +198,9 @@ message AssignVolumeResponse {
string public_url = 3;
int32 count = 4;
string auth = 5;
+ string collection = 6;
+ string replication = 7;
+ string error = 8;
}
message LookupVolumeRequest {
@@ -219,4 +247,45 @@ message GetFilerConfigurationResponse {
string replication = 2;
string collection = 3;
uint32 max_mb = 4;
+ string dir_buckets = 5;
+ bool cipher = 7;
+}
+
+message SubscribeMetadataRequest {
+ string client_name = 1;
+ string path_prefix = 2;
+ int64 since_ns = 3;
+}
+message SubscribeMetadataResponse {
+ string directory = 1;
+ EventNotification event_notification = 2;
+ int64 ts_ns = 3;
+}
+
+message LogEntry {
+ int64 ts_ns = 1;
+ int32 partition_key_hash = 2;
+ bytes data = 3;
+}
+
+message KeepConnectedRequest {
+ string name = 1;
+ uint32 grpc_port = 2;
+ repeated string resources = 3;
+}
+message KeepConnectedResponse {
+}
+
+message LocateBrokerRequest {
+ string resource = 1;
+}
+message LocateBrokerResponse {
+ bool found = 1;
+ // if found, send the exact address
+ // if not found, send the full list of existing brokers
+ message Resource {
+ string grpc_addresses = 1;
+ int32 resource_count = 2;
+ }
+ repeated Resource resources = 2;
}
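
The new SubscribeMetadata stream lets a client tail filer metadata changes. Below is a minimal subscriber sketch, assuming the Java stubs generated from this proto and same-package access to FilerGrpcClient.getBlockingStub(); the filer address, client name, and path prefix are placeholders:

```java
package seaweedfs.client;

import java.util.Iterator;

public class MetadataTailExample {
    public static void main(String[] args) throws Exception {
        // hypothetical filer gRPC address; adjust to your deployment
        FilerGrpcClient client = new FilerGrpcClient("localhost", 18888);
        // server-streaming RPC: the blocking stub returns an Iterator over events
        Iterator<FilerProto.SubscribeMetadataResponse> events =
                client.getBlockingStub().subscribeMetadata(
                        FilerProto.SubscribeMetadataRequest.newBuilder()
                                .setClientName("example-watcher") // placeholder client name
                                .setPathPrefix("/")               // watch the whole tree
                                .setSinceNs(0)                    // replay from the beginning
                                .build());
        while (events.hasNext()) {
            FilerProto.SubscribeMetadataResponse event = events.next();
            System.out.println(event.getTsNs() + " " + event.getDirectory()
                    + " " + event.getEventNotification());
        }
        client.shutdown();
    }
}
```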
diff --git a/other/java/client/src/test/java/seaweedfs/client/SeaweedCipherTest.java b/other/java/client/src/test/java/seaweedfs/client/SeaweedCipherTest.java
new file mode 100644
index 000000000..7b5e53e19
--- /dev/null
+++ b/other/java/client/src/test/java/seaweedfs/client/SeaweedCipherTest.java
@@ -0,0 +1,42 @@
+package seaweedfs.client;
+
+import org.junit.Test;
+
+import java.util.Base64;
+
+import static seaweedfs.client.SeaweedCipher.decrypt;
+import static seaweedfs.client.SeaweedCipher.encrypt;
+
+public class SeaweedCipherTest {
+
+ @Test
+ public void testSameAsGoImplementation() throws Exception {
+ byte[] secretKey = "256-bit key for AES 256 GCM encr".getBytes();
+
+ String plainText = "Now we need to generate a 256-bit key for AES 256 GCM";
+
+ System.out.println("Original Text : " + plainText);
+
+ byte[] cipherText = encrypt(plainText.getBytes(), secretKey);
+ System.out.println("Encrypted Text : " + Base64.getEncoder().encodeToString(cipherText));
+
+ byte[] decryptedText = decrypt(cipherText, secretKey);
+ System.out.println("DeCrypted Text : " + new String(decryptedText));
+ }
+
+ @Test
+ public void testEncryptDecrypt() throws Exception {
+ byte[] secretKey = SeaweedCipher.genCipherKey();
+
+ String plainText = "Now we need to generate a 256-bit key for AES 256 GCM";
+
+ System.out.println("Original Text : " + plainText);
+
+ byte[] cipherText = encrypt(plainText.getBytes(), secretKey);
+ System.out.println("Encrypted Text : " + Base64.getEncoder().encodeToString(cipherText));
+
+ byte[] decryptedText = decrypt(cipherText, secretKey);
+ System.out.println("DeCrypted Text : " + new String(decryptedText));
+ }
+
+}
diff --git a/other/java/hdfs2/dependency-reduced-pom.xml b/other/java/hdfs2/dependency-reduced-pom.xml
index d818bc878..bef448f3f 100644
--- a/other/java/hdfs2/dependency-reduced-pom.xml
+++ b/other/java/hdfs2/dependency-reduced-pom.xml
@@ -127,7 +127,7 @@
- <seaweedfs.client.version>1.2.4</seaweedfs.client.version>
+ <seaweedfs.client.version>1.2.8</seaweedfs.client.version>
2.9.2
diff --git a/other/java/hdfs2/pom.xml b/other/java/hdfs2/pom.xml
index b8c8cb891..f3086fab8 100644
--- a/other/java/hdfs2/pom.xml
+++ b/other/java/hdfs2/pom.xml
@@ -5,7 +5,7 @@
4.0.0
- <seaweedfs.client.version>1.2.4</seaweedfs.client.version>
+ <seaweedfs.client.version>1.2.8</seaweedfs.client.version>
2.9.2
diff --git a/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java b/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java
index 774c090e8..9617a38be 100644
--- a/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java
+++ b/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java
@@ -64,6 +64,16 @@ public class SeaweedFileSystemStore {
public FileStatus[] listEntries(final Path path) {
LOG.debug("listEntries path: {}", path);
+ FileStatus pathStatus = getFileStatus(path);
+
+ if (pathStatus == null) {
+ return new FileStatus[0];
+ }
+
+ if (!pathStatus.isDirectory()) {
+ return new FileStatus[]{pathStatus};
+ }
+
List<FileStatus> fileStatuses = new ArrayList<FileStatus>();
List<FilerProto.Entry> entries = filerClient.listEntries(path.toUri().getPath());
@@ -74,7 +84,9 @@ public class SeaweedFileSystemStore {
fileStatuses.add(fileStatus);
}
+ LOG.debug("listEntries path: {} size {}", fileStatuses, fileStatuses.size());
return fileStatuses.toArray(new FileStatus[0]);
+
}
public FileStatus getFileStatus(final Path path) {
diff --git a/other/java/hdfs3/dependency-reduced-pom.xml b/other/java/hdfs3/dependency-reduced-pom.xml
index ca53ffd22..f2056b7b1 100644
--- a/other/java/hdfs3/dependency-reduced-pom.xml
+++ b/other/java/hdfs3/dependency-reduced-pom.xml
@@ -127,7 +127,7 @@
- <seaweedfs.client.version>1.2.4</seaweedfs.client.version>
+ <seaweedfs.client.version>1.2.8</seaweedfs.client.version>
3.1.1
diff --git a/other/java/hdfs3/pom.xml b/other/java/hdfs3/pom.xml
index f5207213c..6ca210f78 100644
--- a/other/java/hdfs3/pom.xml
+++ b/other/java/hdfs3/pom.xml
@@ -5,7 +5,7 @@
4.0.0
- <seaweedfs.client.version>1.2.4</seaweedfs.client.version>
+ <seaweedfs.client.version>1.2.8</seaweedfs.client.version>
3.1.1
diff --git a/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java b/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java
index 774c090e8..9617a38be 100644
--- a/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java
+++ b/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java
@@ -64,6 +64,16 @@ public class SeaweedFileSystemStore {
public FileStatus[] listEntries(final Path path) {
LOG.debug("listEntries path: {}", path);
+ FileStatus pathStatus = getFileStatus(path);
+
+ if (pathStatus == null) {
+ return new FileStatus[0];
+ }
+
+ if (!pathStatus.isDirectory()) {
+ return new FileStatus[]{pathStatus};
+ }
+
List<FileStatus> fileStatuses = new ArrayList<FileStatus>();
List<FilerProto.Entry> entries = filerClient.listEntries(path.toUri().getPath());
@@ -74,7 +84,9 @@ public class SeaweedFileSystemStore {
fileStatuses.add(fileStatus);
}
+ LOG.debug("listEntries path: {} size {}", fileStatuses, fileStatuses.size());
return fileStatuses.toArray(new FileStatus[0]);
+
}
public FileStatus getFileStatus(final Path path) {
diff --git a/snap/README.md b/snap/README.md
new file mode 100644
index 000000000..5752bd4af
--- /dev/null
+++ b/snap/README.md
@@ -0,0 +1,49 @@
+Hi
+
+This PR adds support for building a snap package of seaweedfs. Snaps are cross-distro Linux software packages. One snap can be installed on all supported Ubuntu LTS and non-LTS releases from 14.04 onward. Additionally, they can be installed on Debian, Manjaro, Fedora, openSUSE and others. Making a snap of seaweedfs enables you to provide automatic updates on your schedule to your users via the snap store.
+
+If accepted, you can use snapcraft locally, a CI system such as travis or circle-ci, or our free build system (build.snapcraft.io) to create snaps and upload them to the store (snapcraft.io/store).
+
+To test this PR locally, I used an Ubuntu 16.04 VM, with the following steps.
+
+```
+snap install snapcraft --classic
+git clone https://github.com/popey/seaweedfs
+cd seaweedfs
+git checkout add-snapcraft
+snapcraft
+```
+
+This generated a .snap file from the tip of master (I could have checked out a stable release instead). It can be installed with:-
+
+ snap install seaweedfs_0.99+git30.79371c0-dirty_amd64.snap --dangerous
+
+(the --dangerous is necessary because we're installing an app which hasn't gone through the snap store review process)
+
+Once installed, the (namespaced) weed command can be executed. If you accept this and land the snap in the store, we can request an "alias" so users can use the "weed" command rather than the namespaced "seaweedfs.weed"
+
+- Run the seaweedfs.weed command
+- Create sample config. Snaps are securely confined so their home directory is in a different place
+ mkdir ~/snap/seaweedfs/current/.seaweedfs
+ seaweedfs.weed scaffold > ~/snap/seaweedfs/current/.seaweedfs/filer.toml
+- Run a server
+ seaweedfs.weed server
+- Run a benchmark
+ seaweedfs.weed benchmark
+
+Results from my test run: https://paste.ubuntu.com/p/95Xk8zFQ7w/
+
+If landed, you will need to:-
+
+- Register an account in the snap store https://snapcraft.io/account
+- Register the "seaweedfs" name in the store
+ - snapcraft login
+ - snapcraft register seaweedfs
+- Upload a built snap to the store
+ - snapcraft push seaweedfs_0.99+git30.79371c0-dirty_amd64.snap --release edge
+- Test installing on a clean Ubuntu 16.04 machine
+ - snap install seaweedfs --edge
+
+The store supports multiple risk levels as "channels", with the edge channel typically used to host the latest build from git master. Stable is where stable releases are pushed. Optionally, beta and candidate channels can also be used if needed.
+
+Once you are happy, you can push a stable release to the stable channel, update the store page, and promote the application online (we can help there).
diff --git a/snap/snapcraft.yaml b/snap/snapcraft.yaml
new file mode 100644
index 000000000..6449e9bfb
--- /dev/null
+++ b/snap/snapcraft.yaml
@@ -0,0 +1,53 @@
+# Name of snap as registered in the store
+name: seaweedfs
+# Automatically derive snap version from git tags
+version: git
+# Short human readable name as seen in 'snap find $SNAPNAME'
+summary: SeaweedFS
+# Longer multi-line description found in 'snap info $SNAPNAME'
+description: |
+ SeaweedFS is a simple and highly scalable distributed file system,
+ to store and serve billions of files fast!
+ SeaweedFS implements an object store with O(1) disk seek,
+ transparent cloud integration, and an optional Filer with POSIX interface,
+ supporting S3 API, Rack-Aware Erasure Coding for warm storage,
+ FUSE mount, Hadoop compatibility, and WebDAV.
+
+# Grade is stable for snaps expected to land in the stable channel
+grade: stable
+# Uses the strict confinement model and uses interfaces to open up access to
+# resources on the target host
+confinement: strict
+
+# List of parts which comprise the snap
+parts:
+ # The main part which defines how to build the application in the snap
+ seaweedfs:
+ # This part needs a newer version of golang, so we use a separate part
+ # which defines how to get a newer golang during the build
+ after: [go]
+ # The go plugin knows how to build go applications into a snap
+ plugin: go
+ # Snapcraft will look in this location for the source of the application
+ source: .
+ go-importpath: github.com/chrislusf/seaweedfs
+ go:
+ # Defines the version of golang which will be bootstrapped into the snap
+ source-tag: go1.14
+
+# Apps exposes the binaries inside the snap to the host system once installed
+apps:
+ # We expose the weed command.
+ # This differs from the snap name, so it will be namespaced as seaweedfs.weed
+ # An alias can be added to expose this as 'weed' if requested in the snapcraft forum
+ weed:
+ # The path to the binary inside the snap, relative to the $SNAP home
+ command: bin/weed
+ # Plugs connect the snap to resources on the host system. We enable network connectivity
+ # We also add home and removable-media (latter not autoconnected by default)
+ # so users can access files in their home or on removable disks
+ plugs:
+ - network
+ - network-bind
+ - home
+ - removable-media
diff --git a/unmaintained/check_disk_size/check_disk_size.go b/unmaintained/check_disk_size/check_disk_size.go
new file mode 100644
index 000000000..4a8b92b88
--- /dev/null
+++ b/unmaintained/check_disk_size/check_disk_size.go
@@ -0,0 +1,42 @@
+package main
+
+import (
+ "flag"
+ "fmt"
+ "runtime"
+ "syscall"
+)
+
+var (
+ dir = flag.String("dir", ".", "the directory which uses a disk")
+)
+
+func main() {
+ flag.Parse()
+
+ fillInDiskStatus(*dir)
+
+ fmt.Printf("OS: %v\n", runtime.GOOS)
+ fmt.Printf("Arch: %v\n", runtime.GOARCH)
+
+}
+
+func fillInDiskStatus(dir string) {
+ fs := syscall.Statfs_t{}
+ err := syscall.Statfs(dir, &fs)
+ if err != nil {
+ fmt.Printf("failed to statfs on %s: %v\n", dir, err)
+ return
+ }
+ fmt.Printf("statfs: %+v\n", fs)
+ fmt.Println()
+
+ total := fs.Blocks * uint64(fs.Bsize)
+ free := fs.Bfree * uint64(fs.Bsize)
+ fmt.Printf("Total: %d blocks x %d block size = %d bytes\n", fs.Blocks, uint64(fs.Bsize), total)
+ fmt.Printf("Free : %d blocks x %d block size = %d bytes\n", fs.Bfree, uint64(fs.Bsize), free)
+ fmt.Printf("Used : %d blocks x %d block size = %d bytes\n", fs.Blocks-fs.Bfree, uint64(fs.Bsize), total-free)
+ fmt.Printf("Free Percentage : %.2f%%\n", float32((float64(free)/float64(total))*100))
+ fmt.Printf("Used Percentage : %.2f%%\n", float32((float64(total-free)/float64(total))*100))
+ return
+}
diff --git a/unmaintained/compact_leveldb/compact_leveldb.go b/unmaintained/compact_leveldb/compact_leveldb.go
index 317356c3f..9be5697de 100644
--- a/unmaintained/compact_leveldb/compact_leveldb.go
+++ b/unmaintained/compact_leveldb/compact_leveldb.go
@@ -5,6 +5,7 @@ import (
"log"
"github.com/syndtr/goleveldb/leveldb"
+ "github.com/syndtr/goleveldb/leveldb/errors"
"github.com/syndtr/goleveldb/leveldb/opt"
"github.com/syndtr/goleveldb/leveldb/util"
)
@@ -25,6 +26,9 @@ func main() {
}
db, err := leveldb.OpenFile(*dir, opts)
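+ // if the store is corrupted, attempt a recovery before giving up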
+ if errors.IsCorrupted(err) {
+ db, err = leveldb.RecoverFile(*dir, opts)
+ }
if err != nil {
log.Fatal(err)
}
diff --git a/unmaintained/repeated_vacuum/repeated_vacuum.go b/unmaintained/repeated_vacuum/repeated_vacuum.go
index 718b6faa1..12ac42dbe 100644
--- a/unmaintained/repeated_vacuum/repeated_vacuum.go
+++ b/unmaintained/repeated_vacuum/repeated_vacuum.go
@@ -1,11 +1,13 @@
package main
import (
- "bytes"
"flag"
"fmt"
"log"
"math/rand"
+ "time"
+
+ "google.golang.org/grpc"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/security"
@@ -13,8 +15,10 @@ import (
)
var (
- master = flag.String("master", "127.0.0.1:9333", "the master server")
- repeat = flag.Int("n", 5, "repeat how many times")
+ master = flag.String("master", "127.0.0.1:9333", "the master server")
+ repeat = flag.Int("n", 5, "repeat how many times")
+ garbageThreshold = flag.Float64("garbageThreshold", 0.3, "garbageThreshold")
+ replication = flag.String("replication", "", "replication 000, 001, 002, etc")
)
func main() {
@@ -23,27 +27,47 @@ func main() {
util.LoadConfiguration("security", false)
grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
+ genFile(grpcDialOption, 0)
+
+ go func() {
+ for {
+ println("vacuum threshold", *garbageThreshold)
+ _, err := util.Get(fmt.Sprintf("http://%s/vol/vacuum?garbageThreshold=%f", *master, *garbageThreshold))
+ if err != nil {
+ log.Fatalf("vacuum: %v", err)
+ }
+ time.Sleep(time.Second)
+ }
+ }()
+
for i := 0; i < *repeat; i++ {
- assignResult, err := operation.Assign(*master, grpcDialOption, &operation.VolumeAssignRequest{Count: 1})
- if err != nil {
- log.Fatalf("assign: %v", err)
- }
+ // create 2 files, and delete one of them
- data := make([]byte, 1024)
- rand.Read(data)
- reader := bytes.NewReader(data)
-
- targetUrl := fmt.Sprintf("http://%s/%s", assignResult.Url, assignResult.Fid)
-
- _, err = operation.Upload(targetUrl, fmt.Sprintf("test%d", i), reader, false, "", nil, assignResult.Auth)
- if err != nil {
- log.Fatalf("upload: %v", err)
- }
+ assignResult, targetUrl := genFile(grpcDialOption, i)
util.Delete(targetUrl, string(assignResult.Auth))
- util.Get(fmt.Sprintf("http://%s/vol/vacuum", *master))
-
}
}
+
+func genFile(grpcDialOption grpc.DialOption, i int) (*operation.AssignResult, string) {
+ assignResult, err := operation.Assign(*master, grpcDialOption, &operation.VolumeAssignRequest{
+ Count: 1,
+ Replication: *replication,
+ })
+ if err != nil {
+ log.Fatalf("assign: %v", err)
+ }
+
+ data := make([]byte, 1024)
+ rand.Read(data)
+
+ targetUrl := fmt.Sprintf("http://%s/%s", assignResult.Url, assignResult.Fid)
+
+ _, err = operation.UploadData(targetUrl, fmt.Sprintf("test%d", i), false, data, false, "bench/test", nil, assignResult.Auth)
+ if err != nil {
+ log.Fatalf("upload: %v", err)
+ }
+ return assignResult, targetUrl
+}
diff --git a/unmaintained/see_dat/see_dat.go b/unmaintained/see_dat/see_dat.go
index efc58e751..17c494841 100644
--- a/unmaintained/see_dat/see_dat.go
+++ b/unmaintained/see_dat/see_dat.go
@@ -2,6 +2,7 @@ package main
import (
"flag"
+ "github.com/chrislusf/seaweedfs/weed/util"
"time"
"github.com/chrislusf/seaweedfs/weed/glog"
@@ -31,7 +32,8 @@ func (scanner *VolumeFileScanner4SeeDat) ReadNeedleBody() bool {
func (scanner *VolumeFileScanner4SeeDat) VisitNeedle(n *needle.Needle, offset int64, needleHeader, needleBody []byte) error {
t := time.Unix(int64(n.AppendAtNs)/int64(time.Second), int64(n.AppendAtNs)%int64(time.Second))
- glog.V(0).Infof("%d,%s%x offset %d size %d cookie %x appendedAt %v", *volumeId, n.Id, n.Cookie, offset, n.Size, n.Cookie, t)
+ glog.V(0).Infof("%d,%s%x offset %d size %d(%s) cookie %x appendedAt %v",
+ *volumeId, n.Id, n.Cookie, offset, n.Size, util.BytesToHumanReadable(uint64(n.Size)), n.Cookie, t)
return nil
}
diff --git a/unmaintained/see_idx/see_idx.go b/unmaintained/see_idx/see_idx.go
index 777af1821..47cbd291b 100644
--- a/unmaintained/see_idx/see_idx.go
+++ b/unmaintained/see_idx/see_idx.go
@@ -3,6 +3,7 @@ package main
import (
"flag"
"fmt"
+ "github.com/chrislusf/seaweedfs/weed/util"
"os"
"path"
"strconv"
@@ -36,7 +37,7 @@ func main() {
defer indexFile.Close()
idx.WalkIndexFile(indexFile, func(key types.NeedleId, offset types.Offset, size uint32) error {
- fmt.Printf("key:%v offset:%v size:%v\n", key, offset, size)
+ fmt.Printf("key:%v offset:%v size:%v(%v)\n", key, offset, size, util.BytesToHumanReadable(uint64(size)))
return nil
})
diff --git a/unmaintained/see_log_entry/see_log_entry.go b/unmaintained/see_log_entry/see_log_entry.go
new file mode 100644
index 000000000..34965f6be
--- /dev/null
+++ b/unmaintained/see_log_entry/see_log_entry.go
@@ -0,0 +1,75 @@
+package main
+
+import (
+ "flag"
+ "fmt"
+ "io"
+ "log"
+ "os"
+
+ "github.com/golang/protobuf/proto"
+
+ "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+var (
+ logdataFile = flag.String("logdata", "", "log data file saved under "+ filer2.SystemLogDir)
+)
+
+func main() {
+ flag.Parse()
+
+ dst, err := os.OpenFile(*logdataFile, os.O_RDONLY, 0644)
+ if err != nil {
+ log.Fatalf("failed to open %s: %v", *logdataFile, err)
+ }
+ defer dst.Close()
+
+ err = walkLogEntryFile(dst)
+ if err != nil {
+ log.Fatalf("failed to visit %s: %v", *logdataFile, err)
+ }
+
+}
+
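+// walkLogEntryFile reads length-prefixed records: a 4-byte big-endian size, then a marshaled filer_pb.LogEntry whose Data field holds a filer_pb.SubscribeMetadataResponse.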
+func walkLogEntryFile(dst *os.File) error {
+
+ sizeBuf := make([]byte, 4)
+
+ for {
+ if n, err := io.ReadFull(dst, sizeBuf); n != 4 {
+ if err == io.EOF {
+ return nil
+ }
+ return err
+ }
+
+ size := util.BytesToUint32(sizeBuf)
+
+ data := make([]byte, int(size))
+
+ if n, err := io.ReadFull(dst, data); n != len(data) {
+ return err
+ }
+
+ logEntry := &filer_pb.LogEntry{}
+ err := proto.Unmarshal(data, logEntry)
+ if err != nil {
+ log.Printf("unexpected unmarshal filer_pb.LogEntry: %v", err)
+ return nil
+ }
+
+ event := &filer_pb.SubscribeMetadataResponse{}
+ err = proto.Unmarshal(logEntry.Data, event)
+ if err != nil {
+ log.Printf("unexpected unmarshal filer_pb.SubscribeMetadataResponse: %v", err)
+ return nil
+ }
+
+ fmt.Printf("event: %+v\n", event)
+
+ }
+
+}
diff --git a/unmaintained/see_meta/see_meta.go b/unmaintained/see_meta/see_meta.go
index 0d2ac8de1..452badfd6 100644
--- a/unmaintained/see_meta/see_meta.go
+++ b/unmaintained/see_meta/see_meta.go
@@ -7,10 +7,10 @@ import (
"log"
"os"
- "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/golang/protobuf/proto"
+
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
- "github.com/golang/protobuf/proto"
)
var (
@@ -58,7 +58,7 @@ func walkMetaFile(dst *os.File) error {
return err
}
- fmt.Fprintf(os.Stdout, "file %s %v\n", filer2.FullPath(fullEntry.Dir).Child(fullEntry.Entry.Name), fullEntry.Entry.Attributes.String())
+ fmt.Fprintf(os.Stdout, "file %s %v\n", util.FullPath(fullEntry.Dir).Child(fullEntry.Entry.Name), fullEntry.Entry.Attributes.String())
for i, chunk := range fullEntry.Entry.Chunks {
fmt.Fprintf(os.Stdout, " chunk %d %v\n", i+1, chunk.String())
}
diff --git a/unmaintained/volume_tailer/volume_tailer.go b/unmaintained/volume_tailer/volume_tailer.go
index 3c2d36d22..d9220d2de 100644
--- a/unmaintained/volume_tailer/volume_tailer.go
+++ b/unmaintained/volume_tailer/volume_tailer.go
@@ -9,7 +9,6 @@ import (
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
util2 "github.com/chrislusf/seaweedfs/weed/util"
- "github.com/spf13/viper"
"golang.org/x/tools/godoc/util"
)
@@ -25,7 +24,7 @@ func main() {
flag.Parse()
util2.LoadConfiguration("security", false)
- grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
+ grpcDialOption := security.LoadClientTLS(util2.GetViper(), "grpc.client")
vid := needle.VolumeId(*volumeId)
diff --git a/weed/command/backup.go b/weed/command/backup.go
index eb2b5ba4a..615be80cf 100644
--- a/weed/command/backup.go
+++ b/weed/command/backup.go
@@ -119,7 +119,7 @@ func runBackup(cmd *Command, args []string) bool {
}
if v.SuperBlock.CompactionRevision < uint16(stats.CompactRevision) {
- if err = v.Compact2(30 * 1024 * 1024 * 1024); err != nil {
+ if err = v.Compact2(30*1024*1024*1024, 0); err != nil {
fmt.Printf("Compact Volume before synchronizing %v\n", err)
return true
}
diff --git a/weed/command/benchmark.go b/weed/command/benchmark.go
index 382e7c850..de44fac75 100644
--- a/weed/command/benchmark.go
+++ b/weed/command/benchmark.go
@@ -19,6 +19,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/operation"
+ "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/chrislusf/seaweedfs/weed/wdclient"
@@ -40,6 +41,8 @@ type BenchmarkOptions struct {
maxCpu *int
grpcDialOption grpc.DialOption
masterClient *wdclient.MasterClient
+ grpcRead *bool
+ fsync *bool
}
var (
@@ -64,6 +67,8 @@ func init() {
b.replication = cmdBenchmark.Flag.String("replication", "000", "replication type")
b.cpuprofile = cmdBenchmark.Flag.String("cpuprofile", "", "cpu profile output file")
b.maxCpu = cmdBenchmark.Flag.Int("maxCpu", 0, "maximum number of CPUs. 0 means all available CPUs")
+ b.grpcRead = cmdBenchmark.Flag.Bool("grpcRead", false, "use grpc API to read")
+ b.fsync = cmdBenchmark.Flag.Bool("fsync", false, "flush data to disk after write")
sharedBytes = make([]byte, 1024)
}
@@ -110,7 +115,7 @@ func runBenchmark(cmd *Command, args []string) bool {
util.LoadConfiguration("security", false)
b.grpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client")
- fmt.Printf("This is SeaweedFS version %s %s %s\n", util.VERSION, runtime.GOOS, runtime.GOARCH)
+ fmt.Printf("This is SeaweedFS version %s %s %s\n", util.Version(), runtime.GOOS, runtime.GOARCH)
if *b.maxCpu < 1 {
*b.maxCpu = runtime.NumCPU()
}
@@ -124,7 +129,7 @@ func runBenchmark(cmd *Command, args []string) bool {
defer pprof.StopCPUProfile()
}
- b.masterClient = wdclient.NewMasterClient(context.Background(), b.grpcDialOption, "client", strings.Split(*b.masters, ","))
+ b.masterClient = wdclient.NewMasterClient(b.grpcDialOption, "client", "", 0, strings.Split(*b.masters, ","))
go b.masterClient.KeepConnectedToMaster()
b.masterClient.WaitUntilConnected()
@@ -224,9 +229,10 @@ func writeFiles(idChan chan int, fileIdLineChan chan string, s *stat) {
start := time.Now()
fileSize := int64(*b.fileSize + random.Intn(64))
fp := &operation.FilePart{
- Reader: &FakeReader{id: uint64(id), size: fileSize},
+ Reader: &FakeReader{id: uint64(id), size: fileSize, random: random},
FileSize: fileSize,
MimeType: "image/bench", // prevent gzip benchmark content
+ Fsync: *b.fsync,
}
ar := &operation.VolumeAssignRequest{
Count: 1,
@@ -238,7 +244,7 @@ func writeFiles(idChan chan int, fileIdLineChan chan string, s *stat) {
if !isSecure && assignResult.Auth != "" {
isSecure = true
}
- if _, err := fp.Upload(0, b.masterClient.GetMaster(), assignResult.Auth, b.grpcDialOption); err == nil {
+ if _, err := fp.Upload(0, b.masterClient.GetMaster(), false, assignResult.Auth, b.grpcDialOption); err == nil {
if random.Intn(100) < *b.deletePercentage {
s.total++
delayedDeleteChan <- &delayedFile{time.Now().Add(time.Second), fp}
@@ -278,23 +284,61 @@ func readFiles(fileIdLineChan chan string, s *stat) {
fmt.Printf("reading file %s\n", fid)
}
start := time.Now()
- url, err := b.masterClient.LookupFileId(fid)
- if err != nil {
- s.failed++
- println("!!!! ", fid, " location not found!!!!!")
- continue
+ var bytesRead int
+ var err error
+ if *b.grpcRead {
+ var volumeServer string
+ volumeServer, err = b.masterClient.LookupVolumeServer(fid)
+ if err != nil {
+ s.failed++
+ println("!!!! ", fid, " location not found!!!!!")
+ continue
+ }
+ bytesRead, err = grpcFileGet(volumeServer, fid, b.grpcDialOption)
+ } else {
+ var url string
+ url, err = b.masterClient.LookupFileId(fid)
+ if err != nil {
+ s.failed++
+ println("!!!! ", fid, " location not found!!!!!")
+ continue
+ }
+ var bytes []byte
+ bytes, err = util.Get(url)
+ bytesRead = len(bytes)
}
- if bytesRead, err := util.Get(url); err == nil {
+ if err == nil {
s.completed++
- s.transferred += int64(len(bytesRead))
+ s.transferred += int64(bytesRead)
readStats.addSample(time.Now().Sub(start))
} else {
s.failed++
- fmt.Printf("Failed to read %s error:%v\n", url, err)
+ fmt.Printf("Failed to read %s error:%v\n", fid, err)
}
}
}
+func grpcFileGet(volumeServer, fid string, grpcDialOption grpc.DialOption) (bytesRead int, err error) {
+ err = operation.WithVolumeServerClient(volumeServer, grpcDialOption, func(client volume_server_pb.VolumeServerClient) error {
+ fileGetClient, err := client.FileGet(context.Background(), &volume_server_pb.FileGetRequest{FileId: fid})
+ if err != nil {
+ return err
+ }
+
+ for {
+ resp, respErr := fileGetClient.Recv()
+ if resp != nil {
+ bytesRead += len(resp.Data)
+ }
+ if respErr != nil {
+ if respErr == io.EOF {
+ return nil
+ }
+ return respErr
+ }
+ }
+ })
+ return
+}
+
func writeFileIds(fileName string, fileIdLineChan chan string, finishChan chan bool) {
file, err := os.OpenFile(fileName, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
if err != nil {
@@ -509,8 +553,9 @@ func (s *stats) printStats() {
// a fake reader to generate content to upload
type FakeReader struct {
- id uint64 // an id number
- size int64 // max bytes
+ id uint64 // an id number
+ size int64 // max bytes
+ random *rand.Rand
}
func (l *FakeReader) Read(p []byte) (n int, err error) {
@@ -526,6 +571,7 @@ func (l *FakeReader) Read(p []byte) (n int, err error) {
for i := 0; i < 8; i++ {
p[i] = byte(l.id >> uint(i*8))
}
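+ // fill the rest of the buffer with random bytes so the benchmark payload is unique and incompressible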
+ l.random.Read(p[8:])
}
l.size -= int64(n)
return
diff --git a/weed/command/command.go b/weed/command/command.go
index 79c00d4cd..9a41a8a7c 100644
--- a/weed/command/command.go
+++ b/weed/command/command.go
@@ -12,20 +12,22 @@ var Commands = []*Command{
cmdBackup,
cmdCompact,
cmdCopy,
- cmdFix,
- cmdFilerReplicate,
- cmdServer,
- cmdMaster,
- cmdFiler,
- cmdS3,
- cmdUpload,
cmdDownload,
+ cmdExport,
+ cmdFiler,
+ cmdFilerReplicate,
+ cmdFix,
+ cmdMaster,
+ cmdMount,
+ cmdS3,
+ cmdMsgBroker,
cmdScaffold,
+ cmdServer,
cmdShell,
+ cmdWatch,
+ cmdUpload,
cmdVersion,
cmdVolume,
- cmdExport,
- cmdMount,
cmdWebDav,
}
diff --git a/weed/command/compact.go b/weed/command/compact.go
index 85313b749..4e28aa725 100644
--- a/weed/command/compact.go
+++ b/weed/command/compact.go
@@ -50,7 +50,7 @@ func runCompact(cmd *Command, args []string) bool {
glog.Fatalf("Compact Volume [ERROR] %s\n", err)
}
} else {
- if err = v.Compact2(preallocate); err != nil {
+ if err = v.Compact2(preallocate, 0); err != nil {
glog.Fatalf("Compact Volume [ERROR] %s\n", err)
}
}
diff --git a/weed/command/download.go b/weed/command/download.go
index b3e33defd..be0eb47e5 100644
--- a/weed/command/download.go
+++ b/weed/command/download.go
@@ -71,6 +71,7 @@ func downloadToFile(server, fileId, saveDir string) error {
}
f, err := os.OpenFile(path.Join(saveDir, filename), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, os.ModePerm)
if err != nil {
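+ // drain the response body so the underlying HTTP connection can be reused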
+ io.Copy(ioutil.Discard, rc)
return err
}
defer f.Close()
diff --git a/weed/command/export.go b/weed/command/export.go
index 8d664ad3b..8c32b3f4d 100644
--- a/weed/command/export.go
+++ b/weed/command/export.go
@@ -195,6 +195,8 @@ func runExport(cmd *Command, args []string) bool {
vid := needle.VolumeId(*export.volumeId)
needleMap := needle_map.NewMemDb()
+ defer needleMap.Close()
+
if err := needleMap.LoadFromIdx(path.Join(*export.dir, fileName+".idx")); err != nil {
glog.Fatalf("cannot load needle map from %s.idx: %s", fileName, err)
}
diff --git a/weed/command/filer.go b/weed/command/filer.go
index ea8392fac..e258b695d 100644
--- a/weed/command/filer.go
+++ b/weed/command/filer.go
@@ -9,6 +9,7 @@ import (
"google.golang.org/grpc/reflection"
"github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/server"
@@ -22,17 +23,18 @@ var (
type FilerOptions struct {
masters *string
ip *string
+ bindIp *string
port *int
publicPort *int
collection *string
defaultReplicaPlacement *string
- redirectOnRead *bool
disableDirListing *bool
maxMB *int
dirListingLimit *int
dataCenter *string
enableNotification *bool
disableHttp *bool
+ cipher *bool
// default leveldb directory, used in "weed server" mode
defaultLevelDbDirectory *string
@@ -42,16 +44,17 @@ func init() {
cmdFiler.Run = runFiler // break init cycle
f.masters = cmdFiler.Flag.String("master", "localhost:9333", "comma-separated master servers")
f.collection = cmdFiler.Flag.String("collection", "", "all data will be stored in this collection")
- f.ip = cmdFiler.Flag.String("ip", "", "filer server http listen ip address")
+ f.ip = cmdFiler.Flag.String("ip", util.DetectedHostAddress(), "filer server http listen ip address")
+ f.bindIp = cmdFiler.Flag.String("ip.bind", "0.0.0.0", "ip address to bind to")
f.port = cmdFiler.Flag.Int("port", 8888, "filer server http listen port")
f.publicPort = cmdFiler.Flag.Int("port.readonly", 0, "readonly port opened to public")
f.defaultReplicaPlacement = cmdFiler.Flag.String("defaultReplicaPlacement", "000", "default replication type if not specified")
- f.redirectOnRead = cmdFiler.Flag.Bool("redirectOnRead", false, "whether proxy or redirect to volume server during file GET request")
f.disableDirListing = cmdFiler.Flag.Bool("disableDirListing", false, "turn off directory listing")
f.maxMB = cmdFiler.Flag.Int("maxMB", 32, "split files larger than the limit")
f.dirListingLimit = cmdFiler.Flag.Int("dirListLimit", 100000, "limit sub dir listing size")
f.dataCenter = cmdFiler.Flag.String("dataCenter", "", "prefer to write to volumes in this data center")
f.disableHttp = cmdFiler.Flag.Bool("disableHttp", false, "disable http request, only gRpc operations are allowed")
+ f.cipher = cmdFiler.Flag.Bool("encryptVolumeData", false, "encrypt data on volume servers")
}
var cmdFiler = &Command{
@@ -102,22 +105,23 @@ func (fo *FilerOptions) startFiler() {
Masters: strings.Split(*fo.masters, ","),
Collection: *fo.collection,
DefaultReplication: *fo.defaultReplicaPlacement,
- RedirectOnRead: *fo.redirectOnRead,
DisableDirListing: *fo.disableDirListing,
MaxMB: *fo.maxMB,
DirListingLimit: *fo.dirListingLimit,
DataCenter: *fo.dataCenter,
DefaultLevelDbDir: defaultLevelDbDirectory,
DisableHttp: *fo.disableHttp,
- Port: *fo.port,
+ Host: *fo.ip,
+ Port: uint32(*fo.port),
+ Cipher: *fo.cipher,
})
if nfs_err != nil {
glog.Fatalf("Filer startup error: %v", nfs_err)
}
if *fo.publicPort != 0 {
- publicListeningAddress := *fo.ip + ":" + strconv.Itoa(*fo.publicPort)
- glog.V(0).Infoln("Start Seaweed filer server", util.VERSION, "public at", publicListeningAddress)
+ publicListeningAddress := *fo.bindIp + ":" + strconv.Itoa(*fo.publicPort)
+ glog.V(0).Infoln("Start Seaweed filer server", util.Version(), "public at", publicListeningAddress)
publicListener, e := util.NewListener(publicListeningAddress, 0)
if e != nil {
glog.Fatalf("Filer server public listener error on port %d:%v", *fo.publicPort, e)
@@ -129,9 +133,9 @@ func (fo *FilerOptions) startFiler() {
}()
}
- glog.V(0).Infof("Start Seaweed Filer %s at %s:%d", util.VERSION, *fo.ip, *fo.port)
+ glog.V(0).Infof("Start Seaweed Filer %s at %s:%d", util.Version(), *fo.ip, *fo.port)
filerListener, e := util.NewListener(
- *fo.ip+":"+strconv.Itoa(*fo.port),
+ *fo.bindIp+":"+strconv.Itoa(*fo.port),
time.Duration(10)*time.Second,
)
if e != nil {
@@ -144,7 +148,7 @@ func (fo *FilerOptions) startFiler() {
if err != nil {
glog.Fatalf("failed to listen on grpc port %d: %v", grpcPort, err)
}
- grpcS := util.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.filer"))
+ grpcS := pb.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.filer"))
filer_pb.RegisterSeaweedFilerServer(grpcS, fs)
reflection.Register(grpcS)
go grpcS.Serve(grpcL)
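
The filer now separates the advertised address (`-ip`, defaulting to the detected host address) from the listen address (`-ip.bind`, defaulting to 0.0.0.0): listeners bind to `bindIp`, while `ip` is passed along as `Host`. A small sketch of that distinction, independent of SeaweedFS (addresses are hypothetical):

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	bindIp := "0.0.0.0"        // where the listener accepts connections (all interfaces)
	advertisedIp := "10.0.0.5" // hypothetical address peers use to reach this node

	ln, err := net.Listen("tcp", net.JoinHostPort(bindIp, "8888"))
	if err != nil {
		panic(err)
	}
	defer ln.Close()

	// The bind address controls the listening socket; the advertised address
	// is what gets handed to other nodes (the Host field above).
	fmt.Printf("listening on %v, advertising %s\n",
		ln.Addr(), net.JoinHostPort(advertisedIp, "8888"))
}
```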
diff --git a/weed/command/filer_copy.go b/weed/command/filer_copy.go
index e5979d786..2d6ba94d6 100644
--- a/weed/command/filer_copy.go
+++ b/weed/command/filer_copy.go
@@ -16,9 +16,13 @@ import (
"google.golang.org/grpc"
+ "github.com/chrislusf/seaweedfs/weed/util/grace"
+
"github.com/chrislusf/seaweedfs/weed/operation"
+ "github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/security"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/chrislusf/seaweedfs/weed/wdclient"
)
@@ -37,9 +41,10 @@ type CopyOptions struct {
masterClient *wdclient.MasterClient
concurrenctFiles *int
concurrenctChunks *int
- compressionLevel *int
grpcDialOption grpc.DialOption
masters []string
+ cipher bool
+ ttlSec int32
}
func init() {
@@ -52,7 +57,6 @@ func init() {
copy.maxMB = cmdCopy.Flag.Int("maxMB", 32, "split files larger than the limit")
copy.concurrenctFiles = cmdCopy.Flag.Int("c", 8, "concurrent file copy goroutines")
copy.concurrenctChunks = cmdCopy.Flag.Int("concurrentChunks", 8, "concurrent chunk copy goroutines for each file")
- copy.compressionLevel = cmdCopy.Flag.Int("compressionLevel", 9, "local file compression level 1 ~ 9")
}
var cmdCopy = &Command{
@@ -107,9 +111,7 @@ func runCopy(cmd *Command, args []string) bool {
filerGrpcAddress := fmt.Sprintf("%s:%d", filerUrl.Hostname(), filerGrpcPort)
copy.grpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client")
- ctx := context.Background()
-
- masters, collection, replication, maxMB, err := readFilerConfiguration(ctx, copy.grpcDialOption, filerGrpcAddress)
+ masters, collection, replication, maxMB, cipher, err := readFilerConfiguration(copy.grpcDialOption, filerGrpcAddress)
if err != nil {
fmt.Printf("read from filer %s: %v\n", filerGrpcAddress, err)
return false
@@ -124,13 +126,17 @@ func runCopy(cmd *Command, args []string) bool {
*copy.maxMB = int(maxMB)
}
copy.masters = masters
+ copy.cipher = cipher
- copy.masterClient = wdclient.NewMasterClient(ctx, copy.grpcDialOption, "client", copy.masters)
- go copy.masterClient.KeepConnectedToMaster()
- copy.masterClient.WaitUntilConnected()
+ ttl, err := needle.ReadTTL(*copy.ttl)
+ if err != nil {
+ fmt.Printf("parsing ttl %s: %v\n", *copy.ttl, err)
+ return false
+ }
+ copy.ttlSec = int32(ttl.Minutes()) * 60
if *cmdCopy.IsDebug {
- util.SetupProfiling("filer.copy.cpu.pprof", "filer.copy.mem.pprof")
+ grace.SetupProfiling("filer.copy.cpu.pprof", "filer.copy.mem.pprof")
}
fileCopyTaskChan := make(chan FileCopyTask, *copy.concurrenctFiles)
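
The copy command now parses `-ttl` up front with `needle.ReadTTL` and stores whole seconds; note that `int32(ttl.Minutes()) * 60` works at minute granularity, so for example "3m" becomes 180 seconds. A self-contained sketch of the same arithmetic, handling only a few units (the real parser accepts more, per the upload command's "1m, 1h, 1d, 1M, 1y" examples):

```go
package main

import (
	"fmt"
	"strconv"
)

// parseTTLSeconds mimics the minute-granularity conversion used above:
// resolve the TTL to whole minutes first, then multiply by 60.
func parseTTLSeconds(ttl string) (int32, error) {
	if ttl == "" {
		return 0, nil
	}
	value, err := strconv.Atoi(ttl[:len(ttl)-1])
	if err != nil {
		return 0, err
	}
	var minutes int
	switch ttl[len(ttl)-1:] {
	case "m":
		minutes = value
	case "h":
		minutes = value * 60
	case "d":
		minutes = value * 24 * 60
	default: // the full parser also handles e.g. "M" (month) and "y"
		return 0, fmt.Errorf("unhandled TTL unit in %q", ttl)
	}
	return int32(minutes) * 60, nil
}

func main() {
	sec, err := parseTTLSeconds("3m")
	if err != nil {
		panic(err)
	}
	fmt.Println(sec) // 180
}
```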
@@ -153,7 +159,7 @@ func runCopy(cmd *Command, args []string) bool {
filerHost: filerUrl.Host,
filerGrpcAddress: filerGrpcAddress,
}
- if err := worker.copyFiles(ctx, fileCopyTaskChan); err != nil {
+ if err := worker.copyFiles(fileCopyTaskChan); err != nil {
fmt.Fprintf(os.Stderr, "copy file error: %v\n", err)
return
}
@@ -164,13 +170,14 @@ func runCopy(cmd *Command, args []string) bool {
return true
}
-func readFilerConfiguration(ctx context.Context, grpcDialOption grpc.DialOption, filerGrpcAddress string) (masters []string, collection, replication string, maxMB uint32, err error) {
- err = withFilerClient(ctx, filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
- resp, err := client.GetFilerConfiguration(ctx, &filer_pb.GetFilerConfigurationRequest{})
+func readFilerConfiguration(grpcDialOption grpc.DialOption, filerGrpcAddress string) (masters []string, collection, replication string, maxMB uint32, cipher bool, err error) {
+ err = pb.WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+ resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
if err != nil {
return fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err)
}
masters, collection, replication, maxMB = resp.Masters, resp.Collection, resp.Replication, resp.MaxMb
+ cipher = resp.Cipher
return nil
})
return
@@ -215,9 +222,9 @@ type FileCopyWorker struct {
filerGrpcAddress string
}
-func (worker *FileCopyWorker) copyFiles(ctx context.Context, fileCopyTaskChan chan FileCopyTask) error {
+func (worker *FileCopyWorker) copyFiles(fileCopyTaskChan chan FileCopyTask) error {
for task := range fileCopyTaskChan {
- if err := worker.doEachCopy(ctx, task); err != nil {
+ if err := worker.doEachCopy(task); err != nil {
return err
}
}
@@ -233,7 +240,7 @@ type FileCopyTask struct {
gid uint32
}
-func (worker *FileCopyWorker) doEachCopy(ctx context.Context, task FileCopyTask) error {
+func (worker *FileCopyWorker) doEachCopy(task FileCopyTask) error {
f, err := os.Open(task.sourceLocation)
if err != nil {
@@ -261,36 +268,55 @@ func (worker *FileCopyWorker) doEachCopy(ctx context.Context, task FileCopyTask)
}
if chunkCount == 1 {
- return worker.uploadFileAsOne(ctx, task, f)
+ return worker.uploadFileAsOne(task, f)
}
- return worker.uploadFileInChunks(ctx, task, f, chunkCount, chunkSize)
+ return worker.uploadFileInChunks(task, f, chunkCount, chunkSize)
}
-func (worker *FileCopyWorker) uploadFileAsOne(ctx context.Context, task FileCopyTask, f *os.File) error {
+func (worker *FileCopyWorker) uploadFileAsOne(task FileCopyTask, f *os.File) error {
// upload the file content
fileName := filepath.Base(f.Name())
mimeType := detectMimeType(f)
+ data, err := ioutil.ReadAll(f)
+ if err != nil {
+ return err
+ }
var chunks []*filer_pb.FileChunk
+ var assignResult *filer_pb.AssignVolumeResponse
+ var assignError error
if task.fileSize > 0 {
// assign a volume
- assignResult, err := operation.Assign(worker.options.masterClient.GetMaster(), worker.options.grpcDialOption, &operation.VolumeAssignRequest{
- Count: 1,
- Replication: *worker.options.replication,
- Collection: *worker.options.collection,
- Ttl: *worker.options.ttl,
+ err := pb.WithGrpcFilerClient(worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+
+ request := &filer_pb.AssignVolumeRequest{
+ Count: 1,
+ Replication: *worker.options.replication,
+ Collection: *worker.options.collection,
+ TtlSec: worker.options.ttlSec,
+ ParentPath: task.destinationUrlPath,
+ }
+
+ assignResult, assignError = client.AssignVolume(context.Background(), request)
+ if assignError != nil {
+ return fmt.Errorf("assign volume failure %v: %v", request, assignError)
+ }
+ if assignResult.Error != "" {
+ return fmt.Errorf("assign volume failure %v: %v", request, assignResult.Error)
+ }
+ return nil
})
if err != nil {
- fmt.Printf("Failed to assign from %v: %v\n", worker.options.masters, err)
+ return fmt.Errorf("Failed to assign from %v: %v\n", worker.options.masters, err)
}
- targetUrl := "http://" + assignResult.Url + "/" + assignResult.Fid
+ targetUrl := "http://" + assignResult.Url + "/" + assignResult.FileId
- uploadResult, err := operation.UploadWithLocalCompressionLevel(targetUrl, fileName, f, false, mimeType, nil, assignResult.Auth, *worker.options.compressionLevel)
+ uploadResult, err := operation.UploadData(targetUrl, fileName, worker.options.cipher, data, false, mimeType, nil, security.EncodedJwt(assignResult.Auth))
if err != nil {
return fmt.Errorf("upload data %v to %s: %v\n", fileName, targetUrl, err)
}
@@ -299,18 +325,12 @@ func (worker *FileCopyWorker) uploadFileAsOne(ctx context.Context, task FileCopy
}
fmt.Printf("uploaded %s to %s\n", fileName, targetUrl)
- chunks = append(chunks, &filer_pb.FileChunk{
- FileId: assignResult.Fid,
- Offset: 0,
- Size: uint64(uploadResult.Size),
- Mtime: time.Now().UnixNano(),
- ETag: uploadResult.ETag,
- })
+ chunks = append(chunks, uploadResult.ToPbFileChunk(assignResult.FileId, 0))
fmt.Printf("copied %s => http://%s%s%s\n", fileName, worker.filerHost, task.destinationUrlPath, fileName)
}
- if err := withFilerClient(ctx, worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+ if err := pb.WithGrpcFilerClient(worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.CreateEntryRequest{
Directory: task.destinationUrlPath,
Entry: &filer_pb.Entry{
@@ -325,13 +345,13 @@ func (worker *FileCopyWorker) uploadFileAsOne(ctx context.Context, task FileCopy
Mime: mimeType,
Replication: *worker.options.replication,
Collection: *worker.options.collection,
- TtlSec: int32(util.ParseInt(*worker.options.ttl, 0)),
+ TtlSec: worker.options.ttlSec,
},
Chunks: chunks,
},
}
- if err := filer_pb.CreateEntry(ctx, client, request); err != nil {
+ if err := filer_pb.CreateEntry(client, request); err != nil {
return fmt.Errorf("update fh: %v", err)
}
return nil
@@ -342,7 +362,7 @@ func (worker *FileCopyWorker) uploadFileAsOne(ctx context.Context, task FileCopy
return nil
}
-func (worker *FileCopyWorker) uploadFileInChunks(ctx context.Context, task FileCopyTask, f *os.File, chunkCount int, chunkSize int64) error {
+func (worker *FileCopyWorker) uploadFileInChunks(task FileCopyTask, f *os.File, chunkCount int, chunkSize int64) error {
fileName := filepath.Base(f.Name())
mimeType := detectMimeType(f)
@@ -352,6 +372,7 @@ func (worker *FileCopyWorker) uploadFileInChunks(ctx context.Context, task FileC
concurrentChunks := make(chan struct{}, *worker.options.concurrenctChunks)
var wg sync.WaitGroup
var uploadError error
+ var collection, replication string
fmt.Printf("uploading %s in %d chunks ...\n", fileName, chunkCount)
for i := int64(0); i < int64(chunkCount) && uploadError == nil; i++ {
@@ -363,22 +384,42 @@ func (worker *FileCopyWorker) uploadFileInChunks(ctx context.Context, task FileC
<-concurrentChunks
}()
// assign a volume
- assignResult, err := operation.Assign(worker.options.masterClient.GetMaster(), worker.options.grpcDialOption, &operation.VolumeAssignRequest{
- Count: 1,
- Replication: *worker.options.replication,
- Collection: *worker.options.collection,
- Ttl: *worker.options.ttl,
+ var assignResult *filer_pb.AssignVolumeResponse
+ var assignError error
+ err := pb.WithGrpcFilerClient(worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+ request := &filer_pb.AssignVolumeRequest{
+ Count: 1,
+ Replication: *worker.options.replication,
+ Collection: *worker.options.collection,
+ TtlSec: worker.options.ttlSec,
+ ParentPath: task.destinationUrlPath,
+ }
+
+ assignResult, assignError = client.AssignVolume(context.Background(), request)
+ if assignError != nil {
+ return fmt.Errorf("assign volume failure %v: %v", request, assignError)
+ }
+ if assignResult.Error != "" {
+ return fmt.Errorf("assign volume failure %v: %v", request, assignResult.Error)
+ }
+ return nil
})
if err != nil {
fmt.Printf("Failed to assign from %v: %v\n", worker.options.masters, err)
}
- targetUrl := "http://" + assignResult.Url + "/" + assignResult.Fid
+ targetUrl := "http://" + assignResult.Url + "/" + assignResult.FileId
+ if collection == "" {
+ collection = assignResult.Collection
+ }
+ if replication == "" {
+ replication = assignResult.Replication
+ }
- uploadResult, err := operation.Upload(targetUrl,
- fileName+"-"+strconv.FormatInt(i+1, 10),
- io.NewSectionReader(f, i*chunkSize, chunkSize),
- false, "", nil, assignResult.Auth)
+ uploadResult, err, _ := operation.Upload(targetUrl, fileName+"-"+strconv.FormatInt(i+1, 10), worker.options.cipher, io.NewSectionReader(f, i*chunkSize, chunkSize), false, "", nil, security.EncodedJwt(assignResult.Auth))
if err != nil {
uploadError = fmt.Errorf("upload data %v to %s: %v\n", fileName, targetUrl, err)
return
@@ -387,13 +428,8 @@ func (worker *FileCopyWorker) uploadFileInChunks(ctx context.Context, task FileC
uploadError = fmt.Errorf("upload %v to %s result: %v\n", fileName, targetUrl, uploadResult.Error)
return
}
- chunksChan <- &filer_pb.FileChunk{
- FileId: assignResult.Fid,
- Offset: i * chunkSize,
- Size: uint64(uploadResult.Size),
- Mtime: time.Now().UnixNano(),
- ETag: uploadResult.ETag,
- }
+ chunksChan <- uploadResult.ToPbFileChunk(assignResult.FileId, i*chunkSize)
+
fmt.Printf("uploaded %s-%d to %s [%d,%d)\n", fileName, i+1, targetUrl, i*chunkSize, i*chunkSize+int64(uploadResult.Size))
}(i)
}
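
The chunk upload loop above bounds parallelism with a buffered channel used as a counting semaphore plus a `sync.WaitGroup`: acquiring a slot blocks once `-concurrentChunks` uploads are in flight, and each goroutine releases its slot on exit. The pattern, distilled into a standalone sketch:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	const chunkCount = 10
	const concurrentChunks = 3 // mirrors the -concurrentChunks flag

	sem := make(chan struct{}, concurrentChunks) // counting semaphore
	var wg sync.WaitGroup

	for i := 0; i < chunkCount; i++ {
		wg.Add(1)
		sem <- struct{}{} // blocks while concurrentChunks workers are busy
		go func(i int) {
			defer wg.Done()
			defer func() { <-sem }() // release the slot
			fmt.Printf("uploading chunk %d\n", i)
		}(i)
	}
	wg.Wait()
}
```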
@@ -410,11 +446,11 @@ func (worker *FileCopyWorker) uploadFileInChunks(ctx context.Context, task FileC
for _, chunk := range chunks {
fileIds = append(fileIds, chunk.FileId)
}
- operation.DeleteFiles(worker.options.masterClient.GetMaster(), worker.options.grpcDialOption, fileIds)
+ operation.DeleteFiles(copy.masters[0], false, worker.options.grpcDialOption, fileIds)
return uploadError
}
- if err := withFilerClient(ctx, worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+ if err := pb.WithGrpcFilerClient(worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.CreateEntryRequest{
Directory: task.destinationUrlPath,
Entry: &filer_pb.Entry{
@@ -427,15 +463,15 @@ func (worker *FileCopyWorker) uploadFileInChunks(ctx context.Context, task FileC
FileSize: uint64(task.fileSize),
FileMode: uint32(task.fileMode),
Mime: mimeType,
- Replication: *worker.options.replication,
- Collection: *worker.options.collection,
- TtlSec: int32(util.ParseInt(*worker.options.ttl, 0)),
+ Replication: replication,
+ Collection: collection,
+ TtlSec: worker.options.ttlSec,
},
Chunks: chunks,
},
}
- if err := filer_pb.CreateEntry(ctx, client, request); err != nil {
+ if err := filer_pb.CreateEntry(client, request); err != nil {
return fmt.Errorf("update fh: %v", err)
}
return nil
@@ -457,18 +493,12 @@ func detectMimeType(f *os.File) string {
}
if err != nil {
fmt.Printf("read head of %v: %v\n", f.Name(), err)
- return "application/octet-stream"
+ return ""
}
f.Seek(0, io.SeekStart)
mimeType := http.DetectContentType(head[:n])
+ if mimeType == "application/octet-stream" {
+ return ""
+ }
return mimeType
}
-
-func withFilerClient(ctx context.Context, filerAddress string, grpcDialOption grpc.DialOption, fn func(filer_pb.SeaweedFilerClient) error) error {
-
- return util.WithCachedGrpcClient(ctx, func(ctx context.Context, clientConn *grpc.ClientConn) error {
- client := filer_pb.NewSeaweedFilerClient(clientConn)
- return fn(client)
- }, filerAddress, grpcDialOption)
-
-}
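
With this change `detectMimeType` reports the empty string instead of the generic fallback: `http.DetectContentType` never fails and returns "application/octet-stream" whenever it cannot classify the bytes, so suppressing that value leaves the mime field unset rather than storing a meaningless default. The stdlib behavior, demonstrated:

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	// A PNG signature is recognized...
	png := []byte("\x89PNG\r\n\x1a\n")
	fmt.Println(http.DetectContentType(png)) // image/png

	// ...but arbitrary bytes fall back to the generic type,
	// which the code above now maps to the empty string.
	fmt.Println(http.DetectContentType([]byte{0x00, 0x01, 0x02})) // application/octet-stream
}
```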
diff --git a/weed/command/filer_replication.go b/weed/command/filer_replication.go
index 737f0d24a..40f2b570b 100644
--- a/weed/command/filer_replication.go
+++ b/weed/command/filer_replication.go
@@ -121,7 +121,6 @@ func runFilerReplicate(cmd *Command, args []string) bool {
}
}
- return true
}
func validateOneEnabledInput(config *viper.Viper) {
diff --git a/weed/command/fix.go b/weed/command/fix.go
index 8903595fa..90d1c4893 100644
--- a/weed/command/fix.go
+++ b/weed/command/fix.go
@@ -70,6 +70,7 @@ func runFix(cmd *Command, args []string) bool {
indexFileName := path.Join(*fixVolumePath, baseFileName+".idx")
nm := needle_map.NewMemDb()
+ defer nm.Close()
vid := needle.VolumeId(*fixVolumeId)
scanner := &VolumeFileScanner4Fix{
diff --git a/weed/command/master.go b/weed/command/master.go
index c4b11119b..cb6864edd 100644
--- a/weed/command/master.go
+++ b/weed/command/master.go
@@ -8,10 +8,12 @@ import (
"strings"
"github.com/chrislusf/raft/protobuf"
+ "github.com/chrislusf/seaweedfs/weed/util/grace"
"github.com/gorilla/mux"
"google.golang.org/grpc/reflection"
"github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/server"
@@ -43,10 +45,10 @@ type MasterOptions struct {
func init() {
cmdMaster.Run = runMaster // break init cycle
m.port = cmdMaster.Flag.Int("port", 9333, "http listen port")
- m.ip = cmdMaster.Flag.String("ip", "localhost", "master | address")
+ m.ip = cmdMaster.Flag.String("ip", util.DetectedHostAddress(), "master | address")
m.ipBind = cmdMaster.Flag.String("ip.bind", "0.0.0.0", "ip address to bind to")
m.metaFolder = cmdMaster.Flag.String("mdir", os.TempDir(), "data directory to store meta data")
- m.peers = cmdMaster.Flag.String("peers", "", "all master nodes in comma separated ip:port list, example: 127.0.0.1:9093,127.0.0.1:9094")
+ m.peers = cmdMaster.Flag.String("peers", "", "all master nodes in comma separated ip:port list, example: 127.0.0.1:9093,127.0.0.1:9094,127.0.0.1:9095")
m.volumeSizeLimitMB = cmdMaster.Flag.Uint("volumeSizeLimitMB", 30*1000, "Master stops directing writes to oversized volumes.")
m.volumePreallocate = cmdMaster.Flag.Bool("volumePreallocate", false, "Preallocate disk space for volumes.")
m.pulseSeconds = cmdMaster.Flag.Int("pulseSeconds", 5, "number of seconds between heartbeats")
@@ -81,7 +83,7 @@ func runMaster(cmd *Command, args []string) bool {
util.LoadConfiguration("master", false)
runtime.GOMAXPROCS(runtime.NumCPU())
- util.SetupProfiling(*masterCpuProfile, *masterMemProfile)
+ grace.SetupProfiling(*masterCpuProfile, *masterMemProfile)
if err := util.TestFolderWritable(*m.metaFolder); err != nil {
glog.Fatalf("Check Meta Folder (-mdir) Writable %s : %s", *m.metaFolder, err)
@@ -109,7 +111,7 @@ func startMaster(masterOption MasterOptions, masterWhiteList []string) {
r := mux.NewRouter()
ms := weed_server.NewMasterServer(r, masterOption.toMasterOption(masterWhiteList), peers)
listeningAddress := *masterOption.ipBind + ":" + strconv.Itoa(*masterOption.port)
- glog.V(0).Infof("Start Seaweed Master %s at %s", util.VERSION, listeningAddress)
+ glog.V(0).Infof("Start Seaweed Master %s at %s", util.Version(), listeningAddress)
masterListener, e := util.NewListener(listeningAddress, 0)
if e != nil {
glog.Fatalf("Master startup error: %v", e)
@@ -129,11 +131,11 @@ func startMaster(masterOption MasterOptions, masterWhiteList []string) {
glog.Fatalf("master failed to listen on grpc port %d: %v", grpcPort, err)
}
// Create your protocol servers.
- grpcS := util.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.master"))
+ grpcS := pb.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.master"))
master_pb.RegisterSeaweedServer(grpcS, ms)
protobuf.RegisterRaftServer(grpcS, raftServer)
reflection.Register(grpcS)
- glog.V(0).Infof("Start Seaweed Master %s grpc server at %s:%d", util.VERSION, *masterOption.ipBind, grpcPort)
+ glog.V(0).Infof("Start Seaweed Master %s grpc server at %s:%d", util.Version(), *masterOption.ipBind, grpcPort)
go grpcS.Serve(grpcL)
go ms.MasterClient.KeepConnectedToMaster()
@@ -146,6 +148,7 @@ func startMaster(masterOption MasterOptions, masterWhiteList []string) {
}
func checkPeers(masterIp string, masterPort int, peers string) (masterAddress string, cleanedPeers []string) {
+ glog.V(0).Infof("current: %s:%d peers:%s", masterIp, masterPort, peers)
masterAddress = masterIp + ":" + strconv.Itoa(masterPort)
if peers != "" {
cleanedPeers = strings.Split(peers, ",")
@@ -170,6 +173,7 @@ func checkPeers(masterIp string, masterPort int, peers string) (masterAddress st
func (m *MasterOptions) toMasterOption(whiteList []string) *weed_server.MasterOption {
return &weed_server.MasterOption{
+ Host: *m.ip,
Port: *m.port,
MetaFolder: *m.metaFolder,
VolumeSizeLimitMB: *m.volumeSizeLimitMB,
diff --git a/weed/command/mount.go b/weed/command/mount.go
index f09b285f7..21c8e7744 100644
--- a/weed/command/mount.go
+++ b/weed/command/mount.go
@@ -1,23 +1,26 @@
package command
import (
- "fmt"
- "strconv"
- "strings"
+ "os"
)
type MountOptions struct {
- filer *string
- filerMountRootPath *string
- dir *string
- dirListCacheLimit *int64
- collection *string
- replication *string
- ttlSec *int
- chunkSizeLimitMB *int
- dataCenter *string
- allowOthers *bool
- umaskString *string
+ filer *string
+ filerMountRootPath *string
+ dir *string
+ dirListCacheLimit *int64
+ collection *string
+ replication *string
+ ttlSec *int
+ chunkSizeLimitMB *int
+ cacheDir *string
+ cacheSizeMB *int64
+ dataCenter *string
+ allowOthers *bool
+ umaskString *string
+ nonempty *bool
+ outsideContainerClusterMode *bool
+ asyncMetaDataCaching *bool
}
var (
@@ -35,12 +38,17 @@ func init() {
mountOptions.collection = cmdMount.Flag.String("collection", "", "collection to create the files")
mountOptions.replication = cmdMount.Flag.String("replication", "", "replication(e.g. 000, 001) to create to files. If empty, let filer decide.")
mountOptions.ttlSec = cmdMount.Flag.Int("ttl", 0, "file ttl in seconds")
- mountOptions.chunkSizeLimitMB = cmdMount.Flag.Int("chunkSizeLimitMB", 4, "local write buffer size, also chunk large files")
+ mountOptions.chunkSizeLimitMB = cmdMount.Flag.Int("chunkSizeLimitMB", 16, "local write buffer size, also chunk large files")
+ mountOptions.cacheDir = cmdMount.Flag.String("cacheDir", os.TempDir(), "local cache directory for file chunks")
+ mountOptions.cacheSizeMB = cmdMount.Flag.Int64("cacheCapacityMB", 1000, "local cache capacity in MB (0 will disable cache)")
mountOptions.dataCenter = cmdMount.Flag.String("dataCenter", "", "prefer to write to the data center")
mountOptions.allowOthers = cmdMount.Flag.Bool("allowOthers", true, "allows other users to access the file system")
mountOptions.umaskString = cmdMount.Flag.String("umask", "022", "octal umask, e.g., 022, 0111")
+ mountOptions.nonempty = cmdMount.Flag.Bool("nonempty", false, "allows mounting over a non-empty directory")
mountCpuProfile = cmdMount.Flag.String("cpuprofile", "", "cpu profile output file")
mountMemProfile = cmdMount.Flag.String("memprofile", "", "memory profile output file")
+ mountOptions.outsideContainerClusterMode = cmdMount.Flag.Bool("outsideContainerClusterMode", false, "access volume servers through the filer address when mounting outside the container cluster")
+ mountOptions.asyncMetaDataCaching = cmdMount.Flag.Bool("asyncMetaDataCaching", true, "async metadata caching. This feature will become permanent and this option will be removed.")
}
var cmdMount = &Command{
@@ -58,21 +66,11 @@ var cmdMount = &Command{
On OS X, it requires OSXFUSE (http://osxfuse.github.com/).
+ If the SeaweedFS system runs in a container cluster, e.g., managed by Kubernetes or Docker Compose,
+ the volume servers are not accessible by their own IP addresses.
+ In "outsideContainerClusterMode", the mount will use the filer IP address instead, assuming:
+ * All volume server containers are accessible through the same hostname or IP address as the filer.
+ * All volume server container ports are open external to the cluster.
+
`,
}
-
-func parseFilerGrpcAddress(filer string) (filerGrpcAddress string, err error) {
- hostnameAndPort := strings.Split(filer, ":")
- if len(hostnameAndPort) != 2 {
- return "", fmt.Errorf("filer should have hostname:port format: %v", hostnameAndPort)
- }
-
- filerPort, parseErr := strconv.ParseUint(hostnameAndPort[1], 10, 64)
- if parseErr != nil {
- return "", fmt.Errorf("filer port parse error: %v", parseErr)
- }
-
- filerGrpcPort := int(filerPort) + 10000
-
- return fmt.Sprintf("%s:%d", hostnameAndPort[0], filerGrpcPort), nil
-}
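
Under the two assumptions listed in the help text above, a client outside the cluster can reach a volume server only through the filer's public hostname, keeping the volume server's own port. A hypothetical sketch of the address rewrite this implies (illustrative only, not the mount's actual code path):

```go
package main

import (
	"fmt"
	"net/url"
)

// rewriteForOutsideCluster swaps the volume server host for the filer host,
// keeping the volume server's port, per the assumptions above.
func rewriteForOutsideCluster(chunkUrl, filerHost string) (string, error) {
	u, err := url.Parse(chunkUrl)
	if err != nil {
		return "", err
	}
	u.Host = fmt.Sprintf("%s:%s", filerHost, u.Port())
	return u.String(), nil
}

func main() {
	rewritten, _ := rewriteForOutsideCluster("http://volume-0.volume:8080/3,0163b2a7", "filer.example.com")
	fmt.Println(rewritten) // http://filer.example.com:8080/3,0163b2a7
}
```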
diff --git a/weed/command/mount_linux.go b/weed/command/mount_linux.go
index 80a5f9da4..25c4f72cf 100644
--- a/weed/command/mount_linux.go
+++ b/weed/command/mount_linux.go
@@ -138,9 +138,7 @@ func parseInfoFile(r io.Reader) ([]*Info, error) {
}
func osSpecificMountOptions() []fuse.MountOption {
- return []fuse.MountOption{
- fuse.AllowNonEmptyMount(),
- }
+ return []fuse.MountOption{}
}
func checkMountPointAvailable(dir string) bool {
diff --git a/weed/command/mount_std.go b/weed/command/mount_std.go
index 891810e61..4e83a44a0 100644
--- a/weed/command/mount_std.go
+++ b/weed/command/mount_std.go
@@ -3,6 +3,7 @@
package command
import (
+ "context"
"fmt"
"os"
"os/user"
@@ -12,19 +13,20 @@ import (
"strings"
"time"
- "github.com/jacobsa/daemonize"
-
"github.com/chrislusf/seaweedfs/weed/filesys"
"github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/util"
+ "github.com/chrislusf/seaweedfs/weed/util/grace"
"github.com/seaweedfs/fuse"
"github.com/seaweedfs/fuse/fs"
)
func runMount(cmd *Command, args []string) bool {
- util.SetupProfiling(*mountCpuProfile, *mountMemProfile)
+ grace.SetupProfiling(*mountCpuProfile, *mountMemProfile)
umask, umaskErr := strconv.ParseUint(*mountOptions.umaskString, 8, 64)
if umaskErr != nil {
@@ -32,27 +34,46 @@ func runMount(cmd *Command, args []string) bool {
return false
}
- return RunMount(
- *mountOptions.filer,
- *mountOptions.filerMountRootPath,
- *mountOptions.dir,
- *mountOptions.collection,
- *mountOptions.replication,
- *mountOptions.dataCenter,
- *mountOptions.chunkSizeLimitMB,
- *mountOptions.allowOthers,
- *mountOptions.ttlSec,
- *mountOptions.dirListCacheLimit,
- os.FileMode(umask),
- )
+ if len(args) > 0 {
+ return false
+ }
+
+ return RunMount(&mountOptions, os.FileMode(umask))
}
-func RunMount(filer, filerMountRootPath, dir, collection, replication, dataCenter string, chunkSizeLimitMB int,
- allowOthers bool, ttlSec int, dirListCacheLimit int64, umask os.FileMode) bool {
+func RunMount(option *MountOptions, umask os.FileMode) bool {
+
+ filer := *option.filer
+ // parse filer grpc address
+ filerGrpcAddress, err := pb.ParseFilerGrpcAddress(filer)
+ if err != nil {
+ glog.V(0).Infof("ParseFilerGrpcAddress: %v", err)
+ return true
+ }
+
+ // try to connect to the filer and read its configuration
+ grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
+ var cipher bool
+ err = pb.WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+ resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
+ if err != nil {
+ return fmt.Errorf("get filer grpc address %s configuration: %v", filerGrpcAddress, err)
+ }
+ cipher = resp.Cipher
+ return nil
+ })
+ if err != nil {
+ glog.Infof("failed to talk to filer %s: %v", filerGrpcAddress, err)
+ return true
+ }
+
+ filerMountRootPath := *option.filerMountRootPath
+ dir := *option.dir
+ chunkSizeLimitMB := *mountOptions.chunkSizeLimitMB
util.LoadConfiguration("security", false)
- fmt.Printf("This is SeaweedFS version %s %s %s\n", util.VERSION, runtime.GOOS, runtime.GOARCH)
+ fmt.Printf("This is SeaweedFS version %s %s %s\n", util.Version(), runtime.GOOS, runtime.GOARCH)
if dir == "" {
fmt.Printf("Please specify the mount directory via \"-dir\"")
return false
@@ -90,7 +111,7 @@ func RunMount(filer, filerMountRootPath, dir, collection, replication, dataCente
// Ensure target mount point availability
if isValid := checkMountPointAvailable(dir); !isValid {
glog.Fatalf("Expected mount to still be active, target mount point: %s, please check!", dir)
- return false
+ return true
}
mountName := path.Base(dir)
@@ -99,7 +120,7 @@ func RunMount(filer, filerMountRootPath, dir, collection, replication, dataCente
fuse.VolumeName(mountName),
fuse.FSName(filer + ":" + filerMountRootPath),
fuse.Subtype("seaweedfs"),
- fuse.NoAppleDouble(),
+ // fuse.NoAppleDouble(), // include .DS_Store, otherwise cannot delete non-empty folders
fuse.NoAppleXattr(),
fuse.NoBrowse(),
fuse.AutoXattr(),
@@ -110,68 +131,66 @@ func RunMount(filer, filerMountRootPath, dir, collection, replication, dataCente
fuse.MaxReadahead(1024 * 128),
fuse.AsyncRead(),
fuse.WritebackCache(),
- fuse.AllowNonEmptyMount(),
}
options = append(options, osSpecificMountOptions()...)
-
- if allowOthers {
+ if *option.allowOthers {
options = append(options, fuse.AllowOther())
}
-
- c, err := fuse.Mount(dir, options...)
- if err != nil {
- glog.V(0).Infof("mount: %v", err)
- daemonize.SignalOutcome(err)
- return true
- }
-
- util.OnInterrupt(func() {
- fuse.Unmount(dir)
- c.Close()
- })
-
- filerGrpcAddress, err := parseFilerGrpcAddress(filer)
- if err != nil {
- glog.V(0).Infof("parseFilerGrpcAddress: %v", err)
- daemonize.SignalOutcome(err)
- return true
+ if *option.nonempty {
+ options = append(options, fuse.AllowNonEmptyMount())
}
+ // find mount point
mountRoot := filerMountRootPath
if mountRoot != "/" && strings.HasSuffix(mountRoot, "/") {
mountRoot = mountRoot[0 : len(mountRoot)-1]
}
- daemonize.SignalOutcome(nil)
+ seaweedFileSystem := filesys.NewSeaweedFileSystem(&filesys.Option{
+ FilerGrpcAddress: filerGrpcAddress,
+ GrpcDialOption: grpcDialOption,
+ FilerMountRootPath: mountRoot,
+ Collection: *option.collection,
+ Replication: *option.replication,
+ TtlSec: int32(*option.ttlSec),
+ ChunkSizeLimit: int64(chunkSizeLimitMB) * 1024 * 1024,
+ CacheDir: *option.cacheDir,
+ CacheSizeMB: *option.cacheSizeMB,
+ DataCenter: *option.dataCenter,
+ DirListCacheLimit: *option.dirListCacheLimit,
+ EntryCacheTtl: 3 * time.Second,
+ MountUid: uid,
+ MountGid: gid,
+ MountMode: mountMode,
+ MountCtime: fileInfo.ModTime(),
+ MountMtime: time.Now(),
+ Umask: umask,
+ OutsideContainerClusterMode: *mountOptions.outsideContainerClusterMode,
+ AsyncMetaDataCaching: *mountOptions.asyncMetaDataCaching,
+ Cipher: cipher,
+ })
- err = fs.Serve(c, filesys.NewSeaweedFileSystem(&filesys.Option{
- FilerGrpcAddress: filerGrpcAddress,
- GrpcDialOption: security.LoadClientTLS(util.GetViper(), "grpc.client"),
- FilerMountRootPath: mountRoot,
- Collection: collection,
- Replication: replication,
- TtlSec: int32(ttlSec),
- ChunkSizeLimit: int64(chunkSizeLimitMB) * 1024 * 1024,
- DataCenter: dataCenter,
- DirListCacheLimit: dirListCacheLimit,
- EntryCacheTtl: 3 * time.Second,
- MountUid: uid,
- MountGid: gid,
- MountMode: mountMode,
- MountCtime: fileInfo.ModTime(),
- MountMtime: time.Now(),
- Umask: umask,
- }))
+ // mount
+ c, err := fuse.Mount(dir, options...)
if err != nil {
- fuse.Unmount(dir)
+ glog.V(0).Infof("mount: %v", err)
+ return true
}
+ defer fuse.Unmount(dir)
+
+ grace.OnInterrupt(func() {
+ fuse.Unmount(dir)
+ c.Close()
+ })
+
+ glog.V(0).Infof("mounted %s%s to %s", filer, mountRoot, dir)
+ err = fs.Serve(c, seaweedFileSystem)
// check if the mount process has an error to report
<-c.Ready
if err := c.MountError; err != nil {
glog.V(0).Infof("mount process: %v", err)
- daemonize.SignalOutcome(err)
return true
}
diff --git a/weed/command/msg_broker.go b/weed/command/msg_broker.go
new file mode 100644
index 000000000..b4b5855ff
--- /dev/null
+++ b/weed/command/msg_broker.go
@@ -0,0 +1,114 @@
+package command
+
+import (
+ "context"
+ "fmt"
+ "strconv"
+ "time"
+
+ "google.golang.org/grpc/reflection"
+
+ "github.com/chrislusf/seaweedfs/weed/util/grace"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/messaging/broker"
+ "github.com/chrislusf/seaweedfs/weed/pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
+ "github.com/chrislusf/seaweedfs/weed/security"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+var (
+ messageBrokerStandaloneOptions MessageBrokerOptions
+)
+
+type MessageBrokerOptions struct {
+ filer *string
+ ip *string
+ port *int
+ cpuprofile *string
+ memprofile *string
+}
+
+func init() {
+ cmdMsgBroker.Run = runMsgBroker // break init cycle
+ messageBrokerStandaloneOptions.filer = cmdMsgBroker.Flag.String("filer", "localhost:8888", "filer server address")
+ messageBrokerStandaloneOptions.ip = cmdMsgBroker.Flag.String("ip", util.DetectedHostAddress(), "broker host address")
+ messageBrokerStandaloneOptions.port = cmdMsgBroker.Flag.Int("port", 17777, "broker gRPC listen port")
+ messageBrokerStandaloneOptions.cpuprofile = cmdMsgBroker.Flag.String("cpuprofile", "", "cpu profile output file")
+ messageBrokerStandaloneOptions.memprofile = cmdMsgBroker.Flag.String("memprofile", "", "memory profile output file")
+}
+
+var cmdMsgBroker = &Command{
+ UsageLine: "msgBroker [-port=17777] [-filer=]",
+ Short: "start a message queue broker",
+ Long: `start a message queue broker
+
+ The broker accepts gRPC calls to write or read messages. The messages are stored via the filer.
+ The brokers are stateless. To scale up, just add more brokers.
+
+`,
+}
+
+func runMsgBroker(cmd *Command, args []string) bool {
+
+ util.LoadConfiguration("security", false)
+
+ return messageBrokerStandaloneOptions.startQueueServer()
+
+}
+
+func (msgBrokerOpt *MessageBrokerOptions) startQueueServer() bool {
+
+ grace.SetupProfiling(*messageBrokerStandaloneOptions.cpuprofile, *messageBrokerStandaloneOptions.memprofile)
+
+ filerGrpcAddress, err := pb.ParseFilerGrpcAddress(*msgBrokerOpt.filer)
+ if err != nil {
+ glog.Fatal(err)
+ return false
+ }
+
+ grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.msg_broker")
+ cipher := false
+
+ for {
+ err = pb.WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+ resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
+ if err != nil {
+ return fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err)
+ }
+ cipher = resp.Cipher
+ return nil
+ })
+ if err != nil {
+ glog.V(0).Infof("wait to connect to filer %s grpc address %s", *msgBrokerOpt.filer, filerGrpcAddress)
+ time.Sleep(time.Second)
+ } else {
+ glog.V(0).Infof("connected to filer %s grpc address %s", *msgBrokerOpt.filer, filerGrpcAddress)
+ break
+ }
+ }
+
+ qs, err := broker.NewMessageBroker(&broker.MessageBrokerOption{
+ Filers: []string{*msgBrokerOpt.filer},
+ DefaultReplication: "",
+ MaxMB: 0,
+ Ip: *msgBrokerOpt.ip,
+ Port: *msgBrokerOpt.port,
+ Cipher: cipher,
+ }, grpcDialOption)
+ if err != nil {
+ glog.Fatalf("failed to create message broker: %v", err)
+ }
+
+ // start grpc listener
+ grpcL, err := util.NewListener(":"+strconv.Itoa(*msgBrokerOpt.port), 0)
+ if err != nil {
+ glog.Fatalf("failed to listen on grpc port %d: %v", *msgBrokerOpt.port, err)
+ }
+ grpcS := pb.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.msg_broker"))
+ messaging_pb.RegisterSeaweedMessagingServer(grpcS, qs)
+ reflection.Register(grpcS)
+ grpcS.Serve(grpcL)
+
+ return true
+
+}
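
The broker retries `GetFilerConfiguration` once per second until the filer answers; the S3 server below uses the same loop. A sketch of how the wait could be factored into a shared helper (the helper itself is hypothetical; the APIs it calls all appear in this patch):

```go
package command

import (
	"context"
	"time"

	"google.golang.org/grpc"

	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/pb"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)

// waitForFilerConfiguration polls the filer until it responds, then lets the
// caller pick out whichever fields it needs (cipher, buckets dir, ...).
func waitForFilerConfiguration(filerGrpcAddress string, grpcDialOption grpc.DialOption,
	apply func(*filer_pb.GetFilerConfigurationResponse)) {
	for {
		err := pb.WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
			resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
			if err != nil {
				return err
			}
			apply(resp)
			return nil
		})
		if err == nil {
			return
		}
		glog.V(0).Infof("waiting to connect to filer %s: %v", filerGrpcAddress, err)
		time.Sleep(time.Second)
	}
}
```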
diff --git a/weed/command/s3.go b/weed/command/s3.go
index 10a486657..7ebd4fab0 100644
--- a/weed/command/s3.go
+++ b/weed/command/s3.go
@@ -1,10 +1,13 @@
package command
import (
+ "context"
"fmt"
"net/http"
"time"
+ "github.com/chrislusf/seaweedfs/weed/pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/gorilla/mux"
@@ -19,29 +22,89 @@ var (
)
type S3Options struct {
- filer *string
- filerBucketsPath *string
- port *int
- domainName *string
- tlsPrivateKey *string
- tlsCertificate *string
+ filer *string
+ port *int
+ config *string
+ domainName *string
+ tlsPrivateKey *string
+ tlsCertificate *string
}
func init() {
cmdS3.Run = runS3 // break init cycle
s3StandaloneOptions.filer = cmdS3.Flag.String("filer", "localhost:8888", "filer server address")
- s3StandaloneOptions.filerBucketsPath = cmdS3.Flag.String("filer.dir.buckets", "/buckets", "folder on filer to store all buckets")
s3StandaloneOptions.port = cmdS3.Flag.Int("port", 8333, "s3 server http listen port")
s3StandaloneOptions.domainName = cmdS3.Flag.String("domainName", "", "suffix of the host name, {bucket}.{domainName}")
+ s3StandaloneOptions.config = cmdS3.Flag.String("config", "", "path to the config file")
s3StandaloneOptions.tlsPrivateKey = cmdS3.Flag.String("key.file", "", "path to the TLS private key file")
s3StandaloneOptions.tlsCertificate = cmdS3.Flag.String("cert.file", "", "path to the TLS certificate file")
}
var cmdS3 = &Command{
- UsageLine: "s3 -port=8333 -filer=",
+ UsageLine: "s3 [-port=8333] [-filer=] [-config=]",
Short: "start a s3 API compatible server that is backed by a filer",
Long: `start a s3 API compatible server that is backed by a filer.
+ By default, you can use any access key and secret key to access the S3 APIs.
+ To enable credential-based access, create a config.json file similar to this:
+
+{
+ "identities": [
+ {
+ "name": "some_name",
+ "credentials": [
+ {
+ "accessKey": "some_access_key1",
+ "secretKey": "some_secret_key1"
+ }
+ ],
+ "actions": [
+ "Admin",
+ "Read",
+ "Write"
+ ]
+ },
+ {
+ "name": "some_read_only_user",
+ "credentials": [
+ {
+ "accessKey": "some_access_key2",
+ "secretKey": "some_secret_key2"
+ }
+ ],
+ "actions": [
+ "Read"
+ ]
+ },
+ {
+ "name": "some_normal_user",
+ "credentials": [
+ {
+ "accessKey": "some_access_key3",
+ "secretKey": "some_secret_key3"
+ }
+ ],
+ "actions": [
+ "Read",
+ "Write"
+ ]
+ },
+ {
+ "name": "user_limited_to_bucket1",
+ "credentials": [
+ {
+ "accessKey": "some_access_key4",
+ "secretKey": "some_secret_key4"
+ }
+ ],
+ "actions": [
+ "Read:bucket1",
+ "Write:bucket1"
+ ]
+ }
+ ]
+}
+
`,
}
@@ -55,20 +118,44 @@ func runS3(cmd *Command, args []string) bool {
func (s3opt *S3Options) startS3Server() bool {
- filerGrpcAddress, err := parseFilerGrpcAddress(*s3opt.filer)
+ filerGrpcAddress, err := pb.ParseFilerGrpcAddress(*s3opt.filer)
if err != nil {
glog.Fatal(err)
return false
}
+ filerBucketsPath := "/buckets"
+
+ grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
+
+ for {
+ err = pb.WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+ resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
+ if err != nil {
+ return fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err)
+ }
+ filerBucketsPath = resp.DirBuckets
+ glog.V(0).Infof("S3 read filer buckets dir: %s", filerBucketsPath)
+ return nil
+ })
+ if err != nil {
+ glog.V(0).Infof("wait to connect to filer %s grpc address %s", *s3opt.filer, filerGrpcAddress)
+ time.Sleep(time.Second)
+ } else {
+ glog.V(0).Infof("connected to filer %s grpc address %s", *s3opt.filer, filerGrpcAddress)
+ break
+ }
+ }
+
router := mux.NewRouter().SkipClean(true)
_, s3ApiServer_err := s3api.NewS3ApiServer(router, &s3api.S3ApiServerOption{
Filer: *s3opt.filer,
FilerGrpcAddress: filerGrpcAddress,
+ Config: *s3opt.config,
DomainName: *s3opt.domainName,
- BucketsPath: *s3opt.filerBucketsPath,
- GrpcDialOption: security.LoadClientTLS(util.GetViper(), "grpc.client"),
+ BucketsPath: filerBucketsPath,
+ GrpcDialOption: grpcDialOption,
})
if s3ApiServer_err != nil {
glog.Fatalf("S3 API Server startup error: %v", s3ApiServer_err)
@@ -83,12 +170,12 @@ func (s3opt *S3Options) startS3Server() bool {
}
if *s3opt.tlsPrivateKey != "" {
- glog.V(0).Infof("Start Seaweed S3 API Server %s at https port %d", util.VERSION, *s3opt.port)
+ glog.V(0).Infof("Start Seaweed S3 API Server %s at https port %d", util.Version(), *s3opt.port)
if err = httpS.ServeTLS(s3ApiListener, *s3opt.tlsCertificate, *s3opt.tlsPrivateKey); err != nil {
glog.Fatalf("S3 API Server Fail to serve: %v", err)
}
} else {
- glog.V(0).Infof("Start Seaweed S3 API Server %s at http port %d", util.VERSION, *s3opt.port)
+ glog.V(0).Infof("Start Seaweed S3 API Server %s at http port %d", util.Version(), *s3opt.port)
if err = httpS.Serve(s3ApiListener); err != nil {
glog.Fatalf("S3 API Server Fail to serve: %v", err)
}
diff --git a/weed/command/scaffold.go b/weed/command/scaffold.go
index ab658735f..b199f2d2d 100644
--- a/weed/command/scaffold.go
+++ b/weed/command/scaffold.go
@@ -18,7 +18,7 @@ var cmdScaffold = &Command{
For example, the filer.toml mysql password can be overwritten by environment variable
export WEED_MYSQL_PASSWORD=some_password
Environment variable rules:
- * Prefix fix with "WEED_"
+ * Prefix the variable name with "WEED_"
* Uppercase the rest of the variable name.
* Replace '.' with '_'
@@ -74,7 +74,12 @@ const (
# with http DELETE, by default the filer would check whether a folder is empty.
# recursive_delete will delete all sub folders and files, similar to "rm -Rf"
recursive_delete = false
-
+# each directory under this folder automatically becomes a separate bucket
+buckets_folder = "/buckets"
+buckets_fsync = [ # a list of buckets with all write requests fsync=true
+ "important_bucket",
+ "should_always_fsync",
+]
####################################################
# The following are filer store options
@@ -136,13 +141,13 @@ hosts=[
"localhost:9042",
]
-[redis]
+[redis2]
enabled = false
address = "localhost:6379"
password = ""
database = 0
-[redis_cluster]
+[redis_cluster2]
enabled = false
addresses = [
"localhost:30001",
@@ -163,11 +168,11 @@ enabled = false
servers = "localhost:2379"
timeout = "3s"
-[tikv]
+[mongodb]
enabled = false
-pdAddress = "192.168.199.113:2379"
-
-
+uri = "mongodb://localhost:27017"
+option_pool_size = 0
+database = "seaweedfs"
`
NOTIFICATION_TOML_EXAMPLE = `
@@ -262,6 +267,7 @@ aws_secret_access_key = "" # if empty, loads from the shared credentials fil
region = "us-east-2"
bucket = "your_bucket_name" # an existing bucket
directory = "/" # destination directory
+endpoint = ""
[sink.google_cloud_storage]
# read credentials doc at https://cloud.google.com/docs/authentication/getting-started
@@ -323,6 +329,10 @@ key = ""
cert = ""
key = ""
+[grpc.msg_broker]
+cert = ""
+key = ""
+
# use this for any place needs a grpc client
# i.e., "weed backup|benchmark|filer.copy|filer.replicate|mount|s3|upload"
[grpc.client]
@@ -352,15 +362,19 @@ key = ""
[master.maintenance]
# these scripts are run periodically; the effect is the same as running them from 'weed shell'
scripts = """
+ lock
ec.encode -fullPercent=95 -quietFor=1h
ec.rebuild -force
ec.balance -force
volume.balance -force
+ volume.fix.replication
+ unlock
"""
sleep_minutes = 17 # sleep minutes between each script execution
[master.filer]
-default_filer_url = "http://localhost:8888/"
+default = "localhost:8888" # used by maintenance scripts if the scripts needs to use fs related commands
+
[master.sequencer]
type = "memory" # Choose [memory|etcd] type for storing the file id sequence
@@ -378,13 +392,27 @@ sequencer_etcd_urls = "http://127.0.0.1:2379"
aws_secret_access_key = "" # if empty, loads from the shared credentials file (~/.aws/credentials).
region = "us-east-2"
bucket = "your_bucket_name" # an existing bucket
+ endpoint = ""
# create this number of logical volumes when there are no more writable volumes
+# copy_x means how many copies of the data, where x is the number of copies.
+# e.g.:
+# 000 has only one copy, copy_1
+# 010 and 001 have two copies, copy_2
+# 011 has three copies, copy_3
[master.volume_growth]
-count_1 = 7 # create 1 x 7 = 7 actual volumes
-count_2 = 6 # create 2 x 6 = 12 actual volumes
-count_3 = 3 # create 3 x 3 = 9 actual volumes
-count_other = 1 # create n x 1 = n actual volumes
+copy_1 = 7 # create 1 x 7 = 7 actual volumes
+copy_2 = 6 # create 2 x 6 = 12 actual volumes
+copy_3 = 3 # create 3 x 3 = 9 actual volumes
+copy_other = 1 # create n x 1 = n actual volumes
+
+# configuration flags for replication
+[master.replication]
+# any replication counts should be considered minimums. If you specify 010 and
+# have 3 different racks, that's still considered writable. Writes will still
+# try to replicate to all available volumes. You should only use this option
+# if you are doing your own replication or periodic sync of volumes.
+treat_replication_as_minimums = false
`
)
diff --git a/weed/command/server.go b/weed/command/server.go
index 6aa68b6d2..c006f00eb 100644
--- a/weed/command/server.go
+++ b/weed/command/server.go
@@ -18,10 +18,11 @@ type ServerOptions struct {
}
var (
- serverOptions ServerOptions
- masterOptions MasterOptions
- filerOptions FilerOptions
- s3Options S3Options
+ serverOptions ServerOptions
+ masterOptions MasterOptions
+ filerOptions FilerOptions
+ s3Options S3Options
+ msgBrokerOptions MessageBrokerOptions
)
func init() {
@@ -45,7 +46,7 @@ var cmdServer = &Command{
}
var (
- serverIp = cmdServer.Flag.String("ip", "localhost", "ip or server name")
+ serverIp = cmdServer.Flag.String("ip", util.DetectedHostAddress(), "ip or server name")
serverBindIp = cmdServer.Flag.String("ip.bind", "0.0.0.0", "ip address to bind to")
serverTimeout = cmdServer.Flag.Int("idleTimeout", 30, "connection idle seconds")
serverDataCenter = cmdServer.Flag.String("dataCenter", "", "current volume server's data center name")
@@ -53,10 +54,11 @@ var (
serverWhiteListOption = cmdServer.Flag.String("whiteList", "", "comma separated Ip addresses having write permission. No limit if empty.")
serverDisableHttp = cmdServer.Flag.Bool("disableHttp", false, "disable http requests, only gRPC operations are allowed.")
volumeDataFolders = cmdServer.Flag.String("dir", os.TempDir(), "directories to store data files. dir[,dir]...")
- volumeMaxDataVolumeCounts = cmdServer.Flag.String("volume.max", "7", "maximum numbers of volumes, count[,count]...")
+ volumeMaxDataVolumeCounts = cmdServer.Flag.String("volume.max", "7", "maximum number of volumes, count[,count]... If set to zero on a non-Windows OS, the limit will be auto-configured.")
pulseSeconds = cmdServer.Flag.Int("pulseSeconds", 5, "number of seconds between heartbeats")
isStartingFiler = cmdServer.Flag.Bool("filer", false, "whether to start filer")
isStartingS3 = cmdServer.Flag.Bool("s3", false, "whether to start S3 gateway")
+ isStartingMsgBroker = cmdServer.Flag.Bool("msgBroker", false, "whether to start message broker")
serverWhiteList []string
)
@@ -78,10 +80,10 @@ func init() {
filerOptions.port = cmdServer.Flag.Int("filer.port", 8888, "filer server http listen port")
filerOptions.publicPort = cmdServer.Flag.Int("filer.port.public", 0, "filer server public http listen port")
filerOptions.defaultReplicaPlacement = cmdServer.Flag.String("filer.defaultReplicaPlacement", "", "Default replication type if not specified during runtime.")
- filerOptions.redirectOnRead = cmdServer.Flag.Bool("filer.redirectOnRead", false, "whether proxy or redirect to volume server during file GET request")
filerOptions.disableDirListing = cmdServer.Flag.Bool("filer.disableDirListing", false, "turn off directory listing")
filerOptions.maxMB = cmdServer.Flag.Int("filer.maxMB", 32, "split files larger than the limit")
filerOptions.dirListingLimit = cmdServer.Flag.Int("filer.dirListLimit", 1000, "limit sub dir listing size")
+ filerOptions.cipher = cmdServer.Flag.Bool("filer.encryptVolumeData", false, "encrypt data on volume servers")
serverOptions.v.port = cmdServer.Flag.Int("volume.port", 8080, "volume server http listen port")
serverOptions.v.publicPort = cmdServer.Flag.Int("volume.port.public", 0, "volume server public port")
@@ -92,11 +94,13 @@ func init() {
serverOptions.v.fileSizeLimitMB = cmdServer.Flag.Int("volume.fileSizeLimitMB", 256, "limit file size to avoid out of memory")
serverOptions.v.publicUrl = cmdServer.Flag.String("volume.publicUrl", "", "publicly accessible address")
- s3Options.filerBucketsPath = cmdServer.Flag.String("s3.filer.dir.buckets", "/buckets", "folder on filer to store all buckets")
s3Options.port = cmdServer.Flag.Int("s3.port", 8333, "s3 server http listen port")
s3Options.domainName = cmdServer.Flag.String("s3.domainName", "", "suffix of the host name, {bucket}.{domainName}")
s3Options.tlsPrivateKey = cmdServer.Flag.String("s3.key.file", "", "path to the TLS private key file")
s3Options.tlsCertificate = cmdServer.Flag.String("s3.cert.file", "", "path to the TLS certificate file")
+ s3Options.config = cmdServer.Flag.String("s3.config", "", "path to the config file")
+
+ msgBrokerOptions.port = cmdServer.Flag.Int("msgBroker.port", 17777, "broker gRPC listen port")
}
@@ -114,11 +118,10 @@ func runServer(cmd *Command, args []string) bool {
defer pprof.StopCPUProfile()
}
- if *filerOptions.redirectOnRead {
+ if *isStartingS3 {
*isStartingFiler = true
}
-
- if *isStartingS3 {
+ if *isStartingMsgBroker {
*isStartingFiler = true
}
@@ -129,13 +132,15 @@ func runServer(cmd *Command, args []string) bool {
masterOptions.ip = serverIp
masterOptions.ipBind = serverBindIp
filerOptions.masters = &peers
- filerOptions.ip = serverBindIp
+ filerOptions.ip = serverIp
+ filerOptions.bindIp = serverBindIp
serverOptions.v.ip = serverIp
serverOptions.v.bindIp = serverBindIp
serverOptions.v.masters = &peers
serverOptions.v.idleConnectionTimeout = serverTimeout
serverOptions.v.dataCenter = serverDataCenter
serverOptions.v.rack = serverRack
+ msgBrokerOptions.ip = serverIp
serverOptions.v.pulseSeconds = pulseSeconds
masterOptions.pulseSeconds = pulseSeconds
@@ -148,6 +153,7 @@ func runServer(cmd *Command, args []string) bool {
filerAddress := fmt.Sprintf("%s:%d", *serverIp, *filerOptions.port)
s3Options.filer = &filerAddress
+ msgBrokerOptions.filer = &filerAddress
if *filerOptions.defaultReplicaPlacement == "" {
*filerOptions.defaultReplicaPlacement = *masterOptions.defaultReplication
@@ -191,6 +197,13 @@ func runServer(cmd *Command, args []string) bool {
}()
}
+ if *isStartingMsgBroker {
+ go func() {
+ time.Sleep(2 * time.Second)
+ msgBrokerOptions.startQueueServer()
+ }()
+ }
+
// start volume server
{
go serverOptions.v.startVolumeServer(*volumeDataFolders, *volumeMaxDataVolumeCounts, *serverWhiteListOption)
diff --git a/weed/command/shell.go b/weed/command/shell.go
index dcf70608f..6dd768f47 100644
--- a/weed/command/shell.go
+++ b/weed/command/shell.go
@@ -9,14 +9,14 @@ import (
)
var (
- shellOptions shell.ShellOptions
- shellInitialFilerUrl *string
+ shellOptions shell.ShellOptions
+ shellInitialFiler *string
)
func init() {
cmdShell.Run = runShell // break init cycle
shellOptions.Masters = cmdShell.Flag.String("master", "localhost:9333", "comma-separated master servers")
- shellInitialFilerUrl = cmdShell.Flag.String("filer.url", "http://localhost:8888/", "initial filer url")
+ shellInitialFiler = cmdShell.Flag.String("filer", "localhost:8888", "filer host and port")
}
var cmdShell = &Command{
@@ -32,12 +32,13 @@ func runShell(command *Command, args []string) bool {
util.LoadConfiguration("security", false)
shellOptions.GrpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client")
- var filerPwdErr error
- shellOptions.FilerHost, shellOptions.FilerPort, shellOptions.Directory, filerPwdErr = util.ParseFilerUrl(*shellInitialFilerUrl)
- if filerPwdErr != nil {
- fmt.Printf("failed to parse url filer.url=%s : %v\n", *shellInitialFilerUrl, filerPwdErr)
+ var err error
+ shellOptions.FilerHost, shellOptions.FilerPort, err = util.ParseHostPort(*shellInitialFiler)
+ if err != nil {
+ fmt.Printf("failed to parse filer %s: %v\n", *shellInitialFiler, err)
return false
}
+ shellOptions.Directory = "/"
shell.RunShell(shellOptions)
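
`util.ParseHostPort` is not shown in this patch; in terms of the standard library it plausibly amounts to `net.SplitHostPort` plus an integer conversion. A sketch under that assumption (the real signature may differ):

```go
package main

import (
	"fmt"
	"net"
	"strconv"
)

// parseHostPort splits "host:port" and converts the port to a number.
// A sketch of what util.ParseHostPort plausibly does, not its actual code.
func parseHostPort(hostPort string) (string, int64, error) {
	host, portStr, err := net.SplitHostPort(hostPort)
	if err != nil {
		return "", 0, err
	}
	port, err := strconv.ParseInt(portStr, 10, 64)
	if err != nil {
		return "", 0, err
	}
	return host, port, nil
}

func main() {
	host, port, err := parseHostPort("localhost:8888")
	if err != nil {
		panic(err)
	}
	fmt.Println(host, port) // localhost 8888
}
```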
diff --git a/weed/command/upload.go b/weed/command/upload.go
index d71046131..358897aee 100644
--- a/weed/command/upload.go
+++ b/weed/command/upload.go
@@ -16,14 +16,15 @@ var (
)
type UploadOptions struct {
- master *string
- dir *string
- include *string
- replication *string
- collection *string
- dataCenter *string
- ttl *string
- maxMB *int
+ master *string
+ dir *string
+ include *string
+ replication *string
+ collection *string
+ dataCenter *string
+ ttl *string
+ maxMB *int
+ usePublicUrl *bool
}
func init() {
@@ -37,6 +38,7 @@ func init() {
upload.dataCenter = cmdUpload.Flag.String("dataCenter", "", "optional data center name")
upload.ttl = cmdUpload.Flag.String("ttl", "", "time to live, e.g.: 1m, 1h, 1d, 1M, 1y")
upload.maxMB = cmdUpload.Flag.Int("maxMB", 32, "split files larger than the limit")
+ upload.usePublicUrl = cmdUpload.Flag.Bool("usePublicUrl", false, "upload to public url from volume server")
}
var cmdUpload = &Command{
@@ -79,9 +81,7 @@ func runUpload(cmd *Command, args []string) bool {
if e != nil {
return e
}
- results, e := operation.SubmitFiles(*upload.master, grpcDialOption, parts,
- *upload.replication, *upload.collection, *upload.dataCenter,
- *upload.ttl, *upload.maxMB)
+ results, e := operation.SubmitFiles(*upload.master, grpcDialOption, parts, *upload.replication, *upload.collection, *upload.dataCenter, *upload.ttl, *upload.maxMB, *upload.usePublicUrl)
bytes, _ := json.Marshal(results)
fmt.Println(string(bytes))
if e != nil {
@@ -98,9 +98,7 @@ func runUpload(cmd *Command, args []string) bool {
if e != nil {
fmt.Println(e.Error())
}
- results, _ := operation.SubmitFiles(*upload.master, grpcDialOption, parts,
- *upload.replication, *upload.collection, *upload.dataCenter,
- *upload.ttl, *upload.maxMB)
+ results, _ := operation.SubmitFiles(*upload.master, grpcDialOption, parts, *upload.replication, *upload.collection, *upload.dataCenter, *upload.ttl, *upload.maxMB, *upload.usePublicUrl)
bytes, _ := json.Marshal(results)
fmt.Println(string(bytes))
}
diff --git a/weed/command/version.go b/weed/command/version.go
index 8fdd68ec8..9caf7dc4e 100644
--- a/weed/command/version.go
+++ b/weed/command/version.go
@@ -19,6 +19,6 @@ func runVersion(cmd *Command, args []string) bool {
cmd.Usage()
}
- fmt.Printf("version %s %s %s\n", util.VERSION, runtime.GOOS, runtime.GOARCH)
+ fmt.Printf("version %s %s %s\n", util.Version(), runtime.GOOS, runtime.GOARCH)
return true
}
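
Throughout this patch the `util.VERSION` constant is replaced by a `util.Version()` call, which makes room for a value injected at link time, such as a commit hash set with `-ldflags "-X ..."`. A sketch of what such a function could look like (the variable name, version string, and format are assumptions):

```go
// Sketch of a version string assembled at build time; placeholder values.
package util

import "fmt"

// VERSION is the static release string; "x.yz" is a placeholder.
const VERSION = "x.yz"

// COMMIT is assumed empty by default and overridden at link time, e.g.
//   go build -ldflags "-X <module>/weed/util.COMMIT=$(git rev-parse --short HEAD)"
var COMMIT string

// Version returns the release string plus the optional build commit.
func Version() string {
	if COMMIT == "" {
		return VERSION
	}
	return fmt.Sprintf("%s %s", VERSION, COMMIT)
}
```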
diff --git a/weed/command/volume.go b/weed/command/volume.go
index 9d665d143..f942ec50b 100644
--- a/weed/command/volume.go
+++ b/weed/command/volume.go
@@ -10,9 +10,11 @@ import (
"strings"
"time"
+ "github.com/chrislusf/seaweedfs/weed/util/grace"
"github.com/spf13/viper"
"google.golang.org/grpc"
+ "github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/util/httpdown"
@@ -56,7 +58,7 @@ func init() {
cmdVolume.Run = runVolume // break init cycle
v.port = cmdVolume.Flag.Int("port", 8080, "http listen port")
v.publicPort = cmdVolume.Flag.Int("port.public", 0, "port opened to public")
- v.ip = cmdVolume.Flag.String("ip", "", "ip or server name")
+ v.ip = cmdVolume.Flag.String("ip", util.DetectedHostAddress(), "ip or server name")
v.publicUrl = cmdVolume.Flag.String("publicUrl", "", "Publicly accessible address")
v.bindIp = cmdVolume.Flag.String("ip.bind", "0.0.0.0", "ip address to bind to")
v.masters = cmdVolume.Flag.String("mserver", "localhost:9333", "comma-separated master servers")
@@ -83,7 +85,7 @@ var cmdVolume = &Command{
var (
volumeFolders = cmdVolume.Flag.String("dir", os.TempDir(), "directories to store data files. dir[,dir]...")
- maxVolumeCounts = cmdVolume.Flag.String("max", "7", "maximum numbers of volumes, count[,count]...")
+ maxVolumeCounts = cmdVolume.Flag.String("max", "7", "maximum numbers of volumes, count[,count]... If set to zero on non-windows OS, the limit will be auto configured.")
volumeWhiteListOption = cmdVolume.Flag.String("whiteList", "", "comma separated Ip addresses having write permission. No limit if empty.")
)
@@ -92,7 +94,7 @@ func runVolume(cmd *Command, args []string) bool {
util.LoadConfiguration("security", false)
runtime.GOMAXPROCS(runtime.NumCPU())
- util.SetupProfiling(*v.cpuProfile, *v.memProfile)
+ grace.SetupProfiling(*v.cpuProfile, *v.memProfile)
v.startVolumeServer(*volumeFolders, *maxVolumeCounts, *volumeWhiteListOption)
@@ -126,7 +128,8 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
}
if *v.ip == "" {
- *v.ip = "127.0.0.1"
+ *v.ip = util.DetectedHostAddress()
+ glog.V(0).Infof("detected volume server ip address: %v", *v.ip)
}
if *v.publicPort == 0 {
@@ -181,7 +184,7 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
clusterHttpServer := v.startClusterHttpService(volumeMux)
stopChain := make(chan struct{})
- util.OnInterrupt(func() {
+ grace.OnInterrupt(func() {
fmt.Println("volume server has be killed")
var startTime time.Time
@@ -234,7 +237,7 @@ func (v VolumeServerOptions) startGrpcService(vs volume_server_pb.VolumeServerSe
if err != nil {
glog.Fatalf("failed to listen on grpc port %d: %v", grpcPort, err)
}
- grpcS := util.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.volume"))
+ grpcS := pb.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.volume"))
volume_server_pb.RegisterVolumeServerServer(grpcS, vs)
reflection.Register(grpcS)
go func() {
@@ -247,7 +250,7 @@ func (v VolumeServerOptions) startGrpcService(vs volume_server_pb.VolumeServerSe
func (v VolumeServerOptions) startPublicHttpService(handler http.Handler) httpdown.Server {
publicListeningAddress := *v.bindIp + ":" + strconv.Itoa(*v.publicPort)
- glog.V(0).Infoln("Start Seaweed volume server", util.VERSION, "public at", publicListeningAddress)
+ glog.V(0).Infoln("Start Seaweed volume server", util.Version(), "public at", publicListeningAddress)
publicListener, e := util.NewListener(publicListeningAddress, time.Duration(*v.idleConnectionTimeout)*time.Second)
if e != nil {
glog.Fatalf("Volume server listener error:%v", e)
@@ -274,7 +277,7 @@ func (v VolumeServerOptions) startClusterHttpService(handler http.Handler) httpd
}
listeningAddress := *v.bindIp + ":" + strconv.Itoa(*v.port)
- glog.V(0).Infof("Start Seaweed volume server %s at %s", util.VERSION, listeningAddress)
+ glog.V(0).Infof("Start Seaweed volume server %s at %s", util.Version(), listeningAddress)
listener, e := util.NewListener(listeningAddress, time.Duration(*v.idleConnectionTimeout)*time.Second)
if e != nil {
glog.Fatalf("Volume server listener error:%v", e)
diff --git a/weed/command/watch.go b/weed/command/watch.go
new file mode 100644
index 000000000..b46707a62
--- /dev/null
+++ b/weed/command/watch.go
@@ -0,0 +1,65 @@
+package command
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "time"
+
+ "github.com/chrislusf/seaweedfs/weed/pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/security"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+func init() {
+ cmdWatch.Run = runWatch // break init cycle
+}
+
+var cmdWatch = &Command{
+ UsageLine: "watch [-filer=localhost:8888] [-target=/]",
+ Short: "see recent changes on a filer",
+ Long: `See recent changes on a filer.
+
+ `,
+}
+
+var (
+ watchFiler = cmdWatch.Flag.String("filer", "localhost:8888", "filer hostname:port")
+ watchTarget = cmdWatch.Flag.String("pathPrefix", "/", "path to a folder or file, or common prefix for the folders or files on filer")
+ watchStart = cmdWatch.Flag.Duration("timeAgo", 0, "start time before now. \"300ms\", \"1.5h\" or \"2h45m\". Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\", \"m\", \"h\"")
+)
+
+func runWatch(cmd *Command, args []string) bool {
+
+ grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
+
+ watchErr := pb.WithFilerClient(*watchFiler, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+
+ stream, err := client.SubscribeMetadata(context.Background(), &filer_pb.SubscribeMetadataRequest{
+ ClientName: "watch",
+ PathPrefix: *watchTarget,
+ SinceNs: time.Now().Add(-*watchStart).UnixNano(),
+ })
+ if err != nil {
+ return fmt.Errorf("listen: %v", err)
+ }
+
+ for {
+ resp, listenErr := stream.Recv()
+ if listenErr == io.EOF {
+ return nil
+ }
+ if listenErr != nil {
+ return listenErr
+ }
+ fmt.Printf("events: %+v\n", resp.EventNotification)
+ }
+
+ })
+ if watchErr != nil {
+ fmt.Printf("watch %s: %v\n", *watchFiler, watchErr)
+ }
+
+ return true
+}
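The new `weed watch` command tails a filer's metadata stream, e.g. `weed watch -filer=localhost:8888 -pathPrefix=/buckets -timeAgo=1h`. Each event carries the old and new entries, so a consumer can classify changes; the rule below is an inference from the `OldEntry`/`NewEntry` pair, not documented behavior:

```go
// A hedged sketch of classifying filer metadata events by inspecting
// EventNotification's OldEntry/NewEntry pair, as a watch client might.
package main

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)

func classify(ev *filer_pb.EventNotification) string {
	switch {
	case ev.OldEntry == nil && ev.NewEntry != nil:
		return "create"
	case ev.OldEntry != nil && ev.NewEntry == nil:
		return "delete"
	default:
		return "update"
	}
}

func main() {
	fmt.Println(classify(&filer_pb.EventNotification{NewEntry: &filer_pb.Entry{Name: "a.txt"}}))
}
```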
diff --git a/weed/command/webdav.go b/weed/command/webdav.go
index 0e6f89040..b9676c909 100644
--- a/weed/command/webdav.go
+++ b/weed/command/webdav.go
@@ -1,13 +1,17 @@
package command
import (
+ "context"
"fmt"
"net/http"
+ "os"
"os/user"
"strconv"
"time"
"github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/server"
"github.com/chrislusf/seaweedfs/weed/util"
@@ -23,6 +27,8 @@ type WebDavOption struct {
collection *string
tlsPrivateKey *string
tlsCertificate *string
+ cacheDir *string
+ cacheSizeMB *int64
}
func init() {
@@ -32,11 +38,13 @@ func init() {
webDavStandaloneOptions.collection = cmdWebDav.Flag.String("collection", "", "collection to create the files")
webDavStandaloneOptions.tlsPrivateKey = cmdWebDav.Flag.String("key.file", "", "path to the TLS private key file")
webDavStandaloneOptions.tlsCertificate = cmdWebDav.Flag.String("cert.file", "", "path to the TLS certificate file")
+ webDavStandaloneOptions.cacheDir = cmdWebDav.Flag.String("cacheDir", os.TempDir(), "local cache directory for file chunks")
+ webDavStandaloneOptions.cacheSizeMB = cmdWebDav.Flag.Int64("cacheCapacityMB", 1000, "local cache capacity in MB")
}
var cmdWebDav = &Command{
UsageLine: "webdav -port=7333 -filer=",
- Short: " start a webdav server that is backed by a filer",
+ Short: "start a webdav server that is backed by a filer",
Long: `start a webdav server that is backed by a filer.
`,
@@ -46,7 +54,7 @@ func runWebDav(cmd *Command, args []string) bool {
util.LoadConfiguration("security", false)
- glog.V(0).Infof("Starting Seaweed WebDav Server %s at https port %d", util.VERSION, *webDavStandaloneOptions.port)
+ glog.V(0).Infof("Starting Seaweed WebDav Server %s at https port %d", util.Version(), *webDavStandaloneOptions.port)
return webDavStandaloneOptions.startWebDav()
@@ -54,12 +62,6 @@ func runWebDav(cmd *Command, args []string) bool {
func (wo *WebDavOption) startWebDav() bool {
- filerGrpcAddress, err := parseFilerGrpcAddress(*wo.filer)
- if err != nil {
- glog.Fatal(err)
- return false
- }
-
// detect current user
uid, gid := uint32(0), uint32(0)
if u, err := user.Current(); err == nil {
@@ -71,13 +73,45 @@ func (wo *WebDavOption) startWebDav() bool {
}
}
+ // parse filer grpc address
+ filerGrpcAddress, err := pb.ParseFilerGrpcAddress(*wo.filer)
+ if err != nil {
+ glog.Fatal(err)
+ return false
+ }
+
+ grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
+
+ var cipher bool
+ // connect to filer
+ for {
+ err = pb.WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+ resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
+ if err != nil {
+ return fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err)
+ }
+ cipher = resp.Cipher
+ return nil
+ })
+ if err != nil {
+ glog.V(0).Infof("wait to connect to filer %s grpc address %s", *wo.filer, filerGrpcAddress)
+ time.Sleep(time.Second)
+ } else {
+ glog.V(0).Infof("connected to filer %s grpc address %s", *wo.filer, filerGrpcAddress)
+ break
+ }
+ }
+
ws, webdavServer_err := weed_server.NewWebDavServer(&weed_server.WebDavOption{
Filer: *wo.filer,
FilerGrpcAddress: filerGrpcAddress,
- GrpcDialOption: security.LoadClientTLS(util.GetViper(), "grpc.client"),
+ GrpcDialOption: grpcDialOption,
Collection: *wo.collection,
Uid: uid,
Gid: gid,
+ Cipher: cipher,
+ CacheDir: *wo.cacheDir,
+ CacheSizeMB: *wo.cacheSizeMB,
})
if webdavServer_err != nil {
glog.Fatalf("WebDav Server startup error: %v", webdavServer_err)
@@ -92,12 +126,12 @@ func (wo *WebDavOption) startWebDav() bool {
}
if *wo.tlsPrivateKey != "" {
- glog.V(0).Infof("Start Seaweed WebDav Server %s at https port %d", util.VERSION, *wo.port)
+ glog.V(0).Infof("Start Seaweed WebDav Server %s at https port %d", util.Version(), *wo.port)
if err = httpS.ServeTLS(webDavListener, *wo.tlsCertificate, *wo.tlsPrivateKey); err != nil {
glog.Fatalf("WebDav Server Fail to serve: %v", err)
}
} else {
- glog.V(0).Infof("Start Seaweed WebDav Server %s at http port %d", util.VERSION, *wo.port)
+ glog.V(0).Infof("Start Seaweed WebDav Server %s at http port %d", util.Version(), *wo.port)
if err = httpS.Serve(webDavListener); err != nil {
glog.Fatalf("WebDav Server Fail to serve: %v", err)
}
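The WebDAV server now blocks at startup until it can fetch the filer's configuration (to learn the cipher setting), retrying once per second instead of failing outright. The same wait-until-ready pattern in isolation, as a generic sketch under no assumptions about the RPC involved:

```go
// A generic sketch of the startup wait loop: retry a probe once per
// second until it succeeds.
package main

import (
	"errors"
	"fmt"
	"time"
)

func waitUntilReady(probe func() error) {
	for {
		if err := probe(); err != nil {
			time.Sleep(time.Second)
			continue
		}
		return
	}
}

func main() {
	attempts := 0
	waitUntilReady(func() error {
		attempts++
		if attempts < 3 {
			return errors.New("filer not up yet")
		}
		return nil
	})
	fmt.Println("connected after", attempts, "attempts")
}
```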
diff --git a/weed/filer2/abstract_sql/abstract_sql_store.go b/weed/filer2/abstract_sql/abstract_sql_store.go
index 47fe507a1..5ade18960 100644
--- a/weed/filer2/abstract_sql/abstract_sql_store.go
+++ b/weed/filer2/abstract_sql/abstract_sql_store.go
@@ -7,6 +7,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
)
@@ -98,13 +99,13 @@ func (store *AbstractSqlStore) UpdateEntry(ctx context.Context, entry *filer2.En
return nil
}
-func (store *AbstractSqlStore) FindEntry(ctx context.Context, fullpath filer2.FullPath) (*filer2.Entry, error) {
+func (store *AbstractSqlStore) FindEntry(ctx context.Context, fullpath util.FullPath) (*filer2.Entry, error) {
dir, name := fullpath.DirAndName()
row := store.getTxOrDB(ctx).QueryRowContext(ctx, store.SqlFind, util.HashStringToLong(dir), name, dir)
var data []byte
if err := row.Scan(&data); err != nil {
- return nil, filer2.ErrNotFound
+ return nil, filer_pb.ErrNotFound
}
entry := &filer2.Entry{
@@ -117,7 +118,7 @@ func (store *AbstractSqlStore) FindEntry(ctx context.Context, fullpath filer2.Fu
return entry, nil
}
-func (store *AbstractSqlStore) DeleteEntry(ctx context.Context, fullpath filer2.FullPath) error {
+func (store *AbstractSqlStore) DeleteEntry(ctx context.Context, fullpath util.FullPath) error {
dir, name := fullpath.DirAndName()
@@ -134,7 +135,7 @@ func (store *AbstractSqlStore) DeleteEntry(ctx context.Context, fullpath filer2.
return nil
}
-func (store *AbstractSqlStore) DeleteFolderChildren(ctx context.Context, fullpath filer2.FullPath) error {
+func (store *AbstractSqlStore) DeleteFolderChildren(ctx context.Context, fullpath util.FullPath) error {
res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlDeleteFolderChildren, util.HashStringToLong(string(fullpath)), fullpath)
if err != nil {
@@ -149,7 +150,7 @@ func (store *AbstractSqlStore) DeleteFolderChildren(ctx context.Context, fullpat
return nil
}
-func (store *AbstractSqlStore) ListDirectoryEntries(ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer2.Entry, err error) {
+func (store *AbstractSqlStore) ListDirectoryEntries(ctx context.Context, fullpath util.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer2.Entry, err error) {
sqlText := store.SqlListExclusive
if inclusive {
@@ -171,7 +172,7 @@ func (store *AbstractSqlStore) ListDirectoryEntries(ctx context.Context, fullpat
}
entry := &filer2.Entry{
- FullPath: filer2.NewFullPath(string(fullpath), name),
+ FullPath: util.NewFullPath(string(fullpath), name),
}
if err = entry.DecodeAttributesAndChunks(data); err != nil {
glog.V(0).Infof("scan decode %s : %v", entry.FullPath, err)
@@ -183,3 +184,7 @@ func (store *AbstractSqlStore) ListDirectoryEntries(ctx context.Context, fullpat
return entries, nil
}
+
+func (store *AbstractSqlStore) Shutdown() {
+ store.DB.Close()
+}
diff --git a/weed/filer2/cassandra/cassandra_store.go b/weed/filer2/cassandra/cassandra_store.go
index f81ef946f..5dd7d8036 100644
--- a/weed/filer2/cassandra/cassandra_store.go
+++ b/weed/filer2/cassandra/cassandra_store.go
@@ -3,10 +3,13 @@ package cassandra
import (
"context"
"fmt"
+
+ "github.com/gocql/gocql"
+
"github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
- "github.com/gocql/gocql"
)
func init() {
@@ -72,7 +75,7 @@ func (store *CassandraStore) UpdateEntry(ctx context.Context, entry *filer2.Entr
return store.InsertEntry(ctx, entry)
}
-func (store *CassandraStore) FindEntry(ctx context.Context, fullpath filer2.FullPath) (entry *filer2.Entry, err error) {
+func (store *CassandraStore) FindEntry(ctx context.Context, fullpath util.FullPath) (entry *filer2.Entry, err error) {
dir, name := fullpath.DirAndName()
var data []byte
@@ -80,12 +83,12 @@ func (store *CassandraStore) FindEntry(ctx context.Context, fullpath filer2.Full
"SELECT meta FROM filemeta WHERE directory=? AND name=?",
dir, name).Consistency(gocql.One).Scan(&data); err != nil {
if err != gocql.ErrNotFound {
- return nil, filer2.ErrNotFound
+ return nil, filer_pb.ErrNotFound
}
}
if len(data) == 0 {
- return nil, filer2.ErrNotFound
+ return nil, filer_pb.ErrNotFound
}
entry = &filer2.Entry{
@@ -99,7 +102,7 @@ func (store *CassandraStore) FindEntry(ctx context.Context, fullpath filer2.Full
return entry, nil
}
-func (store *CassandraStore) DeleteEntry(ctx context.Context, fullpath filer2.FullPath) error {
+func (store *CassandraStore) DeleteEntry(ctx context.Context, fullpath util.FullPath) error {
dir, name := fullpath.DirAndName()
@@ -112,7 +115,7 @@ func (store *CassandraStore) DeleteEntry(ctx context.Context, fullpath filer2.Fu
return nil
}
-func (store *CassandraStore) DeleteFolderChildren(ctx context.Context, fullpath filer2.FullPath) error {
+func (store *CassandraStore) DeleteFolderChildren(ctx context.Context, fullpath util.FullPath) error {
if err := store.session.Query(
"DELETE FROM filemeta WHERE directory=?",
@@ -123,7 +126,7 @@ func (store *CassandraStore) DeleteFolderChildren(ctx context.Context, fullpath
return nil
}
-func (store *CassandraStore) ListDirectoryEntries(ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool,
+func (store *CassandraStore) ListDirectoryEntries(ctx context.Context, fullpath util.FullPath, startFileName string, inclusive bool,
limit int) (entries []*filer2.Entry, err error) {
cqlStr := "SELECT NAME, meta FROM filemeta WHERE directory=? AND name>? ORDER BY NAME ASC LIMIT ?"
@@ -136,7 +139,7 @@ func (store *CassandraStore) ListDirectoryEntries(ctx context.Context, fullpath
iter := store.session.Query(cqlStr, string(fullpath), startFileName, limit).Iter()
for iter.Scan(&name, &data) {
entry := &filer2.Entry{
- FullPath: filer2.NewFullPath(string(fullpath), name),
+ FullPath: util.NewFullPath(string(fullpath), name),
}
if decodeErr := entry.DecodeAttributesAndChunks(data); decodeErr != nil {
err = decodeErr
@@ -151,3 +154,7 @@ func (store *CassandraStore) ListDirectoryEntries(ctx context.Context, fullpath
return entries, err
}
+
+func (store *CassandraStore) Shutdown() {
+ store.session.Close()
+}
diff --git a/weed/filer2/entry.go b/weed/filer2/entry.go
index c901927bb..00b9b132d 100644
--- a/weed/filer2/entry.go
+++ b/weed/filer2/entry.go
@@ -5,6 +5,7 @@ import (
"time"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
)
type Attr struct {
@@ -20,6 +21,7 @@ type Attr struct {
UserName string
GroupNames []string
SymlinkTarget string
+ Md5 []byte
}
func (attr Attr) IsDirectory() bool {
@@ -27,7 +29,7 @@ func (attr Attr) IsDirectory() bool {
}
type Entry struct {
- FullPath
+ util.FullPath
Attr
Extended map[string][]byte
@@ -71,3 +73,11 @@ func (entry *Entry) ToProtoFullEntry() *filer_pb.FullEntry {
Entry: entry.ToProtoEntry(),
}
}
+
+func FromPbEntry(dir string, entry *filer_pb.Entry) *Entry {
+ return &Entry{
+ FullPath: util.NewFullPath(dir, entry.Name),
+ Attr: PbToEntryAttribute(entry.Attributes),
+ Chunks: entry.Chunks,
+ }
+}
diff --git a/weed/filer2/entry_codec.go b/weed/filer2/entry_codec.go
index 3a2dc6134..47c911011 100644
--- a/weed/filer2/entry_codec.go
+++ b/weed/filer2/entry_codec.go
@@ -52,6 +52,7 @@ func EntryAttributeToPb(entry *Entry) *filer_pb.FuseAttributes {
UserName: entry.Attr.UserName,
GroupName: entry.Attr.GroupNames,
SymlinkTarget: entry.Attr.SymlinkTarget,
+ Md5: entry.Attr.Md5,
}
}
@@ -71,6 +72,7 @@ func PbToEntryAttribute(attr *filer_pb.FuseAttributes) Attr {
t.UserName = attr.UserName
t.GroupNames = attr.GroupName
t.SymlinkTarget = attr.SymlinkTarget
+ t.Md5 = attr.Md5
return t
}
@@ -93,6 +95,10 @@ func EqualEntry(a, b *Entry) bool {
return false
}
+ if !bytes.Equal(a.Md5, b.Md5) {
+ return false
+ }
+
for i := 0; i < len(a.Chunks); i++ {
if !proto.Equal(a.Chunks[i], b.Chunks[i]) {
return false
diff --git a/weed/filer2/etcd/etcd_store.go b/weed/filer2/etcd/etcd_store.go
index 0f0c01426..2ef65b4a0 100644
--- a/weed/filer2/etcd/etcd_store.go
+++ b/weed/filer2/etcd/etcd_store.go
@@ -6,10 +6,12 @@ import (
"strings"
"time"
+ "go.etcd.io/etcd/clientv3"
+
"github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
weed_util "github.com/chrislusf/seaweedfs/weed/util"
- "go.etcd.io/etcd/clientv3"
)
const (
@@ -90,7 +92,7 @@ func (store *EtcdStore) UpdateEntry(ctx context.Context, entry *filer2.Entry) (e
return store.InsertEntry(ctx, entry)
}
-func (store *EtcdStore) FindEntry(ctx context.Context, fullpath filer2.FullPath) (entry *filer2.Entry, err error) {
+func (store *EtcdStore) FindEntry(ctx context.Context, fullpath weed_util.FullPath) (entry *filer2.Entry, err error) {
key := genKey(fullpath.DirAndName())
resp, err := store.client.Get(ctx, string(key))
@@ -99,7 +101,7 @@ func (store *EtcdStore) FindEntry(ctx context.Context, fullpath filer2.FullPath)
}
if len(resp.Kvs) == 0 {
- return nil, filer2.ErrNotFound
+ return nil, filer_pb.ErrNotFound
}
entry = &filer2.Entry{
@@ -113,7 +115,7 @@ func (store *EtcdStore) FindEntry(ctx context.Context, fullpath filer2.FullPath)
return entry, nil
}
-func (store *EtcdStore) DeleteEntry(ctx context.Context, fullpath filer2.FullPath) (err error) {
+func (store *EtcdStore) DeleteEntry(ctx context.Context, fullpath weed_util.FullPath) (err error) {
key := genKey(fullpath.DirAndName())
if _, err := store.client.Delete(ctx, string(key)); err != nil {
@@ -123,7 +125,7 @@ func (store *EtcdStore) DeleteEntry(ctx context.Context, fullpath filer2.FullPat
return nil
}
-func (store *EtcdStore) DeleteFolderChildren(ctx context.Context, fullpath filer2.FullPath) (err error) {
+func (store *EtcdStore) DeleteFolderChildren(ctx context.Context, fullpath weed_util.FullPath) (err error) {
directoryPrefix := genDirectoryKeyPrefix(fullpath, "")
if _, err := store.client.Delete(ctx, string(directoryPrefix), clientv3.WithPrefix()); err != nil {
@@ -134,7 +136,7 @@ func (store *EtcdStore) DeleteFolderChildren(ctx context.Context, fullpath filer
}
func (store *EtcdStore) ListDirectoryEntries(
- ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool, limit int,
+ ctx context.Context, fullpath weed_util.FullPath, startFileName string, inclusive bool, limit int,
) (entries []*filer2.Entry, err error) {
directoryPrefix := genDirectoryKeyPrefix(fullpath, "")
@@ -157,7 +159,7 @@ func (store *EtcdStore) ListDirectoryEntries(
break
}
entry := &filer2.Entry{
- FullPath: filer2.NewFullPath(string(fullpath), fileName),
+ FullPath: weed_util.NewFullPath(string(fullpath), fileName),
}
if decodeErr := entry.DecodeAttributesAndChunks(kv.Value); decodeErr != nil {
err = decodeErr
@@ -177,7 +179,7 @@ func genKey(dirPath, fileName string) (key []byte) {
return key
}
-func genDirectoryKeyPrefix(fullpath filer2.FullPath, startFileName string) (keyPrefix []byte) {
+func genDirectoryKeyPrefix(fullpath weed_util.FullPath, startFileName string) (keyPrefix []byte) {
keyPrefix = []byte(string(fullpath))
keyPrefix = append(keyPrefix, DIR_FILE_SEPARATOR)
if len(startFileName) > 0 {
@@ -194,3 +196,7 @@ func getNameFromKey(key []byte) string {
return string(key[sepIndex+1:])
}
+
+func (store *EtcdStore) Shutdown() {
+ store.client.Close()
+}
diff --git a/weed/filer2/filechunks.go b/weed/filer2/filechunks.go
index b5876df82..2ddfb3c30 100644
--- a/weed/filer2/filechunks.go
+++ b/weed/filer2/filechunks.go
@@ -3,6 +3,7 @@ package filer2
import (
"fmt"
"hash/fnv"
+ "math"
"sort"
"sync"
@@ -19,7 +20,21 @@ func TotalSize(chunks []*filer_pb.FileChunk) (size uint64) {
return
}
-func ETag(chunks []*filer_pb.FileChunk) (etag string) {
+func ETag(entry *filer_pb.Entry) (etag string) {
+ if entry.Attributes == nil || entry.Attributes.Md5 == nil {
+ return ETagChunks(entry.Chunks)
+ }
+ return fmt.Sprintf("%x", entry.Attributes.Md5)
+}
+
+func ETagEntry(entry *Entry) (etag string) {
+ if entry.Attr.Md5 == nil {
+ return ETagChunks(entry.Chunks)
+ }
+ return fmt.Sprintf("%x", entry.Attr.Md5)
+}
+
+func ETagChunks(chunks []*filer_pb.FileChunk) (etag string) {
if len(chunks) == 1 {
return chunks[0].ETag
}
@@ -70,10 +85,16 @@ type ChunkView struct {
Offset int64
Size uint64
LogicOffset int64
- IsFullChunk bool
+ ChunkSize uint64
+ CipherKey []byte
+ IsGzipped bool
}
-func ViewFromChunks(chunks []*filer_pb.FileChunk, offset int64, size int) (views []*ChunkView) {
+func (cv *ChunkView) IsFullChunk() bool {
+ return cv.Size == cv.ChunkSize
+}
+
+func ViewFromChunks(chunks []*filer_pb.FileChunk, offset int64, size int64) (views []*ChunkView) {
visibles := NonOverlappingVisibleIntervals(chunks)
@@ -81,19 +102,27 @@ func ViewFromChunks(chunks []*filer_pb.FileChunk, offset int64, size int) (views
}
-func ViewFromVisibleIntervals(visibles []VisibleInterval, offset int64, size int) (views []*ChunkView) {
+func ViewFromVisibleIntervals(visibles []VisibleInterval, offset int64, size int64) (views []*ChunkView) {
- stop := offset + int64(size)
+ stop := offset + size
+ if size == math.MaxInt64 {
+ stop = math.MaxInt64
+ }
+ if stop < offset {
+ stop = math.MaxInt64
+ }
for _, chunk := range visibles {
+
if chunk.start <= offset && offset < chunk.stop && offset < stop {
- isFullChunk := chunk.isFullChunk && chunk.start == offset && chunk.stop <= stop
views = append(views, &ChunkView{
FileId: chunk.fileId,
Offset: offset - chunk.start, // offset is the data starting location in this file id
Size: uint64(min(chunk.stop, stop) - offset),
LogicOffset: offset,
- IsFullChunk: isFullChunk,
+ ChunkSize: chunk.chunkSize,
+ CipherKey: chunk.cipherKey,
+ IsGzipped: chunk.isGzipped,
})
offset = min(chunk.stop, stop)
}
@@ -120,13 +149,7 @@ var bufPool = sync.Pool{
func MergeIntoVisibles(visibles, newVisibles []VisibleInterval, chunk *filer_pb.FileChunk) []VisibleInterval {
- newV := newVisibleInterval(
- chunk.Offset,
- chunk.Offset+int64(chunk.Size),
- chunk.GetFileIdString(),
- chunk.Mtime,
- true,
- )
+ newV := newVisibleInterval(chunk.Offset, chunk.Offset+int64(chunk.Size), chunk.GetFileIdString(), chunk.Mtime, chunk.Size, chunk.CipherKey, chunk.IsGzipped)
length := len(visibles)
if length == 0 {
@@ -140,23 +163,11 @@ func MergeIntoVisibles(visibles, newVisibles []VisibleInterval, chunk *filer_pb.
logPrintf(" before", visibles)
for _, v := range visibles {
if v.start < chunk.Offset && chunk.Offset < v.stop {
- newVisibles = append(newVisibles, newVisibleInterval(
- v.start,
- chunk.Offset,
- v.fileId,
- v.modifiedTime,
- false,
- ))
+ newVisibles = append(newVisibles, newVisibleInterval(v.start, chunk.Offset, v.fileId, v.modifiedTime, chunk.Size, v.cipherKey, v.isGzipped))
}
chunkStop := chunk.Offset + int64(chunk.Size)
if v.start < chunkStop && chunkStop < v.stop {
- newVisibles = append(newVisibles, newVisibleInterval(
- chunkStop,
- v.stop,
- v.fileId,
- v.modifiedTime,
- false,
- ))
+ newVisibles = append(newVisibles, newVisibleInterval(chunkStop, v.stop, v.fileId, v.modifiedTime, chunk.Size, v.cipherKey, v.isGzipped))
}
if chunkStop <= v.start || v.stop <= chunk.Offset {
newVisibles = append(newVisibles, v)
@@ -187,6 +198,7 @@ func NonOverlappingVisibleIntervals(chunks []*filer_pb.FileChunk) (visibles []Vi
var newVisibles []VisibleInterval
for _, chunk := range chunks {
+
newVisibles = MergeIntoVisibles(visibles, newVisibles, chunk)
t := visibles[:0]
visibles = newVisibles
@@ -207,16 +219,20 @@ type VisibleInterval struct {
stop int64
modifiedTime int64
fileId string
- isFullChunk bool
+ chunkSize uint64
+ cipherKey []byte
+ isGzipped bool
}
-func newVisibleInterval(start, stop int64, fileId string, modifiedTime int64, isFullChunk bool) VisibleInterval {
+func newVisibleInterval(start, stop int64, fileId string, modifiedTime int64, chunkSize uint64, cipherKey []byte, isGzipped bool) VisibleInterval {
return VisibleInterval{
start: start,
stop: stop,
fileId: fileId,
modifiedTime: modifiedTime,
- isFullChunk: isFullChunk,
+ chunkSize: chunkSize,
+ cipherKey: cipherKey,
+ isGzipped: isGzipped,
}
}
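`ChunkView` now records the chunk's full size plus cipher/gzip metadata, and `IsFullChunk` is derived as `Size == ChunkSize` rather than tracked as a flag. The interval merge itself keeps the same semantics: a newer chunk hides whatever part of an older chunk it overlaps. A simplified stand-alone illustration of that overlay rule — the types here are toy stand-ins, not the `filer2` ones:

```go
// A toy illustration of the interval semantics behind
// NonOverlappingVisibleIntervals: a newer chunk overwrites the
// overlapping part of an older one.
package main

import "fmt"

type interval struct {
	start, stop int64
	fileId      string
}

// overlay trims existing intervals against a newer chunk and appends it.
func overlay(olds []interval, newer interval) (merged []interval) {
	for _, v := range olds {
		if v.start < newer.start && newer.start < v.stop {
			merged = append(merged, interval{v.start, newer.start, v.fileId}) // keep left remainder
		}
		if v.start < newer.stop && newer.stop < v.stop {
			merged = append(merged, interval{newer.stop, v.stop, v.fileId}) // keep right remainder
		}
		if v.stop <= newer.start || newer.stop <= v.start {
			merged = append(merged, v) // no overlap, keep as-is
		}
	}
	return append(merged, newer)
}

func main() {
	old := []interval{{0, 100, "1,ab"}}
	fmt.Println(overlay(old, interval{50, 150, "2,cd"}))
	// prints [{0 50 1,ab} {50 150 2,cd}]
}
```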
diff --git a/weed/filer2/filechunks_test.go b/weed/filer2/filechunks_test.go
index bb4a6c74d..7b1133b85 100644
--- a/weed/filer2/filechunks_test.go
+++ b/weed/filer2/filechunks_test.go
@@ -218,7 +218,7 @@ func TestChunksReading(t *testing.T) {
testcases := []struct {
Chunks []*filer_pb.FileChunk
Offset int64
- Size int
+ Size int64
Expected []*ChunkView
}{
// case 0: normal
diff --git a/weed/filer2/filer.go b/weed/filer2/filer.go
index a0af942e0..666ab8fe4 100644
--- a/weed/filer2/filer.go
+++ b/weed/filer2/filer.go
@@ -4,7 +4,6 @@ import (
"context"
"fmt"
"os"
- "path/filepath"
"strings"
"time"
@@ -13,6 +12,9 @@ import (
"github.com/karlseguin/ccache"
"github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+ "github.com/chrislusf/seaweedfs/weed/util/log_buffer"
"github.com/chrislusf/seaweedfs/weed/wdclient"
)
@@ -24,20 +26,30 @@ var (
)
type Filer struct {
- store *FilerStoreWrapper
- directoryCache *ccache.Cache
- MasterClient *wdclient.MasterClient
- fileIdDeletionChan chan string
- GrpcDialOption grpc.DialOption
+ store *FilerStoreWrapper
+ directoryCache *ccache.Cache
+ MasterClient *wdclient.MasterClient
+ fileIdDeletionQueue *util.UnboundedQueue
+ GrpcDialOption grpc.DialOption
+ DirBucketsPath string
+ FsyncBuckets []string
+ buckets *FilerBuckets
+ Cipher bool
+ MetaLogBuffer *log_buffer.LogBuffer
+ metaLogCollection string
+ metaLogReplication string
}
-func NewFiler(masters []string, grpcDialOption grpc.DialOption) *Filer {
+func NewFiler(masters []string, grpcDialOption grpc.DialOption, filerHost string, filerGrpcPort uint32, collection string, replication string, notifyFn func()) *Filer {
f := &Filer{
- directoryCache: ccache.New(ccache.Configure().MaxSize(1000).ItemsToPrune(100)),
- MasterClient: wdclient.NewMasterClient(context.Background(), grpcDialOption, "filer", masters),
- fileIdDeletionChan: make(chan string, PaginationSize),
- GrpcDialOption: grpcDialOption,
+ directoryCache: ccache.New(ccache.Configure().MaxSize(1000).ItemsToPrune(100)),
+ MasterClient: wdclient.NewMasterClient(grpcDialOption, "filer", filerHost, filerGrpcPort, masters),
+ fileIdDeletionQueue: util.NewUnboundedQueue(),
+ GrpcDialOption: grpcDialOption,
}
+ f.MetaLogBuffer = log_buffer.NewLogBuffer(time.Minute, f.logFlushFunc, notifyFn)
+ f.metaLogCollection = collection
+ f.metaLogReplication = replication
go f.loopProcessingDeletion()
@@ -85,7 +97,7 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool) erro
var lastDirectoryEntry *Entry
for i := 1; i < len(dirParts); i++ {
- dirPath := "/" + filepath.ToSlash(filepath.Join(dirParts[:i]...))
+ dirPath := "/" + util.Join(dirParts[:i]...)
// fmt.Printf("%d directory: %+v\n", i, dirPath)
// first check local cache
@@ -94,7 +106,7 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool) erro
// not found, check the store directly
if dirEntry == nil {
glog.V(4).Infof("find uncached directory: %s", dirPath)
- dirEntry, _ = f.FindEntry(ctx, FullPath(dirPath))
+ dirEntry, _ = f.FindEntry(ctx, util.FullPath(dirPath))
} else {
// glog.V(4).Infof("found cached directory: %s", dirPath)
}
@@ -106,24 +118,29 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool) erro
now := time.Now()
dirEntry = &Entry{
- FullPath: FullPath(dirPath),
+ FullPath: util.FullPath(dirPath),
Attr: Attr{
- Mtime: now,
- Crtime: now,
- Mode: os.ModeDir | 0770,
- Uid: entry.Uid,
- Gid: entry.Gid,
+ Mtime: now,
+ Crtime: now,
+ Mode: os.ModeDir | entry.Mode | 0110,
+ Uid: entry.Uid,
+ Gid: entry.Gid,
+ Collection: entry.Collection,
+ Replication: entry.Replication,
+ UserName: entry.UserName,
+ GroupNames: entry.GroupNames,
},
}
glog.V(2).Infof("create directory: %s %v", dirPath, dirEntry.Mode)
mkdirErr := f.store.InsertEntry(ctx, dirEntry)
if mkdirErr != nil {
- if _, err := f.FindEntry(ctx, FullPath(dirPath)); err == ErrNotFound {
+ if _, err := f.FindEntry(ctx, util.FullPath(dirPath)); err == filer_pb.ErrNotFound {
glog.V(3).Infof("mkdir %s: %v", dirPath, mkdirErr)
return fmt.Errorf("mkdir %s: %v", dirPath, mkdirErr)
}
} else {
+ f.maybeAddBucket(dirEntry)
f.NotifyUpdateEvent(nil, dirEntry, false)
}
@@ -174,6 +191,7 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool) erro
}
}
+ f.maybeAddBucket(entry)
f.NotifyUpdateEvent(oldEntry, entry, true)
f.deleteChunksIfNotNew(oldEntry, entry)
@@ -197,7 +215,7 @@ func (f *Filer) UpdateEntry(ctx context.Context, oldEntry, entry *Entry) (err er
return f.store.UpdateEntry(ctx, entry)
}
-func (f *Filer) FindEntry(ctx context.Context, p FullPath) (entry *Entry, err error) {
+func (f *Filer) FindEntry(ctx context.Context, p util.FullPath) (entry *Entry, err error) {
now := time.Now()
@@ -213,14 +231,51 @@ func (f *Filer) FindEntry(ctx context.Context, p FullPath) (entry *Entry, err er
},
}, nil
}
- return f.store.FindEntry(ctx, p)
+ entry, err = f.store.FindEntry(ctx, p)
+ if entry != nil && entry.TtlSec > 0 {
+ if entry.Crtime.Add(time.Duration(entry.TtlSec) * time.Second).Before(time.Now()) {
+ f.store.DeleteEntry(ctx, p.Child(entry.Name()))
+ return nil, filer_pb.ErrNotFound
+ }
+ }
+ return
+
}
-func (f *Filer) ListDirectoryEntries(ctx context.Context, p FullPath, startFileName string, inclusive bool, limit int) ([]*Entry, error) {
+func (f *Filer) ListDirectoryEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int) ([]*Entry, error) {
if strings.HasSuffix(string(p), "/") && len(p) > 1 {
p = p[0 : len(p)-1]
}
- return f.store.ListDirectoryEntries(ctx, p, startFileName, inclusive, limit)
+
+ var makeupEntries []*Entry
+ entries, expiredCount, lastFileName, err := f.doListDirectoryEntries(ctx, p, startFileName, inclusive, limit)
+ for expiredCount > 0 && err == nil {
+ makeupEntries, expiredCount, lastFileName, err = f.doListDirectoryEntries(ctx, p, lastFileName, false, expiredCount)
+ if err == nil {
+ entries = append(entries, makeupEntries...)
+ }
+ }
+
+ return entries, err
+}
+
+func (f *Filer) doListDirectoryEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int) (entries []*Entry, expiredCount int, lastFileName string, err error) {
+ listedEntries, listErr := f.store.ListDirectoryEntries(ctx, p, startFileName, inclusive, limit)
+ if listErr != nil {
+ return listedEntries, expiredCount, "", listErr
+ }
+ for _, entry := range listedEntries {
+ lastFileName = entry.Name()
+ if entry.TtlSec > 0 {
+ if entry.Crtime.Add(time.Duration(entry.TtlSec) * time.Second).Before(time.Now()) {
+ f.store.DeleteEntry(ctx, p.Child(entry.Name()))
+ expiredCount++
+ continue
+ }
+ }
+ entries = append(entries, entry)
+ }
+ return
}
func (f *Filer) cacheDelDirectory(dirpath string) {
@@ -261,3 +316,8 @@ func (f *Filer) cacheSetDirectory(dirpath string, dirEntry *Entry, level int) {
f.directoryCache.Set(dirpath, dirEntry, time.Duration(minutes)*time.Minute)
}
+
+func (f *Filer) Shutdown() {
+ f.MetaLogBuffer.Shutdown()
+ f.store.Shutdown()
+}
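`FindEntry` and directory listing now enforce TTLs lazily: an entry whose creation time plus TTL has passed is deleted on access and reported as not found, and listings re-fetch to make up for expired rows. The expiry predicate in isolation, mirroring the check above:

```go
// A minimal sketch of the lazy-TTL check used on reads: an entry whose
// creation time plus TTL is in the past is treated as not found.
package main

import (
	"fmt"
	"time"
)

func expired(crtime time.Time, ttlSec int32) bool {
	return ttlSec > 0 && crtime.Add(time.Duration(ttlSec)*time.Second).Before(time.Now())
}

func main() {
	created := time.Now().Add(-2 * time.Hour)
	fmt.Println(expired(created, 3600)) // true: one-hour TTL has elapsed
}
```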
diff --git a/weed/filer2/filer_buckets.go b/weed/filer2/filer_buckets.go
new file mode 100644
index 000000000..7a57e7ee1
--- /dev/null
+++ b/weed/filer2/filer_buckets.go
@@ -0,0 +1,121 @@
+package filer2
+
+import (
+ "context"
+ "math"
+ "sync"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+type BucketName string
+type BucketOption struct {
+ Name BucketName
+ Replication string
+ fsync bool
+}
+type FilerBuckets struct {
+ dirBucketsPath string
+ buckets map[BucketName]*BucketOption
+ sync.RWMutex
+}
+
+func (f *Filer) LoadBuckets() {
+
+ f.buckets = &FilerBuckets{
+ buckets: make(map[BucketName]*BucketOption),
+ }
+
+ limit := math.MaxInt32
+
+ entries, err := f.ListDirectoryEntries(context.Background(), util.FullPath(f.DirBucketsPath), "", false, limit)
+
+ if err != nil {
+ glog.V(1).Infof("no buckets found: %v", err)
+ return
+ }
+
+ shouldFsyncMap := make(map[string]bool)
+ for _, bucket := range f.FsyncBuckets {
+ shouldFsyncMap[bucket] = true
+ }
+
+ glog.V(1).Infof("buckets found: %d", len(entries))
+
+ f.buckets.Lock()
+ for _, entry := range entries {
+ _, shouldFsync := shouldFsyncMap[entry.Name()]
+ f.buckets.buckets[BucketName(entry.Name())] = &BucketOption{
+ Name: BucketName(entry.Name()),
+ Replication: entry.Replication,
+ fsync: shouldFsync,
+ }
+ }
+ f.buckets.Unlock()
+
+}
+
+func (f *Filer) ReadBucketOption(bucketName string) (replication string, fsync bool) {
+
+ f.buckets.RLock()
+ defer f.buckets.RUnlock()
+
+ option, found := f.buckets.buckets[BucketName(bucketName)]
+
+ if !found {
+ return "", false
+ }
+ return option.Replication, option.fsync
+
+}
+
+func (f *Filer) isBucket(entry *Entry) bool {
+ if !entry.IsDirectory() {
+ return false
+ }
+ parent, dirName := entry.FullPath.DirAndName()
+ if parent != f.DirBucketsPath {
+ return false
+ }
+
+ f.buckets.RLock()
+ defer f.buckets.RUnlock()
+
+ _, found := f.buckets.buckets[BucketName(dirName)]
+
+ return found
+
+}
+
+func (f *Filer) maybeAddBucket(entry *Entry) {
+ if !entry.IsDirectory() {
+ return
+ }
+ parent, dirName := entry.FullPath.DirAndName()
+ if parent != f.DirBucketsPath {
+ return
+ }
+ f.addBucket(dirName, &BucketOption{
+ Name: BucketName(dirName),
+ Replication: entry.Replication,
+ })
+}
+
+func (f *Filer) addBucket(bucketName string, bucketOption *BucketOption) {
+
+ f.buckets.Lock()
+ defer f.buckets.Unlock()
+
+ f.buckets.buckets[BucketName(bucketName)] = bucketOption
+
+}
+
+func (f *Filer) deleteBucket(bucketName string) {
+
+ f.buckets.Lock()
+ defer f.buckets.Unlock()
+
+ delete(f.buckets.buckets, BucketName(bucketName))
+
+}
diff --git a/weed/filer2/filer_client_util.go b/weed/filer2/filer_client_util.go
deleted file mode 100644
index af804b909..000000000
--- a/weed/filer2/filer_client_util.go
+++ /dev/null
@@ -1,172 +0,0 @@
-package filer2
-
-import (
- "context"
- "fmt"
- "io"
- "math"
- "strings"
- "sync"
-
- "github.com/chrislusf/seaweedfs/weed/glog"
- "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
- "github.com/chrislusf/seaweedfs/weed/util"
-)
-
-func VolumeId(fileId string) string {
- lastCommaIndex := strings.LastIndex(fileId, ",")
- if lastCommaIndex > 0 {
- return fileId[:lastCommaIndex]
- }
- return fileId
-}
-
-type FilerClient interface {
- WithFilerClient(ctx context.Context, fn func(context.Context, filer_pb.SeaweedFilerClient) error) error
-}
-
-func ReadIntoBuffer(ctx context.Context, filerClient FilerClient, fullFilePath FullPath, buff []byte, chunkViews []*ChunkView, baseOffset int64) (totalRead int64, err error) {
- var vids []string
- for _, chunkView := range chunkViews {
- vids = append(vids, VolumeId(chunkView.FileId))
- }
-
- vid2Locations := make(map[string]*filer_pb.Locations)
-
- err = filerClient.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error {
-
- glog.V(4).Infof("read fh lookup volume id locations: %v", vids)
- resp, err := client.LookupVolume(ctx, &filer_pb.LookupVolumeRequest{
- VolumeIds: vids,
- })
- if err != nil {
- return err
- }
-
- vid2Locations = resp.LocationsMap
-
- return nil
- })
-
- if err != nil {
- return 0, fmt.Errorf("failed to lookup volume ids %v: %v", vids, err)
- }
-
- var wg sync.WaitGroup
- for _, chunkView := range chunkViews {
- wg.Add(1)
- go func(chunkView *ChunkView) {
- defer wg.Done()
-
- glog.V(4).Infof("read fh reading chunk: %+v", chunkView)
-
- locations := vid2Locations[VolumeId(chunkView.FileId)]
- if locations == nil || len(locations.Locations) == 0 {
- glog.V(0).Infof("failed to locate %s", chunkView.FileId)
- err = fmt.Errorf("failed to locate %s", chunkView.FileId)
- return
- }
-
- var n int64
- n, err = util.ReadUrl(
- fmt.Sprintf("http://%s/%s", locations.Locations[0].Url, chunkView.FileId),
- chunkView.Offset,
- int(chunkView.Size),
- buff[chunkView.LogicOffset-baseOffset:chunkView.LogicOffset-baseOffset+int64(chunkView.Size)],
- !chunkView.IsFullChunk)
-
- if err != nil {
-
- glog.V(0).Infof("%v read http://%s/%v %v bytes: %v", fullFilePath, locations.Locations[0].Url, chunkView.FileId, n, err)
-
- err = fmt.Errorf("failed to read http://%s/%s: %v",
- locations.Locations[0].Url, chunkView.FileId, err)
- return
- }
-
- glog.V(4).Infof("read fh read %d bytes: %+v", n, chunkView)
- totalRead += n
-
- }(chunkView)
- }
- wg.Wait()
- return
-}
-
-func GetEntry(ctx context.Context, filerClient FilerClient, fullFilePath FullPath) (entry *filer_pb.Entry, err error) {
-
- dir, name := fullFilePath.DirAndName()
-
- err = filerClient.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error {
-
- request := &filer_pb.LookupDirectoryEntryRequest{
- Directory: dir,
- Name: name,
- }
-
- // glog.V(3).Infof("read %s request: %v", fullFilePath, request)
- resp, err := client.LookupDirectoryEntry(ctx, request)
- if err != nil {
- if err == ErrNotFound || strings.Contains(err.Error(), ErrNotFound.Error()) {
- return nil
- }
- glog.V(3).Infof("read %s %v: %v", fullFilePath, resp, err)
- return err
- }
-
- if resp.Entry == nil {
- // glog.V(3).Infof("read %s entry: %v", fullFilePath, entry)
- return nil
- }
-
- entry = resp.Entry
- return nil
- })
-
- return
-}
-
-func ReadDirAllEntries(ctx context.Context, filerClient FilerClient, fullDirPath FullPath, prefix string, fn func(entry *filer_pb.Entry, isLast bool)) (err error) {
-
- err = filerClient.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error {
-
- lastEntryName := ""
-
- request := &filer_pb.ListEntriesRequest{
- Directory: string(fullDirPath),
- Prefix: prefix,
- StartFromFileName: lastEntryName,
- Limit: math.MaxUint32,
- }
-
- glog.V(3).Infof("read directory: %v", request)
- stream, err := client.ListEntries(ctx, request)
- if err != nil {
- return fmt.Errorf("list %s: %v", fullDirPath, err)
- }
-
- var prevEntry *filer_pb.Entry
- for {
- resp, recvErr := stream.Recv()
- if recvErr != nil {
- if recvErr == io.EOF {
- if prevEntry != nil {
- fn(prevEntry, true)
- }
- break
- } else {
- return recvErr
- }
- }
- if prevEntry != nil {
- fn(prevEntry, false)
- }
- prevEntry = resp.Entry
- }
-
- return nil
-
- })
-
- return
-}
diff --git a/weed/filer2/filer_delete_entry.go b/weed/filer2/filer_delete_entry.go
index 75a09e7ef..2fb53c579 100644
--- a/weed/filer2/filer_delete_entry.go
+++ b/weed/filer2/filer_delete_entry.go
@@ -6,9 +6,11 @@ import (
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
)
-func (f *Filer) DeleteEntryMetaAndData(ctx context.Context, p FullPath, isRecursive bool, ignoreRecursiveError, shouldDeleteChunks bool) (err error) {
+func (f *Filer) DeleteEntryMetaAndData(ctx context.Context, p util.FullPath, isRecursive bool, ignoreRecursiveError, shouldDeleteChunks bool) (err error) {
if p == "/" {
return nil
}
@@ -18,27 +20,35 @@ func (f *Filer) DeleteEntryMetaAndData(ctx context.Context, p FullPath, isRecurs
return findErr
}
+ isCollection := f.isBucket(entry)
+
var chunks []*filer_pb.FileChunk
chunks = append(chunks, entry.Chunks...)
if entry.IsDirectory() {
// delete the folder children, not including the folder itself
var dirChunks []*filer_pb.FileChunk
- dirChunks, err = f.doBatchDeleteFolderMetaAndData(ctx, entry, isRecursive, ignoreRecursiveError, shouldDeleteChunks)
+ dirChunks, err = f.doBatchDeleteFolderMetaAndData(ctx, entry, isRecursive, ignoreRecursiveError, shouldDeleteChunks && !isCollection)
if err != nil {
+ glog.V(0).Infof("delete directory %s: %v", p, err)
return fmt.Errorf("delete directory %s: %v", p, err)
}
chunks = append(chunks, dirChunks...)
- f.cacheDelDirectory(string(p))
}
+
// delete the file or folder
err = f.doDeleteEntryMetaAndData(ctx, entry, shouldDeleteChunks)
if err != nil {
return fmt.Errorf("delete file %s: %v", p, err)
}
- if shouldDeleteChunks {
+ if shouldDeleteChunks && !isCollection {
go f.DeleteChunks(chunks)
}
+ if isCollection {
+ collectionName := entry.Name()
+ f.doDeleteCollection(collectionName)
+ f.deleteBucket(collectionName)
+ }
return nil
}
@@ -63,13 +73,15 @@ func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry
var dirChunks []*filer_pb.FileChunk
if sub.IsDirectory() {
dirChunks, err = f.doBatchDeleteFolderMetaAndData(ctx, sub, isRecursive, ignoreRecursiveError, shouldDeleteChunks)
+ f.cacheDelDirectory(string(sub.FullPath))
+ f.NotifyUpdateEvent(sub, nil, shouldDeleteChunks)
+ chunks = append(chunks, dirChunks...)
+ } else {
+ chunks = append(chunks, sub.Chunks...)
}
if err != nil && !ignoreRecursiveError {
return nil, err
}
- if shouldDeleteChunks {
- chunks = append(chunks, dirChunks...)
- }
}
if len(entries) < PaginationSize {
@@ -77,26 +89,40 @@ func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry
}
}
- f.cacheDelDirectory(string(entry.FullPath))
-
- glog.V(3).Infof("deleting directory %v", entry.FullPath)
+ glog.V(3).Infof("deleting directory %v delete %d chunks: %v", entry.FullPath, len(chunks), shouldDeleteChunks)
if storeDeletionErr := f.store.DeleteFolderChildren(ctx, entry.FullPath); storeDeletionErr != nil {
return nil, fmt.Errorf("filer store delete: %v", storeDeletionErr)
}
- f.NotifyUpdateEvent(entry, nil, shouldDeleteChunks)
return chunks, nil
}
func (f *Filer) doDeleteEntryMetaAndData(ctx context.Context, entry *Entry, shouldDeleteChunks bool) (err error) {
- glog.V(3).Infof("deleting entry %v", entry.FullPath)
+ glog.V(3).Infof("deleting entry %v, delete chunks: %v", entry.FullPath, shouldDeleteChunks)
if storeDeletionErr := f.store.DeleteEntry(ctx, entry.FullPath); storeDeletionErr != nil {
return fmt.Errorf("filer store delete: %v", storeDeletionErr)
}
+ if entry.IsDirectory() {
+ f.cacheDelDirectory(string(entry.FullPath))
+ }
f.NotifyUpdateEvent(entry, nil, shouldDeleteChunks)
return nil
}
+
+func (f *Filer) doDeleteCollection(collectionName string) (err error) {
+
+ return f.MasterClient.WithClient(func(client master_pb.SeaweedClient) error {
+ _, err := client.CollectionDelete(context.Background(), &master_pb.CollectionDeleteRequest{
+ Name: collectionName,
+ })
+ if err != nil {
+ glog.Infof("delete collection %s: %v", collectionName, err)
+ }
+ return err
+ })
+
+}
diff --git a/weed/filer2/filer_deletion.go b/weed/filer2/filer_deletion.go
index 9937685f7..a6b229771 100644
--- a/weed/filer2/filer_deletion.go
+++ b/weed/filer2/filer_deletion.go
@@ -6,16 +6,14 @@ import (
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/wdclient"
)
-func (f *Filer) loopProcessingDeletion() {
-
- ticker := time.NewTicker(5 * time.Second)
-
- lookupFunc := func(vids []string) (map[string]operation.LookupResult, error) {
+func LookupByMasterClientFn(masterClient *wdclient.MasterClient) func(vids []string) (map[string]operation.LookupResult, error) {
+ return func(vids []string) (map[string]operation.LookupResult, error) {
m := make(map[string]operation.LookupResult)
for _, vid := range vids {
- locs, _ := f.MasterClient.GetVidLocations(vid)
+ locs, _ := masterClient.GetVidLocations(vid)
var locations []operation.Location
for _, loc := range locs {
locations = append(locations, operation.Location{
@@ -30,37 +28,56 @@ func (f *Filer) loopProcessingDeletion() {
}
return m, nil
}
+}
- var fileIds []string
+func (f *Filer) loopProcessingDeletion() {
+
+ lookupFunc := LookupByMasterClientFn(f.MasterClient)
+
+ DeletionBatchSize := 100000 // roughly 20 bytes per file id.
+
+ var deletionCount int
for {
- select {
- case fid := <-f.fileIdDeletionChan:
- fileIds = append(fileIds, fid)
- if len(fileIds) >= 4096 {
- glog.V(1).Infof("deleting fileIds len=%d", len(fileIds))
- operation.DeleteFilesWithLookupVolumeId(f.GrpcDialOption, fileIds, lookupFunc)
- fileIds = fileIds[:0]
- }
- case <-ticker.C:
- if len(fileIds) > 0 {
- glog.V(1).Infof("timed deletion fileIds len=%d", len(fileIds))
- operation.DeleteFilesWithLookupVolumeId(f.GrpcDialOption, fileIds, lookupFunc)
- fileIds = fileIds[:0]
+ deletionCount = 0
+ f.fileIdDeletionQueue.Consume(func(fileIds []string) {
+ for len(fileIds) > 0 {
+ var toDeleteFileIds []string
+ if len(fileIds) > DeletionBatchSize {
+ toDeleteFileIds = fileIds[:DeletionBatchSize]
+ fileIds = fileIds[DeletionBatchSize:]
+ } else {
+ toDeleteFileIds = fileIds
+ fileIds = fileIds[:0]
+ }
+ deletionCount = len(toDeleteFileIds)
+ deleteResults, err := operation.DeleteFilesWithLookupVolumeId(f.GrpcDialOption, toDeleteFileIds, lookupFunc)
+ if err != nil {
+ glog.V(0).Infof("deleting fileIds len=%d error: %v", deletionCount, err)
+ } else {
+ glog.V(1).Infof("deleting fileIds len=%d", deletionCount)
+ }
+ if len(deleteResults) != deletionCount {
+ glog.V(0).Infof("delete %d fileIds actual %d", deletionCount, len(deleteResults))
+ }
}
+ })
+
+ if deletionCount == 0 {
+ time.Sleep(1123 * time.Millisecond)
}
}
}
func (f *Filer) DeleteChunks(chunks []*filer_pb.FileChunk) {
for _, chunk := range chunks {
- f.fileIdDeletionChan <- chunk.GetFileIdString()
+ f.fileIdDeletionQueue.EnQueue(chunk.GetFileIdString())
}
}
// DeleteFileByFileId direct delete by file id.
// Only used when the fileId is not being managed by snapshots.
func (f *Filer) DeleteFileByFileId(fileId string) {
- f.fileIdDeletionChan <- fileId
+ f.fileIdDeletionQueue.EnQueue(fileId)
}
func (f *Filer) deleteChunksIfNotNew(oldEntry, newEntry *Entry) {
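Deletion moves from a bounded channel drained on a ticker to an unbounded queue consumed in batches of up to 100000 file ids, so producers never block. A toy queue with the same `EnQueue`/`Consume` shape — the real `util.UnboundedQueue` may differ internally:

```go
// A toy unbounded queue assumed to match the EnQueue/Consume shape
// above: producers never block, and the consumer drains whole batches.
package main

import (
	"fmt"
	"sync"
)

type unboundedQueue struct {
	mu    sync.Mutex
	items []string
}

func (q *unboundedQueue) EnQueue(items ...string) {
	q.mu.Lock()
	defer q.mu.Unlock()
	q.items = append(q.items, items...)
}

// Consume hands the whole pending batch to fn and clears the queue.
func (q *unboundedQueue) Consume(fn func([]string)) {
	q.mu.Lock()
	batch := q.items
	q.items = nil
	q.mu.Unlock()
	if len(batch) > 0 {
		fn(batch)
	}
}

func main() {
	q := &unboundedQueue{}
	q.EnQueue("3,0101", "3,0102")
	q.Consume(func(fileIds []string) { fmt.Println("deleting", len(fileIds), "file ids") })
}
```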
diff --git a/weed/filer2/filer_notify.go b/weed/filer2/filer_notify.go
index c37381116..ecb488373 100644
--- a/weed/filer2/filer_notify.go
+++ b/weed/filer2/filer_notify.go
@@ -1,39 +1,157 @@
package filer2
import (
+ "context"
+ "fmt"
+ "io"
+ "strings"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/notification"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
)
func (f *Filer) NotifyUpdateEvent(oldEntry, newEntry *Entry, deleteChunks bool) {
- var key string
+ var fullpath string
if oldEntry != nil {
- key = string(oldEntry.FullPath)
+ fullpath = string(oldEntry.FullPath)
} else if newEntry != nil {
- key = string(newEntry.FullPath)
+ fullpath = string(newEntry.FullPath)
} else {
return
}
+ // println("fullpath:", fullpath)
+
+ if strings.HasPrefix(fullpath, SystemLogDir) {
+ return
+ }
+
+ newParentPath := ""
+ if newEntry != nil {
+ newParentPath, _ = newEntry.FullPath.DirAndName()
+ }
+ eventNotification := &filer_pb.EventNotification{
+ OldEntry: oldEntry.ToProtoEntry(),
+ NewEntry: newEntry.ToProtoEntry(),
+ DeleteChunks: deleteChunks,
+ NewParentPath: newParentPath,
+ }
+
if notification.Queue != nil {
+ glog.V(3).Infof("notifying entry update %v", fullpath)
+ notification.Queue.SendMessage(fullpath, eventNotification)
+ }
- glog.V(3).Infof("notifying entry update %v", key)
+ f.logMetaEvent(fullpath, eventNotification)
- newParentPath := ""
- if newEntry != nil {
- newParentPath, _ = newEntry.FullPath.DirAndName()
- }
+}
- notification.Queue.SendMessage(
- key,
- &filer_pb.EventNotification{
- OldEntry: oldEntry.ToProtoEntry(),
- NewEntry: newEntry.ToProtoEntry(),
- DeleteChunks: deleteChunks,
- NewParentPath: newParentPath,
- },
- )
+func (f *Filer) logMetaEvent(fullpath string, eventNotification *filer_pb.EventNotification) {
+ dir, _ := util.FullPath(fullpath).DirAndName()
+
+ event := &filer_pb.SubscribeMetadataResponse{
+ Directory: dir,
+ EventNotification: eventNotification,
+ TsNs: time.Now().UnixNano(),
+ }
+ data, err := proto.Marshal(event)
+ if err != nil {
+ glog.Errorf("failed to marshal filer_pb.SubscribeMetadataResponse %+v: %v", event, err)
+ return
+ }
+
+ f.MetaLogBuffer.AddToBuffer([]byte(dir), data)
+
+}
+
+func (f *Filer) logFlushFunc(startTime, stopTime time.Time, buf []byte) {
+
+ targetFile := fmt.Sprintf("%s/%04d-%02d-%02d/%02d-%02d.segment", SystemLogDir,
+ startTime.Year(), startTime.Month(), startTime.Day(), startTime.Hour(), startTime.Minute(),
+ // startTime.Second(), startTime.Nanosecond(),
+ )
+
+ if err := f.appendToFile(targetFile, buf); err != nil {
+ glog.V(0).Infof("log write failed %s: %v", targetFile, err)
+ }
+}
+
+func (f *Filer) ReadPersistedLogBuffer(startTime time.Time, eachLogEntryFn func(logEntry *filer_pb.LogEntry) error) error {
+
+ startDate := fmt.Sprintf("%04d-%02d-%02d", startTime.Year(), startTime.Month(), startTime.Day())
+ startHourMinute := fmt.Sprintf("%02d-%02d.segment", startTime.Hour(), startTime.Minute())
+
+ sizeBuf := make([]byte, 4)
+ startTsNs := startTime.UnixNano()
+
+ dayEntries, listDayErr := f.ListDirectoryEntries(context.Background(), SystemLogDir, startDate, true, 366)
+ if listDayErr != nil {
+ return fmt.Errorf("fail to list log by day: %v", listDayErr)
+ }
+ for _, dayEntry := range dayEntries {
+ // println("checking day", dayEntry.FullPath)
+ hourMinuteEntries, listHourMinuteErr := f.ListDirectoryEntries(context.Background(), util.NewFullPath(SystemLogDir, dayEntry.Name()), "", false, 24*60)
+ if listHourMinuteErr != nil {
+ return fmt.Errorf("fail to list log %s by day: %v", dayEntry.Name(), listHourMinuteErr)
+ }
+ for _, hourMinuteEntry := range hourMinuteEntries {
+ // println("checking hh-mm", hourMinuteEntry.FullPath)
+ if dayEntry.Name() == startDate {
+ if strings.Compare(hourMinuteEntry.Name(), startHourMinute) < 0 {
+ continue
+ }
+ }
+ // println("processing", hourMinuteEntry.FullPath)
+ chunkedFileReader := NewChunkStreamReaderFromFiler(f.MasterClient, hourMinuteEntry.Chunks)
+ if err := ReadEachLogEntry(chunkedFileReader, sizeBuf, startTsNs, eachLogEntryFn); err != nil {
+ chunkedFileReader.Close()
+ if err == io.EOF {
+ break
+ }
+ return fmt.Errorf("reading %s: %v", hourMinuteEntry.FullPath, err)
+ }
+ chunkedFileReader.Close()
+ }
+ }
+
+ return nil
+}
+
+func ReadEachLogEntry(r io.Reader, sizeBuf []byte, ns int64, eachLogEntryFn func(logEntry *filer_pb.LogEntry) error) error {
+ for {
+ n, err := r.Read(sizeBuf)
+ if err != nil {
+ return err
+ }
+ if n != 4 {
+ return fmt.Errorf("size %d bytes, expected 4 bytes", n)
+ }
+ size := util.BytesToUint32(sizeBuf)
+ // println("entry size", size)
+ entryData := make([]byte, size)
+ n, err = r.Read(entryData)
+ if err != nil {
+ return err
+ }
+ if n != int(size) {
+ return fmt.Errorf("entry data %d bytes, expected %d bytes", n, size)
+ }
+ logEntry := &filer_pb.LogEntry{}
+ if err = proto.Unmarshal(entryData, logEntry); err != nil {
+ return err
+ }
+ if logEntry.TsNs <= ns {
+ return nil
+ }
+ // println("each log: ", logEntry.TsNs)
+ if err := eachLogEntryFn(logEntry); err != nil {
+ return err
+ }
}
}
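`ReadEachLogEntry` expects each persisted record to be a 4-byte length prefix followed by a marshaled `filer_pb.LogEntry`, skipping entries at or before the given timestamp. A writer-side sketch of that framing — big-endian is an assumption here; the actual byte order is whatever `util.BytesToUint32` decodes:

```go
// A sketch of the log framing ReadEachLogEntry consumes: each record is
// a 4-byte length prefix followed by the serialized entry bytes.
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func appendRecord(buf *bytes.Buffer, entryData []byte) {
	sizeBuf := make([]byte, 4)
	binary.BigEndian.PutUint32(sizeBuf, uint32(len(entryData))) // byte order assumed
	buf.Write(sizeBuf)
	buf.Write(entryData)
}

func main() {
	var buf bytes.Buffer
	appendRecord(&buf, []byte("serialized LogEntry bytes"))
	fmt.Println(buf.Len()) // 4 + payload length
}
```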
diff --git a/weed/filer2/filer_notify_append.go b/weed/filer2/filer_notify_append.go
new file mode 100644
index 000000000..af291058c
--- /dev/null
+++ b/weed/filer2/filer_notify_append.go
@@ -0,0 +1,73 @@
+package filer2
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "time"
+
+ "github.com/chrislusf/seaweedfs/weed/operation"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+func (f *Filer) appendToFile(targetFile string, data []byte) error {
+
+ assignResult, uploadResult, err2 := f.assignAndUpload(data)
+ if err2 != nil {
+ return err2
+ }
+
+ // find out existing entry
+ fullpath := util.FullPath(targetFile)
+ entry, err := f.FindEntry(context.Background(), fullpath)
+ var offset int64 = 0
+ if err == filer_pb.ErrNotFound {
+ entry = &Entry{
+ FullPath: fullpath,
+ Attr: Attr{
+ Crtime: time.Now(),
+ Mtime: time.Now(),
+ Mode: os.FileMode(0644),
+ Uid: OS_UID,
+ Gid: OS_GID,
+ },
+ }
+ } else {
+ offset = int64(TotalSize(entry.Chunks))
+ }
+
+ // append to existing chunks
+ entry.Chunks = append(entry.Chunks, uploadResult.ToPbFileChunk(assignResult.Fid, offset))
+
+ // update the entry
+ err = f.CreateEntry(context.Background(), entry, false)
+
+ return err
+}
+
+func (f *Filer) assignAndUpload(data []byte) (*operation.AssignResult, *operation.UploadResult, error) {
+ // assign a volume location
+ assignRequest := &operation.VolumeAssignRequest{
+ Count: 1,
+ Collection: f.metaLogCollection,
+ Replication: f.metaLogReplication,
+ WritableVolumeCount: 1,
+ }
+ assignResult, err := operation.Assign(f.GetMaster(), f.GrpcDialOption, assignRequest)
+ if err != nil {
+ return nil, nil, fmt.Errorf("AssignVolume: %v", err)
+ }
+ if assignResult.Error != "" {
+ return nil, nil, fmt.Errorf("AssignVolume error: %v", assignResult.Error)
+ }
+
+ // upload data
+ targetUrl := "http://" + assignResult.Url + "/" + assignResult.Fid
+ uploadResult, err := operation.UploadData(targetUrl, "", f.Cipher, data, false, "", nil, assignResult.Auth)
+ if err != nil {
+ return nil, nil, fmt.Errorf("upload data %s: %v", targetUrl, err)
+ }
+ // println("uploaded to", targetUrl)
+ return assignResult, uploadResult, nil
+}
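`appendToFile` places each new chunk at the logical end of the file: the offset passed to the chunk is the total size of the chunks already recorded on the entry. The bookkeeping in isolation:

```go
// Offset bookkeeping behind appendToFile: a new chunk starts where the
// accumulated size of the existing chunks ends.
package main

import "fmt"

func nextOffset(chunkSizes []uint64) (offset int64) {
	for _, s := range chunkSizes {
		offset += int64(s)
	}
	return
}

func main() {
	fmt.Println(nextOffset([]uint64{5, 7, 3})) // 15: the next append starts here
}
```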
diff --git a/weed/filer2/filer_notify_test.go b/weed/filer2/filer_notify_test.go
index b74e2ad35..29170bfdf 100644
--- a/weed/filer2/filer_notify_test.go
+++ b/weed/filer2/filer_notify_test.go
@@ -5,13 +5,15 @@ import (
"time"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+
"github.com/golang/protobuf/proto"
)
func TestProtoMarshalText(t *testing.T) {
oldEntry := &Entry{
- FullPath: FullPath("/this/path/to"),
+ FullPath: util.FullPath("/this/path/to"),
Attr: Attr{
Mtime: time.Now(),
Mode: 0644,
diff --git a/weed/filer2/filerstore.go b/weed/filer2/filerstore.go
index ae25534ed..f36c74f14 100644
--- a/weed/filer2/filerstore.go
+++ b/weed/filer2/filerstore.go
@@ -2,7 +2,6 @@ package filer2
import (
"context"
- "errors"
"time"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
@@ -18,17 +17,17 @@ type FilerStore interface {
InsertEntry(context.Context, *Entry) error
UpdateEntry(context.Context, *Entry) (err error)
// err == filer2.ErrNotFound if not found
- FindEntry(context.Context, FullPath) (entry *Entry, err error)
- DeleteEntry(context.Context, FullPath) (err error)
- DeleteFolderChildren(context.Context, FullPath) (err error)
- ListDirectoryEntries(ctx context.Context, dirPath FullPath, startFileName string, includeStartFile bool, limit int) ([]*Entry, error)
+ FindEntry(context.Context, util.FullPath) (entry *Entry, err error)
+ DeleteEntry(context.Context, util.FullPath) (err error)
+ DeleteFolderChildren(context.Context, util.FullPath) (err error)
+ ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int) ([]*Entry, error)
BeginTransaction(ctx context.Context) (context.Context, error)
CommitTransaction(ctx context.Context) error
RollbackTransaction(ctx context.Context) error
-}
-var ErrNotFound = errors.New("filer: no entry is found in filer store")
+ Shutdown()
+}
type FilerStoreWrapper struct {
actualStore FilerStore
@@ -73,7 +72,7 @@ func (fsw *FilerStoreWrapper) UpdateEntry(ctx context.Context, entry *Entry) err
return fsw.actualStore.UpdateEntry(ctx, entry)
}
-func (fsw *FilerStoreWrapper) FindEntry(ctx context.Context, fp FullPath) (entry *Entry, err error) {
+func (fsw *FilerStoreWrapper) FindEntry(ctx context.Context, fp util.FullPath) (entry *Entry, err error) {
stats.FilerStoreCounter.WithLabelValues(fsw.actualStore.GetName(), "find").Inc()
start := time.Now()
defer func() {
@@ -88,7 +87,7 @@ func (fsw *FilerStoreWrapper) FindEntry(ctx context.Context, fp FullPath) (entry
return
}
-func (fsw *FilerStoreWrapper) DeleteEntry(ctx context.Context, fp FullPath) (err error) {
+func (fsw *FilerStoreWrapper) DeleteEntry(ctx context.Context, fp util.FullPath) (err error) {
stats.FilerStoreCounter.WithLabelValues(fsw.actualStore.GetName(), "delete").Inc()
start := time.Now()
defer func() {
@@ -98,7 +97,7 @@ func (fsw *FilerStoreWrapper) DeleteEntry(ctx context.Context, fp FullPath) (err
return fsw.actualStore.DeleteEntry(ctx, fp)
}
-func (fsw *FilerStoreWrapper) DeleteFolderChildren(ctx context.Context, fp FullPath) (err error) {
+func (fsw *FilerStoreWrapper) DeleteFolderChildren(ctx context.Context, fp util.FullPath) (err error) {
stats.FilerStoreCounter.WithLabelValues(fsw.actualStore.GetName(), "deleteFolderChildren").Inc()
start := time.Now()
defer func() {
@@ -108,7 +107,7 @@ func (fsw *FilerStoreWrapper) DeleteFolderChildren(ctx context.Context, fp FullP
return fsw.actualStore.DeleteFolderChildren(ctx, fp)
}
-func (fsw *FilerStoreWrapper) ListDirectoryEntries(ctx context.Context, dirPath FullPath, startFileName string, includeStartFile bool, limit int) ([]*Entry, error) {
+func (fsw *FilerStoreWrapper) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int) ([]*Entry, error) {
stats.FilerStoreCounter.WithLabelValues(fsw.actualStore.GetName(), "list").Inc()
start := time.Now()
defer func() {
@@ -136,3 +135,7 @@ func (fsw *FilerStoreWrapper) CommitTransaction(ctx context.Context) error {
func (fsw *FilerStoreWrapper) RollbackTransaction(ctx context.Context) error {
return fsw.actualStore.RollbackTransaction(ctx)
}
+
+func (fsw *FilerStoreWrapper) Shutdown() {
+ fsw.actualStore.Shutdown()
+}
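The FilerStore interface now carries a Shutdown() hook, and FilerStoreWrapper forwards it to the wrapped backend, so every store (LevelDB close, Redis client close, MongoDB disconnect, ...) can be torn down through one uniform call. A minimal sketch of the lifecycle this enables, reusing the NewFiler/SetStore call shape from the tests below; runWithStore is an illustrative name, not code from this change:

    package example

    import "github.com/chrislusf/seaweedfs/weed/filer2"

    func runWithStore(store filer2.FilerStore) {
    	f := filer2.NewFiler(nil, nil, "", 0, "", "", nil)
    	f.SetStore(store)      // the filer wraps the store so calls are counted and timed
    	defer store.Shutdown() // one teardown call, whatever the backend
    	// ... serve filer traffic ...
    }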
diff --git a/weed/filer2/leveldb/leveldb_store.go b/weed/filer2/leveldb/leveldb_store.go
index 44e6ac0eb..31919ca49 100644
--- a/weed/filer2/leveldb/leveldb_store.go
+++ b/weed/filer2/leveldb/leveldb_store.go
@@ -6,11 +6,13 @@ import (
"fmt"
"github.com/syndtr/goleveldb/leveldb"
+ "github.com/syndtr/goleveldb/leveldb/errors"
"github.com/syndtr/goleveldb/leveldb/opt"
leveldb_util "github.com/syndtr/goleveldb/leveldb/util"
"github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
weed_util "github.com/chrislusf/seaweedfs/weed/util"
)
@@ -48,8 +50,13 @@ func (store *LevelDBStore) initialize(dir string) (err error) {
}
if store.db, err = leveldb.OpenFile(dir, opts); err != nil {
- glog.Infof("filer store open dir %s: %v", dir, err)
- return
+ if errors.IsCorrupted(err) {
+ store.db, err = leveldb.RecoverFile(dir, opts)
+ }
+ if err != nil {
+ glog.Infof("filer store open dir %s: %v", dir, err)
+ return
+ }
}
return
}
@@ -88,13 +95,13 @@ func (store *LevelDBStore) UpdateEntry(ctx context.Context, entry *filer2.Entry)
return store.InsertEntry(ctx, entry)
}
-func (store *LevelDBStore) FindEntry(ctx context.Context, fullpath filer2.FullPath) (entry *filer2.Entry, err error) {
+func (store *LevelDBStore) FindEntry(ctx context.Context, fullpath weed_util.FullPath) (entry *filer2.Entry, err error) {
key := genKey(fullpath.DirAndName())
data, err := store.db.Get(key, nil)
if err == leveldb.ErrNotFound {
- return nil, filer2.ErrNotFound
+ return nil, filer_pb.ErrNotFound
}
if err != nil {
return nil, fmt.Errorf("get %s : %v", entry.FullPath, err)
@@ -113,7 +120,7 @@ func (store *LevelDBStore) FindEntry(ctx context.Context, fullpath filer2.FullPa
return entry, nil
}
-func (store *LevelDBStore) DeleteEntry(ctx context.Context, fullpath filer2.FullPath) (err error) {
+func (store *LevelDBStore) DeleteEntry(ctx context.Context, fullpath weed_util.FullPath) (err error) {
key := genKey(fullpath.DirAndName())
err = store.db.Delete(key, nil)
@@ -124,7 +131,7 @@ func (store *LevelDBStore) DeleteEntry(ctx context.Context, fullpath filer2.Full
return nil
}
-func (store *LevelDBStore) DeleteFolderChildren(ctx context.Context, fullpath filer2.FullPath) (err error) {
+func (store *LevelDBStore) DeleteFolderChildren(ctx context.Context, fullpath weed_util.FullPath) (err error) {
batch := new(leveldb.Batch)
@@ -152,7 +159,7 @@ func (store *LevelDBStore) DeleteFolderChildren(ctx context.Context, fullpath fi
return nil
}
-func (store *LevelDBStore) ListDirectoryEntries(ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool,
+func (store *LevelDBStore) ListDirectoryEntries(ctx context.Context, fullpath weed_util.FullPath, startFileName string, inclusive bool,
limit int) (entries []*filer2.Entry, err error) {
directoryPrefix := genDirectoryKeyPrefix(fullpath, "")
@@ -175,7 +182,7 @@ func (store *LevelDBStore) ListDirectoryEntries(ctx context.Context, fullpath fi
break
}
entry := &filer2.Entry{
- FullPath: filer2.NewFullPath(string(fullpath), fileName),
+ FullPath: weed_util.NewFullPath(string(fullpath), fileName),
}
if decodeErr := entry.DecodeAttributesAndChunks(iter.Value()); decodeErr != nil {
err = decodeErr
@@ -196,7 +203,7 @@ func genKey(dirPath, fileName string) (key []byte) {
return key
}
-func genDirectoryKeyPrefix(fullpath filer2.FullPath, startFileName string) (keyPrefix []byte) {
+func genDirectoryKeyPrefix(fullpath weed_util.FullPath, startFileName string) (keyPrefix []byte) {
keyPrefix = []byte(string(fullpath))
keyPrefix = append(keyPrefix, DIR_FILE_SEPARATOR)
if len(startFileName) > 0 {
@@ -215,3 +222,7 @@ func getNameFromKey(key []byte) string {
return string(key[sepIndex+1:])
}
+
+func (store *LevelDBStore) Shutdown() {
+ store.db.Close()
+}
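The LevelDB open path above now falls back to RecoverFile when the error indicates corruption, instead of failing the filer outright. The same goleveldb pattern in isolation (the directory path is illustrative):

    package main

    import (
    	"log"

    	"github.com/syndtr/goleveldb/leveldb"
    	leveldb_errors "github.com/syndtr/goleveldb/leveldb/errors"
    	"github.com/syndtr/goleveldb/leveldb/opt"
    )

    func openWithRecovery(dir string, opts *opt.Options) (*leveldb.DB, error) {
    	db, err := leveldb.OpenFile(dir, opts)
    	if leveldb_errors.IsCorrupted(err) {
    		// RecoverFile rebuilds the manifest from the table files;
    		// writes past the corruption point may still be lost
    		db, err = leveldb.RecoverFile(dir, opts)
    	}
    	return db, err
    }

    func main() {
    	db, err := openWithRecovery("/tmp/filer-leveldb", nil)
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer db.Close()
    }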
diff --git a/weed/filer2/leveldb/leveldb_store_test.go b/weed/filer2/leveldb/leveldb_store_test.go
index 983e1cbe9..1daa47c97 100644
--- a/weed/filer2/leveldb/leveldb_store_test.go
+++ b/weed/filer2/leveldb/leveldb_store_test.go
@@ -2,14 +2,16 @@ package leveldb
import (
"context"
- "github.com/chrislusf/seaweedfs/weed/filer2"
"io/ioutil"
"os"
"testing"
+
+ "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/util"
)
func TestCreateAndFind(t *testing.T) {
- filer := filer2.NewFiler(nil, nil)
+ filer := filer2.NewFiler(nil, nil, "", 0, "", "", nil)
dir, _ := ioutil.TempDir("", "seaweedfs_filer_test")
defer os.RemoveAll(dir)
store := &LevelDBStore{}
@@ -17,7 +19,7 @@ func TestCreateAndFind(t *testing.T) {
filer.SetStore(store)
filer.DisableDirectoryCache()
- fullpath := filer2.FullPath("/home/chris/this/is/one/file1.jpg")
+ fullpath := util.FullPath("/home/chris/this/is/one/file1.jpg")
ctx := context.Background()
@@ -48,14 +50,14 @@ func TestCreateAndFind(t *testing.T) {
}
// checking one upper directory
- entries, _ := filer.ListDirectoryEntries(ctx, filer2.FullPath("/home/chris/this/is/one"), "", false, 100)
+ entries, _ := filer.ListDirectoryEntries(ctx, util.FullPath("/home/chris/this/is/one"), "", false, 100)
if len(entries) != 1 {
t.Errorf("list entries count: %v", len(entries))
return
}
	// checking the root directory
- entries, _ = filer.ListDirectoryEntries(ctx, filer2.FullPath("/"), "", false, 100)
+ entries, _ = filer.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100)
if len(entries) != 1 {
t.Errorf("list entries count: %v", len(entries))
return
@@ -64,7 +66,7 @@ func TestCreateAndFind(t *testing.T) {
}
func TestEmptyRoot(t *testing.T) {
- filer := filer2.NewFiler(nil, nil)
+ filer := filer2.NewFiler(nil, nil, "", 0, "", "", nil)
dir, _ := ioutil.TempDir("", "seaweedfs_filer_test2")
defer os.RemoveAll(dir)
store := &LevelDBStore{}
@@ -75,7 +77,7 @@ func TestEmptyRoot(t *testing.T) {
ctx := context.Background()
	// checking the root directory
- entries, err := filer.ListDirectoryEntries(ctx, filer2.FullPath("/"), "", false, 100)
+ entries, err := filer.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100)
if err != nil {
t.Errorf("list entries: %v", err)
return
diff --git a/weed/filer2/leveldb2/leveldb2_store.go b/weed/filer2/leveldb2/leveldb2_store.go
index 358d4d92a..c907e8746 100644
--- a/weed/filer2/leveldb2/leveldb2_store.go
+++ b/weed/filer2/leveldb2/leveldb2_store.go
@@ -9,11 +9,13 @@ import (
"os"
"github.com/syndtr/goleveldb/leveldb"
+ "github.com/syndtr/goleveldb/leveldb/errors"
"github.com/syndtr/goleveldb/leveldb/opt"
leveldb_util "github.com/syndtr/goleveldb/leveldb/util"
"github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
weed_util "github.com/chrislusf/seaweedfs/weed/util"
)
@@ -51,9 +53,12 @@ func (store *LevelDB2Store) initialize(dir string, dbCount int) (err error) {
dbFolder := fmt.Sprintf("%s/%02d", dir, d)
os.MkdirAll(dbFolder, 0755)
db, dbErr := leveldb.OpenFile(dbFolder, opts)
+ if errors.IsCorrupted(dbErr) {
+ db, dbErr = leveldb.RecoverFile(dbFolder, opts)
+ }
if dbErr != nil {
glog.Errorf("filer store open dir %s: %v", dbFolder, dbErr)
- return
+ return dbErr
}
store.dbs = append(store.dbs, db)
}
@@ -97,14 +102,14 @@ func (store *LevelDB2Store) UpdateEntry(ctx context.Context, entry *filer2.Entry
return store.InsertEntry(ctx, entry)
}
-func (store *LevelDB2Store) FindEntry(ctx context.Context, fullpath filer2.FullPath) (entry *filer2.Entry, err error) {
+func (store *LevelDB2Store) FindEntry(ctx context.Context, fullpath weed_util.FullPath) (entry *filer2.Entry, err error) {
dir, name := fullpath.DirAndName()
key, partitionId := genKey(dir, name, store.dbCount)
data, err := store.dbs[partitionId].Get(key, nil)
if err == leveldb.ErrNotFound {
- return nil, filer2.ErrNotFound
+ return nil, filer_pb.ErrNotFound
}
if err != nil {
return nil, fmt.Errorf("get %s : %v", entry.FullPath, err)
@@ -123,7 +128,7 @@ func (store *LevelDB2Store) FindEntry(ctx context.Context, fullpath filer2.FullP
return entry, nil
}
-func (store *LevelDB2Store) DeleteEntry(ctx context.Context, fullpath filer2.FullPath) (err error) {
+func (store *LevelDB2Store) DeleteEntry(ctx context.Context, fullpath weed_util.FullPath) (err error) {
dir, name := fullpath.DirAndName()
key, partitionId := genKey(dir, name, store.dbCount)
@@ -135,7 +140,7 @@ func (store *LevelDB2Store) DeleteEntry(ctx context.Context, fullpath filer2.Ful
return nil
}
-func (store *LevelDB2Store) DeleteFolderChildren(ctx context.Context, fullpath filer2.FullPath) (err error) {
+func (store *LevelDB2Store) DeleteFolderChildren(ctx context.Context, fullpath weed_util.FullPath) (err error) {
directoryPrefix, partitionId := genDirectoryKeyPrefix(fullpath, "", store.dbCount)
batch := new(leveldb.Batch)
@@ -163,7 +168,7 @@ func (store *LevelDB2Store) DeleteFolderChildren(ctx context.Context, fullpath f
return nil
}
-func (store *LevelDB2Store) ListDirectoryEntries(ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool,
+func (store *LevelDB2Store) ListDirectoryEntries(ctx context.Context, fullpath weed_util.FullPath, startFileName string, inclusive bool,
limit int) (entries []*filer2.Entry, err error) {
directoryPrefix, partitionId := genDirectoryKeyPrefix(fullpath, "", store.dbCount)
@@ -187,7 +192,7 @@ func (store *LevelDB2Store) ListDirectoryEntries(ctx context.Context, fullpath f
break
}
entry := &filer2.Entry{
- FullPath: filer2.NewFullPath(string(fullpath), fileName),
+ FullPath: weed_util.NewFullPath(string(fullpath), fileName),
}
// println("list", entry.FullPath, "chunks", len(entry.Chunks))
@@ -210,7 +215,7 @@ func genKey(dirPath, fileName string, dbCount int) (key []byte, partitionId int)
return key, partitionId
}
-func genDirectoryKeyPrefix(fullpath filer2.FullPath, startFileName string, dbCount int) (keyPrefix []byte, partitionId int) {
+func genDirectoryKeyPrefix(fullpath weed_util.FullPath, startFileName string, dbCount int) (keyPrefix []byte, partitionId int) {
keyPrefix, partitionId = hashToBytes(string(fullpath), dbCount)
if len(startFileName) > 0 {
keyPrefix = append(keyPrefix, []byte(startFileName)...)
@@ -235,3 +240,9 @@ func hashToBytes(dir string, dbCount int) ([]byte, int) {
return b, int(x) % dbCount
}
+
+func (store *LevelDB2Store) Shutdown() {
+ for d := 0; d < store.dbCount; d++ {
+ store.dbs[d].Close()
+ }
+}
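leveldb2 shards entries across dbCount independent LevelDB instances, and the directory path alone selects the shard, so all children of one directory land in the same instance and a listing scans a single DB. A sketch of that routing idea; the exact fold used by hashToBytes is not shown in full in this hunk, so treat the details below as an assumption for illustration:

    package example

    import (
    	"crypto/md5"
    	"encoding/binary"
    )

    func pickPartition(dir string, dbCount int) int {
    	h := md5.Sum([]byte(dir)) // stable hash of the directory path only
    	x := binary.BigEndian.Uint32(h[:4])
    	return int(x) % dbCount
    }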
diff --git a/weed/filer2/leveldb2/leveldb2_store_test.go b/weed/filer2/leveldb2/leveldb2_store_test.go
index 58637b7b6..9ad168233 100644
--- a/weed/filer2/leveldb2/leveldb2_store_test.go
+++ b/weed/filer2/leveldb2/leveldb2_store_test.go
@@ -2,14 +2,16 @@ package leveldb
import (
"context"
- "github.com/chrislusf/seaweedfs/weed/filer2"
"io/ioutil"
"os"
"testing"
+
+ "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/util"
)
func TestCreateAndFind(t *testing.T) {
- filer := filer2.NewFiler(nil, nil)
+ filer := filer2.NewFiler(nil, nil, "", 0, "", "", nil)
dir, _ := ioutil.TempDir("", "seaweedfs_filer_test")
defer os.RemoveAll(dir)
store := &LevelDB2Store{}
@@ -17,7 +19,7 @@ func TestCreateAndFind(t *testing.T) {
filer.SetStore(store)
filer.DisableDirectoryCache()
- fullpath := filer2.FullPath("/home/chris/this/is/one/file1.jpg")
+ fullpath := util.FullPath("/home/chris/this/is/one/file1.jpg")
ctx := context.Background()
@@ -48,14 +50,14 @@ func TestCreateAndFind(t *testing.T) {
}
// checking one upper directory
- entries, _ := filer.ListDirectoryEntries(ctx, filer2.FullPath("/home/chris/this/is/one"), "", false, 100)
+ entries, _ := filer.ListDirectoryEntries(ctx, util.FullPath("/home/chris/this/is/one"), "", false, 100)
if len(entries) != 1 {
t.Errorf("list entries count: %v", len(entries))
return
}
	// checking the root directory
- entries, _ = filer.ListDirectoryEntries(ctx, filer2.FullPath("/"), "", false, 100)
+ entries, _ = filer.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100)
if len(entries) != 1 {
t.Errorf("list entries count: %v", len(entries))
return
@@ -64,7 +66,7 @@ func TestCreateAndFind(t *testing.T) {
}
func TestEmptyRoot(t *testing.T) {
- filer := filer2.NewFiler(nil, nil)
+ filer := filer2.NewFiler(nil, nil, "", 0, "", "", nil)
dir, _ := ioutil.TempDir("", "seaweedfs_filer_test2")
defer os.RemoveAll(dir)
store := &LevelDB2Store{}
@@ -75,7 +77,7 @@ func TestEmptyRoot(t *testing.T) {
ctx := context.Background()
	// checking the root directory
- entries, err := filer.ListDirectoryEntries(ctx, filer2.FullPath("/"), "", false, 100)
+ entries, err := filer.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100)
if err != nil {
t.Errorf("list entries: %v", err)
return
diff --git a/weed/filer2/mongodb/mongodb_store.go b/weed/filer2/mongodb/mongodb_store.go
new file mode 100644
index 000000000..375a457a4
--- /dev/null
+++ b/weed/filer2/mongodb/mongodb_store.go
@@ -0,0 +1,210 @@
+package mongodb
+
+import (
+ "context"
+ "fmt"
+ "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+ "go.mongodb.org/mongo-driver/bson"
+ "go.mongodb.org/mongo-driver/mongo"
+ "go.mongodb.org/mongo-driver/mongo/options"
+ "go.mongodb.org/mongo-driver/x/bsonx"
+ "time"
+)
+
+func init() {
+ filer2.Stores = append(filer2.Stores, &MongodbStore{})
+}
+
+type MongodbStore struct {
+ connect *mongo.Client
+ database string
+ collectionName string
+}
+
+type Model struct {
+ Directory string `bson:"directory"`
+ Name string `bson:"name"`
+ Meta []byte `bson:"meta"`
+}
+
+func (store *MongodbStore) GetName() string {
+ return "mongodb"
+}
+
+func (store *MongodbStore) Initialize(configuration util.Configuration, prefix string) (err error) {
+ store.database = configuration.GetString(prefix + "database")
+ store.collectionName = "filemeta"
+ poolSize := configuration.GetInt(prefix + "option_pool_size")
+ return store.connection(configuration.GetString(prefix+"uri"), uint64(poolSize))
+}
+
+func (store *MongodbStore) connection(uri string, poolSize uint64) (err error) {
+	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+	defer cancel() // release the timer once Connect and index setup finish
+ opts := options.Client().ApplyURI(uri)
+
+ if poolSize > 0 {
+ opts.SetMaxPoolSize(poolSize)
+ }
+
+ client, err := mongo.Connect(ctx, opts)
+ if err != nil {
+ return err
+ }
+
+ c := client.Database(store.database).Collection(store.collectionName)
+ err = store.indexUnique(c)
+ store.connect = client
+ return err
+}
+
+func (store *MongodbStore) createIndex(c *mongo.Collection, index mongo.IndexModel, opts *options.CreateIndexesOptions) error {
+ _, err := c.Indexes().CreateOne(context.Background(), index, opts)
+ return err
+}
+
+func (store *MongodbStore) indexUnique(c *mongo.Collection) error {
+ opts := options.CreateIndexes().SetMaxTime(10 * time.Second)
+
+ unique := new(bool)
+ *unique = true
+
+ index := mongo.IndexModel{
+ Keys: bsonx.Doc{{Key: "directory", Value: bsonx.Int32(1)}, {Key: "name", Value: bsonx.Int32(1)}},
+ Options: &options.IndexOptions{
+ Unique: unique,
+ },
+ }
+
+ return store.createIndex(c, index, opts)
+}
+
+func (store *MongodbStore) BeginTransaction(ctx context.Context) (context.Context, error) {
+ return ctx, nil
+}
+
+func (store *MongodbStore) CommitTransaction(ctx context.Context) error {
+ return nil
+}
+
+func (store *MongodbStore) RollbackTransaction(ctx context.Context) error {
+ return nil
+}
+
+func (store *MongodbStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) {
+
+	dir, name := entry.FullPath.DirAndName()
+	meta, err := entry.EncodeAttributesAndChunks()
+	if err != nil {
+		return fmt.Errorf("encode %s: %s", entry.FullPath, err)
+	}
+
+	c := store.connect.Database(store.database).Collection(store.collectionName)
+
+	// replace-with-upsert, so that UpdateEntry (which delegates here) does
+	// not trip the unique (directory, name) index with a duplicate-key
+	// error, and so the driver error is returned instead of dropped
+	opts := options.Replace().SetUpsert(true)
+	where := bson.M{"directory": dir, "name": name}
+	_, err = c.ReplaceOne(ctx, where, Model{
+		Directory: dir,
+		Name:      name,
+		Meta:      meta,
+	}, opts)
+
+	return err
+}
+
+func (store *MongodbStore) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) {
+ return store.InsertEntry(ctx, entry)
+}
+
+func (store *MongodbStore) FindEntry(ctx context.Context, fullpath util.FullPath) (entry *filer2.Entry, err error) {
+
+ dir, name := fullpath.DirAndName()
+ var data Model
+
+ var where = bson.M{"directory": dir, "name": name}
+ err = store.connect.Database(store.database).Collection(store.collectionName).FindOne(ctx, where).Decode(&data)
+ if err != mongo.ErrNoDocuments && err != nil {
+ return nil, filer_pb.ErrNotFound
+ }
+
+ if len(data.Meta) == 0 {
+ return nil, filer_pb.ErrNotFound
+ }
+
+ entry = &filer2.Entry{
+ FullPath: fullpath,
+ }
+
+ err = entry.DecodeAttributesAndChunks(data.Meta)
+ if err != nil {
+ return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err)
+ }
+
+ return entry, nil
+}
+
+func (store *MongodbStore) DeleteEntry(ctx context.Context, fullpath util.FullPath) error {
+
+ dir, name := fullpath.DirAndName()
+
+ where := bson.M{"directory": dir, "name": name}
+ _, err := store.connect.Database(store.database).Collection(store.collectionName).DeleteOne(ctx, where)
+ if err != nil {
+ return fmt.Errorf("delete %s : %v", fullpath, err)
+ }
+
+ return nil
+}
+
+func (store *MongodbStore) DeleteFolderChildren(ctx context.Context, fullpath util.FullPath) error {
+
+ where := bson.M{"directory": fullpath}
+ _, err := store.connect.Database(store.database).Collection(store.collectionName).DeleteMany(ctx, where)
+ if err != nil {
+ return fmt.Errorf("delete %s : %v", fullpath, err)
+ }
+
+ return nil
+}
+
+func (store *MongodbStore) ListDirectoryEntries(ctx context.Context, fullpath util.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer2.Entry, err error) {
+
+ var where = bson.M{"directory": string(fullpath), "name": bson.M{"$gt": startFileName}}
+ if inclusive {
+ where["name"] = bson.M{
+ "$gte": startFileName,
+ }
+ }
+ optLimit := int64(limit)
+ opts := &options.FindOptions{Limit: &optLimit, Sort: bson.M{"name": 1}}
+	cur, err := store.connect.Database(store.database).Collection(store.collectionName).Find(ctx, where, opts)
+	if err != nil {
+		// a failed Find leaves cur nil; iterating it would panic
+		return nil, fmt.Errorf("list %s: %v", fullpath, err)
+	}
+	for cur.Next(ctx) {
+ var data Model
+ err := cur.Decode(&data)
+ if err != nil && err != mongo.ErrNoDocuments {
+ return nil, err
+ }
+
+ entry := &filer2.Entry{
+ FullPath: util.NewFullPath(string(fullpath), data.Name),
+ }
+ if decodeErr := entry.DecodeAttributesAndChunks(data.Meta); decodeErr != nil {
+ err = decodeErr
+ glog.V(0).Infof("list %s : %v", entry.FullPath, err)
+ break
+ }
+
+ entries = append(entries, entry)
+ }
+
+ if err := cur.Close(ctx); err != nil {
+ glog.V(0).Infof("list iterator close: %v", err)
+ }
+
+ return entries, err
+}
+
+func (store *MongodbStore) Shutdown() {
+	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+	defer cancel()
+ store.connect.Disconnect(ctx)
+}
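The MongoDB store keeps one document per entry in the filemeta collection, keyed by the unique compound index on (directory, name), and writes are replace-with-upsert against that key. A standalone sketch of the document shape; the database name and path values are illustrative only:

    package example

    import (
    	"context"
    	"time"

    	"go.mongodb.org/mongo-driver/bson"
    	"go.mongodb.org/mongo-driver/mongo"
    	"go.mongodb.org/mongo-driver/mongo/options"
    )

    func writeEntry(client *mongo.Client, meta []byte) error {
    	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    	defer cancel()
    	c := client.Database("seaweedfs").Collection("filemeta")
    	_, err := c.ReplaceOne(ctx,
    		bson.M{"directory": "/photos", "name": "a.jpg"}, // the unique key
    		bson.M{"directory": "/photos", "name": "a.jpg", "meta": meta},
    		options.Replace().SetUpsert(true))
    	return err
    }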
diff --git a/weed/filer2/postgres/postgres_store.go b/weed/filer2/postgres/postgres_store.go
index 27a0c2513..51c069aae 100644
--- a/weed/filer2/postgres/postgres_store.go
+++ b/weed/filer2/postgres/postgres_store.go
@@ -11,7 +11,7 @@ import (
)
const (
- CONNECTION_URL_PATTERN = "host=%s port=%d user=%s password=%s dbname=%s sslmode=%s connect_timeout=30"
+ CONNECTION_URL_PATTERN = "host=%s port=%d user=%s sslmode=%s connect_timeout=30"
)
func init() {
@@ -49,7 +49,13 @@ func (store *PostgresStore) initialize(user, password, hostname string, port int
store.SqlListExclusive = "SELECT NAME, meta FROM filemeta WHERE dirhash=$1 AND name>$2 AND directory=$3 ORDER BY NAME ASC LIMIT $4"
store.SqlListInclusive = "SELECT NAME, meta FROM filemeta WHERE dirhash=$1 AND name>=$2 AND directory=$3 ORDER BY NAME ASC LIMIT $4"
- sqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, hostname, port, user, password, database, sslmode)
+ sqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, hostname, port, user, sslmode)
+ if password != "" {
+ sqlUrl += " password=" + password
+ }
+ if database != "" {
+ sqlUrl += " dbname=" + database
+ }
var dbErr error
store.DB, dbErr = sql.Open("postgres", sqlUrl)
if dbErr != nil {
diff --git a/weed/filer2/reader_at.go b/weed/filer2/reader_at.go
new file mode 100644
index 000000000..53fff7672
--- /dev/null
+++ b/weed/filer2/reader_at.go
@@ -0,0 +1,156 @@
+package filer2
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "io"
+ "sync"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+ "github.com/chrislusf/seaweedfs/weed/util/chunk_cache"
+ "github.com/chrislusf/seaweedfs/weed/wdclient"
+)
+
+type ChunkReadAt struct {
+ masterClient *wdclient.MasterClient
+ chunkViews []*ChunkView
+ buffer []byte
+ bufferOffset int64
+ lookupFileId func(fileId string) (targetUrl string, err error)
+ readerLock sync.Mutex
+
+ chunkCache *chunk_cache.ChunkCache
+}
+
+var _ = io.ReaderAt(&ChunkReadAt{}) // compile-time assertion that ReadAt below satisfies io.ReaderAt
+
+type LookupFileIdFunctionType func(fileId string) (targetUrl string, err error)
+
+func LookupFn(filerClient filer_pb.FilerClient) LookupFileIdFunctionType {
+ return func(fileId string) (targetUrl string, err error) {
+ err = filerClient.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+ vid := VolumeId(fileId)
+ resp, err := client.LookupVolume(context.Background(), &filer_pb.LookupVolumeRequest{
+ VolumeIds: []string{vid},
+ })
+ if err != nil {
+ return err
+ }
+
+ locations := resp.LocationsMap[vid]
+ if locations == nil || len(locations.Locations) == 0 {
+ glog.V(0).Infof("failed to locate %s", fileId)
+ return fmt.Errorf("failed to locate %s", fileId)
+ }
+
+ volumeServerAddress := filerClient.AdjustedUrl(locations.Locations[0].Url)
+
+ targetUrl = fmt.Sprintf("http://%s/%s", volumeServerAddress, fileId)
+
+ return nil
+ })
+ return
+ }
+}
+
+func NewChunkReaderAtFromClient(filerClient filer_pb.FilerClient, chunkViews []*ChunkView, chunkCache *chunk_cache.ChunkCache) *ChunkReadAt {
+
+ return &ChunkReadAt{
+ chunkViews: chunkViews,
+ lookupFileId: LookupFn(filerClient),
+ bufferOffset: -1,
+ chunkCache: chunkCache,
+ }
+}
+
+func (c *ChunkReadAt) ReadAt(p []byte, offset int64) (n int, err error) {
+
+ c.readerLock.Lock()
+ defer c.readerLock.Unlock()
+
+	for n < len(p) && err == nil {
+		readCount, readErr := c.doReadAt(p[n:], offset+int64(n))
+		n += readCount
+		err = readErr
+		if readCount == 0 {
+			// surface io.EOF (or the read error) instead of reporting a
+			// zero-progress success, which violates the io.ReaderAt contract
+			return n, err
+		}
+	}
+ return
+}
+
+func (c *ChunkReadAt) doReadAt(p []byte, offset int64) (n int, err error) {
+
+ var found bool
+ for _, chunk := range c.chunkViews {
+ if chunk.LogicOffset <= offset && offset < chunk.LogicOffset+int64(chunk.Size) {
+ found = true
+			if c.bufferOffset != chunk.LogicOffset {
+				c.buffer, err = c.fetchChunkData(chunk)
+				if err != nil {
+					// do not mark the buffer as holding this chunk after a failed fetch
+					return 0, err
+				}
+				c.bufferOffset = chunk.LogicOffset
+			}
+ break
+ }
+ }
+ if !found {
+ return 0, io.EOF
+ }
+
+ n = copy(p, c.buffer[offset-c.bufferOffset:])
+
+ // fmt.Printf("> doReadAt [%d,%d), buffer:[%d,%d)\n", offset, offset+int64(n), c.bufferOffset, c.bufferOffset+int64(len(c.buffer)))
+
+ return
+
+}
+
+func (c *ChunkReadAt) fetchChunkData(chunkView *ChunkView) (data []byte, err error) {
+
+ // fmt.Printf("fetching %s [%d,%d)\n", chunkView.FileId, chunkView.LogicOffset, chunkView.LogicOffset+int64(chunkView.Size))
+
+ hasDataInCache := false
+ chunkData := c.chunkCache.GetChunk(chunkView.FileId, chunkView.ChunkSize)
+ if chunkData != nil {
+ glog.V(3).Infof("cache hit %s [%d,%d)", chunkView.FileId, chunkView.LogicOffset, chunkView.LogicOffset+int64(chunkView.Size))
+ hasDataInCache = true
+ } else {
+ chunkData, err = c.doFetchFullChunkData(chunkView.FileId, chunkView.CipherKey, chunkView.IsGzipped)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if int64(len(chunkData)) < chunkView.Offset+int64(chunkView.Size) {
+ return nil, fmt.Errorf("unexpected larger chunkView [%d,%d) than chunk %d", chunkView.Offset, chunkView.Offset+int64(chunkView.Size), len(chunkData))
+ }
+
+ data = chunkData[chunkView.Offset : chunkView.Offset+int64(chunkView.Size)]
+
+ if !hasDataInCache {
+ c.chunkCache.SetChunk(chunkView.FileId, chunkData)
+ }
+
+ return data, nil
+}
+
+func (c *ChunkReadAt) doFetchFullChunkData(fileId string, cipherKey []byte, isGzipped bool) ([]byte, error) {
+
+ urlString, err := c.lookupFileId(fileId)
+ if err != nil {
+ glog.V(1).Infof("operation LookupFileId %s failed, err: %v", fileId, err)
+ return nil, err
+ }
+ var buffer bytes.Buffer
+ err = util.ReadUrlAsStream(urlString, cipherKey, isGzipped, true, 0, 0, func(data []byte) {
+ buffer.Write(data)
+ })
+ if err != nil {
+ glog.V(1).Infof("read %s failed, err: %v", fileId, err)
+ return nil, err
+ }
+
+ return buffer.Bytes(), nil
+}
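ChunkReadAt serves random-access reads over a chunk list, keeping the most recently fetched chunk in a buffer and consulting the chunk cache before hitting a volume server. Since it satisfies io.ReaderAt it composes with the standard library; a sketch, where fileSize is assumed to come from the entry's metadata:

    package example

    import (
    	"io"

    	"github.com/chrislusf/seaweedfs/weed/filer2"
    	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
    	"github.com/chrislusf/seaweedfs/weed/util/chunk_cache"
    )

    func readRange(client filer_pb.FilerClient, views []*filer2.ChunkView, cache *chunk_cache.ChunkCache, offset, length, fileSize int64) ([]byte, error) {
    	ra := filer2.NewChunkReaderAtFromClient(client, views, cache)
    	sr := io.NewSectionReader(ra, 0, fileSize)
    	buf := make([]byte, length)
    	n, err := sr.ReadAt(buf, offset)
    	if err == io.EOF {
    		err = nil // a short read at end of file is expected
    	}
    	return buf[:n], err
    }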
diff --git a/weed/filer2/redis/universal_redis_store.go b/weed/filer2/redis/universal_redis_store.go
index 62257e91e..e5b9e8840 100644
--- a/weed/filer2/redis/universal_redis_store.go
+++ b/weed/filer2/redis/universal_redis_store.go
@@ -3,12 +3,16 @@ package redis
import (
"context"
"fmt"
- "github.com/chrislusf/seaweedfs/weed/filer2"
- "github.com/chrislusf/seaweedfs/weed/glog"
- "github.com/go-redis/redis"
"sort"
"strings"
"time"
+
+ "github.com/go-redis/redis"
+
+ "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
)
const (
@@ -58,11 +62,11 @@ func (store *UniversalRedisStore) UpdateEntry(ctx context.Context, entry *filer2
return store.InsertEntry(ctx, entry)
}
-func (store *UniversalRedisStore) FindEntry(ctx context.Context, fullpath filer2.FullPath) (entry *filer2.Entry, err error) {
+func (store *UniversalRedisStore) FindEntry(ctx context.Context, fullpath util.FullPath) (entry *filer2.Entry, err error) {
data, err := store.Client.Get(string(fullpath)).Result()
if err == redis.Nil {
- return nil, filer2.ErrNotFound
+ return nil, filer_pb.ErrNotFound
}
if err != nil {
@@ -80,7 +84,7 @@ func (store *UniversalRedisStore) FindEntry(ctx context.Context, fullpath filer2
return entry, nil
}
-func (store *UniversalRedisStore) DeleteEntry(ctx context.Context, fullpath filer2.FullPath) (err error) {
+func (store *UniversalRedisStore) DeleteEntry(ctx context.Context, fullpath util.FullPath) (err error) {
_, err = store.Client.Del(string(fullpath)).Result()
@@ -99,7 +103,7 @@ func (store *UniversalRedisStore) DeleteEntry(ctx context.Context, fullpath file
return nil
}
-func (store *UniversalRedisStore) DeleteFolderChildren(ctx context.Context, fullpath filer2.FullPath) (err error) {
+func (store *UniversalRedisStore) DeleteFolderChildren(ctx context.Context, fullpath util.FullPath) (err error) {
members, err := store.Client.SMembers(genDirectoryListKey(string(fullpath))).Result()
if err != nil {
@@ -107,7 +111,7 @@ func (store *UniversalRedisStore) DeleteFolderChildren(ctx context.Context, full
}
for _, fileName := range members {
- path := filer2.NewFullPath(string(fullpath), fileName)
+ path := util.NewFullPath(string(fullpath), fileName)
_, err = store.Client.Del(string(path)).Result()
if err != nil {
return fmt.Errorf("delete %s in parent dir: %v", fullpath, err)
@@ -117,10 +121,11 @@ func (store *UniversalRedisStore) DeleteFolderChildren(ctx context.Context, full
return nil
}
-func (store *UniversalRedisStore) ListDirectoryEntries(ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool,
+func (store *UniversalRedisStore) ListDirectoryEntries(ctx context.Context, fullpath util.FullPath, startFileName string, inclusive bool,
limit int) (entries []*filer2.Entry, err error) {
- members, err := store.Client.SMembers(genDirectoryListKey(string(fullpath))).Result()
+ dirListKey := genDirectoryListKey(string(fullpath))
+ members, err := store.Client.SMembers(dirListKey).Result()
if err != nil {
return nil, fmt.Errorf("list %s : %v", fullpath, err)
}
@@ -154,11 +159,18 @@ func (store *UniversalRedisStore) ListDirectoryEntries(ctx context.Context, full
// fetch entry meta
for _, fileName := range members {
- path := filer2.NewFullPath(string(fullpath), fileName)
+ path := util.NewFullPath(string(fullpath), fileName)
entry, err := store.FindEntry(ctx, path)
if err != nil {
glog.V(0).Infof("list %s : %v", path, err)
} else {
+ if entry.TtlSec > 0 {
+ if entry.Attr.Crtime.Add(time.Duration(entry.TtlSec) * time.Second).Before(time.Now()) {
+ store.Client.Del(string(path)).Result()
+ store.Client.SRem(dirListKey, fileName).Result()
+ continue
+ }
+ }
entries = append(entries, entry)
}
}
@@ -169,3 +181,7 @@ func (store *UniversalRedisStore) ListDirectoryEntries(ctx context.Context, full
func genDirectoryListKey(dir string) (dirList string) {
return dir + DIR_LIST_MARKER
}
+
+func (store *UniversalRedisStore) Shutdown() {
+ store.Client.Close()
+}
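The listing path above now expires TTL'd entries lazily: anything whose creation time plus TtlSec is in the past is deleted on sight and omitted from the results, with no background sweeper needed. The rule in isolation:

    package example

    import (
    	"time"

    	"github.com/chrislusf/seaweedfs/weed/filer2"
    )

    func isExpired(entry *filer2.Entry, now time.Time) bool {
    	if entry.TtlSec <= 0 {
    		return false // no TTL configured on this entry
    	}
    	return entry.Attr.Crtime.Add(time.Duration(entry.TtlSec) * time.Second).Before(now)
    }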
diff --git a/weed/filer2/redis2/redis_cluster_store.go b/weed/filer2/redis2/redis_cluster_store.go
new file mode 100644
index 000000000..b252eabab
--- /dev/null
+++ b/weed/filer2/redis2/redis_cluster_store.go
@@ -0,0 +1,42 @@
+package redis2
+
+import (
+ "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/util"
+ "github.com/go-redis/redis"
+)
+
+func init() {
+ filer2.Stores = append(filer2.Stores, &RedisCluster2Store{})
+}
+
+type RedisCluster2Store struct {
+ UniversalRedis2Store
+}
+
+func (store *RedisCluster2Store) GetName() string {
+ return "redis_cluster2"
+}
+
+func (store *RedisCluster2Store) Initialize(configuration util.Configuration, prefix string) (err error) {
+
+ configuration.SetDefault(prefix+"useReadOnly", true)
+ configuration.SetDefault(prefix+"routeByLatency", true)
+
+ return store.initialize(
+ configuration.GetStringSlice(prefix+"addresses"),
+ configuration.GetString(prefix+"password"),
+ configuration.GetBool(prefix+"useReadOnly"),
+ configuration.GetBool(prefix+"routeByLatency"),
+ )
+}
+
+func (store *RedisCluster2Store) initialize(addresses []string, password string, readOnly, routeByLatency bool) (err error) {
+ store.Client = redis.NewClusterClient(&redis.ClusterOptions{
+ Addrs: addresses,
+ Password: password,
+ ReadOnly: readOnly,
+ RouteByLatency: routeByLatency,
+ })
+ return
+}
diff --git a/weed/filer2/redis2/redis_store.go b/weed/filer2/redis2/redis_store.go
new file mode 100644
index 000000000..1e2a20043
--- /dev/null
+++ b/weed/filer2/redis2/redis_store.go
@@ -0,0 +1,36 @@
+package redis2
+
+import (
+ "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/util"
+ "github.com/go-redis/redis"
+)
+
+func init() {
+ filer2.Stores = append(filer2.Stores, &Redis2Store{})
+}
+
+type Redis2Store struct {
+ UniversalRedis2Store
+}
+
+func (store *Redis2Store) GetName() string {
+ return "redis2"
+}
+
+func (store *Redis2Store) Initialize(configuration util.Configuration, prefix string) (err error) {
+ return store.initialize(
+ configuration.GetString(prefix+"address"),
+ configuration.GetString(prefix+"password"),
+ configuration.GetInt(prefix+"database"),
+ )
+}
+
+func (store *Redis2Store) initialize(hostPort string, password string, database int) (err error) {
+ store.Client = redis.NewClient(&redis.Options{
+ Addr: hostPort,
+ Password: password,
+ DB: database,
+ })
+ return
+}
diff --git a/weed/filer2/redis2/universal_redis_store.go b/weed/filer2/redis2/universal_redis_store.go
new file mode 100644
index 000000000..420336b46
--- /dev/null
+++ b/weed/filer2/redis2/universal_redis_store.go
@@ -0,0 +1,162 @@
+package redis2
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/go-redis/redis"
+
+ "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+const (
+ DIR_LIST_MARKER = "\x00"
+)
+
+type UniversalRedis2Store struct {
+ Client redis.UniversalClient
+}
+
+func (store *UniversalRedis2Store) BeginTransaction(ctx context.Context) (context.Context, error) {
+ return ctx, nil
+}
+func (store *UniversalRedis2Store) CommitTransaction(ctx context.Context) error {
+ return nil
+}
+func (store *UniversalRedis2Store) RollbackTransaction(ctx context.Context) error {
+ return nil
+}
+
+func (store *UniversalRedis2Store) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) {
+
+ value, err := entry.EncodeAttributesAndChunks()
+ if err != nil {
+ return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err)
+ }
+
+ if err = store.Client.Set(string(entry.FullPath), value, time.Duration(entry.TtlSec)*time.Second).Err(); err != nil {
+ return fmt.Errorf("persisting %s : %v", entry.FullPath, err)
+ }
+
+ dir, name := entry.FullPath.DirAndName()
+ if name != "" {
+ if err = store.Client.ZAddNX(genDirectoryListKey(dir), redis.Z{Score: 0, Member: name}).Err(); err != nil {
+ return fmt.Errorf("persisting %s in parent dir: %v", entry.FullPath, err)
+ }
+ }
+
+ return nil
+}
+
+func (store *UniversalRedis2Store) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) {
+
+ return store.InsertEntry(ctx, entry)
+}
+
+func (store *UniversalRedis2Store) FindEntry(ctx context.Context, fullpath util.FullPath) (entry *filer2.Entry, err error) {
+
+ data, err := store.Client.Get(string(fullpath)).Result()
+ if err == redis.Nil {
+ return nil, filer_pb.ErrNotFound
+ }
+
+ if err != nil {
+ return nil, fmt.Errorf("get %s : %v", fullpath, err)
+ }
+
+ entry = &filer2.Entry{
+ FullPath: fullpath,
+ }
+ err = entry.DecodeAttributesAndChunks([]byte(data))
+ if err != nil {
+ return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err)
+ }
+
+ return entry, nil
+}
+
+func (store *UniversalRedis2Store) DeleteEntry(ctx context.Context, fullpath util.FullPath) (err error) {
+
+ _, err = store.Client.Del(string(fullpath)).Result()
+
+ if err != nil {
+ return fmt.Errorf("delete %s : %v", fullpath, err)
+ }
+
+ dir, name := fullpath.DirAndName()
+ if name != "" {
+ _, err = store.Client.ZRem(genDirectoryListKey(dir), name).Result()
+ if err != nil {
+ return fmt.Errorf("delete %s in parent dir: %v", fullpath, err)
+ }
+ }
+
+ return nil
+}
+
+func (store *UniversalRedis2Store) DeleteFolderChildren(ctx context.Context, fullpath util.FullPath) (err error) {
+
+ members, err := store.Client.ZRange(genDirectoryListKey(string(fullpath)), 0, -1).Result()
+ if err != nil {
+ return fmt.Errorf("delete folder %s : %v", fullpath, err)
+ }
+
+ for _, fileName := range members {
+ path := util.NewFullPath(string(fullpath), fileName)
+ _, err = store.Client.Del(string(path)).Result()
+ if err != nil {
+ return fmt.Errorf("delete %s in parent dir: %v", fullpath, err)
+ }
+ }
+
+ return nil
+}
+
+func (store *UniversalRedis2Store) ListDirectoryEntries(ctx context.Context, fullpath util.FullPath, startFileName string, inclusive bool,
+ limit int) (entries []*filer2.Entry, err error) {
+
+ dirListKey := genDirectoryListKey(string(fullpath))
+ start := int64(0)
+ if startFileName != "" {
+ start, _ = store.Client.ZRank(dirListKey, startFileName).Result()
+ if !inclusive {
+ start++
+ }
+ }
+ members, err := store.Client.ZRange(dirListKey, start, start+int64(limit)-1).Result()
+ if err != nil {
+ return nil, fmt.Errorf("list %s : %v", fullpath, err)
+ }
+
+ // fetch entry meta
+ for _, fileName := range members {
+ path := util.NewFullPath(string(fullpath), fileName)
+ entry, err := store.FindEntry(ctx, path)
+ if err != nil {
+ glog.V(0).Infof("list %s : %v", path, err)
+ } else {
+ if entry.TtlSec > 0 {
+ if entry.Attr.Crtime.Add(time.Duration(entry.TtlSec) * time.Second).Before(time.Now()) {
+ store.Client.Del(string(path)).Result()
+ store.Client.ZRem(dirListKey, fileName).Result()
+ continue
+ }
+ }
+ entries = append(entries, entry)
+ }
+ }
+
+ return entries, err
+}
+
+func genDirectoryListKey(dir string) (dirList string) {
+ return dir + DIR_LIST_MARKER
+}
+
+func (store *UniversalRedis2Store) Shutdown() {
+ store.Client.Close()
+}
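redis2 replaces the v1 store's unordered set of child names with a sorted set: each child is added with score 0, so Redis orders members lexicographically, and ZRANK/ZRANGE yield sorted, paginated directory listings without fetching every member. A sketch of the scheme against a bare client; the key and file names are illustrative:

    package main

    import (
    	"fmt"

    	"github.com/go-redis/redis"
    )

    func listChildren(client redis.UniversalClient, dirListKey, startFileName string, inclusive bool, limit int64) ([]string, error) {
    	start := int64(0)
    	if startFileName != "" {
    		if rank, err := client.ZRank(dirListKey, startFileName).Result(); err == nil {
    			start = rank
    			if !inclusive {
    				start++
    			}
    		}
    	}
    	return client.ZRange(dirListKey, start, start+limit-1).Result()
    }

    func main() {
    	client := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
    	client.ZAddNX("/photos\x00", redis.Z{Score: 0, Member: "b.jpg"}, redis.Z{Score: 0, Member: "a.jpg"})
    	names, _ := listChildren(client, "/photos\x00", "", false, 100)
    	fmt.Println(names) // [a.jpg b.jpg]
    }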
diff --git a/weed/filer2/stream.go b/weed/filer2/stream.go
index 01b87cad1..033a8dd13 100644
--- a/weed/filer2/stream.go
+++ b/weed/filer2/stream.go
@@ -1,7 +1,10 @@
package filer2
import (
+ "bytes"
"io"
+ "math"
+ "strings"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
@@ -9,7 +12,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/wdclient"
)
-func StreamContent(masterClient *wdclient.MasterClient, w io.Writer, chunks []*filer_pb.FileChunk, offset int64, size int) error {
+func StreamContent(masterClient *wdclient.MasterClient, w io.Writer, chunks []*filer_pb.FileChunk, offset int64, size int64) error {
chunkViews := ViewFromChunks(chunks, offset, size)
@@ -26,8 +29,9 @@ func StreamContent(masterClient *wdclient.MasterClient, w io.Writer, chunks []*f
}
for _, chunkView := range chunkViews {
+
urlString := fileId2Url[chunkView.FileId]
- _, err := util.ReadUrlAsStream(urlString, chunkView.Offset, int(chunkView.Size), func(data []byte) {
+ err := util.ReadUrlAsStream(urlString, chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.Offset, int(chunkView.Size), func(data []byte) {
w.Write(data)
})
if err != nil {
@@ -39,3 +43,157 @@ func StreamContent(masterClient *wdclient.MasterClient, w io.Writer, chunks []*f
return nil
}
+
+// ---------------- ReadAllReader ----------------------------------
+
+func ReadAll(masterClient *wdclient.MasterClient, chunks []*filer_pb.FileChunk) ([]byte, error) {
+
+ buffer := bytes.Buffer{}
+
+ chunkViews := ViewFromChunks(chunks, 0, math.MaxInt32)
+
+ lookupFileId := func(fileId string) (targetUrl string, err error) {
+ return masterClient.LookupFileId(fileId)
+ }
+
+ for _, chunkView := range chunkViews {
+ urlString, err := lookupFileId(chunkView.FileId)
+ if err != nil {
+ glog.V(1).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err)
+ return nil, err
+ }
+ err = util.ReadUrlAsStream(urlString, chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.Offset, int(chunkView.Size), func(data []byte) {
+ buffer.Write(data)
+ })
+ if err != nil {
+ glog.V(1).Infof("read %s failed, err: %v", chunkView.FileId, err)
+ return nil, err
+ }
+ }
+ return buffer.Bytes(), nil
+}
+
+// ---------------- ChunkStreamReader ----------------------------------
+type ChunkStreamReader struct {
+ chunkViews []*ChunkView
+ logicOffset int64
+ buffer []byte
+ bufferOffset int64
+ bufferPos int
+ chunkIndex int
+ lookupFileId LookupFileIdFunctionType
+}
+
+var _ = io.ReadSeeker(&ChunkStreamReader{})
+
+func NewChunkStreamReaderFromFiler(masterClient *wdclient.MasterClient, chunks []*filer_pb.FileChunk) *ChunkStreamReader {
+
+ chunkViews := ViewFromChunks(chunks, 0, math.MaxInt32)
+
+ return &ChunkStreamReader{
+ chunkViews: chunkViews,
+ lookupFileId: func(fileId string) (targetUrl string, err error) {
+ return masterClient.LookupFileId(fileId)
+ },
+ }
+}
+
+func NewChunkStreamReader(filerClient filer_pb.FilerClient, chunks []*filer_pb.FileChunk) *ChunkStreamReader {
+
+ chunkViews := ViewFromChunks(chunks, 0, math.MaxInt32)
+
+ return &ChunkStreamReader{
+ chunkViews: chunkViews,
+ lookupFileId: LookupFn(filerClient),
+ }
+}
+
+func (c *ChunkStreamReader) Read(p []byte) (n int, err error) {
+ for n < len(p) {
+ if c.isBufferEmpty() {
+ if c.chunkIndex >= len(c.chunkViews) {
+ return n, io.EOF
+ }
+			chunkView := c.chunkViews[c.chunkIndex]
+			if err = c.fetchChunkToBuffer(chunkView); err != nil {
+				// a failed fetch must stop the read instead of serving stale buffer bytes
+				return
+			}
+			c.chunkIndex++
+ }
+ t := copy(p[n:], c.buffer[c.bufferPos:])
+ c.bufferPos += t
+ n += t
+ }
+ return
+}
+
+func (c *ChunkStreamReader) isBufferEmpty() bool {
+ return len(c.buffer) <= c.bufferPos
+}
+
+func (c *ChunkStreamReader) Seek(offset int64, whence int) (int64, error) {
+
+ var totalSize int64
+ for _, chunk := range c.chunkViews {
+ totalSize += int64(chunk.Size)
+ }
+
+ var err error
+ switch whence {
+ case io.SeekStart:
+ case io.SeekCurrent:
+ offset += c.bufferOffset + int64(c.bufferPos)
+ case io.SeekEnd:
+ offset = totalSize + offset
+ }
+ if offset > totalSize {
+ err = io.ErrUnexpectedEOF
+ }
+
+ for i, chunk := range c.chunkViews {
+ if chunk.LogicOffset <= offset && offset < chunk.LogicOffset+int64(chunk.Size) {
+ if c.isBufferEmpty() || c.bufferOffset != chunk.LogicOffset {
+			if err = c.fetchChunkToBuffer(chunk); err != nil {
+				return offset, err
+			}
+ c.chunkIndex = i + 1
+ break
+ }
+ }
+ }
+ c.bufferPos = int(offset - c.bufferOffset)
+
+ return offset, err
+
+}
+
+func (c *ChunkStreamReader) fetchChunkToBuffer(chunkView *ChunkView) error {
+ urlString, err := c.lookupFileId(chunkView.FileId)
+ if err != nil {
+ glog.V(1).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err)
+ return err
+ }
+ var buffer bytes.Buffer
+ err = util.ReadUrlAsStream(urlString, chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.Offset, int(chunkView.Size), func(data []byte) {
+ buffer.Write(data)
+ })
+ if err != nil {
+ glog.V(1).Infof("read %s failed, err: %v", chunkView.FileId, err)
+ return err
+ }
+ c.buffer = buffer.Bytes()
+ c.bufferPos = 0
+ c.bufferOffset = chunkView.LogicOffset
+
+ // glog.V(0).Infof("read %s [%d,%d)", chunkView.FileId, chunkView.LogicOffset, chunkView.LogicOffset+int64(chunkView.Size))
+
+ return nil
+}
+
+func (c *ChunkStreamReader) Close() {
+ // TODO try to release and reuse buffer
+}
+
+func VolumeId(fileId string) string {
+ lastCommaIndex := strings.LastIndex(fileId, ",")
+ if lastCommaIndex > 0 {
+ return fileId[:lastCommaIndex]
+ }
+ return fileId
+}
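ChunkStreamReader satisfies io.ReadSeeker, so chunked file content can feed any stream-oriented API; Seek repositions the buffer onto the chunk covering the target offset. A sketch of skipping into a file, with the master client and chunk list assumed to come from an already-fetched entry:

    package example

    import (
    	"io"

    	"github.com/chrislusf/seaweedfs/weed/filer2"
    	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
    	"github.com/chrislusf/seaweedfs/weed/wdclient"
    )

    func skipAndRead(masterClient *wdclient.MasterClient, chunks []*filer_pb.FileChunk, offset int64, size int) ([]byte, error) {
    	rs := filer2.NewChunkStreamReaderFromFiler(masterClient, chunks)
    	if _, err := rs.Seek(offset, io.SeekStart); err != nil {
    		return nil, err
    	}
    	buf := make([]byte, size)
    	n, err := io.ReadFull(rs, buf)
    	if err == io.EOF || err == io.ErrUnexpectedEOF {
    		err = nil // a short read near end of file is expected
    	}
    	return buf[:n], err
    }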
diff --git a/weed/filer2/tikv/tikv_store.go b/weed/filer2/tikv/tikv_store.go
deleted file mode 100644
index 24e05e3ad..000000000
--- a/weed/filer2/tikv/tikv_store.go
+++ /dev/null
@@ -1,251 +0,0 @@
-// +build !386
-// +build !arm
-
-package tikv
-
-import (
- "bytes"
- "context"
- "crypto/md5"
- "fmt"
- "io"
-
- "github.com/chrislusf/seaweedfs/weed/filer2"
- "github.com/chrislusf/seaweedfs/weed/glog"
- weed_util "github.com/chrislusf/seaweedfs/weed/util"
-
- "github.com/pingcap/tidb/kv"
- "github.com/pingcap/tidb/store/tikv"
-)
-
-func init() {
- filer2.Stores = append(filer2.Stores, &TikvStore{})
-}
-
-type TikvStore struct {
- store kv.Storage
-}
-
-func (store *TikvStore) GetName() string {
- return "tikv"
-}
-
-func (store *TikvStore) Initialize(configuration weed_util.Configuration, prefix string) (err error) {
- pdAddr := configuration.GetString(prefix + "pdAddress")
- return store.initialize(pdAddr)
-}
-
-func (store *TikvStore) initialize(pdAddr string) (err error) {
- glog.Infof("filer store tikv pd address: %s", pdAddr)
-
- driver := tikv.Driver{}
-
- store.store, err = driver.Open(fmt.Sprintf("tikv://%s", pdAddr))
-
- if err != nil {
- return fmt.Errorf("open tikv %s : %v", pdAddr, err)
- }
-
- return
-}
-
-func (store *TikvStore) BeginTransaction(ctx context.Context) (context.Context, error) {
- tx, err := store.store.Begin()
- if err != nil {
- return ctx, err
- }
- return context.WithValue(ctx, "tx", tx), nil
-}
-func (store *TikvStore) CommitTransaction(ctx context.Context) error {
- tx, ok := ctx.Value("tx").(kv.Transaction)
- if ok {
- return tx.Commit(ctx)
- }
- return nil
-}
-func (store *TikvStore) RollbackTransaction(ctx context.Context) error {
- tx, ok := ctx.Value("tx").(kv.Transaction)
- if ok {
- return tx.Rollback()
- }
- return nil
-}
-
-func (store *TikvStore) getTx(ctx context.Context) kv.Transaction {
- if tx, ok := ctx.Value("tx").(kv.Transaction); ok {
- return tx
- }
- return nil
-}
-
-func (store *TikvStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) {
- dir, name := entry.DirAndName()
- key := genKey(dir, name)
-
- value, err := entry.EncodeAttributesAndChunks()
- if err != nil {
- return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err)
- }
-
- err = store.getTx(ctx).Set(key, value)
-
- if err != nil {
- return fmt.Errorf("persisting %s : %v", entry.FullPath, err)
- }
-
- // println("saved", entry.FullPath, "chunks", len(entry.Chunks))
-
- return nil
-}
-
-func (store *TikvStore) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) {
-
- return store.InsertEntry(ctx, entry)
-}
-
-func (store *TikvStore) FindEntry(ctx context.Context, fullpath filer2.FullPath) (entry *filer2.Entry, err error) {
- dir, name := fullpath.DirAndName()
- key := genKey(dir, name)
-
- data, err := store.getTx(ctx).Get(ctx, key)
-
- if err == kv.ErrNotExist {
- return nil, filer2.ErrNotFound
- }
- if err != nil {
- return nil, fmt.Errorf("get %s : %v", entry.FullPath, err)
- }
-
- entry = &filer2.Entry{
- FullPath: fullpath,
- }
- err = entry.DecodeAttributesAndChunks(data)
- if err != nil {
- return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err)
- }
-
- // println("read", entry.FullPath, "chunks", len(entry.Chunks), "data", len(data), string(data))
-
- return entry, nil
-}
-
-func (store *TikvStore) DeleteEntry(ctx context.Context, fullpath filer2.FullPath) (err error) {
- dir, name := fullpath.DirAndName()
- key := genKey(dir, name)
-
- err = store.getTx(ctx).Delete(key)
- if err != nil {
- return fmt.Errorf("delete %s : %v", fullpath, err)
- }
-
- return nil
-}
-
-func (store *TikvStore) DeleteFolderChildren(ctx context.Context, fullpath filer2.FullPath) (err error) {
-
- directoryPrefix := genDirectoryKeyPrefix(fullpath, "")
-
- tx := store.getTx(ctx)
-
- iter, err := tx.Iter(directoryPrefix, nil)
- if err != nil {
- return fmt.Errorf("deleteFolderChildren %s: %v", fullpath, err)
- }
- defer iter.Close()
- for iter.Valid() {
- key := iter.Key()
- if !bytes.HasPrefix(key, directoryPrefix) {
- break
- }
- fileName := getNameFromKey(key)
- if fileName == "" {
- iter.Next()
- continue
- }
-
- if err = tx.Delete(genKey(string(fullpath), fileName)); err != nil {
- return fmt.Errorf("delete %s : %v", fullpath, err)
- }
-
- iter.Next()
- }
-
- return nil
-}
-
-func (store *TikvStore) ListDirectoryEntries(ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool,
- limit int) (entries []*filer2.Entry, err error) {
-
- directoryPrefix := genDirectoryKeyPrefix(fullpath, "")
- lastFileStart := genDirectoryKeyPrefix(fullpath, startFileName)
-
- iter, err := store.getTx(ctx).Iter(lastFileStart, nil)
- if err != nil {
- return nil, fmt.Errorf("list %s: %v", fullpath, err)
- }
- defer iter.Close()
- for iter.Valid() {
- key := iter.Key()
- if !bytes.HasPrefix(key, directoryPrefix) {
- break
- }
- fileName := getNameFromKey(key)
- if fileName == "" {
- iter.Next()
- continue
- }
- if fileName == startFileName && !inclusive {
- iter.Next()
- continue
- }
- limit--
- if limit < 0 {
- break
- }
- entry := &filer2.Entry{
- FullPath: filer2.NewFullPath(string(fullpath), fileName),
- }
-
- // println("list", entry.FullPath, "chunks", len(entry.Chunks))
-
- if decodeErr := entry.DecodeAttributesAndChunks(iter.Value()); decodeErr != nil {
- err = decodeErr
- glog.V(0).Infof("list %s : %v", entry.FullPath, err)
- break
- }
- entries = append(entries, entry)
- iter.Next()
- }
-
- return entries, err
-}
-
-func genKey(dirPath, fileName string) (key []byte) {
- key = hashToBytes(dirPath)
- key = append(key, []byte(fileName)...)
- return key
-}
-
-func genDirectoryKeyPrefix(fullpath filer2.FullPath, startFileName string) (keyPrefix []byte) {
- keyPrefix = hashToBytes(string(fullpath))
- if len(startFileName) > 0 {
- keyPrefix = append(keyPrefix, []byte(startFileName)...)
- }
- return keyPrefix
-}
-
-func getNameFromKey(key []byte) string {
-
- return string(key[md5.Size:])
-
-}
-
-// hash directory
-func hashToBytes(dir string) []byte {
- h := md5.New()
- io.WriteString(h, dir)
-
- b := h.Sum(nil)
-
- return b
-}
diff --git a/weed/filer2/tikv/tikv_store_unsupported.go b/weed/filer2/tikv/tikv_store_unsupported.go
deleted file mode 100644
index daf29612e..000000000
--- a/weed/filer2/tikv/tikv_store_unsupported.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// +build 386 arm
-
-package tikv
-
-import (
- "context"
- "fmt"
-
- "github.com/chrislusf/seaweedfs/weed/filer2"
- weed_util "github.com/chrislusf/seaweedfs/weed/util"
-)
-
-func init() {
- filer2.Stores = append(filer2.Stores, &TikvStore{})
-}
-
-type TikvStore struct {
-}
-
-func (store *TikvStore) GetName() string {
- return "tikv"
-}
-
-func (store *TikvStore) Initialize(configuration weed_util.Configuration, prefix string) (err error) {
- return fmt.Errorf("not implemented for 32 bit computers")
-}
-
-func (store *TikvStore) initialize(pdAddr string) (err error) {
- return fmt.Errorf("not implemented for 32 bit computers")
-}
-
-func (store *TikvStore) BeginTransaction(ctx context.Context) (context.Context, error) {
- return nil, fmt.Errorf("not implemented for 32 bit computers")
-}
-func (store *TikvStore) CommitTransaction(ctx context.Context) error {
- return fmt.Errorf("not implemented for 32 bit computers")
-}
-func (store *TikvStore) RollbackTransaction(ctx context.Context) error {
- return fmt.Errorf("not implemented for 32 bit computers")
-}
-
-func (store *TikvStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) {
- return fmt.Errorf("not implemented for 32 bit computers")
-}
-
-func (store *TikvStore) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) {
- return fmt.Errorf("not implemented for 32 bit computers")
-}
-
-func (store *TikvStore) FindEntry(ctx context.Context, fullpath filer2.FullPath) (entry *filer2.Entry, err error) {
- return nil, fmt.Errorf("not implemented for 32 bit computers")
-}
-
-func (store *TikvStore) DeleteEntry(ctx context.Context, fullpath filer2.FullPath) (err error) {
- return fmt.Errorf("not implemented for 32 bit computers")
-}
-
-func (store *TikvStore) DeleteFolderChildren(ctx context.Context, fullpath filer2.FullPath) (err error) {
- return fmt.Errorf("not implemented for 32 bit computers")
-}
-
-func (store *TikvStore) ListDirectoryEntries(ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool,
- limit int) (entries []*filer2.Entry, err error) {
- return nil, fmt.Errorf("not implemented for 32 bit computers")
-}
diff --git a/weed/filer2/topics.go b/weed/filer2/topics.go
new file mode 100644
index 000000000..9c6e5c88d
--- /dev/null
+++ b/weed/filer2/topics.go
@@ -0,0 +1,6 @@
+package filer2
+
+const (
+ TopicsDir = "/topics"
+ SystemLogDir = TopicsDir + "/.system/log"
+)
diff --git a/weed/filesys/dir.go b/weed/filesys/dir.go
index abe5a21a6..e4260d56f 100644
--- a/weed/filesys/dir.go
+++ b/weed/filesys/dir.go
@@ -1,6 +1,7 @@
package filesys
import (
+ "bytes"
"context"
"os"
"strings"
@@ -9,14 +10,16 @@ import (
"github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
"github.com/seaweedfs/fuse"
"github.com/seaweedfs/fuse/fs"
)
type Dir struct {
- Path string
- wfs *WFS
- entry *filer_pb.Entry
+ name string
+ wfs *WFS
+ entry *filer_pb.Entry
+ parent *Dir
}
var _ = fs.Node(&Dir{})
@@ -35,39 +38,37 @@ var _ = fs.NodeForgetter(&Dir{})
func (dir *Dir) Attr(ctx context.Context, attr *fuse.Attr) error {
- glog.V(3).Infof("dir Attr %s, existing attr: %+v", dir.Path, attr)
-
// https://github.com/bazil/fuse/issues/196
attr.Valid = time.Second
- if dir.Path == dir.wfs.option.FilerMountRootPath {
+ if dir.FullPath() == dir.wfs.option.FilerMountRootPath {
dir.setRootDirAttributes(attr)
- glog.V(3).Infof("root dir Attr %s, attr: %+v", dir.Path, attr)
+ glog.V(3).Infof("root dir Attr %s, attr: %+v", dir.FullPath(), attr)
return nil
}
- if err := dir.maybeLoadEntry(ctx); err != nil {
- glog.V(3).Infof("dir Attr %s,err: %+v", dir.Path, err)
+ if err := dir.maybeLoadEntry(); err != nil {
+ glog.V(3).Infof("dir Attr %s,err: %+v", dir.FullPath(), err)
return err
}
- attr.Inode = filer2.FullPath(dir.Path).AsInode()
+ attr.Inode = util.FullPath(dir.FullPath()).AsInode()
attr.Mode = os.FileMode(dir.entry.Attributes.FileMode) | os.ModeDir
attr.Mtime = time.Unix(dir.entry.Attributes.Mtime, 0)
- attr.Ctime = time.Unix(dir.entry.Attributes.Crtime, 0)
+ attr.Crtime = time.Unix(dir.entry.Attributes.Crtime, 0)
attr.Gid = dir.entry.Attributes.Gid
attr.Uid = dir.entry.Attributes.Uid
- glog.V(3).Infof("dir Attr %s, attr: %+v", dir.Path, attr)
+ glog.V(4).Infof("dir Attr %s, attr: %+v", dir.FullPath(), attr)
return nil
}
func (dir *Dir) Getxattr(ctx context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error {
- glog.V(4).Infof("dir Getxattr %s", dir.Path)
+ glog.V(4).Infof("dir Getxattr %s", dir.FullPath())
- if err := dir.maybeLoadEntry(ctx); err != nil {
+ if err := dir.maybeLoadEntry(); err != nil {
return err
}
@@ -88,7 +89,7 @@ func (dir *Dir) setRootDirAttributes(attr *fuse.Attr) {
}
func (dir *Dir) newFile(name string, entry *filer_pb.Entry) fs.Node {
- return dir.wfs.getNode(filer2.NewFullPath(dir.Path, name), func() fs.Node {
+ return dir.wfs.fsNodeCache.EnsureFsNode(util.NewFullPath(dir.FullPath(), name), func() fs.Node {
return &File{
Name: name,
dir: dir,
@@ -99,17 +100,19 @@ func (dir *Dir) newFile(name string, entry *filer_pb.Entry) fs.Node {
})
}
-func (dir *Dir) newDirectory(fullpath filer2.FullPath, entry *filer_pb.Entry) fs.Node {
- return dir.wfs.getNode(fullpath, func() fs.Node {
- return &Dir{Path: string(fullpath), wfs: dir.wfs, entry: entry}
+func (dir *Dir) newDirectory(fullpath util.FullPath, entry *filer_pb.Entry) fs.Node {
+
+ return dir.wfs.fsNodeCache.EnsureFsNode(fullpath, func() fs.Node {
+ return &Dir{name: entry.Name, wfs: dir.wfs, entry: entry, parent: dir}
})
+
}
func (dir *Dir) Create(ctx context.Context, req *fuse.CreateRequest,
resp *fuse.CreateResponse) (fs.Node, fs.Handle, error) {
request := &filer_pb.CreateEntryRequest{
- Directory: dir.Path,
+ Directory: dir.FullPath(),
Entry: &filer_pb.Entry{
Name: req.Name,
IsDirectory: req.Mode&os.ModeDir > 0,
@@ -126,22 +129,27 @@ func (dir *Dir) Create(ctx context.Context, req *fuse.CreateRequest,
},
OExcl: req.Flags&fuse.OpenExclusive != 0,
}
- glog.V(1).Infof("create: %v", req.String())
+ glog.V(1).Infof("create %s/%s: %v", dir.FullPath(), req.Name, req.Flags)
- if err := dir.wfs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error {
- if err := filer_pb.CreateEntry(ctx, client, request); err != nil {
+ if err := dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+ if err := filer_pb.CreateEntry(client, request); err != nil {
if strings.Contains(err.Error(), "EEXIST") {
return fuse.EEXIST
}
return fuse.EIO
}
+
+ if dir.wfs.option.AsyncMetaDataCaching {
+ dir.wfs.metaCache.InsertEntry(context.Background(), filer2.FromPbEntry(request.Directory, request.Entry))
+ }
+
return nil
}); err != nil {
return nil, nil, err
}
var node fs.Node
if request.Entry.IsDirectory {
- node = dir.newDirectory(filer2.NewFullPath(dir.Path, req.Name), request.Entry)
+ node = dir.newDirectory(util.NewFullPath(dir.FullPath(), req.Name), request.Entry)
return node, nil, nil
}
@@ -155,6 +163,8 @@ func (dir *Dir) Create(ctx context.Context, req *fuse.CreateRequest,
func (dir *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, error) {
+ glog.V(4).Infof("mkdir %s: %s", dir.FullPath(), req.Name)
+
newEntry := &filer_pb.Entry{
Name: req.Name,
IsDirectory: true,
@@ -167,40 +177,55 @@ func (dir *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, err
},
}
- err := dir.wfs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error {
+ err := dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.CreateEntryRequest{
- Directory: dir.Path,
+ Directory: dir.FullPath(),
Entry: newEntry,
}
glog.V(1).Infof("mkdir: %v", request)
- if err := filer_pb.CreateEntry(ctx, client, request); err != nil {
- glog.V(0).Infof("mkdir %s/%s: %v", dir.Path, req.Name, err)
+ if err := filer_pb.CreateEntry(client, request); err != nil {
+ glog.V(0).Infof("mkdir %s/%s: %v", dir.FullPath(), req.Name, err)
return err
}
+ if dir.wfs.option.AsyncMetaDataCaching {
+ dir.wfs.metaCache.InsertEntry(context.Background(), filer2.FromPbEntry(request.Directory, request.Entry))
+ }
+
return nil
})
if err == nil {
- node := dir.newDirectory(filer2.NewFullPath(dir.Path, req.Name), newEntry)
+ node := dir.newDirectory(util.NewFullPath(dir.FullPath(), req.Name), newEntry)
+
return node, nil
}
+ glog.V(0).Infof("mkdir %s/%s: %v", dir.FullPath(), req.Name, err)
+
return nil, fuse.EIO
}
func (dir *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.LookupResponse) (node fs.Node, err error) {
- glog.V(4).Infof("dir Lookup %s: %s", dir.Path, req.Name)
+ glog.V(4).Infof("dir Lookup %s: %s by %s", dir.FullPath(), req.Name, req.Header.String())
- fullFilePath := filer2.NewFullPath(dir.Path, req.Name)
+ fullFilePath := util.NewFullPath(dir.FullPath(), req.Name)
entry := dir.wfs.cacheGet(fullFilePath)
+ if dir.wfs.option.AsyncMetaDataCaching {
+ cachedEntry, cacheErr := dir.wfs.metaCache.FindEntry(context.Background(), fullFilePath)
+ if cacheErr == filer_pb.ErrNotFound {
+ return nil, fuse.ENOENT
+ }
+ entry = cachedEntry.ToProtoEntry()
+ }
+
if entry == nil {
// glog.V(3).Infof("dir Lookup cache miss %s", fullFilePath)
- entry, err = filer2.GetEntry(ctx, dir.wfs, fullFilePath)
+ entry, err = filer_pb.GetEntry(dir.wfs, fullFilePath)
if err != nil {
glog.V(1).Infof("dir GetEntry %s: %v", fullFilePath, err)
return nil, fuse.ENOENT
@@ -221,7 +246,7 @@ func (dir *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.
resp.Attr.Inode = fullFilePath.AsInode()
resp.Attr.Valid = time.Second
resp.Attr.Mtime = time.Unix(entry.Attributes.Mtime, 0)
- resp.Attr.Ctime = time.Unix(entry.Attributes.Crtime, 0)
+ resp.Attr.Crtime = time.Unix(entry.Attributes.Crtime, 0)
resp.Attr.Mode = os.FileMode(entry.Attributes.FileMode)
resp.Attr.Gid = entry.Attributes.Gid
resp.Attr.Uid = entry.Attributes.Uid
@@ -229,18 +254,17 @@ func (dir *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.
return node, nil
}
- glog.V(1).Infof("not found dir GetEntry %s: %v", fullFilePath, err)
+ glog.V(4).Infof("not found dir GetEntry %s: %v", fullFilePath, err)
return nil, fuse.ENOENT
}
func (dir *Dir) ReadDirAll(ctx context.Context) (ret []fuse.Dirent, err error) {
- glog.V(3).Infof("dir ReadDirAll %s", dir.Path)
+ glog.V(3).Infof("dir ReadDirAll %s", dir.FullPath())
cacheTtl := 5 * time.Minute
-
- readErr := filer2.ReadDirAllEntries(ctx, dir.wfs, filer2.FullPath(dir.Path), "", func(entry *filer_pb.Entry, isLast bool) {
- fullpath := filer2.NewFullPath(dir.Path, entry.Name)
+ processEachEntryFn := func(entry *filer_pb.Entry, isLast bool) error {
+ fullpath := util.NewFullPath(dir.FullPath(), entry.Name)
inode := fullpath.AsInode()
if entry.IsDirectory {
dirent := fuse.Dirent{Inode: inode, Name: entry.Name, Type: fuse.DT_Dir}
@@ -250,9 +274,24 @@ func (dir *Dir) ReadDirAll(ctx context.Context) (ret []fuse.Dirent, err error) {
ret = append(ret, dirent)
}
dir.wfs.cacheSet(fullpath, entry, cacheTtl)
- })
+ return nil
+ }
+
+ if dir.wfs.option.AsyncMetaDataCaching {
+ listedEntries, listErr := dir.wfs.metaCache.ListDirectoryEntries(context.Background(), util.FullPath(dir.FullPath()), "", false, int(dir.wfs.option.DirListCacheLimit))
+ if listErr != nil {
+ glog.Errorf("list meta cache: %v", listErr)
+ return nil, fuse.EIO
+ }
+ for _, cachedEntry := range listedEntries {
+ processEachEntryFn(cachedEntry.ToProtoEntry(), false)
+ }
+ return
+ }
+
+ readErr := filer_pb.ReadDirAllEntries(dir.wfs, util.FullPath(dir.FullPath()), "", processEachEntryFn)
if readErr != nil {
- glog.V(0).Infof("list %s: %v", dir.Path, err)
+ glog.V(0).Infof("list %s: %v", dir.FullPath(), err)
return ret, fuse.EIO
}
@@ -262,74 +301,70 @@ func (dir *Dir) ReadDirAll(ctx context.Context) (ret []fuse.Dirent, err error) {
func (dir *Dir) Remove(ctx context.Context, req *fuse.RemoveRequest) error {
if !req.Dir {
- return dir.removeOneFile(ctx, req)
+ return dir.removeOneFile(req)
}
- return dir.removeFolder(ctx, req)
+ return dir.removeFolder(req)
}
-func (dir *Dir) removeOneFile(ctx context.Context, req *fuse.RemoveRequest) error {
+func (dir *Dir) removeOneFile(req *fuse.RemoveRequest) error {
- filePath := filer2.NewFullPath(dir.Path, req.Name)
- entry, err := filer2.GetEntry(ctx, dir.wfs, filePath)
+ filePath := util.NewFullPath(dir.FullPath(), req.Name)
+ entry, err := filer_pb.GetEntry(dir.wfs, filePath)
if err != nil {
return err
}
+ if entry == nil {
+ return nil
+ }
- dir.wfs.deleteFileChunks(ctx, entry.Chunks)
+ dir.wfs.deleteFileChunks(entry.Chunks)
dir.wfs.cacheDelete(filePath)
+ dir.wfs.fsNodeCache.DeleteFsNode(filePath)
- return dir.wfs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error {
+ if dir.wfs.option.AsyncMetaDataCaching {
+ dir.wfs.metaCache.DeleteEntry(context.Background(), filePath)
+ }
- request := &filer_pb.DeleteEntryRequest{
- Directory: dir.Path,
- Name: req.Name,
- IsDeleteData: false,
- }
+ glog.V(3).Infof("remove file: %v", req)
+ err = filer_pb.Remove(dir.wfs, dir.FullPath(), req.Name, false, false, false)
+ if err != nil {
+ glog.V(3).Infof("not found remove file %s/%s: %v", dir.FullPath(), req.Name, err)
+ return fuse.ENOENT
+ }
- glog.V(3).Infof("remove file: %v", request)
- _, err := client.DeleteEntry(ctx, request)
- if err != nil {
- glog.V(3).Infof("not found remove file %s/%s: %v", dir.Path, req.Name, err)
- return fuse.ENOENT
- }
-
- return nil
- })
+ return nil
}
-func (dir *Dir) removeFolder(ctx context.Context, req *fuse.RemoveRequest) error {
+func (dir *Dir) removeFolder(req *fuse.RemoveRequest) error {
- dir.wfs.cacheDelete(filer2.NewFullPath(dir.Path, req.Name))
+ t := util.NewFullPath(dir.FullPath(), req.Name)
+ dir.wfs.cacheDelete(t)
+ dir.wfs.fsNodeCache.DeleteFsNode(t)
- return dir.wfs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error {
+ if dir.wfs.option.AsyncMetaDataCaching {
+ dir.wfs.metaCache.DeleteEntry(context.Background(), t)
+ }
- request := &filer_pb.DeleteEntryRequest{
- Directory: dir.Path,
- Name: req.Name,
- IsDeleteData: true,
- }
+ glog.V(3).Infof("remove directory entry: %v", req)
+ err := filer_pb.Remove(dir.wfs, dir.FullPath(), req.Name, true, false, false)
+ if err != nil {
+ glog.V(3).Infof("not found remove %s/%s: %v", dir.FullPath(), req.Name, err)
+ return fuse.ENOENT
+ }
- glog.V(3).Infof("remove directory entry: %v", request)
- _, err := client.DeleteEntry(ctx, request)
- if err != nil {
- glog.V(3).Infof("not found remove %s/%s: %v", dir.Path, req.Name, err)
- return fuse.ENOENT
- }
-
- return nil
- })
+ return nil
}
func (dir *Dir) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) error {
- glog.V(3).Infof("%v dir setattr %+v", dir.Path, req)
+ glog.V(3).Infof("%v dir setattr %+v", dir.FullPath(), req)
- if err := dir.maybeLoadEntry(ctx); err != nil {
+ if err := dir.maybeLoadEntry(); err != nil {
return err
}
@@ -349,17 +384,17 @@ func (dir *Dir) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fus
dir.entry.Attributes.Mtime = req.Mtime.Unix()
}
- dir.wfs.cacheDelete(filer2.FullPath(dir.Path))
+ dir.wfs.cacheDelete(util.FullPath(dir.FullPath()))
- return dir.saveEntry(ctx)
+ return dir.saveEntry()
}
func (dir *Dir) Setxattr(ctx context.Context, req *fuse.SetxattrRequest) error {
- glog.V(4).Infof("dir Setxattr %s: %s", dir.Path, req.Name)
+ glog.V(4).Infof("dir Setxattr %s: %s", dir.FullPath(), req.Name)
- if err := dir.maybeLoadEntry(ctx); err != nil {
+ if err := dir.maybeLoadEntry(); err != nil {
return err
}
@@ -367,17 +402,17 @@ func (dir *Dir) Setxattr(ctx context.Context, req *fuse.SetxattrRequest) error {
return err
}
- dir.wfs.cacheDelete(filer2.FullPath(dir.Path))
+ dir.wfs.cacheDelete(util.FullPath(dir.FullPath()))
- return dir.saveEntry(ctx)
+ return dir.saveEntry()
}
func (dir *Dir) Removexattr(ctx context.Context, req *fuse.RemovexattrRequest) error {
- glog.V(4).Infof("dir Removexattr %s: %s", dir.Path, req.Name)
+ glog.V(4).Infof("dir Removexattr %s: %s", dir.FullPath(), req.Name)
- if err := dir.maybeLoadEntry(ctx); err != nil {
+ if err := dir.maybeLoadEntry(); err != nil {
return err
}
@@ -385,17 +420,17 @@ func (dir *Dir) Removexattr(ctx context.Context, req *fuse.RemovexattrRequest) e
return err
}
- dir.wfs.cacheDelete(filer2.FullPath(dir.Path))
+ dir.wfs.cacheDelete(util.FullPath(dir.FullPath()))
- return dir.saveEntry(ctx)
+ return dir.saveEntry()
}
func (dir *Dir) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse) error {
- glog.V(4).Infof("dir Listxattr %s", dir.Path)
+ glog.V(4).Infof("dir Listxattr %s", dir.FullPath())
- if err := dir.maybeLoadEntry(ctx); err != nil {
+ if err := dir.maybeLoadEntry(); err != nil {
return err
}
@@ -408,15 +443,15 @@ func (dir *Dir) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, resp
}
func (dir *Dir) Forget() {
- glog.V(3).Infof("Forget dir %s", dir.Path)
+ glog.V(3).Infof("Forget dir %s", dir.FullPath())
- dir.wfs.forgetNode(filer2.FullPath(dir.Path))
+ dir.wfs.fsNodeCache.DeleteFsNode(util.FullPath(dir.FullPath()))
}
-func (dir *Dir) maybeLoadEntry(ctx context.Context) error {
+func (dir *Dir) maybeLoadEntry() error {
if dir.entry == nil {
- parentDirPath, name := filer2.FullPath(dir.Path).DirAndName()
- entry, err := dir.wfs.maybeLoadEntry(ctx, parentDirPath, name)
+ parentDirPath, name := util.FullPath(dir.FullPath()).DirAndName()
+ entry, err := dir.wfs.maybeLoadEntry(parentDirPath, name)
if err != nil {
return err
}
@@ -425,11 +460,11 @@ func (dir *Dir) maybeLoadEntry(ctx context.Context) error {
return nil
}
-func (dir *Dir) saveEntry(ctx context.Context) error {
+func (dir *Dir) saveEntry() error {
- parentDir, name := filer2.FullPath(dir.Path).DirAndName()
+ parentDir, name := util.FullPath(dir.FullPath()).DirAndName()
- return dir.wfs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error {
+ return dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.UpdateEntryRequest{
Directory: parentDir,
@@ -437,12 +472,40 @@ func (dir *Dir) saveEntry(ctx context.Context) error {
}
glog.V(1).Infof("save dir entry: %v", request)
- _, err := client.UpdateEntry(ctx, request)
+ _, err := client.UpdateEntry(context.Background(), request)
if err != nil {
glog.V(0).Infof("UpdateEntry dir %s/%s: %v", parentDir, name, err)
return fuse.EIO
}
+ if dir.wfs.option.AsyncMetaDataCaching {
+ dir.wfs.metaCache.UpdateEntry(context.Background(), filer2.FromPbEntry(request.Directory, request.Entry))
+ }
+
return nil
})
}
+
+func (dir *Dir) FullPath() string {
+ var parts []string
+ for p := dir; p != nil; p = p.parent {
+ if strings.HasPrefix(p.name, "/") {
+ if len(p.name) > 1 {
+ parts = append(parts, p.name[1:])
+ }
+ } else {
+ parts = append(parts, p.name)
+ }
+ }
+
+ if len(parts) == 0 {
+ return "/"
+ }
+
+ var buf bytes.Buffer
+ for i := len(parts) - 1; i >= 0; i-- {
+ buf.WriteString("/")
+ buf.WriteString(parts[i])
+ }
+ return buf.String()
+}
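
The new FullPath() derives a directory's absolute path by walking the parent chain, replacing the old stored Path string. For reference, the traversal is equivalent to collecting names leaf-to-root, skipping a bare "/" root, and joining root-first; a minimal standalone sketch (the node type here is illustrative, not part of the codebase):

    package main

    import (
    	"fmt"
    	"strings"
    )

    type node struct {
    	name   string
    	parent *node
    }

    // fullPath mirrors Dir.FullPath: walk up the parent chain, drop the
    // leading "/" from a mount-root name, then join the parts root-first.
    func fullPath(n *node) string {
    	var parts []string
    	for p := n; p != nil; p = p.parent {
    		if name := strings.TrimPrefix(p.name, "/"); name != "" {
    			parts = append(parts, name)
    		}
    	}
    	if len(parts) == 0 {
    		return "/"
    	}
    	for i, j := 0, len(parts)-1; i < j; i, j = i+1, j-1 {
    		parts[i], parts[j] = parts[j], parts[i]
    	}
    	return "/" + strings.Join(parts, "/")
    }

    func main() {
    	root := &node{name: "/"}
    	to := &node{name: "to", parent: &node{name: "path", parent: root}}
    	fmt.Println(fullPath(to)) // /path/to
    }
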
diff --git a/weed/filesys/dir_link.go b/weed/filesys/dir_link.go
index 8b7ec7e89..d1858e99b 100644
--- a/weed/filesys/dir_link.go
+++ b/weed/filesys/dir_link.go
@@ -6,6 +6,7 @@ import (
"syscall"
"time"
+ "github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/fuse"
@@ -17,10 +18,10 @@ var _ = fs.NodeReadlinker(&File{})
func (dir *Dir) Symlink(ctx context.Context, req *fuse.SymlinkRequest) (fs.Node, error) {
- glog.V(3).Infof("Symlink: %v/%v to %v", dir.Path, req.NewName, req.Target)
+ glog.V(3).Infof("Symlink: %v/%v to %v", dir.FullPath(), req.NewName, req.Target)
request := &filer_pb.CreateEntryRequest{
- Directory: dir.Path,
+ Directory: dir.FullPath(),
Entry: &filer_pb.Entry{
Name: req.NewName,
IsDirectory: false,
@@ -35,11 +36,16 @@ func (dir *Dir) Symlink(ctx context.Context, req *fuse.SymlinkRequest) (fs.Node,
},
}
- err := dir.wfs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error {
- if err := filer_pb.CreateEntry(ctx, client, request); err != nil {
- glog.V(0).Infof("symlink %s/%s: %v", dir.Path, req.NewName, err)
+ err := dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+ if err := filer_pb.CreateEntry(client, request); err != nil {
+ glog.V(0).Infof("symlink %s/%s: %v", dir.FullPath(), req.NewName, err)
return fuse.EIO
}
+
+ if dir.wfs.option.AsyncMetaDataCaching {
+ dir.wfs.metaCache.InsertEntry(context.Background(), filer2.FromPbEntry(request.Directory, request.Entry))
+ }
+
return nil
})
@@ -59,7 +65,7 @@ func (file *File) Readlink(ctx context.Context, req *fuse.ReadlinkRequest) (stri
return "", fuse.Errno(syscall.EINVAL)
}
- glog.V(3).Infof("Readlink: %v/%v => %v", file.dir.Path, file.Name, file.entry.Attributes.SymlinkTarget)
+ glog.V(3).Infof("Readlink: %v/%v => %v", file.dir.FullPath(), file.Name, file.entry.Attributes.SymlinkTarget)
return file.entry.Attributes.SymlinkTarget, nil
diff --git a/weed/filesys/dir_rename.go b/weed/filesys/dir_rename.go
index 4eb3c15b5..ea40f5c31 100644
--- a/weed/filesys/dir_rename.go
+++ b/weed/filesys/dir_rename.go
@@ -3,9 +3,9 @@ package filesys
import (
"context"
- "github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
"github.com/seaweedfs/fuse"
"github.com/seaweedfs/fuse/fs"
)
@@ -13,20 +13,24 @@ import (
func (dir *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDirectory fs.Node) error {
newDir := newDirectory.(*Dir)
- glog.V(4).Infof("dir Rename %s/%s => %s/%s", dir.Path, req.OldName, newDir.Path, req.NewName)
- err := dir.wfs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error {
+ newPath := util.NewFullPath(newDir.FullPath(), req.NewName)
+ oldPath := util.NewFullPath(dir.FullPath(), req.OldName)
+
+ glog.V(4).Infof("dir Rename %s => %s", oldPath, newPath)
+
+ err := dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.AtomicRenameEntryRequest{
- OldDirectory: dir.Path,
+ OldDirectory: dir.FullPath(),
OldName: req.OldName,
- NewDirectory: newDir.Path,
+ NewDirectory: newDir.FullPath(),
NewName: req.NewName,
}
- _, err := client.AtomicRenameEntry(ctx, request)
+ _, err := client.AtomicRenameEntry(context.Background(), request)
if err != nil {
- glog.V(0).Infof("dir Rename %s/%s => %s/%s : %v", dir.Path, req.OldName, newDir.Path, req.NewName, err)
+ glog.V(0).Infof("dir Rename %s => %s : %v", oldPath, newPath, err)
return fuse.EIO
}
@@ -35,28 +39,12 @@ func (dir *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDirector
})
if err == nil {
- newPath := filer2.NewFullPath(newDir.Path, req.NewName)
- oldPath := filer2.NewFullPath(dir.Path, req.OldName)
dir.wfs.cacheDelete(newPath)
dir.wfs.cacheDelete(oldPath)
- oldFileNode := dir.wfs.getNode(oldPath, func() fs.Node {
- return nil
- })
- newDirNode := dir.wfs.getNode(filer2.FullPath(dir.Path), func() fs.Node {
- return nil
- })
- dir.wfs.forgetNode(newPath)
- dir.wfs.forgetNode(oldPath)
- if oldFileNode != nil && newDirNode != nil {
- oldFile := oldFileNode.(*File)
- oldFile.Name = req.NewName
- oldFile.dir = newDirNode.(*Dir)
- dir.wfs.getNode(newPath, func() fs.Node {
- return oldFile
- })
+ // fmt.Printf("rename path: %v => %v\n", oldPath, newPath)
+ dir.wfs.fsNodeCache.Move(oldPath, newPath)
- }
}
return err
diff --git a/weed/filesys/dir_test.go b/weed/filesys/dir_test.go
new file mode 100644
index 000000000..49c76eb5e
--- /dev/null
+++ b/weed/filesys/dir_test.go
@@ -0,0 +1,34 @@
+package filesys
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestDirPath(t *testing.T) {
+
+ p := &Dir{name: "/some"}
+ p = &Dir{name: "path", parent: p}
+ p = &Dir{name: "to", parent: p}
+ p = &Dir{name: "a", parent: p}
+ p = &Dir{name: "file", parent: p}
+
+ assert.Equal(t, "/some/path/to/a/file", p.FullPath())
+
+ p = &Dir{name: "/some"}
+ assert.Equal(t, "/some", p.FullPath())
+
+ p = &Dir{name: "/"}
+ assert.Equal(t, "/", p.FullPath())
+
+ p = &Dir{name: "/"}
+ p = &Dir{name: "path", parent: p}
+ assert.Equal(t, "/path", p.FullPath())
+
+ p = &Dir{name: "/"}
+ p = &Dir{name: "path", parent: p}
+ p = &Dir{name: "to", parent: p}
+ assert.Equal(t, "/path/to", p.FullPath())
+
+}
diff --git a/weed/filesys/dirty_page.go b/weed/filesys/dirty_page.go
index 5ff128323..45224b3e7 100644
--- a/weed/filesys/dirty_page.go
+++ b/weed/filesys/dirty_page.go
@@ -15,9 +15,11 @@ import (
)
type ContinuousDirtyPages struct {
- intervals *ContinuousIntervals
- f *File
- lock sync.Mutex
+ intervals *ContinuousIntervals
+ f *File
+ lock sync.Mutex
+ collection string
+ replication string
}
func newDirtyPages(file *File) *ContinuousDirtyPages {
@@ -32,7 +34,7 @@ func (pages *ContinuousDirtyPages) releaseResource() {
var counter = int32(0)
-func (pages *ContinuousDirtyPages) AddPage(ctx context.Context, offset int64, data []byte) (chunks []*filer_pb.FileChunk, err error) {
+func (pages *ContinuousDirtyPages) AddPage(offset int64, data []byte) (chunks []*filer_pb.FileChunk, err error) {
pages.lock.Lock()
defer pages.lock.Unlock()
@@ -41,7 +43,7 @@ func (pages *ContinuousDirtyPages) AddPage(ctx context.Context, offset int64, da
if len(data) > int(pages.f.wfs.option.ChunkSizeLimit) {
// this is more than what buffer can hold.
- return pages.flushAndSave(ctx, offset, data)
+ return pages.flushAndSave(offset, data)
}
pages.intervals.AddInterval(data, offset)
@@ -50,7 +52,7 @@ func (pages *ContinuousDirtyPages) AddPage(ctx context.Context, offset int64, da
var hasSavedData bool
if pages.intervals.TotalSize() > pages.f.wfs.option.ChunkSizeLimit {
- chunk, hasSavedData, err = pages.saveExistingLargestPageToStorage(ctx)
+ chunk, hasSavedData, err = pages.saveExistingLargestPageToStorage()
if hasSavedData {
chunks = append(chunks, chunk)
}
@@ -59,13 +61,13 @@ func (pages *ContinuousDirtyPages) AddPage(ctx context.Context, offset int64, da
return
}
-func (pages *ContinuousDirtyPages) flushAndSave(ctx context.Context, offset int64, data []byte) (chunks []*filer_pb.FileChunk, err error) {
+func (pages *ContinuousDirtyPages) flushAndSave(offset int64, data []byte) (chunks []*filer_pb.FileChunk, err error) {
var chunk *filer_pb.FileChunk
var newChunks []*filer_pb.FileChunk
// flush existing
- if newChunks, err = pages.saveExistingPagesToStorage(ctx); err == nil {
+ if newChunks, err = pages.saveExistingPagesToStorage(); err == nil {
if newChunks != nil {
chunks = append(chunks, newChunks...)
}
@@ -74,35 +76,35 @@ func (pages *ContinuousDirtyPages) flushAndSave(ctx context.Context, offset int6
}
// flush the new page
- if chunk, err = pages.saveToStorage(ctx, bytes.NewReader(data), offset, int64(len(data))); err == nil {
+ if chunk, err = pages.saveToStorage(bytes.NewReader(data), offset, int64(len(data))); err == nil {
if chunk != nil {
- glog.V(4).Infof("%s/%s flush big request [%d,%d) to %s", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size), chunk.FileId)
+ glog.V(4).Infof("%s/%s flush big request [%d,%d) to %s", pages.f.dir.FullPath(), pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size), chunk.FileId)
chunks = append(chunks, chunk)
}
} else {
- glog.V(0).Infof("%s/%s failed to flush2 [%d,%d): %v", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size), err)
+ glog.V(0).Infof("%s/%s failed to flush2 [%d,%d): %v", pages.f.dir.FullPath(), pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size), err)
return
}
return
}
-func (pages *ContinuousDirtyPages) FlushToStorage(ctx context.Context) (chunks []*filer_pb.FileChunk, err error) {
+func (pages *ContinuousDirtyPages) FlushToStorage() (chunks []*filer_pb.FileChunk, err error) {
pages.lock.Lock()
defer pages.lock.Unlock()
- return pages.saveExistingPagesToStorage(ctx)
+ return pages.saveExistingPagesToStorage()
}
-func (pages *ContinuousDirtyPages) saveExistingPagesToStorage(ctx context.Context) (chunks []*filer_pb.FileChunk, err error) {
+func (pages *ContinuousDirtyPages) saveExistingPagesToStorage() (chunks []*filer_pb.FileChunk, err error) {
var hasSavedData bool
var chunk *filer_pb.FileChunk
for {
- chunk, hasSavedData, err = pages.saveExistingLargestPageToStorage(ctx)
+ chunk, hasSavedData, err = pages.saveExistingLargestPageToStorage()
if !hasSavedData {
return chunks, err
}
@@ -116,31 +118,35 @@ func (pages *ContinuousDirtyPages) saveExistingPagesToStorage(ctx context.Contex
}
-func (pages *ContinuousDirtyPages) saveExistingLargestPageToStorage(ctx context.Context) (chunk *filer_pb.FileChunk, hasSavedData bool, err error) {
+func (pages *ContinuousDirtyPages) saveExistingLargestPageToStorage() (chunk *filer_pb.FileChunk, hasSavedData bool, err error) {
maxList := pages.intervals.RemoveLargestIntervalLinkedList()
if maxList == nil {
return nil, false, nil
}
- chunk, err = pages.saveToStorage(ctx, maxList.ToReader(), maxList.Offset(), maxList.Size())
- if err == nil {
- hasSavedData = true
- glog.V(3).Infof("%s saveToStorage [%d,%d) %s", pages.f.fullpath(), maxList.Offset(), maxList.Offset()+maxList.Size(), chunk.FileId)
- } else {
- glog.V(0).Infof("%s saveToStorage [%d,%d): %v", pages.f.fullpath(), maxList.Offset(), maxList.Offset()+maxList.Size(), err)
- return
+ for {
+ chunk, err = pages.saveToStorage(maxList.ToReader(), maxList.Offset(), maxList.Size())
+ if err == nil {
+ hasSavedData = true
+ glog.V(3).Infof("%s saveToStorage [%d,%d) %s", pages.f.fullpath(), maxList.Offset(), maxList.Offset()+maxList.Size(), chunk.FileId)
+ return
+ } else {
+ glog.V(0).Infof("%s saveToStorage [%d,%d): %v", pages.f.fullpath(), maxList.Offset(), maxList.Offset()+maxList.Size(), err)
+ time.Sleep(5 * time.Second)
+ }
}
- return
}
-func (pages *ContinuousDirtyPages) saveToStorage(ctx context.Context, reader io.Reader, offset int64, size int64) (*filer_pb.FileChunk, error) {
+func (pages *ContinuousDirtyPages) saveToStorage(reader io.Reader, offset int64, size int64) (*filer_pb.FileChunk, error) {
var fileId, host string
var auth security.EncodedJwt
- if err := pages.f.wfs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error {
+ dir, _ := pages.f.fullpath().DirAndName()
+
+ if err := pages.f.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.AssignVolumeRequest{
Count: 1,
@@ -148,15 +154,21 @@ func (pages *ContinuousDirtyPages) saveToStorage(ctx context.Context, reader io.
Collection: pages.f.wfs.option.Collection,
TtlSec: pages.f.wfs.option.TtlSec,
DataCenter: pages.f.wfs.option.DataCenter,
+ ParentPath: dir,
}
- resp, err := client.AssignVolume(ctx, request)
+ resp, err := client.AssignVolume(context.Background(), request)
if err != nil {
glog.V(0).Infof("assign volume failure %v: %v", request, err)
return err
}
+ if resp.Error != "" {
+ return fmt.Errorf("assign volume failure %v: %v", request, resp.Error)
+ }
fileId, host, auth = resp.FileId, resp.Url, security.EncodedJwt(resp.Auth)
+ host = pages.f.wfs.AdjustedUrl(host)
+ pages.collection, pages.replication = resp.Collection, resp.Replication
return nil
}); err != nil {
@@ -164,7 +176,7 @@ func (pages *ContinuousDirtyPages) saveToStorage(ctx context.Context, reader io.
}
fileUrl := fmt.Sprintf("http://%s/%s", host, fileId)
- uploadResult, err := operation.Upload(fileUrl, pages.f.Name, reader, false, "", nil, auth)
+ uploadResult, err, data := operation.Upload(fileUrl, pages.f.Name, pages.f.wfs.option.Cipher, reader, false, "", nil, auth)
if err != nil {
glog.V(0).Infof("upload data %v to %s: %v", pages.f.Name, fileUrl, err)
return nil, fmt.Errorf("upload data: %v", err)
@@ -173,14 +185,9 @@ func (pages *ContinuousDirtyPages) saveToStorage(ctx context.Context, reader io.
glog.V(0).Infof("upload failure %v to %s: %v", pages.f.Name, fileUrl, err)
return nil, fmt.Errorf("upload result: %v", uploadResult.Error)
}
+ pages.f.wfs.chunkCache.SetChunk(fileId, data)
- return &filer_pb.FileChunk{
- FileId: fileId,
- Offset: offset,
- Size: uint64(size),
- Mtime: time.Now().UnixNano(),
- ETag: uploadResult.ETag,
- }, nil
+ return uploadResult.ToPbFileChunk(fileId, offset), nil
}
@@ -197,7 +204,7 @@ func min(x, y int64) int64 {
return y
}
-func (pages *ContinuousDirtyPages) ReadDirtyData(ctx context.Context, data []byte, startOffset int64) (offset int64, size int) {
+func (pages *ContinuousDirtyPages) ReadDirtyData(data []byte, startOffset int64) (offset int64, size int) {
pages.lock.Lock()
defer pages.lock.Unlock()
diff --git a/weed/filesys/dirty_page_interval_test.go b/weed/filesys/dirty_page_interval_test.go
index 184be2f3b..ab3b37b7c 100644
--- a/weed/filesys/dirty_page_interval_test.go
+++ b/weed/filesys/dirty_page_interval_test.go
@@ -35,6 +35,23 @@ func TestContinuousIntervals_AddIntervalFullOverwrite(t *testing.T) {
c := &ContinuousIntervals{}
+ // 1,
+ c.AddInterval(getBytes(1, 1), 0)
+ // _, 2,
+ c.AddInterval(getBytes(2, 1), 1)
+ // _, _, 3, 3, 3
+ c.AddInterval(getBytes(3, 3), 2)
+ // _, _, _, 4, 4, 4
+ c.AddInterval(getBytes(4, 3), 3)
+
+ expectedData(t, c, 0, 1, 2, 3, 4, 4, 4)
+
+}
+
+func TestContinuousIntervals_RealCase1(t *testing.T) {
+
+ c := &ContinuousIntervals{}
+
// 25,
c.AddInterval(getBytes(25, 1), 0)
// _, _, _, _, 23, 23
diff --git a/weed/filesys/file.go b/weed/filesys/file.go
index eccef4e58..bafbd7cc8 100644
--- a/weed/filesys/file.go
+++ b/weed/filesys/file.go
@@ -2,6 +2,7 @@ package filesys
import (
"context"
+ "io"
"os"
"sort"
"time"
@@ -9,6 +10,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
"github.com/seaweedfs/fuse"
"github.com/seaweedfs/fuse/fs"
)
@@ -32,10 +34,11 @@ type File struct {
entry *filer_pb.Entry
entryViewCache []filer2.VisibleInterval
isOpen int
+ reader io.ReaderAt
}
-func (file *File) fullpath() filer2.FullPath {
- return filer2.NewFullPath(file.dir.Path, file.Name)
+func (file *File) fullpath() util.FullPath {
+ return util.NewFullPath(file.dir.FullPath(), file.Name)
}
func (file *File) Attr(ctx context.Context, attr *fuse.Attr) error {
@@ -69,7 +72,7 @@ func (file *File) Attr(ctx context.Context, attr *fuse.Attr) error {
func (file *File) Getxattr(ctx context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error {
- // glog.V(4).Infof("file Getxattr %s", file.fullpath())
+ glog.V(4).Infof("file Getxattr %s", file.fullpath())
if err := file.maybeLoadEntry(ctx); err != nil {
return err
@@ -119,6 +122,7 @@ func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *f
}
file.entry.Chunks = chunks
file.entryViewCache = nil
+ file.reader = nil
}
file.entry.Attributes.FileSize = req.Size
}
@@ -148,7 +152,7 @@ func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *f
file.wfs.cacheDelete(file.fullpath())
- return file.saveEntry(ctx)
+ return file.saveEntry()
}
@@ -166,7 +170,7 @@ func (file *File) Setxattr(ctx context.Context, req *fuse.SetxattrRequest) error
file.wfs.cacheDelete(file.fullpath())
- return file.saveEntry(ctx)
+ return file.saveEntry()
}
@@ -184,7 +188,7 @@ func (file *File) Removexattr(ctx context.Context, req *fuse.RemovexattrRequest)
file.wfs.cacheDelete(file.fullpath())
- return file.saveEntry(ctx)
+ return file.saveEntry()
}
@@ -207,22 +211,22 @@ func (file *File) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, res
func (file *File) Fsync(ctx context.Context, req *fuse.FsyncRequest) error {
// fsync works at OS level
// write the file chunks to the filerGrpcAddress
- glog.V(3).Infof("%s/%s fsync file %+v", file.dir.Path, file.Name, req)
+ glog.V(3).Infof("%s/%s fsync file %+v", file.dir.FullPath(), file.Name, req)
return nil
}
func (file *File) Forget() {
- glog.V(3).Infof("Forget file %s/%s", file.dir.Path, file.Name)
-
- file.wfs.forgetNode(filer2.NewFullPath(file.dir.Path, file.Name))
-
+ t := util.NewFullPath(file.dir.FullPath(), file.Name)
+ glog.V(3).Infof("Forget file %s", t)
+ file.wfs.fsNodeCache.DeleteFsNode(t)
}
func (file *File) maybeLoadEntry(ctx context.Context) error {
if file.entry == nil || file.isOpen <= 0 {
- entry, err := file.wfs.maybeLoadEntry(ctx, file.dir.Path, file.Name)
+ entry, err := file.wfs.maybeLoadEntry(file.dir.FullPath(), file.Name)
if err != nil {
+ glog.V(3).Infof("maybeLoadEntry file %s/%s: %v", file.dir.FullPath(), file.Name, err)
return err
}
if entry != nil {
@@ -246,6 +250,8 @@ func (file *File) addChunks(chunks []*filer_pb.FileChunk) {
newVisibles = t
}
+ file.reader = nil
+
glog.V(3).Infof("%s existing %d chunks adds %d more", file.fullpath(), len(file.entry.Chunks), len(chunks))
file.entry.Chunks = append(file.entry.Chunks, chunks...)
@@ -254,23 +260,28 @@ func (file *File) addChunks(chunks []*filer_pb.FileChunk) {
func (file *File) setEntry(entry *filer_pb.Entry) {
file.entry = entry
file.entryViewCache = filer2.NonOverlappingVisibleIntervals(file.entry.Chunks)
+ file.reader = nil
}
-func (file *File) saveEntry(ctx context.Context) error {
- return file.wfs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error {
+func (file *File) saveEntry() error {
+ return file.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.UpdateEntryRequest{
- Directory: file.dir.Path,
+ Directory: file.dir.FullPath(),
Entry: file.entry,
}
glog.V(1).Infof("save file entry: %v", request)
- _, err := client.UpdateEntry(ctx, request)
+ _, err := client.UpdateEntry(context.Background(), request)
if err != nil {
- glog.V(0).Infof("UpdateEntry file %s/%s: %v", file.dir.Path, file.Name, err)
+ glog.V(0).Infof("UpdateEntry file %s/%s: %v", file.dir.FullPath(), file.Name, err)
return fuse.EIO
}
+ if file.wfs.option.AsyncMetaDataCaching {
+ file.wfs.metaCache.UpdateEntry(context.Background(), filer2.FromPbEntry(request.Directory, request.Entry))
+ }
+
return nil
})
}
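
The new file.reader field caches an io.ReaderAt built over the full chunk view, and each mutation path above (Setattr truncation, addChunks, setEntry) resets it to nil so the next read rebuilds it. The invalidation idea in isolation, as a sketch with a hypothetical build function rather than the actual wfs wiring:

    import "io"

    // lazyReader rebuilds its io.ReaderAt on the first read after an invalidation.
    type lazyReader struct {
    	r     io.ReaderAt
    	build func() io.ReaderAt // e.g. wraps filer2.NewChunkReaderAtFromClient
    }

    func (l *lazyReader) ReadAt(p []byte, off int64) (int, error) {
    	if l.r == nil {
    		l.r = l.build()
    	}
    	return l.r.ReadAt(p, off)
    }

    // invalidate is called whenever the chunk list changes.
    func (l *lazyReader) invalidate() { l.r = nil }
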
diff --git a/weed/filesys/filehandle.go b/weed/filesys/filehandle.go
index cf253a7ed..372d742ea 100644
--- a/weed/filesys/filehandle.go
+++ b/weed/filesys/filehandle.go
@@ -3,12 +3,10 @@ package filesys
import (
"context"
"fmt"
- "mime"
- "path"
+ "math"
+ "net/http"
"time"
- "github.com/gabriel-vasile/mimetype"
-
"github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
@@ -28,15 +26,20 @@ type FileHandle struct {
NodeId fuse.NodeID // file or directory the request is about
Uid uint32 // user ID of process making request
Gid uint32 // group ID of process making request
+
}
func newFileHandle(file *File, uid, gid uint32) *FileHandle {
- return &FileHandle{
+ fh := &FileHandle{
f: file,
dirtyPages: newDirtyPages(file),
Uid: uid,
Gid: gid,
}
+ if fh.f.entry != nil {
+ fh.f.entry.Attributes.FileSize = filer2.TotalSize(fh.f.entry.Chunks)
+ }
+ return fh
}
var _ = fs.Handle(&FileHandle{})
@@ -53,9 +56,9 @@ func (fh *FileHandle) Read(ctx context.Context, req *fuse.ReadRequest, resp *fus
buff := make([]byte, req.Size)
- totalRead, err := fh.readFromChunks(ctx, buff, req.Offset)
+ totalRead, err := fh.readFromChunks(buff, req.Offset)
if err == nil {
- dirtyOffset, dirtySize := fh.readFromDirtyPages(ctx, buff, req.Offset)
+ dirtyOffset, dirtySize := fh.readFromDirtyPages(buff, req.Offset)
if totalRead+req.Offset < dirtyOffset+int64(dirtySize) {
totalRead = dirtyOffset + int64(dirtySize) - req.Offset
}
@@ -71,11 +74,11 @@ func (fh *FileHandle) Read(ctx context.Context, req *fuse.ReadRequest, resp *fus
return err
}
-func (fh *FileHandle) readFromDirtyPages(ctx context.Context, buff []byte, startOffset int64) (offset int64, size int) {
- return fh.dirtyPages.ReadDirtyData(ctx, buff, startOffset)
+func (fh *FileHandle) readFromDirtyPages(buff []byte, startOffset int64) (offset int64, size int) {
+ return fh.dirtyPages.ReadDirtyData(buff, startOffset)
}
-func (fh *FileHandle) readFromChunks(ctx context.Context, buff []byte, offset int64) (int64, error) {
+func (fh *FileHandle) readFromChunks(buff []byte, offset int64) (int64, error) {
// this value should come from the filer instead of the old f
if len(fh.f.entry.Chunks) == 0 {
@@ -85,43 +88,46 @@ func (fh *FileHandle) readFromChunks(ctx context.Context, buff []byte, offset in
if fh.f.entryViewCache == nil {
fh.f.entryViewCache = filer2.NonOverlappingVisibleIntervals(fh.f.entry.Chunks)
+ fh.f.reader = nil
}
- chunkViews := filer2.ViewFromVisibleIntervals(fh.f.entryViewCache, offset, len(buff))
+ if fh.f.reader == nil {
+ chunkViews := filer2.ViewFromVisibleIntervals(fh.f.entryViewCache, 0, math.MaxInt32)
+ fh.f.reader = filer2.NewChunkReaderAtFromClient(fh.f.wfs, chunkViews, fh.f.wfs.chunkCache)
+ }
- totalRead, err := filer2.ReadIntoBuffer(ctx, fh.f.wfs, fh.f.fullpath(), buff, chunkViews, offset)
+ totalRead, err := fh.f.reader.ReadAt(buff, offset)
if err != nil {
glog.Errorf("file handle read %s: %v", fh.f.fullpath(), err)
}
- return totalRead, err
+ // glog.V(0).Infof("file handle read %s [%d,%d] %d : %v", fh.f.fullpath(), offset, offset+int64(totalRead), totalRead, err)
+
+ return int64(totalRead), err
}
// Write to the file handle
func (fh *FileHandle) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) error {
// write the request to volume servers
+ data := make([]byte, len(req.Data))
+ copy(data, req.Data)
- fh.f.entry.Attributes.FileSize = uint64(max(req.Offset+int64(len(req.Data)), int64(fh.f.entry.Attributes.FileSize)))
+ fh.f.entry.Attributes.FileSize = uint64(max(req.Offset+int64(len(data)), int64(fh.f.entry.Attributes.FileSize)))
// glog.V(0).Infof("%v write [%d,%d)", fh.f.fullpath(), req.Offset, req.Offset+int64(len(req.Data)))
- chunks, err := fh.dirtyPages.AddPage(ctx, req.Offset, req.Data)
+ chunks, err := fh.dirtyPages.AddPage(req.Offset, data)
if err != nil {
- glog.Errorf("%v write fh %d: [%d,%d): %v", fh.f.fullpath(), fh.handle, req.Offset, req.Offset+int64(len(req.Data)), err)
+ glog.Errorf("%v write fh %d: [%d,%d): %v", fh.f.fullpath(), fh.handle, req.Offset, req.Offset+int64(len(data)), err)
return fuse.EIO
}
- resp.Size = len(req.Data)
+ resp.Size = len(data)
if req.Offset == 0 {
// detect mime type
- detectedMIME := mimetype.Detect(req.Data)
- fh.contentType = detectedMIME.String()
- if ext := path.Ext(fh.f.Name); ext != detectedMIME.Extension() {
- fh.contentType = mime.TypeByExtension(ext)
- }
-
+ fh.contentType = http.DetectContentType(data)
fh.dirtyMetadata = true
}
@@ -145,6 +151,8 @@ func (fh *FileHandle) Release(ctx context.Context, req *fuse.ReleaseRequest) err
fh.dirtyPages.releaseResource()
fh.f.wfs.ReleaseHandle(fh.f.fullpath(), fuse.HandleID(fh.handle))
}
+ fh.f.entryViewCache = nil
+ fh.f.reader = nil
return nil
}
@@ -154,14 +162,14 @@ func (fh *FileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) error {
// send the data to the OS
glog.V(4).Infof("%s fh %d flush %v", fh.f.fullpath(), fh.handle, req)
- chunks, err := fh.dirtyPages.FlushToStorage(ctx)
+ chunks, err := fh.dirtyPages.FlushToStorage()
if err != nil {
glog.Errorf("flush %s: %v", fh.f.fullpath(), err)
return fuse.EIO
}
- fh.f.addChunks(chunks)
if len(chunks) > 0 {
+ fh.f.addChunks(chunks)
fh.dirtyMetadata = true
}
@@ -169,7 +177,7 @@ func (fh *FileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) error {
return nil
}
- err = fh.f.wfs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error {
+ err = fh.f.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
if fh.f.entry.Attributes != nil {
fh.f.entry.Attributes.Mime = fh.contentType
@@ -177,11 +185,13 @@ func (fh *FileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) error {
fh.f.entry.Attributes.Gid = req.Gid
fh.f.entry.Attributes.Mtime = time.Now().Unix()
fh.f.entry.Attributes.Crtime = time.Now().Unix()
- fh.f.entry.Attributes.FileMode = uint32(0777 &^ fh.f.wfs.option.Umask)
+ fh.f.entry.Attributes.FileMode = uint32(0666 &^ fh.f.wfs.option.Umask)
+ fh.f.entry.Attributes.Collection = fh.dirtyPages.collection
+ fh.f.entry.Attributes.Replication = fh.dirtyPages.replication
}
request := &filer_pb.CreateEntryRequest{
- Directory: fh.f.dir.Path,
+ Directory: fh.f.dir.FullPath(),
Entry: fh.f.entry,
}
@@ -194,12 +204,16 @@ func (fh *FileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) error {
fh.f.entry.Chunks = chunks
// fh.f.entryViewCache = nil
- if err := filer_pb.CreateEntry(ctx, client, request); err != nil {
- glog.Errorf("update fh: %v", err)
- return fmt.Errorf("update fh: %v", err)
+ if err := filer_pb.CreateEntry(client, request); err != nil {
+ glog.Errorf("fh flush create %s: %v", fh.f.fullpath(), err)
+ return fmt.Errorf("fh flush create %s: %v", fh.f.fullpath(), err)
}
- fh.f.wfs.deleteFileChunks(ctx, garbages)
+ if fh.f.wfs.option.AsyncMetaDataCaching {
+ fh.f.wfs.metaCache.InsertEntry(context.Background(), filer2.FromPbEntry(request.Directory, request.Entry))
+ }
+
+ fh.f.wfs.deleteFileChunks(garbages)
for i, chunk := range garbages {
glog.V(3).Infof("garbage %s chunks %d: %v [%d,%d)", fh.f.fullpath(), i, chunk.FileId, chunk.Offset, chunk.Offset+int64(chunk.Size))
}
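
Mime detection in Write now uses the standard library instead of the mimetype package plus an extension lookup; http.DetectContentType sniffs at most the first 512 bytes and always returns a valid type. A quick illustration:

    package main

    import (
    	"fmt"
    	"net/http"
    )

    func main() {
    	fmt.Println(http.DetectContentType([]byte("%PDF-1.4")))       // application/pdf
    	fmt.Println(http.DetectContentType([]byte{0xff, 0xd8, 0xff})) // image/jpeg
    	fmt.Println(http.DetectContentType([]byte("hello world")))    // text/plain; charset=utf-8
    }
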
diff --git a/weed/filesys/fscache.go b/weed/filesys/fscache.go
new file mode 100644
index 000000000..b146f0615
--- /dev/null
+++ b/weed/filesys/fscache.go
@@ -0,0 +1,207 @@
+package filesys
+
+import (
+ "sync"
+
+ "github.com/chrislusf/seaweedfs/weed/util"
+ "github.com/seaweedfs/fuse/fs"
+)
+
+type FsCache struct {
+ root *FsNode
+ sync.RWMutex
+}
+type FsNode struct {
+ parent *FsNode
+ node fs.Node
+ name string
+ childrenLock sync.RWMutex
+ children map[string]*FsNode
+}
+
+func newFsCache(root fs.Node) *FsCache {
+ return &FsCache{
+ root: &FsNode{
+ node: root,
+ },
+ }
+}
+
+func (c *FsCache) GetFsNode(path util.FullPath) fs.Node {
+
+ c.RLock()
+ defer c.RUnlock()
+
+ return c.doGetFsNode(path)
+}
+
+func (c *FsCache) doGetFsNode(path util.FullPath) fs.Node {
+ t := c.root
+ for _, p := range path.Split() {
+ t = t.findChild(p)
+ if t == nil {
+ return nil
+ }
+ }
+ return t.node
+}
+
+func (c *FsCache) SetFsNode(path util.FullPath, node fs.Node) {
+
+ c.Lock()
+ defer c.Unlock()
+
+ c.doSetFsNode(path, node)
+}
+
+func (c *FsCache) doSetFsNode(path util.FullPath, node fs.Node) {
+ t := c.root
+ for _, p := range path.Split() {
+ t = t.ensureChild(p)
+ }
+ t.node = node
+}
+
+func (c *FsCache) EnsureFsNode(path util.FullPath, genNodeFn func() fs.Node) fs.Node {
+
+ c.Lock()
+ defer c.Unlock()
+
+ t := c.doGetFsNode(path)
+ if t != nil {
+ return t
+ }
+ t = genNodeFn()
+ c.doSetFsNode(path, t)
+ return t
+}
+
+func (c *FsCache) DeleteFsNode(path util.FullPath) {
+
+ c.Lock()
+ defer c.Unlock()
+
+ t := c.root
+ for _, p := range path.Split() {
+ t = t.findChild(p)
+ if t == nil {
+ return
+ }
+ }
+ if t.parent != nil {
+ t.parent.disconnectChild(t)
+ }
+ t.deleteSelf()
+}
+
+// oldPath and newPath are full path including the new name
+func (c *FsCache) Move(oldPath util.FullPath, newPath util.FullPath) *FsNode {
+
+ c.Lock()
+ defer c.Unlock()
+
+ // find old node
+ src := c.root
+ for _, p := range oldPath.Split() {
+ src = src.findChild(p)
+ if src == nil {
+ return src
+ }
+ }
+ if src.parent != nil {
+ src.parent.disconnectChild(src)
+ }
+
+ // find new node
+ target := c.root
+ for _, p := range newPath.Split() {
+ target = target.ensureChild(p)
+ }
+ parent := target.parent
+ src.name = target.name
+ if dir, ok := src.node.(*Dir); ok {
+ dir.name = target.name // target carries only the new name; it is a placeholder, not a real Dir
+ }
+ if f, ok := src.node.(*File); ok {
+ f.Name = target.name
+ if f.entry != nil {
+ f.entry.Name = f.Name
+ }
+ }
+ parent.disconnectChild(target)
+
+ target.deleteSelf()
+
+ src.connectToParent(parent)
+
+ return src
+}
+
+func (n *FsNode) connectToParent(parent *FsNode) {
+ n.parent = parent
+ oldNode := parent.findChild(n.name)
+ if oldNode != nil {
+ oldNode.deleteSelf()
+ }
+ if dir, ok := n.node.(*Dir); ok {
+ dir.parent = parent.node.(*Dir)
+ }
+ if f, ok := n.node.(*File); ok {
+ f.dir = parent.node.(*Dir)
+ }
+ n.childrenLock.Lock()
+ parent.children[n.name] = n
+ n.childrenLock.Unlock()
+}
+
+func (n *FsNode) findChild(name string) *FsNode {
+ n.childrenLock.RLock()
+ defer n.childrenLock.RUnlock()
+
+ child, found := n.children[name]
+ if found {
+ return child
+ }
+ return nil
+}
+
+func (n *FsNode) ensureChild(name string) *FsNode {
+ n.childrenLock.Lock()
+ defer n.childrenLock.Unlock()
+
+ if n.children == nil {
+ n.children = make(map[string]*FsNode)
+ }
+ child, found := n.children[name]
+ if found {
+ return child
+ }
+ t := &FsNode{
+ parent: n,
+ node: nil,
+ name: name,
+ children: nil,
+ }
+ n.children[name] = t
+ return t
+}
+
+func (n *FsNode) disconnectChild(child *FsNode) {
+ n.childrenLock.Lock()
+ delete(n.children, child.name)
+ n.childrenLock.Unlock()
+ child.parent = nil
+}
+
+func (n *FsNode) deleteSelf() {
+ n.childrenLock.Lock()
+ for _, child := range n.children {
+ child.deleteSelf()
+ }
+ n.children = nil
+ n.childrenLock.Unlock()
+
+ n.node = nil
+ n.parent = nil
+
+}
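
FsCache replaces the old flat inode-to-node map with a path trie, so deleting or moving a directory covers its whole subtree in one operation. Callers get-or-create nodes through EnsureFsNode, whose generator runs only on a cache miss and under the cache's write lock; a typical lookup-side use (a sketch based on the Dir methods above):

    node := wfs.fsNodeCache.EnsureFsNode(fullFilePath, func() fs.Node {
    	return dir.newDirectory(fullFilePath, entry) // only invoked on a miss
    })
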
diff --git a/weed/filesys/fscache_test.go b/weed/filesys/fscache_test.go
new file mode 100644
index 000000000..67f9aacc8
--- /dev/null
+++ b/weed/filesys/fscache_test.go
@@ -0,0 +1,96 @@
+package filesys
+
+import (
+ "testing"
+
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+func TestPathSplit(t *testing.T) {
+ parts := util.FullPath("/").Split()
+ if len(parts) != 0 {
+ t.Errorf("expecting an empty list, but getting %d", len(parts))
+ }
+
+ parts = util.FullPath("/readme.md").Split()
+ if len(parts) != 1 {
+ t.Errorf("expecting an empty list, but getting %d", len(parts))
+ }
+
+}
+
+func TestFsCache(t *testing.T) {
+
+ cache := newFsCache(nil)
+
+ x := cache.GetFsNode(util.FullPath("/y/x"))
+ if x != nil {
+ t.Errorf("wrong node!")
+ }
+
+ p := util.FullPath("/a/b/c")
+ cache.SetFsNode(p, &File{Name: "cc"})
+ tNode := cache.GetFsNode(p)
+ tFile := tNode.(*File)
+ if tFile.Name != "cc" {
+ t.Errorf("expecting a FsNode")
+ }
+
+ cache.SetFsNode(util.FullPath("/a/b/d"), &File{Name: "dd"})
+ cache.SetFsNode(util.FullPath("/a/b/e"), &File{Name: "ee"})
+ cache.SetFsNode(util.FullPath("/a/b/f"), &File{Name: "ff"})
+ cache.SetFsNode(util.FullPath("/z"), &File{Name: "zz"})
+ cache.SetFsNode(util.FullPath("/a"), &File{Name: "aa"})
+
+ b := cache.GetFsNode(util.FullPath("/a/b"))
+ if b != nil {
+ t.Errorf("unexpected node!")
+ }
+
+ a := cache.GetFsNode(util.FullPath("/a"))
+ if a == nil {
+ t.Errorf("missing node!")
+ }
+
+ cache.DeleteFsNode(util.FullPath("/a"))
+ if b != nil {
+ t.Errorf("unexpected node!")
+ }
+
+ a = cache.GetFsNode(util.FullPath("/a"))
+ if a != nil {
+ t.Errorf("wrong DeleteFsNode!")
+ }
+
+ z := cache.GetFsNode(util.FullPath("/z"))
+ if z == nil {
+ t.Errorf("missing node!")
+ }
+
+ y := cache.GetFsNode(util.FullPath("/x/y"))
+ if y != nil {
+ t.Errorf("wrong node!")
+ }
+
+}
+
+func TestFsCacheMove(t *testing.T) {
+
+ cache := newFsCache(nil)
+
+ cache.SetFsNode(util.FullPath("/a/b/d"), &File{Name: "dd"})
+ cache.SetFsNode(util.FullPath("/a/b/e"), &File{Name: "ee"})
+ cache.SetFsNode(util.FullPath("/z"), &File{Name: "zz"})
+ cache.SetFsNode(util.FullPath("/a"), &File{Name: "aa"})
+
+ cache.Move(util.FullPath("/a/b"), util.FullPath("/z/x"))
+
+ d := cache.GetFsNode(util.FullPath("/z/x/d"))
+ if d == nil {
+ t.Errorf("unexpected nil node!")
+ }
+ if d.(*File).Name != "dd" {
+ t.Errorf("unexpected non dd node!")
+ }
+
+}
diff --git a/weed/filesys/meta_cache/cache_config.go b/weed/filesys/meta_cache/cache_config.go
new file mode 100644
index 000000000..e6593ebde
--- /dev/null
+++ b/weed/filesys/meta_cache/cache_config.go
@@ -0,0 +1,32 @@
+package meta_cache
+
+import "github.com/chrislusf/seaweedfs/weed/util"
+
+var (
+ _ = util.Configuration(&cacheConfig{})
+)
+
+// implementing util.Configuration
+type cacheConfig struct {
+ dir string
+}
+
+func (c cacheConfig) GetString(key string) string {
+ return c.dir
+}
+
+func (c cacheConfig) GetBool(key string) bool {
+ panic("implement me")
+}
+
+func (c cacheConfig) GetInt(key string) int {
+ panic("implement me")
+}
+
+func (c cacheConfig) GetStringSlice(key string) []string {
+ panic("implement me")
+}
+
+func (c cacheConfig) SetDefault(key string, value interface{}) {
+ panic("implement me")
+}
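
cacheConfig implements just enough of util.Configuration for the LevelDB store: GetString answers every key with the cache directory, and the accessors the store never calls simply panic. The wiring in openMetaStore (next file) therefore reduces to:

    store := &leveldb.LevelDBStore{}
    if err := store.Initialize(&cacheConfig{dir: dbFolder}, ""); err != nil {
    	glog.Fatalf("Failed to initialize metadata cache store: %v", err)
    }
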
diff --git a/weed/filesys/meta_cache/meta_cache.go b/weed/filesys/meta_cache/meta_cache.go
new file mode 100644
index 000000000..4c9090d42
--- /dev/null
+++ b/weed/filesys/meta_cache/meta_cache.go
@@ -0,0 +1,93 @@
+package meta_cache
+
+import (
+ "context"
+ "os"
+ "sync"
+
+ "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/filer2/leveldb"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+type MetaCache struct {
+ actualStore filer2.FilerStore
+ sync.RWMutex
+}
+
+func NewMetaCache(dbFolder string) *MetaCache {
+ return &MetaCache{
+ actualStore: openMetaStore(dbFolder),
+ }
+}
+
+func openMetaStore(dbFolder string) filer2.FilerStore {
+
+ os.RemoveAll(dbFolder)
+ os.MkdirAll(dbFolder, 0755)
+
+ store := &leveldb.LevelDBStore{}
+ config := &cacheConfig{
+ dir: dbFolder,
+ }
+
+ if err := store.Initialize(config, ""); err != nil {
+ glog.Fatalf("Failed to initialize metadata cache store for %s: %+v", store.GetName(), err)
+ }
+
+ return store
+
+}
+
+func (mc *MetaCache) InsertEntry(ctx context.Context, entry *filer2.Entry) error {
+ mc.Lock()
+ defer mc.Unlock()
+ return mc.actualStore.InsertEntry(ctx, entry)
+}
+
+func (mc *MetaCache) AtomicUpdateEntry(ctx context.Context, oldPath util.FullPath, newEntry *filer2.Entry) error {
+ mc.Lock()
+ defer mc.Unlock()
+ if oldPath != "" {
+ if err := mc.actualStore.DeleteEntry(ctx, oldPath); err != nil {
+ return err
+ }
+ }
+ if newEntry != nil {
+ if err := mc.actualStore.InsertEntry(ctx, newEntry); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (mc *MetaCache) UpdateEntry(ctx context.Context, entry *filer2.Entry) error {
+ mc.Lock()
+ defer mc.Unlock()
+ return mc.actualStore.UpdateEntry(ctx, entry)
+}
+
+func (mc *MetaCache) FindEntry(ctx context.Context, fp util.FullPath) (entry *filer2.Entry, err error) {
+ mc.RLock()
+ defer mc.RUnlock()
+ return mc.actualStore.FindEntry(ctx, fp)
+}
+
+func (mc *MetaCache) DeleteEntry(ctx context.Context, fp util.FullPath) (err error) {
+ mc.Lock()
+ defer mc.Unlock()
+ return mc.actualStore.DeleteEntry(ctx, fp)
+}
+
+func (mc *MetaCache) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int) ([]*filer2.Entry, error) {
+ mc.RLock()
+ defer mc.RUnlock()
+ return mc.actualStore.ListDirectoryEntries(ctx, dirPath, startFileName, includeStartFile, limit)
+}
+
+func (mc *MetaCache) Shutdown() {
+ mc.Lock()
+ defer mc.Unlock()
+ mc.actualStore.Shutdown()
+}
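
AtomicUpdateEntry exists for the subscription path: a single metadata event can carry an old entry, a new entry, or both (a rename), and the delete plus insert must happen under one lock. The three shapes, sketched:

    mc.AtomicUpdateEntry(ctx, "", newEntry)      // create: nothing to delete
    mc.AtomicUpdateEntry(ctx, oldPath, nil)      // delete: nothing to insert
    mc.AtomicUpdateEntry(ctx, oldPath, newEntry) // rename or in-place update
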
diff --git a/weed/filesys/meta_cache/meta_cache_init.go b/weed/filesys/meta_cache/meta_cache_init.go
new file mode 100644
index 000000000..58bf6862e
--- /dev/null
+++ b/weed/filesys/meta_cache/meta_cache_init.go
@@ -0,0 +1,21 @@
+package meta_cache
+
+import (
+ "context"
+
+ "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+func InitMetaCache(mc *MetaCache, client filer_pb.FilerClient, path string) error {
+ glog.V(0).Infof("synchronizing meta data ...")
+ filer_pb.TraverseBfs(client, util.FullPath(path), func(parentPath util.FullPath, pbEntry *filer_pb.Entry) {
+ entry := filer2.FromPbEntry(string(parentPath), pbEntry)
+ if err := mc.InsertEntry(context.Background(), entry); err != nil {
+ glog.V(0).Infof("read %s: %v", entry.FullPath, err)
+ }
+ })
+ return nil
+}
diff --git a/weed/filesys/meta_cache/meta_cache_subscribe.go b/weed/filesys/meta_cache/meta_cache_subscribe.go
new file mode 100644
index 000000000..2e411a48a
--- /dev/null
+++ b/weed/filesys/meta_cache/meta_cache_subscribe.go
@@ -0,0 +1,69 @@
+package meta_cache
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "time"
+
+ "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+func SubscribeMetaEvents(mc *MetaCache, client filer_pb.FilerClient, dir string, lastTsNs int64) error {
+
+ processEventFn := func(resp *filer_pb.SubscribeMetadataResponse) error {
+ message := resp.EventNotification
+ var oldPath util.FullPath
+ var newEntry *filer2.Entry
+ if message.OldEntry != nil {
+ oldPath = util.NewFullPath(resp.Directory, message.OldEntry.Name)
+ glog.V(4).Infof("deleting %v", oldPath)
+ }
+
+ if message.NewEntry != nil {
+ dir := resp.Directory
+ if message.NewParentPath != "" {
+ dir = message.NewParentPath
+ }
+ key := util.NewFullPath(dir, message.NewEntry.Name)
+ glog.V(4).Infof("creating %v", key)
+ newEntry = filer2.FromPbEntry(dir, message.NewEntry)
+ }
+ return mc.AtomicUpdateEntry(context.Background(), oldPath, newEntry)
+ }
+
+ for {
+ err := client.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+ stream, err := client.SubscribeMetadata(context.Background(), &filer_pb.SubscribeMetadataRequest{
+ ClientName: "mount",
+ PathPrefix: dir,
+ SinceNs: lastTsNs,
+ })
+ if err != nil {
+ return fmt.Errorf("subscribe: %v", err)
+ }
+
+ for {
+ resp, listenErr := stream.Recv()
+ if listenErr == io.EOF {
+ return nil
+ }
+ if listenErr != nil {
+ return listenErr
+ }
+
+ if err := processEventFn(resp); err != nil {
+ return fmt.Errorf("process %v: %v", resp, err)
+ }
+ lastTsNs = resp.TsNs
+ }
+ })
+ if err != nil {
+ glog.V(0).Infof("subscribing filer meta change: %v", err)
+ time.Sleep(time.Second)
+ }
+ }
+}
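
SubscribeMetaEvents never returns in normal operation: any stream error is logged, the client sleeps a second, and the subscription is re-established. Because lastTsNs advances as each event is applied, a reconnect resumes from the last processed event instead of replaying from mount time. The loop skeleton, with subscribeOnce as a hypothetical name for the inner WithFilerClient call:

    for {
    	err := subscribeOnce(lastTsNs) // blocks; returns on stream error or EOF
    	if err != nil {
    		glog.V(0).Infof("subscribing filer meta change: %v", err)
    		time.Sleep(time.Second)
    	}
    	// lastTsNs was advanced inside subscribeOnce for every applied event
    }
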
diff --git a/weed/filesys/wfs.go b/weed/filesys/wfs.go
index 4807e367b..67dd2a62c 100644
--- a/weed/filesys/wfs.go
+++ b/weed/filesys/wfs.go
@@ -5,17 +5,21 @@ import (
"fmt"
"math"
"os"
+ "path"
"strings"
"sync"
"time"
+ "github.com/chrislusf/seaweedfs/weed/util/grace"
"github.com/karlseguin/ccache"
"google.golang.org/grpc"
- "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/filesys/meta_cache"
"github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
+ "github.com/chrislusf/seaweedfs/weed/util/chunk_cache"
"github.com/seaweedfs/fuse"
"github.com/seaweedfs/fuse/fs"
)
@@ -28,6 +32,8 @@ type Option struct {
Replication string
TtlSec int32
ChunkSizeLimit int64
+ CacheDir string
+ CacheSizeMB int64
DataCenter string
DirListCacheLimit int64
EntryCacheTtl time.Duration
@@ -38,6 +44,11 @@ type Option struct {
MountMode os.FileMode
MountCtime time.Time
MountMtime time.Time
+
+ OutsideContainerClusterMode bool // whether the mount runs outside SeaweedFS containers
+ Cipher bool // whether encrypt data on volume server
+ AsyncMetaDataCaching bool // whether asynchronously cache meta data
+
}
var _ = fs.FS(&WFS{})
@@ -48,18 +59,18 @@ type WFS struct {
listDirectoryEntriesCache *ccache.Cache
// contains all open handles, protected by handlesLock
- handlesLock sync.Mutex
- handles []*FileHandle
- pathToHandleIndex map[filer2.FullPath]int
+ handlesLock sync.Mutex
+ handles map[uint64]*FileHandle
bufPool sync.Pool
stats statsCache
- // nodes, protected by nodesLock
- nodesLock sync.Mutex
- nodes map[uint64]fs.Node
- root fs.Node
+ root fs.Node
+ fsNodeCache *FsCache
+
+ chunkCache *chunk_cache.ChunkCache
+ metaCache *meta_cache.MetaCache
}
type statsCache struct {
filer_pb.StatisticsResponse
@@ -70,16 +81,34 @@ func NewSeaweedFileSystem(option *Option) *WFS {
wfs := &WFS{
option: option,
listDirectoryEntriesCache: ccache.New(ccache.Configure().MaxSize(option.DirListCacheLimit * 3).ItemsToPrune(100)),
- pathToHandleIndex: make(map[filer2.FullPath]int),
+ handles: make(map[uint64]*FileHandle),
bufPool: sync.Pool{
New: func() interface{} {
return make([]byte, option.ChunkSizeLimit)
},
},
- nodes: make(map[uint64]fs.Node),
+ }
+ if option.CacheSizeMB > 0 {
+ wfs.chunkCache = chunk_cache.NewChunkCache(256, option.CacheDir, option.CacheSizeMB)
+ grace.OnInterrupt(func() {
+ wfs.chunkCache.Shutdown()
+ })
+ }
+ if wfs.option.AsyncMetaDataCaching {
+ wfs.metaCache = meta_cache.NewMetaCache(path.Join(option.CacheDir, "meta"))
+ startTime := time.Now()
+ if err := meta_cache.InitMetaCache(wfs.metaCache, wfs, wfs.option.FilerMountRootPath); err != nil {
+ glog.V(0).Infof("failed to init meta cache: %v", err)
+ } else {
+ go meta_cache.SubscribeMetaEvents(wfs.metaCache, wfs, wfs.option.FilerMountRootPath, startTime.UnixNano())
+ grace.OnInterrupt(func() {
+ wfs.metaCache.Shutdown()
+ })
+ }
}
- wfs.root = &Dir{Path: wfs.option.FilerMountRootPath, wfs: wfs}
+ wfs.root = &Dir{name: wfs.option.FilerMountRootPath, wfs: wfs}
+ wfs.fsNodeCache = newFsCache(wfs.root)
return wfs
}
@@ -88,23 +117,18 @@ func (wfs *WFS) Root() (fs.Node, error) {
return wfs.root, nil
}
-func (wfs *WFS) WithFilerClient(ctx context.Context, fn func(context.Context, filer_pb.SeaweedFilerClient) error) error {
+var _ = filer_pb.FilerClient(&WFS{})
- err := util.WithCachedGrpcClient(ctx, func(ctx2 context.Context, grpcConnection *grpc.ClientConn) error {
+func (wfs *WFS) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error {
+
+ err := pb.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error {
client := filer_pb.NewSeaweedFilerClient(grpcConnection)
- return fn(ctx2, client)
+ return fn(client)
}, wfs.option.FilerGrpcAddress, wfs.option.GrpcDialOption)
if err == nil {
return nil
}
- if strings.Contains(err.Error(), "context canceled") {
- glog.V(2).Infoln("retry context canceled request...")
- return util.WithCachedGrpcClient(context.Background(), func(ctx2 context.Context, grpcConnection *grpc.ClientConn) error {
- client := filer_pb.NewSeaweedFilerClient(grpcConnection)
- return fn(ctx2, client)
- }, wfs.option.FilerGrpcAddress, wfs.option.GrpcDialOption)
- }
return err
}
@@ -117,40 +141,27 @@ func (wfs *WFS) AcquireHandle(file *File, uid, gid uint32) (fileHandle *FileHand
wfs.handlesLock.Lock()
defer wfs.handlesLock.Unlock()
- index, found := wfs.pathToHandleIndex[fullpath]
- if found && wfs.handles[index] != nil {
- glog.V(2).Infoln(fullpath, "found fileHandle id", index)
- return wfs.handles[index]
+ inodeId := file.fullpath().AsInode()
+ existingHandle, found := wfs.handles[inodeId]
+ if found && existingHandle != nil {
+ return existingHandle
}
fileHandle = newFileHandle(file, uid, gid)
- for i, h := range wfs.handles {
- if h == nil {
- wfs.handles[i] = fileHandle
- fileHandle.handle = uint64(i)
- wfs.pathToHandleIndex[fullpath] = i
- glog.V(4).Infof("%s reuse fh %d", fullpath, fileHandle.handle)
- return
- }
- }
-
- wfs.handles = append(wfs.handles, fileHandle)
- fileHandle.handle = uint64(len(wfs.handles) - 1)
- wfs.pathToHandleIndex[fullpath] = int(fileHandle.handle)
+ wfs.handles[inodeId] = fileHandle
+ fileHandle.handle = inodeId
glog.V(4).Infof("%s new fh %d", fullpath, fileHandle.handle)
return
}
-func (wfs *WFS) ReleaseHandle(fullpath filer2.FullPath, handleId fuse.HandleID) {
+func (wfs *WFS) ReleaseHandle(fullpath util.FullPath, handleId fuse.HandleID) {
wfs.handlesLock.Lock()
defer wfs.handlesLock.Unlock()
glog.V(4).Infof("%s ReleaseHandle id %d current handles length %d", fullpath, handleId, len(wfs.handles))
- delete(wfs.pathToHandleIndex, fullpath)
- if int(handleId) < len(wfs.handles) {
- wfs.handles[int(handleId)] = nil
- }
+
+ delete(wfs.handles, fullpath.AsInode())
return
}
@@ -162,7 +173,7 @@ func (wfs *WFS) Statfs(ctx context.Context, req *fuse.StatfsRequest, resp *fuse.
if wfs.stats.lastChecked < time.Now().Unix()-20 {
- err := wfs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error {
+ err := wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.StatisticsRequest{
Collection: wfs.option.Collection,
@@ -171,7 +182,7 @@ func (wfs *WFS) Statfs(ctx context.Context, req *fuse.StatfsRequest, resp *fuse.
}
glog.V(4).Infof("reading filer stats: %+v", request)
- resp, err := client.Statistics(ctx, request)
+ resp, err := client.Statistics(context.Background(), request)
if err != nil {
glog.V(0).Infof("reading filer stats %v: %v", request, err)
return err
@@ -217,43 +228,33 @@ func (wfs *WFS) Statfs(ctx context.Context, req *fuse.StatfsRequest, resp *fuse.
return nil
}
-func (wfs *WFS) cacheGet(path filer2.FullPath) *filer_pb.Entry {
+func (wfs *WFS) cacheGet(path util.FullPath) *filer_pb.Entry {
item := wfs.listDirectoryEntriesCache.Get(string(path))
if item != nil && !item.Expired() {
return item.Value().(*filer_pb.Entry)
}
return nil
}
-func (wfs *WFS) cacheSet(path filer2.FullPath, entry *filer_pb.Entry, ttl time.Duration) {
+func (wfs *WFS) cacheSet(path util.FullPath, entry *filer_pb.Entry, ttl time.Duration) {
if entry == nil {
wfs.listDirectoryEntriesCache.Delete(string(path))
} else {
wfs.listDirectoryEntriesCache.Set(string(path), entry, ttl)
}
}
-func (wfs *WFS) cacheDelete(path filer2.FullPath) {
+func (wfs *WFS) cacheDelete(path util.FullPath) {
wfs.listDirectoryEntriesCache.Delete(string(path))
}
-func (wfs *WFS) getNode(fullpath filer2.FullPath, fn func() fs.Node) fs.Node {
- wfs.nodesLock.Lock()
- defer wfs.nodesLock.Unlock()
-
- node, found := wfs.nodes[fullpath.AsInode()]
- if found {
- return node
+func (wfs *WFS) AdjustedUrl(hostAndPort string) string {
+ if !wfs.option.OutsideContainerClusterMode {
+ return hostAndPort
}
- node = fn()
- if node != nil {
- wfs.nodes[fullpath.AsInode()] = node
+ colonIndex := strings.Index(hostAndPort, ":")
+ if colonIndex < 0 {
+ return hostAndPort
}
- return node
-}
-
-func (wfs *WFS) forgetNode(fullpath filer2.FullPath) {
- wfs.nodesLock.Lock()
- defer wfs.nodesLock.Unlock()
-
- delete(wfs.nodes, fullpath.AsInode())
+ filerColonIndex := strings.Index(wfs.option.FilerGrpcAddress, ":")
+ return fmt.Sprintf("%s:%s", wfs.option.FilerGrpcAddress[:filerColonIndex], hostAndPort[colonIndex+1:])
}
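
AdjustedUrl supports OutsideContainerClusterMode: volume servers advertise container-internal hostnames, so the mount keeps the advertised port but swaps in the filer's host, which is reachable from outside the cluster. A worked example, assuming FilerGrpcAddress is "filer.example.com:18888":

    wfs.AdjustedUrl("10.244.0.7:8080") // => "filer.example.com:8080"
    wfs.AdjustedUrl("volume-0")        // no port found, returned unchanged

In the default in-cluster mode the input is returned untouched.
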
diff --git a/weed/filesys/wfs_deletion.go b/weed/filesys/wfs_deletion.go
index cce0c792c..bf21b1808 100644
--- a/weed/filesys/wfs_deletion.go
+++ b/weed/filesys/wfs_deletion.go
@@ -3,14 +3,15 @@ package filesys
import (
"context"
+ "google.golang.org/grpc"
+
"github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
- "google.golang.org/grpc"
)
-func (wfs *WFS) deleteFileChunks(ctx context.Context, chunks []*filer_pb.FileChunk) {
+func (wfs *WFS) deleteFileChunks(chunks []*filer_pb.FileChunk) {
if len(chunks) == 0 {
return
}
@@ -20,13 +21,13 @@ func (wfs *WFS) deleteFileChunks(ctx context.Context, chunks []*filer_pb.FileChu
fileIds = append(fileIds, chunk.GetFileIdString())
}
- wfs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error {
- deleteFileIds(ctx, wfs.option.GrpcDialOption, client, fileIds)
+ wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+ wfs.deleteFileIds(wfs.option.GrpcDialOption, client, fileIds)
return nil
})
}
-func deleteFileIds(ctx context.Context, grpcDialOption grpc.DialOption, client filer_pb.SeaweedFilerClient, fileIds []string) error {
+func (wfs *WFS) deleteFileIds(grpcDialOption grpc.DialOption, client filer_pb.SeaweedFilerClient, fileIds []string) error {
var vids []string
for _, fileId := range fileIds {
@@ -38,7 +39,7 @@ func deleteFileIds(ctx context.Context, grpcDialOption grpc.DialOption, client f
m := make(map[string]operation.LookupResult)
glog.V(4).Infof("remove file lookup volume id locations: %v", vids)
- resp, err := client.LookupVolume(ctx, &filer_pb.LookupVolumeRequest{
+ resp, err := client.LookupVolume(context.Background(), &filer_pb.LookupVolumeRequest{
VolumeIds: vids,
})
if err != nil {
@@ -56,7 +57,7 @@ func deleteFileIds(ctx context.Context, grpcDialOption grpc.DialOption, client f
}
for _, loc := range locations.Locations {
lr.Locations = append(lr.Locations, operation.Location{
- Url: loc.Url,
+ Url: wfs.AdjustedUrl(loc.Url),
PublicUrl: loc.PublicUrl,
})
}
diff --git a/weed/filesys/xattr.go b/weed/filesys/xattr.go
index 9dfb491fd..7e7b8c60b 100644
--- a/weed/filesys/xattr.go
+++ b/weed/filesys/xattr.go
@@ -2,11 +2,10 @@ package filesys
import (
"context"
- "strings"
- "github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
"github.com/seaweedfs/fuse"
)
@@ -108,25 +107,34 @@ func listxattr(entry *filer_pb.Entry, req *fuse.ListxattrRequest, resp *fuse.Lis
}
-func (wfs *WFS) maybeLoadEntry(ctx context.Context, dir, name string) (entry *filer_pb.Entry, err error) {
+func (wfs *WFS) maybeLoadEntry(dir, name string) (entry *filer_pb.Entry, err error) {
- fullpath := filer2.NewFullPath(dir, name)
+ fullpath := util.NewFullPath(dir, name)
entry = wfs.cacheGet(fullpath)
if entry != nil {
return
}
// glog.V(3).Infof("read entry cache miss %s", fullpath)
- err = wfs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error {
+ // read from async meta cache
+ if wfs.option.AsyncMetaDataCaching {
+ cachedEntry, cacheErr := wfs.metaCache.FindEntry(context.Background(), fullpath)
+ if cacheErr == filer_pb.ErrNotFound {
+ return nil, fuse.ENOENT
+ }
+ return cachedEntry.ToProtoEntry(), nil
+ }
+
+ err = wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.LookupDirectoryEntryRequest{
Name: name,
Directory: dir,
}
- resp, err := client.LookupDirectoryEntry(ctx, request)
- if err != nil || resp == nil || resp.Entry == nil {
- if err == filer2.ErrNotFound || strings.Contains(err.Error(), filer2.ErrNotFound.Error()) {
+ resp, err := filer_pb.LookupEntry(client, request)
+ if err != nil {
+ if err == filer_pb.ErrNotFound {
glog.V(3).Infof("file attr read not found file %v: %v", request, err)
return fuse.ENOENT
}
diff --git a/weed/images/orientation.go b/weed/images/orientation.go
index 4bff89311..a592a7d8b 100644
--- a/weed/images/orientation.go
+++ b/weed/images/orientation.go
@@ -7,7 +7,7 @@ import (
"image/jpeg"
"log"
- "github.com/rwcarlsen/goexif/exif"
+ "github.com/seaweedfs/goexif/exif"
)
//many code is copied from http://camlistore.org/pkg/images/images.go
diff --git a/weed/images/resizing.go b/weed/images/resizing.go
index ff0eff5e1..b048daa1c 100644
--- a/weed/images/resizing.go
+++ b/weed/images/resizing.go
@@ -6,10 +6,11 @@ import (
"image/gif"
"image/jpeg"
"image/png"
+ "io"
+
+ "github.com/disintegration/imaging"
"github.com/chrislusf/seaweedfs/weed/glog"
- "github.com/disintegration/imaging"
- "io"
)
func Resized(ext string, read io.ReadSeeker, width, height int, mode string) (resized io.ReadSeeker, w int, h int) {
@@ -35,6 +36,7 @@ func Resized(ext string, read io.ReadSeeker, width, height int, mode string) (re
}
}
} else {
+ read.Seek(0, io.SeekStart)
return read, bounds.Dx(), bounds.Dy()
}
var buf bytes.Buffer
diff --git a/weed/messaging/broker/broker_append.go b/weed/messaging/broker/broker_append.go
new file mode 100644
index 000000000..80f107e00
--- /dev/null
+++ b/weed/messaging/broker/broker_append.go
@@ -0,0 +1,113 @@
+package broker
+
+import (
+ "context"
+ "fmt"
+ "io"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/operation"
+ "github.com/chrislusf/seaweedfs/weed/pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
+ "github.com/chrislusf/seaweedfs/weed/security"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+func (broker *MessageBroker) appendToFile(targetFile string, topicConfig *messaging_pb.TopicConfiguration, data []byte) error {
+
+ assignResult, uploadResult, err2 := broker.assignAndUpload(topicConfig, data)
+ if err2 != nil {
+ return err2
+ }
+
+ dir, name := util.FullPath(targetFile).DirAndName()
+
+ // append the chunk
+ if err := broker.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+
+ request := &filer_pb.AppendToEntryRequest{
+ Directory: dir,
+ EntryName: name,
+ Chunks: []*filer_pb.FileChunk{uploadResult.ToPbFileChunk(assignResult.Fid, 0)},
+ }
+
+ _, err := client.AppendToEntry(context.Background(), request)
+ if err != nil {
+ glog.V(0).Infof("append to file %v: %v", request, err)
+ return err
+ }
+
+ return nil
+ }); err != nil {
+ return fmt.Errorf("append to file %v: %v", targetFile, err)
+ }
+
+ return nil
+}
+
+func (broker *MessageBroker) assignAndUpload(topicConfig *messaging_pb.TopicConfiguration, data []byte) (*operation.AssignResult, *operation.UploadResult, error) {
+
+ var assignResult = &operation.AssignResult{}
+
+ // assign a volume location
+ if err := broker.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+
+ request := &filer_pb.AssignVolumeRequest{
+ Count: 1,
+ Replication: topicConfig.Replication,
+ Collection: topicConfig.Collection,
+ }
+
+ resp, err := client.AssignVolume(context.Background(), request)
+ if err != nil {
+ glog.V(0).Infof("assign volume failure %v: %v", request, err)
+ return err
+ }
+ if resp.Error != "" {
+ return fmt.Errorf("assign volume failure %v: %v", request, resp.Error)
+ }
+
+ assignResult.Auth = security.EncodedJwt(resp.Auth)
+ assignResult.Fid = resp.FileId
+ assignResult.Url = resp.Url
+ assignResult.PublicUrl = resp.PublicUrl
+ assignResult.Count = uint64(resp.Count)
+
+ return nil
+ }); err != nil {
+ return nil, nil, err
+ }
+
+ // upload data
+ targetUrl := fmt.Sprintf("http://%s/%s", assignResult.Url, assignResult.Fid)
+ uploadResult, err := operation.UploadData(targetUrl, "", broker.option.Cipher, data, false, "", nil, assignResult.Auth)
+ if err != nil {
+ return nil, nil, fmt.Errorf("upload data %s: %v", targetUrl, err)
+ }
+ // println("uploaded to", targetUrl)
+ return assignResult, uploadResult, nil
+}
+
+var _ = filer_pb.FilerClient(&MessageBroker{})
+
+func (broker *MessageBroker) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) (err error) {
+
+ for _, filer := range broker.option.Filers {
+ if err = pb.WithFilerClient(filer, broker.grpcDialOption, fn); err != nil {
+ if err == io.EOF {
+ return
+ }
+ glog.V(0).Infof("fail to connect to %s: %v", filer, err)
+ } else {
+ break
+ }
+ }
+
+ return
+
+}
+
+func (broker *MessageBroker) AdjustedUrl(hostAndPort string) string {
+ return hostAndPort
+}
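
WithFilerClient above tries each configured filer in turn, treating io.EOF as a hard stop and logging any other failure before moving to the next filer. A standalone sketch of that failover shape, with a stubbed dial function standing in for pb.WithFilerClient:

    package main

    import (
        "errors"
        "fmt"
        "io"
    )

    // withAnyServer tries each address until one succeeds; io.EOF ends the
    // attempt immediately, any other error is logged and the next server tried.
    func withAnyServer(servers []string, fn func(addr string) error) (err error) {
        for _, addr := range servers {
            if err = fn(addr); err != nil {
                if errors.Is(err, io.EOF) {
                    return
                }
                fmt.Printf("fail to connect to %s: %v\n", addr, err)
            } else {
                break
            }
        }
        return
    }

    func main() {
        err := withAnyServer([]string{"filer1:8888", "filer2:8888"}, func(addr string) error {
            if addr == "filer1:8888" {
                return fmt.Errorf("connection refused") // simulate a down filer
            }
            return nil // filer2 accepts the call
        })
        fmt.Println("result:", err) // result: <nil>
    }
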
diff --git a/weed/messaging/broker/broker_grpc_server.go b/weed/messaging/broker/broker_grpc_server.go
new file mode 100644
index 000000000..6918a28a6
--- /dev/null
+++ b/weed/messaging/broker/broker_grpc_server.go
@@ -0,0 +1,37 @@
+package broker
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
+)
+
+func (broker *MessageBroker) ConfigureTopic(c context.Context, request *messaging_pb.ConfigureTopicRequest) (*messaging_pb.ConfigureTopicResponse, error) {
+ panic("implement me")
+}
+
+func (broker *MessageBroker) DeleteTopic(c context.Context, request *messaging_pb.DeleteTopicRequest) (*messaging_pb.DeleteTopicResponse, error) {
+ resp := &messaging_pb.DeleteTopicResponse{}
+ dir, entry := genTopicDirEntry(request.Namespace, request.Topic)
+ if exists, err := filer_pb.Exists(broker, dir, entry, true); err != nil {
+ return nil, err
+ } else if exists {
+ err = filer_pb.Remove(broker, dir, entry, true, true, true)
+ }
+ return resp, nil
+}
+
+func (broker *MessageBroker) GetTopicConfiguration(c context.Context, request *messaging_pb.GetTopicConfigurationRequest) (*messaging_pb.GetTopicConfigurationResponse, error) {
+ panic("implement me")
+}
+
+func genTopicDir(namespace, topic string) string {
+ return fmt.Sprintf("%s/%s/%s", filer2.TopicsDir, namespace, topic)
+}
+
+func genTopicDirEntry(namespace, topic string) (dir, entry string) {
+ return fmt.Sprintf("%s/%s", filer2.TopicsDir, namespace), topic
+}
diff --git a/weed/messaging/broker/broker_grpc_server_discovery.go b/weed/messaging/broker/broker_grpc_server_discovery.go
new file mode 100644
index 000000000..3c14f3220
--- /dev/null
+++ b/weed/messaging/broker/broker_grpc_server_discovery.go
@@ -0,0 +1,116 @@
+package broker
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
+)
+
+/*
+Topic discovery:
+
+When a pub or sub client connects, it asks for the whole broker list and runs consistent hashing to find its broker.
+
+The broker checks its peers to see whether the topic is already hosted by some other broker; if that broker is alive and acknowledged as alive, it redirects the client there.
+Otherwise, it just hosts the topic itself.
+
+So if the pub and sub clients connect around the same time, they connect to the same broker, and everyone is happy.
+If one of them connects very late, after the system topology has changed quite a bit with new servers added or old servers gone, checking peers will help.
+
+*/
+
+func (broker *MessageBroker) FindBroker(c context.Context, request *messaging_pb.FindBrokerRequest) (*messaging_pb.FindBrokerResponse, error) {
+
+ t := &messaging_pb.FindBrokerResponse{}
+ var peers []string
+
+ targetTopicPartition := fmt.Sprintf(TopicPartitionFmt, request.Namespace, request.Topic, request.Parition)
+
+ for _, filer := range broker.option.Filers {
+ err := broker.withFilerClient(filer, func(client filer_pb.SeaweedFilerClient) error {
+ resp, err := client.LocateBroker(context.Background(), &filer_pb.LocateBrokerRequest{
+ Resource: targetTopicPartition,
+ })
+ if err != nil {
+ return err
+ }
+ if resp.Found && len(resp.Resources) > 0 {
+ t.Broker = resp.Resources[0].GrpcAddresses
+ return nil
+ }
+ for _, b := range resp.Resources {
+ peers = append(peers, b.GrpcAddresses)
+ }
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ t.Broker = PickMember(peers, []byte(targetTopicPartition))
+
+ return t, nil
+
+}
+
+func (broker *MessageBroker) checkFilers() {
+
+ // contact a filer about masters
+ var masters []string
+ found := false
+ for !found {
+ for _, filer := range broker.option.Filers {
+ err := broker.withFilerClient(filer, func(client filer_pb.SeaweedFilerClient) error {
+ resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
+ if err != nil {
+ return err
+ }
+ masters = append(masters, resp.Masters...)
+ return nil
+ })
+ if err == nil {
+ found = true
+ break
+ }
+ glog.V(0).Infof("failed to read masters from %+v: %v", broker.option.Filers, err)
+ time.Sleep(time.Second)
+ }
+ }
+ glog.V(0).Infof("received master list: %s", masters)
+
+ // contact each master for filers
+ var filers []string
+ found = false
+ for !found {
+ for _, master := range masters {
+ err := broker.withMasterClient(master, func(client master_pb.SeaweedClient) error {
+ resp, err := client.ListMasterClients(context.Background(), &master_pb.ListMasterClientsRequest{
+ ClientType: "filer",
+ })
+ if err != nil {
+ return err
+ }
+
+ filers = append(filers, resp.GrpcAddresses...)
+
+ return nil
+ })
+ if err == nil {
+ found = true
+ break
+ }
+ glog.V(0).Infof("failed to list filers: %v", err)
+ time.Sleep(time.Second)
+ }
+ }
+ glog.V(0).Infof("received filer list: %s", filers)
+
+ broker.option.Filers = filers
+
+}
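
FindBroker above prefers a broker that already hosts the topic partition and only falls back to consistent hashing over the live peers. A toy sketch of that decision, with stubs in place of the filer's LocateBroker call and of PickMember:

    package main

    import "fmt"

    // locateResult stands in for filer_pb.LocateBrokerResponse: either the
    // resource is already hosted somewhere, or we get the list of live brokers.
    type locateResult struct {
        found   bool
        brokers []string
    }

    // findBroker mirrors the flow above: prefer the broker that already hosts
    // the topic partition, otherwise pick one of the live peers.
    func findBroker(resource string, locate func(string) locateResult, pick func([]string, []byte) string) string {
        res := locate(resource)
        if res.found && len(res.brokers) > 0 {
            return res.brokers[0]
        }
        return pick(res.brokers, []byte(resource))
    }

    func main() {
        resource := fmt.Sprintf("%s/%s_%02d", "chan", "events", 0) // TopicPartitionFmt layout
        b := findBroker(resource,
            func(string) locateResult { return locateResult{brokers: []string{"b1:17777", "b2:17777"}} },
            func(peers []string, key []byte) string { return peers[int(key[0])%len(peers)] }, // toy stand-in for PickMember
        )
        fmt.Println("picked broker:", b)
    }
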
diff --git a/weed/messaging/broker/broker_grpc_server_publish.go b/weed/messaging/broker/broker_grpc_server_publish.go
new file mode 100644
index 000000000..dc11061af
--- /dev/null
+++ b/weed/messaging/broker/broker_grpc_server_publish.go
@@ -0,0 +1,112 @@
+package broker
+
+import (
+ "crypto/md5"
+ "fmt"
+ "io"
+
+ "github.com/golang/protobuf/proto"
+
+ "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
+)
+
+func (broker *MessageBroker) Publish(stream messaging_pb.SeaweedMessaging_PublishServer) error {
+
+ // process initial request
+ in, err := stream.Recv()
+ if err == io.EOF {
+ return nil
+ }
+ if err != nil {
+ return err
+ }
+
+ // TODO look it up
+ topicConfig := &messaging_pb.TopicConfiguration{
+ // IsTransient: true,
+ }
+
+ // send init response
+ initResponse := &messaging_pb.PublishResponse{
+ Config: nil,
+ Redirect: nil,
+ }
+ err = stream.Send(initResponse)
+ if err != nil {
+ return err
+ }
+ if initResponse.Redirect != nil {
+ return nil
+ }
+
+ // get lock
+ tp := TopicPartition{
+ Namespace: in.Init.Namespace,
+ Topic: in.Init.Topic,
+ Partition: in.Init.Partition,
+ }
+
+ tpDir := fmt.Sprintf("%s/%s/%s", filer2.TopicsDir, tp.Namespace, tp.Topic)
+ md5File := fmt.Sprintf("p%02d.md5", tp.Partition)
+ // println("chan data stored under", tpDir, "as", md5File)
+
+ if exists, err := filer_pb.Exists(broker, tpDir, md5File, false); err == nil && exists {
+ return fmt.Errorf("channel is already closed")
+ }
+
+ tl := broker.topicManager.RequestLock(tp, topicConfig, true)
+ defer broker.topicManager.ReleaseLock(tp, true)
+
+ md5hash := md5.New()
+ // process each message
+ for {
+ // println("recv")
+ in, err := stream.Recv()
+ // glog.V(0).Infof("received %v err: %v", in, err)
+ if err == io.EOF {
+ return nil
+ }
+ if err != nil {
+ return err
+ }
+
+ if in.Data == nil {
+ continue
+ }
+
+ // fmt.Printf("received: %d : %s\n", len(in.Data.Value), string(in.Data.Value))
+
+ data, err := proto.Marshal(in.Data)
+ if err != nil {
+ glog.Errorf("marshal error: %v\n", err)
+ continue
+ }
+
+ tl.logBuffer.AddToBuffer(in.Data.Key, data)
+
+ if in.Data.IsClose {
+ // println("server received closing")
+ break
+ }
+
+ md5hash.Write(in.Data.Value)
+
+ }
+
+ if err := broker.appendToFile(tpDir+"/"+md5File, topicConfig, md5hash.Sum(nil)); err != nil {
+ glog.V(0).Infof("err writing %s: %v", md5File, err)
+ }
+
+ // fmt.Printf("received md5 %X\n", md5hash.Sum(nil))
+
+ // send the close ack
+ // println("server send ack closing")
+ if err := stream.Send(&messaging_pb.PublishResponse{IsClosed: true}); err != nil {
+ glog.V(0).Infof("err sending close response: %v", err)
+ }
+ return nil
+
+}
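
On the wire, Publish above follows a simple protocol: an init message, an init response (possibly a redirect), then data messages until one carries IsClose, then a final IsClosed ack. Along the way the broker hashes every message body and persists the digest as the pNN.md5 marker file, whose existence is later used to detect an already closed channel. A tiny sketch of that digest accounting:

    package main

    import (
        "crypto/md5"
        "fmt"
    )

    func main() {
        // the broker writes every message body into one running md5; the digest
        // is appended to the marker file when the publish stream closes.
        h := md5.New()
        for _, msg := range [][]byte{[]byte("message 0"), []byte("message 1")} {
            h.Write(msg)
        }
        fmt.Printf("stream md5: %X\n", h.Sum(nil))
    }
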
diff --git a/weed/messaging/broker/broker_grpc_server_subscribe.go b/weed/messaging/broker/broker_grpc_server_subscribe.go
new file mode 100644
index 000000000..9538d3063
--- /dev/null
+++ b/weed/messaging/broker/broker_grpc_server_subscribe.go
@@ -0,0 +1,162 @@
+package broker
+
+import (
+ "fmt"
+ "io"
+ "strings"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+
+ "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
+)
+
+func (broker *MessageBroker) Subscribe(stream messaging_pb.SeaweedMessaging_SubscribeServer) error {
+
+ // process initial request
+ in, err := stream.Recv()
+ if err == io.EOF {
+ return nil
+ }
+ if err != nil {
+ return err
+ }
+
+ var processedTsNs int64
+ var messageCount int64
+ subscriberId := in.Init.SubscriberId
+
+ // TODO look it up
+ topicConfig := &messaging_pb.TopicConfiguration{
+ // IsTransient: true,
+ }
+
+ // get lock
+ tp := TopicPartition{
+ Namespace: in.Init.Namespace,
+ Topic: in.Init.Topic,
+ Partition: in.Init.Partition,
+ }
+ fmt.Printf("+ subscriber %s for %s\n", subscriberId, tp.String())
+ defer func() {
+ fmt.Printf("- subscriber %s for %s %d messages last %v\n", subscriberId, tp.String(), messageCount, time.Unix(0, processedTsNs))
+ }()
+
+ lock := broker.topicManager.RequestLock(tp, topicConfig, false)
+ defer broker.topicManager.ReleaseLock(tp, false)
+
+ isConnected := true
+ go func() {
+ for isConnected {
+ if _, err := stream.Recv(); err != nil {
+ // println("disconnecting connection to", subscriberId, tp.String())
+ isConnected = false
+ lock.cond.Signal()
+ }
+ }
+ }()
+
+ lastReadTime := time.Now()
+ switch in.Init.StartPosition {
+ case messaging_pb.SubscriberMessage_InitMessage_TIMESTAMP:
+ lastReadTime = time.Unix(0, in.Init.TimestampNs)
+ case messaging_pb.SubscriberMessage_InitMessage_LATEST:
+ case messaging_pb.SubscriberMessage_InitMessage_EARLIEST:
+ lastReadTime = time.Unix(0, 0)
+ }
+
+ // how to process each message
+ // an error returned will end the subscription
+ eachMessageFn := func(m *messaging_pb.Message) error {
+ err := stream.Send(&messaging_pb.BrokerMessage{
+ Data: m,
+ })
+ if err != nil {
+ glog.V(0).Infof("=> subscriber %v: %+v", subscriberId, err)
+ }
+ return err
+ }
+
+ eachLogEntryFn := func(logEntry *filer_pb.LogEntry) error {
+ m := &messaging_pb.Message{}
+ if err = proto.Unmarshal(logEntry.Data, m); err != nil {
+ glog.Errorf("unexpected unmarshal messaging_pb.Message: %v", err)
+ return err
+ }
+ // fmt.Printf("sending : %d bytes ts %d\n", len(m.Value), logEntry.TsNs)
+ if err = eachMessageFn(m); err != nil {
+ glog.Errorf("sending %d bytes to %s: %s", len(m.Value), subscriberId, err)
+ return err
+ }
+ if m.IsClose {
+ // println("processed EOF")
+ return io.EOF
+ }
+ processedTsNs = logEntry.TsNs
+ messageCount++
+ return nil
+ }
+
+ if err := broker.readPersistedLogBuffer(&tp, lastReadTime, eachLogEntryFn); err != nil {
+ if err != io.EOF {
+ // println("stopping from persisted logs", err.Error())
+ return err
+ }
+ }
+
+ if processedTsNs != 0 {
+ lastReadTime = time.Unix(0, processedTsNs)
+ }
+
+ // fmt.Printf("subscriber %s read %d on disk log %v\n", subscriberId, messageCount, lastReadTime)
+
+ err = lock.logBuffer.LoopProcessLogData(lastReadTime, func() bool {
+ lock.Mutex.Lock()
+ lock.cond.Wait()
+ lock.Mutex.Unlock()
+ return isConnected
+ }, eachLogEntryFn)
+
+ return err
+
+}
+
+func (broker *MessageBroker) readPersistedLogBuffer(tp *TopicPartition, startTime time.Time, eachLogEntryFn func(logEntry *filer_pb.LogEntry) error) (err error) {
+ startDate := fmt.Sprintf("%04d-%02d-%02d", startTime.Year(), startTime.Month(), startTime.Day())
+ startHourMinute := fmt.Sprintf("%02d-%02d.segment", startTime.Hour(), startTime.Minute())
+
+ sizeBuf := make([]byte, 4)
+ startTsNs := startTime.UnixNano()
+
+ topicDir := genTopicDir(tp.Namespace, tp.Topic)
+ partitionSuffix := fmt.Sprintf(".part%02d", tp.Partition)
+
+ return filer_pb.List(broker, topicDir, "", func(dayEntry *filer_pb.Entry, isLast bool) error {
+ dayDir := fmt.Sprintf("%s/%s", topicDir, dayEntry.Name)
+ return filer_pb.List(broker, dayDir, "", func(hourMinuteEntry *filer_pb.Entry, isLast bool) error {
+ if dayEntry.Name == startDate {
+ if strings.Compare(hourMinuteEntry.Name, startHourMinute) < 0 {
+ return nil
+ }
+ }
+ if !strings.HasSuffix(hourMinuteEntry.Name, partitionSuffix) {
+ return nil
+ }
+ // println("partition", tp.Partition, "processing", dayDir, "/", hourMinuteEntry.Name)
+ chunkedFileReader := filer2.NewChunkStreamReader(broker, hourMinuteEntry.Chunks)
+ defer chunkedFileReader.Close()
+ if err := filer2.ReadEachLogEntry(chunkedFileReader, sizeBuf, startTsNs, eachLogEntryFn); err != nil {
+ chunkedFileReader.Close()
+ if err == io.EOF {
+ return err
+ }
+ return fmt.Errorf("reading %s/%s: %v", dayDir, hourMinuteEntry.Name, err)
+ }
+ return nil
+ }, "", false, 24*60)
+ }, startDate, true, 366)
+
+}
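
readPersistedLogBuffer above walks the on-disk layout that the topic manager's flush function writes: one directory per day, one segment file per minute and partition. A sketch of the path construction, assuming filer2.TopicsDir is /topics; the namespace and topic names are made up:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        // layout assumed from the flush function: /topics/<ns>/<topic>/YYYY-MM-DD/HH-MM.partNN
        start := time.Date(2020, 3, 14, 10, 30, 0, 0, time.UTC)
        dayDir := fmt.Sprintf("%04d-%02d-%02d", start.Year(), start.Month(), start.Day())
        segment := fmt.Sprintf("%02d-%02d.part%02d", start.Hour(), start.Minute(), 2)
        fmt.Printf("/topics/%s/%s/%s/%s\n", "chan", "events", dayDir, segment)
        // prints /topics/chan/events/2020-03-14/10-30.part02
    }
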
diff --git a/weed/messaging/broker/broker_server.go b/weed/messaging/broker/broker_server.go
new file mode 100644
index 000000000..0c04d2841
--- /dev/null
+++ b/weed/messaging/broker/broker_server.go
@@ -0,0 +1,112 @@
+package broker
+
+import (
+ "context"
+ "time"
+
+ "google.golang.org/grpc"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+)
+
+type MessageBrokerOption struct {
+ Filers []string
+ DefaultReplication string
+ MaxMB int
+ Ip string
+ Port int
+ Cipher bool
+}
+
+type MessageBroker struct {
+ option *MessageBrokerOption
+ grpcDialOption grpc.DialOption
+ topicManager *TopicManager
+}
+
+func NewMessageBroker(option *MessageBrokerOption, grpcDialOption grpc.DialOption) (messageBroker *MessageBroker, err error) {
+
+ messageBroker = &MessageBroker{
+ option: option,
+ grpcDialOption: grpcDialOption,
+ }
+
+ messageBroker.topicManager = NewTopicManager(messageBroker)
+
+ messageBroker.checkFilers()
+
+ go messageBroker.keepConnectedToOneFiler()
+
+ return messageBroker, nil
+}
+
+func (broker *MessageBroker) keepConnectedToOneFiler() {
+
+ for {
+ for _, filer := range broker.option.Filers {
+ broker.withFilerClient(filer, func(client filer_pb.SeaweedFilerClient) error {
+ stream, err := client.KeepConnected(context.Background())
+ if err != nil {
+ glog.V(0).Infof("%s:%d failed to keep connected to %s: %v", broker.option.Ip, broker.option.Port, filer, err)
+ return err
+ }
+
+ initRequest := &filer_pb.KeepConnectedRequest{
+ Name: broker.option.Ip,
+ GrpcPort: uint32(broker.option.Port),
+ }
+ for _, tp := range broker.topicManager.ListTopicPartitions() {
+ initRequest.Resources = append(initRequest.Resources, tp.String())
+ }
+ if err := stream.Send(initRequest); err != nil {
+ glog.V(0).Infof("broker %s:%d failed to init at %s: %v", broker.option.Ip, broker.option.Port, filer, err)
+ return err
+ }
+
+ // TODO send events of adding/removing topics
+
+ glog.V(0).Infof("connected with filer: %v", filer)
+ for {
+ if err := stream.Send(&filer_pb.KeepConnectedRequest{
+ Name: broker.option.Ip,
+ GrpcPort: uint32(broker.option.Port),
+ }); err != nil {
+ glog.V(0).Infof("%s:%d failed to send to %s: %v", broker.option.Ip, broker.option.Port, filer, err)
+ return err
+ }
+ // println("send heartbeat")
+ if _, err := stream.Recv(); err != nil {
+ glog.V(0).Infof("%s:%d failed to receive from %s: %v", broker.option.Ip, broker.option.Port, filer, err)
+ return err
+ }
+ // println("received reply")
+ time.Sleep(11 * time.Second)
+ // println("woke up")
+ }
+ return nil
+ })
+ time.Sleep(3 * time.Second)
+ }
+ }
+
+}
+
+func (broker *MessageBroker) withFilerClient(filer string, fn func(filer_pb.SeaweedFilerClient) error) error {
+
+ return pb.WithFilerClient(filer, broker.grpcDialOption, fn)
+
+}
+
+func (broker *MessageBroker) withMasterClient(master string, fn func(client master_pb.SeaweedClient) error) error {
+
+ return pb.WithMasterClient(master, broker.grpcDialOption, func(client master_pb.SeaweedClient) error {
+ return fn(client)
+ })
+
+}
diff --git a/weed/messaging/broker/consistent_distribution.go b/weed/messaging/broker/consistent_distribution.go
new file mode 100644
index 000000000..465a2a8f2
--- /dev/null
+++ b/weed/messaging/broker/consistent_distribution.go
@@ -0,0 +1,38 @@
+package broker
+
+import (
+ "github.com/buraksezer/consistent"
+ "github.com/cespare/xxhash"
+)
+
+type Member string
+
+func (m Member) String() string {
+ return string(m)
+}
+
+type hasher struct{}
+
+func (h hasher) Sum64(data []byte) uint64 {
+ return xxhash.Sum64(data)
+}
+
+func PickMember(members []string, key []byte) string {
+ cfg := consistent.Config{
+ PartitionCount: 9791,
+ ReplicationFactor: 2,
+ Load: 1.25,
+ Hasher: hasher{},
+ }
+
+ cmembers := []consistent.Member{}
+ for _, m := range members {
+ cmembers = append(cmembers, Member(m))
+ }
+
+ c := consistent.New(cmembers, cfg)
+
+ m := c.LocateKey(key)
+
+ return m.String()
+}
diff --git a/weed/messaging/broker/consistent_distribution_test.go b/weed/messaging/broker/consistent_distribution_test.go
new file mode 100644
index 000000000..f58fe4e0e
--- /dev/null
+++ b/weed/messaging/broker/consistent_distribution_test.go
@@ -0,0 +1,32 @@
+package broker
+
+import (
+ "fmt"
+ "testing"
+)
+
+func TestPickMember(t *testing.T) {
+
+ servers := []string{
+ "s1:port",
+ "s2:port",
+ "s3:port",
+ "s5:port",
+ "s4:port",
+ }
+
+ total := 1000
+
+ distribution := make(map[string]int)
+ for i := 0; i < total; i++ {
+ tp := fmt.Sprintf("tp:%2d", i)
+ m := PickMember(servers, []byte(tp))
+ // println(tp, "=>", m)
+ distribution[m]++
+ }
+
+ for member, count := range distribution {
+ fmt.Printf("member: %s, key count: %d load=%.2f\n", member, count, float64(count*100)/float64(total/len(servers)))
+ }
+
+}
diff --git a/weed/messaging/broker/topic_manager.go b/weed/messaging/broker/topic_manager.go
new file mode 100644
index 000000000..b563fffa1
--- /dev/null
+++ b/weed/messaging/broker/topic_manager.go
@@ -0,0 +1,123 @@
+package broker
+
+import (
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
+ "github.com/chrislusf/seaweedfs/weed/util/log_buffer"
+)
+
+type TopicPartition struct {
+ Namespace string
+ Topic string
+ Partition int32
+}
+
+const (
+ TopicPartitionFmt = "%s/%s_%02d"
+)
+
+func (tp *TopicPartition) String() string {
+ return fmt.Sprintf(TopicPartitionFmt, tp.Namespace, tp.Topic, tp.Partition)
+}
+
+type TopicControl struct {
+ sync.Mutex
+ cond *sync.Cond
+ subscriberCount int
+ publisherCount int
+ logBuffer *log_buffer.LogBuffer
+}
+
+type TopicManager struct {
+ sync.Mutex
+ topicControls map[TopicPartition]*TopicControl
+ broker *MessageBroker
+}
+
+func NewTopicManager(messageBroker *MessageBroker) *TopicManager {
+ return &TopicManager{
+ topicControls: make(map[TopicPartition]*TopicControl),
+ broker: messageBroker,
+ }
+}
+
+func (tm *TopicManager) buildLogBuffer(tl *TopicControl, tp TopicPartition, topicConfig *messaging_pb.TopicConfiguration) *log_buffer.LogBuffer {
+
+ flushFn := func(startTime, stopTime time.Time, buf []byte) {
+
+ if topicConfig.IsTransient {
+ // return
+ }
+
+ // fmt.Printf("flushing with topic config %+v\n", topicConfig)
+
+ targetFile := fmt.Sprintf(
+ "%s/%s/%s/%04d-%02d-%02d/%02d-%02d.part%02d",
+ filer2.TopicsDir, tp.Namespace, tp.Topic,
+ startTime.Year(), startTime.Month(), startTime.Day(), startTime.Hour(), startTime.Minute(),
+ tp.Partition,
+ )
+
+ if err := tm.broker.appendToFile(targetFile, topicConfig, buf); err != nil {
+ glog.V(0).Infof("log write failed %s: %v", targetFile, err)
+ }
+ }
+ logBuffer := log_buffer.NewLogBuffer(time.Minute, flushFn, func() {
+ tl.cond.Broadcast()
+ })
+
+ return logBuffer
+}
+
+func (tm *TopicManager) RequestLock(partition TopicPartition, topicConfig *messaging_pb.TopicConfiguration, isPublisher bool) *TopicControl {
+ tm.Lock()
+ defer tm.Unlock()
+
+ tc, found := tm.topicControls[partition]
+ if !found {
+ tc = &TopicControl{}
+ tc.cond = sync.NewCond(&tc.Mutex)
+ tm.topicControls[partition] = tc
+ tc.logBuffer = tm.buildLogBuffer(tc, partition, topicConfig)
+ }
+ if isPublisher {
+ tc.publisherCount++
+ } else {
+ tc.subscriberCount++
+ }
+ return tc
+}
+
+func (tm *TopicManager) ReleaseLock(partition TopicPartition, isPublisher bool) {
+ tm.Lock()
+ defer tm.Unlock()
+
+ lock, found := tm.topicControls[partition]
+ if !found {
+ return
+ }
+ if isPublisher {
+ lock.publisherCount--
+ } else {
+ lock.subscriberCount--
+ }
+ if lock.subscriberCount <= 0 && lock.publisherCount <= 0 {
+ delete(tm.topicControls, partition)
+ lock.logBuffer.Shutdown()
+ }
+}
+
+func (tm *TopicManager) ListTopicPartitions() (tps []TopicPartition) {
+ tm.Lock()
+ defer tm.Unlock()
+
+ for k := range tm.topicControls {
+ tps = append(tps, k)
+ }
+ return
+}
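
RequestLock and ReleaseLock above reference-count publishers and subscribers per topic partition; the first user creates the control (and its log buffer), and the last one out tears it down. A stripped-down sketch of just that counting, without the log buffer:

    package main

    import (
        "fmt"
        "sync"
    )

    // control mirrors TopicControl's counting: the entry lives as long as at
    // least one publisher or subscriber holds it.
    type control struct {
        publishers, subscribers int
    }

    type manager struct {
        sync.Mutex
        controls map[string]*control
    }

    func (m *manager) request(tp string, isPublisher bool) *control {
        m.Lock()
        defer m.Unlock()
        c, ok := m.controls[tp]
        if !ok {
            c = &control{}
            m.controls[tp] = c // first user creates the control
        }
        if isPublisher {
            c.publishers++
        } else {
            c.subscribers++
        }
        return c
    }

    func (m *manager) release(tp string, isPublisher bool) {
        m.Lock()
        defer m.Unlock()
        c, ok := m.controls[tp]
        if !ok {
            return
        }
        if isPublisher {
            c.publishers--
        } else {
            c.subscribers--
        }
        if c.publishers <= 0 && c.subscribers <= 0 {
            delete(m.controls, tp) // last user out removes the entry
        }
    }

    func main() {
        m := &manager{controls: map[string]*control{}}
        m.request("chan/events_00", true)
        m.request("chan/events_00", false)
        m.release("chan/events_00", true)
        fmt.Println("still live:", len(m.controls)) // 1: a subscriber remains
        m.release("chan/events_00", false)
        fmt.Println("still live:", len(m.controls)) // 0: control removed
    }
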
diff --git a/weed/messaging/msgclient/chan_config.go b/weed/messaging/msgclient/chan_config.go
new file mode 100644
index 000000000..a75678815
--- /dev/null
+++ b/weed/messaging/msgclient/chan_config.go
@@ -0,0 +1,5 @@
+package msgclient
+
+func (mc *MessagingClient) DeleteChannel(chanName string) error {
+ return mc.DeleteTopic("chan", chanName)
+}
diff --git a/weed/messaging/msgclient/chan_pub.go b/weed/messaging/msgclient/chan_pub.go
new file mode 100644
index 000000000..9bc88f7c0
--- /dev/null
+++ b/weed/messaging/msgclient/chan_pub.go
@@ -0,0 +1,76 @@
+package msgclient
+
+import (
+ "crypto/md5"
+ "hash"
+ "io"
+ "log"
+
+ "google.golang.org/grpc"
+
+ "github.com/chrislusf/seaweedfs/weed/messaging/broker"
+ "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
+)
+
+type PubChannel struct {
+ client messaging_pb.SeaweedMessaging_PublishClient
+ grpcConnection *grpc.ClientConn
+ md5hash hash.Hash
+}
+
+func (mc *MessagingClient) NewPubChannel(chanName string) (*PubChannel, error) {
+ tp := broker.TopicPartition{
+ Namespace: "chan",
+ Topic: chanName,
+ Partition: 0,
+ }
+ grpcConnection, err := mc.findBroker(tp)
+ if err != nil {
+ return nil, err
+ }
+ pc, err := setupPublisherClient(grpcConnection, tp)
+ if err != nil {
+ return nil, err
+ }
+ return &PubChannel{
+ client: pc,
+ grpcConnection: grpcConnection,
+ md5hash: md5.New(),
+ }, nil
+}
+
+func (pc *PubChannel) Publish(m []byte) error {
+ err := pc.client.Send(&messaging_pb.PublishRequest{
+ Data: &messaging_pb.Message{
+ Value: m,
+ },
+ })
+ if err == nil {
+ pc.md5hash.Write(m)
+ }
+ return err
+}
+func (pc *PubChannel) Close() error {
+
+ // println("send closing")
+ if err := pc.client.Send(&messaging_pb.PublishRequest{
+ Data: &messaging_pb.Message{
+ IsClose: true,
+ },
+ }); err != nil {
+ log.Printf("err send close: %v", err)
+ }
+ // println("receive closing")
+ if _, err := pc.client.Recv(); err != nil && err != io.EOF {
+ log.Printf("err receive close: %v", err)
+ }
+ // println("close connection")
+ if err := pc.grpcConnection.Close(); err != nil {
+ log.Printf("err connection close: %v", err)
+ }
+ return nil
+}
+
+func (pc *PubChannel) Md5() []byte {
+ return pc.md5hash.Sum(nil)
+}
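
A publisher-side usage sketch for the channel API above, assuming a broker is reachable at localhost:17777 (a made-up address) and that NewMessagingClient can load its TLS settings as usual:

    package main

    import (
        "fmt"
        "log"

        "github.com/chrislusf/seaweedfs/weed/messaging/msgclient"
    )

    func main() {
        mc := msgclient.NewMessagingClient("localhost:17777")
        pub, err := mc.NewPubChannel("demo-chan")
        if err != nil {
            log.Fatal(err)
        }
        for i := 0; i < 3; i++ {
            if err := pub.Publish([]byte(fmt.Sprintf("message %d", i))); err != nil {
                log.Fatal(err)
            }
        }
        // Close sends the IsClose marker and waits for the broker's ack.
        if err := pub.Close(); err != nil {
            log.Fatal(err)
        }
        fmt.Printf("published md5: %X\n", pub.Md5())
    }
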
diff --git a/weed/messaging/msgclient/chan_sub.go b/weed/messaging/msgclient/chan_sub.go
new file mode 100644
index 000000000..213ff4666
--- /dev/null
+++ b/weed/messaging/msgclient/chan_sub.go
@@ -0,0 +1,85 @@
+package msgclient
+
+import (
+ "context"
+ "crypto/md5"
+ "hash"
+ "io"
+ "log"
+ "time"
+
+ "github.com/chrislusf/seaweedfs/weed/messaging/broker"
+ "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
+)
+
+type SubChannel struct {
+ ch chan []byte
+ stream messaging_pb.SeaweedMessaging_SubscribeClient
+ md5hash hash.Hash
+ cancel context.CancelFunc
+}
+
+func (mc *MessagingClient) NewSubChannel(subscriberId, chanName string) (*SubChannel, error) {
+ tp := broker.TopicPartition{
+ Namespace: "chan",
+ Topic: chanName,
+ Partition: 0,
+ }
+ grpcConnection, err := mc.findBroker(tp)
+ if err != nil {
+ return nil, err
+ }
+ ctx, cancel := context.WithCancel(context.Background())
+ sc, err := setupSubscriberClient(ctx, grpcConnection, tp, subscriberId, time.Unix(0, 0))
+ if err != nil {
+ return nil, err
+ }
+
+ t := &SubChannel{
+ ch: make(chan []byte),
+ stream: sc,
+ md5hash: md5.New(),
+ cancel: cancel,
+ }
+
+ go func() {
+ for {
+ resp, subErr := t.stream.Recv()
+ if subErr == io.EOF {
+ return
+ }
+ if subErr != nil {
+ log.Printf("fail to receive from netchan %s: %v", chanName, subErr)
+ return
+ }
+ if resp.Data == nil {
+ // this could be heartbeat from broker
+ continue
+ }
+ if resp.Data.IsClose {
+ t.stream.Send(&messaging_pb.SubscriberMessage{
+ IsClose: true,
+ })
+ close(t.ch)
+ cancel()
+ return
+ }
+ t.ch <- resp.Data.Value
+ t.md5hash.Write(resp.Data.Value)
+ }
+ }()
+
+ return t, nil
+}
+
+func (sc *SubChannel) Channel() chan []byte {
+ return sc.ch
+}
+
+func (sc *SubChannel) Md5() []byte {
+ return sc.md5hash.Sum(nil)
+}
+
+func (sc *SubChannel) Cancel() {
+ sc.cancel()
+}
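
The matching subscriber side: the receive goroutine above closes Channel() once the IsClose marker arrives, so a plain range loop drains the channel. A usage sketch with the same assumed broker address:

    package main

    import (
        "fmt"
        "log"

        "github.com/chrislusf/seaweedfs/weed/messaging/msgclient"
    )

    func main() {
        mc := msgclient.NewMessagingClient("localhost:17777") // assumed broker address
        sub, err := mc.NewSubChannel("demo-subscriber", "demo-chan")
        if err != nil {
            log.Fatal(err)
        }
        // the channel is closed by the receive goroutine on IsClose
        for m := range sub.Channel() {
            fmt.Printf("received %d bytes\n", len(m))
        }
        fmt.Printf("received md5: %X\n", sub.Md5())
    }
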
diff --git a/weed/messaging/msgclient/client.go b/weed/messaging/msgclient/client.go
new file mode 100644
index 000000000..4d7ef2b8e
--- /dev/null
+++ b/weed/messaging/msgclient/client.go
@@ -0,0 +1,55 @@
+package msgclient
+
+import (
+ "context"
+ "fmt"
+ "log"
+
+ "google.golang.org/grpc"
+
+ "github.com/chrislusf/seaweedfs/weed/messaging/broker"
+ "github.com/chrislusf/seaweedfs/weed/pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
+ "github.com/chrislusf/seaweedfs/weed/security"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+type MessagingClient struct {
+ bootstrapBrokers []string
+ grpcConnections map[broker.TopicPartition]*grpc.ClientConn
+ grpcDialOption grpc.DialOption
+}
+
+func NewMessagingClient(bootstrapBrokers ...string) *MessagingClient {
+ return &MessagingClient{
+ bootstrapBrokers: bootstrapBrokers,
+ grpcConnections: make(map[broker.TopicPartition]*grpc.ClientConn),
+ grpcDialOption: security.LoadClientTLS(util.GetViper(), "grpc.msg_client"),
+ }
+}
+
+func (mc *MessagingClient) findBroker(tp broker.TopicPartition) (*grpc.ClientConn, error) {
+
+ for _, broker := range mc.bootstrapBrokers {
+ grpcConnection, err := pb.GrpcDial(context.Background(), broker, mc.grpcDialOption)
+ if err != nil {
+ log.Printf("dial broker %s: %v", broker, err)
+ continue
+ }
+ defer grpcConnection.Close()
+
+ resp, err := messaging_pb.NewSeaweedMessagingClient(grpcConnection).FindBroker(context.Background(),
+ &messaging_pb.FindBrokerRequest{
+ Namespace: tp.Namespace,
+ Topic: tp.Topic,
+ Parition: tp.Partition,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ targetBroker := resp.Broker
+ return pb.GrpcDial(context.Background(), targetBroker, mc.grpcDialOption)
+ }
+ return nil, fmt.Errorf("no broker found for %+v", tp)
+}
diff --git a/weed/messaging/msgclient/config.go b/weed/messaging/msgclient/config.go
new file mode 100644
index 000000000..2b9eba1a8
--- /dev/null
+++ b/weed/messaging/msgclient/config.go
@@ -0,0 +1,63 @@
+package msgclient
+
+import (
+ "context"
+ "log"
+
+ "github.com/chrislusf/seaweedfs/weed/messaging/broker"
+ "github.com/chrislusf/seaweedfs/weed/pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
+)
+
+func (mc *MessagingClient) configureTopic(tp broker.TopicPartition) error {
+
+ return mc.withAnyBroker(func(client messaging_pb.SeaweedMessagingClient) error {
+ _, err := client.ConfigureTopic(context.Background(),
+ &messaging_pb.ConfigureTopicRequest{
+ Namespace: tp.Namespace,
+ Topic: tp.Topic,
+ Configuration: &messaging_pb.TopicConfiguration{
+ PartitionCount: 0,
+ Collection: "",
+ Replication: "",
+ IsTransient: false,
+ Partitoning: 0,
+ },
+ })
+ return err
+ })
+
+}
+
+func (mc *MessagingClient) DeleteTopic(namespace, topic string) error {
+
+ return mc.withAnyBroker(func(client messaging_pb.SeaweedMessagingClient) error {
+ _, err := client.DeleteTopic(context.Background(),
+ &messaging_pb.DeleteTopicRequest{
+ Namespace: namespace,
+ Topic: topic,
+ })
+ return err
+ })
+}
+
+func (mc *MessagingClient) withAnyBroker(fn func(client messaging_pb.SeaweedMessagingClient) error) error {
+
+ var lastErr error
+ for _, broker := range mc.bootstrapBrokers {
+ grpcConnection, err := pb.GrpcDial(context.Background(), broker, mc.grpcDialOption)
+ if err != nil {
+ log.Printf("dial broker %s: %v", broker, err)
+ continue
+ }
+ defer grpcConnection.Close()
+
+ err = fn(messaging_pb.NewSeaweedMessagingClient(grpcConnection))
+ if err == nil {
+ return nil
+ }
+ lastErr = err
+ }
+
+ return lastErr
+}
diff --git a/weed/messaging/msgclient/publisher.go b/weed/messaging/msgclient/publisher.go
new file mode 100644
index 000000000..b0fc5afbf
--- /dev/null
+++ b/weed/messaging/msgclient/publisher.go
@@ -0,0 +1,118 @@
+package msgclient
+
+import (
+ "context"
+
+ "github.com/OneOfOne/xxhash"
+ "google.golang.org/grpc"
+
+ "github.com/chrislusf/seaweedfs/weed/messaging/broker"
+ "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
+)
+
+type Publisher struct {
+ publishClients []messaging_pb.SeaweedMessaging_PublishClient
+ topicConfiguration *messaging_pb.TopicConfiguration
+ messageCount uint64
+ publisherId string
+}
+
+func (mc *MessagingClient) NewPublisher(publisherId, namespace, topic string) (*Publisher, error) {
+ // read topic configuration
+ topicConfiguration := &messaging_pb.TopicConfiguration{
+ PartitionCount: 4,
+ }
+ publishClients := make([]messaging_pb.SeaweedMessaging_PublishClient, topicConfiguration.PartitionCount)
+ for i := 0; i < int(topicConfiguration.PartitionCount); i++ {
+ tp := broker.TopicPartition{
+ Namespace: namespace,
+ Topic: topic,
+ Partition: int32(i),
+ }
+ grpcClientConn, err := mc.findBroker(tp)
+ if err != nil {
+ return nil, err
+ }
+ client, err := setupPublisherClient(grpcClientConn, tp)
+ if err != nil {
+ return nil, err
+ }
+ publishClients[i] = client
+ }
+ return &Publisher{
+ publishClients: publishClients,
+ topicConfiguration: topicConfiguration,
+ }, nil
+}
+
+func setupPublisherClient(grpcConnection *grpc.ClientConn, tp broker.TopicPartition) (messaging_pb.SeaweedMessaging_PublishClient, error) {
+
+ stream, err := messaging_pb.NewSeaweedMessagingClient(grpcConnection).Publish(context.Background())
+ if err != nil {
+ return nil, err
+ }
+
+ // send init message
+ err = stream.Send(&messaging_pb.PublishRequest{
+ Init: &messaging_pb.PublishRequest_InitMessage{
+ Namespace: tp.Namespace,
+ Topic: tp.Topic,
+ Partition: tp.Partition,
+ },
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // process init response
+ initResponse, err := stream.Recv()
+ if err != nil {
+ return nil, err
+ }
+ if initResponse.Redirect != nil {
+ // TODO follow redirection
+ }
+ if initResponse.Config != nil {
+ }
+
+ // setup looks for control messages
+ doneChan := make(chan error, 1)
+ go func() {
+ for {
+ in, err := stream.Recv()
+ if err != nil {
+ doneChan <- err
+ return
+ }
+ if in.Redirect != nil {
+ }
+ if in.Config != nil {
+ }
+ }
+ }()
+
+ return stream, nil
+
+}
+
+func (p *Publisher) Publish(m *messaging_pb.Message) error {
+ hashValue := p.messageCount
+ p.messageCount++
+ if p.topicConfiguration.Partitoning == messaging_pb.TopicConfiguration_NonNullKeyHash {
+ if m.Key != nil {
+ hashValue = xxhash.Checksum64(m.Key)
+ }
+ } else if p.topicConfiguration.Partitoning == messaging_pb.TopicConfiguration_KeyHash {
+ hashValue = xxhash.Checksum64(m.Key)
+ } else {
+ // round robin
+ }
+
+ idx := int(hashValue) % len(p.publishClients)
+ if idx < 0 {
+ idx += len(p.publishClients)
+ }
+ return p.publishClients[idx].Send(&messaging_pb.PublishRequest{
+ Data: m,
+ })
+}
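
Partition selection above either hashes the message key or falls back to a per-publisher counter for round robin; the extra idx adjustment matters because converting the uint64 hash to int can yield a negative value. A standalone sketch of just that arithmetic:

    package main

    import (
        "fmt"

        "github.com/OneOfOne/xxhash"
    )

    // pickPartition mirrors Publish above: hash the key when key-hash
    // partitioning is configured, otherwise use a round-robin counter, then
    // normalize the modulo so the index can never go negative.
    func pickPartition(key []byte, counter uint64, partitionCount int, keyHash bool) int {
        hashValue := counter
        if keyHash && key != nil {
            hashValue = xxhash.Checksum64(key)
        }
        idx := int(hashValue) % partitionCount
        if idx < 0 {
            idx += partitionCount
        }
        return idx
    }

    func main() {
        fmt.Println(pickPartition([]byte("user-42"), 0, 4, true)) // stable per key
        fmt.Println(pickPartition(nil, 7, 4, false))              // 7 % 4 = 3, round robin
    }
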
diff --git a/weed/messaging/msgclient/subscriber.go b/weed/messaging/msgclient/subscriber.go
new file mode 100644
index 000000000..caa795626
--- /dev/null
+++ b/weed/messaging/msgclient/subscriber.go
@@ -0,0 +1,120 @@
+package msgclient
+
+import (
+ "context"
+ "io"
+ "sync"
+ "time"
+
+ "github.com/chrislusf/seaweedfs/weed/messaging/broker"
+ "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
+ "google.golang.org/grpc"
+)
+
+type Subscriber struct {
+ subscriberClients []messaging_pb.SeaweedMessaging_SubscribeClient
+ subscriberCancels []context.CancelFunc
+ subscriberId string
+}
+
+func (mc *MessagingClient) NewSubscriber(subscriberId, namespace, topic string, partitionId int, startTime time.Time) (*Subscriber, error) {
+ // read topic configuration
+ topicConfiguration := &messaging_pb.TopicConfiguration{
+ PartitionCount: 4,
+ }
+ subscriberClients := make([]messaging_pb.SeaweedMessaging_SubscribeClient, topicConfiguration.PartitionCount)
+ subscriberCancels := make([]context.CancelFunc, topicConfiguration.PartitionCount)
+
+ for i := 0; i < int(topicConfiguration.PartitionCount); i++ {
+ if partitionId >= 0 && i != partitionId {
+ continue
+ }
+ tp := broker.TopicPartition{
+ Namespace: namespace,
+ Topic: topic,
+ Partition: int32(i),
+ }
+ grpcClientConn, err := mc.findBroker(tp)
+ if err != nil {
+ return nil, err
+ }
+ ctx, cancel := context.WithCancel(context.Background())
+ client, err := setupSubscriberClient(ctx, grpcClientConn, tp, subscriberId, startTime)
+ if err != nil {
+ return nil, err
+ }
+ subscriberClients[i] = client
+ subscriberCancels[i] = cancel
+ }
+
+ return &Subscriber{
+ subscriberClients: subscriberClients,
+ subscriberCancels: subscriberCancels,
+ subscriberId: subscriberId,
+ }, nil
+}
+
+func setupSubscriberClient(ctx context.Context, grpcConnection *grpc.ClientConn, tp broker.TopicPartition, subscriberId string, startTime time.Time) (stream messaging_pb.SeaweedMessaging_SubscribeClient, err error) {
+ stream, err = messaging_pb.NewSeaweedMessagingClient(grpcConnection).Subscribe(ctx)
+ if err != nil {
+ return
+ }
+
+ // send init message
+ err = stream.Send(&messaging_pb.SubscriberMessage{
+ Init: &messaging_pb.SubscriberMessage_InitMessage{
+ Namespace: tp.Namespace,
+ Topic: tp.Topic,
+ Partition: tp.Partition,
+ StartPosition: messaging_pb.SubscriberMessage_InitMessage_TIMESTAMP,
+ TimestampNs: startTime.UnixNano(),
+ SubscriberId: subscriberId,
+ },
+ })
+ if err != nil {
+ return
+ }
+
+ return stream, nil
+}
+
+func doSubscribe(subscriberClient messaging_pb.SeaweedMessaging_SubscribeClient, processFn func(m *messaging_pb.Message)) error {
+ for {
+ resp, listenErr := subscriberClient.Recv()
+ if listenErr == io.EOF {
+ return nil
+ }
+ if listenErr != nil {
+ println(listenErr.Error())
+ return listenErr
+ }
+ if resp.Data == nil {
+ // this could be heartbeat from broker
+ continue
+ }
+ processFn(resp.Data)
+ }
+}
+
+// Subscribe starts goroutines to process the messages
+func (s *Subscriber) Subscribe(processFn func(m *messaging_pb.Message)) {
+ var wg sync.WaitGroup
+ for i := 0; i < len(s.subscriberClients); i++ {
+ if s.subscriberClients[i] != nil {
+ wg.Add(1)
+ go func(subscriberClient messaging_pb.SeaweedMessaging_SubscribeClient) {
+ defer wg.Done()
+ doSubscribe(subscriberClient, processFn)
+ }(s.subscriberClients[i])
+ }
+ }
+ wg.Wait()
+}
+
+func (s *Subscriber) Shutdown() {
+ for i := 0; i < len(s.subscriberClients); i++ {
+ if s.subscriberCancels[i] != nil {
+ s.subscriberCancels[i]()
+ }
+ }
+}
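
A subscriber usage sketch for the API above; localhost:17777 is again a made-up broker address, and a negative partitionId subscribes to all partitions:

    package main

    import (
        "fmt"
        "log"
        "time"

        "github.com/chrislusf/seaweedfs/weed/messaging/msgclient"
        "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
    )

    func main() {
        mc := msgclient.NewMessagingClient("localhost:17777")
        // partitionId -1 means all partitions; time.Unix(0, 0) replays from the start.
        sub, err := mc.NewSubscriber("demo-subscriber", "chan", "events", -1, time.Unix(0, 0))
        if err != nil {
            log.Fatal(err)
        }
        defer sub.Shutdown()
        // Subscribe blocks, fanning one goroutine out per partition stream.
        sub.Subscribe(func(m *messaging_pb.Message) {
            fmt.Printf("key=%s value=%d bytes\n", m.Key, len(m.Value))
        })
    }
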
diff --git a/weed/notification/gocdk_pub_sub/gocdk_pub_sub.go b/weed/notification/gocdk_pub_sub/gocdk_pub_sub.go
index 706261b3a..1ae102509 100644
--- a/weed/notification/gocdk_pub_sub/gocdk_pub_sub.go
+++ b/weed/notification/gocdk_pub_sub/gocdk_pub_sub.go
@@ -60,8 +60,7 @@ func (k *GoCDKPubSub) SendMessage(key string, message proto.Message) error {
if err != nil {
return err
}
- ctx := context.Background()
- err = k.topic.Send(ctx, &pubsub.Message{
+ err = k.topic.Send(context.Background(), &pubsub.Message{
Body: bytes,
Metadata: map[string]string{"key": key},
})
diff --git a/weed/operation/assign_file_id.go b/weed/operation/assign_file_id.go
index b67d8b708..893bf516c 100644
--- a/weed/operation/assign_file_id.go
+++ b/weed/operation/assign_file_id.go
@@ -3,11 +3,13 @@ package operation
import (
"context"
"fmt"
+ "strings"
+
+ "google.golang.org/grpc"
+
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/util"
- "google.golang.org/grpc"
- "strings"
)
type VolumeAssignRequest struct {
@@ -44,7 +46,7 @@ func Assign(server string, grpcDialOption grpc.DialOption, primaryRequest *Volum
continue
}
- lastError = WithMasterServerClient(server, grpcDialOption, func(ctx context.Context, masterClient master_pb.SeaweedClient) error {
+ lastError = WithMasterServerClient(server, grpcDialOption, func(masterClient master_pb.SeaweedClient) error {
req := &master_pb.AssignRequest{
Count: primaryRequest.Count,
diff --git a/weed/operation/chunked_file.go b/weed/operation/chunked_file.go
index 295204dd8..baa0038c4 100644
--- a/weed/operation/chunked_file.go
+++ b/weed/operation/chunked_file.go
@@ -8,11 +8,10 @@ import (
"io/ioutil"
"net/http"
"sort"
+ "sync"
"google.golang.org/grpc"
- "sync"
-
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util"
)
@@ -41,12 +40,13 @@ type ChunkManifest struct {
// seekable chunked file reader
type ChunkedFileReader struct {
- Manifest *ChunkManifest
- Master string
- pos int64
- pr *io.PipeReader
- pw *io.PipeWriter
- mutex sync.Mutex
+ totalSize int64
+ chunkList []*ChunkInfo
+ master string
+ pos int64
+ pr *io.PipeReader
+ pw *io.PipeWriter
+ mutex sync.Mutex
}
func (s ChunkList) Len() int { return len(s) }
@@ -72,12 +72,12 @@ func (cm *ChunkManifest) Marshal() ([]byte, error) {
return json.Marshal(cm)
}
-func (cm *ChunkManifest) DeleteChunks(master string, grpcDialOption grpc.DialOption) error {
+func (cm *ChunkManifest) DeleteChunks(master string, usePublicUrl bool, grpcDialOption grpc.DialOption) error {
var fileIds []string
for _, ci := range cm.Chunks {
fileIds = append(fileIds, ci.Fid)
}
- results, err := DeleteFiles(master, grpcDialOption, fileIds)
+ results, err := DeleteFiles(master, usePublicUrl, grpcDialOption, fileIds)
if err != nil {
glog.V(0).Infof("delete %+v: %v", fileIds, err)
return fmt.Errorf("chunk delete: %v", err)
@@ -126,16 +126,29 @@ func readChunkNeedle(fileUrl string, w io.Writer, offset int64) (written int64,
return io.Copy(w, resp.Body)
}
+func NewChunkedFileReader(chunkList []*ChunkInfo, master string) *ChunkedFileReader {
+ var totalSize int64
+ for _, chunk := range chunkList {
+ totalSize += chunk.Size
+ }
+ sort.Sort(ChunkList(chunkList))
+ return &ChunkedFileReader{
+ totalSize: totalSize,
+ chunkList: chunkList,
+ master: master,
+ }
+}
+
func (cf *ChunkedFileReader) Seek(offset int64, whence int) (int64, error) {
var err error
switch whence {
- case 0:
- case 1:
+ case io.SeekStart:
+ case io.SeekCurrent:
offset += cf.pos
- case 2:
- offset = cf.Manifest.Size - offset
+ case io.SeekEnd:
+ offset = cf.totalSize + offset
}
- if offset > cf.Manifest.Size {
+ if offset > cf.totalSize {
err = ErrInvalidRange
}
if cf.pos != offset {
@@ -146,10 +159,9 @@ func (cf *ChunkedFileReader) Seek(offset int64, whence int) (int64, error) {
}
func (cf *ChunkedFileReader) WriteTo(w io.Writer) (n int64, err error) {
- cm := cf.Manifest
chunkIndex := -1
chunkStartOffset := int64(0)
- for i, ci := range cm.Chunks {
+ for i, ci := range cf.chunkList {
if cf.pos >= ci.Offset && cf.pos < ci.Offset+ci.Size {
chunkIndex = i
chunkStartOffset = cf.pos - ci.Offset
@@ -159,10 +171,10 @@ func (cf *ChunkedFileReader) WriteTo(w io.Writer) (n int64, err error) {
if chunkIndex < 0 {
return n, ErrInvalidRange
}
- for ; chunkIndex < cm.Chunks.Len(); chunkIndex++ {
- ci := cm.Chunks[chunkIndex]
+ for ; chunkIndex < len(cf.chunkList); chunkIndex++ {
+ ci := cf.chunkList[chunkIndex]
// should we read the data from the local volume server first?
- fileUrl, lookupError := LookupFileId(cf.Master, ci.Fid)
+ fileUrl, lookupError := LookupFileId(cf.master, ci.Fid)
if lookupError != nil {
return n, lookupError
}
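
The rewritten Seek above now follows the io.Seeker convention: the SeekEnd case previously computed Size minus offset and now adds the (typically negative) offset to the total size. A standalone sketch of the offset arithmetic:

    package main

    import (
        "fmt"
        "io"
    )

    // seekOffset mirrors the Seek arithmetic above: SeekStart uses the offset
    // as-is, SeekCurrent adds the current position, and SeekEnd adds the total
    // size, so a negative offset addresses bytes from the end.
    func seekOffset(offset, pos, totalSize int64, whence int) (int64, error) {
        switch whence {
        case io.SeekStart:
        case io.SeekCurrent:
            offset += pos
        case io.SeekEnd:
            offset = totalSize + offset
        }
        if offset > totalSize {
            return offset, fmt.Errorf("invalid range")
        }
        return offset, nil
    }

    func main() {
        pos, _ := seekOffset(-10, 0, 100, io.SeekEnd)
        fmt.Println(pos) // 90: ten bytes before the end
    }
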
diff --git a/weed/operation/delete_content.go b/weed/operation/delete_content.go
index 95bbde9f9..9868a411d 100644
--- a/weed/operation/delete_content.go
+++ b/weed/operation/delete_content.go
@@ -10,7 +10,6 @@ import (
"google.golang.org/grpc"
- "github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
)
@@ -30,10 +29,18 @@ func ParseFileId(fid string) (vid string, key_cookie string, err error) {
}
// DeleteFiles batch deletes a list of fileIds
-func DeleteFiles(master string, grpcDialOption grpc.DialOption, fileIds []string) ([]*volume_server_pb.DeleteResult, error) {
+func DeleteFiles(master string, usePublicUrl bool, grpcDialOption grpc.DialOption, fileIds []string) ([]*volume_server_pb.DeleteResult, error) {
- lookupFunc := func(vids []string) (map[string]LookupResult, error) {
- return LookupVolumeIds(master, grpcDialOption, vids)
+ lookupFunc := func(vids []string) (results map[string]LookupResult, err error) {
+ results, err = LookupVolumeIds(master, grpcDialOption, vids)
+ if err == nil && usePublicUrl {
+ for _, result := range results {
+ for _, loc := range result.Locations {
+ loc.Url = loc.PublicUrl
+ }
+ }
+ }
+ return
}
return DeleteFilesWithLookupVolumeId(grpcDialOption, fileIds, lookupFunc)
@@ -94,7 +101,7 @@ func DeleteFilesWithLookupVolumeId(grpcDialOption grpc.DialOption, fileIds []str
go func(server string, fidList []string) {
defer wg.Done()
- if deleteResults, deleteErr := DeleteFilesAtOneVolumeServer(server, grpcDialOption, fidList); deleteErr != nil {
+ if deleteResults, deleteErr := DeleteFilesAtOneVolumeServer(server, grpcDialOption, fidList, true); deleteErr != nil {
err = deleteErr
} else if deleteResults != nil {
resultChan <- deleteResults
@@ -109,18 +116,17 @@ func DeleteFilesWithLookupVolumeId(grpcDialOption grpc.DialOption, fileIds []str
ret = append(ret, result...)
}
- glog.V(1).Infof("deleted %d items", len(ret))
-
return ret, err
}
// DeleteFilesAtOneVolumeServer deletes a list of files that is on one volume server via gRpc
-func DeleteFilesAtOneVolumeServer(volumeServer string, grpcDialOption grpc.DialOption, fileIds []string) (ret []*volume_server_pb.DeleteResult, err error) {
+func DeleteFilesAtOneVolumeServer(volumeServer string, grpcDialOption grpc.DialOption, fileIds []string, includeCookie bool) (ret []*volume_server_pb.DeleteResult, err error) {
- err = WithVolumeServerClient(volumeServer, grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error {
+ err = WithVolumeServerClient(volumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
req := &volume_server_pb.BatchDeleteRequest{
- FileIds: fileIds,
+ FileIds: fileIds,
+ SkipCookieCheck: !includeCookie,
}
resp, err := volumeServerClient.BatchDelete(context.Background(), req)
diff --git a/weed/operation/grpc_client.go b/weed/operation/grpc_client.go
index e7ee2d2ba..025a65b38 100644
--- a/weed/operation/grpc_client.go
+++ b/weed/operation/grpc_client.go
@@ -1,29 +1,29 @@
package operation
import (
- "context"
"fmt"
- "github.com/chrislusf/seaweedfs/weed/glog"
- "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
- "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
- "github.com/chrislusf/seaweedfs/weed/util"
- "google.golang.org/grpc"
"strconv"
"strings"
+
+ "google.golang.org/grpc"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
)
-func WithVolumeServerClient(volumeServer string, grpcDialOption grpc.DialOption, fn func(context.Context, volume_server_pb.VolumeServerClient) error) error {
-
- ctx := context.Background()
+func WithVolumeServerClient(volumeServer string, grpcDialOption grpc.DialOption, fn func(volume_server_pb.VolumeServerClient) error) error {
grpcAddress, err := toVolumeServerGrpcAddress(volumeServer)
if err != nil {
- return err
+ return fmt.Errorf("failed to parse volume server %v: %v", volumeServer, err)
}
- return util.WithCachedGrpcClient(ctx, func(ctx2 context.Context, grpcConnection *grpc.ClientConn) error {
+ return pb.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error {
client := volume_server_pb.NewVolumeServerClient(grpcConnection)
- return fn(ctx2, client)
+ return fn(client)
}, grpcAddress, grpcDialOption)
}
@@ -38,18 +38,30 @@ func toVolumeServerGrpcAddress(volumeServer string) (grpcAddress string, err err
return fmt.Sprintf("%s:%d", volumeServer[0:sepIndex], port+10000), nil
}
-func WithMasterServerClient(masterServer string, grpcDialOption grpc.DialOption, fn func(ctx2 context.Context, masterClient master_pb.SeaweedClient) error) error {
+func WithMasterServerClient(masterServer string, grpcDialOption grpc.DialOption, fn func(masterClient master_pb.SeaweedClient) error) error {
- ctx := context.Background()
-
- masterGrpcAddress, parseErr := util.ParseServerToGrpcAddress(masterServer)
+ masterGrpcAddress, parseErr := pb.ParseServerToGrpcAddress(masterServer)
if parseErr != nil {
- return fmt.Errorf("failed to parse master grpc %v: %v", masterServer, parseErr)
+ return fmt.Errorf("failed to parse master %v: %v", masterServer, parseErr)
}
- return util.WithCachedGrpcClient(ctx, func(ctx2 context.Context, grpcConnection *grpc.ClientConn) error {
+ return pb.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error {
client := master_pb.NewSeaweedClient(grpcConnection)
- return fn(ctx2, client)
+ return fn(client)
}, masterGrpcAddress, grpcDialOption)
}
+
+func WithFilerServerClient(filerServer string, grpcDialOption grpc.DialOption, fn func(filerClient filer_pb.SeaweedFilerClient) error) error {
+
+ filerGrpcAddress, parseErr := pb.ParseServerToGrpcAddress(filerServer)
+ if parseErr != nil {
+ return fmt.Errorf("failed to parse filer %v: %v", filerServer, parseErr)
+ }
+
+ return pb.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error {
+ client := filer_pb.NewSeaweedFilerClient(grpcConnection)
+ return fn(client)
+ }, filerGrpcAddress, grpcDialOption)
+
+}
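
The With*Client helpers now take callbacks without a context; callers create their own per RPC. A usage sketch against the master helper, assuming a master at localhost:9333 and plain-text gRPC:

    package main

    import (
        "context"
        "fmt"
        "log"

        "google.golang.org/grpc"

        "github.com/chrislusf/seaweedfs/weed/operation"
        "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
    )

    func main() {
        dialOption := grpc.WithInsecure() // no TLS; fine for a local test master
        err := operation.WithMasterServerClient("localhost:9333", dialOption, func(masterClient master_pb.SeaweedClient) error {
            // the callback no longer receives a context, so create one per call
            resp, err := masterClient.Statistics(context.Background(), &master_pb.StatisticsRequest{})
            if err != nil {
                return err
            }
            fmt.Printf("master statistics: %+v\n", resp)
            return nil
        })
        if err != nil {
            log.Fatal(err)
        }
    }
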
diff --git a/weed/operation/lookup.go b/weed/operation/lookup.go
index 78769ac5a..d0773e7fd 100644
--- a/weed/operation/lookup.go
+++ b/weed/operation/lookup.go
@@ -99,12 +99,12 @@ func LookupVolumeIds(server string, grpcDialOption grpc.DialOption, vids []strin
//only query unknown_vids
- err := WithMasterServerClient(server, grpcDialOption, func(ctx context.Context, masterClient master_pb.SeaweedClient) error {
+ err := WithMasterServerClient(server, grpcDialOption, func(masterClient master_pb.SeaweedClient) error {
req := &master_pb.LookupVolumeRequest{
VolumeIds: unknown_vids,
}
- resp, grpcErr := masterClient.LookupVolume(ctx, req)
+ resp, grpcErr := masterClient.LookupVolume(context.Background(), req)
if grpcErr != nil {
return grpcErr
}
diff --git a/weed/operation/stats.go b/weed/operation/stats.go
deleted file mode 100644
index 3e6327f19..000000000
--- a/weed/operation/stats.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package operation
-
-import (
- "context"
- "google.golang.org/grpc"
-
- "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
-)
-
-func Statistics(server string, grpcDialOption grpc.DialOption, req *master_pb.StatisticsRequest) (resp *master_pb.StatisticsResponse, err error) {
-
- err = WithMasterServerClient(server, grpcDialOption, func(ctx context.Context, masterClient master_pb.SeaweedClient) error {
-
- grpcResponse, grpcErr := masterClient.Statistics(ctx, req)
- if grpcErr != nil {
- return grpcErr
- }
-
- resp = grpcResponse
-
- return nil
-
- })
-
- return
-}
diff --git a/weed/operation/submit.go b/weed/operation/submit.go
index 62f067430..e8bec382a 100644
--- a/weed/operation/submit.go
+++ b/weed/operation/submit.go
@@ -1,8 +1,6 @@
package operation
import (
- "bytes"
- "google.golang.org/grpc"
"io"
"mime"
"net/url"
@@ -11,6 +9,8 @@ import (
"strconv"
"strings"
+ "google.golang.org/grpc"
+
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/security"
)
@@ -27,6 +27,7 @@ type FilePart struct {
Ttl string
Server string //this comes from assign result
Fid string //this comes from assign result, but customizable
+ Fsync bool
}
type SubmitResult struct {
@@ -37,8 +38,7 @@ type SubmitResult struct {
Error string `json:"error,omitempty"`
}
-func SubmitFiles(master string, grpcDialOption grpc.DialOption, files []FilePart,
- replication string, collection string, dataCenter string, ttl string, maxMB int) ([]SubmitResult, error) {
+func SubmitFiles(master string, grpcDialOption grpc.DialOption, files []FilePart, replication string, collection string, dataCenter string, ttl string, maxMB int, usePublicUrl bool) ([]SubmitResult, error) {
results := make([]SubmitResult, len(files))
for index, file := range files {
results[index].FileName = file.FileName
@@ -52,7 +52,7 @@ func SubmitFiles(master string, grpcDialOption grpc.DialOption, files []FilePart
}
ret, err := Assign(master, grpcDialOption, ar)
if err != nil {
- for index, _ := range files {
+ for index := range files {
results[index].Error = err.Error()
}
return results, err
@@ -63,10 +63,13 @@ func SubmitFiles(master string, grpcDialOption grpc.DialOption, files []FilePart
file.Fid = file.Fid + "_" + strconv.Itoa(index)
}
file.Server = ret.Url
+ if usePublicUrl {
+ file.Server = ret.PublicUrl
+ }
file.Replication = replication
file.Collection = collection
file.DataCenter = dataCenter
- results[index].Size, err = file.Upload(maxMB, master, ret.Auth, grpcDialOption)
+ results[index].Size, err = file.Upload(maxMB, master, usePublicUrl, ret.Auth, grpcDialOption)
if err != nil {
results[index].Error = err.Error()
}
@@ -109,11 +112,14 @@ func newFilePart(fullPathFilename string) (ret FilePart, err error) {
return ret, nil
}
-func (fi FilePart) Upload(maxMB int, master string, jwt security.EncodedJwt, grpcDialOption grpc.DialOption) (retSize uint32, err error) {
+func (fi FilePart) Upload(maxMB int, master string, usePublicUrl bool, jwt security.EncodedJwt, grpcDialOption grpc.DialOption) (retSize uint32, err error) {
fileUrl := "http://" + fi.Server + "/" + fi.Fid
if fi.ModTime != 0 {
fileUrl += "?ts=" + strconv.Itoa(int(fi.ModTime))
}
+	if fi.Fsync {
+		if strings.Contains(fileUrl, "?") {
+			fileUrl += "&fsync=true"
+		} else {
+			fileUrl += "?fsync=true"
+		}
+	}
if closer, ok := fi.Reader.(io.Closer); ok {
defer closer.Close()
}
@@ -153,7 +159,7 @@ func (fi FilePart) Upload(maxMB int, master string, jwt security.EncodedJwt, grp
ret, err = Assign(master, grpcDialOption, ar)
if err != nil {
// delete all uploaded chunks
- cm.DeleteChunks(master, grpcDialOption)
+ cm.DeleteChunks(master, usePublicUrl, grpcDialOption)
return
}
id = ret.Fid
@@ -171,7 +177,7 @@ func (fi FilePart) Upload(maxMB int, master string, jwt security.EncodedJwt, grp
ret.Auth)
if e != nil {
// delete all uploaded chunks
- cm.DeleteChunks(master, grpcDialOption)
+ cm.DeleteChunks(master, usePublicUrl, grpcDialOption)
return 0, e
}
cm.Chunks = append(cm.Chunks,
@@ -186,10 +192,10 @@ func (fi FilePart) Upload(maxMB int, master string, jwt security.EncodedJwt, grp
err = upload_chunked_file_manifest(fileUrl, &cm, jwt)
if err != nil {
// delete all uploaded chunks
- cm.DeleteChunks(master, grpcDialOption)
+ cm.DeleteChunks(master, usePublicUrl, grpcDialOption)
}
} else {
- ret, e := Upload(fileUrl, baseName, fi.Reader, false, fi.MimeType, nil, jwt)
+ ret, e, _ := Upload(fileUrl, baseName, false, fi.Reader, false, fi.MimeType, nil, jwt)
if e != nil {
return 0, e
}
@@ -202,8 +208,7 @@ func upload_one_chunk(filename string, reader io.Reader, master,
fileUrl string, jwt security.EncodedJwt,
) (size uint32, e error) {
glog.V(4).Info("Uploading part ", filename, " to ", fileUrl, "...")
- uploadResult, uploadError := Upload(fileUrl, filename, reader, false,
- "", nil, jwt)
+ uploadResult, uploadError, _ := Upload(fileUrl, filename, false, reader, false, "", nil, jwt)
if uploadError != nil {
return 0, uploadError
}
@@ -215,12 +220,11 @@ func upload_chunked_file_manifest(fileUrl string, manifest *ChunkManifest, jwt s
if e != nil {
return e
}
- bufReader := bytes.NewReader(buf)
glog.V(4).Info("Uploading chunks manifest ", manifest.Name, " to ", fileUrl, "...")
u, _ := url.Parse(fileUrl)
q := u.Query()
q.Set("cm", "true")
u.RawQuery = q.Encode()
- _, e = Upload(u.String(), manifest.Name, bufReader, false, "application/json", nil, jwt)
+ _, e = UploadData(u.String(), manifest.Name, false, buf, false, "application/json", nil, jwt)
return e
}
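
An illustrative call exercising the new `Fsync` field and the trailing `usePublicUrl` parameter of `SubmitFiles`. The master address, replication string, and file content are placeholders, not values taken from this change.

```go
package main

import (
	"fmt"
	"log"
	"strings"

	"google.golang.org/grpc"

	"github.com/chrislusf/seaweedfs/weed/operation"
)

func main() {
	parts := []operation.FilePart{{
		FileName: "notes.txt",                        // placeholder name
		Reader:   strings.NewReader("hello seaweed"), // any io.Reader works
		Fsync:    true,                                // new field: ask the volume server to fsync this write
	}}
	// the trailing true enables the new usePublicUrl behavior
	results, err := operation.SubmitFiles("localhost:9333", grpc.WithInsecure(),
		parts, "000", "", "", "", 4, true)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", results)
}
```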
diff --git a/weed/operation/sync_volume.go b/weed/operation/sync_volume.go
index 4b39ad544..5562f12ab 100644
--- a/weed/operation/sync_volume.go
+++ b/weed/operation/sync_volume.go
@@ -8,9 +8,9 @@ import (
func GetVolumeSyncStatus(server string, grpcDialOption grpc.DialOption, vid uint32) (resp *volume_server_pb.VolumeSyncStatusResponse, err error) {
- WithVolumeServerClient(server, grpcDialOption, func(ctx context.Context, client volume_server_pb.VolumeServerClient) error {
+ WithVolumeServerClient(server, grpcDialOption, func(client volume_server_pb.VolumeServerClient) error {
- resp, err = client.VolumeSyncStatus(ctx, &volume_server_pb.VolumeSyncStatusRequest{
+ resp, err = client.VolumeSyncStatus(context.Background(), &volume_server_pb.VolumeSyncStatusRequest{
VolumeId: vid,
})
return nil
diff --git a/weed/operation/tail_volume.go b/weed/operation/tail_volume.go
index 1e8b0a16e..3cd66b5da 100644
--- a/weed/operation/tail_volume.go
+++ b/weed/operation/tail_volume.go
@@ -5,9 +5,10 @@ import (
"fmt"
"io"
+ "google.golang.org/grpc"
+
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
- "google.golang.org/grpc"
)
func TailVolume(master string, grpcDialOption grpc.DialOption, vid needle.VolumeId, sinceNs uint64, timeoutSeconds int, fn func(n *needle.Needle) error) error {
@@ -26,9 +27,9 @@ func TailVolume(master string, grpcDialOption grpc.DialOption, vid needle.Volume
}
func TailVolumeFromSource(volumeServer string, grpcDialOption grpc.DialOption, vid needle.VolumeId, sinceNs uint64, idleTimeoutSeconds int, fn func(n *needle.Needle) error) error {
- return WithVolumeServerClient(volumeServer, grpcDialOption, func(ctx context.Context, client volume_server_pb.VolumeServerClient) error {
+ return WithVolumeServerClient(volumeServer, grpcDialOption, func(client volume_server_pb.VolumeServerClient) error {
- stream, err := client.VolumeTailSender(ctx, &volume_server_pb.VolumeTailSenderRequest{
+ stream, err := client.VolumeTailSender(context.Background(), &volume_server_pb.VolumeTailSenderRequest{
VolumeId: uint32(vid),
SinceNs: sinceNs,
IdleTimeoutSeconds: uint32(idleTimeoutSeconds),
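
A hedged sketch of tailing a volume with `TailVolume`, whose callback signature this file defines; the master address, volume id, and timeout are placeholders, and the needle fields printed are assumed from the storage package.

```go
package main

import (
	"fmt"
	"log"

	"google.golang.org/grpc"

	"github.com/chrislusf/seaweedfs/weed/operation"
	"github.com/chrislusf/seaweedfs/weed/storage/needle"
)

func main() {
	// placeholder master address, volume id, and timeout
	err := operation.TailVolume("localhost:9333", grpc.WithInsecure(),
		needle.VolumeId(3), 0, 30, func(n *needle.Needle) error {
			fmt.Printf("needle %d: %d bytes\n", n.Id, n.DataSize)
			return nil
		})
	if err != nil {
		log.Fatal(err)
	}
}
```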
diff --git a/weed/operation/upload_content.go b/weed/operation/upload_content.go
index c387d0230..1e2c591c5 100644
--- a/weed/operation/upload_content.go
+++ b/weed/operation/upload_content.go
@@ -2,8 +2,7 @@ package operation
import (
"bytes"
- "compress/flate"
- "compress/gzip"
+ "crypto/md5"
"encoding/json"
"errors"
"fmt"
@@ -15,17 +14,35 @@ import (
"net/textproto"
"path/filepath"
"strings"
+ "time"
"github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/util"
)
type UploadResult struct {
- Name string `json:"name,omitempty"`
- Size uint32 `json:"size,omitempty"`
- Error string `json:"error,omitempty"`
- ETag string `json:"eTag,omitempty"`
+ Name string `json:"name,omitempty"`
+ Size uint32 `json:"size,omitempty"`
+ Error string `json:"error,omitempty"`
+ ETag string `json:"eTag,omitempty"`
+ CipherKey []byte `json:"cipherKey,omitempty"`
+ Mime string `json:"mime,omitempty"`
+ Gzip uint32 `json:"gzip,omitempty"`
+ Md5 string `json:"md5,omitempty"`
+}
+
+func (uploadResult *UploadResult) ToPbFileChunk(fileId string, offset int64) *filer_pb.FileChunk {
+ return &filer_pb.FileChunk{
+ FileId: fileId,
+ Offset: offset,
+ Size: uint64(uploadResult.Size),
+ Mtime: time.Now().UnixNano(),
+ ETag: uploadResult.ETag,
+ CipherKey: uploadResult.CipherKey,
+ IsGzipped: uploadResult.Gzip > 0,
+ }
}
var (
@@ -41,40 +58,113 @@ func init() {
var fileNameEscaper = strings.NewReplacer("\\", "\\\\", "\"", "\\\"")
-// Upload sends a POST request to a volume server to upload the content with adjustable compression level
+// UploadData sends a POST request to a volume server to upload fully read data, optionally compressing and encrypting it
-func UploadWithLocalCompressionLevel(uploadUrl string, filename string, reader io.Reader, isGzipped bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt, compressionLevel int) (*UploadResult, error) {
- if compressionLevel < 1 {
- compressionLevel = 1
+func UploadData(uploadUrl string, filename string, cipher bool, data []byte, isInputGzipped bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (uploadResult *UploadResult, err error) {
+ uploadResult, err = doUploadData(uploadUrl, filename, cipher, data, isInputGzipped, mtype, pairMap, jwt)
+ if uploadResult != nil {
+ uploadResult.Md5 = util.Md5(data)
}
- if compressionLevel > 9 {
- compressionLevel = 9
- }
- return doUpload(uploadUrl, filename, reader, isGzipped, mtype, pairMap, compressionLevel, jwt)
+ return
}
-// Upload sends a POST request to a volume server to upload the content with fast compression
+// Upload sends a POST request to a volume server to upload the content, compressing and encrypting when appropriate
-func Upload(uploadUrl string, filename string, reader io.Reader, isGzipped bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (*UploadResult, error) {
- return doUpload(uploadUrl, filename, reader, isGzipped, mtype, pairMap, flate.BestSpeed, jwt)
+func Upload(uploadUrl string, filename string, cipher bool, reader io.Reader, isInputGzipped bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (uploadResult *UploadResult, err error, data []byte) {
+ hash := md5.New()
+ reader = io.TeeReader(reader, hash)
+ uploadResult, err, data = doUpload(uploadUrl, filename, cipher, reader, isInputGzipped, mtype, pairMap, jwt)
+ if uploadResult != nil {
+ uploadResult.Md5 = fmt.Sprintf("%x", hash.Sum(nil))
+ }
+ return
}
-func doUpload(uploadUrl string, filename string, reader io.Reader, isGzipped bool, mtype string, pairMap map[string]string, compression int, jwt security.EncodedJwt) (*UploadResult, error) {
- contentIsGzipped := isGzipped
+func doUpload(uploadUrl string, filename string, cipher bool, reader io.Reader, isInputGzipped bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (uploadResult *UploadResult, err error, data []byte) {
+ data, err = ioutil.ReadAll(reader)
+ if err != nil {
+ err = fmt.Errorf("read input: %v", err)
+ return
+ }
+ uploadResult, uploadErr := doUploadData(uploadUrl, filename, cipher, data, isInputGzipped, mtype, pairMap, jwt)
+ return uploadResult, uploadErr, data
+}
+
+func doUploadData(uploadUrl string, filename string, cipher bool, data []byte, isInputGzipped bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (uploadResult *UploadResult, err error) {
+ contentIsGzipped := isInputGzipped
shouldGzipNow := false
- if !isGzipped {
+ if !isInputGzipped {
+ if mtype == "" {
+ mtype = http.DetectContentType(data)
+ if mtype == "application/octet-stream" {
+ mtype = ""
+ }
+ }
if shouldBeZipped, iAmSure := util.IsGzippableFileType(filepath.Base(filename), mtype); iAmSure && shouldBeZipped {
shouldGzipNow = true
- contentIsGzipped = true
+ } else if !iAmSure && mtype == "" && len(data) > 128 {
+ var compressed []byte
+ compressed, err = util.GzipData(data[0:128])
+			shouldGzipNow = len(compressed)*10 < 128*9 // gzip only if the sampled 128 bytes compress to below 90% of their size
}
}
- return upload_content(uploadUrl, func(w io.Writer) (err error) {
- if shouldGzipNow {
- gzWriter, _ := gzip.NewWriterLevel(w, compression)
- _, err = io.Copy(gzWriter, reader)
- gzWriter.Close()
- } else {
- _, err = io.Copy(w, reader)
+
+ var clearDataLen int
+
+	// gzip when it is known to help; note this may copy the data a second time
+ clearDataLen = len(data)
+ if shouldGzipNow {
+ compressed, compressErr := util.GzipData(data)
+ // fmt.Printf("data is compressed from %d ==> %d\n", len(data), len(compressed))
+ if compressErr == nil {
+ data = compressed
+ contentIsGzipped = true
}
+ } else if isInputGzipped {
+ // just to get the clear data length
+ clearData, err := util.UnGzipData(data)
+ if err == nil {
+ clearDataLen = len(clearData)
+ }
+ }
+
+ if cipher {
+ // encrypt(gzip(data))
+
+ // encrypt
+ cipherKey := util.GenCipherKey()
+ encryptedData, encryptionErr := util.Encrypt(data, cipherKey)
+ if encryptionErr != nil {
+ err = fmt.Errorf("encrypt input: %v", encryptionErr)
+ return
+ }
+
+ // upload data
+ uploadResult, err = upload_content(uploadUrl, func(w io.Writer) (err error) {
+ _, err = w.Write(encryptedData)
+ return
+ }, "", false, "", nil, jwt)
+ if uploadResult != nil {
+ uploadResult.Name = filename
+ uploadResult.Mime = mtype
+ uploadResult.CipherKey = cipherKey
+ }
+ } else {
+ // upload data
+ uploadResult, err = upload_content(uploadUrl, func(w io.Writer) (err error) {
+ _, err = w.Write(data)
+ return
+ }, filename, contentIsGzipped, mtype, pairMap, jwt)
+ }
+
+ if uploadResult == nil {
return
- }, filename, contentIsGzipped, mtype, pairMap, jwt)
+ }
+
+ uploadResult.Size = uint32(clearDataLen)
+ if contentIsGzipped {
+ uploadResult.Gzip = 1
+ }
+
+ return uploadResult, err
}
func upload_content(uploadUrl string, fillBufferFunction func(w io.Writer) error, filename string, isGzipped bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (*UploadResult, error) {
@@ -125,12 +215,17 @@ func upload_content(uploadUrl string, fillBufferFunction func(w io.Writer) error
return nil, post_err
}
defer resp.Body.Close()
+
+ var ret UploadResult
etag := getEtag(resp)
+ if resp.StatusCode == http.StatusNoContent {
+ ret.ETag = etag
+ return &ret, nil
+ }
resp_body, ra_err := ioutil.ReadAll(resp.Body)
if ra_err != nil {
return nil, ra_err
}
- var ret UploadResult
unmarshal_err := json.Unmarshal(resp_body, &ret)
if unmarshal_err != nil {
glog.V(0).Infoln("failing to read upload response", uploadUrl, string(resp_body))
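
To show how the reworked upload path fits together, here is a sketch of uploading with `cipher` enabled and carrying the key forward via the new `ToPbFileChunk` helper. The upload URL, file id, and empty JWT are placeholders; only the signatures and result fields come from this diff.

```go
package main

import (
	"fmt"
	"log"

	"github.com/chrislusf/seaweedfs/weed/operation"
)

func main() {
	data := []byte("hello seaweedfs")
	// placeholder upload url and empty jwt; cipher=true triggers encrypt(gzip(data))
	result, err := operation.UploadData("http://localhost:8080/3,01637037d6", "hello.txt",
		true, data, false, "text/plain", nil, "")
	if err != nil {
		log.Fatal(err)
	}
	// the cipher key lives only in the result; persist it through the FileChunk
	chunk := result.ToPbFileChunk("3,01637037d6", 0)
	fmt.Printf("md5=%s gzip=%d cipherKey=%x\n", result.Md5, result.Gzip, chunk.CipherKey)
}
```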
diff --git a/weed/pb/Makefile b/weed/pb/Makefile
index c50410574..5053669d8 100644
--- a/weed/pb/Makefile
+++ b/weed/pb/Makefile
@@ -6,5 +6,7 @@ gen:
protoc master.proto --go_out=plugins=grpc:./master_pb
protoc volume_server.proto --go_out=plugins=grpc:./volume_server_pb
protoc filer.proto --go_out=plugins=grpc:./filer_pb
+ protoc iam.proto --go_out=plugins=grpc:./iam_pb
+ protoc messaging.proto --go_out=plugins=grpc:./messaging_pb
# protoc filer.proto --java_out=../../other/java/client/src/main/java
cp filer.proto ../../other/java/client/src/main/proto
diff --git a/weed/pb/filer.proto b/weed/pb/filer.proto
index 6357d971f..1fc8ef63d 100644
--- a/weed/pb/filer.proto
+++ b/weed/pb/filer.proto
@@ -21,6 +21,9 @@ service SeaweedFiler {
rpc UpdateEntry (UpdateEntryRequest) returns (UpdateEntryResponse) {
}
+ rpc AppendToEntry (AppendToEntryRequest) returns (AppendToEntryResponse) {
+ }
+
rpc DeleteEntry (DeleteEntryRequest) returns (DeleteEntryResponse) {
}
@@ -42,6 +45,15 @@ service SeaweedFiler {
rpc GetFilerConfiguration (GetFilerConfigurationRequest) returns (GetFilerConfigurationResponse) {
}
+ rpc SubscribeMetadata (SubscribeMetadataRequest) returns (stream SubscribeMetadataResponse) {
+ }
+
+ rpc KeepConnected (stream KeepConnectedRequest) returns (stream KeepConnectedResponse) {
+ }
+
+ rpc LocateBroker (LocateBrokerRequest) returns (LocateBrokerResponse) {
+ }
+
}
//////////////////////////////////////////////////
@@ -96,6 +108,8 @@ message FileChunk {
string source_file_id = 6; // to be deprecated
FileId fid = 7;
FileId source_fid = 8;
+ bytes cipher_key = 9;
+ bool is_gzipped = 10;
}
message FileId {
@@ -118,6 +132,7 @@ message FuseAttributes {
string user_name = 11; // for hdfs
repeated string group_name = 12; // for hdfs
string symlink_target = 13;
+ bytes md5 = 14;
}
message CreateEntryRequest {
@@ -137,6 +152,14 @@ message UpdateEntryRequest {
message UpdateEntryResponse {
}
+message AppendToEntryRequest {
+ string directory = 1;
+ string entry_name = 2;
+ repeated FileChunk chunks = 3;
+}
+message AppendToEntryResponse {
+}
+
message DeleteEntryRequest {
string directory = 1;
string name = 2;
@@ -147,6 +170,7 @@ message DeleteEntryRequest {
}
message DeleteEntryResponse {
+ string error = 1;
}
message AtomicRenameEntryRequest {
@@ -165,6 +189,7 @@ message AssignVolumeRequest {
string replication = 3;
int32 ttl_sec = 4;
string data_center = 5;
+ string parent_path = 6;
}
message AssignVolumeResponse {
@@ -173,6 +198,9 @@ message AssignVolumeResponse {
string public_url = 3;
int32 count = 4;
string auth = 5;
+ string collection = 6;
+ string replication = 7;
+ string error = 8;
}
message LookupVolumeRequest {
@@ -219,4 +247,45 @@ message GetFilerConfigurationResponse {
string replication = 2;
string collection = 3;
uint32 max_mb = 4;
+ string dir_buckets = 5;
+ bool cipher = 7;
+}
+
+message SubscribeMetadataRequest {
+ string client_name = 1;
+ string path_prefix = 2;
+ int64 since_ns = 3;
+}
+message SubscribeMetadataResponse {
+ string directory = 1;
+ EventNotification event_notification = 2;
+ int64 ts_ns = 3;
+}
+
+message LogEntry {
+ int64 ts_ns = 1;
+ int32 partition_key_hash = 2;
+ bytes data = 3;
+}
+
+message KeepConnectedRequest {
+ string name = 1;
+ uint32 grpc_port = 2;
+ repeated string resources = 3;
+}
+message KeepConnectedResponse {
+}
+
+message LocateBrokerRequest {
+ string resource = 1;
+}
+message LocateBrokerResponse {
+ bool found = 1;
+ // if found, send the exact address
+ // if not found, send the full list of existing brokers
+ message Resource {
+ string grpc_addresses = 1;
+ int32 resource_count = 2;
+ }
+ repeated Resource resources = 2;
}
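
A hedged client-side sketch of the new `SubscribeMetadata` stream defined above; the filer gRPC port, client name, and path prefix are placeholders, and the dial setup is only for illustration.

```go
package main

import (
	"context"
	"fmt"
	"io"
	"log"

	"google.golang.org/grpc"

	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)

func main() {
	conn, err := grpc.Dial("localhost:18888", grpc.WithInsecure()) // placeholder filer gRPC port
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := filer_pb.NewSeaweedFilerClient(conn)
	stream, err := client.SubscribeMetadata(context.Background(), &filer_pb.SubscribeMetadataRequest{
		ClientName: "example-subscriber",
		PathPrefix: "/buckets/",
		SinceNs:    0,
	})
	if err != nil {
		log.Fatal(err)
	}
	for {
		resp, err := stream.Recv()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%s @%d: %+v\n", resp.Directory, resp.TsNs, resp.EventNotification)
	}
}
```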
diff --git a/weed/pb/filer_pb/filer.pb.go b/weed/pb/filer_pb/filer.pb.go
index 01b3e8d90..f5b62e377 100644
--- a/weed/pb/filer_pb/filer.pb.go
+++ b/weed/pb/filer_pb/filer.pb.go
@@ -23,6 +23,8 @@ It has these top-level messages:
CreateEntryResponse
UpdateEntryRequest
UpdateEntryResponse
+ AppendToEntryRequest
+ AppendToEntryResponse
DeleteEntryRequest
DeleteEntryResponse
AtomicRenameEntryRequest
@@ -39,6 +41,13 @@ It has these top-level messages:
StatisticsResponse
GetFilerConfigurationRequest
GetFilerConfigurationResponse
+ SubscribeMetadataRequest
+ SubscribeMetadataResponse
+ LogEntry
+ KeepConnectedRequest
+ KeepConnectedResponse
+ LocateBrokerRequest
+ LocateBrokerResponse
*/
package filer_pb
@@ -287,6 +296,8 @@ type FileChunk struct {
SourceFileId string `protobuf:"bytes,6,opt,name=source_file_id,json=sourceFileId" json:"source_file_id,omitempty"`
Fid *FileId `protobuf:"bytes,7,opt,name=fid" json:"fid,omitempty"`
SourceFid *FileId `protobuf:"bytes,8,opt,name=source_fid,json=sourceFid" json:"source_fid,omitempty"`
+ CipherKey []byte `protobuf:"bytes,9,opt,name=cipher_key,json=cipherKey,proto3" json:"cipher_key,omitempty"`
+ IsGzipped bool `protobuf:"varint,10,opt,name=is_gzipped,json=isGzipped" json:"is_gzipped,omitempty"`
}
func (m *FileChunk) Reset() { *m = FileChunk{} }
@@ -350,6 +361,20 @@ func (m *FileChunk) GetSourceFid() *FileId {
return nil
}
+func (m *FileChunk) GetCipherKey() []byte {
+ if m != nil {
+ return m.CipherKey
+ }
+ return nil
+}
+
+func (m *FileChunk) GetIsGzipped() bool {
+ if m != nil {
+ return m.IsGzipped
+ }
+ return false
+}
+
type FileId struct {
VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
FileKey uint64 `protobuf:"varint,2,opt,name=file_key,json=fileKey" json:"file_key,omitempty"`
@@ -396,6 +421,7 @@ type FuseAttributes struct {
UserName string `protobuf:"bytes,11,opt,name=user_name,json=userName" json:"user_name,omitempty"`
GroupName []string `protobuf:"bytes,12,rep,name=group_name,json=groupName" json:"group_name,omitempty"`
SymlinkTarget string `protobuf:"bytes,13,opt,name=symlink_target,json=symlinkTarget" json:"symlink_target,omitempty"`
+ Md5 []byte `protobuf:"bytes,14,opt,name=md5,proto3" json:"md5,omitempty"`
}
func (m *FuseAttributes) Reset() { *m = FuseAttributes{} }
@@ -494,6 +520,13 @@ func (m *FuseAttributes) GetSymlinkTarget() string {
return ""
}
+func (m *FuseAttributes) GetMd5() []byte {
+ if m != nil {
+ return m.Md5
+ }
+ return nil
+}
+
type CreateEntryRequest struct {
Directory string `protobuf:"bytes,1,opt,name=directory" json:"directory,omitempty"`
Entry *Entry `protobuf:"bytes,2,opt,name=entry" json:"entry,omitempty"`
@@ -574,6 +607,46 @@ func (m *UpdateEntryResponse) String() string { return proto.CompactT
func (*UpdateEntryResponse) ProtoMessage() {}
func (*UpdateEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} }
+type AppendToEntryRequest struct {
+ Directory string `protobuf:"bytes,1,opt,name=directory" json:"directory,omitempty"`
+ EntryName string `protobuf:"bytes,2,opt,name=entry_name,json=entryName" json:"entry_name,omitempty"`
+ Chunks []*FileChunk `protobuf:"bytes,3,rep,name=chunks" json:"chunks,omitempty"`
+}
+
+func (m *AppendToEntryRequest) Reset() { *m = AppendToEntryRequest{} }
+func (m *AppendToEntryRequest) String() string { return proto.CompactTextString(m) }
+func (*AppendToEntryRequest) ProtoMessage() {}
+func (*AppendToEntryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} }
+
+func (m *AppendToEntryRequest) GetDirectory() string {
+ if m != nil {
+ return m.Directory
+ }
+ return ""
+}
+
+func (m *AppendToEntryRequest) GetEntryName() string {
+ if m != nil {
+ return m.EntryName
+ }
+ return ""
+}
+
+func (m *AppendToEntryRequest) GetChunks() []*FileChunk {
+ if m != nil {
+ return m.Chunks
+ }
+ return nil
+}
+
+type AppendToEntryResponse struct {
+}
+
+func (m *AppendToEntryResponse) Reset() { *m = AppendToEntryResponse{} }
+func (m *AppendToEntryResponse) String() string { return proto.CompactTextString(m) }
+func (*AppendToEntryResponse) ProtoMessage() {}
+func (*AppendToEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} }
+
type DeleteEntryRequest struct {
Directory string `protobuf:"bytes,1,opt,name=directory" json:"directory,omitempty"`
Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"`
@@ -586,7 +659,7 @@ type DeleteEntryRequest struct {
func (m *DeleteEntryRequest) Reset() { *m = DeleteEntryRequest{} }
func (m *DeleteEntryRequest) String() string { return proto.CompactTextString(m) }
func (*DeleteEntryRequest) ProtoMessage() {}
-func (*DeleteEntryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} }
+func (*DeleteEntryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} }
func (m *DeleteEntryRequest) GetDirectory() string {
if m != nil {
@@ -624,12 +697,20 @@ func (m *DeleteEntryRequest) GetIgnoreRecursiveError() bool {
}
type DeleteEntryResponse struct {
+ Error string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"`
}
func (m *DeleteEntryResponse) Reset() { *m = DeleteEntryResponse{} }
func (m *DeleteEntryResponse) String() string { return proto.CompactTextString(m) }
func (*DeleteEntryResponse) ProtoMessage() {}
-func (*DeleteEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} }
+func (*DeleteEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} }
+
+func (m *DeleteEntryResponse) GetError() string {
+ if m != nil {
+ return m.Error
+ }
+ return ""
+}
type AtomicRenameEntryRequest struct {
OldDirectory string `protobuf:"bytes,1,opt,name=old_directory,json=oldDirectory" json:"old_directory,omitempty"`
@@ -641,7 +722,7 @@ type AtomicRenameEntryRequest struct {
func (m *AtomicRenameEntryRequest) Reset() { *m = AtomicRenameEntryRequest{} }
func (m *AtomicRenameEntryRequest) String() string { return proto.CompactTextString(m) }
func (*AtomicRenameEntryRequest) ProtoMessage() {}
-func (*AtomicRenameEntryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} }
+func (*AtomicRenameEntryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} }
func (m *AtomicRenameEntryRequest) GetOldDirectory() string {
if m != nil {
@@ -677,7 +758,7 @@ type AtomicRenameEntryResponse struct {
func (m *AtomicRenameEntryResponse) Reset() { *m = AtomicRenameEntryResponse{} }
func (m *AtomicRenameEntryResponse) String() string { return proto.CompactTextString(m) }
func (*AtomicRenameEntryResponse) ProtoMessage() {}
-func (*AtomicRenameEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} }
+func (*AtomicRenameEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} }
type AssignVolumeRequest struct {
Count int32 `protobuf:"varint,1,opt,name=count" json:"count,omitempty"`
@@ -685,12 +766,13 @@ type AssignVolumeRequest struct {
Replication string `protobuf:"bytes,3,opt,name=replication" json:"replication,omitempty"`
TtlSec int32 `protobuf:"varint,4,opt,name=ttl_sec,json=ttlSec" json:"ttl_sec,omitempty"`
DataCenter string `protobuf:"bytes,5,opt,name=data_center,json=dataCenter" json:"data_center,omitempty"`
+ ParentPath string `protobuf:"bytes,6,opt,name=parent_path,json=parentPath" json:"parent_path,omitempty"`
}
func (m *AssignVolumeRequest) Reset() { *m = AssignVolumeRequest{} }
func (m *AssignVolumeRequest) String() string { return proto.CompactTextString(m) }
func (*AssignVolumeRequest) ProtoMessage() {}
-func (*AssignVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} }
+func (*AssignVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} }
func (m *AssignVolumeRequest) GetCount() int32 {
if m != nil {
@@ -727,18 +809,28 @@ func (m *AssignVolumeRequest) GetDataCenter() string {
return ""
}
+func (m *AssignVolumeRequest) GetParentPath() string {
+ if m != nil {
+ return m.ParentPath
+ }
+ return ""
+}
+
type AssignVolumeResponse struct {
- FileId string `protobuf:"bytes,1,opt,name=file_id,json=fileId" json:"file_id,omitempty"`
- Url string `protobuf:"bytes,2,opt,name=url" json:"url,omitempty"`
- PublicUrl string `protobuf:"bytes,3,opt,name=public_url,json=publicUrl" json:"public_url,omitempty"`
- Count int32 `protobuf:"varint,4,opt,name=count" json:"count,omitempty"`
- Auth string `protobuf:"bytes,5,opt,name=auth" json:"auth,omitempty"`
+ FileId string `protobuf:"bytes,1,opt,name=file_id,json=fileId" json:"file_id,omitempty"`
+ Url string `protobuf:"bytes,2,opt,name=url" json:"url,omitempty"`
+ PublicUrl string `protobuf:"bytes,3,opt,name=public_url,json=publicUrl" json:"public_url,omitempty"`
+ Count int32 `protobuf:"varint,4,opt,name=count" json:"count,omitempty"`
+ Auth string `protobuf:"bytes,5,opt,name=auth" json:"auth,omitempty"`
+ Collection string `protobuf:"bytes,6,opt,name=collection" json:"collection,omitempty"`
+ Replication string `protobuf:"bytes,7,opt,name=replication" json:"replication,omitempty"`
+ Error string `protobuf:"bytes,8,opt,name=error" json:"error,omitempty"`
}
func (m *AssignVolumeResponse) Reset() { *m = AssignVolumeResponse{} }
func (m *AssignVolumeResponse) String() string { return proto.CompactTextString(m) }
func (*AssignVolumeResponse) ProtoMessage() {}
-func (*AssignVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} }
+func (*AssignVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} }
func (m *AssignVolumeResponse) GetFileId() string {
if m != nil {
@@ -775,6 +867,27 @@ func (m *AssignVolumeResponse) GetAuth() string {
return ""
}
+func (m *AssignVolumeResponse) GetCollection() string {
+ if m != nil {
+ return m.Collection
+ }
+ return ""
+}
+
+func (m *AssignVolumeResponse) GetReplication() string {
+ if m != nil {
+ return m.Replication
+ }
+ return ""
+}
+
+func (m *AssignVolumeResponse) GetError() string {
+ if m != nil {
+ return m.Error
+ }
+ return ""
+}
+
type LookupVolumeRequest struct {
VolumeIds []string `protobuf:"bytes,1,rep,name=volume_ids,json=volumeIds" json:"volume_ids,omitempty"`
}
@@ -782,7 +895,7 @@ type LookupVolumeRequest struct {
func (m *LookupVolumeRequest) Reset() { *m = LookupVolumeRequest{} }
func (m *LookupVolumeRequest) String() string { return proto.CompactTextString(m) }
func (*LookupVolumeRequest) ProtoMessage() {}
-func (*LookupVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} }
+func (*LookupVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} }
func (m *LookupVolumeRequest) GetVolumeIds() []string {
if m != nil {
@@ -798,7 +911,7 @@ type Locations struct {
func (m *Locations) Reset() { *m = Locations{} }
func (m *Locations) String() string { return proto.CompactTextString(m) }
func (*Locations) ProtoMessage() {}
-func (*Locations) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} }
+func (*Locations) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} }
func (m *Locations) GetLocations() []*Location {
if m != nil {
@@ -815,7 +928,7 @@ type Location struct {
func (m *Location) Reset() { *m = Location{} }
func (m *Location) String() string { return proto.CompactTextString(m) }
func (*Location) ProtoMessage() {}
-func (*Location) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} }
+func (*Location) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} }
func (m *Location) GetUrl() string {
if m != nil {
@@ -838,7 +951,7 @@ type LookupVolumeResponse struct {
func (m *LookupVolumeResponse) Reset() { *m = LookupVolumeResponse{} }
func (m *LookupVolumeResponse) String() string { return proto.CompactTextString(m) }
func (*LookupVolumeResponse) ProtoMessage() {}
-func (*LookupVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} }
+func (*LookupVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} }
func (m *LookupVolumeResponse) GetLocationsMap() map[string]*Locations {
if m != nil {
@@ -854,7 +967,7 @@ type DeleteCollectionRequest struct {
func (m *DeleteCollectionRequest) Reset() { *m = DeleteCollectionRequest{} }
func (m *DeleteCollectionRequest) String() string { return proto.CompactTextString(m) }
func (*DeleteCollectionRequest) ProtoMessage() {}
-func (*DeleteCollectionRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} }
+func (*DeleteCollectionRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} }
func (m *DeleteCollectionRequest) GetCollection() string {
if m != nil {
@@ -869,7 +982,7 @@ type DeleteCollectionResponse struct {
func (m *DeleteCollectionResponse) Reset() { *m = DeleteCollectionResponse{} }
func (m *DeleteCollectionResponse) String() string { return proto.CompactTextString(m) }
func (*DeleteCollectionResponse) ProtoMessage() {}
-func (*DeleteCollectionResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} }
+func (*DeleteCollectionResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} }
type StatisticsRequest struct {
Replication string `protobuf:"bytes,1,opt,name=replication" json:"replication,omitempty"`
@@ -880,7 +993,7 @@ type StatisticsRequest struct {
func (m *StatisticsRequest) Reset() { *m = StatisticsRequest{} }
func (m *StatisticsRequest) String() string { return proto.CompactTextString(m) }
func (*StatisticsRequest) ProtoMessage() {}
-func (*StatisticsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} }
+func (*StatisticsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} }
func (m *StatisticsRequest) GetReplication() string {
if m != nil {
@@ -915,7 +1028,7 @@ type StatisticsResponse struct {
func (m *StatisticsResponse) Reset() { *m = StatisticsResponse{} }
func (m *StatisticsResponse) String() string { return proto.CompactTextString(m) }
func (*StatisticsResponse) ProtoMessage() {}
-func (*StatisticsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} }
+func (*StatisticsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} }
func (m *StatisticsResponse) GetReplication() string {
if m != nil {
@@ -965,19 +1078,21 @@ type GetFilerConfigurationRequest struct {
func (m *GetFilerConfigurationRequest) Reset() { *m = GetFilerConfigurationRequest{} }
func (m *GetFilerConfigurationRequest) String() string { return proto.CompactTextString(m) }
func (*GetFilerConfigurationRequest) ProtoMessage() {}
-func (*GetFilerConfigurationRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} }
+func (*GetFilerConfigurationRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} }
type GetFilerConfigurationResponse struct {
Masters []string `protobuf:"bytes,1,rep,name=masters" json:"masters,omitempty"`
Replication string `protobuf:"bytes,2,opt,name=replication" json:"replication,omitempty"`
Collection string `protobuf:"bytes,3,opt,name=collection" json:"collection,omitempty"`
MaxMb uint32 `protobuf:"varint,4,opt,name=max_mb,json=maxMb" json:"max_mb,omitempty"`
+ DirBuckets string `protobuf:"bytes,5,opt,name=dir_buckets,json=dirBuckets" json:"dir_buckets,omitempty"`
+ Cipher bool `protobuf:"varint,7,opt,name=cipher" json:"cipher,omitempty"`
}
func (m *GetFilerConfigurationResponse) Reset() { *m = GetFilerConfigurationResponse{} }
func (m *GetFilerConfigurationResponse) String() string { return proto.CompactTextString(m) }
func (*GetFilerConfigurationResponse) ProtoMessage() {}
-func (*GetFilerConfigurationResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} }
+func (*GetFilerConfigurationResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} }
func (m *GetFilerConfigurationResponse) GetMasters() []string {
if m != nil {
@@ -1007,6 +1122,224 @@ func (m *GetFilerConfigurationResponse) GetMaxMb() uint32 {
return 0
}
+func (m *GetFilerConfigurationResponse) GetDirBuckets() string {
+ if m != nil {
+ return m.DirBuckets
+ }
+ return ""
+}
+
+func (m *GetFilerConfigurationResponse) GetCipher() bool {
+ if m != nil {
+ return m.Cipher
+ }
+ return false
+}
+
+type SubscribeMetadataRequest struct {
+ ClientName string `protobuf:"bytes,1,opt,name=client_name,json=clientName" json:"client_name,omitempty"`
+ PathPrefix string `protobuf:"bytes,2,opt,name=path_prefix,json=pathPrefix" json:"path_prefix,omitempty"`
+ SinceNs int64 `protobuf:"varint,3,opt,name=since_ns,json=sinceNs" json:"since_ns,omitempty"`
+}
+
+func (m *SubscribeMetadataRequest) Reset() { *m = SubscribeMetadataRequest{} }
+func (m *SubscribeMetadataRequest) String() string { return proto.CompactTextString(m) }
+func (*SubscribeMetadataRequest) ProtoMessage() {}
+func (*SubscribeMetadataRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} }
+
+func (m *SubscribeMetadataRequest) GetClientName() string {
+ if m != nil {
+ return m.ClientName
+ }
+ return ""
+}
+
+func (m *SubscribeMetadataRequest) GetPathPrefix() string {
+ if m != nil {
+ return m.PathPrefix
+ }
+ return ""
+}
+
+func (m *SubscribeMetadataRequest) GetSinceNs() int64 {
+ if m != nil {
+ return m.SinceNs
+ }
+ return 0
+}
+
+type SubscribeMetadataResponse struct {
+ Directory string `protobuf:"bytes,1,opt,name=directory" json:"directory,omitempty"`
+ EventNotification *EventNotification `protobuf:"bytes,2,opt,name=event_notification,json=eventNotification" json:"event_notification,omitempty"`
+ TsNs int64 `protobuf:"varint,3,opt,name=ts_ns,json=tsNs" json:"ts_ns,omitempty"`
+}
+
+func (m *SubscribeMetadataResponse) Reset() { *m = SubscribeMetadataResponse{} }
+func (m *SubscribeMetadataResponse) String() string { return proto.CompactTextString(m) }
+func (*SubscribeMetadataResponse) ProtoMessage() {}
+func (*SubscribeMetadataResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{33} }
+
+func (m *SubscribeMetadataResponse) GetDirectory() string {
+ if m != nil {
+ return m.Directory
+ }
+ return ""
+}
+
+func (m *SubscribeMetadataResponse) GetEventNotification() *EventNotification {
+ if m != nil {
+ return m.EventNotification
+ }
+ return nil
+}
+
+func (m *SubscribeMetadataResponse) GetTsNs() int64 {
+ if m != nil {
+ return m.TsNs
+ }
+ return 0
+}
+
+type LogEntry struct {
+ TsNs int64 `protobuf:"varint,1,opt,name=ts_ns,json=tsNs" json:"ts_ns,omitempty"`
+ PartitionKeyHash int32 `protobuf:"varint,2,opt,name=partition_key_hash,json=partitionKeyHash" json:"partition_key_hash,omitempty"`
+ Data []byte `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"`
+}
+
+func (m *LogEntry) Reset() { *m = LogEntry{} }
+func (m *LogEntry) String() string { return proto.CompactTextString(m) }
+func (*LogEntry) ProtoMessage() {}
+func (*LogEntry) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{34} }
+
+func (m *LogEntry) GetTsNs() int64 {
+ if m != nil {
+ return m.TsNs
+ }
+ return 0
+}
+
+func (m *LogEntry) GetPartitionKeyHash() int32 {
+ if m != nil {
+ return m.PartitionKeyHash
+ }
+ return 0
+}
+
+func (m *LogEntry) GetData() []byte {
+ if m != nil {
+ return m.Data
+ }
+ return nil
+}
+
+type KeepConnectedRequest struct {
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ GrpcPort uint32 `protobuf:"varint,2,opt,name=grpc_port,json=grpcPort" json:"grpc_port,omitempty"`
+ Resources []string `protobuf:"bytes,3,rep,name=resources" json:"resources,omitempty"`
+}
+
+func (m *KeepConnectedRequest) Reset() { *m = KeepConnectedRequest{} }
+func (m *KeepConnectedRequest) String() string { return proto.CompactTextString(m) }
+func (*KeepConnectedRequest) ProtoMessage() {}
+func (*KeepConnectedRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{35} }
+
+func (m *KeepConnectedRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *KeepConnectedRequest) GetGrpcPort() uint32 {
+ if m != nil {
+ return m.GrpcPort
+ }
+ return 0
+}
+
+func (m *KeepConnectedRequest) GetResources() []string {
+ if m != nil {
+ return m.Resources
+ }
+ return nil
+}
+
+type KeepConnectedResponse struct {
+}
+
+func (m *KeepConnectedResponse) Reset() { *m = KeepConnectedResponse{} }
+func (m *KeepConnectedResponse) String() string { return proto.CompactTextString(m) }
+func (*KeepConnectedResponse) ProtoMessage() {}
+func (*KeepConnectedResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{36} }
+
+type LocateBrokerRequest struct {
+ Resource string `protobuf:"bytes,1,opt,name=resource" json:"resource,omitempty"`
+}
+
+func (m *LocateBrokerRequest) Reset() { *m = LocateBrokerRequest{} }
+func (m *LocateBrokerRequest) String() string { return proto.CompactTextString(m) }
+func (*LocateBrokerRequest) ProtoMessage() {}
+func (*LocateBrokerRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{37} }
+
+func (m *LocateBrokerRequest) GetResource() string {
+ if m != nil {
+ return m.Resource
+ }
+ return ""
+}
+
+type LocateBrokerResponse struct {
+ Found bool `protobuf:"varint,1,opt,name=found" json:"found,omitempty"`
+ Resources []*LocateBrokerResponse_Resource `protobuf:"bytes,2,rep,name=resources" json:"resources,omitempty"`
+}
+
+func (m *LocateBrokerResponse) Reset() { *m = LocateBrokerResponse{} }
+func (m *LocateBrokerResponse) String() string { return proto.CompactTextString(m) }
+func (*LocateBrokerResponse) ProtoMessage() {}
+func (*LocateBrokerResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{38} }
+
+func (m *LocateBrokerResponse) GetFound() bool {
+ if m != nil {
+ return m.Found
+ }
+ return false
+}
+
+func (m *LocateBrokerResponse) GetResources() []*LocateBrokerResponse_Resource {
+ if m != nil {
+ return m.Resources
+ }
+ return nil
+}
+
+// if found, send the exact address
+// if not found, send the full list of existing brokers
+type LocateBrokerResponse_Resource struct {
+ GrpcAddresses string `protobuf:"bytes,1,opt,name=grpc_addresses,json=grpcAddresses" json:"grpc_addresses,omitempty"`
+ ResourceCount int32 `protobuf:"varint,2,opt,name=resource_count,json=resourceCount" json:"resource_count,omitempty"`
+}
+
+func (m *LocateBrokerResponse_Resource) Reset() { *m = LocateBrokerResponse_Resource{} }
+func (m *LocateBrokerResponse_Resource) String() string { return proto.CompactTextString(m) }
+func (*LocateBrokerResponse_Resource) ProtoMessage() {}
+func (*LocateBrokerResponse_Resource) Descriptor() ([]byte, []int) {
+ return fileDescriptor0, []int{38, 0}
+}
+
+func (m *LocateBrokerResponse_Resource) GetGrpcAddresses() string {
+ if m != nil {
+ return m.GrpcAddresses
+ }
+ return ""
+}
+
+func (m *LocateBrokerResponse_Resource) GetResourceCount() int32 {
+ if m != nil {
+ return m.ResourceCount
+ }
+ return 0
+}
+
func init() {
proto.RegisterType((*LookupDirectoryEntryRequest)(nil), "filer_pb.LookupDirectoryEntryRequest")
proto.RegisterType((*LookupDirectoryEntryResponse)(nil), "filer_pb.LookupDirectoryEntryResponse")
@@ -1022,6 +1355,8 @@ func init() {
proto.RegisterType((*CreateEntryResponse)(nil), "filer_pb.CreateEntryResponse")
proto.RegisterType((*UpdateEntryRequest)(nil), "filer_pb.UpdateEntryRequest")
proto.RegisterType((*UpdateEntryResponse)(nil), "filer_pb.UpdateEntryResponse")
+ proto.RegisterType((*AppendToEntryRequest)(nil), "filer_pb.AppendToEntryRequest")
+ proto.RegisterType((*AppendToEntryResponse)(nil), "filer_pb.AppendToEntryResponse")
proto.RegisterType((*DeleteEntryRequest)(nil), "filer_pb.DeleteEntryRequest")
proto.RegisterType((*DeleteEntryResponse)(nil), "filer_pb.DeleteEntryResponse")
proto.RegisterType((*AtomicRenameEntryRequest)(nil), "filer_pb.AtomicRenameEntryRequest")
@@ -1038,6 +1373,14 @@ func init() {
proto.RegisterType((*StatisticsResponse)(nil), "filer_pb.StatisticsResponse")
proto.RegisterType((*GetFilerConfigurationRequest)(nil), "filer_pb.GetFilerConfigurationRequest")
proto.RegisterType((*GetFilerConfigurationResponse)(nil), "filer_pb.GetFilerConfigurationResponse")
+ proto.RegisterType((*SubscribeMetadataRequest)(nil), "filer_pb.SubscribeMetadataRequest")
+ proto.RegisterType((*SubscribeMetadataResponse)(nil), "filer_pb.SubscribeMetadataResponse")
+ proto.RegisterType((*LogEntry)(nil), "filer_pb.LogEntry")
+ proto.RegisterType((*KeepConnectedRequest)(nil), "filer_pb.KeepConnectedRequest")
+ proto.RegisterType((*KeepConnectedResponse)(nil), "filer_pb.KeepConnectedResponse")
+ proto.RegisterType((*LocateBrokerRequest)(nil), "filer_pb.LocateBrokerRequest")
+ proto.RegisterType((*LocateBrokerResponse)(nil), "filer_pb.LocateBrokerResponse")
+ proto.RegisterType((*LocateBrokerResponse_Resource)(nil), "filer_pb.LocateBrokerResponse.Resource")
}
// Reference imports to suppress errors if they are not otherwise used.
@@ -1055,6 +1398,7 @@ type SeaweedFilerClient interface {
ListEntries(ctx context.Context, in *ListEntriesRequest, opts ...grpc.CallOption) (SeaweedFiler_ListEntriesClient, error)
CreateEntry(ctx context.Context, in *CreateEntryRequest, opts ...grpc.CallOption) (*CreateEntryResponse, error)
UpdateEntry(ctx context.Context, in *UpdateEntryRequest, opts ...grpc.CallOption) (*UpdateEntryResponse, error)
+ AppendToEntry(ctx context.Context, in *AppendToEntryRequest, opts ...grpc.CallOption) (*AppendToEntryResponse, error)
DeleteEntry(ctx context.Context, in *DeleteEntryRequest, opts ...grpc.CallOption) (*DeleteEntryResponse, error)
AtomicRenameEntry(ctx context.Context, in *AtomicRenameEntryRequest, opts ...grpc.CallOption) (*AtomicRenameEntryResponse, error)
AssignVolume(ctx context.Context, in *AssignVolumeRequest, opts ...grpc.CallOption) (*AssignVolumeResponse, error)
@@ -1062,6 +1406,9 @@ type SeaweedFilerClient interface {
DeleteCollection(ctx context.Context, in *DeleteCollectionRequest, opts ...grpc.CallOption) (*DeleteCollectionResponse, error)
Statistics(ctx context.Context, in *StatisticsRequest, opts ...grpc.CallOption) (*StatisticsResponse, error)
GetFilerConfiguration(ctx context.Context, in *GetFilerConfigurationRequest, opts ...grpc.CallOption) (*GetFilerConfigurationResponse, error)
+ SubscribeMetadata(ctx context.Context, in *SubscribeMetadataRequest, opts ...grpc.CallOption) (SeaweedFiler_SubscribeMetadataClient, error)
+ KeepConnected(ctx context.Context, opts ...grpc.CallOption) (SeaweedFiler_KeepConnectedClient, error)
+ LocateBroker(ctx context.Context, in *LocateBrokerRequest, opts ...grpc.CallOption) (*LocateBrokerResponse, error)
}
type seaweedFilerClient struct {
@@ -1131,6 +1478,15 @@ func (c *seaweedFilerClient) UpdateEntry(ctx context.Context, in *UpdateEntryReq
return out, nil
}
+func (c *seaweedFilerClient) AppendToEntry(ctx context.Context, in *AppendToEntryRequest, opts ...grpc.CallOption) (*AppendToEntryResponse, error) {
+ out := new(AppendToEntryResponse)
+ err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/AppendToEntry", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
func (c *seaweedFilerClient) DeleteEntry(ctx context.Context, in *DeleteEntryRequest, opts ...grpc.CallOption) (*DeleteEntryResponse, error) {
out := new(DeleteEntryResponse)
err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/DeleteEntry", in, out, c.cc, opts...)
@@ -1194,6 +1550,78 @@ func (c *seaweedFilerClient) GetFilerConfiguration(ctx context.Context, in *GetF
return out, nil
}
+func (c *seaweedFilerClient) SubscribeMetadata(ctx context.Context, in *SubscribeMetadataRequest, opts ...grpc.CallOption) (SeaweedFiler_SubscribeMetadataClient, error) {
+ stream, err := grpc.NewClientStream(ctx, &_SeaweedFiler_serviceDesc.Streams[1], c.cc, "/filer_pb.SeaweedFiler/SubscribeMetadata", opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &seaweedFilerSubscribeMetadataClient{stream}
+ if err := x.ClientStream.SendMsg(in); err != nil {
+ return nil, err
+ }
+ if err := x.ClientStream.CloseSend(); err != nil {
+ return nil, err
+ }
+ return x, nil
+}
+
+type SeaweedFiler_SubscribeMetadataClient interface {
+ Recv() (*SubscribeMetadataResponse, error)
+ grpc.ClientStream
+}
+
+type seaweedFilerSubscribeMetadataClient struct {
+ grpc.ClientStream
+}
+
+func (x *seaweedFilerSubscribeMetadataClient) Recv() (*SubscribeMetadataResponse, error) {
+ m := new(SubscribeMetadataResponse)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func (c *seaweedFilerClient) KeepConnected(ctx context.Context, opts ...grpc.CallOption) (SeaweedFiler_KeepConnectedClient, error) {
+ stream, err := grpc.NewClientStream(ctx, &_SeaweedFiler_serviceDesc.Streams[2], c.cc, "/filer_pb.SeaweedFiler/KeepConnected", opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &seaweedFilerKeepConnectedClient{stream}
+ return x, nil
+}
+
+type SeaweedFiler_KeepConnectedClient interface {
+ Send(*KeepConnectedRequest) error
+ Recv() (*KeepConnectedResponse, error)
+ grpc.ClientStream
+}
+
+type seaweedFilerKeepConnectedClient struct {
+ grpc.ClientStream
+}
+
+func (x *seaweedFilerKeepConnectedClient) Send(m *KeepConnectedRequest) error {
+ return x.ClientStream.SendMsg(m)
+}
+
+func (x *seaweedFilerKeepConnectedClient) Recv() (*KeepConnectedResponse, error) {
+ m := new(KeepConnectedResponse)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func (c *seaweedFilerClient) LocateBroker(ctx context.Context, in *LocateBrokerRequest, opts ...grpc.CallOption) (*LocateBrokerResponse, error) {
+ out := new(LocateBrokerResponse)
+ err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/LocateBroker", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
// Server API for SeaweedFiler service
type SeaweedFilerServer interface {
@@ -1201,6 +1629,7 @@ type SeaweedFilerServer interface {
ListEntries(*ListEntriesRequest, SeaweedFiler_ListEntriesServer) error
CreateEntry(context.Context, *CreateEntryRequest) (*CreateEntryResponse, error)
UpdateEntry(context.Context, *UpdateEntryRequest) (*UpdateEntryResponse, error)
+ AppendToEntry(context.Context, *AppendToEntryRequest) (*AppendToEntryResponse, error)
DeleteEntry(context.Context, *DeleteEntryRequest) (*DeleteEntryResponse, error)
AtomicRenameEntry(context.Context, *AtomicRenameEntryRequest) (*AtomicRenameEntryResponse, error)
AssignVolume(context.Context, *AssignVolumeRequest) (*AssignVolumeResponse, error)
@@ -1208,6 +1637,9 @@ type SeaweedFilerServer interface {
DeleteCollection(context.Context, *DeleteCollectionRequest) (*DeleteCollectionResponse, error)
Statistics(context.Context, *StatisticsRequest) (*StatisticsResponse, error)
GetFilerConfiguration(context.Context, *GetFilerConfigurationRequest) (*GetFilerConfigurationResponse, error)
+ SubscribeMetadata(*SubscribeMetadataRequest, SeaweedFiler_SubscribeMetadataServer) error
+ KeepConnected(SeaweedFiler_KeepConnectedServer) error
+ LocateBroker(context.Context, *LocateBrokerRequest) (*LocateBrokerResponse, error)
}
func RegisterSeaweedFilerServer(s *grpc.Server, srv SeaweedFilerServer) {
@@ -1289,6 +1721,24 @@ func _SeaweedFiler_UpdateEntry_Handler(srv interface{}, ctx context.Context, dec
return interceptor(ctx, in, info, handler)
}
+func _SeaweedFiler_AppendToEntry_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AppendToEntryRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(SeaweedFilerServer).AppendToEntry(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/filer_pb.SeaweedFiler/AppendToEntry",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(SeaweedFilerServer).AppendToEntry(ctx, req.(*AppendToEntryRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
func _SeaweedFiler_DeleteEntry_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(DeleteEntryRequest)
if err := dec(in); err != nil {
@@ -1415,6 +1865,71 @@ func _SeaweedFiler_GetFilerConfiguration_Handler(srv interface{}, ctx context.Co
return interceptor(ctx, in, info, handler)
}
+func _SeaweedFiler_SubscribeMetadata_Handler(srv interface{}, stream grpc.ServerStream) error {
+ m := new(SubscribeMetadataRequest)
+ if err := stream.RecvMsg(m); err != nil {
+ return err
+ }
+ return srv.(SeaweedFilerServer).SubscribeMetadata(m, &seaweedFilerSubscribeMetadataServer{stream})
+}
+
+type SeaweedFiler_SubscribeMetadataServer interface {
+ Send(*SubscribeMetadataResponse) error
+ grpc.ServerStream
+}
+
+type seaweedFilerSubscribeMetadataServer struct {
+ grpc.ServerStream
+}
+
+func (x *seaweedFilerSubscribeMetadataServer) Send(m *SubscribeMetadataResponse) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+func _SeaweedFiler_KeepConnected_Handler(srv interface{}, stream grpc.ServerStream) error {
+ return srv.(SeaweedFilerServer).KeepConnected(&seaweedFilerKeepConnectedServer{stream})
+}
+
+type SeaweedFiler_KeepConnectedServer interface {
+ Send(*KeepConnectedResponse) error
+ Recv() (*KeepConnectedRequest, error)
+ grpc.ServerStream
+}
+
+type seaweedFilerKeepConnectedServer struct {
+ grpc.ServerStream
+}
+
+func (x *seaweedFilerKeepConnectedServer) Send(m *KeepConnectedResponse) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+func (x *seaweedFilerKeepConnectedServer) Recv() (*KeepConnectedRequest, error) {
+ m := new(KeepConnectedRequest)
+ if err := x.ServerStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func _SeaweedFiler_LocateBroker_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(LocateBrokerRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(SeaweedFilerServer).LocateBroker(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/filer_pb.SeaweedFiler/LocateBroker",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(SeaweedFilerServer).LocateBroker(ctx, req.(*LocateBrokerRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{
ServiceName: "filer_pb.SeaweedFiler",
HandlerType: (*SeaweedFilerServer)(nil),
@@ -1431,6 +1946,10 @@ var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{
MethodName: "UpdateEntry",
Handler: _SeaweedFiler_UpdateEntry_Handler,
},
+ {
+ MethodName: "AppendToEntry",
+ Handler: _SeaweedFiler_AppendToEntry_Handler,
+ },
{
MethodName: "DeleteEntry",
Handler: _SeaweedFiler_DeleteEntry_Handler,
@@ -1459,6 +1978,10 @@ var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{
MethodName: "GetFilerConfiguration",
Handler: _SeaweedFiler_GetFilerConfiguration_Handler,
},
+ {
+ MethodName: "LocateBroker",
+ Handler: _SeaweedFiler_LocateBroker_Handler,
+ },
},
Streams: []grpc.StreamDesc{
{
@@ -1466,6 +1989,17 @@ var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{
Handler: _SeaweedFiler_ListEntries_Handler,
ServerStreams: true,
},
+ {
+ StreamName: "SubscribeMetadata",
+ Handler: _SeaweedFiler_SubscribeMetadata_Handler,
+ ServerStreams: true,
+ },
+ {
+ StreamName: "KeepConnected",
+ Handler: _SeaweedFiler_KeepConnected_Handler,
+ ServerStreams: true,
+ ClientStreams: true,
+ },
},
Metadata: "filer.proto",
}
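
Finally, a minimal sketch of driving the new bidirectional `KeepConnected` stream registered above; the broker identity, port, and resource name are placeholder values, and connection setup is the same as in the earlier sketch.

```go
// keepConnected announces one client over the KeepConnected stream and waits
// for a single acknowledgement; real callers would loop and re-send.
func keepConnected(client filer_pb.SeaweedFilerClient) error {
	stream, err := client.KeepConnected(context.Background())
	if err != nil {
		return err
	}
	if err := stream.Send(&filer_pb.KeepConnectedRequest{
		Name:      "broker-1",          // placeholder identity
		GrpcPort:  17777,               // placeholder port
		Resources: []string{"topic-a"}, // placeholder resources
	}); err != nil {
		return err
	}
	_, err = stream.Recv()
	return err
}
```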
@@ -1473,108 +2007,139 @@ var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{
func init() { proto.RegisterFile("filer.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
- // 1633 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x18, 0x4b, 0x6f, 0xdc, 0xc6,
- 0x59, 0xdc, 0x37, 0xbf, 0xdd, 0xb5, 0xa5, 0x59, 0xc9, 0x5e, 0xaf, 0x1e, 0x95, 0xa9, 0xda, 0x55,
- 0x61, 0x43, 0x35, 0x54, 0x1f, 0xec, 0xba, 0x3d, 0xd8, 0x7a, 0x14, 0x42, 0xe5, 0x07, 0x28, 0xbb,
- 0x68, 0x11, 0x20, 0x04, 0x45, 0xce, 0xae, 0x26, 0x22, 0x39, 0x9b, 0xe1, 0x50, 0x92, 0xf3, 0x13,
- 0x72, 0xcc, 0x31, 0x40, 0xce, 0xf9, 0x13, 0x41, 0x2e, 0x41, 0x90, 0x7f, 0x93, 0x63, 0xce, 0xc1,
- 0xcc, 0x90, 0xdc, 0xe1, 0x72, 0x25, 0xd9, 0x08, 0x7c, 0x9b, 0xf9, 0xde, 0xef, 0x6f, 0x48, 0x68,
- 0x0f, 0x49, 0x80, 0xd9, 0xd6, 0x98, 0x51, 0x4e, 0x51, 0x4b, 0x5e, 0x9c, 0xf1, 0xb1, 0xf5, 0x1a,
- 0x96, 0x0f, 0x29, 0x3d, 0x4d, 0xc6, 0xbb, 0x84, 0x61, 0x8f, 0x53, 0xf6, 0x7e, 0x2f, 0xe2, 0xec,
- 0xbd, 0x8d, 0xbf, 0x4c, 0x70, 0xcc, 0xd1, 0x0a, 0x98, 0x7e, 0x86, 0xe8, 0x1b, 0xeb, 0xc6, 0xa6,
- 0x69, 0x4f, 0x00, 0x08, 0x41, 0x2d, 0x72, 0x43, 0xdc, 0xaf, 0x48, 0x84, 0x3c, 0x5b, 0x7b, 0xb0,
- 0x32, 0x5b, 0x60, 0x3c, 0xa6, 0x51, 0x8c, 0xd1, 0x3d, 0xa8, 0x63, 0x01, 0x90, 0xd2, 0xda, 0xdb,
- 0x37, 0xb7, 0x32, 0x53, 0xb6, 0x14, 0x9d, 0xc2, 0x5a, 0x3f, 0x1a, 0x80, 0x0e, 0x49, 0xcc, 0x05,
- 0x90, 0xe0, 0xf8, 0xc3, 0xec, 0xb9, 0x05, 0x8d, 0x31, 0xc3, 0x43, 0x72, 0x91, 0x5a, 0x94, 0xde,
- 0xd0, 0x43, 0x58, 0x88, 0xb9, 0xcb, 0xf8, 0x3e, 0xa3, 0xe1, 0x3e, 0x09, 0xf0, 0x2b, 0x61, 0x74,
- 0x55, 0x92, 0x94, 0x11, 0x68, 0x0b, 0x10, 0x89, 0xbc, 0x20, 0x89, 0xc9, 0x19, 0x3e, 0xca, 0xb0,
- 0xfd, 0xda, 0xba, 0xb1, 0xd9, 0xb2, 0x67, 0x60, 0xd0, 0x22, 0xd4, 0x03, 0x12, 0x12, 0xde, 0xaf,
- 0xaf, 0x1b, 0x9b, 0x5d, 0x5b, 0x5d, 0xac, 0x7f, 0x42, 0xaf, 0x60, 0xff, 0xc7, 0xb9, 0xff, 0x5d,
- 0x05, 0xea, 0x12, 0x90, 0xc7, 0xd8, 0x98, 0xc4, 0x18, 0xdd, 0x85, 0x0e, 0x89, 0x9d, 0x49, 0x20,
- 0x2a, 0xd2, 0xb6, 0x36, 0x89, 0xf3, 0x98, 0xa3, 0x07, 0xd0, 0xf0, 0x4e, 0x92, 0xe8, 0x34, 0xee,
- 0x57, 0xd7, 0xab, 0x9b, 0xed, 0xed, 0xde, 0x44, 0x91, 0x70, 0x74, 0x47, 0xe0, 0xec, 0x94, 0x04,
- 0x3d, 0x01, 0x70, 0x39, 0x67, 0xe4, 0x38, 0xe1, 0x38, 0x96, 0x9e, 0xb6, 0xb7, 0xfb, 0x1a, 0x43,
- 0x12, 0xe3, 0xe7, 0x39, 0xde, 0xd6, 0x68, 0xd1, 0x53, 0x68, 0xe1, 0x0b, 0x8e, 0x23, 0x1f, 0xfb,
- 0xfd, 0xba, 0x54, 0xb4, 0x3a, 0xe5, 0xd1, 0xd6, 0x5e, 0x8a, 0x57, 0xfe, 0xe5, 0xe4, 0x83, 0x67,
- 0xd0, 0x2d, 0xa0, 0xd0, 0x3c, 0x54, 0x4f, 0x71, 0x96, 0x55, 0x71, 0x14, 0x91, 0x3d, 0x73, 0x83,
- 0x44, 0x15, 0x58, 0xc7, 0x56, 0x97, 0x7f, 0x54, 0x9e, 0x18, 0xd6, 0x2e, 0x98, 0xfb, 0x49, 0x10,
- 0xe4, 0x8c, 0x3e, 0x61, 0x19, 0xa3, 0x4f, 0xd8, 0x24, 0xca, 0x95, 0x2b, 0xa3, 0xfc, 0x83, 0x01,
- 0x0b, 0x7b, 0x67, 0x38, 0xe2, 0xaf, 0x28, 0x27, 0x43, 0xe2, 0xb9, 0x9c, 0xd0, 0x08, 0x3d, 0x04,
- 0x93, 0x06, 0xbe, 0x73, 0x65, 0x9a, 0x5a, 0x34, 0x48, 0xad, 0x7e, 0x08, 0x66, 0x84, 0xcf, 0x9d,
- 0x2b, 0xd5, 0xb5, 0x22, 0x7c, 0xae, 0xa8, 0x37, 0xa0, 0xeb, 0xe3, 0x00, 0x73, 0xec, 0xe4, 0xd9,
- 0x11, 0xa9, 0xeb, 0x28, 0xe0, 0x8e, 0x4a, 0xc7, 0x7d, 0xb8, 0x29, 0x44, 0x8e, 0x5d, 0x86, 0x23,
- 0xee, 0x8c, 0x5d, 0x7e, 0x22, 0x73, 0x62, 0xda, 0xdd, 0x08, 0x9f, 0xbf, 0x91, 0xd0, 0x37, 0x2e,
- 0x3f, 0xb1, 0x7e, 0x33, 0xc0, 0xcc, 0x93, 0x89, 0x6e, 0x43, 0x53, 0xa8, 0x75, 0x88, 0x9f, 0x46,
- 0xa2, 0x21, 0xae, 0x07, 0xbe, 0xe8, 0x0a, 0x3a, 0x1c, 0xc6, 0x98, 0x4b, 0xf3, 0xaa, 0x76, 0x7a,
- 0x13, 0x95, 0x15, 0x93, 0xaf, 0x54, 0x23, 0xd4, 0x6c, 0x79, 0x16, 0x11, 0x0f, 0x39, 0x09, 0xb1,
- 0x54, 0x58, 0xb5, 0xd5, 0x05, 0xf5, 0xa0, 0x8e, 0x1d, 0xee, 0x8e, 0x64, 0x85, 0x9b, 0x76, 0x0d,
- 0xbf, 0x75, 0x47, 0xe8, 0xcf, 0x70, 0x23, 0xa6, 0x09, 0xf3, 0xb0, 0x93, 0xa9, 0x6d, 0x48, 0x6c,
- 0x47, 0x41, 0xf7, 0x95, 0x72, 0x0b, 0xaa, 0x43, 0xe2, 0xf7, 0x9b, 0x32, 0x30, 0xf3, 0xc5, 0x22,
- 0x3c, 0xf0, 0x6d, 0x81, 0x44, 0x7f, 0x03, 0xc8, 0x25, 0xf9, 0xfd, 0xd6, 0x25, 0xa4, 0x66, 0x26,
- 0xd7, 0xb7, 0xfe, 0x07, 0x8d, 0x54, 0xfc, 0x32, 0x98, 0x67, 0x34, 0x48, 0xc2, 0xdc, 0xed, 0xae,
- 0xdd, 0x52, 0x80, 0x03, 0x1f, 0xdd, 0x01, 0x39, 0xe7, 0x1c, 0x51, 0x55, 0x15, 0xe9, 0xa4, 0x8c,
- 0xd0, 0x7f, 0xb0, 0x9c, 0x14, 0x1e, 0xa5, 0xa7, 0x44, 0x79, 0xdf, 0xb4, 0xd3, 0x9b, 0xf5, 0x6b,
- 0x05, 0x6e, 0x14, 0xcb, 0x5d, 0xa8, 0x90, 0x52, 0x64, 0xac, 0x0c, 0x29, 0x46, 0x8a, 0x3d, 0x2a,
- 0xc4, 0xab, 0xa2, 0xc7, 0x2b, 0x63, 0x09, 0xa9, 0xaf, 0x14, 0x74, 0x15, 0xcb, 0x4b, 0xea, 0x63,
- 0x51, 0xad, 0x09, 0xf1, 0x65, 0x80, 0xbb, 0xb6, 0x38, 0x0a, 0xc8, 0x88, 0xf8, 0xe9, 0xf8, 0x10,
- 0x47, 0x69, 0x1e, 0x93, 0x72, 0x1b, 0x2a, 0x65, 0xea, 0x26, 0x52, 0x16, 0x0a, 0x68, 0x53, 0xe5,
- 0x41, 0x9c, 0xd1, 0x3a, 0xb4, 0x19, 0x1e, 0x07, 0x69, 0xf5, 0xca, 0xf0, 0x99, 0xb6, 0x0e, 0x42,
- 0x6b, 0x00, 0x1e, 0x0d, 0x02, 0xec, 0x49, 0x02, 0x53, 0x12, 0x68, 0x10, 0x51, 0x39, 0x9c, 0x07,
- 0x4e, 0x8c, 0xbd, 0x3e, 0xac, 0x1b, 0x9b, 0x75, 0xbb, 0xc1, 0x79, 0x70, 0x84, 0x3d, 0xe1, 0x47,
- 0x12, 0x63, 0xe6, 0xc8, 0x01, 0xd4, 0x96, 0x7c, 0x2d, 0x01, 0x90, 0x63, 0x72, 0x15, 0x60, 0xc4,
- 0x68, 0x32, 0x56, 0xd8, 0xce, 0x7a, 0x55, 0xcc, 0x62, 0x09, 0x91, 0xe8, 0x7b, 0x70, 0x23, 0x7e,
- 0x1f, 0x06, 0x24, 0x3a, 0x75, 0xb8, 0xcb, 0x46, 0x98, 0xf7, 0xbb, 0xaa, 0x86, 0x53, 0xe8, 0x5b,
- 0x09, 0xb4, 0xc6, 0x80, 0x76, 0x18, 0x76, 0x39, 0xfe, 0x88, 0xb5, 0xf3, 0x61, 0xdd, 0x8d, 0x96,
- 0xa0, 0x41, 0x1d, 0x7c, 0xe1, 0x05, 0x69, 0x93, 0xd5, 0xe9, 0xde, 0x85, 0x17, 0x58, 0x0f, 0xa0,
- 0x57, 0xd0, 0x98, 0x0e, 0xe6, 0x45, 0xa8, 0x63, 0xc6, 0x68, 0x36, 0x46, 0xd4, 0xc5, 0xfa, 0x3f,
- 0xa0, 0x77, 0x63, 0xff, 0x53, 0x98, 0x67, 0x2d, 0x41, 0xaf, 0x20, 0x5a, 0xd9, 0x61, 0xfd, 0x6c,
- 0x00, 0xda, 0x95, 0xd3, 0xe0, 0x8f, 0x2d, 0x62, 0xd1, 0x9f, 0x62, 0x49, 0xa8, 0x69, 0xe3, 0xbb,
- 0xdc, 0x4d, 0x57, 0x58, 0x87, 0xc4, 0x4a, 0xfe, 0xae, 0xcb, 0xdd, 0x74, 0x95, 0x30, 0xec, 0x25,
- 0x4c, 0x6c, 0x35, 0x59, 0x84, 0x72, 0x95, 0xd8, 0x19, 0x08, 0x3d, 0x86, 0x5b, 0x64, 0x14, 0x51,
- 0x86, 0x27, 0x64, 0x8e, 0x0a, 0x55, 0x43, 0x12, 0x2f, 0x2a, 0x6c, 0xce, 0xb0, 0x27, 0x23, 0xb7,
- 0x04, 0xbd, 0x82, 0x1b, 0xa9, 0x7b, 0xdf, 0x1a, 0xd0, 0x7f, 0xce, 0x69, 0x48, 0x3c, 0x1b, 0x0b,
- 0x33, 0x0b, 0x4e, 0x6e, 0x40, 0x57, 0x4c, 0xde, 0x69, 0x47, 0x3b, 0x34, 0xf0, 0x27, 0x9b, 0xed,
- 0x0e, 0x88, 0xe1, 0xeb, 0x68, 0xfe, 0x36, 0x69, 0xe0, 0xcb, 0x9a, 0xdb, 0x00, 0x31, 0x21, 0x35,
- 0x7e, 0xb5, 0xe3, 0x3b, 0x11, 0x3e, 0x2f, 0xf0, 0x0b, 0x22, 0xc9, 0xaf, 0xc6, 0x6a, 0x33, 0xc2,
- 0xe7, 0x82, 0xdf, 0x5a, 0x86, 0x3b, 0x33, 0x6c, 0x4b, 0x2d, 0xff, 0xde, 0x80, 0xde, 0xf3, 0x38,
- 0x26, 0xa3, 0xe8, 0xbf, 0x72, 0xc0, 0x64, 0x46, 0x2f, 0x42, 0xdd, 0xa3, 0x49, 0xc4, 0xa5, 0xb1,
- 0x75, 0x5b, 0x5d, 0xa6, 0x7a, 0xae, 0x52, 0xea, 0xb9, 0xa9, 0xae, 0xad, 0x96, 0xbb, 0x56, 0xeb,
- 0xca, 0x5a, 0xa1, 0x2b, 0xff, 0x04, 0x6d, 0x91, 0x4e, 0xc7, 0xc3, 0x11, 0xc7, 0x2c, 0x9d, 0xc9,
- 0x20, 0x40, 0x3b, 0x12, 0x62, 0x7d, 0x6d, 0xc0, 0x62, 0xd1, 0xd2, 0xb4, 0xc6, 0x2f, 0x5d, 0x11,
- 0x62, 0x26, 0xb1, 0x20, 0x35, 0x53, 0x1c, 0x45, 0x77, 0x8f, 0x93, 0xe3, 0x80, 0x78, 0x8e, 0x40,
- 0x28, 0xf3, 0x4c, 0x05, 0x79, 0xc7, 0x82, 0x89, 0xd3, 0x35, 0xdd, 0x69, 0x04, 0x35, 0x37, 0xe1,
- 0x27, 0xd9, 0x9a, 0x10, 0x67, 0xeb, 0x31, 0xf4, 0xd4, 0x7b, 0xb0, 0x18, 0xb5, 0x55, 0x80, 0x7c,
- 0x70, 0xc7, 0x7d, 0x43, 0x4d, 0x8f, 0x6c, 0x72, 0xc7, 0xd6, 0xbf, 0xc0, 0x3c, 0xa4, 0x2a, 0x10,
- 0x31, 0x7a, 0x04, 0x66, 0x90, 0x5d, 0x24, 0x69, 0x7b, 0x1b, 0x4d, 0x9a, 0x2a, 0xa3, 0xb3, 0x27,
- 0x44, 0xd6, 0x33, 0x68, 0x65, 0xe0, 0xcc, 0x37, 0xe3, 0x32, 0xdf, 0x2a, 0x53, 0xbe, 0x59, 0x3f,
- 0x19, 0xb0, 0x58, 0x34, 0x39, 0x0d, 0xdf, 0x3b, 0xe8, 0xe6, 0x2a, 0x9c, 0xd0, 0x1d, 0xa7, 0xb6,
- 0x3c, 0xd2, 0x6d, 0x29, 0xb3, 0xe5, 0x06, 0xc6, 0x2f, 0xdd, 0xb1, 0x2a, 0xa9, 0x4e, 0xa0, 0x81,
- 0x06, 0x6f, 0x61, 0xa1, 0x44, 0x32, 0xe3, 0x31, 0xf4, 0x57, 0xfd, 0x31, 0x54, 0x78, 0xd0, 0xe5,
- 0xdc, 0xfa, 0x0b, 0xe9, 0x29, 0xdc, 0x56, 0xfd, 0xb7, 0x93, 0x17, 0x5d, 0x16, 0xfb, 0x62, 0x6d,
- 0x1a, 0xd3, 0xb5, 0x69, 0x0d, 0xa0, 0x5f, 0x66, 0x4d, 0xbb, 0x60, 0x04, 0x0b, 0x47, 0xdc, 0xe5,
- 0x24, 0xe6, 0xc4, 0xcb, 0x5f, 0xe5, 0x53, 0xc5, 0x6c, 0x5c, 0xb7, 0x82, 0xca, 0xed, 0x30, 0x0f,
- 0x55, 0xce, 0xb3, 0x3a, 0x13, 0x47, 0x91, 0x05, 0xa4, 0x6b, 0x4a, 0x73, 0xf0, 0x09, 0x54, 0x89,
- 0x7a, 0xe0, 0x94, 0xbb, 0x81, 0x5a, 0xf1, 0x35, 0xb9, 0xe2, 0x4d, 0x09, 0x91, 0x3b, 0x5e, 0x6d,
- 0x41, 0x5f, 0x61, 0xeb, 0xea, 0x01, 0x20, 0x00, 0x12, 0xb9, 0x0a, 0x20, 0x5b, 0x4a, 0x75, 0x43,
- 0x43, 0xf1, 0x0a, 0xc8, 0x8e, 0x00, 0x58, 0x6b, 0xb0, 0xf2, 0x6f, 0xcc, 0xc5, 0x63, 0x85, 0xed,
- 0xd0, 0x68, 0x48, 0x46, 0x09, 0x73, 0xb5, 0x54, 0x58, 0xdf, 0x18, 0xb0, 0x7a, 0x09, 0x41, 0xea,
- 0x70, 0x1f, 0x9a, 0xa1, 0x1b, 0x73, 0xcc, 0xb2, 0x2e, 0xc9, 0xae, 0xd3, 0xa1, 0xa8, 0x5c, 0x17,
- 0x8a, 0x6a, 0x29, 0x14, 0x4b, 0xd0, 0x08, 0xdd, 0x0b, 0x27, 0x3c, 0x4e, 0x5f, 0x23, 0xf5, 0xd0,
- 0xbd, 0x78, 0x79, 0xbc, 0xfd, 0x4b, 0x13, 0x3a, 0x47, 0xd8, 0x3d, 0xc7, 0xd8, 0x97, 0x86, 0xa1,
- 0x51, 0xd6, 0x10, 0xc5, 0x6f, 0x3a, 0x74, 0x6f, 0xba, 0xf2, 0x67, 0x7e, 0x44, 0x0e, 0xee, 0x5f,
- 0x47, 0x96, 0xd6, 0xd6, 0x1c, 0x7a, 0x05, 0x6d, 0xed, 0xa3, 0x09, 0xad, 0x68, 0x8c, 0xa5, 0x6f,
- 0xc1, 0xc1, 0xea, 0x25, 0xd8, 0x4c, 0xda, 0x23, 0x03, 0x1d, 0x42, 0x5b, 0xdb, 0xf5, 0xba, 0xbc,
- 0xf2, 0xa3, 0x43, 0x97, 0x37, 0xe3, 0x81, 0x60, 0xcd, 0x09, 0x69, 0xda, 0xc6, 0xd6, 0xa5, 0x95,
- 0xdf, 0x08, 0xba, 0xb4, 0x59, 0x6b, 0x5e, 0x4a, 0xd3, 0x16, 0xa4, 0x2e, 0xad, 0xbc, 0xfe, 0x75,
- 0x69, 0xb3, 0xb6, 0xea, 0x1c, 0xfa, 0x1c, 0x16, 0x4a, 0xab, 0x0b, 0x59, 0x13, 0xae, 0xcb, 0x76,
- 0xee, 0x60, 0xe3, 0x4a, 0x9a, 0x5c, 0xfe, 0x6b, 0xe8, 0xe8, 0x2b, 0x05, 0x69, 0x06, 0xcd, 0x58,
- 0x8a, 0x83, 0xb5, 0xcb, 0xd0, 0xba, 0x40, 0x7d, 0x5a, 0xea, 0x02, 0x67, 0xec, 0x0b, 0x5d, 0xe0,
- 0xac, 0x21, 0x6b, 0xcd, 0xa1, 0xcf, 0x60, 0x7e, 0x7a, 0x6a, 0xa1, 0xbb, 0xd3, 0x61, 0x2b, 0x0d,
- 0xc3, 0x81, 0x75, 0x15, 0x49, 0x2e, 0xfc, 0x00, 0x60, 0x32, 0x8c, 0xd0, 0xf2, 0x84, 0xa7, 0x34,
- 0x0c, 0x07, 0x2b, 0xb3, 0x91, 0xb9, 0xa8, 0x2f, 0x60, 0x69, 0x66, 0xc7, 0x23, 0xad, 0x4d, 0xae,
- 0x9a, 0x19, 0x83, 0xbf, 0x5c, 0x4b, 0x97, 0xe9, 0x7a, 0xb1, 0x06, 0xf3, 0xb1, 0x6a, 0xe4, 0x61,
- 0xbc, 0xe5, 0x05, 0x04, 0x47, 0xfc, 0x05, 0x48, 0x8e, 0x37, 0x8c, 0x72, 0x7a, 0xdc, 0x90, 0xbf,
- 0x83, 0xfe, 0xfe, 0x7b, 0x00, 0x00, 0x00, 0xff, 0xff, 0xc5, 0xce, 0x15, 0x02, 0x1d, 0x12, 0x00,
- 0x00,
+ // 2142 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x59, 0x5f, 0x6f, 0xdb, 0xc8,
+ 0x11, 0x37, 0x25, 0x4b, 0x16, 0x47, 0x52, 0xce, 0x5e, 0x3b, 0x89, 0xa2, 0xc4, 0x89, 0x8f, 0x69,
+ 0xee, 0x5c, 0x24, 0x70, 0x53, 0xf7, 0x0a, 0xdc, 0xf5, 0xda, 0x07, 0xc7, 0x71, 0xae, 0x69, 0x12,
+ 0x9f, 0x41, 0x27, 0x87, 0x2b, 0x0a, 0x94, 0xa5, 0xc9, 0xb5, 0xbc, 0x35, 0x45, 0xb2, 0xbb, 0x4b,
+ 0xff, 0xb9, 0xa7, 0xfb, 0x1c, 0x05, 0xfa, 0xda, 0x4f, 0xd0, 0xc7, 0xa2, 0x2f, 0x45, 0x81, 0x02,
+ 0x45, 0xbf, 0x44, 0x3f, 0x49, 0xb1, 0xb3, 0x24, 0xb5, 0x14, 0x25, 0xfb, 0x82, 0xc3, 0xbd, 0x71,
+ 0x67, 0x66, 0x67, 0x67, 0xe7, 0xcf, 0x6f, 0x66, 0x25, 0xe8, 0x1e, 0xb3, 0x88, 0xf2, 0xad, 0x94,
+ 0x27, 0x32, 0x21, 0x1d, 0x5c, 0x78, 0xe9, 0x91, 0xf3, 0x25, 0xdc, 0x7d, 0x9d, 0x24, 0xa7, 0x59,
+ 0xfa, 0x9c, 0x71, 0x1a, 0xc8, 0x84, 0x5f, 0xee, 0xc5, 0x92, 0x5f, 0xba, 0xf4, 0x4f, 0x19, 0x15,
+ 0x92, 0xdc, 0x03, 0x3b, 0x2c, 0x18, 0x03, 0x6b, 0xc3, 0xda, 0xb4, 0xdd, 0x09, 0x81, 0x10, 0x58,
+ 0x8c, 0xfd, 0x31, 0x1d, 0x34, 0x90, 0x81, 0xdf, 0xce, 0x1e, 0xdc, 0x9b, 0xad, 0x50, 0xa4, 0x49,
+ 0x2c, 0x28, 0x79, 0x04, 0x2d, 0xaa, 0x08, 0xa8, 0xad, 0xbb, 0xfd, 0xc1, 0x56, 0x61, 0xca, 0x96,
+ 0x96, 0xd3, 0x5c, 0xe7, 0x1f, 0x16, 0x90, 0xd7, 0x4c, 0x48, 0x45, 0x64, 0x54, 0x7c, 0x37, 0x7b,
+ 0x6e, 0x41, 0x3b, 0xe5, 0xf4, 0x98, 0x5d, 0xe4, 0x16, 0xe5, 0x2b, 0xf2, 0x04, 0x56, 0x84, 0xf4,
+ 0xb9, 0x7c, 0xc1, 0x93, 0xf1, 0x0b, 0x16, 0xd1, 0x7d, 0x65, 0x74, 0x13, 0x45, 0xea, 0x0c, 0xb2,
+ 0x05, 0x84, 0xc5, 0x41, 0x94, 0x09, 0x76, 0x46, 0x0f, 0x0b, 0xee, 0x60, 0x71, 0xc3, 0xda, 0xec,
+ 0xb8, 0x33, 0x38, 0x64, 0x0d, 0x5a, 0x11, 0x1b, 0x33, 0x39, 0x68, 0x6d, 0x58, 0x9b, 0x7d, 0x57,
+ 0x2f, 0x9c, 0x5f, 0xc2, 0x6a, 0xc5, 0xfe, 0xf7, 0xbb, 0xfe, 0x5f, 0x1a, 0xd0, 0x42, 0x42, 0xe9,
+ 0x63, 0x6b, 0xe2, 0x63, 0xf2, 0x21, 0xf4, 0x98, 0xf0, 0x26, 0x8e, 0x68, 0xa0, 0x6d, 0x5d, 0x26,
+ 0x4a, 0x9f, 0x93, 0xc7, 0xd0, 0x0e, 0x4e, 0xb2, 0xf8, 0x54, 0x0c, 0x9a, 0x1b, 0xcd, 0xcd, 0xee,
+ 0xf6, 0xea, 0xe4, 0x20, 0x75, 0xd1, 0x5d, 0xc5, 0x73, 0x73, 0x11, 0xf2, 0x29, 0x80, 0x2f, 0x25,
+ 0x67, 0x47, 0x99, 0xa4, 0x02, 0x6f, 0xda, 0xdd, 0x1e, 0x18, 0x1b, 0x32, 0x41, 0x77, 0x4a, 0xbe,
+ 0x6b, 0xc8, 0x92, 0xcf, 0xa0, 0x43, 0x2f, 0x24, 0x8d, 0x43, 0x1a, 0x0e, 0x5a, 0x78, 0xd0, 0xfa,
+ 0xd4, 0x8d, 0xb6, 0xf6, 0x72, 0xbe, 0xbe, 0x5f, 0x29, 0x3e, 0xfc, 0x1c, 0xfa, 0x15, 0x16, 0x59,
+ 0x86, 0xe6, 0x29, 0x2d, 0xa2, 0xaa, 0x3e, 0x95, 0x67, 0xcf, 0xfc, 0x28, 0xd3, 0x09, 0xd6, 0x73,
+ 0xf5, 0xe2, 0x17, 0x8d, 0x4f, 0x2d, 0xe7, 0x39, 0xd8, 0x2f, 0xb2, 0x28, 0x2a, 0x37, 0x86, 0x8c,
+ 0x17, 0x1b, 0x43, 0xc6, 0x27, 0x5e, 0x6e, 0x5c, 0xe9, 0xe5, 0xbf, 0x5b, 0xb0, 0xb2, 0x77, 0x46,
+ 0x63, 0xb9, 0x9f, 0x48, 0x76, 0xcc, 0x02, 0x5f, 0xb2, 0x24, 0x26, 0x4f, 0xc0, 0x4e, 0xa2, 0xd0,
+ 0xbb, 0x32, 0x4c, 0x9d, 0x24, 0xca, 0xad, 0x7e, 0x02, 0x76, 0x4c, 0xcf, 0xbd, 0x2b, 0x8f, 0xeb,
+ 0xc4, 0xf4, 0x5c, 0x4b, 0x3f, 0x84, 0x7e, 0x48, 0x23, 0x2a, 0xa9, 0x57, 0x46, 0x47, 0x85, 0xae,
+ 0xa7, 0x89, 0xbb, 0x3a, 0x1c, 0x1f, 0xc1, 0x07, 0x4a, 0x65, 0xea, 0x73, 0x1a, 0x4b, 0x2f, 0xf5,
+ 0xe5, 0x09, 0xc6, 0xc4, 0x76, 0xfb, 0x31, 0x3d, 0x3f, 0x40, 0xea, 0x81, 0x2f, 0x4f, 0x9c, 0xbf,
+ 0x35, 0xc0, 0x2e, 0x83, 0x49, 0x6e, 0xc3, 0x92, 0x3a, 0xd6, 0x63, 0x61, 0xee, 0x89, 0xb6, 0x5a,
+ 0xbe, 0x0c, 0x55, 0x55, 0x24, 0xc7, 0xc7, 0x82, 0x4a, 0x34, 0xaf, 0xe9, 0xe6, 0x2b, 0x95, 0x59,
+ 0x82, 0x7d, 0xa3, 0x0b, 0x61, 0xd1, 0xc5, 0x6f, 0xe5, 0xf1, 0xb1, 0x64, 0x63, 0x8a, 0x07, 0x36,
+ 0x5d, 0xbd, 0x20, 0xab, 0xd0, 0xa2, 0x9e, 0xf4, 0x47, 0x98, 0xe1, 0xb6, 0xbb, 0x48, 0xdf, 0xfa,
+ 0x23, 0xf2, 0x23, 0xb8, 0x21, 0x92, 0x8c, 0x07, 0xd4, 0x2b, 0x8e, 0x6d, 0x23, 0xb7, 0xa7, 0xa9,
+ 0x2f, 0xf4, 0xe1, 0x0e, 0x34, 0x8f, 0x59, 0x38, 0x58, 0x42, 0xc7, 0x2c, 0x57, 0x93, 0xf0, 0x65,
+ 0xe8, 0x2a, 0x26, 0xf9, 0x09, 0x40, 0xa9, 0x29, 0x1c, 0x74, 0xe6, 0x88, 0xda, 0x85, 0xde, 0x90,
+ 0xac, 0x03, 0x04, 0x2c, 0x3d, 0xa1, 0xdc, 0x53, 0x09, 0x63, 0x63, 0x72, 0xd8, 0x9a, 0xf2, 0x8a,
+ 0x5e, 0x2a, 0x36, 0x13, 0xde, 0xe8, 0x1b, 0x96, 0xa6, 0x34, 0x1c, 0x00, 0x7a, 0xd8, 0x66, 0xe2,
+ 0x0b, 0x4d, 0x70, 0xbe, 0x86, 0x76, 0x6e, 0xdc, 0x5d, 0xb0, 0xcf, 0x92, 0x28, 0x1b, 0x97, 0x4e,
+ 0xeb, 0xbb, 0x1d, 0x4d, 0x78, 0x19, 0x92, 0x3b, 0x80, 0x28, 0x89, 0x47, 0x34, 0xd0, 0x45, 0xe8,
+ 0x5f, 0x75, 0xc0, 0x2d, 0x68, 0x07, 0x49, 0x72, 0xca, 0xb4, 0xef, 0x96, 0xdc, 0x7c, 0xe5, 0x7c,
+ 0xdb, 0x84, 0x1b, 0xd5, 0x62, 0x51, 0x47, 0xa0, 0x16, 0xf4, 0xb4, 0x85, 0x6a, 0x50, 0xed, 0x61,
+ 0xc5, 0xdb, 0x0d, 0xd3, 0xdb, 0xc5, 0x96, 0x71, 0x12, 0xea, 0x03, 0xfa, 0x7a, 0xcb, 0x9b, 0x24,
+ 0xa4, 0x2a, 0xd7, 0x33, 0x16, 0x62, 0x78, 0xfa, 0xae, 0xfa, 0x54, 0x94, 0x11, 0x0b, 0x73, 0xf0,
+ 0x51, 0x9f, 0x68, 0x1e, 0x47, 0xbd, 0x6d, 0x1d, 0x70, 0xbd, 0x52, 0x01, 0x1f, 0x2b, 0xea, 0x92,
+ 0x8e, 0xa2, 0xfa, 0x26, 0x1b, 0xd0, 0xe5, 0x34, 0x8d, 0xf2, 0xdc, 0x47, 0xe7, 0xdb, 0xae, 0x49,
+ 0x22, 0xf7, 0x01, 0x82, 0x24, 0x8a, 0x68, 0x80, 0x02, 0x36, 0x0a, 0x18, 0x14, 0x95, 0x77, 0x52,
+ 0x46, 0x9e, 0xa0, 0x01, 0xba, 0xba, 0xe5, 0xb6, 0xa5, 0x8c, 0x0e, 0x69, 0xa0, 0xee, 0x91, 0x09,
+ 0xca, 0x3d, 0x84, 0xaf, 0x2e, 0xee, 0xeb, 0x28, 0x02, 0x82, 0xec, 0x3a, 0xc0, 0x88, 0x27, 0x59,
+ 0xaa, 0xb9, 0xbd, 0x8d, 0xa6, 0x42, 0x72, 0xa4, 0x20, 0xfb, 0x11, 0xdc, 0x10, 0x97, 0xe3, 0x88,
+ 0xc5, 0xa7, 0x9e, 0xf4, 0xf9, 0x88, 0xca, 0x41, 0x5f, 0x57, 0x40, 0x4e, 0x7d, 0x8b, 0x44, 0x75,
+ 0xf7, 0x71, 0xf8, 0xf3, 0xc1, 0x0d, 0xcc, 0x00, 0xf5, 0xe9, 0xa4, 0x40, 0x76, 0x39, 0xf5, 0x25,
+ 0x7d, 0x8f, 0x36, 0xf6, 0xdd, 0xd0, 0x82, 0xdc, 0x84, 0x76, 0xe2, 0xd1, 0x8b, 0x20, 0xca, 0x8b,
+ 0xb6, 0x95, 0xec, 0x5d, 0x04, 0x91, 0xf3, 0x18, 0x56, 0x2b, 0x27, 0xe6, 0x40, 0xbf, 0x06, 0x2d,
+ 0xca, 0x79, 0x52, 0xc0, 0x92, 0x5e, 0x38, 0xbf, 0x05, 0xf2, 0x2e, 0x0d, 0x7f, 0x08, 0xf3, 0x9c,
+ 0x9b, 0xb0, 0x5a, 0x51, 0xad, 0xed, 0x70, 0xbe, 0xb5, 0x60, 0x6d, 0x27, 0x4d, 0x69, 0x1c, 0xbe,
+ 0x4d, 0xde, 0xe3, 0xd0, 0x75, 0x00, 0x54, 0xeb, 0x19, 0x0d, 0xde, 0x46, 0x0a, 0xc6, 0xe7, 0x7d,
+ 0xda, 0x8b, 0x73, 0x1b, 0x6e, 0x4e, 0x59, 0x90, 0xdb, 0xf6, 0x2f, 0x0b, 0xc8, 0x73, 0x44, 0xbe,
+ 0xef, 0x37, 0x74, 0x28, 0x2c, 0x52, 0x0d, 0x51, 0x23, 0x6b, 0xe8, 0x4b, 0x3f, 0x6f, 0xd7, 0x3d,
+ 0x26, 0xb4, 0xfe, 0xe7, 0xbe, 0xf4, 0xf3, 0xb6, 0xc9, 0x69, 0x90, 0x71, 0xd5, 0xc1, 0xb1, 0x64,
+ 0xb0, 0x6d, 0xba, 0x05, 0x89, 0x7c, 0x02, 0xb7, 0xd8, 0x28, 0x4e, 0x38, 0x9d, 0x88, 0x79, 0x3a,
+ 0x8c, 0x6d, 0x14, 0x5e, 0xd3, 0xdc, 0x72, 0xc3, 0x1e, 0x46, 0xf5, 0x31, 0xac, 0x56, 0xae, 0x71,
+ 0x65, 0x0a, 0xfc, 0xd9, 0x82, 0xc1, 0x8e, 0x4c, 0xc6, 0x2c, 0x70, 0xa9, 0x32, 0xbe, 0x72, 0xf5,
+ 0x87, 0xd0, 0x57, 0xbd, 0x67, 0xfa, 0xfa, 0xbd, 0x24, 0x0a, 0x27, 0xbd, 0xfd, 0x0e, 0xa8, 0xf6,
+ 0x63, 0x46, 0x66, 0x29, 0x89, 0x42, 0x8c, 0xcb, 0x43, 0x50, 0x3d, 0xc2, 0xd8, 0xaf, 0xa7, 0x9c,
+ 0x5e, 0x4c, 0xcf, 0x2b, 0xfb, 0x95, 0x10, 0xee, 0xd7, 0x8d, 0x65, 0x29, 0xa6, 0xe7, 0x6a, 0xbf,
+ 0x73, 0x17, 0xee, 0xcc, 0xb0, 0x2d, 0x0f, 0xd7, 0xbf, 0x2d, 0x58, 0xdd, 0x11, 0x82, 0x8d, 0xe2,
+ 0xaf, 0x10, 0x24, 0x0b, 0xa3, 0xd7, 0xa0, 0x15, 0x24, 0x59, 0x2c, 0xd1, 0xd8, 0x96, 0xab, 0x17,
+ 0x53, 0xb8, 0xd1, 0xa8, 0xe1, 0xc6, 0x14, 0xf2, 0x34, 0xeb, 0xc8, 0x63, 0x20, 0xcb, 0x62, 0x05,
+ 0x59, 0x1e, 0x40, 0x57, 0x05, 0xd9, 0x0b, 0x68, 0x2c, 0x29, 0xcf, 0xbb, 0x12, 0x28, 0xd2, 0x2e,
+ 0x52, 0x94, 0x80, 0xd9, 0x3d, 0x75, 0x63, 0x82, 0x74, 0xd2, 0x3a, 0xff, 0xa7, 0xaa, 0xa2, 0x72,
+ 0x95, 0x3c, 0x66, 0x73, 0xbb, 0xa8, 0x02, 0x5e, 0x1e, 0xe5, 0xf7, 0x50, 0x9f, 0xaa, 0x44, 0xd2,
+ 0xec, 0x28, 0x62, 0x81, 0xa7, 0x18, 0xda, 0x7e, 0x5b, 0x53, 0xde, 0xf1, 0x68, 0xe2, 0x95, 0x45,
+ 0xd3, 0x2b, 0x04, 0x16, 0xfd, 0x4c, 0x9e, 0x14, 0x9d, 0x54, 0x7d, 0x4f, 0x79, 0xaa, 0x7d, 0x9d,
+ 0xa7, 0x96, 0xea, 0x9e, 0x2a, 0x33, 0xad, 0x63, 0x66, 0xda, 0x27, 0xb0, 0xaa, 0x47, 0xf1, 0x6a,
+ 0xb8, 0xd6, 0x01, 0xca, 0xae, 0x27, 0x06, 0x96, 0x86, 0xde, 0xa2, 0xed, 0x09, 0xe7, 0x57, 0x60,
+ 0xbf, 0x4e, 0xb4, 0x5e, 0x41, 0x9e, 0x82, 0x1d, 0x15, 0x0b, 0x14, 0xed, 0x6e, 0x93, 0x49, 0xa9,
+ 0x17, 0x72, 0xee, 0x44, 0xc8, 0xf9, 0x1c, 0x3a, 0x05, 0xb9, 0xf0, 0x99, 0x35, 0xcf, 0x67, 0x8d,
+ 0x29, 0x9f, 0x39, 0xff, 0xb4, 0x60, 0xad, 0x6a, 0x72, 0x1e, 0x96, 0x77, 0xd0, 0x2f, 0x8f, 0xf0,
+ 0xc6, 0x7e, 0x9a, 0xdb, 0xf2, 0xd4, 0xb4, 0xa5, 0xbe, 0xad, 0x34, 0x50, 0xbc, 0xf1, 0x53, 0x9d,
+ 0xcb, 0xbd, 0xc8, 0x20, 0x0d, 0xdf, 0xc2, 0x4a, 0x4d, 0x64, 0xc6, 0x1c, 0xfa, 0x63, 0x73, 0x0e,
+ 0xad, 0x80, 0x5d, 0xb9, 0xdb, 0x1c, 0x4e, 0x3f, 0x83, 0xdb, 0x1a, 0x0e, 0x76, 0xcb, 0x18, 0x16,
+ 0xbe, 0xaf, 0x86, 0xda, 0x9a, 0x0e, 0xb5, 0x33, 0x84, 0x41, 0x7d, 0x6b, 0x5e, 0x7e, 0x23, 0x58,
+ 0x39, 0x94, 0xbe, 0x64, 0x42, 0xb2, 0xa0, 0x7c, 0x10, 0x4d, 0xe5, 0x86, 0x75, 0x5d, 0xff, 0xae,
+ 0xd7, 0xe1, 0x32, 0x34, 0xa5, 0x2c, 0xf2, 0x57, 0x7d, 0xaa, 0x28, 0x10, 0xf3, 0xa4, 0x3c, 0x06,
+ 0x3f, 0xc0, 0x51, 0x2a, 0x1f, 0x64, 0x22, 0xfd, 0x48, 0xcf, 0x47, 0x8b, 0x38, 0x1f, 0xd9, 0x48,
+ 0xc1, 0x01, 0x49, 0x8f, 0x10, 0xa1, 0xe6, 0xb6, 0xf4, 0xf4, 0xa4, 0x08, 0xc8, 0x5c, 0x07, 0xc0,
+ 0x52, 0xd5, 0x55, 0xd6, 0xd6, 0x7b, 0x15, 0x65, 0x57, 0x11, 0x9c, 0xfb, 0x70, 0xef, 0x0b, 0x2a,
+ 0x55, 0x37, 0xe2, 0xbb, 0x49, 0x7c, 0xcc, 0x46, 0x19, 0xf7, 0x8d, 0x50, 0x38, 0xff, 0xb1, 0x60,
+ 0x7d, 0x8e, 0x40, 0x7e, 0xe1, 0x01, 0x2c, 0x8d, 0x7d, 0x21, 0x29, 0x2f, 0xaa, 0xa4, 0x58, 0x4e,
+ 0xbb, 0xa2, 0x71, 0x9d, 0x2b, 0x9a, 0x35, 0x57, 0xdc, 0x84, 0xf6, 0xd8, 0xbf, 0xf0, 0xc6, 0x47,
+ 0xf9, 0x28, 0xd7, 0x1a, 0xfb, 0x17, 0x6f, 0x8e, 0x10, 0xd9, 0x18, 0xf7, 0x8e, 0xb2, 0xe0, 0x94,
+ 0x4a, 0x51, 0x22, 0x1b, 0xe3, 0xcf, 0x34, 0x05, 0x67, 0x3b, 0x1c, 0x74, 0x11, 0x06, 0x3a, 0x6e,
+ 0xbe, 0x72, 0xce, 0x61, 0x70, 0x98, 0x1d, 0x89, 0x80, 0xb3, 0x23, 0xfa, 0x86, 0x4a, 0x5f, 0x81,
+ 0x61, 0x91, 0x23, 0x0f, 0xa0, 0x1b, 0x44, 0x4c, 0xa1, 0xa1, 0xf1, 0x92, 0x04, 0x4d, 0xc2, 0xae,
+ 0x81, 0x70, 0x29, 0x4f, 0xbc, 0xca, 0xe3, 0x19, 0x14, 0xe9, 0x40, 0x3f, 0xa0, 0xef, 0x40, 0x47,
+ 0xb0, 0x38, 0xa0, 0x5e, 0xac, 0x5f, 0x2c, 0x4d, 0x77, 0x09, 0xd7, 0xfb, 0x42, 0xb5, 0xb3, 0x3b,
+ 0x33, 0x4e, 0xce, 0x5d, 0x78, 0x75, 0x2b, 0xff, 0x0d, 0x10, 0x7a, 0x86, 0x76, 0x19, 0xef, 0xaf,
+ 0xbc, 0xc8, 0xee, 0x1a, 0x63, 0xce, 0xf4, 0x13, 0xcd, 0x5d, 0xa1, 0xb5, 0x57, 0xdb, 0x2a, 0xb4,
+ 0xa4, 0x98, 0xd8, 0xb7, 0x28, 0xc5, 0xbe, 0x70, 0x7c, 0x05, 0x46, 0x23, 0x5d, 0xd6, 0xa5, 0x80,
+ 0x35, 0x11, 0x20, 0x4f, 0x80, 0xa4, 0x3e, 0x97, 0x4c, 0xa9, 0x50, 0x93, 0xbe, 0x77, 0xe2, 0x8b,
+ 0x13, 0xb4, 0xa0, 0xe5, 0x2e, 0x97, 0x9c, 0x57, 0xf4, 0xf2, 0xd7, 0xbe, 0x38, 0x51, 0xe0, 0x8d,
+ 0xc3, 0x45, 0x13, 0xe7, 0x4d, 0xfc, 0x76, 0x28, 0xac, 0xbd, 0xa2, 0x34, 0xdd, 0x4d, 0xe2, 0x98,
+ 0x06, 0x92, 0x86, 0x85, 0xd3, 0x67, 0xbd, 0xdb, 0xef, 0x82, 0x3d, 0xe2, 0x69, 0xe0, 0xa5, 0x09,
+ 0xd7, 0x8f, 0xb1, 0xbe, 0xdb, 0x51, 0x84, 0x83, 0x84, 0xe3, 0xd4, 0xc3, 0xa9, 0x7e, 0xe3, 0xe8,
+ 0xa9, 0xca, 0x76, 0x27, 0x04, 0x35, 0x43, 0x4d, 0x1d, 0x93, 0xa3, 0xc2, 0x4f, 0x15, 0xc8, 0x07,
+ 0xbe, 0xa4, 0xcf, 0x78, 0x72, 0x4a, 0x79, 0x71, 0xfc, 0x10, 0x3a, 0xc5, 0xe6, 0xdc, 0x84, 0x72,
+ 0xed, 0xfc, 0x17, 0x51, 0xd6, 0xdc, 0x33, 0x19, 0x58, 0x8e, 0x93, 0x2c, 0xd6, 0xad, 0xaf, 0xe3,
+ 0xea, 0x05, 0xd9, 0x33, 0x0d, 0x6b, 0x20, 0xee, 0x7e, 0x3c, 0x85, 0x80, 0x53, 0x8a, 0xb6, 0xdc,
+ 0x5c, 0xde, 0xb8, 0xc1, 0xf0, 0x6b, 0xe8, 0x14, 0x64, 0x35, 0xde, 0xa3, 0x23, 0xfc, 0x30, 0xe4,
+ 0x54, 0x08, 0x2a, 0x72, 0x1b, 0xfb, 0x8a, 0xba, 0x53, 0x10, 0x95, 0x58, 0xb1, 0x3f, 0xaf, 0x72,
+ 0x1d, 0x99, 0x7e, 0x41, 0xc5, 0x4a, 0xdf, 0xfe, 0x2b, 0x40, 0xef, 0x90, 0xfa, 0xe7, 0x94, 0x86,
+ 0x58, 0xcd, 0x64, 0x54, 0x74, 0x91, 0xea, 0x6f, 0x50, 0xe4, 0xd1, 0x74, 0xbb, 0x98, 0xf9, 0xa3,
+ 0xd7, 0xf0, 0xa3, 0xeb, 0xc4, 0x72, 0xd7, 0x2f, 0x90, 0x7d, 0xe8, 0x1a, 0x3f, 0xf2, 0x90, 0x7b,
+ 0xc6, 0xc6, 0xda, 0x6f, 0x57, 0xc3, 0xf5, 0x39, 0xdc, 0x42, 0xdb, 0x53, 0x8b, 0xbc, 0x86, 0xae,
+ 0xf1, 0x96, 0x30, 0xf5, 0xd5, 0x1f, 0x35, 0xa6, 0xbe, 0x19, 0x0f, 0x10, 0x67, 0x41, 0x69, 0x33,
+ 0x5e, 0x04, 0xa6, 0xb6, 0xfa, 0x1b, 0xc4, 0xd4, 0x36, 0xeb, 0x19, 0xb1, 0x40, 0x5c, 0xe8, 0x57,
+ 0xa6, 0x78, 0x72, 0x7f, 0xb2, 0x63, 0xd6, 0x03, 0x63, 0xf8, 0x60, 0x2e, 0xdf, 0xb4, 0xd0, 0x18,
+ 0x9c, 0x4d, 0x0b, 0xeb, 0xcf, 0x02, 0xd3, 0xc2, 0x19, 0xd3, 0xb6, 0xb3, 0x40, 0x7e, 0x0f, 0x2b,
+ 0xb5, 0xe1, 0x95, 0x38, 0x86, 0x15, 0x73, 0xa6, 0xee, 0xe1, 0xc3, 0x2b, 0x65, 0x4a, 0xfd, 0x5f,
+ 0x42, 0xcf, 0x9c, 0x19, 0x89, 0x61, 0xd0, 0x8c, 0xb1, 0x78, 0x78, 0x7f, 0x1e, 0xdb, 0x54, 0x68,
+ 0x8e, 0x2d, 0xa6, 0xc2, 0x19, 0x83, 0x9b, 0xa9, 0x70, 0xd6, 0xb4, 0xe3, 0x2c, 0x90, 0xdf, 0xc1,
+ 0xf2, 0xf4, 0xf8, 0x40, 0x3e, 0x9c, 0x76, 0x5b, 0x6d, 0x2a, 0x19, 0x3a, 0x57, 0x89, 0x94, 0xca,
+ 0x5f, 0x02, 0x4c, 0xa6, 0x02, 0x62, 0xe0, 0x73, 0x6d, 0x2a, 0x19, 0xde, 0x9b, 0xcd, 0x2c, 0x55,
+ 0xfd, 0x11, 0x6e, 0xce, 0x6c, 0xbd, 0xc4, 0x28, 0xbd, 0xab, 0x9a, 0xf7, 0xf0, 0xe3, 0x6b, 0xe5,
+ 0xca, 0xb3, 0xfe, 0x00, 0x2b, 0xb5, 0xfe, 0x64, 0x66, 0xc5, 0xbc, 0xb6, 0x69, 0x66, 0xc5, 0xdc,
+ 0x06, 0x87, 0x55, 0xfb, 0x15, 0xf4, 0x2b, 0xd8, 0x6c, 0x56, 0xc6, 0xac, 0xde, 0x60, 0x56, 0xc6,
+ 0x6c, 0x50, 0x5f, 0xd8, 0xb4, 0x9e, 0x5a, 0x3a, 0x3d, 0x26, 0xe8, 0x5a, 0x4d, 0x8f, 0x1a, 0xe4,
+ 0x57, 0xd3, 0xa3, 0x0e, 0xca, 0xce, 0xc2, 0xb3, 0xfb, 0xb0, 0x2c, 0x34, 0x4e, 0x1e, 0x8b, 0x2d,
+ 0xdd, 0xff, 0x9f, 0x01, 0x3a, 0xef, 0x80, 0x27, 0x32, 0x39, 0x6a, 0xe3, 0xbf, 0x03, 0x3f, 0xfb,
+ 0x7f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x32, 0x17, 0xbf, 0x34, 0x2c, 0x18, 0x00, 0x00,
}
diff --git a/weed/pb/filer_pb/filer_client.go b/weed/pb/filer_pb/filer_client.go
new file mode 100644
index 000000000..d42e20b34
--- /dev/null
+++ b/weed/pb/filer_pb/filer_client.go
@@ -0,0 +1,236 @@
+package filer_pb
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "os"
+ "time"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+var (
+ OS_UID = uint32(os.Getuid())
+ OS_GID = uint32(os.Getgid())
+)
+
+type FilerClient interface {
+ WithFilerClient(fn func(SeaweedFilerClient) error) error
+ AdjustedUrl(hostAndPort string) string
+}
+
+func GetEntry(filerClient FilerClient, fullFilePath util.FullPath) (entry *Entry, err error) {
+
+ dir, name := fullFilePath.DirAndName()
+
+ err = filerClient.WithFilerClient(func(client SeaweedFilerClient) error {
+
+ request := &LookupDirectoryEntryRequest{
+ Directory: dir,
+ Name: name,
+ }
+
+ // glog.V(3).Infof("read %s request: %v", fullFilePath, request)
+ resp, err := LookupEntry(client, request)
+ if err != nil {
+ if err == ErrNotFound {
+ return nil
+ }
+ glog.V(3).Infof("read %s %v: %v", fullFilePath, resp, err)
+ return err
+ }
+
+ if resp.Entry == nil {
+ // glog.V(3).Infof("read %s entry: %v", fullFilePath, entry)
+ return nil
+ }
+
+ entry = resp.Entry
+ return nil
+ })
+
+ return
+}
+
+type EachEntryFunction func(entry *Entry, isLast bool) error
+
+func ReadDirAllEntries(filerClient FilerClient, fullDirPath util.FullPath, prefix string, fn EachEntryFunction) (err error) {
+
+ return doList(filerClient, fullDirPath, prefix, fn, "", false, math.MaxUint32)
+
+}
+
+func List(filerClient FilerClient, parentDirectoryPath, prefix string, fn EachEntryFunction, startFrom string, inclusive bool, limit uint32) (err error) {
+
+ return doList(filerClient, util.FullPath(parentDirectoryPath), prefix, fn, startFrom, inclusive, limit)
+
+}
+
+func doList(filerClient FilerClient, fullDirPath util.FullPath, prefix string, fn EachEntryFunction, startFrom string, inclusive bool, limit uint32) (err error) {
+
+ err = filerClient.WithFilerClient(func(client SeaweedFilerClient) error {
+
+ request := &ListEntriesRequest{
+ Directory: string(fullDirPath),
+ Prefix: prefix,
+ StartFromFileName: startFrom,
+ Limit: limit,
+ InclusiveStartFrom: inclusive,
+ }
+
+ glog.V(3).Infof("read directory: %v", request)
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel() // release the stream's context even if ListEntries fails
+ stream, err := client.ListEntries(ctx, request)
+ if err != nil {
+ return fmt.Errorf("list %s: %v", fullDirPath, err)
+ }
+
+ var prevEntry *Entry
+ for {
+ resp, recvErr := stream.Recv()
+ if recvErr != nil {
+ if recvErr == io.EOF {
+ if prevEntry != nil {
+ if err := fn(prevEntry, true); err != nil {
+ return err
+ }
+ }
+ break
+ } else {
+ return recvErr
+ }
+ }
+ if prevEntry != nil {
+ if err := fn(prevEntry, false); err != nil {
+ return err
+ }
+ }
+ prevEntry = resp.Entry
+ }
+
+ return nil
+
+ })
+
+ return
+}
+
+func Exists(filerClient FilerClient, parentDirectoryPath string, entryName string, isDirectory bool) (exists bool, err error) {
+
+ err = filerClient.WithFilerClient(func(client SeaweedFilerClient) error {
+
+ request := &LookupDirectoryEntryRequest{
+ Directory: parentDirectoryPath,
+ Name: entryName,
+ }
+
+ glog.V(4).Infof("exists entry %v/%v: %v", parentDirectoryPath, entryName, request)
+ resp, err := LookupEntry(client, request)
+ if err != nil {
+ if err == ErrNotFound {
+ exists = false
+ return nil
+ }
+ glog.V(0).Infof("exists entry %v: %v", request, err)
+ return fmt.Errorf("exists entry %s/%s: %v", parentDirectoryPath, entryName, err)
+ }
+
+ exists = resp.Entry.IsDirectory == isDirectory
+
+ return nil
+ })
+
+ return
+}
+
+func Mkdir(filerClient FilerClient, parentDirectoryPath string, dirName string, fn func(entry *Entry)) error {
+ return filerClient.WithFilerClient(func(client SeaweedFilerClient) error {
+
+ entry := &Entry{
+ Name: dirName,
+ IsDirectory: true,
+ Attributes: &FuseAttributes{
+ Mtime: time.Now().Unix(),
+ Crtime: time.Now().Unix(),
+ FileMode: uint32(0777 | os.ModeDir),
+ Uid: OS_UID,
+ Gid: OS_GID,
+ },
+ }
+
+ if fn != nil {
+ fn(entry)
+ }
+
+ request := &CreateEntryRequest{
+ Directory: parentDirectoryPath,
+ Entry: entry,
+ }
+
+ glog.V(1).Infof("mkdir: %v", request)
+ if err := CreateEntry(client, request); err != nil {
+ glog.V(0).Infof("mkdir %v: %v", request, err)
+ return fmt.Errorf("mkdir %s/%s: %v", parentDirectoryPath, dirName, err)
+ }
+
+ return nil
+ })
+}
+
+func MkFile(filerClient FilerClient, parentDirectoryPath string, fileName string, chunks []*FileChunk) error {
+ return filerClient.WithFilerClient(func(client SeaweedFilerClient) error {
+
+ entry := &Entry{
+ Name: fileName,
+ IsDirectory: false,
+ Attributes: &FuseAttributes{
+ Mtime: time.Now().Unix(),
+ Crtime: time.Now().Unix(),
+ FileMode: uint32(0770),
+ Uid: OS_UID,
+ Gid: OS_GID,
+ },
+ Chunks: chunks,
+ }
+
+ request := &CreateEntryRequest{
+ Directory: parentDirectoryPath,
+ Entry: entry,
+ }
+
+ glog.V(1).Infof("create file: %s/%s", parentDirectoryPath, fileName)
+ if err := CreateEntry(client, request); err != nil {
+ glog.V(0).Infof("create file %v:%v", request, err)
+ return fmt.Errorf("create file %s/%s: %v", parentDirectoryPath, fileName, err)
+ }
+
+ return nil
+ })
+}
+
+func Remove(filerClient FilerClient, parentDirectoryPath string, name string, isDeleteData, isRecursive, ignoreRecursiveErr bool) error {
+ return filerClient.WithFilerClient(func(client SeaweedFilerClient) error {
+
+ if resp, err := client.DeleteEntry(context.Background(), &DeleteEntryRequest{
+ Directory: parentDirectoryPath,
+ Name: name,
+ IsDeleteData: isDeleteData,
+ IsRecursive: isRecursive,
+ IgnoreRecursiveError: ignoreRecursiveErr,
+ }); err != nil {
+ return err
+ } else {
+ if resp.Error != "" {
+ return errors.New(resp.Error)
+ }
+ }
+
+ return nil
+
+ })
+}
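
For orientation, the new FilerClient interface exposes only a WithFilerClient callback plus URL adjustment, so any component that owns a gRPC connection can plug into these helpers. Below is a minimal sketch of a standalone implementation that reuses the WithGrpcFilerClient helper added in weed/pb later in this patch; the adapter type name, the localhost address, and the insecure dial option are illustrative assumptions, not part of the patch.

package main

import (
	"fmt"

	"google.golang.org/grpc"

	"github.com/chrislusf/seaweedfs/weed/pb"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/chrislusf/seaweedfs/weed/util"
)

// simpleFilerClient is a hypothetical adapter satisfying filer_pb.FilerClient.
type simpleFilerClient struct {
	filerGrpcAddress string // e.g. "localhost:18888" (assumed)
	grpcDialOption   grpc.DialOption
}

func (c *simpleFilerClient) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error {
	// Reuse the cached-connection helper from weed/pb.
	return pb.WithGrpcFilerClient(c.filerGrpcAddress, c.grpcDialOption, fn)
}

func (c *simpleFilerClient) AdjustedUrl(hostAndPort string) string {
	return hostAndPort // no address rewriting in this sketch
}

func main() {
	client := &simpleFilerClient{filerGrpcAddress: "localhost:18888", grpcDialOption: grpc.WithInsecure()}
	// Stream all entries under /buckets; isLast marks the final entry of the listing.
	err := filer_pb.ReadDirAllEntries(client, util.FullPath("/buckets"), "",
		func(entry *filer_pb.Entry, isLast bool) error {
			fmt.Println(entry.Name, "isLast:", isLast)
			return nil
		})
	if err != nil {
		fmt.Println("list failed:", err)
	}
}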
diff --git a/weed/pb/filer_pb/filer_client_bfs.go b/weed/pb/filer_pb/filer_client_bfs.go
new file mode 100644
index 000000000..4e5b65f12
--- /dev/null
+++ b/weed/pb/filer_pb/filer_client_bfs.go
@@ -0,0 +1,63 @@
+package filer_pb
+
+import (
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+func TraverseBfs(filerClient FilerClient, parentPath util.FullPath, fn func(parentPath util.FullPath, entry *Entry)) (err error) {
+
+ K := 5 // number of concurrent directory-listing workers
+
+ var jobQueueWg sync.WaitGroup
+ queue := util.NewQueue()
+ jobQueueWg.Add(1)
+ queue.Enqueue(parentPath)
+ var isTerminating bool
+
+ for i := 0; i < K; i++ {
+ go func() {
+ for {
+ if isTerminating {
+ break
+ }
+ t := queue.Dequeue()
+ if t == nil {
+ time.Sleep(329 * time.Millisecond)
+ continue
+ }
+ dir := t.(util.FullPath)
+ processErr := processOneDirectory(filerClient, dir, queue, &jobQueueWg, fn)
+ if processErr != nil {
+ err = processErr
+ }
+ jobQueueWg.Done()
+ }
+ }()
+ }
+ jobQueueWg.Wait()
+ isTerminating = true
+ return
+}
+
+func processOneDirectory(filerClient FilerClient, parentPath util.FullPath, queue *util.Queue, jobQueueWg *sync.WaitGroup, fn func(parentPath util.FullPath, entry *Entry)) (err error) {
+
+ return ReadDirAllEntries(filerClient, parentPath, "", func(entry *Entry, isLast bool) error {
+
+ fn(parentPath, entry)
+
+ if entry.IsDirectory {
+ subDir := fmt.Sprintf("%s/%s", parentPath, entry.Name)
+ if parentPath == "/" {
+ subDir = "/" + entry.Name
+ }
+ jobQueueWg.Add(1)
+ queue.Enqueue(util.FullPath(subDir))
+ }
+ return nil
+ })
+
+}
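
TraverseBfs fans the walk out to five workers polling one shared queue, so the supplied fn may be invoked from several goroutines at once and must be safe for concurrent use. A hedged usage sketch follows; the function name and root path are illustrative, and only the imports shown are needed beyond the patch itself.

import (
	"fmt"
	"sync/atomic"

	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/chrislusf/seaweedfs/weed/util"
)

// countEntries walks the tree under root and tallies files vs directories.
func countEntries(client filer_pb.FilerClient, root util.FullPath) error {
	var files, dirs int64
	err := filer_pb.TraverseBfs(client, root,
		func(parentPath util.FullPath, entry *filer_pb.Entry) {
			// The callback runs on several workers at once; count atomically.
			if entry.IsDirectory {
				atomic.AddInt64(&dirs, 1)
			} else {
				atomic.AddInt64(&files, 1)
			}
		})
	fmt.Printf("%d dirs, %d files\n", dirs, files)
	return err
}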
diff --git a/weed/pb/filer_pb/filer_pb_helper.go b/weed/pb/filer_pb/filer_pb_helper.go
index b2ffacc01..96ab2154f 100644
--- a/weed/pb/filer_pb/filer_pb_helper.go
+++ b/weed/pb/filer_pb/filer_pb_helper.go
@@ -2,8 +2,11 @@ package filer_pb
import (
"context"
+ "errors"
"fmt"
+ "strings"
+ "github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
)
@@ -71,13 +74,32 @@ func AfterEntryDeserialization(chunks []*FileChunk) {
}
}
-func CreateEntry(ctx context.Context, client SeaweedFilerClient, request *CreateEntryRequest) error {
- resp, err := client.CreateEntry(ctx, request)
- if err == nil && resp.Error != "" {
- return fmt.Errorf("CreateEntry: %v", resp.Error)
- }
+func CreateEntry(client SeaweedFilerClient, request *CreateEntryRequest) error {
+ resp, err := client.CreateEntry(context.Background(), request)
if err != nil {
+ glog.V(1).Infof("create entry %s/%s %v: %v", request.Directory, request.Entry.Name, request.OExcl, err)
return fmt.Errorf("CreateEntry: %v", err)
}
- return err
+ if resp.Error != "" {
+ glog.V(1).Infof("create entry %s/%s %v: %v", request.Directory, request.Entry.Name, request.OExcl, err)
+ return fmt.Errorf("CreateEntry : %v", resp.Error)
+ }
+ return nil
}
+
+func LookupEntry(client SeaweedFilerClient, request *LookupDirectoryEntryRequest) (*LookupDirectoryEntryResponse, error) {
+ resp, err := client.LookupDirectoryEntry(context.Background(), request)
+ if err != nil {
+ if err == ErrNotFound || strings.Contains(err.Error(), ErrNotFound.Error()) {
+ return nil, ErrNotFound
+ }
+ glog.V(3).Infof("read %s/%v: %v", request.Directory, request.Name, err)
+ return nil, fmt.Errorf("LookupEntry1: %v", err)
+ }
+ if resp.Entry == nil {
+ return nil, ErrNotFound
+ }
+ return resp, nil
+}
+
+var ErrNotFound = errors.New("filer: no entry is found in filer store")
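
LookupEntry folds both a gRPC error carrying the store's not-found text and an empty response into the single ErrNotFound sentinel, so callers can test with a plain comparison instead of string matching. A short sketch of the intended call pattern, following the same shape as GetEntry above (directory and name are illustrative):

err := client.WithFilerClient(func(c filer_pb.SeaweedFilerClient) error {
	resp, err := filer_pb.LookupEntry(c, &filer_pb.LookupDirectoryEntryRequest{
		Directory: "/buckets",     // illustrative path
		Name:      "missing-file", // illustrative name
	})
	if err == filer_pb.ErrNotFound {
		return nil // absent entry: not a transport failure
	}
	if err != nil {
		return err // a real lookup error worth surfacing
	}
	fmt.Println("found entry:", resp.Entry.Name)
	return nil
})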
diff --git a/weed/pb/grpc_client_server.go b/weed/pb/grpc_client_server.go
new file mode 100644
index 000000000..9c7cf124b
--- /dev/null
+++ b/weed/pb/grpc_client_server.go
@@ -0,0 +1,207 @@
+package pb
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/keepalive"
+
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
+)
+
+const (
+ Max_Message_Size = 1 << 30 // 1 GiB
+)
+
+var (
+ // cache grpc connections
+ grpcClients = make(map[string]*grpc.ClientConn)
+ grpcClientsLock sync.Mutex
+)
+
+func init() {
+ http.DefaultTransport.(*http.Transport).MaxIdleConnsPerHost = 1024
+}
+
+func NewGrpcServer(opts ...grpc.ServerOption) *grpc.Server {
+ var options []grpc.ServerOption
+ options = append(options,
+ grpc.KeepaliveParams(keepalive.ServerParameters{
+ Time: 10 * time.Second, // wait time before ping if no activity
+ Timeout: 20 * time.Second, // ping timeout
+ }),
+ grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
+ MinTime: 60 * time.Second, // min time a client should wait before sending a ping
+ PermitWithoutStream: false,
+ }),
+ grpc.MaxRecvMsgSize(Max_Message_Size),
+ grpc.MaxSendMsgSize(Max_Message_Size),
+ )
+ for _, opt := range opts {
+ if opt != nil {
+ options = append(options, opt)
+ }
+ }
+ return grpc.NewServer(options...)
+}
+
+func GrpcDial(ctx context.Context, address string, opts ...grpc.DialOption) (*grpc.ClientConn, error) {
+ // opts = append(opts, grpc.WithBlock())
+ // opts = append(opts, grpc.WithTimeout(time.Duration(5*time.Second)))
+ var options []grpc.DialOption
+ options = append(options,
+ // grpc.WithInsecure(),
+ grpc.WithDefaultCallOptions(
+ grpc.MaxCallSendMsgSize(Max_Message_Size),
+ grpc.MaxCallRecvMsgSize(Max_Message_Size),
+ ),
+ grpc.WithKeepaliveParams(keepalive.ClientParameters{
+ Time: 30 * time.Second, // client pings the server if there is no activity for this long
+ Timeout: 20 * time.Second,
+ PermitWithoutStream: false,
+ }))
+ for _, opt := range opts {
+ if opt != nil {
+ options = append(options, opt)
+ }
+ }
+ return grpc.DialContext(ctx, address, options...)
+}
+
+func WithCachedGrpcClient(fn func(*grpc.ClientConn) error, address string, opts ...grpc.DialOption) error {
+
+ grpcClientsLock.Lock()
+
+ existingConnection, found := grpcClients[address]
+ if found {
+ grpcClientsLock.Unlock()
+ err := fn(existingConnection)
+ if err != nil {
+ grpcClientsLock.Lock()
+ // delete(grpcClients, address)
+ grpcClientsLock.Unlock()
+ // println("closing existing connection to", existingConnection.Target())
+ // existingConnection.Close()
+ }
+ return err
+ }
+
+ grpcConnection, err := GrpcDial(context.Background(), address, opts...)
+ if err != nil {
+ grpcClientsLock.Unlock()
+ return fmt.Errorf("fail to dial %s: %v", address, err)
+ }
+
+ grpcClients[address] = grpcConnection
+ grpcClientsLock.Unlock()
+
+ err = fn(grpcConnection)
+ if err != nil {
+ grpcClientsLock.Lock()
+ // delete(grpcClients, address)
+ grpcClientsLock.Unlock()
+ // println("closing created new connection to", grpcConnection.Target())
+ // grpcConnection.Close()
+ }
+
+ return err
+}
+
+func ParseServerToGrpcAddress(server string) (serverGrpcAddress string, err error) {
+ colonIndex := strings.LastIndex(server, ":")
+ if colonIndex < 0 {
+ return "", fmt.Errorf("server should have hostname:port format: %v", server)
+ }
+
+ port, parseErr := strconv.ParseUint(server[colonIndex+1:], 10, 64)
+ if parseErr != nil {
+ return "", fmt.Errorf("server port parse error: %v", parseErr)
+ }
+
+ grpcPort := int(port) + 10000
+
+ return fmt.Sprintf("%s:%d", server[:colonIndex], grpcPort), nil
+}
+
+func ServerToGrpcAddress(server string) (serverGrpcAddress string) {
+ hostnameAndPort := strings.Split(server, ":")
+ if len(hostnameAndPort) != 2 {
+ return fmt.Sprintf("unexpected server address: %s", server)
+ }
+
+ port, parseErr := strconv.ParseUint(hostnameAndPort[1], 10, 64)
+ if parseErr != nil {
+ return fmt.Sprintf("failed to parse port for %s:%s", hostnameAndPort[0], hostnameAndPort[1])
+ }
+
+ grpcPort := int(port) + 10000
+
+ return fmt.Sprintf("%s:%d", hostnameAndPort[0], grpcPort)
+}
+
+func WithMasterClient(master string, grpcDialOption grpc.DialOption, fn func(client master_pb.SeaweedClient) error) error {
+
+ masterGrpcAddress, parseErr := ParseServerToGrpcAddress(master)
+ if parseErr != nil {
+ return fmt.Errorf("failed to parse master grpc %v: %v", master, parseErr)
+ }
+
+ return WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error {
+ client := master_pb.NewSeaweedClient(grpcConnection)
+ return fn(client)
+ }, masterGrpcAddress, grpcDialOption)
+
+}
+
+func WithBrokerGrpcClient(brokerGrpcAddress string, grpcDialOption grpc.DialOption, fn func(client messaging_pb.SeaweedMessagingClient) error) error {
+
+ return WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error {
+ client := messaging_pb.NewSeaweedMessagingClient(grpcConnection)
+ return fn(client)
+ }, brokerGrpcAddress, grpcDialOption)
+
+}
+
+func WithFilerClient(filer string, grpcDialOption grpc.DialOption, fn func(client filer_pb.SeaweedFilerClient) error) error {
+
+ filerGrpcAddress, parseErr := ParseServerToGrpcAddress(filer)
+ if parseErr != nil {
+ return fmt.Errorf("failed to parse filer grpc %v: %v", filer, parseErr)
+ }
+
+ return WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, fn)
+
+}
+
+func WithGrpcFilerClient(filerGrpcAddress string, grpcDialOption grpc.DialOption, fn func(client filer_pb.SeaweedFilerClient) error) error {
+
+ return WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error {
+ client := filer_pb.NewSeaweedFilerClient(grpcConnection)
+ return fn(client)
+ }, filerGrpcAddress, grpcDialOption)
+
+}
+
+func ParseFilerGrpcAddress(filer string) (filerGrpcAddress string, err error) {
+ hostnameAndPort := strings.Split(filer, ":")
+ if len(hostnameAndPort) != 2 {
+ return "", fmt.Errorf("filer should have hostname:port format: %v", hostnameAndPort)
+ }
+
+ filerPort, parseErr := strconv.ParseUint(hostnameAndPort[1], 10, 64)
+ if parseErr != nil {
+ return "", fmt.Errorf("filer port parse error: %v", parseErr)
+ }
+
+ filerGrpcPort := int(filerPort) + 10000
+
+ return fmt.Sprintf("%s:%d", hostnameAndPort[0], filerGrpcPort), nil
+}
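
All of the address helpers above encode one convention: a server's gRPC port is its HTTP port plus 10000, so "localhost:9333" dials gRPC on localhost:19333. A minimal sketch of calling a master RPC through the cached-connection path; the address and the insecure dial option are assumptions for illustration.

import (
	"context"
	"fmt"

	"google.golang.org/grpc"

	"github.com/chrislusf/seaweedfs/weed/pb"
	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
)

func printMasterMetricsAddress() error {
	// "localhost:9333" is the master's HTTP address; the helper dials gRPC on port 19333.
	return pb.WithMasterClient("localhost:9333", grpc.WithInsecure(),
		func(client master_pb.SeaweedClient) error {
			resp, err := client.GetMasterConfiguration(context.Background(),
				&master_pb.GetMasterConfigurationRequest{})
			if err != nil {
				return err
			}
			fmt.Println("metrics address:", resp.MetricsAddress)
			return nil
		})
}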
diff --git a/weed/pb/iam.proto b/weed/pb/iam.proto
new file mode 100644
index 000000000..2eef22dd9
--- /dev/null
+++ b/weed/pb/iam.proto
@@ -0,0 +1,50 @@
+syntax = "proto3";
+
+package iam_pb;
+
+option java_package = "seaweedfs.client";
+option java_outer_classname = "IamProto";
+
+//////////////////////////////////////////////////
+
+service SeaweedIdentityAccessManagement {
+
+}
+
+//////////////////////////////////////////////////
+
+message S3ApiConfiguration {
+ repeated Identity identities = 1;
+}
+
+message Identity {
+ string name = 1;
+ repeated Credential credentials = 2;
+ repeated string actions = 3;
+}
+
+message Credential {
+ string access_key = 1;
+ string secret_key = 2;
+ // uint64 expiration = 3;
+ // bool is_disabled = 4;
+}
+
+/*
+message Policy {
+ repeated Statement statements = 1;
+}
+
+message Statement {
+ repeated Action action = 1;
+ repeated Resource resource = 2;
+}
+
+message Action {
+ string action = 1;
+}
+message Resource {
+ string bucket = 1;
+ // string path = 2;
+}
+*/
\ No newline at end of file
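
The configuration messages above map one identity to a set of credentials and a list of permitted action names. For a sense of how they compose, here is a sketch that builds the equivalent structure with the generated Go types from iam_pb below; the identity name, keys, and action strings are placeholders, not values defined by this patch.

conf := &iam_pb.S3ApiConfiguration{
	Identities: []*iam_pb.Identity{{
		Name: "some_admin_user", // placeholder identity
		Credentials: []*iam_pb.Credential{{
			AccessKey: "some_access_key", // placeholder credentials
			SecretKey: "some_secret_key",
		}},
		Actions: []string{"Admin", "Read", "Write"}, // assumed action names
	}},
}
fmt.Println(proto.MarshalTextString(conf)) // text form, for inspection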
diff --git a/weed/pb/iam_pb/iam.pb.go b/weed/pb/iam_pb/iam.pb.go
new file mode 100644
index 000000000..b7d7b038b
--- /dev/null
+++ b/weed/pb/iam_pb/iam.pb.go
@@ -0,0 +1,174 @@
+// Code generated by protoc-gen-go.
+// source: iam.proto
+// DO NOT EDIT!
+
+/*
+Package iam_pb is a generated protocol buffer package.
+
+It is generated from these files:
+ iam.proto
+
+It has these top-level messages:
+ S3ApiConfiguration
+ Identity
+ Credential
+*/
+package iam_pb
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+import (
+ context "golang.org/x/net/context"
+ grpc "google.golang.org/grpc"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+type S3ApiConfiguration struct {
+ Identities []*Identity `protobuf:"bytes,1,rep,name=identities" json:"identities,omitempty"`
+}
+
+func (m *S3ApiConfiguration) Reset() { *m = S3ApiConfiguration{} }
+func (m *S3ApiConfiguration) String() string { return proto.CompactTextString(m) }
+func (*S3ApiConfiguration) ProtoMessage() {}
+func (*S3ApiConfiguration) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+
+func (m *S3ApiConfiguration) GetIdentities() []*Identity {
+ if m != nil {
+ return m.Identities
+ }
+ return nil
+}
+
+type Identity struct {
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Credentials []*Credential `protobuf:"bytes,2,rep,name=credentials" json:"credentials,omitempty"`
+ Actions []string `protobuf:"bytes,3,rep,name=actions" json:"actions,omitempty"`
+}
+
+func (m *Identity) Reset() { *m = Identity{} }
+func (m *Identity) String() string { return proto.CompactTextString(m) }
+func (*Identity) ProtoMessage() {}
+func (*Identity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
+
+func (m *Identity) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *Identity) GetCredentials() []*Credential {
+ if m != nil {
+ return m.Credentials
+ }
+ return nil
+}
+
+func (m *Identity) GetActions() []string {
+ if m != nil {
+ return m.Actions
+ }
+ return nil
+}
+
+type Credential struct {
+ AccessKey string `protobuf:"bytes,1,opt,name=access_key,json=accessKey" json:"access_key,omitempty"`
+ SecretKey string `protobuf:"bytes,2,opt,name=secret_key,json=secretKey" json:"secret_key,omitempty"`
+}
+
+func (m *Credential) Reset() { *m = Credential{} }
+func (m *Credential) String() string { return proto.CompactTextString(m) }
+func (*Credential) ProtoMessage() {}
+func (*Credential) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
+
+func (m *Credential) GetAccessKey() string {
+ if m != nil {
+ return m.AccessKey
+ }
+ return ""
+}
+
+func (m *Credential) GetSecretKey() string {
+ if m != nil {
+ return m.SecretKey
+ }
+ return ""
+}
+
+func init() {
+ proto.RegisterType((*S3ApiConfiguration)(nil), "iam_pb.S3ApiConfiguration")
+ proto.RegisterType((*Identity)(nil), "iam_pb.Identity")
+ proto.RegisterType((*Credential)(nil), "iam_pb.Credential")
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// Client API for SeaweedIdentityAccessManagement service
+
+type SeaweedIdentityAccessManagementClient interface {
+}
+
+type seaweedIdentityAccessManagementClient struct {
+ cc *grpc.ClientConn
+}
+
+func NewSeaweedIdentityAccessManagementClient(cc *grpc.ClientConn) SeaweedIdentityAccessManagementClient {
+ return &seaweedIdentityAccessManagementClient{cc}
+}
+
+// Server API for SeaweedIdentityAccessManagement service
+
+type SeaweedIdentityAccessManagementServer interface {
+}
+
+func RegisterSeaweedIdentityAccessManagementServer(s *grpc.Server, srv SeaweedIdentityAccessManagementServer) {
+ s.RegisterService(&_SeaweedIdentityAccessManagement_serviceDesc, srv)
+}
+
+var _SeaweedIdentityAccessManagement_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "iam_pb.SeaweedIdentityAccessManagement",
+ HandlerType: (*SeaweedIdentityAccessManagementServer)(nil),
+ Methods: []grpc.MethodDesc{},
+ Streams: []grpc.StreamDesc{},
+ Metadata: "iam.proto",
+}
+
+func init() { proto.RegisterFile("iam.proto", fileDescriptor0) }
+
+var fileDescriptor0 = []byte{
+ // 250 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x4c, 0x90, 0x41, 0x4b, 0xc3, 0x40,
+ 0x10, 0x85, 0x69, 0x23, 0xb5, 0x99, 0x5e, 0xca, 0x9c, 0xf6, 0xa0, 0x18, 0x73, 0xca, 0x29, 0x48,
+ 0xeb, 0x1f, 0xa8, 0x05, 0xa1, 0x16, 0x41, 0xd2, 0x1f, 0x50, 0xa6, 0xdb, 0x69, 0x19, 0xec, 0x6e,
+ 0x42, 0x76, 0x45, 0xf2, 0xef, 0x25, 0xbb, 0x46, 0x7b, 0xdb, 0x7d, 0xdf, 0x7b, 0xb3, 0x3b, 0x0f,
+ 0x52, 0x21, 0x53, 0x36, 0x6d, 0xed, 0x6b, 0x9c, 0x08, 0x99, 0x7d, 0x73, 0xc8, 0x5f, 0x01, 0x77,
+ 0xcb, 0x55, 0x23, 0xeb, 0xda, 0x9e, 0xe4, 0xfc, 0xd5, 0x92, 0x97, 0xda, 0xe2, 0x13, 0x80, 0x1c,
+ 0xd9, 0x7a, 0xf1, 0xc2, 0x4e, 0x8d, 0xb2, 0xa4, 0x98, 0x2d, 0xe6, 0x65, 0x8c, 0x94, 0x9b, 0x48,
+ 0xba, 0xea, 0xca, 0x93, 0x5b, 0x98, 0x0e, 0x3a, 0x22, 0xdc, 0x58, 0x32, 0xac, 0x46, 0xd9, 0xa8,
+ 0x48, 0xab, 0x70, 0xc6, 0x67, 0x98, 0xe9, 0x96, 0x83, 0x83, 0x2e, 0x4e, 0x8d, 0xc3, 0x48, 0x1c,
+ 0x46, 0xae, 0xff, 0x50, 0x75, 0x6d, 0x43, 0x05, 0xb7, 0xa4, 0xfb, 0x1f, 0x39, 0x95, 0x64, 0x49,
+ 0x91, 0x56, 0xc3, 0x35, 0x7f, 0x03, 0xf8, 0x0f, 0xe1, 0x3d, 0x00, 0x69, 0xcd, 0xce, 0xed, 0x3f,
+ 0xb9, 0xfb, 0x7d, 0x37, 0x8d, 0xca, 0x96, 0xbb, 0x1e, 0x3b, 0xd6, 0x2d, 0xfb, 0x80, 0xc7, 0x11,
+ 0x47, 0x65, 0xcb, 0xdd, 0xe2, 0x11, 0x1e, 0x76, 0x4c, 0xdf, 0xcc, 0xc7, 0x61, 0x85, 0x55, 0x88,
+ 0xbe, 0x93, 0xa5, 0x33, 0x1b, 0xb6, 0xfe, 0xe5, 0x0e, 0xe6, 0x2e, 0x5a, 0x4e, 0xae, 0xd4, 0x17,
+ 0xe9, 0xb5, 0xe9, 0x86, 0xcc, 0x47, 0x5f, 0xe6, 0x61, 0x12, 0x3a, 0x5d, 0xfe, 0x04, 0x00, 0x00,
+ 0xff, 0xff, 0x83, 0x4f, 0x61, 0x03, 0x60, 0x01, 0x00, 0x00,
+}
diff --git a/weed/pb/master.proto b/weed/pb/master.proto
index 9b1e884c7..71a9c08b8 100644
--- a/weed/pb/master.proto
+++ b/weed/pb/master.proto
@@ -25,6 +25,13 @@ service Seaweed {
}
rpc GetMasterConfiguration (GetMasterConfigurationRequest) returns (GetMasterConfigurationResponse) {
}
+ rpc ListMasterClients (ListMasterClientsRequest) returns (ListMasterClientsResponse) {
+ }
+ rpc LeaseAdminToken (LeaseAdminTokenRequest) returns (LeaseAdminTokenResponse) {
+ }
+ rpc ReleaseAdminToken (ReleaseAdminTokenRequest) returns (ReleaseAdminTokenResponse) {
+ }
+
}
//////////////////////////////////////////////////
@@ -112,6 +119,7 @@ message SuperBlockExtra {
message KeepConnectedRequest {
string name = 1;
+ uint32 grpc_port = 2;
}
message VolumeLocation {
@@ -264,3 +272,28 @@ message GetMasterConfigurationResponse {
string metrics_address = 1;
uint32 metrics_interval_seconds = 2;
}
+
+message ListMasterClientsRequest {
+ string client_type = 1;
+}
+message ListMasterClientsResponse {
+ repeated string grpc_addresses = 1;
+}
+
+message LeaseAdminTokenRequest {
+ int64 previous_token = 1;
+ int64 previous_lock_time = 2;
+ string lock_name = 3;
+}
+message LeaseAdminTokenResponse {
+ int64 token = 1;
+ int64 lock_ts_ns = 2;
+}
+
+message ReleaseAdminTokenRequest {
+ int64 previous_token = 1;
+ int64 previous_lock_time = 2;
+ string lock_name = 3;
+}
+message ReleaseAdminTokenResponse {
+}
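
LeaseAdminToken and ReleaseAdminToken give the master a renewable advisory lock: the caller echoes back the previous token and lock timestamp so the master can verify the lease is being continued by the same holder. A hedged sketch of an acquire-and-renew loop; the function name, lock name, and renewal interval are assumptions.

func holdAdminLock(client master_pb.SeaweedClient) error {
	var token, lockTime int64
	for {
		resp, err := client.LeaseAdminToken(context.Background(), &master_pb.LeaseAdminTokenRequest{
			PreviousToken:    token,
			PreviousLockTime: lockTime,
			LockName:         "admin", // assumed lock name
		})
		if err != nil {
			return err // lease denied (held elsewhere) or master unreachable
		}
		token, lockTime = resp.Token, resp.LockTsNs
		time.Sleep(4 * time.Second) // renew well inside the lease window (assumed interval)
	}
}

A graceful holder would call ReleaseAdminToken with the final token, lock time, and the same lock name before exiting.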
diff --git a/weed/pb/master_pb/master.pb.go b/weed/pb/master_pb/master.pb.go
index ea4362c92..4710b3d4c 100644
--- a/weed/pb/master_pb/master.pb.go
+++ b/weed/pb/master_pb/master.pb.go
@@ -42,6 +42,12 @@ It has these top-level messages:
LookupEcVolumeResponse
GetMasterConfigurationRequest
GetMasterConfigurationResponse
+ ListMasterClientsRequest
+ ListMasterClientsResponse
+ LeaseAdminTokenRequest
+ LeaseAdminTokenResponse
+ ReleaseAdminTokenRequest
+ ReleaseAdminTokenResponse
*/
package master_pb
@@ -543,7 +549,8 @@ func (m *SuperBlockExtra_ErasureCoding) GetVolumeIds() []uint32 {
}
type KeepConnectedRequest struct {
- Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ GrpcPort uint32 `protobuf:"varint,2,opt,name=grpc_port,json=grpcPort" json:"grpc_port,omitempty"`
}
func (m *KeepConnectedRequest) Reset() { *m = KeepConnectedRequest{} }
@@ -558,6 +565,13 @@ func (m *KeepConnectedRequest) GetName() string {
return ""
}
+func (m *KeepConnectedRequest) GetGrpcPort() uint32 {
+ if m != nil {
+ return m.GrpcPort
+ }
+ return 0
+}
+
type VolumeLocation struct {
Url string `protobuf:"bytes,1,opt,name=url" json:"url,omitempty"`
PublicUrl string `protobuf:"bytes,2,opt,name=public_url,json=publicUrl" json:"public_url,omitempty"`
@@ -1431,6 +1445,134 @@ func (m *GetMasterConfigurationResponse) GetMetricsIntervalSeconds() uint32 {
return 0
}
+type ListMasterClientsRequest struct {
+ ClientType string `protobuf:"bytes,1,opt,name=client_type,json=clientType" json:"client_type,omitempty"`
+}
+
+func (m *ListMasterClientsRequest) Reset() { *m = ListMasterClientsRequest{} }
+func (m *ListMasterClientsRequest) String() string { return proto.CompactTextString(m) }
+func (*ListMasterClientsRequest) ProtoMessage() {}
+func (*ListMasterClientsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{33} }
+
+func (m *ListMasterClientsRequest) GetClientType() string {
+ if m != nil {
+ return m.ClientType
+ }
+ return ""
+}
+
+type ListMasterClientsResponse struct {
+ GrpcAddresses []string `protobuf:"bytes,1,rep,name=grpc_addresses,json=grpcAddresses" json:"grpc_addresses,omitempty"`
+}
+
+func (m *ListMasterClientsResponse) Reset() { *m = ListMasterClientsResponse{} }
+func (m *ListMasterClientsResponse) String() string { return proto.CompactTextString(m) }
+func (*ListMasterClientsResponse) ProtoMessage() {}
+func (*ListMasterClientsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{34} }
+
+func (m *ListMasterClientsResponse) GetGrpcAddresses() []string {
+ if m != nil {
+ return m.GrpcAddresses
+ }
+ return nil
+}
+
+type LeaseAdminTokenRequest struct {
+ PreviousToken int64 `protobuf:"varint,1,opt,name=previous_token,json=previousToken" json:"previous_token,omitempty"`
+ PreviousLockTime int64 `protobuf:"varint,2,opt,name=previous_lock_time,json=previousLockTime" json:"previous_lock_time,omitempty"`
+ LockName string `protobuf:"bytes,3,opt,name=lock_name,json=lockName" json:"lock_name,omitempty"`
+}
+
+func (m *LeaseAdminTokenRequest) Reset() { *m = LeaseAdminTokenRequest{} }
+func (m *LeaseAdminTokenRequest) String() string { return proto.CompactTextString(m) }
+func (*LeaseAdminTokenRequest) ProtoMessage() {}
+func (*LeaseAdminTokenRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{35} }
+
+func (m *LeaseAdminTokenRequest) GetPreviousToken() int64 {
+ if m != nil {
+ return m.PreviousToken
+ }
+ return 0
+}
+
+func (m *LeaseAdminTokenRequest) GetPreviousLockTime() int64 {
+ if m != nil {
+ return m.PreviousLockTime
+ }
+ return 0
+}
+
+func (m *LeaseAdminTokenRequest) GetLockName() string {
+ if m != nil {
+ return m.LockName
+ }
+ return ""
+}
+
+type LeaseAdminTokenResponse struct {
+ Token int64 `protobuf:"varint,1,opt,name=token" json:"token,omitempty"`
+ LockTsNs int64 `protobuf:"varint,2,opt,name=lock_ts_ns,json=lockTsNs" json:"lock_ts_ns,omitempty"`
+}
+
+func (m *LeaseAdminTokenResponse) Reset() { *m = LeaseAdminTokenResponse{} }
+func (m *LeaseAdminTokenResponse) String() string { return proto.CompactTextString(m) }
+func (*LeaseAdminTokenResponse) ProtoMessage() {}
+func (*LeaseAdminTokenResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{36} }
+
+func (m *LeaseAdminTokenResponse) GetToken() int64 {
+ if m != nil {
+ return m.Token
+ }
+ return 0
+}
+
+func (m *LeaseAdminTokenResponse) GetLockTsNs() int64 {
+ if m != nil {
+ return m.LockTsNs
+ }
+ return 0
+}
+
+type ReleaseAdminTokenRequest struct {
+ PreviousToken int64 `protobuf:"varint,1,opt,name=previous_token,json=previousToken" json:"previous_token,omitempty"`
+ PreviousLockTime int64 `protobuf:"varint,2,opt,name=previous_lock_time,json=previousLockTime" json:"previous_lock_time,omitempty"`
+ LockName string `protobuf:"bytes,3,opt,name=lock_name,json=lockName" json:"lock_name,omitempty"`
+}
+
+func (m *ReleaseAdminTokenRequest) Reset() { *m = ReleaseAdminTokenRequest{} }
+func (m *ReleaseAdminTokenRequest) String() string { return proto.CompactTextString(m) }
+func (*ReleaseAdminTokenRequest) ProtoMessage() {}
+func (*ReleaseAdminTokenRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{37} }
+
+func (m *ReleaseAdminTokenRequest) GetPreviousToken() int64 {
+ if m != nil {
+ return m.PreviousToken
+ }
+ return 0
+}
+
+func (m *ReleaseAdminTokenRequest) GetPreviousLockTime() int64 {
+ if m != nil {
+ return m.PreviousLockTime
+ }
+ return 0
+}
+
+func (m *ReleaseAdminTokenRequest) GetLockName() string {
+ if m != nil {
+ return m.LockName
+ }
+ return ""
+}
+
+type ReleaseAdminTokenResponse struct {
+}
+
+func (m *ReleaseAdminTokenResponse) Reset() { *m = ReleaseAdminTokenResponse{} }
+func (m *ReleaseAdminTokenResponse) String() string { return proto.CompactTextString(m) }
+func (*ReleaseAdminTokenResponse) ProtoMessage() {}
+func (*ReleaseAdminTokenResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{38} }
+
func init() {
proto.RegisterType((*Heartbeat)(nil), "master_pb.Heartbeat")
proto.RegisterType((*HeartbeatResponse)(nil), "master_pb.HeartbeatResponse")
@@ -1468,6 +1610,12 @@ func init() {
proto.RegisterType((*LookupEcVolumeResponse_EcShardIdLocation)(nil), "master_pb.LookupEcVolumeResponse.EcShardIdLocation")
proto.RegisterType((*GetMasterConfigurationRequest)(nil), "master_pb.GetMasterConfigurationRequest")
proto.RegisterType((*GetMasterConfigurationResponse)(nil), "master_pb.GetMasterConfigurationResponse")
+ proto.RegisterType((*ListMasterClientsRequest)(nil), "master_pb.ListMasterClientsRequest")
+ proto.RegisterType((*ListMasterClientsResponse)(nil), "master_pb.ListMasterClientsResponse")
+ proto.RegisterType((*LeaseAdminTokenRequest)(nil), "master_pb.LeaseAdminTokenRequest")
+ proto.RegisterType((*LeaseAdminTokenResponse)(nil), "master_pb.LeaseAdminTokenResponse")
+ proto.RegisterType((*ReleaseAdminTokenRequest)(nil), "master_pb.ReleaseAdminTokenRequest")
+ proto.RegisterType((*ReleaseAdminTokenResponse)(nil), "master_pb.ReleaseAdminTokenResponse")
}
// Reference imports to suppress errors if they are not otherwise used.
@@ -1491,6 +1639,9 @@ type SeaweedClient interface {
VolumeList(ctx context.Context, in *VolumeListRequest, opts ...grpc.CallOption) (*VolumeListResponse, error)
LookupEcVolume(ctx context.Context, in *LookupEcVolumeRequest, opts ...grpc.CallOption) (*LookupEcVolumeResponse, error)
GetMasterConfiguration(ctx context.Context, in *GetMasterConfigurationRequest, opts ...grpc.CallOption) (*GetMasterConfigurationResponse, error)
+ ListMasterClients(ctx context.Context, in *ListMasterClientsRequest, opts ...grpc.CallOption) (*ListMasterClientsResponse, error)
+ LeaseAdminToken(ctx context.Context, in *LeaseAdminTokenRequest, opts ...grpc.CallOption) (*LeaseAdminTokenResponse, error)
+ ReleaseAdminToken(ctx context.Context, in *ReleaseAdminTokenRequest, opts ...grpc.CallOption) (*ReleaseAdminTokenResponse, error)
}
type seaweedClient struct {
@@ -1635,6 +1786,33 @@ func (c *seaweedClient) GetMasterConfiguration(ctx context.Context, in *GetMaste
return out, nil
}
+func (c *seaweedClient) ListMasterClients(ctx context.Context, in *ListMasterClientsRequest, opts ...grpc.CallOption) (*ListMasterClientsResponse, error) {
+ out := new(ListMasterClientsResponse)
+ err := grpc.Invoke(ctx, "/master_pb.Seaweed/ListMasterClients", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *seaweedClient) LeaseAdminToken(ctx context.Context, in *LeaseAdminTokenRequest, opts ...grpc.CallOption) (*LeaseAdminTokenResponse, error) {
+ out := new(LeaseAdminTokenResponse)
+ err := grpc.Invoke(ctx, "/master_pb.Seaweed/LeaseAdminToken", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *seaweedClient) ReleaseAdminToken(ctx context.Context, in *ReleaseAdminTokenRequest, opts ...grpc.CallOption) (*ReleaseAdminTokenResponse, error) {
+ out := new(ReleaseAdminTokenResponse)
+ err := grpc.Invoke(ctx, "/master_pb.Seaweed/ReleaseAdminToken", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
// Server API for Seaweed service
type SeaweedServer interface {
@@ -1648,6 +1826,9 @@ type SeaweedServer interface {
VolumeList(context.Context, *VolumeListRequest) (*VolumeListResponse, error)
LookupEcVolume(context.Context, *LookupEcVolumeRequest) (*LookupEcVolumeResponse, error)
GetMasterConfiguration(context.Context, *GetMasterConfigurationRequest) (*GetMasterConfigurationResponse, error)
+ ListMasterClients(context.Context, *ListMasterClientsRequest) (*ListMasterClientsResponse, error)
+ LeaseAdminToken(context.Context, *LeaseAdminTokenRequest) (*LeaseAdminTokenResponse, error)
+ ReleaseAdminToken(context.Context, *ReleaseAdminTokenRequest) (*ReleaseAdminTokenResponse, error)
}
func RegisterSeaweedServer(s *grpc.Server, srv SeaweedServer) {
@@ -1850,6 +2031,60 @@ func _Seaweed_GetMasterConfiguration_Handler(srv interface{}, ctx context.Contex
return interceptor(ctx, in, info, handler)
}
+func _Seaweed_ListMasterClients_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListMasterClientsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(SeaweedServer).ListMasterClients(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/master_pb.Seaweed/ListMasterClients",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(SeaweedServer).ListMasterClients(ctx, req.(*ListMasterClientsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Seaweed_LeaseAdminToken_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(LeaseAdminTokenRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(SeaweedServer).LeaseAdminToken(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/master_pb.Seaweed/LeaseAdminToken",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(SeaweedServer).LeaseAdminToken(ctx, req.(*LeaseAdminTokenRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Seaweed_ReleaseAdminToken_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ReleaseAdminTokenRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(SeaweedServer).ReleaseAdminToken(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/master_pb.Seaweed/ReleaseAdminToken",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(SeaweedServer).ReleaseAdminToken(ctx, req.(*ReleaseAdminTokenRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
var _Seaweed_serviceDesc = grpc.ServiceDesc{
ServiceName: "master_pb.Seaweed",
HandlerType: (*SeaweedServer)(nil),
@@ -1886,6 +2121,18 @@ var _Seaweed_serviceDesc = grpc.ServiceDesc{
MethodName: "GetMasterConfiguration",
Handler: _Seaweed_GetMasterConfiguration_Handler,
},
+ {
+ MethodName: "ListMasterClients",
+ Handler: _Seaweed_ListMasterClients_Handler,
+ },
+ {
+ MethodName: "LeaseAdminToken",
+ Handler: _Seaweed_LeaseAdminToken_Handler,
+ },
+ {
+ MethodName: "ReleaseAdminToken",
+ Handler: _Seaweed_ReleaseAdminToken_Handler,
+ },
},
Streams: []grpc.StreamDesc{
{
@@ -1907,137 +2154,151 @@ var _Seaweed_serviceDesc = grpc.ServiceDesc{
func init() { proto.RegisterFile("master.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
- // 2102 bytes of a gzipped FileDescriptorProto
+ // 2334 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xd4, 0x59, 0x4b, 0x6f, 0x1c, 0xc7,
- 0x11, 0xd6, 0xec, 0x2e, 0x97, 0xbb, 0xb5, 0xef, 0x26, 0x45, 0xaf, 0xd6, 0x96, 0xb5, 0x1a, 0x07,
- 0x30, 0xa5, 0x38, 0x8c, 0x43, 0x1b, 0x88, 0x91, 0xc4, 0x30, 0x24, 0x8a, 0x76, 0x08, 0x89, 0xb4,
- 0x34, 0x54, 0x64, 0x20, 0x40, 0x30, 0xe9, 0x9d, 0x69, 0x92, 0x03, 0xce, 0x2b, 0xd3, 0xbd, 0x14,
- 0xd7, 0xb9, 0x04, 0xc8, 0x31, 0xa7, 0x20, 0x87, 0xfc, 0x85, 0x5c, 0x72, 0x4a, 0xce, 0xbe, 0xe4,
- 0x47, 0xe4, 0x7f, 0xe4, 0xea, 0x4b, 0xd0, 0xaf, 0x99, 0x9e, 0x7d, 0x90, 0xa6, 0x01, 0x1f, 0x74,
- 0x9b, 0xae, 0xaa, 0xae, 0xae, 0xfe, 0xaa, 0xbb, 0xea, 0xeb, 0x5d, 0x68, 0x47, 0x98, 0x32, 0x92,
- 0xed, 0xa4, 0x59, 0xc2, 0x12, 0xd4, 0x94, 0x23, 0x37, 0x9d, 0xd8, 0x7f, 0xa9, 0x43, 0xf3, 0xd7,
- 0x04, 0x67, 0x6c, 0x42, 0x30, 0x43, 0x5d, 0xa8, 0x04, 0xe9, 0xd0, 0x1a, 0x5b, 0xdb, 0x4d, 0xa7,
- 0x12, 0xa4, 0x08, 0x41, 0x2d, 0x4d, 0x32, 0x36, 0xac, 0x8c, 0xad, 0xed, 0x8e, 0x23, 0xbe, 0xd1,
- 0x5d, 0x80, 0x74, 0x3a, 0x09, 0x03, 0xcf, 0x9d, 0x66, 0xe1, 0xb0, 0x2a, 0x6c, 0x9b, 0x52, 0xf2,
- 0x9b, 0x2c, 0x44, 0xdb, 0xd0, 0x8f, 0xf0, 0xa5, 0x7b, 0x91, 0x84, 0xd3, 0x88, 0xb8, 0x5e, 0x32,
- 0x8d, 0xd9, 0xb0, 0x26, 0xa6, 0x77, 0x23, 0x7c, 0xf9, 0x4a, 0x88, 0xf7, 0xb8, 0x14, 0x8d, 0x79,
- 0x54, 0x97, 0xee, 0x49, 0x10, 0x12, 0xf7, 0x9c, 0xcc, 0x86, 0x6b, 0x63, 0x6b, 0xbb, 0xe6, 0x40,
- 0x84, 0x2f, 0x3f, 0x0f, 0x42, 0xf2, 0x94, 0xcc, 0xd0, 0x3d, 0x68, 0xf9, 0x98, 0x61, 0xd7, 0x23,
- 0x31, 0x23, 0xd9, 0xb0, 0x2e, 0xd6, 0x02, 0x2e, 0xda, 0x13, 0x12, 0x1e, 0x5f, 0x86, 0xbd, 0xf3,
- 0xe1, 0xba, 0xd0, 0x88, 0x6f, 0x1e, 0x1f, 0xf6, 0xa3, 0x20, 0x76, 0x45, 0xe4, 0x0d, 0xb1, 0x74,
- 0x53, 0x48, 0x9e, 0xf3, 0xf0, 0x3f, 0x85, 0x75, 0x19, 0x1b, 0x1d, 0x36, 0xc7, 0xd5, 0xed, 0xd6,
- 0xee, 0x7b, 0x3b, 0x39, 0x1a, 0x3b, 0x32, 0xbc, 0x83, 0xf8, 0x24, 0xc9, 0x22, 0xcc, 0x82, 0x24,
- 0x3e, 0x24, 0x94, 0xe2, 0x53, 0xe2, 0xe8, 0x39, 0xe8, 0x00, 0x5a, 0x31, 0x79, 0xed, 0x6a, 0x17,
- 0x20, 0x5c, 0x6c, 0x2f, 0xb8, 0x38, 0x3e, 0x4b, 0x32, 0xb6, 0xc4, 0x0f, 0xc4, 0xe4, 0xf5, 0x2b,
- 0xe5, 0xea, 0x05, 0xf4, 0x7c, 0x12, 0x12, 0x46, 0xfc, 0xdc, 0x5d, 0xeb, 0x86, 0xee, 0xba, 0xca,
- 0x81, 0x76, 0xf9, 0x23, 0xe8, 0x9e, 0x61, 0xea, 0xc6, 0x49, 0xee, 0xb1, 0x3d, 0xb6, 0xb6, 0x1b,
- 0x4e, 0xfb, 0x0c, 0xd3, 0xa3, 0x44, 0x5b, 0x7d, 0x01, 0x4d, 0xe2, 0xb9, 0xf4, 0x0c, 0x67, 0x3e,
- 0x1d, 0xf6, 0xc5, 0x92, 0x0f, 0x17, 0x96, 0xdc, 0xf7, 0x8e, 0xb9, 0xc1, 0x92, 0x45, 0x1b, 0x44,
- 0xaa, 0x28, 0x3a, 0x82, 0x0e, 0x07, 0xa3, 0x70, 0x36, 0xb8, 0xb1, 0x33, 0x8e, 0xe6, 0xbe, 0xf6,
- 0xf7, 0x0a, 0x06, 0x1a, 0x91, 0xc2, 0x27, 0xba, 0xb1, 0x4f, 0x0d, 0x6b, 0xee, 0xf7, 0x7d, 0xe8,
- 0x2b, 0x58, 0x0a, 0xb7, 0x1b, 0x02, 0x98, 0x8e, 0x00, 0x46, 0x1b, 0xda, 0x7f, 0xaa, 0xc0, 0x20,
- 0xbf, 0x0d, 0x0e, 0xa1, 0x69, 0x12, 0x53, 0x82, 0x1e, 0xc2, 0x40, 0x1d, 0x67, 0x1a, 0x7c, 0x4d,
- 0xdc, 0x30, 0x88, 0x02, 0x26, 0x2e, 0x49, 0xcd, 0xe9, 0x49, 0xc5, 0x71, 0xf0, 0x35, 0x79, 0xc6,
- 0xc5, 0x68, 0x0b, 0xea, 0x21, 0xc1, 0x3e, 0xc9, 0xc4, 0x9d, 0x69, 0x3a, 0x6a, 0x84, 0xde, 0x87,
- 0x5e, 0x44, 0x58, 0x16, 0x78, 0xd4, 0xc5, 0xbe, 0x9f, 0x11, 0x4a, 0xd5, 0xd5, 0xe9, 0x2a, 0xf1,
- 0x23, 0x29, 0x45, 0x9f, 0xc0, 0x50, 0x1b, 0x06, 0xfc, 0x8c, 0x5f, 0xe0, 0xd0, 0xa5, 0xc4, 0x4b,
- 0x62, 0x9f, 0xaa, 0x7b, 0xb4, 0xa5, 0xf4, 0x07, 0x4a, 0x7d, 0x2c, 0xb5, 0xe8, 0x09, 0xf4, 0x29,
- 0x4b, 0x32, 0x7c, 0x4a, 0xdc, 0x09, 0xf6, 0xce, 0x09, 0x9f, 0xb1, 0x26, 0xc0, 0xbb, 0x63, 0x80,
- 0x77, 0x2c, 0x4d, 0x1e, 0x4b, 0x0b, 0xa7, 0x47, 0x4b, 0x63, 0x6a, 0x7f, 0x5b, 0x85, 0xe1, 0xaa,
- 0x6b, 0x20, 0xea, 0x83, 0x2f, 0xb6, 0xde, 0x71, 0x2a, 0x81, 0xcf, 0xef, 0x1f, 0x87, 0x44, 0xec,
- 0xb5, 0xe6, 0x88, 0x6f, 0xf4, 0x2e, 0x80, 0x97, 0x84, 0x21, 0xf1, 0xf8, 0x44, 0xb5, 0x49, 0x43,
- 0xc2, 0xef, 0xa7, 0xb8, 0xf2, 0x45, 0x69, 0xa8, 0x39, 0x4d, 0x2e, 0x91, 0x55, 0xe1, 0x3e, 0xb4,
- 0x65, 0xfa, 0x94, 0x81, 0xac, 0x0a, 0x2d, 0x29, 0x93, 0x26, 0x1f, 0x00, 0xd2, 0xc7, 0x64, 0x32,
- 0xcb, 0x0d, 0xeb, 0xc2, 0xb0, 0xaf, 0x34, 0x8f, 0x67, 0xda, 0xfa, 0x6d, 0x68, 0x66, 0x04, 0xfb,
- 0x6e, 0x12, 0x87, 0x33, 0x51, 0x28, 0x1a, 0x4e, 0x83, 0x0b, 0xbe, 0x8c, 0xc3, 0x19, 0xfa, 0x31,
- 0x0c, 0x32, 0x92, 0x86, 0x81, 0x87, 0xdd, 0x34, 0xc4, 0x1e, 0x89, 0x48, 0xac, 0x6b, 0x46, 0x5f,
- 0x29, 0x9e, 0x6b, 0x39, 0x1a, 0xc2, 0xfa, 0x05, 0xc9, 0x28, 0xdf, 0x56, 0x53, 0x98, 0xe8, 0x21,
- 0xea, 0x43, 0x95, 0xb1, 0x70, 0x08, 0x42, 0xca, 0x3f, 0xd1, 0x03, 0xe8, 0x7b, 0x49, 0x94, 0x62,
- 0x8f, 0xb9, 0x19, 0xb9, 0x08, 0xc4, 0xa4, 0x96, 0x50, 0xf7, 0x94, 0xdc, 0x51, 0x62, 0xbe, 0x9d,
- 0x28, 0xf1, 0x83, 0x93, 0x80, 0xf8, 0x2e, 0x66, 0x2a, 0xd9, 0xe2, 0xe2, 0x56, 0x9d, 0xbe, 0xd6,
- 0x3c, 0x62, 0x32, 0xcd, 0x68, 0x07, 0x36, 0x32, 0x12, 0x25, 0x8c, 0xb8, 0x3a, 0xd9, 0x31, 0x8e,
- 0xc8, 0xb0, 0x23, 0x70, 0x1e, 0x48, 0x95, 0xca, 0xf1, 0x11, 0x8e, 0x08, 0xf7, 0x3e, 0x67, 0xcf,
- 0x6b, 0x6d, 0x57, 0x98, 0xf7, 0x4b, 0xe6, 0x4f, 0xc9, 0xcc, 0xfe, 0x87, 0x05, 0x77, 0xaf, 0x2c,
- 0x39, 0x0b, 0x47, 0xe0, 0xba, 0x74, 0xff, 0x50, 0x08, 0xdb, 0x53, 0xb8, 0x77, 0x4d, 0x21, 0xb8,
- 0x26, 0xd6, 0xca, 0x42, 0xac, 0x36, 0x74, 0x88, 0xe7, 0x06, 0xb1, 0x4f, 0x2e, 0xdd, 0x49, 0xc0,
- 0xe4, 0x15, 0xed, 0x38, 0x2d, 0xe2, 0x1d, 0x70, 0xd9, 0xe3, 0x80, 0x51, 0xfb, 0x1b, 0x0b, 0xba,
- 0xe5, 0x3b, 0xc4, 0x6f, 0x01, 0x9b, 0xa5, 0x44, 0xf5, 0x4d, 0xf1, 0xad, 0x96, 0xae, 0xa8, 0x4e,
- 0xea, 0xa3, 0x03, 0x80, 0x34, 0x4b, 0x52, 0x92, 0xb1, 0x80, 0x70, 0xbf, 0xfc, 0x5a, 0x3e, 0x58,
- 0x79, 0x2d, 0x77, 0x9e, 0xe7, 0xb6, 0xfb, 0x31, 0xcb, 0x66, 0x8e, 0x31, 0x79, 0xf4, 0x29, 0xf4,
- 0xe6, 0xd4, 0x1c, 0x1d, 0x9e, 0x55, 0x19, 0x00, 0xff, 0x44, 0x9b, 0xb0, 0x76, 0x81, 0xc3, 0x29,
- 0x51, 0x21, 0xc8, 0xc1, 0x2f, 0x2a, 0x9f, 0x58, 0xf6, 0x3a, 0xac, 0xed, 0x47, 0x29, 0x9b, 0xf1,
- 0x9d, 0xf4, 0x8e, 0xa7, 0x29, 0xc9, 0x1e, 0x87, 0x89, 0x77, 0xbe, 0x7f, 0xc9, 0x32, 0x8c, 0xbe,
- 0x84, 0x2e, 0xc9, 0x30, 0x9d, 0x66, 0xfc, 0x56, 0xf9, 0x41, 0x7c, 0x2a, 0x7c, 0x96, 0x5b, 0xd2,
- 0xdc, 0x9c, 0x9d, 0x7d, 0x39, 0x61, 0x4f, 0xd8, 0x3b, 0x1d, 0x62, 0x0e, 0x47, 0xbf, 0x85, 0x4e,
- 0x49, 0xcf, 0xc1, 0xe2, 0x0d, 0x5c, 0x65, 0x45, 0x7c, 0xf3, 0xa2, 0x99, 0xe2, 0x2c, 0x60, 0x33,
- 0x45, 0x34, 0xd4, 0x88, 0x97, 0x0a, 0x55, 0x78, 0x03, 0x5f, 0x82, 0xd6, 0x71, 0x9a, 0x52, 0x72,
- 0xe0, 0x53, 0xfb, 0x21, 0x6c, 0x3e, 0x25, 0x24, 0xdd, 0x4b, 0xe2, 0x98, 0x78, 0x8c, 0xf8, 0x0e,
- 0xf9, 0xc3, 0x94, 0x50, 0xc6, 0x97, 0x10, 0x77, 0x42, 0xe5, 0x83, 0x7f, 0xdb, 0x7f, 0xb7, 0xa0,
- 0x2b, 0x8f, 0xcb, 0xb3, 0xc4, 0x13, 0x87, 0x84, 0x83, 0xc6, 0x19, 0x8c, 0x02, 0x6d, 0x9a, 0x85,
- 0x73, 0xd4, 0xa6, 0x32, 0x4f, 0x6d, 0xee, 0x40, 0x43, 0xf4, 0xfe, 0x22, 0x98, 0x75, 0xde, 0xce,
- 0x03, 0x9f, 0x16, 0x55, 0xcb, 0x97, 0xea, 0x9a, 0x50, 0xb7, 0x74, 0x7b, 0xe6, 0x26, 0x45, 0x67,
- 0x58, 0x33, 0x3b, 0x83, 0xfd, 0x12, 0x36, 0x9e, 0x25, 0xc9, 0xf9, 0x34, 0x95, 0xe1, 0xe9, 0x4d,
- 0x94, 0xf7, 0x6e, 0x8d, 0xab, 0x3c, 0x96, 0x7c, 0xef, 0xd7, 0x1d, 0x65, 0xfb, 0x7f, 0x16, 0x6c,
- 0x96, 0xdd, 0xaa, 0x66, 0xf6, 0x7b, 0xd8, 0xc8, 0xfd, 0xba, 0xa1, 0xc2, 0x42, 0x2e, 0xd0, 0xda,
- 0xfd, 0xd0, 0x48, 0xf3, 0xb2, 0xd9, 0x9a, 0x20, 0xf9, 0x1a, 0x44, 0x67, 0x70, 0x31, 0x27, 0xa1,
- 0xa3, 0x4b, 0xe8, 0xcf, 0x9b, 0xf1, 0x22, 0x9c, 0xaf, 0xaa, 0x10, 0x6f, 0xe8, 0x99, 0xe8, 0x67,
- 0xd0, 0x2c, 0x02, 0xa9, 0x88, 0x40, 0x36, 0x4a, 0x81, 0xa8, 0xb5, 0x0a, 0x2b, 0x7e, 0xbc, 0x49,
- 0x96, 0x25, 0x99, 0x2a, 0x38, 0x72, 0x60, 0xff, 0x12, 0x1a, 0xdf, 0x3b, 0xbb, 0xf6, 0xbf, 0x2a,
- 0xd0, 0x79, 0x44, 0x69, 0x70, 0x1a, 0xeb, 0x14, 0x6c, 0xc2, 0x9a, 0x6c, 0x2d, 0xb2, 0xd7, 0xcb,
- 0x01, 0x1a, 0x43, 0x4b, 0xd5, 0x2d, 0x03, 0x7a, 0x53, 0x74, 0x6d, 0x49, 0x54, 0xb5, 0xac, 0x26,
- 0x43, 0xe3, 0xdd, 0x62, 0x8e, 0xe8, 0xae, 0xad, 0x24, 0xba, 0x75, 0x83, 0xe8, 0xbe, 0x0d, 0x4d,
- 0x31, 0x29, 0x4e, 0x7c, 0xa2, 0x18, 0x70, 0x83, 0x0b, 0x8e, 0x12, 0x9f, 0xa0, 0x5d, 0xd8, 0x8a,
- 0x48, 0x94, 0x64, 0x33, 0x37, 0xc2, 0xa9, 0xcb, 0x79, 0xb6, 0xe0, 0x2e, 0xd1, 0x44, 0xd5, 0x5e,
- 0x24, 0xb5, 0x87, 0x38, 0x3d, 0xc4, 0x97, 0x9c, 0xbe, 0x1c, 0x4e, 0xd0, 0x2e, 0xdc, 0xfe, 0x2a,
- 0x0b, 0x18, 0x9e, 0x84, 0xa4, 0xcc, 0xdf, 0x65, 0x2d, 0xde, 0xd0, 0x4a, 0x83, 0xc4, 0xdb, 0x7f,
- 0xb3, 0xa0, 0xab, 0x51, 0x53, 0x27, 0xac, 0x0f, 0xd5, 0x93, 0x3c, 0xcb, 0xfc, 0x53, 0xe7, 0xa2,
- 0xb2, 0x2a, 0x17, 0x0b, 0x8f, 0x88, 0x1c, 0xf9, 0x9a, 0x89, 0x7c, 0x9e, 0xf4, 0x35, 0x23, 0xe9,
- 0x1c, 0x1a, 0x3c, 0x65, 0x67, 0x1a, 0x1a, 0xfe, 0x6d, 0x9f, 0xc2, 0xe0, 0x98, 0x61, 0x16, 0x50,
- 0x16, 0x78, 0x54, 0xa7, 0x73, 0x2e, 0x71, 0xd6, 0x75, 0x89, 0xab, 0xac, 0x4a, 0x5c, 0x35, 0x4f,
- 0x9c, 0xfd, 0x1f, 0x0b, 0x90, 0xb9, 0x92, 0x82, 0xe0, 0x07, 0x58, 0x8a, 0x43, 0xc6, 0x12, 0xc6,
- 0xd9, 0x20, 0x67, 0x5c, 0x8a, 0x37, 0x09, 0x09, 0x4f, 0x1f, 0x3f, 0x0d, 0x53, 0x4a, 0x7c, 0xa9,
- 0x95, 0xa4, 0xa9, 0xc1, 0x05, 0x42, 0x59, 0xe6, 0x5c, 0xf5, 0x39, 0xce, 0x65, 0x3f, 0x82, 0x96,
- 0xea, 0x3f, 0x2f, 0x79, 0xef, 0xba, 0x3e, 0x7a, 0x15, 0x5d, 0xa5, 0x00, 0x62, 0x0c, 0xb0, 0x57,
- 0x44, 0xbf, 0xac, 0x02, 0xff, 0x11, 0x6e, 0x17, 0x16, 0xcf, 0x02, 0xca, 0x74, 0x5e, 0x3e, 0x86,
- 0xad, 0x20, 0xf6, 0xc2, 0xa9, 0x4f, 0xdc, 0x98, 0x77, 0xf0, 0x30, 0x7f, 0xbc, 0x58, 0x82, 0xad,
- 0x6d, 0x2a, 0xed, 0x91, 0x50, 0xea, 0x47, 0xcc, 0x07, 0x80, 0xf4, 0x2c, 0xe2, 0xe5, 0x33, 0x2a,
- 0x62, 0x46, 0x5f, 0x69, 0xf6, 0x3d, 0x65, 0x6d, 0xbf, 0x80, 0xad, 0xf9, 0xc5, 0x55, 0xaa, 0x7e,
- 0x0e, 0xad, 0x02, 0x76, 0x5d, 0x07, 0x6f, 0x1b, 0xe5, 0xa7, 0x98, 0xe7, 0x98, 0x96, 0xf6, 0x4f,
- 0xe0, 0xad, 0x42, 0xf5, 0x44, 0x14, 0xfa, 0xab, 0x1a, 0xd0, 0x08, 0x86, 0x8b, 0xe6, 0x32, 0x06,
- 0xfb, 0xaf, 0x55, 0x68, 0x3f, 0x51, 0x37, 0x97, 0xd3, 0x18, 0x83, 0xb8, 0x48, 0xf6, 0x70, 0x1f,
- 0xda, 0xa5, 0x0b, 0x29, 0xf9, 0x76, 0xeb, 0xc2, 0x78, 0x4d, 0x2f, 0x7b, 0x77, 0x57, 0x85, 0xd9,
- 0xfc, 0xbb, 0xfb, 0x21, 0x0c, 0x4e, 0x32, 0x42, 0x16, 0x9f, 0xe8, 0x35, 0xa7, 0xc7, 0x15, 0xa6,
- 0xed, 0x0e, 0x6c, 0x60, 0x8f, 0x05, 0x17, 0x73, 0xd6, 0xf2, 0x7c, 0x0d, 0xa4, 0xca, 0xb4, 0xff,
- 0x3c, 0x0f, 0x34, 0x88, 0x4f, 0x12, 0x3a, 0xac, 0x7f, 0xf7, 0x27, 0xb6, 0xda, 0x0d, 0xd7, 0x50,
- 0xf4, 0x1c, 0xba, 0xfa, 0xa9, 0xa6, 0x3c, 0xad, 0xdf, 0xf8, 0x19, 0xd8, 0x26, 0x85, 0x8a, 0x1a,
- 0xbc, 0xb9, 0xb4, 0x93, 0x86, 0xdc, 0x89, 0x54, 0x99, 0x85, 0xed, 0xdf, 0x15, 0x68, 0x38, 0xd8,
- 0x3b, 0x7f, 0xb3, 0xf3, 0xf1, 0x19, 0xf4, 0xf2, 0x1e, 0x51, 0x4a, 0xc9, 0x5b, 0x06, 0x90, 0xe6,
- 0xd1, 0x73, 0x3a, 0xbe, 0x31, 0x5a, 0x09, 0xdb, 0xfa, 0x2a, 0xd8, 0xfe, 0x59, 0x81, 0xee, 0x93,
- 0xbc, 0x6f, 0xbd, 0xd9, 0xe0, 0xed, 0x02, 0xf0, 0x46, 0x5b, 0xc2, 0xcd, 0x24, 0x26, 0xfa, 0x78,
- 0x38, 0xcd, 0x4c, 0x7d, 0xdd, 0x1c, 0xaf, 0x6f, 0x2a, 0xd0, 0x7e, 0x99, 0xa4, 0x49, 0x98, 0x9c,
- 0xce, 0xde, 0x6c, 0xb4, 0xf6, 0x61, 0x60, 0x70, 0x98, 0x12, 0x68, 0x77, 0xe6, 0x0e, 0x5b, 0x71,
- 0x38, 0x9c, 0x9e, 0x5f, 0x1a, 0xdf, 0x1c, 0xc0, 0x0d, 0x18, 0x28, 0x5e, 0x5f, 0xb4, 0x14, 0xfb,
- 0xcf, 0x16, 0x20, 0x53, 0xaa, 0x6a, 0xfd, 0xaf, 0xa0, 0xc3, 0x14, 0xd6, 0x22, 0x3e, 0xf5, 0xb8,
- 0x31, 0xef, 0x82, 0x99, 0x0b, 0xa7, 0xcd, 0xcc, 0xcc, 0xfc, 0x14, 0x36, 0x17, 0x7e, 0x06, 0xe2,
- 0x84, 0x4a, 0x66, 0x64, 0x30, 0xf7, 0x4b, 0xd0, 0xe1, 0xc4, 0xfe, 0x18, 0x6e, 0x4b, 0x12, 0xad,
- 0xfb, 0x90, 0xee, 0x0f, 0x0b, 0x6c, 0xb8, 0x53, 0xb0, 0x61, 0xfb, 0x5b, 0x0b, 0xb6, 0xe6, 0xa7,
- 0xa9, 0xf8, 0xaf, 0x9a, 0x87, 0x30, 0x20, 0x55, 0x2f, 0x4d, 0x5e, 0x2f, 0xe9, 0xf4, 0x47, 0x0b,
- 0xbc, 0x7e, 0xde, 0xf7, 0x8e, 0xae, 0xa3, 0x05, 0xb5, 0xef, 0xd3, 0xb2, 0x80, 0x8e, 0x30, 0x0c,
- 0x16, 0xcc, 0xf8, 0xab, 0x48, 0xaf, 0xab, 0x62, 0x5a, 0x57, 0x13, 0xbf, 0x07, 0xb1, 0xb7, 0xef,
- 0xc1, 0xdd, 0x2f, 0x08, 0x3b, 0x14, 0x36, 0x7b, 0x49, 0x7c, 0x12, 0x9c, 0x4e, 0x33, 0x69, 0x54,
- 0xa4, 0xf6, 0xdd, 0x55, 0x16, 0x0a, 0xa6, 0x25, 0xbf, 0xb5, 0x59, 0x37, 0xfe, 0xad, 0xad, 0x72,
- 0xd5, 0x6f, 0x6d, 0xbb, 0xff, 0xad, 0xc3, 0xfa, 0x31, 0xc1, 0xaf, 0x09, 0xe1, 0x4f, 0xfb, 0xce,
- 0x31, 0x89, 0xfd, 0xe2, 0x57, 0xf4, 0x4d, 0x63, 0x8f, 0xb9, 0x74, 0xf4, 0xce, 0x32, 0x69, 0x4e,
- 0x01, 0x6e, 0x6d, 0x5b, 0x1f, 0x5a, 0xe8, 0x05, 0x74, 0x4a, 0x2f, 0x5a, 0x74, 0xcf, 0x98, 0xb4,
- 0xec, 0xad, 0x3b, 0xba, 0xb3, 0xd0, 0x10, 0x35, 0xaa, 0xb9, 0xcb, 0xb6, 0xf9, 0x92, 0x43, 0xef,
- 0xae, 0x7c, 0xe2, 0x49, 0x87, 0xf7, 0xae, 0x79, 0x02, 0xda, 0xb7, 0xd0, 0x67, 0x50, 0x97, 0x94,
- 0x1f, 0x0d, 0x0d, 0xe3, 0xd2, 0xdb, 0xa9, 0x14, 0x57, 0xf9, 0x7d, 0x60, 0xdf, 0x42, 0x4f, 0x01,
- 0x0a, 0xd2, 0x8c, 0xde, 0x29, 0xfd, 0x0c, 0x32, 0xc7, 0xda, 0x47, 0x77, 0x57, 0x68, 0x73, 0x67,
- 0x5f, 0x41, 0xb7, 0x4c, 0xed, 0xd0, 0x78, 0x29, 0x7b, 0x33, 0xea, 0xc3, 0xe8, 0xfe, 0x15, 0x16,
- 0xb9, 0xe3, 0xdf, 0x41, 0x7f, 0x9e, 0xb1, 0x21, 0x7b, 0xe9, 0xc4, 0x12, 0xfb, 0x1b, 0xbd, 0x77,
- 0xa5, 0x8d, 0x09, 0x42, 0x51, 0xa2, 0x4a, 0x20, 0x2c, 0xd4, 0xb3, 0x12, 0x08, 0x8b, 0x75, 0x4d,
- 0x82, 0x50, 0xbe, 0xd7, 0x25, 0x10, 0x96, 0x56, 0xa1, 0x12, 0x08, 0xcb, 0x8b, 0x82, 0x7d, 0x0b,
- 0x25, 0xb0, 0xb5, 0xfc, 0xb6, 0x21, 0xf3, 0x27, 0xa1, 0x2b, 0xaf, 0xec, 0xe8, 0xc1, 0x77, 0xb0,
- 0xd4, 0x0b, 0x4e, 0xea, 0xe2, 0x2f, 0xaa, 0x8f, 0xfe, 0x1f, 0x00, 0x00, 0xff, 0xff, 0x6c, 0x64,
- 0x5c, 0xbc, 0xb2, 0x1a, 0x00, 0x00,
+ 0x11, 0xd6, 0xec, 0xf2, 0xb1, 0x5b, 0xcb, 0x7d, 0x35, 0x29, 0x6a, 0xb9, 0x7a, 0x90, 0x1a, 0xdb,
+ 0x30, 0xa5, 0x38, 0x8c, 0x43, 0x1b, 0x88, 0x11, 0xc7, 0x30, 0x28, 0x8a, 0x56, 0x08, 0x89, 0xb4,
+ 0x34, 0x64, 0x64, 0xc0, 0x40, 0x30, 0xee, 0x9d, 0x69, 0x52, 0x03, 0xce, 0x2b, 0xd3, 0xbd, 0x14,
+ 0xd7, 0xb9, 0x04, 0xc8, 0x2d, 0xc9, 0x25, 0xc8, 0x21, 0x7f, 0x21, 0x97, 0x9c, 0x92, 0xb3, 0x2f,
+ 0xf9, 0x47, 0xb9, 0xe4, 0xe0, 0x4b, 0xd0, 0xaf, 0x99, 0x9e, 0x7d, 0x90, 0xa6, 0x01, 0x03, 0xd6,
+ 0x6d, 0xa6, 0xaa, 0xba, 0xba, 0xfa, 0xab, 0xee, 0xaa, 0xaf, 0x67, 0x60, 0x29, 0xc2, 0x94, 0x91,
+ 0x6c, 0x2b, 0xcd, 0x12, 0x96, 0xa0, 0xba, 0x7c, 0x73, 0xd3, 0x81, 0xfd, 0xe7, 0x05, 0xa8, 0xff,
+ 0x9a, 0xe0, 0x8c, 0x0d, 0x08, 0x66, 0xa8, 0x05, 0x95, 0x20, 0xed, 0x59, 0x1b, 0xd6, 0x66, 0xdd,
+ 0xa9, 0x04, 0x29, 0x42, 0x30, 0x97, 0x26, 0x19, 0xeb, 0x55, 0x36, 0xac, 0xcd, 0xa6, 0x23, 0x9e,
+ 0xd1, 0x5d, 0x80, 0x74, 0x38, 0x08, 0x03, 0xcf, 0x1d, 0x66, 0x61, 0xaf, 0x2a, 0x6c, 0xeb, 0x52,
+ 0xf2, 0x9b, 0x2c, 0x44, 0x9b, 0xd0, 0x89, 0xf0, 0x85, 0x7b, 0x9e, 0x84, 0xc3, 0x88, 0xb8, 0x5e,
+ 0x32, 0x8c, 0x59, 0x6f, 0x4e, 0x0c, 0x6f, 0x45, 0xf8, 0xe2, 0xa5, 0x10, 0xef, 0x72, 0x29, 0xda,
+ 0xe0, 0x51, 0x5d, 0xb8, 0x27, 0x41, 0x48, 0xdc, 0x33, 0x32, 0xea, 0xcd, 0x6f, 0x58, 0x9b, 0x73,
+ 0x0e, 0x44, 0xf8, 0xe2, 0xb3, 0x20, 0x24, 0x4f, 0xc9, 0x08, 0xad, 0x43, 0xc3, 0xc7, 0x0c, 0xbb,
+ 0x1e, 0x89, 0x19, 0xc9, 0x7a, 0x0b, 0x62, 0x2e, 0xe0, 0xa2, 0x5d, 0x21, 0xe1, 0xf1, 0x65, 0xd8,
+ 0x3b, 0xeb, 0x2d, 0x0a, 0x8d, 0x78, 0xe6, 0xf1, 0x61, 0x3f, 0x0a, 0x62, 0x57, 0x44, 0x5e, 0x13,
+ 0x53, 0xd7, 0x85, 0xe4, 0x39, 0x0f, 0xff, 0x13, 0x58, 0x94, 0xb1, 0xd1, 0x5e, 0x7d, 0xa3, 0xba,
+ 0xd9, 0xd8, 0x7e, 0x6b, 0x2b, 0x47, 0x63, 0x4b, 0x86, 0xb7, 0x1f, 0x9f, 0x24, 0x59, 0x84, 0x59,
+ 0x90, 0xc4, 0x07, 0x84, 0x52, 0x7c, 0x4a, 0x1c, 0x3d, 0x06, 0xed, 0x43, 0x23, 0x26, 0xaf, 0x5d,
+ 0xed, 0x02, 0x84, 0x8b, 0xcd, 0x09, 0x17, 0x47, 0xaf, 0x92, 0x8c, 0x4d, 0xf1, 0x03, 0x31, 0x79,
+ 0xfd, 0x52, 0xb9, 0x7a, 0x01, 0x6d, 0x9f, 0x84, 0x84, 0x11, 0x3f, 0x77, 0xd7, 0xb8, 0xa6, 0xbb,
+ 0x96, 0x72, 0xa0, 0x5d, 0xbe, 0x0d, 0xad, 0x57, 0x98, 0xba, 0x71, 0x92, 0x7b, 0x5c, 0xda, 0xb0,
+ 0x36, 0x6b, 0xce, 0xd2, 0x2b, 0x4c, 0x0f, 0x13, 0x6d, 0xf5, 0x04, 0xea, 0xc4, 0x73, 0xe9, 0x2b,
+ 0x9c, 0xf9, 0xb4, 0xd7, 0x11, 0x53, 0x3e, 0x9c, 0x98, 0x72, 0xcf, 0x3b, 0xe2, 0x06, 0x53, 0x26,
+ 0xad, 0x11, 0xa9, 0xa2, 0xe8, 0x10, 0x9a, 0x1c, 0x8c, 0xc2, 0x59, 0xf7, 0xda, 0xce, 0x38, 0x9a,
+ 0x7b, 0xda, 0xdf, 0x4b, 0xe8, 0x6a, 0x44, 0x0a, 0x9f, 0xe8, 0xda, 0x3e, 0x35, 0xac, 0xb9, 0xdf,
+ 0x77, 0xa1, 0xa3, 0x60, 0x29, 0xdc, 0x2e, 0x0b, 0x60, 0x9a, 0x02, 0x18, 0x6d, 0x68, 0xff, 0xa1,
+ 0x02, 0xdd, 0xfc, 0x34, 0x38, 0x84, 0xa6, 0x49, 0x4c, 0x09, 0x7a, 0x08, 0x5d, 0xb5, 0x9d, 0x69,
+ 0xf0, 0x35, 0x71, 0xc3, 0x20, 0x0a, 0x98, 0x38, 0x24, 0x73, 0x4e, 0x5b, 0x2a, 0x8e, 0x82, 0xaf,
+ 0xc9, 0x33, 0x2e, 0x46, 0xab, 0xb0, 0x10, 0x12, 0xec, 0x93, 0x4c, 0x9c, 0x99, 0xba, 0xa3, 0xde,
+ 0xd0, 0xbb, 0xd0, 0x8e, 0x08, 0xcb, 0x02, 0x8f, 0xba, 0xd8, 0xf7, 0x33, 0x42, 0xa9, 0x3a, 0x3a,
+ 0x2d, 0x25, 0xde, 0x91, 0x52, 0xf4, 0x11, 0xf4, 0xb4, 0x61, 0xc0, 0xf7, 0xf8, 0x39, 0x0e, 0x5d,
+ 0x4a, 0xbc, 0x24, 0xf6, 0xa9, 0x3a, 0x47, 0xab, 0x4a, 0xbf, 0xaf, 0xd4, 0x47, 0x52, 0x8b, 0x1e,
+ 0x43, 0x87, 0xb2, 0x24, 0xc3, 0xa7, 0xc4, 0x1d, 0x60, 0xef, 0x8c, 0xf0, 0x11, 0xf3, 0x02, 0xbc,
+ 0x35, 0x03, 0xbc, 0x23, 0x69, 0xf2, 0x48, 0x5a, 0x38, 0x6d, 0x5a, 0x7a, 0xa7, 0xf6, 0xb7, 0x55,
+ 0xe8, 0xcd, 0x3a, 0x06, 0xa2, 0x3e, 0xf8, 0x62, 0xe9, 0x4d, 0xa7, 0x12, 0xf8, 0xfc, 0xfc, 0x71,
+ 0x48, 0xc4, 0x5a, 0xe7, 0x1c, 0xf1, 0x8c, 0xee, 0x01, 0x78, 0x49, 0x18, 0x12, 0x8f, 0x0f, 0x54,
+ 0x8b, 0x34, 0x24, 0xfc, 0x7c, 0x8a, 0x23, 0x5f, 0x94, 0x86, 0x39, 0xa7, 0xce, 0x25, 0xb2, 0x2a,
+ 0xdc, 0x87, 0x25, 0x99, 0x3e, 0x65, 0x20, 0xab, 0x42, 0x43, 0xca, 0xa4, 0xc9, 0x7b, 0x80, 0xf4,
+ 0x36, 0x19, 0x8c, 0x72, 0xc3, 0x05, 0x61, 0xd8, 0x51, 0x9a, 0x47, 0x23, 0x6d, 0x7d, 0x1b, 0xea,
+ 0x19, 0xc1, 0xbe, 0x9b, 0xc4, 0xe1, 0x48, 0x14, 0x8a, 0x9a, 0x53, 0xe3, 0x82, 0xcf, 0xe3, 0x70,
+ 0x84, 0x7e, 0x02, 0xdd, 0x8c, 0xa4, 0x61, 0xe0, 0x61, 0x37, 0x0d, 0xb1, 0x47, 0x22, 0x12, 0xeb,
+ 0x9a, 0xd1, 0x51, 0x8a, 0xe7, 0x5a, 0x8e, 0x7a, 0xb0, 0x78, 0x4e, 0x32, 0xca, 0x97, 0x55, 0x17,
+ 0x26, 0xfa, 0x15, 0x75, 0xa0, 0xca, 0x58, 0xd8, 0x03, 0x21, 0xe5, 0x8f, 0xe8, 0x01, 0x74, 0xbc,
+ 0x24, 0x4a, 0xb1, 0xc7, 0xdc, 0x8c, 0x9c, 0x07, 0x62, 0x50, 0x43, 0xa8, 0xdb, 0x4a, 0xee, 0x28,
+ 0x31, 0x5f, 0x4e, 0x94, 0xf8, 0xc1, 0x49, 0x40, 0x7c, 0x17, 0x33, 0x95, 0x6c, 0x71, 0x70, 0xab,
+ 0x4e, 0x47, 0x6b, 0x76, 0x98, 0x4c, 0x33, 0xda, 0x82, 0xe5, 0x8c, 0x44, 0x09, 0x23, 0xae, 0x4e,
+ 0x76, 0x8c, 0x23, 0xd2, 0x6b, 0x0a, 0x9c, 0xbb, 0x52, 0xa5, 0x72, 0x7c, 0x88, 0x23, 0xc2, 0xbd,
+ 0x8f, 0xd9, 0xf3, 0x5a, 0xdb, 0x12, 0xe6, 0x9d, 0x92, 0xf9, 0x53, 0x32, 0xb2, 0xff, 0x61, 0xc1,
+ 0xdd, 0x4b, 0x4b, 0xce, 0xc4, 0x16, 0xb8, 0x2a, 0xdd, 0x3f, 0x14, 0xc2, 0xf6, 0x10, 0xd6, 0xaf,
+ 0x28, 0x04, 0x57, 0xc4, 0x5a, 0x99, 0x88, 0xd5, 0x86, 0x26, 0xf1, 0xdc, 0x20, 0xf6, 0xc9, 0x85,
+ 0x3b, 0x08, 0x98, 0x3c, 0xa2, 0x4d, 0xa7, 0x41, 0xbc, 0x7d, 0x2e, 0x7b, 0x14, 0x30, 0x6a, 0x7f,
+ 0x63, 0x41, 0xab, 0x7c, 0x86, 0xf8, 0x29, 0x60, 0xa3, 0x94, 0xa8, 0xbe, 0x29, 0x9e, 0xd5, 0xd4,
+ 0x15, 0xd5, 0x49, 0x7d, 0xb4, 0x0f, 0x90, 0x66, 0x49, 0x4a, 0x32, 0x16, 0x10, 0xee, 0x97, 0x1f,
+ 0xcb, 0x07, 0x33, 0x8f, 0xe5, 0xd6, 0xf3, 0xdc, 0x76, 0x2f, 0x66, 0xd9, 0xc8, 0x31, 0x06, 0xf7,
+ 0x3f, 0x81, 0xf6, 0x98, 0x9a, 0xa3, 0xc3, 0xb3, 0x2a, 0x03, 0xe0, 0x8f, 0x68, 0x05, 0xe6, 0xcf,
+ 0x71, 0x38, 0x24, 0x2a, 0x04, 0xf9, 0xf2, 0xcb, 0xca, 0x47, 0x96, 0xbd, 0x08, 0xf3, 0x7b, 0x51,
+ 0xca, 0x46, 0x7c, 0x25, 0xed, 0xa3, 0x61, 0x4a, 0xb2, 0x47, 0x61, 0xe2, 0x9d, 0xed, 0x5d, 0xb0,
+ 0x0c, 0xa3, 0xcf, 0xa1, 0x45, 0x32, 0x4c, 0x87, 0x19, 0x3f, 0x55, 0x7e, 0x10, 0x9f, 0x0a, 0x9f,
+ 0xe5, 0x96, 0x34, 0x36, 0x66, 0x6b, 0x4f, 0x0e, 0xd8, 0x15, 0xf6, 0x4e, 0x93, 0x98, 0xaf, 0xfd,
+ 0x2f, 0xa1, 0x59, 0xd2, 0x73, 0xb0, 0x78, 0x03, 0x57, 0x59, 0x11, 0xcf, 0xbc, 0x68, 0xa6, 0x38,
+ 0x0b, 0xd8, 0x48, 0x11, 0x0d, 0xf5, 0xc6, 0x4b, 0x85, 0x2a, 0xbc, 0x81, 0x2f, 0x41, 0x6b, 0x3a,
+ 0x75, 0x29, 0xd9, 0xf7, 0xa9, 0xfd, 0x04, 0x56, 0x9e, 0x12, 0x92, 0xee, 0x26, 0x71, 0x4c, 0x3c,
+ 0x46, 0x7c, 0x87, 0xfc, 0x6e, 0x48, 0x28, 0xe3, 0x53, 0x88, 0x33, 0xa1, 0xf2, 0xc1, 0x9f, 0x79,
+ 0x15, 0x38, 0xcd, 0x52, 0xcf, 0x35, 0xe8, 0x4c, 0x8d, 0x0b, 0x38, 0x27, 0xb0, 0xff, 0x6e, 0x41,
+ 0x4b, 0xee, 0xa5, 0x67, 0x89, 0x27, 0x76, 0x10, 0x47, 0x94, 0xd3, 0x1b, 0x85, 0xe8, 0x30, 0x0b,
+ 0xc7, 0x78, 0x4f, 0x65, 0x9c, 0xf7, 0xac, 0x41, 0x4d, 0x10, 0x83, 0x22, 0xd2, 0x45, 0xde, 0xeb,
+ 0x03, 0x9f, 0x16, 0x25, 0xcd, 0x97, 0xea, 0x39, 0xa1, 0x6e, 0xe8, 0xde, 0xcd, 0x4d, 0x8a, 0xb6,
+ 0x31, 0x6f, 0xb6, 0x0d, 0xfb, 0x18, 0x96, 0x9f, 0x25, 0xc9, 0xd9, 0x30, 0x95, 0xe1, 0xe9, 0x15,
+ 0x96, 0x81, 0xb1, 0x36, 0xaa, 0x3c, 0x96, 0x1c, 0x98, 0xab, 0xf6, 0xb9, 0xfd, 0x5f, 0x0b, 0x56,
+ 0xca, 0x6e, 0x55, 0xa7, 0xfb, 0x0a, 0x96, 0x73, 0xbf, 0x6e, 0xa8, 0xb0, 0x90, 0x13, 0x34, 0xb6,
+ 0xdf, 0x37, 0xf6, 0xc0, 0xb4, 0xd1, 0x9a, 0x3d, 0xf9, 0x1a, 0x44, 0xa7, 0x7b, 0x3e, 0x26, 0xa1,
+ 0xfd, 0x0b, 0xe8, 0x8c, 0x9b, 0xf1, 0xdc, 0xe4, 0xb3, 0x2a, 0xc4, 0x6b, 0x7a, 0x24, 0xfa, 0x39,
+ 0xd4, 0x8b, 0x40, 0x2a, 0x22, 0x90, 0xe5, 0x52, 0x20, 0x6a, 0xae, 0xc2, 0x8a, 0xef, 0x7d, 0x92,
+ 0x65, 0x49, 0xa6, 0xaa, 0x91, 0x7c, 0xb1, 0x3f, 0x86, 0xda, 0xf7, 0xce, 0xae, 0xfd, 0xaf, 0x0a,
+ 0x34, 0x77, 0x28, 0x0d, 0x4e, 0x63, 0x9d, 0x82, 0x15, 0x98, 0x97, 0x7d, 0x47, 0x12, 0x01, 0xf9,
+ 0x82, 0x36, 0xa0, 0xa1, 0x8a, 0x9a, 0x01, 0xbd, 0x29, 0xba, 0xb2, 0x5e, 0xaa, 0x42, 0x37, 0x27,
+ 0x43, 0xe3, 0xad, 0x64, 0x8c, 0x05, 0xcf, 0xcf, 0x64, 0xc1, 0x0b, 0x06, 0x0b, 0xbe, 0x0d, 0x75,
+ 0x31, 0x28, 0x4e, 0x7c, 0xa2, 0xe8, 0x71, 0x8d, 0x0b, 0x0e, 0x13, 0x9f, 0xa0, 0x6d, 0x58, 0x8d,
+ 0x48, 0x94, 0x64, 0x23, 0x37, 0xc2, 0xa9, 0xcb, 0x49, 0xb8, 0x20, 0x36, 0xd1, 0x40, 0x15, 0x66,
+ 0x24, 0xb5, 0x07, 0x38, 0x3d, 0xc0, 0x17, 0x9c, 0xdb, 0x1c, 0x0c, 0xd0, 0x36, 0xdc, 0xfc, 0x22,
+ 0x0b, 0x18, 0x1e, 0x84, 0xa4, 0x4c, 0xee, 0x65, 0xa1, 0x5e, 0xd6, 0x4a, 0x83, 0xe1, 0xdb, 0x7f,
+ 0xb3, 0xa0, 0xa5, 0x51, 0x53, 0x3b, 0xac, 0x03, 0xd5, 0x93, 0x3c, 0xcb, 0xfc, 0x51, 0xe7, 0xa2,
+ 0x32, 0x2b, 0x17, 0x13, 0x37, 0x8c, 0x1c, 0xf9, 0x39, 0x13, 0xf9, 0x3c, 0xe9, 0xf3, 0x46, 0xd2,
+ 0x39, 0x34, 0x78, 0xc8, 0x5e, 0x69, 0x68, 0xf8, 0xb3, 0x7d, 0x0a, 0xdd, 0x23, 0x86, 0x59, 0x40,
+ 0x59, 0xe0, 0x51, 0x9d, 0xce, 0xb1, 0xc4, 0x59, 0x57, 0x25, 0xae, 0x32, 0x2b, 0x71, 0xd5, 0x3c,
+ 0x71, 0xf6, 0x7f, 0x2c, 0x40, 0xe6, 0x4c, 0x0a, 0x82, 0x1f, 0x60, 0x2a, 0x0e, 0x19, 0x4b, 0x18,
+ 0xa7, 0x8a, 0x9c, 0x8e, 0x29, 0x52, 0x25, 0x24, 0x3c, 0x7d, 0x7c, 0x37, 0x0c, 0x29, 0xf1, 0xa5,
+ 0x56, 0x32, 0xaa, 0x1a, 0x17, 0x08, 0x65, 0x99, 0x90, 0x2d, 0x8c, 0x11, 0x32, 0x7b, 0x07, 0x1a,
+ 0xaa, 0x39, 0x1d, 0xf3, 0xc6, 0x76, 0x75, 0xf4, 0x2a, 0xba, 0x4a, 0x01, 0xc4, 0x06, 0xc0, 0x6e,
+ 0x11, 0xfd, 0x94, 0xf2, 0x6c, 0xff, 0x1e, 0x6e, 0x16, 0x16, 0xcf, 0x02, 0xca, 0x74, 0x5e, 0x3e,
+ 0x84, 0xd5, 0x20, 0xf6, 0xc2, 0xa1, 0x4f, 0xdc, 0x98, 0xb7, 0xf7, 0x30, 0xbf, 0xd9, 0x58, 0x82,
+ 0xca, 0xad, 0x28, 0xed, 0xa1, 0x50, 0xea, 0x1b, 0xce, 0x7b, 0x80, 0xf4, 0x28, 0xe2, 0xe5, 0x23,
+ 0x2a, 0x62, 0x44, 0x47, 0x69, 0xf6, 0x3c, 0x65, 0x6d, 0xbf, 0x80, 0xd5, 0xf1, 0xc9, 0x55, 0xaa,
+ 0x7e, 0x01, 0x8d, 0x02, 0x76, 0x5d, 0x07, 0x6f, 0x1a, 0xe5, 0xa7, 0x18, 0xe7, 0x98, 0x96, 0xf6,
+ 0x4f, 0xe1, 0x56, 0xa1, 0x7a, 0x2c, 0x0a, 0xfd, 0x25, 0xdd, 0xc9, 0xee, 0x43, 0x6f, 0xd2, 0x5c,
+ 0xc6, 0x60, 0xff, 0xb5, 0x0a, 0x4b, 0x8f, 0xd5, 0xc9, 0xe5, 0x1c, 0xc7, 0x60, 0x35, 0x92, 0x5a,
+ 0xdc, 0x87, 0xa5, 0xd2, 0x81, 0x94, 0x64, 0xbc, 0x71, 0x6e, 0x5c, 0xb5, 0xa7, 0x5d, 0xca, 0xab,
+ 0xc2, 0x6c, 0xfc, 0x52, 0xfe, 0x10, 0xba, 0x27, 0x19, 0x21, 0x93, 0xf7, 0xf7, 0x39, 0xa7, 0xcd,
+ 0x15, 0xa6, 0xed, 0x16, 0x2c, 0x63, 0x8f, 0x05, 0xe7, 0x63, 0xd6, 0x72, 0x7f, 0x75, 0xa5, 0xca,
+ 0xb4, 0xff, 0x2c, 0x0f, 0x34, 0x88, 0x4f, 0x12, 0xda, 0x5b, 0xf8, 0xee, 0xf7, 0x6f, 0xb5, 0x1a,
+ 0xae, 0xa1, 0xe8, 0x39, 0xb4, 0xf4, 0x3d, 0x4e, 0x79, 0x5a, 0xbc, 0xf6, 0x1d, 0x71, 0x89, 0x14,
+ 0x2a, 0x6a, 0x90, 0xea, 0xd2, 0x4a, 0x6a, 0x72, 0x25, 0x52, 0x65, 0x16, 0xb6, 0x7f, 0x57, 0xa0,
+ 0xe6, 0x60, 0xef, 0xec, 0xcd, 0xce, 0xc7, 0xa7, 0xd0, 0xce, 0x7b, 0x44, 0x29, 0x25, 0xb7, 0x0c,
+ 0x20, 0xcd, 0xad, 0xe7, 0x34, 0x7d, 0xe3, 0x6d, 0x26, 0x6c, 0x8b, 0xb3, 0x60, 0xfb, 0x67, 0x05,
+ 0x5a, 0x8f, 0xf3, 0xbe, 0xf5, 0x66, 0x83, 0xb7, 0x0d, 0xc0, 0x1b, 0x6d, 0x09, 0x37, 0x93, 0x98,
+ 0xe8, 0xed, 0xe1, 0xd4, 0x33, 0xf5, 0x74, 0x7d, 0xbc, 0xbe, 0xa9, 0xc0, 0xd2, 0x71, 0x92, 0x26,
+ 0x61, 0x72, 0x3a, 0x7a, 0xb3, 0xd1, 0xda, 0x83, 0xae, 0xc1, 0x61, 0x4a, 0xa0, 0xad, 0x8d, 0x6d,
+ 0xb6, 0x62, 0x73, 0x38, 0x6d, 0xbf, 0xf4, 0x7e, 0x7d, 0x00, 0x97, 0xa1, 0xab, 0x78, 0x7d, 0xd1,
+ 0x52, 0xec, 0x3f, 0x5a, 0x80, 0x4c, 0xa9, 0xaa, 0xf5, 0xbf, 0x82, 0x26, 0x53, 0x58, 0x8b, 0xf8,
+ 0xd4, 0xcd, 0xc7, 0x3c, 0x0b, 0x66, 0x2e, 0x9c, 0x25, 0x66, 0x66, 0xe6, 0x67, 0xb0, 0x32, 0xf1,
+ 0x8d, 0x88, 0x13, 0x2a, 0x99, 0x91, 0xee, 0xd8, 0x67, 0xa2, 0x83, 0x81, 0xfd, 0x21, 0xdc, 0x94,
+ 0x24, 0x5a, 0xf7, 0x21, 0xdd, 0x1f, 0x26, 0xd8, 0x70, 0xb3, 0x60, 0xc3, 0xf6, 0xb7, 0x16, 0xac,
+ 0x8e, 0x0f, 0x53, 0xf1, 0x5f, 0x36, 0x0e, 0x61, 0x40, 0xaa, 0x5e, 0x9a, 0xbc, 0x5e, 0xd2, 0xe9,
+ 0x0f, 0x26, 0x78, 0xfd, 0xb8, 0xef, 0x2d, 0x5d, 0x47, 0x0b, 0x6a, 0xdf, 0xa1, 0x65, 0x01, 0xed,
+ 0x63, 0xe8, 0x4e, 0x98, 0xf1, 0x5b, 0x91, 0x9e, 0x57, 0xc5, 0xb4, 0xa8, 0x06, 0x7e, 0x0f, 0x62,
+ 0x6f, 0xaf, 0xc3, 0xdd, 0x27, 0x84, 0x1d, 0x08, 0x9b, 0xdd, 0x24, 0x3e, 0x09, 0x4e, 0x87, 0x99,
+ 0x34, 0x2a, 0x52, 0x7b, 0x6f, 0x96, 0x85, 0x82, 0x69, 0xca, 0x87, 0x38, 0xeb, 0xda, 0x1f, 0xe2,
+ 0x2a, 0x97, 0x7d, 0x88, 0xb3, 0x3f, 0x86, 0x1e, 0xdf, 0x59, 0x2a, 0x8a, 0x30, 0x20, 0x31, 0xcb,
+ 0x79, 0xe6, 0x3a, 0x34, 0x3c, 0x21, 0x71, 0x8d, 0x4f, 0x06, 0x20, 0x45, 0x9c, 0x5f, 0xd9, 0x8f,
+ 0x60, 0x6d, 0xca, 0x60, 0x15, 0xfc, 0x3b, 0xd0, 0x12, 0xb7, 0x58, 0x15, 0x39, 0xd1, 0x77, 0xbf,
+ 0x26, 0x97, 0xee, 0x68, 0xa1, 0xfd, 0x27, 0xbe, 0x4b, 0x08, 0xa6, 0x64, 0xc7, 0x8f, 0x82, 0xf8,
+ 0x38, 0x39, 0x23, 0xf9, 0xb5, 0xe5, 0x1d, 0x68, 0xa5, 0x19, 0x39, 0x0f, 0x92, 0x21, 0x75, 0x19,
+ 0x57, 0x88, 0x10, 0xaa, 0x4e, 0x53, 0x4b, 0x85, 0x35, 0x27, 0x50, 0xb9, 0x19, 0xbf, 0xe9, 0xbb,
+ 0x2c, 0x88, 0xe4, 0xb7, 0x84, 0xaa, 0xd3, 0xd1, 0x9a, 0x67, 0x89, 0x77, 0x76, 0x1c, 0xc8, 0xcb,
+ 0xb5, 0x30, 0x12, 0xbc, 0x46, 0xb2, 0xd2, 0x1a, 0x17, 0x1c, 0x72, 0x6e, 0x73, 0x00, 0xb7, 0x26,
+ 0x62, 0x51, 0xcb, 0x59, 0x81, 0x79, 0x33, 0x06, 0xf9, 0x82, 0xee, 0x00, 0xc8, 0x29, 0xa9, 0x1b,
+ 0x53, 0x35, 0xa7, 0x70, 0x77, 0x4c, 0x0f, 0xa9, 0xfd, 0x17, 0x0b, 0x7a, 0x0e, 0x09, 0x7f, 0x2c,
+ 0xab, 0xbb, 0x0d, 0x6b, 0x53, 0xa2, 0x91, 0xeb, 0xdb, 0xfe, 0x5f, 0x0d, 0x16, 0x8f, 0x08, 0x7e,
+ 0x4d, 0x88, 0x8f, 0xf6, 0xa1, 0x79, 0x44, 0x62, 0xbf, 0xf8, 0xd7, 0xb2, 0x62, 0x6c, 0xf6, 0x5c,
+ 0xda, 0xbf, 0x33, 0x4d, 0x9a, 0x73, 0xc1, 0x1b, 0x9b, 0xd6, 0xfb, 0x16, 0x7a, 0x01, 0xcd, 0xd2,
+ 0x77, 0x0f, 0xb4, 0x6e, 0x0c, 0x9a, 0xf6, 0x45, 0xa4, 0xbf, 0x36, 0xc1, 0x8c, 0xf4, 0xf1, 0xca,
+ 0x5d, 0x2e, 0x99, 0x57, 0x7a, 0x74, 0x6f, 0xe6, 0x5d, 0x5f, 0x3a, 0x5c, 0xbf, 0xe2, 0x5b, 0x80,
+ 0x7d, 0x03, 0x7d, 0x0a, 0x0b, 0xf2, 0xee, 0x87, 0x7a, 0x86, 0x71, 0xe9, 0x12, 0x5d, 0x8a, 0xab,
+ 0x7c, 0x51, 0xb4, 0x6f, 0xa0, 0xa7, 0x00, 0xc5, 0xed, 0x09, 0xdd, 0x29, 0x7d, 0x2c, 0x1b, 0xbb,
+ 0xbe, 0xf5, 0xef, 0xce, 0xd0, 0xe6, 0xce, 0xbe, 0x80, 0x56, 0x99, 0xe3, 0xa3, 0x8d, 0xa9, 0x34,
+ 0xde, 0x68, 0x14, 0xfd, 0xfb, 0x97, 0x58, 0xe4, 0x8e, 0x7f, 0x0b, 0x9d, 0x71, 0xea, 0x8e, 0xec,
+ 0xa9, 0x03, 0x4b, 0xd7, 0x80, 0xfe, 0x5b, 0x97, 0xda, 0x98, 0x20, 0x14, 0xbd, 0xaa, 0x04, 0xc2,
+ 0x44, 0x63, 0x2b, 0x81, 0x30, 0xd9, 0xe0, 0x24, 0x08, 0xe5, 0x02, 0x5f, 0x02, 0x61, 0x6a, 0x3b,
+ 0x2a, 0x81, 0x30, 0xbd, 0x3b, 0xd8, 0x37, 0x50, 0x02, 0xab, 0xd3, 0xcb, 0x2e, 0x32, 0x3f, 0x1c,
+ 0x5e, 0x5a, 0xbb, 0xfb, 0x0f, 0xbe, 0x83, 0x65, 0x3e, 0xe1, 0x57, 0xd0, 0x9d, 0xa8, 0x92, 0xc8,
+ 0x84, 0x74, 0x56, 0x01, 0xee, 0xbf, 0x7d, 0xb9, 0x51, 0x3e, 0xc3, 0x97, 0xd0, 0x1e, 0x2b, 0x5b,
+ 0xa8, 0x04, 0xc5, 0xd4, 0x02, 0xd4, 0xb7, 0x2f, 0x33, 0x31, 0xa3, 0x9f, 0x28, 0x1a, 0xa5, 0xe8,
+ 0x67, 0x15, 0xb8, 0x52, 0xf4, 0x33, 0xeb, 0x8e, 0x7d, 0x63, 0xb0, 0x20, 0x7e, 0xf4, 0x7e, 0xf0,
+ 0xff, 0x00, 0x00, 0x00, 0xff, 0xff, 0x6d, 0x11, 0x8c, 0x9d, 0xf8, 0x1d, 0x00, 0x00,
}
diff --git a/weed/pb/messaging.proto b/weed/pb/messaging.proto
new file mode 100644
index 000000000..689c22d29
--- /dev/null
+++ b/weed/pb/messaging.proto
@@ -0,0 +1,134 @@
+syntax = "proto3";
+
+package messaging_pb;
+
+option java_package = "seaweedfs.client";
+option java_outer_classname = "MessagingProto";
+
+//////////////////////////////////////////////////
+
+service SeaweedMessaging {
+
+ rpc Subscribe (stream SubscriberMessage) returns (stream BrokerMessage) {
+ }
+
+ rpc Publish (stream PublishRequest) returns (stream PublishResponse) {
+ }
+
+ rpc DeleteTopic (DeleteTopicRequest) returns (DeleteTopicResponse) {
+ }
+
+ rpc ConfigureTopic (ConfigureTopicRequest) returns (ConfigureTopicResponse) {
+ }
+
+ rpc GetTopicConfiguration (GetTopicConfigurationRequest) returns (GetTopicConfigurationResponse) {
+ }
+
+ rpc FindBroker (FindBrokerRequest) returns (FindBrokerResponse) {
+ }
+
+}
+
+//////////////////////////////////////////////////
+
+message SubscriberMessage {
+ message InitMessage {
+ string namespace = 1;
+ string topic = 2;
+ int32 partition = 3;
+ enum StartPosition {
+ LATEST = 0; // Start at the newest message
+ EARLIEST = 1; // Start at the oldest message
+ TIMESTAMP = 2; // Start after a specified timestamp, exclusive
+ }
+ StartPosition startPosition = 4; // Where to begin consuming from
+        int64 timestampNs = 5; // timestamp in nanoseconds
+ string subscriber_id = 6; // uniquely identify a subscriber to track consumption
+ }
+ InitMessage init = 1;
+ message AckMessage {
+ int64 message_id = 1;
+ }
+ AckMessage ack = 2;
+ bool is_close = 3;
+}
+
+message Message {
+ int64 event_time_ns = 1 [jstype = JS_STRING];
+ bytes key = 2; // Message key
+ bytes value = 3; // Message payload
+    map<string, bytes> headers = 4; // Message headers
+ bool is_close = 5;
+}
+
+message BrokerMessage {
+ Message data = 1;
+}
+
+message PublishRequest {
+ message InitMessage {
+ string namespace = 1; // only needed on the initial request
+ string topic = 2; // only needed on the initial request
+ int32 partition = 3;
+ }
+ InitMessage init = 1;
+ Message data = 2;
+}
+
+message PublishResponse {
+ message ConfigMessage {
+ int32 partition_count = 1;
+ }
+ ConfigMessage config = 1;
+ message RedirectMessage {
+ string new_broker = 1;
+ }
+ RedirectMessage redirect = 2;
+ bool is_closed = 3;
+}
+
+message DeleteTopicRequest {
+ string namespace = 1;
+ string topic = 2;
+}
+message DeleteTopicResponse {
+}
+
+message ConfigureTopicRequest {
+ string namespace = 1;
+ string topic = 2;
+ TopicConfiguration configuration = 3;
+}
+message ConfigureTopicResponse {
+}
+
+message GetTopicConfigurationRequest {
+ string namespace = 1;
+ string topic = 2;
+}
+message GetTopicConfigurationResponse {
+ TopicConfiguration configuration = 1;
+}
+
+message FindBrokerRequest {
+ string namespace = 1;
+ string topic = 2;
+ int32 parition = 3;
+}
+
+message FindBrokerResponse {
+ string broker = 1;
+}
+
+message TopicConfiguration {
+ int32 partition_count = 1;
+ string collection = 2;
+ string replication = 3;
+ bool is_transient = 4;
+ enum Partitioning {
+ NonNullKeyHash = 0; // If not null, hash by key value. If null, round robin
+ KeyHash = 1; // hash by key value
+ RoundRobin = 2; // round robin pick one partition
+ }
+ Partitioning partitoning = 5;
+}
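The Publish RPC defined above is a bidirectional stream: the first PublishRequest carries only an InitMessage naming the namespace, topic, and partition; subsequent requests carry Message data; and the broker may answer with a ConfigMessage (partition count) or a RedirectMessage pointing at another broker. A minimal client-side sketch against the generated Go bindings below — the broker address is illustrative and error handling is condensed:

```go
package main

import (
	"context"
	"log"

	"github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
	"google.golang.org/grpc"
)

func main() {
	// Hypothetical broker address; TLS, retries, and redirect re-dialing omitted.
	conn, err := grpc.Dial("localhost:17777", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := messaging_pb.NewSeaweedMessagingClient(conn)
	stream, err := client.Publish(context.Background())
	if err != nil {
		log.Fatal(err)
	}

	// First request: init only, naming namespace/topic/partition.
	if err := stream.Send(&messaging_pb.PublishRequest{
		Init: &messaging_pb.PublishRequest_InitMessage{
			Namespace: "chat",
			Topic:     "general",
			Partition: 0,
		},
	}); err != nil {
		log.Fatal(err)
	}

	// Subsequent requests carry the actual message payloads.
	if err := stream.Send(&messaging_pb.PublishRequest{
		Data: &messaging_pb.Message{
			Key:   []byte("user-1"),
			Value: []byte("hello"),
		},
	}); err != nil {
		log.Fatal(err)
	}

	// The broker replies with configuration or a redirect to another broker.
	resp, err := stream.Recv()
	if err != nil {
		log.Fatal(err)
	}
	if r := resp.GetRedirect(); r != nil {
		log.Printf("redirected to broker %s", r.GetNewBroker())
	}
}
```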
diff --git a/weed/pb/messaging_pb/messaging.pb.go b/weed/pb/messaging_pb/messaging.pb.go
new file mode 100644
index 000000000..f42e2c2db
--- /dev/null
+++ b/weed/pb/messaging_pb/messaging.pb.go
@@ -0,0 +1,996 @@
+// Code generated by protoc-gen-go.
+// source: messaging.proto
+// DO NOT EDIT!
+
+/*
+Package messaging_pb is a generated protocol buffer package.
+
+It is generated from these files:
+ messaging.proto
+
+It has these top-level messages:
+ SubscriberMessage
+ Message
+ BrokerMessage
+ PublishRequest
+ PublishResponse
+ DeleteTopicRequest
+ DeleteTopicResponse
+ ConfigureTopicRequest
+ ConfigureTopicResponse
+ GetTopicConfigurationRequest
+ GetTopicConfigurationResponse
+ FindBrokerRequest
+ FindBrokerResponse
+ TopicConfiguration
+*/
+package messaging_pb
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+import (
+ context "golang.org/x/net/context"
+ grpc "google.golang.org/grpc"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+type SubscriberMessage_InitMessage_StartPosition int32
+
+const (
+ SubscriberMessage_InitMessage_LATEST SubscriberMessage_InitMessage_StartPosition = 0
+ SubscriberMessage_InitMessage_EARLIEST SubscriberMessage_InitMessage_StartPosition = 1
+ SubscriberMessage_InitMessage_TIMESTAMP SubscriberMessage_InitMessage_StartPosition = 2
+)
+
+var SubscriberMessage_InitMessage_StartPosition_name = map[int32]string{
+ 0: "LATEST",
+ 1: "EARLIEST",
+ 2: "TIMESTAMP",
+}
+var SubscriberMessage_InitMessage_StartPosition_value = map[string]int32{
+ "LATEST": 0,
+ "EARLIEST": 1,
+ "TIMESTAMP": 2,
+}
+
+func (x SubscriberMessage_InitMessage_StartPosition) String() string {
+ return proto.EnumName(SubscriberMessage_InitMessage_StartPosition_name, int32(x))
+}
+func (SubscriberMessage_InitMessage_StartPosition) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor0, []int{0, 0, 0}
+}
+
+type TopicConfiguration_Partitioning int32
+
+const (
+ TopicConfiguration_NonNullKeyHash TopicConfiguration_Partitioning = 0
+ TopicConfiguration_KeyHash TopicConfiguration_Partitioning = 1
+ TopicConfiguration_RoundRobin TopicConfiguration_Partitioning = 2
+)
+
+var TopicConfiguration_Partitioning_name = map[int32]string{
+ 0: "NonNullKeyHash",
+ 1: "KeyHash",
+ 2: "RoundRobin",
+}
+var TopicConfiguration_Partitioning_value = map[string]int32{
+ "NonNullKeyHash": 0,
+ "KeyHash": 1,
+ "RoundRobin": 2,
+}
+
+func (x TopicConfiguration_Partitioning) String() string {
+ return proto.EnumName(TopicConfiguration_Partitioning_name, int32(x))
+}
+func (TopicConfiguration_Partitioning) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor0, []int{13, 0}
+}
+
+type SubscriberMessage struct {
+ Init *SubscriberMessage_InitMessage `protobuf:"bytes,1,opt,name=init" json:"init,omitempty"`
+ Ack *SubscriberMessage_AckMessage `protobuf:"bytes,2,opt,name=ack" json:"ack,omitempty"`
+ IsClose bool `protobuf:"varint,3,opt,name=is_close,json=isClose" json:"is_close,omitempty"`
+}
+
+func (m *SubscriberMessage) Reset() { *m = SubscriberMessage{} }
+func (m *SubscriberMessage) String() string { return proto.CompactTextString(m) }
+func (*SubscriberMessage) ProtoMessage() {}
+func (*SubscriberMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+
+func (m *SubscriberMessage) GetInit() *SubscriberMessage_InitMessage {
+ if m != nil {
+ return m.Init
+ }
+ return nil
+}
+
+func (m *SubscriberMessage) GetAck() *SubscriberMessage_AckMessage {
+ if m != nil {
+ return m.Ack
+ }
+ return nil
+}
+
+func (m *SubscriberMessage) GetIsClose() bool {
+ if m != nil {
+ return m.IsClose
+ }
+ return false
+}
+
+type SubscriberMessage_InitMessage struct {
+ Namespace string `protobuf:"bytes,1,opt,name=namespace" json:"namespace,omitempty"`
+ Topic string `protobuf:"bytes,2,opt,name=topic" json:"topic,omitempty"`
+ Partition int32 `protobuf:"varint,3,opt,name=partition" json:"partition,omitempty"`
+ StartPosition SubscriberMessage_InitMessage_StartPosition `protobuf:"varint,4,opt,name=startPosition,enum=messaging_pb.SubscriberMessage_InitMessage_StartPosition" json:"startPosition,omitempty"`
+ TimestampNs int64 `protobuf:"varint,5,opt,name=timestampNs" json:"timestampNs,omitempty"`
+ SubscriberId string `protobuf:"bytes,6,opt,name=subscriber_id,json=subscriberId" json:"subscriber_id,omitempty"`
+}
+
+func (m *SubscriberMessage_InitMessage) Reset() { *m = SubscriberMessage_InitMessage{} }
+func (m *SubscriberMessage_InitMessage) String() string { return proto.CompactTextString(m) }
+func (*SubscriberMessage_InitMessage) ProtoMessage() {}
+func (*SubscriberMessage_InitMessage) Descriptor() ([]byte, []int) {
+ return fileDescriptor0, []int{0, 0}
+}
+
+func (m *SubscriberMessage_InitMessage) GetNamespace() string {
+ if m != nil {
+ return m.Namespace
+ }
+ return ""
+}
+
+func (m *SubscriberMessage_InitMessage) GetTopic() string {
+ if m != nil {
+ return m.Topic
+ }
+ return ""
+}
+
+func (m *SubscriberMessage_InitMessage) GetPartition() int32 {
+ if m != nil {
+ return m.Partition
+ }
+ return 0
+}
+
+func (m *SubscriberMessage_InitMessage) GetStartPosition() SubscriberMessage_InitMessage_StartPosition {
+ if m != nil {
+ return m.StartPosition
+ }
+ return SubscriberMessage_InitMessage_LATEST
+}
+
+func (m *SubscriberMessage_InitMessage) GetTimestampNs() int64 {
+ if m != nil {
+ return m.TimestampNs
+ }
+ return 0
+}
+
+func (m *SubscriberMessage_InitMessage) GetSubscriberId() string {
+ if m != nil {
+ return m.SubscriberId
+ }
+ return ""
+}
+
+type SubscriberMessage_AckMessage struct {
+ MessageId int64 `protobuf:"varint,1,opt,name=message_id,json=messageId" json:"message_id,omitempty"`
+}
+
+func (m *SubscriberMessage_AckMessage) Reset() { *m = SubscriberMessage_AckMessage{} }
+func (m *SubscriberMessage_AckMessage) String() string { return proto.CompactTextString(m) }
+func (*SubscriberMessage_AckMessage) ProtoMessage() {}
+func (*SubscriberMessage_AckMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 1} }
+
+func (m *SubscriberMessage_AckMessage) GetMessageId() int64 {
+ if m != nil {
+ return m.MessageId
+ }
+ return 0
+}
+
+type Message struct {
+ EventTimeNs int64 `protobuf:"varint,1,opt,name=event_time_ns,json=eventTimeNs" json:"event_time_ns,omitempty"`
+ Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
+ Value []byte `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"`
+ Headers map[string][]byte `protobuf:"bytes,4,rep,name=headers" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ IsClose bool `protobuf:"varint,5,opt,name=is_close,json=isClose" json:"is_close,omitempty"`
+}
+
+func (m *Message) Reset() { *m = Message{} }
+func (m *Message) String() string { return proto.CompactTextString(m) }
+func (*Message) ProtoMessage() {}
+func (*Message) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
+
+func (m *Message) GetEventTimeNs() int64 {
+ if m != nil {
+ return m.EventTimeNs
+ }
+ return 0
+}
+
+func (m *Message) GetKey() []byte {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *Message) GetValue() []byte {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+func (m *Message) GetHeaders() map[string][]byte {
+ if m != nil {
+ return m.Headers
+ }
+ return nil
+}
+
+func (m *Message) GetIsClose() bool {
+ if m != nil {
+ return m.IsClose
+ }
+ return false
+}
+
+type BrokerMessage struct {
+ Data *Message `protobuf:"bytes,1,opt,name=data" json:"data,omitempty"`
+}
+
+func (m *BrokerMessage) Reset() { *m = BrokerMessage{} }
+func (m *BrokerMessage) String() string { return proto.CompactTextString(m) }
+func (*BrokerMessage) ProtoMessage() {}
+func (*BrokerMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
+
+func (m *BrokerMessage) GetData() *Message {
+ if m != nil {
+ return m.Data
+ }
+ return nil
+}
+
+type PublishRequest struct {
+ Init *PublishRequest_InitMessage `protobuf:"bytes,1,opt,name=init" json:"init,omitempty"`
+ Data *Message `protobuf:"bytes,2,opt,name=data" json:"data,omitempty"`
+}
+
+func (m *PublishRequest) Reset() { *m = PublishRequest{} }
+func (m *PublishRequest) String() string { return proto.CompactTextString(m) }
+func (*PublishRequest) ProtoMessage() {}
+func (*PublishRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
+
+func (m *PublishRequest) GetInit() *PublishRequest_InitMessage {
+ if m != nil {
+ return m.Init
+ }
+ return nil
+}
+
+func (m *PublishRequest) GetData() *Message {
+ if m != nil {
+ return m.Data
+ }
+ return nil
+}
+
+type PublishRequest_InitMessage struct {
+ Namespace string `protobuf:"bytes,1,opt,name=namespace" json:"namespace,omitempty"`
+ Topic string `protobuf:"bytes,2,opt,name=topic" json:"topic,omitempty"`
+ Partition int32 `protobuf:"varint,3,opt,name=partition" json:"partition,omitempty"`
+}
+
+func (m *PublishRequest_InitMessage) Reset() { *m = PublishRequest_InitMessage{} }
+func (m *PublishRequest_InitMessage) String() string { return proto.CompactTextString(m) }
+func (*PublishRequest_InitMessage) ProtoMessage() {}
+func (*PublishRequest_InitMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3, 0} }
+
+func (m *PublishRequest_InitMessage) GetNamespace() string {
+ if m != nil {
+ return m.Namespace
+ }
+ return ""
+}
+
+func (m *PublishRequest_InitMessage) GetTopic() string {
+ if m != nil {
+ return m.Topic
+ }
+ return ""
+}
+
+func (m *PublishRequest_InitMessage) GetPartition() int32 {
+ if m != nil {
+ return m.Partition
+ }
+ return 0
+}
+
+type PublishResponse struct {
+ Config *PublishResponse_ConfigMessage `protobuf:"bytes,1,opt,name=config" json:"config,omitempty"`
+ Redirect *PublishResponse_RedirectMessage `protobuf:"bytes,2,opt,name=redirect" json:"redirect,omitempty"`
+ IsClosed bool `protobuf:"varint,3,opt,name=is_closed,json=isClosed" json:"is_closed,omitempty"`
+}
+
+func (m *PublishResponse) Reset() { *m = PublishResponse{} }
+func (m *PublishResponse) String() string { return proto.CompactTextString(m) }
+func (*PublishResponse) ProtoMessage() {}
+func (*PublishResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
+
+func (m *PublishResponse) GetConfig() *PublishResponse_ConfigMessage {
+ if m != nil {
+ return m.Config
+ }
+ return nil
+}
+
+func (m *PublishResponse) GetRedirect() *PublishResponse_RedirectMessage {
+ if m != nil {
+ return m.Redirect
+ }
+ return nil
+}
+
+func (m *PublishResponse) GetIsClosed() bool {
+ if m != nil {
+ return m.IsClosed
+ }
+ return false
+}
+
+type PublishResponse_ConfigMessage struct {
+ PartitionCount int32 `protobuf:"varint,1,opt,name=partition_count,json=partitionCount" json:"partition_count,omitempty"`
+}
+
+func (m *PublishResponse_ConfigMessage) Reset() { *m = PublishResponse_ConfigMessage{} }
+func (m *PublishResponse_ConfigMessage) String() string { return proto.CompactTextString(m) }
+func (*PublishResponse_ConfigMessage) ProtoMessage() {}
+func (*PublishResponse_ConfigMessage) Descriptor() ([]byte, []int) {
+ return fileDescriptor0, []int{4, 0}
+}
+
+func (m *PublishResponse_ConfigMessage) GetPartitionCount() int32 {
+ if m != nil {
+ return m.PartitionCount
+ }
+ return 0
+}
+
+type PublishResponse_RedirectMessage struct {
+ NewBroker string `protobuf:"bytes,1,opt,name=new_broker,json=newBroker" json:"new_broker,omitempty"`
+}
+
+func (m *PublishResponse_RedirectMessage) Reset() { *m = PublishResponse_RedirectMessage{} }
+func (m *PublishResponse_RedirectMessage) String() string { return proto.CompactTextString(m) }
+func (*PublishResponse_RedirectMessage) ProtoMessage() {}
+func (*PublishResponse_RedirectMessage) Descriptor() ([]byte, []int) {
+ return fileDescriptor0, []int{4, 1}
+}
+
+func (m *PublishResponse_RedirectMessage) GetNewBroker() string {
+ if m != nil {
+ return m.NewBroker
+ }
+ return ""
+}
+
+type DeleteTopicRequest struct {
+ Namespace string `protobuf:"bytes,1,opt,name=namespace" json:"namespace,omitempty"`
+ Topic string `protobuf:"bytes,2,opt,name=topic" json:"topic,omitempty"`
+}
+
+func (m *DeleteTopicRequest) Reset() { *m = DeleteTopicRequest{} }
+func (m *DeleteTopicRequest) String() string { return proto.CompactTextString(m) }
+func (*DeleteTopicRequest) ProtoMessage() {}
+func (*DeleteTopicRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
+
+func (m *DeleteTopicRequest) GetNamespace() string {
+ if m != nil {
+ return m.Namespace
+ }
+ return ""
+}
+
+func (m *DeleteTopicRequest) GetTopic() string {
+ if m != nil {
+ return m.Topic
+ }
+ return ""
+}
+
+type DeleteTopicResponse struct {
+}
+
+func (m *DeleteTopicResponse) Reset() { *m = DeleteTopicResponse{} }
+func (m *DeleteTopicResponse) String() string { return proto.CompactTextString(m) }
+func (*DeleteTopicResponse) ProtoMessage() {}
+func (*DeleteTopicResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
+
+type ConfigureTopicRequest struct {
+ Namespace string `protobuf:"bytes,1,opt,name=namespace" json:"namespace,omitempty"`
+ Topic string `protobuf:"bytes,2,opt,name=topic" json:"topic,omitempty"`
+ Configuration *TopicConfiguration `protobuf:"bytes,3,opt,name=configuration" json:"configuration,omitempty"`
+}
+
+func (m *ConfigureTopicRequest) Reset() { *m = ConfigureTopicRequest{} }
+func (m *ConfigureTopicRequest) String() string { return proto.CompactTextString(m) }
+func (*ConfigureTopicRequest) ProtoMessage() {}
+func (*ConfigureTopicRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
+
+func (m *ConfigureTopicRequest) GetNamespace() string {
+ if m != nil {
+ return m.Namespace
+ }
+ return ""
+}
+
+func (m *ConfigureTopicRequest) GetTopic() string {
+ if m != nil {
+ return m.Topic
+ }
+ return ""
+}
+
+func (m *ConfigureTopicRequest) GetConfiguration() *TopicConfiguration {
+ if m != nil {
+ return m.Configuration
+ }
+ return nil
+}
+
+type ConfigureTopicResponse struct {
+}
+
+func (m *ConfigureTopicResponse) Reset() { *m = ConfigureTopicResponse{} }
+func (m *ConfigureTopicResponse) String() string { return proto.CompactTextString(m) }
+func (*ConfigureTopicResponse) ProtoMessage() {}
+func (*ConfigureTopicResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
+
+type GetTopicConfigurationRequest struct {
+ Namespace string `protobuf:"bytes,1,opt,name=namespace" json:"namespace,omitempty"`
+ Topic string `protobuf:"bytes,2,opt,name=topic" json:"topic,omitempty"`
+}
+
+func (m *GetTopicConfigurationRequest) Reset() { *m = GetTopicConfigurationRequest{} }
+func (m *GetTopicConfigurationRequest) String() string { return proto.CompactTextString(m) }
+func (*GetTopicConfigurationRequest) ProtoMessage() {}
+func (*GetTopicConfigurationRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} }
+
+func (m *GetTopicConfigurationRequest) GetNamespace() string {
+ if m != nil {
+ return m.Namespace
+ }
+ return ""
+}
+
+func (m *GetTopicConfigurationRequest) GetTopic() string {
+ if m != nil {
+ return m.Topic
+ }
+ return ""
+}
+
+type GetTopicConfigurationResponse struct {
+ Configuration *TopicConfiguration `protobuf:"bytes,1,opt,name=configuration" json:"configuration,omitempty"`
+}
+
+func (m *GetTopicConfigurationResponse) Reset() { *m = GetTopicConfigurationResponse{} }
+func (m *GetTopicConfigurationResponse) String() string { return proto.CompactTextString(m) }
+func (*GetTopicConfigurationResponse) ProtoMessage() {}
+func (*GetTopicConfigurationResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} }
+
+func (m *GetTopicConfigurationResponse) GetConfiguration() *TopicConfiguration {
+ if m != nil {
+ return m.Configuration
+ }
+ return nil
+}
+
+type FindBrokerRequest struct {
+ Namespace string `protobuf:"bytes,1,opt,name=namespace" json:"namespace,omitempty"`
+ Topic string `protobuf:"bytes,2,opt,name=topic" json:"topic,omitempty"`
+ Parition int32 `protobuf:"varint,3,opt,name=parition" json:"parition,omitempty"`
+}
+
+func (m *FindBrokerRequest) Reset() { *m = FindBrokerRequest{} }
+func (m *FindBrokerRequest) String() string { return proto.CompactTextString(m) }
+func (*FindBrokerRequest) ProtoMessage() {}
+func (*FindBrokerRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} }
+
+func (m *FindBrokerRequest) GetNamespace() string {
+ if m != nil {
+ return m.Namespace
+ }
+ return ""
+}
+
+func (m *FindBrokerRequest) GetTopic() string {
+ if m != nil {
+ return m.Topic
+ }
+ return ""
+}
+
+func (m *FindBrokerRequest) GetParition() int32 {
+ if m != nil {
+ return m.Parition
+ }
+ return 0
+}
+
+type FindBrokerResponse struct {
+ Broker string `protobuf:"bytes,1,opt,name=broker" json:"broker,omitempty"`
+}
+
+func (m *FindBrokerResponse) Reset() { *m = FindBrokerResponse{} }
+func (m *FindBrokerResponse) String() string { return proto.CompactTextString(m) }
+func (*FindBrokerResponse) ProtoMessage() {}
+func (*FindBrokerResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} }
+
+func (m *FindBrokerResponse) GetBroker() string {
+ if m != nil {
+ return m.Broker
+ }
+ return ""
+}
+
+type TopicConfiguration struct {
+ PartitionCount int32 `protobuf:"varint,1,opt,name=partition_count,json=partitionCount" json:"partition_count,omitempty"`
+ Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"`
+ Replication string `protobuf:"bytes,3,opt,name=replication" json:"replication,omitempty"`
+ IsTransient bool `protobuf:"varint,4,opt,name=is_transient,json=isTransient" json:"is_transient,omitempty"`
+ Partitoning TopicConfiguration_Partitioning `protobuf:"varint,5,opt,name=partitoning,enum=messaging_pb.TopicConfiguration_Partitioning" json:"partitoning,omitempty"`
+}
+
+func (m *TopicConfiguration) Reset() { *m = TopicConfiguration{} }
+func (m *TopicConfiguration) String() string { return proto.CompactTextString(m) }
+func (*TopicConfiguration) ProtoMessage() {}
+func (*TopicConfiguration) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} }
+
+func (m *TopicConfiguration) GetPartitionCount() int32 {
+ if m != nil {
+ return m.PartitionCount
+ }
+ return 0
+}
+
+func (m *TopicConfiguration) GetCollection() string {
+ if m != nil {
+ return m.Collection
+ }
+ return ""
+}
+
+func (m *TopicConfiguration) GetReplication() string {
+ if m != nil {
+ return m.Replication
+ }
+ return ""
+}
+
+func (m *TopicConfiguration) GetIsTransient() bool {
+ if m != nil {
+ return m.IsTransient
+ }
+ return false
+}
+
+func (m *TopicConfiguration) GetPartitoning() TopicConfiguration_Partitioning {
+ if m != nil {
+ return m.Partitoning
+ }
+ return TopicConfiguration_NonNullKeyHash
+}
+
+func init() {
+ proto.RegisterType((*SubscriberMessage)(nil), "messaging_pb.SubscriberMessage")
+ proto.RegisterType((*SubscriberMessage_InitMessage)(nil), "messaging_pb.SubscriberMessage.InitMessage")
+ proto.RegisterType((*SubscriberMessage_AckMessage)(nil), "messaging_pb.SubscriberMessage.AckMessage")
+ proto.RegisterType((*Message)(nil), "messaging_pb.Message")
+ proto.RegisterType((*BrokerMessage)(nil), "messaging_pb.BrokerMessage")
+ proto.RegisterType((*PublishRequest)(nil), "messaging_pb.PublishRequest")
+ proto.RegisterType((*PublishRequest_InitMessage)(nil), "messaging_pb.PublishRequest.InitMessage")
+ proto.RegisterType((*PublishResponse)(nil), "messaging_pb.PublishResponse")
+ proto.RegisterType((*PublishResponse_ConfigMessage)(nil), "messaging_pb.PublishResponse.ConfigMessage")
+ proto.RegisterType((*PublishResponse_RedirectMessage)(nil), "messaging_pb.PublishResponse.RedirectMessage")
+ proto.RegisterType((*DeleteTopicRequest)(nil), "messaging_pb.DeleteTopicRequest")
+ proto.RegisterType((*DeleteTopicResponse)(nil), "messaging_pb.DeleteTopicResponse")
+ proto.RegisterType((*ConfigureTopicRequest)(nil), "messaging_pb.ConfigureTopicRequest")
+ proto.RegisterType((*ConfigureTopicResponse)(nil), "messaging_pb.ConfigureTopicResponse")
+ proto.RegisterType((*GetTopicConfigurationRequest)(nil), "messaging_pb.GetTopicConfigurationRequest")
+ proto.RegisterType((*GetTopicConfigurationResponse)(nil), "messaging_pb.GetTopicConfigurationResponse")
+ proto.RegisterType((*FindBrokerRequest)(nil), "messaging_pb.FindBrokerRequest")
+ proto.RegisterType((*FindBrokerResponse)(nil), "messaging_pb.FindBrokerResponse")
+ proto.RegisterType((*TopicConfiguration)(nil), "messaging_pb.TopicConfiguration")
+ proto.RegisterEnum("messaging_pb.SubscriberMessage_InitMessage_StartPosition", SubscriberMessage_InitMessage_StartPosition_name, SubscriberMessage_InitMessage_StartPosition_value)
+ proto.RegisterEnum("messaging_pb.TopicConfiguration_Partitioning", TopicConfiguration_Partitioning_name, TopicConfiguration_Partitioning_value)
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// Client API for SeaweedMessaging service
+
+type SeaweedMessagingClient interface {
+ Subscribe(ctx context.Context, opts ...grpc.CallOption) (SeaweedMessaging_SubscribeClient, error)
+ Publish(ctx context.Context, opts ...grpc.CallOption) (SeaweedMessaging_PublishClient, error)
+ DeleteTopic(ctx context.Context, in *DeleteTopicRequest, opts ...grpc.CallOption) (*DeleteTopicResponse, error)
+ ConfigureTopic(ctx context.Context, in *ConfigureTopicRequest, opts ...grpc.CallOption) (*ConfigureTopicResponse, error)
+ GetTopicConfiguration(ctx context.Context, in *GetTopicConfigurationRequest, opts ...grpc.CallOption) (*GetTopicConfigurationResponse, error)
+ FindBroker(ctx context.Context, in *FindBrokerRequest, opts ...grpc.CallOption) (*FindBrokerResponse, error)
+}
+
+type seaweedMessagingClient struct {
+ cc *grpc.ClientConn
+}
+
+func NewSeaweedMessagingClient(cc *grpc.ClientConn) SeaweedMessagingClient {
+ return &seaweedMessagingClient{cc}
+}
+
+func (c *seaweedMessagingClient) Subscribe(ctx context.Context, opts ...grpc.CallOption) (SeaweedMessaging_SubscribeClient, error) {
+ stream, err := grpc.NewClientStream(ctx, &_SeaweedMessaging_serviceDesc.Streams[0], c.cc, "/messaging_pb.SeaweedMessaging/Subscribe", opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &seaweedMessagingSubscribeClient{stream}
+ return x, nil
+}
+
+type SeaweedMessaging_SubscribeClient interface {
+ Send(*SubscriberMessage) error
+ Recv() (*BrokerMessage, error)
+ grpc.ClientStream
+}
+
+type seaweedMessagingSubscribeClient struct {
+ grpc.ClientStream
+}
+
+func (x *seaweedMessagingSubscribeClient) Send(m *SubscriberMessage) error {
+ return x.ClientStream.SendMsg(m)
+}
+
+func (x *seaweedMessagingSubscribeClient) Recv() (*BrokerMessage, error) {
+ m := new(BrokerMessage)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func (c *seaweedMessagingClient) Publish(ctx context.Context, opts ...grpc.CallOption) (SeaweedMessaging_PublishClient, error) {
+ stream, err := grpc.NewClientStream(ctx, &_SeaweedMessaging_serviceDesc.Streams[1], c.cc, "/messaging_pb.SeaweedMessaging/Publish", opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &seaweedMessagingPublishClient{stream}
+ return x, nil
+}
+
+type SeaweedMessaging_PublishClient interface {
+ Send(*PublishRequest) error
+ Recv() (*PublishResponse, error)
+ grpc.ClientStream
+}
+
+type seaweedMessagingPublishClient struct {
+ grpc.ClientStream
+}
+
+func (x *seaweedMessagingPublishClient) Send(m *PublishRequest) error {
+ return x.ClientStream.SendMsg(m)
+}
+
+func (x *seaweedMessagingPublishClient) Recv() (*PublishResponse, error) {
+ m := new(PublishResponse)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func (c *seaweedMessagingClient) DeleteTopic(ctx context.Context, in *DeleteTopicRequest, opts ...grpc.CallOption) (*DeleteTopicResponse, error) {
+ out := new(DeleteTopicResponse)
+ err := grpc.Invoke(ctx, "/messaging_pb.SeaweedMessaging/DeleteTopic", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *seaweedMessagingClient) ConfigureTopic(ctx context.Context, in *ConfigureTopicRequest, opts ...grpc.CallOption) (*ConfigureTopicResponse, error) {
+ out := new(ConfigureTopicResponse)
+ err := grpc.Invoke(ctx, "/messaging_pb.SeaweedMessaging/ConfigureTopic", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *seaweedMessagingClient) GetTopicConfiguration(ctx context.Context, in *GetTopicConfigurationRequest, opts ...grpc.CallOption) (*GetTopicConfigurationResponse, error) {
+ out := new(GetTopicConfigurationResponse)
+ err := grpc.Invoke(ctx, "/messaging_pb.SeaweedMessaging/GetTopicConfiguration", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *seaweedMessagingClient) FindBroker(ctx context.Context, in *FindBrokerRequest, opts ...grpc.CallOption) (*FindBrokerResponse, error) {
+ out := new(FindBrokerResponse)
+ err := grpc.Invoke(ctx, "/messaging_pb.SeaweedMessaging/FindBroker", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// Server API for SeaweedMessaging service
+
+type SeaweedMessagingServer interface {
+ Subscribe(SeaweedMessaging_SubscribeServer) error
+ Publish(SeaweedMessaging_PublishServer) error
+ DeleteTopic(context.Context, *DeleteTopicRequest) (*DeleteTopicResponse, error)
+ ConfigureTopic(context.Context, *ConfigureTopicRequest) (*ConfigureTopicResponse, error)
+ GetTopicConfiguration(context.Context, *GetTopicConfigurationRequest) (*GetTopicConfigurationResponse, error)
+ FindBroker(context.Context, *FindBrokerRequest) (*FindBrokerResponse, error)
+}
+
+func RegisterSeaweedMessagingServer(s *grpc.Server, srv SeaweedMessagingServer) {
+ s.RegisterService(&_SeaweedMessaging_serviceDesc, srv)
+}
+
+func _SeaweedMessaging_Subscribe_Handler(srv interface{}, stream grpc.ServerStream) error {
+ return srv.(SeaweedMessagingServer).Subscribe(&seaweedMessagingSubscribeServer{stream})
+}
+
+type SeaweedMessaging_SubscribeServer interface {
+ Send(*BrokerMessage) error
+ Recv() (*SubscriberMessage, error)
+ grpc.ServerStream
+}
+
+type seaweedMessagingSubscribeServer struct {
+ grpc.ServerStream
+}
+
+func (x *seaweedMessagingSubscribeServer) Send(m *BrokerMessage) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+func (x *seaweedMessagingSubscribeServer) Recv() (*SubscriberMessage, error) {
+ m := new(SubscriberMessage)
+ if err := x.ServerStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func _SeaweedMessaging_Publish_Handler(srv interface{}, stream grpc.ServerStream) error {
+ return srv.(SeaweedMessagingServer).Publish(&seaweedMessagingPublishServer{stream})
+}
+
+type SeaweedMessaging_PublishServer interface {
+ Send(*PublishResponse) error
+ Recv() (*PublishRequest, error)
+ grpc.ServerStream
+}
+
+type seaweedMessagingPublishServer struct {
+ grpc.ServerStream
+}
+
+func (x *seaweedMessagingPublishServer) Send(m *PublishResponse) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+func (x *seaweedMessagingPublishServer) Recv() (*PublishRequest, error) {
+ m := new(PublishRequest)
+ if err := x.ServerStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func _SeaweedMessaging_DeleteTopic_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(DeleteTopicRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(SeaweedMessagingServer).DeleteTopic(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/messaging_pb.SeaweedMessaging/DeleteTopic",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(SeaweedMessagingServer).DeleteTopic(ctx, req.(*DeleteTopicRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _SeaweedMessaging_ConfigureTopic_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ConfigureTopicRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(SeaweedMessagingServer).ConfigureTopic(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/messaging_pb.SeaweedMessaging/ConfigureTopic",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(SeaweedMessagingServer).ConfigureTopic(ctx, req.(*ConfigureTopicRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _SeaweedMessaging_GetTopicConfiguration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetTopicConfigurationRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(SeaweedMessagingServer).GetTopicConfiguration(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/messaging_pb.SeaweedMessaging/GetTopicConfiguration",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(SeaweedMessagingServer).GetTopicConfiguration(ctx, req.(*GetTopicConfigurationRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _SeaweedMessaging_FindBroker_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(FindBrokerRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(SeaweedMessagingServer).FindBroker(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/messaging_pb.SeaweedMessaging/FindBroker",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(SeaweedMessagingServer).FindBroker(ctx, req.(*FindBrokerRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _SeaweedMessaging_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "messaging_pb.SeaweedMessaging",
+ HandlerType: (*SeaweedMessagingServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "DeleteTopic",
+ Handler: _SeaweedMessaging_DeleteTopic_Handler,
+ },
+ {
+ MethodName: "ConfigureTopic",
+ Handler: _SeaweedMessaging_ConfigureTopic_Handler,
+ },
+ {
+ MethodName: "GetTopicConfiguration",
+ Handler: _SeaweedMessaging_GetTopicConfiguration_Handler,
+ },
+ {
+ MethodName: "FindBroker",
+ Handler: _SeaweedMessaging_FindBroker_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{
+ {
+ StreamName: "Subscribe",
+ Handler: _SeaweedMessaging_Subscribe_Handler,
+ ServerStreams: true,
+ ClientStreams: true,
+ },
+ {
+ StreamName: "Publish",
+ Handler: _SeaweedMessaging_Publish_Handler,
+ ServerStreams: true,
+ ClientStreams: true,
+ },
+ },
+ Metadata: "messaging.proto",
+}
+
+func init() { proto.RegisterFile("messaging.proto", fileDescriptor0) }
+
+var fileDescriptor0 = []byte{
+ // 1002 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x56, 0xdd, 0x6e, 0xe3, 0x54,
+ 0x10, 0xae, 0xdd, 0xfc, 0x8e, 0x93, 0x34, 0x3b, 0xd0, 0x55, 0xf0, 0xb6, 0x90, 0xf5, 0x22, 0x08,
+ 0x14, 0xa2, 0x2a, 0xdc, 0x94, 0x6a, 0xa5, 0x55, 0x1b, 0xba, 0x34, 0xa2, 0xed, 0x86, 0x93, 0xdc,
+ 0x22, 0xcb, 0xb1, 0xcf, 0xa6, 0x47, 0x75, 0x8e, 0x8d, 0x8f, 0xb3, 0x55, 0x9f, 0x83, 0x7b, 0x1e,
+ 0x00, 0x89, 0x3b, 0x5e, 0x80, 0xd7, 0xe0, 0x21, 0x78, 0x06, 0xe4, 0xdf, 0xd8, 0x49, 0x36, 0x5d,
+ 0xb6, 0xda, 0xbb, 0x9c, 0xc9, 0x37, 0x33, 0xdf, 0x99, 0xf9, 0x66, 0x8e, 0x61, 0x67, 0x46, 0x85,
+ 0x30, 0xa6, 0x8c, 0x4f, 0xbb, 0xae, 0xe7, 0xf8, 0x0e, 0xd6, 0x52, 0x83, 0xee, 0x4e, 0xb4, 0xdf,
+ 0x0b, 0xf0, 0x68, 0x34, 0x9f, 0x08, 0xd3, 0x63, 0x13, 0xea, 0x5d, 0x86, 0x7f, 0x51, 0x7c, 0x01,
+ 0x05, 0xc6, 0x99, 0xdf, 0x92, 0xda, 0x52, 0x47, 0xe9, 0x1d, 0x74, 0xb3, 0x2e, 0xdd, 0x15, 0x78,
+ 0x77, 0xc0, 0x99, 0x1f, 0xff, 0x26, 0xa1, 0x23, 0x3e, 0x87, 0x6d, 0xc3, 0xbc, 0x69, 0xc9, 0xa1,
+ 0xff, 0xd7, 0xf7, 0xf9, 0x9f, 0x98, 0x37, 0x89, 0x7b, 0xe0, 0x86, 0x9f, 0x40, 0x85, 0x09, 0xdd,
+ 0xb4, 0x1d, 0x41, 0x5b, 0xdb, 0x6d, 0xa9, 0x53, 0x21, 0x65, 0x26, 0xfa, 0xc1, 0x51, 0xfd, 0x5b,
+ 0x06, 0x25, 0x93, 0x0e, 0xf7, 0xa0, 0xca, 0x8d, 0x19, 0x15, 0xae, 0x61, 0xd2, 0x90, 0x6e, 0x95,
+ 0x2c, 0x0c, 0xf8, 0x31, 0x14, 0x7d, 0xc7, 0x65, 0x66, 0x48, 0xa4, 0x4a, 0xa2, 0x43, 0xe0, 0xe3,
+ 0x1a, 0x9e, 0xcf, 0x7c, 0xe6, 0xf0, 0x30, 0x7e, 0x91, 0x2c, 0x0c, 0xa8, 0x43, 0x5d, 0xf8, 0x86,
+ 0xe7, 0x0f, 0x1d, 0x11, 0x21, 0x0a, 0x6d, 0xa9, 0xd3, 0xe8, 0x7d, 0xff, 0x3f, 0x8a, 0xd0, 0x1d,
+ 0x65, 0x03, 0x90, 0x7c, 0x3c, 0x6c, 0x83, 0xe2, 0xb3, 0x19, 0x15, 0xbe, 0x31, 0x73, 0xaf, 0x44,
+ 0xab, 0xd8, 0x96, 0x3a, 0xdb, 0x24, 0x6b, 0xc2, 0x67, 0x50, 0x17, 0x69, 0x7c, 0x9d, 0x59, 0xad,
+ 0x52, 0x48, 0xbf, 0xb6, 0x30, 0x0e, 0x2c, 0xed, 0x08, 0xea, 0xb9, 0x34, 0x08, 0x50, 0xba, 0x38,
+ 0x19, 0x9f, 0x8d, 0xc6, 0xcd, 0x2d, 0xac, 0x41, 0xe5, 0xec, 0x84, 0x5c, 0x0c, 0x82, 0x93, 0x84,
+ 0x75, 0xa8, 0x8e, 0x07, 0x97, 0x67, 0xa3, 0xf1, 0xc9, 0xe5, 0xb0, 0x29, 0xab, 0x07, 0x00, 0x8b,
+ 0x8a, 0xe3, 0x3e, 0x40, 0x74, 0x33, 0x1a, 0x64, 0x92, 0x42, 0x36, 0xd5, 0xd8, 0x32, 0xb0, 0xb4,
+ 0x7f, 0x25, 0x28, 0x27, 0xd0, 0x2f, 0xa0, 0x4e, 0xdf, 0x50, 0xee, 0xeb, 0x01, 0x59, 0x9d, 0x8b,
+ 0x08, 0x7d, 0x2a, 0x1f, 0x4a, 0x44, 0x09, 0xff, 0x18, 0xb3, 0x19, 0xbd, 0x12, 0xd8, 0x84, 0xed,
+ 0x1b, 0x7a, 0x17, 0x16, 0xbd, 0x46, 0x82, 0x9f, 0x41, 0x23, 0xde, 0x18, 0xf6, 0x3c, 0x6a, 0x67,
+ 0x8d, 0x44, 0x07, 0x7c, 0x0e, 0xe5, 0x6b, 0x6a, 0x58, 0xd4, 0x13, 0xad, 0x42, 0x7b, 0xbb, 0xa3,
+ 0xf4, 0xb4, 0x7c, 0x91, 0x93, 0x72, 0x9e, 0x47, 0xa0, 0x33, 0xee, 0x7b, 0x77, 0x24, 0x71, 0xc9,
+ 0xa9, 0xa4, 0x98, 0x57, 0xc9, 0x31, 0xd4, 0xb2, 0x3e, 0x09, 0xa1, 0x48, 0x1f, 0x79, 0x42, 0x72,
+ 0x86, 0xd0, 0xb1, 0x7c, 0x24, 0x69, 0xc7, 0x50, 0x3f, 0xf5, 0x9c, 0x9b, 0xc5, 0x30, 0x7c, 0x05,
+ 0x05, 0xcb, 0xf0, 0x8d, 0x78, 0x18, 0x76, 0xd7, 0x52, 0x24, 0x21, 0x44, 0xfb, 0x47, 0x82, 0xc6,
+ 0x70, 0x3e, 0xb1, 0x99, 0xb8, 0x26, 0xf4, 0xd7, 0x39, 0x15, 0xc1, 0x24, 0x64, 0x47, 0xa9, 0x93,
+ 0xf7, 0xce, 0x63, 0xd7, 0xcc, 0x51, 0x92, 0x5b, 0xbe, 0x37, 0xb7, 0xaa, 0x7f, 0xe0, 0xc1, 0xd0,
+ 0xfe, 0x90, 0x61, 0x27, 0x25, 0x2c, 0x5c, 0x87, 0x0b, 0x8a, 0x7d, 0x28, 0x99, 0x0e, 0x7f, 0xcd,
+ 0xa6, 0xeb, 0x57, 0xc5, 0x12, 0xbc, 0xdb, 0x0f, 0xb1, 0x09, 0xef, 0xd8, 0x15, 0x07, 0x50, 0xf1,
+ 0xa8, 0xc5, 0x3c, 0x6a, 0xfa, 0xf1, 0x45, 0xbf, 0xdd, 0x1c, 0x86, 0xc4, 0xe8, 0x24, 0x50, 0xea,
+ 0x8e, 0x4f, 0xa0, 0x9a, 0x68, 0xc2, 0x8a, 0x57, 0x47, 0x25, 0x16, 0x85, 0xa5, 0x1e, 0x41, 0x3d,
+ 0x47, 0x00, 0xbf, 0x84, 0x9d, 0xf4, 0x7a, 0xba, 0xe9, 0xcc, 0x79, 0xd4, 0xa6, 0x22, 0x69, 0xa4,
+ 0xe6, 0x7e, 0x60, 0x55, 0x0f, 0x61, 0x67, 0x29, 0x67, 0x30, 0x36, 0x9c, 0xde, 0xea, 0x93, 0x50,
+ 0x2a, 0x69, 0x81, 0xe9, 0x6d, 0xa4, 0x1d, 0xed, 0x1c, 0xf0, 0x07, 0x6a, 0x53, 0x9f, 0x8e, 0x83,
+ 0xca, 0x26, 0x62, 0x78, 0x8f, 0xa6, 0x68, 0xbb, 0xf0, 0x51, 0x2e, 0x52, 0x54, 0x03, 0xed, 0x37,
+ 0x09, 0x76, 0xa3, 0xdb, 0xcc, 0xbd, 0x07, 0x27, 0xc1, 0x97, 0x50, 0x37, 0xe3, 0x60, 0x46, 0xda,
+ 0x7d, 0xa5, 0xd7, 0xce, 0xf7, 0x21, 0x4c, 0xd3, 0xcf, 0xe2, 0x48, 0xde, 0x4d, 0x6b, 0xc1, 0xe3,
+ 0x65, 0x52, 0x31, 0x5f, 0x02, 0x7b, 0x3f, 0x52, 0x7f, 0x4d, 0x84, 0x07, 0x94, 0x66, 0x0a, 0xfb,
+ 0x6f, 0x89, 0x19, 0xcb, 0x73, 0xe5, 0x5a, 0xd2, 0xfb, 0x5d, 0xcb, 0x84, 0x47, 0x2f, 0x19, 0xb7,
+ 0xa2, 0xde, 0x3e, 0xa4, 0xce, 0x2a, 0x54, 0x5c, 0xc3, 0xcb, 0x0e, 0x58, 0x7a, 0xd6, 0xbe, 0x01,
+ 0xcc, 0x26, 0x89, 0xaf, 0xf0, 0x18, 0x4a, 0x39, 0x8d, 0xc5, 0x27, 0xed, 0x2f, 0x19, 0x70, 0x95,
+ 0xf8, 0x3b, 0x4b, 0x1a, 0x3f, 0x05, 0x30, 0x1d, 0xdb, 0xa6, 0x66, 0xc8, 0x25, 0x22, 0x99, 0xb1,
+ 0x04, 0xaf, 0x94, 0x47, 0x5d, 0x9b, 0x99, 0x0b, 0x3d, 0x54, 0x49, 0xd6, 0x84, 0x4f, 0xa1, 0xc6,
+ 0x84, 0xee, 0x7b, 0x06, 0x17, 0x8c, 0x72, 0x3f, 0x7c, 0x27, 0x2b, 0x44, 0x61, 0x62, 0x9c, 0x98,
+ 0xf0, 0x15, 0x28, 0x51, 0x5a, 0x87, 0x33, 0x3e, 0x0d, 0xb7, 0x74, 0x63, 0x79, 0xb8, 0x57, 0x2f,
+ 0xd1, 0x1d, 0x26, 0x54, 0x19, 0x9f, 0x92, 0x6c, 0x04, 0xed, 0x05, 0xd4, 0xb2, 0x7f, 0x22, 0x42,
+ 0xe3, 0xca, 0xe1, 0x57, 0x73, 0xdb, 0xfe, 0x89, 0xde, 0x9d, 0x1b, 0xe2, 0xba, 0xb9, 0x85, 0x0a,
+ 0x94, 0x93, 0x83, 0x84, 0x0d, 0x00, 0xe2, 0xcc, 0xb9, 0x45, 0x9c, 0x09, 0xe3, 0x4d, 0xb9, 0xf7,
+ 0x67, 0x01, 0x9a, 0x23, 0x6a, 0xdc, 0x52, 0x6a, 0x5d, 0x26, 0x2c, 0xf0, 0x15, 0x54, 0xd3, 0xf7,
+ 0x1c, 0x3f, 0xbb, 0xe7, 0xa1, 0x57, 0x9f, 0xe4, 0x01, 0xb9, 0xc7, 0x42, 0xdb, 0xea, 0x48, 0x87,
+ 0x12, 0x5e, 0x40, 0x39, 0xde, 0x59, 0xb8, 0xb7, 0x69, 0xe3, 0xab, 0xfb, 0x1b, 0x17, 0x5d, 0x1c,
+ 0x6d, 0x0c, 0x4a, 0x66, 0x03, 0xe0, 0x92, 0x7a, 0x57, 0xd7, 0x8c, 0xfa, 0x74, 0x03, 0x22, 0x89,
+ 0x8c, 0xbf, 0x40, 0x23, 0x3f, 0xaa, 0xf8, 0x2c, 0xef, 0xb6, 0x76, 0xbb, 0xa8, 0x9f, 0x6f, 0x06,
+ 0xa5, 0xe1, 0x3d, 0xd8, 0x5d, 0x3b, 0x9b, 0xb8, 0xf4, 0x35, 0xb8, 0x69, 0x29, 0xa8, 0x07, 0xef,
+ 0x84, 0x4d, 0x73, 0xfe, 0x0c, 0xb0, 0x98, 0xa0, 0xe5, 0x46, 0xae, 0x0c, 0xb0, 0xda, 0x7e, 0x3b,
+ 0x20, 0x09, 0x79, 0xaa, 0x41, 0x53, 0x44, 0x72, 0x79, 0x2d, 0xba, 0xa6, 0x1d, 0xa8, 0xfa, 0xb4,
+ 0x91, 0x2a, 0x67, 0x18, 0x7c, 0x51, 0x4f, 0x4a, 0xe1, 0x87, 0xf5, 0x77, 0xff, 0x05, 0x00, 0x00,
+ 0xff, 0xff, 0x7f, 0x62, 0xba, 0x48, 0x6b, 0x0b, 0x00, 0x00,
+}
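
With the service descriptor and stream wrappers in place, the generated API is used like any other protoc-gen-go/grpc binding. A minimal client sketch, assuming the standard generated NewSeaweedMessagingClient constructor defined earlier in this file and a placeholder broker address (request fields are left empty here; the real ones live in the message types earlier in the file):

package main

import (
	"context"
	"log"

	"github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
	"google.golang.org/grpc"
)

func main() {
	// Hypothetical broker address; plaintext transport for brevity.
	conn, err := grpc.Dial("localhost:17777", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	client := messaging_pb.NewSeaweedMessagingClient(conn)

	// Unary call: one of the four methods registered in _SeaweedMessaging_serviceDesc.
	if _, err := client.DeleteTopic(context.Background(), &messaging_pb.DeleteTopicRequest{}); err != nil {
		log.Println("DeleteTopic:", err)
	}

	// Bidirectional stream: Publish sends PublishRequest messages and reads
	// PublishResponse messages until the stream is closed.
	stream, err := client.Publish(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	if err := stream.Send(&messaging_pb.PublishRequest{}); err != nil {
		log.Fatal(err)
	}
	if resp, err := stream.Recv(); err == nil {
		log.Printf("broker replied: %v", resp)
	}
	_ = stream.CloseSend()
}

Note that Subscribe and Publish are both marked ClientStreams and ServerStreams in the descriptor, so either side may keep sending until the stream is closed; the four topic-management methods stay unary.
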
diff --git a/weed/pb/shared_values.go b/weed/pb/shared_values.go
new file mode 100644
index 000000000..acc3bb56d
--- /dev/null
+++ b/weed/pb/shared_values.go
@@ -0,0 +1,5 @@
+package pb
+
+const (
+ AdminShellClient = "shell"
+)
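
The new constant is small but deliberate: it gives admin shell connections a single well-known client name instead of a scattered string literal. A hypothetical usage sketch:

package main

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/pb"
)

func main() {
	// pb.AdminShellClient ("shell") identifies admin shell connections by a
	// shared constant; both ends of a connection can compare against it.
	fmt.Println("client name:", pb.AdminShellClient)
}
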
diff --git a/weed/pb/volume_server.proto b/weed/pb/volume_server.proto
index 405d41e9c..132f7fbd4 100644
--- a/weed/pb/volume_server.proto
+++ b/weed/pb/volume_server.proto
@@ -8,6 +8,10 @@ service VolumeServer {
//Experts only: takes multiple fid parameters. This function does not propagate deletes to replicas.
rpc BatchDelete (BatchDeleteRequest) returns (BatchDeleteResponse) {
}
+
+ rpc FileGet (FileGetRequest) returns (stream FileGetResponse) {
+ }
+
rpc VacuumVolumeCheck (VacuumVolumeCheckRequest) returns (VacuumVolumeCheckResponse) {
}
rpc VacuumVolumeCompact (VacuumVolumeCompactRequest) returns (VacuumVolumeCompactResponse) {
@@ -77,7 +81,10 @@ service VolumeServer {
rpc VolumeTierMoveDatFromRemote (VolumeTierMoveDatFromRemoteRequest) returns (stream VolumeTierMoveDatFromRemoteResponse) {
}
- // query
+ rpc VolumeServerStatus (VolumeServerStatusRequest) returns (VolumeServerStatusResponse) {
+ }
+
+ // query
rpc Query (QueryRequest) returns (stream QueriedStripe) {
}
@@ -87,6 +94,7 @@ service VolumeServer {
message BatchDeleteRequest {
repeated string file_ids = 1;
+ bool skip_cookie_check = 2;
}
message BatchDeleteResponse {
@@ -100,6 +108,22 @@ message DeleteResult {
uint32 version = 5;
}
+message FileGetRequest {
+ string file_id = 1;
+ bool accept_gzip = 2;
+}
+message FileGetResponse {
+ bytes data = 1;
+ uint32 content_length = 2;
+ string content_type = 3;
+ uint64 last_modified = 4;
+ string filename = 5;
+ string etag = 6;
+ bool is_gzipped = 7;
+ map<string, string> headers = 8;
+ int32 errorCode = 9;
+}
+
message Empty {
}
@@ -121,6 +145,7 @@ message VacuumVolumeCommitRequest {
uint32 volume_id = 1;
}
message VacuumVolumeCommitResponse {
+ bool is_read_only = 1;
}
message VacuumVolumeCleanupRequest {
@@ -340,6 +365,8 @@ message DiskStatus {
uint64 all = 2;
uint64 used = 3;
uint64 free = 4;
+ float percent_free = 5;
+ float percent_used = 6;
}
message MemStatus {
@@ -389,6 +416,14 @@ message VolumeTierMoveDatFromRemoteResponse {
float processedPercentage = 2;
}
+message VolumeServerStatusRequest {
+
+}
+message VolumeServerStatusResponse {
+ repeated DiskStatus disk_statuses = 1;
+ MemStatus memory_status = 2;
+}
+
// select on volume servers
message QueryRequest {
repeated string selections = 1;
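
The regenerated Go bindings for these proto changes follow below. As a rough consumer sketch of the two new RPCs, assuming the standard generated NewVolumeServerClient constructor and a placeholder volume server address (the message and field names match the additions above):

package main

import (
	"context"
	"fmt"
	"io"
	"log"

	"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
	"google.golang.org/grpc"
)

func main() {
	// Hypothetical volume server address; plaintext transport for brevity.
	conn, err := grpc.Dial("localhost:18080", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	client := volume_server_pb.NewVolumeServerClient(conn)

	// VolumeServerStatus is unary and returns per-disk statistics,
	// including the new percent_free/percent_used fields.
	status, err := client.VolumeServerStatus(context.Background(), &volume_server_pb.VolumeServerStatusRequest{})
	if err != nil {
		log.Fatal(err)
	}
	for _, disk := range status.DiskStatuses {
		fmt.Printf("%s: %.1f%% used\n", disk.Dir, disk.PercentUsed)
	}

	// FileGet is server-streaming: the volume server sends the file back in
	// FileGetResponse chunks until io.EOF.
	stream, err := client.FileGet(context.Background(), &volume_server_pb.FileGetRequest{
		FileId:     "3,01637037d6", // placeholder fid
		AcceptGzip: false,
	})
	if err != nil {
		log.Fatal(err)
	}
	var data []byte
	for {
		resp, err := stream.Recv()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		data = append(data, resp.Data...)
	}
	fmt.Printf("fetched %d bytes\n", len(data))
}

Streaming FileGet means a large blob never has to fit in a single gRPC message, and percent_free/percent_used arrive precomputed on the server rather than being derived by every caller.
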
diff --git a/weed/pb/volume_server_pb/volume_server.pb.go b/weed/pb/volume_server_pb/volume_server.pb.go
index 2a8f91bc5..ca96b8d20 100644
--- a/weed/pb/volume_server_pb/volume_server.pb.go
+++ b/weed/pb/volume_server_pb/volume_server.pb.go
@@ -12,6 +12,8 @@ It has these top-level messages:
BatchDeleteRequest
BatchDeleteResponse
DeleteResult
+ FileGetRequest
+ FileGetResponse
Empty
VacuumVolumeCheckRequest
VacuumVolumeCheckResponse
@@ -75,6 +77,8 @@ It has these top-level messages:
VolumeTierMoveDatToRemoteResponse
VolumeTierMoveDatFromRemoteRequest
VolumeTierMoveDatFromRemoteResponse
+ VolumeServerStatusRequest
+ VolumeServerStatusResponse
QueryRequest
QueriedStripe
*/
@@ -101,7 +105,8 @@ var _ = math.Inf
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type BatchDeleteRequest struct {
- FileIds []string `protobuf:"bytes,1,rep,name=file_ids,json=fileIds" json:"file_ids,omitempty"`
+ FileIds []string `protobuf:"bytes,1,rep,name=file_ids,json=fileIds" json:"file_ids,omitempty"`
+ SkipCookieCheck bool `protobuf:"varint,2,opt,name=skip_cookie_check,json=skipCookieCheck" json:"skip_cookie_check,omitempty"`
}
func (m *BatchDeleteRequest) Reset() { *m = BatchDeleteRequest{} }
@@ -116,6 +121,13 @@ func (m *BatchDeleteRequest) GetFileIds() []string {
return nil
}
+func (m *BatchDeleteRequest) GetSkipCookieCheck() bool {
+ if m != nil {
+ return m.SkipCookieCheck
+ }
+ return false
+}
+
type BatchDeleteResponse struct {
Results []*DeleteResult `protobuf:"bytes,1,rep,name=results" json:"results,omitempty"`
}
@@ -180,13 +192,117 @@ func (m *DeleteResult) GetVersion() uint32 {
return 0
}
+type FileGetRequest struct {
+ FileId string `protobuf:"bytes,1,opt,name=file_id,json=fileId" json:"file_id,omitempty"`
+ AcceptGzip bool `protobuf:"varint,2,opt,name=accept_gzip,json=acceptGzip" json:"accept_gzip,omitempty"`
+}
+
+func (m *FileGetRequest) Reset() { *m = FileGetRequest{} }
+func (m *FileGetRequest) String() string { return proto.CompactTextString(m) }
+func (*FileGetRequest) ProtoMessage() {}
+func (*FileGetRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
+
+func (m *FileGetRequest) GetFileId() string {
+ if m != nil {
+ return m.FileId
+ }
+ return ""
+}
+
+func (m *FileGetRequest) GetAcceptGzip() bool {
+ if m != nil {
+ return m.AcceptGzip
+ }
+ return false
+}
+
+type FileGetResponse struct {
+ Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
+ ContentLength uint32 `protobuf:"varint,2,opt,name=content_length,json=contentLength" json:"content_length,omitempty"`
+ ContentType string `protobuf:"bytes,3,opt,name=content_type,json=contentType" json:"content_type,omitempty"`
+ LastModified uint64 `protobuf:"varint,4,opt,name=last_modified,json=lastModified" json:"last_modified,omitempty"`
+ Filename string `protobuf:"bytes,5,opt,name=filename" json:"filename,omitempty"`
+ Etag string `protobuf:"bytes,6,opt,name=etag" json:"etag,omitempty"`
+ IsGzipped bool `protobuf:"varint,7,opt,name=is_gzipped,json=isGzipped" json:"is_gzipped,omitempty"`
+ Headers map[string]string `protobuf:"bytes,8,rep,name=headers" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ ErrorCode int32 `protobuf:"varint,9,opt,name=errorCode" json:"errorCode,omitempty"`
+}
+
+func (m *FileGetResponse) Reset() { *m = FileGetResponse{} }
+func (m *FileGetResponse) String() string { return proto.CompactTextString(m) }
+func (*FileGetResponse) ProtoMessage() {}
+func (*FileGetResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
+
+func (m *FileGetResponse) GetData() []byte {
+ if m != nil {
+ return m.Data
+ }
+ return nil
+}
+
+func (m *FileGetResponse) GetContentLength() uint32 {
+ if m != nil {
+ return m.ContentLength
+ }
+ return 0
+}
+
+func (m *FileGetResponse) GetContentType() string {
+ if m != nil {
+ return m.ContentType
+ }
+ return ""
+}
+
+func (m *FileGetResponse) GetLastModified() uint64 {
+ if m != nil {
+ return m.LastModified
+ }
+ return 0
+}
+
+func (m *FileGetResponse) GetFilename() string {
+ if m != nil {
+ return m.Filename
+ }
+ return ""
+}
+
+func (m *FileGetResponse) GetEtag() string {
+ if m != nil {
+ return m.Etag
+ }
+ return ""
+}
+
+func (m *FileGetResponse) GetIsGzipped() bool {
+ if m != nil {
+ return m.IsGzipped
+ }
+ return false
+}
+
+func (m *FileGetResponse) GetHeaders() map[string]string {
+ if m != nil {
+ return m.Headers
+ }
+ return nil
+}
+
+func (m *FileGetResponse) GetErrorCode() int32 {
+ if m != nil {
+ return m.ErrorCode
+ }
+ return 0
+}
+
type Empty struct {
}
func (m *Empty) Reset() { *m = Empty{} }
func (m *Empty) String() string { return proto.CompactTextString(m) }
func (*Empty) ProtoMessage() {}
-func (*Empty) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
+func (*Empty) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
type VacuumVolumeCheckRequest struct {
VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
@@ -195,7 +311,7 @@ type VacuumVolumeCheckRequest struct {
func (m *VacuumVolumeCheckRequest) Reset() { *m = VacuumVolumeCheckRequest{} }
func (m *VacuumVolumeCheckRequest) String() string { return proto.CompactTextString(m) }
func (*VacuumVolumeCheckRequest) ProtoMessage() {}
-func (*VacuumVolumeCheckRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
+func (*VacuumVolumeCheckRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
func (m *VacuumVolumeCheckRequest) GetVolumeId() uint32 {
if m != nil {
@@ -211,7 +327,7 @@ type VacuumVolumeCheckResponse struct {
func (m *VacuumVolumeCheckResponse) Reset() { *m = VacuumVolumeCheckResponse{} }
func (m *VacuumVolumeCheckResponse) String() string { return proto.CompactTextString(m) }
func (*VacuumVolumeCheckResponse) ProtoMessage() {}
-func (*VacuumVolumeCheckResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
+func (*VacuumVolumeCheckResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
func (m *VacuumVolumeCheckResponse) GetGarbageRatio() float64 {
if m != nil {
@@ -228,7 +344,7 @@ type VacuumVolumeCompactRequest struct {
func (m *VacuumVolumeCompactRequest) Reset() { *m = VacuumVolumeCompactRequest{} }
func (m *VacuumVolumeCompactRequest) String() string { return proto.CompactTextString(m) }
func (*VacuumVolumeCompactRequest) ProtoMessage() {}
-func (*VacuumVolumeCompactRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
+func (*VacuumVolumeCompactRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
func (m *VacuumVolumeCompactRequest) GetVolumeId() uint32 {
if m != nil {
@@ -250,7 +366,7 @@ type VacuumVolumeCompactResponse struct {
func (m *VacuumVolumeCompactResponse) Reset() { *m = VacuumVolumeCompactResponse{} }
func (m *VacuumVolumeCompactResponse) String() string { return proto.CompactTextString(m) }
func (*VacuumVolumeCompactResponse) ProtoMessage() {}
-func (*VacuumVolumeCompactResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
+func (*VacuumVolumeCompactResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} }
type VacuumVolumeCommitRequest struct {
VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
@@ -259,7 +375,7 @@ type VacuumVolumeCommitRequest struct {
func (m *VacuumVolumeCommitRequest) Reset() { *m = VacuumVolumeCommitRequest{} }
func (m *VacuumVolumeCommitRequest) String() string { return proto.CompactTextString(m) }
func (*VacuumVolumeCommitRequest) ProtoMessage() {}
-func (*VacuumVolumeCommitRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
+func (*VacuumVolumeCommitRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} }
func (m *VacuumVolumeCommitRequest) GetVolumeId() uint32 {
if m != nil {
@@ -269,12 +385,20 @@ func (m *VacuumVolumeCommitRequest) GetVolumeId() uint32 {
}
type VacuumVolumeCommitResponse struct {
+ IsReadOnly bool `protobuf:"varint,1,opt,name=is_read_only,json=isReadOnly" json:"is_read_only,omitempty"`
}
func (m *VacuumVolumeCommitResponse) Reset() { *m = VacuumVolumeCommitResponse{} }
func (m *VacuumVolumeCommitResponse) String() string { return proto.CompactTextString(m) }
func (*VacuumVolumeCommitResponse) ProtoMessage() {}
-func (*VacuumVolumeCommitResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} }
+func (*VacuumVolumeCommitResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} }
+
+func (m *VacuumVolumeCommitResponse) GetIsReadOnly() bool {
+ if m != nil {
+ return m.IsReadOnly
+ }
+ return false
+}
type VacuumVolumeCleanupRequest struct {
VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
@@ -283,7 +407,7 @@ type VacuumVolumeCleanupRequest struct {
func (m *VacuumVolumeCleanupRequest) Reset() { *m = VacuumVolumeCleanupRequest{} }
func (m *VacuumVolumeCleanupRequest) String() string { return proto.CompactTextString(m) }
func (*VacuumVolumeCleanupRequest) ProtoMessage() {}
-func (*VacuumVolumeCleanupRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} }
+func (*VacuumVolumeCleanupRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} }
func (m *VacuumVolumeCleanupRequest) GetVolumeId() uint32 {
if m != nil {
@@ -298,7 +422,7 @@ type VacuumVolumeCleanupResponse struct {
func (m *VacuumVolumeCleanupResponse) Reset() { *m = VacuumVolumeCleanupResponse{} }
func (m *VacuumVolumeCleanupResponse) String() string { return proto.CompactTextString(m) }
func (*VacuumVolumeCleanupResponse) ProtoMessage() {}
-func (*VacuumVolumeCleanupResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} }
+func (*VacuumVolumeCleanupResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} }
type DeleteCollectionRequest struct {
Collection string `protobuf:"bytes,1,opt,name=collection" json:"collection,omitempty"`
@@ -307,7 +431,7 @@ type DeleteCollectionRequest struct {
func (m *DeleteCollectionRequest) Reset() { *m = DeleteCollectionRequest{} }
func (m *DeleteCollectionRequest) String() string { return proto.CompactTextString(m) }
func (*DeleteCollectionRequest) ProtoMessage() {}
-func (*DeleteCollectionRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} }
+func (*DeleteCollectionRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} }
func (m *DeleteCollectionRequest) GetCollection() string {
if m != nil {
@@ -322,7 +446,7 @@ type DeleteCollectionResponse struct {
func (m *DeleteCollectionResponse) Reset() { *m = DeleteCollectionResponse{} }
func (m *DeleteCollectionResponse) String() string { return proto.CompactTextString(m) }
func (*DeleteCollectionResponse) ProtoMessage() {}
-func (*DeleteCollectionResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} }
+func (*DeleteCollectionResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} }
type AllocateVolumeRequest struct {
VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
@@ -336,7 +460,7 @@ type AllocateVolumeRequest struct {
func (m *AllocateVolumeRequest) Reset() { *m = AllocateVolumeRequest{} }
func (m *AllocateVolumeRequest) String() string { return proto.CompactTextString(m) }
func (*AllocateVolumeRequest) ProtoMessage() {}
-func (*AllocateVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} }
+func (*AllocateVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} }
func (m *AllocateVolumeRequest) GetVolumeId() uint32 {
if m != nil {
@@ -386,7 +510,7 @@ type AllocateVolumeResponse struct {
func (m *AllocateVolumeResponse) Reset() { *m = AllocateVolumeResponse{} }
func (m *AllocateVolumeResponse) String() string { return proto.CompactTextString(m) }
func (*AllocateVolumeResponse) ProtoMessage() {}
-func (*AllocateVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} }
+func (*AllocateVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} }
type VolumeSyncStatusRequest struct {
VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
@@ -395,7 +519,7 @@ type VolumeSyncStatusRequest struct {
func (m *VolumeSyncStatusRequest) Reset() { *m = VolumeSyncStatusRequest{} }
func (m *VolumeSyncStatusRequest) String() string { return proto.CompactTextString(m) }
func (*VolumeSyncStatusRequest) ProtoMessage() {}
-func (*VolumeSyncStatusRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} }
+func (*VolumeSyncStatusRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} }
func (m *VolumeSyncStatusRequest) GetVolumeId() uint32 {
if m != nil {
@@ -417,7 +541,7 @@ type VolumeSyncStatusResponse struct {
func (m *VolumeSyncStatusResponse) Reset() { *m = VolumeSyncStatusResponse{} }
func (m *VolumeSyncStatusResponse) String() string { return proto.CompactTextString(m) }
func (*VolumeSyncStatusResponse) ProtoMessage() {}
-func (*VolumeSyncStatusResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} }
+func (*VolumeSyncStatusResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} }
func (m *VolumeSyncStatusResponse) GetVolumeId() uint32 {
if m != nil {
@@ -476,7 +600,7 @@ type VolumeIncrementalCopyRequest struct {
func (m *VolumeIncrementalCopyRequest) Reset() { *m = VolumeIncrementalCopyRequest{} }
func (m *VolumeIncrementalCopyRequest) String() string { return proto.CompactTextString(m) }
func (*VolumeIncrementalCopyRequest) ProtoMessage() {}
-func (*VolumeIncrementalCopyRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} }
+func (*VolumeIncrementalCopyRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} }
func (m *VolumeIncrementalCopyRequest) GetVolumeId() uint32 {
if m != nil {
@@ -499,7 +623,7 @@ type VolumeIncrementalCopyResponse struct {
func (m *VolumeIncrementalCopyResponse) Reset() { *m = VolumeIncrementalCopyResponse{} }
func (m *VolumeIncrementalCopyResponse) String() string { return proto.CompactTextString(m) }
func (*VolumeIncrementalCopyResponse) ProtoMessage() {}
-func (*VolumeIncrementalCopyResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} }
+func (*VolumeIncrementalCopyResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} }
func (m *VolumeIncrementalCopyResponse) GetFileContent() []byte {
if m != nil {
@@ -515,7 +639,7 @@ type VolumeMountRequest struct {
func (m *VolumeMountRequest) Reset() { *m = VolumeMountRequest{} }
func (m *VolumeMountRequest) String() string { return proto.CompactTextString(m) }
func (*VolumeMountRequest) ProtoMessage() {}
-func (*VolumeMountRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} }
+func (*VolumeMountRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} }
func (m *VolumeMountRequest) GetVolumeId() uint32 {
if m != nil {
@@ -530,7 +654,7 @@ type VolumeMountResponse struct {
func (m *VolumeMountResponse) Reset() { *m = VolumeMountResponse{} }
func (m *VolumeMountResponse) String() string { return proto.CompactTextString(m) }
func (*VolumeMountResponse) ProtoMessage() {}
-func (*VolumeMountResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} }
+func (*VolumeMountResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} }
type VolumeUnmountRequest struct {
VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
@@ -539,7 +663,7 @@ type VolumeUnmountRequest struct {
func (m *VolumeUnmountRequest) Reset() { *m = VolumeUnmountRequest{} }
func (m *VolumeUnmountRequest) String() string { return proto.CompactTextString(m) }
func (*VolumeUnmountRequest) ProtoMessage() {}
-func (*VolumeUnmountRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} }
+func (*VolumeUnmountRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} }
func (m *VolumeUnmountRequest) GetVolumeId() uint32 {
if m != nil {
@@ -554,7 +678,7 @@ type VolumeUnmountResponse struct {
func (m *VolumeUnmountResponse) Reset() { *m = VolumeUnmountResponse{} }
func (m *VolumeUnmountResponse) String() string { return proto.CompactTextString(m) }
func (*VolumeUnmountResponse) ProtoMessage() {}
-func (*VolumeUnmountResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} }
+func (*VolumeUnmountResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} }
type VolumeDeleteRequest struct {
VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
@@ -563,7 +687,7 @@ type VolumeDeleteRequest struct {
func (m *VolumeDeleteRequest) Reset() { *m = VolumeDeleteRequest{} }
func (m *VolumeDeleteRequest) String() string { return proto.CompactTextString(m) }
func (*VolumeDeleteRequest) ProtoMessage() {}
-func (*VolumeDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} }
+func (*VolumeDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} }
func (m *VolumeDeleteRequest) GetVolumeId() uint32 {
if m != nil {
@@ -578,7 +702,7 @@ type VolumeDeleteResponse struct {
func (m *VolumeDeleteResponse) Reset() { *m = VolumeDeleteResponse{} }
func (m *VolumeDeleteResponse) String() string { return proto.CompactTextString(m) }
func (*VolumeDeleteResponse) ProtoMessage() {}
-func (*VolumeDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} }
+func (*VolumeDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} }
type VolumeMarkReadonlyRequest struct {
VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
@@ -587,7 +711,7 @@ type VolumeMarkReadonlyRequest struct {
func (m *VolumeMarkReadonlyRequest) Reset() { *m = VolumeMarkReadonlyRequest{} }
func (m *VolumeMarkReadonlyRequest) String() string { return proto.CompactTextString(m) }
func (*VolumeMarkReadonlyRequest) ProtoMessage() {}
-func (*VolumeMarkReadonlyRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} }
+func (*VolumeMarkReadonlyRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} }
func (m *VolumeMarkReadonlyRequest) GetVolumeId() uint32 {
if m != nil {
@@ -602,7 +726,7 @@ type VolumeMarkReadonlyResponse struct {
func (m *VolumeMarkReadonlyResponse) Reset() { *m = VolumeMarkReadonlyResponse{} }
func (m *VolumeMarkReadonlyResponse) String() string { return proto.CompactTextString(m) }
func (*VolumeMarkReadonlyResponse) ProtoMessage() {}
-func (*VolumeMarkReadonlyResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} }
+func (*VolumeMarkReadonlyResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} }
type VolumeConfigureRequest struct {
VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
@@ -612,7 +736,7 @@ type VolumeConfigureRequest struct {
func (m *VolumeConfigureRequest) Reset() { *m = VolumeConfigureRequest{} }
func (m *VolumeConfigureRequest) String() string { return proto.CompactTextString(m) }
func (*VolumeConfigureRequest) ProtoMessage() {}
-func (*VolumeConfigureRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} }
+func (*VolumeConfigureRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} }
func (m *VolumeConfigureRequest) GetVolumeId() uint32 {
if m != nil {
@@ -635,7 +759,7 @@ type VolumeConfigureResponse struct {
func (m *VolumeConfigureResponse) Reset() { *m = VolumeConfigureResponse{} }
func (m *VolumeConfigureResponse) String() string { return proto.CompactTextString(m) }
func (*VolumeConfigureResponse) ProtoMessage() {}
-func (*VolumeConfigureResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} }
+func (*VolumeConfigureResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} }
func (m *VolumeConfigureResponse) GetError() string {
if m != nil {
@@ -655,7 +779,7 @@ type VolumeCopyRequest struct {
func (m *VolumeCopyRequest) Reset() { *m = VolumeCopyRequest{} }
func (m *VolumeCopyRequest) String() string { return proto.CompactTextString(m) }
func (*VolumeCopyRequest) ProtoMessage() {}
-func (*VolumeCopyRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} }
+func (*VolumeCopyRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} }
func (m *VolumeCopyRequest) GetVolumeId() uint32 {
if m != nil {
@@ -699,7 +823,7 @@ type VolumeCopyResponse struct {
func (m *VolumeCopyResponse) Reset() { *m = VolumeCopyResponse{} }
func (m *VolumeCopyResponse) String() string { return proto.CompactTextString(m) }
func (*VolumeCopyResponse) ProtoMessage() {}
-func (*VolumeCopyResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} }
+func (*VolumeCopyResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{33} }
func (m *VolumeCopyResponse) GetLastAppendAtNs() uint64 {
if m != nil {
@@ -721,7 +845,7 @@ type CopyFileRequest struct {
func (m *CopyFileRequest) Reset() { *m = CopyFileRequest{} }
func (m *CopyFileRequest) String() string { return proto.CompactTextString(m) }
func (*CopyFileRequest) ProtoMessage() {}
-func (*CopyFileRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} }
+func (*CopyFileRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{34} }
func (m *CopyFileRequest) GetVolumeId() uint32 {
if m != nil {
@@ -779,7 +903,7 @@ type CopyFileResponse struct {
func (m *CopyFileResponse) Reset() { *m = CopyFileResponse{} }
func (m *CopyFileResponse) String() string { return proto.CompactTextString(m) }
func (*CopyFileResponse) ProtoMessage() {}
-func (*CopyFileResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{33} }
+func (*CopyFileResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{35} }
func (m *CopyFileResponse) GetFileContent() []byte {
if m != nil {
@@ -797,7 +921,7 @@ type VolumeTailSenderRequest struct {
func (m *VolumeTailSenderRequest) Reset() { *m = VolumeTailSenderRequest{} }
func (m *VolumeTailSenderRequest) String() string { return proto.CompactTextString(m) }
func (*VolumeTailSenderRequest) ProtoMessage() {}
-func (*VolumeTailSenderRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{34} }
+func (*VolumeTailSenderRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{36} }
func (m *VolumeTailSenderRequest) GetVolumeId() uint32 {
if m != nil {
@@ -829,7 +953,7 @@ type VolumeTailSenderResponse struct {
func (m *VolumeTailSenderResponse) Reset() { *m = VolumeTailSenderResponse{} }
func (m *VolumeTailSenderResponse) String() string { return proto.CompactTextString(m) }
func (*VolumeTailSenderResponse) ProtoMessage() {}
-func (*VolumeTailSenderResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{35} }
+func (*VolumeTailSenderResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{37} }
func (m *VolumeTailSenderResponse) GetNeedleHeader() []byte {
if m != nil {
@@ -862,7 +986,7 @@ type VolumeTailReceiverRequest struct {
func (m *VolumeTailReceiverRequest) Reset() { *m = VolumeTailReceiverRequest{} }
func (m *VolumeTailReceiverRequest) String() string { return proto.CompactTextString(m) }
func (*VolumeTailReceiverRequest) ProtoMessage() {}
-func (*VolumeTailReceiverRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{36} }
+func (*VolumeTailReceiverRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{38} }
func (m *VolumeTailReceiverRequest) GetVolumeId() uint32 {
if m != nil {
@@ -898,7 +1022,7 @@ type VolumeTailReceiverResponse struct {
func (m *VolumeTailReceiverResponse) Reset() { *m = VolumeTailReceiverResponse{} }
func (m *VolumeTailReceiverResponse) String() string { return proto.CompactTextString(m) }
func (*VolumeTailReceiverResponse) ProtoMessage() {}
-func (*VolumeTailReceiverResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{37} }
+func (*VolumeTailReceiverResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{39} }
type VolumeEcShardsGenerateRequest struct {
VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
@@ -908,7 +1032,7 @@ type VolumeEcShardsGenerateRequest struct {
func (m *VolumeEcShardsGenerateRequest) Reset() { *m = VolumeEcShardsGenerateRequest{} }
func (m *VolumeEcShardsGenerateRequest) String() string { return proto.CompactTextString(m) }
func (*VolumeEcShardsGenerateRequest) ProtoMessage() {}
-func (*VolumeEcShardsGenerateRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{38} }
+func (*VolumeEcShardsGenerateRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{40} }
func (m *VolumeEcShardsGenerateRequest) GetVolumeId() uint32 {
if m != nil {
@@ -930,7 +1054,7 @@ type VolumeEcShardsGenerateResponse struct {
func (m *VolumeEcShardsGenerateResponse) Reset() { *m = VolumeEcShardsGenerateResponse{} }
func (m *VolumeEcShardsGenerateResponse) String() string { return proto.CompactTextString(m) }
func (*VolumeEcShardsGenerateResponse) ProtoMessage() {}
-func (*VolumeEcShardsGenerateResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{39} }
+func (*VolumeEcShardsGenerateResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{41} }
type VolumeEcShardsRebuildRequest struct {
VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
@@ -940,7 +1064,7 @@ type VolumeEcShardsRebuildRequest struct {
func (m *VolumeEcShardsRebuildRequest) Reset() { *m = VolumeEcShardsRebuildRequest{} }
func (m *VolumeEcShardsRebuildRequest) String() string { return proto.CompactTextString(m) }
func (*VolumeEcShardsRebuildRequest) ProtoMessage() {}
-func (*VolumeEcShardsRebuildRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{40} }
+func (*VolumeEcShardsRebuildRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{42} }
func (m *VolumeEcShardsRebuildRequest) GetVolumeId() uint32 {
if m != nil {
@@ -963,7 +1087,7 @@ type VolumeEcShardsRebuildResponse struct {
func (m *VolumeEcShardsRebuildResponse) Reset() { *m = VolumeEcShardsRebuildResponse{} }
func (m *VolumeEcShardsRebuildResponse) String() string { return proto.CompactTextString(m) }
func (*VolumeEcShardsRebuildResponse) ProtoMessage() {}
-func (*VolumeEcShardsRebuildResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{41} }
+func (*VolumeEcShardsRebuildResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{43} }
func (m *VolumeEcShardsRebuildResponse) GetRebuiltShardIds() []uint32 {
if m != nil {
@@ -985,7 +1109,7 @@ type VolumeEcShardsCopyRequest struct {
func (m *VolumeEcShardsCopyRequest) Reset() { *m = VolumeEcShardsCopyRequest{} }
func (m *VolumeEcShardsCopyRequest) String() string { return proto.CompactTextString(m) }
func (*VolumeEcShardsCopyRequest) ProtoMessage() {}
-func (*VolumeEcShardsCopyRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{42} }
+func (*VolumeEcShardsCopyRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{44} }
func (m *VolumeEcShardsCopyRequest) GetVolumeId() uint32 {
if m != nil {
@@ -1042,7 +1166,7 @@ type VolumeEcShardsCopyResponse struct {
func (m *VolumeEcShardsCopyResponse) Reset() { *m = VolumeEcShardsCopyResponse{} }
func (m *VolumeEcShardsCopyResponse) String() string { return proto.CompactTextString(m) }
func (*VolumeEcShardsCopyResponse) ProtoMessage() {}
-func (*VolumeEcShardsCopyResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{43} }
+func (*VolumeEcShardsCopyResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{45} }
type VolumeEcShardsDeleteRequest struct {
VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
@@ -1053,7 +1177,7 @@ type VolumeEcShardsDeleteRequest struct {
func (m *VolumeEcShardsDeleteRequest) Reset() { *m = VolumeEcShardsDeleteRequest{} }
func (m *VolumeEcShardsDeleteRequest) String() string { return proto.CompactTextString(m) }
func (*VolumeEcShardsDeleteRequest) ProtoMessage() {}
-func (*VolumeEcShardsDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{44} }
+func (*VolumeEcShardsDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{46} }
func (m *VolumeEcShardsDeleteRequest) GetVolumeId() uint32 {
if m != nil {
@@ -1082,7 +1206,7 @@ type VolumeEcShardsDeleteResponse struct {
func (m *VolumeEcShardsDeleteResponse) Reset() { *m = VolumeEcShardsDeleteResponse{} }
func (m *VolumeEcShardsDeleteResponse) String() string { return proto.CompactTextString(m) }
func (*VolumeEcShardsDeleteResponse) ProtoMessage() {}
-func (*VolumeEcShardsDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{45} }
+func (*VolumeEcShardsDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{47} }
type VolumeEcShardsMountRequest struct {
VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
@@ -1093,7 +1217,7 @@ type VolumeEcShardsMountRequest struct {
func (m *VolumeEcShardsMountRequest) Reset() { *m = VolumeEcShardsMountRequest{} }
func (m *VolumeEcShardsMountRequest) String() string { return proto.CompactTextString(m) }
func (*VolumeEcShardsMountRequest) ProtoMessage() {}
-func (*VolumeEcShardsMountRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{46} }
+func (*VolumeEcShardsMountRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{48} }
func (m *VolumeEcShardsMountRequest) GetVolumeId() uint32 {
if m != nil {
@@ -1122,7 +1246,7 @@ type VolumeEcShardsMountResponse struct {
func (m *VolumeEcShardsMountResponse) Reset() { *m = VolumeEcShardsMountResponse{} }
func (m *VolumeEcShardsMountResponse) String() string { return proto.CompactTextString(m) }
func (*VolumeEcShardsMountResponse) ProtoMessage() {}
-func (*VolumeEcShardsMountResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{47} }
+func (*VolumeEcShardsMountResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{49} }
type VolumeEcShardsUnmountRequest struct {
VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
@@ -1132,7 +1256,7 @@ type VolumeEcShardsUnmountRequest struct {
func (m *VolumeEcShardsUnmountRequest) Reset() { *m = VolumeEcShardsUnmountRequest{} }
func (m *VolumeEcShardsUnmountRequest) String() string { return proto.CompactTextString(m) }
func (*VolumeEcShardsUnmountRequest) ProtoMessage() {}
-func (*VolumeEcShardsUnmountRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{48} }
+func (*VolumeEcShardsUnmountRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{50} }
func (m *VolumeEcShardsUnmountRequest) GetVolumeId() uint32 {
if m != nil {
@@ -1154,7 +1278,7 @@ type VolumeEcShardsUnmountResponse struct {
func (m *VolumeEcShardsUnmountResponse) Reset() { *m = VolumeEcShardsUnmountResponse{} }
func (m *VolumeEcShardsUnmountResponse) String() string { return proto.CompactTextString(m) }
func (*VolumeEcShardsUnmountResponse) ProtoMessage() {}
-func (*VolumeEcShardsUnmountResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{49} }
+func (*VolumeEcShardsUnmountResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{51} }
type VolumeEcShardReadRequest struct {
VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
@@ -1167,7 +1291,7 @@ type VolumeEcShardReadRequest struct {
func (m *VolumeEcShardReadRequest) Reset() { *m = VolumeEcShardReadRequest{} }
func (m *VolumeEcShardReadRequest) String() string { return proto.CompactTextString(m) }
func (*VolumeEcShardReadRequest) ProtoMessage() {}
-func (*VolumeEcShardReadRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{50} }
+func (*VolumeEcShardReadRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{52} }
func (m *VolumeEcShardReadRequest) GetVolumeId() uint32 {
if m != nil {
@@ -1212,7 +1336,7 @@ type VolumeEcShardReadResponse struct {
func (m *VolumeEcShardReadResponse) Reset() { *m = VolumeEcShardReadResponse{} }
func (m *VolumeEcShardReadResponse) String() string { return proto.CompactTextString(m) }
func (*VolumeEcShardReadResponse) ProtoMessage() {}
-func (*VolumeEcShardReadResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{51} }
+func (*VolumeEcShardReadResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{53} }
func (m *VolumeEcShardReadResponse) GetData() []byte {
if m != nil {
@@ -1238,7 +1362,7 @@ type VolumeEcBlobDeleteRequest struct {
func (m *VolumeEcBlobDeleteRequest) Reset() { *m = VolumeEcBlobDeleteRequest{} }
func (m *VolumeEcBlobDeleteRequest) String() string { return proto.CompactTextString(m) }
func (*VolumeEcBlobDeleteRequest) ProtoMessage() {}
-func (*VolumeEcBlobDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{52} }
+func (*VolumeEcBlobDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{54} }
func (m *VolumeEcBlobDeleteRequest) GetVolumeId() uint32 {
if m != nil {
@@ -1274,7 +1398,7 @@ type VolumeEcBlobDeleteResponse struct {
func (m *VolumeEcBlobDeleteResponse) Reset() { *m = VolumeEcBlobDeleteResponse{} }
func (m *VolumeEcBlobDeleteResponse) String() string { return proto.CompactTextString(m) }
func (*VolumeEcBlobDeleteResponse) ProtoMessage() {}
-func (*VolumeEcBlobDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{53} }
+func (*VolumeEcBlobDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{55} }
type VolumeEcShardsToVolumeRequest struct {
VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
@@ -1284,7 +1408,7 @@ type VolumeEcShardsToVolumeRequest struct {
func (m *VolumeEcShardsToVolumeRequest) Reset() { *m = VolumeEcShardsToVolumeRequest{} }
func (m *VolumeEcShardsToVolumeRequest) String() string { return proto.CompactTextString(m) }
func (*VolumeEcShardsToVolumeRequest) ProtoMessage() {}
-func (*VolumeEcShardsToVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{54} }
+func (*VolumeEcShardsToVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{56} }
func (m *VolumeEcShardsToVolumeRequest) GetVolumeId() uint32 {
if m != nil {
@@ -1306,7 +1430,7 @@ type VolumeEcShardsToVolumeResponse struct {
func (m *VolumeEcShardsToVolumeResponse) Reset() { *m = VolumeEcShardsToVolumeResponse{} }
func (m *VolumeEcShardsToVolumeResponse) String() string { return proto.CompactTextString(m) }
func (*VolumeEcShardsToVolumeResponse) ProtoMessage() {}
-func (*VolumeEcShardsToVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{55} }
+func (*VolumeEcShardsToVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{57} }
type ReadVolumeFileStatusRequest struct {
VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
@@ -1315,7 +1439,7 @@ type ReadVolumeFileStatusRequest struct {
func (m *ReadVolumeFileStatusRequest) Reset() { *m = ReadVolumeFileStatusRequest{} }
func (m *ReadVolumeFileStatusRequest) String() string { return proto.CompactTextString(m) }
func (*ReadVolumeFileStatusRequest) ProtoMessage() {}
-func (*ReadVolumeFileStatusRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{56} }
+func (*ReadVolumeFileStatusRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{58} }
func (m *ReadVolumeFileStatusRequest) GetVolumeId() uint32 {
if m != nil {
@@ -1338,7 +1462,7 @@ type ReadVolumeFileStatusResponse struct {
func (m *ReadVolumeFileStatusResponse) Reset() { *m = ReadVolumeFileStatusResponse{} }
func (m *ReadVolumeFileStatusResponse) String() string { return proto.CompactTextString(m) }
func (*ReadVolumeFileStatusResponse) ProtoMessage() {}
-func (*ReadVolumeFileStatusResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{57} }
+func (*ReadVolumeFileStatusResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{59} }
func (m *ReadVolumeFileStatusResponse) GetVolumeId() uint32 {
if m != nil {
@@ -1397,16 +1521,18 @@ func (m *ReadVolumeFileStatusResponse) GetCollection() string {
}
type DiskStatus struct {
- Dir string `protobuf:"bytes,1,opt,name=dir" json:"dir,omitempty"`
- All uint64 `protobuf:"varint,2,opt,name=all" json:"all,omitempty"`
- Used uint64 `protobuf:"varint,3,opt,name=used" json:"used,omitempty"`
- Free uint64 `protobuf:"varint,4,opt,name=free" json:"free,omitempty"`
+ Dir string `protobuf:"bytes,1,opt,name=dir" json:"dir,omitempty"`
+ All uint64 `protobuf:"varint,2,opt,name=all" json:"all,omitempty"`
+ Used uint64 `protobuf:"varint,3,opt,name=used" json:"used,omitempty"`
+ Free uint64 `protobuf:"varint,4,opt,name=free" json:"free,omitempty"`
+ PercentFree float32 `protobuf:"fixed32,5,opt,name=percent_free,json=percentFree" json:"percent_free,omitempty"`
+ PercentUsed float32 `protobuf:"fixed32,6,opt,name=percent_used,json=percentUsed" json:"percent_used,omitempty"`
}
func (m *DiskStatus) Reset() { *m = DiskStatus{} }
func (m *DiskStatus) String() string { return proto.CompactTextString(m) }
func (*DiskStatus) ProtoMessage() {}
-func (*DiskStatus) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{58} }
+func (*DiskStatus) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{60} }
func (m *DiskStatus) GetDir() string {
if m != nil {
@@ -1436,6 +1562,20 @@ func (m *DiskStatus) GetFree() uint64 {
return 0
}
+func (m *DiskStatus) GetPercentFree() float32 {
+ if m != nil {
+ return m.PercentFree
+ }
+ return 0
+}
+
+func (m *DiskStatus) GetPercentUsed() float32 {
+ if m != nil {
+ return m.PercentUsed
+ }
+ return 0
+}
+
type MemStatus struct {
Goroutines int32 `protobuf:"varint,1,opt,name=goroutines" json:"goroutines,omitempty"`
All uint64 `protobuf:"varint,2,opt,name=all" json:"all,omitempty"`
@@ -1449,7 +1589,7 @@ type MemStatus struct {
func (m *MemStatus) Reset() { *m = MemStatus{} }
func (m *MemStatus) String() string { return proto.CompactTextString(m) }
func (*MemStatus) ProtoMessage() {}
-func (*MemStatus) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{59} }
+func (*MemStatus) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{61} }
func (m *MemStatus) GetGoroutines() int32 {
if m != nil {
@@ -1514,7 +1654,7 @@ type RemoteFile struct {
func (m *RemoteFile) Reset() { *m = RemoteFile{} }
func (m *RemoteFile) String() string { return proto.CompactTextString(m) }
func (*RemoteFile) ProtoMessage() {}
-func (*RemoteFile) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{60} }
+func (*RemoteFile) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{62} }
func (m *RemoteFile) GetBackendType() string {
if m != nil {
@@ -1574,7 +1714,7 @@ type VolumeInfo struct {
func (m *VolumeInfo) Reset() { *m = VolumeInfo{} }
func (m *VolumeInfo) String() string { return proto.CompactTextString(m) }
func (*VolumeInfo) ProtoMessage() {}
-func (*VolumeInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{61} }
+func (*VolumeInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{63} }
func (m *VolumeInfo) GetFiles() []*RemoteFile {
if m != nil {
@@ -1608,7 +1748,7 @@ func (m *VolumeTierMoveDatToRemoteRequest) Reset() { *m = VolumeTierMove
func (m *VolumeTierMoveDatToRemoteRequest) String() string { return proto.CompactTextString(m) }
func (*VolumeTierMoveDatToRemoteRequest) ProtoMessage() {}
func (*VolumeTierMoveDatToRemoteRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor0, []int{62}
+ return fileDescriptor0, []int{64}
}
func (m *VolumeTierMoveDatToRemoteRequest) GetVolumeId() uint32 {
@@ -1648,7 +1788,7 @@ func (m *VolumeTierMoveDatToRemoteResponse) Reset() { *m = VolumeTierMov
func (m *VolumeTierMoveDatToRemoteResponse) String() string { return proto.CompactTextString(m) }
func (*VolumeTierMoveDatToRemoteResponse) ProtoMessage() {}
func (*VolumeTierMoveDatToRemoteResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor0, []int{63}
+ return fileDescriptor0, []int{65}
}
func (m *VolumeTierMoveDatToRemoteResponse) GetProcessed() int64 {
@@ -1675,7 +1815,7 @@ func (m *VolumeTierMoveDatFromRemoteRequest) Reset() { *m = VolumeTierMo
func (m *VolumeTierMoveDatFromRemoteRequest) String() string { return proto.CompactTextString(m) }
func (*VolumeTierMoveDatFromRemoteRequest) ProtoMessage() {}
func (*VolumeTierMoveDatFromRemoteRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor0, []int{64}
+ return fileDescriptor0, []int{66}
}
func (m *VolumeTierMoveDatFromRemoteRequest) GetVolumeId() uint32 {
@@ -1708,7 +1848,7 @@ func (m *VolumeTierMoveDatFromRemoteResponse) Reset() { *m = VolumeTierM
func (m *VolumeTierMoveDatFromRemoteResponse) String() string { return proto.CompactTextString(m) }
func (*VolumeTierMoveDatFromRemoteResponse) ProtoMessage() {}
func (*VolumeTierMoveDatFromRemoteResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor0, []int{65}
+ return fileDescriptor0, []int{67}
}
func (m *VolumeTierMoveDatFromRemoteResponse) GetProcessed() int64 {
@@ -1725,6 +1865,38 @@ func (m *VolumeTierMoveDatFromRemoteResponse) GetProcessedPercentage() float32 {
return 0
}
+type VolumeServerStatusRequest struct {
+}
+
+func (m *VolumeServerStatusRequest) Reset() { *m = VolumeServerStatusRequest{} }
+func (m *VolumeServerStatusRequest) String() string { return proto.CompactTextString(m) }
+func (*VolumeServerStatusRequest) ProtoMessage() {}
+func (*VolumeServerStatusRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{68} }
+
+type VolumeServerStatusResponse struct {
+ DiskStatuses []*DiskStatus `protobuf:"bytes,1,rep,name=disk_statuses,json=diskStatuses" json:"disk_statuses,omitempty"`
+ MemoryStatus *MemStatus `protobuf:"bytes,2,opt,name=memory_status,json=memoryStatus" json:"memory_status,omitempty"`
+}
+
+func (m *VolumeServerStatusResponse) Reset() { *m = VolumeServerStatusResponse{} }
+func (m *VolumeServerStatusResponse) String() string { return proto.CompactTextString(m) }
+func (*VolumeServerStatusResponse) ProtoMessage() {}
+func (*VolumeServerStatusResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{69} }
+
+func (m *VolumeServerStatusResponse) GetDiskStatuses() []*DiskStatus {
+ if m != nil {
+ return m.DiskStatuses
+ }
+ return nil
+}
+
+func (m *VolumeServerStatusResponse) GetMemoryStatus() *MemStatus {
+ if m != nil {
+ return m.MemoryStatus
+ }
+ return nil
+}
+
// select on volume servers
type QueryRequest struct {
Selections []string `protobuf:"bytes,1,rep,name=selections" json:"selections,omitempty"`
@@ -1737,7 +1909,7 @@ type QueryRequest struct {
func (m *QueryRequest) Reset() { *m = QueryRequest{} }
func (m *QueryRequest) String() string { return proto.CompactTextString(m) }
func (*QueryRequest) ProtoMessage() {}
-func (*QueryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{66} }
+func (*QueryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{70} }
func (m *QueryRequest) GetSelections() []string {
if m != nil {
@@ -1783,7 +1955,7 @@ type QueryRequest_Filter struct {
func (m *QueryRequest_Filter) Reset() { *m = QueryRequest_Filter{} }
func (m *QueryRequest_Filter) String() string { return proto.CompactTextString(m) }
func (*QueryRequest_Filter) ProtoMessage() {}
-func (*QueryRequest_Filter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{66, 0} }
+func (*QueryRequest_Filter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{70, 0} }
func (m *QueryRequest_Filter) GetField() string {
if m != nil {
@@ -1818,7 +1990,7 @@ func (m *QueryRequest_InputSerialization) Reset() { *m = QueryRequest_In
func (m *QueryRequest_InputSerialization) String() string { return proto.CompactTextString(m) }
func (*QueryRequest_InputSerialization) ProtoMessage() {}
func (*QueryRequest_InputSerialization) Descriptor() ([]byte, []int) {
- return fileDescriptor0, []int{66, 1}
+ return fileDescriptor0, []int{70, 1}
}
func (m *QueryRequest_InputSerialization) GetCompressionType() string {
@@ -1866,7 +2038,7 @@ func (m *QueryRequest_InputSerialization_CSVInput) Reset() {
func (m *QueryRequest_InputSerialization_CSVInput) String() string { return proto.CompactTextString(m) }
func (*QueryRequest_InputSerialization_CSVInput) ProtoMessage() {}
func (*QueryRequest_InputSerialization_CSVInput) Descriptor() ([]byte, []int) {
- return fileDescriptor0, []int{66, 1, 0}
+ return fileDescriptor0, []int{70, 1, 0}
}
func (m *QueryRequest_InputSerialization_CSVInput) GetFileHeaderInfo() string {
@@ -1928,7 +2100,7 @@ func (m *QueryRequest_InputSerialization_JSONInput) Reset() {
func (m *QueryRequest_InputSerialization_JSONInput) String() string { return proto.CompactTextString(m) }
func (*QueryRequest_InputSerialization_JSONInput) ProtoMessage() {}
func (*QueryRequest_InputSerialization_JSONInput) Descriptor() ([]byte, []int) {
- return fileDescriptor0, []int{66, 1, 1}
+ return fileDescriptor0, []int{70, 1, 1}
}
func (m *QueryRequest_InputSerialization_JSONInput) GetType() string {
@@ -1949,7 +2121,7 @@ func (m *QueryRequest_InputSerialization_ParquetInput) String() string {
}
func (*QueryRequest_InputSerialization_ParquetInput) ProtoMessage() {}
func (*QueryRequest_InputSerialization_ParquetInput) Descriptor() ([]byte, []int) {
- return fileDescriptor0, []int{66, 1, 2}
+ return fileDescriptor0, []int{70, 1, 2}
}
type QueryRequest_OutputSerialization struct {
@@ -1961,7 +2133,7 @@ func (m *QueryRequest_OutputSerialization) Reset() { *m = QueryRequest_O
func (m *QueryRequest_OutputSerialization) String() string { return proto.CompactTextString(m) }
func (*QueryRequest_OutputSerialization) ProtoMessage() {}
func (*QueryRequest_OutputSerialization) Descriptor() ([]byte, []int) {
- return fileDescriptor0, []int{66, 2}
+ return fileDescriptor0, []int{70, 2}
}
func (m *QueryRequest_OutputSerialization) GetCsvOutput() *QueryRequest_OutputSerialization_CSVOutput {
@@ -1994,7 +2166,7 @@ func (m *QueryRequest_OutputSerialization_CSVOutput) String() string {
}
func (*QueryRequest_OutputSerialization_CSVOutput) ProtoMessage() {}
func (*QueryRequest_OutputSerialization_CSVOutput) Descriptor() ([]byte, []int) {
- return fileDescriptor0, []int{66, 2, 0}
+ return fileDescriptor0, []int{70, 2, 0}
}
func (m *QueryRequest_OutputSerialization_CSVOutput) GetQuoteFields() string {
@@ -2044,7 +2216,7 @@ func (m *QueryRequest_OutputSerialization_JSONOutput) String() string {
}
func (*QueryRequest_OutputSerialization_JSONOutput) ProtoMessage() {}
func (*QueryRequest_OutputSerialization_JSONOutput) Descriptor() ([]byte, []int) {
- return fileDescriptor0, []int{66, 2, 1}
+ return fileDescriptor0, []int{70, 2, 1}
}
func (m *QueryRequest_OutputSerialization_JSONOutput) GetRecordDelimiter() string {
@@ -2061,7 +2233,7 @@ type QueriedStripe struct {
func (m *QueriedStripe) Reset() { *m = QueriedStripe{} }
func (m *QueriedStripe) String() string { return proto.CompactTextString(m) }
func (*QueriedStripe) ProtoMessage() {}
-func (*QueriedStripe) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{67} }
+func (*QueriedStripe) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{71} }
func (m *QueriedStripe) GetRecords() []byte {
if m != nil {
@@ -2074,6 +2246,8 @@ func init() {
proto.RegisterType((*BatchDeleteRequest)(nil), "volume_server_pb.BatchDeleteRequest")
proto.RegisterType((*BatchDeleteResponse)(nil), "volume_server_pb.BatchDeleteResponse")
proto.RegisterType((*DeleteResult)(nil), "volume_server_pb.DeleteResult")
+ proto.RegisterType((*FileGetRequest)(nil), "volume_server_pb.FileGetRequest")
+ proto.RegisterType((*FileGetResponse)(nil), "volume_server_pb.FileGetResponse")
proto.RegisterType((*Empty)(nil), "volume_server_pb.Empty")
proto.RegisterType((*VacuumVolumeCheckRequest)(nil), "volume_server_pb.VacuumVolumeCheckRequest")
proto.RegisterType((*VacuumVolumeCheckResponse)(nil), "volume_server_pb.VacuumVolumeCheckResponse")
@@ -2137,6 +2311,8 @@ func init() {
proto.RegisterType((*VolumeTierMoveDatToRemoteResponse)(nil), "volume_server_pb.VolumeTierMoveDatToRemoteResponse")
proto.RegisterType((*VolumeTierMoveDatFromRemoteRequest)(nil), "volume_server_pb.VolumeTierMoveDatFromRemoteRequest")
proto.RegisterType((*VolumeTierMoveDatFromRemoteResponse)(nil), "volume_server_pb.VolumeTierMoveDatFromRemoteResponse")
+ proto.RegisterType((*VolumeServerStatusRequest)(nil), "volume_server_pb.VolumeServerStatusRequest")
+ proto.RegisterType((*VolumeServerStatusResponse)(nil), "volume_server_pb.VolumeServerStatusResponse")
proto.RegisterType((*QueryRequest)(nil), "volume_server_pb.QueryRequest")
proto.RegisterType((*QueryRequest_Filter)(nil), "volume_server_pb.QueryRequest.Filter")
proto.RegisterType((*QueryRequest_InputSerialization)(nil), "volume_server_pb.QueryRequest.InputSerialization")
@@ -2162,6 +2338,7 @@ const _ = grpc.SupportPackageIsVersion4
type VolumeServerClient interface {
// Experts only: takes multiple fid parameters. This function does not propagate deletes to replicas.
BatchDelete(ctx context.Context, in *BatchDeleteRequest, opts ...grpc.CallOption) (*BatchDeleteResponse, error)
+ FileGet(ctx context.Context, in *FileGetRequest, opts ...grpc.CallOption) (VolumeServer_FileGetClient, error)
VacuumVolumeCheck(ctx context.Context, in *VacuumVolumeCheckRequest, opts ...grpc.CallOption) (*VacuumVolumeCheckResponse, error)
VacuumVolumeCompact(ctx context.Context, in *VacuumVolumeCompactRequest, opts ...grpc.CallOption) (*VacuumVolumeCompactResponse, error)
VacuumVolumeCommit(ctx context.Context, in *VacuumVolumeCommitRequest, opts ...grpc.CallOption) (*VacuumVolumeCommitResponse, error)
@@ -2194,7 +2371,8 @@ type VolumeServerClient interface {
// tiered storage
VolumeTierMoveDatToRemote(ctx context.Context, in *VolumeTierMoveDatToRemoteRequest, opts ...grpc.CallOption) (VolumeServer_VolumeTierMoveDatToRemoteClient, error)
VolumeTierMoveDatFromRemote(ctx context.Context, in *VolumeTierMoveDatFromRemoteRequest, opts ...grpc.CallOption) (VolumeServer_VolumeTierMoveDatFromRemoteClient, error)
- // query
+ VolumeServerStatus(ctx context.Context, in *VolumeServerStatusRequest, opts ...grpc.CallOption) (*VolumeServerStatusResponse, error)
+ // query
Query(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (VolumeServer_QueryClient, error)
}
@@ -2215,6 +2393,38 @@ func (c *volumeServerClient) BatchDelete(ctx context.Context, in *BatchDeleteReq
return out, nil
}
+func (c *volumeServerClient) FileGet(ctx context.Context, in *FileGetRequest, opts ...grpc.CallOption) (VolumeServer_FileGetClient, error) {
+ stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[0], c.cc, "/volume_server_pb.VolumeServer/FileGet", opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &volumeServerFileGetClient{stream}
+ if err := x.ClientStream.SendMsg(in); err != nil {
+ return nil, err
+ }
+ if err := x.ClientStream.CloseSend(); err != nil {
+ return nil, err
+ }
+ return x, nil
+}
+
+type VolumeServer_FileGetClient interface {
+ Recv() (*FileGetResponse, error)
+ grpc.ClientStream
+}
+
+type volumeServerFileGetClient struct {
+ grpc.ClientStream
+}
+
+func (x *volumeServerFileGetClient) Recv() (*FileGetResponse, error) {
+ m := new(FileGetResponse)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
func (c *volumeServerClient) VacuumVolumeCheck(ctx context.Context, in *VacuumVolumeCheckRequest, opts ...grpc.CallOption) (*VacuumVolumeCheckResponse, error) {
out := new(VacuumVolumeCheckResponse)
err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VacuumVolumeCheck", in, out, c.cc, opts...)
@@ -2279,7 +2489,7 @@ func (c *volumeServerClient) VolumeSyncStatus(ctx context.Context, in *VolumeSyn
}
func (c *volumeServerClient) VolumeIncrementalCopy(ctx context.Context, in *VolumeIncrementalCopyRequest, opts ...grpc.CallOption) (VolumeServer_VolumeIncrementalCopyClient, error) {
- stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[0], c.cc, "/volume_server_pb.VolumeServer/VolumeIncrementalCopy", opts...)
+ stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[1], c.cc, "/volume_server_pb.VolumeServer/VolumeIncrementalCopy", opts...)
if err != nil {
return nil, err
}
@@ -2374,7 +2584,7 @@ func (c *volumeServerClient) ReadVolumeFileStatus(ctx context.Context, in *ReadV
}
func (c *volumeServerClient) CopyFile(ctx context.Context, in *CopyFileRequest, opts ...grpc.CallOption) (VolumeServer_CopyFileClient, error) {
- stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[1], c.cc, "/volume_server_pb.VolumeServer/CopyFile", opts...)
+ stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[2], c.cc, "/volume_server_pb.VolumeServer/CopyFile", opts...)
if err != nil {
return nil, err
}
@@ -2406,7 +2616,7 @@ func (x *volumeServerCopyFileClient) Recv() (*CopyFileResponse, error) {
}
func (c *volumeServerClient) VolumeTailSender(ctx context.Context, in *VolumeTailSenderRequest, opts ...grpc.CallOption) (VolumeServer_VolumeTailSenderClient, error) {
- stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[2], c.cc, "/volume_server_pb.VolumeServer/VolumeTailSender", opts...)
+ stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[3], c.cc, "/volume_server_pb.VolumeServer/VolumeTailSender", opts...)
if err != nil {
return nil, err
}
@@ -2501,7 +2711,7 @@ func (c *volumeServerClient) VolumeEcShardsUnmount(ctx context.Context, in *Volu
}
func (c *volumeServerClient) VolumeEcShardRead(ctx context.Context, in *VolumeEcShardReadRequest, opts ...grpc.CallOption) (VolumeServer_VolumeEcShardReadClient, error) {
- stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[3], c.cc, "/volume_server_pb.VolumeServer/VolumeEcShardRead", opts...)
+ stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[4], c.cc, "/volume_server_pb.VolumeServer/VolumeEcShardRead", opts...)
if err != nil {
return nil, err
}
@@ -2551,7 +2761,7 @@ func (c *volumeServerClient) VolumeEcShardsToVolume(ctx context.Context, in *Vol
}
func (c *volumeServerClient) VolumeTierMoveDatToRemote(ctx context.Context, in *VolumeTierMoveDatToRemoteRequest, opts ...grpc.CallOption) (VolumeServer_VolumeTierMoveDatToRemoteClient, error) {
- stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[4], c.cc, "/volume_server_pb.VolumeServer/VolumeTierMoveDatToRemote", opts...)
+ stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[5], c.cc, "/volume_server_pb.VolumeServer/VolumeTierMoveDatToRemote", opts...)
if err != nil {
return nil, err
}
@@ -2583,7 +2793,7 @@ func (x *volumeServerVolumeTierMoveDatToRemoteClient) Recv() (*VolumeTierMoveDat
}
func (c *volumeServerClient) VolumeTierMoveDatFromRemote(ctx context.Context, in *VolumeTierMoveDatFromRemoteRequest, opts ...grpc.CallOption) (VolumeServer_VolumeTierMoveDatFromRemoteClient, error) {
- stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[5], c.cc, "/volume_server_pb.VolumeServer/VolumeTierMoveDatFromRemote", opts...)
+ stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[6], c.cc, "/volume_server_pb.VolumeServer/VolumeTierMoveDatFromRemote", opts...)
if err != nil {
return nil, err
}
@@ -2614,8 +2824,17 @@ func (x *volumeServerVolumeTierMoveDatFromRemoteClient) Recv() (*VolumeTierMoveD
return m, nil
}
+func (c *volumeServerClient) VolumeServerStatus(ctx context.Context, in *VolumeServerStatusRequest, opts ...grpc.CallOption) (*VolumeServerStatusResponse, error) {
+ out := new(VolumeServerStatusResponse)
+ err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeServerStatus", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
func (c *volumeServerClient) Query(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (VolumeServer_QueryClient, error) {
- stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[6], c.cc, "/volume_server_pb.VolumeServer/Query", opts...)
+ stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[7], c.cc, "/volume_server_pb.VolumeServer/Query", opts...)
if err != nil {
return nil, err
}
@@ -2651,6 +2870,7 @@ func (x *volumeServerQueryClient) Recv() (*QueriedStripe, error) {
type VolumeServerServer interface {
// Experts only: takes multiple fid parameters. This function does not propagate deletes to replicas.
BatchDelete(context.Context, *BatchDeleteRequest) (*BatchDeleteResponse, error)
+ FileGet(*FileGetRequest, VolumeServer_FileGetServer) error
VacuumVolumeCheck(context.Context, *VacuumVolumeCheckRequest) (*VacuumVolumeCheckResponse, error)
VacuumVolumeCompact(context.Context, *VacuumVolumeCompactRequest) (*VacuumVolumeCompactResponse, error)
VacuumVolumeCommit(context.Context, *VacuumVolumeCommitRequest) (*VacuumVolumeCommitResponse, error)
@@ -2683,7 +2903,8 @@ type VolumeServerServer interface {
// tiered storage
VolumeTierMoveDatToRemote(*VolumeTierMoveDatToRemoteRequest, VolumeServer_VolumeTierMoveDatToRemoteServer) error
VolumeTierMoveDatFromRemote(*VolumeTierMoveDatFromRemoteRequest, VolumeServer_VolumeTierMoveDatFromRemoteServer) error
- // query
+ VolumeServerStatus(context.Context, *VolumeServerStatusRequest) (*VolumeServerStatusResponse, error)
+ // query
Query(*QueryRequest, VolumeServer_QueryServer) error
}
@@ -2709,6 +2930,27 @@ func _VolumeServer_BatchDelete_Handler(srv interface{}, ctx context.Context, dec
return interceptor(ctx, in, info, handler)
}
+func _VolumeServer_FileGet_Handler(srv interface{}, stream grpc.ServerStream) error {
+ m := new(FileGetRequest)
+ if err := stream.RecvMsg(m); err != nil {
+ return err
+ }
+ return srv.(VolumeServerServer).FileGet(m, &volumeServerFileGetServer{stream})
+}
+
+type VolumeServer_FileGetServer interface {
+ Send(*FileGetResponse) error
+ grpc.ServerStream
+}
+
+type volumeServerFileGetServer struct {
+ grpc.ServerStream
+}
+
+func (x *volumeServerFileGetServer) Send(m *FileGetResponse) error {
+ return x.ServerStream.SendMsg(m)
+}
+
func _VolumeServer_VacuumVolumeCheck_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(VacuumVolumeCheckRequest)
if err := dec(in); err != nil {
@@ -3249,6 +3491,24 @@ func (x *volumeServerVolumeTierMoveDatFromRemoteServer) Send(m *VolumeTierMoveDa
return x.ServerStream.SendMsg(m)
}
+func _VolumeServer_VolumeServerStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(VolumeServerStatusRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(VolumeServerServer).VolumeServerStatus(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/volume_server_pb.VolumeServer/VolumeServerStatus",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(VolumeServerServer).VolumeServerStatus(ctx, req.(*VolumeServerStatusRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
func _VolumeServer_Query_Handler(srv interface{}, stream grpc.ServerStream) error {
m := new(QueryRequest)
if err := stream.RecvMsg(m); err != nil {
@@ -3370,8 +3630,17 @@ var _VolumeServer_serviceDesc = grpc.ServiceDesc{
MethodName: "VolumeEcShardsToVolume",
Handler: _VolumeServer_VolumeEcShardsToVolume_Handler,
},
+ {
+ MethodName: "VolumeServerStatus",
+ Handler: _VolumeServer_VolumeServerStatus_Handler,
+ },
},
Streams: []grpc.StreamDesc{
+ {
+ StreamName: "FileGet",
+ Handler: _VolumeServer_FileGet_Handler,
+ ServerStreams: true,
+ },
{
StreamName: "VolumeIncrementalCopy",
Handler: _VolumeServer_VolumeIncrementalCopy_Handler,
@@ -3414,190 +3683,214 @@ var _VolumeServer_serviceDesc = grpc.ServiceDesc{
func init() { proto.RegisterFile("volume_server.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
- // 2959 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xd4, 0x5a, 0x4b, 0x73, 0xdc, 0xc6,
- 0x11, 0xe6, 0x72, 0xf9, 0xd8, 0xed, 0x5d, 0x8a, 0xd4, 0x90, 0xa6, 0xd6, 0x20, 0x25, 0xd1, 0x90,
- 0x1f, 0xa4, 0x6c, 0x91, 0x32, 0x6d, 0xc7, 0x8e, 0x1d, 0x3b, 0x91, 0x28, 0x29, 0x51, 0x6c, 0x51,
- 0x36, 0x28, 0xcb, 0x4e, 0xec, 0x0a, 0x0a, 0x04, 0x66, 0x45, 0x98, 0x00, 0x06, 0x02, 0x66, 0x69,
- 0xae, 0xca, 0x39, 0x39, 0x87, 0x54, 0xa5, 0x92, 0x43, 0x2a, 0x97, 0x9c, 0x73, 0xf7, 0x35, 0x7f,
- 0xc1, 0x7f, 0x20, 0x55, 0x39, 0xe5, 0x92, 0x73, 0x0e, 0xb9, 0xa5, 0x2a, 0x97, 0xd4, 0xbc, 0xb0,
- 0x78, 0x72, 0x41, 0x8b, 0xa9, 0x54, 0x6e, 0x83, 0x9e, 0x9e, 0xee, 0x99, 0x9e, 0xee, 0x9e, 0xe9,
- 0xf9, 0x00, 0x8b, 0x47, 0xc4, 0x1b, 0xf8, 0xd8, 0x8c, 0x71, 0x74, 0x84, 0xa3, 0xcd, 0x30, 0x22,
- 0x94, 0xa0, 0x85, 0x0c, 0xd1, 0x0c, 0xf7, 0xf5, 0x2d, 0x40, 0x37, 0x2d, 0x6a, 0x1f, 0xdc, 0xc2,
- 0x1e, 0xa6, 0xd8, 0xc0, 0x8f, 0x07, 0x38, 0xa6, 0xe8, 0x59, 0x68, 0xf5, 0x5d, 0x0f, 0x9b, 0xae,
- 0x13, 0xf7, 0x1a, 0x6b, 0xcd, 0xf5, 0xb6, 0x31, 0xcb, 0xbe, 0xef, 0x3a, 0xb1, 0x7e, 0x1f, 0x16,
- 0x33, 0x03, 0xe2, 0x90, 0x04, 0x31, 0x46, 0x6f, 0xc1, 0x6c, 0x84, 0xe3, 0x81, 0x47, 0xc5, 0x80,
- 0xce, 0xf6, 0xa5, 0xcd, 0xbc, 0xae, 0xcd, 0x64, 0xc8, 0xc0, 0xa3, 0x86, 0x62, 0xd7, 0xbf, 0x6e,
- 0x40, 0x37, 0xdd, 0x83, 0x2e, 0xc0, 0xac, 0x54, 0xde, 0x6b, 0xac, 0x35, 0xd6, 0xdb, 0xc6, 0x8c,
- 0xd0, 0x8d, 0x96, 0x61, 0x26, 0xa6, 0x16, 0x1d, 0xc4, 0xbd, 0xc9, 0xb5, 0xc6, 0xfa, 0xb4, 0x21,
- 0xbf, 0xd0, 0x12, 0x4c, 0xe3, 0x28, 0x22, 0x51, 0xaf, 0xc9, 0xd9, 0xc5, 0x07, 0x42, 0x30, 0x15,
- 0xbb, 0x4f, 0x70, 0x6f, 0x6a, 0xad, 0xb1, 0x3e, 0x67, 0xf0, 0x36, 0xea, 0xc1, 0xec, 0x11, 0x8e,
- 0x62, 0x97, 0x04, 0xbd, 0x69, 0x4e, 0x56, 0x9f, 0xfa, 0x2c, 0x4c, 0xdf, 0xf6, 0x43, 0x3a, 0xd4,
- 0xdf, 0x84, 0xde, 0x43, 0xcb, 0x1e, 0x0c, 0xfc, 0x87, 0x7c, 0xfa, 0x3b, 0x07, 0xd8, 0x3e, 0x54,
- 0x66, 0x59, 0x81, 0xb6, 0x5c, 0x94, 0x9c, 0xdb, 0x9c, 0xd1, 0x12, 0x84, 0xbb, 0x8e, 0xfe, 0x23,
- 0x78, 0xb6, 0x64, 0xa0, 0x34, 0xcf, 0x15, 0x98, 0x7b, 0x64, 0x45, 0xfb, 0xd6, 0x23, 0x6c, 0x46,
- 0x16, 0x75, 0x09, 0x1f, 0xdd, 0x30, 0xba, 0x92, 0x68, 0x30, 0x9a, 0xfe, 0x19, 0x68, 0x19, 0x09,
- 0xc4, 0x0f, 0x2d, 0x9b, 0xd6, 0x51, 0x8e, 0xd6, 0xa0, 0x13, 0x46, 0xd8, 0xf2, 0x3c, 0x62, 0x5b,
- 0x14, 0x73, 0xfb, 0x34, 0x8d, 0x34, 0x49, 0xbf, 0x08, 0x2b, 0xa5, 0xc2, 0xc5, 0x04, 0xf5, 0xb7,
- 0x72, 0xb3, 0x27, 0xbe, 0xef, 0xd6, 0x52, 0xad, 0xaf, 0x16, 0x66, 0xcd, 0x47, 0x4a, 0xb9, 0xdf,
- 0xcf, 0xf5, 0x7a, 0xd8, 0x0a, 0x06, 0x61, 0x2d, 0xc1, 0xf9, 0x19, 0xab, 0xa1, 0x89, 0xe4, 0x0b,
- 0xc2, 0x6d, 0x76, 0x88, 0xe7, 0x61, 0x9b, 0xba, 0x24, 0x50, 0x62, 0x2f, 0x01, 0xd8, 0x09, 0x51,
- 0x3a, 0x51, 0x8a, 0xa2, 0x6b, 0xd0, 0x2b, 0x0e, 0x95, 0x62, 0xff, 0xd6, 0x80, 0x67, 0x6e, 0x48,
- 0xa3, 0x09, 0xc5, 0xb5, 0x36, 0x20, 0xab, 0x72, 0x32, 0xaf, 0x32, 0xbf, 0x41, 0xcd, 0xc2, 0x06,
- 0x31, 0x8e, 0x08, 0x87, 0x9e, 0x6b, 0x5b, 0x5c, 0xc4, 0x14, 0x17, 0x91, 0x26, 0xa1, 0x05, 0x68,
- 0x52, 0xea, 0x71, 0xcf, 0x6d, 0x1b, 0xac, 0x89, 0xb6, 0x61, 0xd9, 0xc7, 0x3e, 0x89, 0x86, 0xa6,
- 0x6f, 0x85, 0xa6, 0x6f, 0x1d, 0x9b, 0xcc, 0xcd, 0x4d, 0x7f, 0xbf, 0x37, 0xc3, 0xe7, 0x87, 0x44,
- 0xef, 0x3d, 0x2b, 0xbc, 0x67, 0x1d, 0xef, 0xb9, 0x4f, 0xf0, 0xbd, 0x7d, 0xbd, 0x07, 0xcb, 0xf9,
- 0xf5, 0xc9, 0xa5, 0x7f, 0x0f, 0x2e, 0x08, 0xca, 0xde, 0x30, 0xb0, 0xf7, 0x78, 0x6c, 0xd5, 0xda,
- 0xa8, 0x7f, 0x37, 0xa0, 0x57, 0x1c, 0x28, 0x3d, 0xff, 0x69, 0xad, 0x76, 0x6a, 0x9b, 0x5c, 0x86,
- 0x0e, 0xb5, 0x5c, 0xcf, 0x24, 0xfd, 0x7e, 0x8c, 0x29, 0x37, 0xc4, 0x94, 0x01, 0x8c, 0x74, 0x9f,
- 0x53, 0xd0, 0x06, 0x2c, 0xd8, 0xc2, 0xfb, 0xcd, 0x08, 0x1f, 0xb9, 0x3c, 0x1b, 0xcc, 0xf2, 0x89,
- 0xcd, 0xdb, 0x2a, 0x2a, 0x04, 0x19, 0xe9, 0x30, 0xe7, 0x3a, 0xc7, 0x26, 0x4f, 0x47, 0x3c, 0x99,
- 0xb4, 0xb8, 0xb4, 0x8e, 0xeb, 0x1c, 0xdf, 0x71, 0x3d, 0xcc, 0x2c, 0xaa, 0x3f, 0x84, 0x55, 0xb1,
- 0xf8, 0xbb, 0x81, 0x1d, 0x61, 0x1f, 0x07, 0xd4, 0xf2, 0x76, 0x48, 0x38, 0xac, 0xe5, 0x36, 0xcf,
- 0x42, 0x2b, 0x76, 0x03, 0x1b, 0x9b, 0x81, 0x48, 0x6a, 0x53, 0xc6, 0x2c, 0xff, 0xde, 0x8d, 0xf5,
- 0x9b, 0x70, 0xb1, 0x42, 0xae, 0xb4, 0xec, 0x73, 0xd0, 0xe5, 0x13, 0xb3, 0x49, 0x40, 0x71, 0x40,
- 0xb9, 0xec, 0xae, 0xd1, 0x61, 0xb4, 0x1d, 0x41, 0xd2, 0x5f, 0x05, 0x24, 0x64, 0xdc, 0x23, 0x83,
- 0xa0, 0x5e, 0x38, 0x3f, 0x03, 0x8b, 0x99, 0x21, 0xd2, 0x37, 0x5e, 0x83, 0x25, 0x41, 0xfe, 0x38,
- 0xf0, 0x6b, 0xcb, 0xba, 0x00, 0xcf, 0xe4, 0x06, 0x49, 0x69, 0xdb, 0x4a, 0x49, 0xf6, 0xd8, 0x39,
- 0x51, 0xd8, 0xb2, 0x9a, 0x41, 0xf6, 0xe4, 0xe1, 0x99, 0x4b, 0x4c, 0xd8, 0x8a, 0x0e, 0x0d, 0x6c,
- 0x39, 0x24, 0xf0, 0x86, 0xb5, 0x33, 0x57, 0xc9, 0x48, 0x29, 0xf7, 0x13, 0x58, 0x56, 0x19, 0x2d,
- 0xe8, 0xbb, 0x8f, 0x06, 0x11, 0xae, 0x9b, 0x89, 0xd3, 0x2e, 0x3b, 0x59, 0x70, 0x59, 0x7d, 0x4b,
- 0x85, 0x59, 0x4a, 0xb0, 0xdc, 0xd2, 0xe4, 0x24, 0x6b, 0xa4, 0x4e, 0x32, 0xfd, 0x9b, 0x06, 0x9c,
- 0x57, 0x23, 0x6a, 0xfa, 0xd5, 0x29, 0x03, 0xab, 0x59, 0x19, 0x58, 0x53, 0xa3, 0xc0, 0x5a, 0x87,
- 0x85, 0x98, 0x0c, 0x22, 0x1b, 0x9b, 0x8e, 0x45, 0x2d, 0x33, 0x20, 0x0e, 0x96, 0x71, 0x77, 0x4e,
- 0xd0, 0x6f, 0x59, 0xd4, 0xda, 0x25, 0x0e, 0xd6, 0x7f, 0xa8, 0xdc, 0x2e, 0xe3, 0xaf, 0x1b, 0x70,
- 0xde, 0xb3, 0x62, 0x6a, 0x5a, 0x61, 0x88, 0x03, 0xc7, 0xb4, 0x28, 0x73, 0xfa, 0x06, 0x77, 0xfa,
- 0x73, 0xac, 0xe3, 0x06, 0xa7, 0xdf, 0xa0, 0xbb, 0xb1, 0xfe, 0x87, 0x49, 0x98, 0x67, 0x63, 0x59,
- 0x90, 0xd5, 0x5a, 0xef, 0x02, 0x34, 0xf1, 0x31, 0x95, 0x0b, 0x65, 0x4d, 0xb4, 0x05, 0x8b, 0x32,
- 0x9a, 0x5d, 0x12, 0x8c, 0x02, 0xbd, 0x29, 0xf2, 0xe2, 0xa8, 0x2b, 0x89, 0xf5, 0xcb, 0xd0, 0x89,
- 0x29, 0x09, 0x55, 0xde, 0x98, 0x12, 0x79, 0x83, 0x91, 0x64, 0xde, 0xc8, 0xda, 0x74, 0xba, 0xc4,
- 0xa6, 0x5d, 0x37, 0x36, 0xb1, 0x6d, 0x8a, 0x59, 0xf1, 0xcc, 0xd3, 0x32, 0xc0, 0x8d, 0x6f, 0xdb,
- 0xc2, 0x1a, 0xe8, 0x3d, 0x58, 0x75, 0x1f, 0x05, 0x24, 0xc2, 0xa6, 0x34, 0x24, 0x8f, 0xdf, 0x80,
- 0x50, 0xb3, 0x4f, 0x06, 0x81, 0xc3, 0xb3, 0x50, 0xcb, 0xe8, 0x09, 0x9e, 0x3d, 0xce, 0xc2, 0x2c,
- 0xb0, 0x4b, 0xe8, 0x1d, 0xd6, 0xaf, 0xbf, 0x01, 0x0b, 0x23, 0xab, 0xd4, 0xcf, 0x02, 0x5f, 0x37,
- 0x94, 0xc7, 0x3d, 0xb0, 0x5c, 0x6f, 0x0f, 0x07, 0x0e, 0x8e, 0x9e, 0x32, 0x3b, 0xa1, 0xeb, 0xb0,
- 0xe4, 0x3a, 0x1e, 0x36, 0xa9, 0xeb, 0x63, 0x32, 0xa0, 0x66, 0x8c, 0x6d, 0x12, 0x38, 0xb1, 0xb2,
- 0x2f, 0xeb, 0x7b, 0x20, 0xba, 0xf6, 0x44, 0x8f, 0xfe, 0xab, 0xe4, 0x94, 0x48, 0xcf, 0x62, 0x74,
- 0x3f, 0x0a, 0x30, 0x66, 0x02, 0x0f, 0xb0, 0xe5, 0xe0, 0x48, 0x2e, 0xa3, 0x2b, 0x88, 0x3f, 0xe1,
- 0x34, 0xb6, 0x43, 0x92, 0x69, 0x9f, 0x38, 0x43, 0x3e, 0xa3, 0xae, 0x01, 0x82, 0x74, 0x93, 0x38,
- 0x43, 0x9e, 0xae, 0x63, 0x93, 0x3b, 0x99, 0x7d, 0x30, 0x08, 0x0e, 0xf9, 0x6c, 0x5a, 0x46, 0xc7,
- 0x8d, 0x3f, 0xb0, 0x62, 0xba, 0xc3, 0x48, 0xfa, 0x9f, 0x1b, 0x2a, 0x5f, 0xb0, 0x69, 0x18, 0xd8,
- 0xc6, 0xee, 0xd1, 0xff, 0xc0, 0x1c, 0x6c, 0x84, 0x74, 0x82, 0xcc, 0x3d, 0x59, 0x06, 0x1c, 0x12,
- 0x7d, 0xf2, 0x54, 0xe5, 0x3d, 0xa3, 0x74, 0x95, 0x9d, 0xb8, 0x4c, 0x57, 0x9f, 0xab, 0xe3, 0xe2,
- 0xb6, 0xbd, 0x77, 0x60, 0x45, 0x4e, 0xfc, 0x63, 0x1c, 0xe0, 0xc8, 0xa2, 0x67, 0x72, 0x7d, 0xd1,
- 0xd7, 0xe0, 0x52, 0x95, 0x74, 0xa9, 0xff, 0x33, 0x75, 0x0c, 0x2a, 0x0e, 0x03, 0xef, 0x0f, 0x5c,
- 0xcf, 0x39, 0x13, 0xf5, 0xef, 0xe7, 0x17, 0x97, 0x08, 0x97, 0xfe, 0x73, 0x15, 0xce, 0x47, 0x9c,
- 0x44, 0xcd, 0x98, 0x31, 0x24, 0x95, 0xcb, 0x9c, 0x31, 0x2f, 0x3b, 0xf8, 0x40, 0x56, 0xc1, 0xfc,
- 0x66, 0x52, 0x79, 0x80, 0x92, 0x76, 0x66, 0x69, 0x75, 0x05, 0xda, 0x23, 0xf5, 0x4d, 0xae, 0xbe,
- 0x15, 0x4b, 0xbd, 0xcc, 0x3b, 0x6d, 0x12, 0x0e, 0x4d, 0x6c, 0x8b, 0x1b, 0x05, 0xdf, 0xea, 0x96,
- 0xd1, 0x61, 0xc4, 0xdb, 0x36, 0xbf, 0x50, 0xd4, 0xcf, 0xb1, 0x29, 0x69, 0x5f, 0x08, 0x69, 0x33,
- 0x69, 0x69, 0x5f, 0x70, 0x69, 0x8a, 0xe7, 0xc8, 0xed, 0x0b, 0x9e, 0xd9, 0x11, 0xcf, 0x43, 0xb7,
- 0xcf, 0x78, 0x46, 0x5e, 0x95, 0x35, 0x86, 0xdc, 0xd5, 0x2f, 0x61, 0x25, 0xdb, 0x5b, 0xff, 0xc0,
- 0x7e, 0x2a, 0x63, 0xe9, 0x97, 0xf2, 0xee, 0x94, 0x3b, 0xf5, 0x8f, 0xf2, 0xd3, 0xae, 0x7d, 0xc3,
- 0x79, 0xba, 0x79, 0x5d, 0xcc, 0x1b, 0x24, 0x7b, 0x4d, 0xfa, 0x34, 0x3f, 0xed, 0x53, 0x5c, 0x97,
- 0x4e, 0x56, 0x7c, 0x39, 0x1f, 0x02, 0xf9, 0x3b, 0xd5, 0x1f, 0x93, 0xfc, 0x2a, 0x39, 0xd8, 0x8d,
- 0xa6, 0x76, 0x5e, 0x93, 0x7a, 0xb9, 0x39, 0xe6, 0x8c, 0x59, 0xa9, 0x96, 0x95, 0xdc, 0xf2, 0x3c,
- 0x14, 0x15, 0x8b, 0xfc, 0xca, 0x14, 0xd7, 0x4d, 0x59, 0x5c, 0xab, 0x47, 0x83, 0x43, 0x3c, 0xe4,
- 0x3e, 0x3b, 0x25, 0x1e, 0x0d, 0xde, 0xc7, 0x43, 0x7d, 0x37, 0x17, 0x71, 0x62, 0x6a, 0x32, 0x76,
- 0x11, 0x4c, 0x31, 0x67, 0x97, 0x29, 0x9f, 0xb7, 0xd1, 0x45, 0x00, 0x37, 0x36, 0x1d, 0xbe, 0xe7,
- 0x62, 0x52, 0x2d, 0xa3, 0xed, 0x4a, 0x27, 0x70, 0xf4, 0xdf, 0x36, 0x46, 0x02, 0x6f, 0x7a, 0x64,
- 0xff, 0x0c, 0xbd, 0x32, 0xbd, 0x8a, 0x66, 0x66, 0x15, 0xe9, 0xd7, 0x83, 0xa9, 0xec, 0xeb, 0x41,
- 0x2a, 0x88, 0xd2, 0xd3, 0xa9, 0x4a, 0xcd, 0x0f, 0xc8, 0xd9, 0x55, 0x96, 0xc5, 0xd4, 0x3c, 0x92,
- 0x2e, 0xf5, 0xbf, 0x0d, 0x2b, 0xcc, 0xe0, 0x82, 0xca, 0xeb, 0x96, 0xfa, 0xb5, 0xdd, 0x3f, 0x26,
- 0x61, 0xb5, 0x7c, 0x70, 0x9d, 0xfa, 0xee, 0x1d, 0xd0, 0x92, 0xfa, 0x89, 0x1d, 0x8d, 0x31, 0xb5,
- 0xfc, 0x30, 0x39, 0x1c, 0xc5, 0x19, 0x7a, 0x41, 0x16, 0x53, 0x0f, 0x54, 0xbf, 0x3a, 0x21, 0x0b,
- 0xc5, 0x57, 0xb3, 0x50, 0x7c, 0x31, 0x05, 0x8e, 0x45, 0xab, 0x14, 0x88, 0x3b, 0xdc, 0x05, 0xc7,
- 0xa2, 0x55, 0x0a, 0x92, 0xc1, 0x5c, 0x81, 0xf0, 0xda, 0x8e, 0xe4, 0xe7, 0x0a, 0x2e, 0x02, 0xc8,
- 0xeb, 0xd5, 0x20, 0x50, 0xc5, 0x64, 0x5b, 0x5c, 0xae, 0x06, 0x41, 0xe5, 0x2d, 0x73, 0xb6, 0xf2,
- 0x96, 0x99, 0xdd, 0xcd, 0x56, 0x61, 0x37, 0x3f, 0x05, 0xb8, 0xe5, 0xc6, 0x87, 0xc2, 0xc8, 0xec,
- 0x5a, 0xeb, 0xb8, 0xaa, 0x1a, 0x60, 0x4d, 0x46, 0xb1, 0x3c, 0x4f, 0x9a, 0x8e, 0x35, 0x59, 0xf8,
- 0x0c, 0x62, 0xec, 0x48, 0xeb, 0xf0, 0x36, 0xa3, 0xf5, 0x23, 0x8c, 0xa5, 0x01, 0x78, 0x5b, 0xff,
- 0x53, 0x03, 0xda, 0xf7, 0xb0, 0x2f, 0x25, 0x5f, 0x02, 0x78, 0x44, 0x22, 0x32, 0xa0, 0x6e, 0x80,
- 0xc5, 0x2d, 0x7c, 0xda, 0x48, 0x51, 0xbe, 0xbb, 0x1e, 0x9e, 0x1a, 0xb0, 0xd7, 0x97, 0xc6, 0xe4,
- 0x6d, 0x46, 0x3b, 0xc0, 0x56, 0x28, 0xed, 0xc7, 0xdb, 0xac, 0xd6, 0x89, 0xa9, 0x65, 0x1f, 0x72,
- 0x63, 0x4d, 0x19, 0xe2, 0x43, 0xff, 0x6b, 0x03, 0xc0, 0xc0, 0x3e, 0xa1, 0xdc, 0xd7, 0xd8, 0xed,
- 0x76, 0xdf, 0xb2, 0x0f, 0x59, 0xbd, 0x40, 0x87, 0x21, 0x96, 0x96, 0xe8, 0x48, 0xda, 0x83, 0x61,
- 0xc8, 0x77, 0x48, 0xb1, 0xc8, 0xfc, 0xd5, 0x36, 0xda, 0x92, 0x22, 0x2a, 0x03, 0x15, 0xca, 0x6d,
- 0x83, 0x35, 0x53, 0x39, 0x4d, 0x4c, 0x5b, 0xe5, 0xb4, 0x15, 0x68, 0xe7, 0x5d, 0x81, 0xa7, 0x02,
- 0xee, 0x07, 0x57, 0x60, 0xce, 0x27, 0x8e, 0xdb, 0x77, 0xb1, 0xc3, 0x1d, 0x4d, 0x2e, 0xa5, 0xab,
- 0x88, 0xcc, 0xb9, 0xd0, 0x2a, 0xb4, 0xf1, 0x31, 0xc5, 0x41, 0xe2, 0x03, 0x6d, 0x63, 0x44, 0xd0,
- 0xbf, 0x02, 0x50, 0x05, 0x7d, 0x9f, 0xa0, 0x6d, 0x98, 0x66, 0xc2, 0xd5, 0x73, 0xe9, 0x6a, 0xf1,
- 0xb9, 0x74, 0x64, 0x06, 0x43, 0xb0, 0xa6, 0x13, 0xd0, 0x64, 0x26, 0x01, 0x8d, 0xaf, 0xe7, 0xf4,
- 0x6f, 0x1b, 0xb0, 0x26, 0xaf, 0x8f, 0x2e, 0x8e, 0xee, 0x91, 0x23, 0x76, 0x95, 0x78, 0x40, 0x84,
- 0x92, 0x33, 0xc9, 0x9c, 0x6f, 0x41, 0xcf, 0xc1, 0x31, 0x75, 0x03, 0xae, 0xd0, 0x54, 0x9b, 0x12,
- 0x58, 0x3e, 0x96, 0x13, 0x5a, 0x4e, 0xf5, 0xdf, 0x14, 0xdd, 0xbb, 0x96, 0x8f, 0xd1, 0x35, 0x58,
- 0x3c, 0xc4, 0x38, 0x34, 0x3d, 0x62, 0x5b, 0x9e, 0xa9, 0x62, 0x52, 0xde, 0x8f, 0x16, 0x58, 0xd7,
- 0x07, 0xac, 0xe7, 0x96, 0x88, 0x4b, 0x3d, 0x86, 0xe7, 0x4e, 0x58, 0x89, 0xcc, 0x4b, 0xab, 0xd0,
- 0x0e, 0x23, 0x62, 0xe3, 0x98, 0xf9, 0x6c, 0x83, 0x1f, 0x53, 0x23, 0x02, 0xba, 0x0e, 0x8b, 0xc9,
- 0xc7, 0x87, 0x38, 0xb2, 0x71, 0x40, 0xad, 0x47, 0xe2, 0xdd, 0x74, 0xd2, 0x28, 0xeb, 0xd2, 0x7f,
- 0xdf, 0x00, 0xbd, 0xa0, 0xf5, 0x4e, 0x44, 0xfc, 0x33, 0xb4, 0xe0, 0x16, 0x2c, 0x71, 0x3b, 0x44,
- 0x5c, 0xe4, 0xc8, 0x10, 0xa2, 0x8c, 0x39, 0xcf, 0xfa, 0x84, 0x36, 0x65, 0x89, 0x01, 0x5c, 0x39,
- 0x71, 0x4e, 0xff, 0x25, 0x5b, 0xfc, 0xab, 0x0b, 0xdd, 0x8f, 0x06, 0x38, 0x1a, 0xa6, 0x1e, 0x5c,
- 0x63, 0x2c, 0x57, 0xa1, 0x10, 0x83, 0x14, 0x85, 0x65, 0xda, 0x7e, 0x44, 0x7c, 0x33, 0x01, 0x15,
- 0x26, 0x39, 0x4b, 0x87, 0x11, 0xef, 0x08, 0x60, 0x01, 0xbd, 0x0b, 0x33, 0x7d, 0xd7, 0xa3, 0x58,
- 0x3c, 0xe3, 0x77, 0xb6, 0x5f, 0x28, 0x46, 0x44, 0x5a, 0xe7, 0xe6, 0x1d, 0xce, 0x6c, 0xc8, 0x41,
- 0x68, 0x1f, 0x16, 0xdd, 0x20, 0xe4, 0xa5, 0x57, 0xe4, 0x5a, 0x9e, 0xfb, 0x64, 0xf4, 0x64, 0xd8,
- 0xd9, 0x7e, 0x75, 0x8c, 0xac, 0xbb, 0x6c, 0xe4, 0x5e, 0x7a, 0xa0, 0x81, 0xdc, 0x02, 0x0d, 0x61,
- 0x58, 0x22, 0x03, 0x5a, 0x54, 0x32, 0xcd, 0x95, 0x6c, 0x8f, 0x51, 0x72, 0x9f, 0x0f, 0xcd, 0x6a,
- 0x59, 0x24, 0x45, 0xa2, 0xb6, 0x0b, 0x33, 0x62, 0x71, 0x2c, 0x47, 0xf6, 0x5d, 0xec, 0x29, 0x20,
- 0x44, 0x7c, 0xb0, 0x34, 0x40, 0x42, 0x1c, 0x59, 0x81, 0x4a, 0x77, 0xea, 0x93, 0xf1, 0x1f, 0x59,
- 0xde, 0x40, 0xc5, 0x9b, 0xf8, 0xd0, 0xfe, 0x32, 0x0d, 0xa8, 0xb8, 0x42, 0xf5, 0x0e, 0x1a, 0xe1,
- 0x98, 0xa5, 0x90, 0x74, 0x7e, 0x9d, 0x4f, 0xd1, 0x79, 0x8e, 0xfd, 0x04, 0xda, 0x76, 0x7c, 0x64,
- 0x72, 0x93, 0x70, 0x9d, 0x9d, 0xed, 0xb7, 0x4f, 0x6d, 0xd2, 0xcd, 0x9d, 0xbd, 0x87, 0x9c, 0x6a,
- 0xb4, 0xec, 0xf8, 0x88, 0xb7, 0xd0, 0xcf, 0x01, 0xbe, 0x88, 0x49, 0x20, 0x25, 0x8b, 0x8d, 0x7f,
- 0xe7, 0xf4, 0x92, 0x7f, 0xba, 0x77, 0x7f, 0x57, 0x88, 0x6e, 0x33, 0x71, 0x42, 0xb6, 0x0d, 0x73,
- 0xa1, 0x15, 0x3d, 0x1e, 0x60, 0x2a, 0xc5, 0x0b, 0x5f, 0x78, 0xef, 0xf4, 0xe2, 0x3f, 0x14, 0x62,
- 0x84, 0x86, 0x6e, 0x98, 0xfa, 0xd2, 0xbe, 0x9d, 0x84, 0x96, 0x5a, 0x17, 0xab, 0xde, 0xb8, 0x87,
- 0x8b, 0x37, 0x0c, 0xd3, 0x0d, 0xfa, 0x44, 0x5a, 0xf4, 0x1c, 0xa3, 0x8b, 0x67, 0x0c, 0x9e, 0xfd,
- 0x37, 0x60, 0x21, 0xc2, 0x36, 0x89, 0x1c, 0x76, 0xc7, 0x75, 0x7d, 0x97, 0xb9, 0xbd, 0xd8, 0xcb,
- 0x79, 0x41, 0xbf, 0xa5, 0xc8, 0xe8, 0x25, 0x98, 0xe7, 0xdb, 0x9e, 0xe2, 0x6c, 0x2a, 0x99, 0xd8,
- 0x4b, 0x31, 0x6e, 0xc0, 0xc2, 0xe3, 0x01, 0xcb, 0x1b, 0xf6, 0x81, 0x15, 0x59, 0x36, 0x25, 0xc9,
- 0x6b, 0xc2, 0x3c, 0xa7, 0xef, 0x24, 0x64, 0xf4, 0x3a, 0x2c, 0x0b, 0x56, 0x1c, 0xdb, 0x56, 0x98,
- 0x8c, 0xc0, 0x91, 0x2c, 0x36, 0x97, 0x78, 0xef, 0x6d, 0xde, 0xb9, 0xa3, 0xfa, 0x90, 0x06, 0x2d,
- 0x9b, 0xf8, 0x3e, 0x0e, 0x68, 0xcc, 0x8f, 0xbf, 0xb6, 0x91, 0x7c, 0xa3, 0x1b, 0x70, 0xd1, 0xf2,
- 0x3c, 0xf2, 0xa5, 0xc9, 0x47, 0x3a, 0x66, 0x61, 0x75, 0xa2, 0xf4, 0xd4, 0x38, 0xd3, 0x47, 0x9c,
- 0xc7, 0xc8, 0x2e, 0x54, 0xbb, 0x0c, 0xed, 0x64, 0x1f, 0xd9, 0x8d, 0x21, 0xe5, 0x90, 0xbc, 0xad,
- 0x9d, 0x83, 0x6e, 0x7a, 0x27, 0xb4, 0x7f, 0x36, 0x61, 0xb1, 0x24, 0xa8, 0xd0, 0x67, 0x00, 0xcc,
- 0x5b, 0x45, 0x68, 0x49, 0x77, 0xfd, 0xc1, 0xe9, 0x83, 0x93, 0xf9, 0xab, 0x20, 0x1b, 0xcc, 0xfb,
- 0x45, 0x13, 0xfd, 0x02, 0x3a, 0xdc, 0x63, 0xa5, 0x74, 0xe1, 0xb2, 0xef, 0x7e, 0x07, 0xe9, 0x6c,
- 0xad, 0x52, 0x3c, 0x8f, 0x01, 0xd1, 0xd6, 0xfe, 0xde, 0x80, 0x76, 0xa2, 0x98, 0xdd, 0x7f, 0xc4,
- 0x46, 0xf1, 0xbd, 0x8e, 0xd5, 0xfd, 0x87, 0xd3, 0xee, 0x70, 0xd2, 0xff, 0xa5, 0x2b, 0x69, 0x6f,
- 0x02, 0x8c, 0xd6, 0x5f, 0xba, 0x84, 0x46, 0xe9, 0x12, 0xf4, 0x0d, 0x98, 0x63, 0x96, 0x75, 0xb1,
- 0xb3, 0x47, 0x23, 0x37, 0xe4, 0x90, 0xae, 0xe0, 0x89, 0x65, 0x01, 0xa9, 0x3e, 0xb7, 0xbf, 0x59,
- 0x81, 0x6e, 0xfa, 0x01, 0x0d, 0x7d, 0x0e, 0x9d, 0x14, 0x74, 0x8d, 0x9e, 0x2f, 0x6e, 0x5a, 0x11,
- 0x0a, 0xd7, 0x5e, 0x18, 0xc3, 0x25, 0x6b, 0xac, 0x09, 0x14, 0xc0, 0xf9, 0x02, 0xfe, 0x8b, 0xae,
- 0x16, 0x47, 0x57, 0xa1, 0xcb, 0xda, 0xcb, 0xb5, 0x78, 0x13, 0x7d, 0x14, 0x16, 0x4b, 0x00, 0x5d,
- 0xf4, 0xca, 0x18, 0x29, 0x19, 0x50, 0x59, 0xbb, 0x56, 0x93, 0x3b, 0xd1, 0xfa, 0x18, 0x50, 0x11,
- 0xed, 0x45, 0x2f, 0x8f, 0x15, 0x33, 0x42, 0x93, 0xb5, 0x57, 0xea, 0x31, 0x57, 0x2e, 0x54, 0xe0,
- 0xc0, 0x63, 0x17, 0x9a, 0x41, 0x9a, 0xc7, 0x2e, 0x34, 0x07, 0x2e, 0x4f, 0xa0, 0x43, 0x58, 0xc8,
- 0x63, 0xc4, 0x68, 0xa3, 0xea, 0x9f, 0x86, 0x02, 0x04, 0xad, 0x5d, 0xad, 0xc3, 0x9a, 0x28, 0xc3,
- 0x70, 0x2e, 0x8b, 0xc9, 0xa2, 0x97, 0x8a, 0xe3, 0x4b, 0x51, 0x69, 0x6d, 0x7d, 0x3c, 0x63, 0x7a,
- 0x4d, 0x79, 0x9c, 0xb6, 0x6c, 0x4d, 0x15, 0x20, 0x70, 0xd9, 0x9a, 0xaa, 0x60, 0x5f, 0x7d, 0x02,
- 0x7d, 0xa5, 0xc0, 0xbf, 0x1c, 0x7e, 0x89, 0x36, 0xab, 0xc4, 0x94, 0x03, 0xa8, 0xda, 0x56, 0x6d,
- 0x7e, 0xa5, 0xfb, 0x7a, 0x83, 0xc5, 0x7a, 0x0a, 0xc6, 0x2c, 0x8b, 0xf5, 0x22, 0x30, 0x5a, 0x16,
- 0xeb, 0x65, 0x58, 0xe8, 0x04, 0xda, 0x87, 0xb9, 0x0c, 0xb0, 0x89, 0x5e, 0xac, 0x1a, 0x99, 0x7d,
- 0xff, 0xd3, 0x5e, 0x1a, 0xcb, 0x97, 0xe8, 0x30, 0x55, 0xf6, 0x92, 0xe9, 0xaa, 0x72, 0x72, 0xd9,
- 0x7c, 0xf5, 0xe2, 0x38, 0xb6, 0x4c, 0x28, 0x17, 0xe0, 0xcf, 0xd2, 0x50, 0xae, 0x82, 0x57, 0x4b,
- 0x43, 0xb9, 0x1a, 0x51, 0x9d, 0x40, 0x07, 0x30, 0x9f, 0x83, 0x3e, 0xd1, 0x7a, 0x95, 0x88, 0x3c,
- 0xec, 0xaa, 0x6d, 0xd4, 0xe0, 0x4c, 0x34, 0xfd, 0x4c, 0x15, 0xdb, 0xdc, 0xe5, 0xae, 0x54, 0x0f,
- 0x1d, 0xf9, 0xd9, 0xf3, 0x27, 0x33, 0x25, 0xa2, 0xbf, 0x84, 0xa5, 0xb2, 0x17, 0x31, 0x74, 0xad,
- 0xac, 0x84, 0xaf, 0x7c, 0x76, 0xd3, 0x36, 0xeb, 0xb2, 0x27, 0x8a, 0x3f, 0x86, 0x96, 0x82, 0xff,
- 0xd0, 0x73, 0xc5, 0xd1, 0x39, 0xc0, 0x54, 0xd3, 0x4f, 0x62, 0x49, 0x85, 0x8a, 0xaf, 0xb2, 0xc2,
- 0x08, 0x97, 0xab, 0xce, 0x0a, 0x05, 0x04, 0xb1, 0x3a, 0x2b, 0x14, 0x61, 0x3e, 0xae, 0x2e, 0x71,
- 0xbb, 0x34, 0x8c, 0x55, 0xed, 0x76, 0x25, 0x28, 0x5d, 0xb5, 0xdb, 0x95, 0x22, 0x63, 0x13, 0xe8,
- 0x97, 0x0a, 0xca, 0xcf, 0xa3, 0x57, 0xa8, 0x32, 0xb7, 0x54, 0xa0, 0x68, 0xda, 0xf5, 0xfa, 0x03,
- 0x12, 0xf5, 0x4f, 0x54, 0x26, 0xcc, 0xa1, 0x57, 0xd5, 0x99, 0xb0, 0x1c, 0x43, 0xd3, 0xb6, 0x6a,
- 0xf3, 0x17, 0x83, 0x3c, 0x0d, 0xef, 0x54, 0x5b, 0xbb, 0x04, 0x11, 0xab, 0xb6, 0x76, 0x29, 0x62,
- 0xc4, 0xe3, 0xa3, 0x0c, 0xba, 0x29, 0x8b, 0x8f, 0x13, 0xb0, 0x25, 0x6d, 0xb3, 0x2e, 0x7b, 0xe6,
- 0xa2, 0x50, 0xc4, 0x66, 0xd0, 0xd8, 0xf9, 0x67, 0xce, 0x80, 0x6b, 0x35, 0xb9, 0xab, 0x77, 0x57,
- 0x9d, 0x09, 0x63, 0x17, 0x90, 0x3b, 0x1b, 0xb6, 0x6a, 0xf3, 0x27, 0xba, 0x43, 0xf5, 0x63, 0x48,
- 0x0a, 0x57, 0x41, 0x57, 0xc7, 0xc8, 0x49, 0xe1, 0x42, 0xda, 0xcb, 0xb5, 0x78, 0xcb, 0xa2, 0x37,
- 0x8d, 0x74, 0x9c, 0xe4, 0x4f, 0x05, 0x78, 0xe6, 0x24, 0x7f, 0x2a, 0x01, 0x4f, 0x4a, 0xa2, 0x57,
- 0x01, 0x1c, 0xe3, 0xa3, 0x37, 0x07, 0xb4, 0x8c, 0x8f, 0xde, 0x02, 0x76, 0x32, 0x81, 0x7e, 0x3d,
- 0xfa, 0x61, 0xa0, 0xf8, 0xdc, 0x88, 0xb6, 0x2b, 0x53, 0x51, 0xe5, 0x2b, 0xab, 0xf6, 0xda, 0xa9,
- 0xc6, 0xa4, 0x8c, 0xff, 0xbb, 0x86, 0x42, 0x1f, 0x4b, 0xdf, 0xfb, 0xd0, 0xeb, 0x35, 0x04, 0x17,
- 0x9e, 0x2c, 0xb5, 0x37, 0x4e, 0x39, 0x2a, 0x35, 0xa1, 0x0f, 0x60, 0x9a, 0xd7, 0xb9, 0xe8, 0xd2,
- 0xc9, 0x05, 0xb0, 0x76, 0xb9, 0xbc, 0x3f, 0x29, 0xe3, 0x98, 0xb4, 0xfd, 0x19, 0xfe, 0x93, 0xf2,
- 0x6b, 0xff, 0x09, 0x00, 0x00, 0xff, 0xff, 0x66, 0x23, 0x9f, 0xad, 0xbb, 0x2c, 0x00, 0x00,
+ // 3329 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xd4, 0x3b, 0x4b, 0x6f, 0x1c, 0xc7,
+ 0xd1, 0x5c, 0x2e, 0x1f, 0xbb, 0xb5, 0xbb, 0x22, 0xd5, 0x94, 0xa9, 0xf5, 0x90, 0x92, 0xa8, 0x91,
+ 0x1f, 0x92, 0x6c, 0x51, 0x32, 0x6d, 0x7f, 0x96, 0xe5, 0xcf, 0xfe, 0x2c, 0x51, 0xa2, 0x2c, 0x5b,
+ 0xa4, 0xec, 0xa1, 0x2c, 0x7f, 0x89, 0x8c, 0x0c, 0x86, 0x33, 0xbd, 0xe4, 0x98, 0xb3, 0xd3, 0xa3,
+ 0x99, 0x5e, 0x5a, 0x2b, 0x38, 0x27, 0x07, 0x48, 0x80, 0x20, 0x39, 0x04, 0xb9, 0xe4, 0x12, 0x20,
+ 0xc8, 0x3d, 0xd7, 0xfc, 0x05, 0xff, 0x81, 0x00, 0x39, 0xe5, 0x92, 0x73, 0x0e, 0x39, 0x04, 0x08,
+ 0x90, 0x4b, 0xd0, 0xaf, 0xd9, 0x79, 0x72, 0x87, 0x11, 0x83, 0x20, 0xb7, 0xe9, 0xea, 0xea, 0xaa,
+ 0xae, 0xea, 0xaa, 0xea, 0xea, 0xaa, 0x5d, 0x58, 0x38, 0x20, 0xde, 0xa0, 0x8f, 0xcd, 0x08, 0x87,
+ 0x07, 0x38, 0x5c, 0x0d, 0x42, 0x42, 0x09, 0x9a, 0x4f, 0x01, 0xcd, 0x60, 0x47, 0x7f, 0x0c, 0xe8,
+ 0x96, 0x45, 0xed, 0xbd, 0xdb, 0xd8, 0xc3, 0x14, 0x1b, 0xf8, 0xc9, 0x00, 0x47, 0x14, 0xbd, 0x08,
+ 0x8d, 0x9e, 0xeb, 0x61, 0xd3, 0x75, 0xa2, 0x6e, 0x6d, 0xa5, 0x7e, 0xb1, 0x69, 0xcc, 0xb2, 0xf1,
+ 0x3d, 0x27, 0x42, 0x97, 0xe1, 0x64, 0xb4, 0xef, 0x06, 0xa6, 0x4d, 0xc8, 0xbe, 0x8b, 0x4d, 0x7b,
+ 0x0f, 0xdb, 0xfb, 0xdd, 0xc9, 0x95, 0xda, 0xc5, 0x86, 0x31, 0xc7, 0x26, 0xd6, 0x39, 0x7c, 0x9d,
+ 0x81, 0xf5, 0x07, 0xb0, 0x90, 0x22, 0x1e, 0x05, 0xc4, 0x8f, 0x30, 0xba, 0x0e, 0xb3, 0x21, 0x8e,
+ 0x06, 0x1e, 0x15, 0xc4, 0x5b, 0x6b, 0x67, 0x57, 0xb3, 0xfb, 0x5a, 0x8d, 0x97, 0x0c, 0x3c, 0x6a,
+ 0x28, 0x74, 0xfd, 0xdb, 0x1a, 0xb4, 0x93, 0x33, 0xe8, 0x34, 0xcc, 0xca, 0x8d, 0x76, 0x6b, 0x2b,
+ 0xb5, 0x8b, 0x4d, 0x63, 0x46, 0xec, 0x13, 0x2d, 0xc2, 0x4c, 0x44, 0x2d, 0x3a, 0x88, 0xf8, 0xde,
+ 0xa6, 0x0d, 0x39, 0x42, 0xa7, 0x60, 0x1a, 0x87, 0x21, 0x09, 0xbb, 0x75, 0x8e, 0x2e, 0x06, 0x08,
+ 0xc1, 0x54, 0xe4, 0x3e, 0xc3, 0xdd, 0xa9, 0x95, 0xda, 0xc5, 0x8e, 0xc1, 0xbf, 0x51, 0x17, 0x66,
+ 0x0f, 0x70, 0x18, 0xb9, 0xc4, 0xef, 0x4e, 0x73, 0xb0, 0x1a, 0xea, 0x1f, 0xc3, 0x89, 0x0d, 0xd7,
+ 0xc3, 0x77, 0x31, 0x55, 0xfa, 0x2a, 0xdd, 0xc6, 0x39, 0x68, 0x59, 0xb6, 0x8d, 0x03, 0x6a, 0xee,
+ 0x3e, 0x73, 0x03, 0xa9, 0x27, 0x10, 0xa0, 0xbb, 0xcf, 0xdc, 0x40, 0xff, 0x71, 0x1d, 0xe6, 0x62,
+ 0x62, 0x52, 0x3f, 0x08, 0xa6, 0x1c, 0x8b, 0x5a, 0x9c, 0x54, 0xdb, 0xe0, 0xdf, 0xe8, 0x65, 0x38,
+ 0x61, 0x13, 0x9f, 0x62, 0x9f, 0x9a, 0x1e, 0xf6, 0x77, 0xe9, 0x1e, 0xa7, 0xd5, 0x31, 0x3a, 0x12,
+ 0x7a, 0x9f, 0x03, 0xd1, 0x79, 0x68, 0x2b, 0x34, 0x3a, 0x0c, 0xb0, 0x94, 0xb2, 0x25, 0x61, 0x0f,
+ 0x87, 0x01, 0x46, 0x17, 0xa0, 0xe3, 0x59, 0x11, 0x35, 0xfb, 0xc4, 0x71, 0x7b, 0x2e, 0x76, 0xb8,
+ 0xd0, 0x53, 0x46, 0x9b, 0x01, 0x37, 0x25, 0x0c, 0x69, 0xc2, 0x00, 0x7c, 0xab, 0x8f, 0xb9, 0xf4,
+ 0x4d, 0x23, 0x1e, 0xb3, 0xed, 0x61, 0x6a, 0xed, 0x76, 0x67, 0x38, 0x9c, 0x7f, 0xa3, 0x33, 0x00,
+ 0x6e, 0xc4, 0x65, 0x0c, 0xb0, 0xd3, 0x9d, 0xe5, 0x62, 0x36, 0xdd, 0xe8, 0xae, 0x00, 0xa0, 0x8f,
+ 0x60, 0x76, 0x0f, 0x5b, 0x0e, 0x0e, 0xa3, 0x6e, 0x83, 0x9f, 0xf8, 0x6a, 0xfe, 0xc4, 0x33, 0x5a,
+ 0x58, 0xfd, 0x48, 0x2c, 0xb8, 0xe3, 0xd3, 0x70, 0x68, 0xa8, 0xe5, 0x68, 0x19, 0x9a, 0xfc, 0xc8,
+ 0xd6, 0x89, 0x83, 0xbb, 0x4d, 0x7e, 0xb4, 0x23, 0x80, 0x76, 0x03, 0xda, 0xc9, 0x65, 0x68, 0x1e,
+ 0xea, 0xfb, 0x78, 0x28, 0xcf, 0x84, 0x7d, 0xb2, 0xf3, 0x3f, 0xb0, 0xbc, 0x01, 0xe6, 0xea, 0x6b,
+ 0x1a, 0x62, 0x70, 0x63, 0xf2, 0x7a, 0x4d, 0x9f, 0x85, 0xe9, 0x3b, 0xfd, 0x80, 0x0e, 0xf5, 0x77,
+ 0xa0, 0xfb, 0xc8, 0xb2, 0x07, 0x83, 0xfe, 0x23, 0xbe, 0x45, 0x6e, 0xca, 0xea, 0xa0, 0x97, 0xa0,
+ 0x29, 0x37, 0x2e, 0x8f, 0xba, 0x63, 0x34, 0x04, 0xe0, 0x9e, 0xa3, 0x7f, 0x08, 0x2f, 0x16, 0x2c,
+ 0x94, 0x87, 0x7a, 0x01, 0x3a, 0xbb, 0x56, 0xb8, 0x63, 0xed, 0x62, 0x33, 0xb4, 0xa8, 0x4b, 0xf8,
+ 0xea, 0x9a, 0xd1, 0x96, 0x40, 0x83, 0xc1, 0xf4, 0xc7, 0xa0, 0xa5, 0x28, 0x90, 0x7e, 0x60, 0xd9,
+ 0xb4, 0x0a, 0x73, 0xb4, 0x02, 0xad, 0x20, 0xc4, 0x96, 0xe7, 0x11, 0xdb, 0xa2, 0x42, 0xbc, 0xba,
+ 0x91, 0x04, 0xe9, 0x67, 0x60, 0xa9, 0x90, 0xb8, 0xd8, 0xa0, 0x7e, 0x3d, 0xb3, 0x7b, 0xd2, 0xef,
+ 0xbb, 0x95, 0x58, 0xeb, 0x1f, 0xe4, 0x76, 0xcd, 0x57, 0x4a, 0xc1, 0x57, 0xa0, 0xed, 0x46, 0x66,
+ 0x88, 0x2d, 0xc7, 0x24, 0xbe, 0x27, 0x0e, 0xa3, 0x61, 0x80, 0x1b, 0x19, 0xd8, 0x72, 0x1e, 0xf8,
+ 0xde, 0x50, 0x7f, 0x37, 0xb3, 0xde, 0xc3, 0x96, 0x3f, 0x08, 0x2a, 0xb1, 0xce, 0xca, 0xa4, 0x96,
+ 0x4a, 0x99, 0xde, 0x85, 0xd3, 0x22, 0x5c, 0xac, 0x13, 0xcf, 0xc3, 0x36, 0x75, 0x89, 0xaf, 0xc8,
+ 0x9e, 0x05, 0xb0, 0x63, 0xa0, 0xb4, 0x90, 0x04, 0x44, 0xd7, 0xa0, 0x9b, 0x5f, 0x2a, 0xc9, 0xfe,
+ 0xa9, 0x06, 0x2f, 0xdc, 0x94, 0x6a, 0x15, 0x8c, 0x2b, 0x1d, 0x51, 0x9a, 0xe5, 0x64, 0x96, 0x65,
+ 0xf6, 0x08, 0xeb, 0xb9, 0x23, 0x64, 0x18, 0x21, 0x0e, 0x3c, 0xd7, 0xb6, 0x38, 0x89, 0x29, 0xe1,
+ 0xdd, 0x09, 0x10, 0xb3, 0x78, 0x4a, 0x3d, 0xe9, 0xb3, 0xec, 0x13, 0xad, 0xc1, 0x62, 0x1f, 0xf7,
+ 0x49, 0x38, 0x34, 0xfb, 0x56, 0x60, 0xf6, 0xad, 0xa7, 0x26, 0x0b, 0x6f, 0x66, 0x7f, 0x87, 0x3b,
+ 0x70, 0xc7, 0x40, 0x62, 0x76, 0xd3, 0x0a, 0x36, 0xad, 0xa7, 0xdb, 0xee, 0x33, 0xbc, 0xb9, 0xa3,
+ 0x77, 0x61, 0x31, 0x2b, 0x9f, 0x14, 0xfd, 0x7f, 0xe0, 0xb4, 0x80, 0x6c, 0x0f, 0x7d, 0x7b, 0x9b,
+ 0xc7, 0xd4, 0x4a, 0x07, 0xf5, 0x8f, 0x1a, 0x74, 0xf3, 0x0b, 0xa5, 0x89, 0x3c, 0xaf, 0xd6, 0x8e,
+ 0xac, 0x93, 0x73, 0xd0, 0xa2, 0x96, 0xeb, 0x99, 0xa4, 0xd7, 0x8b, 0x30, 0xe5, 0x8a, 0x98, 0x32,
+ 0x80, 0x81, 0x1e, 0x70, 0x08, 0xba, 0x04, 0xf3, 0xb6, 0xf0, 0x0f, 0x33, 0xc4, 0x07, 0x2e, 0xbf,
+ 0x05, 0x66, 0xf9, 0xc6, 0xe6, 0x6c, 0xe5, 0x37, 0x02, 0x8c, 0x74, 0xe8, 0xb8, 0xce, 0x53, 0x93,
+ 0xc7, 0x7f, 0x7e, 0x89, 0x34, 0x38, 0xb5, 0x96, 0xeb, 0x3c, 0x65, 0x21, 0x8d, 0x69, 0x54, 0x7f,
+ 0x04, 0xcb, 0x42, 0xf8, 0x7b, 0xbe, 0x1d, 0xe2, 0x3e, 0xf6, 0xa9, 0xe5, 0xad, 0x93, 0x60, 0x58,
+ 0xc9, 0x6c, 0x5e, 0x84, 0x46, 0xe4, 0xfa, 0x36, 0x36, 0x7d, 0x71, 0x99, 0x4d, 0x19, 0xb3, 0x7c,
+ 0xbc, 0x15, 0xe9, 0xb7, 0xe0, 0x4c, 0x09, 0x5d, 0xa9, 0xd9, 0xf3, 0xd0, 0xe6, 0x1b, 0x93, 0x17,
+ 0x80, 0xbc, 0x52, 0x5a, 0x0c, 0xb6, 0x2e, 0x40, 0xfa, 0x1b, 0x80, 0x04, 0x8d, 0x4d, 0x32, 0xf0,
+ 0xab, 0x39, 0xfc, 0x0b, 0xb0, 0x90, 0x5a, 0x22, 0x6d, 0xe3, 0x4d, 0x38, 0x25, 0xc0, 0x9f, 0xfb,
+ 0xfd, 0xca, 0xb4, 0x4e, 0xc3, 0x0b, 0x99, 0x45, 0x92, 0xda, 0x9a, 0x62, 0x92, 0x4e, 0x4d, 0x0e,
+ 0x25, 0xb6, 0xa8, 0x76, 0x90, 0xce, 0x38, 0x78, 0x6c, 0x13, 0x1b, 0xb6, 0xc2, 0x7d, 0x16, 0x77,
+ 0x58, 0x24, 0xaa, 0x44, 0x71, 0x19, 0xb4, 0xa2, 0x95, 0x92, 0xee, 0x17, 0xb0, 0xa8, 0x62, 0x9e,
+ 0xdf, 0x73, 0x77, 0x07, 0x21, 0xae, 0x1a, 0xab, 0x93, 0x26, 0x3b, 0x99, 0x33, 0x59, 0xfd, 0xaa,
+ 0x72, 0xb3, 0x04, 0x61, 0x79, 0xa4, 0x71, 0x06, 0x53, 0x4b, 0x64, 0x30, 0xfa, 0xef, 0x6a, 0x70,
+ 0x52, 0xad, 0xa8, 0x68, 0x57, 0x47, 0x74, 0xac, 0x7a, 0xa9, 0x63, 0x4d, 0x8d, 0x1c, 0xeb, 0x22,
+ 0xcc, 0x47, 0x64, 0x10, 0xda, 0xd8, 0x64, 0x59, 0x8b, 0xe9, 0xb3, 0x5b, 0x5a, 0xf8, 0xdd, 0x09,
+ 0x01, 0xbf, 0x6d, 0x51, 0x6b, 0x8b, 0x38, 0x58, 0xff, 0x3f, 0x65, 0x76, 0x29, 0x7b, 0xbd, 0x04,
+ 0x27, 0x79, 0x72, 0x62, 0x05, 0x01, 0xf6, 0x1d, 0xd3, 0xa2, 0xcc, 0xe8, 0x6b, 0xdc, 0xe8, 0x4f,
+ 0xb0, 0x89, 0x9b, 0x1c, 0x7e, 0x93, 0x6e, 0x45, 0xfa, 0x2f, 0x27, 0x61, 0x8e, 0xad, 0x65, 0x4e,
+ 0x56, 0x49, 0xde, 0x79, 0xa8, 0xe3, 0xa7, 0x54, 0x0a, 0xca, 0x3e, 0xd1, 0x55, 0x58, 0x90, 0xde,
+ 0xec, 0x12, 0x7f, 0xe4, 0xe8, 0x75, 0x11, 0x17, 0x47, 0x53, 0xb1, 0xaf, 0x9f, 0x83, 0x56, 0x44,
+ 0x49, 0xa0, 0xe2, 0x86, 0xc8, 0x9c, 0x80, 0x81, 0x64, 0xdc, 0x48, 0xeb, 0x74, 0xba, 0x40, 0xa7,
+ 0xec, 0x32, 0xc4, 0xb6, 0x29, 0x76, 0xc5, 0x23, 0x0f, 0xbf, 0x0c, 0xef, 0xd8, 0x42, 0x1b, 0xe8,
+ 0x03, 0x58, 0x76, 0x77, 0x7d, 0x12, 0x62, 0x53, 0x2a, 0x92, 0xfb, 0xaf, 0x4f, 0xa8, 0xd9, 0x23,
+ 0x03, 0x5f, 0xe5, 0x56, 0x5d, 0x81, 0xb3, 0xcd, 0x51, 0x98, 0x06, 0xb6, 0x08, 0xdd, 0x60, 0xf3,
+ 0xfa, 0xdb, 0x30, 0x3f, 0xd2, 0x4a, 0xf5, 0x28, 0xf0, 0x6d, 0x4d, 0x59, 0xdc, 0x43, 0xcb, 0xf5,
+ 0xb6, 0xb1, 0xef, 0xe0, 0xf0, 0x39, 0xa3, 0x13, 0xba, 0x06, 0xa7, 0x5c, 0xc7, 0xc3, 0x26, 0x75,
+ 0xfb, 0x98, 0x0c, 0xa8, 0x19, 0x61, 0x9b, 0xf8, 0x4e, 0xa4, 0xf4, 0xcb, 0xe6, 0x1e, 0x8a, 0xa9,
+ 0x6d, 0x31, 0xa3, 0xff, 0x28, 0xbe, 0x25, 0x92, 0xbb, 0x18, 0x65, 0x50, 0x3e, 0xc6, 0x8c, 0xa0,
+ 0x48, 0x06, 0xa5, 0x18, 0x6d, 0x01, 0x14, 0x79, 0x1f, 0x3b, 0x21, 0x89, 0xb4, 0x43, 0x9c, 0x21,
+ 0xdf, 0x51, 0xdb, 0x00, 0x01, 0xba, 0x45, 0x9c, 0x21, 0x0f, 0xd7, 0x91, 0xc9, 0x8d, 0xcc, 0xde,
+ 0x1b, 0xf8, 0xfb, 0x7c, 0x37, 0x0d, 0xa3, 0xe5, 0x46, 0xf7, 0xad, 0x88, 0xae, 0x33, 0x90, 0xfe,
+ 0xfb, 0x9a, 0x8a, 0x17, 0x6c, 0x1b, 0x06, 0xb6, 0xb1, 0x7b, 0xf0, 0x1f, 0x50, 0x07, 0x5b, 0x21,
+ 0x8d, 0x20, 0x95, 0x2d, 0x4b, 0x87, 0x43, 0x62, 0x4e, 0xde, 0xaa, 0x7c, 0x66, 0x14, 0xae, 0xd2,
+ 0x1b, 0x97, 0xe1, 0xea, 0x4b, 0x75, 0x5d, 0xdc, 0xb1, 0xb7, 0xf7, 0xac, 0xd0, 0x89, 0xee, 0x62,
+ 0x1f, 0x87, 0x16, 0x3d, 0x96, 0xf4, 0x45, 0x5f, 0x81, 0xb3, 0x65, 0xd4, 0x25, 0xff, 0xc7, 0xea,
+ 0x1a, 0x54, 0x18, 0x06, 0xde, 0x19, 0xb8, 0x9e, 0x73, 0x2c, 0xec, 0x3f, 0xc9, 0x0a, 0x17, 0x13,
+ 0x97, 0xf6, 0x73, 0x19, 0x4e, 0x86, 0x1c, 0x44, 0xcd, 0x88, 0x21, 0xc4, 0xaf, 0xdb, 0x8e, 0x31,
+ 0x27, 0x27, 0xf8, 0xc2, 0x7b, 0x4e, 0xa4, 0xff, 0x74, 0x52, 0x59, 0x80, 0xa2, 0x76, 0x6c, 0x61,
+ 0x75, 0x09, 0x9a, 0x23, 0xf6, 0x75, 0xce, 0xbe, 0x11, 0x49, 0xbe, 0xcc, 0x3a, 0x6d, 0x12, 0x0c,
+ 0x4d, 0x6c, 0x8b, 0x8c, 0x82, 0x1f, 0x75, 0x83, 0x3d, 0xe0, 0x82, 0xe1, 0x1d, 0x9b, 0x27, 0x14,
+ 0xd5, 0x63, 0x6c, 0x82, 0xda, 0x57, 0x82, 0xda, 0x4c, 0x92, 0xda, 0x57, 0x9c, 0x9a, 0xc2, 0x39,
+ 0x70, 0x7b, 0x02, 0x67, 0x76, 0x84, 0xf3, 0xc8, 0xed, 0x31, 0x9c, 0x91, 0x55, 0xa5, 0x95, 0x21,
+ 0x4f, 0xf5, 0x6b, 0x58, 0x4a, 0xcf, 0x56, 0xbf, 0xb0, 0x9f, 0x4b, 0x59, 0xfa, 0xd9, 0xac, 0x39,
+ 0x65, 0x6e, 0xfd, 0x83, 0xec, 0xb6, 0x2b, 0x67, 0x38, 0xcf, 0xb7, 0xaf, 0x33, 0x59, 0x85, 0xa4,
+ 0xd3, 0xa4, 0xff, 0xcf, 0x6e, 0xfb, 0x08, 0xe9, 0xd2, 0xe1, 0x8c, 0xcf, 0x65, 0x5d, 0x20, 0x9b,
+ 0x53, 0xfd, 0x2a, 0x8e, 0xaf, 0x12, 0x83, 0x65, 0x34, 0x95, 0xe3, 0x9a, 0xe4, 0x2b, 0x2b, 0x0f,
+ 0xb3, 0x92, 0x2d, 0x5a, 0x84, 0x19, 0x79, 0x1f, 0x8a, 0x17, 0x8b, 0x1c, 0xa5, 0x8a, 0x2a, 0x75,
+ 0x59, 0x54, 0x51, 0x85, 0x25, 0xf6, 0x2a, 0x9f, 0x16, 0xe1, 0x91, 0x8d, 0x3f, 0xc1, 0x43, 0x7d,
+ 0x2b, 0xe3, 0x71, 0x62, 0x6b, 0x87, 0x94, 0x44, 0x44, 0xcd, 0xc1, 0xe1, 0x67, 0xee, 0xc8, 0xd2,
+ 0x4a, 0xd3, 0x95, 0x46, 0xe0, 0xe8, 0x3f, 0xab, 0x8d, 0x08, 0xde, 0xf2, 0xc8, 0xce, 0x31, 0x5a,
+ 0x65, 0x52, 0x8a, 0x7a, 0x4a, 0x8a, 0x64, 0xd5, 0x68, 0x2a, 0x5d, 0x35, 0x4a, 0x38, 0x51, 0x72,
+ 0x3b, 0x65, 0xa1, 0xf9, 0x21, 0x39, 0xbe, 0x97, 0x65, 0x3e, 0x34, 0x8f, 0xa8, 0x4b, 0xfe, 0x37,
+ 0x60, 0x89, 0x29, 0x5c, 0x40, 0xf9, 0xbb, 0xa5, 0xfa, 0xdb, 0xee, 0x2f, 0x93, 0xb0, 0x5c, 0xbc,
+ 0xb8, 0xca, 0xfb, 0xee, 0x3d, 0xd0, 0xe2, 0xf7, 0x13, 0xbb, 0x1a, 0x23, 0x6a, 0xf5, 0x83, 0xf8,
+ 0x72, 0x14, 0x77, 0xe8, 0x69, 0xf9, 0x98, 0x7a, 0xa8, 0xe6, 0xd5, 0x0d, 0x99, 0x7b, 0x7c, 0xd5,
+ 0x73, 0x8f, 0x2f, 0xc6, 0xc0, 0xb1, 0x68, 0x19, 0x03, 0x91, 0xc3, 0x9d, 0x76, 0x2c, 0x5a, 0xc6,
+ 0x20, 0x5e, 0xcc, 0x19, 0x08, 0xab, 0x6d, 0x49, 0x7c, 0xce, 0xe0, 0x0c, 0x80, 0x4c, 0xaf, 0x06,
+ 0xbe, 0x7a, 0x4c, 0x36, 0x45, 0x72, 0x35, 0xf0, 0x4b, 0xb3, 0xcc, 0xd9, 0xd2, 0x2c, 0x33, 0x7d,
+ 0x9a, 0x8d, 0xdc, 0x69, 0xfe, 0xba, 0x06, 0x70, 0xdb, 0x8d, 0xf6, 0x85, 0x96, 0x59, 0x5e, 0xeb,
+ 0xb8, 0xea, 0x39, 0xc0, 0x3e, 0x19, 0xc4, 0xf2, 0x3c, 0xa9, 0x3b, 0xf6, 0xc9, 0xfc, 0x67, 0x10,
+ 0x61, 0x47, 0xaa, 0x87, 0x7f, 0x33, 0x58, 0x2f, 0xc4, 0x58, 0x6a, 0x80, 0x7f, 0xb3, 0x4c, 0x31,
+ 0xc0, 0xa1, 0x8d, 0x7d, 0x6a, 0xf2, 0x39, 0x26, 0xed, 0xa4, 0xd1, 0x92, 0xb0, 0x8d, 0x0c, 0x0a,
+ 0x27, 0x39, 0x93, 0x42, 0xf9, 0x3c, 0xc2, 0x8e, 0xfe, 0xdb, 0x1a, 0x34, 0x37, 0x71, 0x5f, 0xee,
+ 0xef, 0x2c, 0xc0, 0x2e, 0x09, 0xc9, 0x80, 0xba, 0x3e, 0x16, 0xc9, 0xfc, 0xb4, 0x91, 0x80, 0x3c,
+ 0xc7, 0x6e, 0x59, 0x84, 0xc1, 0x5e, 0x4f, 0x9e, 0x09, 0xff, 0x66, 0xb0, 0x3d, 0x6c, 0x05, 0xf2,
+ 0x18, 0xf8, 0x37, 0x7b, 0x32, 0x45, 0xd4, 0xb2, 0xf7, 0xb9, 0xce, 0xa7, 0x0c, 0x31, 0xd0, 0xff,
+ 0x58, 0x03, 0x30, 0x70, 0x9f, 0x50, 0x6e, 0xb2, 0x4c, 0xae, 0x1d, 0xcb, 0xde, 0x67, 0xcf, 0x0e,
+ 0x5e, 0x3a, 0x15, 0xfa, 0x6c, 0x49, 0x18, 0x2f, 0x9d, 0x9e, 0x01, 0x50, 0x28, 0x32, 0x0c, 0x36,
+ 0x8d, 0xa6, 0x84, 0x88, 0x07, 0x86, 0x8a, 0x08, 0xb2, 0xda, 0x38, 0x0a, 0x8d, 0x62, 0xdb, 0x2a,
+ 0x34, 0x2e, 0x41, 0x33, 0x6b, 0x51, 0x3c, 0xa2, 0x70, 0x73, 0xba, 0x00, 0x1d, 0x55, 0x9b, 0xe5,
+ 0xf6, 0x2a, 0x45, 0x69, 0x2b, 0x20, 0xb3, 0x51, 0x5e, 0x07, 0x7d, 0x4a, 0xb1, 0x1f, 0x9b, 0x52,
+ 0xd3, 0x18, 0x01, 0xf4, 0x6f, 0x00, 0x54, 0x5d, 0xa0, 0x47, 0xd0, 0x1a, 0x4c, 0x33, 0xe2, 0xaa,
+ 0xda, 0xbe, 0x9c, 0xaf, 0xbd, 0x8e, 0xd4, 0x60, 0x08, 0xd4, 0x64, 0x1c, 0x9b, 0x4c, 0xc5, 0xb1,
+ 0xf1, 0xcf, 0x42, 0xfd, 0xbb, 0x1a, 0xac, 0xc8, 0x2c, 0xd4, 0xc5, 0xe1, 0x26, 0x39, 0x60, 0x19,
+ 0xc9, 0x43, 0x22, 0x98, 0x1c, 0x4b, 0x00, 0xbe, 0x0e, 0x5d, 0x07, 0x47, 0xd4, 0xf5, 0x39, 0x43,
+ 0x53, 0x1d, 0x0a, 0x2f, 0x57, 0x8b, 0x0d, 0x2d, 0x26, 0xe6, 0x6f, 0x89, 0xe9, 0x2d, 0xab, 0x8f,
+ 0xd1, 0x15, 0x58, 0xd8, 0xc7, 0x38, 0x30, 0x3d, 0x62, 0x5b, 0x9e, 0xa9, 0x5c, 0x5b, 0xa6, 0x59,
+ 0xf3, 0x6c, 0xea, 0x3e, 0x9b, 0xb9, 0x2d, 0xdc, 0x5b, 0x8f, 0xe0, 0xfc, 0x21, 0x92, 0xc8, 0xf0,
+ 0xb6, 0x0c, 0xcd, 0x20, 0x24, 0x36, 0x8e, 0x98, 0xcd, 0xd6, 0xf8, 0x6d, 0x37, 0x02, 0xa0, 0x6b,
+ 0xb0, 0x10, 0x0f, 0x3e, 0x15, 0x4e, 0x62, 0xed, 0x8a, 0x02, 0xed, 0xa4, 0x51, 0x34, 0xa5, 0xff,
+ 0xa2, 0x06, 0x7a, 0x8e, 0xeb, 0x46, 0x48, 0xfa, 0xc7, 0xa8, 0xc1, 0xab, 0x70, 0x8a, 0xeb, 0x21,
+ 0xe4, 0x24, 0x47, 0x8a, 0x10, 0xaf, 0xa1, 0x93, 0x6c, 0x4e, 0x70, 0x53, 0x9a, 0x18, 0xc0, 0x85,
+ 0x43, 0xf7, 0xf4, 0x6f, 0xd2, 0xc5, 0x92, 0xba, 0xc4, 0xc5, 0x03, 0x27, 0x75, 0x2b, 0xe9, 0xbf,
+ 0xa9, 0xa9, 0x3b, 0x35, 0x3d, 0x2b, 0xf7, 0x72, 0x13, 0x3a, 0x8e, 0x1b, 0xed, 0x9b, 0xa2, 0xf5,
+ 0x73, 0x98, 0xfd, 0x8f, 0xa2, 0xa9, 0xd1, 0x76, 0xe2, 0x6f, 0x1c, 0xa1, 0x0f, 0xa1, 0x23, 0x8b,
+ 0xa7, 0x89, 0x6e, 0x52, 0x6b, 0x6d, 0x29, 0x4f, 0x22, 0x8e, 0x77, 0x46, 0x5b, 0xac, 0x10, 0x23,
+ 0xfd, 0xef, 0x6d, 0x68, 0x7f, 0x36, 0xc0, 0xe1, 0x30, 0x51, 0x78, 0x8e, 0xb0, 0x3c, 0x06, 0xd5,
+ 0x5d, 0x4b, 0x40, 0xd8, 0x8d, 0xd3, 0x0b, 0x49, 0xdf, 0x8c, 0x1b, 0x70, 0x93, 0x1c, 0xa5, 0xc5,
+ 0x80, 0x1b, 0xb2, 0x09, 0xf7, 0x3e, 0xcc, 0xf4, 0x5c, 0x8f, 0x62, 0xd1, 0xc6, 0x6a, 0xad, 0xbd,
+ 0x9c, 0xdf, 0x4f, 0x92, 0xe7, 0xea, 0x06, 0x47, 0x36, 0xe4, 0x22, 0xb4, 0x03, 0x0b, 0xae, 0x1f,
+ 0xf0, 0x27, 0x68, 0xe8, 0x5a, 0x9e, 0xfb, 0x6c, 0x54, 0x3a, 0x6d, 0xad, 0xbd, 0x31, 0x86, 0xd6,
+ 0x3d, 0xb6, 0x72, 0x3b, 0xb9, 0xd0, 0x40, 0x6e, 0x0e, 0x86, 0x30, 0x9c, 0x22, 0x03, 0x9a, 0x67,
+ 0x32, 0xcd, 0x99, 0xac, 0x8d, 0x61, 0xf2, 0x80, 0x2f, 0x4d, 0x73, 0x59, 0x20, 0x79, 0xa0, 0xb6,
+ 0x05, 0x33, 0x42, 0x38, 0x16, 0xe4, 0x7b, 0x2e, 0xf6, 0x54, 0x07, 0x4e, 0x0c, 0x58, 0x1c, 0x23,
+ 0x01, 0x0e, 0x2d, 0x5f, 0xc5, 0x6b, 0x35, 0x1c, 0x75, 0x82, 0xea, 0x89, 0x4e, 0x90, 0xf6, 0x87,
+ 0x69, 0x40, 0x79, 0x09, 0x55, 0x3d, 0x38, 0xc4, 0x11, 0x8b, 0x81, 0xc9, 0x0b, 0x62, 0x2e, 0x01,
+ 0xe7, 0x97, 0xc4, 0x17, 0xd0, 0xb4, 0xa3, 0x03, 0x93, 0xab, 0x44, 0x9a, 0xcb, 0x8d, 0x23, 0xab,
+ 0x74, 0x75, 0x7d, 0xfb, 0x11, 0x87, 0x1a, 0x0d, 0x3b, 0x3a, 0xe0, 0x5f, 0xe8, 0xfb, 0x00, 0x5f,
+ 0x45, 0xc4, 0x97, 0x94, 0xc5, 0xc1, 0xbf, 0x77, 0x74, 0xca, 0x1f, 0x6f, 0x3f, 0xd8, 0x12, 0xa4,
+ 0x9b, 0x8c, 0x9c, 0xa0, 0x6d, 0x43, 0x27, 0xb0, 0xc2, 0x27, 0x03, 0x4c, 0x25, 0x79, 0x61, 0x0b,
+ 0x1f, 0x1c, 0x9d, 0xfc, 0xa7, 0x82, 0x8c, 0xe0, 0xd0, 0x0e, 0x12, 0x23, 0xed, 0xbb, 0x49, 0x68,
+ 0x28, 0xb9, 0xd8, 0x2b, 0x96, 0x5b, 0xb8, 0xa8, 0xe5, 0x98, 0xae, 0xdf, 0x23, 0x52, 0xa3, 0x27,
+ 0x18, 0x5c, 0x94, 0x73, 0xf8, 0xf5, 0x75, 0x09, 0xe6, 0x43, 0x6c, 0x93, 0xd0, 0x61, 0xb9, 0xbe,
+ 0xdb, 0x77, 0x99, 0xd9, 0x8b, 0xb3, 0x9c, 0x13, 0xf0, 0xdb, 0x0a, 0x8c, 0x5e, 0x85, 0x39, 0x7e,
+ 0xec, 0x09, 0xcc, 0xba, 0xa2, 0x89, 0xbd, 0x04, 0xe2, 0x25, 0x98, 0x7f, 0x32, 0x60, 0x81, 0xcf,
+ 0xde, 0xb3, 0x42, 0xcb, 0xa6, 0x24, 0xae, 0xaa, 0xcc, 0x71, 0xf8, 0x7a, 0x0c, 0x46, 0x6f, 0xc1,
+ 0xa2, 0x40, 0xc5, 0x91, 0x6d, 0x05, 0xf1, 0x0a, 0x1c, 0xca, 0x47, 0xf7, 0x29, 0x3e, 0x7b, 0x87,
+ 0x4f, 0xae, 0xab, 0x39, 0xa4, 0x41, 0xc3, 0x26, 0xfd, 0x3e, 0xf6, 0x69, 0x24, 0x1b, 0xa5, 0xf1,
+ 0x18, 0xdd, 0x84, 0x33, 0x96, 0xe7, 0x91, 0xaf, 0x4d, 0xbe, 0xd2, 0x31, 0x73, 0xd2, 0x89, 0x27,
+ 0xb8, 0xc6, 0x91, 0x3e, 0xe3, 0x38, 0x46, 0x5a, 0x50, 0xed, 0x1c, 0x34, 0xe3, 0x73, 0x64, 0x29,
+ 0x4f, 0xc2, 0x20, 0xf9, 0xb7, 0x76, 0x02, 0xda, 0xc9, 0x93, 0xd0, 0xfe, 0x5a, 0x87, 0x85, 0x02,
+ 0xa7, 0x42, 0x8f, 0x01, 0x98, 0xb5, 0x0a, 0xd7, 0x92, 0xe6, 0xfa, 0xbf, 0x47, 0x77, 0x4e, 0x66,
+ 0xaf, 0x02, 0x6c, 0x30, 0xeb, 0x17, 0x9f, 0xe8, 0x07, 0xd0, 0xe2, 0x16, 0x2b, 0xa9, 0x0b, 0x93,
+ 0x7d, 0xff, 0x5f, 0xa0, 0xce, 0x64, 0x95, 0xe4, 0xb9, 0x0f, 0x88, 0x6f, 0xed, 0xcf, 0x35, 0x68,
+ 0xc6, 0x8c, 0x59, 0x02, 0x27, 0x0e, 0x8a, 0x9f, 0x75, 0xa4, 0x12, 0x38, 0x0e, 0xdb, 0xe0, 0xa0,
+ 0xff, 0x4a, 0x53, 0xd2, 0xde, 0x01, 0x18, 0xc9, 0x5f, 0x28, 0x42, 0xad, 0x50, 0x04, 0xfd, 0x12,
+ 0x74, 0x98, 0x66, 0x5d, 0xec, 0x6c, 0xd3, 0xd0, 0x0d, 0xf8, 0x4f, 0x1a, 0x04, 0x4e, 0x24, 0x1f,
+ 0xd2, 0x6a, 0xb8, 0xf6, 0xb7, 0x65, 0x68, 0x27, 0x6f, 0x52, 0xf4, 0x25, 0xb4, 0x12, 0x3f, 0xdd,
+ 0x40, 0x2f, 0xe5, 0x0f, 0x2d, 0xff, 0xb3, 0x11, 0xed, 0xe5, 0x31, 0x58, 0xf2, 0xad, 0x39, 0x81,
+ 0x0c, 0x98, 0x95, 0xed, 0x7e, 0xb4, 0x72, 0xc8, 0x2f, 0x01, 0x04, 0xd5, 0xf3, 0x63, 0x7f, 0x2b,
+ 0xa0, 0x4f, 0x5c, 0xab, 0x21, 0x1f, 0x4e, 0xe6, 0xba, 0xef, 0xe8, 0x72, 0x7e, 0x6d, 0x59, 0x6f,
+ 0x5f, 0x7b, 0xad, 0x12, 0x6e, 0x2c, 0x03, 0x85, 0x85, 0x82, 0x76, 0x3a, 0x7a, 0x7d, 0x0c, 0x95,
+ 0x54, 0x4b, 0x5f, 0xbb, 0x52, 0x11, 0x3b, 0xe6, 0xfa, 0x04, 0x50, 0xbe, 0xd7, 0x8e, 0x5e, 0x1b,
+ 0x4b, 0x66, 0xd4, 0xcb, 0xd7, 0x5e, 0xaf, 0x86, 0x5c, 0x2a, 0xa8, 0xe8, 0xb1, 0x8f, 0x15, 0x34,
+ 0xd5, 0xc5, 0x1f, 0x2b, 0x68, 0xa6, 0x71, 0x3f, 0x81, 0xf6, 0x61, 0x3e, 0xdb, 0x7f, 0x47, 0x97,
+ 0xca, 0x7e, 0x27, 0x94, 0x6b, 0xef, 0x6b, 0x97, 0xab, 0xa0, 0xc6, 0xcc, 0x30, 0x9c, 0x48, 0xf7,
+ 0xbb, 0xd1, 0xab, 0xf9, 0xf5, 0x85, 0x1d, 0x7f, 0xed, 0xe2, 0x78, 0xc4, 0xa4, 0x4c, 0xd9, 0x1e,
+ 0x78, 0x91, 0x4c, 0x25, 0x0d, 0xf6, 0x22, 0x99, 0xca, 0x5a, 0xea, 0xfa, 0x04, 0xfa, 0x46, 0x35,
+ 0x56, 0x33, 0xbd, 0x61, 0xb4, 0x5a, 0x46, 0xa6, 0xb8, 0x39, 0xad, 0x5d, 0xad, 0x8c, 0x9f, 0xf0,
+ 0xc6, 0x2f, 0xa1, 0x95, 0x68, 0x11, 0x17, 0xc5, 0x8f, 0x7c, 0xd3, 0xb9, 0x28, 0x7e, 0x14, 0xf5,
+ 0x99, 0x27, 0xd0, 0x0e, 0x74, 0x52, 0x4d, 0x63, 0xf4, 0x4a, 0xd9, 0xca, 0x74, 0x6d, 0x55, 0x7b,
+ 0x75, 0x2c, 0x5e, 0xcc, 0xc3, 0x54, 0x11, 0x51, 0x86, 0xc0, 0xd2, 0xcd, 0xa5, 0x63, 0xe0, 0x2b,
+ 0xe3, 0xd0, 0x52, 0xae, 0x9c, 0x6b, 0x2d, 0x17, 0xba, 0x72, 0x59, 0xeb, 0xba, 0xd0, 0x95, 0xcb,
+ 0xbb, 0xd5, 0x13, 0x68, 0x0f, 0xe6, 0x32, 0x6d, 0x65, 0x74, 0xb1, 0x8c, 0x44, 0xb6, 0xa5, 0xad,
+ 0x5d, 0xaa, 0x80, 0x19, 0x73, 0xfa, 0x9e, 0xaa, 0x40, 0x70, 0x93, 0xbb, 0x50, 0xbe, 0x74, 0x64,
+ 0x67, 0x2f, 0x1d, 0x8e, 0x14, 0x93, 0xfe, 0x1a, 0x4e, 0x15, 0x55, 0x1b, 0xd1, 0x95, 0xa2, 0xba,
+ 0x46, 0x69, 0x49, 0x53, 0x5b, 0xad, 0x8a, 0x1e, 0x33, 0xfe, 0x1c, 0x1a, 0xaa, 0xb5, 0x8a, 0x0a,
+ 0x2e, 0xa5, 0x4c, 0x33, 0x5a, 0xd3, 0x0f, 0x43, 0x49, 0xb8, 0x4a, 0x5f, 0x45, 0x85, 0x51, 0xcf,
+ 0xb3, 0x3c, 0x2a, 0xe4, 0xba, 0xb3, 0xe5, 0x51, 0x21, 0xdf, 0x42, 0xe5, 0xec, 0x62, 0xb3, 0x4b,
+ 0xb6, 0x08, 0xcb, 0xcd, 0xae, 0xa0, 0x03, 0x5a, 0x6e, 0x76, 0x85, 0x5d, 0xc7, 0x09, 0xf4, 0x43,
+ 0xf5, 0x33, 0x89, 0x6c, 0x67, 0x10, 0x95, 0xc6, 0x96, 0x92, 0x0e, 0xa5, 0x76, 0xad, 0xfa, 0x82,
+ 0x98, 0xfd, 0x33, 0x15, 0x09, 0x33, 0x9d, 0xc1, 0xf2, 0x48, 0x58, 0xdc, 0x9f, 0xd4, 0xae, 0x56,
+ 0xc6, 0xcf, 0x3b, 0x79, 0xb2, 0x75, 0x56, 0xae, 0xed, 0x82, 0x6e, 0x63, 0xb9, 0xb6, 0x0b, 0xbb,
+ 0x71, 0xdc, 0x3f, 0x8a, 0xda, 0x62, 0x45, 0xfe, 0x71, 0x48, 0xdf, 0x4e, 0x5b, 0xad, 0x8a, 0x9e,
+ 0x4a, 0x14, 0xf2, 0x7d, 0x2f, 0x34, 0x76, 0xff, 0xa9, 0x3b, 0xe0, 0x4a, 0x45, 0xec, 0xf2, 0xd3,
+ 0x55, 0x77, 0xc2, 0x58, 0x01, 0x32, 0x77, 0xc3, 0xd5, 0xca, 0xf8, 0x31, 0xef, 0x40, 0xfd, 0xe8,
+ 0x26, 0xd1, 0xb3, 0x42, 0x97, 0xc7, 0xd0, 0x49, 0xf4, 0xdc, 0xb4, 0xd7, 0x2a, 0xe1, 0x16, 0x79,
+ 0x6f, 0xb2, 0x8b, 0x74, 0x98, 0x3d, 0xe5, 0x5a, 0x5f, 0x87, 0xd9, 0x53, 0x41, 0x63, 0xaa, 0xc0,
+ 0x7b, 0x55, 0xf3, 0x68, 0xbc, 0xf7, 0x66, 0x9a, 0x58, 0xe3, 0xbd, 0x37, 0xd7, 0x97, 0x9a, 0x40,
+ 0x3f, 0x19, 0xfd, 0x18, 0x23, 0x5f, 0x83, 0x45, 0x6b, 0xa5, 0xa1, 0xa8, 0xb4, 0xf4, 0xac, 0xbd,
+ 0x79, 0xa4, 0x35, 0x09, 0xe5, 0xff, 0xbc, 0xa6, 0x3a, 0xbb, 0x85, 0x45, 0x50, 0xf4, 0x56, 0x05,
+ 0xc2, 0xb9, 0x3a, 0xae, 0xf6, 0xf6, 0x11, 0x57, 0x15, 0x59, 0x43, 0xb2, 0xfe, 0x59, 0x6e, 0x0d,
+ 0x05, 0x35, 0xd4, 0x72, 0x6b, 0x28, 0x2a, 0xa9, 0xea, 0x13, 0xe8, 0x3e, 0x4c, 0xf3, 0xe7, 0x3a,
+ 0x3a, 0x7b, 0xf8, 0x3b, 0x5e, 0x3b, 0x57, 0x3c, 0x1f, 0xbf, 0x46, 0x99, 0x00, 0x3b, 0x33, 0xfc,
+ 0x7f, 0x09, 0x6f, 0xfe, 0x33, 0x00, 0x00, 0xff, 0xff, 0x4d, 0x3e, 0xd5, 0xcd, 0xae, 0x30, 0x00,
+ 0x00,
}
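
Reviewer note: the regenerated file above adds one unary RPC (VolumeServerStatus) and one server-streaming RPC (FileGet) to the VolumeServer service. Because FileGet is registered at the head of the Streams slice, every pre-existing stream shifts down by one index, which accounts for the mechanical Streams[0]→[1], [1]→[2], ... renumbering threaded through the client methods above. Below is a minimal sketch of driving both new RPCs from the generated client; the dial target (volume server gRPC port) is an assumption, and FileGetRequest is left empty because its fields are not shown in this diff.

    package main

    import (
        "context"
        "fmt"
        "io"
        "log"

        "google.golang.org/grpc"

        "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
    )

    func main() {
        // Assumption: the volume server's gRPC endpoint; adjust to your setup.
        conn, err := grpc.Dial("localhost:18080", grpc.WithInsecure())
        if err != nil {
            log.Fatal(err)
        }
        defer conn.Close()
        client := volume_server_pb.NewVolumeServerClient(conn)

        // New unary RPC: one request, one aggregated status reply.
        status, err := client.VolumeServerStatus(context.Background(), &volume_server_pb.VolumeServerStatusRequest{})
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("disks: %d, memory: %v\n", len(status.GetDiskStatuses()), status.GetMemoryStatus())

        // New server-streaming RPC: Recv until io.EOF, the same consumption
        // pattern as CopyFile and VolumeTailSender above. FileGetRequest's
        // fields are defined outside this hunk, so none are set here.
        stream, err := client.FileGet(context.Background(), &volume_server_pb.FileGetRequest{})
        if err != nil {
            log.Fatal(err)
        }
        for {
            resp, err := stream.Recv()
            if err == io.EOF {
                break
            }
            if err != nil {
                log.Fatal(err)
            }
            _ = resp // one chunk of the streamed file content
        }
    }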
diff --git a/weed/replication/replicator.go b/weed/replication/replicator.go
index a0ef6591c..1ed60f536 100644
--- a/weed/replication/replicator.go
+++ b/weed/replication/replicator.go
@@ -3,7 +3,6 @@ package replication
import (
"context"
"fmt"
- "path/filepath"
"strings"
"github.com/chrislusf/seaweedfs/weed/glog"
@@ -36,33 +35,33 @@ func (r *Replicator) Replicate(ctx context.Context, key string, message *filer_p
glog.V(4).Infof("skipping %v outside of %v", key, r.source.Dir)
return nil
}
- newKey := filepath.ToSlash(filepath.Join(r.sink.GetSinkToDirectory(), key[len(r.source.Dir):]))
+ newKey := util.Join(r.sink.GetSinkToDirectory(), key[len(r.source.Dir):])
glog.V(3).Infof("replicate %s => %s", key, newKey)
key = newKey
if message.OldEntry != nil && message.NewEntry == nil {
glog.V(4).Infof("deleting %v", key)
- return r.sink.DeleteEntry(ctx, key, message.OldEntry.IsDirectory, message.DeleteChunks)
+ return r.sink.DeleteEntry(key, message.OldEntry.IsDirectory, message.DeleteChunks)
}
if message.OldEntry == nil && message.NewEntry != nil {
glog.V(4).Infof("creating %v", key)
- return r.sink.CreateEntry(ctx, key, message.NewEntry)
+ return r.sink.CreateEntry(key, message.NewEntry)
}
if message.OldEntry == nil && message.NewEntry == nil {
glog.V(0).Infof("weird message %+v", message)
return nil
}
- foundExisting, err := r.sink.UpdateEntry(ctx, key, message.OldEntry, message.NewParentPath, message.NewEntry, message.DeleteChunks)
+ foundExisting, err := r.sink.UpdateEntry(key, message.OldEntry, message.NewParentPath, message.NewEntry, message.DeleteChunks)
if foundExisting {
glog.V(4).Infof("updated %v", key)
return err
}
- err = r.sink.DeleteEntry(ctx, key, message.OldEntry.IsDirectory, false)
+ err = r.sink.DeleteEntry(key, message.OldEntry.IsDirectory, false)
if err != nil {
return fmt.Errorf("delete old entry %v: %v", key, err)
}
glog.V(4).Infof("creating missing %v", key)
- return r.sink.CreateEntry(ctx, key, message.NewEntry)
+ return r.sink.CreateEntry(key, message.NewEntry)
}
diff --git a/weed/replication/sink/azuresink/azure_sink.go b/weed/replication/sink/azuresink/azure_sink.go
index a0b1a41ab..aef97c06e 100644
--- a/weed/replication/sink/azuresink/azure_sink.go
+++ b/weed/replication/sink/azuresink/azure_sink.go
@@ -70,7 +70,7 @@ func (g *AzureSink) initialize(accountName, accountKey, container, dir string) e
return nil
}
-func (g *AzureSink) DeleteEntry(ctx context.Context, key string, isDirectory, deleteIncludeChunks bool) error {
+func (g *AzureSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool) error {
key = cleanKey(key)
@@ -78,7 +78,7 @@ func (g *AzureSink) DeleteEntry(ctx context.Context, key string, isDirectory, de
key = key + "/"
}
- if _, err := g.containerURL.NewBlobURL(key).Delete(ctx,
+ if _, err := g.containerURL.NewBlobURL(key).Delete(context.Background(),
azblob.DeleteSnapshotsOptionInclude, azblob.BlobAccessConditions{}); err != nil {
return fmt.Errorf("azure delete %s/%s: %v", g.container, key, err)
}
@@ -87,7 +87,7 @@ func (g *AzureSink) DeleteEntry(ctx context.Context, key string, isDirectory, de
}
-func (g *AzureSink) CreateEntry(ctx context.Context, key string, entry *filer_pb.Entry) error {
+func (g *AzureSink) CreateEntry(key string, entry *filer_pb.Entry) error {
key = cleanKey(key)
@@ -96,27 +96,27 @@ func (g *AzureSink) CreateEntry(ctx context.Context, key string, entry *filer_pb
}
totalSize := filer2.TotalSize(entry.Chunks)
- chunkViews := filer2.ViewFromChunks(entry.Chunks, 0, int(totalSize))
+ chunkViews := filer2.ViewFromChunks(entry.Chunks, 0, int64(totalSize))
// Create a URL that references a to-be-created blob in your
// Azure Storage account's container.
appendBlobURL := g.containerURL.NewAppendBlobURL(key)
- _, err := appendBlobURL.Create(ctx, azblob.BlobHTTPHeaders{}, azblob.Metadata{}, azblob.BlobAccessConditions{})
+ _, err := appendBlobURL.Create(context.Background(), azblob.BlobHTTPHeaders{}, azblob.Metadata{}, azblob.BlobAccessConditions{})
if err != nil {
return err
}
for _, chunk := range chunkViews {
- fileUrl, err := g.filerSource.LookupFileId(ctx, chunk.FileId)
+ fileUrl, err := g.filerSource.LookupFileId(chunk.FileId)
if err != nil {
return err
}
var writeErr error
- _, readErr := util.ReadUrlAsStream(fileUrl, chunk.Offset, int(chunk.Size), func(data []byte) {
- _, writeErr = appendBlobURL.AppendBlock(ctx, bytes.NewReader(data), azblob.AppendBlobAccessConditions{}, nil)
+ readErr := util.ReadUrlAsStream(fileUrl, nil, false, chunk.IsFullChunk(), chunk.Offset, int(chunk.Size), func(data []byte) {
+ _, writeErr = appendBlobURL.AppendBlock(context.Background(), bytes.NewReader(data), azblob.AppendBlobAccessConditions{}, nil)
})
if readErr != nil {
@@ -132,7 +132,7 @@ func (g *AzureSink) CreateEntry(ctx context.Context, key string, entry *filer_pb
}
-func (g *AzureSink) UpdateEntry(ctx context.Context, key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) {
+func (g *AzureSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) {
key = cleanKey(key)
// TODO improve efficiency
return false, nil
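
The azure, b2, and gcs sinks all switch to an extended util.ReadUrlAsStream signature in this patch. Judging from the call sites, the new parameters carry a cipher key, a gzip flag, and a full-chunk flag, and the byte-count return value is dropped. A typical call now looks like this (parameter meanings inferred from the call sites, so treat the comments as assumptions):

    readErr := util.ReadUrlAsStream(
        fileUrl,
        nil,                 // cipherKey: nil fetches the stored bytes as-is
        false,               // isContentGzipped
        chunk.IsFullChunk(), // whether this view covers the whole chunk
        chunk.Offset,
        int(chunk.Size),
        func(data []byte) {
            // stream data into the sink-specific writer
        })
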
diff --git a/weed/replication/sink/b2sink/b2_sink.go b/weed/replication/sink/b2sink/b2_sink.go
index 8c80a64bd..1e7d82ed4 100644
--- a/weed/replication/sink/b2sink/b2_sink.go
+++ b/weed/replication/sink/b2sink/b2_sink.go
@@ -45,8 +45,7 @@ func (g *B2Sink) SetSourceFiler(s *source.FilerSource) {
}
func (g *B2Sink) initialize(accountId, accountKey, bucket, dir string) error {
- ctx := context.Background()
- client, err := b2.NewClient(ctx, accountId, accountKey)
+ client, err := b2.NewClient(context.Background(), accountId, accountKey)
if err != nil {
return err
}
@@ -58,7 +57,7 @@ func (g *B2Sink) initialize(accountId, accountKey, bucket, dir string) error {
return nil
}
-func (g *B2Sink) DeleteEntry(ctx context.Context, key string, isDirectory, deleteIncludeChunks bool) error {
+func (g *B2Sink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool) error {
key = cleanKey(key)
@@ -66,18 +65,18 @@ func (g *B2Sink) DeleteEntry(ctx context.Context, key string, isDirectory, delet
key = key + "/"
}
- bucket, err := g.client.Bucket(ctx, g.bucket)
+ bucket, err := g.client.Bucket(context.Background(), g.bucket)
if err != nil {
return err
}
targetObject := bucket.Object(key)
- return targetObject.Delete(ctx)
+ return targetObject.Delete(context.Background())
}
-func (g *B2Sink) CreateEntry(ctx context.Context, key string, entry *filer_pb.Entry) error {
+func (g *B2Sink) CreateEntry(key string, entry *filer_pb.Entry) error {
key = cleanKey(key)
@@ -86,25 +85,25 @@ func (g *B2Sink) CreateEntry(ctx context.Context, key string, entry *filer_pb.En
}
totalSize := filer2.TotalSize(entry.Chunks)
- chunkViews := filer2.ViewFromChunks(entry.Chunks, 0, int(totalSize))
+ chunkViews := filer2.ViewFromChunks(entry.Chunks, 0, int64(totalSize))
- bucket, err := g.client.Bucket(ctx, g.bucket)
+ bucket, err := g.client.Bucket(context.Background(), g.bucket)
if err != nil {
return err
}
targetObject := bucket.Object(key)
- writer := targetObject.NewWriter(ctx)
+ writer := targetObject.NewWriter(context.Background())
for _, chunk := range chunkViews {
- fileUrl, err := g.filerSource.LookupFileId(ctx, chunk.FileId)
+ fileUrl, err := g.filerSource.LookupFileId(chunk.FileId)
if err != nil {
return err
}
var writeErr error
- _, readErr := util.ReadUrlAsStream(fileUrl, chunk.Offset, int(chunk.Size), func(data []byte) {
+ readErr := util.ReadUrlAsStream(fileUrl, nil, false, chunk.IsFullChunk(), chunk.Offset, int(chunk.Size), func(data []byte) {
_, err := writer.Write(data)
if err != nil {
writeErr = err
@@ -124,7 +123,7 @@ func (g *B2Sink) CreateEntry(ctx context.Context, key string, entry *filer_pb.En
}
-func (g *B2Sink) UpdateEntry(ctx context.Context, key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) {
+func (g *B2Sink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) {
key = cleanKey(key)
diff --git a/weed/replication/sink/filersink/fetch_write.go b/weed/replication/sink/filersink/fetch_write.go
index 26c055da5..d6474a7f1 100644
--- a/weed/replication/sink/filersink/fetch_write.go
+++ b/weed/replication/sink/filersink/fetch_write.go
@@ -3,41 +3,44 @@ package filersink
import (
"context"
"fmt"
- "google.golang.org/grpc"
- "strings"
"sync"
+ "google.golang.org/grpc"
+
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/operation"
+ "github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/security"
- "github.com/chrislusf/seaweedfs/weed/util"
)
-func (fs *FilerSink) replicateChunks(ctx context.Context, sourceChunks []*filer_pb.FileChunk) (replicatedChunks []*filer_pb.FileChunk, err error) {
+func (fs *FilerSink) replicateChunks(sourceChunks []*filer_pb.FileChunk, dir string) (replicatedChunks []*filer_pb.FileChunk, err error) {
if len(sourceChunks) == 0 {
return
}
+
+ replicatedChunks = make([]*filer_pb.FileChunk, len(sourceChunks))
+
var wg sync.WaitGroup
- for _, sourceChunk := range sourceChunks {
+ for chunkIndex, sourceChunk := range sourceChunks {
wg.Add(1)
- go func(chunk *filer_pb.FileChunk) {
+ go func(chunk *filer_pb.FileChunk, index int) {
defer wg.Done()
- replicatedChunk, e := fs.replicateOneChunk(ctx, chunk)
+ replicatedChunk, e := fs.replicateOneChunk(chunk, dir)
if e != nil {
err = e
}
- replicatedChunks = append(replicatedChunks, replicatedChunk)
- }(sourceChunk)
+ replicatedChunks[index] = replicatedChunk
+ }(sourceChunk, chunkIndex)
}
wg.Wait()
return
}
-func (fs *FilerSink) replicateOneChunk(ctx context.Context, sourceChunk *filer_pb.FileChunk) (*filer_pb.FileChunk, error) {
+func (fs *FilerSink) replicateOneChunk(sourceChunk *filer_pb.FileChunk, dir string) (*filer_pb.FileChunk, error) {
- fileId, err := fs.fetchAndWrite(ctx, sourceChunk)
+ fileId, err := fs.fetchAndWrite(sourceChunk, dir)
if err != nil {
return nil, fmt.Errorf("copy %s: %v", sourceChunk.GetFileIdString(), err)
}
@@ -49,12 +52,14 @@ func (fs *FilerSink) replicateOneChunk(ctx context.Context, sourceChunk *filer_p
Mtime: sourceChunk.Mtime,
ETag: sourceChunk.ETag,
SourceFileId: sourceChunk.GetFileIdString(),
+ CipherKey: sourceChunk.CipherKey,
+ IsGzipped: sourceChunk.IsGzipped,
}, nil
}
-func (fs *FilerSink) fetchAndWrite(ctx context.Context, sourceChunk *filer_pb.FileChunk) (fileId string, err error) {
+func (fs *FilerSink) fetchAndWrite(sourceChunk *filer_pb.FileChunk, dir string) (fileId string, err error) {
- filename, header, readCloser, err := fs.filerSource.ReadPart(ctx, sourceChunk.GetFileIdString())
+ filename, header, readCloser, err := fs.filerSource.ReadPart(sourceChunk.GetFileIdString())
if err != nil {
return "", fmt.Errorf("read part %s: %v", sourceChunk.GetFileIdString(), err)
}
@@ -63,7 +68,7 @@ func (fs *FilerSink) fetchAndWrite(ctx context.Context, sourceChunk *filer_pb.Fi
var host string
var auth security.EncodedJwt
- if err := fs.withFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error {
+ if err := fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.AssignVolumeRequest{
Count: 1,
@@ -71,13 +76,17 @@ func (fs *FilerSink) fetchAndWrite(ctx context.Context, sourceChunk *filer_pb.Fi
Collection: fs.collection,
TtlSec: fs.ttlSec,
DataCenter: fs.dataCenter,
+ ParentPath: dir,
}
- resp, err := client.AssignVolume(ctx, request)
+ resp, err := client.AssignVolume(context.Background(), request)
if err != nil {
glog.V(0).Infof("assign volume failure %v: %v", request, err)
return err
}
+ if resp.Error != "" {
+ return fmt.Errorf("assign volume failure %v: %v", request, resp.Error)
+ }
fileId, host, auth = resp.FileId, resp.Url, security.EncodedJwt(resp.Auth)
@@ -90,8 +99,8 @@ func (fs *FilerSink) fetchAndWrite(ctx context.Context, sourceChunk *filer_pb.Fi
glog.V(4).Infof("replicating %s to %s header:%+v", filename, fileUrl, header)
- uploadResult, err := operation.Upload(fileUrl, filename, readCloser,
- "gzip" == header.Get("Content-Encoding"), header.Get("Content-Type"), nil, auth)
+ // fetch the data as-is, regardless of whether it is encrypted or not
+ uploadResult, err, _ := operation.Upload(fileUrl, filename, false, readCloser, "gzip" == header.Get("Content-Encoding"), header.Get("Content-Type"), nil, auth)
if err != nil {
glog.V(0).Infof("upload data %v to %s: %v", filename, fileUrl, err)
return "", fmt.Errorf("upload data: %v", err)
@@ -104,19 +113,16 @@ func (fs *FilerSink) fetchAndWrite(ctx context.Context, sourceChunk *filer_pb.Fi
return
}
-func (fs *FilerSink) withFilerClient(ctx context.Context, fn func(context.Context, filer_pb.SeaweedFilerClient) error) error {
+var _ = filer_pb.FilerClient(&FilerSink{})
- return util.WithCachedGrpcClient(ctx, func(ctx context.Context, grpcConnection *grpc.ClientConn) error {
+func (fs *FilerSink) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error {
+
+ return pb.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error {
client := filer_pb.NewSeaweedFilerClient(grpcConnection)
- return fn(ctx, client)
+ return fn(client)
}, fs.grpcAddress, fs.grpcDialOption)
}
-
-func volumeId(fileId string) string {
- lastCommaIndex := strings.LastIndex(fileId, ",")
- if lastCommaIndex > 0 {
- return fileId[:lastCommaIndex]
- }
- return fileId
+func (fs *FilerSink) AdjustedUrl(hostAndPort string) string {
+ return hostAndPort
}
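
The replicateChunks rewrite above fixes a real data race: the old code appended to a shared slice from multiple goroutines, which both races on the slice header and scrambles chunk order. Pre-sizing the slice and giving each goroutine its own index avoids locks entirely. The pattern in isolation (illustrative sketch; replicateOne is a hypothetical stand-in for fs.replicateOneChunk):

    results := make([]*filer_pb.FileChunk, len(sourceChunks))
    var wg sync.WaitGroup
    for i, chunk := range sourceChunks {
        wg.Add(1)
        go func(chunk *filer_pb.FileChunk, i int) {
            defer wg.Done()
            replicated, err := replicateOne(chunk)
            if err != nil {
                return // error propagation elided for brevity
            }
            results[i] = replicated // each goroutine owns slot i: no race, order preserved
        }(chunk, i)
    }
    wg.Wait()
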
diff --git a/weed/replication/sink/filersink/filer_sink.go b/weed/replication/sink/filersink/filer_sink.go
index de99fbe1c..fa9cc0f05 100644
--- a/weed/replication/sink/filersink/filer_sink.go
+++ b/weed/replication/sink/filersink/filer_sink.go
@@ -64,33 +64,24 @@ func (fs *FilerSink) initialize(grpcAddress string, dir string,
return nil
}
-func (fs *FilerSink) DeleteEntry(ctx context.Context, key string, isDirectory, deleteIncludeChunks bool) error {
- return fs.withFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error {
+func (fs *FilerSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool) error {
- dir, name := filer2.FullPath(key).DirAndName()
+ dir, name := util.FullPath(key).DirAndName()
- request := &filer_pb.DeleteEntryRequest{
- Directory: dir,
- Name: name,
- IsDeleteData: deleteIncludeChunks,
- }
-
- glog.V(1).Infof("delete entry: %v", request)
- _, err := client.DeleteEntry(ctx, request)
- if err != nil {
- glog.V(0).Infof("delete entry %s: %v", key, err)
- return fmt.Errorf("delete entry %s: %v", key, err)
- }
-
- return nil
- })
+ glog.V(1).Infof("delete entry: %v", key)
+ err := filer_pb.Remove(fs, dir, name, deleteIncludeChunks, false, false)
+ if err != nil {
+ glog.V(0).Infof("delete entry %s: %v", key, err)
+ return fmt.Errorf("delete entry %s: %v", key, err)
+ }
+ return nil
}
-func (fs *FilerSink) CreateEntry(ctx context.Context, key string, entry *filer_pb.Entry) error {
+func (fs *FilerSink) CreateEntry(key string, entry *filer_pb.Entry) error {
- return fs.withFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error {
+ return fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
- dir, name := filer2.FullPath(key).DirAndName()
+ dir, name := util.FullPath(key).DirAndName()
// look up existing entry
lookupRequest := &filer_pb.LookupDirectoryEntryRequest{
@@ -98,14 +89,14 @@ func (fs *FilerSink) CreateEntry(ctx context.Context, key string, entry *filer_p
Name: name,
}
glog.V(1).Infof("lookup: %v", lookupRequest)
- if resp, err := client.LookupDirectoryEntry(ctx, lookupRequest); err == nil {
- if filer2.ETag(resp.Entry.Chunks) == filer2.ETag(entry.Chunks) {
+ if resp, err := filer_pb.LookupEntry(client, lookupRequest); err == nil {
+ if filer2.ETag(resp.Entry) == filer2.ETag(entry) {
glog.V(0).Infof("already replicated %s", key)
return nil
}
}
- replicatedChunks, err := fs.replicateChunks(ctx, entry.Chunks)
+ replicatedChunks, err := fs.replicateChunks(entry.Chunks, dir)
if err != nil {
glog.V(0).Infof("replicate entry chunks %s: %v", key, err)
@@ -125,7 +116,7 @@ func (fs *FilerSink) CreateEntry(ctx context.Context, key string, entry *filer_p
}
glog.V(1).Infof("create: %v", request)
- if err := filer_pb.CreateEntry(ctx, client, request); err != nil {
+ if err := filer_pb.CreateEntry(client, request); err != nil {
glog.V(0).Infof("create entry %s: %v", key, err)
return fmt.Errorf("create entry %s: %v", key, err)
}
@@ -134,13 +125,13 @@ func (fs *FilerSink) CreateEntry(ctx context.Context, key string, entry *filer_p
})
}
-func (fs *FilerSink) UpdateEntry(ctx context.Context, key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) {
+func (fs *FilerSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) {
- dir, name := filer2.FullPath(key).DirAndName()
+ dir, name := util.FullPath(key).DirAndName()
// read existing entry
var existingEntry *filer_pb.Entry
- err = fs.withFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error {
+ err = fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.LookupDirectoryEntryRequest{
Directory: dir,
@@ -148,7 +139,7 @@ func (fs *FilerSink) UpdateEntry(ctx context.Context, key string, oldEntry *file
}
glog.V(4).Infof("lookup entry: %v", request)
- resp, err := client.LookupDirectoryEntry(ctx, request)
+ resp, err := filer_pb.LookupEntry(client, request)
if err != nil {
glog.V(0).Infof("lookup %s: %v", key, err)
return err
@@ -169,7 +160,7 @@ func (fs *FilerSink) UpdateEntry(ctx context.Context, key string, oldEntry *file
// skip if already changed
// this usually happens when the messages are not ordered
glog.V(0).Infof("late updates %s", key)
- } else if filer2.ETag(newEntry.Chunks) == filer2.ETag(existingEntry.Chunks) {
+ } else if filer2.ETag(newEntry) == filer2.ETag(existingEntry) {
// skip if no change
// this usually happens when retrying the replication
glog.V(0).Infof("already replicated %s", key)
@@ -184,7 +175,7 @@ func (fs *FilerSink) UpdateEntry(ctx context.Context, key string, oldEntry *file
}
// replicate the chunks that are new in the source
- replicatedChunks, err := fs.replicateChunks(ctx, newChunks)
+ replicatedChunks, err := fs.replicateChunks(newChunks, newParentPath)
if err != nil {
return true, fmt.Errorf("replicte %s chunks error: %v", key, err)
}
@@ -192,14 +183,14 @@ func (fs *FilerSink) UpdateEntry(ctx context.Context, key string, oldEntry *file
}
// save updated meta data
- return true, fs.withFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error {
+ return true, fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.UpdateEntryRequest{
Directory: newParentPath,
Entry: existingEntry,
}
- if _, err := client.UpdateEntry(ctx, request); err != nil {
+ if _, err := client.UpdateEntry(context.Background(), request); err != nil {
return fmt.Errorf("update existingEntry %s: %v", key, err)
}
diff --git a/weed/replication/sink/gcssink/gcs_sink.go b/weed/replication/sink/gcssink/gcs_sink.go
index 5aa978ab8..bb5a54272 100644
--- a/weed/replication/sink/gcssink/gcs_sink.go
+++ b/weed/replication/sink/gcssink/gcs_sink.go
@@ -6,13 +6,14 @@ import (
"os"
"cloud.google.com/go/storage"
+ "google.golang.org/api/option"
+
"github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/replication/sink"
"github.com/chrislusf/seaweedfs/weed/replication/source"
"github.com/chrislusf/seaweedfs/weed/util"
- "google.golang.org/api/option"
)
type GcsSink struct {
@@ -50,7 +51,6 @@ func (g *GcsSink) initialize(google_application_credentials, bucketName, dir str
g.bucket = bucketName
g.dir = dir
- ctx := context.Background()
// Creates a client.
if google_application_credentials == "" {
var found bool
@@ -59,7 +59,7 @@ func (g *GcsSink) initialize(google_application_credentials, bucketName, dir str
glog.Fatalf("need to specific GOOGLE_APPLICATION_CREDENTIALS env variable or google_application_credentials in replication.toml")
}
}
- client, err := storage.NewClient(ctx, option.WithCredentialsFile(google_application_credentials))
+ client, err := storage.NewClient(context.Background(), option.WithCredentialsFile(google_application_credentials))
if err != nil {
glog.Fatalf("Failed to create client: %v", err)
}
@@ -69,13 +69,13 @@ func (g *GcsSink) initialize(google_application_credentials, bucketName, dir str
return nil
}
-func (g *GcsSink) DeleteEntry(ctx context.Context, key string, isDirectory, deleteIncludeChunks bool) error {
+func (g *GcsSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool) error {
if isDirectory {
key = key + "/"
}
- if err := g.client.Bucket(g.bucket).Object(key).Delete(ctx); err != nil {
+ if err := g.client.Bucket(g.bucket).Object(key).Delete(context.Background()); err != nil {
return fmt.Errorf("gcs delete %s%s: %v", g.bucket, key, err)
}
@@ -83,25 +83,25 @@ func (g *GcsSink) DeleteEntry(ctx context.Context, key string, isDirectory, dele
}
-func (g *GcsSink) CreateEntry(ctx context.Context, key string, entry *filer_pb.Entry) error {
+func (g *GcsSink) CreateEntry(key string, entry *filer_pb.Entry) error {
if entry.IsDirectory {
return nil
}
totalSize := filer2.TotalSize(entry.Chunks)
- chunkViews := filer2.ViewFromChunks(entry.Chunks, 0, int(totalSize))
+ chunkViews := filer2.ViewFromChunks(entry.Chunks, 0, int64(totalSize))
- wc := g.client.Bucket(g.bucket).Object(key).NewWriter(ctx)
+ wc := g.client.Bucket(g.bucket).Object(key).NewWriter(context.Background())
for _, chunk := range chunkViews {
- fileUrl, err := g.filerSource.LookupFileId(ctx, chunk.FileId)
+ fileUrl, err := g.filerSource.LookupFileId(chunk.FileId)
if err != nil {
return err
}
- _, err = util.ReadUrlAsStream(fileUrl, chunk.Offset, int(chunk.Size), func(data []byte) {
+ err = util.ReadUrlAsStream(fileUrl, nil, false, chunk.IsFullChunk(), chunk.Offset, int(chunk.Size), func(data []byte) {
wc.Write(data)
})
@@ -119,7 +119,7 @@ func (g *GcsSink) CreateEntry(ctx context.Context, key string, entry *filer_pb.E
}
-func (g *GcsSink) UpdateEntry(ctx context.Context, key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) {
+func (g *GcsSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) {
// TODO improve efficiency
return false, nil
}
diff --git a/weed/replication/sink/replication_sink.go b/weed/replication/sink/replication_sink.go
index 208bbdf87..6d85f660a 100644
--- a/weed/replication/sink/replication_sink.go
+++ b/weed/replication/sink/replication_sink.go
@@ -1,7 +1,6 @@
package sink
import (
- "context"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/replication/source"
"github.com/chrislusf/seaweedfs/weed/util"
@@ -10,9 +9,9 @@ import (
type ReplicationSink interface {
GetName() string
Initialize(configuration util.Configuration, prefix string) error
- DeleteEntry(ctx context.Context, key string, isDirectory, deleteIncludeChunks bool) error
- CreateEntry(ctx context.Context, key string, entry *filer_pb.Entry) error
- UpdateEntry(ctx context.Context, key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error)
+ DeleteEntry(key string, isDirectory, deleteIncludeChunks bool) error
+ CreateEntry(key string, entry *filer_pb.Entry) error
+ UpdateEntry(key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error)
GetSinkToDirectory() string
SetSourceFiler(s *source.FilerSource)
}
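
Dropping context.Context from the ReplicationSink interface touches every sink below. For orientation, here is roughly the smallest type that still satisfies the revised interface (a no-op sketch, not part of the patch):

    type noopSink struct{ dir string }

    func (s *noopSink) GetName() string { return "noop" }
    func (s *noopSink) Initialize(configuration util.Configuration, prefix string) error { return nil }
    func (s *noopSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool) error { return nil }
    func (s *noopSink) CreateEntry(key string, entry *filer_pb.Entry) error { return nil }
    func (s *noopSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (bool, error) {
        return true, nil
    }
    func (s *noopSink) GetSinkToDirectory() string { return s.dir }
    func (s *noopSink) SetSourceFiler(_ *source.FilerSource) {}
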
diff --git a/weed/replication/sink/s3sink/s3_sink.go b/weed/replication/sink/s3sink/s3_sink.go
index e4e097c0f..d7af105b8 100644
--- a/weed/replication/sink/s3sink/s3_sink.go
+++ b/weed/replication/sink/s3sink/s3_sink.go
@@ -11,6 +11,7 @@ import (
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3iface"
+
"github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
@@ -24,6 +25,7 @@ type S3Sink struct {
region string
bucket string
dir string
+ endpoint string
filerSource *source.FilerSource
}
@@ -43,12 +45,14 @@ func (s3sink *S3Sink) Initialize(configuration util.Configuration, prefix string
glog.V(0).Infof("sink.s3.region: %v", configuration.GetString(prefix+"region"))
glog.V(0).Infof("sink.s3.bucket: %v", configuration.GetString(prefix+"bucket"))
glog.V(0).Infof("sink.s3.directory: %v", configuration.GetString(prefix+"directory"))
+ glog.V(0).Infof("sink.s3.endpoint: %v", configuration.GetString(prefix+"endpoint"))
return s3sink.initialize(
configuration.GetString(prefix+"aws_access_key_id"),
configuration.GetString(prefix+"aws_secret_access_key"),
configuration.GetString(prefix+"region"),
configuration.GetString(prefix+"bucket"),
configuration.GetString(prefix+"directory"),
+ configuration.GetString(prefix+"endpoint"),
)
}
@@ -56,13 +60,15 @@ func (s3sink *S3Sink) SetSourceFiler(s *source.FilerSource) {
s3sink.filerSource = s
}
-func (s3sink *S3Sink) initialize(awsAccessKeyId, awsSecretAccessKey, region, bucket, dir string) error {
+func (s3sink *S3Sink) initialize(awsAccessKeyId, awsSecretAccessKey, region, bucket, dir, endpoint string) error {
s3sink.region = region
s3sink.bucket = bucket
s3sink.dir = dir
+ s3sink.endpoint = endpoint
config := &aws.Config{
- Region: aws.String(s3sink.region),
+ Region: aws.String(s3sink.region),
+ Endpoint: aws.String(s3sink.endpoint),
}
if awsAccessKeyId != "" && awsSecretAccessKey != "" {
config.Credentials = credentials.NewStaticCredentials(awsAccessKeyId, awsSecretAccessKey, "")
@@ -77,7 +83,7 @@ func (s3sink *S3Sink) initialize(awsAccessKeyId, awsSecretAccessKey, region, buc
return nil
}
-func (s3sink *S3Sink) DeleteEntry(ctx context.Context, key string, isDirectory, deleteIncludeChunks bool) error {
+func (s3sink *S3Sink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool) error {
key = cleanKey(key)
@@ -89,8 +95,7 @@ func (s3sink *S3Sink) DeleteEntry(ctx context.Context, key string, isDirectory,
}
-func (s3sink *S3Sink) CreateEntry(ctx context.Context, key string, entry *filer_pb.Entry) error {
-
+func (s3sink *S3Sink) CreateEntry(key string, entry *filer_pb.Entry) error {
key = cleanKey(key)
if entry.IsDirectory {
@@ -103,21 +108,22 @@ func (s3sink *S3Sink) CreateEntry(ctx context.Context, key string, entry *filer_
}
totalSize := filer2.TotalSize(entry.Chunks)
- chunkViews := filer2.ViewFromChunks(entry.Chunks, 0, int(totalSize))
+ chunkViews := filer2.ViewFromChunks(entry.Chunks, 0, int64(totalSize))
+
+ parts := make([]*s3.CompletedPart, len(chunkViews))
- var parts []*s3.CompletedPart
var wg sync.WaitGroup
for chunkIndex, chunk := range chunkViews {
partId := chunkIndex + 1
wg.Add(1)
- go func(chunk *filer2.ChunkView) {
+ go func(chunk *filer2.ChunkView, index int) {
defer wg.Done()
- if part, uploadErr := s3sink.uploadPart(ctx, key, uploadId, partId, chunk); uploadErr != nil {
+ if part, uploadErr := s3sink.uploadPart(key, uploadId, partId, chunk); uploadErr != nil {
err = uploadErr
} else {
- parts = append(parts, part)
+ parts[index] = part
}
- }(chunk)
+ }(chunk, chunkIndex)
}
wg.Wait()
@@ -126,11 +132,11 @@ func (s3sink *S3Sink) CreateEntry(ctx context.Context, key string, entry *filer_
return err
}
- return s3sink.completeMultipartUpload(ctx, key, uploadId, parts)
+ return s3sink.completeMultipartUpload(context.Background(), key, uploadId, parts)
}
-func (s3sink *S3Sink) UpdateEntry(ctx context.Context, key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) {
+func (s3sink *S3Sink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) {
key = cleanKey(key)
// TODO improve efficiency
return false, nil
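
The same indexed-slot pattern as in fetch_write.go applies in CreateEntry above, with an extra constraint: S3's CompleteMultipartUpload rejects part lists that are not in ascending PartNumber order, so the nondeterministic ordering of the old append could fail uploads even when the race went unnoticed. Since slot i holds partId i+1, the slice comes out pre-sorted. A quick invariant check one could add (hypothetical helper, not in the patch):

    // partsInOrder reports whether every slot is filled and numbered i+1.
    func partsInOrder(parts []*s3.CompletedPart) bool {
        for i, p := range parts {
            if p == nil || p.PartNumber == nil || *p.PartNumber != int64(i+1) {
                return false
            }
        }
        return true
    }
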
diff --git a/weed/replication/sink/s3sink/s3_write.go b/weed/replication/sink/s3sink/s3_write.go
index 0a190b27d..c5c65ed5c 100644
--- a/weed/replication/sink/s3sink/s3_write.go
+++ b/weed/replication/sink/s3sink/s3_write.go
@@ -103,10 +103,10 @@ func (s3sink *S3Sink) completeMultipartUpload(ctx context.Context, key, uploadId
}
// To upload a part
-func (s3sink *S3Sink) uploadPart(ctx context.Context, key, uploadId string, partId int, chunk *filer2.ChunkView) (*s3.CompletedPart, error) {
+func (s3sink *S3Sink) uploadPart(key, uploadId string, partId int, chunk *filer2.ChunkView) (*s3.CompletedPart, error) {
var readSeeker io.ReadSeeker
- readSeeker, err := s3sink.buildReadSeeker(ctx, chunk)
+ readSeeker, err := s3sink.buildReadSeeker(chunk)
if err != nil {
glog.Errorf("[%s] uploadPart %s %d read: %v", s3sink.bucket, key, partId, err)
return nil, fmt.Errorf("[%s] uploadPart %s %d read: %v", s3sink.bucket, key, partId, err)
@@ -156,12 +156,12 @@ func (s3sink *S3Sink) uploadPartCopy(key, uploadId string, partId int64, copySou
return err
}
-func (s3sink *S3Sink) buildReadSeeker(ctx context.Context, chunk *filer2.ChunkView) (io.ReadSeeker, error) {
- fileUrl, err := s3sink.filerSource.LookupFileId(ctx, chunk.FileId)
+func (s3sink *S3Sink) buildReadSeeker(chunk *filer2.ChunkView) (io.ReadSeeker, error) {
+ fileUrl, err := s3sink.filerSource.LookupFileId(chunk.FileId)
if err != nil {
return nil, err
}
buf := make([]byte, chunk.Size)
- util.ReadUrl(fileUrl, chunk.Offset, int(chunk.Size), buf, true)
+ util.ReadUrl(fileUrl, nil, false, false, chunk.Offset, int(chunk.Size), buf)
return bytes.NewReader(buf), nil
}
diff --git a/weed/replication/source/filer_source.go b/weed/replication/source/filer_source.go
index c3ea44671..69c23fe82 100644
--- a/weed/replication/source/filer_source.go
+++ b/weed/replication/source/filer_source.go
@@ -9,6 +9,7 @@ import (
"google.golang.org/grpc"
+ "github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/glog"
@@ -40,16 +41,16 @@ func (fs *FilerSource) initialize(grpcAddress string, dir string) (err error) {
return nil
}
-func (fs *FilerSource) LookupFileId(ctx context.Context, part string) (fileUrl string, err error) {
+func (fs *FilerSource) LookupFileId(part string) (fileUrl string, err error) {
vid2Locations := make(map[string]*filer_pb.Locations)
vid := volumeId(part)
- err = fs.withFilerClient(ctx, fs.grpcDialOption, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error {
+ err = fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
glog.V(4).Infof("read lookup volume id locations: %v", vid)
- resp, err := client.LookupVolume(ctx, &filer_pb.LookupVolumeRequest{
+ resp, err := client.LookupVolume(context.Background(), &filer_pb.LookupVolumeRequest{
VolumeIds: []string{vid},
})
if err != nil {
@@ -78,9 +79,9 @@ func (fs *FilerSource) LookupFileId(ctx context.Context, part string) (fileUrl s
return
}
-func (fs *FilerSource) ReadPart(ctx context.Context, part string) (filename string, header http.Header, readCloser io.ReadCloser, err error) {
+func (fs *FilerSource) ReadPart(part string) (filename string, header http.Header, readCloser io.ReadCloser, err error) {
- fileUrl, err := fs.LookupFileId(ctx, part)
+ fileUrl, err := fs.LookupFileId(part)
if err != nil {
return "", nil, nil, err
}
@@ -90,15 +91,21 @@ func (fs *FilerSource) ReadPart(ctx context.Context, part string) (filename stri
return filename, header, readCloser, err
}
-func (fs *FilerSource) withFilerClient(ctx context.Context, grpcDialOption grpc.DialOption, fn func(context.Context, filer_pb.SeaweedFilerClient) error) error {
+var _ = filer_pb.FilerClient(&FilerSource{})
- return util.WithCachedGrpcClient(ctx, func(ctx2 context.Context, grpcConnection *grpc.ClientConn) error {
+func (fs *FilerSource) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error {
+
+ return pb.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error {
client := filer_pb.NewSeaweedFilerClient(grpcConnection)
- return fn(ctx2, client)
+ return fn(client)
}, fs.grpcAddress, fs.grpcDialOption)
}
+func (fs *FilerSource) AdjustedUrl(hostAndPort string) string {
+ return hostAndPort
+}
+
func volumeId(fileId string) string {
lastCommaIndex := strings.LastIndex(fileId, ",")
if lastCommaIndex > 0 {
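
volumeId relies on the shape of a SeaweedFS file id: the volume id, a comma, then the needle id and cookie, e.g. "3,01637037d6". The helper simply keeps everything before the last comma (illustrative, assuming that id shape):

    fmt.Println(volumeId("3,01637037d6")) // prints "3"
    fmt.Println(volumeId("3"))            // prints "3" (no comma: returned unchanged)
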
diff --git a/weed/replication/sub/notification_aws_sqs.go b/weed/replication/sub/notification_aws_sqs.go
index 06869e619..1dd386ba7 100644
--- a/weed/replication/sub/notification_aws_sqs.go
+++ b/weed/replication/sub/notification_aws_sqs.go
@@ -92,7 +92,9 @@ func (k *AwsSqsInput) ReceiveMessage() (key string, message *filer_pb.EventNotif
}
// process the message
- key = *result.Messages[0].Attributes["key"]
+ // fmt.Printf("messages: %+v\n", result.Messages[0])
+ keyValue := result.Messages[0].MessageAttributes["key"]
+ key = *keyValue.StringValue
text := *result.Messages[0].Body
message = &filer_pb.EventNotification{}
err = proto.UnmarshalText(text, message)
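
The fix above matters because SQS keeps user-defined attributes in MessageAttributes (values wrapped in a MessageAttributeValue whose StringValue is a *string), while Attributes only carries system fields such as SentTimestamp; reading the wrong map dereferences a nil pointer. A slightly more defensive version of the lookup (illustrative):

    if attr, ok := result.Messages[0].MessageAttributes["key"]; ok && attr.StringValue != nil {
        key = *attr.StringValue
    } else {
        return "", nil, fmt.Errorf("message is missing the 'key' attribute")
    }
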
diff --git a/weed/s3api/auth_credentials.go b/weed/s3api/auth_credentials.go
new file mode 100644
index 000000000..c1e8dff1e
--- /dev/null
+++ b/weed/s3api/auth_credentials.go
@@ -0,0 +1,188 @@
+package s3api
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+
+ "github.com/golang/protobuf/jsonpb"
+ "github.com/gorilla/mux"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/iam_pb"
+)
+
+type Action string
+
+const (
+ ACTION_READ = "Read"
+ ACTION_WRITE = "Write"
+ ACTION_ADMIN = "Admin"
+)
+
+type Iam interface {
+ Check(f http.HandlerFunc, actions ...Action) http.HandlerFunc
+}
+
+type IdentityAccessManagement struct {
+ identities []*Identity
+ domain string
+}
+
+type Identity struct {
+ Name string
+ Credentials []*Credential
+ Actions []Action
+}
+
+type Credential struct {
+ AccessKey string
+ SecretKey string
+}
+
+func NewIdentityAccessManagement(fileName string, domain string) *IdentityAccessManagement {
+ iam := &IdentityAccessManagement{
+ domain: domain,
+ }
+ if fileName == "" {
+ return iam
+ }
+ if err := iam.loadS3ApiConfiguration(fileName); err != nil {
+ glog.Fatalf("fail to load config file %s: %v", fileName, err)
+ }
+ return iam
+}
+
+func (iam *IdentityAccessManagement) loadS3ApiConfiguration(fileName string) error {
+
+ s3ApiConfiguration := &iam_pb.S3ApiConfiguration{}
+
+ rawData, readErr := ioutil.ReadFile(fileName)
+ if readErr != nil {
+ glog.Warningf("fail to read %s : %v", fileName, readErr)
+ return fmt.Errorf("fail to read %s : %v", fileName, readErr)
+ }
+
+ glog.V(1).Infof("maybeLoadVolumeInfo Unmarshal volume info %v", fileName)
+ if err := jsonpb.Unmarshal(bytes.NewReader(rawData), s3ApiConfiguration); err != nil {
+ glog.Warningf("unmarshal error: %v", err)
+ return fmt.Errorf("unmarshal %s error: %v", fileName, err)
+ }
+
+ for _, ident := range s3ApiConfiguration.Identities {
+ t := &Identity{
+ Name: ident.Name,
+ Credentials: nil,
+ Actions: nil,
+ }
+ for _, action := range ident.Actions {
+ t.Actions = append(t.Actions, Action(action))
+ }
+ for _, cred := range ident.Credentials {
+ t.Credentials = append(t.Credentials, &Credential{
+ AccessKey: cred.AccessKey,
+ SecretKey: cred.SecretKey,
+ })
+ }
+ iam.identities = append(iam.identities, t)
+ }
+
+ return nil
+}
+
+func (iam *IdentityAccessManagement) lookupByAccessKey(accessKey string) (identity *Identity, cred *Credential, found bool) {
+ for _, ident := range iam.identities {
+ for _, cred := range ident.Credentials {
+ if cred.AccessKey == accessKey {
+ return ident, cred, true
+ }
+ }
+ }
+ return nil, nil, false
+}
+
+func (iam *IdentityAccessManagement) Auth(f http.HandlerFunc, action Action) http.HandlerFunc {
+
+ if len(iam.identities) == 0 {
+ return f
+ }
+
+ return func(w http.ResponseWriter, r *http.Request) {
+ errCode := iam.authRequest(r, action)
+ if errCode == ErrNone {
+ f(w, r)
+ return
+ }
+ writeErrorResponse(w, errCode, r.URL)
+ }
+}
+
+// check whether the request has valid access keys
+func (iam *IdentityAccessManagement) authRequest(r *http.Request, action Action) ErrorCode {
+ var identity *Identity
+ var s3Err ErrorCode
+ switch getRequestAuthType(r) {
+ case authTypeStreamingSigned:
+ return ErrNone
+ case authTypeUnknown:
+ glog.V(3).Infof("unknown auth type")
+ return ErrAccessDenied
+ case authTypePresignedV2, authTypeSignedV2:
+ glog.V(3).Infof("v2 auth type")
+ identity, s3Err = iam.isReqAuthenticatedV2(r)
+ case authTypeSigned, authTypePresigned:
+ glog.V(3).Infof("v4 auth type")
+ identity, s3Err = iam.reqSignatureV4Verify(r)
+ case authTypePostPolicy:
+ glog.V(3).Infof("post policy auth type")
+ return ErrNotImplemented
+ case authTypeJWT:
+ glog.V(3).Infof("jwt auth type")
+ return ErrNotImplemented
+ case authTypeAnonymous:
+ return ErrAccessDenied
+ default:
+ return ErrNotImplemented
+ }
+
+ glog.V(3).Infof("auth error: %v", s3Err)
+ if s3Err != ErrNone {
+ return s3Err
+ }
+
+ glog.V(3).Infof("user name: %v actions: %v", identity.Name, identity.Actions)
+
+ vars := mux.Vars(r)
+ bucket := vars["bucket"]
+
+ if !identity.canDo(action, bucket) {
+ return ErrAccessDenied
+ }
+
+ return ErrNone
+
+}
+
+func (identity *Identity) canDo(action Action, bucket string) bool {
+ for _, a := range identity.Actions {
+ if a == "Admin" {
+ return true
+ }
+ }
+ for _, a := range identity.Actions {
+ if a == action {
+ return true
+ }
+ }
+ if bucket == "" {
+ return false
+ }
+ limitedByBucket := string(action) + ":" + bucket
+ for _, a := range identity.Actions {
+ if string(a) == limitedByBucket {
+ return true
+ }
+ }
+ return false
+}
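
canDo grants access along three paths: a blanket "Admin" action, an exact action match, or a bucket-scoped grant spelled "<Action>:<bucket>". For example (illustrative):

    ident := &Identity{
        Name:    "example",
        Actions: []Action{"Read:images"},
    }
    fmt.Println(ident.canDo(ACTION_READ, "images"))  // true:  bucket-scoped grant matches
    fmt.Println(ident.canDo(ACTION_READ, "logs"))    // false: grant is limited to "images"
    fmt.Println(ident.canDo(ACTION_WRITE, "images")) // false: no Write grant at all
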
diff --git a/weed/s3api/auth_credentials_test.go b/weed/s3api/auth_credentials_test.go
new file mode 100644
index 000000000..c6f76560c
--- /dev/null
+++ b/weed/s3api/auth_credentials_test.go
@@ -0,0 +1,68 @@
+package s3api
+
+import (
+ "testing"
+
+ "github.com/golang/protobuf/jsonpb"
+
+ "github.com/chrislusf/seaweedfs/weed/pb/iam_pb"
+)
+
+func TestIdentityListFileFormat(t *testing.T) {
+
+ s3ApiConfiguration := &iam_pb.S3ApiConfiguration{}
+
+ identity1 := &iam_pb.Identity{
+ Name: "some_name",
+ Credentials: []*iam_pb.Credential{
+ {
+ AccessKey: "some_access_key1",
+ SecretKey: "some_secret_key2",
+ },
+ },
+ Actions: []string{
+ ACTION_ADMIN,
+ ACTION_READ,
+ ACTION_WRITE,
+ },
+ }
+ identity2 := &iam_pb.Identity{
+ Name: "some_read_only_user",
+ Credentials: []*iam_pb.Credential{
+ {
+ AccessKey: "some_access_key1",
+ SecretKey: "some_secret_key1",
+ },
+ },
+ Actions: []string{
+ ACTION_READ,
+ },
+ }
+ identity3 := &iam_pb.Identity{
+ Name: "some_normal_user",
+ Credentials: []*iam_pb.Credential{
+ {
+ AccessKey: "some_access_key2",
+ SecretKey: "some_secret_key2",
+ },
+ },
+ Actions: []string{
+ ACTION_READ,
+ ACTION_WRITE,
+ },
+ }
+
+ s3ApiConfiguration.Identities = append(s3ApiConfiguration.Identities, identity1)
+ s3ApiConfiguration.Identities = append(s3ApiConfiguration.Identities, identity2)
+ s3ApiConfiguration.Identities = append(s3ApiConfiguration.Identities, identity3)
+
+ m := jsonpb.Marshaler{
+ EmitDefaults: true,
+ Indent: " ",
+ }
+
+ text, _ := m.MarshalToString(s3ApiConfiguration)
+
+ println(text)
+
+}
diff --git a/weed/s3api/auth_signature_v2.go b/weed/s3api/auth_signature_v2.go
new file mode 100644
index 000000000..151a9ec26
--- /dev/null
+++ b/weed/s3api/auth_signature_v2.go
@@ -0,0 +1,412 @@
+/*
+ * The following code tries to reverse engineer the Amazon S3 APIs,
+ * and is mostly copied from the minio implementation.
+ */
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+// implied. See the License for the specific language governing
+// permissions and limitations under the License.
+
+package s3api
+
+import (
+ "crypto/hmac"
+ "crypto/sha1"
+ "crypto/subtle"
+ "encoding/base64"
+ "fmt"
+ "net"
+ "net/http"
+ "net/url"
+ "path"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// Whitelisted resources used in the query string for signature-V2 calculation.
+// The list must be kept alphabetically sorted.
+var resourceList = []string{
+ "acl",
+ "delete",
+ "lifecycle",
+ "location",
+ "logging",
+ "notification",
+ "partNumber",
+ "policy",
+ "requestPayment",
+ "response-cache-control",
+ "response-content-disposition",
+ "response-content-encoding",
+ "response-content-language",
+ "response-content-type",
+ "response-expires",
+ "torrent",
+ "uploadId",
+ "uploads",
+ "versionId",
+ "versioning",
+ "versions",
+ "website",
+}
+
+// Verify if request has valid AWS Signature Version '2'.
+func (iam *IdentityAccessManagement) isReqAuthenticatedV2(r *http.Request) (*Identity, ErrorCode) {
+ if isRequestSignatureV2(r) {
+ return iam.doesSignV2Match(r)
+ }
+ return iam.doesPresignV2SignatureMatch(r)
+}
+
+// Authorization = "AWS" + " " + AWSAccessKeyId + ":" + Signature;
+// Signature = Base64( HMAC-SHA1( YourSecretKey, UTF-8-Encoding-Of( StringToSign ) ) );
+//
+// StringToSign = HTTP-Verb + "\n" +
+// Content-Md5 + "\n" +
+// Content-Type + "\n" +
+// Date + "\n" +
+// CanonicalizedProtocolHeaders +
+// CanonicalizedResource;
+//
+// CanonicalizedResource = [ "/" + Bucket ] +
+// <HTTP-Request-URI, from the protocol name up to the query string> +
+// [ subresource, if present. For example "?acl", "?location", "?logging", or "?torrent"];
+//
+// CanonicalizedProtocolHeaders = <described below>
+
+// doesSignV2Match - Verify authorization header with calculated header in accordance with
+// - http://docs.aws.amazon.com/AmazonS3/latest/dev/auth-request-sig-v2.html
+// Returns true if it matches, false otherwise. If the error is not nil, the result is always false.
+
+func validateV2AuthHeader(v2Auth string) (accessKey string, errCode ErrorCode) {
+ if v2Auth == "" {
+ return "", ErrAuthHeaderEmpty
+ }
+ // Verify if the header algorithm is supported or not.
+ if !strings.HasPrefix(v2Auth, signV2Algorithm) {
+ return "", ErrSignatureVersionNotSupported
+ }
+
+ // Below is the V2 Signed Auth header format; we split on the space (after the `AWS` string).
+ // Authorization = "AWS" + " " + AWSAccessKeyId + ":" + Signature
+ authFields := strings.Split(v2Auth, " ")
+ if len(authFields) != 2 {
+ return "", ErrMissingFields
+ }
+
+ // Then split on ":", which separates the `AWSAccessKeyId` and the `Signature` strings.
+ keySignFields := strings.Split(strings.TrimSpace(authFields[1]), ":")
+ if len(keySignFields) != 2 {
+ return "", ErrMissingFields
+ }
+
+ return keySignFields[0], ErrNone
+}
+
+func (iam *IdentityAccessManagement) doesSignV2Match(r *http.Request) (*Identity, ErrorCode) {
+ v2Auth := r.Header.Get("Authorization")
+
+ accessKey, apiError := validateV2AuthHeader(v2Auth)
+ if apiError != ErrNone {
+ return nil, apiError
+ }
+
+ // Access credentials.
+ // Validate that the access key id is the same.
+ ident, cred, found := iam.lookupByAccessKey(accessKey)
+ if !found {
+ return nil, ErrInvalidAccessKeyID
+ }
+
+ // r.RequestURI will have the raw encoded URI as sent by the client.
+ tokens := strings.SplitN(r.RequestURI, "?", 2)
+ encodedResource := tokens[0]
+ encodedQuery := ""
+ if len(tokens) == 2 {
+ encodedQuery = tokens[1]
+ }
+
+ unescapedQueries, err := unescapeQueries(encodedQuery)
+ if err != nil {
+ return nil, ErrInvalidQueryParams
+ }
+
+ encodedResource, err = getResource(encodedResource, r.Host, iam.domain)
+ if err != nil {
+ return nil, ErrInvalidRequest
+ }
+
+ prefix := fmt.Sprintf("%s %s:", signV2Algorithm, cred.AccessKey)
+ if !strings.HasPrefix(v2Auth, prefix) {
+ return nil, ErrSignatureDoesNotMatch
+ }
+ v2Auth = v2Auth[len(prefix):]
+ expectedAuth := signatureV2(cred, r.Method, encodedResource, strings.Join(unescapedQueries, "&"), r.Header)
+ if !compareSignatureV2(v2Auth, expectedAuth) {
+ return nil, ErrSignatureDoesNotMatch
+ }
+ return ident, ErrNone
+}
+
+// doesPresignV2SignatureMatch - Verify query headers with presigned signature
+// - http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html#RESTAuthenticationQueryStringAuth
+// Returns ErrNone if it matches, an S3 error code otherwise.
+func (iam *IdentityAccessManagement) doesPresignV2SignatureMatch(r *http.Request) (*Identity, ErrorCode) {
+
+ // r.RequestURI will have the raw encoded URI as sent by the client.
+ tokens := strings.SplitN(r.RequestURI, "?", 2)
+ encodedResource := tokens[0]
+ encodedQuery := ""
+ if len(tokens) == 2 {
+ encodedQuery = tokens[1]
+ }
+
+ var (
+ filteredQueries []string
+ gotSignature string
+ expires string
+ accessKey string
+ err error
+ )
+
+ var unescapedQueries []string
+ unescapedQueries, err = unescapeQueries(encodedQuery)
+ if err != nil {
+ return nil, ErrInvalidQueryParams
+ }
+
+ // Extract the necessary values from presigned query, construct a list of new filtered queries.
+ for _, query := range unescapedQueries {
+ keyval := strings.SplitN(query, "=", 2)
+ if len(keyval) != 2 {
+ return nil, ErrInvalidQueryParams
+ }
+ switch keyval[0] {
+ case "AWSAccessKeyId":
+ accessKey = keyval[1]
+ case "Signature":
+ gotSignature = keyval[1]
+ case "Expires":
+ expires = keyval[1]
+ default:
+ filteredQueries = append(filteredQueries, query)
+ }
+ }
+
+ // Invalid values return an error.
+ if accessKey == "" || gotSignature == "" || expires == "" {
+ return nil, ErrInvalidQueryParams
+ }
+
+ // Validate that the access key id is the same.
+ ident, cred, found := iam.lookupByAccessKey(accessKey)
+ if !found {
+ return nil, ErrInvalidAccessKeyID
+ }
+
+ // Make sure the request has not expired.
+ expiresInt, err := strconv.ParseInt(expires, 10, 64)
+ if err != nil {
+ return nil, ErrMalformedExpires
+ }
+
+ // Check if the presigned URL has expired.
+ if expiresInt < time.Now().UTC().Unix() {
+ return nil, ErrExpiredPresignRequest
+ }
+
+ encodedResource, err = getResource(encodedResource, r.Host, iam.domain)
+ if err != nil {
+ return nil, ErrInvalidRequest
+ }
+
+ expectedSignature := preSignatureV2(cred, r.Method, encodedResource, strings.Join(filteredQueries, "&"), r.Header, expires)
+ if !compareSignatureV2(gotSignature, expectedSignature) {
+ return nil, ErrSignatureDoesNotMatch
+ }
+
+ return ident, ErrNone
+}
+
+// Unescape the encodedQuery string into a list of unescaped query params,
+// returning an error if any value fails to unescape.
+func unescapeQueries(encodedQuery string) (unescapedQueries []string, err error) {
+ for _, query := range strings.Split(encodedQuery, "&") {
+ var unescapedQuery string
+ unescapedQuery, err = url.QueryUnescape(query)
+ if err != nil {
+ return nil, err
+ }
+ unescapedQueries = append(unescapedQueries, unescapedQuery)
+ }
+ return unescapedQueries, nil
+}
+
+// Returns "/bucketName/objectName" for path-style or virtual-host-style requests.
+func getResource(path string, host string, domain string) (string, error) {
+ if domain == "" {
+ return path, nil
+ }
+ // If virtual-host-style is enabled construct the "resource" properly.
+ if strings.Contains(host, ":") {
+ // In bucket.mydomain.com:9000, strip out :9000
+ var err error
+ if host, _, err = net.SplitHostPort(host); err != nil {
+ return "", err
+ }
+ }
+ if !strings.HasSuffix(host, "."+domain) {
+ return path, nil
+ }
+ bucket := strings.TrimSuffix(host, "."+domain)
+ return "/" + pathJoin(bucket, path), nil
+}
+
+// pathJoin - like path.Join() but retains trailing "/" of the last element
+func pathJoin(elem ...string) string {
+ trailingSlash := ""
+ if len(elem) > 0 {
+ if strings.HasSuffix(elem[len(elem)-1], "/") {
+ trailingSlash = "/"
+ }
+ }
+ return path.Join(elem...) + trailingSlash
+}
+
+// Return the signature v2 of a given request.
+func signatureV2(cred *Credential, method string, encodedResource string, encodedQuery string, headers http.Header) string {
+ stringToSign := getStringToSignV2(method, encodedResource, encodedQuery, headers, "")
+ signature := calculateSignatureV2(stringToSign, cred.SecretKey)
+ return signature
+}
+
+// Return string to sign under two different conditions.
+// - if expires string is set then string to sign includes date instead of the Date header.
+// - if expires string is empty then string to sign includes date header instead.
+func getStringToSignV2(method string, encodedResource, encodedQuery string, headers http.Header, expires string) string {
+ canonicalHeaders := canonicalizedAmzHeadersV2(headers)
+ if len(canonicalHeaders) > 0 {
+ canonicalHeaders += "\n"
+ }
+
+ date := expires // Date is set to expires date for presign operations.
+ if date == "" {
+ // If expires date is empty then request header Date is used.
+ date = headers.Get("Date")
+ }
+
+ // From the Amazon docs:
+ //
+ // StringToSign = HTTP-Verb + "\n" +
+ // Content-Md5 + "\n" +
+ // Content-Type + "\n" +
+ // Date/Expires + "\n" +
+ // CanonicalizedProtocolHeaders +
+ // CanonicalizedResource;
+ stringToSign := strings.Join([]string{
+ method,
+ headers.Get("Content-MD5"),
+ headers.Get("Content-Type"),
+ date,
+ canonicalHeaders,
+ }, "\n")
+
+ return stringToSign + canonicalizedResourceV2(encodedResource, encodedQuery)
+}
+
+// Return canonical resource string.
+func canonicalizedResourceV2(encodedResource, encodedQuery string) string {
+ queries := strings.Split(encodedQuery, "&")
+ keyval := make(map[string]string)
+ for _, query := range queries {
+ key := query
+ val := ""
+ index := strings.Index(query, "=")
+ if index != -1 {
+ key = query[:index]
+ val = query[index+1:]
+ }
+ keyval[key] = val
+ }
+
+ var canonicalQueries []string
+ for _, key := range resourceList {
+ val, ok := keyval[key]
+ if !ok {
+ continue
+ }
+ if val == "" {
+ canonicalQueries = append(canonicalQueries, key)
+ continue
+ }
+ canonicalQueries = append(canonicalQueries, key+"="+val)
+ }
+
+ // The queries are already sorted because resourceList is sorted; if
+ // canonicalQueries is empty, strings.Join returns an empty string.
+ canonicalQuery := strings.Join(canonicalQueries, "&")
+ if canonicalQuery != "" {
+ return encodedResource + "?" + canonicalQuery
+ }
+ return encodedResource
+}
+
+// Return canonical headers.
+func canonicalizedAmzHeadersV2(headers http.Header) string {
+ var keys []string
+ keyval := make(map[string]string)
+ for key := range headers {
+ lkey := strings.ToLower(key)
+ if !strings.HasPrefix(lkey, "x-amz-") {
+ continue
+ }
+ keys = append(keys, lkey)
+ keyval[lkey] = strings.Join(headers[key], ",")
+ }
+ sort.Strings(keys)
+ var canonicalHeaders []string
+ for _, key := range keys {
+ canonicalHeaders = append(canonicalHeaders, key+":"+keyval[key])
+ }
+ return strings.Join(canonicalHeaders, "\n")
+}
+
+func calculateSignatureV2(stringToSign string, secret string) string {
+ hm := hmac.New(sha1.New, []byte(secret))
+ hm.Write([]byte(stringToSign))
+ return base64.StdEncoding.EncodeToString(hm.Sum(nil))
+}
+
+// compareSignatureV2 returns true if and only if both signatures
+// are equal. The signatures are expected to be base64 encoded strings
+// according to the AWS S3 signature V2 spec.
+func compareSignatureV2(sig1, sig2 string) bool {
+ // Decoding the signature strings to their binary byte-sequence representation is
+ // required, because the Base64 encoding of a value is not unique:
+ // For example "aGVsbG8=" and "aGVsbG8=\r" will result in the same byte slice.
+ signature1, err := base64.StdEncoding.DecodeString(sig1)
+ if err != nil {
+ return false
+ }
+ signature2, err := base64.StdEncoding.DecodeString(sig2)
+ if err != nil {
+ return false
+ }
+ return subtle.ConstantTimeCompare(signature1, signature2) == 1
+}
+
+// Return signature-v2 for the presigned request.
+func preSignatureV2(cred *Credential, method string, encodedResource string, encodedQuery string, headers http.Header, expires string) string {
+ stringToSign := getStringToSignV2(method, encodedResource, encodedQuery, headers, expires)
+ return calculateSignatureV2(stringToSign, cred.SecretKey)
+}
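
Putting the V2 pieces together: the signature is base64(HMAC-SHA1(secretKey, StringToSign)). A worked example against the helpers above (all values made up):

    headers := http.Header{}
    headers.Set("Date", "Tue, 27 Mar 2007 19:36:42 +0000")
    cred := &Credential{AccessKey: "AKIDEXAMPLE", SecretKey: "wJalrXUtnFEMI"}
    sig := signatureV2(cred, "GET", "/mybucket/photo.jpg", "", headers)
    // StringToSign here is:
    //   "GET\n" +                               // HTTP verb
    //   "\n" +                                  // no Content-MD5
    //   "\n" +                                  // no Content-Type
    //   "Tue, 27 Mar 2007 19:36:42 +0000\n" +   // Date header (no x-amz-* headers)
    //   "/mybucket/photo.jpg"                   // canonicalized resource, no subresources
    // and sig is its base64-encoded HMAC-SHA1 under the secret key.
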
diff --git a/weed/s3api/auth_signature_v4.go b/weed/s3api/auth_signature_v4.go
new file mode 100644
index 000000000..cdfd8be1d
--- /dev/null
+++ b/weed/s3api/auth_signature_v4.go
@@ -0,0 +1,720 @@
+/*
+ * The following code tries to reverse engineer the Amazon S3 APIs,
+ * and is mostly copied from the minio implementation.
+ */
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+// implied. See the License for the specific language governing
+// permissions and limitations under the License.
+
+package s3api
+
+import (
+ "bytes"
+ "crypto/hmac"
+ "crypto/sha256"
+ "crypto/subtle"
+ "encoding/hex"
+ "net/http"
+ "net/url"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+ "unicode/utf8"
+)
+
+func (iam *IdentityAccessManagement) reqSignatureV4Verify(r *http.Request) (*Identity, ErrorCode) {
+ sha256sum := getContentSha256Cksum(r)
+ switch {
+ case isRequestSignatureV4(r):
+ return iam.doesSignatureMatch(sha256sum, r)
+ case isRequestPresignedSignatureV4(r):
+ return iam.doesPresignedSignatureMatch(sha256sum, r)
+ }
+ return nil, ErrAccessDenied
+}
+
+// Streaming AWS Signature Version '4' constants.
+const (
+ emptySHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ streamingContentSHA256 = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD"
+ signV4ChunkedAlgorithm = "AWS4-HMAC-SHA256-PAYLOAD"
+
+ // http Header "x-amz-content-sha256" == "UNSIGNED-PAYLOAD" indicates that the
+ // client did not calculate sha256 of the payload.
+ unsignedPayload = "UNSIGNED-PAYLOAD"
+)
+
+// Returns SHA256 for calculating canonical-request.
+func getContentSha256Cksum(r *http.Request) string {
+ var (
+ defaultSha256Cksum string
+ v []string
+ ok bool
+ )
+
+ // For a presigned request we look at the query param for sha256.
+ if isRequestPresignedSignatureV4(r) {
+ // If X-Amz-Content-Sha256 is not set in a presigned request, the
+ // checksum defaults to 'UNSIGNED-PAYLOAD'.
+ defaultSha256Cksum = unsignedPayload
+ v, ok = r.URL.Query()["X-Amz-Content-Sha256"]
+ if !ok {
+ v, ok = r.Header["X-Amz-Content-Sha256"]
+ }
+ } else {
+ // If X-Amz-Content-Sha256 is not set in a signed request, the
+ // checksum defaults to sha256([]byte("")).
+ defaultSha256Cksum = emptySHA256
+ v, ok = r.Header["X-Amz-Content-Sha256"]
+ }
+
+ // If we found 'X-Amz-Content-Sha256', return the captured value.
+ if ok {
+ return v[0]
+ }
+
+ // We couldn't find 'X-Amz-Content-Sha256'.
+ return defaultSha256Cksum
+}
+
+// Verify authorization header - http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html
+func (iam *IdentityAccessManagement) doesSignatureMatch(hashedPayload string, r *http.Request) (*Identity, ErrorCode) {
+
+ // Copy request.
+ req := *r
+
+ // Save authorization header.
+ v4Auth := req.Header.Get("Authorization")
+
+ // Parse signature version '4' header.
+ signV4Values, err := parseSignV4(v4Auth)
+ if err != ErrNone {
+ return nil, err
+ }
+
+ // Extract all the signed headers along with their values.
+ extractedSignedHeaders, errCode := extractSignedHeaders(signV4Values.SignedHeaders, r)
+ if errCode != ErrNone {
+ return nil, errCode
+ }
+
+ // Verify if the access key id matches.
+ identity, cred, found := iam.lookupByAccessKey(signV4Values.Credential.accessKey)
+ if !found {
+ return nil, ErrInvalidAccessKeyID
+ }
+
+ // Extract the date; if not present, return an error.
+ var date string
+ if date = req.Header.Get(http.CanonicalHeaderKey("X-Amz-Date")); date == "" {
+ if date = r.Header.Get("Date"); date == "" {
+ return nil, ErrMissingDateHeader
+ }
+ }
+ // Parse date header.
+ t, e := time.Parse(iso8601Format, date)
+ if e != nil {
+ return nil, ErrMalformedDate
+ }
+
+ // Query string.
+ queryStr := req.URL.Query().Encode()
+
+ // Get canonical request.
+ canonicalRequest := getCanonicalRequest(extractedSignedHeaders, hashedPayload, queryStr, req.URL.Path, req.Method)
+
+ // Get string to sign from canonical request.
+ stringToSign := getStringToSign(canonicalRequest, t, signV4Values.Credential.getScope())
+
+ // Get hmac signing key.
+ signingKey := getSigningKey(cred.SecretKey, signV4Values.Credential.scope.date, signV4Values.Credential.scope.region)
+
+ // Calculate signature.
+ newSignature := getSignature(signingKey, stringToSign)
+
+ // Verify that the signatures match.
+ if !compareSignatureV4(newSignature, signV4Values.Signature) {
+ return nil, ErrSignatureDoesNotMatch
+ }
+
+ // Return error none.
+ return identity, ErrNone
+}
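// Aside (not part of the patch): getSigningKey above derives the SigV4
// signing key by chaining four HMAC-SHA256 operations, per the AWS spec.
// A self-contained sketch of that derivation:
//
//    func hmacSHA256(key, data []byte) []byte {
//        h := hmac.New(sha256.New, key)
//        h.Write(data)
//        return h.Sum(nil)
//    }
//
//    kDate := hmacSHA256([]byte("AWS4"+secretKey), []byte(date)) // date as yyyymmdd, e.g. "20200301"
//    kRegion := hmacSHA256(kDate, []byte(region))                // e.g. "us-east-1"
//    kService := hmacSHA256(kRegion, []byte("s3"))
//    kSigning := hmacSHA256(kService, []byte("aws4_request"))
//    signature := hex.EncodeToString(hmacSHA256(kSigning, []byte(stringToSign)))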
+
+// credentialHeader data type represents structured form of Credential
+// string from authorization header.
+type credentialHeader struct {
+ accessKey string
+ scope struct {
+ date time.Time
+ region string
+ service string
+ request string
+ }
+}
+
+// signValues data type represents structured form of AWS Signature V4 header.
+type signValues struct {
+ Credential credentialHeader
+ SignedHeaders []string
+ Signature string
+}
+
+// Return scope string.
+func (c credentialHeader) getScope() string {
+ return strings.Join([]string{
+ c.scope.date.Format(yyyymmdd),
+ c.scope.region,
+ c.scope.service,
+ c.scope.request,
+ }, "/")
+}
+
+// Authorization: algorithm Credential=accessKeyID/credScope, \
+// SignedHeaders=signedHeaders, Signature=signature
+//
+func parseSignV4(v4Auth string) (sv signValues, aec ErrorCode) {
+ // Remove all spaces: some clients send spaced parameters and some
+ // don't, so we proactively strip spaces to make parsing easier.
+ v4Auth = strings.Replace(v4Auth, " ", "", -1)
+ if v4Auth == "" {
+ return sv, ErrAuthHeaderEmpty
+ }
+
+ // Verify if the header algorithm is supported or not.
+ if !strings.HasPrefix(v4Auth, signV4Algorithm) {
+ return sv, ErrSignatureVersionNotSupported
+ }
+
+ // Strip off the Algorithm prefix.
+ v4Auth = strings.TrimPrefix(v4Auth, signV4Algorithm)
+ authFields := strings.Split(strings.TrimSpace(v4Auth), ",")
+ if len(authFields) != 3 {
+ return sv, ErrMissingFields
+ }
+
+ // Initialize signature version '4' structured header.
+ signV4Values := signValues{}
+
+ var err ErrorCode
+ // Save credential values.
+ signV4Values.Credential, err = parseCredentialHeader(authFields[0])
+ if err != ErrNone {
+ return sv, err
+ }
+
+ // Save signed headers.
+ signV4Values.SignedHeaders, err = parseSignedHeader(authFields[1])
+ if err != ErrNone {
+ return sv, err
+ }
+
+ // Save signature.
+ signV4Values.Signature, err = parseSignature(authFields[2])
+ if err != ErrNone {
+ return sv, err
+ }
+
+ // Return the structure here.
+ return signV4Values, ErrNone
+}
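+
+// As an illustrative sketch (assumed values), parsing
+//
+//   AWS4-HMAC-SHA256 Credential=AKID/20200301/us-east-1/s3/aws4_request,SignedHeaders=host;x-amz-date,Signature=abc123
+//
+// yields Credential.accessKey "AKID", SignedHeaders []string{"host", "x-amz-date"}
+// and Signature "abc123"; a header with fewer or more than three comma-separated
+// fields returns ErrMissingFields.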
+
+// parseCredentialHeader parses the Credential string into its structured form.
+func parseCredentialHeader(credElement string) (ch credentialHeader, aec ErrorCode) {
+ creds := strings.Split(strings.TrimSpace(credElement), "=")
+ if len(creds) != 2 {
+ return ch, ErrMissingFields
+ }
+ if creds[0] != "Credential" {
+ return ch, ErrMissingCredTag
+ }
+ credElements := strings.Split(strings.TrimSpace(creds[1]), "/")
+ if len(credElements) != 5 {
+ return ch, ErrCredMalformed
+ }
+ // Save access key id.
+ cred := credentialHeader{
+ accessKey: credElements[0],
+ }
+ var e error
+ cred.scope.date, e = time.Parse(yyyymmdd, credElements[1])
+ if e != nil {
+ return ch, ErrMalformedCredentialDate
+ }
+
+ cred.scope.region = credElements[2]
+ cred.scope.service = credElements[3] // "s3"
+ cred.scope.request = credElements[4] // "aws4_request"
+ return cred, ErrNone
+}
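+
+// For example (assumed values), "Credential=AKID/20200301/us-east-1/s3/aws4_request"
+// parses into accessKey "AKID", scope.date 2020-03-01, scope.region "us-east-1",
+// scope.service "s3" and scope.request "aws4_request"; anything other than five
+// slash-separated elements returns ErrCredMalformed.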
+
+// Parse slice of signed headers from signed headers tag.
+func parseSignedHeader(signedHdrElement string) ([]string, ErrorCode) {
+ signedHdrFields := strings.Split(strings.TrimSpace(signedHdrElement), "=")
+ if len(signedHdrFields) != 2 {
+ return nil, ErrMissingFields
+ }
+ if signedHdrFields[0] != "SignedHeaders" {
+ return nil, ErrMissingSignHeadersTag
+ }
+ if signedHdrFields[1] == "" {
+ return nil, ErrMissingFields
+ }
+ signedHeaders := strings.Split(signedHdrFields[1], ";")
+ return signedHeaders, ErrNone
+}
+
+// Parse signature from signature tag.
+func parseSignature(signElement string) (string, ErrorCode) {
+ signFields := strings.Split(strings.TrimSpace(signElement), "=")
+ if len(signFields) != 2 {
+ return "", ErrMissingFields
+ }
+ if signFields[0] != "Signature" {
+ return "", ErrMissingSignTag
+ }
+ if signFields[1] == "" {
+ return "", ErrMissingFields
+ }
+ signature := signFields[1]
+ return signature, ErrNone
+}
+
+// doesPresignedSignatureMatch verifies the presigned request query parameters against a freshly computed signature.
+// - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html
+func (iam *IdentityAccessManagement) doesPresignedSignatureMatch(hashedPayload string, r *http.Request) (*Identity, ErrorCode) {
+
+ // Copy request
+ req := *r
+
+ // Parse request query string.
+ pSignValues, err := parsePreSignV4(req.URL.Query())
+ if err != ErrNone {
+ return nil, err
+ }
+
+ // Verify if the access key id matches.
+ identity, cred, found := iam.lookupByAccessKey(pSignValues.Credential.accessKey)
+ if !found {
+ return nil, ErrInvalidAccessKeyID
+ }
+
+ // Extract all the signed headers along with their values.
+ extractedSignedHeaders, errCode := extractSignedHeaders(pSignValues.SignedHeaders, r)
+ if errCode != ErrNone {
+ return nil, errCode
+ }
+ // Construct new query.
+ query := make(url.Values)
+ if req.URL.Query().Get("X-Amz-Content-Sha256") != "" {
+ query.Set("X-Amz-Content-Sha256", hashedPayload)
+ }
+
+ query.Set("X-Amz-Algorithm", signV4Algorithm)
+
+ now := time.Now().UTC()
+
+ // If the host which signed the request is slightly ahead in time (by less
+ // than 15 minutes), the request should still be allowed.
+ if pSignValues.Date.After(now.Add(15 * time.Minute)) {
+ return nil, ErrRequestNotReadyYet
+ }
+
+ if now.Sub(pSignValues.Date) > pSignValues.Expires {
+ return nil, ErrExpiredPresignRequest
+ }
+
+ // Save the date and expires.
+ t := pSignValues.Date
+ expireSeconds := int(pSignValues.Expires / time.Second)
+
+ // Construct the query.
+ query.Set("X-Amz-Date", t.Format(iso8601Format))
+ query.Set("X-Amz-Expires", strconv.Itoa(expireSeconds))
+ query.Set("X-Amz-SignedHeaders", getSignedHeaders(extractedSignedHeaders))
+ query.Set("X-Amz-Credential", cred.AccessKey+"/"+getScope(t, pSignValues.Credential.scope.region))
+
+ // Save other headers available in the request parameters.
+ for k, v := range req.URL.Query() {
+
+ // Handle the metadata in presigned put query string
+ if strings.Contains(strings.ToLower(k), "x-amz-meta-") {
+ query.Set(k, v[0])
+ }
+
+ if strings.HasPrefix(strings.ToLower(k), "x-amz") {
+ continue
+ }
+ query[k] = v
+ }
+
+ // Get the encoded query.
+ encodedQuery := query.Encode()
+
+ // Verify if date query is same.
+ if req.URL.Query().Get("X-Amz-Date") != query.Get("X-Amz-Date") {
+ return nil, ErrSignatureDoesNotMatch
+ }
+ // Verify if expires query is same.
+ if req.URL.Query().Get("X-Amz-Expires") != query.Get("X-Amz-Expires") {
+ return nil, ErrSignatureDoesNotMatch
+ }
+ // Verify if signed headers query is same.
+ if req.URL.Query().Get("X-Amz-SignedHeaders") != query.Get("X-Amz-SignedHeaders") {
+ return nil, ErrSignatureDoesNotMatch
+ }
+ // Verify if credential query is same.
+ if req.URL.Query().Get("X-Amz-Credential") != query.Get("X-Amz-Credential") {
+ return nil, ErrSignatureDoesNotMatch
+ }
+ // Verify if sha256 payload query is same.
+ if req.URL.Query().Get("X-Amz-Content-Sha256") != "" {
+ if req.URL.Query().Get("X-Amz-Content-Sha256") != query.Get("X-Amz-Content-Sha256") {
+ return nil, ErrContentSHA256Mismatch
+ }
+ }
+
+ // Finally, verify that the signature matches.
+
+ // Get canonical request.
+ presignedCanonicalReq := getCanonicalRequest(extractedSignedHeaders, hashedPayload, encodedQuery, req.URL.Path, req.Method)
+
+ // Get string to sign from canonical request.
+ presignedStringToSign := getStringToSign(presignedCanonicalReq, t, pSignValues.Credential.getScope())
+
+ // Get hmac presigned signing key.
+ presignedSigningKey := getSigningKey(cred.SecretKey, pSignValues.Credential.scope.date, pSignValues.Credential.scope.region)
+
+ // Get new signature.
+ newSignature := getSignature(presignedSigningKey, presignedStringToSign)
+
+ // Verify signature.
+ if !compareSignatureV4(req.URL.Query().Get("X-Amz-Signature"), newSignature) {
+ return nil, ErrSignatureDoesNotMatch
+ }
+ return identity, ErrNone
+}
+
+func contains(list []string, elem string) bool {
+ for _, t := range list {
+ if t == elem {
+ return true
+ }
+ }
+ return false
+}
+
+// preSignValues data type represents structured form of AWS Signature V4 query string.
+type preSignValues struct {
+ signValues
+ Date time.Time
+ Expires time.Duration
+}
+
+// Parses signature version '4' query string of the following form.
+//
+// querystring = X-Amz-Algorithm=algorithm
+// querystring += &X-Amz-Credential= urlencode(accessKey + '/' + credential_scope)
+// querystring += &X-Amz-Date=date
+// querystring += &X-Amz-Expires=timeout interval
+// querystring += &X-Amz-SignedHeaders=signed_headers
+// querystring += &X-Amz-Signature=signature
+//
+// verifies if any of the necessary query params are missing in the presigned request.
+func doesV4PresignParamsExist(query url.Values) ErrorCode {
+ v4PresignQueryParams := []string{"X-Amz-Algorithm", "X-Amz-Credential", "X-Amz-Signature", "X-Amz-Date", "X-Amz-SignedHeaders", "X-Amz-Expires"}
+ for _, v4PresignQueryParam := range v4PresignQueryParams {
+ if _, ok := query[v4PresignQueryParam]; !ok {
+ return ErrInvalidQueryParams
+ }
+ }
+ return ErrNone
+}
+
+// Parses all the presigned signature values into separate elements.
+func parsePreSignV4(query url.Values) (psv preSignValues, aec ErrorCode) {
+ var err ErrorCode
+ // verify whether the required query params exist.
+ err = doesV4PresignParamsExist(query)
+ if err != ErrNone {
+ return psv, err
+ }
+
+ // Verify if the query algorithm is supported or not.
+ if query.Get("X-Amz-Algorithm") != signV4Algorithm {
+ return psv, ErrInvalidQuerySignatureAlgo
+ }
+
+ // Initialize signature version '4' structured header.
+ preSignV4Values := preSignValues{}
+
+ // Save credential.
+ preSignV4Values.Credential, err = parseCredentialHeader("Credential=" + query.Get("X-Amz-Credential"))
+ if err != ErrNone {
+ return psv, err
+ }
+
+ var e error
+ // Save date in native time.Time.
+ preSignV4Values.Date, e = time.Parse(iso8601Format, query.Get("X-Amz-Date"))
+ if e != nil {
+ return psv, ErrMalformedPresignedDate
+ }
+
+ // Save expires in native time.Duration.
+ preSignV4Values.Expires, e = time.ParseDuration(query.Get("X-Amz-Expires") + "s")
+ if e != nil {
+ return psv, ErrMalformedExpires
+ }
+
+ if preSignV4Values.Expires < 0 {
+ return psv, ErrNegativeExpires
+ }
+
+ // Check that the expiry time does not exceed 7 days (604800 seconds).
+ if preSignV4Values.Expires.Seconds() > 604800 {
+ return psv, ErrMaximumExpires
+ }
+
+ // Save signed headers.
+ preSignV4Values.SignedHeaders, err = parseSignedHeader("SignedHeaders=" + query.Get("X-Amz-SignedHeaders"))
+ if err != ErrNone {
+ return psv, err
+ }
+
+ // Save signature.
+ preSignV4Values.Signature, err = parseSignature("Signature=" + query.Get("X-Amz-Signature"))
+ if err != ErrNone {
+ return psv, err
+ }
+
+ // Return the structured form of the signature query string.
+ return preSignV4Values, ErrNone
+}
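+
+// A minimal presigned query string that parses successfully looks like this
+// (assumed values, line-wrapped for readability):
+//
+//   X-Amz-Algorithm=AWS4-HMAC-SHA256
+//   &X-Amz-Credential=AKID%2F20200301%2Fus-east-1%2Fs3%2Faws4_request
+//   &X-Amz-Date=20200301T000000Z
+//   &X-Amz-Expires=3600
+//   &X-Amz-SignedHeaders=host
+//   &X-Amz-Signature=abc123
+//
+// Expires becomes a 3600s time.Duration; anything above 604800 (7 days)
+// returns ErrMaximumExpires.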
+
+// extractSignedHeaders extracts the signed headers and their values from the request.
+func extractSignedHeaders(signedHeaders []string, r *http.Request) (http.Header, ErrorCode) {
+ reqHeaders := r.Header
+ // find whether "host" is part of list of signed headers.
+ // if not return ErrUnsignedHeaders. "host" is mandatory.
+ if !contains(signedHeaders, "host") {
+ return nil, ErrUnsignedHeaders
+ }
+ extractedSignedHeaders := make(http.Header)
+ for _, header := range signedHeaders {
+ // `host` will not be found in the headers; it is available in r.Host.
+ // However, the list of signed headers must always contain "host".
+ val, ok := reqHeaders[http.CanonicalHeaderKey(header)]
+ if ok {
+ for _, enc := range val {
+ extractedSignedHeaders.Add(header, enc)
+ }
+ continue
+ }
+ switch header {
+ case "expect":
+ // The Go http server strips off the 'Expect' header. If the
+ // client sent it as part of the signed headers, we need to
+ // handle it here; otherwise we would see a signature mismatch.
+ // `aws-cli` sets this as part of signed headers.
+ //
+ // According to
+ // http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.20
+ // Expect header is always of form:
+ //
+ // Expect = "Expect" ":" 1#expectation
+ // expectation = "100-continue" | expectation-extension
+ //
+ // So it is safe to assume that '100-continue' is what would
+ // be sent; for the time being, keep this workaround.
+ // TODO: remove this once the Go server no longer filters
+ // out the 'Expect' header.
+ extractedSignedHeaders.Set(header, "100-continue")
+ case "host":
+ // Go http server removes "host" from Request.Header
+ extractedSignedHeaders.Set(header, r.Host)
+ case "transfer-encoding":
+ for _, enc := range r.TransferEncoding {
+ extractedSignedHeaders.Add(header, enc)
+ }
+ case "content-length":
+ // Signature-V4 spec excludes Content-Length from signed headers list for signature calculation.
+ // But some clients deviate from this rule. Hence we consider Content-Length for signature
+ // calculation to be compatible with such clients.
+ extractedSignedHeaders.Set(header, strconv.FormatInt(r.ContentLength, 10))
+ default:
+ return nil, ErrUnsignedHeaders
+ }
+ }
+ return extractedSignedHeaders, ErrNone
+}
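+
+// Sketch (assumed request): with SignedHeaders=host;x-amz-date on a request to
+// bucket.example.com, the extracted set is
+//
+//   host: bucket.example.com          (taken from r.Host, not r.Header)
+//   x-amz-date: 20200301T000000Z
+//
+// whereas a signed header that is absent from the request, other than the
+// special cases handled above, returns ErrUnsignedHeaders.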
+
+// getSignedHeaders generates an alphabetically sorted, semicolon-separated list of lowercase request header names.
+func getSignedHeaders(signedHeaders http.Header) string {
+ var headers []string
+ for k := range signedHeaders {
+ headers = append(headers, strings.ToLower(k))
+ }
+ sort.Strings(headers)
+ return strings.Join(headers, ";")
+}
+
+// getScope generates the credential scope string from a date, an AWS region, and the "s3" service.
+func getScope(t time.Time, region string) string {
+ scope := strings.Join([]string{
+ t.Format(yyyymmdd),
+ region,
+ "s3",
+ "aws4_request",
+ }, "/")
+ return scope
+}
+
+// getCanonicalRequest generates a canonical request of style:
+//
+// canonicalRequest =
+//  <HTTPMethod>\n
+//  <CanonicalURI>\n
+//  <CanonicalQueryString>\n
+//  <CanonicalHeaders>\n
+//  <SignedHeaders>\n
+//  <HashedPayload>
+//
+func getCanonicalRequest(extractedSignedHeaders http.Header, payload, queryStr, urlPath, method string) string {
+ rawQuery := strings.Replace(queryStr, "+", "%20", -1)
+ encodedPath := encodePath(urlPath)
+ canonicalRequest := strings.Join([]string{
+ method,
+ encodedPath,
+ rawQuery,
+ getCanonicalHeaders(extractedSignedHeaders),
+ getSignedHeaders(extractedSignedHeaders),
+ payload,
+ }, "\n")
+ return canonicalRequest
+}
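+
+// For a GET of /bucket/obj with an empty query string and the two headers from
+// the sketch above, the canonical request would be (assumed values; the final
+// line is the SHA-256 of an empty payload):
+//
+//   GET
+//   /bucket/obj
+//
+//   host:bucket.example.com
+//   x-amz-date:20200301T000000Z
+//
+//   host;x-amz-date
+//   e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
+//
+// The first blank line is the empty query string; the second is the newline
+// that terminates the canonical headers block.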
+
+// getStringToSign generates the string to sign from the canonical request, timestamp, and scope.
+func getStringToSign(canonicalRequest string, t time.Time, scope string) string {
+ stringToSign := signV4Algorithm + "\n" + t.Format(iso8601Format) + "\n"
+ stringToSign = stringToSign + scope + "\n"
+ canonicalRequestBytes := sha256.Sum256([]byte(canonicalRequest))
+ stringToSign = stringToSign + hex.EncodeToString(canonicalRequestBytes[:])
+ return stringToSign
+}
+
+// sumHMAC calculates the HMAC-SHA256 of data keyed with key.
+func sumHMAC(key []byte, data []byte) []byte {
+ hash := hmac.New(sha256.New, key)
+ hash.Write(data)
+ return hash.Sum(nil)
+}
+
+// getSigningKey derives the HMAC signing key used to calculate the final signature.
+func getSigningKey(secretKey string, t time.Time, region string) []byte {
+ date := sumHMAC([]byte("AWS4"+secretKey), []byte(t.Format(yyyymmdd)))
+ regionBytes := sumHMAC(date, []byte(region))
+ service := sumHMAC(regionBytes, []byte("s3"))
+ signingKey := sumHMAC(service, []byte("aws4_request"))
+ return signingKey
+}
+
+// getSignature returns the final signature in hexadecimal form.
+func getSignature(signingKey []byte, stringToSign string) string {
+ return hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign)))
+}
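+
+// Putting the two helpers together, the derivation chain follows the SigV4
+// spec (sketch with assumed inputs):
+//
+//   kDate     = HMAC-SHA256("AWS4"+secretKey, "20200301")
+//   kRegion   = HMAC-SHA256(kDate, "us-east-1")
+//   kService  = HMAC-SHA256(kRegion, "s3")
+//   kSigning  = HMAC-SHA256(kService, "aws4_request")
+//   signature = hex(HMAC-SHA256(kSigning, stringToSign))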
+
+// getCanonicalHeaders generates the canonical list of lowercase request header names with their values.
+func getCanonicalHeaders(signedHeaders http.Header) string {
+ var headers []string
+ vals := make(http.Header)
+ for k, vv := range signedHeaders {
+ headers = append(headers, strings.ToLower(k))
+ vals[strings.ToLower(k)] = vv
+ }
+ sort.Strings(headers)
+
+ var buf bytes.Buffer
+ for _, k := range headers {
+ buf.WriteString(k)
+ buf.WriteByte(':')
+ for idx, v := range vals[k] {
+ if idx > 0 {
+ buf.WriteByte(',')
+ }
+ buf.WriteString(signV4TrimAll(v))
+ }
+ buf.WriteByte('\n')
+ }
+ return buf.String()
+}
+
+// Trim leading and trailing spaces and replace sequential spaces with one space, following Trimall()
+// in http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
+func signV4TrimAll(input string) string {
+ // Compress adjacent spaces (a space is determined by
+ // unicode.IsSpace() internally here) to one space and return
+ return strings.Join(strings.Fields(input), " ")
+}
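+
+// For example, signV4TrimAll("  a   b \t c  ") returns "a b c".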
+
+// If the object name matches this reserved pattern, there is no need to encode it.
+var reservedObjectNames = regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$")
+
+// encodePath encodes a path from its UTF-8 byte representation to percent-encoded hex escape sequences.
+//
+// This is necessary since the regular url.Parse() and url.Encode() functions do not fully support
+// UTF-8: non-English characters cannot be parsed due to the way url.Encode() is written.
+//
+// This function, on the other hand, is a direct replacement for the url.Encode() technique and
+// supports pretty much every UTF-8 character.
+func encodePath(pathName string) string {
+ if reservedObjectNames.MatchString(pathName) {
+ return pathName
+ }
+ var encodedPathname string
+ for _, s := range pathName {
+ if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark)
+ encodedPathname = encodedPathname + string(s)
+ continue
+ }
+ switch s {
+ case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark)
+ encodedPathname = encodedPathname + string(s)
+ continue
+ default:
+ length := utf8.RuneLen(s)
+ if length < 0 {
+ // if utf8 cannot encode the rune, return the string unchanged
+ return pathName
+ }
+ u := make([]byte, length)
+ utf8.EncodeRune(u, s)
+ for _, r := range u {
+ // percent-encode each byte as uppercase hex
+ encodedPathname = encodedPathname + "%" + strings.ToUpper(hex.EncodeToString([]byte{r}))
+ }
+ }
+ }
+ return encodedPathname
+}
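+
+// For example, encodePath("a/b c.txt") returns "a/b%20c.txt", while a path made
+// entirely of unreserved characters, such as "bucket/key.txt", is returned
+// unchanged thanks to the reservedObjectNames fast path.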
+
+// compareSignatureV4 returns true if and only if both signatures
+// are equal. The signatures are expected to be HEX encoded strings
+// according to the AWS S3 signature V4 spec.
+func compareSignatureV4(sig1, sig2 string) bool {
+ // The constant-time comparison over []byte(str) works because the hex
+ // encoding is unique for a sequence of bytes. See also compareSignatureV2.
+ return subtle.ConstantTimeCompare([]byte(sig1), []byte(sig2)) == 1
+}
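+
+// Note: a naive sig1 == sig2 comparison could leak, via timing, how many
+// leading bytes of the two signatures match. ConstantTimeCompare inspects
+// every byte regardless, so for equal-length inputs
+//
+//   compareSignatureV4("abc", "abd") // false
+//   compareSignatureV4("abc", "zzz") // false
+//
+// take the same amount of time.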
diff --git a/weed/s3api/auto_signature_v4_test.go b/weed/s3api/auto_signature_v4_test.go
new file mode 100644
index 000000000..036b5c052
--- /dev/null
+++ b/weed/s3api/auto_signature_v4_test.go
@@ -0,0 +1,418 @@
+package s3api
+
+import (
+ "bytes"
+ "crypto/md5"
+ "crypto/sha256"
+ "encoding/base64"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "sort"
+ "strconv"
+ "strings"
+ "testing"
+ "time"
+ "unicode/utf8"
+)
+
+// TestIsRequestPresignedSignatureV4 - Test validates the logic for presigned signature version 4 detection.
+func TestIsRequestPresignedSignatureV4(t *testing.T) {
+ testCases := []struct {
+ inputQueryKey string
+ inputQueryValue string
+ expectedResult bool
+ }{
+ // Test case - 1.
+ // Test case with no query key set.
+ {"", "", false},
+ // Test case - 2.
+ // Test case with query key "X-Amz-Credential" set.
+ {"X-Amz-Credential", "", true},
+ // Test case - 3.
+ // Test case with query key "X-Amz-Content-Sha256" set.
+ {"X-Amz-Content-Sha256", "", false},
+ }
+
+ for i, testCase := range testCases {
+ // creating an input HTTP request.
+ // Only the query parameters are relevant for this particular test.
+ inputReq, err := http.NewRequest("GET", "http://example.com", nil)
+ if err != nil {
+ t.Fatalf("Error initializing input HTTP request: %v", err)
+ }
+ q := inputReq.URL.Query()
+ q.Add(testCase.inputQueryKey, testCase.inputQueryValue)
+ inputReq.URL.RawQuery = q.Encode()
+
+ actualResult := isRequestPresignedSignatureV4(inputReq)
+ if testCase.expectedResult != actualResult {
+ t.Errorf("Test %d: Expected the result to `%v`, but instead got `%v`", i+1, testCase.expectedResult, actualResult)
+ }
+ }
+}
+
+// Tests the request-authentication function and the S3 errors it replies with.
+func TestIsReqAuthenticated(t *testing.T) {
+ iam := NewIdentityAccessManagement("", "")
+ iam.identities = []*Identity{
+ {
+ Name: "someone",
+ Credentials: []*Credential{
+ {
+ AccessKey: "access_key_1",
+ SecretKey: "secret_key_1",
+ },
+ },
+ Actions: nil,
+ },
+ }
+
+ // List of test cases for validating http request authentication.
+ testCases := []struct {
+ req *http.Request
+ s3Error ErrorCode
+ }{
+ // When request is unsigned, access denied is returned.
+ {mustNewRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrAccessDenied},
+ // When request is properly signed, error is none.
+ {mustNewSignedRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrNone},
+ }
+
+ // Validates all testcases.
+ for i, testCase := range testCases {
+ if _, s3Error := iam.reqSignatureV4Verify(testCase.req); s3Error != testCase.s3Error {
+ ioutil.ReadAll(testCase.req.Body)
+ t.Fatalf("Test %d: Unexpected S3 error: want %d - got %d", i, testCase.s3Error, s3Error)
+ }
+ }
+}
+
+func TestCheckAdminRequestAuthType(t *testing.T) {
+ iam := NewIdentityAccessManagement("", "")
+ iam.identities = []*Identity{
+ {
+ Name: "someone",
+ Credentials: []*Credential{
+ {
+ AccessKey: "access_key_1",
+ SecretKey: "secret_key_1",
+ },
+ },
+ Actions: nil,
+ },
+ }
+
+ testCases := []struct {
+ Request *http.Request
+ ErrCode ErrorCode
+ }{
+ {Request: mustNewRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrAccessDenied},
+ {Request: mustNewSignedRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrNone},
+ {Request: mustNewPresignedRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrNone},
+ }
+ for i, testCase := range testCases {
+ if _, s3Error := iam.reqSignatureV4Verify(testCase.Request); s3Error != testCase.ErrCode {
+ t.Errorf("Test %d: Unexpected s3error returned wanted %d, got %d", i, testCase.ErrCode, s3Error)
+ }
+ }
+}
+
+// Provides a fully populated http request instance, fails otherwise.
+func mustNewRequest(method string, urlStr string, contentLength int64, body io.ReadSeeker, t *testing.T) *http.Request {
+ req, err := newTestRequest(method, urlStr, contentLength, body)
+ if err != nil {
+ t.Fatalf("Unable to initialize new http request %s", err)
+ }
+ return req
+}
+
+// This is similar to mustNewRequest but additionally the request
+// is signed with AWS Signature V4, fails if not able to do so.
+func mustNewSignedRequest(method string, urlStr string, contentLength int64, body io.ReadSeeker, t *testing.T) *http.Request {
+ req := mustNewRequest(method, urlStr, contentLength, body, t)
+ cred := &Credential{"access_key_1", "secret_key_1"}
+ if err := signRequestV4(req, cred.AccessKey, cred.SecretKey); err != nil {
+ t.Fatalf("Unable to inititalized new signed http request %s", err)
+ }
+ return req
+}
+
+// This is similar to mustNewRequest but additionally the request
+// is presigned with AWS Signature V4, fails if not able to do so.
+func mustNewPresignedRequest(method string, urlStr string, contentLength int64, body io.ReadSeeker, t *testing.T) *http.Request {
+ req := mustNewRequest(method, urlStr, contentLength, body, t)
+ cred := &Credential{"access_key_1", "secret_key_1"}
+ if err := preSignV4(req, cred.AccessKey, cred.SecretKey, int64(10*time.Minute.Seconds())); err != nil {
+ t.Fatalf("Unable to inititalized new signed http request %s", err)
+ }
+ return req
+}
+
+// Returns new HTTP request object.
+func newTestRequest(method, urlStr string, contentLength int64, body io.ReadSeeker) (*http.Request, error) {
+ if method == "" {
+ method = "POST"
+ }
+
+ // Save for subsequent use
+ var hashedPayload string
+ var md5Base64 string
+ switch {
+ case body == nil:
+ hashedPayload = getSHA256Hash([]byte{})
+ default:
+ payloadBytes, err := ioutil.ReadAll(body)
+ if err != nil {
+ return nil, err
+ }
+ hashedPayload = getSHA256Hash(payloadBytes)
+ md5Base64 = getMD5HashBase64(payloadBytes)
+ }
+ // Seek back to beginning.
+ if body != nil {
+ body.Seek(0, io.SeekStart)
+ } else {
+ body = bytes.NewReader([]byte(""))
+ }
+ req, err := http.NewRequest(method, urlStr, body)
+ if err != nil {
+ return nil, err
+ }
+ if md5Base64 != "" {
+ req.Header.Set("Content-Md5", md5Base64)
+ }
+ req.Header.Set("x-amz-content-sha256", hashedPayload)
+
+ // Add Content-Length
+ req.ContentLength = contentLength
+
+ return req, nil
+}
+
+// getSHA256Hash returns SHA-256 hash in hex encoding of given data.
+func getSHA256Hash(data []byte) string {
+ return hex.EncodeToString(getSHA256Sum(data))
+}
+
+// getMD5HashBase64 returns MD5 hash in base64 encoding of given data.
+func getMD5HashBase64(data []byte) string {
+ return base64.StdEncoding.EncodeToString(getMD5Sum(data))
+}
+
+// getSHA256Sum returns the SHA-256 sum of given data.
+func getSHA256Sum(data []byte) []byte {
+ hash := sha256.New()
+ hash.Write(data)
+ return hash.Sum(nil)
+}
+
+// getMD5Sum returns MD5 sum of given data.
+func getMD5Sum(data []byte) []byte {
+ hash := md5.New()
+ hash.Write(data)
+ return hash.Sum(nil)
+}
+
+// getMD5Hash returns MD5 hash in hex encoding of given data.
+func getMD5Hash(data []byte) string {
+ return hex.EncodeToString(getMD5Sum(data))
+}
+
+var ignoredHeaders = map[string]bool{
+ "Authorization": true,
+ "Content-Type": true,
+ "Content-Length": true,
+ "User-Agent": true,
+}
+
+// Sign given request using Signature V4.
+func signRequestV4(req *http.Request, accessKey, secretKey string) error {
+ // Get hashed payload.
+ hashedPayload := req.Header.Get("x-amz-content-sha256")
+ if hashedPayload == "" {
+ return fmt.Errorf("Invalid hashed payload")
+ }
+
+ currTime := time.Now()
+
+ // Set x-amz-date.
+ req.Header.Set("x-amz-date", currTime.Format(iso8601Format))
+
+ // Get header map.
+ headerMap := make(map[string][]string)
+ for k, vv := range req.Header {
+ // If request header key is not in ignored headers, then add it.
+ if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; !ok {
+ headerMap[strings.ToLower(k)] = vv
+ }
+ }
+
+ // Get header keys.
+ headers := []string{"host"}
+ for k := range headerMap {
+ headers = append(headers, k)
+ }
+ sort.Strings(headers)
+
+ region := "us-east-1"
+
+ // Get canonical headers.
+ var buf bytes.Buffer
+ for _, k := range headers {
+ buf.WriteString(k)
+ buf.WriteByte(':')
+ switch {
+ case k == "host":
+ buf.WriteString(req.URL.Host)
+ fallthrough
+ default:
+ for idx, v := range headerMap[k] {
+ if idx > 0 {
+ buf.WriteByte(',')
+ }
+ buf.WriteString(v)
+ }
+ buf.WriteByte('\n')
+ }
+ }
+ canonicalHeaders := buf.String()
+
+ // Get signed headers.
+ signedHeaders := strings.Join(headers, ";")
+
+ // Get canonical query string.
+ req.URL.RawQuery = strings.Replace(req.URL.Query().Encode(), "+", "%20", -1)
+
+ // Get canonical URI.
+ canonicalURI := EncodePath(req.URL.Path)
+
+ // Get canonical request.
+ // canonicalRequest =
+ //  <HTTPMethod>\n
+ //  <CanonicalURI>\n
+ //  <CanonicalQueryString>\n
+ //  <CanonicalHeaders>\n
+ //  <SignedHeaders>\n
+ //  <HashedPayload>
+ canonicalRequest := strings.Join([]string{
+ req.Method,
+ canonicalURI,
+ req.URL.RawQuery,
+ canonicalHeaders,
+ signedHeaders,
+ hashedPayload,
+ }, "\n")
+
+ // Get scope.
+ scope := strings.Join([]string{
+ currTime.Format(yyyymmdd),
+ region,
+ "s3",
+ "aws4_request",
+ }, "/")
+
+ stringToSign := "AWS4-HMAC-SHA256" + "\n" + currTime.Format(iso8601Format) + "\n"
+ stringToSign = stringToSign + scope + "\n"
+ stringToSign = stringToSign + getSHA256Hash([]byte(canonicalRequest))
+
+ date := sumHMAC([]byte("AWS4"+secretKey), []byte(currTime.Format(yyyymmdd)))
+ regionHMAC := sumHMAC(date, []byte(region))
+ service := sumHMAC(regionHMAC, []byte("s3"))
+ signingKey := sumHMAC(service, []byte("aws4_request"))
+
+ signature := hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign)))
+
+ // final Authorization header
+ parts := []string{
+ "AWS4-HMAC-SHA256" + " Credential=" + accessKey + "/" + scope,
+ "SignedHeaders=" + signedHeaders,
+ "Signature=" + signature,
+ }
+ auth := strings.Join(parts, ", ")
+ req.Header.Set("Authorization", auth)
+
+ return nil
+}
+
+// preSignV4 presigns the request, in accordance with
+// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html.
+func preSignV4(req *http.Request, accessKeyID, secretAccessKey string, expires int64) error {
+ // Presign is not needed for anonymous credentials.
+ if accessKeyID == "" || secretAccessKey == "" {
+ return errors.New("Presign cannot be generated without access and secret keys")
+ }
+
+ region := "us-east-1"
+ date := time.Now().UTC()
+ scope := getScope(date, region)
+ credential := fmt.Sprintf("%s/%s", accessKeyID, scope)
+
+ // Set URL query.
+ query := req.URL.Query()
+ query.Set("X-Amz-Algorithm", signV4Algorithm)
+ query.Set("X-Amz-Date", date.Format(iso8601Format))
+ query.Set("X-Amz-Expires", strconv.FormatInt(expires, 10))
+ query.Set("X-Amz-SignedHeaders", "host")
+ query.Set("X-Amz-Credential", credential)
+ query.Set("X-Amz-Content-Sha256", unsignedPayload)
+
+ // "host" is the only header required to be signed for Presigned URLs.
+ extractedSignedHeaders := make(http.Header)
+ extractedSignedHeaders.Set("host", req.Host)
+
+ queryStr := strings.Replace(query.Encode(), "+", "%20", -1)
+ canonicalRequest := getCanonicalRequest(extractedSignedHeaders, unsignedPayload, queryStr, req.URL.Path, req.Method)
+ stringToSign := getStringToSign(canonicalRequest, date, scope)
+ signingKey := getSigningKey(secretAccessKey, date, region)
+ signature := getSignature(signingKey, stringToSign)
+
+ req.URL.RawQuery = query.Encode()
+
+ // Add signature header to RawQuery.
+ req.URL.RawQuery += "&X-Amz-Signature=" + url.QueryEscape(signature)
+
+ // The request URL now carries the complete presigned query string.
+ return nil
+}
+
+// EncodePath encodes a path from its UTF-8 byte representation to percent-encoded hex escape sequences.
+//
+// This is necessary since the regular url.Parse() and url.Encode() functions do not fully support
+// UTF-8: non-English characters cannot be parsed due to the way url.Encode() is written.
+//
+// This function, on the other hand, is a direct replacement for the url.Encode() technique and
+// supports pretty much every UTF-8 character.
+func EncodePath(pathName string) string {
+ if reservedObjectNames.MatchString(pathName) {
+ return pathName
+ }
+ var encodedPathname string
+ for _, s := range pathName {
+ if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark)
+ encodedPathname = encodedPathname + string(s)
+ continue
+ }
+ switch s {
+ case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark)
+ encodedPathname = encodedPathname + string(s)
+ continue
+ default:
+ length := utf8.RuneLen(s)
+ if length < 0 {
+ // if utf8 cannot encode the rune, return the string unchanged
+ return pathName
+ }
+ u := make([]byte, length)
+ utf8.EncodeRune(u, s)
+ for _, r := range u {
+ // percent-encode each byte as uppercase hex
+ encodedPathname = encodedPathname + "%" + strings.ToUpper(hex.EncodeToString([]byte{r}))
+ }
+ }
+ }
+ return encodedPathname
+}
diff --git a/weed/s3api/chunked_reader_v4.go b/weed/s3api/chunked_reader_v4.go
index 061fd4a92..76c4394c2 100644
--- a/weed/s3api/chunked_reader_v4.go
+++ b/weed/s3api/chunked_reader_v4.go
@@ -21,16 +21,114 @@ package s3api
import (
"bufio"
"bytes"
+ "crypto/sha256"
+ "encoding/hex"
"errors"
- "github.com/dustin/go-humanize"
+ "hash"
"io"
"net/http"
+ "time"
+
+ "github.com/dustin/go-humanize"
)
-// Streaming AWS Signature Version '4' constants.
-const (
- streamingContentSHA256 = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD"
-)
+// getChunkSignature - get chunk signature.
+func getChunkSignature(secretKey string, seedSignature string, region string, date time.Time, hashedChunk string) string {
+
+ // Calculate string to sign.
+ stringToSign := signV4ChunkedAlgorithm + "\n" +
+ date.Format(iso8601Format) + "\n" +
+ getScope(date, region) + "\n" +
+ seedSignature + "\n" +
+ emptySHA256 + "\n" +
+ hashedChunk
+
+ // Get hmac signing key.
+ signingKey := getSigningKey(secretKey, date, region)
+
+ // Calculate signature.
+ newSignature := getSignature(signingKey, stringToSign)
+
+ return newSignature
+}
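+
+// Chunk signatures are chained (sketch, assumed values): the seed signature
+// from the Authorization header signs the first chunk, whose signature then
+// seeds the second, and so on:
+//
+//   sig1 := getChunkSignature(secretKey, seedSignature, region, date, hashedChunk1)
+//   sig2 := getChunkSignature(secretKey, sig1, region, date, hashedChunk2)
+//
+// where hashedChunk1/hashedChunk2 stand in (hypothetically) for the
+// hex-encoded SHA-256 digests of the chunk payloads.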
+
+// calculateSeedSignature - calculates the seed signature in accordance with
+// - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html
+// It returns the signature, or an error if the signature mismatches or any
+// other error occurs while parsing and validating.
+func (iam *IdentityAccessManagement) calculateSeedSignature(r *http.Request) (cred *Credential, signature string, region string, date time.Time, errCode ErrorCode) {
+
+ // Copy request.
+ req := *r
+
+ // Save authorization header.
+ v4Auth := req.Header.Get("Authorization")
+
+ // Parse signature version '4' header.
+ signV4Values, errCode := parseSignV4(v4Auth)
+ if errCode != ErrNone {
+ return nil, "", "", time.Time{}, errCode
+ }
+
+ // Payload streaming.
+ payload := streamingContentSHA256
+
+ // Payload for STREAMING signature should be 'STREAMING-AWS4-HMAC-SHA256-PAYLOAD'
+ if payload != req.Header.Get("X-Amz-Content-Sha256") {
+ return nil, "", "", time.Time{}, ErrContentSHA256Mismatch
+ }
+
+ // Extract all the signed headers along with their values.
+ extractedSignedHeaders, errCode := extractSignedHeaders(signV4Values.SignedHeaders, r)
+ if errCode != ErrNone {
+ return nil, "", "", time.Time{}, errCode
+ }
+ // Verify if the access key id matches.
+ _, cred, found := iam.lookupByAccessKey(signV4Values.Credential.accessKey)
+ if !found {
+ return nil, "", "", time.Time{}, ErrInvalidAccessKeyID
+ }
+
+ // Take the region from the credential scope.
+ region = signV4Values.Credential.scope.region
+
+ // Extract the date; if not present, return an error.
+ var dateStr string
+ if dateStr = req.Header.Get(http.CanonicalHeaderKey("x-amz-date")); dateStr == "" {
+ if dateStr = r.Header.Get("Date"); dateStr == "" {
+ return nil, "", "", time.Time{}, ErrMissingDateHeader
+ }
+ }
+ // Parse date header.
+ var err error
+ date, err = time.Parse(iso8601Format, dateStr)
+ if err != nil {
+ return nil, "", "", time.Time{}, ErrMalformedDate
+ }
+
+ // Query string.
+ queryStr := req.URL.Query().Encode()
+
+ // Get canonical request.
+ canonicalRequest := getCanonicalRequest(extractedSignedHeaders, payload, queryStr, req.URL.Path, req.Method)
+
+ // Get string to sign from canonical request.
+ stringToSign := getStringToSign(canonicalRequest, date, signV4Values.Credential.getScope())
+
+ // Get hmac signing key.
+ signingKey := getSigningKey(cred.SecretKey, signV4Values.Credential.scope.date, region)
+
+ // Calculate signature.
+ newSignature := getSignature(signingKey, stringToSign)
+
+ // Verify if signature match.
+ if !compareSignatureV4(newSignature, signV4Values.Signature) {
+ return nil, "", "", time.Time{}, ErrSignatureDoesNotMatch
+ }
+
+ // Return the calculated signature.
+ return cred, newSignature, region, date, ErrNone
+}
const maxLineLength = 4 * humanize.KiByte // assumed <= bufio.defaultBufSize 4KiB
@@ -43,22 +141,36 @@ var errMalformedEncoding = errors.New("malformed chunked encoding")
// newSignV4ChunkedReader returns a new s3ChunkedReader that translates the data read from r
// out of HTTP "chunked" format before returning it.
// The s3ChunkedReader returns io.EOF when the final 0-length chunk is read.
-func newSignV4ChunkedReader(req *http.Request) io.ReadCloser {
- return &s3ChunkedReader{
- reader: bufio.NewReader(req.Body),
- state: readChunkHeader,
+func (iam *IdentityAccessManagement) newSignV4ChunkedReader(req *http.Request) (io.ReadCloser, ErrorCode) {
+ ident, seedSignature, region, seedDate, errCode := iam.calculateSeedSignature(req)
+ if errCode != ErrNone {
+ return nil, errCode
}
+ return &s3ChunkedReader{
+ cred: ident,
+ reader: bufio.NewReader(req.Body),
+ seedSignature: seedSignature,
+ seedDate: seedDate,
+ region: region,
+ chunkSHA256Writer: sha256.New(),
+ state: readChunkHeader,
+ }, ErrNone
}
// Represents the overall state that is required for decoding a
// AWS Signature V4 chunked reader.
type s3ChunkedReader struct {
- reader *bufio.Reader
- state chunkState
- lastChunk bool
- chunkSignature string
- n uint64 // Unread bytes in chunk
- err error
+ cred *Credential
+ reader *bufio.Reader
+ seedSignature string
+ seedDate time.Time
+ region string
+ state chunkState
+ lastChunk bool
+ chunkSignature string
+ chunkSHA256Writer hash.Hash // Calculates sha256 of chunk data.
+ n uint64 // Unread bytes in chunk
+ err error
}
// Read chunk reads the chunk token signature portion.
@@ -157,6 +269,9 @@ func (cr *s3ChunkedReader) Read(buf []byte) (n int, err error) {
return 0, cr.err
}
+ // Calculate sha256.
+ cr.chunkSHA256Writer.Write(rbuf[:n0])
+
// Update the bytes read into request buffer so far.
n += n0
buf = buf[n0:]
@@ -169,6 +284,19 @@ func (cr *s3ChunkedReader) Read(buf []byte) (n int, err error) {
continue
}
case verifyChunk:
+ // Calculate the hashed chunk.
+ hashedChunk := hex.EncodeToString(cr.chunkSHA256Writer.Sum(nil))
+ // Calculate the chunk signature.
+ newSignature := getChunkSignature(cr.cred.SecretKey, cr.seedSignature, cr.region, cr.seedDate, hashedChunk)
+ if !compareSignatureV4(cr.chunkSignature, newSignature) {
+ // Chunk signature doesn't match; return a signature mismatch error.
+ cr.err = errors.New("chunk signature does not match")
+ return 0, cr.err
+ }
+ // The newly calculated signature becomes the seed for the next chunk;
+ // this follows the chaining.
+ cr.seedSignature = newSignature
+ cr.chunkSHA256Writer.Reset()
if cr.lastChunk {
cr.state = eofChunk
} else {
diff --git a/weed/s3api/filer_multipart.go b/weed/s3api/filer_multipart.go
index d3bde66ee..31ac850b1 100644
--- a/weed/s3api/filer_multipart.go
+++ b/weed/s3api/filer_multipart.go
@@ -1,7 +1,6 @@
package s3api
import (
- "context"
"encoding/xml"
"fmt"
"path/filepath"
@@ -11,10 +10,11 @@ import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/s3"
+ "github.com/google/uuid"
+
"github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
- "github.com/google/uuid"
)
type InitiateMultipartUploadResult struct {
@@ -22,11 +22,11 @@ type InitiateMultipartUploadResult struct {
s3.CreateMultipartUploadOutput
}
-func (s3a *S3ApiServer) createMultipartUpload(ctx context.Context, input *s3.CreateMultipartUploadInput) (output *InitiateMultipartUploadResult, code ErrorCode) {
+func (s3a *S3ApiServer) createMultipartUpload(input *s3.CreateMultipartUploadInput) (output *InitiateMultipartUploadResult, code ErrorCode) {
uploadId, _ := uuid.NewRandom()
uploadIdString := uploadId.String()
- if err := s3a.mkdir(ctx, s3a.genUploadsFolder(*input.Bucket), uploadIdString, func(entry *filer_pb.Entry) {
+ if err := s3a.mkdir(s3a.genUploadsFolder(*input.Bucket), uploadIdString, func(entry *filer_pb.Entry) {
if entry.Extended == nil {
entry.Extended = make(map[string][]byte)
}
@@ -52,11 +52,11 @@ type CompleteMultipartUploadResult struct {
s3.CompleteMultipartUploadOutput
}
-func (s3a *S3ApiServer) completeMultipartUpload(ctx context.Context, input *s3.CompleteMultipartUploadInput) (output *CompleteMultipartUploadResult, code ErrorCode) {
+func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploadInput) (output *CompleteMultipartUploadResult, code ErrorCode) {
uploadDirectory := s3a.genUploadsFolder(*input.Bucket) + "/" + *input.UploadId
- entries, err := s3a.list(ctx, uploadDirectory, "", "", false, 0)
+ entries, err := s3a.list(uploadDirectory, "", "", false, 0)
if err != nil {
glog.Errorf("completeMultipartUpload %s %s error: %v", *input.Bucket, *input.UploadId, err)
return nil, ErrNoSuchUpload
@@ -69,11 +69,12 @@ func (s3a *S3ApiServer) completeMultipartUpload(ctx context.Context, input *s3.C
if strings.HasSuffix(entry.Name, ".part") && !entry.IsDirectory {
for _, chunk := range entry.Chunks {
p := &filer_pb.FileChunk{
- FileId: chunk.GetFileIdString(),
- Offset: offset,
- Size: chunk.Size,
- Mtime: chunk.Mtime,
- ETag: chunk.ETag,
+ FileId: chunk.GetFileIdString(),
+ Offset: offset,
+ Size: chunk.Size,
+ Mtime: chunk.Mtime,
+ CipherKey: chunk.CipherKey,
+ ETag: chunk.ETag,
}
finalParts = append(finalParts, p)
offset += int64(chunk.Size)
@@ -96,7 +97,7 @@ func (s3a *S3ApiServer) completeMultipartUpload(ctx context.Context, input *s3.C
dirName = dirName[:len(dirName)-1]
}
- err = s3a.mkFile(ctx, dirName, entryName, finalParts)
+ err = s3a.mkFile(dirName, entryName, finalParts)
if err != nil {
glog.Errorf("completeMultipartUpload %s/%s error: %v", dirName, entryName, err)
@@ -107,27 +108,27 @@ func (s3a *S3ApiServer) completeMultipartUpload(ctx context.Context, input *s3.C
CompleteMultipartUploadOutput: s3.CompleteMultipartUploadOutput{
Location: aws.String(fmt.Sprintf("http://%s%s/%s", s3a.option.Filer, dirName, entryName)),
Bucket: input.Bucket,
- ETag: aws.String("\"" + filer2.ETag(finalParts) + "\""),
+ ETag: aws.String("\"" + filer2.ETagChunks(finalParts) + "\""),
Key: objectKey(input.Key),
},
}
- if err = s3a.rm(ctx, s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true, false, true); err != nil {
+ if err = s3a.rm(s3a.genUploadsFolder(*input.Bucket), *input.UploadId, false, true); err != nil {
glog.V(1).Infof("completeMultipartUpload cleanup %s upload %s: %v", *input.Bucket, *input.UploadId, err)
}
return
}
-func (s3a *S3ApiServer) abortMultipartUpload(ctx context.Context, input *s3.AbortMultipartUploadInput) (output *s3.AbortMultipartUploadOutput, code ErrorCode) {
+func (s3a *S3ApiServer) abortMultipartUpload(input *s3.AbortMultipartUploadInput) (output *s3.AbortMultipartUploadOutput, code ErrorCode) {
- exists, err := s3a.exists(ctx, s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true)
+ exists, err := s3a.exists(s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true)
if err != nil {
glog.V(1).Infof("bucket %s abort upload %s: %v", *input.Bucket, *input.UploadId, err)
return nil, ErrNoSuchUpload
}
if exists {
- err = s3a.rm(ctx, s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true, true, true)
+ err = s3a.rm(s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true, true)
}
if err != nil {
glog.V(1).Infof("bucket %s remove upload %s: %v", *input.Bucket, *input.UploadId, err)
@@ -142,7 +143,7 @@ type ListMultipartUploadsResult struct {
s3.ListMultipartUploadsOutput
}
-func (s3a *S3ApiServer) listMultipartUploads(ctx context.Context, input *s3.ListMultipartUploadsInput) (output *ListMultipartUploadsResult, code ErrorCode) {
+func (s3a *S3ApiServer) listMultipartUploads(input *s3.ListMultipartUploadsInput) (output *ListMultipartUploadsResult, code ErrorCode) {
output = &ListMultipartUploadsResult{
ListMultipartUploadsOutput: s3.ListMultipartUploadsOutput{
@@ -155,7 +156,7 @@ func (s3a *S3ApiServer) listMultipartUploads(ctx context.Context, input *s3.List
},
}
- entries, err := s3a.list(ctx, s3a.genUploadsFolder(*input.Bucket), *input.Prefix, *input.KeyMarker, true, int(*input.MaxUploads))
+ entries, err := s3a.list(s3a.genUploadsFolder(*input.Bucket), *input.Prefix, *input.KeyMarker, true, uint32(*input.MaxUploads))
if err != nil {
glog.Errorf("listMultipartUploads %s error: %v", *input.Bucket, err)
return
@@ -179,7 +180,7 @@ type ListPartsResult struct {
s3.ListPartsOutput
}
-func (s3a *S3ApiServer) listObjectParts(ctx context.Context, input *s3.ListPartsInput) (output *ListPartsResult, code ErrorCode) {
+func (s3a *S3ApiServer) listObjectParts(input *s3.ListPartsInput) (output *ListPartsResult, code ErrorCode) {
output = &ListPartsResult{
ListPartsOutput: s3.ListPartsOutput{
Bucket: input.Bucket,
@@ -190,8 +191,7 @@ func (s3a *S3ApiServer) listObjectParts(ctx context.Context, input *s3.ListParts
},
}
- entries, err := s3a.list(ctx, s3a.genUploadsFolder(*input.Bucket)+"/"+*input.UploadId,
- "", fmt.Sprintf("%04d.part", *input.PartNumberMarker), false, int(*input.MaxParts))
+ entries, err := s3a.list(s3a.genUploadsFolder(*input.Bucket)+"/"+*input.UploadId, "", fmt.Sprintf("%04d.part", *input.PartNumberMarker), false, uint32(*input.MaxParts))
if err != nil {
glog.Errorf("listObjectParts %s %s error: %v", *input.Bucket, *input.UploadId, err)
return nil, ErrNoSuchUpload
@@ -207,9 +207,9 @@ func (s3a *S3ApiServer) listObjectParts(ctx context.Context, input *s3.ListParts
}
output.Parts = append(output.Parts, &s3.Part{
PartNumber: aws.Int64(int64(partNumber)),
- LastModified: aws.Time(time.Unix(entry.Attributes.Mtime, 0)),
+ LastModified: aws.Time(time.Unix(entry.Attributes.Mtime, 0).UTC()),
Size: aws.Int64(int64(filer2.TotalSize(entry.Chunks))),
- ETag: aws.String("\"" + filer2.ETag(entry.Chunks) + "\""),
+ ETag: aws.String("\"" + filer2.ETag(entry) + "\""),
})
}
}
diff --git a/weed/s3api/filer_util.go b/weed/s3api/filer_util.go
index 2fceacd2a..7f49c320e 100644
--- a/weed/s3api/filer_util.go
+++ b/weed/s3api/filer_util.go
@@ -3,135 +3,42 @@ package s3api
import (
"context"
"fmt"
- "io"
- "os"
"strings"
- "time"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)
-func (s3a *S3ApiServer) mkdir(ctx context.Context, parentDirectoryPath string, dirName string, fn func(entry *filer_pb.Entry)) error {
- return s3a.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
+func (s3a *S3ApiServer) mkdir(parentDirectoryPath string, dirName string, fn func(entry *filer_pb.Entry)) error {
- entry := &filer_pb.Entry{
- Name: dirName,
- IsDirectory: true,
- Attributes: &filer_pb.FuseAttributes{
- Mtime: time.Now().Unix(),
- Crtime: time.Now().Unix(),
- FileMode: uint32(0777 | os.ModeDir),
- Uid: OS_UID,
- Gid: OS_GID,
- },
- }
+ return filer_pb.Mkdir(s3a, parentDirectoryPath, dirName, fn)
- if fn != nil {
- fn(entry)
- }
-
- request := &filer_pb.CreateEntryRequest{
- Directory: parentDirectoryPath,
- Entry: entry,
- }
-
- glog.V(1).Infof("mkdir: %v", request)
- if err := filer_pb.CreateEntry(ctx, client, request); err != nil {
- glog.V(0).Infof("mkdir %v: %v", request, err)
- return fmt.Errorf("mkdir %s/%s: %v", parentDirectoryPath, dirName, err)
- }
-
- return nil
- })
}
-func (s3a *S3ApiServer) mkFile(ctx context.Context, parentDirectoryPath string, fileName string, chunks []*filer_pb.FileChunk) error {
- return s3a.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
+func (s3a *S3ApiServer) mkFile(parentDirectoryPath string, fileName string, chunks []*filer_pb.FileChunk) error {
- entry := &filer_pb.Entry{
- Name: fileName,
- IsDirectory: false,
- Attributes: &filer_pb.FuseAttributes{
- Mtime: time.Now().Unix(),
- Crtime: time.Now().Unix(),
- FileMode: uint32(0770),
- Uid: OS_UID,
- Gid: OS_GID,
- },
- Chunks: chunks,
- }
+ return filer_pb.MkFile(s3a, parentDirectoryPath, fileName, chunks)
- request := &filer_pb.CreateEntryRequest{
- Directory: parentDirectoryPath,
- Entry: entry,
- }
-
- glog.V(1).Infof("create file: %s/%s", parentDirectoryPath, fileName)
- if err := filer_pb.CreateEntry(ctx, client, request); err != nil {
- glog.V(0).Infof("create file %v:%v", request, err)
- return fmt.Errorf("create file %s/%s: %v", parentDirectoryPath, fileName, err)
- }
-
- return nil
- })
}
-func (s3a *S3ApiServer) list(ctx context.Context, parentDirectoryPath, prefix, startFrom string, inclusive bool, limit int) (entries []*filer_pb.Entry, err error) {
-
- err = s3a.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
-
- request := &filer_pb.ListEntriesRequest{
- Directory: parentDirectoryPath,
- Prefix: prefix,
- StartFromFileName: startFrom,
- InclusiveStartFrom: inclusive,
- Limit: uint32(limit),
- }
-
- glog.V(4).Infof("read directory: %v", request)
- stream, err := client.ListEntries(ctx, request)
- if err != nil {
- glog.V(0).Infof("read directory %v: %v", request, err)
- return fmt.Errorf("list dir %v: %v", parentDirectoryPath, err)
- }
-
- for {
- resp, recvErr := stream.Recv()
- if recvErr != nil {
- if recvErr == io.EOF {
- break
- } else {
- return recvErr
- }
- }
-
- entries = append(entries, resp.Entry)
-
- }
+func (s3a *S3ApiServer) list(parentDirectoryPath, prefix, startFrom string, inclusive bool, limit uint32) (entries []*filer_pb.Entry, err error) {
+ err = filer_pb.List(s3a, parentDirectoryPath, prefix, func(entry *filer_pb.Entry, isLast bool) error {
+ entries = append(entries, entry)
return nil
- })
+ }, startFrom, inclusive, limit)
return
}
-func (s3a *S3ApiServer) rm(ctx context.Context, parentDirectoryPath string, entryName string, isDirectory, isDeleteData, isRecursive bool) error {
+func (s3a *S3ApiServer) rm(parentDirectoryPath, entryName string, isDeleteData, isRecursive bool) error {
- return s3a.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
+ return s3a.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
- request := &filer_pb.DeleteEntryRequest{
- Directory: parentDirectoryPath,
- Name: entryName,
- IsDeleteData: isDeleteData,
- IsRecursive: isRecursive,
- }
-
- glog.V(1).Infof("delete entry %v/%v: %v", parentDirectoryPath, entryName, request)
- if _, err := client.DeleteEntry(ctx, request); err != nil {
- glog.V(0).Infof("delete entry %v: %v", request, err)
- return fmt.Errorf("delete entry %s/%s: %v", parentDirectoryPath, entryName, err)
+ err := doDeleteEntry(client, parentDirectoryPath, entryName, isDeleteData, isRecursive)
+ if err != nil {
+ return err
}
return nil
@@ -139,28 +46,30 @@ func (s3a *S3ApiServer) rm(ctx context.Context, parentDirectoryPath string, entr
}
-func (s3a *S3ApiServer) exists(ctx context.Context, parentDirectoryPath string, entryName string, isDirectory bool) (exists bool, err error) {
+func doDeleteEntry(client filer_pb.SeaweedFilerClient, parentDirectoryPath string, entryName string, isDeleteData bool, isRecursive bool) error {
+ request := &filer_pb.DeleteEntryRequest{
+ Directory: parentDirectoryPath,
+ Name: entryName,
+ IsDeleteData: isDeleteData,
+ IsRecursive: isRecursive,
+ }
- err = s3a.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
-
- request := &filer_pb.LookupDirectoryEntryRequest{
- Directory: parentDirectoryPath,
- Name: entryName,
+ glog.V(1).Infof("delete entry %v/%v: %v", parentDirectoryPath, entryName, request)
+ if resp, err := client.DeleteEntry(context.Background(), request); err != nil {
+ glog.V(0).Infof("delete entry %v: %v", request, err)
+ return fmt.Errorf("delete entry %s/%s: %v", parentDirectoryPath, entryName, err)
+ } else {
+ if resp.Error != "" {
+ return fmt.Errorf("delete entry %s/%s: %v", parentDirectoryPath, entryName, resp.Error)
}
+ }
+ return nil
+}
- glog.V(4).Infof("exists entry %v/%v: %v", parentDirectoryPath, entryName, request)
- resp, err := client.LookupDirectoryEntry(ctx, request)
- if err != nil {
- glog.V(0).Infof("exists entry %v: %v", request, err)
- return fmt.Errorf("exists entry %s/%s: %v", parentDirectoryPath, entryName, err)
- }
+func (s3a *S3ApiServer) exists(parentDirectoryPath string, entryName string, isDirectory bool) (exists bool, err error) {
- exists = resp.Entry.IsDirectory == isDirectory
+ return filer_pb.Exists(s3a, parentDirectoryPath, entryName, isDirectory)
- return nil
- })
-
- return
}
func objectKey(key *string) *string {
diff --git a/weed/s3api/s3api_auth.go b/weed/s3api/s3api_auth.go
index b680fe1e1..bf5cf5fab 100644
--- a/weed/s3api/s3api_auth.go
+++ b/weed/s3api/s3api_auth.go
@@ -9,6 +9,8 @@ import (
const (
signV4Algorithm = "AWS4-HMAC-SHA256"
signV2Algorithm = "AWS"
+ iso8601Format = "20060102T150405Z"
+ yyyymmdd = "20060102"
)
// Verify if request has JWT.
@@ -23,8 +25,8 @@ func isRequestSignatureV4(r *http.Request) bool {
// Verify if request has AWS Signature Version '2'.
func isRequestSignatureV2(r *http.Request) bool {
- return (!strings.HasPrefix(r.Header.Get("Authorization"), signV4Algorithm) &&
- strings.HasPrefix(r.Header.Get("Authorization"), signV2Algorithm))
+ return !strings.HasPrefix(r.Header.Get("Authorization"), signV4Algorithm) &&
+ strings.HasPrefix(r.Header.Get("Authorization"), signV2Algorithm)
}
// Verify if request has AWS PreSign Version '4'.
diff --git a/weed/s3api/s3api_bucket_handlers.go b/weed/s3api/s3api_bucket_handlers.go
index 492d94616..7d96e3e0e 100644
--- a/weed/s3api/s3api_bucket_handlers.go
+++ b/weed/s3api/s3api_bucket_handlers.go
@@ -6,19 +6,14 @@ import (
"fmt"
"math"
"net/http"
- "os"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/s3"
+ "github.com/gorilla/mux"
+
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
- "github.com/gorilla/mux"
-)
-
-var (
- OS_UID = uint32(os.Getuid())
- OS_GID = uint32(os.Getgid())
)
type ListAllMyBucketsResult struct {
@@ -31,7 +26,7 @@ func (s3a *S3ApiServer) ListBucketsHandler(w http.ResponseWriter, r *http.Reques
var response ListAllMyBucketsResult
- entries, err := s3a.list(context.Background(), s3a.option.BucketsPath, "", "", false, math.MaxInt32)
+ entries, err := s3a.list(s3a.option.BucketsPath, "", "", false, math.MaxInt32)
if err != nil {
writeErrorResponse(w, ErrInternalError, r.URL)
@@ -43,7 +38,7 @@ func (s3a *S3ApiServer) ListBucketsHandler(w http.ResponseWriter, r *http.Reques
if entry.IsDirectory {
buckets = append(buckets, &s3.Bucket{
Name: aws.String(entry.Name),
- CreationDate: aws.Time(time.Unix(entry.Attributes.Crtime, 0)),
+ CreationDate: aws.Time(time.Unix(entry.Attributes.Crtime, 0).UTC()),
})
}
}
@@ -65,7 +60,7 @@ func (s3a *S3ApiServer) PutBucketHandler(w http.ResponseWriter, r *http.Request)
bucket := vars["bucket"]
// create the folder for bucket, but lazily create actual collection
- if err := s3a.mkdir(context.Background(), s3a.option.BucketsPath, bucket, nil); err != nil {
+ if err := s3a.mkdir(s3a.option.BucketsPath, bucket, nil); err != nil {
writeErrorResponse(w, ErrInternalError, r.URL)
return
}
@@ -78,8 +73,7 @@ func (s3a *S3ApiServer) DeleteBucketHandler(w http.ResponseWriter, r *http.Reque
vars := mux.Vars(r)
bucket := vars["bucket"]
- ctx := context.Background()
- err := s3a.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
+ err := s3a.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
// delete collection
deleteCollectionRequest := &filer_pb.DeleteCollectionRequest{
@@ -87,14 +81,14 @@ func (s3a *S3ApiServer) DeleteBucketHandler(w http.ResponseWriter, r *http.Reque
}
glog.V(1).Infof("delete collection: %v", deleteCollectionRequest)
- if _, err := client.DeleteCollection(ctx, deleteCollectionRequest); err != nil {
+ if _, err := client.DeleteCollection(context.Background(), deleteCollectionRequest); err != nil {
return fmt.Errorf("delete collection %s: %v", bucket, err)
}
return nil
})
- err = s3a.rm(ctx, s3a.option.BucketsPath, bucket, true, false, true)
+ err = s3a.rm(s3a.option.BucketsPath, bucket, false, true)
if err != nil {
writeErrorResponse(w, ErrInternalError, r.URL)
@@ -109,9 +103,7 @@ func (s3a *S3ApiServer) HeadBucketHandler(w http.ResponseWriter, r *http.Request
vars := mux.Vars(r)
bucket := vars["bucket"]
- ctx := context.Background()
-
- err := s3a.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
+ err := s3a.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.LookupDirectoryEntryRequest{
Directory: s3a.option.BucketsPath,
@@ -119,7 +111,10 @@ func (s3a *S3ApiServer) HeadBucketHandler(w http.ResponseWriter, r *http.Request
}
glog.V(1).Infof("lookup bucket: %v", request)
- if _, err := client.LookupDirectoryEntry(ctx, request); err != nil {
+ if _, err := filer_pb.LookupEntry(client, request); err != nil {
+ if err == filer_pb.ErrNotFound {
+ return filer_pb.ErrNotFound
+ }
return fmt.Errorf("lookup bucket %s/%s: %v", s3a.option.BucketsPath, bucket, err)
}
diff --git a/weed/s3api/s3api_errors.go b/weed/s3api/s3api_errors.go
index 96f8d9fd6..3f97c73cb 100644
--- a/weed/s3api/s3api_errors.go
+++ b/weed/s3api/s3api_errors.go
@@ -27,6 +27,7 @@ type ErrorCode int
// Error codes, see full list at http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html
const (
ErrNone ErrorCode = iota
+ ErrAccessDenied
ErrMethodNotAllowed
ErrBucketNotEmpty
ErrBucketAlreadyExists
@@ -43,12 +44,41 @@ const (
ErrInternalError
ErrInvalidCopyDest
ErrInvalidCopySource
+ ErrAuthHeaderEmpty
+ ErrSignatureVersionNotSupported
+ ErrMissingFields
+ ErrMissingCredTag
+ ErrCredMalformed
+ ErrMalformedXML
+ ErrMalformedDate
+ ErrMalformedPresignedDate
+ ErrMalformedCredentialDate
+ ErrMissingSignHeadersTag
+ ErrMissingSignTag
+ ErrUnsignedHeaders
+ ErrInvalidQueryParams
+ ErrInvalidQuerySignatureAlgo
+ ErrExpiredPresignRequest
+ ErrMalformedExpires
+ ErrNegativeExpires
+ ErrMaximumExpires
+ ErrSignatureDoesNotMatch
+ ErrContentSHA256Mismatch
+ ErrInvalidAccessKeyID
+ ErrRequestNotReadyYet
+ ErrMissingDateHeader
+ ErrInvalidRequest
ErrNotImplemented
)
// error code to APIError structure, these fields carry respective
// descriptions for all the error responses.
var errorCodeResponse = map[ErrorCode]APIError{
+ ErrAccessDenied: {
+ Code: "AccessDenied",
+ Description: "Access Denied.",
+ HTTPStatusCode: http.StatusForbidden,
+ },
ErrMethodNotAllowed: {
Code: "MethodNotAllowed",
Description: "The specified method is not allowed against this resource.",
@@ -132,6 +162,127 @@ var errorCodeResponse = map[ErrorCode]APIError{
HTTPStatusCode: http.StatusBadRequest,
},
+ ErrMalformedXML: {
+ Code: "MalformedXML",
+ Description: "The XML you provided was not well-formed or did not validate against our published schema.",
+ HTTPStatusCode: http.StatusBadRequest,
+ },
+
+ ErrAuthHeaderEmpty: {
+ Code: "InvalidArgument",
+ Description: "Authorization header is invalid -- one and only one ' ' (space) required.",
+ HTTPStatusCode: http.StatusBadRequest,
+ },
+ ErrSignatureVersionNotSupported: {
+ Code: "InvalidRequest",
+ Description: "The authorization mechanism you have provided is not supported. Please use AWS4-HMAC-SHA256.",
+ HTTPStatusCode: http.StatusBadRequest,
+ },
+ ErrMissingFields: {
+ Code: "MissingFields",
+ Description: "Missing fields in request.",
+ HTTPStatusCode: http.StatusBadRequest,
+ },
+ ErrMissingCredTag: {
+ Code: "InvalidRequest",
+ Description: "Missing Credential field for this request.",
+ HTTPStatusCode: http.StatusBadRequest,
+ },
+ ErrCredMalformed: {
+ Code: "AuthorizationQueryParametersError",
+ Description: "Error parsing the X-Amz-Credential parameter; the Credential is mal-formed; expecting \"/YYYYMMDD/REGION/SERVICE/aws4_request\".",
+ HTTPStatusCode: http.StatusBadRequest,
+ },
+ ErrMalformedDate: {
+ Code: "MalformedDate",
+ Description: "Invalid date format header, expected to be in ISO8601, RFC1123 or RFC1123Z time format.",
+ HTTPStatusCode: http.StatusBadRequest,
+ },
+ ErrMalformedPresignedDate: {
+ Code: "AuthorizationQueryParametersError",
+ Description: "X-Amz-Date must be in the ISO8601 Long Format \"yyyyMMdd'T'HHmmss'Z'\"",
+ HTTPStatusCode: http.StatusBadRequest,
+ },
+ ErrMissingSignHeadersTag: {
+ Code: "InvalidArgument",
+ Description: "Signature header missing SignedHeaders field.",
+ HTTPStatusCode: http.StatusBadRequest,
+ },
+ ErrMissingSignTag: {
+ Code: "AccessDenied",
+ Description: "Signature header missing Signature field.",
+ HTTPStatusCode: http.StatusBadRequest,
+ },
+
+ ErrUnsignedHeaders: {
+ Code: "AccessDenied",
+ Description: "There were headers present in the request which were not signed",
+ HTTPStatusCode: http.StatusBadRequest,
+ },
+ ErrInvalidQueryParams: {
+ Code: "AuthorizationQueryParametersError",
+ Description: "Query-string authentication version 4 requires the X-Amz-Algorithm, X-Amz-Credential, X-Amz-Signature, X-Amz-Date, X-Amz-SignedHeaders, and X-Amz-Expires parameters.",
+ HTTPStatusCode: http.StatusBadRequest,
+ },
+ ErrInvalidQuerySignatureAlgo: {
+ Code: "AuthorizationQueryParametersError",
+ Description: "X-Amz-Algorithm only supports \"AWS4-HMAC-SHA256\".",
+ HTTPStatusCode: http.StatusBadRequest,
+ },
+ ErrExpiredPresignRequest: {
+ Code: "AccessDenied",
+ Description: "Request has expired",
+ HTTPStatusCode: http.StatusForbidden,
+ },
+ ErrMalformedExpires: {
+ Code: "AuthorizationQueryParametersError",
+ Description: "X-Amz-Expires should be a number",
+ HTTPStatusCode: http.StatusBadRequest,
+ },
+ ErrNegativeExpires: {
+ Code: "AuthorizationQueryParametersError",
+ Description: "X-Amz-Expires must be non-negative",
+ HTTPStatusCode: http.StatusBadRequest,
+ },
+ ErrMaximumExpires: {
+ Code: "AuthorizationQueryParametersError",
+ Description: "X-Amz-Expires must be less than a week (in seconds); that is, the given X-Amz-Expires must be less than 604800 seconds",
+ HTTPStatusCode: http.StatusBadRequest,
+ },
+
+ ErrInvalidAccessKeyID: {
+ Code: "InvalidAccessKeyId",
+ Description: "The access key ID you provided does not exist in our records.",
+ HTTPStatusCode: http.StatusForbidden,
+ },
+
+ ErrRequestNotReadyYet: {
+ Code: "AccessDenied",
+ Description: "Request is not valid yet",
+ HTTPStatusCode: http.StatusForbidden,
+ },
+
+ ErrSignatureDoesNotMatch: {
+ Code: "SignatureDoesNotMatch",
+ Description: "The request signature we calculated does not match the signature you provided. Check your key and signing method.",
+ HTTPStatusCode: http.StatusForbidden,
+ },
+
+ ErrContentSHA256Mismatch: {
+ Code: "XAmzContentSHA256Mismatch",
+ Description: "The provided 'x-amz-content-sha256' header does not match what was computed.",
+ HTTPStatusCode: http.StatusBadRequest,
+ },
+ ErrMissingDateHeader: {
+ Code: "AccessDenied",
+ Description: "AWS authentication requires a valid Date or x-amz-date header",
+ HTTPStatusCode: http.StatusBadRequest,
+ },
+ ErrInvalidRequest: {
+ Code: "InvalidRequest",
+ Description: "Invalid Request",
+ HTTPStatusCode: http.StatusBadRequest,
+ },
ErrNotImplemented: {
Code: "NotImplemented",
Description: "A header you provided implies functionality that is not implemented",
diff --git a/weed/s3api/s3api_handlers.go b/weed/s3api/s3api_handlers.go
index 602f03e5c..45a7cbc2e 100644
--- a/weed/s3api/s3api_handlers.go
+++ b/weed/s3api/s3api_handlers.go
@@ -2,17 +2,18 @@ package s3api
import (
"bytes"
- "context"
"encoding/base64"
"encoding/xml"
"fmt"
- "github.com/chrislusf/seaweedfs/weed/glog"
- "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
- "github.com/chrislusf/seaweedfs/weed/util"
- "google.golang.org/grpc"
"net/http"
"net/url"
"time"
+
+ "google.golang.org/grpc"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)
type mimeType string
@@ -37,14 +38,19 @@ func encodeResponse(response interface{}) []byte {
return bytesBuffer.Bytes()
}
-func (s3a *S3ApiServer) withFilerClient(ctx context.Context, fn func(filer_pb.SeaweedFilerClient) error) error {
+var _ = filer_pb.FilerClient(&S3ApiServer{})
- return util.WithCachedGrpcClient(ctx, func(ctx context.Context, grpcConnection *grpc.ClientConn) error {
+func (s3a *S3ApiServer) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error {
+
+ return pb.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error {
client := filer_pb.NewSeaweedFilerClient(grpcConnection)
return fn(client)
}, s3a.option.FilerGrpcAddress, s3a.option.GrpcDialOption)
}
+func (s3a *S3ApiServer) AdjustedUrl(hostAndPort string) string {
+ return hostAndPort
+}
// If none of the http routes match respond with MethodNotAllowed
func notFoundHandler(w http.ResponseWriter, r *http.Request) {
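The pb.WithCachedGrpcClient call above follows a common pattern: keep at most one ClientConn per address and hand it to a callback. A rough, self-contained sketch of that pattern (the cache below is an assumption for illustration, not the actual pb package code):

package main

import (
	"sync"

	"google.golang.org/grpc"
)

var (
	grpcClientsLock sync.Mutex
	grpcClients     = make(map[string]*grpc.ClientConn)
)

// withCachedGrpcClient dials each address at most once and reuses the
// connection for every subsequent callback.
func withCachedGrpcClient(fn func(*grpc.ClientConn) error, address string, opts ...grpc.DialOption) error {
	grpcClientsLock.Lock()
	conn, found := grpcClients[address]
	if !found {
		var err error
		conn, err = grpc.Dial(address, opts...)
		if err != nil {
			grpcClientsLock.Unlock()
			return err
		}
		grpcClients[address] = conn
	}
	grpcClientsLock.Unlock()
	return fn(conn)
}

func main() {
	_ = withCachedGrpcClient(func(conn *grpc.ClientConn) error {
		return nil // a real caller would build a filer client from conn
	}, "localhost:18888", grpc.WithInsecure())
}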
diff --git a/weed/s3api/s3api_object_copy_handlers.go b/weed/s3api/s3api_object_copy_handlers.go
index 5e0fa5de1..b8fb3f6a4 100644
--- a/weed/s3api/s3api_object_copy_handlers.go
+++ b/weed/s3api/s3api_object_copy_handlers.go
@@ -48,6 +48,7 @@ func (s3a *S3ApiServer) CopyObjectHandler(w http.ResponseWriter, r *http.Request
writeErrorResponse(w, ErrInvalidCopySource, r.URL)
return
}
+ defer dataReader.Close()
etag, errCode := s3a.putToFiler(r, dstUrl, dataReader)
@@ -112,7 +113,7 @@ func (s3a *S3ApiServer) CopyObjectPartHandler(w http.ResponseWriter, r *http.Req
}
// check partID with maximum part ID for multipart objects
- if partID > 10000 {
+ if partID > globalMaxPartID {
writeErrorResponse(w, ErrInvalidMaxParts, r.URL)
return
}
@@ -129,6 +130,7 @@ func (s3a *S3ApiServer) CopyObjectPartHandler(w http.ResponseWriter, r *http.Req
writeErrorResponse(w, ErrInvalidCopySource, r.URL)
return
}
+ defer dataReader.Close()
etag, errCode := s3a.putToFiler(r, dstUrl, dataReader)
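The two defer dataReader.Close() additions pair with a signature change further down: putToFiler now accepts a plain io.Reader, so the handler that opened the body is also the one that closes it. A small stand-alone illustration of that ownership split (all names here are invented for the sketch):

package main

import (
	"crypto/md5"
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

// consume tees the stream through an MD5 hash, the way putToFiler does,
// but deliberately takes an io.Reader and leaves Close to the caller.
func consume(r io.Reader) (etag string, err error) {
	h := md5.New()
	if _, err = io.Copy(ioutil.Discard, io.TeeReader(r, h)); err != nil {
		return "", err
	}
	return fmt.Sprintf("%x", h.Sum(nil)), nil
}

func main() {
	body := ioutil.NopCloser(strings.NewReader("hello"))
	defer body.Close() // the handler, not consume, owns the Close
	etag, _ := consume(body)
	fmt.Println(etag)
}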
diff --git a/weed/s3api/s3api_object_handlers.go b/weed/s3api/s3api_object_handlers.go
index 8dc733eb9..300441ef2 100644
--- a/weed/s3api/s3api_object_handlers.go
+++ b/weed/s3api/s3api_object_handlers.go
@@ -3,6 +3,7 @@ package s3api
import (
"crypto/md5"
"encoding/json"
+ "encoding/xml"
"fmt"
"io"
"io/ioutil"
@@ -12,7 +13,9 @@ import (
"github.com/gorilla/mux"
"github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/server"
+ "github.com/chrislusf/seaweedfs/weed/util"
)
var (
@@ -41,12 +44,17 @@ func (s3a *S3ApiServer) PutObjectHandler(w http.ResponseWriter, r *http.Request)
rAuthType := getRequestAuthType(r)
dataReader := r.Body
+ var s3ErrCode ErrorCode
if rAuthType == authTypeStreamingSigned {
- dataReader = newSignV4ChunkedReader(r)
+ dataReader, s3ErrCode = s3a.iam.newSignV4ChunkedReader(r)
}
+ if s3ErrCode != ErrNone {
+ writeErrorResponse(w, s3ErrCode, r.URL)
+ return
+ }
+ defer dataReader.Close()
- uploadUrl := fmt.Sprintf("http://%s%s/%s%s?collection=%s",
- s3a.option.Filer, s3a.option.BucketsPath, bucket, object, bucket)
+ uploadUrl := fmt.Sprintf("http://%s%s/%s%s", s3a.option.Filer, s3a.option.BucketsPath, bucket, object)
etag, errCode := s3a.putToFiler(r, uploadUrl, dataReader)
@@ -109,10 +117,91 @@ func (s3a *S3ApiServer) DeleteObjectHandler(w http.ResponseWriter, r *http.Reque
}
+// ObjectIdentifier carries the key name of an object to delete.
+type ObjectIdentifier struct {
+ ObjectName string `xml:"Key"`
+}
+
+// DeleteObjectsRequest - XML carrying the object key names that need to be deleted.
+type DeleteObjectsRequest struct {
+ // Element to enable quiet mode for the request
+ Quiet bool
+ // List of objects to be deleted
+ Objects []ObjectIdentifier `xml:"Object"`
+}
+
+// DeleteError structure.
+type DeleteError struct {
+ Code string
+ Message string
+ Key string
+}
+
+// DeleteObjectsResponse container for multiple object deletes.
+type DeleteObjectsResponse struct {
+ XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ DeleteResult" json:"-"`
+
+ // Collection of all deleted objects
+ DeletedObjects []ObjectIdentifier `xml:"Deleted,omitempty"`
+
+ // Collection of errors deleting certain objects.
+ Errors []DeleteError `xml:"Error,omitempty"`
+}
+
// DeleteMultipleObjectsHandler - Delete multiple objects
func (s3a *S3ApiServer) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Request) {
- // TODO
- writeErrorResponse(w, ErrNotImplemented, r.URL)
+
+ vars := mux.Vars(r)
+ bucket := vars["bucket"]
+
+ deleteXMLBytes, err := ioutil.ReadAll(r.Body)
+ if err != nil {
+ writeErrorResponse(w, ErrInternalError, r.URL)
+ return
+ }
+
+ deleteObjects := &DeleteObjectsRequest{}
+ if err := xml.Unmarshal(deleteXMLBytes, deleteObjects); err != nil {
+ writeErrorResponse(w, ErrMalformedXML, r.URL)
+ return
+ }
+
+ var deletedObjects []ObjectIdentifier
+ var deleteErrors []DeleteError
+
+ s3a.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+
+ for _, object := range deleteObjects.Objects {
+ lastSeparator := strings.LastIndex(object.ObjectName, "/")
+ parentDirectoryPath, entryName, isDeleteData, isRecursive := "/", object.ObjectName, true, true
+ if lastSeparator > 0 && lastSeparator+1 < len(object.ObjectName) {
+ entryName = object.ObjectName[lastSeparator+1:]
+ parentDirectoryPath = "/" + object.ObjectName[:lastSeparator]
+ }
+ parentDirectoryPath = fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, bucket, parentDirectoryPath)
+
+ err := doDeleteEntry(client, parentDirectoryPath, entryName, isDeleteData, isRecursive)
+ if err == nil {
+ deletedObjects = append(deletedObjects, object)
+ } else {
+ deleteErrors = append(deleteErrors, DeleteError{
+ Code: "",
+ Message: err.Error(),
+ Key: object.ObjectName,
+ })
+ }
+ }
+ return nil
+ })
+
+ deleteResp := DeleteObjectsResponse{}
+ if !deleteObjects.Quiet {
+ deleteResp.DeletedObjects = deletedObjects
+ }
+ deleteResp.Errors = deleteErrors
+
+ writeSuccessResponseXML(w, encodeResponse(deleteResp))
+
}
func (s3a *S3ApiServer) proxyToFiler(w http.ResponseWriter, r *http.Request, destUrl string, responseFn func(proxyResponse *http.Response, w http.ResponseWriter)) {
@@ -129,7 +218,6 @@ func (s3a *S3ApiServer) proxyToFiler(w http.ResponseWriter, r *http.Request, des
proxyReq.Header.Set("Host", s3a.option.Filer)
proxyReq.Header.Set("X-Forwarded-For", r.RemoteAddr)
- proxyReq.Header.Set("Etag-MD5", "True")
for header, values := range r.Header {
for _, value := range values {
@@ -144,9 +232,10 @@ func (s3a *S3ApiServer) proxyToFiler(w http.ResponseWriter, r *http.Request, des
writeErrorResponse(w, ErrInternalError, r.URL)
return
}
- defer resp.Body.Close()
+ defer util.CloseResponse(resp)
responseFn(resp, w)
+
}
func passThroughResponse(proxyResponse *http.Response, w http.ResponseWriter) {
for k, v := range proxyResponse.Header {
@@ -156,10 +245,10 @@ func passThroughResponse(proxyResponse *http.Response, w http.ResponseWriter) {
io.Copy(w, proxyResponse.Body)
}
-func (s3a *S3ApiServer) putToFiler(r *http.Request, uploadUrl string, dataReader io.ReadCloser) (etag string, code ErrorCode) {
+func (s3a *S3ApiServer) putToFiler(r *http.Request, uploadUrl string, dataReader io.Reader) (etag string, code ErrorCode) {
hash := md5.New()
- var body io.Reader = io.TeeReader(dataReader, hash)
+ var body = io.TeeReader(dataReader, hash)
proxyReq, err := http.NewRequest("PUT", uploadUrl, body)
@@ -179,8 +268,6 @@ func (s3a *S3ApiServer) putToFiler(r *http.Request, uploadUrl string, dataReader
resp, postErr := client.Do(proxyReq)
- dataReader.Close()
-
if postErr != nil {
glog.Errorf("post to filer: %v", postErr)
return "", ErrInternalError
diff --git a/weed/s3api/s3api_object_multipart_handlers.go b/weed/s3api/s3api_object_multipart_handlers.go
index 72a25e4a5..3282e4176 100644
--- a/weed/s3api/s3api_object_multipart_handlers.go
+++ b/weed/s3api/s3api_object_multipart_handlers.go
@@ -1,22 +1,22 @@
package s3api
import (
- "context"
"fmt"
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/service/s3"
- "github.com/gorilla/mux"
"net/http"
"net/url"
"strconv"
"strings"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/service/s3"
+ "github.com/gorilla/mux"
)
const (
- maxObjectList = 1000 // Limit number of objects in a listObjectsResponse.
- maxUploadsList = 1000 // Limit number of uploads in a listUploadsResponse.
- maxPartsList = 1000 // Limit number of parts in a listPartsResponse.
- globalMaxPartID = 10000
+ maxObjectListSizeLimit = 10000 // Limit number of objects in a listObjectsResponse.
+ maxUploadsList = 10000 // Limit number of uploads in a listUploadsResponse.
+ maxPartsList = 10000 // Limit number of parts in a listPartsResponse.
+ globalMaxPartID = 100000
)
// NewMultipartUploadHandler - New multipart upload.
@@ -26,7 +26,7 @@ func (s3a *S3ApiServer) NewMultipartUploadHandler(w http.ResponseWriter, r *http
bucket = vars["bucket"]
object = vars["object"]
- response, errCode := s3a.createMultipartUpload(context.Background(), &s3.CreateMultipartUploadInput{
+ response, errCode := s3a.createMultipartUpload(&s3.CreateMultipartUploadInput{
Bucket: aws.String(bucket),
Key: objectKey(aws.String(object)),
})
@@ -51,7 +51,7 @@ func (s3a *S3ApiServer) CompleteMultipartUploadHandler(w http.ResponseWriter, r
// Get upload id.
uploadID, _, _, _ := getObjectResources(r.URL.Query())
- response, errCode := s3a.completeMultipartUpload(context.Background(), &s3.CompleteMultipartUploadInput{
+ response, errCode := s3a.completeMultipartUpload(&s3.CompleteMultipartUploadInput{
Bucket: aws.String(bucket),
Key: objectKey(aws.String(object)),
UploadId: aws.String(uploadID),
@@ -77,7 +77,7 @@ func (s3a *S3ApiServer) AbortMultipartUploadHandler(w http.ResponseWriter, r *ht
// Get upload id.
uploadID, _, _, _ := getObjectResources(r.URL.Query())
- response, errCode := s3a.abortMultipartUpload(context.Background(), &s3.AbortMultipartUploadInput{
+ response, errCode := s3a.abortMultipartUpload(&s3.AbortMultipartUploadInput{
Bucket: aws.String(bucket),
Key: objectKey(aws.String(object)),
UploadId: aws.String(uploadID),
@@ -112,7 +112,7 @@ func (s3a *S3ApiServer) ListMultipartUploadsHandler(w http.ResponseWriter, r *ht
}
}
- response, errCode := s3a.listMultipartUploads(context.Background(), &s3.ListMultipartUploadsInput{
+ response, errCode := s3a.listMultipartUploads(&s3.ListMultipartUploadsInput{
Bucket: aws.String(bucket),
Delimiter: aws.String(delimiter),
EncodingType: aws.String(encodingType),
@@ -149,7 +149,7 @@ func (s3a *S3ApiServer) ListObjectPartsHandler(w http.ResponseWriter, r *http.Re
return
}
- response, errCode := s3a.listObjectParts(context.Background(), &s3.ListPartsInput{
+ response, errCode := s3a.listObjectParts(&s3.ListPartsInput{
Bucket: aws.String(bucket),
Key: objectKey(aws.String(object)),
MaxParts: aws.Int64(int64(maxParts)),
@@ -175,10 +175,8 @@ func (s3a *S3ApiServer) PutObjectPartHandler(w http.ResponseWriter, r *http.Requ
rAuthType := getRequestAuthType(r)
- ctx := context.Background()
-
uploadID := r.URL.Query().Get("uploadId")
- exists, err := s3a.exists(ctx, s3a.genUploadsFolder(bucket), uploadID, true)
+ exists, err := s3a.exists(s3a.genUploadsFolder(bucket), uploadID, true)
if !exists {
writeErrorResponse(w, ErrNoSuchUpload, r.URL)
return
@@ -195,10 +193,16 @@ func (s3a *S3ApiServer) PutObjectPartHandler(w http.ResponseWriter, r *http.Requ
return
}
+ var s3ErrCode ErrorCode
dataReader := r.Body
if rAuthType == authTypeStreamingSigned {
- dataReader = newSignV4ChunkedReader(r)
+ dataReader, s3ErrCode = s3a.iam.newSignV4ChunkedReader(r)
}
+ if s3ErrCode != ErrNone {
+ writeErrorResponse(w, s3ErrCode, r.URL)
+ return
+ }
+ defer dataReader.Close()
uploadUrl := fmt.Sprintf("http://%s%s/%s/%04d.part?collection=%s",
s3a.option.Filer, s3a.genUploadsFolder(bucket), uploadID, partID-1, bucket)
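Each part is uploaded as %04d.part under the upload's folder, so part file names are zero-padded to four digits. A tiny demonstration of the naming; note that %04d stops padding past part 10000, which matters now that globalMaxPartID is 100000:

package main

import (
	"fmt"
	"sort"
)

func main() {
	// partID-1 is rendered with %04d, as in the upload URL above.
	names := []string{}
	for _, id := range []int{12, 3, 100} {
		names = append(names, fmt.Sprintf("%04d.part", id-1))
	}
	sort.Strings(names)
	fmt.Println(names) // [0002.part 0011.part 0099.part]
}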
diff --git a/weed/s3api/s3api_objects_list_handlers.go b/weed/s3api/s3api_objects_list_handlers.go
index aa6849cbd..086b9acd3 100644
--- a/weed/s3api/s3api_objects_list_handlers.go
+++ b/weed/s3api/s3api_objects_list_handlers.go
@@ -11,14 +11,11 @@ import (
"strings"
"time"
+ "github.com/gorilla/mux"
+
"github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
- "github.com/gorilla/mux"
-)
-
-const (
- maxObjectListSizeLimit = 1000 // Limit number of objects in a listObjectsResponse.
)
func (s3a *S3ApiServer) ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) {
@@ -46,9 +43,7 @@ func (s3a *S3ApiServer) ListObjectsV2Handler(w http.ResponseWriter, r *http.Requ
marker = startAfter
}
- ctx := context.Background()
-
- response, err := s3a.listFilerEntries(ctx, bucket, originalPrefix, maxKeys, marker)
+ response, err := s3a.listFilerEntries(bucket, originalPrefix, maxKeys, marker)
if err != nil {
writeErrorResponse(w, ErrInternalError, r.URL)
@@ -66,8 +61,6 @@ func (s3a *S3ApiServer) ListObjectsV1Handler(w http.ResponseWriter, r *http.Requ
vars := mux.Vars(r)
bucket := vars["bucket"]
- ctx := context.Background()
-
originalPrefix, marker, delimiter, maxKeys := getListObjectsV1Args(r.URL.Query())
if maxKeys < 0 {
@@ -79,7 +72,7 @@ func (s3a *S3ApiServer) ListObjectsV1Handler(w http.ResponseWriter, r *http.Requ
return
}
- response, err := s3a.listFilerEntries(ctx, bucket, originalPrefix, maxKeys, marker)
+ response, err := s3a.listFilerEntries(bucket, originalPrefix, maxKeys, marker)
if err != nil {
writeErrorResponse(w, ErrInternalError, r.URL)
@@ -89,7 +82,7 @@ func (s3a *S3ApiServer) ListObjectsV1Handler(w http.ResponseWriter, r *http.Requ
writeSuccessResponseXML(w, encodeResponse(response))
}
-func (s3a *S3ApiServer) listFilerEntries(ctx context.Context, bucket, originalPrefix string, maxKeys int, marker string) (response ListBucketResult, err error) {
+func (s3a *S3ApiServer) listFilerEntries(bucket, originalPrefix string, maxKeys int, marker string) (response ListBucketResult, err error) {
// convert full path prefix into directory name and prefix for entry name
dir, prefix := filepath.Split(originalPrefix)
@@ -98,7 +91,7 @@ func (s3a *S3ApiServer) listFilerEntries(ctx context.Context, bucket, originalPr
}
// check filer
- err = s3a.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
+ err = s3a.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.ListEntriesRequest{
Directory: fmt.Sprintf("%s/%s/%s", s3a.option.BucketsPath, bucket, dir),
@@ -108,7 +101,7 @@ func (s3a *S3ApiServer) listFilerEntries(ctx context.Context, bucket, originalPr
InclusiveStartFrom: false,
}
- stream, err := client.ListEntries(ctx, request)
+ stream, err := client.ListEntries(context.Background(), request)
if err != nil {
return fmt.Errorf("list buckets: %v", err)
}
@@ -146,7 +139,7 @@ func (s3a *S3ApiServer) listFilerEntries(ctx context.Context, bucket, originalPr
contents = append(contents, ListEntry{
Key: fmt.Sprintf("%s%s", dir, entry.Name),
LastModified: time.Unix(entry.Attributes.Mtime, 0),
- ETag: "\"" + filer2.ETag(entry.Chunks) + "\"",
+ ETag: "\"" + filer2.ETag(entry) + "\"",
Size: int64(filer2.TotalSize(entry.Chunks)),
Owner: CanonicalUser{
ID: fmt.Sprintf("%x", entry.Attributes.Uid),
diff --git a/weed/s3api/s3api_server.go b/weed/s3api/s3api_server.go
index 2233c8384..773094a5f 100644
--- a/weed/s3api/s3api_server.go
+++ b/weed/s3api/s3api_server.go
@@ -1,14 +1,16 @@
package s3api
import (
+ "net/http"
+
"github.com/gorilla/mux"
"google.golang.org/grpc"
- "net/http"
)
type S3ApiServerOption struct {
Filer string
FilerGrpcAddress string
+ Config string
DomainName string
BucketsPath string
GrpcDialOption grpc.DialOption
@@ -16,11 +18,13 @@ type S3ApiServerOption struct {
type S3ApiServer struct {
option *S3ApiServerOption
+ iam *IdentityAccessManagement
}
func NewS3ApiServer(router *mux.Router, option *S3ApiServerOption) (s3ApiServer *S3ApiServer, err error) {
s3ApiServer = &S3ApiServer{
option: option,
+ iam: NewIdentityAccessManagement(option.Config, option.DomainName),
}
s3ApiServer.registerRouter(router)
@@ -40,46 +44,46 @@ func (s3a *S3ApiServer) registerRouter(router *mux.Router) {
for _, bucket := range routers {
// HeadObject
- bucket.Methods("HEAD").Path("/{object:.+}").HandlerFunc(s3a.HeadObjectHandler)
+ bucket.Methods("HEAD").Path("/{object:.+}").HandlerFunc(s3a.iam.Auth(s3a.HeadObjectHandler, ACTION_READ))
// HeadBucket
- bucket.Methods("HEAD").HandlerFunc(s3a.HeadBucketHandler)
+ bucket.Methods("HEAD").HandlerFunc(s3a.iam.Auth(s3a.HeadBucketHandler, ACTION_ADMIN))
// CopyObjectPart
- bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(s3a.CopyObjectPartHandler).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
+ bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(s3a.iam.Auth(s3a.CopyObjectPartHandler, ACTION_WRITE)).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
// PutObjectPart
- bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(s3a.PutObjectPartHandler).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
+ bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(s3a.iam.Auth(s3a.PutObjectPartHandler, ACTION_WRITE)).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
// CompleteMultipartUpload
- bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(s3a.CompleteMultipartUploadHandler).Queries("uploadId", "{uploadId:.*}")
+ bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(s3a.iam.Auth(s3a.CompleteMultipartUploadHandler, ACTION_WRITE)).Queries("uploadId", "{uploadId:.*}")
// NewMultipartUpload
- bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(s3a.NewMultipartUploadHandler).Queries("uploads", "")
+ bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(s3a.iam.Auth(s3a.NewMultipartUploadHandler, ACTION_WRITE)).Queries("uploads", "")
// AbortMultipartUpload
- bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(s3a.AbortMultipartUploadHandler).Queries("uploadId", "{uploadId:.*}")
+ bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(s3a.iam.Auth(s3a.AbortMultipartUploadHandler, ACTION_WRITE)).Queries("uploadId", "{uploadId:.*}")
// ListObjectParts
- bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(s3a.ListObjectPartsHandler).Queries("uploadId", "{uploadId:.*}")
+ bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(s3a.iam.Auth(s3a.ListObjectPartsHandler, ACTION_WRITE)).Queries("uploadId", "{uploadId:.*}")
// ListMultipartUploads
- bucket.Methods("GET").HandlerFunc(s3a.ListMultipartUploadsHandler).Queries("uploads", "")
+ bucket.Methods("GET").HandlerFunc(s3a.iam.Auth(s3a.ListMultipartUploadsHandler, ACTION_WRITE)).Queries("uploads", "")
// CopyObject
- bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(s3a.CopyObjectHandler)
+ bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(s3a.iam.Auth(s3a.CopyObjectHandler, ACTION_WRITE))
// PutObject
- bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(s3a.PutObjectHandler)
+ bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(s3a.iam.Auth(s3a.PutObjectHandler, ACTION_WRITE))
// PutBucket
- bucket.Methods("PUT").HandlerFunc(s3a.PutBucketHandler)
+ bucket.Methods("PUT").HandlerFunc(s3a.iam.Auth(s3a.PutBucketHandler, ACTION_ADMIN))
// DeleteObject
- bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(s3a.DeleteObjectHandler)
+ bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(s3a.iam.Auth(s3a.DeleteObjectHandler, ACTION_WRITE))
// DeleteBucket
- bucket.Methods("DELETE").HandlerFunc(s3a.DeleteBucketHandler)
+ bucket.Methods("DELETE").HandlerFunc(s3a.iam.Auth(s3a.DeleteBucketHandler, ACTION_WRITE))
// ListObjectsV2
- bucket.Methods("GET").HandlerFunc(s3a.ListObjectsV2Handler).Queries("list-type", "2")
+ bucket.Methods("GET").HandlerFunc(s3a.iam.Auth(s3a.ListObjectsV2Handler, ACTION_READ)).Queries("list-type", "2")
// GetObject, but directory listing is not supported
- bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(s3a.GetObjectHandler)
+ bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(s3a.iam.Auth(s3a.GetObjectHandler, ACTION_READ))
// ListObjectsV1 (Legacy)
- bucket.Methods("GET").HandlerFunc(s3a.ListObjectsV1Handler)
+ bucket.Methods("GET").HandlerFunc(s3a.iam.Auth(s3a.ListObjectsV1Handler, ACTION_READ))
// DeleteMultipleObjects
- bucket.Methods("POST").HandlerFunc(s3a.DeleteMultipleObjectsHandler).Queries("delete", "")
+ bucket.Methods("POST").HandlerFunc(s3a.iam.Auth(s3a.DeleteMultipleObjectsHandler, ACTION_WRITE)).Queries("delete", "")
/*
// not implemented
@@ -102,7 +106,7 @@ func (s3a *S3ApiServer) registerRouter(router *mux.Router) {
}
// ListBuckets
- apiRouter.Methods("GET").Path("/").HandlerFunc(s3a.ListBucketsHandler)
+ apiRouter.Methods("GET").Path("/").HandlerFunc(s3a.iam.Auth(s3a.ListBucketsHandler, ACTION_ADMIN))
// NotFound
apiRouter.NotFoundHandler = http.HandlerFunc(notFoundHandler)
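Every route above now passes through s3a.iam.Auth(handler, action), a plain http.HandlerFunc decorator. A minimal sketch of the wrapper's shape; the enabled flag and canDo check are stand-ins, not the patch's actual IdentityAccessManagement:

package main

import "net/http"

type Action string

const (
	ACTION_READ  Action = "Read"
	ACTION_WRITE Action = "Write"
	ACTION_ADMIN Action = "Admin"
)

type IdentityAccessManagement struct {
	enabled bool // hypothetical: true once credentials are configured
}

// Auth returns f unchanged when auth is disabled; otherwise it checks
// the request against the required action before delegating to f.
func (iam *IdentityAccessManagement) Auth(f http.HandlerFunc, action Action) http.HandlerFunc {
	if !iam.enabled {
		return f
	}
	return func(w http.ResponseWriter, r *http.Request) {
		if !iam.canDo(r, action) {
			http.Error(w, "AccessDenied", http.StatusForbidden)
			return
		}
		f(w, r)
	}
}

// canDo is a placeholder; the real check authenticates the signature
// and matches the identity's allowed actions.
func (iam *IdentityAccessManagement) canDo(r *http.Request, action Action) bool {
	return false
}

func main() {
	iam := &IdentityAccessManagement{enabled: true}
	http.HandleFunc("/bucket/object", iam.Auth(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	}, ACTION_READ))
	http.ListenAndServe(":8333", nil)
}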
diff --git a/weed/security/guard.go b/weed/security/guard.go
index 17fe2ea9e..87ec91ec1 100644
--- a/weed/security/guard.go
+++ b/weed/security/guard.go
@@ -62,7 +62,7 @@ func NewGuard(whiteList []string, signingKey string, expiresAfterSec int, readSi
return g
}
-func (g *Guard) WhiteList(f func(w http.ResponseWriter, r *http.Request)) func(w http.ResponseWriter, r *http.Request) {
+func (g *Guard) WhiteList(f http.HandlerFunc) http.HandlerFunc {
if !g.isWriteActive {
//if no security needed, just skip all checking
return f
diff --git a/weed/security/tls.go b/weed/security/tls.go
index f4f525ede..1832e6e07 100644
--- a/weed/security/tls.go
+++ b/weed/security/tls.go
@@ -3,12 +3,14 @@ package security
import (
"crypto/tls"
"crypto/x509"
- "github.com/spf13/viper"
"io/ioutil"
- "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/spf13/viper"
+
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
)
func LoadServerTLS(config *viper.Viper, component string) grpc.ServerOption {
@@ -19,12 +21,12 @@ func LoadServerTLS(config *viper.Viper, component string) grpc.ServerOption {
// load cert/key, ca cert
cert, err := tls.LoadX509KeyPair(config.GetString(component+".cert"), config.GetString(component+".key"))
if err != nil {
- glog.Errorf("load cert/key error: %v", err)
+ glog.V(1).Infof("load cert/key error: %v", err)
return nil
}
caCert, err := ioutil.ReadFile(config.GetString(component + ".ca"))
if err != nil {
- glog.Errorf("read ca cert file error: %v", err)
+ glog.V(1).Infof("read ca cert file error: %v", err)
return nil
}
caCertPool := x509.NewCertPool()
@@ -46,12 +48,12 @@ func LoadClientTLS(config *viper.Viper, component string) grpc.DialOption {
// load cert/key, cacert
cert, err := tls.LoadX509KeyPair(config.GetString(component+".cert"), config.GetString(component+".key"))
if err != nil {
- glog.Errorf("load cert/key error: %v", err)
+ glog.V(1).Infof("load cert/key error: %v", err)
return grpc.WithInsecure()
}
caCert, err := ioutil.ReadFile(config.GetString(component + ".ca"))
if err != nil {
- glog.Errorf("read ca cert file error: %v", err)
+ glog.V(1).Infof("read ca cert file error: %v", err)
return grpc.WithInsecure()
}
caCertPool := x509.NewCertPool()
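Both loaders now log at verbosity 1 and fall back instead of failing hard: the client side returns grpc.WithInsecure() and the server side returns nil. The client-side pattern, stripped of viper and with placeholder file paths:

package main

import (
	"crypto/tls"
	"crypto/x509"
	"io/ioutil"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

// loadClientTLS returns a TLS dial option, or an insecure one when the
// certificate material cannot be loaded, mirroring the fallback above.
func loadClientTLS(certFile, keyFile, caFile string) grpc.DialOption {
	cert, err := tls.LoadX509KeyPair(certFile, keyFile)
	if err != nil {
		return grpc.WithInsecure()
	}
	caCert, err := ioutil.ReadFile(caFile)
	if err != nil {
		return grpc.WithInsecure()
	}
	caCertPool := x509.NewCertPool()
	caCertPool.AppendCertsFromPEM(caCert)
	return grpc.WithTransportCredentials(credentials.NewTLS(&tls.Config{
		Certificates: []tls.Certificate{cert},
		RootCAs:      caCertPool,
	}))
}

func main() {
	_ = loadClientTLS("client.crt", "client.key", "ca.crt")
}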
diff --git a/weed/server/common.go b/weed/server/common.go
index 31a9a73b8..bc6008864 100644
--- a/weed/server/common.go
+++ b/weed/server/common.go
@@ -1,10 +1,11 @@
package weed_server
import (
- "bytes"
"encoding/json"
"errors"
"fmt"
+ "io"
+ "mime/multipart"
"net/http"
"path/filepath"
"strconv"
@@ -45,6 +46,12 @@ func writeJson(w http.ResponseWriter, r *http.Request, httpStatus int, obj inter
if err != nil {
return
}
+
+ if httpStatus >= 400 {
+ glog.V(0).Infof("response method:%s URL:%s with httpStatus:%d and JSON:%s",
+ r.Method, r.URL.String(), httpStatus, string(bytes))
+ }
+
callback := r.FormValue("callback")
if callback == "" {
w.Header().Set("Content-Type", "application/json")
@@ -99,13 +106,13 @@ func submitForClientHandler(w http.ResponseWriter, r *http.Request, masterUrl st
}
debug("parsing upload file...")
- fname, data, mimeType, pairMap, isGzipped, originalDataSize, lastModified, _, _, pe := needle.ParseUpload(r, 256*1024*1024)
+ pu, pe := needle.ParseUpload(r, 256*1024*1024)
if pe != nil {
writeJsonError(w, r, http.StatusBadRequest, pe)
return
}
- debug("assigning file id for", fname)
+ debug("assigning file id for", pu.FileName)
r.ParseForm()
count := uint64(1)
if r.FormValue("count") != "" {
@@ -129,21 +136,21 @@ func submitForClientHandler(w http.ResponseWriter, r *http.Request, masterUrl st
}
url := "http://" + assignResult.Url + "/" + assignResult.Fid
- if lastModified != 0 {
- url = url + "?ts=" + strconv.FormatUint(lastModified, 10)
+ if pu.ModifiedTime != 0 {
+ url = url + "?ts=" + strconv.FormatUint(pu.ModifiedTime, 10)
}
debug("upload file to store", url)
- uploadResult, err := operation.Upload(url, fname, bytes.NewReader(data), isGzipped, mimeType, pairMap, assignResult.Auth)
+ uploadResult, err := operation.UploadData(url, pu.FileName, false, pu.Data, pu.IsGzipped, pu.MimeType, pu.PairMap, assignResult.Auth)
if err != nil {
writeJsonError(w, r, http.StatusInternalServerError, err)
return
}
- m["fileName"] = fname
+ m["fileName"] = pu.FileName
m["fid"] = assignResult.Fid
m["fileUrl"] = assignResult.PublicUrl + "/" + assignResult.Fid
- m["size"] = originalDataSize
+ m["size"] = pu.OriginalDataSize
m["eTag"] = uploadResult.ETag
writeJsonQuiet(w, r, http.StatusCreated, m)
return
@@ -184,19 +191,19 @@ func parseURLPath(path string) (vid, fid, filename, ext string, isVolumeIdOnly b
func statsHealthHandler(w http.ResponseWriter, r *http.Request) {
m := make(map[string]interface{})
- m["Version"] = util.VERSION
+ m["Version"] = util.Version()
writeJsonQuiet(w, r, http.StatusOK, m)
}
func statsCounterHandler(w http.ResponseWriter, r *http.Request) {
m := make(map[string]interface{})
- m["Version"] = util.VERSION
+ m["Version"] = util.Version()
m["Counters"] = serverStats
writeJsonQuiet(w, r, http.StatusOK, m)
}
func statsMemoryHandler(w http.ResponseWriter, r *http.Request) {
m := make(map[string]interface{})
- m["Version"] = util.VERSION
+ m["Version"] = util.Version()
m["Memory"] = stats.MemStat()
writeJsonQuiet(w, r, http.StatusOK, m)
}
@@ -210,3 +217,107 @@ func handleStaticResources2(r *mux.Router) {
r.Handle("/favicon.ico", http.FileServer(statikFS))
r.PathPrefix("/seaweedfsstatic/").Handler(http.StripPrefix("/seaweedfsstatic", http.FileServer(statikFS)))
}
+
+func adjustHeadersAfterHEAD(w http.ResponseWriter, r *http.Request, filename string) {
+ if filename != "" {
+ contentDisposition := "inline"
+ if r.FormValue("dl") != "" {
+ if dl, _ := strconv.ParseBool(r.FormValue("dl")); dl {
+ contentDisposition = "attachment"
+ }
+ }
+ w.Header().Set("Content-Disposition", contentDisposition+`; filename="`+fileNameEscaper.Replace(filename)+`"`)
+ }
+}
+
+func processRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64, mimeType string, writeFn func(writer io.Writer, offset int64, size int64) error) {
+ rangeReq := r.Header.Get("Range")
+
+ if rangeReq == "" {
+ w.Header().Set("Content-Length", strconv.FormatInt(totalSize, 10))
+ if err := writeFn(w, 0, totalSize); err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+ return
+ }
+
+ // the rest deals with a partial content request
+ // mostly copied from src/pkg/net/http/fs.go
+ ranges, err := parseRange(rangeReq, totalSize)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusRequestedRangeNotSatisfiable)
+ return
+ }
+ if sumRangesSize(ranges) > totalSize {
+ // The total number of bytes in all the ranges
+ // is larger than the size of the file by
+ // itself, so this is probably an attack, or a
+ // dumb client. Ignore the range request.
+ return
+ }
+ if len(ranges) == 0 {
+ return
+ }
+ if len(ranges) == 1 {
+ // RFC 2616, Section 14.16:
+ // "When an HTTP message includes the content of a single
+ // range (for example, a response to a request for a
+ // single range, or to a request for a set of ranges
+ // that overlap without any holes), this content is
+ // transmitted with a Content-Range header, and a
+ // Content-Length header showing the number of bytes
+ // actually transferred.
+ // ...
+ // A response to a request for a single range MUST NOT
+ // be sent using the multipart/byteranges media type."
+ ra := ranges[0]
+ w.Header().Set("Content-Length", strconv.FormatInt(ra.length, 10))
+ w.Header().Set("Content-Range", ra.contentRange(totalSize))
+ w.WriteHeader(http.StatusPartialContent)
+
+ err = writeFn(w, ra.start, ra.length)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+ return
+ }
+
+ // process multiple ranges
+ for _, ra := range ranges {
+ if ra.start > totalSize {
+ http.Error(w, "Out of Range", http.StatusRequestedRangeNotSatisfiable)
+ return
+ }
+ }
+ sendSize := rangesMIMESize(ranges, mimeType, totalSize)
+ pr, pw := io.Pipe()
+ mw := multipart.NewWriter(pw)
+ w.Header().Set("Content-Type", "multipart/byteranges; boundary="+mw.Boundary())
+ sendContent := pr
+ defer pr.Close() // cause writing goroutine to fail and exit if CopyN doesn't finish.
+ go func() {
+ for _, ra := range ranges {
+ part, e := mw.CreatePart(ra.mimeHeader(mimeType, totalSize))
+ if e != nil {
+ pw.CloseWithError(e)
+ return
+ }
+ if e = writeFn(part, ra.start, ra.length); e != nil {
+ pw.CloseWithError(e)
+ return
+ }
+ }
+ mw.Close()
+ pw.Close()
+ }()
+ if w.Header().Get("Content-Encoding") == "" {
+ w.Header().Set("Content-Length", strconv.FormatInt(sendSize, 10))
+ }
+ w.WriteHeader(http.StatusPartialContent)
+ if _, err := io.CopyN(w, sendContent, sendSize); err != nil {
+ http.Error(w, "Internal Error", http.StatusInternalServerError)
+ return
+ }
+}
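processRangeRequest leaves all byte production to the writeFn callback, so full, single-range, and multipart/byteranges responses share one code path. A hypothetical caller in the same weed_server package (the file-backed source is invented; it only demonstrates the writeFn contract of writing exactly size bytes starting at offset):

package weed_server

import (
	"io"
	"net/http"
	"os"
)

func serveLocalFile(w http.ResponseWriter, r *http.Request, path string) {
	f, err := os.Open(path)
	if err != nil {
		http.Error(w, err.Error(), http.StatusNotFound)
		return
	}
	defer f.Close()
	fi, err := f.Stat()
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	writeFn := func(writer io.Writer, offset int64, size int64) error {
		// the contract: copy exactly size bytes starting at offset
		if _, err := f.Seek(offset, io.SeekStart); err != nil {
			return err
		}
		_, err := io.CopyN(writer, f, size)
		return err
	}

	processRangeRequest(r, w, fi.Size(), "application/octet-stream", writeFn)
}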
diff --git a/weed/server/filer_grpc_server.go b/weed/server/filer_grpc_server.go
index 03954a58c..901f798f0 100644
--- a/weed/server/filer_grpc_server.go
+++ b/weed/server/filer_grpc_server.go
@@ -14,13 +14,16 @@ import (
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
)
func (fs *FilerServer) LookupDirectoryEntry(ctx context.Context, req *filer_pb.LookupDirectoryEntryRequest) (*filer_pb.LookupDirectoryEntryResponse, error) {
- entry, err := fs.filer.FindEntry(ctx, filer2.FullPath(filepath.ToSlash(filepath.Join(req.Directory, req.Name))))
- if err == filer2.ErrNotFound {
- return &filer_pb.LookupDirectoryEntryResponse{}, nil
+ glog.V(4).Infof("LookupDirectoryEntry %s", filepath.Join(req.Directory, req.Name))
+
+ entry, err := fs.filer.FindEntry(ctx, util.JoinPath(req.Directory, req.Name))
+ if err == filer_pb.ErrNotFound {
+ return &filer_pb.LookupDirectoryEntryResponse{}, err
}
if err != nil {
glog.V(3).Infof("LookupDirectoryEntry %s: %+v, ", filepath.Join(req.Directory, req.Name), err)
@@ -40,6 +43,8 @@ func (fs *FilerServer) LookupDirectoryEntry(ctx context.Context, req *filer_pb.L
func (fs *FilerServer) ListEntries(req *filer_pb.ListEntriesRequest, stream filer_pb.SeaweedFiler_ListEntriesServer) error {
+ glog.V(4).Infof("ListEntries %v", req)
+
limit := int(req.Limit)
if limit == 0 {
limit = fs.option.DirListingLimit
@@ -53,7 +58,8 @@ func (fs *FilerServer) ListEntries(req *filer_pb.ListEntriesRequest, stream file
lastFileName := req.StartFromFileName
includeLastFile := req.InclusiveStartFrom
for limit > 0 {
- entries, err := fs.filer.ListDirectoryEntries(stream.Context(), filer2.FullPath(req.Directory), lastFileName, includeLastFile, paginationLimit)
+ entries, err := fs.filer.ListDirectoryEntries(stream.Context(), util.FullPath(req.Directory), lastFileName, includeLastFile, paginationLimit)
+
if err != nil {
return err
}
@@ -84,6 +90,7 @@ func (fs *FilerServer) ListEntries(req *filer_pb.ListEntriesRequest, stream file
}); err != nil {
return err
}
+
limit--
if limit == 0 {
return nil
@@ -132,9 +139,10 @@ func (fs *FilerServer) LookupVolume(ctx context.Context, req *filer_pb.LookupVol
func (fs *FilerServer) CreateEntry(ctx context.Context, req *filer_pb.CreateEntryRequest) (resp *filer_pb.CreateEntryResponse, err error) {
+ glog.V(4).Infof("CreateEntry %v", req)
+
resp = &filer_pb.CreateEntryResponse{}
- fullpath := filer2.FullPath(filepath.ToSlash(filepath.Join(req.Directory, req.Entry.Name)))
chunks, garbages := filer2.CompactFileChunks(req.Entry.Chunks)
if req.Entry.Attributes == nil {
@@ -144,7 +152,7 @@ func (fs *FilerServer) CreateEntry(ctx context.Context, req *filer_pb.CreateEntr
}
createErr := fs.filer.CreateEntry(ctx, &filer2.Entry{
- FullPath: fullpath,
+ FullPath: util.JoinPath(req.Directory, req.Entry.Name),
Attr: filer2.PbToEntryAttribute(req.Entry.Attributes),
Chunks: chunks,
}, req.OExcl)
@@ -161,8 +169,10 @@ func (fs *FilerServer) CreateEntry(ctx context.Context, req *filer_pb.CreateEntr
func (fs *FilerServer) UpdateEntry(ctx context.Context, req *filer_pb.UpdateEntryRequest) (*filer_pb.UpdateEntryResponse, error) {
- fullpath := filepath.ToSlash(filepath.Join(req.Directory, req.Entry.Name))
- entry, err := fs.filer.FindEntry(ctx, filer2.FullPath(fullpath))
+ glog.V(4).Infof("UpdateEntry %v", req)
+
+ fullpath := util.Join(req.Directory, req.Entry.Name)
+ entry, err := fs.filer.FindEntry(ctx, util.FullPath(fullpath))
if err != nil {
return &filer_pb.UpdateEntryResponse{}, fmt.Errorf("not found %s: %v", fullpath, err)
}
@@ -173,7 +183,7 @@ func (fs *FilerServer) UpdateEntry(ctx context.Context, req *filer_pb.UpdateEntr
chunks, garbages := filer2.CompactFileChunks(req.Entry.Chunks)
newEntry := &filer2.Entry{
- FullPath: filer2.FullPath(filepath.ToSlash(filepath.Join(req.Directory, req.Entry.Name))),
+ FullPath: util.JoinPath(req.Directory, req.Entry.Name),
Attr: entry.Attr,
Extended: req.Entry.Extended,
Chunks: chunks,
@@ -215,9 +225,50 @@ func (fs *FilerServer) UpdateEntry(ctx context.Context, req *filer_pb.UpdateEntr
return &filer_pb.UpdateEntryResponse{}, err
}
+func (fs *FilerServer) AppendToEntry(ctx context.Context, req *filer_pb.AppendToEntryRequest) (*filer_pb.AppendToEntryResponse, error) {
+
+ glog.V(4).Infof("AppendToEntry %v", req)
+
+ fullpath := util.NewFullPath(req.Directory, req.EntryName)
+ var offset int64 = 0
+ entry, err := fs.filer.FindEntry(ctx, util.FullPath(fullpath))
+ if err == filer_pb.ErrNotFound {
+ entry = &filer2.Entry{
+ FullPath: fullpath,
+ Attr: filer2.Attr{
+ Crtime: time.Now(),
+ Mtime: time.Now(),
+ Mode: os.FileMode(0644),
+ Uid: OS_UID,
+ Gid: OS_GID,
+ },
+ }
+ } else {
+ offset = int64(filer2.TotalSize(entry.Chunks))
+ }
+
+ for _, chunk := range req.Chunks {
+ chunk.Offset = offset
+ offset += int64(chunk.Size)
+ }
+
+ entry.Chunks = append(entry.Chunks, req.Chunks...)
+
+ err = fs.filer.CreateEntry(context.Background(), entry, false)
+
+ return &filer_pb.AppendToEntryResponse{}, err
+}
+
func (fs *FilerServer) DeleteEntry(ctx context.Context, req *filer_pb.DeleteEntryRequest) (resp *filer_pb.DeleteEntryResponse, err error) {
- err = fs.filer.DeleteEntryMetaAndData(ctx, filer2.FullPath(filepath.ToSlash(filepath.Join(req.Directory, req.Name))), req.IsRecursive, req.IgnoreRecursiveError, req.IsDeleteData)
- return &filer_pb.DeleteEntryResponse{}, err
+
+ glog.V(4).Infof("DeleteEntry %v", req)
+
+ err = fs.filer.DeleteEntryMetaAndData(ctx, util.JoinPath(req.Directory, req.Name), req.IsRecursive, req.IgnoreRecursiveError, req.IsDeleteData)
+ resp = &filer_pb.DeleteEntryResponse{}
+ if err != nil {
+ resp.Error = err.Error()
+ }
+ return resp, nil
}
func (fs *FilerServer) AssignVolume(ctx context.Context, req *filer_pb.AssignVolumeRequest) (resp *filer_pb.AssignVolumeResponse, err error) {
@@ -226,6 +277,7 @@ func (fs *FilerServer) AssignVolume(ctx context.Context, req *filer_pb.AssignVol
if req.TtlSec > 0 {
ttlStr = strconv.Itoa(int(req.TtlSec))
}
+ collection, replication, _ := fs.detectCollection(req.ParentPath, req.Collection, req.Replication)
var altRequest *operation.VolumeAssignRequest
@@ -236,16 +288,16 @@ func (fs *FilerServer) AssignVolume(ctx context.Context, req *filer_pb.AssignVol
assignRequest := &operation.VolumeAssignRequest{
Count: uint64(req.Count),
- Replication: req.Replication,
- Collection: req.Collection,
+ Replication: replication,
+ Collection: collection,
Ttl: ttlStr,
DataCenter: dataCenter,
}
if dataCenter != "" {
altRequest = &operation.VolumeAssignRequest{
Count: uint64(req.Count),
- Replication: req.Replication,
- Collection: req.Collection,
+ Replication: replication,
+ Collection: collection,
Ttl: ttlStr,
DataCenter: "",
}
@@ -253,26 +305,30 @@ func (fs *FilerServer) AssignVolume(ctx context.Context, req *filer_pb.AssignVol
assignResult, err := operation.Assign(fs.filer.GetMaster(), fs.grpcDialOption, assignRequest, altRequest)
if err != nil {
glog.V(3).Infof("AssignVolume: %v", err)
- return nil, fmt.Errorf("assign volume: %v", err)
+ return &filer_pb.AssignVolumeResponse{Error: fmt.Sprintf("assign volume: %v", err)}, nil
}
if assignResult.Error != "" {
glog.V(3).Infof("AssignVolume error: %v", assignResult.Error)
- return nil, fmt.Errorf("assign volume result: %v", assignResult.Error)
+ return &filer_pb.AssignVolumeResponse{Error: fmt.Sprintf("assign volume result: %v", assignResult.Error)}, nil
}
return &filer_pb.AssignVolumeResponse{
- FileId: assignResult.Fid,
- Count: int32(assignResult.Count),
- Url: assignResult.Url,
- PublicUrl: assignResult.PublicUrl,
- Auth: string(assignResult.Auth),
- }, err
+ FileId: assignResult.Fid,
+ Count: int32(assignResult.Count),
+ Url: assignResult.Url,
+ PublicUrl: assignResult.PublicUrl,
+ Auth: string(assignResult.Auth),
+ Collection: collection,
+ Replication: replication,
+ }, nil
}
func (fs *FilerServer) DeleteCollection(ctx context.Context, req *filer_pb.DeleteCollectionRequest) (resp *filer_pb.DeleteCollectionResponse, err error) {
- err = fs.filer.MasterClient.WithClient(ctx, func(client master_pb.SeaweedClient) error {
- _, err := client.CollectionDelete(ctx, &master_pb.CollectionDeleteRequest{
+ glog.V(4).Infof("DeleteCollection %v", req)
+
+ err = fs.filer.MasterClient.WithClient(func(client master_pb.SeaweedClient) error {
+ _, err := client.CollectionDelete(context.Background(), &master_pb.CollectionDeleteRequest{
Name: req.GetCollection(),
})
return err
@@ -283,13 +339,22 @@ func (fs *FilerServer) DeleteCollection(ctx context.Context, req *filer_pb.Delet
func (fs *FilerServer) Statistics(ctx context.Context, req *filer_pb.StatisticsRequest) (resp *filer_pb.StatisticsResponse, err error) {
- input := &master_pb.StatisticsRequest{
- Replication: req.Replication,
- Collection: req.Collection,
- Ttl: req.Ttl,
- }
+ var output *master_pb.StatisticsResponse
+
+ err = fs.filer.MasterClient.WithClient(func(masterClient master_pb.SeaweedClient) error {
+ grpcResponse, grpcErr := masterClient.Statistics(context.Background(), &master_pb.StatisticsRequest{
+ Replication: req.Replication,
+ Collection: req.Collection,
+ Ttl: req.Ttl,
+ })
+ if grpcErr != nil {
+ return grpcErr
+ }
+
+ output = grpcResponse
+ return nil
+ })
- output, err := operation.Statistics(fs.filer.GetMaster(), fs.grpcDialOption, input)
if err != nil {
return nil, err
}
@@ -303,10 +368,88 @@ func (fs *FilerServer) Statistics(ctx context.Context, req *filer_pb.StatisticsR
func (fs *FilerServer) GetFilerConfiguration(ctx context.Context, req *filer_pb.GetFilerConfigurationRequest) (resp *filer_pb.GetFilerConfigurationResponse, err error) {
- return &filer_pb.GetFilerConfigurationResponse{
+ t := &filer_pb.GetFilerConfigurationResponse{
Masters: fs.option.Masters,
Collection: fs.option.Collection,
Replication: fs.option.DefaultReplication,
MaxMb: uint32(fs.option.MaxMB),
- }, nil
+ DirBuckets: fs.filer.DirBucketsPath,
+ Cipher: fs.filer.Cipher,
+ }
+
+ glog.V(4).Infof("GetFilerConfiguration: %v", t)
+
+ return t, nil
+}
+
+func (fs *FilerServer) KeepConnected(stream filer_pb.SeaweedFiler_KeepConnectedServer) error {
+
+ req, err := stream.Recv()
+ if err != nil {
+ return err
+ }
+
+ clientName := fmt.Sprintf("%s:%d", req.Name, req.GrpcPort)
+ m := make(map[string]bool)
+ for _, tp := range req.Resources {
+ m[tp] = true
+ }
+ fs.brokersLock.Lock()
+ fs.brokers[clientName] = m
+ glog.V(0).Infof("+ broker %v", clientName)
+ fs.brokersLock.Unlock()
+
+ defer func() {
+ fs.brokersLock.Lock()
+ delete(fs.brokers, clientName)
+ glog.V(0).Infof("- broker %v: %v", clientName, err)
+ fs.brokersLock.Unlock()
+ }()
+
+ for {
+ if err := stream.Send(&filer_pb.KeepConnectedResponse{}); err != nil {
+ glog.V(0).Infof("send broker %v: %+v", clientName, err)
+ return err
+ }
+ // println("replied")
+
+ if _, err := stream.Recv(); err != nil {
+ glog.V(0).Infof("recv broker %v: %v", clientName, err)
+ return err
+ }
+ // println("received")
+ }
+
+}
+
+func (fs *FilerServer) LocateBroker(ctx context.Context, req *filer_pb.LocateBrokerRequest) (resp *filer_pb.LocateBrokerResponse, err error) {
+
+ resp = &filer_pb.LocateBrokerResponse{}
+
+ fs.brokersLock.Lock()
+ defer fs.brokersLock.Unlock()
+
+ var localBrokers []*filer_pb.LocateBrokerResponse_Resource
+
+ for b, m := range fs.brokers {
+ if _, found := m[req.Resource]; found {
+ resp.Found = true
+ resp.Resources = []*filer_pb.LocateBrokerResponse_Resource{
+ {
+ GrpcAddresses: b,
+ ResourceCount: int32(len(m)),
+ },
+ }
+ return
+ }
+ localBrokers = append(localBrokers, &filer_pb.LocateBrokerResponse_Resource{
+ GrpcAddresses: b,
+ ResourceCount: int32(len(m)),
+ })
+ }
+
+ resp.Resources = localBrokers
+
+ return resp, nil
+
}
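KeepConnected registers the broker, blocks in the send/receive loop, and relies on the deferred cleanup to deregister on any exit path. The same shape, reduced to plain Go with no gRPC types:

package main

import (
	"fmt"
	"sync"
)

type registry struct {
	mu      sync.Mutex
	brokers map[string]bool
}

// serve registers the client, runs the blocking stream loop, and is
// guaranteed to deregister on any exit path, like KeepConnected above.
func (r *registry) serve(client string, loop func() error) error {
	r.mu.Lock()
	r.brokers[client] = true
	fmt.Println("+ broker", client)
	r.mu.Unlock()

	defer func() {
		r.mu.Lock()
		delete(r.brokers, client)
		fmt.Println("- broker", client)
		r.mu.Unlock()
	}()

	return loop() // blocks for the lifetime of the stream
}

func main() {
	r := &registry{brokers: map[string]bool{}}
	_ = r.serve("broker-host:17777", func() error { return nil })
}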
diff --git a/weed/server/filer_grpc_server_listen.go b/weed/server/filer_grpc_server_listen.go
new file mode 100644
index 000000000..848a1fc3a
--- /dev/null
+++ b/weed/server/filer_grpc_server_listen.go
@@ -0,0 +1,108 @@
+package weed_server
+
+import (
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+
+ "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+func (fs *FilerServer) SubscribeMetadata(req *filer_pb.SubscribeMetadataRequest, stream filer_pb.SeaweedFiler_SubscribeMetadataServer) error {
+
+ peerAddress := findClientAddress(stream.Context(), 0)
+
+ clientName := fs.addClient(req.ClientName, peerAddress)
+
+ defer fs.deleteClient(clientName)
+
+ lastReadTime := time.Unix(0, req.SinceNs)
+ glog.V(0).Infof(" %v starts to subscribe %s from %+v", clientName, req.PathPrefix, lastReadTime)
+ var processedTsNs int64
+
+ eachEventNotificationFn := func(dirPath string, eventNotification *filer_pb.EventNotification, tsNs int64) error {
+
+ // get complete path to the file or directory
+ var entryName string
+ if eventNotification.OldEntry != nil {
+ entryName = eventNotification.OldEntry.Name
+ } else if eventNotification.NewEntry != nil {
+ entryName = eventNotification.NewEntry.Name
+ }
+
+ fullpath := util.Join(dirPath, entryName)
+
+ // skip filer internal meta logs
+ if strings.HasPrefix(fullpath, filer2.SystemLogDir) {
+ return nil
+ }
+
+ if !strings.HasPrefix(fullpath, req.PathPrefix) {
+ return nil
+ }
+
+ message := &filer_pb.SubscribeMetadataResponse{
+ Directory: dirPath,
+ EventNotification: eventNotification,
+ TsNs: tsNs,
+ }
+ if err := stream.Send(message); err != nil {
+ glog.V(0).Infof("=> client %v: %+v", clientName, err)
+ return err
+ }
+ return nil
+ }
+
+ eachLogEntryFn := func(logEntry *filer_pb.LogEntry) error {
+ event := &filer_pb.SubscribeMetadataResponse{}
+ if err := proto.Unmarshal(logEntry.Data, event); err != nil {
+ glog.Errorf("unexpected unmarshal filer_pb.SubscribeMetadataResponse: %v", err)
+ return fmt.Errorf("unexpected unmarshal filer_pb.SubscribeMetadataResponse: %v", err)
+ }
+
+ if err := eachEventNotificationFn(event.Directory, event.EventNotification, event.TsNs); err != nil {
+ return err
+ }
+
+ processedTsNs = logEntry.TsNs
+
+ return nil
+ }
+
+ if err := fs.filer.ReadPersistedLogBuffer(lastReadTime, eachLogEntryFn); err != nil {
+ return fmt.Errorf("reading from persisted logs: %v", err)
+ }
+
+ if processedTsNs != 0 {
+ lastReadTime = time.Unix(0, processedTsNs)
+ }
+
+ err := fs.filer.MetaLogBuffer.LoopProcessLogData(lastReadTime, func() bool {
+ fs.listenersLock.Lock()
+ fs.listenersCond.Wait()
+ fs.listenersLock.Unlock()
+ return true
+ }, eachLogEntryFn)
+
+ return err
+
+}
+
+func (fs *FilerServer) addClient(clientType string, clientAddress string) (clientName string) {
+ clientName = clientType + "@" + clientAddress
+ glog.V(0).Infof("+ listener %v", clientName)
+ return
+}
+
+func (fs *FilerServer) deleteClient(clientName string) {
+ glog.V(0).Infof("- listener %v", clientName)
+}
+
+func (fs *FilerServer) notifyMetaListeners() {
+ fs.listenersCond.Broadcast()
+}
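SubscribeMetadata parks each caught-up subscriber on listenersCond, and notifyMetaListeners broadcasts whenever new metadata arrives. The wait/broadcast skeleton in isolation:

package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	var mu sync.Mutex
	cond := sync.NewCond(&mu)

	// subscriber: once caught up, wait until woken, then re-read
	go func() {
		mu.Lock()
		cond.Wait() // the patch's listenersCond.Wait()
		mu.Unlock()
		fmt.Println("woke up, re-reading the log buffer")
	}()

	time.Sleep(100 * time.Millisecond) // let the subscriber park
	cond.Broadcast()                   // the patch's notifyMetaListeners()
	time.Sleep(100 * time.Millisecond)
}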
diff --git a/weed/server/filer_grpc_server_rename.go b/weed/server/filer_grpc_server_rename.go
index 0669a26f1..71e2c7d17 100644
--- a/weed/server/filer_grpc_server_rename.go
+++ b/weed/server/filer_grpc_server_rename.go
@@ -3,10 +3,12 @@ package weed_server
import (
"context"
"fmt"
+ "path/filepath"
+
"github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
- "path/filepath"
+ "github.com/chrislusf/seaweedfs/weed/util"
)
func (fs *FilerServer) AtomicRenameEntry(ctx context.Context, req *filer_pb.AtomicRenameEntryRequest) (*filer_pb.AtomicRenameEntryResponse, error) {
@@ -18,7 +20,7 @@ func (fs *FilerServer) AtomicRenameEntry(ctx context.Context, req *filer_pb.Atom
return nil, err
}
- oldParent := filer2.FullPath(filepath.ToSlash(req.OldDirectory))
+ oldParent := util.FullPath(filepath.ToSlash(req.OldDirectory))
oldEntry, err := fs.filer.FindEntry(ctx, oldParent.Child(req.OldName))
if err != nil {
@@ -27,7 +29,7 @@ func (fs *FilerServer) AtomicRenameEntry(ctx context.Context, req *filer_pb.Atom
}
var events MoveEvents
- moveErr := fs.moveEntry(ctx, oldParent, oldEntry, filer2.FullPath(filepath.ToSlash(req.NewDirectory)), req.NewName, &events)
+ moveErr := fs.moveEntry(ctx, oldParent, oldEntry, util.FullPath(filepath.ToSlash(req.NewDirectory)), req.NewName, &events)
if moveErr != nil {
fs.filer.RollbackTransaction(ctx)
return nil, fmt.Errorf("%s/%s move error: %v", req.OldDirectory, req.OldName, err)
@@ -38,26 +40,26 @@ func (fs *FilerServer) AtomicRenameEntry(ctx context.Context, req *filer_pb.Atom
}
}
- for _, entry := range events.newEntries {
- fs.filer.NotifyUpdateEvent(nil, entry, false)
- }
- for _, entry := range events.oldEntries {
- fs.filer.NotifyUpdateEvent(entry, nil, false)
- }
-
return &filer_pb.AtomicRenameEntryResponse{}, nil
}
-func (fs *FilerServer) moveEntry(ctx context.Context, oldParent filer2.FullPath, entry *filer2.Entry, newParent filer2.FullPath, newName string, events *MoveEvents) error {
- if entry.IsDirectory() {
- if err := fs.moveFolderSubEntries(ctx, oldParent, entry, newParent, newName, events); err != nil {
- return err
+func (fs *FilerServer) moveEntry(ctx context.Context, oldParent util.FullPath, entry *filer2.Entry, newParent util.FullPath, newName string, events *MoveEvents) error {
+
+ if err := fs.moveSelfEntry(ctx, oldParent, entry, newParent, newName, events, func() error {
+ if entry.IsDirectory() {
+ if err := fs.moveFolderSubEntries(ctx, oldParent, entry, newParent, newName, events); err != nil {
+ return err
+ }
}
+ return nil
+ }); err != nil {
+ return fmt.Errorf("fail to move %s => %s: %v", oldParent.Child(entry.Name()), newParent.Child(newName), err)
}
- return fs.moveSelfEntry(ctx, oldParent, entry, newParent, newName, events)
+
+ return nil
}
-func (fs *FilerServer) moveFolderSubEntries(ctx context.Context, oldParent filer2.FullPath, entry *filer2.Entry, newParent filer2.FullPath, newName string, events *MoveEvents) error {
+func (fs *FilerServer) moveFolderSubEntries(ctx context.Context, oldParent util.FullPath, entry *filer2.Entry, newParent util.FullPath, newName string, events *MoveEvents) error {
currentDirPath := oldParent.Child(entry.Name())
newDirPath := newParent.Child(newName)
@@ -90,7 +92,8 @@ func (fs *FilerServer) moveFolderSubEntries(ctx context.Context, oldParent filer
return nil
}
-func (fs *FilerServer) moveSelfEntry(ctx context.Context, oldParent filer2.FullPath, entry *filer2.Entry, newParent filer2.FullPath, newName string, events *MoveEvents) error {
+func (fs *FilerServer) moveSelfEntry(ctx context.Context, oldParent util.FullPath, entry *filer2.Entry, newParent util.FullPath, newName string, events *MoveEvents,
+ moveFolderSubEntries func() error) error {
oldPath, newPath := oldParent.Child(entry.Name()), newParent.Child(newName)
@@ -112,6 +115,14 @@ func (fs *FilerServer) moveSelfEntry(ctx context.Context, oldParent filer2.FullP
return createErr
}
+ events.newEntries = append(events.newEntries, newEntry)
+
+ if moveFolderSubEntries != nil {
+ if moveChildrenErr := moveFolderSubEntries(); moveChildrenErr != nil {
+ return moveChildrenErr
+ }
+ }
+
// delete old entry
deleteErr := fs.filer.DeleteEntryMetaAndData(ctx, oldPath, false, false, false)
if deleteErr != nil {
@@ -119,7 +130,7 @@ func (fs *FilerServer) moveSelfEntry(ctx context.Context, oldParent filer2.FullP
}
events.oldEntries = append(events.oldEntries, entry)
- events.newEntries = append(events.newEntries, newEntry)
+
return nil
}
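The refactored moveSelfEntry inverts the old order: create the destination entry, run the injected moveFolderSubEntries callback, then delete the source, so a failure partway through a directory move no longer strands children under a deleted parent. The same ordering in miniature, over a toy in-memory store:

package main

import "fmt"

var store = map[string]bool{"/old": true, "/old/child": true}

// move creates the destination first, moves children through the
// injected callback, and only then removes the source.
func move(oldPath, newPath string, moveChildren func() error) error {
	store[newPath] = true
	if moveChildren != nil {
		if err := moveChildren(); err != nil {
			return err // the old tree is still intact at this point
		}
	}
	delete(store, oldPath)
	return nil
}

func main() {
	err := move("/old", "/new", func() error {
		return move("/old/child", "/new/child", nil)
	})
	fmt.Println(err, store) // <nil> map[/new:true /new/child:true]
}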
diff --git a/weed/server/filer_server.go b/weed/server/filer_server.go
index 72cca1f6f..10b607dfe 100644
--- a/weed/server/filer_server.go
+++ b/weed/server/filer_server.go
@@ -5,11 +5,15 @@ import (
"fmt"
"net/http"
"os"
+ "sync"
"time"
"google.golang.org/grpc"
+ "github.com/chrislusf/seaweedfs/weed/util/grace"
+
"github.com/chrislusf/seaweedfs/weed/operation"
+ "github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
"github.com/chrislusf/seaweedfs/weed/stats"
"github.com/chrislusf/seaweedfs/weed/util"
@@ -19,10 +23,11 @@ import (
_ "github.com/chrislusf/seaweedfs/weed/filer2/etcd"
_ "github.com/chrislusf/seaweedfs/weed/filer2/leveldb"
_ "github.com/chrislusf/seaweedfs/weed/filer2/leveldb2"
+ _ "github.com/chrislusf/seaweedfs/weed/filer2/mongodb"
_ "github.com/chrislusf/seaweedfs/weed/filer2/mysql"
_ "github.com/chrislusf/seaweedfs/weed/filer2/postgres"
_ "github.com/chrislusf/seaweedfs/weed/filer2/redis"
- _ "github.com/chrislusf/seaweedfs/weed/filer2/tikv"
+ _ "github.com/chrislusf/seaweedfs/weed/filer2/redis2"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/notification"
_ "github.com/chrislusf/seaweedfs/weed/notification/aws_sqs"
@@ -37,15 +42,16 @@ type FilerOption struct {
Masters []string
Collection string
DefaultReplication string
- RedirectOnRead bool
DisableDirListing bool
MaxMB int
DirListingLimit int
DataCenter string
DefaultLevelDbDir string
DisableHttp bool
- Port int
+ Host string
+ Port uint32
recursiveDelete bool
+ Cipher bool
}
type FilerServer struct {
@@ -53,6 +59,13 @@ type FilerServer struct {
secret security.SigningKey
filer *filer2.Filer
grpcDialOption grpc.DialOption
+
+ // notifying clients
+ listenersLock sync.Mutex
+ listenersCond *sync.Cond
+
+ brokers map[string]map[string]bool
+ brokersLock sync.Mutex
}
func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, option *FilerOption) (fs *FilerServer, err error) {
@@ -60,13 +73,18 @@ func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, option *FilerOption)
fs = &FilerServer{
option: option,
grpcDialOption: security.LoadClientTLS(util.GetViper(), "grpc.filer"),
+ brokers: make(map[string]map[string]bool),
}
+ fs.listenersCond = sync.NewCond(&fs.listenersLock)
if len(option.Masters) == 0 {
glog.Fatal("master list is required!")
}
- fs.filer = filer2.NewFiler(option.Masters, fs.grpcDialOption)
+ fs.filer = filer2.NewFiler(option.Masters, fs.grpcDialOption, option.Host, option.Port, option.Collection, option.DefaultReplication, fs.notifyMetaListeners)
+ fs.filer.Cipher = option.Cipher
+
+ maybeStartMetrics(fs, option)
go fs.filer.KeepConnectedToMaster()
@@ -82,6 +100,9 @@ func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, option *FilerOption)
util.LoadConfiguration("notification", false)
fs.option.recursiveDelete = v.GetBool("filer.options.recursive_delete")
+ v.SetDefault("filer.options.buckets_folder", "/buckets")
+ fs.filer.DirBucketsPath = v.GetString("filer.options.buckets_folder")
+ fs.filer.FsyncBuckets = v.GetStringSlice("filer.options.buckets_fsync")
fs.filer.LoadConfiguration(v)
notification.LoadConfiguration(v, "notification.")
@@ -94,22 +115,36 @@ func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, option *FilerOption)
readonlyMux.HandleFunc("/", fs.readonlyFilerHandler)
}
- maybeStartMetrics(fs, option)
+ fs.filer.LoadBuckets()
+
+ grace.OnInterrupt(func() {
+ fs.filer.Shutdown()
+ })
return fs, nil
}
func maybeStartMetrics(fs *FilerServer, option *FilerOption) {
+
+ for _, master := range option.Masters {
+ _, err := pb.ParseFilerGrpcAddress(master)
+ if err != nil {
+ glog.Fatalf("invalid master address %s: %v", master, err)
+ }
+ }
+
isConnected := false
var metricsAddress string
var metricsIntervalSec int
var readErr error
for !isConnected {
- metricsAddress, metricsIntervalSec, readErr = readFilerConfiguration(fs.grpcDialOption, option.Masters[0])
- if readErr == nil {
- isConnected = true
- } else {
- time.Sleep(7 * time.Second)
+ for _, master := range option.Masters {
+ metricsAddress, metricsIntervalSec, readErr = readFilerConfiguration(fs.grpcDialOption, master)
+ if readErr == nil {
+ isConnected = true
+ } else {
+ time.Sleep(7 * time.Second)
+ }
}
}
if metricsAddress == "" && metricsIntervalSec <= 0 {
@@ -121,11 +156,11 @@ func maybeStartMetrics(fs *FilerServer, option *FilerOption) {
})
}
-func readFilerConfiguration(grpcDialOption grpc.DialOption, masterGrpcAddress string) (metricsAddress string, metricsIntervalSec int, err error) {
- err = operation.WithMasterServerClient(masterGrpcAddress, grpcDialOption, func(ctx context.Context, masterClient master_pb.SeaweedClient) error {
- resp, err := masterClient.GetMasterConfiguration(ctx, &master_pb.GetMasterConfigurationRequest{})
+func readFilerConfiguration(grpcDialOption grpc.DialOption, masterAddress string) (metricsAddress string, metricsIntervalSec int, err error) {
+ err = operation.WithMasterServerClient(masterAddress, grpcDialOption, func(masterClient master_pb.SeaweedClient) error {
+ resp, err := masterClient.GetMasterConfiguration(context.Background(), &master_pb.GetMasterConfigurationRequest{})
if err != nil {
- return fmt.Errorf("get master %s configuration: %v", masterGrpcAddress, err)
+ return fmt.Errorf("get master %s configuration: %v", masterAddress, err)
}
metricsAddress, metricsIntervalSec = resp.MetricsAddress, int(resp.MetricsIntervalSeconds)
return nil
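
With this change the filer validates every configured master address up front and rotates through all masters while reading the metrics configuration, instead of retrying only Masters[0]. A minimal sketch of that retry-over-peers pattern, with placeholder peer names and a parameterized backoff:

package main

import (
	"errors"
	"fmt"
	"time"
)

// readFromAnyPeer keeps cycling through peers until one read succeeds,
// backing off between attempts, mirroring the loop in maybeStartMetrics.
func readFromAnyPeer(peers []string, backoff time.Duration, readFrom func(string) (string, error)) string {
	for {
		for _, peer := range peers {
			if v, err := readFrom(peer); err == nil {
				return v
			}
			time.Sleep(backoff) // wait before trying the next peer
		}
	}
}

func main() {
	v := readFromAnyPeer([]string{"m1:9333", "m2:9333"}, 100*time.Millisecond,
		func(p string) (string, error) {
			if p == "m2:9333" {
				return "metrics.example:9091", nil
			}
			return "", errors.New("unreachable")
		})
	fmt.Println(v)
}
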
diff --git a/weed/server/filer_server_handlers_read.go b/weed/server/filer_server_handlers_read.go
index ba21298ba..76c924df1 100644
--- a/weed/server/filer_server_handlers_read.go
+++ b/weed/server/filer_server_handlers_read.go
@@ -1,19 +1,20 @@
package weed_server
import (
+ "bytes"
"context"
"io"
- "io/ioutil"
"mime"
- "mime/multipart"
"net/http"
- "net/url"
- "path"
+ "path/filepath"
"strconv"
"strings"
+ "time"
"github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/images"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/stats"
"github.com/chrislusf/seaweedfs/weed/util"
)
@@ -26,13 +27,13 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request,
path = path[:len(path)-1]
}
- entry, err := fs.filer.FindEntry(context.Background(), filer2.FullPath(path))
+ entry, err := fs.filer.FindEntry(context.Background(), util.FullPath(path))
if err != nil {
if path == "/" {
fs.listDirectoryHandler(w, r)
return
}
- if err == filer2.ErrNotFound {
+ if err == filer_pb.ErrNotFound {
glog.V(1).Infof("Not found %s: %v", path, err)
stats.FilerRequestCounter.WithLabelValues("read.notfound").Inc()
w.WriteHeader(http.StatusNotFound)
@@ -66,188 +67,68 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request,
}
w.Header().Set("Accept-Ranges", "bytes")
- if r.Method == "HEAD" {
- w.Header().Set("Content-Length", strconv.FormatInt(int64(filer2.TotalSize(entry.Chunks)), 10))
- w.Header().Set("Last-Modified", entry.Attr.Mtime.Format(http.TimeFormat))
- setEtag(w, filer2.ETag(entry.Chunks))
- return
- }
-
- if len(entry.Chunks) == 1 {
- fs.handleSingleChunk(w, r, entry)
- return
- }
-
- fs.handleMultipleChunks(w, r, entry)
-
-}
-
-func (fs *FilerServer) handleSingleChunk(w http.ResponseWriter, r *http.Request, entry *filer2.Entry) {
-
- fileId := entry.Chunks[0].GetFileIdString()
-
- urlString, err := fs.filer.MasterClient.LookupFileId(fileId)
- if err != nil {
- glog.V(1).Infof("operation LookupFileId %s failed, err: %v", fileId, err)
- w.WriteHeader(http.StatusNotFound)
- return
- }
-
- if fs.option.RedirectOnRead {
- stats.FilerRequestCounter.WithLabelValues("redirect").Inc()
- http.Redirect(w, r, urlString, http.StatusFound)
- return
- }
-
- u, _ := url.Parse(urlString)
- q := u.Query()
- for key, values := range r.URL.Query() {
- for _, value := range values {
- q.Add(key, value)
- }
- }
- u.RawQuery = q.Encode()
- request := &http.Request{
- Method: r.Method,
- URL: u,
- Proto: r.Proto,
- ProtoMajor: r.ProtoMajor,
- ProtoMinor: r.ProtoMinor,
- Header: r.Header,
- Body: r.Body,
- Host: r.Host,
- ContentLength: r.ContentLength,
- }
- glog.V(3).Infoln("retrieving from", u)
- resp, do_err := util.Do(request)
- if do_err != nil {
- glog.V(0).Infoln("failing to connect to volume server", do_err.Error())
- writeJsonError(w, r, http.StatusInternalServerError, do_err)
- return
- }
- defer func() {
- io.Copy(ioutil.Discard, resp.Body)
- resp.Body.Close()
- }()
- for k, v := range resp.Header {
- w.Header()[k] = v
- }
- if entry.Attr.Mime != "" {
- w.Header().Set("Content-Type", entry.Attr.Mime)
- }
- w.WriteHeader(resp.StatusCode)
- io.Copy(w, resp.Body)
-}
-
-func (fs *FilerServer) handleMultipleChunks(w http.ResponseWriter, r *http.Request, entry *filer2.Entry) {
+ w.Header().Set("Last-Modified", entry.Attr.Mtime.Format(http.TimeFormat))
+ // mime type
mimeType := entry.Attr.Mime
if mimeType == "" {
- if ext := path.Ext(entry.Name()); ext != "" {
+ if ext := filepath.Ext(entry.Name()); ext != "" {
mimeType = mime.TypeByExtension(ext)
}
}
if mimeType != "" {
w.Header().Set("Content-Type", mimeType)
}
- setEtag(w, filer2.ETag(entry.Chunks))
+
+ // if modified since
+ if !entry.Attr.Mtime.IsZero() {
+ w.Header().Set("Last-Modified", entry.Attr.Mtime.UTC().Format(http.TimeFormat))
+ if r.Header.Get("If-Modified-Since") != "" {
+ if t, parseError := time.Parse(http.TimeFormat, r.Header.Get("If-Modified-Since")); parseError == nil {
+ if t.After(entry.Attr.Mtime) {
+ w.WriteHeader(http.StatusNotModified)
+ return
+ }
+ }
+ }
+ }
+
+ // set etag
+ etag := filer2.ETagEntry(entry)
+ if inm := r.Header.Get("If-None-Match"); inm == "\""+etag+"\"" {
+ w.WriteHeader(http.StatusNotModified)
+ return
+ }
+ setEtag(w, etag)
+
+ if r.Method == "HEAD" {
+ w.Header().Set("Content-Length", strconv.FormatInt(int64(filer2.TotalSize(entry.Chunks)), 10))
+ return
+ }
+
+ filename := entry.Name()
+ adjustHeadersAfterHEAD(w, r, filename)
totalSize := int64(filer2.TotalSize(entry.Chunks))
- rangeReq := r.Header.Get("Range")
-
- if rangeReq == "" {
- w.Header().Set("Content-Length", strconv.FormatInt(totalSize, 10))
- if err := fs.writeContent(w, entry, 0, int(totalSize)); err != nil {
- http.Error(w, err.Error(), http.StatusInternalServerError)
- return
- }
- return
- }
-
- //the rest is dealing with partial content request
- //mostly copy from src/pkg/net/http/fs.go
- ranges, err := parseRange(rangeReq, totalSize)
- if err != nil {
- http.Error(w, err.Error(), http.StatusRequestedRangeNotSatisfiable)
- return
- }
- if sumRangesSize(ranges) > totalSize {
- // The total number of bytes in all the ranges
- // is larger than the size of the file by
- // itself, so this is probably an attack, or a
- // dumb client. Ignore the range request.
- return
- }
- if len(ranges) == 0 {
- return
- }
- if len(ranges) == 1 {
- // RFC 2616, Section 14.16:
- // "When an HTTP message includes the content of a single
- // range (for example, a response to a request for a
- // single range, or to a request for a set of ranges
- // that overlap without any holes), this content is
- // transmitted with a Content-Range header, and a
- // Content-Length header showing the number of bytes
- // actually transferred.
- // ...
- // A response to a request for a single range MUST NOT
- // be sent using the multipart/byteranges media type."
- ra := ranges[0]
- w.Header().Set("Content-Length", strconv.FormatInt(ra.length, 10))
- w.Header().Set("Content-Range", ra.contentRange(totalSize))
- w.WriteHeader(http.StatusPartialContent)
-
- err = fs.writeContent(w, entry, ra.start, int(ra.length))
- if err != nil {
- http.Error(w, err.Error(), http.StatusInternalServerError)
- return
- }
- return
- }
-
- // process multiple ranges
- for _, ra := range ranges {
- if ra.start > totalSize {
- http.Error(w, "Out of Range", http.StatusRequestedRangeNotSatisfiable)
- return
- }
- }
- sendSize := rangesMIMESize(ranges, mimeType, totalSize)
- pr, pw := io.Pipe()
- mw := multipart.NewWriter(pw)
- w.Header().Set("Content-Type", "multipart/byteranges; boundary="+mw.Boundary())
- sendContent := pr
- defer pr.Close() // cause writing goroutine to fail and exit if CopyN doesn't finish.
- go func() {
- for _, ra := range ranges {
- part, e := mw.CreatePart(ra.mimeHeader(mimeType, totalSize))
- if e != nil {
- pw.CloseWithError(e)
- return
- }
- if e = fs.writeContent(part, entry, ra.start, int(ra.length)); e != nil {
- pw.CloseWithError(e)
+ if rangeReq := r.Header.Get("Range"); rangeReq == "" {
+ ext := filepath.Ext(filename)
+ width, height, mode, shouldResize := shouldResizeImages(ext, r)
+ if shouldResize {
+ data, err := filer2.ReadAll(fs.filer.MasterClient, entry.Chunks)
+ if err != nil {
+ glog.Errorf("failed to read %s: %v", path, err)
+ w.WriteHeader(http.StatusInternalServerError)
return
}
+ rs, _, _ := images.Resized(ext, bytes.NewReader(data), width, height, mode)
+ io.Copy(w, rs)
+ return
}
- mw.Close()
- pw.Close()
- }()
- if w.Header().Get("Content-Encoding") == "" {
- w.Header().Set("Content-Length", strconv.FormatInt(sendSize, 10))
- }
- w.WriteHeader(http.StatusPartialContent)
- if _, err := io.CopyN(w, sendContent, sendSize); err != nil {
- http.Error(w, "Internal Error", http.StatusInternalServerError)
- return
}
-}
-
-func (fs *FilerServer) writeContent(w io.Writer, entry *filer2.Entry, offset int64, size int) error {
-
- return filer2.StreamContent(fs.filer.MasterClient, w, entry.Chunks, offset, size)
+ processRangeRequest(r, w, totalSize, mimeType, func(writer io.Writer, offset int64, size int64) error {
+ return filer2.StreamContent(fs.filer.MasterClient, writer, entry.Chunks, offset, size)
+ })
}
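
The rewritten GetOrHeadHandler folds the old redirect/proxy paths into direct streaming and adds standard conditional-GET checks. A self-contained sketch of just the 304 logic it now performs, with placeholder mtime and etag values:

package main

import (
	"fmt"
	"net/http"
	"time"
)

// serveConditional answers 304 when If-Modified-Since or If-None-Match
// shows the client already has the current version, as in the handler above.
func serveConditional(w http.ResponseWriter, r *http.Request, mtime time.Time, etag string) {
	w.Header().Set("Last-Modified", mtime.UTC().Format(http.TimeFormat))
	if ims := r.Header.Get("If-Modified-Since"); ims != "" {
		if t, err := time.Parse(http.TimeFormat, ims); err == nil && t.After(mtime) {
			w.WriteHeader(http.StatusNotModified)
			return
		}
	}
	if inm := r.Header.Get("If-None-Match"); inm == `"`+etag+`"` {
		w.WriteHeader(http.StatusNotModified)
		return
	}
	w.Header().Set("ETag", `"`+etag+`"`)
	fmt.Fprintln(w, "body goes here")
}

func main() {
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		serveConditional(w, r, time.Now().Add(-time.Hour), "abc123")
	})
	http.ListenAndServe(":8080", nil)
}
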
diff --git a/weed/server/filer_server_handlers_read_dir.go b/weed/server/filer_server_handlers_read_dir.go
index 87e864559..ae28fc1db 100644
--- a/weed/server/filer_server_handlers_read_dir.go
+++ b/weed/server/filer_server_handlers_read_dir.go
@@ -6,10 +6,10 @@ import (
"strconv"
"strings"
- "github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/glog"
ui "github.com/chrislusf/seaweedfs/weed/server/filer_ui"
"github.com/chrislusf/seaweedfs/weed/stats"
+ "github.com/chrislusf/seaweedfs/weed/util"
)
// listDirectoryHandler lists directories and folders under a directory
@@ -32,7 +32,7 @@ func (fs *FilerServer) listDirectoryHandler(w http.ResponseWriter, r *http.Reque
lastFileName := r.FormValue("lastFileName")
- entries, err := fs.filer.ListDirectoryEntries(context.Background(), filer2.FullPath(path), lastFileName, false, limit)
+ entries, err := fs.filer.ListDirectoryEntries(context.Background(), util.FullPath(path), lastFileName, false, limit)
if err != nil {
glog.V(0).Infof("listDirectory %s %s %d: %s", path, lastFileName, limit, err)
diff --git a/weed/server/filer_server_handlers_write.go b/weed/server/filer_server_handlers_write.go
index 4707f1011..74a558e22 100644
--- a/weed/server/filer_server_handlers_write.go
+++ b/weed/server/filer_server_handlers_write.go
@@ -2,6 +2,7 @@ package weed_server
import (
"context"
+ "crypto/md5"
"encoding/json"
"errors"
"fmt"
@@ -22,6 +23,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/stats"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
"github.com/chrislusf/seaweedfs/weed/util"
)
@@ -38,7 +40,7 @@ type FilerPostResult struct {
Url string `json:"url,omitempty"`
}
-func (fs *FilerServer) assignNewFileInfo(w http.ResponseWriter, r *http.Request, replication, collection string, dataCenter string) (fileId, urlLocation string, auth security.EncodedJwt, err error) {
+func (fs *FilerServer) assignNewFileInfo(w http.ResponseWriter, r *http.Request, replication, collection, dataCenter, ttlString string, fsync bool) (fileId, urlLocation string, auth security.EncodedJwt, err error) {
stats.FilerRequestCounter.WithLabelValues("assign").Inc()
start := time.Now()
@@ -48,7 +50,7 @@ func (fs *FilerServer) assignNewFileInfo(w http.ResponseWriter, r *http.Request,
Count: 1,
Replication: replication,
Collection: collection,
- Ttl: r.URL.Query().Get("ttl"),
+ Ttl: ttlString,
DataCenter: dataCenter,
}
var altRequest *operation.VolumeAssignRequest
@@ -57,7 +59,7 @@ func (fs *FilerServer) assignNewFileInfo(w http.ResponseWriter, r *http.Request,
Count: 1,
Replication: replication,
Collection: collection,
- Ttl: r.URL.Query().Get("ttl"),
+ Ttl: ttlString,
DataCenter: "",
}
}
@@ -71,6 +73,9 @@ func (fs *FilerServer) assignNewFileInfo(w http.ResponseWriter, r *http.Request,
}
fileId = assignResult.Fid
urlLocation = "http://" + assignResult.Url + "/" + assignResult.Fid
+ if fsync {
+ urlLocation += "?fsync=true"
+ }
auth = assignResult.Auth
return
}
@@ -80,50 +85,52 @@ func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) {
ctx := context.Background()
query := r.URL.Query()
- replication := query.Get("replication")
- if replication == "" {
- replication = fs.option.DefaultReplication
- }
- collection := query.Get("collection")
- if collection == "" {
- collection = fs.option.Collection
- }
+ collection, replication, fsync := fs.detectCollection(r.RequestURI, query.Get("collection"), query.Get("replication"))
dataCenter := query.Get("dataCenter")
if dataCenter == "" {
dataCenter = fs.option.DataCenter
}
+ ttlString := r.URL.Query().Get("ttl")
- if autoChunked := fs.autoChunk(ctx, w, r, replication, collection, dataCenter); autoChunked {
+ // read ttl in seconds
+ ttl, err := needle.ReadTTL(ttlString)
+ ttlSeconds := int32(0)
+ if err == nil {
+ ttlSeconds = int32(ttl.Minutes()) * 60
+ }
+
+ if autoChunked := fs.autoChunk(ctx, w, r, replication, collection, dataCenter, ttlSeconds, ttlString, fsync); autoChunked {
return
}
- fileId, urlLocation, auth, err := fs.assignNewFileInfo(w, r, replication, collection, dataCenter)
+ if fs.option.Cipher {
+ reply, err := fs.encrypt(ctx, w, r, replication, collection, dataCenter, ttlSeconds, ttlString, fsync)
+ if err != nil {
+ writeJsonError(w, r, http.StatusInternalServerError, err)
+ } else if reply != nil {
+ writeJsonQuiet(w, r, http.StatusCreated, reply)
+ }
+
+ return
+ }
+
+ fileId, urlLocation, auth, err := fs.assignNewFileInfo(w, r, replication, collection, dataCenter, ttlString, fsync)
if err != nil || fileId == "" || urlLocation == "" {
glog.V(0).Infof("fail to allocate volume for %s, collection:%s, datacenter:%s", r.URL.Path, collection, dataCenter)
+ writeJsonError(w, r, http.StatusInternalServerError, fmt.Errorf("fail to allocate volume for %s, collection:%s, datacenter:%s", r.URL.Path, collection, dataCenter))
return
}
glog.V(4).Infof("write %s to %v", r.URL.Path, urlLocation)
u, _ := url.Parse(urlLocation)
-
- // This allows a client to generate a chunk manifest and submit it to the filer -- it is a little off
- // because they need to provide FIDs instead of file paths...
- cm, _ := strconv.ParseBool(query.Get("cm"))
- if cm {
- q := u.Query()
- q.Set("cm", "true")
- u.RawQuery = q.Encode()
- }
- glog.V(4).Infoln("post to", u)
-
- ret, err := fs.uploadToVolumeServer(r, u, auth, w, fileId)
+ ret, md5value, err := fs.uploadToVolumeServer(r, u, auth, w, fileId)
if err != nil {
return
}
- if err = fs.updateFilerStore(ctx, r, w, replication, collection, ret, fileId); err != nil {
+ if err = fs.updateFilerStore(ctx, r, w, replication, collection, ret, md5value, fileId, ttlSeconds); err != nil {
return
}
@@ -140,8 +147,8 @@ func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) {
}
// update metadata in filer store
-func (fs *FilerServer) updateFilerStore(ctx context.Context, r *http.Request, w http.ResponseWriter,
- replication string, collection string, ret operation.UploadResult, fileId string) (err error) {
+func (fs *FilerServer) updateFilerStore(ctx context.Context, r *http.Request, w http.ResponseWriter, replication string,
+ collection string, ret *operation.UploadResult, md5value []byte, fileId string, ttlSeconds int32) (err error) {
stats.FilerRequestCounter.WithLabelValues("postStoreWrite").Inc()
start := time.Now()
@@ -165,13 +172,13 @@ func (fs *FilerServer) updateFilerStore(ctx context.Context, r *http.Request, w
path += ret.Name
}
}
- existingEntry, err := fs.filer.FindEntry(ctx, filer2.FullPath(path))
+ existingEntry, err := fs.filer.FindEntry(ctx, util.FullPath(path))
crTime := time.Now()
if err == nil && existingEntry != nil {
crTime = existingEntry.Crtime
}
entry := &filer2.Entry{
- FullPath: filer2.FullPath(path),
+ FullPath: util.FullPath(path),
Attr: filer2.Attr{
Mtime: time.Now(),
Crtime: crTime,
@@ -180,7 +187,9 @@ func (fs *FilerServer) updateFilerStore(ctx context.Context, r *http.Request, w
Gid: OS_GID,
Replication: replication,
Collection: collection,
- TtlSec: int32(util.ParseInt(r.URL.Query().Get("ttl"), 0)),
+ TtlSec: ttlSeconds,
+ Mime: ret.Mime,
+ Md5: md5value,
},
Chunks: []*filer_pb.FileChunk{{
FileId: fileId,
@@ -189,8 +198,10 @@ func (fs *FilerServer) updateFilerStore(ctx context.Context, r *http.Request, w
ETag: ret.ETag,
}},
}
- if ext := filenamePath.Ext(path); ext != "" {
- entry.Attr.Mime = mime.TypeByExtension(ext)
+ if entry.Attr.Mime == "" {
+ if ext := filenamePath.Ext(path); ext != "" {
+ entry.Attr.Mime = mime.TypeByExtension(ext)
+ }
}
// glog.V(4).Infof("saving %s => %+v", path, entry)
if dbErr := fs.filer.CreateEntry(ctx, entry, false); dbErr != nil {
@@ -205,12 +216,21 @@ func (fs *FilerServer) updateFilerStore(ctx context.Context, r *http.Request, w
}
// send request to volume server
-func (fs *FilerServer) uploadToVolumeServer(r *http.Request, u *url.URL, auth security.EncodedJwt, w http.ResponseWriter, fileId string) (ret operation.UploadResult, err error) {
+func (fs *FilerServer) uploadToVolumeServer(r *http.Request, u *url.URL, auth security.EncodedJwt, w http.ResponseWriter, fileId string) (ret *operation.UploadResult, md5value []byte, err error) {
stats.FilerRequestCounter.WithLabelValues("postUpload").Inc()
start := time.Now()
defer func() { stats.FilerRequestHistogram.WithLabelValues("postUpload").Observe(time.Since(start).Seconds()) }()
+ ret = &operation.UploadResult{}
+
+ md5Hash := md5.New()
+ body := r.Body
+ if r.Method == "PUT" {
+ // only PUT or large chunked files have Md5 in attributes
+ body = ioutil.NopCloser(io.TeeReader(r.Body, md5Hash))
+ }
+
request := &http.Request{
Method: r.Method,
URL: u,
@@ -218,10 +238,11 @@ func (fs *FilerServer) uploadToVolumeServer(r *http.Request, u *url.URL, auth se
ProtoMajor: r.ProtoMajor,
ProtoMinor: r.ProtoMinor,
Header: r.Header,
- Body: r.Body,
+ Body: body,
Host: r.Host,
ContentLength: r.ContentLength,
}
+
if auth != "" {
request.Header.Set("Authorization", "BEARER "+string(auth))
}
@@ -236,7 +257,7 @@ func (fs *FilerServer) uploadToVolumeServer(r *http.Request, u *url.URL, auth se
io.Copy(ioutil.Discard, resp.Body)
resp.Body.Close()
}()
- etag := resp.Header.Get("ETag")
+
respBody, raErr := ioutil.ReadAll(resp.Body)
if raErr != nil {
glog.V(0).Infoln("failing to upload to volume server", r.RequestURI, raErr.Error())
@@ -244,6 +265,7 @@ func (fs *FilerServer) uploadToVolumeServer(r *http.Request, u *url.URL, auth se
err = raErr
return
}
+
glog.V(4).Infoln("post result", string(respBody))
unmarshalErr := json.Unmarshal(respBody, &ret)
if unmarshalErr != nil {
@@ -271,9 +293,11 @@ func (fs *FilerServer) uploadToVolumeServer(r *http.Request, u *url.URL, auth se
return
}
}
- if etag != "" {
- ret.ETag = etag
+ // use the filer-calculated md5 ETag instead of the volume server's CRC ETag
+ if r.Method == "PUT" {
+ md5value = md5Hash.Sum(nil)
}
+ ret.ETag = getEtag(resp)
return
}
@@ -292,11 +316,11 @@ func (fs *FilerServer) DeleteHandler(w http.ResponseWriter, r *http.Request) {
ignoreRecursiveError := r.FormValue("ignoreRecursiveError") == "true"
skipChunkDeletion := r.FormValue("skipChunkDeletion") == "true"
- err := fs.filer.DeleteEntryMetaAndData(context.Background(), filer2.FullPath(r.URL.Path), isRecursive, ignoreRecursiveError, !skipChunkDeletion)
+ err := fs.filer.DeleteEntryMetaAndData(context.Background(), util.FullPath(r.URL.Path), isRecursive, ignoreRecursiveError, !skipChunkDeletion)
if err != nil {
glog.V(1).Infoln("deleting", r.URL.Path, ":", err.Error())
httpStatus := http.StatusInternalServerError
- if err == filer2.ErrNotFound {
+ if err == filer_pb.ErrNotFound {
httpStatus = http.StatusNotFound
}
writeJsonError(w, r, httpStatus, err)
@@ -305,3 +329,32 @@ func (fs *FilerServer) DeleteHandler(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusNoContent)
}
+
+func (fs *FilerServer) detectCollection(requestURI, qCollection, qReplication string) (collection, replication string, fsync bool) {
+ // default
+ collection = fs.option.Collection
+ replication = fs.option.DefaultReplication
+
+ // get default collection settings
+ if qCollection != "" {
+ collection = qCollection
+ }
+ if qReplication != "" {
+ replication = qReplication
+ }
+
+ // required by buckets folder
+ if strings.HasPrefix(requestURI, fs.filer.DirBucketsPath+"/") {
+ bucketAndObjectKey := requestURI[len(fs.filer.DirBucketsPath)+1:]
+ t := strings.Index(bucketAndObjectKey, "/")
+ if t < 0 {
+ collection = bucketAndObjectKey
+ }
+ if t > 0 {
+ collection = bucketAndObjectKey[:t]
+ }
+ replication, fsync = fs.filer.ReadBucketOption(collection)
+ }
+
+ return
+}
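
detectCollection derives the collection from the first path segment under the buckets folder, falling back to query parameters and server defaults. The same mapping, reduced to a standalone function with example inputs (DirBucketsPath here is the "/buckets" default set earlier in this diff):

package main

import (
	"fmt"
	"strings"
)

func collectionFor(requestURI, dirBucketsPath, defaultCollection string) string {
	collection := defaultCollection
	if strings.HasPrefix(requestURI, dirBucketsPath+"/") {
		bucketAndObjectKey := requestURI[len(dirBucketsPath)+1:]
		if t := strings.Index(bucketAndObjectKey, "/"); t > 0 {
			collection = bucketAndObjectKey[:t] // first path segment is the bucket
		} else if t < 0 {
			collection = bucketAndObjectKey // bare bucket, no object key
		}
	}
	return collection
}

func main() {
	fmt.Println(collectionFor("/buckets/photos/2020/a.jpg", "/buckets", "")) // photos
	fmt.Println(collectionFor("/buckets/photos", "/buckets", ""))            // photos
	fmt.Println(collectionFor("/notbuckets/x", "/buckets", "default"))       // default
}
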
diff --git a/weed/server/filer_server_handlers_write_autochunk.go b/weed/server/filer_server_handlers_write_autochunk.go
index 25c0a4b4d..532693742 100644
--- a/weed/server/filer_server_handlers_write_autochunk.go
+++ b/weed/server/filer_server_handlers_write_autochunk.go
@@ -2,7 +2,9 @@ package weed_server
import (
"context"
+ "crypto/md5"
"io"
+ "io/ioutil"
"net/http"
"path"
"strconv"
@@ -19,7 +21,7 @@ import (
)
func (fs *FilerServer) autoChunk(ctx context.Context, w http.ResponseWriter, r *http.Request,
- replication string, collection string, dataCenter string) bool {
+ replication string, collection string, dataCenter string, ttlSec int32, ttlString string, fsync bool) bool {
if r.Method != "POST" {
glog.V(4).Infoln("AutoChunking not supported for method", r.Method)
return false
@@ -55,7 +57,7 @@ func (fs *FilerServer) autoChunk(ctx context.Context, w http.ResponseWriter, r *
return false
}
- reply, err := fs.doAutoChunk(ctx, w, r, contentLength, chunkSize, replication, collection, dataCenter)
+ reply, err := fs.doAutoChunk(ctx, w, r, contentLength, chunkSize, replication, collection, dataCenter, ttlSec, ttlString, fsync)
if err != nil {
writeJsonError(w, r, http.StatusInternalServerError, err)
} else if reply != nil {
@@ -65,7 +67,7 @@ func (fs *FilerServer) autoChunk(ctx context.Context, w http.ResponseWriter, r *
}
func (fs *FilerServer) doAutoChunk(ctx context.Context, w http.ResponseWriter, r *http.Request,
- contentLength int64, chunkSize int32, replication string, collection string, dataCenter string) (filerResult *FilerPostResult, replyerr error) {
+ contentLength int64, chunkSize int32, replication string, collection string, dataCenter string, ttlSec int32, ttlString string, fsync bool) (filerResult *FilerPostResult, replyerr error) {
stats.FilerRequestCounter.WithLabelValues("postAutoChunk").Inc()
start := time.Now()
@@ -87,50 +89,47 @@ func (fs *FilerServer) doAutoChunk(ctx context.Context, w http.ResponseWriter, r
if fileName != "" {
fileName = path.Base(fileName)
}
+ contentType := part1.Header.Get("Content-Type")
var fileChunks []*filer_pb.FileChunk
+ md5Hash := md5.New()
+ var partReader = ioutil.NopCloser(io.TeeReader(part1, md5Hash))
+
chunkOffset := int64(0)
for chunkOffset < contentLength {
- limitedReader := io.LimitReader(part1, int64(chunkSize))
+ limitedReader := io.LimitReader(partReader, int64(chunkSize))
// assign one file id for one chunk
- fileId, urlLocation, auth, assignErr := fs.assignNewFileInfo(w, r, replication, collection, dataCenter)
+ fileId, urlLocation, auth, assignErr := fs.assignNewFileInfo(w, r, replication, collection, dataCenter, ttlString, fsync)
if assignErr != nil {
return nil, assignErr
}
// upload the chunk to the volume server
- chunkName := fileName + "_chunk_" + strconv.FormatInt(int64(len(fileChunks)+1), 10)
- uploadedSize, uploadErr := fs.doUpload(urlLocation, w, r, limitedReader, chunkName, "", fileId, auth)
+ uploadResult, uploadErr := fs.doUpload(urlLocation, w, r, limitedReader, fileName, contentType, nil, auth)
if uploadErr != nil {
return nil, uploadErr
}
// if last chunk exhausted the reader exactly at the border
- if uploadedSize == 0 {
+ if uploadResult.Size == 0 {
break
}
// Save to chunk manifest structure
- fileChunks = append(fileChunks,
- &filer_pb.FileChunk{
- FileId: fileId,
- Offset: chunkOffset,
- Size: uint64(uploadedSize),
- Mtime: time.Now().UnixNano(),
- },
- )
+ fileChunks = append(fileChunks, uploadResult.ToPbFileChunk(fileId, chunkOffset))
- glog.V(4).Infof("uploaded %s chunk %d to %s [%d,%d) of %d", fileName, len(fileChunks), fileId, chunkOffset, chunkOffset+int64(uploadedSize), contentLength)
+ glog.V(4).Infof("uploaded %s chunk %d to %s [%d,%d) of %d", fileName, len(fileChunks), fileId, chunkOffset, chunkOffset+int64(uploadResult.Size), contentLength)
+
+ // reset variables for the next chunk
+ chunkOffset = chunkOffset + int64(uploadResult.Size)
// if last chunk was not at full chunk size, but already exhausted the reader
- if uploadedSize < int64(chunkSize) {
+ if int64(uploadResult.Size) < int64(chunkSize) {
break
}
- // reset variables for the next chunk
- chunkOffset = chunkOffset + int64(uploadedSize)
}
path := r.URL.Path
@@ -142,7 +141,7 @@ func (fs *FilerServer) doAutoChunk(ctx context.Context, w http.ResponseWriter, r
glog.V(4).Infoln("saving", path)
entry := &filer2.Entry{
- FullPath: filer2.FullPath(path),
+ FullPath: util.FullPath(path),
Attr: filer2.Attr{
Mtime: time.Now(),
Crtime: time.Now(),
@@ -151,7 +150,9 @@ func (fs *FilerServer) doAutoChunk(ctx context.Context, w http.ResponseWriter, r
Gid: OS_GID,
Replication: replication,
Collection: collection,
- TtlSec: int32(util.ParseInt(r.URL.Query().Get("ttl"), 0)),
+ TtlSec: ttlSec,
+ Mime: contentType,
+ Md5: md5Hash.Sum(nil),
},
Chunks: fileChunks,
}
@@ -172,8 +173,7 @@ func (fs *FilerServer) doAutoChunk(ctx context.Context, w http.ResponseWriter, r
return
}
-func (fs *FilerServer) doUpload(urlLocation string, w http.ResponseWriter, r *http.Request,
- limitedReader io.Reader, fileName string, contentType string, fileId string, auth security.EncodedJwt) (size int64, err error) {
+func (fs *FilerServer) doUpload(urlLocation string, w http.ResponseWriter, r *http.Request, limitedReader io.Reader, fileName string, contentType string, pairMap map[string]string, auth security.EncodedJwt) (*operation.UploadResult, error) {
stats.FilerRequestCounter.WithLabelValues("postAutoChunkUpload").Inc()
start := time.Now()
@@ -181,9 +181,6 @@ func (fs *FilerServer) doUpload(urlLocation string, w http.ResponseWriter, r *ht
stats.FilerRequestHistogram.WithLabelValues("postAutoChunkUpload").Observe(time.Since(start).Seconds())
}()
- uploadResult, uploadError := operation.Upload(urlLocation, fileName, limitedReader, false, contentType, nil, auth)
- if uploadError != nil {
- return 0, uploadError
- }
- return int64(uploadResult.Size), nil
+ uploadResult, err, _ := operation.Upload(urlLocation, fileName, fs.option.Cipher, limitedReader, false, contentType, pairMap, auth)
+ return uploadResult, err
}
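
The chunked-upload rewrite computes a single MD5 over the whole stream while still cutting it into fixed-size chunks: io.TeeReader feeds every byte into the hash as io.LimitReader doles out one chunk at a time, and a short read marks the last chunk. A reduced sketch:

package main

import (
	"crypto/md5"
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

func main() {
	const chunkSize = 8
	src := strings.NewReader("0123456789abcdefXYZ")

	md5Hash := md5.New()
	reader := io.TeeReader(src, md5Hash) // every byte read also feeds the hash

	for chunk := 0; ; chunk++ {
		data, _ := ioutil.ReadAll(io.LimitReader(reader, chunkSize))
		if len(data) == 0 {
			break // the previous chunk ended exactly at the border
		}
		fmt.Printf("chunk %d: %q\n", chunk, data)
		if len(data) < chunkSize {
			break // short chunk: reader exhausted
		}
	}
	fmt.Printf("md5: %x\n", md5Hash.Sum(nil))
}
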
diff --git a/weed/server/filer_server_handlers_write_cipher.go b/weed/server/filer_server_handlers_write_cipher.go
new file mode 100644
index 000000000..bea72b2c1
--- /dev/null
+++ b/weed/server/filer_server_handlers_write_cipher.go
@@ -0,0 +1,89 @@
+package weed_server
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+ "strings"
+ "time"
+
+ "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/operation"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+// encrypt handles a single-chunk POST or PUT upload
+func (fs *FilerServer) encrypt(ctx context.Context, w http.ResponseWriter, r *http.Request,
+ replication string, collection string, dataCenter string, ttlSeconds int32, ttlString string, fsync bool) (filerResult *FilerPostResult, err error) {
+
+ fileId, urlLocation, auth, err := fs.assignNewFileInfo(w, r, replication, collection, dataCenter, ttlString, fsync)
+
+ if err != nil || fileId == "" || urlLocation == "" {
+ return nil, fmt.Errorf("fail to allocate volume for %s, collection:%s, datacenter:%s", r.URL.Path, collection, dataCenter)
+ }
+
+ glog.V(4).Infof("write %s to %v", r.URL.Path, urlLocation)
+
+ // Note: encrypt(gzip(data)): gzip the data first, then encrypt the result
+
+ sizeLimit := int64(fs.option.MaxMB) * 1024 * 1024
+
+ pu, err := needle.ParseUpload(r, sizeLimit)
+ if err != nil {
+ return nil, fmt.Errorf("parse upload: %v", err)
+ }
+ uncompressedData := pu.Data
+ if pu.IsGzipped {
+ uncompressedData = pu.UncompressedData
+ }
+ if pu.MimeType == "" {
+ pu.MimeType = http.DetectContentType(uncompressedData)
+ }
+
+ uploadResult, uploadError := operation.UploadData(urlLocation, pu.FileName, true, uncompressedData, false, pu.MimeType, pu.PairMap, auth)
+ if uploadError != nil {
+ return nil, fmt.Errorf("upload to volume server: %v", uploadError)
+ }
+
+ // Save to chunk manifest structure
+ fileChunks := []*filer_pb.FileChunk{uploadResult.ToPbFileChunk(fileId, 0)}
+
+ // fmt.Printf("uploaded: %+v\n", uploadResult)
+
+ path := r.URL.Path
+ if strings.HasSuffix(path, "/") {
+ if pu.FileName != "" {
+ path += pu.FileName
+ }
+ }
+
+ entry := &filer2.Entry{
+ FullPath: util.FullPath(path),
+ Attr: filer2.Attr{
+ Mtime: time.Now(),
+ Crtime: time.Now(),
+ Mode: 0660,
+ Uid: OS_UID,
+ Gid: OS_GID,
+ Replication: replication,
+ Collection: collection,
+ TtlSec: ttlSeconds,
+ Mime: pu.MimeType,
+ },
+ Chunks: fileChunks,
+ }
+
+ filerResult = &FilerPostResult{
+ Name: pu.FileName,
+ Size: int64(pu.OriginalDataSize),
+ }
+
+ if dbErr := fs.filer.CreateEntry(ctx, entry, false); dbErr != nil {
+ fs.filer.DeleteChunks(entry.Chunks)
+ err = dbErr
+ filerResult.Error = dbErr.Error()
+ return
+ }
+
+ return
+}
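
The new cipher path uploads with encryption enabled; per the note above, data is gzipped before it is encrypted, since ciphertext does not compress. A sketch of that ordering only; AES-GCM and the demo key are illustrative and say nothing about the actual weed cipher internals:

package main

import (
	"bytes"
	"compress/gzip"
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"fmt"
	"io"
)

// gzipThenEncrypt compresses the plaintext, then seals it; the reverse
// order would waste the compression pass on incompressible ciphertext.
func gzipThenEncrypt(plain, key []byte) ([]byte, error) {
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	if _, err := zw.Write(plain); err != nil {
		return nil, err
	}
	if err := zw.Close(); err != nil {
		return nil, err
	}
	block, err := aes.NewCipher(key)
	if err != nil {
		return nil, err
	}
	gcm, err := cipher.NewGCM(block)
	if err != nil {
		return nil, err
	}
	nonce := make([]byte, gcm.NonceSize())
	if _, err := io.ReadFull(rand.Reader, nonce); err != nil {
		return nil, err
	}
	return gcm.Seal(nonce, nonce, buf.Bytes(), nil), nil
}

func main() {
	key := bytes.Repeat([]byte{7}, 32) // demo key only
	out, err := gzipThenEncrypt([]byte("hello hello hello"), key)
	fmt.Println(len(out), err)
}
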
diff --git a/weed/server/filer_ui/breadcrumb.go b/weed/server/filer_ui/breadcrumb.go
index 2f0df7f91..f21cce7d1 100644
--- a/weed/server/filer_ui/breadcrumb.go
+++ b/weed/server/filer_ui/breadcrumb.go
@@ -1,8 +1,9 @@
package master_ui
import (
- "path/filepath"
"strings"
+
+ "github.com/chrislusf/seaweedfs/weed/util"
)
type Breadcrumb struct {
@@ -16,7 +17,7 @@ func ToBreadcrumb(fullpath string) (crumbs []Breadcrumb) {
for i := 0; i < len(parts); i++ {
crumb := Breadcrumb{
Name: parts[i] + " /",
- Link: "/" + filepath.ToSlash(filepath.Join(parts[0:i+1]...)),
+ Link: "/" + util.Join(parts[0:i+1]...),
}
if !strings.HasSuffix(crumb.Link, "/") {
crumb.Link += "/"
diff --git a/weed/server/master_grpc_server.go b/weed/server/master_grpc_server.go
index fcfd98f7b..1ee214deb 100644
--- a/weed/server/master_grpc_server.go
+++ b/weed/server/master_grpc_server.go
@@ -1,18 +1,20 @@
package weed_server
import (
+ "context"
"fmt"
"net"
"strings"
"time"
"github.com/chrislusf/raft"
+ "google.golang.org/grpc/peer"
+
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
"github.com/chrislusf/seaweedfs/weed/storage/backend"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
"github.com/chrislusf/seaweedfs/weed/topology"
- "google.golang.org/grpc/peer"
)
func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServer) error {
@@ -22,8 +24,10 @@ func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServ
defer func() {
if dn != nil {
- glog.V(0).Infof("unregister disconnected volume server %s:%d", dn.Ip, dn.Port)
+ // if the volume server disconnects and reconnects quickly
+ // the unregister and register can race with each other
t.UnRegisterDataNode(dn)
+ glog.V(0).Infof("unregister disconnected volume server %s:%d", dn.Ip, dn.Port)
message := &master_pb.VolumeLocation{
Url: dn.Url(),
@@ -61,14 +65,6 @@ func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServ
t.Sequence.SetMax(heartbeat.MaxFileKey)
if dn == nil {
- if heartbeat.Ip == "" {
- if pr, ok := peer.FromContext(stream.Context()); ok {
- if pr.Addr != net.Addr(nil) {
- heartbeat.Ip = pr.Addr.String()[0:strings.LastIndex(pr.Addr.String(), ":")]
- glog.V(0).Infof("remote IP address is detected as %v", heartbeat.Ip)
- }
- }
- }
dcName, rackName := t.Configuration.Locate(heartbeat.Ip, heartbeat.DataCenter, heartbeat.Rack)
dc := t.GetOrCreateDataCenter(dcName)
rack := dc.GetOrCreateRack(rackName)
@@ -87,6 +83,11 @@ func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServ
}
}
+ if heartbeat.MaxVolumeCount != 0 && dn.GetMaxVolumeCount() != int64(heartbeat.MaxVolumeCount) {
+ delta := int64(heartbeat.MaxVolumeCount) - dn.GetMaxVolumeCount()
+ dn.UpAdjustMaxVolumeCountDelta(delta)
+ }
+
glog.V(4).Infof("master received heartbeat %s", heartbeat.String())
message := &master_pb.VolumeLocation{
Url: dn.Url(),
@@ -189,35 +190,13 @@ func (ms *MasterServer) KeepConnected(stream master_pb.Seaweed_KeepConnectedServ
return ms.informNewLeader(stream)
}
- // remember client address
- ctx := stream.Context()
- // fmt.Printf("FromContext %+v\n", ctx)
- pr, ok := peer.FromContext(ctx)
- if !ok {
- glog.Error("failed to get peer from ctx")
- return fmt.Errorf("failed to get peer from ctx")
- }
- if pr.Addr == net.Addr(nil) {
- glog.Error("failed to get peer address")
- return fmt.Errorf("failed to get peer address")
- }
+ peerAddress := findClientAddress(stream.Context(), req.GrpcPort)
- clientName := req.Name + pr.Addr.String()
- glog.V(0).Infof("+ client %v", clientName)
-
- messageChan := make(chan *master_pb.VolumeLocation)
stopChan := make(chan bool)
- ms.clientChansLock.Lock()
- ms.clientChans[clientName] = messageChan
- ms.clientChansLock.Unlock()
+ clientName, messageChan := ms.addClient(req.Name, peerAddress)
- defer func() {
- glog.V(0).Infof("- client %v", clientName)
- ms.clientChansLock.Lock()
- delete(ms.clientChans, clientName)
- ms.clientChansLock.Unlock()
- }()
+ defer ms.deleteClient(clientName)
for _, message := range ms.Topo.ToVolumeLocations() {
if err := stream.Send(message); err != nil {
@@ -253,7 +232,6 @@ func (ms *MasterServer) KeepConnected(stream master_pb.Seaweed_KeepConnectedServ
}
}
- return nil
}
func (ms *MasterServer) informNewLeader(stream master_pb.Seaweed_KeepConnectedServer) error {
@@ -269,3 +247,57 @@ func (ms *MasterServer) informNewLeader(stream master_pb.Seaweed_KeepConnectedSe
}
return nil
}
+
+func (ms *MasterServer) addClient(clientType string, clientAddress string) (clientName string, messageChan chan *master_pb.VolumeLocation) {
+ clientName = clientType + "@" + clientAddress
+ glog.V(0).Infof("+ client %v", clientName)
+
+ messageChan = make(chan *master_pb.VolumeLocation)
+
+ ms.clientChansLock.Lock()
+ ms.clientChans[clientName] = messageChan
+ ms.clientChansLock.Unlock()
+ return
+}
+
+func (ms *MasterServer) deleteClient(clientName string) {
+ glog.V(0).Infof("- client %v", clientName)
+ ms.clientChansLock.Lock()
+ delete(ms.clientChans, clientName)
+ ms.clientChansLock.Unlock()
+}
+
+func findClientAddress(ctx context.Context, grpcPort uint32) string {
+ // fmt.Printf("FromContext %+v\n", ctx)
+ pr, ok := peer.FromContext(ctx)
+ if !ok {
+ glog.Error("failed to get peer from ctx")
+ return ""
+ }
+ if pr.Addr == net.Addr(nil) {
+ glog.Error("failed to get peer address")
+ return ""
+ }
+ if grpcPort == 0 {
+ return pr.Addr.String()
+ }
+ if tcpAddr, ok := pr.Addr.(*net.TCPAddr); ok {
+ externalIP := tcpAddr.IP
+ return fmt.Sprintf("%s:%d", externalIP, grpcPort)
+ }
+ return pr.Addr.String()
+
+}
+
+func (ms *MasterServer) ListMasterClients(ctx context.Context, req *master_pb.ListMasterClientsRequest) (*master_pb.ListMasterClientsResponse, error) {
+ resp := &master_pb.ListMasterClientsResponse{}
+ ms.clientChansLock.RLock()
+ defer ms.clientChansLock.RUnlock()
+
+ for k := range ms.clientChans {
+ if strings.HasPrefix(k, req.ClientType+"@") {
+ resp.GrpcAddresses = append(resp.GrpcAddresses, k[len(req.ClientType)+1:])
+ }
+ }
+ return resp, nil
+}
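
The extracted addClient/deleteClient helpers key the channel registry by "type@host:port", which is what lets ListMasterClients recover client addresses by stripping the type prefix. The same convention in a standalone sketch:

package main

import (
	"fmt"
	"strings"
	"sync"
)

type registry struct {
	mu    sync.RWMutex
	chans map[string]chan string
}

func (r *registry) add(clientType, addr string) string {
	name := clientType + "@" + addr // the registry key convention
	r.mu.Lock()
	r.chans[name] = make(chan string)
	r.mu.Unlock()
	return name
}

func (r *registry) addressesOf(clientType string) (out []string) {
	r.mu.RLock()
	defer r.mu.RUnlock()
	for k := range r.chans {
		if strings.HasPrefix(k, clientType+"@") {
			out = append(out, k[len(clientType)+1:]) // strip "type@"
		}
	}
	return
}

func main() {
	r := &registry{chans: make(map[string]chan string)}
	r.add("filer", "10.0.0.5:18888")
	r.add("shell", "10.0.0.9:0")
	fmt.Println(r.addressesOf("filer")) // [10.0.0.5:18888]
}
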
diff --git a/weed/server/master_grpc_server_admin.go b/weed/server/master_grpc_server_admin.go
new file mode 100644
index 000000000..7e7dcb36b
--- /dev/null
+++ b/weed/server/master_grpc_server_admin.go
@@ -0,0 +1,138 @@
+package weed_server
+
+import (
+ "context"
+ "fmt"
+ "math/rand"
+ "sync"
+ "time"
+
+ "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+)
+
+/*
+How the exclusive lock works
+-----------
+
+Shell
+------
+When the shell locks, it
+ * leases an admin token (lockTime, token)
+ * starts a goroutine to renew the admin token periodically
+
+When the shell unlocks, it
+ * stops the renewal goroutine
+ * sends a release-lock request
+
+Master
+------
+The master maintains:
+ * randomNumber
+ * lastLockTime
+When the master receives a lease/renew request from the shell:
+ if lastLockTime is still fresh {
+   if this is a renewal and the token is valid {
+     // renew
+     generate the randomNumber => token
+     return
+   }
+   refuse
+   return
+ } else {
+   // fresh lease request
+   generate the randomNumber => token
+   return
+ }
+
+When the master receives a release-lock request from the shell,
+ it sets lastLockTime to zero.
+
+
+The volume server does not need to verify the token.
+This makes the lock/unlock optional, similar to what Go code usually does.
+
+*/
+
+const (
+ LockDuration = 10 * time.Second
+)
+
+type AdminLock struct {
+ accessSecret int64
+ accessLockTime time.Time
+}
+
+type AdminLocks struct {
+ locks map[string]*AdminLock
+ sync.RWMutex
+}
+
+func NewAdminLocks() *AdminLocks {
+ return &AdminLocks{
+ locks: make(map[string]*AdminLock),
+ }
+}
+
+func (locks *AdminLocks) isLocked(lockName string) bool {
+ locks.RLock()
+ defer locks.RUnlock()
+ adminLock, found := locks.locks[lockName]
+ if !found {
+ return false
+ }
+ return adminLock.accessLockTime.Add(LockDuration).After(time.Now())
+}
+
+func (locks *AdminLocks) isValidToken(lockName string, ts time.Time, token int64) bool {
+ locks.RLock()
+ defer locks.RUnlock()
+ adminLock, found := locks.locks[lockName]
+ if !found {
+ return false
+ }
+ return adminLock.accessLockTime.Equal(ts) && adminLock.accessSecret == token
+}
+
+func (locks *AdminLocks) generateToken(lockName string) (ts time.Time, token int64) {
+ locks.Lock()
+ defer locks.Unlock()
+ lock := &AdminLock{
+ accessSecret: rand.Int63(),
+ accessLockTime: time.Now(),
+ }
+ locks.locks[lockName] = lock
+ return lock.accessLockTime, lock.accessSecret
+}
+
+func (locks *AdminLocks) deleteLock(lockName string) {
+ locks.Lock()
+ defer locks.Unlock()
+ delete(locks.locks, lockName)
+}
+
+func (ms *MasterServer) LeaseAdminToken(ctx context.Context, req *master_pb.LeaseAdminTokenRequest) (*master_pb.LeaseAdminTokenResponse, error) {
+ resp := &master_pb.LeaseAdminTokenResponse{}
+
+ if ms.adminLocks.isLocked(req.LockName) {
+ if req.PreviousToken != 0 && ms.adminLocks.isValidToken(req.LockName, time.Unix(0, req.PreviousLockTime), req.PreviousToken) {
+ // for renew
+ ts, token := ms.adminLocks.generateToken(req.LockName)
+ resp.Token, resp.LockTsNs = token, ts.UnixNano()
+ return resp, nil
+ }
+ // refuse since still locked
+ return resp, fmt.Errorf("already locked")
+ }
+ // for fresh lease request
+ ts, token := ms.adminLocks.generateToken(req.LockName)
+ resp.Token, resp.LockTsNs = token, ts.UnixNano()
+ return resp, nil
+}
+
+func (ms *MasterServer) ReleaseAdminToken(ctx context.Context, req *master_pb.ReleaseAdminTokenRequest) (*master_pb.ReleaseAdminTokenResponse, error) {
+ resp := &master_pb.ReleaseAdminTokenResponse{}
+ if ms.adminLocks.isValidToken(req.LockName, time.Unix(0, req.PreviousLockTime), req.PreviousToken) {
+ ms.adminLocks.deleteLock(req.LockName)
+ }
+ return resp, nil
+}
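
The protocol described in the comment block above maps to a small client-side loop: lease once, renew on a timer strictly shorter than LockDuration, release on exit. A sketch against the generated master_pb client; the real shell implementation may differ in details such as the renewal period:

package example

import (
	"context"
	"time"

	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
)

// holdLock leases the named admin lock, renews it until ctx is canceled,
// then releases it. Zero PreviousToken on the first call means a fresh lease.
func holdLock(ctx context.Context, client master_pb.SeaweedClient, lockName string) error {
	var token, lockTsNs int64
	for {
		resp, err := client.LeaseAdminToken(ctx, &master_pb.LeaseAdminTokenRequest{
			LockName:         lockName,
			PreviousToken:    token,
			PreviousLockTime: lockTsNs,
		})
		if err != nil {
			return err // "already locked" or transport failure
		}
		token, lockTsNs = resp.Token, resp.LockTsNs

		select {
		case <-ctx.Done(): // caller is unlocking
			_, _ = client.ReleaseAdminToken(context.Background(), &master_pb.ReleaseAdminTokenRequest{
				LockName:         lockName,
				PreviousToken:    token,
				PreviousLockTime: lockTsNs,
			})
			return nil
		case <-time.After(3 * time.Second): // renew well inside the 10s LockDuration
		}
	}
}
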
diff --git a/weed/server/master_grpc_server_collection.go b/weed/server/master_grpc_server_collection.go
index f02b0f242..b92d6bcbe 100644
--- a/weed/server/master_grpc_server_collection.go
+++ b/weed/server/master_grpc_server_collection.go
@@ -4,6 +4,7 @@ import (
"context"
"github.com/chrislusf/raft"
+
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
@@ -57,8 +58,8 @@ func (ms *MasterServer) doDeleteNormalCollection(collectionName string) error {
}
for _, server := range collection.ListVolumeServers() {
- err := operation.WithVolumeServerClient(server.Url(), ms.grpcDialOption, func(ctx context.Context, client volume_server_pb.VolumeServerClient) error {
- _, deleteErr := client.DeleteCollection(ctx, &volume_server_pb.DeleteCollectionRequest{
+ err := operation.WithVolumeServerClient(server.Url(), ms.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error {
+ _, deleteErr := client.DeleteCollection(context.Background(), &volume_server_pb.DeleteCollectionRequest{
Collection: collectionName,
})
return deleteErr
@@ -77,8 +78,8 @@ func (ms *MasterServer) doDeleteEcCollection(collectionName string) error {
listOfEcServers := ms.Topo.ListEcServersByCollection(collectionName)
for _, server := range listOfEcServers {
- err := operation.WithVolumeServerClient(server, ms.grpcDialOption, func(ctx context.Context, client volume_server_pb.VolumeServerClient) error {
- _, deleteErr := client.DeleteCollection(ctx, &volume_server_pb.DeleteCollectionRequest{
+ err := operation.WithVolumeServerClient(server, ms.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error {
+ _, deleteErr := client.DeleteCollection(context.Background(), &volume_server_pb.DeleteCollectionRequest{
Collection: collectionName,
})
return deleteErr
diff --git a/weed/server/master_grpc_server_volume.go b/weed/server/master_grpc_server_volume.go
index 856c07890..282c75679 100644
--- a/weed/server/master_grpc_server_volume.go
+++ b/weed/server/master_grpc_server_volume.go
@@ -121,8 +121,10 @@ func (ms *MasterServer) Statistics(ctx context.Context, req *master_pb.Statistic
volumeLayout := ms.Topo.GetVolumeLayout(req.Collection, replicaPlacement, ttl)
stats := volumeLayout.Stats()
+ totalSize := ms.Topo.GetMaxVolumeCount() * int64(ms.option.VolumeSizeLimitMB) * 1024 * 1024
+
resp := &master_pb.StatisticsResponse{
- TotalSize: stats.TotalSize,
+ TotalSize: uint64(totalSize),
UsedSize: stats.UsedSize,
FileCount: stats.FileCount,
}
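
Statistics now reports capacity as the topology-wide max volume count times the configured volume size limit, independent of how full volumes are. The arithmetic, with example values (30000 MB is the usual volume size limit default):

package main

import "fmt"

func main() {
	maxVolumeCount := int64(100)      // from the topology
	volumeSizeLimitMB := int64(30000) // -volumeSizeLimitMB default
	totalSize := maxVolumeCount * volumeSizeLimitMB * 1024 * 1024
	fmt.Printf("capacity: %d bytes (~%.1f TiB)\n", totalSize, float64(totalSize)/(1<<40))
}
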
diff --git a/weed/server/master_server.go b/weed/server/master_server.go
index b3cc310e6..dc3ef9cfc 100644
--- a/weed/server/master_server.go
+++ b/weed/server/master_server.go
@@ -1,7 +1,6 @@
package weed_server
import (
- "context"
"fmt"
"net/http"
"net/http/httputil"
@@ -33,6 +32,7 @@ const (
)
type MasterOption struct {
+ Host string
Port int
MetaFolder string
VolumeSizeLimitMB uint
@@ -65,6 +65,8 @@ type MasterServer struct {
grpcDialOption grpc.DialOption
MasterClient *wdclient.MasterClient
+
+ adminLocks *AdminLocks
}
func NewMasterServer(r *mux.Router, option *MasterOption, peers []string) *MasterServer {
@@ -78,6 +80,9 @@ func NewMasterServer(r *mux.Router, option *MasterOption, peers []string) *Maste
v.SetDefault("jwt.signing.read.expires_after_seconds", 60)
readExpiresAfterSec := v.GetInt("jwt.signing.read.expires_after_seconds")
+ v.SetDefault("master.replication.treat_replication_as_minimums", false)
+ replicationAsMin := v.GetBool("master.replication.treat_replication_as_minimums")
+
var preallocateSize int64
if option.VolumePreallocate {
preallocateSize = int64(option.VolumeSizeLimitMB) * (1 << 20)
@@ -89,7 +94,8 @@ func NewMasterServer(r *mux.Router, option *MasterOption, peers []string) *Maste
preallocateSize: preallocateSize,
clientChans: make(map[string]chan *master_pb.VolumeLocation),
grpcDialOption: grpcDialOption,
- MasterClient: wdclient.NewMasterClient(context.Background(), grpcDialOption, "master", peers),
+ MasterClient: wdclient.NewMasterClient(grpcDialOption, "master", option.Host, 0, peers),
+ adminLocks: NewAdminLocks(),
}
ms.bounedLeaderChan = make(chan int, 16)
@@ -97,7 +103,7 @@ func NewMasterServer(r *mux.Router, option *MasterOption, peers []string) *Maste
if nil == seq {
glog.Fatalf("create sequencer failed.")
}
- ms.Topo = topology.NewTopology("topo", seq, uint64(ms.option.VolumeSizeLimitMB)*1024*1024, ms.option.PulseSeconds)
+ ms.Topo = topology.NewTopology("topo", seq, uint64(ms.option.VolumeSizeLimitMB)*1024*1024, ms.option.PulseSeconds, replicationAsMin)
ms.vg = topology.NewDefaultVolumeGrowth()
glog.V(0).Infoln("Volume Size Limit is", ms.option.VolumeSizeLimitMB, "MB")
@@ -115,9 +121,11 @@ func NewMasterServer(r *mux.Router, option *MasterOption, peers []string) *Maste
r.HandleFunc("/vol/status", ms.proxyToLeader(ms.guard.WhiteList(ms.volumeStatusHandler)))
r.HandleFunc("/vol/vacuum", ms.proxyToLeader(ms.guard.WhiteList(ms.volumeVacuumHandler)))
r.HandleFunc("/submit", ms.guard.WhiteList(ms.submitFromMasterServerHandler))
- r.HandleFunc("/stats/health", ms.guard.WhiteList(statsHealthHandler))
- r.HandleFunc("/stats/counter", ms.guard.WhiteList(statsCounterHandler))
- r.HandleFunc("/stats/memory", ms.guard.WhiteList(statsMemoryHandler))
+ /*
+ r.HandleFunc("/stats/health", ms.guard.WhiteList(statsHealthHandler))
+ r.HandleFunc("/stats/counter", ms.guard.WhiteList(statsCounterHandler))
+ r.HandleFunc("/stats/memory", ms.guard.WhiteList(statsMemoryHandler))
+ */
r.HandleFunc("/{fileId}", ms.redirectHandler)
}
@@ -148,7 +156,7 @@ func (ms *MasterServer) SetRaftServer(raftServer *RaftServer) {
}
}
-func (ms *MasterServer) proxyToLeader(f func(w http.ResponseWriter, r *http.Request)) func(w http.ResponseWriter, r *http.Request) {
+func (ms *MasterServer) proxyToLeader(f http.HandlerFunc) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
if ms.Topo.IsLeader() {
f(w, r)
@@ -193,10 +201,14 @@ func (ms *MasterServer) startAdminScripts() {
v.SetDefault("master.maintenance.sleep_minutes", 17)
sleepMinutes := v.GetInt("master.maintenance.sleep_minutes")
- v.SetDefault("master.filer.default_filer_url", "http://localhost:8888/")
- filerURL := v.GetString("master.filer.default_filer_url")
+ v.SetDefault("master.filer.default", "localhost:8888")
+ filerHostPort := v.GetString("master.filer.default")
scriptLines := strings.Split(adminScripts, "\n")
+ if !strings.Contains(adminScripts, "lock") {
+ scriptLines = append(append([]string{}, "lock"), scriptLines...)
+ scriptLines = append(scriptLines, "unlock")
+ }
masterAddress := "localhost:" + strconv.Itoa(ms.option.Port)
@@ -204,9 +216,10 @@ func (ms *MasterServer) startAdminScripts() {
shellOptions.GrpcDialOption = security.LoadClientTLS(v, "grpc.master")
shellOptions.Masters = &masterAddress
- shellOptions.FilerHost, shellOptions.FilerPort, shellOptions.Directory, err = util.ParseFilerUrl(filerURL)
+ shellOptions.FilerHost, shellOptions.FilerPort, err = util.ParseHostPort(filerHostPort)
+ shellOptions.Directory = "/"
if err != nil {
- glog.V(0).Infof("failed to parse master.filer.default_filer_urll=%s : %v\n", filerURL, err)
+ glog.V(0).Infof("failed to parse master.filer.default = %s : %v\n", filerHostPort, err)
return
}
@@ -220,27 +233,11 @@ func (ms *MasterServer) startAdminScripts() {
commandEnv.MasterClient.WaitUntilConnected()
c := time.Tick(time.Duration(sleepMinutes) * time.Minute)
- for _ = range c {
+ for range c {
if ms.Topo.IsLeader() {
for _, line := range scriptLines {
-
- cmds := reg.FindAllString(line, -1)
- if len(cmds) == 0 {
- continue
- }
- args := make([]string, len(cmds[1:]))
- for i := range args {
- args[i] = strings.Trim(string(cmds[1+i]), "\"'")
- }
- cmd := strings.ToLower(cmds[0])
-
- for _, c := range shell.Commands {
- if c.Name() == cmd {
- glog.V(0).Infof("executing: %s %v", cmd, args)
- if err := c.Do(args, commandEnv, os.Stdout); err != nil {
- glog.V(0).Infof("error: %v", err)
- }
- }
+ for _, c := range strings.Split(line, ";") {
+ processEachCmd(reg, c, commandEnv)
}
}
}
@@ -248,6 +245,27 @@ func (ms *MasterServer) startAdminScripts() {
}()
}
+func processEachCmd(reg *regexp.Regexp, line string, commandEnv *shell.CommandEnv) {
+ cmds := reg.FindAllString(line, -1)
+ if len(cmds) == 0 {
+ return
+ }
+ args := make([]string, len(cmds[1:]))
+ for i := range args {
+ args[i] = strings.Trim(string(cmds[1+i]), "\"'")
+ }
+ cmd := strings.ToLower(cmds[0])
+
+ for _, c := range shell.Commands {
+ if c.Name() == cmd {
+ glog.V(0).Infof("executing: %s %v", cmd, args)
+ if err := c.Do(args, commandEnv, os.Stdout); err != nil {
+ glog.V(0).Infof("error: %v", err)
+ }
+ }
+ }
+}
+
func (ms *MasterServer) createSequencer(option *MasterOption) sequence.Sequencer {
var seq sequence.Sequencer
v := util.GetViper()
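
startAdminScripts now brackets the configured maintenance script with lock/unlock whenever the script does not take the shell lock itself, and each line may carry several ';'-separated commands, each handed to processEachCmd. The preprocessing in isolation, with an example script:

package main

import (
	"fmt"
	"strings"
)

// prepare mirrors the wrapping above: prepend "lock" and append "unlock"
// unless the script already mentions the lock.
func prepare(adminScripts string) []string {
	scriptLines := strings.Split(adminScripts, "\n")
	if !strings.Contains(adminScripts, "lock") {
		scriptLines = append(append([]string{}, "lock"), scriptLines...)
		scriptLines = append(scriptLines, "unlock")
	}
	return scriptLines
}

func main() {
	script := "ec.encode -fullPercent=95; ec.rebuild -force\nvolume.balance -force"
	for _, line := range prepare(script) {
		for _, c := range strings.Split(line, ";") { // one command per segment
			fmt.Printf("cmd: %q\n", strings.TrimSpace(c))
		}
	}
}
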
diff --git a/weed/server/master_server_handlers.go b/weed/server/master_server_handlers.go
index 514d86800..ebcb7efd2 100644
--- a/weed/server/master_server_handlers.go
+++ b/weed/server/master_server_handlers.go
@@ -45,7 +45,7 @@ func (ms *MasterServer) dirLookupHandler(w http.ResponseWriter, r *http.Request)
vid = fileId[0:commaSep]
}
}
- collection := r.FormValue("collection") //optional, but can be faster if too many collections
+ collection := r.FormValue("collection") // optional, but can be faster if too many collections
location := ms.findVolumeLocation(collection, vid)
httpStatus := http.StatusOK
if location.Error != "" || location.Locations == nil {
@@ -72,9 +72,6 @@ func (ms *MasterServer) findVolumeLocation(collection, vid string) operation.Loo
for _, loc := range machines {
locations = append(locations, operation.Location{Url: loc.Url(), PublicUrl: loc.PublicUrl})
}
- if locations == nil {
- err = fmt.Errorf("volume id %s not found", vid)
- }
}
} else {
machines, getVidLocationsErr := ms.MasterClient.GetVidLocations(vid)
@@ -83,6 +80,9 @@ func (ms *MasterServer) findVolumeLocation(collection, vid string) operation.Loo
}
err = getVidLocationsErr
}
+ if len(locations) == 0 && err == nil {
+ err = fmt.Errorf("volume id %s not found", vid)
+ }
ret := operation.LookupResult{
VolumeId: vid,
Locations: locations,
diff --git a/weed/server/master_server_handlers_admin.go b/weed/server/master_server_handlers_admin.go
index 44a04cb86..7595c0171 100644
--- a/weed/server/master_server_handlers_admin.go
+++ b/weed/server/master_server_handlers_admin.go
@@ -25,8 +25,8 @@ func (ms *MasterServer) collectionDeleteHandler(w http.ResponseWriter, r *http.R
return
}
for _, server := range collection.ListVolumeServers() {
- err := operation.WithVolumeServerClient(server.Url(), ms.grpcDialOption, func(ctx context.Context, client volume_server_pb.VolumeServerClient) error {
- _, deleteErr := client.DeleteCollection(ctx, &volume_server_pb.DeleteCollectionRequest{
+ err := operation.WithVolumeServerClient(server.Url(), ms.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error {
+ _, deleteErr := client.DeleteCollection(context.Background(), &volume_server_pb.DeleteCollectionRequest{
Collection: collection.Name,
})
return deleteErr
@@ -44,7 +44,7 @@ func (ms *MasterServer) collectionDeleteHandler(w http.ResponseWriter, r *http.R
func (ms *MasterServer) dirStatusHandler(w http.ResponseWriter, r *http.Request) {
m := make(map[string]interface{})
- m["Version"] = util.VERSION
+ m["Version"] = util.Version()
m["Topology"] = ms.Topo.ToMap()
writeJsonQuiet(w, r, http.StatusOK, m)
}
@@ -61,7 +61,7 @@ func (ms *MasterServer) volumeVacuumHandler(w http.ResponseWriter, r *http.Reque
return
}
}
- glog.Infoln("garbageThreshold =", gcThreshold)
+ // glog.Infoln("garbageThreshold =", gcThreshold)
ms.Topo.Vacuum(ms.grpcDialOption, gcThreshold, ms.preallocateSize)
ms.dirStatusHandler(w, r)
}
@@ -93,7 +93,7 @@ func (ms *MasterServer) volumeGrowHandler(w http.ResponseWriter, r *http.Request
func (ms *MasterServer) volumeStatusHandler(w http.ResponseWriter, r *http.Request) {
m := make(map[string]interface{})
- m["Version"] = util.VERSION
+ m["Version"] = util.Version()
m["Volumes"] = ms.Topo.ToVolumeMap()
writeJsonQuiet(w, r, http.StatusOK, m)
}
diff --git a/weed/server/master_server_handlers_ui.go b/weed/server/master_server_handlers_ui.go
index f241df87f..9cd58158b 100644
--- a/weed/server/master_server_handlers_ui.go
+++ b/weed/server/master_server_handlers_ui.go
@@ -2,6 +2,7 @@ package weed_server
import (
"net/http"
+ "time"
"github.com/chrislusf/raft"
ui "github.com/chrislusf/seaweedfs/weed/server/master_ui"
@@ -11,7 +12,7 @@ import (
func (ms *MasterServer) uiStatusHandler(w http.ResponseWriter, r *http.Request) {
infos := make(map[string]interface{})
- infos["Version"] = util.VERSION
+ infos["Up Time"] = time.Now().Sub(startTime).String()
args := struct {
Version string
Topology interface{}
@@ -19,7 +20,7 @@ func (ms *MasterServer) uiStatusHandler(w http.ResponseWriter, r *http.Request)
Stats map[string]interface{}
Counters *stats.ServerStats
}{
- util.VERSION,
+ util.Version(),
ms.Topo.ToMap(),
ms.Topo.RaftServer,
infos,
diff --git a/weed/server/master_ui/templates.go b/weed/server/master_ui/templates.go
index b674e3f82..7189064d0 100644
--- a/weed/server/master_ui/templates.go
+++ b/weed/server/master_ui/templates.go
@@ -76,6 +76,7 @@ var StatusTpl = template.Must(template.New("status").Parse(`
Rack
RemoteAddr
#Volumes
+ Volume Ids
#ErasureCodingShards
Max
@@ -89,6 +90,7 @@ var StatusTpl = template.Must(template.New("status").Parse(`
{{ $rack.Id }}
{{ $dn.Url }}
{{ $dn.Volumes }}
+ {{ $dn.VolumeIds }}
{{ $dn.EcShards }}
{{ $dn.Max }}
diff --git a/weed/server/raft_server.go b/weed/server/raft_server.go
index 53289f1c1..0381c7feb 100644
--- a/weed/server/raft_server.go
+++ b/weed/server/raft_server.go
@@ -2,8 +2,6 @@ package weed_server
import (
"encoding/json"
- "github.com/chrislusf/seaweedfs/weed/util"
- "google.golang.org/grpc"
"io/ioutil"
"os"
"path"
@@ -11,7 +9,12 @@ import (
"sort"
"time"
+ "google.golang.org/grpc"
+
+ "github.com/chrislusf/seaweedfs/weed/pb"
+
"github.com/chrislusf/raft"
+
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/topology"
)
@@ -61,7 +64,7 @@ func NewRaftServer(grpcDialOption grpc.DialOption, peers []string, serverAddr, d
s.raftServer.Start()
for _, peer := range s.peers {
- s.raftServer.AddPeer(peer, util.ServerToGrpcAddress(peer))
+ s.raftServer.AddPeer(peer, pb.ServerToGrpcAddress(peer))
}
s.GrpcServer = raft.NewGrpcServer(s.raftServer)
@@ -72,7 +75,7 @@ func NewRaftServer(grpcDialOption grpc.DialOption, peers []string, serverAddr, d
_, err := s.raftServer.Do(&raft.DefaultJoinCommand{
Name: s.raftServer.Name(),
- ConnectionString: util.ServerToGrpcAddress(s.serverAddr),
+ ConnectionString: pb.ServerToGrpcAddress(s.serverAddr),
})
if err != nil {
diff --git a/weed/server/volume_grpc_admin.go b/weed/server/volume_grpc_admin.go
index 43987b748..27b21ac09 100644
--- a/weed/server/volume_grpc_admin.go
+++ b/weed/server/volume_grpc_admin.go
@@ -3,9 +3,11 @@ package weed_server
import (
"context"
"fmt"
+ "path/filepath"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
+ "github.com/chrislusf/seaweedfs/weed/stats"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
"github.com/chrislusf/seaweedfs/weed/storage/super_block"
)
@@ -148,3 +150,19 @@ func (vs *VolumeServer) VolumeMarkReadonly(ctx context.Context, req *volume_serv
return resp, err
}
+
+func (vs *VolumeServer) VolumeServerStatus(ctx context.Context, req *volume_server_pb.VolumeServerStatusRequest) (*volume_server_pb.VolumeServerStatusResponse, error) {
+
+ resp := &volume_server_pb.VolumeServerStatusResponse{}
+
+ for _, loc := range vs.store.Locations {
+ if dir, e := filepath.Abs(loc.Directory); e == nil {
+ resp.DiskStatuses = append(resp.DiskStatuses, stats.NewDiskStatus(dir))
+ }
+ }
+
+ resp.MemoryStatus = stats.MemStat()
+
+ return resp, nil
+
+}
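
A usage sketch for the new status RPC, using the refactored WithVolumeServerClient callback signature (no ctx parameter) introduced elsewhere in this diff. The DiskStatus field names are assumed from the volume_server proto; treat them as illustrative:

package example

import (
	"context"
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/operation"
	"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
	"google.golang.org/grpc"
)

// printVolumeServerStatus queries one volume server for its per-directory
// disk usage via the VolumeServerStatus RPC added above.
func printVolumeServerStatus(volumeServerURL string, dialOpt grpc.DialOption) error {
	return operation.WithVolumeServerClient(volumeServerURL, dialOpt, func(client volume_server_pb.VolumeServerClient) error {
		resp, err := client.VolumeServerStatus(context.Background(), &volume_server_pb.VolumeServerStatusRequest{})
		if err != nil {
			return err
		}
		for _, ds := range resp.DiskStatuses {
			fmt.Printf("dir %s: %d/%d bytes used\n", ds.Dir, ds.Used, ds.All)
		}
		return nil
	})
}
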
diff --git a/weed/server/volume_grpc_batch_delete.go b/weed/server/volume_grpc_batch_delete.go
index fdb7937d2..501964191 100644
--- a/weed/server/volume_grpc_batch_delete.go
+++ b/weed/server/volume_grpc_batch_delete.go
@@ -8,6 +8,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
+ "github.com/chrislusf/seaweedfs/weed/storage/types"
)
func (vs *VolumeServer) BatchDelete(ctx context.Context, req *volume_server_pb.BatchDeleteRequest) (*volume_server_pb.BatchDeleteResponse, error) {
@@ -28,16 +29,34 @@ func (vs *VolumeServer) BatchDelete(ctx context.Context, req *volume_server_pb.B
n := new(needle.Needle)
volumeId, _ := needle.NewVolumeId(vid)
- n.ParsePath(id_cookie)
-
- cookie := n.Cookie
- if _, err := vs.store.ReadVolumeNeedle(volumeId, n); err != nil {
- resp.Results = append(resp.Results, &volume_server_pb.DeleteResult{
- FileId: fid,
- Status: http.StatusNotFound,
- Error: err.Error(),
- })
- continue
+ if req.SkipCookieCheck {
+ n.Id, err = types.ParseNeedleId(id_cookie)
+ if err != nil {
+ resp.Results = append(resp.Results, &volume_server_pb.DeleteResult{
+ FileId: fid,
+ Status: http.StatusBadRequest,
+ Error: err.Error()})
+ continue
+ }
+ } else {
+ n.ParsePath(id_cookie)
+ cookie := n.Cookie
+ if _, err := vs.store.ReadVolumeNeedle(volumeId, n); err != nil {
+ resp.Results = append(resp.Results, &volume_server_pb.DeleteResult{
+ FileId: fid,
+ Status: http.StatusNotFound,
+ Error: err.Error(),
+ })
+ continue
+ }
+ if n.Cookie != cookie {
+ resp.Results = append(resp.Results, &volume_server_pb.DeleteResult{
+ FileId: fid,
+ Status: http.StatusBadRequest,
+ Error: "File Random Cookie does not match.",
+ })
+ break
+ }
}
if n.IsChunkedManifest() {
@@ -49,14 +68,6 @@ func (vs *VolumeServer) BatchDelete(ctx context.Context, req *volume_server_pb.B
continue
}
- if n.Cookie != cookie {
- resp.Results = append(resp.Results, &volume_server_pb.DeleteResult{
- FileId: fid,
- Status: http.StatusBadRequest,
- Error: "File Random Cookie does not match.",
- })
- break
- }
n.LastModified = now
if size, err := vs.store.DeleteVolumeNeedle(volumeId, n); err != nil {
resp.Results = append(resp.Results, &volume_server_pb.DeleteResult{
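Note: BatchDelete gains a SkipCookieCheck fast path: the needle id is parsed directly and the stored cookie is never compared, while the default path still reads the needle and rejects a mismatched cookie. A small sketch of the two branches, with needleRecord as a hypothetical stand-in for a stored needle:

```go
package main

import (
	"errors"
	"fmt"
)

// needleRecord is a hypothetical stand-in for a stored needle: an id plus
// the random cookie assigned at write time.
type needleRecord struct {
	ID     uint64
	Cookie uint32
}

// checkDelete sketches the branch above: with skipCookieCheck the caller is
// trusted (the id was already parsed), otherwise the stored cookie must
// match the one embedded in the requested file id.
func checkDelete(stored needleRecord, reqCookie uint32, skipCookieCheck bool) error {
	if skipCookieCheck {
		return nil
	}
	if stored.Cookie != reqCookie {
		return errors.New("File Random Cookie does not match.")
	}
	return nil
}

func main() {
	n := needleRecord{ID: 1, Cookie: 0xBEEF}
	fmt.Println(checkDelete(n, 0xBEEF, false)) // <nil>
	fmt.Println(checkDelete(n, 0xDEAD, false)) // cookie mismatch
	fmt.Println(checkDelete(n, 0xDEAD, true))  // <nil>: check skipped
}
```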
diff --git a/weed/server/volume_grpc_client_to_master.go b/weed/server/volume_grpc_client_to_master.go
index dc47c2884..7cb836344 100644
--- a/weed/server/volume_grpc_client_to_master.go
+++ b/weed/server/volume_grpc_client_to_master.go
@@ -7,6 +7,7 @@ import (
"google.golang.org/grpc"
+ "github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/storage/backend"
"github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
@@ -34,15 +35,18 @@ func (vs *VolumeServer) heartbeat() {
for {
for _, master := range vs.SeedMasterNodes {
if newLeader != "" {
+ // the new leader may actually be the same master
+ // need to wait a bit before adding itself
+ time.Sleep(3 * time.Second)
master = newLeader
}
- masterGrpcAddress, parseErr := util.ParseServerToGrpcAddress(master)
+ masterGrpcAddress, parseErr := pb.ParseServerToGrpcAddress(master)
if parseErr != nil {
glog.V(0).Infof("failed to parse master grpc %v: %v", masterGrpcAddress, parseErr)
continue
}
vs.store.MasterAddress = master
- newLeader, err = vs.doHeartbeat(context.Background(), master, masterGrpcAddress, grpcDialOption, time.Duration(vs.pulseSeconds)*time.Second)
+ newLeader, err = vs.doHeartbeat(master, masterGrpcAddress, grpcDialOption, time.Duration(vs.pulseSeconds)*time.Second)
if err != nil {
glog.V(0).Infof("heartbeat error: %v", err)
time.Sleep(time.Duration(vs.pulseSeconds) * time.Second)
@@ -53,16 +57,16 @@ func (vs *VolumeServer) heartbeat() {
}
}
-func (vs *VolumeServer) doHeartbeat(ctx context.Context, masterNode, masterGrpcAddress string, grpcDialOption grpc.DialOption, sleepInterval time.Duration) (newLeader string, err error) {
+func (vs *VolumeServer) doHeartbeat(masterNode, masterGrpcAddress string, grpcDialOption grpc.DialOption, sleepInterval time.Duration) (newLeader string, err error) {
- grpcConection, err := util.GrpcDial(ctx, masterGrpcAddress, grpcDialOption)
+ grpcConection, err := pb.GrpcDial(context.Background(), masterGrpcAddress, grpcDialOption)
if err != nil {
return "", fmt.Errorf("fail to dial %s : %v", masterNode, err)
}
defer grpcConection.Close()
client := master_pb.NewSeaweedClient(grpcConection)
- stream, err := client.SendHeartbeat(ctx)
+ stream, err := client.SendHeartbeat(context.Background())
if err != nil {
glog.V(0).Infof("SendHeartbeat to %s: %v", masterNode, err)
return "", err
@@ -79,8 +83,13 @@ func (vs *VolumeServer) doHeartbeat(ctx context.Context, masterNode, masterGrpcA
doneChan <- err
return
}
- if in.GetVolumeSizeLimit() != 0 {
+ if in.GetVolumeSizeLimit() != 0 && vs.store.GetVolumeSizeLimit() != in.GetVolumeSizeLimit() {
vs.store.SetVolumeSizeLimit(in.GetVolumeSizeLimit())
+ if vs.store.MaybeAdjustVolumeMax() {
+ if err = stream.Send(vs.store.CollectHeartbeat()); err != nil {
+ glog.V(0).Infof("Volume Server Failed to talk with master %s: %v", masterNode, err)
+ }
+ }
}
if in.GetLeader() != "" && masterNode != in.GetLeader() && !isSameIP(in.GetLeader(), masterNode) {
glog.V(0).Infof("Volume Server found a new master newLeader: %v instead of %v", in.GetLeader(), masterNode)
diff --git a/weed/server/volume_grpc_copy.go b/weed/server/volume_grpc_copy.go
index 6d74f8171..5c7d5572c 100644
--- a/weed/server/volume_grpc_copy.go
+++ b/weed/server/volume_grpc_copy.go
@@ -6,7 +6,6 @@ import (
"io"
"math"
"os"
- "path"
"time"
"github.com/chrislusf/seaweedfs/weed/glog"
@@ -25,7 +24,20 @@ func (vs *VolumeServer) VolumeCopy(ctx context.Context, req *volume_server_pb.Vo
v := vs.store.GetVolume(needle.VolumeId(req.VolumeId))
if v != nil {
- return nil, fmt.Errorf("volume %d already exists", req.VolumeId)
+
+ glog.V(0).Infof("volume %d already exists. deleted before copying...", req.VolumeId)
+
+ err := vs.store.UnmountVolume(needle.VolumeId(req.VolumeId))
+ if err != nil {
+ return nil, fmt.Errorf("failed to mount existing volume %d: %v", req.VolumeId, err)
+ }
+
+ err = vs.store.DeleteVolume(needle.VolumeId(req.VolumeId))
+ if err != nil {
+ return nil, fmt.Errorf("failed to delete existing volume %d: %v", req.VolumeId, err)
+ }
+
+ glog.V(0).Infof("deleted exisitng volume %d before copying.", req.VolumeId)
}
location := vs.store.FindFreeLocation()
@@ -41,9 +53,9 @@ func (vs *VolumeServer) VolumeCopy(ctx context.Context, req *volume_server_pb.Vo
// confirm size and timestamp
var volFileInfoResp *volume_server_pb.ReadVolumeFileStatusResponse
var volumeFileName, idxFileName, datFileName string
- err := operation.WithVolumeServerClient(req.SourceDataNode, vs.grpcDialOption, func(ctx context.Context, client volume_server_pb.VolumeServerClient) error {
+ err := operation.WithVolumeServerClient(req.SourceDataNode, vs.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error {
var err error
- volFileInfoResp, err = client.ReadVolumeFileStatus(ctx,
+ volFileInfoResp, err = client.ReadVolumeFileStatus(context.Background(),
&volume_server_pb.ReadVolumeFileStatusRequest{
VolumeId: req.VolumeId,
})
@@ -55,30 +67,38 @@ func (vs *VolumeServer) VolumeCopy(ctx context.Context, req *volume_server_pb.Vo
// println("source:", volFileInfoResp.String())
// copy ecx file
- if err := vs.doCopyFile(ctx, client, false, req.Collection, req.VolumeId, volFileInfoResp.CompactionRevision, volFileInfoResp.IdxFileSize, volumeFileName, ".idx", false, false); err != nil {
+ if err := vs.doCopyFile(client, false, req.Collection, req.VolumeId, volFileInfoResp.CompactionRevision, volFileInfoResp.IdxFileSize, volumeFileName, ".idx", false, false); err != nil {
return err
}
- if err := vs.doCopyFile(ctx, client, false, req.Collection, req.VolumeId, volFileInfoResp.CompactionRevision, volFileInfoResp.DatFileSize, volumeFileName, ".dat", false, true); err != nil {
+ if err := vs.doCopyFile(client, false, req.Collection, req.VolumeId, volFileInfoResp.CompactionRevision, volFileInfoResp.DatFileSize, volumeFileName, ".dat", false, true); err != nil {
return err
}
- if err := vs.doCopyFile(ctx, client, false, req.Collection, req.VolumeId, volFileInfoResp.CompactionRevision, volFileInfoResp.DatFileSize, volumeFileName, ".vif", false, true); err != nil {
+ if err := vs.doCopyFile(client, false, req.Collection, req.VolumeId, volFileInfoResp.CompactionRevision, volFileInfoResp.DatFileSize, volumeFileName, ".vif", false, true); err != nil {
return err
}
return nil
})
+ if err != nil {
+ return nil, err
+ }
+ if volumeFileName == "" {
+ return nil, fmt.Errorf("not found volume %d file", req.VolumeId)
+ }
+
idxFileName = volumeFileName + ".idx"
datFileName = volumeFileName + ".dat"
- if err != nil && volumeFileName != "" {
- os.Remove(idxFileName)
- os.Remove(datFileName)
- os.Remove(volumeFileName + ".vif")
- return nil, err
- }
+ defer func() {
+ if err != nil && volumeFileName != "" {
+ os.Remove(idxFileName)
+ os.Remove(datFileName)
+ os.Remove(volumeFileName + ".vif")
+ }
+ }()
if err = checkCopyFiles(volFileInfoResp, idxFileName, datFileName); err != nil { // added by panyc16
return nil, err
@@ -95,10 +115,9 @@ func (vs *VolumeServer) VolumeCopy(ctx context.Context, req *volume_server_pb.Vo
}, err
}
-func (vs *VolumeServer) doCopyFile(ctx context.Context, client volume_server_pb.VolumeServerClient, isEcVolume bool, collection string, vid uint32,
- compactRevision uint32, stopOffset uint64, baseFileName, ext string, isAppend bool, ignoreSourceFileNotFound bool) error {
+func (vs *VolumeServer) doCopyFile(client volume_server_pb.VolumeServerClient, isEcVolume bool, collection string, vid, compactRevision uint32, stopOffset uint64, baseFileName, ext string, isAppend, ignoreSourceFileNotFound bool) error {
- copyFileClient, err := client.CopyFile(ctx, &volume_server_pb.CopyFileRequest{
+ copyFileClient, err := client.CopyFile(context.Background(), &volume_server_pb.CopyFileRequest{
VolumeId: vid,
Ext: ext,
CompactionRevision: compactRevision,
@@ -209,7 +228,7 @@ func (vs *VolumeServer) CopyFile(req *volume_server_pb.CopyFileRequest, stream v
} else {
baseFileName := erasure_coding.EcShardBaseFileName(req.Collection, int(req.VolumeId)) + req.Ext
for _, location := range vs.store.Locations {
- tName := path.Join(location.Directory, baseFileName)
+ tName := util.Join(location.Directory, baseFileName)
if util.FileExists(tName) {
fileName = tName
}
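Note: VolumeCopy now defers the cleanup of partially copied .idx/.dat/.vif files instead of removing them inline, so any later failure in the function also triggers the cleanup. A minimal, self-contained illustration of that defer-on-error pattern using throwaway file names:

```go
package main

import (
	"errors"
	"fmt"
	"os"
)

// copyVolumeFiles sketches the cleanup pattern above: a deferred closure
// inspects the named return err and removes partial copies on failure.
func copyVolumeFiles(baseName string) (err error) {
	idx, dat := baseName+".idx", baseName+".dat"

	defer func() {
		if err != nil {
			// best-effort cleanup of partially copied files
			os.Remove(idx)
			os.Remove(dat)
			os.Remove(baseName + ".vif")
		}
	}()

	if err = os.WriteFile(idx, []byte("index"), 0644); err != nil {
		return err
	}
	// simulate a failure after the first file was written
	return errors.New("copy interrupted")
}

func main() {
	err := copyVolumeFiles("v42")
	fmt.Println("copy:", err)
	_, statErr := os.Stat("v42.idx")
	fmt.Println("idx removed:", os.IsNotExist(statErr))
}
```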
diff --git a/weed/server/volume_grpc_erasure_coding.go b/weed/server/volume_grpc_erasure_coding.go
index 256e7c447..66dd5bf8d 100644
--- a/weed/server/volume_grpc_erasure_coding.go
+++ b/weed/server/volume_grpc_erasure_coding.go
@@ -106,11 +106,11 @@ func (vs *VolumeServer) VolumeEcShardsCopy(ctx context.Context, req *volume_serv
baseFileName := storage.VolumeFileName(location.Directory, req.Collection, int(req.VolumeId))
- err := operation.WithVolumeServerClient(req.SourceDataNode, vs.grpcDialOption, func(ctx context.Context, client volume_server_pb.VolumeServerClient) error {
+ err := operation.WithVolumeServerClient(req.SourceDataNode, vs.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error {
// copy ec data slices
for _, shardId := range req.ShardIds {
- if err := vs.doCopyFile(ctx, client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, baseFileName, erasure_coding.ToExt(int(shardId)), false, false); err != nil {
+ if err := vs.doCopyFile(client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, baseFileName, erasure_coding.ToExt(int(shardId)), false, false); err != nil {
return err
}
}
@@ -118,7 +118,7 @@ func (vs *VolumeServer) VolumeEcShardsCopy(ctx context.Context, req *volume_serv
if req.CopyEcxFile {
// copy ecx file
- if err := vs.doCopyFile(ctx, client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, baseFileName, ".ecx", false, false); err != nil {
+ if err := vs.doCopyFile(client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, baseFileName, ".ecx", false, false); err != nil {
return err
}
return nil
@@ -126,14 +126,14 @@ func (vs *VolumeServer) VolumeEcShardsCopy(ctx context.Context, req *volume_serv
if req.CopyEcjFile {
// copy ecj file
- if err := vs.doCopyFile(ctx, client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, baseFileName, ".ecj", true, true); err != nil {
+ if err := vs.doCopyFile(client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, baseFileName, ".ecj", true, true); err != nil {
return err
}
}
if req.CopyVifFile {
// copy vif file
- if err := vs.doCopyFile(ctx, client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, baseFileName, ".vif", false, true); err != nil {
+ if err := vs.doCopyFile(client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, baseFileName, ".vif", false, true); err != nil {
return err
}
}
diff --git a/weed/server/volume_grpc_file.go b/weed/server/volume_grpc_file.go
new file mode 100644
index 000000000..4d71ddeb1
--- /dev/null
+++ b/weed/server/volume_grpc_file.go
@@ -0,0 +1,129 @@
+package weed_server
+
+import (
+ "encoding/json"
+ "net/http"
+ "strings"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+func (vs *VolumeServer) FileGet(req *volume_server_pb.FileGetRequest, stream volume_server_pb.VolumeServer_FileGetServer) error {
+
+ headResponse := &volume_server_pb.FileGetResponse{}
+ n := new(needle.Needle)
+
+ commaIndex := strings.LastIndex(req.FileId, ",")
+ vid := req.FileId[:commaIndex]
+ fid := req.FileId[commaIndex+1:]
+
+ volumeId, err := needle.NewVolumeId(vid)
+ if err != nil {
+ headResponse.ErrorCode = http.StatusBadRequest
+ return stream.Send(headResponse)
+ }
+ err = n.ParsePath(fid)
+ if err != nil {
+ headResponse.ErrorCode = http.StatusBadRequest
+ return stream.Send(headResponse)
+ }
+
+ hasVolume := vs.store.HasVolume(volumeId)
+ _, hasEcVolume := vs.store.FindEcVolume(volumeId)
+
+ if !hasVolume && !hasEcVolume {
+ headResponse.ErrorCode = http.StatusMovedPermanently
+ return stream.Send(headResponse)
+ }
+
+ cookie := n.Cookie
+ var count int
+ if hasVolume {
+ count, err = vs.store.ReadVolumeNeedle(volumeId, n)
+ } else if hasEcVolume {
+ count, err = vs.store.ReadEcShardNeedle(volumeId, n)
+ }
+
+ if err != nil || count < 0 {
+ headResponse.ErrorCode = http.StatusNotFound
+ return stream.Send(headResponse)
+ }
+ if n.Cookie != cookie {
+ headResponse.ErrorCode = http.StatusNotFound
+ return stream.Send(headResponse)
+ }
+
+ if n.LastModified != 0 {
+ headResponse.LastModified = n.LastModified
+ }
+
+ headResponse.Etag = n.Etag()
+
+ if n.HasPairs() {
+ pairMap := make(map[string]string)
+ err = json.Unmarshal(n.Pairs, &pairMap)
+ if err != nil {
+ glog.V(0).Infoln("Unmarshal pairs error:", err)
+ }
+ headResponse.Headers = pairMap
+ }
+
+ /*
+ // skip this, no redirection
+ if vs.tryHandleChunkedFile(n, filename, w, r) {
+ return
+ }
+ */
+
+ if n.NameSize > 0 {
+ headResponse.Filename = string(n.Name)
+ }
+ mtype := ""
+ if n.MimeSize > 0 {
+ mt := string(n.Mime)
+ if !strings.HasPrefix(mt, "application/octet-stream") {
+ mtype = mt
+ }
+ }
+ headResponse.ContentType = mtype
+
+ headResponse.IsGzipped = n.IsGzipped()
+
+ if n.IsGzipped() && req.AcceptGzip {
+ if n.Data, err = util.UnGzipData(n.Data); err != nil {
+ glog.V(0).Infof("ungzip %s error: %v", req.FileId, err)
+ }
+ }
+
+ headResponse.ContentLength = uint32(len(n.Data))
+ bytesToRead := len(n.Data)
+ bytesRead := 0
+
+ t := headResponse
+
+ for bytesRead < bytesToRead {
+
+ stopIndex := bytesRead + BufferSizeLimit
+ if stopIndex > bytesToRead {
+ stopIndex = bytesToRead
+ }
+
+ if t == nil {
+ t = &volume_server_pb.FileGetResponse{}
+ }
+ t.Data = n.Data[bytesRead:stopIndex]
+
+ err = stream.Send(t)
+ t = nil
+ if err != nil {
+ return err
+ }
+
+ bytesRead = stopIndex
+ }
+
+ return nil
+}
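Note: FileGet streams the needle body in BufferSizeLimit-sized messages and reuses the header response as the first data frame. A compact sketch of that send loop, where response is a hypothetical stand-in for volume_server_pb.FileGetResponse and the buffer size is tiny so the chunking is visible:

```go
package main

import "fmt"

const bufferSizeLimit = 4 // the server uses a much larger constant

// response is a hypothetical stand-in for volume_server_pb.FileGetResponse.
type response struct {
	Filename string // metadata, only populated on the first message
	Data     []byte
}

// streamData sketches the FileGet loop: the first chunk rides on the header
// response, later chunks go out in fresh messages with no metadata.
func streamData(head *response, data []byte, send func(*response) error) error {
	t := head
	for read := 0; read < len(data); {
		stop := read + bufferSizeLimit
		if stop > len(data) {
			stop = len(data)
		}
		if t == nil {
			t = &response{}
		}
		t.Data = data[read:stop]
		if err := send(t); err != nil {
			return err
		}
		t = nil
		read = stop
	}
	return nil
}

func main() {
	head := &response{Filename: "hello.txt"}
	_ = streamData(head, []byte("hello gRPC streaming"), func(r *response) error {
		fmt.Printf("msg filename=%q data=%q\n", r.Filename, r.Data)
		return nil
	})
}
```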
diff --git a/weed/server/volume_grpc_tail.go b/weed/server/volume_grpc_tail.go
index c26d6ed8f..2dde5b69c 100644
--- a/weed/server/volume_grpc_tail.go
+++ b/weed/server/volume_grpc_tail.go
@@ -90,7 +90,7 @@ func (vs *VolumeServer) VolumeTailReceiver(ctx context.Context, req *volume_serv
defer glog.V(1).Infof("receive tailing volume %d finished", v.Id)
return resp, operation.TailVolumeFromSource(req.SourceVolumeServer, vs.grpcDialOption, v.Id, req.SinceNs, int(req.IdleTimeoutSeconds), func(n *needle.Needle) error {
- _, _, err := vs.store.WriteVolumeNeedle(v.Id, n)
+ _, err := vs.store.WriteVolumeNeedle(v.Id, n, false)
return err
})
diff --git a/weed/server/volume_grpc_vacuum.go b/weed/server/volume_grpc_vacuum.go
index 24f982241..b87de4b5b 100644
--- a/weed/server/volume_grpc_vacuum.go
+++ b/weed/server/volume_grpc_vacuum.go
@@ -51,6 +51,11 @@ func (vs *VolumeServer) VacuumVolumeCommit(ctx context.Context, req *volume_serv
} else {
glog.V(1).Infof("commit volume %d", req.VolumeId)
}
+ if err == nil {
+ if vs.store.GetVolume(needle.VolumeId(req.VolumeId)).IsReadOnly() {
+ resp.IsReadOnly = true
+ }
+ }
return resp, err
diff --git a/weed/server/volume_server.go b/weed/server/volume_server.go
index 0fdcf662a..2d716edc1 100644
--- a/weed/server/volume_server.go
+++ b/weed/server/volume_server.go
@@ -77,9 +77,11 @@ func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string,
// only expose the volume server details for safe environments
adminMux.HandleFunc("/ui/index.html", vs.uiStatusHandler)
adminMux.HandleFunc("/status", vs.guard.WhiteList(vs.statusHandler))
- adminMux.HandleFunc("/stats/counter", vs.guard.WhiteList(statsCounterHandler))
- adminMux.HandleFunc("/stats/memory", vs.guard.WhiteList(statsMemoryHandler))
- adminMux.HandleFunc("/stats/disk", vs.guard.WhiteList(vs.statsDiskHandler))
+ /*
+ adminMux.HandleFunc("/stats/counter", vs.guard.WhiteList(statsCounterHandler))
+ adminMux.HandleFunc("/stats/memory", vs.guard.WhiteList(statsMemoryHandler))
+ adminMux.HandleFunc("/stats/disk", vs.guard.WhiteList(vs.statsDiskHandler))
+ */
}
adminMux.HandleFunc("/", vs.privateStoreHandler)
if publicMux != adminMux {
diff --git a/weed/server/volume_server_handlers_admin.go b/weed/server/volume_server_handlers_admin.go
index 1938a34c4..34655d833 100644
--- a/weed/server/volume_server_handlers_admin.go
+++ b/weed/server/volume_server_handlers_admin.go
@@ -11,14 +11,21 @@ import (
func (vs *VolumeServer) statusHandler(w http.ResponseWriter, r *http.Request) {
m := make(map[string]interface{})
- m["Version"] = util.VERSION
+ m["Version"] = util.Version()
+ var ds []*volume_server_pb.DiskStatus
+ for _, loc := range vs.store.Locations {
+ if dir, e := filepath.Abs(loc.Directory); e == nil {
+ ds = append(ds, stats.NewDiskStatus(dir))
+ }
+ }
+ m["DiskStatuses"] = ds
m["Volumes"] = vs.store.VolumeInfos()
writeJsonQuiet(w, r, http.StatusOK, m)
}
func (vs *VolumeServer) statsDiskHandler(w http.ResponseWriter, r *http.Request) {
m := make(map[string]interface{})
- m["Version"] = util.VERSION
+ m["Version"] = util.Version()
var ds []*volume_server_pb.DiskStatus
for _, loc := range vs.store.Locations {
if dir, e := filepath.Abs(loc.Directory); e == nil {
diff --git a/weed/server/volume_server_handlers_read.go b/weed/server/volume_server_handlers_read.go
index d89d13a0d..19b459136 100644
--- a/weed/server/volume_server_handlers_read.go
+++ b/weed/server/volume_server_handlers_read.go
@@ -2,21 +2,18 @@ package weed_server
import (
"bytes"
- "context"
+ "encoding/json"
"errors"
"fmt"
"io"
"mime"
- "mime/multipart"
"net/http"
"net/url"
- "path"
+ "path/filepath"
"strconv"
"strings"
"time"
- "encoding/json"
-
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/images"
"github.com/chrislusf/seaweedfs/weed/operation"
@@ -43,13 +40,13 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
volumeId, err := needle.NewVolumeId(vid)
if err != nil {
- glog.V(2).Infoln("parsing error:", err, r.URL.Path)
+ glog.V(2).Infof("parsing vid %s: %v", r.URL.Path, err)
w.WriteHeader(http.StatusBadRequest)
return
}
err = n.ParsePath(fid)
if err != nil {
- glog.V(2).Infoln("parsing fid error:", err, r.URL.Path)
+ glog.V(2).Infof("parsing fid %s: %v", r.URL.Path, err)
w.WriteHeader(http.StatusBadRequest)
return
}
@@ -86,7 +83,7 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
if hasVolume {
count, err = vs.store.ReadVolumeNeedle(volumeId, n)
} else if hasEcVolume {
- count, err = vs.store.ReadEcShardNeedle(context.Background(), volumeId, n)
+ count, err = vs.store.ReadEcShardNeedle(volumeId, n)
}
// glog.V(4).Infoln("read bytes", count, "error", err)
if err != nil || count < 0 {
@@ -114,11 +111,7 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
w.WriteHeader(http.StatusNotModified)
return
}
- if r.Header.Get("ETag-MD5") == "True" {
- setEtag(w, n.MD5())
- } else {
- setEtag(w, n.Etag())
- }
+ setEtag(w, n.Etag())
if n.HasPairs() {
pairMap := make(map[string]string)
@@ -131,14 +124,14 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
}
}
- if vs.tryHandleChunkedFile(n, filename, w, r) {
+ if vs.tryHandleChunkedFile(n, filename, ext, w, r) {
return
}
if n.NameSize > 0 && filename == "" {
filename = string(n.Name)
if ext == "" {
- ext = path.Ext(filename)
+ ext = filepath.Ext(filename)
}
}
mtype := ""
@@ -152,7 +145,13 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
if ext != ".gz" {
if n.IsGzipped() {
if strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
- w.Header().Set("Content-Encoding", "gzip")
+ if _, _, _, shouldResize := shouldResizeImages(ext, r); shouldResize {
+ if n.Data, err = util.UnGzipData(n.Data); err != nil {
+ glog.V(0).Infoln("ungzip error:", err, r.URL.Path)
+ }
+ } else {
+ w.Header().Set("Content-Encoding", "gzip")
+ }
} else {
if n.Data, err = util.UnGzipData(n.Data); err != nil {
glog.V(0).Infoln("ungzip error:", err, r.URL.Path)
@@ -168,7 +167,7 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
}
}
-func (vs *VolumeServer) tryHandleChunkedFile(n *needle.Needle, fileName string, w http.ResponseWriter, r *http.Request) (processed bool) {
+func (vs *VolumeServer) tryHandleChunkedFile(n *needle.Needle, fileName string, ext string, w http.ResponseWriter, r *http.Request) (processed bool) {
if !n.IsChunkedManifest() || r.URL.Query().Get("cm") == "false" {
return false
}
@@ -182,7 +181,9 @@ func (vs *VolumeServer) tryHandleChunkedFile(n *needle.Needle, fileName string,
fileName = chunkManifest.Name
}
- ext := path.Ext(fileName)
+ if ext == "" {
+ ext = filepath.Ext(fileName)
+ }
mType := ""
if chunkManifest.Mime != "" {
@@ -194,10 +195,7 @@ func (vs *VolumeServer) tryHandleChunkedFile(n *needle.Needle, fileName string,
w.Header().Set("X-File-Store", "chunked")
- chunkedFileReader := &operation.ChunkedFileReader{
- Manifest: chunkManifest,
- Master: vs.GetMaster(),
- }
+ chunkedFileReader := operation.NewChunkedFileReader(chunkManifest.Chunks, vs.GetMaster())
defer chunkedFileReader.Close()
rs := conditionallyResizeImages(chunkedFileReader, ext, r)
@@ -210,132 +208,56 @@ func (vs *VolumeServer) tryHandleChunkedFile(n *needle.Needle, fileName string,
func conditionallyResizeImages(originalDataReaderSeeker io.ReadSeeker, ext string, r *http.Request) io.ReadSeeker {
rs := originalDataReaderSeeker
+
+ width, height, mode, shouldResize := shouldResizeImages(ext, r)
+ if shouldResize {
+ rs, _, _ = images.Resized(ext, originalDataReaderSeeker, width, height, mode)
+ }
+ return rs
+}
+
+func shouldResizeImages(ext string, r *http.Request) (width, height int, mode string, shouldResize bool) {
if len(ext) > 0 {
ext = strings.ToLower(ext)
}
if ext == ".png" || ext == ".jpg" || ext == ".jpeg" || ext == ".gif" {
- width, height := 0, 0
if r.FormValue("width") != "" {
width, _ = strconv.Atoi(r.FormValue("width"))
}
if r.FormValue("height") != "" {
height, _ = strconv.Atoi(r.FormValue("height"))
}
- rs, _, _ = images.Resized(ext, originalDataReaderSeeker, width, height, r.FormValue("mode"))
}
- return rs
+ mode = r.FormValue("mode")
+ shouldResize = width > 0 || height > 0
+ return
}
func writeResponseContent(filename, mimeType string, rs io.ReadSeeker, w http.ResponseWriter, r *http.Request) error {
totalSize, e := rs.Seek(0, 2)
if mimeType == "" {
- if ext := path.Ext(filename); ext != "" {
+ if ext := filepath.Ext(filename); ext != "" {
mimeType = mime.TypeByExtension(ext)
}
}
if mimeType != "" {
w.Header().Set("Content-Type", mimeType)
}
- if filename != "" {
- contentDisposition := "inline"
- if r.FormValue("dl") != "" {
- if dl, _ := strconv.ParseBool(r.FormValue("dl")); dl {
- contentDisposition = "attachment"
- }
- }
- w.Header().Set("Content-Disposition", contentDisposition+`; filename="`+fileNameEscaper.Replace(filename)+`"`)
- }
w.Header().Set("Accept-Ranges", "bytes")
+
if r.Method == "HEAD" {
w.Header().Set("Content-Length", strconv.FormatInt(totalSize, 10))
return nil
}
- rangeReq := r.Header.Get("Range")
- if rangeReq == "" {
- w.Header().Set("Content-Length", strconv.FormatInt(totalSize, 10))
- if _, e = rs.Seek(0, 0); e != nil {
+
+ adjustHeadersAfterHEAD(w, r, filename)
+
+ processRangeRequest(r, w, totalSize, mimeType, func(writer io.Writer, offset int64, size int64) error {
+ if _, e = rs.Seek(offset, 0); e != nil {
return e
}
- _, e = io.Copy(w, rs)
+ _, e = io.CopyN(writer, rs, size)
return e
- }
-
- //the rest is dealing with partial content request
- //mostly copy from src/pkg/net/http/fs.go
- ranges, err := parseRange(rangeReq, totalSize)
- if err != nil {
- http.Error(w, err.Error(), http.StatusRequestedRangeNotSatisfiable)
- return nil
- }
- if sumRangesSize(ranges) > totalSize {
- // The total number of bytes in all the ranges
- // is larger than the size of the file by
- // itself, so this is probably an attack, or a
- // dumb client. Ignore the range request.
- return nil
- }
- if len(ranges) == 0 {
- return nil
- }
- if len(ranges) == 1 {
- // RFC 2616, Section 14.16:
- // "When an HTTP message includes the content of a single
- // range (for example, a response to a request for a
- // single range, or to a request for a set of ranges
- // that overlap without any holes), this content is
- // transmitted with a Content-Range header, and a
- // Content-Length header showing the number of bytes
- // actually transferred.
- // ...
- // A response to a request for a single range MUST NOT
- // be sent using the multipart/byteranges media type."
- ra := ranges[0]
- w.Header().Set("Content-Length", strconv.FormatInt(ra.length, 10))
- w.Header().Set("Content-Range", ra.contentRange(totalSize))
- w.WriteHeader(http.StatusPartialContent)
- if _, e = rs.Seek(ra.start, 0); e != nil {
- return e
- }
-
- _, e = io.CopyN(w, rs, ra.length)
- return e
- }
- // process multiple ranges
- for _, ra := range ranges {
- if ra.start > totalSize {
- http.Error(w, "Out of Range", http.StatusRequestedRangeNotSatisfiable)
- return nil
- }
- }
- sendSize := rangesMIMESize(ranges, mimeType, totalSize)
- pr, pw := io.Pipe()
- mw := multipart.NewWriter(pw)
- w.Header().Set("Content-Type", "multipart/byteranges; boundary="+mw.Boundary())
- sendContent := pr
- defer pr.Close() // cause writing goroutine to fail and exit if CopyN doesn't finish.
- go func() {
- for _, ra := range ranges {
- part, e := mw.CreatePart(ra.mimeHeader(mimeType, totalSize))
- if e != nil {
- pw.CloseWithError(e)
- return
- }
- if _, e = rs.Seek(ra.start, 0); e != nil {
- pw.CloseWithError(e)
- return
- }
- if _, e = io.CopyN(part, rs, ra.length); e != nil {
- pw.CloseWithError(e)
- return
- }
- }
- mw.Close()
- pw.Close()
- }()
- if w.Header().Get("Content-Encoding") == "" {
- w.Header().Set("Content-Length", strconv.FormatInt(sendSize, 10))
- }
- w.WriteHeader(http.StatusPartialContent)
- _, e = io.CopyN(w, sendContent, sendSize)
- return e
+ })
+ return nil
}
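Note: the resize decision is factored out into shouldResizeImages so the gzip branch and conditionallyResizeImages can share it. A functionally equivalent, runnable sketch of the extracted predicate:

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"strconv"
	"strings"
)

// shouldResizeImages mirrors the predicate above: only image extensions are
// considered, and a resize is requested when a positive width or height
// query parameter is present.
func shouldResizeImages(ext string, r *http.Request) (width, height int, mode string, shouldResize bool) {
	ext = strings.ToLower(ext)
	if ext == ".png" || ext == ".jpg" || ext == ".jpeg" || ext == ".gif" {
		width, _ = strconv.Atoi(r.FormValue("width"))
		height, _ = strconv.Atoi(r.FormValue("height"))
	}
	mode = r.FormValue("mode")
	shouldResize = width > 0 || height > 0
	return
}

func main() {
	r := httptest.NewRequest("GET", "/pic.jpg?width=100&mode=fit", nil)
	w, h, mode, ok := shouldResizeImages(".jpg", r)
	fmt.Println(w, h, mode, ok) // 100 0 fit true
}
```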
diff --git a/weed/server/volume_server_handlers_ui.go b/weed/server/volume_server_handlers_ui.go
index 8d35c9c8b..8b2027e7b 100644
--- a/weed/server/volume_server_handlers_ui.go
+++ b/weed/server/volume_server_handlers_ui.go
@@ -40,7 +40,7 @@ func (vs *VolumeServer) uiStatusHandler(w http.ResponseWriter, r *http.Request)
Stats interface{}
Counters *stats.ServerStats
}{
- util.VERSION,
+ util.Version(),
vs.SeedMasterNodes,
normalVolumeInfos,
vs.store.EcVolumes(),
diff --git a/weed/server/volume_server_handlers_write.go b/weed/server/volume_server_handlers_write.go
index cd35255e5..9a00dcc29 100644
--- a/weed/server/volume_server_handlers_write.go
+++ b/weed/server/volume_server_handlers_write.go
@@ -1,7 +1,6 @@
package weed_server
import (
- "context"
"errors"
"fmt"
"net/http"
@@ -43,18 +42,19 @@ func (vs *VolumeServer) PostHandler(w http.ResponseWriter, r *http.Request) {
return
}
- needle, originalSize, ne := needle.CreateNeedleFromRequest(r, vs.FixJpgOrientation, vs.fileSizeLimitBytes)
+ reqNeedle, originalSize, ne := needle.CreateNeedleFromRequest(r, vs.FixJpgOrientation, vs.fileSizeLimitBytes)
if ne != nil {
writeJsonError(w, r, http.StatusBadRequest, ne)
return
}
ret := operation.UploadResult{}
- _, isUnchanged, writeError := topology.ReplicatedWrite(vs.GetMaster(), vs.store, volumeId, needle, r)
+ isUnchanged, writeError := topology.ReplicatedWrite(vs.GetMaster(), vs.store, volumeId, reqNeedle, r)
- // http 304 status code does not allow body
+ // http 204 status code does not allow body
if writeError == nil && isUnchanged {
- w.WriteHeader(http.StatusNotModified)
+ setEtag(w, reqNeedle.Etag())
+ w.WriteHeader(http.StatusNoContent)
return
}
@@ -63,11 +63,12 @@ func (vs *VolumeServer) PostHandler(w http.ResponseWriter, r *http.Request) {
httpStatus = http.StatusInternalServerError
ret.Error = writeError.Error()
}
- if needle.HasName() {
- ret.Name = string(needle.Name)
+ if reqNeedle.HasName() {
+ ret.Name = string(reqNeedle.Name)
}
ret.Size = uint32(originalSize)
- ret.ETag = needle.Etag()
+ ret.ETag = reqNeedle.Etag()
+ ret.Mime = string(reqNeedle.Mime)
setEtag(w, ret.ETag)
writeJsonQuiet(w, r, httpStatus, ret)
}
@@ -97,7 +98,7 @@ func (vs *VolumeServer) DeleteHandler(w http.ResponseWriter, r *http.Request) {
ecVolume, hasEcVolume := vs.store.FindEcVolume(volumeId)
if hasEcVolume {
- count, err := vs.store.DeleteEcShardNeedle(context.Background(), ecVolume, n, cookie)
+ count, err := vs.store.DeleteEcShardNeedle(ecVolume, n, cookie)
writeDeleteResult(err, count, w, r)
return
}
@@ -125,7 +126,7 @@ func (vs *VolumeServer) DeleteHandler(w http.ResponseWriter, r *http.Request) {
return
}
// make sure all chunks had deleted before delete manifest
- if e := chunkManifest.DeleteChunks(vs.GetMaster(), vs.grpcDialOption); e != nil {
+ if e := chunkManifest.DeleteChunks(vs.GetMaster(), false, vs.grpcDialOption); e != nil {
writeJsonError(w, r, http.StatusInternalServerError, fmt.Errorf("Delete chunks error: %v", e))
return
}
@@ -165,3 +166,11 @@ func setEtag(w http.ResponseWriter, etag string) {
}
}
}
+
+func getEtag(resp *http.Response) (etag string) {
+ etag = resp.Header.Get("ETag")
+ if strings.HasPrefix(etag, "\"") && strings.HasSuffix(etag, "\"") {
+ return etag[1 : len(etag)-1]
+ }
+ return
+}
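Note: the new getEtag helper strips the surrounding double quotes that HTTP ETag header values normally carry. It can be exercised directly:

```go
package main

import (
	"fmt"
	"net/http"
	"strings"
)

// getEtag matches the helper above: ETag values are usually sent quoted
// ("1a2b3c"), so strip the quotes before comparing against computed etags.
func getEtag(resp *http.Response) (etag string) {
	etag = resp.Header.Get("ETag")
	if strings.HasPrefix(etag, "\"") && strings.HasSuffix(etag, "\"") {
		return etag[1 : len(etag)-1]
	}
	return
}

func main() {
	resp := &http.Response{Header: http.Header{"Etag": []string{`"1a2b3c"`}}}
	fmt.Println(getEtag(resp)) // 1a2b3c
}
```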
diff --git a/weed/server/volume_server_ui/templates.go b/weed/server/volume_server_ui/templates.go
index 81496b1de..8705bc088 100644
--- a/weed/server/volume_server_ui/templates.go
+++ b/weed/server/volume_server_ui/templates.go
@@ -1,11 +1,17 @@
package master_ui
import (
+ "fmt"
+ "github.com/chrislusf/seaweedfs/weed/util"
"html/template"
"strconv"
"strings"
)
+func percentFrom(total uint64, part_of uint64) string {
+ return fmt.Sprintf("%.2f", (float64(part_of)/float64(total))*100)
+}
+
func join(data []int64) string {
var ret []string
for _, d := range data {
@@ -15,7 +21,9 @@ func join(data []int64) string {
}
var funcMap = template.FuncMap{
- "join": join,
+ "join": join,
+ "bytesToHumanReadable": util.BytesToHumanReadable,
+ "percentFrom": percentFrom,
}
var StatusTpl = template.Must(template.New("status").Funcs(funcMap).Parse(`
@@ -57,13 +65,25 @@ var StatusTpl = template.Must(template.New("status").Funcs(funcMap).Parse(`
Disk Stats
-          <table class="table table-condensed table-striped">
+          <table class="table table-striped">
+            <thead>
+            <tr>
+              <th>Path</th>
+              <th>Total</th>
+              <th>Free</th>
+              <th>Usage</th>
+            </tr>
+            </thead>
+            <tbody>
            {{ range .DiskStatuses }}
-            <tr><td>{{ .Dir }}</td>
-              <td>{{ .Free }} Bytes Free</td></tr>
+            <tr><td>{{ .Dir }}</td>
+              <td>{{ bytesToHumanReadable .All }}</td>
+              <td>{{ bytesToHumanReadable .Free }}</td>
+              <td>{{ percentFrom .All .Used}}%</td></tr>
            {{ end }}
+            </tbody>
@@ -119,9 +139,9 @@ var StatusTpl = template.Must(template.New("status").Funcs(funcMap).Parse(`
{{ .Id }}
{{ .Collection }}
- {{ .Size }} Bytes
+ {{ bytesToHumanReadable .Size }}
{{ .FileCount }}
- {{ .DeleteCount }} / {{.DeletedByteCount}} Bytes
+ {{ .DeleteCount }} / {{bytesToHumanReadable .DeletedByteCount}}
{{ .Ttl }}
{{ .ReadOnly }}
@@ -149,9 +169,9 @@ var StatusTpl = template.Must(template.New("status").Funcs(funcMap).Parse(`
{{ .Id }}
{{ .Collection }}
- {{ .Size }} Bytes
+ {{ bytesToHumanReadable .Size }}
{{ .FileCount }}
- {{ .DeleteCount }} / {{.DeletedByteCount}} Bytes
+ {{ .DeleteCount }} / {{bytesToHumanReadable .DeletedByteCount}}
{{ .RemoteStorageName }}
{{ .RemoteStorageKey }}
@@ -177,7 +197,7 @@ var StatusTpl = template.Must(template.New("status").Funcs(funcMap).Parse(`
{{ .VolumeId }}
{{ .Collection }}
- {{ .ShardSize }} Bytes
+ {{ bytesToHumanReadable .ShardSize }}
{{ .ShardIdList }}
{{ .CreatedAt.Format "02 Jan 06 15:04 -0700" }}
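Note: the template now registers bytesToHumanReadable and percentFrom in its FuncMap so sizes render as human-readable units and disk usage as a percentage. A self-contained sketch of how those helpers plug into html/template, where bytesToHumanReadable below is a hypothetical stand-in for util.BytesToHumanReadable:

```go
package main

import (
	"fmt"
	"html/template"
	"os"
)

// percentFrom matches the helper above: part as a percentage of total.
func percentFrom(total uint64, partOf uint64) string {
	return fmt.Sprintf("%.2f", (float64(partOf)/float64(total))*100)
}

// bytesToHumanReadable is a stand-in for util.BytesToHumanReadable.
func bytesToHumanReadable(b uint64) string {
	const unit = 1024
	if b < unit {
		return fmt.Sprintf("%d B", b)
	}
	div, exp := uint64(unit), 0
	for n := b / unit; n >= unit; n /= unit {
		div *= unit
		exp++
	}
	return fmt.Sprintf("%.1f %ciB", float64(b)/float64(div), "KMGTPE"[exp])
}

func main() {
	funcMap := template.FuncMap{
		"bytesToHumanReadable": bytesToHumanReadable,
		"percentFrom":          percentFrom,
	}
	tpl := template.Must(template.New("disk").Funcs(funcMap).Parse(
		`{{ bytesToHumanReadable .All }} total, {{ percentFrom .All .Used }}% used` + "\n"))
	_ = tpl.Execute(os.Stdout, map[string]uint64{"All": 1 << 30, "Used": 1 << 29})
}
```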
diff --git a/weed/server/webdav_server.go b/weed/server/webdav_server.go
index d75869f30..37c4afd5c 100644
--- a/weed/server/webdav_server.go
+++ b/weed/server/webdav_server.go
@@ -1,21 +1,24 @@
package weed_server
import (
- "bytes"
"context"
"fmt"
"io"
+ "math"
"os"
"path"
"strings"
"time"
+ "github.com/chrislusf/seaweedfs/weed/util/grace"
"golang.org/x/net/webdav"
"google.golang.org/grpc"
"github.com/chrislusf/seaweedfs/weed/operation"
+ "github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
+ "github.com/chrislusf/seaweedfs/weed/util/chunk_cache"
"github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/glog"
@@ -31,6 +34,9 @@ type WebDavOption struct {
Collection string
Uid uint32
Gid uint32
+ Cipher bool
+ CacheDir string
+ CacheSizeMB int64
}
type WebDavServer struct {
@@ -64,6 +70,7 @@ type WebDavFileSystem struct {
secret security.SigningKey
filer *filer2.Filer
grpcDialOption grpc.DialOption
+ chunkCache *chunk_cache.ChunkCache
}
type FileInfo struct {
@@ -88,22 +95,34 @@ type WebDavFile struct {
off int64
entry *filer_pb.Entry
entryViewCache []filer2.VisibleInterval
+ reader io.ReaderAt
}
func NewWebDavFileSystem(option *WebDavOption) (webdav.FileSystem, error) {
+
+ chunkCache := chunk_cache.NewChunkCache(256, option.CacheDir, option.CacheSizeMB)
+ grace.OnInterrupt(func() {
+ chunkCache.Shutdown()
+ })
return &WebDavFileSystem{
- option: option,
+ option: option,
+ chunkCache: chunkCache,
}, nil
}
-func (fs *WebDavFileSystem) WithFilerClient(ctx context.Context, fn func(context.Context, filer_pb.SeaweedFilerClient) error) error {
+var _ = filer_pb.FilerClient(&WebDavFileSystem{})
- return util.WithCachedGrpcClient(ctx, func(ctx2 context.Context, grpcConnection *grpc.ClientConn) error {
+func (fs *WebDavFileSystem) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error {
+
+ return pb.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error {
client := filer_pb.NewSeaweedFilerClient(grpcConnection)
- return fn(ctx2, client)
+ return fn(client)
}, fs.option.FilerGrpcAddress, fs.option.GrpcDialOption)
}
+func (fs *WebDavFileSystem) AdjustedUrl(hostAndPort string) string {
+ return hostAndPort
+}
func clearName(name string) (string, error) {
slashed := strings.HasSuffix(name, "/")
@@ -135,8 +154,8 @@ func (fs *WebDavFileSystem) Mkdir(ctx context.Context, fullDirPath string, perm
return os.ErrExist
}
- return fs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error {
- dir, name := filer2.FullPath(fullDirPath).DirAndName()
+ return fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+ dir, name := util.FullPath(fullDirPath).DirAndName()
request := &filer_pb.CreateEntryRequest{
Directory: dir,
Entry: &filer_pb.Entry{
@@ -153,7 +172,7 @@ func (fs *WebDavFileSystem) Mkdir(ctx context.Context, fullDirPath string, perm
}
glog.V(1).Infof("mkdir: %v", request)
- if err := filer_pb.CreateEntry(ctx, client, request); err != nil {
+ if err := filer_pb.CreateEntry(client, request); err != nil {
return fmt.Errorf("mkdir %s/%s: %v", dir, name, err)
}
@@ -183,9 +202,9 @@ func (fs *WebDavFileSystem) OpenFile(ctx context.Context, fullFilePath string, f
fs.removeAll(ctx, fullFilePath)
}
- dir, name := filer2.FullPath(fullFilePath).DirAndName()
- err = fs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error {
- if err := filer_pb.CreateEntry(ctx, client, &filer_pb.CreateEntryRequest{
+ dir, name := util.FullPath(fullFilePath).DirAndName()
+ err = fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+ if err := filer_pb.CreateEntry(client, &filer_pb.CreateEntryRequest{
Directory: dir,
Entry: &filer_pb.Entry{
Name: name,
@@ -238,34 +257,10 @@ func (fs *WebDavFileSystem) removeAll(ctx context.Context, fullFilePath string)
return err
}
- fi, err := fs.stat(ctx, fullFilePath)
- if err != nil {
- return err
- }
+ dir, name := util.FullPath(fullFilePath).DirAndName()
- if fi.IsDir() {
- //_, err = fs.db.Exec(`delete from filesystem where fullFilePath like $1 escape '\'`, strings.Replace(fullFilePath, `%`, `\%`, -1)+`%`)
- } else {
- //_, err = fs.db.Exec(`delete from filesystem where fullFilePath = ?`, fullFilePath)
- }
- dir, name := filer2.FullPath(fullFilePath).DirAndName()
- err = fs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error {
+ return filer_pb.Remove(fs, dir, name, true, false, false)
- request := &filer_pb.DeleteEntryRequest{
- Directory: dir,
- Name: name,
- IsDeleteData: true,
- }
-
- glog.V(3).Infof("removing entry: %v", request)
- _, err := client.DeleteEntry(ctx, request)
- if err != nil {
- return fmt.Errorf("remove %s: %v", fullFilePath, err)
- }
-
- return nil
- })
- return err
}
func (fs *WebDavFileSystem) RemoveAll(ctx context.Context, name string) error {
@@ -305,10 +300,10 @@ func (fs *WebDavFileSystem) Rename(ctx context.Context, oldName, newName string)
return os.ErrExist
}
- oldDir, oldBaseName := filer2.FullPath(oldName).DirAndName()
- newDir, newBaseName := filer2.FullPath(newName).DirAndName()
+ oldDir, oldBaseName := util.FullPath(oldName).DirAndName()
+ newDir, newBaseName := util.FullPath(newName).DirAndName()
- return fs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error {
+ return fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.AtomicRenameEntryRequest{
OldDirectory: oldDir,
@@ -333,10 +328,10 @@ func (fs *WebDavFileSystem) stat(ctx context.Context, fullFilePath string) (os.F
return nil, err
}
- fullpath := filer2.FullPath(fullFilePath)
+ fullpath := util.FullPath(fullFilePath)
var fi FileInfo
- entry, err := filer2.GetEntry(ctx, fs, fullpath)
+ entry, err := filer_pb.GetEntry(fs, fullpath)
if entry == nil {
return nil, os.ErrNotExist
}
@@ -367,10 +362,12 @@ func (f *WebDavFile) Write(buf []byte) (int, error) {
glog.V(2).Infof("WebDavFileSystem.Write %v", f.name)
+ dir, _ := util.FullPath(f.name).DirAndName()
+
var err error
ctx := context.Background()
if f.entry == nil {
- f.entry, err = filer2.GetEntry(ctx, f.fs, filer2.FullPath(f.name))
+ f.entry, err = filer_pb.GetEntry(f.fs, util.FullPath(f.name))
}
if f.entry == nil {
@@ -382,13 +379,15 @@ func (f *WebDavFile) Write(buf []byte) (int, error) {
var fileId, host string
var auth security.EncodedJwt
+ var collection, replication string
- if err = f.fs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error {
+ if err = f.fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.AssignVolumeRequest{
Count: 1,
- Replication: "000",
+ Replication: "",
Collection: f.fs.option.Collection,
+ ParentPath: dir,
}
resp, err := client.AssignVolume(ctx, request)
@@ -396,8 +395,12 @@ func (f *WebDavFile) Write(buf []byte) (int, error) {
glog.V(0).Infof("assign volume failure %v: %v", request, err)
return err
}
+ if resp.Error != "" {
+ return fmt.Errorf("assign volume failure %v: %v", request, resp.Error)
+ }
fileId, host, auth = resp.FileId, resp.Url, security.EncodedJwt(resp.Auth)
+ collection, replication = resp.Collection, resp.Replication
return nil
}); err != nil {
@@ -405,8 +408,7 @@ func (f *WebDavFile) Write(buf []byte) (int, error) {
}
fileUrl := fmt.Sprintf("http://%s/%s", host, fileId)
- bufReader := bytes.NewReader(buf)
- uploadResult, err := operation.Upload(fileUrl, f.name, bufReader, false, "", nil, auth)
+ uploadResult, err := operation.UploadData(fileUrl, f.name, f.fs.option.Cipher, buf, false, "", nil, auth)
if err != nil {
glog.V(0).Infof("upload data %v to %s: %v", f.name, fileUrl, err)
return 0, fmt.Errorf("upload data: %v", err)
@@ -416,19 +418,12 @@ func (f *WebDavFile) Write(buf []byte) (int, error) {
return 0, fmt.Errorf("upload result: %v", uploadResult.Error)
}
- chunk := &filer_pb.FileChunk{
- FileId: fileId,
- Offset: f.off,
- Size: uint64(len(buf)),
- Mtime: time.Now().UnixNano(),
- ETag: uploadResult.ETag,
- }
+ f.entry.Chunks = append(f.entry.Chunks, uploadResult.ToPbFileChunk(fileId, f.off))
- f.entry.Chunks = append(f.entry.Chunks, chunk)
- dir, _ := filer2.FullPath(f.name).DirAndName()
-
- err = f.fs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error {
+ err = f.fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
f.entry.Attributes.Mtime = time.Now().Unix()
+ f.entry.Attributes.Collection = collection
+ f.entry.Attributes.Replication = replication
request := &filer_pb.UpdateEntryRequest{
Directory: dir,
@@ -465,10 +460,9 @@ func (f *WebDavFile) Close() error {
func (f *WebDavFile) Read(p []byte) (readSize int, err error) {
glog.V(2).Infof("WebDavFileSystem.Read %v", f.name)
- ctx := context.Background()
if f.entry == nil {
- f.entry, err = filer2.GetEntry(ctx, f.fs, filer2.FullPath(f.name))
+ f.entry, err = filer_pb.GetEntry(f.fs, util.FullPath(f.name))
}
if f.entry == nil {
return 0, err
@@ -481,33 +475,33 @@ func (f *WebDavFile) Read(p []byte) (readSize int, err error) {
}
if f.entryViewCache == nil {
f.entryViewCache = filer2.NonOverlappingVisibleIntervals(f.entry.Chunks)
+ f.reader = nil
+ }
+ if f.reader == nil {
+ chunkViews := filer2.ViewFromVisibleIntervals(f.entryViewCache, 0, math.MaxInt32)
+ f.reader = filer2.NewChunkReaderAtFromClient(f.fs, chunkViews, f.fs.chunkCache)
}
- chunkViews := filer2.ViewFromVisibleIntervals(f.entryViewCache, f.off, len(p))
- totalRead, err := filer2.ReadIntoBuffer(ctx, f.fs, filer2.FullPath(f.name), p, chunkViews, f.off)
+ readSize, err = f.reader.ReadAt(p, f.off)
+
+ glog.V(3).Infof("WebDavFileSystem.Read %v: [%d,%d)", f.name, f.off, f.off+int64(readSize))
+ f.off += int64(readSize)
+
if err != nil {
- return 0, err
- }
- readSize = int(totalRead)
-
- glog.V(3).Infof("WebDavFileSystem.Read %v: [%d,%d)", f.name, f.off, f.off+totalRead)
-
- f.off += totalRead
- if readSize == 0 {
- return 0, io.EOF
+ glog.Errorf("file read %s: %v", f.name, err)
}
return
+
}
func (f *WebDavFile) Readdir(count int) (ret []os.FileInfo, err error) {
glog.V(2).Infof("WebDavFileSystem.Readdir %v count %d", f.name, count)
- ctx := context.Background()
- dir, _ := filer2.FullPath(f.name).DirAndName()
+ dir, _ := util.FullPath(f.name).DirAndName()
- err = filer2.ReadDirAllEntries(ctx, f.fs, filer2.FullPath(dir), "", func(entry *filer_pb.Entry, isLast bool) {
+ err = filer_pb.ReadDirAllEntries(f.fs, util.FullPath(dir), "", func(entry *filer_pb.Entry, isLast bool) error {
fi := FileInfo{
size: int64(filer2.TotalSize(entry.GetChunks())),
name: entry.Name,
@@ -521,6 +515,7 @@ func (f *WebDavFile) Readdir(count int) (ret []os.FileInfo, err error) {
}
glog.V(4).Infof("entry: %v", fi.name)
ret = append(ret, &fi)
+ return nil
})
old := f.off
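Note: WebDavFile.Read now keeps a lazily built io.ReaderAt (backed by the new chunk cache) and serves every Read as a ReadAt at the current offset, instead of re-resolving chunk views per call. The shape of that pattern, with a strings.Reader standing in for the chunk reader:

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

// webdavFile sketches the reader caching above: the io.ReaderAt is built
// lazily, kept across Read calls, and the offset advances after each read.
type webdavFile struct {
	reader io.ReaderAt
	off    int64
	open   func() io.ReaderAt // stand-in for NewChunkReaderAtFromClient
}

func (f *webdavFile) Read(p []byte) (n int, err error) {
	if f.reader == nil {
		f.reader = f.open()
	}
	n, err = f.reader.ReadAt(p, f.off)
	f.off += int64(n)
	return
}

func main() {
	f := &webdavFile{open: func() io.ReaderAt {
		return strings.NewReader("chunked file body")
	}}
	buf := make([]byte, 7)
	for {
		n, err := f.Read(buf)
		if n > 0 {
			fmt.Printf("%q\n", buf[:n])
		}
		if err != nil {
			break
		}
	}
}
```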
diff --git a/weed/shell/command_bucket_create.go b/weed/shell/command_bucket_create.go
new file mode 100644
index 000000000..52d96e4c3
--- /dev/null
+++ b/weed/shell/command_bucket_create.go
@@ -0,0 +1,83 @@
+package shell
+
+import (
+ "context"
+ "flag"
+ "fmt"
+ "io"
+ "os"
+ "time"
+
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+)
+
+func init() {
+ Commands = append(Commands, &commandBucketCreate{})
+}
+
+type commandBucketCreate struct {
+}
+
+func (c *commandBucketCreate) Name() string {
+ return "bucket.create"
+}
+
+func (c *commandBucketCreate) Help() string {
+ return `create a bucket with a given name
+
+ Example:
+ bucket.create -name <bucket_name> -replication 001
+`
+}
+
+func (c *commandBucketCreate) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+
+ bucketCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ bucketName := bucketCommand.String("name", "", "bucket name")
+ replication := bucketCommand.String("replication", "", "replication setting for the bucket")
+ if err = bucketCommand.Parse(args); err != nil {
+ return nil
+ }
+
+ if *bucketName == "" {
+ return fmt.Errorf("empty bucket name")
+ }
+
+ err = commandEnv.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+
+ resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
+ if err != nil {
+ return fmt.Errorf("get filer configuration: %v", err)
+ }
+ filerBucketsPath := resp.DirBuckets
+
+ println("create bucket under", filerBucketsPath)
+
+ entry := &filer_pb.Entry{
+ Name: *bucketName,
+ IsDirectory: true,
+ Attributes: &filer_pb.FuseAttributes{
+ Mtime: time.Now().Unix(),
+ Crtime: time.Now().Unix(),
+ FileMode: uint32(0777 | os.ModeDir),
+ Collection: *bucketName,
+ Replication: *replication,
+ },
+ }
+
+ if err := filer_pb.CreateEntry(client, &filer_pb.CreateEntryRequest{
+ Directory: filerBucketsPath,
+ Entry: entry,
+ }); err != nil {
+ return err
+ }
+
+ println("created bucket", *bucketName)
+
+ return nil
+
+ })
+
+ return err
+
+}
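Note: the new bucket.* shell commands all follow the same registration pattern: an init() that appends to the package-level Commands slice, plus a Do method that parses its own FlagSet. A minimal self-contained version of that pattern (the command interface here is a simplified stand-in for the shell package's):

```go
package main

import (
	"flag"
	"fmt"
	"io"
	"os"
)

// command is a simplified stand-in for the shell package's command interface.
type command interface {
	Name() string
	Do(args []string, writer io.Writer) error
}

var commands []command

type commandHello struct{}

func (c *commandHello) Name() string { return "hello" }

func (c *commandHello) Do(args []string, writer io.Writer) error {
	fs := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
	name := fs.String("name", "", "who to greet")
	if err := fs.Parse(args); err != nil {
		return nil // the FlagSet reports parse errors itself
	}
	if *name == "" {
		return fmt.Errorf("empty name")
	}
	fmt.Fprintf(writer, "hello, %s\n", *name)
	return nil
}

// init registers the command, mirroring the bucket.* commands above.
func init() { commands = append(commands, &commandHello{}) }

func main() {
	for _, c := range commands {
		if c.Name() == "hello" {
			c.Do([]string{"-name", "seaweedfs"}, os.Stdout)
		}
	}
}
```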
diff --git a/weed/shell/command_bucket_delete.go b/weed/shell/command_bucket_delete.go
new file mode 100644
index 000000000..07c2e74ac
--- /dev/null
+++ b/weed/shell/command_bucket_delete.go
@@ -0,0 +1,54 @@
+package shell
+
+import (
+ "flag"
+ "fmt"
+ "io"
+
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+)
+
+func init() {
+ Commands = append(Commands, &commandBucketDelete{})
+}
+
+type commandBucketDelete struct {
+}
+
+func (c *commandBucketDelete) Name() string {
+ return "bucket.delete"
+}
+
+func (c *commandBucketDelete) Help() string {
+ return `delete a bucket by a given name
+
+ bucket.delete -name <bucket_name>
+`
+}
+
+func (c *commandBucketDelete) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+
+ bucketCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ bucketName := bucketCommand.String("name", "", "bucket name")
+ if err = bucketCommand.Parse(args); err != nil {
+ return nil
+ }
+
+ if *bucketName == "" {
+ return fmt.Errorf("empty bucket name")
+ }
+
+ _, parseErr := commandEnv.parseUrl(findInputDirectory(bucketCommand.Args()))
+ if parseErr != nil {
+ return parseErr
+ }
+
+ var filerBucketsPath string
+ filerBucketsPath, err = readFilerBucketsPath(commandEnv)
+ if err != nil {
+ return fmt.Errorf("read buckets: %v", err)
+ }
+
+ return filer_pb.Remove(commandEnv, filerBucketsPath, *bucketName, false, true, true)
+
+}
diff --git a/weed/shell/command_bucket_list.go b/weed/shell/command_bucket_list.go
new file mode 100644
index 000000000..2e446b6b2
--- /dev/null
+++ b/weed/shell/command_bucket_list.go
@@ -0,0 +1,78 @@
+package shell
+
+import (
+ "context"
+ "flag"
+ "fmt"
+ "io"
+ "math"
+
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+)
+
+func init() {
+ Commands = append(Commands, &commandBucketList{})
+}
+
+type commandBucketList struct {
+}
+
+func (c *commandBucketList) Name() string {
+ return "bucket.list"
+}
+
+func (c *commandBucketList) Help() string {
+ return `list all buckets
+
+`
+}
+
+func (c *commandBucketList) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+
+ bucketCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ if err = bucketCommand.Parse(args); err != nil {
+ return nil
+ }
+
+ _, parseErr := commandEnv.parseUrl(findInputDirectory(bucketCommand.Args()))
+ if parseErr != nil {
+ return parseErr
+ }
+
+ var filerBucketsPath string
+ filerBucketsPath, err = readFilerBucketsPath(commandEnv)
+ if err != nil {
+ return fmt.Errorf("read buckets: %v", err)
+ }
+
+ err = filer_pb.List(commandEnv, filerBucketsPath, "", func(entry *filer_pb.Entry, isLast bool) error {
+ if entry.Attributes.Replication == "" || entry.Attributes.Replication == "000" {
+ fmt.Fprintf(writer, " %s\n", entry.Name)
+ } else {
+ fmt.Fprintf(writer, " %s\t\t\treplication: %s\n", entry.Name, entry.Attributes.Replication)
+ }
+ return nil
+ }, "", false, math.MaxUint32)
+ if err != nil {
+ return fmt.Errorf("list buckets under %v: %v", filerBucketsPath, err)
+ }
+
+ return err
+
+}
+
+func readFilerBucketsPath(filerClient filer_pb.FilerClient) (filerBucketsPath string, err error) {
+ err = filerClient.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+
+ resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
+ if err != nil {
+ return fmt.Errorf("get filer configuration: %v", err)
+ }
+ filerBucketsPath = resp.DirBuckets
+
+ return nil
+
+ })
+
+ return filerBucketsPath, err
+}
diff --git a/weed/shell/command_collection_delete.go b/weed/shell/command_collection_delete.go
index fbaddcd51..4b3d7f0be 100644
--- a/weed/shell/command_collection_delete.go
+++ b/weed/shell/command_collection_delete.go
@@ -34,9 +34,8 @@ func (c *commandCollectionDelete) Do(args []string, commandEnv *CommandEnv, writ
collectionName := args[0]
- ctx := context.Background()
- err = commandEnv.MasterClient.WithClient(ctx, func(client master_pb.SeaweedClient) error {
- _, err = client.CollectionDelete(ctx, &master_pb.CollectionDeleteRequest{
+ err = commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error {
+ _, err = client.CollectionDelete(context.Background(), &master_pb.CollectionDeleteRequest{
Name: collectionName,
})
return err
diff --git a/weed/shell/command_collection_list.go b/weed/shell/command_collection_list.go
index c4325c66f..2a114e61b 100644
--- a/weed/shell/command_collection_list.go
+++ b/weed/shell/command_collection_list.go
@@ -41,9 +41,8 @@ func (c *commandCollectionList) Do(args []string, commandEnv *CommandEnv, writer
func ListCollectionNames(commandEnv *CommandEnv, includeNormalVolumes, includeEcVolumes bool) (collections []string, err error) {
var resp *master_pb.CollectionListResponse
- ctx := context.Background()
- err = commandEnv.MasterClient.WithClient(ctx, func(client master_pb.SeaweedClient) error {
- resp, err = client.CollectionList(ctx, &master_pb.CollectionListRequest{
+ err = commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error {
+ resp, err = client.CollectionList(context.Background(), &master_pb.CollectionListRequest{
IncludeNormalVolumes: includeNormalVolumes,
IncludeEcVolumes: includeEcVolumes,
})
diff --git a/weed/shell/command_ec_balance.go b/weed/shell/command_ec_balance.go
index 96599372e..1ddb6a490 100644
--- a/weed/shell/command_ec_balance.go
+++ b/weed/shell/command_ec_balance.go
@@ -1,7 +1,6 @@
package shell
import (
- "context"
"flag"
"fmt"
"io"
@@ -99,6 +98,10 @@ func (c *commandEcBalance) Help() string {
func (c *commandEcBalance) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+ if err = commandEnv.confirmIsLocked(); err != nil {
+ return
+ }
+
balanceCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
collection := balanceCommand.String("collection", "EACH_COLLECTION", "collection name, or \"EACH_COLLECTION\" for each collection")
dc := balanceCommand.String("dataCenter", "", "only apply the balancing for this dataCenter")
@@ -107,10 +110,8 @@ func (c *commandEcBalance) Do(args []string, commandEnv *CommandEnv, writer io.W
return nil
}
- ctx := context.Background()
-
// collect all ec nodes
- allEcNodes, totalFreeEcSlots, err := collectEcNodes(ctx, commandEnv, *dc)
+ allEcNodes, totalFreeEcSlots, err := collectEcNodes(commandEnv, *dc)
if err != nil {
return err
}
@@ -138,7 +139,7 @@ func (c *commandEcBalance) Do(args []string, commandEnv *CommandEnv, writer io.W
}
}
- if err := balanceEcRacks(ctx, commandEnv, racks, *applyBalancing); err != nil {
+ if err := balanceEcRacks(commandEnv, racks, *applyBalancing); err != nil {
return fmt.Errorf("balance ec racks: %v", err)
}
@@ -162,38 +163,36 @@ func collectRacks(allEcNodes []*EcNode) map[RackId]*EcRack {
func balanceEcVolumes(commandEnv *CommandEnv, collection string, allEcNodes []*EcNode, racks map[RackId]*EcRack, applyBalancing bool) error {
- ctx := context.Background()
-
fmt.Printf("balanceEcVolumes %s\n", collection)
- if err := deleteDuplicatedEcShards(ctx, commandEnv, allEcNodes, collection, applyBalancing); err != nil {
+ if err := deleteDuplicatedEcShards(commandEnv, allEcNodes, collection, applyBalancing); err != nil {
return fmt.Errorf("delete duplicated collection %s ec shards: %v", collection, err)
}
- if err := balanceEcShardsAcrossRacks(ctx, commandEnv, allEcNodes, racks, collection, applyBalancing); err != nil {
+ if err := balanceEcShardsAcrossRacks(commandEnv, allEcNodes, racks, collection, applyBalancing); err != nil {
return fmt.Errorf("balance across racks collection %s ec shards: %v", collection, err)
}
- if err := balanceEcShardsWithinRacks(ctx, commandEnv, allEcNodes, racks, collection, applyBalancing); err != nil {
+ if err := balanceEcShardsWithinRacks(commandEnv, allEcNodes, racks, collection, applyBalancing); err != nil {
return fmt.Errorf("balance across racks collection %s ec shards: %v", collection, err)
}
return nil
}
-func deleteDuplicatedEcShards(ctx context.Context, commandEnv *CommandEnv, allEcNodes []*EcNode, collection string, applyBalancing bool) error {
+func deleteDuplicatedEcShards(commandEnv *CommandEnv, allEcNodes []*EcNode, collection string, applyBalancing bool) error {
// vid => []ecNode
vidLocations := collectVolumeIdToEcNodes(allEcNodes)
// deduplicate ec shards
for vid, locations := range vidLocations {
- if err := doDeduplicateEcShards(ctx, commandEnv, collection, vid, locations, applyBalancing); err != nil {
+ if err := doDeduplicateEcShards(commandEnv, collection, vid, locations, applyBalancing); err != nil {
return err
}
}
return nil
}
-func doDeduplicateEcShards(ctx context.Context, commandEnv *CommandEnv, collection string, vid needle.VolumeId, locations []*EcNode, applyBalancing bool) error {
+func doDeduplicateEcShards(commandEnv *CommandEnv, collection string, vid needle.VolumeId, locations []*EcNode, applyBalancing bool) error {
// check whether this volume has ecNodes that are over average
shardToLocations := make([][]*EcNode, erasure_coding.TotalShardsCount)
@@ -215,10 +214,10 @@ func doDeduplicateEcShards(ctx context.Context, commandEnv *CommandEnv, collecti
duplicatedShardIds := []uint32{uint32(shardId)}
for _, ecNode := range ecNodes[1:] {
- if err := unmountEcShards(ctx, commandEnv.option.GrpcDialOption, vid, ecNode.info.Id, duplicatedShardIds); err != nil {
+ if err := unmountEcShards(commandEnv.option.GrpcDialOption, vid, ecNode.info.Id, duplicatedShardIds); err != nil {
return err
}
- if err := sourceServerDeleteEcShards(ctx, commandEnv.option.GrpcDialOption, collection, vid, ecNode.info.Id, duplicatedShardIds); err != nil {
+ if err := sourceServerDeleteEcShards(commandEnv.option.GrpcDialOption, collection, vid, ecNode.info.Id, duplicatedShardIds); err != nil {
return err
}
ecNode.deleteEcVolumeShards(vid, duplicatedShardIds)
@@ -227,19 +226,19 @@ func doDeduplicateEcShards(ctx context.Context, commandEnv *CommandEnv, collecti
return nil
}
-func balanceEcShardsAcrossRacks(ctx context.Context, commandEnv *CommandEnv, allEcNodes []*EcNode, racks map[RackId]*EcRack, collection string, applyBalancing bool) error {
+func balanceEcShardsAcrossRacks(commandEnv *CommandEnv, allEcNodes []*EcNode, racks map[RackId]*EcRack, collection string, applyBalancing bool) error {
// collect vid => []ecNode, since previous steps can change the locations
vidLocations := collectVolumeIdToEcNodes(allEcNodes)
// spread the ec shards evenly
for vid, locations := range vidLocations {
- if err := doBalanceEcShardsAcrossRacks(ctx, commandEnv, collection, vid, locations, racks, applyBalancing); err != nil {
+ if err := doBalanceEcShardsAcrossRacks(commandEnv, collection, vid, locations, racks, applyBalancing); err != nil {
return err
}
}
return nil
}
-func doBalanceEcShardsAcrossRacks(ctx context.Context, commandEnv *CommandEnv, collection string, vid needle.VolumeId, locations []*EcNode, racks map[RackId]*EcRack, applyBalancing bool) error {
+func doBalanceEcShardsAcrossRacks(commandEnv *CommandEnv, collection string, vid needle.VolumeId, locations []*EcNode, racks map[RackId]*EcRack, applyBalancing bool) error {
// calculate average number of shards an ec rack should have for one volume
averageShardsPerEcRack := ceilDivide(erasure_coding.TotalShardsCount, len(racks))
@@ -274,7 +273,7 @@ func doBalanceEcShardsAcrossRacks(ctx context.Context, commandEnv *CommandEnv, c
for _, n := range racks[rackId].ecNodes {
possibleDestinationEcNodes = append(possibleDestinationEcNodes, n)
}
- err := pickOneEcNodeAndMoveOneShard(ctx, commandEnv, averageShardsPerEcRack, ecNode, collection, vid, shardId, possibleDestinationEcNodes, applyBalancing)
+ err := pickOneEcNodeAndMoveOneShard(commandEnv, averageShardsPerEcRack, ecNode, collection, vid, shardId, possibleDestinationEcNodes, applyBalancing)
if err != nil {
return err
}
@@ -306,7 +305,7 @@ func pickOneRack(rackToEcNodes map[RackId]*EcRack, rackToShardCount map[string]i
return ""
}
-func balanceEcShardsWithinRacks(ctx context.Context, commandEnv *CommandEnv, allEcNodes []*EcNode, racks map[RackId]*EcRack, collection string, applyBalancing bool) error {
+func balanceEcShardsWithinRacks(commandEnv *CommandEnv, allEcNodes []*EcNode, racks map[RackId]*EcRack, collection string, applyBalancing bool) error {
// collect vid => []ecNode, since previous steps can change the locations
vidLocations := collectVolumeIdToEcNodes(allEcNodes)
@@ -330,7 +329,7 @@ func balanceEcShardsWithinRacks(ctx context.Context, commandEnv *CommandEnv, all
}
sourceEcNodes := rackEcNodesWithVid[rackId]
averageShardsPerEcNode := ceilDivide(rackToShardCount[rackId], len(possibleDestinationEcNodes))
- if err := doBalanceEcShardsWithinOneRack(ctx, commandEnv, averageShardsPerEcNode, collection, vid, sourceEcNodes, possibleDestinationEcNodes, applyBalancing); err != nil {
+ if err := doBalanceEcShardsWithinOneRack(commandEnv, averageShardsPerEcNode, collection, vid, sourceEcNodes, possibleDestinationEcNodes, applyBalancing); err != nil {
return err
}
}
@@ -338,7 +337,7 @@ func balanceEcShardsWithinRacks(ctx context.Context, commandEnv *CommandEnv, all
return nil
}
-func doBalanceEcShardsWithinOneRack(ctx context.Context, commandEnv *CommandEnv, averageShardsPerEcNode int, collection string, vid needle.VolumeId, existingLocations, possibleDestinationEcNodes []*EcNode, applyBalancing bool) error {
+func doBalanceEcShardsWithinOneRack(commandEnv *CommandEnv, averageShardsPerEcNode int, collection string, vid needle.VolumeId, existingLocations, possibleDestinationEcNodes []*EcNode, applyBalancing bool) error {
for _, ecNode := range existingLocations {
@@ -353,7 +352,7 @@ func doBalanceEcShardsWithinOneRack(ctx context.Context, commandEnv *CommandEnv,
fmt.Printf("%s has %d overlimit, moving ec shard %d.%d\n", ecNode.info.Id, overLimitCount, vid, shardId)
- err := pickOneEcNodeAndMoveOneShard(ctx, commandEnv, averageShardsPerEcNode, ecNode, collection, vid, shardId, possibleDestinationEcNodes, applyBalancing)
+ err := pickOneEcNodeAndMoveOneShard(commandEnv, averageShardsPerEcNode, ecNode, collection, vid, shardId, possibleDestinationEcNodes, applyBalancing)
if err != nil {
return err
}
@@ -365,18 +364,18 @@ func doBalanceEcShardsWithinOneRack(ctx context.Context, commandEnv *CommandEnv,
return nil
}
-func balanceEcRacks(ctx context.Context, commandEnv *CommandEnv, racks map[RackId]*EcRack, applyBalancing bool) error {
+func balanceEcRacks(commandEnv *CommandEnv, racks map[RackId]*EcRack, applyBalancing bool) error {
// balance one rack for all ec shards
for _, ecRack := range racks {
- if err := doBalanceEcRack(ctx, commandEnv, ecRack, applyBalancing); err != nil {
+ if err := doBalanceEcRack(commandEnv, ecRack, applyBalancing); err != nil {
return err
}
}
return nil
}
-func doBalanceEcRack(ctx context.Context, commandEnv *CommandEnv, ecRack *EcRack, applyBalancing bool) error {
+func doBalanceEcRack(commandEnv *CommandEnv, ecRack *EcRack, applyBalancing bool) error {
if len(ecRack.ecNodes) <= 1 {
return nil
@@ -421,7 +420,7 @@ func doBalanceEcRack(ctx context.Context, commandEnv *CommandEnv, ecRack *EcRack
fmt.Printf("%s moves ec shards %d.%d to %s\n", fullNode.info.Id, shards.Id, shardId, emptyNode.info.Id)
- err := moveMountedShardToEcNode(ctx, commandEnv, fullNode, shards.Collection, needle.VolumeId(shards.Id), shardId, emptyNode, applyBalancing)
+ err := moveMountedShardToEcNode(commandEnv, fullNode, shards.Collection, needle.VolumeId(shards.Id), shardId, emptyNode, applyBalancing)
if err != nil {
return err
}
@@ -440,7 +439,7 @@ func doBalanceEcRack(ctx context.Context, commandEnv *CommandEnv, ecRack *EcRack
return nil
}
-func pickOneEcNodeAndMoveOneShard(ctx context.Context, commandEnv *CommandEnv, averageShardsPerEcNode int, existingLocation *EcNode, collection string, vid needle.VolumeId, shardId erasure_coding.ShardId, possibleDestinationEcNodes []*EcNode, applyBalancing bool) error {
+func pickOneEcNodeAndMoveOneShard(commandEnv *CommandEnv, averageShardsPerEcNode int, existingLocation *EcNode, collection string, vid needle.VolumeId, shardId erasure_coding.ShardId, possibleDestinationEcNodes []*EcNode, applyBalancing bool) error {
sortEcNodesByFreeslotsDecending(possibleDestinationEcNodes)
@@ -458,7 +457,7 @@ func pickOneEcNodeAndMoveOneShard(ctx context.Context, commandEnv *CommandEnv, a
fmt.Printf("%s moves ec shard %d.%d to %s\n", existingLocation.info.Id, vid, shardId, destEcNode.info.Id)
- err := moveMountedShardToEcNode(ctx, commandEnv, existingLocation, collection, vid, shardId, destEcNode, applyBalancing)
+ err := moveMountedShardToEcNode(commandEnv, existingLocation, collection, vid, shardId, destEcNode, applyBalancing)
if err != nil {
return err
}
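
The rack balancing above sizes each rack's share with `ceilDivide(erasure_coding.TotalShardsCount, len(racks))`. A minimal sketch of that helper, assuming plain integer ceiling division (SeaweedFS erasure coding uses 10 data + 4 parity shards, so TotalShardsCount is 14):

```go
package main

import "fmt"

// ceilDivide: integer division rounded up, so e.g. 14 shards over
// 4 racks yields 4 shards per rack rather than 3.
func ceilDivide(total, n int) int {
	return (total + n - 1) / n
}

func main() {
	fmt.Println(ceilDivide(14, 4)) // 4
	fmt.Println(ceilDivide(14, 7)) // 2
}
```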
diff --git a/weed/shell/command_ec_common.go b/weed/shell/command_ec_common.go
index e187d5a3b..0db119d3c 100644
--- a/weed/shell/command_ec_common.go
+++ b/weed/shell/command_ec_common.go
@@ -15,26 +15,26 @@ import (
"google.golang.org/grpc"
)
-func moveMountedShardToEcNode(ctx context.Context, commandEnv *CommandEnv, existingLocation *EcNode, collection string, vid needle.VolumeId, shardId erasure_coding.ShardId, destinationEcNode *EcNode, applyBalancing bool) (err error) {
+func moveMountedShardToEcNode(commandEnv *CommandEnv, existingLocation *EcNode, collection string, vid needle.VolumeId, shardId erasure_coding.ShardId, destinationEcNode *EcNode, applyBalancing bool) (err error) {
copiedShardIds := []uint32{uint32(shardId)}
if applyBalancing {
// ask destination node to copy shard and the ecx file from source node, and mount it
- copiedShardIds, err = oneServerCopyAndMountEcShardsFromSource(ctx, commandEnv.option.GrpcDialOption, destinationEcNode, []uint32{uint32(shardId)}, vid, collection, existingLocation.info.Id)
+ copiedShardIds, err = oneServerCopyAndMountEcShardsFromSource(commandEnv.option.GrpcDialOption, destinationEcNode, []uint32{uint32(shardId)}, vid, collection, existingLocation.info.Id)
if err != nil {
return err
}
// unmount the to be deleted shards
- err = unmountEcShards(ctx, commandEnv.option.GrpcDialOption, vid, existingLocation.info.Id, copiedShardIds)
+ err = unmountEcShards(commandEnv.option.GrpcDialOption, vid, existingLocation.info.Id, copiedShardIds)
if err != nil {
return err
}
// ask source node to delete the shard, and maybe the ecx file
- err = sourceServerDeleteEcShards(ctx, commandEnv.option.GrpcDialOption, collection, vid, existingLocation.info.Id, copiedShardIds)
+ err = sourceServerDeleteEcShards(commandEnv.option.GrpcDialOption, collection, vid, existingLocation.info.Id, copiedShardIds)
if err != nil {
return err
}
@@ -50,18 +50,18 @@ func moveMountedShardToEcNode(ctx context.Context, commandEnv *CommandEnv, exist
}
-func oneServerCopyAndMountEcShardsFromSource(ctx context.Context, grpcDialOption grpc.DialOption,
+func oneServerCopyAndMountEcShardsFromSource(grpcDialOption grpc.DialOption,
targetServer *EcNode, shardIdsToCopy []uint32,
volumeId needle.VolumeId, collection string, existingLocation string) (copiedShardIds []uint32, err error) {
fmt.Printf("allocate %d.%v %s => %s\n", volumeId, shardIdsToCopy, existingLocation, targetServer.info.Id)
- err = operation.WithVolumeServerClient(targetServer.info.Id, grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error {
+ err = operation.WithVolumeServerClient(targetServer.info.Id, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
if targetServer.info.Id != existingLocation {
fmt.Printf("copy %d.%v %s => %s\n", volumeId, shardIdsToCopy, existingLocation, targetServer.info.Id)
- _, copyErr := volumeServerClient.VolumeEcShardsCopy(ctx, &volume_server_pb.VolumeEcShardsCopyRequest{
+ _, copyErr := volumeServerClient.VolumeEcShardsCopy(context.Background(), &volume_server_pb.VolumeEcShardsCopyRequest{
VolumeId: uint32(volumeId),
Collection: collection,
ShardIds: shardIdsToCopy,
@@ -76,7 +76,7 @@ func oneServerCopyAndMountEcShardsFromSource(ctx context.Context, grpcDialOption
}
fmt.Printf("mount %d.%v on %s\n", volumeId, shardIdsToCopy, targetServer.info.Id)
- _, mountErr := volumeServerClient.VolumeEcShardsMount(ctx, &volume_server_pb.VolumeEcShardsMountRequest{
+ _, mountErr := volumeServerClient.VolumeEcShardsMount(context.Background(), &volume_server_pb.VolumeEcShardsMountRequest{
VolumeId: uint32(volumeId),
Collection: collection,
ShardIds: shardIdsToCopy,
@@ -178,12 +178,12 @@ type EcRack struct {
freeEcSlot int
}
-func collectEcNodes(ctx context.Context, commandEnv *CommandEnv, selectedDataCenter string) (ecNodes []*EcNode, totalFreeEcSlots int, err error) {
+func collectEcNodes(commandEnv *CommandEnv, selectedDataCenter string) (ecNodes []*EcNode, totalFreeEcSlots int, err error) {
// list all possible locations
var resp *master_pb.VolumeListResponse
- err = commandEnv.MasterClient.WithClient(ctx, func(client master_pb.SeaweedClient) error {
- resp, err = client.VolumeList(ctx, &master_pb.VolumeListRequest{})
+ err = commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error {
+ resp, err = client.VolumeList(context.Background(), &master_pb.VolumeListRequest{})
return err
})
if err != nil {
@@ -211,13 +211,12 @@ func collectEcNodes(ctx context.Context, commandEnv *CommandEnv, selectedDataCen
return
}
-func sourceServerDeleteEcShards(ctx context.Context, grpcDialOption grpc.DialOption,
- collection string, volumeId needle.VolumeId, sourceLocation string, toBeDeletedShardIds []uint32) error {
+func sourceServerDeleteEcShards(grpcDialOption grpc.DialOption, collection string, volumeId needle.VolumeId, sourceLocation string, toBeDeletedShardIds []uint32) error {
fmt.Printf("delete %d.%v from %s\n", volumeId, toBeDeletedShardIds, sourceLocation)
- return operation.WithVolumeServerClient(sourceLocation, grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error {
- _, deleteErr := volumeServerClient.VolumeEcShardsDelete(ctx, &volume_server_pb.VolumeEcShardsDeleteRequest{
+ return operation.WithVolumeServerClient(sourceLocation, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
+ _, deleteErr := volumeServerClient.VolumeEcShardsDelete(context.Background(), &volume_server_pb.VolumeEcShardsDeleteRequest{
VolumeId: uint32(volumeId),
Collection: collection,
ShardIds: toBeDeletedShardIds,
@@ -227,13 +226,12 @@ func sourceServerDeleteEcShards(ctx context.Context, grpcDialOption grpc.DialOpt
}
-func unmountEcShards(ctx context.Context, grpcDialOption grpc.DialOption,
- volumeId needle.VolumeId, sourceLocation string, toBeUnmountedhardIds []uint32) error {
+func unmountEcShards(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceLocation string, toBeUnmountedhardIds []uint32) error {
fmt.Printf("unmount %d.%v from %s\n", volumeId, toBeUnmountedhardIds, sourceLocation)
- return operation.WithVolumeServerClient(sourceLocation, grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error {
- _, deleteErr := volumeServerClient.VolumeEcShardsUnmount(ctx, &volume_server_pb.VolumeEcShardsUnmountRequest{
+ return operation.WithVolumeServerClient(sourceLocation, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
+ _, deleteErr := volumeServerClient.VolumeEcShardsUnmount(context.Background(), &volume_server_pb.VolumeEcShardsUnmountRequest{
VolumeId: uint32(volumeId),
ShardIds: toBeUnmountedhardIds,
})
@@ -241,13 +239,12 @@ func unmountEcShards(ctx context.Context, grpcDialOption grpc.DialOption,
})
}
-func mountEcShards(ctx context.Context, grpcDialOption grpc.DialOption,
- collection string, volumeId needle.VolumeId, sourceLocation string, toBeMountedhardIds []uint32) error {
+func mountEcShards(grpcDialOption grpc.DialOption, collection string, volumeId needle.VolumeId, sourceLocation string, toBeMountedhardIds []uint32) error {
fmt.Printf("mount %d.%v on %s\n", volumeId, toBeMountedhardIds, sourceLocation)
- return operation.WithVolumeServerClient(sourceLocation, grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error {
- _, mountErr := volumeServerClient.VolumeEcShardsMount(ctx, &volume_server_pb.VolumeEcShardsMountRequest{
+ return operation.WithVolumeServerClient(sourceLocation, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
+ _, mountErr := volumeServerClient.VolumeEcShardsMount(context.Background(), &volume_server_pb.VolumeEcShardsMountRequest{
VolumeId: uint32(volumeId),
Collection: collection,
ShardIds: toBeMountedhardIds,
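
The recurring edit in command_ec_common.go drops the ctx parameter that used to be threaded through every helper: the WithVolumeServerClient callback loses its context.Context argument, and each RPC now starts from context.Background() at the call site. A minimal sketch of the new shape, with withVolumeServerClient and ecClient as hypothetical stand-ins for operation.WithVolumeServerClient and the generated gRPC client:

```go
package main

import (
	"context"
	"fmt"
)

type ecClient struct{}

func (ecClient) Unmount(ctx context.Context, volumeId uint32) error {
	fmt.Printf("unmount volume %d\n", volumeId)
	return nil
}

// After the refactor the helper no longer threads a context through;
// the callback receives only the client.
func withVolumeServerClient(addr string, fn func(client ecClient) error) error {
	return fn(ecClient{})
}

func unmountEcShards(addr string, volumeId uint32) error {
	return withVolumeServerClient(addr, func(client ecClient) error {
		// Each RPC starts from a fresh background context.
		return client.Unmount(context.Background(), volumeId)
	})
}

func main() {
	_ = unmountEcShards("127.0.0.1:8080", 7)
}
```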
diff --git a/weed/shell/command_ec_decode.go b/weed/shell/command_ec_decode.go
index 8a705a5ae..5f03df58c 100644
--- a/weed/shell/command_ec_decode.go
+++ b/weed/shell/command_ec_decode.go
@@ -36,6 +36,10 @@ func (c *commandEcDecode) Help() string {
func (c *commandEcDecode) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+ if err = commandEnv.confirmIsLocked(); err != nil {
+ return
+ }
+
encodeCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
volumeId := encodeCommand.Int("volumeId", 0, "the volume id")
collection := encodeCommand.String("collection", "", "the collection name")
@@ -43,25 +47,24 @@ func (c *commandEcDecode) Do(args []string, commandEnv *CommandEnv, writer io.Wr
return nil
}
- ctx := context.Background()
vid := needle.VolumeId(*volumeId)
// collect topology information
- topologyInfo, err := collectTopologyInfo(ctx, commandEnv)
+ topologyInfo, err := collectTopologyInfo(commandEnv)
if err != nil {
return err
}
// volumeId is provided
if vid != 0 {
- return doEcDecode(ctx, commandEnv, topologyInfo, *collection, vid)
+ return doEcDecode(commandEnv, topologyInfo, *collection, vid)
}
// apply to all volumes in the collection
volumeIds := collectEcShardIds(topologyInfo, *collection)
fmt.Printf("ec encode volumes: %v\n", volumeIds)
for _, vid := range volumeIds {
- if err = doEcDecode(ctx, commandEnv, topologyInfo, *collection, vid); err != nil {
+ if err = doEcDecode(commandEnv, topologyInfo, *collection, vid); err != nil {
return err
}
}
@@ -69,26 +72,26 @@ func (c *commandEcDecode) Do(args []string, commandEnv *CommandEnv, writer io.Wr
return nil
}
-func doEcDecode(ctx context.Context, commandEnv *CommandEnv, topoInfo *master_pb.TopologyInfo, collection string, vid needle.VolumeId) (err error) {
+func doEcDecode(commandEnv *CommandEnv, topoInfo *master_pb.TopologyInfo, collection string, vid needle.VolumeId) (err error) {
// find volume location
nodeToEcIndexBits := collectEcNodeShardBits(topoInfo, vid)
fmt.Printf("ec volume %d shard locations: %+v\n", vid, nodeToEcIndexBits)
// collect ec shards to the server with most space
- targetNodeLocation, err := collectEcShards(ctx, commandEnv, nodeToEcIndexBits, collection, vid)
+ targetNodeLocation, err := collectEcShards(commandEnv, nodeToEcIndexBits, collection, vid)
if err != nil {
return fmt.Errorf("collectEcShards for volume %d: %v", vid, err)
}
// generate a normal volume
- err = generateNormalVolume(ctx, commandEnv.option.GrpcDialOption, needle.VolumeId(vid), collection, targetNodeLocation)
+ err = generateNormalVolume(commandEnv.option.GrpcDialOption, needle.VolumeId(vid), collection, targetNodeLocation)
if err != nil {
return fmt.Errorf("generate normal volume %d on %s: %v", vid, targetNodeLocation, err)
}
// delete the previous ec shards
- err = mountVolumeAndDeleteEcShards(ctx, commandEnv.option.GrpcDialOption, collection, targetNodeLocation, nodeToEcIndexBits, vid)
+ err = mountVolumeAndDeleteEcShards(commandEnv.option.GrpcDialOption, collection, targetNodeLocation, nodeToEcIndexBits, vid)
if err != nil {
return fmt.Errorf("delete ec shards for volume %d: %v", vid, err)
}
@@ -96,11 +99,11 @@ func doEcDecode(ctx context.Context, commandEnv *CommandEnv, topoInfo *master_pb
return nil
}
-func mountVolumeAndDeleteEcShards(ctx context.Context, grpcDialOption grpc.DialOption, collection, targetNodeLocation string, nodeToEcIndexBits map[string]erasure_coding.ShardBits, vid needle.VolumeId) error {
+func mountVolumeAndDeleteEcShards(grpcDialOption grpc.DialOption, collection, targetNodeLocation string, nodeToEcIndexBits map[string]erasure_coding.ShardBits, vid needle.VolumeId) error {
// mount volume
- if err := operation.WithVolumeServerClient(targetNodeLocation, grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error {
- _, mountErr := volumeServerClient.VolumeMount(ctx, &volume_server_pb.VolumeMountRequest{
+ if err := operation.WithVolumeServerClient(targetNodeLocation, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
+ _, mountErr := volumeServerClient.VolumeMount(context.Background(), &volume_server_pb.VolumeMountRequest{
VolumeId: uint32(vid),
})
return mountErr
@@ -111,7 +114,7 @@ func mountVolumeAndDeleteEcShards(ctx context.Context, grpcDialOption grpc.DialO
// unmount ec shards
for location, ecIndexBits := range nodeToEcIndexBits {
fmt.Printf("unmount ec volume %d on %s has shards: %+v\n", vid, location, ecIndexBits.ShardIds())
- err := unmountEcShards(ctx, grpcDialOption, vid, location, ecIndexBits.ToUint32Slice())
+ err := unmountEcShards(grpcDialOption, vid, location, ecIndexBits.ToUint32Slice())
if err != nil {
return fmt.Errorf("mountVolumeAndDeleteEcShards unmount ec volume %d on %s: %v", vid, location, err)
}
@@ -119,7 +122,7 @@ func mountVolumeAndDeleteEcShards(ctx context.Context, grpcDialOption grpc.DialO
// delete ec shards
for location, ecIndexBits := range nodeToEcIndexBits {
fmt.Printf("delete ec volume %d on %s has shards: %+v\n", vid, location, ecIndexBits.ShardIds())
- err := sourceServerDeleteEcShards(ctx, grpcDialOption, collection, vid, location, ecIndexBits.ToUint32Slice())
+ err := sourceServerDeleteEcShards(grpcDialOption, collection, vid, location, ecIndexBits.ToUint32Slice())
if err != nil {
return fmt.Errorf("mountVolumeAndDeleteEcShards delete ec volume %d on %s: %v", vid, location, err)
}
@@ -128,12 +131,12 @@ func mountVolumeAndDeleteEcShards(ctx context.Context, grpcDialOption grpc.DialO
return nil
}
-func generateNormalVolume(ctx context.Context, grpcDialOption grpc.DialOption, vid needle.VolumeId, collection string, sourceVolumeServer string) error {
+func generateNormalVolume(grpcDialOption grpc.DialOption, vid needle.VolumeId, collection string, sourceVolumeServer string) error {
fmt.Printf("generateNormalVolume from ec volume %d on %s\n", vid, sourceVolumeServer)
- err := operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error {
- _, genErr := volumeServerClient.VolumeEcShardsToVolume(ctx, &volume_server_pb.VolumeEcShardsToVolumeRequest{
+ err := operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
+ _, genErr := volumeServerClient.VolumeEcShardsToVolume(context.Background(), &volume_server_pb.VolumeEcShardsToVolumeRequest{
VolumeId: uint32(vid),
Collection: collection,
})
@@ -144,7 +147,7 @@ func generateNormalVolume(ctx context.Context, grpcDialOption grpc.DialOption, v
}
-func collectEcShards(ctx context.Context, commandEnv *CommandEnv, nodeToEcIndexBits map[string]erasure_coding.ShardBits, collection string, vid needle.VolumeId) (targetNodeLocation string, err error) {
+func collectEcShards(commandEnv *CommandEnv, nodeToEcIndexBits map[string]erasure_coding.ShardBits, collection string, vid needle.VolumeId) (targetNodeLocation string, err error) {
maxShardCount := 0
var exisitngEcIndexBits erasure_coding.ShardBits
@@ -170,11 +173,11 @@ func collectEcShards(ctx context.Context, commandEnv *CommandEnv, nodeToEcIndexB
continue
}
- err = operation.WithVolumeServerClient(targetNodeLocation, commandEnv.option.GrpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error {
+ err = operation.WithVolumeServerClient(targetNodeLocation, commandEnv.option.GrpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
fmt.Printf("copy %d.%v %s => %s\n", vid, needToCopyEcIndexBits.ShardIds(), loc, targetNodeLocation)
- _, copyErr := volumeServerClient.VolumeEcShardsCopy(ctx, &volume_server_pb.VolumeEcShardsCopyRequest{
+ _, copyErr := volumeServerClient.VolumeEcShardsCopy(context.Background(), &volume_server_pb.VolumeEcShardsCopyRequest{
VolumeId: uint32(vid),
Collection: collection,
ShardIds: needToCopyEcIndexBits.ToUint32Slice(),
@@ -204,11 +207,11 @@ func collectEcShards(ctx context.Context, commandEnv *CommandEnv, nodeToEcIndexB
}
-func collectTopologyInfo(ctx context.Context, commandEnv *CommandEnv) (topoInfo *master_pb.TopologyInfo, err error) {
+func collectTopologyInfo(commandEnv *CommandEnv) (topoInfo *master_pb.TopologyInfo, err error) {
var resp *master_pb.VolumeListResponse
- err = commandEnv.MasterClient.WithClient(ctx, func(client master_pb.SeaweedClient) error {
- resp, err = client.VolumeList(ctx, &master_pb.VolumeListRequest{})
+ err = commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error {
+ resp, err = client.VolumeList(context.Background(), &master_pb.VolumeListRequest{})
return err
})
if err != nil {
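
doEcDecode orchestrates three steps: gather all shards onto the server that already holds the most of them, rebuild a normal volume there (VolumeEcShardsToVolume), then mount it and unmount/delete the ec shards everywhere. A sketch of that flow, with Printf placeholders standing in for the RPCs shown in the hunks above:

```go
package main

import "fmt"

func decodeEcVolume(vid uint32, shardCounts map[string]int) error {
	// Step 1: pick the server already holding the most shards,
	// so the fewest shard copies are needed before decoding.
	target, max := "", -1
	for server, n := range shardCounts {
		if n > max {
			target, max = server, n
		}
	}
	fmt.Printf("collect shards of %d onto %s\n", vid, target)

	// Step 2: rebuild the normal volume on that server.
	fmt.Printf("generate normal volume %d on %s\n", vid, target)

	// Step 3: mount the volume, then unmount and delete ec shards everywhere.
	for server := range shardCounts {
		fmt.Printf("unmount+delete ec shards of %d on %s\n", vid, server)
	}
	return nil
}

func main() {
	_ = decodeEcVolume(3, map[string]int{"vs1": 9, "vs2": 5})
}
```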
diff --git a/weed/shell/command_ec_encode.go b/weed/shell/command_ec_encode.go
index 587b59388..165809d05 100644
--- a/weed/shell/command_ec_encode.go
+++ b/weed/shell/command_ec_encode.go
@@ -54,6 +54,10 @@ func (c *commandEcEncode) Help() string {
func (c *commandEcEncode) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+ if err = commandEnv.confirmIsLocked(); err != nil {
+ return
+ }
+
encodeCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
volumeId := encodeCommand.Int("volumeId", 0, "the volume id")
collection := encodeCommand.String("collection", "", "the collection name")
@@ -63,22 +67,21 @@ func (c *commandEcEncode) Do(args []string, commandEnv *CommandEnv, writer io.Wr
return nil
}
- ctx := context.Background()
vid := needle.VolumeId(*volumeId)
// volumeId is provided
if vid != 0 {
- return doEcEncode(ctx, commandEnv, *collection, vid)
+ return doEcEncode(commandEnv, *collection, vid)
}
// apply to all volumes in the collection
- volumeIds, err := collectVolumeIdsForEcEncode(ctx, commandEnv, *collection, *fullPercentage, *quietPeriod)
+ volumeIds, err := collectVolumeIdsForEcEncode(commandEnv, *collection, *fullPercentage, *quietPeriod)
if err != nil {
return err
}
fmt.Printf("ec encode volumes: %v\n", volumeIds)
for _, vid := range volumeIds {
- if err = doEcEncode(ctx, commandEnv, *collection, vid); err != nil {
+ if err = doEcEncode(commandEnv, *collection, vid); err != nil {
return err
}
}
@@ -86,7 +89,7 @@ func (c *commandEcEncode) Do(args []string, commandEnv *CommandEnv, writer io.Wr
return nil
}
-func doEcEncode(ctx context.Context, commandEnv *CommandEnv, collection string, vid needle.VolumeId) (err error) {
+func doEcEncode(commandEnv *CommandEnv, collection string, vid needle.VolumeId) (err error) {
// find volume location
locations, found := commandEnv.MasterClient.GetLocations(uint32(vid))
if !found {
@@ -96,19 +99,19 @@ func doEcEncode(ctx context.Context, commandEnv *CommandEnv, collection string,
// fmt.Printf("found ec %d shards on %v\n", vid, locations)
// mark the volume as readonly
- err = markVolumeReadonly(ctx, commandEnv.option.GrpcDialOption, needle.VolumeId(vid), locations)
+ err = markVolumeReadonly(commandEnv.option.GrpcDialOption, needle.VolumeId(vid), locations)
if err != nil {
return fmt.Errorf("mark volume %d as readonly on %s: %v", vid, locations[0].Url, err)
}
// generate ec shards
- err = generateEcShards(ctx, commandEnv.option.GrpcDialOption, needle.VolumeId(vid), collection, locations[0].Url)
+ err = generateEcShards(commandEnv.option.GrpcDialOption, needle.VolumeId(vid), collection, locations[0].Url)
if err != nil {
return fmt.Errorf("generate ec shards for volume %d on %s: %v", vid, locations[0].Url, err)
}
// balance the ec shards to current cluster
- err = spreadEcShards(ctx, commandEnv, vid, collection, locations)
+ err = spreadEcShards(commandEnv, vid, collection, locations)
if err != nil {
return fmt.Errorf("spread ec shards for volume %d from %s: %v", vid, locations[0].Url, err)
}
@@ -116,12 +119,12 @@ func doEcEncode(ctx context.Context, commandEnv *CommandEnv, collection string,
return nil
}
-func markVolumeReadonly(ctx context.Context, grpcDialOption grpc.DialOption, volumeId needle.VolumeId, locations []wdclient.Location) error {
+func markVolumeReadonly(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, locations []wdclient.Location) error {
for _, location := range locations {
- err := operation.WithVolumeServerClient(location.Url, grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error {
- _, markErr := volumeServerClient.VolumeMarkReadonly(ctx, &volume_server_pb.VolumeMarkReadonlyRequest{
+ err := operation.WithVolumeServerClient(location.Url, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
+ _, markErr := volumeServerClient.VolumeMarkReadonly(context.Background(), &volume_server_pb.VolumeMarkReadonlyRequest{
VolumeId: uint32(volumeId),
})
return markErr
@@ -136,10 +139,10 @@ func markVolumeReadonly(ctx context.Context, grpcDialOption grpc.DialOption, vol
return nil
}
-func generateEcShards(ctx context.Context, grpcDialOption grpc.DialOption, volumeId needle.VolumeId, collection string, sourceVolumeServer string) error {
+func generateEcShards(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, collection string, sourceVolumeServer string) error {
- err := operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error {
- _, genErr := volumeServerClient.VolumeEcShardsGenerate(ctx, &volume_server_pb.VolumeEcShardsGenerateRequest{
+ err := operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
+ _, genErr := volumeServerClient.VolumeEcShardsGenerate(context.Background(), &volume_server_pb.VolumeEcShardsGenerateRequest{
VolumeId: uint32(volumeId),
Collection: collection,
})
@@ -150,9 +153,9 @@ func generateEcShards(ctx context.Context, grpcDialOption grpc.DialOption, volum
}
-func spreadEcShards(ctx context.Context, commandEnv *CommandEnv, volumeId needle.VolumeId, collection string, existingLocations []wdclient.Location) (err error) {
+func spreadEcShards(commandEnv *CommandEnv, volumeId needle.VolumeId, collection string, existingLocations []wdclient.Location) (err error) {
- allEcNodes, totalFreeEcSlots, err := collectEcNodes(ctx, commandEnv, "")
+ allEcNodes, totalFreeEcSlots, err := collectEcNodes(commandEnv, "")
if err != nil {
return err
}
@@ -169,26 +172,27 @@ func spreadEcShards(ctx context.Context, commandEnv *CommandEnv, volumeId needle
allocatedEcIds := balancedEcDistribution(allocatedDataNodes)
// ask the data nodes to copy from the source volume server
- copiedShardIds, err := parallelCopyEcShardsFromSource(ctx, commandEnv.option.GrpcDialOption, allocatedDataNodes, allocatedEcIds, volumeId, collection, existingLocations[0])
+ copiedShardIds, err := parallelCopyEcShardsFromSource(commandEnv.option.GrpcDialOption, allocatedDataNodes, allocatedEcIds, volumeId, collection, existingLocations[0])
if err != nil {
return err
}
// unmount the to be deleted shards
- err = unmountEcShards(ctx, commandEnv.option.GrpcDialOption, volumeId, existingLocations[0].Url, copiedShardIds)
+ err = unmountEcShards(commandEnv.option.GrpcDialOption, volumeId, existingLocations[0].Url, copiedShardIds)
if err != nil {
return err
}
// ask the source volume server to clean up copied ec shards
- err = sourceServerDeleteEcShards(ctx, commandEnv.option.GrpcDialOption, collection, volumeId, existingLocations[0].Url, copiedShardIds)
+ err = sourceServerDeleteEcShards(commandEnv.option.GrpcDialOption, collection, volumeId, existingLocations[0].Url, copiedShardIds)
if err != nil {
return fmt.Errorf("source delete copied ecShards %s %d.%v: %v", existingLocations[0].Url, volumeId, copiedShardIds, err)
}
// ask the source volume server to delete the original volume
for _, location := range existingLocations {
- err = deleteVolume(ctx, commandEnv.option.GrpcDialOption, volumeId, location.Url)
+ fmt.Printf("delete volume %d from %s\n", volumeId, location.Url)
+ err = deleteVolume(commandEnv.option.GrpcDialOption, volumeId, location.Url)
if err != nil {
return fmt.Errorf("deleteVolume %s volume %d: %v", location.Url, volumeId, err)
}
@@ -198,9 +202,7 @@ func spreadEcShards(ctx context.Context, commandEnv *CommandEnv, volumeId needle
}
-func parallelCopyEcShardsFromSource(ctx context.Context, grpcDialOption grpc.DialOption,
- targetServers []*EcNode, allocatedEcIds [][]uint32,
- volumeId needle.VolumeId, collection string, existingLocation wdclient.Location) (actuallyCopied []uint32, err error) {
+func parallelCopyEcShardsFromSource(grpcDialOption grpc.DialOption, targetServers []*EcNode, allocatedEcIds [][]uint32, volumeId needle.VolumeId, collection string, existingLocation wdclient.Location) (actuallyCopied []uint32, err error) {
// parallelize
shardIdChan := make(chan []uint32, len(targetServers))
@@ -213,7 +215,7 @@ func parallelCopyEcShardsFromSource(ctx context.Context, grpcDialOption grpc.Dia
wg.Add(1)
go func(server *EcNode, allocatedEcShardIds []uint32) {
defer wg.Done()
- copiedShardIds, copyErr := oneServerCopyAndMountEcShardsFromSource(ctx, grpcDialOption, server,
+ copiedShardIds, copyErr := oneServerCopyAndMountEcShardsFromSource(grpcDialOption, server,
allocatedEcShardIds, volumeId, collection, existingLocation.Url)
if copyErr != nil {
err = copyErr
@@ -255,11 +257,11 @@ func balancedEcDistribution(servers []*EcNode) (allocated [][]uint32) {
return allocated
}
-func collectVolumeIdsForEcEncode(ctx context.Context, commandEnv *CommandEnv, selectedCollection string, fullPercentage float64, quietPeriod time.Duration) (vids []needle.VolumeId, err error) {
+func collectVolumeIdsForEcEncode(commandEnv *CommandEnv, selectedCollection string, fullPercentage float64, quietPeriod time.Duration) (vids []needle.VolumeId, err error) {
var resp *master_pb.VolumeListResponse
- err = commandEnv.MasterClient.WithClient(ctx, func(client master_pb.SeaweedClient) error {
- resp, err = client.VolumeList(ctx, &master_pb.VolumeListRequest{})
+ err = commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error {
+ resp, err = client.VolumeList(context.Background(), &master_pb.VolumeListRequest{})
return err
})
if err != nil {
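
doEcEncode performs the mirror-image sequence: mark every replica read-only, generate the ec shards next to one replica, then spread them across the cluster and delete the original volume copies. A sketch of the ordering, again with placeholders for the RPCs shown above:

```go
package main

import "fmt"

func encodeOneVolume(vid uint32, replicaLocations []string) error {
	for _, loc := range replicaLocations {
		fmt.Printf("VolumeMarkReadonly %d on %s\n", vid, loc) // freeze writes first
	}
	src := replicaLocations[0]
	fmt.Printf("VolumeEcShardsGenerate %d on %s\n", vid, src) // create shard + index files
	fmt.Printf("spread shards of %d from %s\n", vid, src)     // copy, mount, then clean up source
	return nil
}

func main() {
	_ = encodeOneVolume(42, []string{"vs1:8080", "vs2:8080"})
}
```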
diff --git a/weed/shell/command_ec_rebuild.go b/weed/shell/command_ec_rebuild.go
index 600a8cb45..df28681fe 100644
--- a/weed/shell/command_ec_rebuild.go
+++ b/weed/shell/command_ec_rebuild.go
@@ -56,6 +56,10 @@ func (c *commandEcRebuild) Help() string {
func (c *commandEcRebuild) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+ if err = commandEnv.confirmIsLocked(); err != nil {
+ return
+ }
+
fixCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
collection := fixCommand.String("collection", "EACH_COLLECTION", "collection name, or \"EACH_COLLECTION\" for each collection")
applyChanges := fixCommand.Bool("force", false, "apply the changes")
@@ -64,7 +68,7 @@ func (c *commandEcRebuild) Do(args []string, commandEnv *CommandEnv, writer io.W
}
// collect all ec nodes
- allEcNodes, _, err := collectEcNodes(context.Background(), commandEnv, "")
+ allEcNodes, _, err := collectEcNodes(commandEnv, "")
if err != nil {
return err
}
@@ -92,8 +96,6 @@ func (c *commandEcRebuild) Do(args []string, commandEnv *CommandEnv, writer io.W
func rebuildEcVolumes(commandEnv *CommandEnv, allEcNodes []*EcNode, collection string, writer io.Writer, applyChanges bool) error {
- ctx := context.Background()
-
fmt.Printf("rebuildEcVolumes %s\n", collection)
// collect vid => each shard locations, similar to ecShardMap in topology.go
@@ -117,7 +119,7 @@ func rebuildEcVolumes(commandEnv *CommandEnv, allEcNodes []*EcNode, collection s
return fmt.Errorf("disk space is not enough")
}
- if err := rebuildOneEcVolume(ctx, commandEnv, allEcNodes[0], collection, vid, locations, writer, applyChanges); err != nil {
+ if err := rebuildOneEcVolume(commandEnv, allEcNodes[0], collection, vid, locations, writer, applyChanges); err != nil {
return err
}
}
@@ -125,13 +127,13 @@ func rebuildEcVolumes(commandEnv *CommandEnv, allEcNodes []*EcNode, collection s
return nil
}
-func rebuildOneEcVolume(ctx context.Context, commandEnv *CommandEnv, rebuilder *EcNode, collection string, volumeId needle.VolumeId, locations EcShardLocations, writer io.Writer, applyChanges bool) error {
+func rebuildOneEcVolume(commandEnv *CommandEnv, rebuilder *EcNode, collection string, volumeId needle.VolumeId, locations EcShardLocations, writer io.Writer, applyChanges bool) error {
fmt.Printf("rebuildOneEcVolume %s %d\n", collection, volumeId)
// collect shard files to rebuilder local disk
var generatedShardIds []uint32
- copiedShardIds, _, err := prepareDataToRecover(ctx, commandEnv, rebuilder, collection, volumeId, locations, writer, applyChanges)
+ copiedShardIds, _, err := prepareDataToRecover(commandEnv, rebuilder, collection, volumeId, locations, writer, applyChanges)
if err != nil {
return err
}
@@ -139,7 +141,7 @@ func rebuildOneEcVolume(ctx context.Context, commandEnv *CommandEnv, rebuilder *
// clean up working files
// ask the rebuilder to delete the copied shards
- err = sourceServerDeleteEcShards(ctx, commandEnv.option.GrpcDialOption, collection, volumeId, rebuilder.info.Id, copiedShardIds)
+ err = sourceServerDeleteEcShards(commandEnv.option.GrpcDialOption, collection, volumeId, rebuilder.info.Id, copiedShardIds)
if err != nil {
fmt.Fprintf(writer, "%s delete copied ec shards %s %d.%v\n", rebuilder.info.Id, collection, volumeId, copiedShardIds)
}
@@ -151,13 +153,13 @@ func rebuildOneEcVolume(ctx context.Context, commandEnv *CommandEnv, rebuilder *
}
// generate ec shards, and maybe ecx file
- generatedShardIds, err = generateMissingShards(ctx, commandEnv.option.GrpcDialOption, collection, volumeId, rebuilder.info.Id)
+ generatedShardIds, err = generateMissingShards(commandEnv.option.GrpcDialOption, collection, volumeId, rebuilder.info.Id)
if err != nil {
return err
}
// mount the generated shards
- err = mountEcShards(ctx, commandEnv.option.GrpcDialOption, collection, volumeId, rebuilder.info.Id, generatedShardIds)
+ err = mountEcShards(commandEnv.option.GrpcDialOption, collection, volumeId, rebuilder.info.Id, generatedShardIds)
if err != nil {
return err
}
@@ -167,11 +169,10 @@ func rebuildOneEcVolume(ctx context.Context, commandEnv *CommandEnv, rebuilder *
return nil
}
-func generateMissingShards(ctx context.Context, grpcDialOption grpc.DialOption,
- collection string, volumeId needle.VolumeId, sourceLocation string) (rebuiltShardIds []uint32, err error) {
+func generateMissingShards(grpcDialOption grpc.DialOption, collection string, volumeId needle.VolumeId, sourceLocation string) (rebuiltShardIds []uint32, err error) {
- err = operation.WithVolumeServerClient(sourceLocation, grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error {
- resp, rebultErr := volumeServerClient.VolumeEcShardsRebuild(ctx, &volume_server_pb.VolumeEcShardsRebuildRequest{
+ err = operation.WithVolumeServerClient(sourceLocation, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
+ resp, rebultErr := volumeServerClient.VolumeEcShardsRebuild(context.Background(), &volume_server_pb.VolumeEcShardsRebuildRequest{
VolumeId: uint32(volumeId),
Collection: collection,
})
@@ -183,7 +184,7 @@ func generateMissingShards(ctx context.Context, grpcDialOption grpc.DialOption,
return
}
-func prepareDataToRecover(ctx context.Context, commandEnv *CommandEnv, rebuilder *EcNode, collection string, volumeId needle.VolumeId, locations EcShardLocations, writer io.Writer, applyBalancing bool) (copiedShardIds []uint32, localShardIds []uint32, err error) {
+func prepareDataToRecover(commandEnv *CommandEnv, rebuilder *EcNode, collection string, volumeId needle.VolumeId, locations EcShardLocations, writer io.Writer, applyBalancing bool) (copiedShardIds []uint32, localShardIds []uint32, err error) {
needEcxFile := true
var localShardBits erasure_coding.ShardBits
@@ -209,8 +210,8 @@ func prepareDataToRecover(ctx context.Context, commandEnv *CommandEnv, rebuilder
var copyErr error
if applyBalancing {
- copyErr = operation.WithVolumeServerClient(rebuilder.info.Id, commandEnv.option.GrpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error {
- _, copyErr := volumeServerClient.VolumeEcShardsCopy(ctx, &volume_server_pb.VolumeEcShardsCopyRequest{
+ copyErr = operation.WithVolumeServerClient(rebuilder.info.Id, commandEnv.option.GrpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
+ _, copyErr := volumeServerClient.VolumeEcShardsCopy(context.Background(), &volume_server_pb.VolumeEcShardsCopyRequest{
VolumeId: uint32(volumeId),
Collection: collection,
ShardIds: []uint32{uint32(shardId)},
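
The rebuild path copies whatever shards survive to a single rebuilder node, asks it to regenerate the rest (VolumeEcShardsRebuild), and mounts the result. A small sketch of deciding which shards are missing, using a bitmask in the spirit of erasure_coding.ShardBits (bit i set means shard i is present):

```go
package main

import "fmt"

const totalShards = 14 // 10 data + 4 parity

func missingShards(present uint32) (missing []int) {
	for i := 0; i < totalShards; i++ {
		if present&(1<<uint(i)) == 0 {
			missing = append(missing, i)
		}
	}
	return
}

func main() {
	// Shards 0-9 and 12 present; 10, 11 and 13 need rebuilding.
	var bits uint32 = (1<<10 - 1) | 1<<12
	fmt.Println(missingShards(bits)) // [10 11 13]
}
```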
diff --git a/weed/shell/command_ec_test.go b/weed/shell/command_ec_test.go
index c233d25d0..4fddcbea5 100644
--- a/weed/shell/command_ec_test.go
+++ b/weed/shell/command_ec_test.go
@@ -1,7 +1,6 @@
package shell
import (
- "context"
"fmt"
"testing"
@@ -121,7 +120,7 @@ func TestCommandEcBalanceVolumeEvenButRackUneven(t *testing.T) {
racks := collectRacks(allEcNodes)
balanceEcVolumes(nil, "c1", allEcNodes, racks, false)
- balanceEcRacks(context.Background(), nil, racks, false)
+ balanceEcRacks(nil, racks, false)
}
func newEcNode(dc string, rack string, dataNodeId string, freeEcSlot int) *EcNode {
diff --git a/weed/shell/command_fs_cat.go b/weed/shell/command_fs_cat.go
index 238dee7f9..7177d8ac3 100644
--- a/weed/shell/command_fs_cat.go
+++ b/weed/shell/command_fs_cat.go
@@ -1,13 +1,13 @@
package shell
import (
- "context"
"fmt"
"io"
"math"
"github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
)
func init() {
@@ -25,39 +25,34 @@ func (c *commandFsCat) Help() string {
return `stream the file content on to the screen
fs.cat /dir/file_name
- fs.cat http://<filer_server>:<port>/dir/file_name
`
}
func (c *commandFsCat) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
- input := findInputDirectory(args)
-
- filerServer, filerPort, path, err := commandEnv.parseUrl(input)
+ path, err := commandEnv.parseUrl(findInputDirectory(args))
if err != nil {
return err
}
- ctx := context.Background()
-
- if commandEnv.isDirectory(ctx, filerServer, filerPort, path) {
+ if commandEnv.isDirectory(path) {
return fmt.Errorf("%s is a directory", path)
}
- dir, name := filer2.FullPath(path).DirAndName()
+ dir, name := util.FullPath(path).DirAndName()
- return commandEnv.withFilerClient(ctx, filerServer, filerPort, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error {
+ return commandEnv.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.LookupDirectoryEntryRequest{
Name: name,
Directory: dir,
}
- respLookupEntry, err := client.LookupDirectoryEntry(ctx, request)
+ respLookupEntry, err := filer_pb.LookupEntry(client, request)
if err != nil {
return err
}
- return filer2.StreamContent(commandEnv.MasterClient, writer, respLookupEntry.Entry.Chunks, 0, math.MaxInt32)
+ return filer2.StreamContent(commandEnv.MasterClient, writer, respLookupEntry.Entry.Chunks, 0, math.MaxInt64)
})
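
Besides the filer-address cleanup, fs.cat's StreamContent call now passes math.MaxInt64 instead of math.MaxInt32 as the size limit, so files over 2 GiB are no longer silently truncated. A tiny illustration of the boundary:

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	fileSize := int64(5) << 30 // a 5 GiB file
	// The old call capped the read size at MaxInt32 (~2 GiB),
	// cutting off large files; MaxInt64 removes the cap.
	fmt.Println(fileSize > math.MaxInt32) // true: would have been truncated
	fmt.Println(fileSize > math.MaxInt64) // false: now streams in full
}
```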
diff --git a/weed/shell/command_fs_cd.go b/weed/shell/command_fs_cd.go
index 408ec86c8..2cc28f7a2 100644
--- a/weed/shell/command_fs_cd.go
+++ b/weed/shell/command_fs_cd.go
@@ -1,7 +1,6 @@
package shell
import (
- "context"
"io"
)
@@ -17,41 +16,33 @@ func (c *commandFsCd) Name() string {
}
func (c *commandFsCd) Help() string {
- return `change directory to http://<filer_server>:<port>/dir/
+ return `change directory to a directory /path/to/dir
The full path can be too long to type. For example,
- fs.ls http://<filer_server>:<port>/some/path/to/file_name
+ fs.ls /some/path/to/file_name
can be simplified as
- fs.cd http://<filer_server>:<port>/some/path
+ fs.cd /some/path
fs.ls to/file_name
`
}
func (c *commandFsCd) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
- input := findInputDirectory(args)
-
- filerServer, filerPort, path, err := commandEnv.parseUrl(input)
+ path, err := commandEnv.parseUrl(findInputDirectory(args))
if err != nil {
return err
}
if path == "/" {
- commandEnv.option.FilerHost = filerServer
- commandEnv.option.FilerPort = filerPort
commandEnv.option.Directory = "/"
return nil
}
- ctx := context.Background()
-
- err = commandEnv.checkDirectory(ctx, filerServer, filerPort, path)
+ err = commandEnv.checkDirectory(path)
if err == nil {
- commandEnv.option.FilerHost = filerServer
- commandEnv.option.FilerPort = filerPort
commandEnv.option.Directory = path
}
diff --git a/weed/shell/command_fs_du.go b/weed/shell/command_fs_du.go
index d6ea51d0c..96551dd5a 100644
--- a/weed/shell/command_fs_du.go
+++ b/weed/shell/command_fs_du.go
@@ -1,12 +1,9 @@
package shell
import (
- "context"
"fmt"
"io"
- "google.golang.org/grpc"
-
"github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
@@ -26,28 +23,26 @@ func (c *commandFsDu) Name() string {
func (c *commandFsDu) Help() string {
return `show disk usage
- fs.du http://<filer_server>:<port>/dir
- fs.du http://<filer_server>:<port>/dir/file_name
- fs.du http://<filer_server>:<port>/dir/file_prefix
+ fs.du /dir
+ fs.du /dir/file_name
+ fs.du /dir/file_prefix
`
}
func (c *commandFsDu) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
- filerServer, filerPort, path, err := commandEnv.parseUrl(findInputDirectory(args))
+ path, err := commandEnv.parseUrl(findInputDirectory(args))
if err != nil {
return err
}
- ctx := context.Background()
-
- if commandEnv.isDirectory(ctx, filerServer, filerPort, path) {
+ if commandEnv.isDirectory(path) {
path = path + "/"
}
var blockCount, byteCount uint64
- dir, name := filer2.FullPath(path).DirAndName()
- blockCount, byteCount, err = duTraverseDirectory(ctx, writer, commandEnv.getFilerClient(filerServer, filerPort), dir, name)
+ dir, name := util.FullPath(path).DirAndName()
+ blockCount, byteCount, err = duTraverseDirectory(writer, commandEnv, dir, name)
if name == "" && err == nil {
fmt.Fprintf(writer, "block:%4d\tbyte:%10d\t%s\n", blockCount, byteCount, dir)
@@ -57,54 +52,33 @@ func (c *commandFsDu) Do(args []string, commandEnv *CommandEnv, writer io.Writer
}
-func duTraverseDirectory(ctx context.Context, writer io.Writer, filerClient filer2.FilerClient, dir, name string) (blockCount uint64, byteCount uint64, err error) {
+func duTraverseDirectory(writer io.Writer, filerClient filer_pb.FilerClient, dir, name string) (blockCount, byteCount uint64, err error) {
+
+ err = filer_pb.ReadDirAllEntries(filerClient, util.FullPath(dir), name, func(entry *filer_pb.Entry, isLast bool) error {
+
+ var fileBlockCount, fileByteCount uint64
- err = filer2.ReadDirAllEntries(ctx, filerClient, filer2.FullPath(dir), name, func(entry *filer_pb.Entry, isLast bool) {
if entry.IsDirectory {
subDir := fmt.Sprintf("%s/%s", dir, entry.Name)
if dir == "/" {
subDir = "/" + entry.Name
}
- numBlock, numByte, err := duTraverseDirectory(ctx, writer, filerClient, subDir, "")
+ numBlock, numByte, err := duTraverseDirectory(writer, filerClient, subDir, "")
if err == nil {
blockCount += numBlock
byteCount += numByte
}
} else {
+ fileBlockCount = uint64(len(entry.Chunks))
+ fileByteCount = filer2.TotalSize(entry.Chunks)
blockCount += uint64(len(entry.Chunks))
byteCount += filer2.TotalSize(entry.Chunks)
}
if name != "" && !entry.IsDirectory {
- fmt.Fprintf(writer, "block:%4d\tbyte:%10d\t%s/%s\n", blockCount, byteCount, dir, name)
+ fmt.Fprintf(writer, "block:%4d\tbyte:%10d\t%s/%s\n", fileBlockCount, fileByteCount, dir, entry.Name)
}
+ return nil
})
return
}
-
-func (env *CommandEnv) withFilerClient(ctx context.Context, filerServer string, filerPort int64, fn func(context.Context, filer_pb.SeaweedFilerClient) error) error {
-
- filerGrpcAddress := fmt.Sprintf("%s:%d", filerServer, filerPort+10000)
- return util.WithCachedGrpcClient(ctx, func(ctx2 context.Context, grpcConnection *grpc.ClientConn) error {
- client := filer_pb.NewSeaweedFilerClient(grpcConnection)
- return fn(ctx2, client)
- }, filerGrpcAddress, env.option.GrpcDialOption)
-
-}
-
-type commandFilerClient struct {
- env *CommandEnv
- filerServer string
- filerPort int64
-}
-
-func (env *CommandEnv) getFilerClient(filerServer string, filerPort int64) *commandFilerClient {
- return &commandFilerClient{
- env: env,
- filerServer: filerServer,
- filerPort: filerPort,
- }
-}
-func (c *commandFilerClient) WithFilerClient(ctx context.Context, fn func(context.Context, filer_pb.SeaweedFilerClient) error) error {
- return c.env.withFilerClient(ctx, c.filerServer, c.filerPort, fn)
-}
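
The fs.du hunk also fixes a reporting bug: the per-file line used to print the directory's running totals (blockCount, byteCount) instead of the file's own numbers, so each file appeared to grow with its predecessors. The new fileBlockCount and fileByteCount locals keep the two apart. A reduced sketch of the corrected accounting:

```go
package main

import "fmt"

type entry struct {
	name   string
	chunks int
	bytes  uint64
}

func du(entries []entry) (blockCount, byteCount uint64) {
	for _, e := range entries {
		fileBlocks := uint64(e.chunks) // this entry only
		fileBytes := e.bytes
		blockCount += fileBlocks // running totals for the directory
		byteCount += fileBytes
		// Before the fix this line printed blockCount/byteCount,
		// the cumulative totals, instead of the per-file counts.
		fmt.Printf("block:%4d\tbyte:%10d\t/dir/%s\n", fileBlocks, fileBytes, e.name)
	}
	return
}

func main() {
	du([]entry{{"a", 1, 100}, {"b", 2, 300}})
}
```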
diff --git a/weed/shell/command_fs_lock_unlock.go b/weed/shell/command_fs_lock_unlock.go
new file mode 100644
index 000000000..8a6e8f71b
--- /dev/null
+++ b/weed/shell/command_fs_lock_unlock.go
@@ -0,0 +1,54 @@
+package shell
+
+import (
+ "io"
+)
+
+func init() {
+ Commands = append(Commands, &commandUnlock{})
+ Commands = append(Commands, &commandLock{})
+}
+
+// =========== Lock ==============
+type commandLock struct {
+}
+
+func (c *commandLock) Name() string {
+ return "lock"
+}
+
+func (c *commandLock) Help() string {
+ return `lock in order to exclusively manage the cluster
+
+ This is a blocking operation if there is already another lock.
+`
+}
+
+func (c *commandLock) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+
+ commandEnv.locker.RequestLock()
+
+ return nil
+}
+
+// =========== Unlock ==============
+
+type commandUnlock struct {
+}
+
+func (c *commandUnlock) Name() string {
+ return "unlock"
+}
+
+func (c *commandUnlock) Help() string {
+ return `unlock the cluster-wide lock
+
+`
+}
+
+func (c *commandUnlock) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+
+ commandEnv.locker.ReleaseLock()
+
+ return nil
+}
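
The new lock and unlock commands pair with the confirmIsLocked() guards added to ec.decode, ec.encode and ec.rebuild above: a shell session must hold the cluster-wide exclusive lock before those destructive commands will run. A sketch of the guard pattern, with exclusiveLocker as a hypothetical stand-in for commandEnv.locker:

```go
package main

import (
	"errors"
	"fmt"
)

type exclusiveLocker struct{ held bool }

func (l *exclusiveLocker) RequestLock() { l.held = true } // blocks in the real client
func (l *exclusiveLocker) ReleaseLock() { l.held = false }

func (l *exclusiveLocker) confirmIsLocked() error {
	if !l.held {
		return errors.New("need to run \"lock\" first to continue")
	}
	return nil
}

func main() {
	locker := &exclusiveLocker{}
	if err := locker.confirmIsLocked(); err != nil {
		fmt.Println(err) // ec.encode etc. refuse to run
	}
	locker.RequestLock()
	fmt.Println(locker.confirmIsLocked() == nil) // true: safe to rebalance
	locker.ReleaseLock()
}
```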
diff --git a/weed/shell/command_fs_ls.go b/weed/shell/command_fs_ls.go
index 0c63f71fa..36133992f 100644
--- a/weed/shell/command_fs_ls.go
+++ b/weed/shell/command_fs_ls.go
@@ -1,7 +1,6 @@
package shell
import (
- "context"
"fmt"
"io"
"os"
@@ -11,6 +10,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
)
func init() {
@@ -30,9 +30,6 @@ func (c *commandFsLs) Help() string {
fs.ls [-l] [-a] /dir/
fs.ls [-l] [-a] /dir/file_name
fs.ls [-l] [-a] /dir/file_prefix
- fs.ls [-l] [-a] http://<filer_server>:<port>/dir/
- fs.ls [-l] [-a] http://<filer_server>:<port>/dir/file_name
- fs.ls [-l] [-a] http://<filer_server>:<port>/dir/file_prefix
`
}
@@ -53,26 +50,22 @@ func (c *commandFsLs) Do(args []string, commandEnv *CommandEnv, writer io.Writer
}
}
- input := findInputDirectory(args)
-
- filerServer, filerPort, path, err := commandEnv.parseUrl(input)
+ path, err := commandEnv.parseUrl(findInputDirectory(args))
if err != nil {
return err
}
- ctx := context.Background()
-
- if commandEnv.isDirectory(ctx, filerServer, filerPort, path) {
+ if commandEnv.isDirectory(path) {
path = path + "/"
}
- dir, name := filer2.FullPath(path).DirAndName()
+ dir, name := util.FullPath(path).DirAndName()
entryCount := 0
- err = filer2.ReadDirAllEntries(ctx, commandEnv.getFilerClient(filerServer, filerPort), filer2.FullPath(dir), name, func(entry *filer_pb.Entry, isLast bool) {
+ err = filer_pb.ReadDirAllEntries(commandEnv, util.FullPath(dir), name, func(entry *filer_pb.Entry, isLast bool) error {
if !showHidden && strings.HasPrefix(entry.Name, ".") {
- return
+ return nil
}
entryCount++
@@ -95,9 +88,9 @@ func (c *commandFsLs) Do(args []string, commandEnv *CommandEnv, writer io.Writer
}
}
- if dir == "/" {
+ if strings.HasSuffix(dir, "/") {
// just for printing
- dir = ""
+ dir = dir[:len(dir)-1]
}
fmt.Fprintf(writer, "%s %3d %s %s %6d %s/%s\n",
fileMode, len(entry.Chunks),
@@ -107,6 +100,7 @@ func (c *commandFsLs) Do(args []string, commandEnv *CommandEnv, writer io.Writer
fmt.Fprintf(writer, "%s\n", entry.Name)
}
+ return nil
})
if isLongFormat && err == nil {
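
One more small fix in fs.ls: the old printing code special-cased only dir == "/", while the new code strips any trailing slash before joining, covering the root and any other path that arrives with one. A small sketch of the corrected join (joinForPrint is illustrative only):

```go
package main

import (
	"fmt"
	"strings"
)

// joinForPrint mirrors the fs.ls fix: strip one trailing slash from the
// directory before joining, instead of special-casing only "/".
func joinForPrint(dir, name string) string {
	dir = strings.TrimSuffix(dir, "/")
	return dir + "/" + name
}

func main() {
	fmt.Println(joinForPrint("/", "a.txt"))     // /a.txt
	fmt.Println(joinForPrint("/docs/", "b.md")) // /docs/b.md
}
```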
diff --git a/weed/shell/command_fs_meta_cat.go b/weed/shell/command_fs_meta_cat.go
index 9980f67a2..0679ec075 100644
--- a/weed/shell/command_fs_meta_cat.go
+++ b/weed/shell/command_fs_meta_cat.go
@@ -1,14 +1,13 @@
package shell
import (
- "context"
"fmt"
"io"
"github.com/golang/protobuf/jsonpb"
- "github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
)
func init() {
@@ -27,31 +26,25 @@ func (c *commandFsMetaCat) Help() string {
fs.meta.cat /dir/
fs.meta.cat /dir/file_name
- fs.meta.cat http://<filer_server>:<port>/dir/
- fs.meta.cat http://<filer_server>:<port>/dir/file_name
`
}
func (c *commandFsMetaCat) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
- input := findInputDirectory(args)
-
- filerServer, filerPort, path, err := commandEnv.parseUrl(input)
+ path, err := commandEnv.parseUrl(findInputDirectory(args))
if err != nil {
return err
}
- ctx := context.Background()
+ dir, name := util.FullPath(path).DirAndName()
- dir, name := filer2.FullPath(path).DirAndName()
-
- return commandEnv.withFilerClient(ctx, filerServer, filerPort, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error {
+ return commandEnv.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.LookupDirectoryEntryRequest{
Name: name,
Directory: dir,
}
- respLookupEntry, err := client.LookupDirectoryEntry(ctx, request)
+ respLookupEntry, err := filer_pb.LookupEntry(client, request)
if err != nil {
return err
}
diff --git a/weed/shell/command_fs_meta_load.go b/weed/shell/command_fs_meta_load.go
index 8f2ef95e3..69ae9454c 100644
--- a/weed/shell/command_fs_meta_load.go
+++ b/weed/shell/command_fs_meta_load.go
@@ -1,15 +1,14 @@
package shell
import (
- "context"
"fmt"
"io"
"os"
- "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/golang/protobuf/proto"
+
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
- "github.com/golang/protobuf/proto"
)
func init() {
@@ -38,11 +37,6 @@ func (c *commandFsMetaLoad) Do(args []string, commandEnv *CommandEnv, writer io.
return nil
}
- filerServer, filerPort, path, err := commandEnv.parseUrl(findInputDirectory(nil))
- if err != nil {
- return err
- }
-
fileName := args[len(args)-1]
dst, err := os.OpenFile(fileName, os.O_RDONLY, 0644)
@@ -53,9 +47,7 @@ func (c *commandFsMetaLoad) Do(args []string, commandEnv *CommandEnv, writer io.
var dirCount, fileCount uint64
- ctx := context.Background()
-
- err = commandEnv.withFilerClient(ctx, filerServer, filerPort, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error {
+ err = commandEnv.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
sizeBuf := make([]byte, 4)
@@ -80,14 +72,14 @@ func (c *commandFsMetaLoad) Do(args []string, commandEnv *CommandEnv, writer io.
return err
}
- if err := filer_pb.CreateEntry(ctx, client, &filer_pb.CreateEntryRequest{
+ if err := filer_pb.CreateEntry(client, &filer_pb.CreateEntryRequest{
Directory: fullEntry.Dir,
Entry: fullEntry.Entry,
}); err != nil {
return err
}
- fmt.Fprintf(writer, "load %s\n", filer2.FullPath(fullEntry.Dir).Child(fullEntry.Entry.Name))
+ fmt.Fprintf(writer, "load %s\n", util.FullPath(fullEntry.Dir).Child(fullEntry.Entry.Name))
if fullEntry.Entry.IsDirectory {
dirCount++
@@ -101,7 +93,7 @@ func (c *commandFsMetaLoad) Do(args []string, commandEnv *CommandEnv, writer io.
if err == nil {
fmt.Fprintf(writer, "\ntotal %d directories, %d files", dirCount, fileCount)
- fmt.Fprintf(writer, "\n%s is loaded to http://%s:%d%s\n", fileName, filerServer, filerPort, path)
+ fmt.Fprintf(writer, "\n%s is loaded.\n", fileName)
}
return err
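
fs.meta.load consumes the stream fs.meta.save produces: repeated records, each a 4-byte length prefix followed by a marshalled filer_pb.FullEntry. A self-contained sketch of that framing, with the payload kept as opaque bytes and a big-endian length assumed, matching util.Uint32toBytes:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

func writeRecord(w io.Writer, payload []byte) {
	var sizeBuf [4]byte
	binary.BigEndian.PutUint32(sizeBuf[:], uint32(len(payload)))
	w.Write(sizeBuf[:])
	w.Write(payload)
}

func readRecords(r io.Reader) (records [][]byte, err error) {
	sizeBuf := make([]byte, 4)
	for {
		// A clean EOF on the length prefix means the stream is done.
		if _, err = io.ReadFull(r, sizeBuf); err == io.EOF {
			return records, nil
		} else if err != nil {
			return
		}
		payload := make([]byte, binary.BigEndian.Uint32(sizeBuf))
		if _, err = io.ReadFull(r, payload); err != nil {
			return
		}
		records = append(records, payload)
	}
}

func main() {
	var buf bytes.Buffer
	writeRecord(&buf, []byte("entry-1"))
	writeRecord(&buf, []byte("entry-2"))
	recs, _ := readRecords(&buf)
	fmt.Println(len(recs)) // 2
}
```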
diff --git a/weed/shell/command_fs_meta_notify.go b/weed/shell/command_fs_meta_notify.go
index e2b2d22cc..4342fa81d 100644
--- a/weed/shell/command_fs_meta_notify.go
+++ b/weed/shell/command_fs_meta_notify.go
@@ -1,11 +1,9 @@
package shell
import (
- "context"
"fmt"
"io"
- "github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/notification"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
@@ -34,7 +32,7 @@ func (c *commandFsMetaNotify) Help() string {
func (c *commandFsMetaNotify) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
- filerServer, filerPort, path, err := commandEnv.parseUrl(findInputDirectory(args))
+ path, err := commandEnv.parseUrl(findInputDirectory(args))
if err != nil {
return err
}
@@ -43,11 +41,9 @@ func (c *commandFsMetaNotify) Do(args []string, commandEnv *CommandEnv, writer i
v := util.GetViper()
notification.LoadConfiguration(v, "notification.")
- ctx := context.Background()
-
var dirCount, fileCount uint64
- err = doTraverseBFS(ctx, writer, commandEnv.getFilerClient(filerServer, filerPort), filer2.FullPath(path), func(parentPath filer2.FullPath, entry *filer_pb.Entry) {
+ err = filer_pb.TraverseBfs(commandEnv, util.FullPath(path), func(parentPath util.FullPath, entry *filer_pb.Entry) {
if entry.IsDirectory {
dirCount++
diff --git a/weed/shell/command_fs_meta_save.go b/weed/shell/command_fs_meta_save.go
index 178c826d5..ed19e3d01 100644
--- a/weed/shell/command_fs_meta_save.go
+++ b/weed/shell/command_fs_meta_save.go
@@ -1,7 +1,6 @@
package shell
import (
- "context"
"flag"
"fmt"
"io"
@@ -12,7 +11,6 @@ import (
"github.com/golang/protobuf/proto"
- "github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
)
@@ -40,8 +38,6 @@ func (c *commandFsMetaSave) Help() string {
The meta data will be saved into a local <filer_host>-<port>-<time>.meta file.
These meta data can be later loaded by fs.meta.load command,
- This assumes there are no deletions, so this is different from taking a snapshot.
-
`
}
@@ -50,22 +46,21 @@ func (c *commandFsMetaSave) Do(args []string, commandEnv *CommandEnv, writer io.
fsMetaSaveCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
verbose := fsMetaSaveCommand.Bool("v", false, "print out each processed files")
outputFileName := fsMetaSaveCommand.String("o", "", "output the meta data to this file")
+ // chunksFileName := fsMetaSaveCommand.String("chunks", "", "output all the chunks to this file")
if err = fsMetaSaveCommand.Parse(args); err != nil {
return nil
}
- filerServer, filerPort, path, parseErr := commandEnv.parseUrl(findInputDirectory(fsMetaSaveCommand.Args()))
+ path, parseErr := commandEnv.parseUrl(findInputDirectory(fsMetaSaveCommand.Args()))
if parseErr != nil {
return parseErr
}
- ctx := context.Background()
-
- t := time.Now()
fileName := *outputFileName
if fileName == "" {
+ t := time.Now()
fileName = fmt.Sprintf("%s-%d-%4d%02d%02d-%02d%02d%02d.meta",
- filerServer, filerPort, t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second())
+ commandEnv.option.FilerHost, commandEnv.option.FilerPort, t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second())
}
dst, openErr := os.OpenFile(fileName, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
@@ -74,35 +69,56 @@ func (c *commandFsMetaSave) Do(args []string, commandEnv *CommandEnv, writer io.
}
defer dst.Close()
- var wg sync.WaitGroup
- wg.Add(1)
- outputChan := make(chan []byte, 1024)
- go func() {
+ err = doTraverseBfsAndSaving(commandEnv, writer, path, *verbose, func(outputChan chan interface{}) {
sizeBuf := make([]byte, 4)
- for b := range outputChan {
+ for item := range outputChan {
+ b := item.([]byte)
util.Uint32toBytes(sizeBuf, uint32(len(b)))
dst.Write(sizeBuf)
dst.Write(b)
}
- wg.Done()
- }()
-
- var dirCount, fileCount uint64
-
- err = doTraverseBFS(ctx, writer, commandEnv.getFilerClient(filerServer, filerPort), filer2.FullPath(path), func(parentPath filer2.FullPath, entry *filer_pb.Entry) {
-
- protoMessage := &filer_pb.FullEntry{
- Dir: string(parentPath),
- Entry: entry,
- }
-
- bytes, err := proto.Marshal(protoMessage)
+ }, func(entry *filer_pb.FullEntry, outputChan chan interface{}) (err error) {
+ bytes, err := proto.Marshal(entry)
if err != nil {
fmt.Fprintf(writer, "marshall error: %v\n", err)
return
}
outputChan <- bytes
+ return nil
+ })
+
+ if err == nil {
+ fmt.Fprintf(writer, "meta data for http://%s:%d%s is saved to %s\n", commandEnv.option.FilerHost, commandEnv.option.FilerPort, path, fileName)
+ }
+
+ return err
+
+}
+
+func doTraverseBfsAndSaving(filerClient filer_pb.FilerClient, writer io.Writer, path string, verbose bool, saveFn func(outputChan chan interface{}), genFn func(entry *filer_pb.FullEntry, outputChan chan interface{}) error) error {
+
+ var wg sync.WaitGroup
+ wg.Add(1)
+ outputChan := make(chan interface{}, 1024)
+ go func() {
+ saveFn(outputChan)
+ wg.Done()
+ }()
+
+ var dirCount, fileCount uint64
+
+ err := filer_pb.TraverseBfs(filerClient, util.FullPath(path), func(parentPath util.FullPath, entry *filer_pb.Entry) {
+
+ protoMessage := &filer_pb.FullEntry{
+ Dir: string(parentPath),
+ Entry: entry,
+ }
+
+ if err := genFn(protoMessage, outputChan); err != nil {
+ fmt.Fprintf(writer, "marshall error: %v\n", err)
+ return
+ }
if entry.IsDirectory {
atomic.AddUint64(&dirCount, 1)
@@ -110,7 +126,7 @@ func (c *commandFsMetaSave) Do(args []string, commandEnv *CommandEnv, writer io.
atomic.AddUint64(&fileCount, 1)
}
- if *verbose {
+ if verbose {
println(parentPath.Child(entry.Name))
}
@@ -120,66 +136,8 @@ func (c *commandFsMetaSave) Do(args []string, commandEnv *CommandEnv, writer io.
wg.Wait()
- if err == nil {
+ if err == nil && writer != nil {
fmt.Fprintf(writer, "total %d directories, %d files\n", dirCount, fileCount)
- fmt.Fprintf(writer, "meta data for http://%s:%d%s is saved to %s\n", filerServer, filerPort, path, fileName)
}
-
return err
-
-}
-func doTraverseBFS(ctx context.Context, writer io.Writer, filerClient filer2.FilerClient,
- parentPath filer2.FullPath, fn func(parentPath filer2.FullPath, entry *filer_pb.Entry)) (err error) {
-
- K := 5
-
- var jobQueueWg sync.WaitGroup
- queue := util.NewQueue()
- jobQueueWg.Add(1)
- queue.Enqueue(parentPath)
- var isTerminating bool
-
- for i := 0; i < K; i++ {
- go func() {
- for {
- if isTerminating {
- break
- }
- t := queue.Dequeue()
- if t == nil {
- time.Sleep(329 * time.Millisecond)
- continue
- }
- dir := t.(filer2.FullPath)
- processErr := processOneDirectory(ctx, writer, filerClient, dir, queue, &jobQueueWg, fn)
- if processErr != nil {
- err = processErr
- }
- jobQueueWg.Done()
- }
- }()
- }
- jobQueueWg.Wait()
- isTerminating = true
- return
-}
-
-func processOneDirectory(ctx context.Context, writer io.Writer, filerClient filer2.FilerClient,
- parentPath filer2.FullPath, queue *util.Queue, jobQueueWg *sync.WaitGroup,
- fn func(parentPath filer2.FullPath, entry *filer_pb.Entry)) (err error) {
-
- return filer2.ReadDirAllEntries(ctx, filerClient, parentPath, "", func(entry *filer_pb.Entry, isLast bool) {
-
- fn(parentPath, entry)
-
- if entry.IsDirectory {
- subDir := fmt.Sprintf("%s/%s", parentPath, entry.Name)
- if parentPath == "/" {
- subDir = "/" + entry.Name
- }
- jobQueueWg.Add(1)
- queue.Enqueue(filer2.FullPath(subDir))
- }
- })
-
}
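The extracted doTraverseBfsAndSaving lets fs.meta.save and the new volume.fsck (further down) share one traversal: the BFS produces items into a buffered channel while a single goroutine consumes and persists them. Below is a minimal, self-contained sketch of that producer/consumer shape — traverseAndSave and the string items are illustrative stand-ins, not the actual SeaweedFS types:

```go
// Sketch (not the SeaweedFS API) of the split introduced by
// doTraverseBfsAndSaving: genFn pushes serialized items into a buffered
// channel; a single consumer (saveFn) drains it, writing length-prefixed records.
package main

import (
	"encoding/binary"
	"fmt"
	"os"
	"sync"
)

func traverseAndSave(paths []string, saveFn func(chan interface{}), genFn func(string, chan interface{}) error) error {
	outputChan := make(chan interface{}, 1024)
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		saveFn(outputChan) // single consumer, so the file writes need no locking
		wg.Done()
	}()
	var err error
	for _, p := range paths { // stands in for the BFS walk of the filer tree
		if genErr := genFn(p, outputChan); genErr != nil {
			err = genErr
		}
	}
	close(outputChan) // lets the consumer's range loop terminate
	wg.Wait()
	return err
}

func main() {
	dst, _ := os.CreateTemp("", "sketch-*.meta") // error handling elided for brevity
	defer os.Remove(dst.Name())
	sizeBuf := make([]byte, 4)
	err := traverseAndSave([]string{"/a", "/b"}, func(outputChan chan interface{}) {
		for item := range outputChan {
			b := item.([]byte)
			binary.BigEndian.PutUint32(sizeBuf, uint32(len(b)))
			dst.Write(sizeBuf) // 4-byte length prefix, like the .meta format
			dst.Write(b)
		}
	}, func(path string, outputChan chan interface{}) error {
		outputChan <- []byte(path) // the real code marshals a filer_pb.FullEntry here
		return nil
	})
	fmt.Println("saved, err =", err)
}
```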
diff --git a/weed/shell/command_fs_mv.go b/weed/shell/command_fs_mv.go
index e77755921..0a7eed02d 100644
--- a/weed/shell/command_fs_mv.go
+++ b/weed/shell/command_fs_mv.go
@@ -4,10 +4,9 @@ import (
"context"
"fmt"
"io"
- "path/filepath"
- "github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
)
func init() {
@@ -37,37 +36,35 @@ func (c *commandFsMv) Help() string {
func (c *commandFsMv) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
- filerServer, filerPort, sourcePath, err := commandEnv.parseUrl(args[0])
+ sourcePath, err := commandEnv.parseUrl(args[0])
if err != nil {
return err
}
- _, _, destinationPath, err := commandEnv.parseUrl(args[1])
+ destinationPath, err := commandEnv.parseUrl(args[1])
if err != nil {
return err
}
- ctx := context.Background()
+ sourceDir, sourceName := util.FullPath(sourcePath).DirAndName()
- sourceDir, sourceName := filer2.FullPath(sourcePath).DirAndName()
+ destinationDir, destinationName := util.FullPath(destinationPath).DirAndName()
- destinationDir, destinationName := filer2.FullPath(destinationPath).DirAndName()
-
- return commandEnv.withFilerClient(ctx, filerServer, filerPort, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error {
+ return commandEnv.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
// collect destination entry info
destinationRequest := &filer_pb.LookupDirectoryEntryRequest{
Name: destinationName,
Directory: destinationDir,
}
- respDestinationLookupEntry, err := client.LookupDirectoryEntry(ctx, destinationRequest)
+ respDestinationLookupEntry, err := filer_pb.LookupEntry(client, destinationRequest)
var targetDir, targetName string
// moving a file or folder
if err == nil && respDestinationLookupEntry.Entry.IsDirectory {
// to a directory
- targetDir = filepath.ToSlash(filepath.Join(destinationDir, destinationName))
+ targetDir = util.Join(destinationDir, destinationName)
targetName = sourceName
} else {
// to a file or folder
@@ -82,9 +79,9 @@ func (c *commandFsMv) Do(args []string, commandEnv *CommandEnv, writer io.Writer
NewName: targetName,
}
- _, err = client.AtomicRenameEntry(ctx, request)
+ _, err = client.AtomicRenameEntry(context.Background(), request)
- fmt.Fprintf(writer, "move: %s => %s\n", sourcePath, filer2.NewFullPath(targetDir, targetName))
+ fmt.Fprintf(writer, "move: %s => %s\n", sourcePath, util.NewFullPath(targetDir, targetName))
return err
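The lookup above drives the move semantics: when the destination exists and is a directory, the source keeps its own name inside it; otherwise the destination path's last component becomes the new name. A self-contained sketch of just that resolution, using the standard path package instead of util.FullPath, with destIsDir standing in for the LookupEntry result:

```go
// Sketch of fs.mv target resolution; destIsDir replaces the
// filer_pb.LookupEntry call and path.* replaces the util.FullPath helpers.
package main

import (
	"fmt"
	"path"
)

func resolveMoveTarget(sourcePath, destinationPath string, destIsDir bool) (targetDir, targetName string) {
	destinationDir, destinationName := path.Split(destinationPath)
	if destIsDir {
		// moving into an existing directory: keep the source's own name
		return path.Join(destinationDir, destinationName), path.Base(sourcePath)
	}
	// renaming: the destination's last component is the new name
	return path.Clean(destinationDir), destinationName
}

func main() {
	fmt.Println(resolveMoveTarget("/data/a.txt", "/archive", true))     // /archive a.txt
	fmt.Println(resolveMoveTarget("/data/a.txt", "/data/b.txt", false)) // /data b.txt
}
```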
diff --git a/weed/shell/command_fs_pwd.go b/weed/shell/command_fs_pwd.go
index 084a5e90a..d7d9819c8 100644
--- a/weed/shell/command_fs_pwd.go
+++ b/weed/shell/command_fs_pwd.go
@@ -22,11 +22,7 @@ func (c *commandFsPwd) Help() string {
func (c *commandFsPwd) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
- fmt.Fprintf(writer, "http://%s:%d%s\n",
- commandEnv.option.FilerHost,
- commandEnv.option.FilerPort,
- commandEnv.option.Directory,
- )
+ fmt.Fprintf(writer, "%s\n", commandEnv.option.Directory)
return nil
}
diff --git a/weed/shell/command_fs_tree.go b/weed/shell/command_fs_tree.go
index 8660030e3..a8c5b2018 100644
--- a/weed/shell/command_fs_tree.go
+++ b/weed/shell/command_fs_tree.go
@@ -1,13 +1,12 @@
package shell
import (
- "context"
"fmt"
"io"
"strings"
- "github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
)
func init() {
@@ -24,22 +23,21 @@ func (c *commandFsTree) Name() string {
func (c *commandFsTree) Help() string {
return `recursively list all files under a directory
- fs.tree http://<filer_server>:<port>/dir/
+ fs.tree /some/dir
+
`
}
func (c *commandFsTree) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
- filerServer, filerPort, path, err := commandEnv.parseUrl(findInputDirectory(args))
+ path, err := commandEnv.parseUrl(findInputDirectory(args))
if err != nil {
return err
}
- dir, name := filer2.FullPath(path).DirAndName()
+ dir, name := util.FullPath(path).DirAndName()
- ctx := context.Background()
-
- dirCount, fCount, terr := treeTraverseDirectory(ctx, writer, commandEnv.getFilerClient(filerServer, filerPort), filer2.FullPath(dir), name, newPrefix(), -1)
+ dirCount, fCount, terr := treeTraverseDirectory(writer, commandEnv, util.FullPath(dir), name, newPrefix(), -1)
if terr == nil {
fmt.Fprintf(writer, "%d directories, %d files\n", dirCount, fCount)
@@ -49,14 +47,14 @@ func (c *commandFsTree) Do(args []string, commandEnv *CommandEnv, writer io.Writ
}
-func treeTraverseDirectory(ctx context.Context, writer io.Writer, filerClient filer2.FilerClient, dir filer2.FullPath, name string, prefix *Prefix, level int) (directoryCount, fileCount int64, err error) {
+func treeTraverseDirectory(writer io.Writer, filerClient filer_pb.FilerClient, dir util.FullPath, name string, prefix *Prefix, level int) (directoryCount, fileCount int64, err error) {
prefix.addMarker(level)
- err = filer2.ReadDirAllEntries(ctx, filerClient, dir, name, func(entry *filer_pb.Entry, isLast bool) {
+ err = filer_pb.ReadDirAllEntries(filerClient, dir, name, func(entry *filer_pb.Entry, isLast bool) error {
if level < 0 && name != "" {
if entry.Name != name {
- return
+ return nil
}
}
@@ -65,14 +63,14 @@ func treeTraverseDirectory(ctx context.Context, writer io.Writer, filerClient fi
if entry.IsDirectory {
directoryCount++
subDir := dir.Child(entry.Name)
- dirCount, fCount, terr := treeTraverseDirectory(ctx, writer, filerClient, subDir, "", prefix, level+1)
+ dirCount, fCount, terr := treeTraverseDirectory(writer, filerClient, subDir, "", prefix, level+1)
directoryCount += dirCount
fileCount += fCount
err = terr
} else {
fileCount++
}
-
+ return nil
})
return
}
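treeTraverseDirectory's counting logic is unchanged by this refactor: each entry either recurses, folding the child's totals in, or bumps the file count. A self-contained sketch of the pattern, with a map standing in for filer_pb.ReadDirAllEntries (whose per-entry callback now returns an error):

```go
// Directory names carry a trailing slash in this toy tree; the map
// replaces the filer RPC used by the real command.
package main

import (
	"fmt"
	"path"
	"strings"
)

var tree = map[string][]string{
	"/":     {"docs/", "a.txt"},
	"/docs": {"b.txt", "c.txt"},
}

func countEntries(dir string) (dirCount, fileCount int64) {
	for _, name := range tree[dir] {
		if strings.HasSuffix(name, "/") {
			dirCount++
			d, f := countEntries(path.Join(dir, strings.TrimSuffix(name, "/")))
			dirCount, fileCount = dirCount+d, fileCount+f
		} else {
			fileCount++
		}
	}
	return
}

func main() {
	d, f := countEntries("/")
	fmt.Printf("%d directories, %d files\n", d, f) // 1 directories, 3 files
}
```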
diff --git a/weed/shell/command_volume_balance.go b/weed/shell/command_volume_balance.go
index bed4f4306..69e3c7fd9 100644
--- a/weed/shell/command_volume_balance.go
+++ b/weed/shell/command_volume_balance.go
@@ -60,6 +60,10 @@ func (c *commandVolumeBalance) Help() string {
func (c *commandVolumeBalance) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+ if err = commandEnv.confirmIsLocked(); err != nil {
+ return
+ }
+
balanceCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
collection := balanceCommand.String("collection", "EACH_COLLECTION", "collection name, or use \"ALL_COLLECTIONS\" across collections, \"EACH_COLLECTION\" for each collection")
dc := balanceCommand.String("dataCenter", "", "only apply the balancing for this dataCenter")
@@ -69,9 +73,8 @@ func (c *commandVolumeBalance) Do(args []string, commandEnv *CommandEnv, writer
}
var resp *master_pb.VolumeListResponse
- ctx := context.Background()
- err = commandEnv.MasterClient.WithClient(ctx, func(client master_pb.SeaweedClient) error {
- resp, err = client.VolumeList(ctx, &master_pb.VolumeListRequest{})
+ err = commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error {
+ resp, err = client.VolumeList(context.Background(), &master_pb.VolumeListRequest{})
return err
})
if err != nil {
@@ -109,14 +112,7 @@ func (c *commandVolumeBalance) Do(args []string, commandEnv *CommandEnv, writer
return nil
}
-func balanceVolumeServers(commandEnv *CommandEnv, dataNodeInfos []*master_pb.DataNodeInfo, volumeSizeLimit uint64, collection string, applyBalancing bool) error {
-
- var nodes []*Node
- for _, dn := range dataNodeInfos {
- nodes = append(nodes, &Node{
- info: dn,
- })
- }
+func balanceVolumeServers(commandEnv *CommandEnv, nodes []*Node, volumeSizeLimit uint64, collection string, applyBalancing bool) error {
// balance writable volumes
for _, n := range nodes {
@@ -151,15 +147,19 @@ func balanceVolumeServers(commandEnv *CommandEnv, dataNodeInfos []*master_pb.Dat
return nil
}
-func collectVolumeServersByType(t *master_pb.TopologyInfo, selectedDataCenter string) (typeToNodes map[uint64][]*master_pb.DataNodeInfo) {
- typeToNodes = make(map[uint64][]*master_pb.DataNodeInfo)
+func collectVolumeServersByType(t *master_pb.TopologyInfo, selectedDataCenter string) (typeToNodes map[uint64][]*Node) {
+ typeToNodes = make(map[uint64][]*Node)
for _, dc := range t.DataCenterInfos {
if selectedDataCenter != "" && dc.Id != selectedDataCenter {
continue
}
for _, r := range dc.RackInfos {
for _, dn := range r.DataNodeInfos {
- typeToNodes[dn.MaxVolumeCount] = append(typeToNodes[dn.MaxVolumeCount], dn)
+ typeToNodes[dn.MaxVolumeCount] = append(typeToNodes[dn.MaxVolumeCount], &Node{
+ info: dn,
+ dc: dc.Id,
+ rack: r.Id,
+ })
}
}
}
@@ -169,6 +169,8 @@ func collectVolumeServersByType(t *master_pb.TopologyInfo, selectedDataCenter st
type Node struct {
info *master_pb.DataNodeInfo
selectedVolumes map[uint32]*master_pb.VolumeInformationMessage
+ dc string
+ rack string
}
func sortWritableVolumes(volumes []*master_pb.VolumeInformationMessage) {
@@ -210,6 +212,13 @@ func balanceSelectedVolume(commandEnv *CommandEnv, nodes []*Node, sortCandidates
sortCandidatesFn(candidateVolumes)
for _, v := range candidateVolumes {
+ if v.ReplicaPlacement > 0 {
+ if fullNode.dc != emptyNode.dc && fullNode.rack != emptyNode.rack {
+ // TODO this logic is too simple, but should work most of the time
+ // Need a correct algorithm to handle all different cases
+ continue
+ }
+ }
if _, found := emptyNode.selectedVolumes[v.Id]; !found {
if err := moveVolume(commandEnv, v, fullNode, emptyNode, applyBalancing); err == nil {
delete(fullNode.selectedVolumes, v.Id)
@@ -233,8 +242,7 @@ func moveVolume(commandEnv *CommandEnv, v *master_pb.VolumeInformationMessage, f
}
fmt.Fprintf(os.Stdout, "moving volume %s%d %s => %s\n", collectionPrefix, v.Id, fullNode.info.Id, emptyNode.info.Id)
if applyBalancing {
- ctx := context.Background()
- return LiveMoveVolume(ctx, commandEnv.option.GrpcDialOption, needle.VolumeId(v.Id), fullNode.info.Id, emptyNode.info.Id, 5*time.Second)
+ return LiveMoveVolume(commandEnv.option.GrpcDialOption, needle.VolumeId(v.Id), fullNode.info.Id, emptyNode.info.Id, 5*time.Second)
}
return nil
}
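The new guard in balanceSelectedVolume is deliberately coarse (see the TODO above): for replicated volumes it refuses a move only when source and destination differ in both data center and rack. Isolated here for clarity, with node as a pared-down stand-in for the Node type:

```go
// Stand-alone reproduction of the placement guard added to
// balanceSelectedVolume.
package main

import "fmt"

type node struct{ dc, rack string }

func moveAllowed(replicaPlacement uint32, full, empty node) bool {
	if replicaPlacement > 0 && full.dc != empty.dc && full.rack != empty.rack {
		return false // changing both dc and rack could break the placement
	}
	return true
}

func main() {
	fmt.Println(moveAllowed(1, node{"dc1", "r1"}, node{"dc2", "r2"})) // false
	fmt.Println(moveAllowed(1, node{"dc1", "r1"}, node{"dc1", "r2"})) // true, same dc
	fmt.Println(moveAllowed(0, node{"dc1", "r1"}, node{"dc2", "r2"})) // true, unreplicated
}
```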
diff --git a/weed/shell/command_volume_configure_replication.go b/weed/shell/command_volume_configure_replication.go
index 6000d0de0..ff976c345 100644
--- a/weed/shell/command_volume_configure_replication.go
+++ b/weed/shell/command_volume_configure_replication.go
@@ -35,6 +35,10 @@ func (c *commandVolumeConfigureReplication) Help() string {
func (c *commandVolumeConfigureReplication) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+ if err = commandEnv.confirmIsLocked(); err != nil {
+ return
+ }
+
configureReplicationCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
volumeIdInt := configureReplicationCommand.Int("volumeId", 0, "the volume id")
replicationString := configureReplicationCommand.String("replication", "", "the intended replication value")
@@ -53,9 +57,8 @@ func (c *commandVolumeConfigureReplication) Do(args []string, commandEnv *Comman
replicaPlacementInt32 := uint32(replicaPlacement.Byte())
var resp *master_pb.VolumeListResponse
- ctx := context.Background()
- err = commandEnv.MasterClient.WithClient(ctx, func(client master_pb.SeaweedClient) error {
- resp, err = client.VolumeList(ctx, &master_pb.VolumeListRequest{})
+ err = commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error {
+ resp, err = client.VolumeList(context.Background(), &master_pb.VolumeListRequest{})
return err
})
if err != nil {
@@ -81,8 +84,8 @@ func (c *commandVolumeConfigureReplication) Do(args []string, commandEnv *Comman
}
for _, dst := range allLocations {
- err := operation.WithVolumeServerClient(dst.dataNode.Id, commandEnv.option.GrpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error {
- resp, configureErr := volumeServerClient.VolumeConfigure(ctx, &volume_server_pb.VolumeConfigureRequest{
+ err := operation.WithVolumeServerClient(dst.dataNode.Id, commandEnv.option.GrpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
+ resp, configureErr := volumeServerClient.VolumeConfigure(context.Background(), &volume_server_pb.VolumeConfigureRequest{
VolumeId: uint32(vid),
Replication: replicaPlacement.String(),
})
diff --git a/weed/shell/command_volume_copy.go b/weed/shell/command_volume_copy.go
index 1c83ba655..cdd10863f 100644
--- a/weed/shell/command_volume_copy.go
+++ b/weed/shell/command_volume_copy.go
@@ -1,7 +1,6 @@
package shell
import (
- "context"
"fmt"
"io"
@@ -32,6 +31,10 @@ func (c *commandVolumeCopy) Help() string {
func (c *commandVolumeCopy) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+ if err = commandEnv.confirmIsLocked(); err != nil {
+ return
+ }
+
if len(args) != 3 {
fmt.Fprintf(writer, "received args: %+v\n", args)
return fmt.Errorf("need 3 args of ")
@@ -47,7 +50,6 @@ func (c *commandVolumeCopy) Do(args []string, commandEnv *CommandEnv, writer io.
return fmt.Errorf("source and target volume servers are the same!")
}
- ctx := context.Background()
- _, err = copyVolume(ctx, commandEnv.option.GrpcDialOption, volumeId, sourceVolumeServer, targetVolumeServer)
+ _, err = copyVolume(commandEnv.option.GrpcDialOption, volumeId, sourceVolumeServer, targetVolumeServer)
return
}
diff --git a/weed/shell/command_volume_delete.go b/weed/shell/command_volume_delete.go
index 17d27ea3a..c5cc9e277 100644
--- a/weed/shell/command_volume_delete.go
+++ b/weed/shell/command_volume_delete.go
@@ -1,7 +1,6 @@
package shell
import (
- "context"
"fmt"
"io"
@@ -31,6 +30,10 @@ func (c *commandVolumeDelete) Help() string {
func (c *commandVolumeDelete) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+ if err = commandEnv.confirmIsLocked(); err != nil {
+ return
+ }
+
if len(args) != 2 {
fmt.Fprintf(writer, "received args: %+v\n", args)
return fmt.Errorf("need 2 args of ")
@@ -42,7 +45,6 @@ func (c *commandVolumeDelete) Do(args []string, commandEnv *CommandEnv, writer i
return fmt.Errorf("wrong volume id format %s: %v", volumeId, err)
}
- ctx := context.Background()
- return deleteVolume(ctx, commandEnv.option.GrpcDialOption, volumeId, sourceVolumeServer)
+ return deleteVolume(commandEnv.option.GrpcDialOption, volumeId, sourceVolumeServer)
}
diff --git a/weed/shell/command_volume_fix_replication.go b/weed/shell/command_volume_fix_replication.go
index 7a1a77cbe..19da89b67 100644
--- a/weed/shell/command_volume_fix_replication.go
+++ b/weed/shell/command_volume_fix_replication.go
@@ -44,15 +44,18 @@ func (c *commandVolumeFixReplication) Help() string {
func (c *commandVolumeFixReplication) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+ if err = commandEnv.confirmIsLocked(); err != nil {
+ return
+ }
+
takeAction := true
if len(args) > 0 && args[0] == "-n" {
takeAction = false
}
var resp *master_pb.VolumeListResponse
- ctx := context.Background()
- err = commandEnv.MasterClient.WithClient(ctx, func(client master_pb.SeaweedClient) error {
- resp, err = client.VolumeList(ctx, &master_pb.VolumeListRequest{})
+ err = commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error {
+ resp, err = client.VolumeList(context.Background(), &master_pb.VolumeListRequest{})
return err
})
if err != nil {
@@ -113,12 +116,12 @@ func (c *commandVolumeFixReplication) Do(args []string, commandEnv *CommandEnv,
break
}
- err := operation.WithVolumeServerClient(dst.dataNode.Id, commandEnv.option.GrpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error {
- _, replicateErr := volumeServerClient.VolumeCopy(ctx, &volume_server_pb.VolumeCopyRequest{
+ err := operation.WithVolumeServerClient(dst.dataNode.Id, commandEnv.option.GrpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
+ _, replicateErr := volumeServerClient.VolumeCopy(context.Background(), &volume_server_pb.VolumeCopyRequest{
VolumeId: volumeInfo.Id,
SourceDataNode: sourceNode.dataNode.Id,
})
- return replicateErr
+ return fmt.Errorf("copying from %s => %s : %v", sourceNode.dataNode.Id, dst.dataNode.Id, replicateErr)
})
if err != nil {
@@ -146,31 +149,131 @@ func keepDataNodesSorted(dataNodes []location) {
})
}
+/*
+ if on an existing data node {
+ return false
+ }
+ if different from existing dcs {
+ if lack on different dcs {
+ return true
+ }else{
+ return false
+ }
+ }
+ if not on primary dc {
+ return false
+ }
+ if different from existing racks {
+ if lack on different racks {
+ return true
+ }else{
+ return false
+ }
+ }
+ if not on primary rack {
+ return false
+ }
+ if lacks on same rack {
+ return true
+ } else {
+ return false
+ }
+*/
func satisfyReplicaPlacement(replicaPlacement *super_block.ReplicaPlacement, existingLocations []location, possibleLocation location) bool {
- existingDataCenters := make(map[string]bool)
- existingRacks := make(map[string]bool)
- existingDataNodes := make(map[string]bool)
+ existingDataNodes := make(map[string]int)
for _, loc := range existingLocations {
- existingDataCenters[loc.DataCenter()] = true
- existingRacks[loc.Rack()] = true
- existingDataNodes[loc.String()] = true
+ existingDataNodes[loc.String()] += 1
+ }
+ sameDataNodeCount := existingDataNodes[possibleLocation.String()]
+ // avoid duplicated volume on the same data node
+ if sameDataNodeCount > 0 {
+ return false
}
- if replicaPlacement.DiffDataCenterCount >= len(existingDataCenters) {
- // check dc, good if different from any existing data centers
- _, found := existingDataCenters[possibleLocation.DataCenter()]
- return !found
- } else if replicaPlacement.DiffRackCount >= len(existingRacks) {
- // check rack, good if different from any existing racks
- _, found := existingRacks[possibleLocation.Rack()]
- return !found
- } else if replicaPlacement.SameRackCount >= len(existingDataNodes) {
- // check data node, good if different from any existing data nodes
- _, found := existingDataNodes[possibleLocation.String()]
- return !found
+ existingDataCenters := make(map[string]int)
+ for _, loc := range existingLocations {
+ existingDataCenters[loc.DataCenter()] += 1
+ }
+ primaryDataCenters, _ := findTopKeys(existingDataCenters)
+
+ // ensure data center count is within limit
+ if _, found := existingDataCenters[possibleLocation.DataCenter()]; !found {
+ // different from existing dcs
+ if len(existingDataCenters) < replicaPlacement.DiffDataCenterCount+1 {
+ // lack on different dcs
+ return true
+ } else {
+ // adding this would go over the different dcs limit
+ return false
+ }
+ }
+ // now this is same as one of the existing data center
+ if !isAmong(possibleLocation.DataCenter(), primaryDataCenters) {
+ // not on one of the primary dcs
+ return false
}
+ // now this is one of the primary dcs
+ existingRacks := make(map[string]int)
+ for _, loc := range existingLocations {
+ if loc.DataCenter() != possibleLocation.DataCenter() {
+ continue
+ }
+ existingRacks[loc.Rack()] += 1
+ }
+ primaryRacks, _ := findTopKeys(existingRacks)
+ sameRackCount := existingRacks[possibleLocation.Rack()]
+
+ // ensure rack count is within limit
+ if _, found := existingRacks[possibleLocation.Rack()]; !found {
+ // different from existing racks
+ if len(existingRacks) < replicaPlacement.DiffRackCount+1 {
+ // lack on different racks
+ return true
+ } else {
+ // adding this would go over the different racks limit
+ return false
+ }
+ }
+ // now this is same as one of the existing racks
+ if !isAmong(possibleLocation.Rack(), primaryRacks) {
+ // not on the primary rack
+ return false
+ }
+
+ // now this is on the primary rack
+
+ // different from existing data nodes
+ if sameRackCount < replicaPlacement.SameRackCount+1 {
+ // lack on same rack
+ return true
+ } else {
+ // adding this would go over the same data node limit
+ return false
+ }
+
+}
+
+func findTopKeys(m map[string]int) (topKeys []string, max int) {
+ for k, c := range m {
+ if max < c {
+ topKeys = topKeys[:0]
+ topKeys = append(topKeys, k)
+ max = c
+ } else if max == c {
+ topKeys = append(topKeys, k)
+ }
+ }
+ return
+}
+
+func isAmong(key string, keys []string) bool {
+ for _, k := range keys {
+ if k == key {
+ return true
+ }
+ }
return false
}
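findTopKeys is the pivot of the rewritten satisfyReplicaPlacement: it returns the data centers (or racks) that already hold the most replicas, and a candidate on a non-primary dc or rack is rejected. Copied verbatim into a runnable check of its tie handling (the test file below exercises the full placement decision):

```go
package main

import "fmt"

// findTopKeys is copied from the diff above: it keeps every key that ties
// for the highest count, discarding earlier keys when a new maximum appears.
func findTopKeys(m map[string]int) (topKeys []string, max int) {
	for k, c := range m {
		if max < c {
			topKeys = topKeys[:0] // a new maximum invalidates the keys so far
			topKeys = append(topKeys, k)
			max = c
		} else if max == c {
			topKeys = append(topKeys, k)
		}
	}
	return
}

func main() {
	keys, max := findTopKeys(map[string]int{"dc1": 2, "dc2": 2, "dc3": 1})
	fmt.Println(keys, max) // [dc1 dc2] 2 (map iteration order, so the pair may be reversed)
}
```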
diff --git a/weed/shell/command_volume_fix_replication_test.go b/weed/shell/command_volume_fix_replication_test.go
new file mode 100644
index 000000000..4cfbd96aa
--- /dev/null
+++ b/weed/shell/command_volume_fix_replication_test.go
@@ -0,0 +1,207 @@
+package shell
+
+import (
+ "testing"
+
+ "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+ "github.com/chrislusf/seaweedfs/weed/storage/super_block"
+)
+
+type testcase struct {
+ name string
+ replication string
+ existingLocations []location
+ possibleLocation location
+ expected bool
+}
+
+func TestSatisfyReplicaPlacementComplicated(t *testing.T) {
+
+ var tests = []testcase{
+ {
+ name: "test 100 negative",
+ replication: "100",
+ existingLocations: []location{
+ {"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
+ },
+ possibleLocation: location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
+ expected: false,
+ },
+ {
+ name: "test 100 positive",
+ replication: "100",
+ existingLocations: []location{
+ {"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
+ },
+ possibleLocation: location{"dc2", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
+ expected: true,
+ },
+ {
+ name: "test 022 positive",
+ replication: "022",
+ existingLocations: []location{
+ {"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
+ {"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
+ {"dc1", "r3", &master_pb.DataNodeInfo{Id: "dn3"}},
+ },
+ possibleLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn4"}},
+ expected: true,
+ },
+ {
+ name: "test 022 negative",
+ replication: "022",
+ existingLocations: []location{
+ {"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
+ {"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
+ {"dc1", "r3", &master_pb.DataNodeInfo{Id: "dn3"}},
+ },
+ possibleLocation: location{"dc1", "r4", &master_pb.DataNodeInfo{Id: "dn4"}},
+ expected: false,
+ },
+ {
+ name: "test 210 moved from 200 positive",
+ replication: "210",
+ existingLocations: []location{
+ {"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
+ {"dc2", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
+ {"dc3", "r3", &master_pb.DataNodeInfo{Id: "dn3"}},
+ },
+ possibleLocation: location{"dc1", "r4", &master_pb.DataNodeInfo{Id: "dn4"}},
+ expected: true,
+ },
+ {
+ name: "test 210 moved from 200 negative extra dc",
+ replication: "210",
+ existingLocations: []location{
+ {"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
+ {"dc2", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
+ {"dc3", "r3", &master_pb.DataNodeInfo{Id: "dn3"}},
+ },
+ possibleLocation: location{"dc4", "r4", &master_pb.DataNodeInfo{Id: "dn4"}},
+ expected: false,
+ },
+ {
+ name: "test 210 moved from 200 negative extra data node",
+ replication: "210",
+ existingLocations: []location{
+ {"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
+ {"dc2", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
+ {"dc3", "r3", &master_pb.DataNodeInfo{Id: "dn3"}},
+ },
+ possibleLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn4"}},
+ expected: false,
+ },
+ }
+
+ runTests(tests, t)
+
+}
+
+func TestSatisfyReplicaPlacement01x(t *testing.T) {
+
+ var tests = []testcase{
+ {
+ name: "test 011 same existing rack",
+ replication: "011",
+ existingLocations: []location{
+ {"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
+ {"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}},
+ },
+ possibleLocation: location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn3"}},
+ expected: true,
+ },
+ {
+ name: "test 011 negative",
+ replication: "011",
+ existingLocations: []location{
+ {"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
+ {"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}},
+ },
+ possibleLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn3"}},
+ expected: false,
+ },
+ {
+ name: "test 011 different existing racks",
+ replication: "011",
+ existingLocations: []location{
+ {"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
+ {"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
+ },
+ possibleLocation: location{"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn3"}},
+ expected: true,
+ },
+ {
+ name: "test 011 different existing racks negative",
+ replication: "011",
+ existingLocations: []location{
+ {"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
+ {"dc1", "r2", &master_pb.DataNodeInfo{Id: "dn2"}},
+ },
+ possibleLocation: location{"dc1", "r3", &master_pb.DataNodeInfo{Id: "dn3"}},
+ expected: false,
+ },
+ }
+
+ runTests(tests, t)
+
+}
+
+func TestSatisfyReplicaPlacement00x(t *testing.T) {
+
+ var tests = []testcase{
+ {
+ name: "test 001",
+ replication: "001",
+ existingLocations: []location{
+ {"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
+ },
+ possibleLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}},
+ expected: true,
+ },
+ {
+ name: "test 002 positive",
+ replication: "002",
+ existingLocations: []location{
+ {"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
+ {"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}},
+ },
+ possibleLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn3"}},
+ expected: true,
+ },
+ {
+ name: "test 002 negative, repeat the same node",
+ replication: "002",
+ existingLocations: []location{
+ {"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
+ {"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}},
+ },
+ possibleLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}},
+ expected: false,
+ },
+ {
+ name: "test 002 negative, enough node already",
+ replication: "002",
+ existingLocations: []location{
+ {"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn1"}},
+ {"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn2"}},
+ {"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn3"}},
+ },
+ possibleLocation: location{"dc1", "r1", &master_pb.DataNodeInfo{Id: "dn4"}},
+ expected: false,
+ },
+ }
+
+ runTests(tests, t)
+
+}
+
+func runTests(tests []testcase, t *testing.T) {
+ for _, tt := range tests {
+ replicaPlacement, _ := super_block.NewReplicaPlacementFromString(tt.replication)
+ println("replication:", tt.replication, "expected", tt.expected, "name:", tt.name)
+ if satisfyReplicaPlacement(replicaPlacement, tt.existingLocations, tt.possibleLocation) != tt.expected {
+ t.Errorf("%s: expect %v add %v to %s %+v",
+ tt.name, tt.expected, tt.possibleLocation, tt.replication, tt.existingLocations)
+ }
+ }
+}
diff --git a/weed/shell/command_volume_fsck.go b/weed/shell/command_volume_fsck.go
new file mode 100644
index 000000000..69a1a63b4
--- /dev/null
+++ b/weed/shell/command_volume_fsck.go
@@ -0,0 +1,361 @@
+package shell
+
+import (
+ "context"
+ "flag"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "math"
+ "os"
+ "path/filepath"
+ "sync"
+
+ "github.com/chrislusf/seaweedfs/weed/operation"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle_map"
+ "github.com/chrislusf/seaweedfs/weed/storage/types"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+func init() {
+ Commands = append(Commands, &commandVolumeFsck{})
+}
+
+type commandVolumeFsck struct {
+ env *CommandEnv
+}
+
+func (c *commandVolumeFsck) Name() string {
+ return "volume.fsck"
+}
+
+func (c *commandVolumeFsck) Help() string {
+ return `check all volumes to find entries not used by the filer
+
+ Important assumption!!!
+ the whole system is used by exactly one filer.
+
+ This command works this way:
+ 1. collect all file ids from all volumes, as set A
+ 2. collect all file ids from the filer, as set B
+ 3. compute the difference: set A minus set B
+
+`
+}
+
+func (c *commandVolumeFsck) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+
+ if err = commandEnv.confirmIsLocked(); err != nil {
+ return
+ }
+
+ fsckCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+ verbose := fsckCommand.Bool("v", false, "verbose mode")
+ applyPurging := fsckCommand.Bool("reallyDeleteFromVolume", false, "delete data not referenced by the filer")
+ if err = fsckCommand.Parse(args); err != nil {
+ return nil
+ }
+
+ c.env = commandEnv
+
+ // create a temp folder
+ tempFolder, err := ioutil.TempDir("", "sw_fsck")
+ if err != nil {
+ return fmt.Errorf("failed to create temp folder: %v", err)
+ }
+ if *verbose {
+ fmt.Fprintf(writer, "working directory: %s\n", tempFolder)
+ }
+ defer os.RemoveAll(tempFolder)
+
+ // collect all volume id locations
+ volumeIdToVInfo, err := c.collectVolumeIds(*verbose, writer)
+ if err != nil {
+ return fmt.Errorf("failed to collect all volume locations: %v", err)
+ }
+
+ // collect each volume file ids
+ for volumeId, vinfo := range volumeIdToVInfo {
+ err = c.collectOneVolumeFileIds(tempFolder, volumeId, vinfo, *verbose, writer)
+ if err != nil {
+ return fmt.Errorf("failed to collect file ids from volume %d on %s: %v", volumeId, vinfo.server, err)
+ }
+ }
+
+ // collect all filer file ids
+ if err = c.collectFilerFileIds(tempFolder, volumeIdToVInfo, *verbose, writer); err != nil {
+ return fmt.Errorf("failed to collect file ids from filer: %v", err)
+ }
+
+ // subtract filer file ids from volume file ids
+ var totalInUseCount, totalOrphanChunkCount, totalOrphanDataSize uint64
+ for volumeId, vinfo := range volumeIdToVInfo {
+ inUseCount, orphanFileIds, orphanDataSize, checkErr := c.oneVolumeFileIdsSubtractFilerFileIds(tempFolder, volumeId, writer, *verbose)
+ if checkErr != nil {
+ return fmt.Errorf("failed to collect file ids from volume %d on %s: %v", volumeId, vinfo.server, checkErr)
+ }
+ totalInUseCount += inUseCount
+ totalOrphanChunkCount += uint64(len(orphanFileIds))
+ totalOrphanDataSize += orphanDataSize
+
+ if *applyPurging && len(orphanFileIds) > 0 {
+ if vinfo.isEcVolume {
+ fmt.Fprintf(writer, "Skip purging for Erasure Coded volumes.\n")
+ continue
+ }
+ if err = c.purgeFileIdsForOneVolume(volumeId, orphanFileIds, writer); err != nil {
+ return fmt.Errorf("purge for volume %d: %v\n", volumeId, err)
+ }
+ }
+ }
+
+ if totalOrphanChunkCount == 0 {
+ fmt.Fprintf(writer, "no orphan data\n")
+ return nil
+ }
+
+ if !*applyPurging {
+ pct := float64(totalOrphanChunkCount*100) / (float64(totalOrphanChunkCount + totalInUseCount))
+ fmt.Fprintf(writer, "\nTotal\t\tentries:%d\torphan:%d\t%.2f%%\t%dB\n",
+ totalOrphanChunkCount+totalInUseCount, totalOrphanChunkCount, pct, totalOrphanDataSize)
+
+ fmt.Fprintf(writer, "This could be normal if multiple filers or no filers are used.\n")
+ }
+
+ return nil
+}
+
+func (c *commandVolumeFsck) collectOneVolumeFileIds(tempFolder string, volumeId uint32, vinfo VInfo, verbose bool, writer io.Writer) error {
+
+ if verbose {
+ fmt.Fprintf(writer, "collecting volume %d file ids from %s ...\n", volumeId, vinfo.server)
+ }
+
+ return operation.WithVolumeServerClient(vinfo.server, c.env.option.GrpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
+
+ ext := ".idx"
+ if vinfo.isEcVolume {
+ ext = ".ecx"
+ }
+
+ copyFileClient, err := volumeServerClient.CopyFile(context.Background(), &volume_server_pb.CopyFileRequest{
+ VolumeId: volumeId,
+ Ext: ext,
+ CompactionRevision: math.MaxUint32,
+ StopOffset: math.MaxInt64,
+ Collection: vinfo.collection,
+ IsEcVolume: vinfo.isEcVolume,
+ IgnoreSourceFileNotFound: false,
+ })
+ if err != nil {
+ return fmt.Errorf("failed to start copying volume %d.idx: %v", volumeId, err)
+ }
+
+ err = writeToFile(copyFileClient, getVolumeFileIdFile(tempFolder, volumeId))
+ if err != nil {
+ return fmt.Errorf("failed to copy %d.idx from %s: %v", volumeId, vinfo.server, err)
+ }
+
+ return nil
+
+ })
+
+}
+
+func (c *commandVolumeFsck) collectFilerFileIds(tempFolder string, volumeIdToServer map[uint32]VInfo, verbose bool, writer io.Writer) error {
+
+ if verbose {
+ fmt.Fprintf(writer, "collecting file ids from filer ...\n")
+ }
+
+ files := make(map[uint32]*os.File)
+ for vid := range volumeIdToServer {
+ dst, openErr := os.OpenFile(getFilerFileIdFile(tempFolder, vid), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
+ if openErr != nil {
+ return fmt.Errorf("failed to create file %s: %v", getFilerFileIdFile(tempFolder, vid), openErr)
+ }
+ files[vid] = dst
+ }
+ defer func() {
+ for _, f := range files {
+ f.Close()
+ }
+ }()
+
+ type Item struct {
+ vid uint32
+ fileKey uint64
+ }
+ return doTraverseBfsAndSaving(c.env, nil, "/", false, func(outputChan chan interface{}) {
+ buffer := make([]byte, 8)
+ for item := range outputChan {
+ i := item.(*Item)
+ util.Uint64toBytes(buffer, i.fileKey)
+ files[i.vid].Write(buffer)
+ }
+ }, func(entry *filer_pb.FullEntry, outputChan chan interface{}) (err error) {
+ for _, chunk := range entry.Entry.Chunks {
+ outputChan <- &Item{
+ vid: chunk.Fid.VolumeId,
+ fileKey: chunk.Fid.FileKey,
+ }
+ }
+ return nil
+ })
+}
+
+func (c *commandVolumeFsck) oneVolumeFileIdsSubtractFilerFileIds(tempFolder string, volumeId uint32, writer io.Writer, verbose bool) (inUseCount uint64, orphanFileIds []string, orphanDataSize uint64, err error) {
+
+ db := needle_map.NewMemDb()
+ defer db.Close()
+
+ if err = db.LoadFromIdx(getVolumeFileIdFile(tempFolder, volumeId)); err != nil {
+ return
+ }
+
+ filerFileIdsData, err := ioutil.ReadFile(getFilerFileIdFile(tempFolder, volumeId))
+ if err != nil {
+ return
+ }
+
+ dataLen := len(filerFileIdsData)
+ if dataLen%8 != 0 {
+ return 0, nil, 0, fmt.Errorf("filer data is corrupted")
+ }
+
+ for i := 0; i < len(filerFileIdsData); i += 8 {
+ fileKey := util.BytesToUint64(filerFileIdsData[i : i+8])
+ db.Delete(types.NeedleId(fileKey))
+ inUseCount++
+ }
+
+ var orphanFileCount uint64
+ db.AscendingVisit(func(n needle_map.NeedleValue) error {
+ // fmt.Printf("%d,%x\n", volumeId, n.Key)
+ orphanFileIds = append(orphanFileIds, fmt.Sprintf("%d,%s", volumeId, n.Key.String()))
+ orphanFileCount++
+ orphanDataSize += uint64(n.Size)
+ return nil
+ })
+
+ if orphanFileCount > 0 {
+ pct := float64(orphanFileCount*100) / (float64(orphanFileCount + inUseCount))
+ fmt.Fprintf(writer, "volume:%d\tentries:%d\torphan:%d\t%.2f%%\t%dB\n",
+ volumeId, orphanFileCount+inUseCount, orphanFileCount, pct, orphanDataSize)
+ }
+
+ return
+
+}
+
+type VInfo struct {
+ server string
+ collection string
+ isEcVolume bool
+}
+
+func (c *commandVolumeFsck) collectVolumeIds(verbose bool, writer io.Writer) (volumeIdToServer map[uint32]VInfo, err error) {
+
+ if verbose {
+ fmt.Fprintf(writer, "collecting volume id and locations from master ...\n")
+ }
+
+ volumeIdToServer = make(map[uint32]VInfo)
+ var resp *master_pb.VolumeListResponse
+ err = c.env.MasterClient.WithClient(func(client master_pb.SeaweedClient) error {
+ resp, err = client.VolumeList(context.Background(), &master_pb.VolumeListRequest{})
+ return err
+ })
+ if err != nil {
+ return
+ }
+
+ eachDataNode(resp.TopologyInfo, func(dc string, rack RackId, t *master_pb.DataNodeInfo) {
+ for _, vi := range t.VolumeInfos {
+ volumeIdToServer[vi.Id] = VInfo{
+ server: t.Id,
+ collection: vi.Collection,
+ isEcVolume: false,
+ }
+ }
+ for _, ecShardInfo := range t.EcShardInfos {
+ volumeIdToServer[ecShardInfo.Id] = VInfo{
+ server: t.Id,
+ collection: ecShardInfo.Collection,
+ isEcVolume: true,
+ }
+ }
+ })
+
+ if verbose {
+ fmt.Fprintf(writer, "collected %d volumes and locations.\n", len(volumeIdToServer))
+ }
+ return
+}
+
+func (c *commandVolumeFsck) purgeFileIdsForOneVolume(volumeId uint32, fileIds []string, writer io.Writer) (err error) {
+ fmt.Fprintf(writer, "purging orphan data for volume %d...\n", volumeId)
+ locations, found := c.env.MasterClient.GetLocations(volumeId)
+ if !found {
+ return fmt.Errorf("failed to find volume %d locations", volumeId)
+ }
+
+ resultChan := make(chan []*volume_server_pb.DeleteResult, len(locations))
+ var wg sync.WaitGroup
+ for _, location := range locations {
+ wg.Add(1)
+ go func(server string, fidList []string) {
+ defer wg.Done()
+
+ if deleteResults, deleteErr := operation.DeleteFilesAtOneVolumeServer(server, c.env.option.GrpcDialOption, fidList, false); deleteErr != nil {
+ err = deleteErr
+ } else if deleteResults != nil {
+ resultChan <- deleteResults
+ }
+
+ }(location.Url, fileIds)
+ }
+ wg.Wait()
+ close(resultChan)
+
+ for results := range resultChan {
+ for _, result := range results {
+ if result.Error != "" {
+ fmt.Fprintf(writer, "purge error: %s\n", result.Error)
+ }
+ }
+ }
+
+ return
+}
+
+func getVolumeFileIdFile(tempFolder string, vid uint32) string {
+ return filepath.Join(tempFolder, fmt.Sprintf("%d.idx", vid))
+}
+
+func getFilerFileIdFile(tempFolder string, vid uint32) string {
+ return filepath.Join(tempFolder, fmt.Sprintf("%d.fid", vid))
+}
+
+func writeToFile(client volume_server_pb.VolumeServer_CopyFileClient, fileName string) error {
+ flags := os.O_WRONLY | os.O_CREATE | os.O_TRUNC
+ dst, err := os.OpenFile(fileName, flags, 0644)
+ if err != nil {
+ return err
+ }
+ defer dst.Close()
+
+ for {
+ resp, receiveErr := client.Recv()
+ if receiveErr == io.EOF {
+ break
+ }
+ if receiveErr != nil {
+ return fmt.Errorf("receiving %s: %v", fileName, receiveErr)
+ }
+ dst.Write(resp.FileContent)
+ }
+ return nil
+}
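Stripped of the streaming and temp files, volume.fsck is plain set arithmetic: keys present in the volume indexes (set A) minus keys referenced by the filer (set B) are orphans. A toy version with maps in place of the needle_map.MemDb:

```go
package main

import "fmt"

func main() {
	volumeKeys := map[uint64]uint64{ // set A: file key -> size, from the .idx files
		101: 1024, 102: 2048, 103: 512,
	}
	filerKeys := []uint64{101, 103} // set B: keys the filer still references
	for _, k := range filerKeys {
		delete(volumeKeys, k) // mirrors db.Delete in oneVolumeFileIdsSubtractFilerFileIds
	}
	var orphanSize uint64
	for k, size := range volumeKeys { // whatever survives is orphaned
		orphanSize += size
		fmt.Printf("orphan file key %d (%d bytes)\n", k, size)
	}
	fmt.Printf("total orphan size: %dB\n", orphanSize) // 2048B, from key 102
}
```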
diff --git a/weed/shell/command_volume_list.go b/weed/shell/command_volume_list.go
index c6c79d150..c5a9388fa 100644
--- a/weed/shell/command_volume_list.go
+++ b/weed/shell/command_volume_list.go
@@ -32,9 +32,8 @@ func (c *commandVolumeList) Help() string {
func (c *commandVolumeList) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
var resp *master_pb.VolumeListResponse
- ctx := context.Background()
- err = commandEnv.MasterClient.WithClient(ctx, func(client master_pb.SeaweedClient) error {
- resp, err = client.VolumeList(ctx, &master_pb.VolumeListRequest{})
+ err = commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error {
+ resp, err = client.VolumeList(context.Background(), &master_pb.VolumeListRequest{})
return err
})
if err != nil {
diff --git a/weed/shell/command_volume_mount.go b/weed/shell/command_volume_mount.go
index 21bc342b4..ded7b7e66 100644
--- a/weed/shell/command_volume_mount.go
+++ b/weed/shell/command_volume_mount.go
@@ -34,6 +34,10 @@ func (c *commandVolumeMount) Help() string {
func (c *commandVolumeMount) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+ if err = commandEnv.confirmIsLocked(); err != nil {
+ return
+ }
+
if len(args) != 2 {
fmt.Fprintf(writer, "received args: %+v\n", args)
return fmt.Errorf("need 2 args of ")
@@ -45,14 +49,13 @@ func (c *commandVolumeMount) Do(args []string, commandEnv *CommandEnv, writer io
return fmt.Errorf("wrong volume id format %s: %v", volumeId, err)
}
- ctx := context.Background()
- return mountVolume(ctx, commandEnv.option.GrpcDialOption, volumeId, sourceVolumeServer)
+ return mountVolume(commandEnv.option.GrpcDialOption, volumeId, sourceVolumeServer)
}
-func mountVolume(ctx context.Context, grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer string) (err error) {
- return operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error {
- _, mountErr := volumeServerClient.VolumeMount(ctx, &volume_server_pb.VolumeMountRequest{
+func mountVolume(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer string) (err error) {
+ return operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
+ _, mountErr := volumeServerClient.VolumeMount(context.Background(), &volume_server_pb.VolumeMountRequest{
VolumeId: uint32(volumeId),
})
return mountErr
diff --git a/weed/shell/command_volume_move.go b/weed/shell/command_volume_move.go
index 2e39c0600..392b947e7 100644
--- a/weed/shell/command_volume_move.go
+++ b/weed/shell/command_volume_move.go
@@ -44,6 +44,10 @@ func (c *commandVolumeMove) Help() string {
func (c *commandVolumeMove) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+ if err = commandEnv.confirmIsLocked(); err != nil {
+ return
+ }
+
if len(args) != 3 {
fmt.Fprintf(writer, "received args: %+v\n", args)
return fmt.Errorf("need 3 args of ")
@@ -59,26 +63,25 @@ func (c *commandVolumeMove) Do(args []string, commandEnv *CommandEnv, writer io.
return fmt.Errorf("source and target volume servers are the same!")
}
- ctx := context.Background()
- return LiveMoveVolume(ctx, commandEnv.option.GrpcDialOption, volumeId, sourceVolumeServer, targetVolumeServer, 5*time.Second)
+ return LiveMoveVolume(commandEnv.option.GrpcDialOption, volumeId, sourceVolumeServer, targetVolumeServer, 5*time.Second)
}
// LiveMoveVolume moves one volume from one source volume server to one target volume server, with idleTimeout to drain the incoming requests.
-func LiveMoveVolume(ctx context.Context, grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer, targetVolumeServer string, idleTimeout time.Duration) (err error) {
+func LiveMoveVolume(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer, targetVolumeServer string, idleTimeout time.Duration) (err error) {
log.Printf("copying volume %d from %s to %s", volumeId, sourceVolumeServer, targetVolumeServer)
- lastAppendAtNs, err := copyVolume(ctx, grpcDialOption, volumeId, sourceVolumeServer, targetVolumeServer)
+ lastAppendAtNs, err := copyVolume(grpcDialOption, volumeId, sourceVolumeServer, targetVolumeServer)
if err != nil {
return fmt.Errorf("copy volume %d from %s to %s: %v", volumeId, sourceVolumeServer, targetVolumeServer, err)
}
log.Printf("tailing volume %d from %s to %s", volumeId, sourceVolumeServer, targetVolumeServer)
- if err = tailVolume(ctx, grpcDialOption, volumeId, sourceVolumeServer, targetVolumeServer, lastAppendAtNs, idleTimeout); err != nil {
+ if err = tailVolume(grpcDialOption, volumeId, sourceVolumeServer, targetVolumeServer, lastAppendAtNs, idleTimeout); err != nil {
return fmt.Errorf("tail volume %d from %s to %s: %v", volumeId, sourceVolumeServer, targetVolumeServer, err)
}
log.Printf("deleting volume %d from %s", volumeId, sourceVolumeServer)
- if err = deleteVolume(ctx, grpcDialOption, volumeId, sourceVolumeServer); err != nil {
+ if err = deleteVolume(grpcDialOption, volumeId, sourceVolumeServer); err != nil {
return fmt.Errorf("delete volume %d from %s: %v", volumeId, sourceVolumeServer, err)
}
@@ -86,10 +89,10 @@ func LiveMoveVolume(ctx context.Context, grpcDialOption grpc.DialOption, volumeI
return nil
}
-func copyVolume(ctx context.Context, grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer, targetVolumeServer string) (lastAppendAtNs uint64, err error) {
+func copyVolume(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer, targetVolumeServer string) (lastAppendAtNs uint64, err error) {
- err = operation.WithVolumeServerClient(targetVolumeServer, grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error {
- resp, replicateErr := volumeServerClient.VolumeCopy(ctx, &volume_server_pb.VolumeCopyRequest{
+ err = operation.WithVolumeServerClient(targetVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
+ resp, replicateErr := volumeServerClient.VolumeCopy(context.Background(), &volume_server_pb.VolumeCopyRequest{
VolumeId: uint32(volumeId),
SourceDataNode: sourceVolumeServer,
})
@@ -102,10 +105,10 @@ func copyVolume(ctx context.Context, grpcDialOption grpc.DialOption, volumeId ne
return
}
-func tailVolume(ctx context.Context, grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer, targetVolumeServer string, lastAppendAtNs uint64, idleTimeout time.Duration) (err error) {
+func tailVolume(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer, targetVolumeServer string, lastAppendAtNs uint64, idleTimeout time.Duration) (err error) {
- return operation.WithVolumeServerClient(targetVolumeServer, grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error {
- _, replicateErr := volumeServerClient.VolumeTailReceiver(ctx, &volume_server_pb.VolumeTailReceiverRequest{
+ return operation.WithVolumeServerClient(targetVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
+ _, replicateErr := volumeServerClient.VolumeTailReceiver(context.Background(), &volume_server_pb.VolumeTailReceiverRequest{
VolumeId: uint32(volumeId),
SinceNs: lastAppendAtNs,
IdleTimeoutSeconds: uint32(idleTimeout.Seconds()),
@@ -116,9 +119,9 @@ func tailVolume(ctx context.Context, grpcDialOption grpc.DialOption, volumeId ne
}
-func deleteVolume(ctx context.Context, grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer string) (err error) {
- return operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error {
- _, deleteErr := volumeServerClient.VolumeDelete(ctx, &volume_server_pb.VolumeDeleteRequest{
+func deleteVolume(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer string) (err error) {
+ return operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
+ _, deleteErr := volumeServerClient.VolumeDelete(context.Background(), &volume_server_pb.VolumeDeleteRequest{
VolumeId: uint32(volumeId),
})
return deleteErr
diff --git a/weed/shell/command_volume_tier_download.go b/weed/shell/command_volume_tier_download.go
index 0f1a1bb6e..d31c8c031 100644
--- a/weed/shell/command_volume_tier_download.go
+++ b/weed/shell/command_volume_tier_download.go
@@ -42,6 +42,10 @@ func (c *commandVolumeTierDownload) Help() string {
func (c *commandVolumeTierDownload) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+ if err = commandEnv.confirmIsLocked(); err != nil {
+ return
+ }
+
tierCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
volumeId := tierCommand.Int("volumeId", 0, "the volume id")
collection := tierCommand.String("collection", "", "the collection name")
@@ -49,18 +53,17 @@ func (c *commandVolumeTierDownload) Do(args []string, commandEnv *CommandEnv, wr
return nil
}
- ctx := context.Background()
vid := needle.VolumeId(*volumeId)
// collect topology information
- topologyInfo, err := collectTopologyInfo(ctx, commandEnv)
+ topologyInfo, err := collectTopologyInfo(commandEnv)
if err != nil {
return err
}
// volumeId is provided
if vid != 0 {
- return doVolumeTierDownload(ctx, commandEnv, writer, *collection, vid)
+ return doVolumeTierDownload(commandEnv, writer, *collection, vid)
}
// apply to all volumes in the collection
@@ -71,7 +74,7 @@ func (c *commandVolumeTierDownload) Do(args []string, commandEnv *CommandEnv, wr
}
fmt.Printf("tier download volumes: %v\n", volumeIds)
for _, vid := range volumeIds {
- if err = doVolumeTierDownload(ctx, commandEnv, writer, *collection, vid); err != nil {
+ if err = doVolumeTierDownload(commandEnv, writer, *collection, vid); err != nil {
return err
}
}
@@ -97,7 +100,7 @@ func collectRemoteVolumes(topoInfo *master_pb.TopologyInfo, selectedCollection s
return
}
-func doVolumeTierDownload(ctx context.Context, commandEnv *CommandEnv, writer io.Writer, collection string, vid needle.VolumeId) (err error) {
+func doVolumeTierDownload(commandEnv *CommandEnv, writer io.Writer, collection string, vid needle.VolumeId) (err error) {
// find volume location
locations, found := commandEnv.MasterClient.GetLocations(uint32(vid))
if !found {
@@ -107,7 +110,7 @@ func doVolumeTierDownload(ctx context.Context, commandEnv *CommandEnv, writer io
// TODO parallelize this
for _, loc := range locations {
// copy the .dat file from remote tier to local
- err = downloadDatFromRemoteTier(ctx, commandEnv.option.GrpcDialOption, writer, needle.VolumeId(vid), collection, loc.Url)
+ err = downloadDatFromRemoteTier(commandEnv.option.GrpcDialOption, writer, needle.VolumeId(vid), collection, loc.Url)
if err != nil {
return fmt.Errorf("download dat file for volume %d to %s: %v", vid, loc.Url, err)
}
@@ -116,10 +119,10 @@ func doVolumeTierDownload(ctx context.Context, commandEnv *CommandEnv, writer io
return nil
}
-func downloadDatFromRemoteTier(ctx context.Context, grpcDialOption grpc.DialOption, writer io.Writer, volumeId needle.VolumeId, collection string, targetVolumeServer string) error {
+func downloadDatFromRemoteTier(grpcDialOption grpc.DialOption, writer io.Writer, volumeId needle.VolumeId, collection string, targetVolumeServer string) error {
- err := operation.WithVolumeServerClient(targetVolumeServer, grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error {
- stream, downloadErr := volumeServerClient.VolumeTierMoveDatFromRemote(ctx, &volume_server_pb.VolumeTierMoveDatFromRemoteRequest{
+ err := operation.WithVolumeServerClient(targetVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
+ stream, downloadErr := volumeServerClient.VolumeTierMoveDatFromRemote(context.Background(), &volume_server_pb.VolumeTierMoveDatFromRemoteRequest{
VolumeId: uint32(volumeId),
Collection: collection,
})
@@ -145,14 +148,14 @@ func downloadDatFromRemoteTier(ctx context.Context, grpcDialOption grpc.DialOpti
return downloadErr
}
- _, unmountErr := volumeServerClient.VolumeUnmount(ctx, &volume_server_pb.VolumeUnmountRequest{
+ _, unmountErr := volumeServerClient.VolumeUnmount(context.Background(), &volume_server_pb.VolumeUnmountRequest{
VolumeId: uint32(volumeId),
})
if unmountErr != nil {
return unmountErr
}
- _, mountErr := volumeServerClient.VolumeMount(ctx, &volume_server_pb.VolumeMountRequest{
+ _, mountErr := volumeServerClient.VolumeMount(context.Background(), &volume_server_pb.VolumeMountRequest{
VolumeId: uint32(volumeId),
})
if mountErr != nil {
diff --git a/weed/shell/command_volume_tier_upload.go b/weed/shell/command_volume_tier_upload.go
index 20da1187c..f92cdc3e4 100644
--- a/weed/shell/command_volume_tier_upload.go
+++ b/weed/shell/command_volume_tier_upload.go
@@ -56,6 +56,10 @@ func (c *commandVolumeTierUpload) Help() string {
func (c *commandVolumeTierUpload) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+ if err = commandEnv.confirmIsLocked(); err != nil {
+ return
+ }
+
tierCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
volumeId := tierCommand.Int("volumeId", 0, "the volume id")
collection := tierCommand.String("collection", "", "the collection name")
@@ -67,23 +71,22 @@ func (c *commandVolumeTierUpload) Do(args []string, commandEnv *CommandEnv, writ
return nil
}
- ctx := context.Background()
vid := needle.VolumeId(*volumeId)
// volumeId is provided
if vid != 0 {
- return doVolumeTierUpload(ctx, commandEnv, writer, *collection, vid, *dest, *keepLocalDatFile)
+ return doVolumeTierUpload(commandEnv, writer, *collection, vid, *dest, *keepLocalDatFile)
}
// apply to all volumes in the collection
// reusing collectVolumeIdsForEcEncode for now
- volumeIds, err := collectVolumeIdsForEcEncode(ctx, commandEnv, *collection, *fullPercentage, *quietPeriod)
+ volumeIds, err := collectVolumeIdsForEcEncode(commandEnv, *collection, *fullPercentage, *quietPeriod)
if err != nil {
return err
}
fmt.Printf("tier upload volumes: %v\n", volumeIds)
for _, vid := range volumeIds {
- if err = doVolumeTierUpload(ctx, commandEnv, writer, *collection, vid, *dest, *keepLocalDatFile); err != nil {
+ if err = doVolumeTierUpload(commandEnv, writer, *collection, vid, *dest, *keepLocalDatFile); err != nil {
return err
}
}
@@ -91,20 +94,20 @@ func (c *commandVolumeTierUpload) Do(args []string, commandEnv *CommandEnv, writ
return nil
}
-func doVolumeTierUpload(ctx context.Context, commandEnv *CommandEnv, writer io.Writer, collection string, vid needle.VolumeId, dest string, keepLocalDatFile bool) (err error) {
+func doVolumeTierUpload(commandEnv *CommandEnv, writer io.Writer, collection string, vid needle.VolumeId, dest string, keepLocalDatFile bool) (err error) {
// find volume location
locations, found := commandEnv.MasterClient.GetLocations(uint32(vid))
if !found {
return fmt.Errorf("volume %d not found", vid)
}
- err = markVolumeReadonly(ctx, commandEnv.option.GrpcDialOption, needle.VolumeId(vid), locations)
+ err = markVolumeReadonly(commandEnv.option.GrpcDialOption, needle.VolumeId(vid), locations)
if err != nil {
return fmt.Errorf("mark volume %d as readonly on %s: %v", vid, locations[0].Url, err)
}
// copy the .dat file to remote tier
- err = uploadDatToRemoteTier(ctx, commandEnv.option.GrpcDialOption, writer, needle.VolumeId(vid), collection, locations[0].Url, dest, keepLocalDatFile)
+ err = uploadDatToRemoteTier(commandEnv.option.GrpcDialOption, writer, needle.VolumeId(vid), collection, locations[0].Url, dest, keepLocalDatFile)
if err != nil {
return fmt.Errorf("copy dat file for volume %d on %s to %s: %v", vid, locations[0].Url, dest, err)
}
@@ -112,10 +115,10 @@ func doVolumeTierUpload(ctx context.Context, commandEnv *CommandEnv, writer io.W
return nil
}
-func uploadDatToRemoteTier(ctx context.Context, grpcDialOption grpc.DialOption, writer io.Writer, volumeId needle.VolumeId, collection string, sourceVolumeServer string, dest string, keepLocalDatFile bool) error {
+func uploadDatToRemoteTier(grpcDialOption grpc.DialOption, writer io.Writer, volumeId needle.VolumeId, collection string, sourceVolumeServer string, dest string, keepLocalDatFile bool) error {
- err := operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error {
- stream, copyErr := volumeServerClient.VolumeTierMoveDatToRemote(ctx, &volume_server_pb.VolumeTierMoveDatToRemoteRequest{
+ err := operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
+ stream, copyErr := volumeServerClient.VolumeTierMoveDatToRemote(context.Background(), &volume_server_pb.VolumeTierMoveDatToRemoteRequest{
VolumeId: uint32(volumeId),
Collection: collection,
DestinationBackendName: dest,
diff --git a/weed/shell/command_volume_unmount.go b/weed/shell/command_volume_unmount.go
index 826258dfb..7596bb4c8 100644
--- a/weed/shell/command_volume_unmount.go
+++ b/weed/shell/command_volume_unmount.go
@@ -34,6 +34,10 @@ func (c *commandVolumeUnmount) Help() string {
func (c *commandVolumeUnmount) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+ if err = commandEnv.confirmIsLocked(); err != nil {
+ return
+ }
+
if len(args) != 2 {
fmt.Fprintf(writer, "received args: %+v\n", args)
return fmt.Errorf("need 2 args of ")
@@ -45,14 +49,13 @@ func (c *commandVolumeUnmount) Do(args []string, commandEnv *CommandEnv, writer
return fmt.Errorf("wrong volume id format %s: %v", volumeId, err)
}
- ctx := context.Background()
- return unmountVolume(ctx, commandEnv.option.GrpcDialOption, volumeId, sourceVolumeServer)
+ return unmountVolume(commandEnv.option.GrpcDialOption, volumeId, sourceVolumeServer)
}
-func unmountVolume(ctx context.Context, grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer string) (err error) {
- return operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error {
- _, unmountErr := volumeServerClient.VolumeUnmount(ctx, &volume_server_pb.VolumeUnmountRequest{
+func unmountVolume(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer string) (err error) {
+ return operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
+ _, unmountErr := volumeServerClient.VolumeUnmount(context.Background(), &volume_server_pb.VolumeUnmountRequest{
VolumeId: uint32(volumeId),
})
return unmountErr
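The same `confirmIsLocked` guard now fronts every cluster-mutating shell command. A minimal sketch of the convention, with a hypothetical command name (`CommandEnv` and `confirmIsLocked` come from this patch; everything else is illustrative):

package shell

import (
	"fmt"
	"io"
)

// hypothetical destructive command following the new locking convention
type commandVolumeExample struct{}

func (c *commandVolumeExample) Do(args []string, commandEnv *CommandEnv, writer io.Writer) error {
	// refuse to run unless the operator has acquired the exclusive lock first
	if err := commandEnv.confirmIsLocked(); err != nil {
		return err
	}
	fmt.Fprintln(writer, "lock held; safe to mutate cluster state")
	return nil
}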
diff --git a/weed/shell/commands.go b/weed/shell/commands.go
index f1fcb62d4..f61ed9f82 100644
--- a/weed/shell/commands.go
+++ b/weed/shell/commands.go
@@ -1,19 +1,19 @@
package shell
import (
- "context"
"fmt"
"io"
"net/url"
- "path/filepath"
"strconv"
"strings"
"google.golang.org/grpc"
- "github.com/chrislusf/seaweedfs/weed/filer2"
+ "github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
"github.com/chrislusf/seaweedfs/weed/wdclient"
+ "github.com/chrislusf/seaweedfs/weed/wdclient/exclusive_locks"
)
type ShellOptions struct {
@@ -29,6 +29,7 @@ type CommandEnv struct {
env map[string]string
MasterClient *wdclient.MasterClient
option ShellOptions
+ locker *exclusive_locks.ExclusiveLocker
}
type command interface {
@@ -42,55 +43,67 @@ var (
)
func NewCommandEnv(options ShellOptions) *CommandEnv {
- return &CommandEnv{
- env: make(map[string]string),
- MasterClient: wdclient.NewMasterClient(context.Background(),
- options.GrpcDialOption, "shell", strings.Split(*options.Masters, ",")),
- option: options,
+ ce := &CommandEnv{
+ env: make(map[string]string),
+ MasterClient: wdclient.NewMasterClient(options.GrpcDialOption, pb.AdminShellClient, "", 0, strings.Split(*options.Masters, ",")),
+ option: options,
}
+ ce.locker = exclusive_locks.NewExclusiveLocker(ce.MasterClient)
+ return ce
}
-func (ce *CommandEnv) parseUrl(input string) (filerServer string, filerPort int64, path string, err error) {
+func (ce *CommandEnv) parseUrl(input string) (path string, err error) {
if strings.HasPrefix(input, "http") {
- return parseFilerUrl(input)
+ err = fmt.Errorf("http://: prefix is not supported any more")
+ return
}
if !strings.HasPrefix(input, "/") {
- input = filepath.ToSlash(filepath.Join(ce.option.Directory, input))
+ input = util.Join(ce.option.Directory, input)
}
- return ce.option.FilerHost, ce.option.FilerPort, input, err
+ return input, err
}
-func (ce *CommandEnv) isDirectory(ctx context.Context, filerServer string, filerPort int64, path string) bool {
+func (ce *CommandEnv) isDirectory(path string) bool {
- return ce.checkDirectory(ctx, filerServer, filerPort, path) == nil
+ return ce.checkDirectory(path) == nil
}
-func (ce *CommandEnv) checkDirectory(ctx context.Context, filerServer string, filerPort int64, path string) error {
-
- dir, name := filer2.FullPath(path).DirAndName()
-
- return ce.withFilerClient(ctx, filerServer, filerPort, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error {
-
- resp, lookupErr := client.LookupDirectoryEntry(ctx, &filer_pb.LookupDirectoryEntryRequest{
- Directory: dir,
- Name: name,
- })
- if lookupErr != nil {
- return lookupErr
- }
-
- if resp.Entry == nil {
- return fmt.Errorf("entry not found")
- }
-
- if !resp.Entry.IsDirectory {
- return fmt.Errorf("not a directory")
- }
+func (ce *CommandEnv) confirmIsLocked() error {
+ if ce.locker.IsLocking() {
return nil
- })
+ }
+ return fmt.Errorf("need to lock to continue")
+
+}
+
+func (ce *CommandEnv) checkDirectory(path string) error {
+
+ dir, name := util.FullPath(path).DirAndName()
+
+ exists, err := filer_pb.Exists(ce, dir, name, true)
+
+ if !exists {
+ return fmt.Errorf("%s is not a directory", path)
+ }
+
+ return err
+
+}
+
+var _ = filer_pb.FilerClient(&CommandEnv{})
+
+func (ce *CommandEnv) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error {
+
+ filerGrpcAddress := fmt.Sprintf("%s:%d", ce.option.FilerHost, ce.option.FilerPort+10000)
+ return pb.WithGrpcFilerClient(filerGrpcAddress, ce.option.GrpcDialOption, fn)
+
+}
+
+func (ce *CommandEnv) AdjustedUrl(hostAndPort string) string {
+ return hostAndPort
}
func parseFilerUrl(entryPath string) (filerServer string, filerPort int64, path string, err error) {
@@ -107,7 +120,7 @@ func parseFilerUrl(entryPath string) (filerServer string, filerPort int64, path
}
path = u.Path
} else {
- err = fmt.Errorf("path should have full url http://:/path/to/dirOrFile : %s", entryPath)
+ err = fmt.Errorf("path should have full url /path/to/dirOrFile : %s", entryPath)
}
return
}
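The bare `var _ = filer_pb.FilerClient(&CommandEnv{})` is Go's compile-time interface assertion: it costs nothing at runtime but breaks the build the moment `CommandEnv` stops satisfying `filer_pb.FilerClient`. A self-contained sketch of the idiom:

package main

import "fmt"

type Greeter interface {
	Greet() string
}

type English struct{}

func (English) Greet() string { return "hello" }

// fails to compile if English ever stops satisfying Greeter
var _ Greeter = English{}

func main() {
	fmt.Println(English{}.Greet())
}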
diff --git a/weed/shell/shell_liner.go b/weed/shell/shell_liner.go
index a4f17e0fa..4632a1fb0 100644
--- a/weed/shell/shell_liner.go
+++ b/weed/shell/shell_liner.go
@@ -6,9 +6,8 @@ import (
"os"
"path"
"regexp"
- "strings"
-
"sort"
+ "strings"
"github.com/peterh/liner"
)
@@ -46,42 +45,51 @@ func RunShell(options ShellOptions) {
return
}
- cmds := reg.FindAllString(cmd, -1)
- if len(cmds) == 0 {
- continue
- } else {
- line.AppendHistory(cmd)
-
- args := make([]string, len(cmds[1:]))
-
- for i := range args {
- args[i] = strings.Trim(string(cmds[1+i]), "\"'")
- }
-
- cmd := strings.ToLower(cmds[0])
- if cmd == "help" || cmd == "?" {
- printHelp(cmds)
- } else if cmd == "exit" || cmd == "quit" {
+ for _, c := range strings.Split(cmd, ";") {
+ if processEachCmd(reg, c, commandEnv) {
return
- } else {
- foundCommand := false
- for _, c := range Commands {
- if c.Name() == cmd {
- if err := c.Do(args, commandEnv, os.Stdout); err != nil {
- fmt.Fprintf(os.Stderr, "error: %v\n", err)
- }
- foundCommand = true
- }
- }
- if !foundCommand {
- fmt.Fprintf(os.Stderr, "unknown command: %v\n", cmd)
- }
}
-
}
}
}
+func processEachCmd(reg *regexp.Regexp, cmd string, commandEnv *CommandEnv) bool {
+ cmds := reg.FindAllString(cmd, -1)
+ if len(cmds) == 0 {
+ return false
+ } else {
+ line.AppendHistory(cmd)
+
+ args := make([]string, len(cmds[1:]))
+
+ for i := range args {
+ args[i] = strings.Trim(string(cmds[1+i]), "\"'")
+ }
+
+ cmd := strings.ToLower(cmds[0])
+ if cmd == "help" || cmd == "?" {
+ printHelp(cmds)
+ } else if cmd == "exit" || cmd == "quit" {
+ return true
+ } else {
+ foundCommand := false
+ for _, c := range Commands {
+ if c.Name() == cmd || c.Name() == "fs."+cmd {
+ if err := c.Do(args, commandEnv, os.Stdout); err != nil {
+ fmt.Fprintf(os.Stderr, "error: %v\n", err)
+ }
+ foundCommand = true
+ }
+ }
+ if !foundCommand {
+ fmt.Fprintf(os.Stderr, "unknown command: %v\n", cmd)
+ }
+ }
+
+ }
+ return false
+}
+
func printGenericHelp() {
msg :=
`Type: "help " for help on
diff --git a/weed/stats/disk.go b/weed/stats/disk.go
index e9d8baedd..813c08f7b 100644
--- a/weed/stats/disk.go
+++ b/weed/stats/disk.go
@@ -1,9 +1,13 @@
package stats
-import "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
+import (
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
+)
func NewDiskStatus(path string) (disk *volume_server_pb.DiskStatus) {
disk = &volume_server_pb.DiskStatus{Dir: path}
fillInDiskStatus(disk)
+ glog.V(0).Infof("read disk size: %v", disk)
return
}
diff --git a/weed/stats/disk_notsupported.go b/weed/stats/disk_notsupported.go
index ace662f6a..3d99e6ce7 100644
--- a/weed/stats/disk_notsupported.go
+++ b/weed/stats/disk_notsupported.go
@@ -1,4 +1,4 @@
-// +build windows openbsd netbsd plan9 solaris
+// +build openbsd netbsd plan9 solaris
package stats
diff --git a/weed/stats/disk_supported.go b/weed/stats/disk_supported.go
index 0537828b0..dff580b5b 100644
--- a/weed/stats/disk_supported.go
+++ b/weed/stats/disk_supported.go
@@ -17,5 +17,7 @@ func fillInDiskStatus(disk *volume_server_pb.DiskStatus) {
disk.All = fs.Blocks * uint64(fs.Bsize)
disk.Free = fs.Bfree * uint64(fs.Bsize)
disk.Used = disk.All - disk.Free
+ disk.PercentFree = float32((float64(disk.Free) / float64(disk.All)) * 100)
+ disk.PercentUsed = float32((float64(disk.Used) / float64(disk.All)) * 100)
return
}
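The new percentage fields do the division in float64 and only narrow to float32 at the end, so integer division cannot silently truncate to 0 or 100. A quick check of the arithmetic with sample numbers:

package main

import "fmt"

func main() {
	var all, free uint64 = 1 << 40, 3 << 38 // 1 TiB total, 768 GiB free
	used := all - free
	percentFree := float32(float64(free) / float64(all) * 100)
	percentUsed := float32(float64(used) / float64(all) * 100)
	fmt.Printf("free %.1f%% used %.1f%%\n", percentFree, percentUsed) // free 75.0% used 25.0%
}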
diff --git a/weed/stats/disk_windows.go b/weed/stats/disk_windows.go
new file mode 100644
index 000000000..1185e129c
--- /dev/null
+++ b/weed/stats/disk_windows.go
@@ -0,0 +1,46 @@
+package stats
+
+import (
+ "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
+ "golang.org/x/sys/windows"
+ "syscall"
+ "unsafe"
+)
+var (
+ kernel32 = windows.NewLazySystemDLL("Kernel32.dll")
+ getDiskFreeSpaceEx = kernel32.NewProc("GetDiskFreeSpaceExW")
+)
+
+func fillInDiskStatus(disk *volume_server_pb.DiskStatus) {
+
+ ptr, err := syscall.UTF16PtrFromString(disk.Dir)
+
+ if err != nil {
+ return
+ }
+ var _temp uint64
+ /* #nosec */
+ r, _, e := syscall.Syscall6(
+ getDiskFreeSpaceEx.Addr(),
+ 4,
+ uintptr(unsafe.Pointer(ptr)),
+ uintptr(unsafe.Pointer(&disk.Free)),
+ uintptr(unsafe.Pointer(&disk.All)),
+ uintptr(unsafe.Pointer(&_temp)),
+ 0,
+ 0,
+ )
+
+ if r == 0 {
+ return
+ }
+ disk.Used = disk.All - disk.Free
+ disk.PercentFree = float32((float64(disk.Free) / float64(disk.All)) * 100)
+ disk.PercentUsed = float32((float64(disk.Used) / float64(disk.All)) * 100)
+
+ return
+}
diff --git a/weed/stats/metrics.go b/weed/stats/metrics.go
index a9624cd86..7ff09a388 100644
--- a/weed/stats/metrics.go
+++ b/weed/stats/metrics.go
@@ -3,11 +3,13 @@ package stats
import (
"fmt"
"os"
+ "strings"
"time"
- "github.com/chrislusf/seaweedfs/weed/glog"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/push"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
)
var (
@@ -119,7 +121,7 @@ func LoopPushingMetric(name, instance string, gatherer *prometheus.Registry, fnG
for {
if currentAddr != "" {
err := pusher.Push()
- if err != nil {
+ if err != nil && !strings.HasPrefix(err.Error(), "unexpected status code 200") {
glog.V(0).Infof("could not push metrics to prometheus push gateway %s: %v", addr, err)
}
}
@@ -136,7 +138,7 @@ func LoopPushingMetric(name, instance string, gatherer *prometheus.Registry, fnG
}
}
-func SourceName(port int) string {
+func SourceName(port uint32) string {
hostname, err := os.Hostname()
if err != nil {
return "unknown"
diff --git a/weed/storage/backend/backend.go b/weed/storage/backend/backend.go
index 6941ca5a1..daab29621 100644
--- a/weed/storage/backend/backend.go
+++ b/weed/storage/backend/backend.go
@@ -19,6 +19,7 @@ type BackendStorageFile interface {
io.Closer
GetStat() (datSize int64, modTime time.Time, err error)
Name() string
+ Sync() error
}
type BackendStorage interface {
diff --git a/weed/storage/backend/disk_file.go b/weed/storage/backend/disk_file.go
index c4b3caffb..2b04c8df2 100644
--- a/weed/storage/backend/disk_file.go
+++ b/weed/storage/backend/disk_file.go
@@ -48,3 +48,7 @@ func (df *DiskFile) GetStat() (datSize int64, modTime time.Time, err error) {
func (df *DiskFile) Name() string {
return df.fullFilePath
}
+
+func (df *DiskFile) Sync() error {
+ return df.File.Sync()
+}
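Adding `Sync() error` to `BackendStorageFile` makes durability part of the backend contract: the disk backend forwards to `os.File.Sync`, while memory-mapped and S3 backends can legitimately no-op. A reduced, self-contained sketch of the pattern (the type names here are made up):

package main

import (
	"fmt"
	"io/ioutil"
	"os"
)

type StorageFile interface {
	Sync() error // flush to durable storage, or no-op where meaningless
}

type diskFile struct{ f *os.File }

func (d diskFile) Sync() error { return d.f.Sync() } // real fsync

type remoteFile struct{}

func (remoteFile) Sync() error { return nil } // durability handled remotely

func main() {
	f, err := ioutil.TempFile("", "sync-demo")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	for _, sf := range []StorageFile{diskFile{f}, remoteFile{}} {
		fmt.Println("sync:", sf.Sync())
	}
}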
diff --git a/weed/storage/backend/memory_map/memory_map_backend.go b/weed/storage/backend/memory_map/memory_map_backend.go
index 03e7308d0..8ff03d9af 100644
--- a/weed/storage/backend/memory_map/memory_map_backend.go
+++ b/weed/storage/backend/memory_map/memory_map_backend.go
@@ -3,12 +3,10 @@ package memory_map
import (
"os"
"time"
-
- "github.com/chrislusf/seaweedfs/weed/storage/backend"
)
var (
- _ backend.BackendStorageFile = &MemoryMappedFile{}
+// _ backend.BackendStorageFile = &MemoryMappedFile{} // commented out to break the import cycle with the backend package
)
type MemoryMappedFile struct {
@@ -58,3 +56,7 @@ func (mmf *MemoryMappedFile) GetStat() (datSize int64, modTime time.Time, err er
func (mmf *MemoryMappedFile) Name() string {
return mmf.mm.File.Name()
}
+
+func (mmf *MemoryMappedFile) Sync() error {
+ return nil
+}
diff --git a/weed/storage/backend/s3_backend/s3_backend.go b/weed/storage/backend/s3_backend/s3_backend.go
index 8d71861c2..4706c9334 100644
--- a/weed/storage/backend/s3_backend/s3_backend.go
+++ b/weed/storage/backend/s3_backend/s3_backend.go
@@ -36,6 +36,7 @@ type S3BackendStorage struct {
aws_secret_access_key string
region string
bucket string
+ endpoint string
conn s3iface.S3API
}
@@ -46,7 +47,9 @@ func newS3BackendStorage(configuration backend.StringProperties, configPrefix st
s.aws_secret_access_key = configuration.GetString(configPrefix + "aws_secret_access_key")
s.region = configuration.GetString(configPrefix + "region")
s.bucket = configuration.GetString(configPrefix + "bucket")
- s.conn, err = createSession(s.aws_access_key_id, s.aws_secret_access_key, s.region)
+ s.endpoint = configuration.GetString(configPrefix + "endpoint")
+
+ s.conn, err = createSession(s.aws_access_key_id, s.aws_secret_access_key, s.region, s.endpoint)
glog.V(0).Infof("created backend storage s3.%s for region %s bucket %s", s.id, s.region, s.bucket)
return
@@ -58,6 +61,7 @@ func (s *S3BackendStorage) ToProperties() map[string]string {
m["aws_secret_access_key"] = s.aws_secret_access_key
m["region"] = s.region
m["bucket"] = s.bucket
+ m["endpoint"] = s.endpoint
return m
}
@@ -175,3 +179,7 @@ func (s3backendStorageFile S3BackendStorageFile) GetStat() (datSize int64, modTi
func (s3backendStorageFile S3BackendStorageFile) Name() string {
return s3backendStorageFile.key
}
+
+func (s3backendStorageFile S3BackendStorageFile) Sync() error {
+ return nil
+}
diff --git a/weed/storage/backend/s3_backend/s3_sessions.go b/weed/storage/backend/s3_backend/s3_sessions.go
index 5fdbcb66b..e2fdf1eb6 100644
--- a/weed/storage/backend/s3_backend/s3_sessions.go
+++ b/weed/storage/backend/s3_backend/s3_sessions.go
@@ -24,7 +24,7 @@ func getSession(region string) (s3iface.S3API, bool) {
return sess, found
}
-func createSession(awsAccessKeyId, awsSecretAccessKey, region string) (s3iface.S3API, error) {
+func createSession(awsAccessKeyId, awsSecretAccessKey, region, endpoint string) (s3iface.S3API, error) {
sessionsLock.Lock()
defer sessionsLock.Unlock()
@@ -34,7 +34,8 @@ func createSession(awsAccessKeyId, awsSecretAccessKey, region string) (s3iface.S
}
config := &aws.Config{
- Region: aws.String(region),
+ Region: aws.String(region),
+ Endpoint: aws.String(endpoint),
}
if awsAccessKeyId != "" && awsSecretAccessKey != "" {
config.Credentials = credentials.NewStaticCredentials(awsAccessKeyId, awsSecretAccessKey, "")
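Threading `endpoint` into the session lets the `s3` backend target S3-compatible stores such as MinIO or Ceph RGW. A hedged sketch of building such a session with aws-sdk-go; the endpoint value and the force-path-style flag are assumptions typical for such stores, not part of this diff:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	config := &aws.Config{
		Region:           aws.String("us-east-1"),
		Endpoint:         aws.String("http://127.0.0.1:9000"), // assumed MinIO address
		Credentials:      credentials.NewStaticCredentials("key", "secret", ""),
		S3ForcePathStyle: aws.Bool(true), // most S3 compatibles want path-style URLs
	}
	sess, err := session.NewSession(config)
	if err != nil {
		fmt.Println("session:", err)
		return
	}
	_ = s3.New(sess)
	fmt.Println("s3 client ready for", *config.Endpoint)
}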
diff --git a/weed/storage/volume_create.go b/weed/storage/backend/volume_create.go
similarity index 57%
rename from weed/storage/volume_create.go
rename to weed/storage/backend/volume_create.go
index ffcb246a4..abb1f7238 100644
--- a/weed/storage/volume_create.go
+++ b/weed/storage/backend/volume_create.go
@@ -1,15 +1,14 @@
// +build !linux,!windows
-package storage
+package backend
import (
"os"
"github.com/chrislusf/seaweedfs/weed/glog"
- "github.com/chrislusf/seaweedfs/weed/storage/backend"
)
-func createVolumeFile(fileName string, preallocate int64, memoryMapSizeMB uint32) (backend.BackendStorageFile, error) {
+func CreateVolumeFile(fileName string, preallocate int64, memoryMapSizeMB uint32) (BackendStorageFile, error) {
file, e := os.OpenFile(fileName, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
if e != nil {
return nil, e
@@ -17,5 +16,5 @@ func createVolumeFile(fileName string, preallocate int64, memoryMapSizeMB uint32
if preallocate > 0 {
glog.V(0).Infof("Preallocated disk space for %s is not supported", fileName)
}
- return backend.NewDiskFile(file), nil
+ return NewDiskFile(file), nil
}
diff --git a/weed/storage/volume_create_linux.go b/weed/storage/backend/volume_create_linux.go
similarity index 61%
rename from weed/storage/volume_create_linux.go
rename to weed/storage/backend/volume_create_linux.go
index ee599ac32..4602831ca 100644
--- a/weed/storage/volume_create_linux.go
+++ b/weed/storage/backend/volume_create_linux.go
@@ -1,16 +1,15 @@
// +build linux
-package storage
+package backend
import (
"os"
"syscall"
"github.com/chrislusf/seaweedfs/weed/glog"
- "github.com/chrislusf/seaweedfs/weed/storage/backend"
)
-func createVolumeFile(fileName string, preallocate int64, memoryMapSizeMB uint32) (backend.BackendStorageFile, error) {
+func CreateVolumeFile(fileName string, preallocate int64, memoryMapSizeMB uint32) (BackendStorageFile, error) {
file, e := os.OpenFile(fileName, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
if e != nil {
return nil, e
@@ -19,5 +18,5 @@ func createVolumeFile(fileName string, preallocate int64, memoryMapSizeMB uint32
syscall.Fallocate(int(file.Fd()), 1, 0, preallocate)
glog.V(0).Infof("Preallocated %d bytes disk space for %s", preallocate, fileName)
}
- return backend.NewDiskFile(file), nil
+ return NewDiskFile(file), nil
}
diff --git a/weed/storage/volume_create_windows.go b/weed/storage/backend/volume_create_windows.go
similarity index 76%
rename from weed/storage/volume_create_windows.go
rename to weed/storage/backend/volume_create_windows.go
index e1c0b961f..7d40ec0d7 100644
--- a/weed/storage/volume_create_windows.go
+++ b/weed/storage/backend/volume_create_windows.go
@@ -1,17 +1,16 @@
// +build windows
-package storage
+package backend
import (
"github.com/chrislusf/seaweedfs/weed/storage/backend/memory_map"
"golang.org/x/sys/windows"
"github.com/chrislusf/seaweedfs/weed/glog"
- "github.com/chrislusf/seaweedfs/weed/storage/backend"
"github.com/chrislusf/seaweedfs/weed/storage/backend/memory_map/os_overloads"
)
-func createVolumeFile(fileName string, preallocate int64, memoryMapSizeMB uint32) (backend.BackendStorageFile, error) {
+func CreateVolumeFile(fileName string, preallocate int64, memoryMapSizeMB uint32) (BackendStorageFile, error) {
if preallocate > 0 {
glog.V(0).Infof("Preallocated disk space for %s is not supported", fileName)
}
@@ -27,7 +26,7 @@ func createVolumeFile(fileName string, preallocate int64, memoryMapSizeMB uint32
if e != nil {
return nil, e
}
- return backend.NewDiskFile(file), nil
+ return NewDiskFile(file), nil
}
}
diff --git a/weed/storage/disk_location.go b/weed/storage/disk_location.go
index a12a68cbc..088763c45 100644
--- a/weed/storage/disk_location.go
+++ b/weed/storage/disk_location.go
@@ -50,29 +50,39 @@ func parseCollectionVolumeId(base string) (collection string, vid needle.VolumeI
return collection, vol, err
}
-func (l *DiskLocation) loadExistingVolume(fileInfo os.FileInfo, needleMapKind NeedleMapType) {
+func (l *DiskLocation) loadExistingVolume(fileInfo os.FileInfo, needleMapKind NeedleMapType) bool {
name := fileInfo.Name()
if !fileInfo.IsDir() && strings.HasSuffix(name, ".idx") {
vid, collection, err := l.volumeIdFromPath(fileInfo)
- if err == nil {
- l.volumesLock.RLock()
- _, found := l.volumes[vid]
- l.volumesLock.RUnlock()
- if !found {
- if v, e := NewVolume(l.Directory, collection, vid, needleMapKind, nil, nil, 0, 0); e == nil {
- l.volumesLock.Lock()
- l.volumes[vid] = v
- l.volumesLock.Unlock()
- size, _, _ := v.FileStat()
- glog.V(0).Infof("data file %s, replicaPlacement=%s v=%d size=%d ttl=%s",
- l.Directory+"/"+name, v.ReplicaPlacement, v.Version(), size, v.Ttl.String())
- // println("volume", vid, "last append at", v.lastAppendAtNs)
- } else {
- glog.V(0).Infof("new volume %s error %s", name, e)
- }
- }
+ if err != nil {
+ glog.Warningf("get volume id failed, %s, err : %s", name, err)
+ return false
}
+
+ // avoid loading one volume more than once
+ l.volumesLock.RLock()
+ _, found := l.volumes[vid]
+ l.volumesLock.RUnlock()
+ if found {
+ glog.V(1).Infof("loaded volume, %v", vid)
+ return true
+ }
+
+ v, e := NewVolume(l.Directory, collection, vid, needleMapKind, nil, nil, 0, 0)
+ if e != nil {
+ glog.V(0).Infof("new volume %s error %s", name, e)
+ return false
+ }
+
+ l.volumesLock.Lock()
+ l.volumes[vid] = v
+ l.volumesLock.Unlock()
+ size, _, _ := v.FileStat()
+ glog.V(0).Infof("data file %s, replicaPlacement=%s v=%d size=%d ttl=%s",
+ l.Directory+"/"+name, v.ReplicaPlacement, v.Version(), size, v.Ttl.String())
+ return true
}
+ return false
}
func (l *DiskLocation) concurrentLoadingVolumes(needleMapKind NeedleMapType, concurrency int) {
@@ -93,7 +103,7 @@ func (l *DiskLocation) concurrentLoadingVolumes(needleMapKind NeedleMapType, con
go func() {
defer wg.Done()
for dir := range task_queue {
- l.loadExistingVolume(dir, needleMapKind)
+ _ = l.loadExistingVolume(dir, needleMapKind)
}
}()
}
@@ -157,7 +167,7 @@ func (l *DiskLocation) DeleteCollectionFromDiskLocation(collection string) (e er
return
}
-func (l *DiskLocation) deleteVolumeById(vid needle.VolumeId) (e error) {
+func (l *DiskLocation) deleteVolumeById(vid needle.VolumeId) (found bool, e error) {
v, ok := l.volumes[vid]
if !ok {
return
@@ -166,14 +176,14 @@ func (l *DiskLocation) deleteVolumeById(vid needle.VolumeId) (e error) {
if e != nil {
return
}
+ found = true
delete(l.volumes, vid)
return
}
func (l *DiskLocation) LoadVolume(vid needle.VolumeId, needleMapKind NeedleMapType) bool {
if fileInfo, found := l.LocateVolume(vid); found {
- l.loadExistingVolume(fileInfo, needleMapKind)
- return true
+ return l.loadExistingVolume(fileInfo, needleMapKind)
}
return false
}
@@ -186,7 +196,8 @@ func (l *DiskLocation) DeleteVolume(vid needle.VolumeId) error {
if !ok {
return fmt.Errorf("Volume not found, VolumeId: %d", vid)
}
- return l.deleteVolumeById(vid)
+ _, err := l.deleteVolumeById(vid)
+ return err
}
func (l *DiskLocation) UnloadVolume(vid needle.VolumeId) error {
@@ -266,3 +277,19 @@ func (l *DiskLocation) LocateVolume(vid needle.VolumeId) (os.FileInfo, bool) {
return nil, false
}
+
+func (l *DiskLocation) UnUsedSpace(volumeSizeLimit uint64) (unUsedSpace uint64) {
+
+ l.volumesLock.RLock()
+ defer l.volumesLock.RUnlock()
+
+ for _, vol := range l.volumes {
+ if vol.IsReadOnly() {
+ continue
+ }
+ datSize, idxSize, _ := vol.FileStat()
+ if datSize+idxSize >= volumeSizeLimit {
+ // already at or over the limit: no growth room, and subtracting would underflow uint64
+ continue
+ }
+ unUsedSpace += volumeSizeLimit - (datSize + idxSize)
+ }
+
+ return
+}
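`UnUsedSpace` totals, over the writable volumes, how much each can still grow before hitting the size limit; `MaybeAdjustVolumeMax` (further down, in store.go) subtracts this from the disk's free space to find space no volume has claimed yet. A worked example with made-up sizes:

package main

import "fmt"

func main() {
	const volumeSizeLimit uint64 = 30 << 30 // 30 GiB per volume
	// three writable volumes: current .dat+.idx sizes
	volSizes := []uint64{10 << 30, 25 << 30, 5 << 30}
	var unused uint64
	for _, s := range volSizes {
		unused += volumeSizeLimit - s // room each volume can still grow into
	}
	fmt.Printf("unused (already reserved) space: %d GiB\n", unused>>30) // 50 GiB
}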
diff --git a/weed/storage/disk_location_ec.go b/weed/storage/disk_location_ec.go
index f6c44e966..72d3e2b3e 100644
--- a/weed/storage/disk_location_ec.go
+++ b/weed/storage/disk_location_ec.go
@@ -183,3 +183,10 @@ func (l *DiskLocation) unmountEcVolumeByCollection(collectionName string) map[ne
}
return deltaVols
}
+
+func (l *DiskLocation) EcVolumesLen() int {
+ l.ecVolumesLock.RLock()
+ defer l.ecVolumesLock.RUnlock()
+
+ return len(l.ecVolumes)
+}
diff --git a/weed/storage/erasure_coding/ec_encoder.go b/weed/storage/erasure_coding/ec_encoder.go
index 9e2edf57d..97c3ccbd9 100644
--- a/weed/storage/erasure_coding/ec_encoder.go
+++ b/weed/storage/erasure_coding/ec_encoder.go
@@ -27,6 +27,9 @@ const (
func WriteSortedFileFromIdx(baseFileName string, ext string) (e error) {
nm, err := readNeedleMap(baseFileName)
+ if nm != nil {
+ defer nm.Close()
+ }
if err != nil {
return fmt.Errorf("readNeedleMap: %v", err)
}
@@ -196,7 +199,7 @@ func encodeDatFile(remainingSize int64, err error, baseFileName string, bufferSi
}
buffers := make([][]byte, TotalShardsCount)
- for i, _ := range buffers {
+ for i := range buffers {
buffers[i] = make([]byte, bufferSize)
}
@@ -233,7 +236,7 @@ func rebuildEcFiles(shardHasData []bool, inputFiles []*os.File, outputFiles []*o
}
buffers := make([][]byte, TotalShardsCount)
- for i, _ := range buffers {
+ for i := range buffers {
if shardHasData[i] {
buffers[i] = make([]byte, ErasureCodingSmallBlockSize)
}
diff --git a/weed/storage/erasure_coding/ec_test.go b/weed/storage/erasure_coding/ec_test.go
index b2c94cfd7..92b83cdc8 100644
--- a/weed/storage/erasure_coding/ec_test.go
+++ b/weed/storage/erasure_coding/ec_test.go
@@ -7,9 +7,10 @@ import (
"os"
"testing"
+ "github.com/klauspost/reedsolomon"
+
"github.com/chrislusf/seaweedfs/weed/storage/needle_map"
"github.com/chrislusf/seaweedfs/weed/storage/types"
- "github.com/klauspost/reedsolomon"
)
const (
@@ -42,6 +43,7 @@ func TestEncodingDecoding(t *testing.T) {
func validateFiles(baseFileName string) error {
nm, err := readNeedleMap(baseFileName)
+ if nm != nil {
+ defer nm.Close()
+ }
if err != nil {
return fmt.Errorf("readNeedleMap: %v", err)
}
diff --git a/weed/storage/erasure_coding/ec_volume.go b/weed/storage/erasure_coding/ec_volume.go
index 3d9aa2cff..eef53765f 100644
--- a/weed/storage/erasure_coding/ec_volume.go
+++ b/weed/storage/erasure_coding/ec_volume.go
@@ -152,6 +152,13 @@ func (ev *EcVolume) ShardSize() int64 {
return 0
}
+func (ev *EcVolume) Size() (size int64) {
+ for _, shard := range ev.Shards {
+ size += shard.Size()
+ }
+ return
+}
+
func (ev *EcVolume) CreatedAt() time.Time {
return ev.ecxCreatedAt
}
diff --git a/weed/storage/needle/async_request.go b/weed/storage/needle/async_request.go
new file mode 100644
index 000000000..ea02c55c5
--- /dev/null
+++ b/weed/storage/needle/async_request.go
@@ -0,0 +1,53 @@
+package needle
+
+type AsyncRequest struct {
+ N *Needle
+ IsWriteRequest bool
+ ActualSize int64
+ offset uint64
+ size uint64
+ doneChan chan interface{}
+ isUnchanged bool
+ err error
+}
+
+func NewAsyncRequest(n *Needle, isWriteRequest bool) *AsyncRequest {
+ return &AsyncRequest{
+ offset: 0,
+ size: 0,
+ ActualSize: 0,
+ doneChan: make(chan interface{}),
+ N: n,
+ isUnchanged: false,
+ IsWriteRequest: isWriteRequest,
+ err: nil,
+ }
+}
+
+func (r *AsyncRequest) WaitComplete() (uint64, uint64, bool, error) {
+ <-r.doneChan
+ return r.offset, r.size, r.isUnchanged, r.err
+}
+
+func (r *AsyncRequest) Complete(offset uint64, size uint64, isUnchanged bool, err error) {
+ r.offset = offset
+ r.size = size
+ r.isUnchanged = isUnchanged
+ r.err = err
+ close(r.doneChan)
+}
+
+func (r *AsyncRequest) UpdateResult(offset uint64, size uint64, isUnchanged bool, err error) {
+ r.offset = offset
+ r.size = size
+ r.isUnchanged = isUnchanged
+ r.err = err
+}
+
+func (r *AsyncRequest) Submit() {
+ close(r.doneChan)
+}
+
+func (r *AsyncRequest) IsSucceed() bool {
+ return r.err == nil
+}
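`AsyncRequest` is a one-shot future: the caller enqueues it and blocks in `WaitComplete` on `doneChan`, and the worker publishes the result by calling `Complete` (or `UpdateResult` followed by `Submit`), which closes the channel. A self-contained sketch of the same shape, with the queue and worker invented for illustration:

package main

import "fmt"

// a one-shot future in the same shape as needle.AsyncRequest
type asyncRequest struct {
	offset, size uint64
	err          error
	done         chan struct{}
}

func newAsyncRequest() *asyncRequest { return &asyncRequest{done: make(chan struct{})} }

func (r *asyncRequest) Complete(offset, size uint64, err error) {
	r.offset, r.size, r.err = offset, size, err
	close(r.done) // wakes every waiter; must be called exactly once
}

func (r *asyncRequest) WaitComplete() (uint64, uint64, error) {
	<-r.done
	return r.offset, r.size, r.err
}

func main() {
	queue := make(chan *asyncRequest)
	go func() { // worker: fills in the result and closes done
		req := <-queue
		req.Complete(4096, 512, nil)
	}()
	req := newAsyncRequest()
	queue <- req
	offset, size, err := req.WaitComplete()
	fmt.Println(offset, size, err) // 4096 512 <nil>
}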
diff --git a/weed/storage/needle/crc.go b/weed/storage/needle/crc.go
index 00ea1db69..6fd910bb7 100644
--- a/weed/storage/needle/crc.go
+++ b/weed/storage/needle/crc.go
@@ -1,11 +1,11 @@
package needle
import (
- "crypto/md5"
"fmt"
- "github.com/chrislusf/seaweedfs/weed/util"
"github.com/klauspost/crc32"
+
+ "github.com/chrislusf/seaweedfs/weed/util"
)
var table = crc32.MakeTable(crc32.Castagnoli)
@@ -29,13 +29,3 @@ func (n *Needle) Etag() string {
util.Uint32toBytes(bits, uint32(n.Checksum))
return fmt.Sprintf("%x", bits)
}
-
-func (n *Needle) MD5() string {
-
- hash := md5.New()
-
- hash.Write(n.Data)
-
- return fmt.Sprintf("%x", hash.Sum(nil))
-
-}
diff --git a/weed/storage/needle/needle.go b/weed/storage/needle/needle.go
index 494cc138e..d3969e868 100644
--- a/weed/storage/needle/needle.go
+++ b/weed/storage/needle/needle.go
@@ -3,8 +3,6 @@ package needle
import (
"encoding/json"
"fmt"
- "io"
- "io/ioutil"
"net/http"
"strconv"
"strings"
@@ -50,57 +48,28 @@ func (n *Needle) String() (str string) {
return
}
-func ParseUpload(r *http.Request, sizeLimit int64) (
- fileName string, data []byte, mimeType string, pairMap map[string]string, isGzipped bool, originalDataSize int,
- modifiedTime uint64, ttl *TTL, isChunkedFile bool, e error) {
- pairMap = make(map[string]string)
- for k, v := range r.Header {
- if len(v) > 0 && strings.HasPrefix(k, PairNamePrefix) {
- pairMap[k] = v[0]
- }
- }
-
- if r.Method == "POST" {
- fileName, data, mimeType, isGzipped, originalDataSize, isChunkedFile, e = parseMultipart(r, sizeLimit)
- } else {
- isGzipped = false
- mimeType = r.Header.Get("Content-Type")
- fileName = ""
- data, e = ioutil.ReadAll(io.LimitReader(r.Body, sizeLimit+1))
- originalDataSize = len(data)
- if e == io.EOF || int64(originalDataSize) == sizeLimit+1 {
- io.Copy(ioutil.Discard, r.Body)
- }
- r.Body.Close()
- }
- if e != nil {
- return
- }
-
- modifiedTime, _ = strconv.ParseUint(r.FormValue("ts"), 10, 64)
- ttl, _ = ReadTTL(r.FormValue("ttl"))
-
- return
-}
func CreateNeedleFromRequest(r *http.Request, fixJpgOrientation bool, sizeLimit int64) (n *Needle, originalSize int, e error) {
- var pairMap map[string]string
- fname, mimeType, isGzipped, isChunkedFile := "", "", false, false
n = new(Needle)
- fname, n.Data, mimeType, pairMap, isGzipped, originalSize, n.LastModified, n.Ttl, isChunkedFile, e = ParseUpload(r, sizeLimit)
+ pu, e := ParseUpload(r, sizeLimit)
if e != nil {
return
}
- if len(fname) < 256 {
- n.Name = []byte(fname)
+ n.Data = pu.Data
+ originalSize = pu.OriginalDataSize
+ n.LastModified = pu.ModifiedTime
+ n.Ttl = pu.Ttl
+
+ if len(pu.FileName) < 256 {
+ n.Name = []byte(pu.FileName)
n.SetHasName()
}
- if len(mimeType) < 256 {
- n.Mime = []byte(mimeType)
+ if len(pu.MimeType) < 256 {
+ n.Mime = []byte(pu.MimeType)
n.SetHasMime()
}
- if len(pairMap) != 0 {
+ if len(pu.PairMap) != 0 {
trimmedPairMap := make(map[string]string)
- for k, v := range pairMap {
+ for k, v := range pu.PairMap {
trimmedPairMap[k[len(PairNamePrefix):]] = v
}
@@ -111,7 +80,7 @@ func CreateNeedleFromRequest(r *http.Request, fixJpgOrientation bool, sizeLimit
n.SetHasPairs()
}
}
- if isGzipped {
+ if pu.IsGzipped {
n.SetGzipped()
}
if n.LastModified == 0 {
@@ -122,13 +91,13 @@ func CreateNeedleFromRequest(r *http.Request, fixJpgOrientation bool, sizeLimit
n.SetHasTtl()
}
- if isChunkedFile {
+ if pu.IsChunkedFile {
n.SetIsChunkManifest()
}
if fixJpgOrientation {
- loweredName := strings.ToLower(fname)
- if mimeType == "image/jpeg" || strings.HasSuffix(loweredName, ".jpg") || strings.HasSuffix(loweredName, ".jpeg") {
+ loweredName := strings.ToLower(pu.FileName)
+ if pu.MimeType == "image/jpeg" || strings.HasSuffix(loweredName, ".jpg") || strings.HasSuffix(loweredName, ".jpeg") {
n.Data = images.FixJpgOrientation(n.Data)
}
}
diff --git a/weed/storage/needle/needle_parse_multipart.go b/weed/storage/needle/needle_parse_multipart.go
deleted file mode 100644
index 8c9032f5f..000000000
--- a/weed/storage/needle/needle_parse_multipart.go
+++ /dev/null
@@ -1,118 +0,0 @@
-package needle
-
-import (
- "fmt"
- "io"
- "io/ioutil"
- "mime"
- "net/http"
- "path"
- "strconv"
- "strings"
-
- "github.com/chrislusf/seaweedfs/weed/glog"
- "github.com/chrislusf/seaweedfs/weed/util"
-)
-
-func parseMultipart(r *http.Request, sizeLimit int64) (
- fileName string, data []byte, mimeType string, isGzipped bool, originalDataSize int, isChunkedFile bool, e error) {
- defer func() {
- if e != nil && r.Body != nil {
- io.Copy(ioutil.Discard, r.Body)
- r.Body.Close()
- }
- }()
- form, fe := r.MultipartReader()
- if fe != nil {
- glog.V(0).Infoln("MultipartReader [ERROR]", fe)
- e = fe
- return
- }
-
- //first multi-part item
- part, fe := form.NextPart()
- if fe != nil {
- glog.V(0).Infoln("Reading Multi part [ERROR]", fe)
- e = fe
- return
- }
-
- fileName = part.FileName()
- if fileName != "" {
- fileName = path.Base(fileName)
- }
-
- data, e = ioutil.ReadAll(io.LimitReader(part, sizeLimit+1))
- if e != nil {
- glog.V(0).Infoln("Reading Content [ERROR]", e)
- return
- }
- if len(data) == int(sizeLimit)+1 {
- e = fmt.Errorf("file over the limited %d bytes", sizeLimit)
- return
- }
-
- //if the filename is empty string, do a search on the other multi-part items
- for fileName == "" {
- part2, fe := form.NextPart()
- if fe != nil {
- break // no more or on error, just safely break
- }
-
- fName := part2.FileName()
-
- //found the first multi-part has filename
- if fName != "" {
- data2, fe2 := ioutil.ReadAll(io.LimitReader(part2, sizeLimit+1))
- if fe2 != nil {
- glog.V(0).Infoln("Reading Content [ERROR]", fe2)
- e = fe2
- return
- }
- if len(data) == int(sizeLimit)+1 {
- e = fmt.Errorf("file over the limited %d bytes", sizeLimit)
- return
- }
-
- //update
- data = data2
- fileName = path.Base(fName)
- break
- }
- }
-
- originalDataSize = len(data)
-
- isChunkedFile, _ = strconv.ParseBool(r.FormValue("cm"))
-
- if !isChunkedFile {
-
- dotIndex := strings.LastIndex(fileName, ".")
- ext, mtype := "", ""
- if dotIndex > 0 {
- ext = strings.ToLower(fileName[dotIndex:])
- mtype = mime.TypeByExtension(ext)
- }
- contentType := part.Header.Get("Content-Type")
- if contentType != "" && mtype != contentType {
- mimeType = contentType //only return mime type if not deductable
- mtype = contentType
- }
-
- if part.Header.Get("Content-Encoding") == "gzip" {
- if unzipped, e := util.UnGzipData(data); e == nil {
- originalDataSize = len(unzipped)
- }
- isGzipped = true
- } else if util.IsGzippable(ext, mtype, data) {
- if compressedData, err := util.GzipData(data); err == nil {
- if len(data) > len(compressedData) {
- data = compressedData
- isGzipped = true
- }
- }
- }
- }
-
- return
-}
diff --git a/weed/storage/needle/needle_parse_upload.go b/weed/storage/needle/needle_parse_upload.go
new file mode 100644
index 000000000..0babeda20
--- /dev/null
+++ b/weed/storage/needle/needle_parse_upload.go
@@ -0,0 +1,166 @@
+package needle
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "mime"
+ "net/http"
+ "path"
+ "strconv"
+ "strings"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+type ParsedUpload struct {
+ FileName string
+ Data []byte
+ MimeType string
+ PairMap map[string]string
+ IsGzipped bool
+ OriginalDataSize int
+ ModifiedTime uint64
+ Ttl *TTL
+ IsChunkedFile bool
+ UncompressedData []byte
+}
+
+func ParseUpload(r *http.Request, sizeLimit int64) (pu *ParsedUpload, e error) {
+ pu = &ParsedUpload{}
+ pu.PairMap = make(map[string]string)
+ for k, v := range r.Header {
+ if len(v) > 0 && strings.HasPrefix(k, PairNamePrefix) {
+ pu.PairMap[k] = v[0]
+ }
+ }
+
+ if r.Method == "POST" {
+ e = parseMultipart(r, sizeLimit, pu)
+ } else {
+ e = parsePut(r, sizeLimit, pu)
+ }
+ if e != nil {
+ return
+ }
+
+ pu.ModifiedTime, _ = strconv.ParseUint(r.FormValue("ts"), 10, 64)
+ pu.Ttl, _ = ReadTTL(r.FormValue("ttl"))
+
+ pu.OriginalDataSize = len(pu.Data)
+ pu.UncompressedData = pu.Data
+ if pu.IsGzipped {
+ if unzipped, e := util.UnGzipData(pu.Data); e == nil {
+ pu.OriginalDataSize = len(unzipped)
+ pu.UncompressedData = unzipped
+ }
+ } else if shouldGzip, _ := util.IsGzippableFileType("", pu.MimeType); pu.MimeType == "" || shouldGzip {
+ if compressedData, err := util.GzipData(pu.Data); err == nil {
+ pu.Data = compressedData
+ pu.IsGzipped = true
+ }
+ }
+
+ return
+}
+
+func parsePut(r *http.Request, sizeLimit int64, pu *ParsedUpload) (e error) {
+ pu.IsGzipped = r.Header.Get("Content-Encoding") == "gzip"
+ pu.MimeType = r.Header.Get("Content-Type")
+ pu.FileName = ""
+ pu.Data, e = ioutil.ReadAll(io.LimitReader(r.Body, sizeLimit+1))
+ if e == io.EOF || int64(len(pu.Data)) == sizeLimit+1 { // pu.OriginalDataSize is not set until after parsing
+ io.Copy(ioutil.Discard, r.Body)
+ }
+ r.Body.Close()
+ return nil
+}
+
+func parseMultipart(r *http.Request, sizeLimit int64, pu *ParsedUpload) (e error) {
+ defer func() {
+ if e != nil && r.Body != nil {
+ io.Copy(ioutil.Discard, r.Body)
+ r.Body.Close()
+ }
+ }()
+ form, fe := r.MultipartReader()
+ if fe != nil {
+ glog.V(0).Infoln("MultipartReader [ERROR]", fe)
+ e = fe
+ return
+ }
+
+ //first multi-part item
+ part, fe := form.NextPart()
+ if fe != nil {
+ glog.V(0).Infoln("Reading Multi part [ERROR]", fe)
+ e = fe
+ return
+ }
+
+ pu.FileName = part.FileName()
+ if pu.FileName != "" {
+ pu.FileName = path.Base(pu.FileName)
+ }
+
+ pu.Data, e = ioutil.ReadAll(io.LimitReader(part, sizeLimit+1))
+ if e != nil {
+ glog.V(0).Infoln("Reading Content [ERROR]", e)
+ return
+ }
+ if len(pu.Data) == int(sizeLimit)+1 {
+ e = fmt.Errorf("file over the limited %d bytes", sizeLimit)
+ return
+ }
+
+ //if the filename is an empty string, search the other multi-part items
+ for pu.FileName == "" {
+ part2, fe := form.NextPart()
+ if fe != nil {
+ break // no more or on error, just safely break
+ }
+
+ fName := part2.FileName()
+
+ //found the first multi-part item that has a filename
+ if fName != "" {
+ data2, fe2 := ioutil.ReadAll(io.LimitReader(part2, sizeLimit+1))
+ if fe2 != nil {
+ glog.V(0).Infoln("Reading Content [ERROR]", fe2)
+ e = fe2
+ return
+ }
+ if len(data2) == int(sizeLimit)+1 {
+ e = fmt.Errorf("file over the limited %d bytes", sizeLimit)
+ return
+ }
+
+ //update
+ pu.Data = data2
+ pu.FileName = path.Base(fName)
+ break
+ }
+ }
+
+ pu.IsChunkedFile, _ = strconv.ParseBool(r.FormValue("cm"))
+
+ if !pu.IsChunkedFile {
+
+ dotIndex := strings.LastIndex(pu.FileName, ".")
+ ext, mtype := "", ""
+ if dotIndex > 0 {
+ ext = strings.ToLower(pu.FileName[dotIndex:])
+ mtype = mime.TypeByExtension(ext)
+ }
+ contentType := part.Header.Get("Content-Type")
+ if contentType != "" && contentType != "application/octet-stream" && mtype != contentType {
+ pu.MimeType = contentType //only return the mime type if it is not deducible
+ mtype = contentType
+ }
+
+ pu.IsGzipped = part.Header.Get("Content-Encoding") == "gzip"
+ }
+
+ return
+}
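Collapsing the nine-value return into a `ParsedUpload` struct makes call sites like `CreateNeedleFromRequest` far easier to read. A sketch of consuming it from a plain HTTP handler; the handler, path, and port are illustrative only:

package main

import (
	"fmt"
	"net/http"

	"github.com/chrislusf/seaweedfs/weed/storage/needle"
)

func uploadHandler(w http.ResponseWriter, r *http.Request) {
	pu, err := needle.ParseUpload(r, 32<<20) // 32 MiB size limit
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	fmt.Fprintf(w, "got %q (%s), %d bytes before compression, gzipped=%v\n",
		pu.FileName, pu.MimeType, pu.OriginalDataSize, pu.IsGzipped)
}

func main() {
	http.HandleFunc("/submit", uploadHandler)
	fmt.Println(http.ListenAndServe(":8080", nil))
}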
diff --git a/weed/storage/needle_map.go b/weed/storage/needle_map.go
index 77d081ea7..8962e78cb 100644
--- a/weed/storage/needle_map.go
+++ b/weed/storage/needle_map.go
@@ -30,6 +30,7 @@ type NeedleMapper interface {
DeletedCount() int
MaxFileKey() NeedleId
IndexFileSize() uint64
+ Sync() error
}
type baseNeedleMapper struct {
@@ -59,3 +60,7 @@ func (nm *baseNeedleMapper) appendToIndexFile(key NeedleId, offset Offset, size
_, err := nm.indexFile.Write(bytes)
return err
}
+
+func (nm *baseNeedleMapper) Sync() error {
+ return nm.indexFile.Sync()
+}
diff --git a/weed/storage/needle_map/memdb.go b/weed/storage/needle_map/memdb.go
index 9eb4d9f56..a52d52a10 100644
--- a/weed/storage/needle_map/memdb.go
+++ b/weed/storage/needle_map/memdb.go
@@ -113,3 +113,7 @@ func (cm *MemDb) LoadFromIdx(idxName string) (ret error) {
})
}
+
+func (cm *MemDb) Close() {
+ cm.db.Close()
+}
diff --git a/weed/storage/needle_map/memdb_test.go b/weed/storage/needle_map/memdb_test.go
new file mode 100644
index 000000000..7b45d23f8
--- /dev/null
+++ b/weed/storage/needle_map/memdb_test.go
@@ -0,0 +1,23 @@
+package needle_map
+
+import (
+ "testing"
+
+ "github.com/chrislusf/seaweedfs/weed/storage/types"
+)
+
+func BenchmarkMemDb(b *testing.B) {
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ nm := NewMemDb()
+
+ nid := types.NeedleId(345)
+ offset := types.Offset{
+ OffsetHigher: types.OffsetHigher{},
+ OffsetLower: types.OffsetLower{},
+ }
+ nm.Set(nid, offset, 324)
+ nm.Close()
+ }
+
+}
diff --git a/weed/storage/needle_map_leveldb.go b/weed/storage/needle_map_leveldb.go
index 3bb258559..63485522d 100644
--- a/weed/storage/needle_map_leveldb.go
+++ b/weed/storage/needle_map_leveldb.go
@@ -5,14 +5,17 @@ import (
"os"
"path/filepath"
- "github.com/chrislusf/seaweedfs/weed/storage/idx"
+ "github.com/syndtr/goleveldb/leveldb/errors"
"github.com/syndtr/goleveldb/leveldb/opt"
+ "github.com/chrislusf/seaweedfs/weed/storage/idx"
+
+ "github.com/syndtr/goleveldb/leveldb"
+
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/storage/needle_map"
. "github.com/chrislusf/seaweedfs/weed/storage/types"
"github.com/chrislusf/seaweedfs/weed/util"
- "github.com/syndtr/goleveldb/leveldb"
)
type LevelDbNeedleMap struct {
@@ -32,7 +35,12 @@ func NewLevelDbNeedleMap(dbFileName string, indexFile *os.File, opts *opt.Option
glog.V(1).Infof("Opening %s...", dbFileName)
if m.db, err = leveldb.OpenFile(dbFileName, opts); err != nil {
- return
+ if errors.IsCorrupted(err) {
+ m.db, err = leveldb.RecoverFile(dbFileName, opts)
+ }
+ if err != nil {
+ return
+ }
}
glog.V(1).Infof("Loading %s...", indexFile.Name())
mm, indexLoadError := newNeedleMapMetricFromIndexFile(indexFile)
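Falling back to `leveldb.RecoverFile` when `OpenFile` reports corruption is goleveldb's documented recovery path: it rebuilds the tables it can read and drops the rest, which beats refusing to load the volume at all. The pattern in isolation:

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/errors"
	"github.com/syndtr/goleveldb/leveldb/opt"
)

func openOrRecover(path string, opts *opt.Options) (*leveldb.DB, error) {
	db, err := leveldb.OpenFile(path, opts)
	if errors.IsCorrupted(err) {
		// salvage what is readable rather than failing the whole volume load
		db, err = leveldb.RecoverFile(path, opts)
	}
	return db, err
}

func main() {
	db, err := openOrRecover("/tmp/needle.ldb", nil)
	if err != nil {
		fmt.Println("open:", err)
		return
	}
	defer db.Close()
	fmt.Println("db ready")
}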
diff --git a/weed/storage/store.go b/weed/storage/store.go
index 2d02e2f80..14881ffde 100644
--- a/weed/storage/store.go
+++ b/weed/storage/store.go
@@ -12,6 +12,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
"github.com/chrislusf/seaweedfs/weed/stats"
+ "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
"github.com/chrislusf/seaweedfs/weed/storage/super_block"
. "github.com/chrislusf/seaweedfs/weed/storage/types"
@@ -99,6 +100,9 @@ func (s *Store) FindFreeLocation() (ret *DiskLocation) {
max := 0
for _, location := range s.Locations {
currentFreeCount := location.MaxVolumeCount - location.VolumesLen()
+ currentFreeCount *= erasure_coding.DataShardsCount
+ currentFreeCount -= location.EcVolumesLen()
+ currentFreeCount /= erasure_coding.DataShardsCount
if currentFreeCount > max {
max = currentFreeCount
ret = location
@@ -131,33 +135,54 @@ func (s *Store) addVolume(vid needle.VolumeId, collection string, needleMapKind
return fmt.Errorf("No more free space left")
}
-func (s *Store) VolumeInfos() []*VolumeInfo {
- var stats []*VolumeInfo
+func (s *Store) VolumeInfos() (allStats []*VolumeInfo) {
for _, location := range s.Locations {
- location.volumesLock.RLock()
- for k, v := range location.volumes {
- s := &VolumeInfo{
- Id: needle.VolumeId(k),
- Size: v.ContentSize(),
- Collection: v.Collection,
- ReplicaPlacement: v.ReplicaPlacement,
- Version: v.Version(),
- FileCount: int(v.FileCount()),
- DeleteCount: int(v.DeletedCount()),
- DeletedByteCount: v.DeletedSize(),
- ReadOnly: v.noWriteOrDelete || v.noWriteCanDelete,
- Ttl: v.Ttl,
- CompactRevision: uint32(v.CompactionRevision),
- }
- s.RemoteStorageName, s.RemoteStorageKey = v.RemoteStorageNameKey()
- stats = append(stats, s)
- }
- location.volumesLock.RUnlock()
+ stats := collectStatsForOneLocation(location)
+ allStats = append(allStats, stats...)
+ }
+ sortVolumeInfos(allStats)
+ return allStats
+}
+
+func collectStatsForOneLocation(location *DiskLocation) (stats []*VolumeInfo) {
+ location.volumesLock.RLock()
+ defer location.volumesLock.RUnlock()
+
+ for k, v := range location.volumes {
+ s := collectStatForOneVolume(k, v)
+ stats = append(stats, s)
}
- sortVolumeInfos(stats)
return stats
}
+func collectStatForOneVolume(vid needle.VolumeId, v *Volume) (s *VolumeInfo) {
+
+ s = &VolumeInfo{
+ Id: vid,
+ Collection: v.Collection,
+ ReplicaPlacement: v.ReplicaPlacement,
+ Version: v.Version(),
+ ReadOnly: v.IsReadOnly(),
+ Ttl: v.Ttl,
+ CompactRevision: uint32(v.CompactionRevision),
+ }
+ s.RemoteStorageName, s.RemoteStorageKey = v.RemoteStorageNameKey()
+
+ v.dataFileAccessLock.RLock()
+ defer v.dataFileAccessLock.RUnlock()
+
+ if v.nm == nil {
+ return
+ }
+
+ s.FileCount = v.nm.FileCount()
+ s.DeleteCount = v.nm.DeletedCount()
+ s.DeletedByteCount = v.nm.DeletedSize()
+ s.Size = v.nm.ContentSize()
+
+ return
+}
+
func (s *Store) SetDataCenter(dataCenter string) {
s.dataCenter = dataCenter
}
@@ -196,8 +221,14 @@ func (s *Store) CollectHeartbeat() *master_pb.Heartbeat {
// delete expired volumes.
location.volumesLock.Lock()
for _, vid := range deleteVids {
- location.deleteVolumeById(vid)
- glog.V(0).Infoln("volume", vid, "is deleted.")
+ found, err := location.deleteVolumeById(vid)
+ if found {
+ if err == nil {
+ glog.V(0).Infof("volume %d is deleted", vid)
+ } else {
+ glog.V(0).Infof("delete volume %d: %v", vid, err)
+ }
+ }
}
location.volumesLock.Unlock()
}
@@ -227,17 +258,13 @@ func (s *Store) Close() {
}
}
-func (s *Store) WriteVolumeNeedle(i needle.VolumeId, n *needle.Needle) (size uint32, isUnchanged bool, err error) {
+func (s *Store) WriteVolumeNeedle(i needle.VolumeId, n *needle.Needle, fsync bool) (isUnchanged bool, err error) {
if v := s.findVolume(i); v != nil {
- if v.noWriteOrDelete || v.noWriteCanDelete {
+ if v.IsReadOnly() {
err = fmt.Errorf("volume %d is read only", i)
return
}
- if MaxPossibleVolumeSize >= v.ContentSize()+uint64(needle.GetActualSize(size, v.Version())) {
- _, size, isUnchanged, err = v.writeNeedle(n)
- } else {
- err = fmt.Errorf("volume size limit %d exceeded! current size is %d", s.GetVolumeSizeLimit(), v.ContentSize())
- }
+ _, _, isUnchanged, err = v.writeNeedle2(n, fsync)
return
}
glog.V(0).Infoln("volume", i, "not found!")
@@ -250,11 +277,7 @@ func (s *Store) DeleteVolumeNeedle(i needle.VolumeId, n *needle.Needle) (uint32,
if v.noWriteOrDelete {
return 0, fmt.Errorf("volume %d is read only", i)
}
- if MaxPossibleVolumeSize >= v.ContentSize()+uint64(needle.GetActualSize(0, v.Version())) {
- return v.deleteNeedle(n)
- } else {
- return 0, fmt.Errorf("volume size limit %d exceeded! current size is %d", s.GetVolumeSizeLimit(), v.ContentSize())
- }
+ return v.deleteNeedle2(n)
}
return 0, fmt.Errorf("volume %d not found on %s:%d", i, s.Ip, s.Port)
}
@@ -329,7 +352,7 @@ func (s *Store) UnmountVolume(i needle.VolumeId) error {
func (s *Store) DeleteVolume(i needle.VolumeId) error {
v := s.findVolume(i)
if v == nil {
- return nil
+ return fmt.Errorf("delete volume %d not found on disk", i)
}
message := master_pb.VolumeShortInformationMessage{
Id: uint32(v.Id),
@@ -339,7 +362,7 @@ func (s *Store) DeleteVolume(i needle.VolumeId) error {
Ttl: v.Ttl.ToUint32(),
}
for _, location := range s.Locations {
- if error := location.deleteVolumeById(i); error == nil {
+ if found, error := location.deleteVolumeById(i); found && error == nil {
glog.V(0).Infof("DeleteVolume %d", i)
s.DeletedVolumesChan <- message
return nil
@@ -358,7 +381,7 @@ func (s *Store) ConfigureVolume(i needle.VolumeId, replication string) error {
}
// load, modify, save
baseFileName := strings.TrimSuffix(fileInfo.Name(), filepath.Ext(fileInfo.Name()))
- vifFile := filepath.Join(location.Directory, baseFileName + ".vif")
+ vifFile := filepath.Join(location.Directory, baseFileName+".vif")
volumeInfo, _, err := pb.MaybeLoadVolumeInfo(vifFile)
if err != nil {
return fmt.Errorf("volume %d fail to load vif", i)
@@ -381,3 +404,24 @@ func (s *Store) SetVolumeSizeLimit(x uint64) {
func (s *Store) GetVolumeSizeLimit() uint64 {
return atomic.LoadUint64(&s.volumeSizeLimit)
}
+
+func (s *Store) MaybeAdjustVolumeMax() (hasChanges bool) {
+ volumeSizeLimit := s.GetVolumeSizeLimit()
+ for _, diskLocation := range s.Locations {
+ if diskLocation.MaxVolumeCount == 0 {
+ diskStatus := stats.NewDiskStatus(diskLocation.Directory)
+ unusedSpace := diskLocation.UnUsedSpace(volumeSizeLimit)
+ unclaimedSpaces := int64(diskStatus.Free) - int64(unusedSpace)
+ volCount := diskLocation.VolumesLen()
+ maxVolumeCount := volCount
+ if unclaimedSpaces > int64(volumeSizeLimit) {
+ maxVolumeCount += int(uint64(unclaimedSpaces)/volumeSizeLimit) - 1
+ }
+ diskLocation.MaxVolumeCount = maxVolumeCount
+ glog.V(0).Infof("disk %s max %d unclaimedSpace:%dMB, unused:%dMB volumeSizeLimit:%dMB",
+ diskLocation.Directory, maxVolumeCount, unclaimedSpaces/1024/1024, unusedSpace/1024/1024, volumeSizeLimit/1024/1024)
+ hasChanges = true
+ }
+ }
+ return
+}
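`MaybeAdjustVolumeMax` converts free disk space into a volume budget: unclaimed space is the partition's free bytes minus the growth room existing volumes already reserve, and every further `volumeSizeLimit` of it (less one slot, as head-room) becomes an extra volume. The arithmetic with sample numbers:

package main

import "fmt"

func main() {
	const volumeSizeLimit uint64 = 30 << 30 // 30 GiB
	var diskFree uint64 = 200 << 30         // 200 GiB free on the partition
	var unusedSpace uint64 = 50 << 30       // growth room reserved by current volumes
	volCount := 3

	unclaimed := int64(diskFree) - int64(unusedSpace) // 150 GiB nobody has claimed
	maxVolumeCount := volCount
	if unclaimed > int64(volumeSizeLimit) {
		maxVolumeCount += int(uint64(unclaimed)/volumeSizeLimit) - 1 // 3 + 5 - 1 = 7
	}
	fmt.Println("max volume count:", maxVolumeCount)
}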
diff --git a/weed/storage/store_ec.go b/weed/storage/store_ec.go
index 47e061d05..e423e7dca 100644
--- a/weed/storage/store_ec.go
+++ b/weed/storage/store_ec.go
@@ -116,7 +116,7 @@ func (s *Store) DestroyEcVolume(vid needle.VolumeId) {
}
}
-func (s *Store) ReadEcShardNeedle(ctx context.Context, vid needle.VolumeId, n *needle.Needle) (int, error) {
+func (s *Store) ReadEcShardNeedle(vid needle.VolumeId, n *needle.Needle) (int, error) {
for _, location := range s.Locations {
if localEcVolume, found := location.FindEcVolume(vid); found {
@@ -133,7 +133,7 @@ func (s *Store) ReadEcShardNeedle(ctx context.Context, vid needle.VolumeId, n *n
if len(intervals) > 1 {
glog.V(3).Infof("ReadEcShardNeedle needle id %s intervals:%+v", n.String(), intervals)
}
- bytes, isDeleted, err := s.readEcShardIntervals(ctx, vid, n.Id, localEcVolume, intervals)
+ bytes, isDeleted, err := s.readEcShardIntervals(vid, n.Id, localEcVolume, intervals)
if err != nil {
return 0, fmt.Errorf("ReadEcShardIntervals: %v", err)
}
@@ -152,14 +152,14 @@ func (s *Store) ReadEcShardNeedle(ctx context.Context, vid needle.VolumeId, n *n
return 0, fmt.Errorf("ec shard %d not found", vid)
}
-func (s *Store) readEcShardIntervals(ctx context.Context, vid needle.VolumeId, needleId types.NeedleId, ecVolume *erasure_coding.EcVolume, intervals []erasure_coding.Interval) (data []byte, is_deleted bool, err error) {
+func (s *Store) readEcShardIntervals(vid needle.VolumeId, needleId types.NeedleId, ecVolume *erasure_coding.EcVolume, intervals []erasure_coding.Interval) (data []byte, is_deleted bool, err error) {
- if err = s.cachedLookupEcShardLocations(ctx, ecVolume); err != nil {
+ if err = s.cachedLookupEcShardLocations(ecVolume); err != nil {
return nil, false, fmt.Errorf("failed to locate shard via master grpc %s: %v", s.MasterAddress, err)
}
for i, interval := range intervals {
- if d, isDeleted, e := s.readOneEcShardInterval(ctx, needleId, ecVolume, interval); e != nil {
+ if d, isDeleted, e := s.readOneEcShardInterval(needleId, ecVolume, interval); e != nil {
return nil, isDeleted, e
} else {
if isDeleted {
@@ -175,7 +175,7 @@ func (s *Store) readEcShardIntervals(ctx context.Context, vid needle.VolumeId, n
return
}
-func (s *Store) readOneEcShardInterval(ctx context.Context, needleId types.NeedleId, ecVolume *erasure_coding.EcVolume, interval erasure_coding.Interval) (data []byte, is_deleted bool, err error) {
+func (s *Store) readOneEcShardInterval(needleId types.NeedleId, ecVolume *erasure_coding.EcVolume, interval erasure_coding.Interval) (data []byte, is_deleted bool, err error) {
shardId, actualOffset := interval.ToShardIdAndOffset(erasure_coding.ErasureCodingLargeBlockSize, erasure_coding.ErasureCodingSmallBlockSize)
data = make([]byte, interval.Size)
if shard, found := ecVolume.FindEcVolumeShard(shardId); found {
@@ -190,7 +190,7 @@ func (s *Store) readOneEcShardInterval(ctx context.Context, needleId types.Needl
// try reading directly
if hasShardIdLocation {
- _, is_deleted, err = s.readRemoteEcShardInterval(ctx, sourceDataNodes, needleId, ecVolume.VolumeId, shardId, data, actualOffset)
+ _, is_deleted, err = s.readRemoteEcShardInterval(sourceDataNodes, needleId, ecVolume.VolumeId, shardId, data, actualOffset)
if err == nil {
return
}
@@ -199,7 +199,7 @@ func (s *Store) readOneEcShardInterval(ctx context.Context, needleId types.Needl
}
// try reading by recovering from other shards
- _, is_deleted, err = s.recoverOneRemoteEcShardInterval(ctx, needleId, ecVolume, shardId, data, actualOffset)
+ _, is_deleted, err = s.recoverOneRemoteEcShardInterval(needleId, ecVolume, shardId, data, actualOffset)
if err == nil {
return
}
@@ -215,7 +215,7 @@ func forgetShardId(ecVolume *erasure_coding.EcVolume, shardId erasure_coding.Sha
ecVolume.ShardLocationsLock.Unlock()
}
-func (s *Store) cachedLookupEcShardLocations(ctx context.Context, ecVolume *erasure_coding.EcVolume) (err error) {
+func (s *Store) cachedLookupEcShardLocations(ecVolume *erasure_coding.EcVolume) (err error) {
shardCount := len(ecVolume.ShardLocations)
if shardCount < erasure_coding.DataShardsCount &&
@@ -230,11 +230,11 @@ func (s *Store) cachedLookupEcShardLocations(ctx context.Context, ecVolume *eras
glog.V(3).Infof("lookup and cache ec volume %d locations", ecVolume.VolumeId)
- err = operation.WithMasterServerClient(s.MasterAddress, s.grpcDialOption, func(ctx context.Context, masterClient master_pb.SeaweedClient) error {
+ err = operation.WithMasterServerClient(s.MasterAddress, s.grpcDialOption, func(masterClient master_pb.SeaweedClient) error {
req := &master_pb.LookupEcVolumeRequest{
VolumeId: uint32(ecVolume.VolumeId),
}
- resp, err := masterClient.LookupEcVolume(ctx, req)
+ resp, err := masterClient.LookupEcVolume(context.Background(), req)
if err != nil {
return fmt.Errorf("lookup ec volume %d: %v", ecVolume.VolumeId, err)
}
@@ -258,7 +258,7 @@ func (s *Store) cachedLookupEcShardLocations(ctx context.Context, ecVolume *eras
return
}
-func (s *Store) readRemoteEcShardInterval(ctx context.Context, sourceDataNodes []string, needleId types.NeedleId, vid needle.VolumeId, shardId erasure_coding.ShardId, buf []byte, offset int64) (n int, is_deleted bool, err error) {
+func (s *Store) readRemoteEcShardInterval(sourceDataNodes []string, needleId types.NeedleId, vid needle.VolumeId, shardId erasure_coding.ShardId, buf []byte, offset int64) (n int, is_deleted bool, err error) {
if len(sourceDataNodes) == 0 {
return 0, false, fmt.Errorf("failed to find ec shard %d.%d", vid, shardId)
@@ -266,7 +266,7 @@ func (s *Store) readRemoteEcShardInterval(ctx context.Context, sourceDataNodes [
for _, sourceDataNode := range sourceDataNodes {
glog.V(3).Infof("read remote ec shard %d.%d from %s", vid, shardId, sourceDataNode)
- n, is_deleted, err = s.doReadRemoteEcShardInterval(ctx, sourceDataNode, needleId, vid, shardId, buf, offset)
+ n, is_deleted, err = s.doReadRemoteEcShardInterval(sourceDataNode, needleId, vid, shardId, buf, offset)
if err == nil {
return
}
@@ -276,12 +276,12 @@ func (s *Store) readRemoteEcShardInterval(ctx context.Context, sourceDataNodes [
return
}
-func (s *Store) doReadRemoteEcShardInterval(ctx context.Context, sourceDataNode string, needleId types.NeedleId, vid needle.VolumeId, shardId erasure_coding.ShardId, buf []byte, offset int64) (n int, is_deleted bool, err error) {
+func (s *Store) doReadRemoteEcShardInterval(sourceDataNode string, needleId types.NeedleId, vid needle.VolumeId, shardId erasure_coding.ShardId, buf []byte, offset int64) (n int, is_deleted bool, err error) {
- err = operation.WithVolumeServerClient(sourceDataNode, s.grpcDialOption, func(ctx context.Context, client volume_server_pb.VolumeServerClient) error {
+ err = operation.WithVolumeServerClient(sourceDataNode, s.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error {
// copy data slice
- shardReadClient, err := client.VolumeEcShardRead(ctx, &volume_server_pb.VolumeEcShardReadRequest{
+ shardReadClient, err := client.VolumeEcShardRead(context.Background(), &volume_server_pb.VolumeEcShardReadRequest{
VolumeId: uint32(vid),
ShardId: uint32(shardId),
Offset: offset,
@@ -316,7 +316,7 @@ func (s *Store) doReadRemoteEcShardInterval(ctx context.Context, sourceDataNode
return
}
-func (s *Store) recoverOneRemoteEcShardInterval(ctx context.Context, needleId types.NeedleId, ecVolume *erasure_coding.EcVolume, shardIdToRecover erasure_coding.ShardId, buf []byte, offset int64) (n int, is_deleted bool, err error) {
+func (s *Store) recoverOneRemoteEcShardInterval(needleId types.NeedleId, ecVolume *erasure_coding.EcVolume, shardIdToRecover erasure_coding.ShardId, buf []byte, offset int64) (n int, is_deleted bool, err error) {
glog.V(3).Infof("recover ec shard %d.%d from other locations", ecVolume.VolumeId, shardIdToRecover)
enc, err := reedsolomon.New(erasure_coding.DataShardsCount, erasure_coding.ParityShardsCount)
@@ -344,7 +344,7 @@ func (s *Store) recoverOneRemoteEcShardInterval(ctx context.Context, needleId ty
go func(shardId erasure_coding.ShardId, locations []string) {
defer wg.Done()
data := make([]byte, len(buf))
- nRead, isDeleted, readErr := s.readRemoteEcShardInterval(ctx, locations, needleId, ecVolume.VolumeId, shardId, data, offset)
+ nRead, isDeleted, readErr := s.readRemoteEcShardInterval(locations, needleId, ecVolume.VolumeId, shardId, data, offset)
if readErr != nil {
glog.V(3).Infof("recover: readRemoteEcShardInterval %d.%d %d bytes from %+v: %v", ecVolume.VolumeId, shardId, nRead, locations, readErr)
forgetShardId(ecVolume, shardId)
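The EC read path tries shards at their known locations first and, only when that fails, rebuilds the missing interval from the surviving shards via `klauspost/reedsolomon`. SeaweedFS uses a 10+4 split; the 4+2 split below is just to keep the sketch small:

package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/reedsolomon"
)

func main() {
	const dataShards, parityShards = 4, 2
	enc, err := reedsolomon.New(dataShards, parityShards)
	if err != nil {
		panic(err)
	}
	shards := make([][]byte, dataShards+parityShards)
	for i := 0; i < dataShards; i++ {
		shards[i] = bytes.Repeat([]byte{byte(i + 1)}, 8)
	}
	for i := dataShards; i < dataShards+parityShards; i++ {
		shards[i] = make([]byte, 8)
	}
	if err := enc.Encode(shards); err != nil {
		panic(err)
	}
	lost := append([]byte(nil), shards[2]...)
	shards[2] = nil // simulate an unreachable shard
	if err := enc.Reconstruct(shards); err != nil {
		panic(err)
	}
	fmt.Println("recovered:", bytes.Equal(shards[2], lost)) // true
}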
diff --git a/weed/storage/store_ec_delete.go b/weed/storage/store_ec_delete.go
index 2ac907f6c..4a75fb20b 100644
--- a/weed/storage/store_ec_delete.go
+++ b/weed/storage/store_ec_delete.go
@@ -12,9 +12,9 @@ import (
"github.com/chrislusf/seaweedfs/weed/storage/types"
)
-func (s *Store) DeleteEcShardNeedle(ctx context.Context, ecVolume *erasure_coding.EcVolume, n *needle.Needle, cookie types.Cookie) (int64, error) {
+func (s *Store) DeleteEcShardNeedle(ecVolume *erasure_coding.EcVolume, n *needle.Needle, cookie types.Cookie) (int64, error) {
- count, err := s.ReadEcShardNeedle(ctx, ecVolume.VolumeId, n)
+ count, err := s.ReadEcShardNeedle(ecVolume.VolumeId, n)
if err != nil {
return 0, err
@@ -24,7 +24,7 @@ func (s *Store) DeleteEcShardNeedle(ctx context.Context, ecVolume *erasure_codin
return 0, fmt.Errorf("unexpected cookie %x", cookie)
}
- if err = s.doDeleteNeedleFromAtLeastOneRemoteEcShards(ctx, ecVolume, n.Id); err != nil {
+ if err = s.doDeleteNeedleFromAtLeastOneRemoteEcShards(ecVolume, n.Id); err != nil {
return 0, err
}
@@ -32,7 +32,7 @@ func (s *Store) DeleteEcShardNeedle(ctx context.Context, ecVolume *erasure_codin
}
-func (s *Store) doDeleteNeedleFromAtLeastOneRemoteEcShards(ctx context.Context, ecVolume *erasure_coding.EcVolume, needleId types.NeedleId) error {
+func (s *Store) doDeleteNeedleFromAtLeastOneRemoteEcShards(ecVolume *erasure_coding.EcVolume, needleId types.NeedleId) error {
_, _, intervals, err := ecVolume.LocateEcShardNeedle(needleId, ecVolume.Version)
@@ -43,13 +43,13 @@ func (s *Store) doDeleteNeedleFromAtLeastOneRemoteEcShards(ctx context.Context,
shardId, _ := intervals[0].ToShardIdAndOffset(erasure_coding.ErasureCodingLargeBlockSize, erasure_coding.ErasureCodingSmallBlockSize)
hasDeletionSuccess := false
- err = s.doDeleteNeedleFromRemoteEcShardServers(ctx, shardId, ecVolume, needleId)
+ err = s.doDeleteNeedleFromRemoteEcShardServers(shardId, ecVolume, needleId)
if err == nil {
hasDeletionSuccess = true
}
for shardId = erasure_coding.DataShardsCount; shardId < erasure_coding.TotalShardsCount; shardId++ {
- if parityDeletionError := s.doDeleteNeedleFromRemoteEcShardServers(ctx, shardId, ecVolume, needleId); parityDeletionError == nil {
+ if parityDeletionError := s.doDeleteNeedleFromRemoteEcShardServers(shardId, ecVolume, needleId); parityDeletionError == nil {
hasDeletionSuccess = true
}
}
@@ -62,7 +62,7 @@ func (s *Store) doDeleteNeedleFromAtLeastOneRemoteEcShards(ctx context.Context,
}
-func (s *Store) doDeleteNeedleFromRemoteEcShardServers(ctx context.Context, shardId erasure_coding.ShardId, ecVolume *erasure_coding.EcVolume, needleId types.NeedleId) error {
+func (s *Store) doDeleteNeedleFromRemoteEcShardServers(shardId erasure_coding.ShardId, ecVolume *erasure_coding.EcVolume, needleId types.NeedleId) error {
ecVolume.ShardLocationsLock.RLock()
sourceDataNodes, hasShardLocations := ecVolume.ShardLocations[shardId]
@@ -74,7 +74,7 @@ func (s *Store) doDeleteNeedleFromRemoteEcShardServers(ctx context.Context, shar
for _, sourceDataNode := range sourceDataNodes {
glog.V(4).Infof("delete from remote ec shard %d.%d from %s", ecVolume.VolumeId, shardId, sourceDataNode)
- err := s.doDeleteNeedleFromRemoteEcShard(ctx, sourceDataNode, ecVolume.VolumeId, ecVolume.Collection, ecVolume.Version, needleId)
+ err := s.doDeleteNeedleFromRemoteEcShard(sourceDataNode, ecVolume.VolumeId, ecVolume.Collection, ecVolume.Version, needleId)
if err != nil {
return err
}
@@ -85,12 +85,12 @@ func (s *Store) doDeleteNeedleFromRemoteEcShardServers(ctx context.Context, shar
}
-func (s *Store) doDeleteNeedleFromRemoteEcShard(ctx context.Context, sourceDataNode string, vid needle.VolumeId, collection string, version needle.Version, needleId types.NeedleId) error {
+func (s *Store) doDeleteNeedleFromRemoteEcShard(sourceDataNode string, vid needle.VolumeId, collection string, version needle.Version, needleId types.NeedleId) error {
- return operation.WithVolumeServerClient(sourceDataNode, s.grpcDialOption, func(ctx context.Context, client volume_server_pb.VolumeServerClient) error {
+ return operation.WithVolumeServerClient(sourceDataNode, s.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error {
// copy data slice
- _, err := client.VolumeEcBlobDelete(ctx, &volume_server_pb.VolumeEcBlobDeleteRequest{
+ _, err := client.VolumeEcBlobDelete(context.Background(), &volume_server_pb.VolumeEcBlobDeleteRequest{
VolumeId: uint32(vid),
Collection: collection,
FileKey: uint64(needleId),
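
Across these call sites the pattern is the same: operation.WithVolumeServerClient no longer threads a context.Context into its callback, so each RPC now creates its own. A minimal sketch of the new callback shape, assuming the wrapper keeps its dial-and-invoke behavior (the address and ids below are illustrative):

package main

import (
	"context"

	"github.com/chrislusf/seaweedfs/weed/operation"
	"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
	"google.golang.org/grpc"
)

// deleteBlob shows the post-refactor shape: the wrapper hands the callback
// only the client, and the callback creates context.Background() per RPC.
func deleteBlob(grpcDialOption grpc.DialOption) error {
	return operation.WithVolumeServerClient("localhost:8080", grpcDialOption,
		func(client volume_server_pb.VolumeServerClient) error {
			_, err := client.VolumeEcBlobDelete(context.Background(),
				&volume_server_pb.VolumeEcBlobDeleteRequest{
					VolumeId: 7,
					FileKey:  12345,
				})
			return err
		})
}
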
diff --git a/weed/storage/store_vacuum.go b/weed/storage/store_vacuum.go
index e94d9b516..38159496e 100644
--- a/weed/storage/store_vacuum.go
+++ b/weed/storage/store_vacuum.go
@@ -16,8 +16,7 @@ func (s *Store) CheckCompactVolume(volumeId needle.VolumeId) (float64, error) {
}
func (s *Store) CompactVolume(vid needle.VolumeId, preallocate int64, compactionBytePerSecond int64) error {
if v := s.findVolume(vid); v != nil {
- return v.Compact2(preallocate) // compactionBytePerSecond
- // return v.Compact(preallocate, compactionBytePerSecond)
+ return v.Compact2(preallocate, compactionBytePerSecond)
}
return fmt.Errorf("volume id %d is not found during compact", vid)
}
diff --git a/weed/storage/types/offset_4bytes.go b/weed/storage/types/offset_4bytes.go
index 9acd069d3..ded5b88cb 100644
--- a/weed/storage/types/offset_4bytes.go
+++ b/weed/storage/types/offset_4bytes.go
@@ -12,7 +12,7 @@ type OffsetHigher struct {
const (
OffsetSize = 4
- MaxPossibleVolumeSize = 4 * 1024 * 1024 * 1024 * 8 // 32GB
+ MaxPossibleVolumeSize uint64 = 4 * 1024 * 1024 * 1024 * 8 // 32GB
)
func OffsetToBytes(bytes []byte, offset Offset) {
diff --git a/weed/storage/types/offset_5bytes.go b/weed/storage/types/offset_5bytes.go
index f57e4f6d4..1db22d524 100644
--- a/weed/storage/types/offset_5bytes.go
+++ b/weed/storage/types/offset_5bytes.go
@@ -12,7 +12,7 @@ type OffsetHigher struct {
const (
OffsetSize = 4 + 1
- MaxPossibleVolumeSize = 4 * 1024 * 1024 * 1024 * 8 * 256 /* 256 is from the extra byte */ // 8TB
+ MaxPossibleVolumeSize uint64 = 4 * 1024 * 1024 * 1024 * 8 * 256 /* 256 is from the extra byte */ // 8TB
)
func OffsetToBytes(bytes []byte, offset Offset) {
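
In both the 4-byte and 5-byte variants, giving MaxPossibleVolumeSize an explicit uint64 type lets call sites compare it directly against uint64 sums (as the new size-limit checks below do) without a conversion; as an untyped constant it would take whatever type the surrounding expression forced on it. A self-contained illustration:

package main

import "fmt"

// Typed uint64 constant, as in offset_4bytes.go after this change.
const MaxPossibleVolumeSize uint64 = 4 * 1024 * 1024 * 1024 * 8 // 32GB

func main() {
	var contentSize uint64 = 31 << 30 // current volume content size
	actualSize := int64(2 << 30)      // size the next write would add
	// the int64 operand needs an explicit cast; the constant itself does not
	if MaxPossibleVolumeSize < contentSize+uint64(actualSize) {
		fmt.Println("volume size limit exceeded")
	}
}
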
diff --git a/weed/storage/volume.go b/weed/storage/volume.go
index acede66bf..df63360a1 100644
--- a/weed/storage/volume.go
+++ b/weed/storage/volume.go
@@ -33,6 +33,7 @@ type Volume struct {
super_block.SuperBlock
dataFileAccessLock sync.RWMutex
+ asyncRequestsChan chan *needle.AsyncRequest
lastModifiedTsSeconds uint64 //unix time in seconds
lastAppendAtNs uint64 //unix time in nanoseconds
@@ -46,12 +47,15 @@ type Volume struct {
func NewVolume(dirname string, collection string, id needle.VolumeId, needleMapKind NeedleMapType, replicaPlacement *super_block.ReplicaPlacement, ttl *needle.TTL, preallocate int64, memoryMapMaxSizeMb uint32) (v *Volume, e error) {
// if replicaPlacement is nil, the superblock will be loaded from disk
- v = &Volume{dir: dirname, Collection: collection, Id: id, MemoryMapMaxSizeMb: memoryMapMaxSizeMb}
+ v = &Volume{dir: dirname, Collection: collection, Id: id, MemoryMapMaxSizeMb: memoryMapMaxSizeMb,
+ asyncRequestsChan: make(chan *needle.AsyncRequest, 128)}
v.SuperBlock = super_block.SuperBlock{ReplicaPlacement: replicaPlacement, Ttl: ttl}
v.needleMapKind = needleMapKind
e = v.load(true, true, needleMapKind, preallocate)
+ v.startWorker()
return
}
+
func (v *Volume) String() string {
return fmt.Sprintf("Id:%v, dir:%s, Collection:%s, dataFile:%v, nm:%v, noWrite:%v canDelete:%v", v.Id, v.dir, v.Collection, v.DataBackend, v.nm, v.noWriteOrDelete || v.noWriteCanDelete, v.noWriteCanDelete)
}
@@ -65,6 +69,7 @@ func VolumeFileName(dir string, collection string, id int) (fileName string) {
}
return
}
+
func (v *Volume) FileName() (fileName string) {
return VolumeFileName(v.dir, v.Collection, int(v.Id))
}
@@ -180,9 +185,9 @@ func (v *Volume) expired(volumeSizeLimit uint64) bool {
if v.Ttl == nil || v.Ttl.Minutes() == 0 {
return false
}
- glog.V(1).Infof("now:%v lastModified:%v", time.Now().Unix(), v.lastModifiedTsSeconds)
+ glog.V(2).Infof("now:%v lastModified:%v", time.Now().Unix(), v.lastModifiedTsSeconds)
livedMinutes := (time.Now().Unix() - int64(v.lastModifiedTsSeconds)) / 60
- glog.V(1).Infof("ttl:%v lived:%v", v.Ttl, livedMinutes)
+ glog.V(2).Infof("ttl:%v lived:%v", v.Ttl, livedMinutes)
if int64(v.Ttl.Minutes()) < livedMinutes {
return true
}
@@ -215,7 +220,7 @@ func (v *Volume) ToVolumeInformationMessage() *master_pb.VolumeInformationMessag
FileCount: v.FileCount(),
DeleteCount: v.DeletedCount(),
DeletedByteCount: v.DeletedSize(),
- ReadOnly: v.noWriteOrDelete,
+ ReadOnly: v.IsReadOnly(),
ReplicaPlacement: uint32(v.ReplicaPlacement.Byte()),
Version: uint32(v.Version()),
Ttl: v.Ttl.ToUint32(),
@@ -237,3 +242,7 @@ func (v *Volume) RemoteStorageNameKey() (storageName, storageKey string) {
}
return v.volumeInfo.GetFiles()[0].BackendName(), v.volumeInfo.GetFiles()[0].GetKey()
}
+
+func (v *Volume) IsReadOnly() bool {
+ return v.noWriteOrDelete || v.noWriteCanDelete
+}
diff --git a/weed/storage/volume_backup.go b/weed/storage/volume_backup.go
index 3763d5515..f7075fe2b 100644
--- a/weed/storage/volume_backup.go
+++ b/weed/storage/volume_backup.go
@@ -72,9 +72,9 @@ func (v *Volume) IncrementalBackup(volumeServer string, grpcDialOption grpc.Dial
writeOffset := int64(startFromOffset)
- err = operation.WithVolumeServerClient(volumeServer, grpcDialOption, func(ctx context.Context, client volume_server_pb.VolumeServerClient) error {
+ err = operation.WithVolumeServerClient(volumeServer, grpcDialOption, func(client volume_server_pb.VolumeServerClient) error {
- stream, err := client.VolumeIncrementalCopy(ctx, &volume_server_pb.VolumeIncrementalCopyRequest{
+ stream, err := client.VolumeIncrementalCopy(context.Background(), &volume_server_pb.VolumeIncrementalCopyRequest{
VolumeId: uint32(v.Id),
SinceNs: appendAtNs,
})
diff --git a/weed/storage/volume_checking.go b/weed/storage/volume_checking.go
index a65c2a3ff..c33f0049a 100644
--- a/weed/storage/volume_checking.go
+++ b/weed/storage/volume_checking.go
@@ -58,7 +58,7 @@ func readIndexEntryAtOffset(indexFile *os.File, offset int64) (bytes []byte, err
func verifyNeedleIntegrity(datFile backend.BackendStorageFile, v needle.Version, offset int64, key NeedleId, size uint32) (lastAppendAtNs uint64, err error) {
n := new(needle.Needle)
if err = n.ReadData(datFile, offset, size, v); err != nil {
- return n.AppendAtNs, err
+ return n.AppendAtNs, fmt.Errorf("read data [%d,%d) : %v", offset, offset+int64(size), err)
}
if n.Id != key {
return n.AppendAtNs, fmt.Errorf("index key %#x does not match needle's Id %#x", key, n.Id)
diff --git a/weed/storage/volume_loading.go b/weed/storage/volume_loading.go
index 6b42fc452..b0b17af75 100644
--- a/weed/storage/volume_loading.go
+++ b/weed/storage/volume_loading.go
@@ -54,7 +54,7 @@ func (v *Volume) load(alsoLoadIndex bool, createDatIfMissing bool, needleMapKind
v.DataBackend = backend.NewDiskFile(dataFile)
} else {
if createDatIfMissing {
- v.DataBackend, err = createVolumeFile(fileName+".dat", preallocate, v.MemoryMapMaxSizeMb)
+ v.DataBackend, err = backend.CreateVolumeFile(fileName+".dat", preallocate, v.MemoryMapMaxSizeMb)
} else {
return fmt.Errorf("Volume Data file %s.dat does not exist.", fileName)
}
@@ -94,7 +94,7 @@ func (v *Volume) load(alsoLoadIndex bool, createDatIfMissing bool, needleMapKind
glog.V(0).Infof("volumeDataIntegrityChecking failed %v", err)
}
- if v.noWriteOrDelete || v.noWriteCanDelete {
+ if v.IsReadOnly() {
if v.nm, err = NewSortedFileNeedleMap(fileName, indexFile); err != nil {
glog.V(0).Infof("loading sorted db %s error: %v", fileName+".sdx", err)
}
diff --git a/weed/storage/volume_read_write.go b/weed/storage/volume_read_write.go
index ac6154cef..dce800242 100644
--- a/weed/storage/volume_read_write.go
+++ b/weed/storage/volume_read_write.go
@@ -46,6 +46,7 @@ func (v *Volume) Destroy() (err error) {
err = fmt.Errorf("volume %d is compacting", v.Id)
return
}
+ close(v.asyncRequestsChan)
storageName, storageKey := v.RemoteStorageNameKey()
if v.HasRemoteFile() && storageName != "" && storageKey != "" {
if backendStorage, found := backend.BackendStorages[storageName]; found {
@@ -63,21 +64,91 @@ func (v *Volume) Destroy() (err error) {
return
}
-func (v *Volume) writeNeedle(n *needle.Needle) (offset uint64, size uint32, isUnchanged bool, err error) {
+func (v *Volume) asyncRequestAppend(request *needle.AsyncRequest) {
+ v.asyncRequestsChan <- request
+}
+
+func (v *Volume) syncWrite(n *needle.Needle) (offset uint64, size uint32, isUnchanged bool, err error) {
// glog.V(4).Infof("writing needle %s", needle.NewFileIdFromNeedle(v.Id, n).String())
+ actualSize := needle.GetActualSize(uint32(len(n.Data)), v.Version())
+
v.dataFileAccessLock.Lock()
defer v.dataFileAccessLock.Unlock()
+
+ if MaxPossibleVolumeSize < v.nm.ContentSize()+uint64(actualSize) {
+ err = fmt.Errorf("volume size limit %d exceeded! current size is %d", MaxPossibleVolumeSize, v.ContentSize())
+ return
+ }
if v.isFileUnchanged(n) {
size = n.DataSize
isUnchanged = true
return
}
+ // check whether existing needle cookie matches
+ nv, ok := v.nm.Get(n.Id)
+ if ok {
+ existingNeedle, _, _, existingNeedleReadErr := needle.ReadNeedleHeader(v.DataBackend, v.Version(), nv.Offset.ToAcutalOffset())
+ if existingNeedleReadErr != nil {
+ err = fmt.Errorf("reading existing needle: %v", existingNeedleReadErr)
+ return
+ }
+ if existingNeedle.Cookie != n.Cookie {
+ glog.V(0).Infof("write cookie mismatch: existing %x, new %x", existingNeedle.Cookie, n.Cookie)
+ err = fmt.Errorf("mismatching cookie %x", n.Cookie)
+ return
+ }
+ }
+
+ // append to dat file
+ n.AppendAtNs = uint64(time.Now().UnixNano())
+ if offset, size, _, err = n.Append(v.DataBackend, v.Version()); err != nil {
+ return
+ }
+
+ v.lastAppendAtNs = n.AppendAtNs
+
+ // add to needle map
+ if !ok || uint64(nv.Offset.ToAcutalOffset()) < offset {
+ if err = v.nm.Put(n.Id, ToOffset(int64(offset)), n.Size); err != nil {
+ glog.V(4).Infof("failed to save in needle map %d: %v", n.Id, err)
+ }
+ }
+ if v.lastModifiedTsSeconds < n.LastModified {
+ v.lastModifiedTsSeconds = n.LastModified
+ }
+ return
+}
+
+func (v *Volume) writeNeedle2(n *needle.Needle, fsync bool) (offset uint64, size uint32, isUnchanged bool, err error) {
+ // glog.V(4).Infof("writing needle %s", needle.NewFileIdFromNeedle(v.Id, n).String())
if n.Ttl == needle.EMPTY_TTL && v.Ttl != needle.EMPTY_TTL {
n.SetHasTtl()
n.Ttl = v.Ttl
}
+ if !fsync {
+ return v.syncWrite(n)
+ } else {
+ asyncRequest := needle.NewAsyncRequest(n, true)
+		// use len(n.Data) here instead of n.Size, because n.Size is only populated in n.Append()
+ asyncRequest.ActualSize = needle.GetActualSize(uint32(len(n.Data)), v.Version())
+
+ v.asyncRequestAppend(asyncRequest)
+ offset, _, isUnchanged, err = asyncRequest.WaitComplete()
+
+ return
+ }
+}
+
+func (v *Volume) doWriteRequest(n *needle.Needle) (offset uint64, size uint32, isUnchanged bool, err error) {
+ // glog.V(4).Infof("writing needle %s", needle.NewFileIdFromNeedle(v.Id, n).String())
+ if v.isFileUnchanged(n) {
+ size = n.DataSize
+ isUnchanged = true
+ return
+ }
+
// check whether existing needle cookie matches
nv, ok := v.nm.Get(n.Id)
if ok {
@@ -112,10 +183,55 @@ func (v *Volume) writeNeedle(n *needle.Needle) (offset uint64, size uint32, isUn
return
}
-func (v *Volume) deleteNeedle(n *needle.Needle) (uint32, error) {
+func (v *Volume) syncDelete(n *needle.Needle) (uint32, error) {
glog.V(4).Infof("delete needle %s", needle.NewFileIdFromNeedle(v.Id, n).String())
+ actualSize := needle.GetActualSize(0, v.Version())
v.dataFileAccessLock.Lock()
defer v.dataFileAccessLock.Unlock()
+
+ if MaxPossibleVolumeSize < v.nm.ContentSize()+uint64(actualSize) {
+ err := fmt.Errorf("volume size limit %d exceeded! current size is %d", MaxPossibleVolumeSize, v.ContentSize())
+ return 0, err
+ }
+
+ nv, ok := v.nm.Get(n.Id)
+ //fmt.Println("key", n.Id, "volume offset", nv.Offset, "data_size", n.Size, "cached size", nv.Size)
+ if ok && nv.Size != TombstoneFileSize {
+ size := nv.Size
+ n.Data = nil
+ n.AppendAtNs = uint64(time.Now().UnixNano())
+ offset, _, _, err := n.Append(v.DataBackend, v.Version())
+ if err != nil {
+ return size, err
+ }
+ v.lastAppendAtNs = n.AppendAtNs
+ if err = v.nm.Delete(n.Id, ToOffset(int64(offset))); err != nil {
+ return size, err
+ }
+ return size, err
+ }
+ return 0, nil
+}
+
+func (v *Volume) deleteNeedle2(n *needle.Needle) (uint32, error) {
+	// todo: delete entries are currently appended without fsync; this may need fsync in the future
+ fsync := false
+
+ if !fsync {
+ return v.syncDelete(n)
+ } else {
+ asyncRequest := needle.NewAsyncRequest(n, false)
+ asyncRequest.ActualSize = needle.GetActualSize(0, v.Version())
+
+ v.asyncRequestAppend(asyncRequest)
+ _, size, _, err := asyncRequest.WaitComplete()
+
+ return uint32(size), err
+ }
+}
+
+func (v *Volume) doDeleteRequest(n *needle.Needle) (uint32, error) {
+ glog.V(4).Infof("delete needle %s", needle.NewFileIdFromNeedle(v.Id, n).String())
nv, ok := v.nm.Get(n.Id)
//fmt.Println("key", n.Id, "volume offset", nv.Offset, "data_size", n.Size, "cached size", nv.Size)
if ok && nv.Size != TombstoneFileSize {
@@ -171,6 +287,81 @@ func (v *Volume) readNeedle(n *needle.Needle) (int, error) {
return -1, ErrorNotFound
}
+func (v *Volume) startWorker() {
+ go func() {
+ chanClosed := false
+ for {
+			// channel closed; the worker goroutine exits
+ if chanClosed {
+ break
+ }
+ currentRequests := make([]*needle.AsyncRequest, 0, 128)
+ currentBytesToWrite := int64(0)
+ for {
+ request, ok := <-v.asyncRequestsChan
+				// the volume may have been closed
+ if !ok {
+ chanClosed = true
+ break
+ }
+ if MaxPossibleVolumeSize < v.ContentSize()+uint64(currentBytesToWrite+request.ActualSize) {
+ request.Complete(0, 0, false,
+ fmt.Errorf("volume size limit %d exceeded! current size is %d", MaxPossibleVolumeSize, v.ContentSize()))
+ break
+ }
+ currentRequests = append(currentRequests, request)
+ currentBytesToWrite += request.ActualSize
+				// submit at most 4MB or 128 requests at a time to keep request latency low;
+				// also break when the channel is empty, to avoid an I/O hang
+ if currentBytesToWrite >= 4*1024*1024 || len(currentRequests) >= 128 || len(v.asyncRequestsChan) == 0 {
+ break
+ }
+ }
+ if len(currentRequests) == 0 {
+ continue
+ }
+ v.dataFileAccessLock.Lock()
+ end, _, e := v.DataBackend.GetStat()
+ if e != nil {
+ for i := 0; i < len(currentRequests); i++ {
+ currentRequests[i].Complete(0, 0, false,
+ fmt.Errorf("cannot read current volume position: %v", e))
+ }
+ v.dataFileAccessLock.Unlock()
+ continue
+ }
+
+ for i := 0; i < len(currentRequests); i++ {
+ if currentRequests[i].IsWriteRequest {
+ offset, size, isUnchanged, err := v.doWriteRequest(currentRequests[i].N)
+ currentRequests[i].UpdateResult(offset, uint64(size), isUnchanged, err)
+ } else {
+ size, err := v.doDeleteRequest(currentRequests[i].N)
+ currentRequests[i].UpdateResult(0, uint64(size), false, err)
+ }
+ }
+
+			// if the sync fails, the data is not reliable; mark the completed requests as failed and roll back
+ if err := v.DataBackend.Sync(); err != nil {
+				// todo: this may leave dirty data or cause inconsistency; perhaps weed should panic here?
+ if te := v.DataBackend.Truncate(end); te != nil {
+ glog.V(0).Infof("Failed to truncate %s back to %d with error: %v", v.DataBackend.Name(), end, te)
+ }
+ for i := 0; i < len(currentRequests); i++ {
+ if currentRequests[i].IsSucceed() {
+ currentRequests[i].UpdateResult(0, 0, false, err)
+ }
+ }
+ }
+
+ for i := 0; i < len(currentRequests); i++ {
+ currentRequests[i].Submit()
+ }
+ v.dataFileAccessLock.Unlock()
+ }
+ }()
+}
+
type VolumeFileScanner interface {
VisitSuperBlock(super_block.SuperBlock) error
ReadNeedleBody() bool
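
The worker installed by startWorker above is a small group commit: it drains up to 4MB or 128 queued requests (stopping early as soon as the channel is momentarily empty), applies them all under one lock, then issues a single Sync, truncating the file and failing the whole batch if that sync fails. A stripped-down, self-contained sketch of just the draining loop (the request type here is made up):

package main

import "fmt"

// request is a stand-in for needle.AsyncRequest.
type request struct {
	size int64
}

// drainBatch collects queued requests until a byte or count threshold is
// reached, or the channel is momentarily empty; ok becomes false once the
// channel has been closed and fully drained.
func drainBatch(ch chan *request) (batch []*request, ok bool) {
	ok = true
	var bytes int64
	for {
		r, open := <-ch
		if !open {
			ok = false
			break
		}
		batch = append(batch, r)
		bytes += r.size
		if bytes >= 4*1024*1024 || len(batch) >= 128 || len(ch) == 0 {
			break
		}
	}
	return
}

func main() {
	ch := make(chan *request, 128)
	for i := 0; i < 5; i++ {
		ch <- &request{size: 1024}
	}
	close(ch)
	batch, _ := drainBatch(ch)
	fmt.Println("batched", len(batch), "requests") // batched 5 requests
}
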
diff --git a/weed/storage/volume_super_block.go b/weed/storage/volume_super_block.go
index 1d7f35595..5e913e062 100644
--- a/weed/storage/volume_super_block.go
+++ b/weed/storage/volume_super_block.go
@@ -37,7 +37,7 @@ func (v *Volume) maybeWriteSuperBlock() error {
func (v *Volume) readSuperBlock() (err error) {
v.SuperBlock, err = super_block.ReadSuperBlock(v.DataBackend)
- if v.volumeInfo != nil && v.volumeInfo.Replication != ""{
+ if v.volumeInfo != nil && v.volumeInfo.Replication != "" {
if replication, err := super_block.NewReplicaPlacementFromString(v.volumeInfo.Replication); err != nil {
return fmt.Errorf("Error parse volume %d replication %s : %v", v.Id, v.volumeInfo.Replication, err)
} else {
diff --git a/weed/storage/volume_vacuum.go b/weed/storage/volume_vacuum.go
index 523b37e34..ed8172909 100644
--- a/weed/storage/volume_vacuum.go
+++ b/weed/storage/volume_vacuum.go
@@ -3,6 +3,7 @@ package storage
import (
"fmt"
"os"
+ "runtime"
"time"
"github.com/chrislusf/seaweedfs/weed/glog"
@@ -52,11 +53,17 @@ func (v *Volume) Compact(preallocate int64, compactionBytePerSecond int64) error
v.lastCompactIndexOffset = v.IndexFileSize()
v.lastCompactRevision = v.SuperBlock.CompactionRevision
glog.V(3).Infof("creating copies for volume %d ,last offset %d...", v.Id, v.lastCompactIndexOffset)
+	if err := v.DataBackend.Sync(); err != nil {
+		glog.V(0).Infof("compact failed to sync volume %d: %v", v.Id, err)
+	}
+	if err := v.nm.Sync(); err != nil {
+		glog.V(0).Infof("compact failed to sync volume idx %d: %v", v.Id, err)
+	}
return v.copyDataAndGenerateIndexFile(filePath+".cpd", filePath+".cpx", preallocate, compactionBytePerSecond)
}
// compact a volume based on deletions in .idx files
-func (v *Volume) Compact2(preallocate int64) error {
+func (v *Volume) Compact2(preallocate int64, compactionBytePerSecond int64) error {
if v.MemoryMapMaxSizeMb != 0 { //it makes no sense to compact in memory
return nil
@@ -72,7 +79,13 @@ func (v *Volume) Compact2(preallocate int64) error {
v.lastCompactIndexOffset = v.IndexFileSize()
v.lastCompactRevision = v.SuperBlock.CompactionRevision
glog.V(3).Infof("creating copies for volume %d ...", v.Id)
- return copyDataBasedOnIndexFile(filePath+".dat", filePath+".idx", filePath+".cpd", filePath+".cpx", v.SuperBlock, v.Version(), preallocate)
+ if err := v.DataBackend.Sync(); err != nil {
+ glog.V(0).Infof("compact2 fail to sync volume dat %d: %v", v.Id, err)
+ }
+ if err := v.nm.Sync(); err != nil {
+ glog.V(0).Infof("compact2 fail to sync volume idx %d: %v", v.Id, err)
+ }
+ return copyDataBasedOnIndexFile(filePath+".dat", filePath+".idx", filePath+".cpd", filePath+".cpx", v.SuperBlock, v.Version(), preallocate, compactionBytePerSecond)
}
func (v *Volume) CommitCompact() error {
@@ -91,8 +104,10 @@ func (v *Volume) CommitCompact() error {
glog.V(3).Infof("Got volume %d committing lock...", v.Id)
v.nm.Close()
- if err := v.DataBackend.Close(); err != nil {
- glog.V(0).Infof("fail to close volume %d", v.Id)
+ if v.DataBackend != nil {
+ if err := v.DataBackend.Close(); err != nil {
+ glog.V(0).Infof("fail to close volume %d", v.Id)
+ }
}
v.DataBackend = nil
stats.VolumeServerVolumeCounter.WithLabelValues(v.Collection, "volume").Dec()
@@ -109,6 +124,16 @@ func (v *Volume) CommitCompact() error {
return e
}
} else {
+ if runtime.GOOS == "windows" {
+ e = os.RemoveAll(v.FileName() + ".dat")
+ if e != nil {
+ return e
+ }
+ e = os.RemoveAll(v.FileName() + ".idx")
+ if e != nil {
+ return e
+ }
+ }
var e error
if e = os.Rename(v.FileName()+".cpd", v.FileName()+".dat"); e != nil {
return fmt.Errorf("rename %s: %v", v.FileName()+".cpd", e)
@@ -122,7 +147,6 @@ func (v *Volume) CommitCompact() error {
//time.Sleep(20 * time.Second)
os.RemoveAll(v.FileName() + ".ldb")
- os.RemoveAll(v.FileName() + ".bdb")
glog.V(3).Infof("Loading volume %d commit file...", v.Id)
if e = v.load(true, false, v.needleMapKind, 0); e != nil {
@@ -330,12 +354,13 @@ func (v *Volume) copyDataAndGenerateIndexFile(dstName, idxName string, prealloca
var (
dst backend.BackendStorageFile
)
- if dst, err = createVolumeFile(dstName, preallocate, 0); err != nil {
+ if dst, err = backend.CreateVolumeFile(dstName, preallocate, 0); err != nil {
return
}
defer dst.Close()
nm := needle_map.NewMemDb()
+ defer nm.Close()
scanner := &VolumeFileScanner4Vacuum{
v: v,
@@ -353,18 +378,20 @@ func (v *Volume) copyDataAndGenerateIndexFile(dstName, idxName string, prealloca
return
}
-func copyDataBasedOnIndexFile(srcDatName, srcIdxName, dstDatName, datIdxName string, sb super_block.SuperBlock, version needle.Version, preallocate int64) (err error) {
+func copyDataBasedOnIndexFile(srcDatName, srcIdxName, dstDatName, datIdxName string, sb super_block.SuperBlock, version needle.Version, preallocate int64, compactionBytePerSecond int64) (err error) {
var (
srcDatBackend, dstDatBackend backend.BackendStorageFile
dataFile *os.File
)
- if dstDatBackend, err = createVolumeFile(dstDatName, preallocate, 0); err != nil {
+ if dstDatBackend, err = backend.CreateVolumeFile(dstDatName, preallocate, 0); err != nil {
return
}
defer dstDatBackend.Close()
oldNm := needle_map.NewMemDb()
+ defer oldNm.Close()
newNm := needle_map.NewMemDb()
+ defer newNm.Close()
if err = oldNm.LoadFromIdx(srcIdxName); err != nil {
return
}
@@ -372,6 +399,7 @@ func copyDataBasedOnIndexFile(srcDatName, srcIdxName, dstDatName, datIdxName str
return err
}
srcDatBackend = backend.NewDiskFile(dataFile)
+ defer srcDatBackend.Close()
now := uint64(time.Now().Unix())
@@ -379,6 +407,8 @@ func copyDataBasedOnIndexFile(srcDatName, srcIdxName, dstDatName, datIdxName str
dstDatBackend.WriteAt(sb.Bytes(), 0)
newOffset := int64(sb.BlockSize())
+ writeThrottler := util.NewWriteThrottler(compactionBytePerSecond)
+
oldNm.AscendingVisit(func(value needle_map.NeedleValue) error {
offset, size := value.Offset, value.Size
@@ -403,8 +433,10 @@ func copyDataBasedOnIndexFile(srcDatName, srcIdxName, dstDatName, datIdxName str
if _, _, _, err = n.Append(dstDatBackend, sb.Version); err != nil {
return fmt.Errorf("cannot append needle: %s", err)
}
- newOffset += n.DiskSize(version)
- glog.V(3).Infoln("saving key", n.Id, "volume offset", offset, "=>", newOffset, "data_size", n.Size)
+ delta := n.DiskSize(version)
+ newOffset += delta
+ writeThrottler.MaybeSlowdown(delta)
+ glog.V(4).Infoln("saving key", n.Id, "volume offset", offset, "=>", newOffset, "data_size", n.Size)
return nil
})
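
copyDataBasedOnIndexFile now paces itself with util.NewWriteThrottler, calling MaybeSlowdown(delta) after each copied needle so the compaction stays under compactionBytePerSecond. A hedged sketch of the idea only; the type, field, and method names below are made up, not the util package's actual ones:

package main

import (
	"fmt"
	"time"
)

// throttler paces writes to a target bytes-per-second rate; a rate of 0
// disables throttling, matching how compactionBytePerSecond behaves.
type throttler struct {
	bytesPerSecond int64
	written        int64
	start          time.Time
}

func newThrottler(bps int64) *throttler {
	return &throttler{bytesPerSecond: bps, start: time.Now()}
}

func (t *throttler) maybeSlowdown(delta int64) {
	if t.bytesPerSecond <= 0 {
		return
	}
	t.written += delta
	// time the writes are ahead of the allowed schedule
	expected := time.Duration(t.written) * time.Second / time.Duration(t.bytesPerSecond)
	if ahead := expected - time.Since(t.start); ahead > 0 {
		time.Sleep(ahead)
	}
}

func main() {
	t := newThrottler(1024 * 1024) // 1MB/s
	start := time.Now()
	for i := 0; i < 4; i++ {
		t.maybeSlowdown(512 * 1024) // pretend we copied 512KB
	}
	fmt.Println("elapsed:", time.Since(start).Round(time.Second)) // ~2s
}
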
diff --git a/weed/storage/volume_vacuum_test.go b/weed/storage/volume_vacuum_test.go
index 95f43d6ec..1b5161e63 100644
--- a/weed/storage/volume_vacuum_test.go
+++ b/weed/storage/volume_vacuum_test.go
@@ -84,7 +84,7 @@ func TestCompaction(t *testing.T) {
}
startTime := time.Now()
- v.Compact2(0)
+ v.Compact2(0, 0)
speed := float64(v.ContentSize()) / time.Now().Sub(startTime).Seconds()
t.Logf("compaction speed: %.2f bytes/s", speed)
@@ -129,7 +129,7 @@ func TestCompaction(t *testing.T) {
}
func doSomeWritesDeletes(i int, v *Volume, t *testing.T, infos []*needleInfo) {
n := newRandomNeedle(uint64(i))
- _, size, _, err := v.writeNeedle(n)
+ _, size, _, err := v.writeNeedle2(n, false)
if err != nil {
t.Fatalf("write file %d: %v", i, err)
}
@@ -141,7 +141,7 @@ func doSomeWritesDeletes(i int, v *Volume, t *testing.T, infos []*needleInfo) {
if rand.Float64() < 0.03 {
toBeDeleted := rand.Intn(i) + 1
oldNeedle := newEmptyNeedle(uint64(toBeDeleted))
- v.deleteNeedle(oldNeedle)
+ v.deleteNeedle2(oldNeedle)
// println("deleted file", toBeDeleted)
infos[toBeDeleted-1] = &needleInfo{
size: 0,
diff --git a/weed/topology/allocate_volume.go b/weed/topology/allocate_volume.go
index 6ca987bc5..e5dc48652 100644
--- a/weed/topology/allocate_volume.go
+++ b/weed/topology/allocate_volume.go
@@ -15,7 +15,7 @@ type AllocateVolumeResult struct {
func AllocateVolume(dn *DataNode, grpcDialOption grpc.DialOption, vid needle.VolumeId, option *VolumeGrowOption) error {
- return operation.WithVolumeServerClient(dn.Url(), grpcDialOption, func(ctx context.Context, client volume_server_pb.VolumeServerClient) error {
+ return operation.WithVolumeServerClient(dn.Url(), grpcDialOption, func(client volume_server_pb.VolumeServerClient) error {
_, deleteErr := client.AllocateVolume(context.Background(), &volume_server_pb.AllocateVolumeRequest{
VolumeId: uint32(vid),
diff --git a/weed/topology/collection.go b/weed/topology/collection.go
index 7a611d904..5b410d1eb 100644
--- a/weed/topology/collection.go
+++ b/weed/topology/collection.go
@@ -11,11 +11,16 @@ import (
type Collection struct {
Name string
volumeSizeLimit uint64
+ replicationAsMin bool
storageType2VolumeLayout *util.ConcurrentReadMap
}
-func NewCollection(name string, volumeSizeLimit uint64) *Collection {
- c := &Collection{Name: name, volumeSizeLimit: volumeSizeLimit}
+func NewCollection(name string, volumeSizeLimit uint64, replicationAsMin bool) *Collection {
+ c := &Collection{
+ Name: name,
+ volumeSizeLimit: volumeSizeLimit,
+ replicationAsMin: replicationAsMin,
+ }
c.storageType2VolumeLayout = util.NewConcurrentReadMap()
return c
}
@@ -30,7 +35,7 @@ func (c *Collection) GetOrCreateVolumeLayout(rp *super_block.ReplicaPlacement, t
keyString += ttl.String()
}
vl := c.storageType2VolumeLayout.Get(keyString, func() interface{} {
- return NewVolumeLayout(rp, ttl, c.volumeSizeLimit)
+ return NewVolumeLayout(rp, ttl, c.volumeSizeLimit, c.replicationAsMin)
})
return vl.(*VolumeLayout)
}
diff --git a/weed/topology/data_node.go b/weed/topology/data_node.go
index 617341e54..0dccdd0f2 100644
--- a/weed/topology/data_node.go
+++ b/weed/topology/data_node.go
@@ -2,6 +2,7 @@ package topology
import (
"fmt"
+ "github.com/chrislusf/seaweedfs/weed/util"
"strconv"
"sync"
@@ -166,6 +167,7 @@ func (dn *DataNode) ToMap() interface{} {
ret := make(map[string]interface{})
ret["Url"] = dn.Url()
ret["Volumes"] = dn.GetVolumeCount()
+ ret["VolumeIds"] = dn.GetVolumeIds()
ret["EcShards"] = dn.GetEcShardCount()
ret["Max"] = dn.GetMaxVolumeCount()
ret["Free"] = dn.FreeSpace()
@@ -190,3 +192,14 @@ func (dn *DataNode) ToDataNodeInfo() *master_pb.DataNodeInfo {
}
return m
}
+
+// GetVolumeIds returns the human readable volume ids, limited to at most 100.
+func (dn *DataNode) GetVolumeIds() string {
+ ids := make([]int, 0, len(dn.volumes))
+
+ for k := range dn.volumes {
+ ids = append(ids, int(k))
+ }
+
+ return util.HumanReadableIntsMax(100, ids...)
+}
diff --git a/weed/topology/node.go b/weed/topology/node.go
index 572a89d4d..114417edf 100644
--- a/weed/topology/node.go
+++ b/weed/topology/node.go
@@ -62,56 +62,64 @@ type NodeImpl struct {
}
// the first node must satisfy filterFirstNodeFn(), the rest nodes must have one free slot
-func (n *NodeImpl) RandomlyPickNodes(numberOfNodes int, filterFirstNodeFn func(dn Node) error) (firstNode Node, restNodes []Node, err error) {
- candidates := make([]Node, 0, len(n.children))
+func (n *NodeImpl) PickNodesByWeight(numberOfNodes int, filterFirstNodeFn func(dn Node) error) (firstNode Node, restNodes []Node, err error) {
+ var totalWeights int64
var errs []string
n.RLock()
+ candidates := make([]Node, 0, len(n.children))
+ candidatesWeights := make([]int64, 0, len(n.children))
+	// pick nodes with free volume slots as candidates, and use the free slot count as the node weight
for _, node := range n.children {
+ if node.FreeSpace() <= 0 {
+ continue
+ }
+ totalWeights += node.FreeSpace()
+ candidates = append(candidates, node)
+ candidatesWeights = append(candidatesWeights, node.FreeSpace())
+ }
+ n.RUnlock()
+ if len(candidates) < numberOfNodes {
+ glog.V(0).Infoln(n.Id(), "failed to pick", numberOfNodes, "from ", len(candidates), "node candidates")
+ return nil, nil, errors.New("No enough data node found!")
+ }
+
+	// pick nodes randomly by weight, without replacement; heavier nodes tend to be picked earlier
+ sortedCandidates := make([]Node, 0, len(candidates))
+ for i := 0; i < len(candidates); i++ {
+ weightsInterval := rand.Int63n(totalWeights)
+ lastWeights := int64(0)
+ for k, weights := range candidatesWeights {
+ if (weightsInterval >= lastWeights) && (weightsInterval < lastWeights+weights) {
+ sortedCandidates = append(sortedCandidates, candidates[k])
+ candidatesWeights[k] = 0
+ totalWeights -= weights
+ break
+ }
+ lastWeights += weights
+ }
+ }
+
+ restNodes = make([]Node, 0, numberOfNodes-1)
+ ret := false
+ n.RLock()
+ for k, node := range sortedCandidates {
if err := filterFirstNodeFn(node); err == nil {
- candidates = append(candidates, node)
+ firstNode = node
+ if k >= numberOfNodes-1 {
+ restNodes = sortedCandidates[:numberOfNodes-1]
+ } else {
+ restNodes = append(restNodes, sortedCandidates[:k]...)
+ restNodes = append(restNodes, sortedCandidates[k+1:numberOfNodes]...)
+ }
+ ret = true
+ break
} else {
errs = append(errs, string(node.Id())+":"+err.Error())
}
}
n.RUnlock()
- if len(candidates) == 0 {
- return nil, nil, errors.New("No matching data node found! \n" + strings.Join(errs, "\n"))
- }
- firstNode = candidates[rand.Intn(len(candidates))]
- glog.V(2).Infoln(n.Id(), "picked main node:", firstNode.Id())
-
- restNodes = make([]Node, numberOfNodes-1)
- candidates = candidates[:0]
- n.RLock()
- for _, node := range n.children {
- if node.Id() == firstNode.Id() {
- continue
- }
- if node.FreeSpace() <= 0 {
- continue
- }
- glog.V(2).Infoln("select rest node candidate:", node.Id())
- candidates = append(candidates, node)
- }
- n.RUnlock()
- glog.V(2).Infoln(n.Id(), "picking", numberOfNodes-1, "from rest", len(candidates), "node candidates")
- ret := len(restNodes) == 0
- for k, node := range candidates {
- if k < len(restNodes) {
- restNodes[k] = node
- if k == len(restNodes)-1 {
- ret = true
- }
- } else {
- r := rand.Intn(k + 1)
- if r < len(restNodes) {
- restNodes[r] = node
- }
- }
- }
if !ret {
- glog.V(2).Infoln(n.Id(), "failed to pick", numberOfNodes-1, "from rest", len(candidates), "node candidates")
- err = errors.New("No enough data node found!")
+ return nil, nil, errors.New("No matching data node found! \n" + strings.Join(errs, "\n"))
}
return
}
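
PickNodesByWeight replaces the old uniform random selection with weighted sampling without replacement: each draw lands at a point in [0, totalWeights), a cumulative walk over the weights finds the winner, and the winner's weight is then zeroed so later draws cannot repeat it. A self-contained sketch of just that sampling step:

package main

import (
	"fmt"
	"math/rand"
)

// pickOrderByWeight returns candidate indices in weighted-random order,
// without replacement: a picked candidate's weight is zeroed so it cannot
// win again, mirroring the loop in PickNodesByWeight.
func pickOrderByWeight(weights []int64) (order []int) {
	var total int64
	for _, w := range weights {
		total += w
	}
	for i := 0; i < len(weights); i++ {
		point := rand.Int63n(total) // position in the remaining weight mass
		var acc int64
		for k, w := range weights {
			if point >= acc && point < acc+w {
				order = append(order, k)
				weights[k] = 0
				total -= w
				break
			}
			acc += w
		}
	}
	return
}

func main() {
	// free-slot counts as weights: heavier nodes tend to come first
	fmt.Println(pickOrderByWeight([]int64{2000, 2000, 1000, 1000, 500, 500}))
}
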
@@ -184,30 +192,45 @@ func (n *NodeImpl) ReserveOneVolume(r int64) (assignedNode *DataNode, err error)
}
func (n *NodeImpl) UpAdjustMaxVolumeCountDelta(maxVolumeCountDelta int64) { //can be negative
+ if maxVolumeCountDelta == 0 {
+ return
+ }
atomic.AddInt64(&n.maxVolumeCount, maxVolumeCountDelta)
if n.parent != nil {
n.parent.UpAdjustMaxVolumeCountDelta(maxVolumeCountDelta)
}
}
func (n *NodeImpl) UpAdjustVolumeCountDelta(volumeCountDelta int64) { //can be negative
+ if volumeCountDelta == 0 {
+ return
+ }
atomic.AddInt64(&n.volumeCount, volumeCountDelta)
if n.parent != nil {
n.parent.UpAdjustVolumeCountDelta(volumeCountDelta)
}
}
func (n *NodeImpl) UpAdjustRemoteVolumeCountDelta(remoteVolumeCountDelta int64) { //can be negative
+ if remoteVolumeCountDelta == 0 {
+ return
+ }
atomic.AddInt64(&n.remoteVolumeCount, remoteVolumeCountDelta)
if n.parent != nil {
n.parent.UpAdjustRemoteVolumeCountDelta(remoteVolumeCountDelta)
}
}
func (n *NodeImpl) UpAdjustEcShardCountDelta(ecShardCountDelta int64) { //can be negative
+ if ecShardCountDelta == 0 {
+ return
+ }
atomic.AddInt64(&n.ecShardCount, ecShardCountDelta)
if n.parent != nil {
n.parent.UpAdjustEcShardCountDelta(ecShardCountDelta)
}
}
func (n *NodeImpl) UpAdjustActiveVolumeCountDelta(activeVolumeCountDelta int64) { //can be negative
+ if activeVolumeCountDelta == 0 {
+ return
+ }
atomic.AddInt64(&n.activeVolumeCount, activeVolumeCountDelta)
if n.parent != nil {
n.parent.UpAdjustActiveVolumeCountDelta(activeVolumeCountDelta)
diff --git a/weed/topology/store_replicate.go b/weed/topology/store_replicate.go
index b195b48ed..236f8d773 100644
--- a/weed/topology/store_replicate.go
+++ b/weed/topology/store_replicate.go
@@ -1,7 +1,6 @@
package topology
import (
- "bytes"
"encoding/json"
"errors"
"fmt"
@@ -18,15 +17,15 @@ import (
"github.com/chrislusf/seaweedfs/weed/util"
)
-func ReplicatedWrite(masterNode string, s *storage.Store,
- volumeId needle.VolumeId, n *needle.Needle,
- r *http.Request) (size uint32, isUnchanged bool, err error) {
+func ReplicatedWrite(masterNode string, s *storage.Store, volumeId needle.VolumeId, n *needle.Needle, r *http.Request) (isUnchanged bool, err error) {
//check JWT
jwt := security.GetJwt(r)
+ // check whether this is a replicated write request
var remoteLocations []operation.Location
if r.FormValue("type") != "replicate" {
+ // this is the initial request
remoteLocations, err = getWritableRemoteReplications(s, volumeId, masterNode)
if err != nil {
glog.V(0).Infoln(err)
@@ -34,11 +33,19 @@ func ReplicatedWrite(masterNode string, s *storage.Store,
}
}
- size, isUnchanged, err = s.WriteVolumeNeedle(volumeId, n)
- if err != nil {
- err = fmt.Errorf("failed to write to local disk: %v", err)
- glog.V(0).Infoln(err)
- return
+	// read the fsync form value
+	fsync := r.FormValue("fsync") == "true"
+
+ if s.GetVolume(volumeId) != nil {
+ isUnchanged, err = s.WriteVolumeNeedle(volumeId, n, fsync)
+ if err != nil {
+ err = fmt.Errorf("failed to write to local disk: %v", err)
+ glog.V(0).Infoln(err)
+ return
+ }
}
if len(remoteLocations) > 0 { //send to other replica locations
@@ -72,12 +79,10 @@ func ReplicatedWrite(masterNode string, s *storage.Store,
}
}
- _, err := operation.Upload(u.String(),
- string(n.Name), bytes.NewReader(n.Data), n.IsGzipped(), string(n.Mime),
- pairMap, jwt)
+			// volume servers do not know about encryption
+ _, err := operation.UploadData(u.String(), string(n.Name), false, n.Data, n.IsGzipped(), string(n.Mime), pairMap, jwt)
return err
}); err != nil {
- size = 0
err = fmt.Errorf("failed to write to replicas for volume %d: %v", volumeId, err)
glog.V(0).Infoln(err)
}
@@ -156,23 +161,32 @@ func distributedOperation(locations []operation.Location, store *storage.Store,
func getWritableRemoteReplications(s *storage.Store, volumeId needle.VolumeId, masterNode string) (
remoteLocations []operation.Location, err error) {
- copyCount := s.GetVolume(volumeId).ReplicaPlacement.GetCopyCount()
- if copyCount > 1 {
- if lookupResult, lookupErr := operation.Lookup(masterNode, volumeId.String()); lookupErr == nil {
- if len(lookupResult.Locations) < copyCount {
- err = fmt.Errorf("replicating opetations [%d] is less than volume's replication copy count [%d]",
- len(lookupResult.Locations), copyCount)
- return
+
+ v := s.GetVolume(volumeId)
+ if v != nil && v.ReplicaPlacement.GetCopyCount() == 1 {
+ return
+ }
+
+ // not on local store, or has replications
+ lookupResult, lookupErr := operation.Lookup(masterNode, volumeId.String())
+ if lookupErr == nil {
+ selfUrl := s.Ip + ":" + strconv.Itoa(s.Port)
+ for _, location := range lookupResult.Locations {
+ if location.Url != selfUrl {
+ remoteLocations = append(remoteLocations, location)
}
- selfUrl := s.Ip + ":" + strconv.Itoa(s.Port)
- for _, location := range lookupResult.Locations {
- if location.Url != selfUrl {
- remoteLocations = append(remoteLocations, location)
- }
- }
- } else {
- err = fmt.Errorf("failed to lookup for %d: %v", volumeId, lookupErr)
- return
+ }
+ } else {
+ err = fmt.Errorf("failed to lookup for %d: %v", volumeId, lookupErr)
+ return
+ }
+
+ if v != nil {
+ // has one local and has remote replications
+ copyCount := v.ReplicaPlacement.GetCopyCount()
+ if len(lookupResult.Locations) < copyCount {
+ err = fmt.Errorf("replicating opetations [%d] is less than volume %d replication copy count [%d]",
+ len(lookupResult.Locations), volumeId, copyCount)
}
}
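
With ReplicatedWrite now reading the fsync form value, a client can request a durable write per request. A hedged usage sketch from the client side; the host and file id below are made up, and the upload is reduced to a bare multipart POST:

package main

import (
	"bytes"
	"fmt"
	"mime/multipart"
	"net/http"
)

func main() {
	var body bytes.Buffer
	w := multipart.NewWriter(&body)
	part, _ := w.CreateFormFile("file", "hello.txt")
	part.Write([]byte("hello world"))
	w.Close()

	// fsync=true asks the volume server to sync this write to disk
	// before replying; host and fid here are illustrative only
	resp, err := http.Post("http://127.0.0.1:8080/3,01637037d6?fsync=true",
		w.FormDataContentType(), &body)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
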
diff --git a/weed/topology/topology.go b/weed/topology/topology.go
index fbf998707..c24cab9d6 100644
--- a/weed/topology/topology.go
+++ b/weed/topology/topology.go
@@ -27,7 +27,8 @@ type Topology struct {
pulse int64
- volumeSizeLimit uint64
+ volumeSizeLimit uint64
+ replicationAsMin bool
Sequence sequence.Sequencer
@@ -38,7 +39,7 @@ type Topology struct {
RaftServer raft.Server
}
-func NewTopology(id string, seq sequence.Sequencer, volumeSizeLimit uint64, pulse int) *Topology {
+func NewTopology(id string, seq sequence.Sequencer, volumeSizeLimit uint64, pulse int, replicationAsMin bool) *Topology {
t := &Topology{}
t.id = NodeId(id)
t.nodeType = "Topology"
@@ -48,6 +49,7 @@ func NewTopology(id string, seq sequence.Sequencer, volumeSizeLimit uint64, puls
t.ecShardMap = make(map[needle.VolumeId]*EcShardLocations)
t.pulse = int64(pulse)
t.volumeSizeLimit = volumeSizeLimit
+ t.replicationAsMin = replicationAsMin
t.Sequence = seq
@@ -138,7 +140,7 @@ func (t *Topology) PickForWrite(count uint64, option *VolumeGrowOption) (string,
func (t *Topology) GetVolumeLayout(collectionName string, rp *super_block.ReplicaPlacement, ttl *needle.TTL) *VolumeLayout {
return t.collectionMap.Get(collectionName, func() interface{} {
- return NewCollection(collectionName, t.volumeSizeLimit)
+ return NewCollection(collectionName, t.volumeSizeLimit, t.replicationAsMin)
}).(*Collection).GetOrCreateVolumeLayout(rp, ttl)
}
diff --git a/weed/topology/topology_test.go b/weed/topology/topology_test.go
index e7676ccf7..2fe381ca2 100644
--- a/weed/topology/topology_test.go
+++ b/weed/topology/topology_test.go
@@ -23,7 +23,7 @@ func TestRemoveDataCenter(t *testing.T) {
}
func TestHandlingVolumeServerHeartbeat(t *testing.T) {
- topo := NewTopology("weedfs", sequence.NewMemorySequencer(), 32*1024, 5)
+ topo := NewTopology("weedfs", sequence.NewMemorySequencer(), 32*1024, 5, false)
dc := topo.GetOrCreateDataCenter("dc1")
rack := dc.GetOrCreateRack("rack1")
@@ -140,7 +140,7 @@ func assert(t *testing.T, message string, actual, expected int) {
func TestAddRemoveVolume(t *testing.T) {
- topo := NewTopology("weedfs", sequence.NewMemorySequencer(), 32*1024, 5)
+ topo := NewTopology("weedfs", sequence.NewMemorySequencer(), 32*1024, 5, false)
dc := topo.GetOrCreateDataCenter("dc1")
rack := dc.GetOrCreateRack("rack1")
diff --git a/weed/topology/topology_vacuum.go b/weed/topology/topology_vacuum.go
index e7dbf9b1e..789a01330 100644
--- a/weed/topology/topology_vacuum.go
+++ b/weed/topology/topology_vacuum.go
@@ -5,9 +5,10 @@ import (
"sync/atomic"
"time"
- "github.com/chrislusf/seaweedfs/weed/storage/needle"
"google.golang.org/grpc"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
@@ -19,8 +20,8 @@ func batchVacuumVolumeCheck(grpcDialOption grpc.DialOption, vl *VolumeLayout, vi
errCount := int32(0)
for index, dn := range locationlist.list {
go func(index int, url string, vid needle.VolumeId) {
- err := operation.WithVolumeServerClient(url, grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error {
- resp, err := volumeServerClient.VacuumVolumeCheck(ctx, &volume_server_pb.VacuumVolumeCheckRequest{
+ err := operation.WithVolumeServerClient(url, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
+ resp, err := volumeServerClient.VacuumVolumeCheck(context.Background(), &volume_server_pb.VacuumVolumeCheckRequest{
VolumeId: uint32(vid),
})
if err != nil {
@@ -63,9 +64,10 @@ func batchVacuumVolumeCompact(grpcDialOption grpc.DialOption, vl *VolumeLayout,
for index, dn := range locationlist.list {
go func(index int, url string, vid needle.VolumeId) {
glog.V(0).Infoln(index, "Start vacuuming", vid, "on", url)
- err := operation.WithVolumeServerClient(url, grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error {
- _, err := volumeServerClient.VacuumVolumeCompact(ctx, &volume_server_pb.VacuumVolumeCompactRequest{
- VolumeId: uint32(vid),
+ err := operation.WithVolumeServerClient(url, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
+ _, err := volumeServerClient.VacuumVolumeCompact(context.Background(), &volume_server_pb.VacuumVolumeCompactRequest{
+ VolumeId: uint32(vid),
+ Preallocate: preallocate,
})
return err
})
@@ -91,12 +93,16 @@ func batchVacuumVolumeCompact(grpcDialOption grpc.DialOption, vl *VolumeLayout,
}
func batchVacuumVolumeCommit(grpcDialOption grpc.DialOption, vl *VolumeLayout, vid needle.VolumeId, locationlist *VolumeLocationList) bool {
isCommitSuccess := true
+ isReadOnly := false
for _, dn := range locationlist.list {
glog.V(0).Infoln("Start Committing vacuum", vid, "on", dn.Url())
- err := operation.WithVolumeServerClient(dn.Url(), grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error {
- _, err := volumeServerClient.VacuumVolumeCommit(ctx, &volume_server_pb.VacuumVolumeCommitRequest{
+ err := operation.WithVolumeServerClient(dn.Url(), grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
+ resp, err := volumeServerClient.VacuumVolumeCommit(context.Background(), &volume_server_pb.VacuumVolumeCommitRequest{
VolumeId: uint32(vid),
})
+			if resp != nil && resp.IsReadOnly { // resp is nil when the RPC itself failed
+ isReadOnly = true
+ }
return err
})
if err != nil {
@@ -105,8 +111,10 @@ func batchVacuumVolumeCommit(grpcDialOption grpc.DialOption, vl *VolumeLayout, v
} else {
glog.V(0).Infof("Complete Committing vacuum %d on %s", vid, dn.Url())
}
- if isCommitSuccess {
- vl.SetVolumeAvailable(dn, vid)
+ }
+ if isCommitSuccess {
+ for _, dn := range locationlist.list {
+ vl.SetVolumeAvailable(dn, vid, isReadOnly)
}
}
return isCommitSuccess
@@ -114,8 +122,8 @@ func batchVacuumVolumeCommit(grpcDialOption grpc.DialOption, vl *VolumeLayout, v
func batchVacuumVolumeCleanup(grpcDialOption grpc.DialOption, vl *VolumeLayout, vid needle.VolumeId, locationlist *VolumeLocationList) {
for _, dn := range locationlist.list {
glog.V(0).Infoln("Start cleaning up", vid, "on", dn.Url())
- err := operation.WithVolumeServerClient(dn.Url(), grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error {
- _, err := volumeServerClient.VacuumVolumeCleanup(ctx, &volume_server_pb.VacuumVolumeCleanupRequest{
+ err := operation.WithVolumeServerClient(dn.Url(), grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
+ _, err := volumeServerClient.VacuumVolumeCleanup(context.Background(), &volume_server_pb.VacuumVolumeCleanupRequest{
VolumeId: uint32(vid),
})
return err
diff --git a/weed/topology/volume_growth.go b/weed/topology/volume_growth.go
index 781a34ba3..58b5702bf 100644
--- a/weed/topology/volume_growth.go
+++ b/weed/topology/volume_growth.go
@@ -85,6 +85,7 @@ func (vg *VolumeGrowth) GrowByCountAndType(grpcDialOption grpc.DialOption, targe
if c, e := vg.findAndGrow(grpcDialOption, topo, option); e == nil {
counter += c
} else {
+ glog.V(0).Infof("create %d volume, created %d: %v", targetCount, counter, e)
return counter, e
}
}
@@ -112,7 +113,7 @@ func (vg *VolumeGrowth) findAndGrow(grpcDialOption grpc.DialOption, topo *Topolo
func (vg *VolumeGrowth) findEmptySlotsForOneVolume(topo *Topology, option *VolumeGrowOption) (servers []*DataNode, err error) {
//find main datacenter and other data centers
rp := option.ReplicaPlacement
- mainDataCenter, otherDataCenters, dc_err := topo.RandomlyPickNodes(rp.DiffDataCenterCount+1, func(node Node) error {
+ mainDataCenter, otherDataCenters, dc_err := topo.PickNodesByWeight(rp.DiffDataCenterCount+1, func(node Node) error {
if option.DataCenter != "" && node.IsDataCenter() && node.Id() != NodeId(option.DataCenter) {
return fmt.Errorf("Not matching preferred data center:%s", option.DataCenter)
}
@@ -144,7 +145,7 @@ func (vg *VolumeGrowth) findEmptySlotsForOneVolume(topo *Topology, option *Volum
}
//find main rack and other racks
- mainRack, otherRacks, rackErr := mainDataCenter.(*DataCenter).RandomlyPickNodes(rp.DiffRackCount+1, func(node Node) error {
+ mainRack, otherRacks, rackErr := mainDataCenter.(*DataCenter).PickNodesByWeight(rp.DiffRackCount+1, func(node Node) error {
if option.Rack != "" && node.IsRack() && node.Id() != NodeId(option.Rack) {
return fmt.Errorf("Not matching preferred rack:%s", option.Rack)
}
@@ -171,7 +172,7 @@ func (vg *VolumeGrowth) findEmptySlotsForOneVolume(topo *Topology, option *Volum
}
//find main rack and other racks
- mainServer, otherServers, serverErr := mainRack.(*Rack).RandomlyPickNodes(rp.SameRackCount+1, func(node Node) error {
+ mainServer, otherServers, serverErr := mainRack.(*Rack).PickNodesByWeight(rp.SameRackCount+1, func(node Node) error {
if option.DataNode != "" && node.IsDataNode() && node.Id() != NodeId(option.DataNode) {
return fmt.Errorf("Not matching preferred data node:%s", option.DataNode)
}
diff --git a/weed/topology/volume_growth_test.go b/weed/topology/volume_growth_test.go
index e3c5cc580..bc9083fd2 100644
--- a/weed/topology/volume_growth_test.go
+++ b/weed/topology/volume_growth_test.go
@@ -81,7 +81,7 @@ func setup(topologyLayout string) *Topology {
fmt.Println("data:", data)
//need to connect all nodes first before server adding volumes
- topo := NewTopology("weedfs", sequence.NewMemorySequencer(), 32*1024, 5)
+ topo := NewTopology("weedfs", sequence.NewMemorySequencer(), 32*1024, 5, false)
mTopology := data.(map[string]interface{})
for dcKey, dcValue := range mTopology {
dc := NewDataCenter(dcKey)
@@ -131,3 +131,212 @@ func TestFindEmptySlotsForOneVolume(t *testing.T) {
fmt.Println("assigned node :", server.Id())
}
}
+
+var topologyLayout2 = `
+{
+ "dc1":{
+ "rack1":{
+ "server111":{
+ "volumes":[
+ {"id":1, "size":12312},
+ {"id":2, "size":12312},
+ {"id":3, "size":12312}
+ ],
+ "limit":300
+ },
+ "server112":{
+ "volumes":[
+ {"id":4, "size":12312},
+ {"id":5, "size":12312},
+ {"id":6, "size":12312}
+ ],
+ "limit":300
+ },
+ "server113":{
+ "volumes":[],
+ "limit":300
+ },
+ "server114":{
+ "volumes":[],
+ "limit":300
+ },
+ "server115":{
+ "volumes":[],
+ "limit":300
+ },
+ "server116":{
+ "volumes":[],
+ "limit":300
+ }
+ },
+ "rack2":{
+ "server121":{
+ "volumes":[
+ {"id":4, "size":12312},
+ {"id":5, "size":12312},
+ {"id":6, "size":12312}
+ ],
+ "limit":300
+ },
+ "server122":{
+ "volumes":[],
+ "limit":300
+ },
+ "server123":{
+ "volumes":[
+ {"id":2, "size":12312},
+ {"id":3, "size":12312},
+ {"id":4, "size":12312}
+ ],
+ "limit":300
+ },
+ "server124":{
+ "volumes":[],
+ "limit":300
+ },
+ "server125":{
+ "volumes":[],
+ "limit":300
+ },
+ "server126":{
+ "volumes":[],
+ "limit":300
+ }
+ },
+ "rack3":{
+ "server131":{
+ "volumes":[],
+ "limit":300
+ },
+ "server132":{
+ "volumes":[],
+ "limit":300
+ },
+ "server133":{
+ "volumes":[],
+ "limit":300
+ },
+ "server134":{
+ "volumes":[],
+ "limit":300
+ },
+ "server135":{
+ "volumes":[],
+ "limit":300
+ },
+ "server136":{
+ "volumes":[],
+ "limit":300
+ }
+ }
+ }
+}
+`
+
+func TestReplication011(t *testing.T) {
+ topo := setup(topologyLayout2)
+ vg := NewDefaultVolumeGrowth()
+ rp, _ := super_block.NewReplicaPlacementFromString("011")
+ volumeGrowOption := &VolumeGrowOption{
+ Collection: "MAIL",
+ ReplicaPlacement: rp,
+ DataCenter: "dc1",
+ Rack: "",
+ DataNode: "",
+ }
+ servers, err := vg.findEmptySlotsForOneVolume(topo, volumeGrowOption)
+ if err != nil {
+ fmt.Println("finding empty slots error :", err)
+ t.Fail()
+ }
+ for _, server := range servers {
+ fmt.Println("assigned node :", server.Id())
+ }
+}
+
+var topologyLayout3 = `
+{
+ "dc1":{
+ "rack1":{
+ "server111":{
+ "volumes":[],
+ "limit":2000
+ }
+ }
+ },
+ "dc2":{
+ "rack2":{
+ "server222":{
+ "volumes":[],
+ "limit":2000
+ }
+ }
+ },
+ "dc3":{
+ "rack3":{
+ "server333":{
+ "volumes":[],
+ "limit":1000
+ }
+ }
+ },
+ "dc4":{
+ "rack4":{
+ "server444":{
+ "volumes":[],
+ "limit":1000
+ }
+ }
+ },
+ "dc5":{
+ "rack5":{
+ "server555":{
+ "volumes":[],
+ "limit":500
+ }
+ }
+ },
+ "dc6":{
+ "rack6":{
+ "server666":{
+ "volumes":[],
+ "limit":500
+ }
+ }
+ }
+}
+`
+
+func TestFindEmptySlotsForOneVolumeScheduleByWeight(t *testing.T) {
+ topo := setup(topologyLayout3)
+ vg := NewDefaultVolumeGrowth()
+ rp, _ := super_block.NewReplicaPlacementFromString("100")
+ volumeGrowOption := &VolumeGrowOption{
+ Collection: "Weight",
+ ReplicaPlacement: rp,
+ DataCenter: "",
+ Rack: "",
+ DataNode: "",
+ }
+
+ distribution := map[NodeId]int{}
+ // assign 1000 volumes
+ for i := 0; i < 1000; i++ {
+ servers, err := vg.findEmptySlotsForOneVolume(topo, volumeGrowOption)
+ if err != nil {
+ fmt.Println("finding empty slots error :", err)
+ t.Fail()
+ }
+ for _, server := range servers {
+ // fmt.Println("assigned node :", server.Id())
+			// map lookups of missing keys yield the zero value, so no explicit init is needed
+			distribution[server.id] += 1
+ }
+ }
+
+ for k, v := range distribution {
+ fmt.Printf("%s : %d\n", k, v)
+ }
+}
diff --git a/weed/topology/volume_layout.go b/weed/topology/volume_layout.go
index 7633b28be..9e84fd2da 100644
--- a/weed/topology/volume_layout.go
+++ b/weed/topology/volume_layout.go
@@ -22,6 +22,7 @@ type VolumeLayout struct {
readonlyVolumes map[needle.VolumeId]bool // transient set of readonly volumes
oversizedVolumes map[needle.VolumeId]bool // set of oversized volumes
volumeSizeLimit uint64
+ replicationAsMin bool
accessLock sync.RWMutex
}
@@ -31,7 +32,7 @@ type VolumeLayoutStats struct {
FileCount uint64
}
-func NewVolumeLayout(rp *super_block.ReplicaPlacement, ttl *needle.TTL, volumeSizeLimit uint64) *VolumeLayout {
+func NewVolumeLayout(rp *super_block.ReplicaPlacement, ttl *needle.TTL, volumeSizeLimit uint64, replicationAsMin bool) *VolumeLayout {
return &VolumeLayout{
rp: rp,
ttl: ttl,
@@ -40,6 +41,7 @@ func NewVolumeLayout(rp *super_block.ReplicaPlacement, ttl *needle.TTL, volumeSi
readonlyVolumes: make(map[needle.VolumeId]bool),
oversizedVolumes: make(map[needle.VolumeId]bool),
volumeSizeLimit: volumeSizeLimit,
+ replicationAsMin: replicationAsMin,
}
}
@@ -51,6 +53,9 @@ func (vl *VolumeLayout) RegisterVolume(v *storage.VolumeInfo, dn *DataNode) {
vl.accessLock.Lock()
defer vl.accessLock.Unlock()
+	// these must run after the volume is registered below; defers execute
+	// LIFO, so rememberOversizedVolume still runs before ensureCorrectWritables
+	defer vl.ensureCorrectWritables(v)
+	defer vl.rememberOversizedVolume(v)
+
if _, ok := vl.vid2location[v.Id]; !ok {
vl.vid2location[v.Id] = NewVolumeLocationList()
}
@@ -74,9 +79,6 @@ func (vl *VolumeLayout) RegisterVolume(v *storage.VolumeInfo, dn *DataNode) {
}
}
- vl.rememberOversizedVolume(v)
- vl.ensureCorrectWritables(v)
-
}
func (vl *VolumeLayout) rememberOversizedVolume(v *storage.VolumeInfo) {
@@ -107,24 +109,15 @@ func (vl *VolumeLayout) UnRegisterVolume(v *storage.VolumeInfo, dn *DataNode) {
}
func (vl *VolumeLayout) ensureCorrectWritables(v *storage.VolumeInfo) {
- if vl.vid2location[v.Id].Length() == vl.rp.GetCopyCount() && vl.isWritable(v) {
+ if vl.enoughCopies(v.Id) && vl.isWritable(v) {
if _, ok := vl.oversizedVolumes[v.Id]; !ok {
- vl.addToWritable(v.Id)
+ vl.setVolumeWritable(v.Id)
}
} else {
vl.removeFromWritable(v.Id)
}
}
-func (vl *VolumeLayout) addToWritable(vid needle.VolumeId) {
- for _, id := range vl.writables {
- if vid == id {
- return
- }
- }
- vl.writables = append(vl.writables, vid)
-}
-
func (vl *VolumeLayout) isOversized(v *storage.VolumeInfo) bool {
return uint64(v.Size) >= vl.volumeSizeLimit
}
@@ -266,17 +259,33 @@ func (vl *VolumeLayout) SetVolumeUnavailable(dn *DataNode, vid needle.VolumeId)
}
return false
}
-func (vl *VolumeLayout) SetVolumeAvailable(dn *DataNode, vid needle.VolumeId) bool {
+func (vl *VolumeLayout) SetVolumeAvailable(dn *DataNode, vid needle.VolumeId, isReadOnly bool) bool {
vl.accessLock.Lock()
defer vl.accessLock.Unlock()
+ vInfo, err := dn.GetVolumesById(vid)
+ if err != nil {
+ return false
+ }
+
vl.vid2location[vid].Set(dn)
- if vl.vid2location[vid].Length() == vl.rp.GetCopyCount() {
+
+ if vInfo.ReadOnly || isReadOnly {
+ return false
+ }
+
+ if vl.enoughCopies(vid) {
return vl.setVolumeWritable(vid)
}
return false
}
+func (vl *VolumeLayout) enoughCopies(vid needle.VolumeId) bool {
+ locations := vl.vid2location[vid].Length()
+ desired := vl.rp.GetCopyCount()
+ return locations == desired || (vl.replicationAsMin && locations > desired)
+}
+
func (vl *VolumeLayout) SetVolumeCapacityFull(vid needle.VolumeId) bool {
vl.accessLock.Lock()
defer vl.accessLock.Unlock()
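
enoughCopies is where the new replicationAsMin flag takes effect: with the flag off, a volume is writable only when the location count exactly matches the replica placement's copy count; with it on, extra copies are tolerated. A small standalone truth-table sketch of that predicate:

package main

import "fmt"

// enoughCopies mirrors the VolumeLayout method: exact match normally,
// "at least" when replicationAsMin is set.
func enoughCopies(locations, desired int, replicationAsMin bool) bool {
	return locations == desired || (replicationAsMin && locations > desired)
}

func main() {
	for _, asMin := range []bool{false, true} {
		for _, locs := range []int{1, 2, 3} {
			fmt.Printf("copies=%d desired=2 asMin=%v -> writable=%v\n",
				locs, asMin, enoughCopies(locs, 2, asMin))
		}
	}
}
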
diff --git a/weed/util/bytes.go b/weed/util/bytes.go
index 9c7e5e2cb..0650919c0 100644
--- a/weed/util/bytes.go
+++ b/weed/util/bytes.go
@@ -2,9 +2,26 @@ package util
import (
"crypto/md5"
+ "fmt"
"io"
)
+// BytesToHumanReadable returns the converted human readable representation of the bytes.
+func BytesToHumanReadable(b uint64) string {
+ const unit = 1024
+ if b < unit {
+ return fmt.Sprintf("%d B", b)
+ }
+
+ div, exp := uint64(unit), 0
+ for n := b / unit; n >= unit; n /= unit {
+ div *= unit
+ exp++
+ }
+
+ return fmt.Sprintf("%.2f %ciB", float64(b)/float64(div), "KMGTPE"[exp])
+}
+
// big endian
func BytesToUint64(b []byte) (v uint64) {
@@ -74,3 +91,26 @@ func HashStringToLong(dir string) (v int64) {
return
}
+
+func HashToInt32(data []byte) (v int32) {
+ h := md5.New()
+ h.Write(data)
+
+ b := h.Sum(nil)
+
+ v += int32(b[0])
+ v <<= 8
+ v += int32(b[1])
+ v <<= 8
+ v += int32(b[2])
+ v <<= 8
+ v += int32(b[3])
+
+ return
+}
+
+func Md5(data []byte) string {
+ hash := md5.New()
+ hash.Write(data)
+ return fmt.Sprintf("%x", hash.Sum(nil))
+}
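
A quick usage sketch of the BytesToHumanReadable helper added above; the expected outputs in the comments were worked out by hand from its rounding logic:

package main

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/util"
)

func main() {
	fmt.Println(util.BytesToHumanReadable(512))      // 512 B
	fmt.Println(util.BytesToHumanReadable(1536))     // 1.50 KiB
	fmt.Println(util.BytesToHumanReadable(32 << 30)) // 32.00 GiB
}
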
diff --git a/weed/util/chunk_cache/chunk_cache.go b/weed/util/chunk_cache/chunk_cache.go
new file mode 100644
index 000000000..e1d4b639f
--- /dev/null
+++ b/weed/util/chunk_cache/chunk_cache.go
@@ -0,0 +1,113 @@
+package chunk_cache
+
+import (
+ "sync"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/storage/needle"
+)
+
+const (
+ memCacheSizeLimit = 1024 * 1024
+ onDiskCacheSizeLimit0 = memCacheSizeLimit
+ onDiskCacheSizeLimit1 = 4 * memCacheSizeLimit
+)
+
+// a global cache for recently accessed file chunks
+type ChunkCache struct {
+ memCache *ChunkCacheInMemory
+ diskCaches []*OnDiskCacheLayer
+ sync.RWMutex
+}
+
+func NewChunkCache(maxEntries int64, dir string, diskSizeMB int64) *ChunkCache {
+
+ c := &ChunkCache{
+ memCache: NewChunkCacheInMemory(maxEntries),
+ }
+ c.diskCaches = make([]*OnDiskCacheLayer, 3)
+ c.diskCaches[0] = NewOnDiskCacheLayer(dir, "c0_1", diskSizeMB/4, 4)
+ c.diskCaches[1] = NewOnDiskCacheLayer(dir, "c1_4", diskSizeMB/4, 4)
+ c.diskCaches[2] = NewOnDiskCacheLayer(dir, "cache", diskSizeMB/2, 4)
+
+ return c
+}
+
+func (c *ChunkCache) GetChunk(fileId string, chunkSize uint64) (data []byte) {
+ if c == nil {
+ return
+ }
+
+ c.RLock()
+ defer c.RUnlock()
+
+ return c.doGetChunk(fileId, chunkSize)
+}
+
+func (c *ChunkCache) doGetChunk(fileId string, chunkSize uint64) (data []byte) {
+
+ if chunkSize < memCacheSizeLimit {
+ if data = c.memCache.GetChunk(fileId); data != nil {
+ return data
+ }
+ }
+
+ fid, err := needle.ParseFileIdFromString(fileId)
+ if err != nil {
+ glog.Errorf("failed to parse file id %s", fileId)
+ return nil
+ }
+
+ for _, diskCache := range c.diskCaches {
+ data := diskCache.getChunk(fid.Key)
+ if len(data) != 0 {
+ return data
+ }
+ }
+
+ return nil
+
+}
+
+func (c *ChunkCache) SetChunk(fileId string, data []byte) {
+ if c == nil {
+ return
+ }
+ c.Lock()
+ defer c.Unlock()
+
+ c.doSetChunk(fileId, data)
+}
+
+func (c *ChunkCache) doSetChunk(fileId string, data []byte) {
+
+ if len(data) < memCacheSizeLimit {
+ c.memCache.SetChunk(fileId, data)
+ }
+
+ fid, err := needle.ParseFileIdFromString(fileId)
+ if err != nil {
+ glog.Errorf("failed to parse file id %s", fileId)
+ return
+ }
+
+ if len(data) < onDiskCacheSizeLimit0 {
+ c.diskCaches[0].setChunk(fid.Key, data)
+ } else if len(data) < onDiskCacheSizeLimit1 {
+ c.diskCaches[1].setChunk(fid.Key, data)
+ } else {
+ c.diskCaches[2].setChunk(fid.Key, data)
+ }
+
+}
+
+func (c *ChunkCache) Shutdown() {
+ if c == nil {
+ return
+ }
+ c.Lock()
+ defer c.Unlock()
+ for _, diskCache := range c.diskCaches {
+ diskCache.shutdown()
+ }
+}
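For orientation, a minimal usage sketch of the tiered cache; the directory and file id below are made up, and the file id must parse as a "volumeId,needleId+cookie" string:

```go
cache := chunk_cache.NewChunkCache(1000, "/tmp/seaweed_cache", 512) // 1000 memory entries, 512MB on disk
defer cache.Shutdown()

fileId := "3,01637037d6" // hypothetical "volumeId,needleId+cookie" file id
data := []byte("chunk body")

cache.SetChunk(fileId, data) // under 1MiB, so it lands in memory and in disk tier 0

if hit := cache.GetChunk(fileId, uint64(len(data))); hit != nil {
	// cache hit; note a nil *ChunkCache is also safe, both methods no-op
}
```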
diff --git a/weed/util/chunk_cache/chunk_cache_in_memory.go b/weed/util/chunk_cache/chunk_cache_in_memory.go
new file mode 100644
index 000000000..931e45e9a
--- /dev/null
+++ b/weed/util/chunk_cache/chunk_cache_in_memory.go
@@ -0,0 +1,36 @@
+package chunk_cache
+
+import (
+ "time"
+
+ "github.com/karlseguin/ccache"
+)
+
+// a global cache for recently accessed file chunks
+type ChunkCacheInMemory struct {
+ cache *ccache.Cache
+}
+
+func NewChunkCacheInMemory(maxEntries int64) *ChunkCacheInMemory {
+ pruneCount := maxEntries >> 3
+ if pruneCount <= 0 {
+ pruneCount = 500
+ }
+ return &ChunkCacheInMemory{
+ cache: ccache.New(ccache.Configure().MaxSize(maxEntries).ItemsToPrune(uint32(pruneCount))),
+ }
+}
+
+func (c *ChunkCacheInMemory) GetChunk(fileId string) []byte {
+ item := c.cache.Get(fileId)
+ if item == nil {
+ return nil
+ }
+ data := item.Value().([]byte)
+ item.Extend(time.Hour)
+ return data
+}
+
+func (c *ChunkCacheInMemory) SetChunk(fileId string, data []byte) {
+ c.cache.Set(fileId, data, time.Hour)
+}
diff --git a/weed/util/chunk_cache/chunk_cache_on_disk.go b/weed/util/chunk_cache/chunk_cache_on_disk.go
new file mode 100644
index 000000000..2c7ef8d39
--- /dev/null
+++ b/weed/util/chunk_cache/chunk_cache_on_disk.go
@@ -0,0 +1,145 @@
+package chunk_cache
+
+import (
+ "fmt"
+ "os"
+ "time"
+
+ "github.com/syndtr/goleveldb/leveldb/opt"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/storage"
+ "github.com/chrislusf/seaweedfs/weed/storage/backend"
+ "github.com/chrislusf/seaweedfs/weed/storage/types"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+// This implements an on disk cache
+// Entries are kept in FIFO order and evicted once the size limit is reached
+
+type ChunkCacheVolume struct {
+ DataBackend backend.BackendStorageFile
+ nm storage.NeedleMapper
+ fileName string
+ smallBuffer []byte
+ sizeLimit int64
+ lastModTime time.Time
+ fileSize int64
+}
+
+func LoadOrCreateChunkCacheVolume(fileName string, preallocate int64) (*ChunkCacheVolume, error) {
+
+ v := &ChunkCacheVolume{
+ smallBuffer: make([]byte, types.NeedlePaddingSize),
+ fileName: fileName,
+ sizeLimit: preallocate,
+ }
+
+ var err error
+
+ if exists, canRead, canWrite, modTime, fileSize := util.CheckFile(v.fileName + ".dat"); exists {
+ if !canRead {
+ return nil, fmt.Errorf("cannot read cache file %s.dat", v.fileName)
+ }
+ if !canWrite {
+ return nil, fmt.Errorf("cannot write cache file %s.dat", v.fileName)
+ }
+ if dataFile, err := os.OpenFile(v.fileName+".dat", os.O_RDWR|os.O_CREATE, 0644); err != nil {
+ return nil, fmt.Errorf("cannot create cache file %s.dat: %v", v.fileName, err)
+ } else {
+ v.DataBackend = backend.NewDiskFile(dataFile)
+ v.lastModTime = modTime
+ v.fileSize = fileSize
+ }
+ } else {
+ if v.DataBackend, err = backend.CreateVolumeFile(v.fileName+".dat", preallocate, 0); err != nil {
+ return nil, fmt.Errorf("cannot create cache file %s.dat: %v", v.fileName, err)
+ }
+ v.lastModTime = time.Now()
+ }
+
+ var indexFile *os.File
+ if indexFile, err = os.OpenFile(v.fileName+".idx", os.O_RDWR|os.O_CREATE, 0644); err != nil {
+ return nil, fmt.Errorf("cannot write cache index %s.idx: %v", v.fileName, err)
+ }
+
+ glog.V(0).Infoln("loading leveldb", v.fileName+".ldb")
+ opts := &opt.Options{
+ BlockCacheCapacity: 2 * 1024 * 1024, // default value is 8MiB
+ WriteBuffer: 1 * 1024 * 1024, // default value is 4MiB
+ CompactionTableSizeMultiplier: 10, // default value is 1
+ }
+ if v.nm, err = storage.NewLevelDbNeedleMap(v.fileName+".ldb", indexFile, opts); err != nil {
+ return nil, fmt.Errorf("loading leveldb %s error: %v", v.fileName+".ldb", err)
+ }
+
+ return v, nil
+
+}
+
+func (v *ChunkCacheVolume) Shutdown() {
+ if v.DataBackend != nil {
+ v.DataBackend.Close()
+ v.DataBackend = nil
+ }
+ if v.nm != nil {
+ v.nm.Close()
+ v.nm = nil
+ }
+}
+
+func (v *ChunkCacheVolume) destroy() {
+ v.Shutdown()
+ os.Remove(v.fileName + ".dat")
+ os.Remove(v.fileName + ".idx")
+ os.RemoveAll(v.fileName + ".ldb")
+}
+
+func (v *ChunkCacheVolume) Reset() (*ChunkCacheVolume, error) {
+ v.destroy()
+ return LoadOrCreateChunkCacheVolume(v.fileName, v.sizeLimit)
+}
+
+func (v *ChunkCacheVolume) GetNeedle(key types.NeedleId) ([]byte, error) {
+
+ nv, ok := v.nm.Get(key)
+ if !ok {
+ return nil, storage.ErrorNotFound
+ }
+ data := make([]byte, nv.Size)
+ if readSize, readErr := v.DataBackend.ReadAt(data, nv.Offset.ToAcutalOffset()); readErr != nil {
+ return nil, fmt.Errorf("read %s.dat [%d,%d): %v",
+ v.fileName, nv.Offset.ToAcutalOffset(), nv.Offset.ToAcutalOffset()+int64(nv.Size), readErr)
+ } else {
+ if readSize != int(nv.Size) {
+ return nil, fmt.Errorf("read %d, expected %d", readSize, nv.Size)
+ }
+ }
+
+ return data, nil
+}
+
+func (v *ChunkCacheVolume) WriteNeedle(key types.NeedleId, data []byte) error {
+
+ offset := v.fileSize
+
+ written, err := v.DataBackend.WriteAt(data, offset)
+ if err != nil {
+ return err
+ } else if written != len(data) {
+ return fmt.Errorf("partial written %d, expected %d", written, len(data))
+ }
+
+ v.fileSize += int64(written)
+ extraSize := written % types.NeedlePaddingSize
+ if extraSize != 0 {
+ v.DataBackend.WriteAt(v.smallBuffer[:types.NeedlePaddingSize-extraSize], offset+int64(written))
+ v.fileSize += int64(types.NeedlePaddingSize - extraSize)
+ }
+
+ if err := v.nm.Put(key, types.ToOffset(offset), uint32(len(data))); err != nil {
+ glog.V(4).Infof("failed to save in needle map %d: %v", key, err)
+ }
+
+ return nil
+}
diff --git a/weed/util/chunk_cache/chunk_cache_on_disk_test.go b/weed/util/chunk_cache/chunk_cache_on_disk_test.go
new file mode 100644
index 000000000..f061f2ba2
--- /dev/null
+++ b/weed/util/chunk_cache/chunk_cache_on_disk_test.go
@@ -0,0 +1,59 @@
+package chunk_cache
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "math/rand"
+ "os"
+ "testing"
+)
+
+func TestOnDisk(t *testing.T) {
+
+ tmpDir, _ := ioutil.TempDir("", "c")
+ defer os.RemoveAll(tmpDir)
+
+ totalDiskSizeMb := int64(32)
+
+ cache := NewChunkCache(0, tmpDir, totalDiskSizeMb)
+
+ writeCount := 5
+ type test_data struct {
+ data []byte
+ fileId string
+ size uint64
+ }
+ testData := make([]*test_data, writeCount)
+ for i := 0; i < writeCount; i++ {
+ buff := make([]byte, 1024*1024)
+ rand.Read(buff)
+ testData[i] = &test_data{
+ data: buff,
+ fileId: fmt.Sprintf("1,%daabbccdd", i+1),
+ size: uint64(len(buff)),
+ }
+ cache.SetChunk(testData[i].fileId, testData[i].data)
+ }
+
+ for i := 0; i < writeCount; i++ {
+ data := cache.GetChunk(testData[i].fileId, testData[i].size)
+ if !bytes.Equal(data, testData[i].data) {
+ t.Errorf("failed to write to and read from cache: %d", i)
+ }
+ }
+
+ cache.Shutdown()
+
+ cache = NewChunkCache(0, tmpDir, totalDiskSizeMb)
+
+ for i := 0; i < writeCount; i++ {
+ data := cache.GetChunk(testData[i].fileId, testData[i].size)
+ if !bytes.Equal(data, testData[i].data) {
+ t.Errorf("failed to write to and read from cache: %d", i)
+ }
+ }
+
+ cache.Shutdown()
+
+}
diff --git a/weed/util/chunk_cache/on_disk_cache_layer.go b/weed/util/chunk_cache/on_disk_cache_layer.go
new file mode 100644
index 000000000..9cf8e3ab2
--- /dev/null
+++ b/weed/util/chunk_cache/on_disk_cache_layer.go
@@ -0,0 +1,89 @@
+package chunk_cache
+
+import (
+ "fmt"
+ "path"
+ "sort"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/storage"
+ "github.com/chrislusf/seaweedfs/weed/storage/types"
+)
+
+type OnDiskCacheLayer struct {
+ diskCaches []*ChunkCacheVolume
+}
+
+func NewOnDiskCacheLayer(dir, namePrefix string, diskSizeMB int64, segmentCount int) *OnDiskCacheLayer {
+
+ volumeCount, volumeSize := int(diskSizeMB/30000), int64(30000)
+ if volumeCount < segmentCount {
+ volumeCount, volumeSize = segmentCount, diskSizeMB/int64(segmentCount)
+ }
+
+ c := &OnDiskCacheLayer{}
+ for i := 0; i < volumeCount; i++ {
+ fileName := path.Join(dir, fmt.Sprintf("%s_%d", namePrefix, i))
+ diskCache, err := LoadOrCreateChunkCacheVolume(fileName, volumeSize*1024*1024)
+ if err != nil {
+ glog.Errorf("failed to add cache %s : %v", fileName, err)
+ } else {
+ c.diskCaches = append(c.diskCaches, diskCache)
+ }
+ }
+
+ // keep the newest cache volume at the front
+ sort.Slice(c.diskCaches, func(i, j int) bool {
+ return c.diskCaches[i].lastModTime.After(c.diskCaches[j].lastModTime)
+ })
+
+ return c
+}
+
+func (c *OnDiskCacheLayer) setChunk(needleId types.NeedleId, data []byte) {
+
+ if c.diskCaches[0].fileSize+int64(len(data)) > c.diskCaches[0].sizeLimit {
+ t, resetErr := c.diskCaches[len(c.diskCaches)-1].Reset()
+ if resetErr != nil {
+ glog.Errorf("failed to reset cache file %s", c.diskCaches[len(c.diskCaches)-1].fileName)
+ return
+ }
+ for i := len(c.diskCaches) - 1; i > 0; i-- {
+ c.diskCaches[i] = c.diskCaches[i-1]
+ }
+ c.diskCaches[0] = t
+ }
+
+ c.diskCaches[0].WriteNeedle(needleId, data)
+
+}
+
+func (c *OnDiskCacheLayer) getChunk(needleId types.NeedleId) (data []byte) {
+
+ var err error
+
+ for _, diskCache := range c.diskCaches {
+ data, err = diskCache.GetNeedle(needleId)
+ if err == storage.ErrorNotFound {
+ continue
+ }
+ if err != nil {
+ glog.Errorf("failed to read cache file %s id %d", diskCache.fileName, needleId)
+ continue
+ }
+ if len(data) != 0 {
+ return
+ }
+ }
+
+ return nil
+
+}
+
+func (c *OnDiskCacheLayer) shutdown() {
+
+ for _, diskCache := range c.diskCaches {
+ diskCache.Shutdown()
+ }
+
+}
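The three layers built in NewChunkCache split the disk budget by chunk size (tier 0 for chunks under 1 MiB, tier 1 under 4 MiB, tier 2 for the rest), and within a layer the fixed-size volumes rotate FIFO: once the front volume would overflow, the oldest volume is reset and promoted to the front. A hypothetical helper mirroring doSetChunk's tier thresholds, purely illustrative and not part of the change set:

```go
// tierFor mirrors the size thresholds used by ChunkCache.doSetChunk
func tierFor(size int) int {
	const memCacheSizeLimit = 1024 * 1024 // 1 MiB
	switch {
	case size < memCacheSizeLimit:
		return 0 // also cached in memory
	case size < 4*memCacheSizeLimit:
		return 1
	default:
		return 2
	}
}
```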
diff --git a/weed/util/cipher.go b/weed/util/cipher.go
new file mode 100644
index 000000000..f044c2ca3
--- /dev/null
+++ b/weed/util/cipher.go
@@ -0,0 +1,60 @@
+package util
+
+import (
+ "crypto/aes"
+ "crypto/cipher"
+ "crypto/rand"
+ "errors"
+ "io"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+)
+
+type CipherKey []byte
+
+func GenCipherKey() CipherKey {
+ key := make([]byte, 32)
+ if _, err := io.ReadFull(rand.Reader, key); err != nil {
+ glog.Fatalf("random key gen: %v", err)
+ }
+ return CipherKey(key)
+}
+
+func Encrypt(plaintext []byte, key CipherKey) ([]byte, error) {
+ c, err := aes.NewCipher(key)
+ if err != nil {
+ return nil, err
+ }
+
+ gcm, err := cipher.NewGCM(c)
+ if err != nil {
+ return nil, err
+ }
+
+ nonce := make([]byte, gcm.NonceSize())
+ if _, err = io.ReadFull(rand.Reader, nonce); err != nil {
+ return nil, err
+ }
+
+ return gcm.Seal(nonce, nonce, plaintext, nil), nil
+}
+
+func Decrypt(ciphertext []byte, key CipherKey) ([]byte, error) {
+ c, err := aes.NewCipher(key)
+ if err != nil {
+ return nil, err
+ }
+
+ gcm, err := cipher.NewGCM(c)
+ if err != nil {
+ return nil, err
+ }
+
+ nonceSize := gcm.NonceSize()
+ if len(ciphertext) < nonceSize {
+ return nil, errors.New("ciphertext too short")
+ }
+
+ nonce, ciphertext := ciphertext[:nonceSize], ciphertext[nonceSize:]
+ return gcm.Open(nil, nonce, ciphertext, nil)
+}
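A round-trip sketch of the new AES-256-GCM helpers; note that Encrypt prepends the random nonce, so the returned ciphertext is self-contained:

```go
package main

import (
	"fmt"
	"log"

	"github.com/chrislusf/seaweedfs/weed/util"
)

func main() {
	key := util.GenCipherKey() // 32 random bytes, i.e. AES-256

	ciphertext, err := util.Encrypt([]byte("secret chunk"), key)
	if err != nil {
		log.Fatal(err)
	}

	plaintext, err := util.Decrypt(ciphertext, key) // strips the nonce prefix back off
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(plaintext)) // secret chunk
}
```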
diff --git a/weed/util/cipher_test.go b/weed/util/cipher_test.go
new file mode 100644
index 000000000..026c96ea3
--- /dev/null
+++ b/weed/util/cipher_test.go
@@ -0,0 +1,17 @@
+package util
+
+import (
+ "encoding/base64"
+ "testing"
+)
+
+func TestSameAsJavaImplementation(t *testing.T) {
+ str := "QVVhmqg112NMT7F+G/7QPynqSln3xPIhKdFGmTVKZD6IS0noyr2Z5kXFF6fPjZ/7Hq8kRhlmLeeqZUccxyaZHezOdgkjS6d4NTdHf5IjXzk7"
+ cipherText, _ := base64.StdEncoding.DecodeString(str)
+ secretKey := []byte("256-bit key for AES 256 GCM encr")
+ plaintext, err := Decrypt(cipherText, CipherKey(secretKey))
+ if err != nil {
+ t.Errorf("failed to decrypt: %v", err)
+ }
+ t.Log(string(plaintext))
+}
diff --git a/weed/util/compression.go b/weed/util/compression.go
index 6072df632..1f778b5d5 100644
--- a/weed/util/compression.go
+++ b/weed/util/compression.go
@@ -7,8 +7,9 @@ import (
"io/ioutil"
"strings"
- "github.com/chrislusf/seaweedfs/weed/glog"
"golang.org/x/tools/godoc/util"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
)
func GzipData(input []byte) ([]byte, error) {
@@ -37,7 +38,8 @@ func UnGzipData(input []byte) ([]byte, error) {
/*
* Default more not to gzip since gzip can be done on client side.
- */func IsGzippable(ext, mtype string, data []byte) bool {
+ */
+func IsGzippable(ext, mtype string, data []byte) bool {
shouldBeZipped, iAmSure := IsGzippableFileType(ext, mtype)
if iAmSure {
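For completeness, a round-trip sketch of the gzip helpers this file exposes, under the assumption that GzipData and UnGzipData keep their ([]byte) ([]byte, error) signatures:

```go
package main

import (
	"bytes"
	"log"
	"strings"

	"github.com/chrislusf/seaweedfs/weed/util"
)

func main() {
	original := []byte(strings.Repeat("highly repetitive payload ", 64))

	zipped, err := util.GzipData(original)
	if err != nil {
		log.Fatal(err)
	}
	unzipped, err := util.UnGzipData(zipped)
	if err != nil {
		log.Fatal(err)
	}
	if !bytes.Equal(original, unzipped) {
		log.Fatal("gzip round trip mismatch")
	}
	log.Printf("%d bytes -> %d compressed", len(original), len(zipped))
}
```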
diff --git a/weed/util/compression_test.go b/weed/util/compression_test.go
new file mode 100644
index 000000000..b515e8988
--- /dev/null
+++ b/weed/util/compression_test.go
@@ -0,0 +1,21 @@
+package util
+
+import (
+ "testing"
+
+ "golang.org/x/tools/godoc/util"
+)
+
+func TestIsGzippable(t *testing.T) {
+ buf := make([]byte, 1024)
+
+ isText := util.IsText(buf)
+
+ if isText {
+ t.Error("buf with zeros are not text")
+ }
+
+ compressed, _ := GzipData(buf)
+
+ t.Logf("compressed size %d\n", len(compressed))
+}
diff --git a/weed/util/config.go b/weed/util/config.go
index dfbfdbd82..7b6e92f08 100644
--- a/weed/util/config.go
+++ b/weed/util/config.go
@@ -27,7 +27,7 @@ func LoadConfiguration(configFileName string, required bool) (loaded bool) {
glog.V(1).Infof("Reading %s.toml from %s", configFileName, viper.ConfigFileUsed())
if err := viper.MergeInConfig(); err != nil { // Handle errors reading the config file
- glog.V(0).Infof("Reading %s: %v", viper.ConfigFileUsed(), err)
+ glog.V(1).Infof("Reading %s: %v", viper.ConfigFileUsed(), err)
if required {
glog.Fatalf("Failed to load %s.toml file from current directory, or $HOME/.seaweedfs/, or /etc/seaweedfs/"+
"\n\nPlease use this command to generate the default %s.toml file\n"+
@@ -42,7 +42,8 @@ func LoadConfiguration(configFileName string, required bool) (loaded bool) {
}
func GetViper() *viper.Viper {
- v := viper.GetViper()
+ v := &viper.Viper{}
+ *v = *viper.GetViper()
v.AutomaticEnv()
v.SetEnvPrefix("weed")
v.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
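The copy returned by GetViper means AutomaticEnv and the WEED_ prefix are applied per call without mutating the global viper. Assuming a toml key such as leveldb2.enabled (the key name is illustrative), an environment override would look like this sketch:

```go
package main

import (
	"fmt"
	"os"

	"github.com/chrislusf/seaweedfs/weed/util"
)

func main() {
	// illustrative only: "leveldb2.enabled" stands in for any *.toml key
	os.Setenv("WEED_LEVELDB2_ENABLED", "false")

	v := util.GetViper()
	// the "." -> "_" replacer plus the WEED prefix maps the env var onto the key
	fmt.Println(v.GetBool("leveldb2.enabled")) // false, even with no file loaded
}
```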
diff --git a/weed/util/constants.go b/weed/util/constants.go
index 3d61b2006..7c3927a66 100644
--- a/weed/util/constants.go
+++ b/weed/util/constants.go
@@ -5,5 +5,10 @@ import (
)
var (
- VERSION = fmt.Sprintf("%s %d.%d", sizeLimit, 1, 53)
+ VERSION = fmt.Sprintf("%s %d.%d", sizeLimit, 1, 79)
+ COMMIT = ""
)
+
+func Version() string {
+ return VERSION + " " + COMMIT
+}
\ No newline at end of file
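COMMIT stays empty in source; presumably it is stamped at build time through the Go linker, along these lines:

```go
// assuming a build such as:
//   go build -ldflags "-X github.com/chrislusf/seaweedfs/weed/util.COMMIT=$(git rev-parse --short HEAD)"
fmt.Println(util.Version()) // e.g. "30GB 1.79 1d2e3f4"
```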
diff --git a/weed/filer2/fullpath.go b/weed/util/fullpath.go
similarity index 62%
rename from weed/filer2/fullpath.go
rename to weed/util/fullpath.go
index 133069f93..4ce8a2f90 100644
--- a/weed/filer2/fullpath.go
+++ b/weed/util/fullpath.go
@@ -1,10 +1,8 @@
-package filer2
+package util
import (
"path/filepath"
"strings"
-
- "github.com/chrislusf/seaweedfs/weed/util"
)
type FullPath string
@@ -38,5 +36,21 @@ func (fp FullPath) Child(name string) FullPath {
}
func (fp FullPath) AsInode() uint64 {
- return uint64(util.HashStringToLong(string(fp)))
+ return uint64(HashStringToLong(string(fp)))
+}
+
+// Split breaks the path into its components, skipping the root.
+func (fp FullPath) Split() []string {
+ if fp == "" || fp == "/" {
+ return []string{}
+ }
+ return strings.Split(string(fp)[1:], "/")
+}
+
+func Join(names ...string) string {
+ return filepath.ToSlash(filepath.Join(names...))
+}
+
+func JoinPath(names ...string) FullPath {
+ return FullPath(Join(names...))
}
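Now that FullPath lives in util, a small sketch of the new helpers:

```go
package main

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/util"
)

func main() {
	fp := util.JoinPath("/home", "user", "docs") // FullPath("/home/user/docs")

	fmt.Println(fp.Split())                 // [home user docs]
	fmt.Println(util.FullPath("/").Split()) // [] (root yields no components)

	child := fp.Child("notes.txt")
	fmt.Println(child, child.AsInode()) // inode derived from the path hash
}
```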
diff --git a/weed/util/pprof.go b/weed/util/grace/pprof.go
similarity index 97%
rename from weed/util/pprof.go
rename to weed/util/grace/pprof.go
index a2621ceee..14686bfc8 100644
--- a/weed/util/pprof.go
+++ b/weed/util/grace/pprof.go
@@ -1,4 +1,4 @@
-package util
+package grace
import (
"os"
diff --git a/weed/util/signal_handling.go b/weed/util/grace/signal_handling.go
similarity index 98%
rename from weed/util/signal_handling.go
rename to weed/util/grace/signal_handling.go
index 99447e8be..7cca46764 100644
--- a/weed/util/signal_handling.go
+++ b/weed/util/grace/signal_handling.go
@@ -1,6 +1,6 @@
// +build !plan9
-package util
+package grace
import (
"os"
diff --git a/weed/util/signal_handling_notsupported.go b/weed/util/grace/signal_handling_notsupported.go
similarity index 78%
rename from weed/util/signal_handling_notsupported.go
rename to weed/util/grace/signal_handling_notsupported.go
index c389cfb7e..5335915a1 100644
--- a/weed/util/signal_handling_notsupported.go
+++ b/weed/util/grace/signal_handling_notsupported.go
@@ -1,6 +1,6 @@
// +build plan9
-package util
+package grace
func OnInterrupt(fn func()) {
}
diff --git a/weed/util/grpc_client_server.go b/weed/util/grpc_client_server.go
deleted file mode 100644
index 7e396342b..000000000
--- a/weed/util/grpc_client_server.go
+++ /dev/null
@@ -1,127 +0,0 @@
-package util
-
-import (
- "context"
- "fmt"
- "net/http"
- "strconv"
- "strings"
- "sync"
- "time"
-
- "google.golang.org/grpc"
- "google.golang.org/grpc/keepalive"
-)
-
-var (
- // cache grpc connections
- grpcClients = make(map[string]*grpc.ClientConn)
- grpcClientsLock sync.Mutex
-)
-
-func init() {
- http.DefaultTransport.(*http.Transport).MaxIdleConnsPerHost = 1024
-}
-
-func NewGrpcServer(opts ...grpc.ServerOption) *grpc.Server {
- var options []grpc.ServerOption
- options = append(options, grpc.KeepaliveParams(keepalive.ServerParameters{
- Time: 10 * time.Second, // wait time before ping if no activity
- Timeout: 20 * time.Second, // ping timeout
- }), grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
- MinTime: 60 * time.Second, // min time a client should wait before sending a ping
- }))
- for _, opt := range opts {
- if opt != nil {
- options = append(options, opt)
- }
- }
- return grpc.NewServer(options...)
-}
-
-func GrpcDial(ctx context.Context, address string, opts ...grpc.DialOption) (*grpc.ClientConn, error) {
- // opts = append(opts, grpc.WithBlock())
- // opts = append(opts, grpc.WithTimeout(time.Duration(5*time.Second)))
- var options []grpc.DialOption
- options = append(options,
- // grpc.WithInsecure(),
- grpc.WithKeepaliveParams(keepalive.ClientParameters{
- Time: 30 * time.Second, // client ping server if no activity for this long
- Timeout: 20 * time.Second,
- }))
- for _, opt := range opts {
- if opt != nil {
- options = append(options, opt)
- }
- }
- return grpc.DialContext(ctx, address, options...)
-}
-
-func WithCachedGrpcClient(ctx context.Context, fn func(context.Context, *grpc.ClientConn) error, address string, opts ...grpc.DialOption) error {
-
- grpcClientsLock.Lock()
-
- existingConnection, found := grpcClients[address]
- if found {
- grpcClientsLock.Unlock()
- err := fn(ctx, existingConnection)
- if err != nil {
- grpcClientsLock.Lock()
- delete(grpcClients, address)
- grpcClientsLock.Unlock()
- existingConnection.Close()
- }
- return err
- }
-
- grpcConnection, err := GrpcDial(ctx, address, opts...)
- if err != nil {
- grpcClientsLock.Unlock()
- return fmt.Errorf("fail to dial %s: %v", address, err)
- }
-
- grpcClients[address] = grpcConnection
- grpcClientsLock.Unlock()
-
- err = fn(ctx, grpcConnection)
- if err != nil {
- grpcClientsLock.Lock()
- delete(grpcClients, address)
- grpcClientsLock.Unlock()
- grpcConnection.Close()
- }
-
- return err
-}
-
-func ParseServerToGrpcAddress(server string) (serverGrpcAddress string, err error) {
- colonIndex := strings.LastIndex(server, ":")
- if colonIndex < 0 {
- return "", fmt.Errorf("server should have hostname:port format: %v", server)
- }
-
- port, parseErr := strconv.ParseUint(server[colonIndex+1:], 10, 64)
- if parseErr != nil {
- return "", fmt.Errorf("server port parse error: %v", parseErr)
- }
-
- grpcPort := int(port) + 10000
-
- return fmt.Sprintf("%s:%d", server[:colonIndex], grpcPort), nil
-}
-
-func ServerToGrpcAddress(server string) (serverGrpcAddress string) {
- hostnameAndPort := strings.Split(server, ":")
- if len(hostnameAndPort) != 2 {
- return fmt.Sprintf("unexpected server address: %s", server)
- }
-
- port, parseErr := strconv.ParseUint(hostnameAndPort[1], 10, 64)
- if parseErr != nil {
- return fmt.Sprintf("failed to parse port for %s:%s", hostnameAndPort[0], hostnameAndPort[1])
- }
-
- grpcPort := int(port) + 10000
-
- return fmt.Sprintf("%s:%d", hostnameAndPort[0], grpcPort)
-}
diff --git a/weed/util/http_util.go b/weed/util/http_util.go
index 08007a038..5df79a7be 100644
--- a/weed/util/http_util.go
+++ b/weed/util/http_util.go
@@ -35,13 +35,13 @@ func PostBytes(url string, body []byte) ([]byte, error) {
return nil, fmt.Errorf("Post to %s: %v", url, err)
}
defer r.Body.Close()
- if r.StatusCode >= 400 {
- return nil, fmt.Errorf("%s: %s", url, r.Status)
- }
b, err := ioutil.ReadAll(r.Body)
if err != nil {
return nil, fmt.Errorf("Read response body: %v", err)
}
+ if r.StatusCode >= 400 {
+ return nil, fmt.Errorf("%s: %s", url, r.Status)
+ }
return b, nil
}
@@ -88,7 +88,7 @@ func Head(url string) (http.Header, error) {
if err != nil {
return nil, err
}
- defer r.Body.Close()
+ defer CloseResponse(r)
if r.StatusCode >= 400 {
return nil, fmt.Errorf("%s: %s", url, r.Status)
}
@@ -117,7 +117,7 @@ func Delete(url string, jwt string) error {
return nil
}
m := make(map[string]interface{})
- if e := json.Unmarshal(body, m); e == nil {
+ if e := json.Unmarshal(body, &m); e == nil {
if s, ok := m["error"].(string); ok {
return errors.New(s)
}
@@ -130,7 +130,7 @@ func GetBufferStream(url string, values url.Values, allocatedBytes []byte, eachB
if err != nil {
return err
}
- defer r.Body.Close()
+ defer CloseResponse(r)
if r.StatusCode != 200 {
return fmt.Errorf("%s: %s", url, r.Status)
}
@@ -153,7 +153,7 @@ func GetUrlStream(url string, values url.Values, readFn func(io.Reader) error) e
if err != nil {
return err
}
- defer r.Body.Close()
+ defer CloseResponse(r)
if r.StatusCode != 200 {
return fmt.Errorf("%s: %s", url, r.Status)
}
@@ -189,13 +189,21 @@ func NormalizeUrl(url string) string {
return "http://" + url
}
-func ReadUrl(fileUrl string, offset int64, size int, buf []byte, isReadRange bool) (int64, error) {
+func ReadUrl(fileUrl string, cipherKey []byte, isGzipped bool, isFullChunk bool, offset int64, size int, buf []byte) (int64, error) {
+
+ if cipherKey != nil {
+ var n int
+ err := readEncryptedUrl(fileUrl, cipherKey, isGzipped, isFullChunk, offset, size, func(data []byte) {
+ n = copy(buf, data)
+ })
+ return int64(n), err
+ }
req, err := http.NewRequest("GET", fileUrl, nil)
if err != nil {
return 0, err
}
- if isReadRange {
+ if !isFullChunk {
req.Header.Add("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+int64(size)-1))
} else {
req.Header.Set("Accept-Encoding", "gzip")
@@ -250,43 +258,74 @@ func ReadUrl(fileUrl string, offset int64, size int, buf []byte, isReadRange boo
return n, err
}
-func ReadUrlAsStream(fileUrl string, offset int64, size int, fn func(data []byte)) (int64, error) {
+func ReadUrlAsStream(fileUrl string, cipherKey []byte, isContentGzipped bool, isFullChunk bool, offset int64, size int, fn func(data []byte)) error {
+
+ if cipherKey != nil {
+ return readEncryptedUrl(fileUrl, cipherKey, isContentGzipped, isFullChunk, offset, size, fn)
+ }
req, err := http.NewRequest("GET", fileUrl, nil)
if err != nil {
- return 0, err
+ return err
+ }
+
+ if !isFullChunk {
+ req.Header.Add("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+int64(size)-1))
}
- req.Header.Add("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+int64(size)-1))
r, err := client.Do(req)
if err != nil {
- return 0, err
+ return err
}
- defer r.Body.Close()
+ defer CloseResponse(r)
if r.StatusCode >= 400 {
- return 0, fmt.Errorf("%s: %s", fileUrl, r.Status)
+ return fmt.Errorf("%s: %s", fileUrl, r.Status)
}
var (
m int
- n int64
)
buf := make([]byte, 64*1024)
for {
m, err = r.Body.Read(buf)
fn(buf[:m])
- n += int64(m)
if err == io.EOF {
- return n, nil
+ return nil
}
if err != nil {
- return n, err
+ return err
}
}
}
+func readEncryptedUrl(fileUrl string, cipherKey []byte, isContentGzipped bool, isFullChunk bool, offset int64, size int, fn func(data []byte)) error {
+ encryptedData, err := Get(fileUrl)
+ if err != nil {
+ return fmt.Errorf("fetch %s: %v", fileUrl, err)
+ }
+ decryptedData, err := Decrypt(encryptedData, CipherKey(cipherKey))
+ if err != nil {
+ return fmt.Errorf("decrypt %s: %v", fileUrl, err)
+ }
+ if isContentGzipped {
+ decryptedData, err = UnGzipData(decryptedData)
+ if err != nil {
+ return fmt.Errorf("unzip decrypt %s: %v", fileUrl, err)
+ }
+ }
+ if len(decryptedData) < int(offset)+size {
+ return fmt.Errorf("read decrypted %s size %d [%d, %d)", fileUrl, len(decryptedData), offset, int(offset)+size)
+ }
+ if isFullChunk {
+ fn(decryptedData)
+ } else {
+ fn(decryptedData[int(offset) : int(offset)+size])
+ }
+ return nil
+}
+
func ReadUrlAsReaderCloser(fileUrl string, rangeHeader string) (io.ReadCloser, error) {
req, err := http.NewRequest("GET", fileUrl, nil)
@@ -307,3 +346,8 @@ func ReadUrlAsReaderCloser(fileUrl string, rangeHeader string) (io.ReadCloser, e
return r.Body, nil
}
+
+func CloseResponse(resp *http.Response) {
+ io.Copy(ioutil.Discard, resp.Body)
+ resp.Body.Close()
+}
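The reworked ReadUrlAsStream signature in one sketch; the URL below is hypothetical, a nil cipherKey selects the plain HTTP path, and isFullChunk=false sends a Range header covering [offset, offset+size):

```go
package main

import (
	"log"

	"github.com/chrislusf/seaweedfs/weed/util"
)

func main() {
	var total int
	err := util.ReadUrlAsStream(
		"http://127.0.0.1:8080/3,01637037d6", // hypothetical volume server URL
		nil,   // cipherKey nil: plain HTTP path
		false, // content not gzipped
		false, // partial read, so a Range header is sent
		1024,  // offset
		4096,  // size
		func(data []byte) { total += len(data) }, // invoked per 64KiB read
	)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("streamed %d bytes", total)
}
```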
diff --git a/weed/util/inits.go b/weed/util/inits.go
new file mode 100644
index 000000000..378878012
--- /dev/null
+++ b/weed/util/inits.go
@@ -0,0 +1,52 @@
+package util
+
+import (
+ "fmt"
+ "sort"
+)
+
+// HumanReadableIntsMax joins a series of ints into a compact human-readable form like "1-3 5 ...", truncating after max values.
+func HumanReadableIntsMax(max int, ids ...int) string {
+ if len(ids) <= max {
+ return HumanReadableInts(ids...)
+ }
+
+ return HumanReadableInts(ids[:max]...) + " ..."
+}
+
+// HumanReadableInts joins a series of ints into a compact human-readable form like "1-3 5 7-10".
+func HumanReadableInts(ids ...int) string {
+ sort.Ints(ids)
+
+ s := ""
+ start := 0
+ last := 0
+
+ for i, v := range ids {
+ if i == 0 {
+ start = v
+ last = v
+ s = fmt.Sprintf("%d", v)
+ continue
+ }
+
+ if last+1 == v {
+ last = v
+ continue
+ }
+
+ if last > start {
+ s += fmt.Sprintf("-%d", last)
+ }
+
+ s += fmt.Sprintf(" %d", v)
+ start = v
+ last = v
+ }
+
+ if last != start {
+ s += fmt.Sprintf("-%d", last)
+ }
+
+ return s
+}
diff --git a/weed/util/inits_test.go b/weed/util/inits_test.go
new file mode 100644
index 000000000..f2c9b701f
--- /dev/null
+++ b/weed/util/inits_test.go
@@ -0,0 +1,19 @@
+package util
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestHumanReadableIntsMax(t *testing.T) {
+ assert.Equal(t, "1-2 ...", HumanReadableIntsMax(2, 1, 2, 3))
+ assert.Equal(t, "1 3 ...", HumanReadableIntsMax(2, 1, 3, 5))
+}
+
+func TestHumanReadableInts(t *testing.T) {
+ assert.Equal(t, "1-3", HumanReadableInts(1, 2, 3))
+ assert.Equal(t, "1 3", HumanReadableInts(1, 3))
+ assert.Equal(t, "1 3 5", HumanReadableInts(5, 1, 3))
+ assert.Equal(t, "1-3 5", HumanReadableInts(1, 2, 3, 5))
+ assert.Equal(t, "1-3 5 7-9", HumanReadableInts(7, 9, 8, 1, 2, 3, 5))
+}
diff --git a/weed/util/log_buffer/log_buffer.go b/weed/util/log_buffer/log_buffer.go
new file mode 100644
index 000000000..b02c45b52
--- /dev/null
+++ b/weed/util/log_buffer/log_buffer.go
@@ -0,0 +1,278 @@
+package log_buffer
+
+import (
+ "bytes"
+ "sync"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+const BufferSize = 4 * 1024 * 1024
+const PreviousBufferCount = 3
+
+type dataToFlush struct {
+ startTime time.Time
+ stopTime time.Time
+ data *bytes.Buffer
+}
+
+type LogBuffer struct {
+ prevBuffers *SealedBuffers
+ buf []byte
+ idx []int
+ pos int
+ startTime time.Time
+ stopTime time.Time
+ sizeBuf []byte
+ flushInterval time.Duration
+ flushFn func(startTime, stopTime time.Time, buf []byte)
+ notifyFn func()
+ isStopping bool
+ flushChan chan *dataToFlush
+ lastTsNs int64
+ sync.RWMutex
+}
+
+func NewLogBuffer(flushInterval time.Duration, flushFn func(startTime, stopTime time.Time, buf []byte), notifyFn func()) *LogBuffer {
+ lb := &LogBuffer{
+ prevBuffers: newSealedBuffers(PreviousBufferCount),
+ buf: make([]byte, BufferSize),
+ sizeBuf: make([]byte, 4),
+ flushInterval: flushInterval,
+ flushFn: flushFn,
+ notifyFn: notifyFn,
+ flushChan: make(chan *dataToFlush, 256),
+ }
+ go lb.loopFlush()
+ go lb.loopInterval()
+ return lb
+}
+
+func (m *LogBuffer) AddToBuffer(partitionKey, data []byte) {
+
+ m.Lock()
+ defer func() {
+ m.Unlock()
+ if m.notifyFn != nil {
+ m.notifyFn()
+ }
+ }()
+
+ // need to put the timestamp inside the lock
+ ts := time.Now()
+ tsNs := ts.UnixNano()
+ if m.lastTsNs >= tsNs {
+ // this is unlikely to happen, but just in case
+ tsNs = m.lastTsNs + 1
+ ts = time.Unix(0, tsNs)
+ }
+ m.lastTsNs = tsNs
+ logEntry := &filer_pb.LogEntry{
+ TsNs: tsNs,
+ PartitionKeyHash: util.HashToInt32(partitionKey),
+ Data: data,
+ }
+
+ logEntryData, _ := proto.Marshal(logEntry)
+
+ size := len(logEntryData)
+
+ if m.pos == 0 {
+ m.startTime = ts
+ }
+
+ if m.startTime.Add(m.flushInterval).Before(ts) || len(m.buf)-m.pos < size+4 {
+ m.flushChan <- m.copyToFlush()
+ m.startTime = ts
+ if len(m.buf) < size+4 {
+ m.buf = make([]byte, 2*size+4)
+ }
+ }
+ m.stopTime = ts
+
+ m.idx = append(m.idx, m.pos)
+ util.Uint32toBytes(m.sizeBuf, uint32(size))
+ copy(m.buf[m.pos:m.pos+4], m.sizeBuf)
+ copy(m.buf[m.pos+4:m.pos+4+size], logEntryData)
+ m.pos += size + 4
+
+ // fmt.Printf("entry size %d total %d count %d, buffer:%p\n", size, m.pos, len(m.idx), m)
+
+}
+
+func (m *LogBuffer) Shutdown() {
+ m.Lock()
+ defer m.Unlock()
+
+ if m.isStopping {
+ return
+ }
+ m.isStopping = true
+ toFlush := m.copyToFlush()
+ m.flushChan <- toFlush
+ close(m.flushChan)
+}
+
+func (m *LogBuffer) loopFlush() {
+ for d := range m.flushChan {
+ if d != nil {
+ // fmt.Printf("flush [%v, %v] size %d\n", d.startTime, d.stopTime, len(d.data.Bytes()))
+ m.flushFn(d.startTime, d.stopTime, d.data.Bytes())
+ d.releaseMemory()
+ }
+ }
+}
+
+func (m *LogBuffer) loopInterval() {
+ for !m.isStopping {
+ time.Sleep(m.flushInterval)
+ m.Lock()
+ if m.isStopping {
+ m.Unlock()
+ return
+ }
+ // println("loop interval")
+ toFlush := m.copyToFlush()
+ m.flushChan <- toFlush
+ m.Unlock()
+ }
+}
+
+func (m *LogBuffer) copyToFlush() *dataToFlush {
+
+ if m.flushFn != nil && m.pos > 0 {
+ // fmt.Printf("flush buffer %d pos %d empty space %d\n", len(m.buf), m.pos, len(m.buf)-m.pos)
+ d := &dataToFlush{
+ startTime: m.startTime,
+ stopTime: m.stopTime,
+ data: copiedBytes(m.buf[:m.pos]),
+ }
+ // fmt.Printf("flusing [0,%d) with %d entries\n", m.pos, len(m.idx))
+ m.buf = m.prevBuffers.SealBuffer(m.startTime, m.stopTime, m.buf, m.pos)
+ m.pos = 0
+ m.idx = m.idx[:0]
+ return d
+ }
+ return nil
+}
+
+func (d *dataToFlush) releaseMemory() {
+ d.data.Reset()
+ bufferPool.Put(d.data)
+}
+
+func (m *LogBuffer) ReadFromBuffer(lastReadTime time.Time) (bufferCopy *bytes.Buffer) {
+ m.RLock()
+ defer m.RUnlock()
+
+ /*
+ fmt.Printf("read buffer %p: %v last stop time: [%v,%v], pos %d, entries:%d, prevBufs:%d\n", m, lastReadTime, m.startTime, m.stopTime, m.pos, len(m.idx), len(m.prevBuffers.buffers))
+ for i, prevBuf := range m.prevBuffers.buffers {
+ fmt.Printf(" prev %d : %s\n", i, prevBuf.String())
+ }
+ */
+
+ if lastReadTime.Equal(m.stopTime) {
+ return nil
+ }
+ if lastReadTime.After(m.stopTime) {
+ // glog.Fatalf("unexpected last read time %v, older than latest %v", lastReadTime, m.stopTime)
+ return nil
+ }
+ if lastReadTime.Before(m.startTime) {
+ // println("checking ", lastReadTime.UnixNano())
+ for i, buf := range m.prevBuffers.buffers {
+ if buf.startTime.After(lastReadTime) {
+ if i == 0 {
+ // println("return the earliest in memory", buf.startTime.UnixNano())
+ return copiedBytes(buf.buf[:buf.size])
+ }
+ // println("return the", i, "th in memory", buf.startTime.UnixNano())
+ return copiedBytes(buf.buf[:buf.size])
+ }
+ if !buf.startTime.After(lastReadTime) && buf.stopTime.After(lastReadTime) {
+ pos := buf.locateByTs(lastReadTime)
+ // fmt.Printf("locate buffer[%d] pos %d\n", i, pos)
+ return copiedBytes(buf.buf[pos:buf.size])
+ }
+ }
+ // println("return the current buf", lastReadTime.UnixNano())
+ return copiedBytes(m.buf[:m.pos])
+ }
+
+ lastTs := lastReadTime.UnixNano()
+ l, h := 0, len(m.idx)-1
+
+ /*
+ for i, pos := range m.idx {
+ logEntry, ts := readTs(m.buf, pos)
+ event := &filer_pb.SubscribeMetadataResponse{}
+ proto.Unmarshal(logEntry.Data, event)
+ entry := event.EventNotification.OldEntry
+ if entry == nil {
+ entry = event.EventNotification.NewEntry
+ }
+ fmt.Printf("entry %d ts: %v offset:%d dir:%s name:%s\n", i, time.Unix(0, ts), pos, event.Directory, entry.Name)
+ }
+ fmt.Printf("l=%d, h=%d\n", l, h)
+ */
+
+ for l <= h {
+ mid := (l + h) / 2
+ pos := m.idx[mid]
+ _, t := readTs(m.buf, pos)
+ if t <= lastTs {
+ l = mid + 1
+ } else if lastTs < t {
+ var prevT int64
+ if mid > 0 {
+ _, prevT = readTs(m.buf, m.idx[mid-1])
+ }
+ if prevT <= lastTs {
+ // fmt.Printf("found l=%d, m-1=%d(ts=%d), m=%d(ts=%d), h=%d [%d, %d) \n", l, mid-1, prevT, mid, t, h, pos, m.pos)
+ return copiedBytes(m.buf[pos:m.pos])
+ }
+ h = mid
+ }
+ // fmt.Printf("l=%d, h=%d\n", l, h)
+ }
+
+ // FIXME: returning nil here may also mean the data has already been flushed out of memory
+ return nil
+
+}
+func (m *LogBuffer) ReleaseMeory(b *bytes.Buffer) {
+ bufferPool.Put(b)
+}
+
+var bufferPool = sync.Pool{
+ New: func() interface{} {
+ return new(bytes.Buffer)
+ },
+}
+
+func copiedBytes(buf []byte) (copied *bytes.Buffer) {
+ copied = bufferPool.Get().(*bytes.Buffer)
+ copied.Reset()
+ copied.Write(buf)
+ return
+}
+
+func readTs(buf []byte, pos int) (size int, ts int64) {
+
+ size = int(util.BytesToUint32(buf[pos : pos+4]))
+ entryData := buf[pos+4 : pos+4+size]
+ logEntry := &filer_pb.LogEntry{}
+
+ err := proto.Unmarshal(entryData, logEntry)
+ if err != nil {
+ glog.Fatalf("unexpected unmarshal filer_pb.LogEntry: %v", err)
+ }
+ return size, logEntry.TsNs
+
+}
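Each buffered entry is framed as a 4-byte big-endian length followed by a marshaled filer_pb.LogEntry. A sketch of walking a buffer returned by ReadFromBuffer; this mirrors readTs and the read loop, and is not itself part of the diff:

```go
package example

import (
	"github.com/golang/protobuf/proto"

	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/chrislusf/seaweedfs/weed/util"
)

// decodeEntries walks the [4-byte size | LogEntry protobuf] framing; illustrative only
func decodeEntries(buf []byte) (entries []*filer_pb.LogEntry, err error) {
	for pos := 0; pos+4 < len(buf); {
		size := int(util.BytesToUint32(buf[pos : pos+4]))
		entry := &filer_pb.LogEntry{}
		if err = proto.Unmarshal(buf[pos+4:pos+4+size], entry); err != nil {
			return nil, err
		}
		entries = append(entries, entry)
		pos += 4 + size
	}
	return entries, nil
}
```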
diff --git a/weed/util/log_buffer/log_buffer_test.go b/weed/util/log_buffer/log_buffer_test.go
new file mode 100644
index 000000000..f9ccc95c2
--- /dev/null
+++ b/weed/util/log_buffer/log_buffer_test.go
@@ -0,0 +1,42 @@
+package log_buffer
+
+import (
+ "fmt"
+ "math/rand"
+ "testing"
+ "time"
+
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+)
+
+func TestNewLogBufferFirstBuffer(t *testing.T) {
+ lb := NewLogBuffer(time.Minute, func(startTime, stopTime time.Time, buf []byte) {
+
+ }, func() {
+
+ })
+
+ startTime := time.Now()
+
+ messageSize := 1024
+ messageCount := 5000
+ var buf = make([]byte, messageSize)
+ for i := 0; i < messageCount; i++ {
+ rand.Read(buf)
+ lb.AddToBuffer(nil, buf)
+ }
+
+ receivedMessageCount := 0
+ lb.LoopProcessLogData(startTime, func() bool {
+ // stop if no more messages
+ return false
+ }, func(logEntry *filer_pb.LogEntry) error {
+ receivedMessageCount++
+ return nil
+ })
+
+ if receivedMessageCount != messageCount {
+ t.Errorf("sent %d received %d", messageCount, receivedMessageCount)
+ }
+
+}
diff --git a/weed/util/log_buffer/log_read.go b/weed/util/log_buffer/log_read.go
new file mode 100644
index 000000000..2b73a8064
--- /dev/null
+++ b/weed/util/log_buffer/log_read.go
@@ -0,0 +1,77 @@
+package log_buffer
+
+import (
+ "bytes"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+func (logBuffer *LogBuffer) LoopProcessLogData(
+ startTraversalTime time.Time,
+ waitForDataFn func() bool,
+ eachLogDataFn func(logEntry *filer_pb.LogEntry) error) (err error) {
+ // loop through all messages
+ var bytesBuf *bytes.Buffer
+ lastReadTime := startTraversalTime
+ defer func() {
+ if bytesBuf != nil {
+ logBuffer.ReleaseMeory(bytesBuf)
+ }
+ }()
+
+ for {
+
+ if bytesBuf != nil {
+ logBuffer.ReleaseMeory(bytesBuf)
+ }
+ bytesBuf = logBuffer.ReadFromBuffer(lastReadTime)
+ // fmt.Printf("ReadFromBuffer by %v\n", lastReadTime)
+ if bytesBuf == nil {
+ if waitForDataFn() {
+ continue
+ } else {
+ return
+ }
+ }
+
+ buf := bytesBuf.Bytes()
+ // fmt.Printf("ReadFromBuffer by %v size %d\n", lastReadTime, len(buf))
+
+ batchSize := 0
+ var startReadTime time.Time
+
+ for pos := 0; pos+4 < len(buf); {
+
+ size := util.BytesToUint32(buf[pos : pos+4])
+ entryData := buf[pos+4 : pos+4+int(size)]
+
+ // fmt.Printf("read buffer read %d [%d,%d) from [0,%d)\n", batchSize, pos, pos+int(size)+4, len(buf))
+
+ logEntry := &filer_pb.LogEntry{}
+ if err = proto.Unmarshal(entryData, logEntry); err != nil {
+ glog.Errorf("unexpected unmarshal messaging_pb.Message: %v", err)
+ pos += 4 + int(size)
+ continue
+ }
+ lastReadTime = time.Unix(0, logEntry.TsNs)
+ if startReadTime.IsZero() {
+ startReadTime = lastReadTime
+ }
+
+ if err = eachLogDataFn(logEntry); err != nil {
+ return
+ }
+
+ pos += 4 + int(size)
+ batchSize++
+ }
+
+ // fmt.Printf("sent message ts[%d,%d] size %d\n", startReadTime.UnixNano(), lastReadTime.UnixNano(), batchSize)
+ }
+
+}
diff --git a/weed/util/log_buffer/sealed_buffer.go b/weed/util/log_buffer/sealed_buffer.go
new file mode 100644
index 000000000..d133cf8d3
--- /dev/null
+++ b/weed/util/log_buffer/sealed_buffer.go
@@ -0,0 +1,62 @@
+package log_buffer
+
+import (
+ "fmt"
+ "time"
+)
+
+type MemBuffer struct {
+ buf []byte
+ size int
+ startTime time.Time
+ stopTime time.Time
+}
+
+type SealedBuffers struct {
+ buffers []*MemBuffer
+}
+
+func newSealedBuffers(size int) *SealedBuffers {
+ sbs := &SealedBuffers{}
+
+ sbs.buffers = make([]*MemBuffer, size)
+ for i := 0; i < size; i++ {
+ sbs.buffers[i] = &MemBuffer{
+ buf: make([]byte, BufferSize),
+ }
+ }
+
+ return sbs
+}
+
+func (sbs *SealedBuffers) SealBuffer(startTime, stopTime time.Time, buf []byte, pos int) (newBuf []byte) {
+ oldMemBuffer := sbs.buffers[0]
+ size := len(sbs.buffers)
+ for i := 0; i < size-1; i++ {
+ sbs.buffers[i].buf = sbs.buffers[i+1].buf
+ sbs.buffers[i].size = sbs.buffers[i+1].size
+ sbs.buffers[i].startTime = sbs.buffers[i+1].startTime
+ sbs.buffers[i].stopTime = sbs.buffers[i+1].stopTime
+ }
+ sbs.buffers[size-1].buf = buf
+ sbs.buffers[size-1].size = pos
+ sbs.buffers[size-1].startTime = startTime
+ sbs.buffers[size-1].stopTime = stopTime
+ return oldMemBuffer.buf
+}
+
+func (mb *MemBuffer) locateByTs(lastReadTime time.Time) (pos int) {
+ lastReadTs := lastReadTime.UnixNano()
+ for pos < mb.size {
+ size, t := readTs(mb.buf, pos)
+ if t > lastReadTs {
+ return
+ }
+ pos += size + 4
+ }
+ return mb.size
+}
+
+func (mb *MemBuffer) String() string {
+ return fmt.Sprintf("[%v,%v] bytes:%d", mb.startTime, mb.stopTime, mb.size)
+}
diff --git a/weed/util/net_timeout.go b/weed/util/net_timeout.go
index b8068e67f..f057a8f5b 100644
--- a/weed/util/net_timeout.go
+++ b/weed/util/net_timeout.go
@@ -35,6 +35,7 @@ type Conn struct {
net.Conn
ReadTimeout time.Duration
WriteTimeout time.Duration
+ isClosed bool
}
func (c *Conn) Read(b []byte) (count int, e error) {
@@ -68,7 +69,10 @@ func (c *Conn) Write(b []byte) (count int, e error) {
func (c *Conn) Close() error {
err := c.Conn.Close()
if err == nil {
- stats.ConnectionClose()
+ if !c.isClosed {
+ stats.ConnectionClose()
+ c.isClosed = true
+ }
}
return err
}
diff --git a/weed/util/network.go b/weed/util/network.go
new file mode 100644
index 000000000..7108cfea6
--- /dev/null
+++ b/weed/util/network.go
@@ -0,0 +1,25 @@
+package util
+
+import (
+ "net"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+)
+
+func DetectedHostAddress() string {
+ addrs, err := net.InterfaceAddrs()
+ if err != nil {
+ glog.V(0).Infof("failed to detect ip address: %v", err)
+ return ""
+ }
+
+ for _, a := range addrs {
+ if ipnet, ok := a.(*net.IPNet); ok && !ipnet.IP.IsLoopback() {
+ if ipnet.IP.To4() != nil {
+ return ipnet.IP.String()
+ }
+ }
+ }
+
+ return "localhost"
+}
diff --git a/weed/util/parse.go b/weed/util/parse.go
index 6593d43b6..0955db682 100644
--- a/weed/util/parse.go
+++ b/weed/util/parse.go
@@ -1,6 +1,7 @@
package util
import (
+ "fmt"
"net/url"
"strconv"
"strings"
@@ -45,3 +46,18 @@ func ParseFilerUrl(entryPath string) (filerServer string, filerPort int64, path
path = u.Path
return
}
+
+func ParseHostPort(hostPort string) (filerServer string, filerPort int64, err error) {
+ parts := strings.Split(hostPort, ":")
+ if len(parts) != 2 {
+ err = fmt.Errorf("failed to parse %s\n", hostPort)
+ return
+ }
+
+ filerPort, err = strconv.ParseInt(parts[1], 10, 64)
+ if err == nil {
+ filerServer = parts[0]
+ }
+
+ return
+}
diff --git a/weed/util/queue_unbounded.go b/weed/util/queue_unbounded.go
new file mode 100644
index 000000000..496b9f844
--- /dev/null
+++ b/weed/util/queue_unbounded.go
@@ -0,0 +1,45 @@
+package util
+
+import "sync"
+
+type UnboundedQueue struct {
+ outbound []string
+ outboundLock sync.RWMutex
+ inbound []string
+ inboundLock sync.RWMutex
+}
+
+func NewUnboundedQueue() *UnboundedQueue {
+ q := &UnboundedQueue{}
+ return q
+}
+
+func (q *UnboundedQueue) EnQueue(items ...string) {
+ q.inboundLock.Lock()
+ defer q.inboundLock.Unlock()
+
+ q.inbound = append(q.inbound, items...)
+
+}
+
+func (q *UnboundedQueue) Consume(fn func([]string)) {
+ q.outboundLock.Lock()
+ defer q.outboundLock.Unlock()
+
+ if len(q.outbound) == 0 {
+ q.inboundLock.Lock()
+ inboundLen := len(q.inbound)
+ if inboundLen > 0 {
+ t := q.outbound
+ q.outbound = q.inbound
+ q.inbound = t
+ }
+ q.inboundLock.Unlock()
+ }
+
+ if len(q.outbound) > 0 {
+ fn(q.outbound)
+ q.outbound = q.outbound[:0]
+ }
+
+}
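The two-slice design lets producers contend only on the inbound lock while the consumer swaps the slices and drains a whole batch at once; a usage sketch with concurrent producers:

```go
package main

import (
	"fmt"
	"time"

	"github.com/chrislusf/seaweedfs/weed/util"
)

func main() {
	q := util.NewUnboundedQueue()

	// producers only contend on the inbound lock
	for i := 0; i < 4; i++ {
		go func(id int) { q.EnQueue(fmt.Sprintf("item-%d", id)) }(i)
	}

	// a single consumer swaps inbound/outbound and drains whole batches;
	// consuming an empty queue is a cheap no-op
	for i := 0; i < 10; i++ {
		q.Consume(func(items []string) {
			for _, it := range items {
				fmt.Println(it)
			}
		})
		time.Sleep(10 * time.Millisecond)
	}
}
```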
diff --git a/weed/util/queue_unbounded_test.go b/weed/util/queue_unbounded_test.go
new file mode 100644
index 000000000..2d02032cb
--- /dev/null
+++ b/weed/util/queue_unbounded_test.go
@@ -0,0 +1,25 @@
+package util
+
+import "testing"
+
+func TestEnqueueAndConsume(t *testing.T) {
+
+ q := NewUnboundedQueue()
+
+ q.EnQueue("1", "2", "3")
+
+ f := func(items []string) {
+ for _, t := range items {
+ println(t)
+ }
+ println("-----------------------")
+ }
+ q.Consume(f)
+
+ q.Consume(f)
+
+ q.EnQueue("4", "5")
+ q.EnQueue("6", "7")
+ q.Consume(f)
+
+}
diff --git a/weed/wdclient/exclusive_locks/exclusive_locker.go b/weed/wdclient/exclusive_locks/exclusive_locker.go
new file mode 100644
index 000000000..67823e7f4
--- /dev/null
+++ b/weed/wdclient/exclusive_locks/exclusive_locker.go
@@ -0,0 +1,111 @@
+package exclusive_locks
+
+import (
+ "context"
+ "sync/atomic"
+ "time"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+ "github.com/chrislusf/seaweedfs/weed/wdclient"
+)
+
+const (
+ RenewInteval = 4 * time.Second
+ SafeRenewInteval = 3 * time.Second
+ InitLockInteval = 1 * time.Second
+ AdminLockName = "admin"
+)
+
+type ExclusiveLocker struct {
+ masterClient *wdclient.MasterClient
+ token int64
+ lockTsNs int64
+ isLocking bool
+}
+
+func NewExclusiveLocker(masterClient *wdclient.MasterClient) *ExclusiveLocker {
+ return &ExclusiveLocker{
+ masterClient: masterClient,
+ }
+}
+func (l *ExclusiveLocker) IsLocking() bool {
+ return l.isLocking
+}
+
+func (l *ExclusiveLocker) GetToken() (token int64, lockTsNs int64) {
+ for time.Unix(0, atomic.LoadInt64(&l.lockTsNs)).Add(SafeRenewInteval).Before(time.Now()) {
+ // wait until we are inside the safe renewal window, so the token cannot change while in use
+ time.Sleep(100 * time.Millisecond)
+ }
+ return atomic.LoadInt64(&l.token), atomic.LoadInt64(&l.lockTsNs)
+}
+
+func (l *ExclusiveLocker) RequestLock() {
+ if l.isLocking {
+ return
+ }
+
+ // retry to get the lease
+ for {
+ if err := l.masterClient.WithClient(func(client master_pb.SeaweedClient) error {
+ resp, err := client.LeaseAdminToken(context.Background(), &master_pb.LeaseAdminTokenRequest{
+ PreviousToken: atomic.LoadInt64(&l.token),
+ PreviousLockTime: atomic.LoadInt64(&l.lockTsNs),
+ LockName: AdminLockName,
+ })
+ if err == nil {
+ atomic.StoreInt64(&l.token, resp.Token)
+ atomic.StoreInt64(&l.lockTsNs, resp.LockTsNs)
+ }
+ return err
+ }); err != nil {
+ // println("leasing problem", err.Error())
+ time.Sleep(InitLockInteval)
+ } else {
+ break
+ }
+ }
+
+ l.isLocking = true
+
+ // start a goroutine to renew the lease
+ go func() {
+ for l.isLocking {
+ if err := l.masterClient.WithClient(func(client master_pb.SeaweedClient) error {
+ resp, err := client.LeaseAdminToken(context.Background(), &master_pb.LeaseAdminTokenRequest{
+ PreviousToken: atomic.LoadInt64(&l.token),
+ PreviousLockTime: atomic.LoadInt64(&l.lockTsNs),
+ LockName: AdminLockName,
+ })
+ if err == nil {
+ atomic.StoreInt64(&l.token, resp.Token)
+ atomic.StoreInt64(&l.lockTsNs, resp.LockTsNs)
+ // println("ts", l.lockTsNs, "token", l.token)
+ }
+ return err
+ }); err != nil {
+ glog.Errorf("failed to renew lock: %v", err)
+ return
+ } else {
+ time.Sleep(RenewInteval)
+ }
+
+ }
+ }()
+
+}
+
+func (l *ExclusiveLocker) ReleaseLock() {
+ l.isLocking = false
+ l.masterClient.WithClient(func(client master_pb.SeaweedClient) error {
+ client.ReleaseAdminToken(context.Background(), &master_pb.ReleaseAdminTokenRequest{
+ PreviousToken: atomic.LoadInt64(&l.token),
+ PreviousLockTime: atomic.LoadInt64(&l.lockTsNs),
+ LockName: AdminLockName,
+ })
+ return nil
+ })
+ atomic.StoreInt64(&l.token, 0)
+ atomic.StoreInt64(&l.lockTsNs, 0)
+}
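A sketch of the intended lifecycle, assuming mc is an already-connected *wdclient.MasterClient:

```go
locker := exclusive_locks.NewExclusiveLocker(mc)

locker.RequestLock() // blocks until the admin lease is granted, then renews every 4s
defer locker.ReleaseLock()

// the token/timestamp pair identifies this lease; GetToken waits until the
// current time is safely inside the renewal window before returning it
token, lockTsNs := locker.GetToken()
_, _ = token, lockTsNs
```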
diff --git a/weed/wdclient/masterclient.go b/weed/wdclient/masterclient.go
index 30b0cf160..4f8e0d5ef 100644
--- a/weed/wdclient/masterclient.go
+++ b/weed/wdclient/masterclient.go
@@ -2,19 +2,20 @@ package wdclient
import (
"context"
- "fmt"
"math/rand"
"time"
- "github.com/chrislusf/seaweedfs/weed/glog"
- "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
- "github.com/chrislusf/seaweedfs/weed/util"
"google.golang.org/grpc"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
)
type MasterClient struct {
- ctx context.Context
- name string
+ clientType string
+ clientHost string
+ grpcPort uint32
currentMaster string
masters []string
grpcDialOption grpc.DialOption
@@ -22,10 +23,11 @@ type MasterClient struct {
vidMap
}
-func NewMasterClient(ctx context.Context, grpcDialOption grpc.DialOption, clientName string, masters []string) *MasterClient {
+func NewMasterClient(grpcDialOption grpc.DialOption, clientType string, clientHost string, clientGrpcPort uint32, masters []string) *MasterClient {
return &MasterClient{
- ctx: ctx,
- name: clientName,
+ clientType: clientType,
+ clientHost: clientHost,
+ grpcPort: clientGrpcPort,
masters: masters,
grpcDialOption: grpcDialOption,
vidMap: newVidMap(),
@@ -43,7 +45,7 @@ func (mc *MasterClient) WaitUntilConnected() {
}
func (mc *MasterClient) KeepConnectedToMaster() {
- glog.V(1).Infof("%s bootstraps with masters %v", mc.name, mc.masters)
+ glog.V(1).Infof("%s bootstraps with masters %v", mc.clientType, mc.masters)
for {
mc.tryAllMasters()
time.Sleep(time.Second)
@@ -65,27 +67,27 @@ func (mc *MasterClient) tryAllMasters() {
}
func (mc *MasterClient) tryConnectToMaster(master string) (nextHintedLeader string) {
- glog.V(1).Infof("%s Connecting to master %v", mc.name, master)
- gprcErr := withMasterClient(context.Background(), master, mc.grpcDialOption, func(ctx context.Context, client master_pb.SeaweedClient) error {
+ glog.V(1).Infof("%s Connecting to master %v", mc.clientType, master)
+ gprcErr := pb.WithMasterClient(master, mc.grpcDialOption, func(client master_pb.SeaweedClient) error {
- stream, err := client.KeepConnected(ctx)
+ stream, err := client.KeepConnected(context.Background())
if err != nil {
- glog.V(0).Infof("%s failed to keep connected to %s: %v", mc.name, master, err)
+ glog.V(0).Infof("%s failed to keep connected to %s: %v", mc.clientType, master, err)
return err
}
- if err = stream.Send(&master_pb.KeepConnectedRequest{Name: mc.name}); err != nil {
- glog.V(0).Infof("%s failed to send to %s: %v", mc.name, master, err)
+ if err = stream.Send(&master_pb.KeepConnectedRequest{Name: mc.clientType, GrpcPort: mc.grpcPort}); err != nil {
+ glog.V(0).Infof("%s failed to send to %s: %v", mc.clientType, master, err)
return err
}
- glog.V(1).Infof("%s Connected to %v", mc.name, master)
+ glog.V(1).Infof("%s Connected to %v", mc.clientType, master)
mc.currentMaster = master
for {
volumeLocation, err := stream.Recv()
if err != nil {
- glog.V(0).Infof("%s failed to receive from %s: %v", mc.name, master, err)
+ glog.V(0).Infof("%s failed to receive from %s: %v", mc.clientType, master, err)
return err
}
@@ -102,38 +104,27 @@ func (mc *MasterClient) tryConnectToMaster(master string) (nextHintedLeader stri
PublicUrl: volumeLocation.PublicUrl,
}
for _, newVid := range volumeLocation.NewVids {
- glog.V(1).Infof("%s: %s adds volume %d", mc.name, loc.Url, newVid)
+ glog.V(1).Infof("%s: %s adds volume %d", mc.clientType, loc.Url, newVid)
mc.addLocation(newVid, loc)
}
for _, deletedVid := range volumeLocation.DeletedVids {
- glog.V(1).Infof("%s: %s removes volume %d", mc.name, loc.Url, deletedVid)
+ glog.V(1).Infof("%s: %s removes volume %d", mc.clientType, loc.Url, deletedVid)
mc.deleteLocation(deletedVid, loc)
}
}
})
if gprcErr != nil {
- glog.V(0).Infof("%s failed to connect with master %v: %v", mc.name, master, gprcErr)
+ glog.V(0).Infof("%s failed to connect with master %v: %v", mc.clientType, master, gprcErr)
}
return
}
-func withMasterClient(ctx context.Context, master string, grpcDialOption grpc.DialOption, fn func(ctx context.Context, client master_pb.SeaweedClient) error) error {
-
- masterGrpcAddress, parseErr := util.ParseServerToGrpcAddress(master)
- if parseErr != nil {
- return fmt.Errorf("failed to parse master grpc %v: %v", master, parseErr)
+func (mc *MasterClient) WithClient(fn func(client master_pb.SeaweedClient) error) error {
+ for mc.currentMaster == "" {
+ time.Sleep(3 * time.Second)
}
-
- return util.WithCachedGrpcClient(ctx, func(ctx2 context.Context, grpcConnection *grpc.ClientConn) error {
- client := master_pb.NewSeaweedClient(grpcConnection)
- return fn(ctx2, client)
- }, masterGrpcAddress, grpcDialOption)
-
-}
-
-func (mc *MasterClient) WithClient(ctx context.Context, fn func(client master_pb.SeaweedClient) error) error {
- return withMasterClient(ctx, mc.currentMaster, mc.grpcDialOption, func(ctx context.Context, client master_pb.SeaweedClient) error {
+ return pb.WithMasterClient(mc.currentMaster, mc.grpcDialOption, func(client master_pb.SeaweedClient) error {
return fn(client)
})
}
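Putting the reworked client together, a hedged sketch; the master address, client type, and the Statistics call are illustrative:

```go
package main

import (
	"context"

	"google.golang.org/grpc"

	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
	"github.com/chrislusf/seaweedfs/weed/wdclient"
)

func main() {
	mc := wdclient.NewMasterClient(grpc.WithInsecure(), "example", "localhost", 0, []string{"localhost:9333"})

	go mc.KeepConnectedToMaster() // retries all masters forever, following leader hints
	mc.WaitUntilConnected()

	// WithClient now waits for a current master, then delegates to pb.WithMasterClient
	_ = mc.WithClient(func(client master_pb.SeaweedClient) error {
		_, err := client.Statistics(context.Background(), &master_pb.StatisticsRequest{})
		return err
	})
}
```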