diff --git a/README.md b/README.md
index 835557f30..8c32a4e58 100644
--- a/README.md
+++ b/README.md
@@ -81,17 +81,15 @@ SeaweedFS is a simple and highly scalable distributed file system. There are two
1. to store billions of files!
2. to serve the files fast!

-SeaweedFS started as an Object Store to handle small files efficiently. Instead of managing all file metadata in a central master, the central master only manages file volumes, and it lets these volume servers manage files and their metadata. This relieves concurrency pressure from the central master and spreads file metadata into volume servers, allowing faster file access (just one disk read operation).
+SeaweedFS started as an Object Store to handle small files efficiently. Instead of managing all file metadata in a central master, the central master only manages file volumes, and it lets the volume servers manage files and their metadata. This relieves concurrency pressure from the central master and spreads file metadata across volume servers, allowing faster file access (O(1), usually just one disk read operation). A minimal sketch of this read path follows the feature lists below.
+
+SeaweedFS can transparently integrate with the cloud. With hot data on the local cluster and warm data on the cloud with O(1) access time, SeaweedFS achieves both fast local access and elastic cloud storage capacity, without any client-side changes.

There is only 40 bytes of disk storage overhead for each file's metadata. It is so simple with O(1) disk reads that you are welcome to challenge the performance with your actual use cases.

SeaweedFS started by implementing [Facebook's Haystack design paper](http://www.usenix.org/event/osdi10/tech/full_papers/Beaver.pdf). Also, SeaweedFS implements erasure coding with ideas from [f4: Facebook’s Warm BLOB Storage System](https://www.usenix.org/system/files/conference/osdi14/osdi14-paper-muralidhar.pdf).

-SeaweedFS can work very well with just the object store. [[Filer]] can then be added later to support directories and POSIX attributes. Filer is a separate linearly-scalable stateless server with customizable metadata stores, e.g., MySql/Postgres/Redis/Etcd/Cassandra/LevelDB.
-
-[Back to TOC](#table-of-contents)
-
-## Features ##
+On top of the object store, the optional [Filer] can support directories and POSIX attributes. Filer is a separate linearly-scalable stateless server with customizable metadata stores, e.g., MySql, Postgres, Redis, Etcd, Cassandra, LevelDB, MemSql, TiDB, TiKV, CockroachDB, etc.

[Back to TOC](#table-of-contents)

@@ -104,8 +102,10 @@ SeaweedFS can work very well with just the object store. [[Filer]] can then be a
* Adding/Removing servers does **not** cause any data re-balancing.
* Optionally fix the orientation for jpeg pictures.
* Support ETag, Accept-Range, Last-Modified, etc.
-* Support in-memory/leveldb/boltdb/btree mode tuning for memory/performance balance.
+* Support in-memory/leveldb/readonly mode tuning for memory/performance balance.
* Support rebalancing the writable and readonly volumes.
+* [Transparent cloud integration][CloudTier]: unlimited capacity via tiered cloud storage for warm data.
+* [Erasure Coding for warm storage][ErasureCoding]: Rack-Aware 10.4 erasure coding reduces storage cost and increases availability.

[Back to TOC](#table-of-contents)

@@ -113,7 +113,6 @@ SeaweedFS can work very well with just the object store. [[Filer]] can then be a
* [filer server][Filer] provides "normal" directories and files via http.
* [mount filer][Mount] to read and write files directly as a local directory via FUSE.
* [Amazon S3 compatible API][AmazonS3API] to access files with S3 tooling.
-* [Erasure Coding for warm storage][ErasureCoding] Rack-Aware 10.4 erasure coding reduces storage cost and increases availability.
* [Hadoop Compatible File System][Hadoop] to access files from Hadoop/Spark/Flink/etc jobs.
* [Async Backup To Cloud][BackupToCloud] has extremely fast local access and backups to Amazon S3, Google Cloud Storage, Azure, BackBlaze.
* [WebDAV] access as a mapped drive on Mac and Windows, or from mobile devices.
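+
+A minimal Go sketch of the O(1) read path described at the top of this README. This is an illustration, not the official client: it assumes a default master at `localhost:9333`, the `/dir/lookup` volume-location API, and a JSON reply with a `locations[].url` field; the file id and error handling are illustrative only.
+
+```go
+package main
+
+import (
+    "encoding/json"
+    "fmt"
+    "io/ioutil"
+    "net/http"
+    "strings"
+)
+
+// lookupResult mirrors the assumed volume-location reply from the master.
+type lookupResult struct {
+    Locations []struct {
+        URL string `json:"url"`
+    } `json:"locations"`
+}
+
+// readFile resolves a file id like "3,01637037d6": one (easily cached)
+// volume-id lookup on the master, then one read from the volume server.
+func readFile(master, fid string) ([]byte, error) {
+    volumeID := strings.SplitN(fid, ",", 2)[0]
+    resp, err := http.Get(fmt.Sprintf("http://%s/dir/lookup?volumeId=%s", master, volumeID))
+    if err != nil {
+        return nil, err
+    }
+    defer resp.Body.Close()
+    var loc lookupResult
+    if err := json.NewDecoder(resp.Body).Decode(&loc); err != nil {
+        return nil, err
+    }
+    if len(loc.Locations) == 0 {
+        return nil, fmt.Errorf("volume %s not found", volumeID)
+    }
+    // The volume server answers this with a single disk read.
+    fileResp, err := http.Get(fmt.Sprintf("http://%s/%s", loc.Locations[0].URL, fid))
+    if err != nil {
+        return nil, err
+    }
+    defer fileResp.Body.Close()
+    return ioutil.ReadAll(fileResp.Body)
+}
+
+func main() {
+    data, err := readFile("localhost:9333", "3,01637037d6")
+    fmt.Println(len(data), err)
+}
+```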
@@ -125,6 +124,7 @@ SeaweedFS can work very well with just the object store. [[Filer]] can then be a
[Hadoop]: https://github.com/chrislusf/seaweedfs/wiki/Hadoop-Compatible-File-System
[WebDAV]: https://github.com/chrislusf/seaweedfs/wiki/WebDAV
[ErasureCoding]: https://github.com/chrislusf/seaweedfs/wiki/Erasure-coding-for-warm-storage
+[CloudTier]: https://github.com/chrislusf/seaweedfs/wiki/Cloud-Tier

[Back to TOC](#table-of-contents)

@@ -318,6 +318,16 @@ Each individual file size is limited to the volume size.

All file meta information stored on a volume server is readable from memory without disk access. Each file takes just a 16-byte map entry of <64bit key, 32bit offset, 32bit size>. Of course, each map entry has its own space cost for the map. But usually the disk space runs out before the memory does.

+### Tiered Storage to the cloud ###
+
+The local volume servers are much faster, while cloud storage has elastic capacity and is actually more cost-efficient if not accessed often (usually free to upload, but relatively costly to access). With the append-only structure and O(1) access time, SeaweedFS can take advantage of both local and cloud storage by offloading the warm data to the cloud.
+
+Usually hot data are fresh and warm data are old. SeaweedFS puts newly created volumes on local servers, and optionally uploads the older volumes to the cloud. If the older data are accessed less often, this effectively gives you unlimited capacity with a limited number of local servers, while keeping new data fast to access.
+
+With the O(1) access time, the network latency cost is kept to a minimum.
+
+If the hot data to warm data ratio is 20/80, then with 20 servers you can achieve the storage capacity of 100 servers. That's a cost saving of 80%! Or you can repurpose the 80 servers to store new data too, and get 5X the storage throughput.
+
[Back to TOC](#table-of-contents)

## Compared to Other File Systems ##

@@ -344,7 +354,7 @@ The architectures are mostly the same. SeaweedFS aims to store and read files fa

* SeaweedFS optimizes for small files, ensuring O(1) disk seek operation, and can also handle large files.
* SeaweedFS statically assigns a volume id for a file. Locating file content becomes just a lookup of the volume id, which can be easily cached.
-* SeaweedFS Filer metadata store can be any well-known and proven data stores, e.g., Cassandra, Redis, Etcd, MySql, Postgres, etc, and is easy to customized.
+* SeaweedFS Filer metadata store can be any well-known and proven data store, e.g., Cassandra, Redis, Etcd, MySql, Postgres, MemSql, TiDB, CockroachDB, etc., and is easy to customize.
* SeaweedFS Volume server also communicates directly with clients via HTTP, supporting range queries, direct uploads, etc.

| System | File Meta | File Content Read | POSIX | REST API | Optimized for small files |

@@ -376,7 +386,7 @@ Ceph uses CRUSH hashing to automatically manage the data placement. SeaweedFS pl

SeaweedFS is optimized for small files. Small files are stored as one continuous block of content, with at most 8 unused bytes between files. Small file access is O(1) disk read.
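+
+As a rough sketch of why small file access stays O(1): a volume server keeps the 16-byte index entries described earlier (`<64bit key, 32bit offset, 32bit size>`) in memory, so a read is one in-memory lookup plus one positioned disk read. The types below are illustrative assumptions, not SeaweedFS's actual code; the offset is assumed to be stored in 8-byte units, matching the "at most 8 unused bytes between files" padding.
+
+```go
+package sketch
+
+import (
+    "fmt"
+    "io"
+)
+
+// needleEntry is one in-memory index record:
+// <64bit key, 32bit offset, 32bit size> = 16 bytes per file.
+type needleEntry struct {
+    Key    uint64 // file key within the volume
+    Offset uint32 // position in the volume file, in 8-byte units
+    Size   uint32 // stored size in bytes
+}
+
+// read touches the disk only once: the metadata lookup stays in memory.
+func read(idx map[uint64]needleEntry, volume io.ReaderAt, key uint64) ([]byte, error) {
+    e, ok := idx[key]
+    if !ok {
+        return nil, fmt.Errorf("needle %d not found", key)
+    }
+    buf := make([]byte, e.Size)
+    _, err := volume.ReadAt(buf, int64(e.Offset)*8) // offsets are 8-byte aligned
+    return buf, err
+}
+```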
-SeaweedFS Filer uses off-the-shelf stores, such as MySql, Postgres, Redis, Etcd, Cassandra, to manage file directories. There are proven, scalable, and easier to manage.
+SeaweedFS Filer uses off-the-shelf stores, such as MySql, Postgres, Redis, Etcd, Cassandra, MemSql, TiDB, CockroachDB, to manage file directories. They are proven, scalable, and easier to manage.

| SeaweedFS | comparable to Ceph | advantage |
| ------------- | ------------- | ---------------- |

@@ -513,6 +523,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

+The text of this page is available for modification and reuse under the terms of the Creative Commons Attribution-ShareAlike 3.0 Unported License and the GNU Free Documentation License (unversioned, with no invariant sections, front-cover texts, or back-cover texts).
+
[Back to TOC](#table-of-contents)

## Stargazers over time ##

diff --git a/docker/Dockerfile.go_build b/docker/Dockerfile.go_build
index 85cbb6143..306ce3aa1 100644
--- a/docker/Dockerfile.go_build
+++ b/docker/Dockerfile.go_build
@@ -1,5 +1,15 @@
-FROM golang:latest
-RUN go get github.com/chrislusf/seaweedfs/weed
+FROM frolvlad/alpine-glibc as builder
+RUN apk add git go g++
+RUN mkdir -p /go/src/github.com/chrislusf/
+RUN git clone https://github.com/chrislusf/seaweedfs /go/src/github.com/chrislusf/seaweedfs
+RUN cd /go/src/github.com/chrislusf/seaweedfs/weed && go install
+
+FROM alpine AS final
+LABEL author="Chris Lu"
+COPY --from=builder /root/go/bin/weed /usr/bin/
+RUN mkdir -p /etc/seaweedfs
+COPY --from=builder /go/src/github.com/chrislusf/seaweedfs/docker/filer.toml /etc/seaweedfs/filer.toml
+COPY --from=builder /go/src/github.com/chrislusf/seaweedfs/docker/entrypoint.sh /entrypoint.sh

# volume server grpc port
EXPOSE 18080

@@ -20,10 +30,6 @@ RUN mkdir -p /data/filerldb2

VOLUME /data

-RUN mkdir -p /etc/seaweedfs
-RUN cp /go/src/github.com/chrislusf/seaweedfs/docker/filer.toml /etc/seaweedfs/filer.toml
-RUN cp /go/src/github.com/chrislusf/seaweedfs/docker/entrypoint.sh /entrypoint.sh
RUN chmod +x /entrypoint.sh

-RUN cp /go/bin/weed /usr/bin/
ENTRYPOINT ["/entrypoint.sh"]

diff --git a/docker/README.md b/docker/README.md
index cfe281e71..1a2833c7e 100644
--- a/docker/README.md
+++ b/docker/README.md
@@ -11,11 +11,21 @@

docker-compose -f seaweedfs-compose.yml -p seaweedfs up

```

-## Development
+## Try the latest tip
+
+```bash
+
+wget https://raw.githubusercontent.com/chrislusf/seaweedfs/master/docker/seaweedfs-dev-compose.yml
+
+docker-compose -f seaweedfs-dev-compose.yml -p seaweedfs up
+
+```
+
+## Local Development

```bash
cd $GOPATH/src/github.com/chrislusf/seaweedfs/docker
-docker-compose -f dev-compose.yml -p seaweedfs up
+docker-compose -f local-dev-compose.yml -p seaweedfs up
```

diff --git a/docker/entrypoint.sh b/docker/entrypoint.sh
index c28bd263c..791527d3a 100755
--- a/docker/entrypoint.sh
+++ b/docker/entrypoint.sh
@@ -3,7 +3,7 @@

case "$1" in

'master')
- ARGS="-ip `hostname -i` -mdir /data"
+ ARGS="-mdir /data"
# Is this instance linked with another master?
(Docker commandline "--link master1:master") if [ -n "$MASTER_PORT_9333_TCP_ADDR" ] ; then ARGS="$ARGS -peers=$MASTER_PORT_9333_TCP_ADDR:$MASTER_PORT_9333_TCP_PORT" diff --git a/docker/dev-compose.yml b/docker/local-dev-compose.yml similarity index 75% rename from docker/dev-compose.yml rename to docker/local-dev-compose.yml index 0306b3cb0..c2f588a60 100644 --- a/docker/dev-compose.yml +++ b/docker/local-dev-compose.yml @@ -8,7 +8,7 @@ services: ports: - 9333:9333 - 19333:19333 - command: "master" + command: "master -ip=master" volume: build: context: . @@ -16,7 +16,7 @@ services: ports: - 8080:8080 - 18080:18080 - command: 'volume -max=5 -mserver="master:9333" -port=8080' + command: '-v=2 volume -max=5 -mserver="master:9333" -port=8080 -ip=volume' depends_on: - master filer: @@ -26,7 +26,7 @@ services: ports: - 8888:8888 - 18888:18888 - command: 'filer -master="master:9333"' + command: '-v=4 filer -master="master:9333"' depends_on: - master - volume @@ -36,7 +36,7 @@ services: dockerfile: Dockerfile.go_build ports: - 8333:8333 - command: 's3 -filer="filer:8888"' + command: '-v=4 s3 -filer="filer:8888"' depends_on: - master - volume diff --git a/docker/seaweedfs-compose.yml b/docker/seaweedfs-compose.yml index d66b921bb..7f0cbc6f9 100644 --- a/docker/seaweedfs-compose.yml +++ b/docker/seaweedfs-compose.yml @@ -6,7 +6,7 @@ services: ports: - 9333:9333 - 19333:19333 - command: "master" + command: "master -ip=master" volume: image: chrislusf/seaweedfs # use a remote image ports: diff --git a/docker/seaweedfs-dev-compose.yml b/docker/seaweedfs-dev-compose.yml new file mode 100644 index 000000000..765770084 --- /dev/null +++ b/docker/seaweedfs-dev-compose.yml @@ -0,0 +1,35 @@ +version: '2' + +services: + master: + image: chrislusf/seaweedfs:dev # use a remote dev image + ports: + - 9333:9333 + - 19333:19333 + command: "master -ip=master" + volume: + image: chrislusf/seaweedfs:dev # use a remote dev image + ports: + - 8080:8080 + - 18080:18080 + command: '-v=2 volume -max=5 -mserver="master:9333" -port=8080 -ip=volume' + depends_on: + - master + filer: + image: chrislusf/seaweedfs:dev # use a remote dev image + ports: + - 8888:8888 + - 18888:18888 + command: '-v=4 filer -master="master:9333"' + depends_on: + - master + - volume + s3: + image: chrislusf/seaweedfs:dev # use a remote dev image + ports: + - 8333:8333 + command: '-v=4 s3 -filer="filer:8888"' + depends_on: + - master + - volume + - filer diff --git a/go.mod b/go.mod index 69e814bbc..48879fd8c 100644 --- a/go.mod +++ b/go.mod @@ -4,21 +4,10 @@ go 1.12 require ( cloud.google.com/go v0.44.3 - contrib.go.opencensus.io/exporter/aws v0.0.0-20190807220307-c50fb1bd7f21 // indirect - contrib.go.opencensus.io/exporter/ocagent v0.6.0 // indirect - contrib.go.opencensus.io/exporter/stackdriver v0.12.5 // indirect - contrib.go.opencensus.io/resource v0.1.2 // indirect - github.com/Azure/azure-amqp-common-go v1.1.4 // indirect github.com/Azure/azure-pipeline-go v0.2.2 // indirect - github.com/Azure/azure-sdk-for-go v33.0.0+incompatible // indirect github.com/Azure/azure-storage-blob-go v0.8.0 - github.com/Azure/go-autorest v13.0.0+incompatible // indirect - github.com/Azure/go-autorest/tracing v0.5.0 // indirect github.com/DataDog/zstd v1.4.1 // indirect - github.com/GoogleCloudPlatform/cloudsql-proxy v0.0.0-20190828224159-d93c53a4824c // indirect github.com/Shopify/sarama v1.23.1 - github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 // indirect - github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4 // indirect 
github.com/aws/aws-sdk-go v1.23.13 github.com/chrislusf/raft v0.0.0-20190225081310-10d6e2182d92 github.com/coreos/etcd v3.3.15+incompatible // indirect @@ -28,37 +17,34 @@ require ( github.com/disintegration/imaging v1.6.1 github.com/dustin/go-humanize v1.0.0 github.com/eapache/go-resiliency v1.2.0 // indirect - github.com/gabriel-vasile/mimetype v0.3.17 - github.com/go-kit/kit v0.9.0 // indirect + github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a + github.com/facebookgo/stats v0.0.0-20151006221625-1b76add642e4 + github.com/frankban/quicktest v1.7.2 // indirect + github.com/gabriel-vasile/mimetype v1.0.0 github.com/go-redis/redis v6.15.2+incompatible github.com/go-sql-driver/mysql v1.4.1 github.com/gocql/gocql v0.0.0-20190829130954-e163eff7a8c6 github.com/gogo/protobuf v1.2.2-0.20190730201129-28a6bbf47e48 // indirect github.com/golang/protobuf v1.3.2 github.com/google/btree v1.0.0 - github.com/google/pprof v0.0.0-20190723021845-34ac40c74b70 // indirect + github.com/google/uuid v1.1.1 github.com/gorilla/mux v1.7.3 github.com/gorilla/websocket v1.4.1 // indirect github.com/grpc-ecosystem/grpc-gateway v1.11.0 // indirect + github.com/hashicorp/golang-lru v0.5.3 // indirect github.com/jacobsa/daemonize v0.0.0-20160101105449-e460293e890f github.com/jcmturner/gofork v1.0.0 // indirect - github.com/juju/errors v0.0.0-20190930114154-d42613fe1ab9 // indirect github.com/karlseguin/ccache v2.0.3+incompatible github.com/karlseguin/expect v1.0.1 // indirect github.com/klauspost/cpuid v1.2.1 // indirect github.com/klauspost/crc32 v1.2.0 github.com/klauspost/reedsolomon v1.9.2 github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect - github.com/kr/pty v1.1.8 // indirect github.com/kurin/blazer v0.5.3 github.com/lib/pq v1.2.0 github.com/magiconair/properties v1.8.1 // indirect github.com/mattn/go-ieproxy v0.0.0-20190805055040-f9202b1cfdeb // indirect - github.com/mattn/go-isatty v0.0.9 // indirect github.com/mattn/go-runewidth v0.0.4 // indirect - github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect - github.com/nats-io/gnatsd v1.4.1 // indirect - github.com/nats-io/go-nats v1.7.2 // indirect github.com/nats-io/nats-server/v2 v2.0.4 // indirect github.com/onsi/ginkgo v1.10.1 // indirect github.com/onsi/gomega v1.7.0 // indirect @@ -75,10 +61,7 @@ require ( github.com/rakyll/statik v0.1.6 github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563 // indirect github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237 // indirect - github.com/rogpeppe/fastuuid v1.2.0 // indirect - github.com/rogpeppe/go-internal v1.3.1 // indirect github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd - github.com/satori/go.uuid v1.2.0 github.com/seaweedfs/fuse v0.0.0-20190510212405-310228904eff github.com/sirupsen/logrus v1.4.2 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect @@ -86,26 +69,21 @@ require ( github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/viper v1.4.0 github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271 // indirect - github.com/stretchr/testify v1.4.0 // indirect + github.com/stretchr/testify v1.4.0 github.com/syndtr/goleveldb v1.0.0 github.com/tidwall/gjson v1.3.2 github.com/tidwall/match v1.0.1 - github.com/twinj/uuid v1.0.0 // indirect github.com/uber-go/atomic v1.4.0 // indirect github.com/uber/jaeger-client-go v2.17.0+incompatible // indirect github.com/uber/jaeger-lib v2.0.0+incompatible // indirect - github.com/ugorji/go v1.1.7 // indirect github.com/willf/bitset v1.1.10 // indirect 
github.com/willf/bloom v2.0.3+incompatible github.com/wsxiaoys/terminal v0.0.0-20160513160801-0940f3fc43a0 // indirect go.etcd.io/etcd v3.3.15+incompatible - go.mongodb.org/mongo-driver v1.1.0 // indirect gocloud.dev v0.16.0 gocloud.dev/pubsub/natspubsub v0.16.0 gocloud.dev/pubsub/rabbitpubsub v0.16.0 - golang.org/x/exp v0.0.0-20190829153037-c13cbed26979 // indirect golang.org/x/image v0.0.0-20190829233526-b3c06291d021 // indirect - golang.org/x/mobile v0.0.0-20190830201351-c6da95954960 // indirect golang.org/x/net v0.0.0-20190909003024-a7b16738d86b golang.org/x/sys v0.0.0-20190910064555-bbd175535a8b golang.org/x/tools v0.0.0-20190911022129-16c5e0f7d110 @@ -115,8 +93,7 @@ require ( gopkg.in/jcmturner/goidentity.v3 v3.0.0 // indirect gopkg.in/jcmturner/gokrb5.v7 v7.3.0 // indirect gopkg.in/karlseguin/expect.v1 v1.0.1 // indirect - honnef.co/go/tools v0.0.1-2019.2.2 // indirect - pack.ag/amqp v0.12.1 // indirect + sigs.k8s.io/yaml v1.1.0 // indirect ) replace github.com/satori/go.uuid v1.2.0 => github.com/satori/go.uuid v0.0.0-20181028125025-b2ce2384e17b diff --git a/go.sum b/go.sum index 703e3819b..d16280568 100644 --- a/go.sum +++ b/go.sum @@ -1,29 +1,17 @@ -bazil.org/fuse v0.0.0-20180421153158-65cc252bf669/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.37.4/go.mod h1:NHPJ89PdicEuT9hdPXMROBD91xc5uRDxsMtSB16k7hw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.39.0/go.mod h1:rVLT6fkc8chs9sfPtFc1SBH6em7n+ZoXaG+87tDISts= -cloud.google.com/go v0.43.0 h1:banaiRPAM8kUVYneOSkhgcDsLzEvL25FinuiSZaH/2w= -cloud.google.com/go v0.43.0/go.mod h1:BOSR3VbTLkk6FDC/TcffxP4NF/FFBGA5ku+jvKOP7pg= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.3 h1:0sMegbmn/8uTwpNkB0q9cLEpZ2W5a6kl+wtBQgPWBJQ= cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= contrib.go.opencensus.io/exporter/aws v0.0.0-20181029163544-2befc13012d0/go.mod h1:uu1P0UCM/6RbsMrgPa98ll8ZcHM858i/AD06a9aLRCA= -contrib.go.opencensus.io/exporter/aws v0.0.0-20190807220307-c50fb1bd7f21/go.mod h1:uu1P0UCM/6RbsMrgPa98ll8ZcHM858i/AD06a9aLRCA= -contrib.go.opencensus.io/exporter/ocagent v0.4.12/go.mod h1:450APlNTSR6FrvC3CTRqYosuDstRB9un7SOx2k/9ckA= +contrib.go.opencensus.io/exporter/ocagent v0.5.0 h1:TKXjQSRS0/cCDrP7KvkgU6SmILtF/yV2TOs/02K/WZQ= contrib.go.opencensus.io/exporter/ocagent v0.5.0/go.mod h1:ImxhfLRpxoYiSq891pBrLVhN+qmP8BTVvdH2YLs7Gl0= -contrib.go.opencensus.io/exporter/ocagent v0.6.0/go.mod h1:zmKjrJcdo0aYcVS7bmEeSEBLPA9YJp5bjrofdU3pIXs= -contrib.go.opencensus.io/exporter/stackdriver v0.11.0/go.mod h1:hA7rlmtavV03FGxzWXAPBUnZeZBhWN/QYQAuMtxc9Bk= contrib.go.opencensus.io/exporter/stackdriver v0.12.1/go.mod h1:iwB6wGarfphGGe/e5CWqyUk/cLzKnWsOKPVW3no6OTw= -contrib.go.opencensus.io/exporter/stackdriver v0.12.5/go.mod h1:8x999/OcIPy5ivx/wDiV7Gx4D+VUPODf0mWRGRc5kSk= contrib.go.opencensus.io/integrations/ocsql v0.1.4/go.mod h1:8DsSdjz3F+APR+0z0WkU1aRorQCFfRxvqjUUPMbF3fE= -contrib.go.opencensus.io/resource v0.0.0-20190131005048-21591786a5e0/go.mod h1:F361eGI91LCmW1I/Saf+rX0+OFcigGlFvXwEGEnkRLA= contrib.go.opencensus.io/resource v0.1.1/go.mod h1:F361eGI91LCmW1I/Saf+rX0+OFcigGlFvXwEGEnkRLA= -contrib.go.opencensus.io/resource 
v0.1.2/go.mod h1:F361eGI91LCmW1I/Saf+rX0+OFcigGlFvXwEGEnkRLA= -github.com/Azure/azure-amqp-common-go v1.1.3/go.mod h1:FhZtXirFANw40UXI2ntweO+VOkfaw8s6vZxUiRhLYW8= -github.com/Azure/azure-amqp-common-go v1.1.4/go.mod h1:FhZtXirFANw40UXI2ntweO+VOkfaw8s6vZxUiRhLYW8= github.com/Azure/azure-amqp-common-go/v2 v2.1.0/go.mod h1:R8rea+gJRuJR6QxTir/XuEd+YuKoUiazDC/N96FiDEU= github.com/Azure/azure-pipeline-go v0.1.8/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg= github.com/Azure/azure-pipeline-go v0.1.9/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg= @@ -31,26 +19,14 @@ github.com/Azure/azure-pipeline-go v0.2.1 h1:OLBdZJ3yvOn2MezlWvbrBMTEUQC72zAftRZ github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4= github.com/Azure/azure-pipeline-go v0.2.2 h1:6oiIS9yaG6XCCzhgAgKFfIWyo4LLCiDhZot6ltoThhY= github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc= -github.com/Azure/azure-sdk-for-go v21.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go v27.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v29.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v30.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go v33.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-service-bus-go v0.4.1/go.mod h1:d9ho9e/06euiTwGpKxmlbpPhFUsfCsq6a4tZ68r51qI= github.com/Azure/azure-service-bus-go v0.9.1/go.mod h1:yzBx6/BUGfjfeqbRZny9AQIbIe3AcV9WZbAdpkoXOa0= github.com/Azure/azure-storage-blob-go v0.6.0/go.mod h1:oGfmITT1V6x//CswqY2gtAHND+xIP64/qL7a5QJix0Y= -github.com/Azure/azure-storage-blob-go v0.7.0 h1:MuueVOYkufCxJw5YZzF842DY2MBsp+hLuh2apKY0mck= -github.com/Azure/azure-storage-blob-go v0.7.0/go.mod h1:f9YQKtsG1nMisotuTPpO0tjNuEjKRYAcJU8/ydDI++4= github.com/Azure/azure-storage-blob-go v0.8.0 h1:53qhf0Oxa0nOjgbDeeYPUeyiNmafAFEY95rZLK0Tj6o= github.com/Azure/azure-storage-blob-go v0.8.0/go.mod h1:lPI3aLPpuLTeUwh1sViKXFxwl2B6teiRqI0deQUvsw0= -github.com/Azure/go-autorest v11.0.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest v11.1.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest v11.1.2+incompatible h1:viZ3tV5l4gE2Sw0xrasFHytCGtzYCrT+um/rrSQ1BfA= -github.com/Azure/go-autorest v11.1.2+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest v12.0.0+incompatible h1:N+VqClcomLGD/sHb3smbSYYtNMgKpVV3Cd5r5i8z6bQ= github.com/Azure/go-autorest v12.0.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest v13.0.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest/tracing v0.1.0/go.mod h1:ROEEAFwXycQw7Sn3DXNtEedEvdeRAgDr0izn4z5Ij88= -github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= @@ -58,12 +34,8 @@ github.com/DataDog/zstd v1.3.6-0.20190409195224-796139022798 h1:2T/jmrHeTezcCM58 github.com/DataDog/zstd 
v1.3.6-0.20190409195224-796139022798/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= github.com/DataDog/zstd v1.4.1 h1:3oxKN3wbHibqx897utPC2LTQU4J+IHWWJO+glkAkpFM= github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= -github.com/GoogleCloudPlatform/cloudsql-proxy v0.0.0-20190418212003-6ac0b49e7197/go.mod h1:aJ4qN3TfrelA6NZ6AXsXRfmEVaYin3EDbSPJrKS8OXo= github.com/GoogleCloudPlatform/cloudsql-proxy v0.0.0-20190605020000-c4ba1fdf4d36/go.mod h1:aJ4qN3TfrelA6NZ6AXsXRfmEVaYin3EDbSPJrKS8OXo= -github.com/GoogleCloudPlatform/cloudsql-proxy v0.0.0-20190828224159-d93c53a4824c/go.mod h1:mjwGPas4yKduTyubHvD1Atl9r1rUq8DfVy+gkVvZ+oo= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/OneOfOne/xxhash v1.2.5/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= -github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/sarama v1.23.1 h1:XxJBCZEoWJtoWjf/xRbmGUpAmTZGnuuF0ON0EvxxBrs= github.com/Shopify/sarama v1.23.1/go.mod h1:XLH1GYJnLVE0XCr6KdJGVJRTwY30moWNJ4sERjXX6fs= github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc= @@ -71,19 +43,11 @@ github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMx github.com/StackExchange/wmi v0.0.0-20180725035823-b12b22c5341f h1:5ZfJxyXo8KyX8DgGXC5B7ILL8y51fci/qYz2B4j8iLY= github.com/StackExchange/wmi v0.0.0-20180725035823-b12b22c5341f/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/aws/aws-sdk-go v1.15.27/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= -github.com/aws/aws-sdk-go v1.18.6/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.19.16/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.19.18/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.19.45/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.21.4 h1:1xB+x6Dzev8ETmeHEiSfUVbIzmC/0EyFfXMkJpzKPCE= -github.com/aws/aws-sdk-go v1.21.4/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.22.1/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.23.13 h1:l/NG+mgQFRGG3dsFzEj0jw9JIs/zYdtU6MXhY1WIDmM= github.com/aws/aws-sdk-go v1.23.13/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= @@ -97,20 +61,20 @@ github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCS github.com/blacktear23/go-proxyprotocol v0.0.0-20180807104634-af7a81e8dd0d/go.mod h1:VKt7CNAQxpFpSDz3sXyj9hY/GbVsQCr0sB3w59nE7lU= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 
h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= +github.com/census-instrumentation/opencensus-proto v0.2.0 h1:LzQXZOgg4CQfE6bFvXGM30YZL1WW/M337pXml+GrcZ4= github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/chrislusf/raft v0.0.0-20190225081310-10d6e2182d92 h1:lM9SFsh0EPXkyJyrTJqLZPAIJBtNFP6LNkYXu2MnSZI= github.com/chrislusf/raft v0.0.0-20190225081310-10d6e2182d92/go.mod h1:4jyiUCD5y548+yKW+oiHtccBiMaLCCbFBpK2t7X4eUo= -github.com/chrislusf/seaweedfs v0.0.0-20190912032620-ae53f636804e h1:PmqW1XGq0V6KnwOFa3hOSqsqa/bH66zxWzCVMOo5Yi4= -github.com/chrislusf/seaweedfs v0.0.0-20190912032620-ae53f636804e/go.mod h1:e5Pz27e2DxLCFt6GbCBP5/qJygD4TkOL5xqSFYFq+2U= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20171208011716-f6d7a1f6fbf3/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd h1:qMd81Ts1T2OTKmB4acZcyKaMtRnY5Y44NuXGX2GFJ1w= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/coreos/bbolt v1.3.2 h1:wZwiHHUieZCquLkDL0B8UhzreNWsPHooDAG3q34zk0s= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/bbolt v1.3.3 h1:n6AiVyVRKQFNb6mJlwESEvvLoDyiTzXX7ORAUlkeBdY= github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible h1:jFneRYjIvLMLhDLCzuTuU4rSJUjRplcJQ7pD7MnhC04= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= @@ -119,6 +83,7 @@ github.com/coreos/etcd v3.3.15+incompatible h1:+9RjdC18gMxNQVvSiXvObLu29mOFmkgds github.com/coreos/etcd v3.3.15+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-semver v0.2.0 h1:3Jm3tLmsgAYcjC+4Up7hJrFBPr+n7rAqYeSw/SZazuY= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20181031085051-9002847aa142/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= @@ -129,9 +94,9 @@ github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7 github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/cznic/mathutil 
v0.0.0-20181122101859-297441e03548 h1:iwZdTE0PVqJCos1vaoKsclOGD3ADKpshg3SRtYBbwso= github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM= +github.com/cznic/sortutil v0.0.0-20150617083342-4c7342852e65 h1:hxuZop6tSoOi0sxFzoGGYdRqNrPubyaIf9KoBG9tPiE= github.com/cznic/sortutil v0.0.0-20150617083342-4c7342852e65/go.mod h1:q2w6Bg5jeox1B+QkJ6Wp/+Vn0G/bo3f1uY7Fn3vivIQ= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= @@ -142,10 +107,7 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZm github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f h1:dDxpBYafY/GYpcl+LS4Bn3ziLPuEdGRkRjYAbSlWxSA= github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/dgryski/go-sip13 v0.0.0-20190329191031-25c5027a8c7b/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= -github.com/disintegration/imaging v1.6.0 h1:nVPXRUUQ36Z7MNf0O77UzgnOb1mkMMor7lmJMJXc/mA= -github.com/disintegration/imaging v1.6.0/go.mod h1:xuIt+sRxDFrHS0drzXUlCJthkJ8k7lkkUojDSR247MQ= github.com/disintegration/imaging v1.6.1 h1:JnBbK6ECIZb1NsWIikP9pd8gIlTIRx7fuDNpU9fsxOE= github.com/disintegration/imaging v1.6.1/go.mod h1:xuIt+sRxDFrHS0drzXUlCJthkJ8k7lkkUojDSR247MQ= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= @@ -160,27 +122,28 @@ github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8 github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385 h1:clC1lXBpe2kTj2VHdaIu9ajZQe4kcEY9j0NsnDDBZ3o= github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM= -github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= -github.com/envoyproxy/go-control-plane v0.8.6/go.mod h1:XB9+ce7x+IrsjgIVnRnql0O61gj/np0/bGDfhJI3sCU= -github.com/envoyproxy/protoc-gen-validate v0.0.0-20190405222122-d6164de49109/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a h1:yDWHCSQ40h88yih2JAcL6Ls/kVkSE8GFACTGVnMPruw= +github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a/go.mod h1:7Ga40egUymuWXxAe151lTNnCv97MddSOVsjpPPkityA= +github.com/facebookgo/stats v0.0.0-20151006221625-1b76add642e4 h1:0YtRCqIZs2+Tz49QuH6cJVw/IFqzo39gEqZ0iYLxD2M= +github.com/facebookgo/stats v0.0.0-20151006221625-1b76add642e4/go.mod h1:vsJz7uE339KUCpBXx3JAJzSRH7Uk4iGGyJzR529qDIA= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fortytw2/leaktest v1.2.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= 
+github.com/frankban/quicktest v1.7.2 h1:2QxQoC1TS09S7fhCPsrvqYdvP1H5M1P1ih5ABm3BTYk= +github.com/frankban/quicktest v1.7.2/go.mod h1:jaStnuzAqU1AJdCO0l53JDCJrVDKcS03DbaAcR7Ks/o= github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/gabriel-vasile/mimetype v0.3.15 h1:qSK8E/VAF4pxtkxqarYRAVvYNDyCFJXKAYAyGNcESII= -github.com/gabriel-vasile/mimetype v0.3.15/go.mod h1:kMJbg3SlWZCsj4R73F1WDzbT9AyGCOVmUtIxxwO5pmI= github.com/gabriel-vasile/mimetype v0.3.17 h1:NGWgggJJqTofUcTV1E7hkk2zVjZ54EfJa1z5O3z6By4= github.com/gabriel-vasile/mimetype v0.3.17/go.mod h1:kMJbg3SlWZCsj4R73F1WDzbT9AyGCOVmUtIxxwO5pmI= +github.com/gabriel-vasile/mimetype v1.0.0 h1:0QKnAQQhG6oOsb4GK7iPlet7RtjHi9us8RF/nXoTxhI= +github.com/gabriel-vasile/mimetype v1.0.0/go.mod h1:6CDPel/o/3/s4+bp6kIbsWATq8pmgOisOPG40CJa6To= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32/go.mod h1:GIjDIg/heH5DOkXY3YJ/wNhfHsQHoXGjl8G8amsYQ1I= github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= -github.com/go-ini/ini v1.46.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-ole/go-ole v1.2.1 h1:2lOsA72HgjxAuMlKpFiCbHTvu44PIVkZ5hqm3RSdI/E= @@ -192,12 +155,8 @@ github.com/go-sql-driver/mysql v0.0.0-20170715192408-3955978caca4/go.mod h1:zAC/ github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA= github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/gocql/gocql v0.0.0-20190717234527-2ba2dd7440dc h1:m9VsbhR3h7mWKHLh5a+Q8LvBdWEjA6dgY1arxhxvQrU= -github.com/gocql/gocql v0.0.0-20190717234527-2ba2dd7440dc/go.mod h1:Q7Sru5153KG8D9zwueuQJB3ccJf9/bIwF/x8b3oKgT8= github.com/gocql/gocql v0.0.0-20190829130954-e163eff7a8c6 h1:P66kRWyEoIx6URKgAC3ijx9jo9gEid7bEhLQ/Z0G65A= github.com/gocql/gocql v0.0.0-20190829130954-e163eff7a8c6/go.mod h1:Q7Sru5153KG8D9zwueuQJB3ccJf9/bIwF/x8b3oKgT8= -github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= -github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU= github.com/gogo/protobuf v0.0.0-20180717141946-636bf0302bc9/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.0.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= @@ -212,6 +171,7 @@ github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4er github.com/golang/groupcache v0.0.0-20181024230925-c65c006176ff/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef h1:veQD95Isof8w9/WXiA+pa3tz3fJXkt5B7QaRBrM62gk= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= 
+github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM39fZuwSd1LwSqqSW0hOdXCYYDX0R3I= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= @@ -232,8 +192,11 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-replayers/grpcreplay v0.1.0 h1:eNb1y9rZFmY4ax45uEEECSa8fsxGRU+8Bil52ASAwic= github.com/google/go-replayers/grpcreplay v0.1.0/go.mod h1:8Ig2Idjpr6gifRd6pNVggX6TC1Zw6Jx74AKp7QNH2QE= +github.com/google/go-replayers/httpreplay v0.1.0 h1:AX7FUb4BjrrzNvblr/OlgwrmFiep6soj5K2QSDW7BGk= github.com/google/go-replayers/httpreplay v0.1.0/go.mod h1:YKZViNhiGgqdBlUbI2MwGpq4pXxNmhJLPHQ7cv2b5no= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= @@ -241,15 +204,11 @@ github.com/google/martian v2.1.1-0.20190517191504-25dcb96d9e51+incompatible h1:x github.com/google/martian v2.1.1-0.20190517191504-25dcb96d9e51+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190723021845-34ac40c74b70/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/shlex v0.0.0-20181106134648-c34317bd91bf/go.mod h1:RpwtwJQFrIEPstU94h88MWPXP2ektJZ8cZ0YntAmXiE= github.com/google/subcommands v1.0.1/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/wire v0.2.2 h1:fSIRzE/K12IaNgV6X0173X/oLrTwHKRiMcFZhiDrN3s= -github.com/google/wire v0.2.2/go.mod h1:7FHVg6mFpFQrjeUZrm+BaD50N5jnDKm50uVPTpyYOmU= github.com/google/wire v0.3.0 h1:imGQZGEVEHpje5056+K+cgdO72p0LQv2xIIFXNGUf60= github.com/google/wire v0.3.0/go.mod h1:i1DMg/Lu8Sz5yYl25iOdmc5CT5qusaa+zmRWs16741s= github.com/googleapis/gax-go v2.0.2+incompatible h1:silFMLAnr330+NRuag/VjIGF7TLp/LBrV2CJKFLWEww= @@ -267,6 +226,7 @@ github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY github.com/gorilla/websocket v1.2.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.1 h1:q7AeDBpnBk8AogcD4DSag/Ukw/KV+YhzLj2bP5HvKCM= github.com/gorilla/websocket 
v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 h1:Iju5GlWwrvL6UBg4zJJt3btmonfrMlCDdsejg4CZE7c= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= @@ -280,7 +240,7 @@ github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t github.com/grpc-ecosystem/grpc-gateway v1.9.0 h1:bM6ZAFZmc/wPFaRDi0d5L7hGEZEx/2u+Tmr2evNHDiI= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.2/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.9.4/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.11.0 h1:aT5ISUniaOTErogCQ+4pGoYNBB6rm6Fq3g1v8QwYGas= github.com/grpc-ecosystem/grpc-gateway v1.11.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= @@ -305,15 +265,14 @@ github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/U github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/joeslay/seaweedfs v0.0.0-20190912104409-d8c34b032fb6/go.mod h1:ljVry+CyFSNBLlKiell2UlxOKCvXXHjyBhiGDzXa+0c= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/juju/errors v0.0.0-20190930114154-d42613fe1ab9 h1:hJix6idebFclqlfZCHE7EUX7uqLCyb70nHNHH1XKGBg= -github.com/juju/errors v0.0.0-20190930114154-d42613fe1ab9/go.mod h1:W54LbzXuIE0boCoNJfwqpmkKJ1O4TCTZMetAt6jGk7Q= +github.com/juju/ratelimit v1.0.1 h1:+7AIFJVQ0EQgq/K9+0Krm7m530Du7tIz0METWzN0RgY= github.com/juju/ratelimit v1.0.1/go.mod h1:qapgC/Gy+xNh9UxzV13HGGl/6UXNN+ct+vwSgWNm/qk= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/karlseguin/ccache v2.0.3+incompatible h1:j68C9tWOROiOLWTS/kCGg9IcJG+ACqn5+0+t8Oh83UU= @@ -339,17 +298,13 @@ github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.0.0/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text 
v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kurin/blazer v0.5.3 h1:SAgYv0TKU0kN/ETfO5ExjNAPyMt2FocO2s/UlCHfjAk= github.com/kurin/blazer v0.5.3/go.mod h1:4FCXMUWo9DllR2Do4TtBd377ezyAJ51vB5uTBjt0pGU= -github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.2.0 h1:LXpIM/LZ5xGFhOpXAQUIMM1HdyqzVYM13zNdjCEEcA0= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= -github.com/lyft/protoc-gen-validate v0.1.0/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDePerRcY= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4= @@ -363,7 +318,6 @@ github.com/mattn/go-ieproxy v0.0.0-20190805055040-f9202b1cfdeb h1:hXqqXzQtJbENrs github.com/mattn/go-ieproxy v0.0.0-20190805055040-f9202b1cfdeb/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.3 h1:a+kO+98RDGEfo6asOGMmpodZq4FNtnGP54yps8BzLR4= github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= @@ -377,20 +331,20 @@ github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrk github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/montanaflynn/stats v0.0.0-20151014174947-eeaced052adb/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= +github.com/montanaflynn/stats v0.0.0-20180911141734-db72e6cae808 h1:pmpDGKLw4n82EtrNiLqB+xSz/JQwFOaZuMALYUHwX5s= github.com/montanaflynn/stats v0.0.0-20180911141734-db72e6cae808/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/nats-io/gnatsd v1.4.1 h1:RconcfDeWpKCD6QIIwiVFcvForlXpWeJP7i5/lDLy44= -github.com/nats-io/gnatsd v1.4.1/go.mod h1:nqco77VO78hLCJpIcVfygDP2rPGfsEHkGTUk94uh5DQ= -github.com/nats-io/go-nats v1.7.2 
h1:cJujlwCYR8iMz5ofZSD/p2WLW8FabhkQ2lIEVbSvNSA= -github.com/nats-io/go-nats v1.7.2/go.mod h1:+t7RHT5ApZebkrQdnn6AhQJmhJJiKAvJUio1PiiCtj0= github.com/nats-io/jwt v0.2.6/go.mod h1:mQxQ0uHQ9FhEVPIcTSKwx2lqZEpXWWcCgA7R6NrWvvY= +github.com/nats-io/jwt v0.2.14 h1:wA50KvFz/JXGXMHRygTWsRGh/ixxgC5E3kHvmtGLNf4= github.com/nats-io/jwt v0.2.14/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= github.com/nats-io/nats-server/v2 v2.0.0/go.mod h1:RyVdsHHvY4B6c9pWG+uRLpZ0h0XsqiuKp2XCTurP5LI= +github.com/nats-io/nats-server/v2 v2.0.4 h1:XOMeQRbhl1lGNTIctPhih6pTa15NGif54Uas6ZW5q7g= github.com/nats-io/nats-server/v2 v2.0.4/go.mod h1:AWdGEVbjKRS9ZIx4DSP5eKW48nfFm7q3uiSkP/1KD7M= github.com/nats-io/nats.go v1.8.1 h1:6lF/f1/NN6kzUDBz6pyvQDEXO39jqXcWRLu/tKjtOUQ= github.com/nats-io/nats.go v1.8.1/go.mod h1:BrFz9vVn0fU3AcH9Vn4Kd7W0NpJ651tD5omQ3M8LwxM= @@ -400,7 +354,9 @@ github.com/nats-io/nkeys v0.1.0 h1:qMd4+pRHgdr1nAClu+2h/2a5F2TmKcCzjCDazVgRoX4= github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= +github.com/ngaut/pools v0.0.0-20180318154953-b7bc8c42aac7 h1:7KAv7KMGTTqSmYZtNdcNTgsos+vFzULLwyElndwn+5c= github.com/ngaut/pools v0.0.0-20180318154953-b7bc8c42aac7/go.mod h1:iWMfgwqYW+e8n5lC/jjNEhwcjbRDpl5NT7n2h+4UNcI= +github.com/ngaut/sync2 v0.0.0-20141008032647-7a24ed77b2ef h1:K0Fn+DoFqNqktdZtdV3bPQ/0cuYh2H4rkg0tytX/07k= github.com/ngaut/sync2 v0.0.0-20141008032647-7a24ed77b2ef/go.mod h1:7WjlapSfwQyo6LNmIvEWzsW1hbBQfpUO4JWnuQRmva8= github.com/nicksnyder/go-i18n v1.10.0/go.mod h1:HrK7VCrbOvQoUAQ7Vpy7i87N7JZZZ7R2xBGjv0j365Q= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= @@ -408,17 +364,18 @@ github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:v github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo= github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/opentracing/basictracer-go v1.0.0 h1:YyUAhaEfjoWXclZVJ9sGoNct7j4TVk7lZWlQw5UXuoo= github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= -github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= github.com/pelletier/go-toml v1.2.0/go.mod 
h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.3.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo= @@ -427,12 +384,11 @@ github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUr github.com/peterh/liner v1.1.0 h1:f+aAedNJA6uk7+6rXsYBnhdo4Xux7ESLe+kcuVUF5os= github.com/peterh/liner v1.1.0/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0= github.com/pierrec/lz4 v0.0.0-20190327172049-315a67e90e41/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= -github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= -github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I= -github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4 v2.2.7+incompatible h1:Eerk9aiqeZo2QzsbWOAsELUf9ddvAxEdMY9LYze/DEc= github.com/pierrec/lz4 v2.2.7+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8 h1:USx2/E1bX46VG32FIw034Au6seQ2fY9NEILmNh/UlQg= github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8/go.mod h1:B1+S9LNcuMyLH/4HMTViQOJevkGiik3wW2AN9zb2fNQ= +github.com/pingcap/errcode v0.0.0-20180921232412-a1a7271709d9 h1:KH4f4Si9XK6/IW50HtoaiLIFHGkapOM6w83za47UYik= github.com/pingcap/errcode v0.0.0-20180921232412-a1a7271709d9/go.mod h1:4b2X8xSqxIroj/IZ9MX/VGZhAwc11wB9wRIzHvz6SeM= github.com/pingcap/errors v0.11.0/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= @@ -452,11 +408,8 @@ github.com/pingcap/parser v0.0.0-20191021083151-7c64f78a5100 h1:TRyps2d+2TsJv1Vk github.com/pingcap/parser v0.0.0-20191021083151-7c64f78a5100/go.mod h1:1FNvfp9+J0wvc4kl8eGNh7Rqrxveg15jJoWo/a0uHwA= github.com/pingcap/pd v1.1.0-beta.0.20190923032047-5c648dc365e0 h1:GIEq+wZfrl2bcJxpuSrEH4H7/nlf5YdmpS+dU9lNIt8= github.com/pingcap/pd v1.1.0-beta.0.20190923032047-5c648dc365e0/go.mod h1:G/6rJpnYwM0LKMec2rI82/5Kg6GaZMvlfB+e6/tvYmI= -github.com/pingcap/pd v2.1.17+incompatible h1:mpfJYffRC14jeAfiq0jbHkqXVc8ZGNV0Lr2xG1sJslw= github.com/pingcap/tidb v1.1.0-beta.0.20191023070859-58fc7d44f73b h1:6GfcYOX9/CCxPnNOivVxiDYXbZrCHU1mRp691iw9EYs= github.com/pingcap/tidb v1.1.0-beta.0.20191023070859-58fc7d44f73b/go.mod h1:YfrHdQ613A+E2FSugyXOdJmeZQbXNjpXX2doNe8MGj8= -github.com/pingcap/tidb v2.0.11+incompatible h1:Shz+ry1DzQNsPk1QAejnM+5tgjbwZuzPnIER5aCjQ6c= -github.com/pingcap/tidb v2.0.11+incompatible/go.mod h1:I8C6jrPINP2rrVunTRd7C9fRRhQrtR43S1/CL5ix/yQ= github.com/pingcap/tidb-tools v2.1.3-0.20190321065848-1e8b48f5c168+incompatible h1:MkWCxgZpJBgY2f4HtwWMMFzSBb3+JPzeJgF3VrXE/bU= github.com/pingcap/tidb-tools v2.1.3-0.20190321065848-1e8b48f5c168+incompatible/go.mod h1:XGdcy9+yqlDSEMTpOXnwf3hiTeqrV6MN/u1se9N8yIM= github.com/pingcap/tipb v0.0.0-20191015023537-709b39e7f8bb/go.mod h1:RtkHW8WbcNxj8lsbzjaILci01CtYnYbIkQhjyZWrWVI= @@ -466,13 +419,11 @@ github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= -github.com/pkg/profile v1.3.0/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod 
h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= @@ -481,7 +432,6 @@ github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQ github.com/prometheus/client_model v0.0.0-20170216185247-6f3806018612/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM= @@ -489,7 +439,6 @@ github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1: github.com/prometheus/common v0.0.0-20180518154759-7600349dcfe1/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181020173914-7e9e6cabbd39/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= @@ -497,7 +446,6 @@ github.com/prometheus/common v0.6.0 h1:kRhiuYSXR3+uv2IbVbZhUxK5zVD/2pp3Gd2PpvPkp github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= github.com/prometheus/procfs v0.0.0-20180612222113-7d6f385de8be/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -505,7 +453,6 @@ github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDa github.com/prometheus/procfs v0.0.4 
h1:w8DjqFMJDjuVwdZBQoOozr4MVWOnwF7RcL/7uxBjY78= github.com/prometheus/procfs v0.0.4/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= -github.com/prometheus/tsdb v0.10.0/go.mod h1:oi49uRhEe9dPUTlS3JRZOwJuVi6tmh10QSgwXEyGCt4= github.com/rakyll/statik v0.1.6 h1:uICcfUXpgqtw2VopbIncslhAmE5hwc4g20TEyEENBNs= github.com/rakyll/statik v0.1.6/go.mod h1:OEi9wJV/fMUAGx1eNjq75DKDsJVuEv1U0oYdX6GX8Zs= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ= @@ -516,13 +463,8 @@ github.com/remyoudompheng/bigfft v0.0.0-20190512091148-babf20351dd7/go.mod h1:qq github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237 h1:HQagqIiBmr8YXawX/le3+O26N+vPPC1PtjaF3mwnook= github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.3.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd h1:CmH9+J6ZSsIjUK3dcGsnCnO41eRBOnY12zwkn5qVwgc= github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd/go.mod h1:hPqNNc0+uJM6H+SuU8sEs5K5IQeKccPqeSjfgcKGgPk= -github.com/satori/go.uuid v0.0.0-20181028125025-b2ce2384e17b h1:8O/3dJ2dGfuLVN0bo2B0IdkG0L8cjpmFJ4r8eRQBCi8= -github.com/satori/go.uuid v0.0.0-20181028125025-b2ce2384e17b/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/seaweedfs/fuse v0.0.0-20190510212405-310228904eff h1:uLd5zBvf5OA67wcVRePHrFt60bR4LSskaVhgVwyk0Jg= github.com/seaweedfs/fuse v0.0.0-20190510212405-310228904eff/go.mod h1:cubdLmQFqEUZ9vNJrznhgc3m3VMAJi/nY2Ix2axXkG0= github.com/sergi/go-diff v1.0.1-0.20180205163309-da645544ed44/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= @@ -563,10 +505,12 @@ github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/struCoder/pidusage v0.1.2/go.mod h1:pWBlW3YuSwRl6h7R5KbvA4N8oOqe9LjaKW5CwT1SPjI= github.com/syndtr/goleveldb v0.0.0-20180815032940-ae2bd5eed72d/go.mod h1:Z4AUp2Km+PwemOoO/VB5AOx9XSsIItzFjoJlOSiYmn0= @@ -577,16 +521,14 @@ github.com/tidwall/gjson v1.3.2 h1:+7p3qQFaH3fOMXAJSrdZwGKcOO/lYdGS0HqGhPqDdTI= github.com/tidwall/gjson v1.3.2/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls= github.com/tidwall/match v1.0.1 
h1:PnKP62LPNxHKTwvHHZZzdOAOCtsJTjo6dZLCwpKm5xc= github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E= -github.com/tidwall/pretty v0.0.0-20190325153808-1166b9ac2b65/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20171017195756-830351dc03c6/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/twinj/uuid v1.0.0 h1:fzz7COZnDrXGTAOHGuUGYd6sG+JMq+AoE7+Jlu0przk= -github.com/twinj/uuid v1.0.0/go.mod h1:mMgcE1RHFUFqe5AfiwlINXisXfDGro23fWdPUfOMjRY= github.com/uber-go/atomic v1.3.2/go.mod h1:/Ct5t2lcmbJ4OSe/waGBoaVvVqtO0bmtfVNex1PFV8g= +github.com/uber-go/atomic v1.4.0 h1:yOuPqEq4ovnhEjpHmfFwsqBXDYbQeT6Nb0bwD6XnD5o= github.com/uber-go/atomic v1.4.0/go.mod h1:/Ct5t2lcmbJ4OSe/waGBoaVvVqtO0bmtfVNex1PFV8g= github.com/uber/jaeger-client-go v2.15.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-client-go v2.17.0+incompatible h1:35tpDuT3k0oBiN/aGoSWuiFaqKgKZSciSMnWrazhSHE= @@ -597,10 +539,9 @@ github.com/uber/jaeger-lib v2.0.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6 github.com/ugorji/go v1.1.2/go.mod h1:hnLbHMwcvSihnDhEfx2/BzKp2xb0Y+ErdfYcrs9tkJQ= github.com/ugorji/go v1.1.4 h1:j4s+tAvLfL3bZyefP2SEWmhBzmuIlH/eqNuPdFPgngw= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= -github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= github.com/ugorji/go/codec v0.0.0-20190204201341-e444a5086c43/go.mod h1:iT03XoTwV7xq/+UGwKO3UbC1nNNlopQiY61beSdrtOA= -github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= github.com/unrolled/render v0.0.0-20171102162132-65450fb6b2d3/go.mod h1:tu82oB5W2ykJRVioYsB+IQKcft7ryBr7w12qMBUPyXg= +github.com/unrolled/render v0.0.0-20180914162206-b9786414de4d h1:ggUgChAeyge4NZ4QUw6lhHsVymzwSDJOZcE0s2X8S20= github.com/unrolled/render v0.0.0-20180914162206-b9786414de4d/go.mod h1:tu82oB5W2ykJRVioYsB+IQKcft7ryBr7w12qMBUPyXg= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/negroni v0.3.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= @@ -618,17 +559,12 @@ github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1: github.com/yookoala/realpath v1.0.0/go.mod h1:gJJMA9wuX7AcqLy1+ffPatSCySA1FQ2S8Ya9AIoYBpE= go.etcd.io/bbolt v1.3.2 h1:Z/90sZLPOeCy2PwprqkFa25PdkusRzaj9P8zm/KNyvk= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/etcd v0.0.0-20190320044326-77d4b742cdbf/go.mod h1:KSGwdbiFchh5KIC9My2+ZVl5/3ANcwohw50dpPwa2cw= -go.etcd.io/etcd v3.3.13+incompatible h1:jCejD5EMnlGxFvcGRyEV4VGlENZc7oPQX6o0t7n3xbw= -go.etcd.io/etcd v3.3.13+incompatible/go.mod h1:yaeTdrJi5lOmYerz05bd8+V7KubZs8YSFZfzsF9A6aI= go.etcd.io/etcd v3.3.15+incompatible 
h1:0VpOVCF6EFnJptt8Jh0EWEHO4j2fepyV1fpu9xz/UoQ= go.etcd.io/etcd v3.3.15+incompatible/go.mod h1:yaeTdrJi5lOmYerz05bd8+V7KubZs8YSFZfzsF9A6aI= -go.mongodb.org/mongo-driver v1.0.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.mongodb.org/mongo-driver v1.1.0/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.opencensus.io v0.15.0/go.mod h1:UffZAU+4sDEINUGP/B7UfBBkq4fqLu9zXAX7ke6CHW0= -go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= -go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= @@ -642,44 +578,29 @@ go.uber.org/multierr v1.2.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/ go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -gocloud.dev v0.15.0 h1:Tl8dkOHWVZiYBYPxG2ouhpfmluoQGt3mY323DaAHaC8= -gocloud.dev v0.15.0/go.mod h1:ShXCyJaGrJu9y/7a6+DSCyBb9MFGZ1P5wwPa0Wu6w34= gocloud.dev v0.16.0 h1:hWeaQWxamGerwsU7B9xSWvUjx0p7TwG8fcHro2TzbbM= gocloud.dev v0.16.0/go.mod h1:xWGXD8t7bEhqPIuyAUFyXV9qHn+PvEY2F2GKPh7i/O0= -gocloud.dev/pubsub/natspubsub v0.15.0 h1:JarkPUp9xX9+A1v7VgZeY72bATZIQUzkyP1ANJ+bwU4= -gocloud.dev/pubsub/natspubsub v0.15.0/go.mod h1:zgjFYbmxa3Tiqlfp9BnZBULo+/lpK8vZPZ3YMG2MrkI= gocloud.dev/pubsub/natspubsub v0.16.0 h1:MoBGXULDzb1fVaZsGWO5cUCgr6yoI/DHhau8OPGaGEI= gocloud.dev/pubsub/natspubsub v0.16.0/go.mod h1:0n7pT7PkLMClBUHDrOkHfOFVr/o/6kawNMwsyAbwadI= -gocloud.dev/pubsub/rabbitpubsub v0.15.0 h1:Kl+NAY6nt1bUYZXQIbtCr/seoivwhGo7uc0L9XmOA+g= -gocloud.dev/pubsub/rabbitpubsub v0.15.0/go.mod h1:LGg5Acwcpry+GeLNaA01xm0Ij43YUis6kht2qRX2tg0= gocloud.dev/pubsub/rabbitpubsub v0.16.0 h1:Bkv2njMSl2tmT3tGbvbwpiIDAXBIpqzP9dmts+rhD4E= gocloud.dev/pubsub/rabbitpubsub v0.16.0/go.mod h1:JJVdUUIqwgaaMJg/1xHQza0g4sI/4KHHSNiGE+pn4JM= golang.org/x/crypto v0.0.0-20180608092829-8ac0e0d97ce4/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181001203147-e3636079e1a4/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= -golang.org/x/crypto v0.0.0-20190422183909-d864b10871cd/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5 h1:58fnuSXlxZmFdJyvtTFVmVhcMLU6v5fEb/ok4wyqtNU= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190829043050-9756ffdc2472 
h1:Gv7RPwsi3eZ2Fgewe3CBsuOebPwO27PoXzRpJPsvSSM= -golang.org/x/crypto v0.0.0-20190829043050-9756ffdc2472/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190909091759-094676da4a83/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7 h1:0hQKqeLdqlt5iIwVOBErRisrHJAN57yOiPRQItI20fU= golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190731235908-ec7cb31e5a56/go.mod h1:JhuoJpWY28nO4Vef9tZUw9qufEGTyX1+7lmHxV5q5G4= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067 h1:KYGJGHOQy8oSi1fDlSpcZF0+juKwk/hEMv5SiwHogR0= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/image v0.0.0-20190829233526-b3c06291d021 h1:j6QOxNFMpEL1wIQX6TUdBPNfGZKmBOJS/vfSm8a7tdM= golang.org/x/image v0.0.0-20190829233526-b3c06291d021/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -688,26 +609,16 @@ golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTk golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mobile v0.0.0-20190830201351-c6da95954960/go.mod h1:mJOp/i0LXPxJZ9weeIadcPqKVfS05Ai7m6/t9z1Hs/Y= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190322120337-addf6b3196f6/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190420063019-afa5a82059c6/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190424112056-4829fb13d2c6/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= @@ -715,17 +626,10 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80 h1:Ao/3l156eZf2AW5wK8a7/smtodRU+gha3+BeqJ69lRk= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297 h1:k7pJ2yAPLPgbskkFdhRCsA77k2fySZ1zf2zCjvQCiIM= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190909003024-a7b16738d86b h1:XfVGCX+0T4WOStkaOsJRllbsiImhB2jgVBGc9L0lPGc= golang.org/x/net v0.0.0-20190909003024-a7b16738d86b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190319182350-c85d3e98c914/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -740,7 +644,6 @@ golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -749,24 +652,17 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190508220229-2d0786266e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190620070143-6f217b454f45/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0 h1:HyfiK1WMnHj5FXFXatD+Qs1A/xC2Run6RzeW1SyHxpc= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190712062909-fae7ac547cb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190804053845-51ab0e2deafa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190830142957-1e83adbbebd0 h1:7z820YPX9pxWR59qM7BE5+fglp4D/mKqAwCvGt11b+8= -golang.org/x/sys v0.0.0-20190830142957-1e83adbbebd0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190909082730-f460065e899a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190910064555-bbd175535a8b h1:3S2h5FadpNr0zUUCVZjlKIEYF+KaX/OBplTGo89CYHI= golang.org/x/sys v0.0.0-20190910064555-bbd175535a8b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -774,7 +670,6 @@ golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod 
h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -787,22 +682,12 @@ golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190724185037-8aa4eac1a7c1 h1:JwHzEZwWOyWUIR+OxPKGQGUfuOp/feyTesu6DEwqvsM= -golang.org/x/tools v0.0.0-20190724185037-8aa4eac1a7c1/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190830223141-573d9926052a h1:XAHT1kdPpnU8Hk+FPi42KZFhtNFEk4vBg1U4OmIeHTU= -golang.org/x/tools v0.0.0-20190830223141-573d9926052a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911022129-16c5e0f7d110 h1:6S6bidS7O4yAwA5ORRbRIjvNQ9tGbLd5e+LRIaTeVDQ= golang.org/x/tools v0.0.0-20190911022129-16c5e0f7d110/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373 h1:PPwnA7z1Pjf7XYaBP9GL1VAMZmcIWyFz7QCMSIIa3Bg= -golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= -google.golang.org/api v0.3.2/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.5.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.6.0/go.mod h1:btoxGiFvQNVUZQ8W08zLtrVS08CNpINPEfxXxgJL1Q4= @@ -816,37 +701,27 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3I= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.2 h1:j8RI1yW0SkI+paT6uGwMlrMI/6zwYA6/CFil8rxOzGI= google.golang.org/appengine v1.6.2/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/genproto v0.0.0-20180608181217-32ee49c4dd80/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20181004005441-af9cb2a35e7f/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto 
v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190508193815-b515fa19cec8/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto v0.0.0-20190620144150-6af8c5fc6601/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= -google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610 h1:Ygq9/SRJX9+dU0WCIICM8RkWvDw03lvB77hrhJnpxfU= -google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190905072037-92dd089d5514 h1:oFSK4421fpCKRrpzIpybyBVWyht05NegY9+L/3TLAZs= google.golang.org/genproto v0.0.0-20190905072037-92dd089d5514/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/grpc v0.0.0-20180607172857-7a6a684ca69e/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= -google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.22.0 h1:J0UbZOIrCAl+fpTOf8YLs4dJo8L/owV4LYVtAXQoPkw= -google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0 h1:AzbTB6ux+okLTzP8Ru1Xs41C303zdcfEht7MQnYJt5A= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= @@ -857,9 +732,9 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= 
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= @@ -888,18 +763,14 @@ gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bl gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.2/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -istio.io/gogo-genproto v0.0.0-20190731221249-06e20ada0df2/go.mod h1:IjvrbUlRbbw4JCpsgvgihcz9USUwEoNTL/uwMtyV5yk= -istio.io/gogo-genproto v0.0.0-20190826122855-47f00599b597/go.mod h1:uKtbae4K9k2rjjX4ToV0l6etglbc1i7gqQ94XdkshzY= -pack.ag/amqp v0.8.0/go.mod h1:4/cbmt4EJXSKlG6LCfWHoqmN0uFdy5i/+YFz+fTfhV4= -pack.ag/amqp v0.11.0/go.mod h1:4/cbmt4EJXSKlG6LCfWHoqmN0uFdy5i/+YFz+fTfhV4= pack.ag/amqp v0.11.2/go.mod h1:4/cbmt4EJXSKlG6LCfWHoqmN0uFdy5i/+YFz+fTfhV4= -pack.ag/amqp v0.12.1/go.mod h1:4/cbmt4EJXSKlG6LCfWHoqmN0uFdy5i/+YFz+fTfhV4= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sourcegraph.com/sourcegraph/appdash v0.0.0-20180531100431-4c381bd170b4 h1:VO9oZbbkvTwqLimlQt15QNdOOBArT2dw/bvzsMZBiqQ= sourcegraph.com/sourcegraph/appdash v0.0.0-20180531100431-4c381bd170b4/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= sourcegraph.com/sourcegraph/appdash-data v0.0.0-20151005221446-73f23eafcf67/go.mod h1:L5q+DGLGOQFpo1snNEkLOJT2d1YTW66rWNzatr3He1k= diff --git a/other/java/client/pom.xml b/other/java/client/pom.xml index 47742ab8d..0c585a941 100644 --- a/other/java/client/pom.xml +++ b/other/java/client/pom.xml @@ -4,7 +4,7 @@ com.github.chrislusf seaweedfs-client - 1.2.3 + 1.2.4 org.sonatype.oss diff --git a/other/java/client/src/main/java/seaweedfs/client/FilerClient.java b/other/java/client/src/main/java/seaweedfs/client/FilerClient.java index a1e3cdb89..84aa26ad9 100644 --- a/other/java/client/src/main/java/seaweedfs/client/FilerClient.java +++ b/other/java/client/src/main/java/seaweedfs/client/FilerClient.java @@ -7,6 +7,7 @@ import java.nio.file.Path; import java.nio.file.Paths; import java.util.ArrayList; import java.util.Arrays; +import java.util.Iterator; import java.util.List; public class FilerClient { @@ -173,17 +174,18 @@ public class FilerClient { } public List listEntries(String path, String entryPrefix, String 
lastEntryName, int limit) { - List entries = filerGrpcClient.getBlockingStub().listEntries(FilerProto.ListEntriesRequest.newBuilder() + Iterator iter = filerGrpcClient.getBlockingStub().listEntries(FilerProto.ListEntriesRequest.newBuilder() .setDirectory(path) .setPrefix(entryPrefix) .setStartFromFileName(lastEntryName) .setLimit(limit) - .build()).getEntriesList(); - List fixedEntries = new ArrayList<>(entries.size()); - for (FilerProto.Entry entry : entries) { - fixedEntries.add(fixEntryAfterReading(entry)); + .build()); + List entries = new ArrayList<>(); + while (iter.hasNext()){ + FilerProto.ListEntriesResponse resp = iter.next(); + entries.add(fixEntryAfterReading(resp.getEntry())); } - return fixedEntries; + return entries; } public FilerProto.Entry lookupEntry(String directory, String entryName) { diff --git a/other/java/client/src/main/java/seaweedfs/client/SeaweedRead.java b/other/java/client/src/main/java/seaweedfs/client/SeaweedRead.java index 2efa64580..b08c14467 100644 --- a/other/java/client/src/main/java/seaweedfs/client/SeaweedRead.java +++ b/other/java/client/src/main/java/seaweedfs/client/SeaweedRead.java @@ -63,7 +63,7 @@ public class SeaweedRead { if (!chunkView.isFullChunk) { request.setHeader(HttpHeaders.ACCEPT_ENCODING, ""); request.setHeader(HttpHeaders.RANGE, - String.format("bytes=%d-%d", chunkView.offset, chunkView.offset + chunkView.size)); + String.format("bytes=%d-%d", chunkView.offset, chunkView.offset + chunkView.size - 1)); } try { diff --git a/other/java/client/src/main/proto/filer.proto b/other/java/client/src/main/proto/filer.proto index 18ccca44f..6357d971f 100644 --- a/other/java/client/src/main/proto/filer.proto +++ b/other/java/client/src/main/proto/filer.proto @@ -12,7 +12,7 @@ service SeaweedFiler { rpc LookupDirectoryEntry (LookupDirectoryEntryRequest) returns (LookupDirectoryEntryResponse) { } - rpc ListEntries (ListEntriesRequest) returns (ListEntriesResponse) { + rpc ListEntries (ListEntriesRequest) returns (stream ListEntriesResponse) { } rpc CreateEntry (CreateEntryRequest) returns (CreateEntryResponse) { @@ -64,7 +64,7 @@ message ListEntriesRequest { } message ListEntriesResponse { - repeated Entry entries = 1; + Entry entry = 1; } message Entry { @@ -123,9 +123,11 @@ message FuseAttributes { message CreateEntryRequest { string directory = 1; Entry entry = 2; + bool o_excl = 3; } message CreateEntryResponse { + string error = 1; } message UpdateEntryRequest { diff --git a/other/java/hdfs2/dependency-reduced-pom.xml b/other/java/hdfs2/dependency-reduced-pom.xml index 3b964951e..d818bc878 100644 --- a/other/java/hdfs2/dependency-reduced-pom.xml +++ b/other/java/hdfs2/dependency-reduced-pom.xml @@ -127,7 +127,7 @@ - 1.2.3 + 1.2.4 2.9.2 diff --git a/other/java/hdfs2/pom.xml b/other/java/hdfs2/pom.xml index 7782ccbe2..b8c8cb891 100644 --- a/other/java/hdfs2/pom.xml +++ b/other/java/hdfs2/pom.xml @@ -5,7 +5,7 @@ 4.0.0 - 1.2.3 + 1.2.4 2.9.2 diff --git a/other/java/hdfs3/dependency-reduced-pom.xml b/other/java/hdfs3/dependency-reduced-pom.xml index 6a12b1617..ca53ffd22 100644 --- a/other/java/hdfs3/dependency-reduced-pom.xml +++ b/other/java/hdfs3/dependency-reduced-pom.xml @@ -127,7 +127,7 @@ - 1.2.3 + 1.2.4 3.1.1 diff --git a/other/java/hdfs3/pom.xml b/other/java/hdfs3/pom.xml index 2af787767..f5207213c 100644 --- a/other/java/hdfs3/pom.xml +++ b/other/java/hdfs3/pom.xml @@ -5,7 +5,7 @@ 4.0.0 - 1.2.3 + 1.2.4 3.1.1 diff --git a/unmaintained/change_superblock/change_superblock.go b/unmaintained/change_superblock/change_superblock.go index 
07d9b94e4..afe651c4e 100644 --- a/unmaintained/change_superblock/change_superblock.go +++ b/unmaintained/change_superblock/change_superblock.go @@ -8,8 +8,9 @@ import ( "strconv" "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/storage" + "github.com/chrislusf/seaweedfs/weed/storage/backend" "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" ) var ( @@ -47,9 +48,10 @@ func main() { if err != nil { glog.Fatalf("Open Volume Data File [ERROR]: %v", err) } - defer datFile.Close() + datBackend := backend.NewDiskFile(datFile) + defer datBackend.Close() - superBlock, err := storage.ReadSuperBlock(datFile) + superBlock, err := super_block.ReadSuperBlock(datBackend) if err != nil { glog.Fatalf("cannot parse existing super block: %v", err) @@ -61,7 +63,7 @@ func main() { hasChange := false if *targetReplica != "" { - replica, err := storage.NewReplicaPlacementFromString(*targetReplica) + replica, err := super_block.NewReplicaPlacementFromString(*targetReplica) if err != nil { glog.Fatalf("cannot parse target replica %s: %v", *targetReplica, err) diff --git a/unmaintained/fix_dat/fix_dat.go b/unmaintained/fix_dat/fix_dat.go index a72a78eed..d6110d870 100644 --- a/unmaintained/fix_dat/fix_dat.go +++ b/unmaintained/fix_dat/fix_dat.go @@ -9,8 +9,9 @@ import ( "strconv" "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/storage" + "github.com/chrislusf/seaweedfs/weed/storage/backend" "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" "github.com/chrislusf/seaweedfs/weed/storage/types" "github.com/chrislusf/seaweedfs/weed/util" ) @@ -44,11 +45,13 @@ func main() { glog.Fatalf("Read Volume Index %v", err) } defer indexFile.Close() - datFile, err := os.OpenFile(path.Join(*fixVolumePath, fileName+".dat"), os.O_RDONLY, 0644) + datFileName := path.Join(*fixVolumePath, fileName+".dat") + datFile, err := os.OpenFile(datFileName, os.O_RDONLY, 0644) if err != nil { glog.Fatalf("Read Volume Data %v", err) } - defer datFile.Close() + datBackend := backend.NewDiskFile(datFile) + defer datBackend.Close() newDatFile, err := os.Create(path.Join(*fixVolumePath, fileName+".dat_fixed")) if err != nil { @@ -56,21 +59,21 @@ func main() { } defer newDatFile.Close() - superBlock, err := storage.ReadSuperBlock(datFile) + superBlock, err := super_block.ReadSuperBlock(datBackend) if err != nil { glog.Fatalf("Read Volume Data superblock %v", err) } newDatFile.Write(superBlock.Bytes()) - iterateEntries(datFile, indexFile, func(n *needle.Needle, offset int64) { + iterateEntries(datBackend, indexFile, func(n *needle.Needle, offset int64) { fmt.Printf("needle id=%v name=%s size=%d dataSize=%d\n", n.Id, string(n.Name), n.Size, n.DataSize) - _, s, _, e := n.Append(newDatFile, superBlock.Version()) + _, s, _, e := n.Append(datBackend, superBlock.Version) fmt.Printf("size %d error %v\n", s, e) }) } -func iterateEntries(datFile, idxFile *os.File, visitNeedle func(n *needle.Needle, offset int64)) { +func iterateEntries(datBackend backend.BackendStorageFile, idxFile *os.File, visitNeedle func(n *needle.Needle, offset int64)) { // start to read index file var readerOffset int64 bytes := make([]byte, 16) @@ -78,14 +81,14 @@ func iterateEntries(datFile, idxFile *os.File, visitNeedle func(n *needle.Needle readerOffset += int64(count) // start to read dat file - superBlock, err := storage.ReadSuperBlock(datFile) + superBlock, err := 
super_block.ReadSuperBlock(datBackend) if err != nil { fmt.Printf("cannot read dat file super block: %v", err) return } offset := int64(superBlock.BlockSize()) - version := superBlock.Version() - n, _, rest, err := needle.ReadNeedleHeader(datFile, version, offset) + version := superBlock.Version + n, _, rest, err := needle.ReadNeedleHeader(datBackend, version, offset) if err != nil { fmt.Printf("cannot read needle header: %v", err) return @@ -115,7 +118,7 @@ func iterateEntries(datFile, idxFile *os.File, visitNeedle func(n *needle.Needle fmt.Println("Recovered in f", r) } }() - if _, err = n.ReadNeedleBody(datFile, version, offset+int64(types.NeedleHeaderSize), rest); err != nil { + if _, err = n.ReadNeedleBody(datBackend, version, offset+int64(types.NeedleHeaderSize), rest); err != nil { fmt.Printf("cannot read needle body: offset %d body %d %v\n", offset, rest, err) } }() @@ -127,7 +130,7 @@ func iterateEntries(datFile, idxFile *os.File, visitNeedle func(n *needle.Needle offset += types.NeedleHeaderSize + rest //fmt.Printf("==> new entry offset %d\n", offset) - if n, _, rest, err = needle.ReadNeedleHeader(datFile, version, offset); err != nil { + if n, _, rest, err = needle.ReadNeedleHeader(datBackend, version, offset); err != nil { if err == io.EOF { return } diff --git a/unmaintained/remove_duplicate_fids/remove_duplicate_fids.go b/unmaintained/remove_duplicate_fids/remove_duplicate_fids.go index bbb6f6d9a..84173a663 100644 --- a/unmaintained/remove_duplicate_fids/remove_duplicate_fids.go +++ b/unmaintained/remove_duplicate_fids/remove_duplicate_fids.go @@ -8,7 +8,9 @@ import ( "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/storage" + "github.com/chrislusf/seaweedfs/weed/storage/backend" "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" ) var ( @@ -23,15 +25,16 @@ func Checksum(n *needle.Needle) string { type VolumeFileScanner4SeeDat struct { version needle.Version - block storage.SuperBlock + block super_block.SuperBlock - dir string - hashes map[string]bool - dat *os.File + dir string + hashes map[string]bool + dat *os.File + datBackend backend.BackendStorageFile } -func (scanner *VolumeFileScanner4SeeDat) VisitSuperBlock(superBlock storage.SuperBlock) error { - scanner.version = superBlock.Version() +func (scanner *VolumeFileScanner4SeeDat) VisitSuperBlock(superBlock super_block.SuperBlock) error { + scanner.version = superBlock.Version scanner.block = superBlock return nil @@ -42,13 +45,14 @@ func (scanner *VolumeFileScanner4SeeDat) ReadNeedleBody() bool { func (scanner *VolumeFileScanner4SeeDat) VisitNeedle(n *needle.Needle, offset int64, needleHeader, needleBody []byte) error { - if scanner.dat == nil { - newDatFile, err := os.Create(filepath.Join(*volumePath, "dat_fixed")) + if scanner.datBackend == nil { + newFileName := filepath.Join(*volumePath, "dat_fixed") + newDatFile, err := os.Create(newFileName) if err != nil { glog.Fatalf("Write New Volume Data %v", err) } - scanner.dat = newDatFile - scanner.dat.Write(scanner.block.Bytes()) + scanner.datBackend = backend.NewDiskFile(newDatFile) + scanner.datBackend.WriteAt(scanner.block.Bytes(), 0) } checksum := Checksum(n) @@ -59,7 +63,7 @@ func (scanner *VolumeFileScanner4SeeDat) VisitNeedle(n *needle.Needle, offset in } scanner.hashes[checksum] = true - _, s, _, e := n.Append(scanner.dat, scanner.version) + _, s, _, e := n.Append(scanner.datBackend, scanner.version) fmt.Printf("size %d error %v\n", s, e) return nil diff --git 
a/unmaintained/repeated_vacuum/repeated_vacuum.go b/unmaintained/repeated_vacuum/repeated_vacuum.go index 28bcabb9b..718b6faa1 100644 --- a/unmaintained/repeated_vacuum/repeated_vacuum.go +++ b/unmaintained/repeated_vacuum/repeated_vacuum.go @@ -7,10 +7,8 @@ import ( "log" "math/rand" - "github.com/chrislusf/seaweedfs/weed/security" - "github.com/spf13/viper" - "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/util" ) @@ -23,7 +21,7 @@ func main() { flag.Parse() util.LoadConfiguration("security", false) - grpcDialOption := security.LoadClientTLS(viper.Sub("grpc"), "client") + grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client") for i := 0; i < *repeat; i++ { assignResult, err := operation.Assign(*master, grpcDialOption, &operation.VolumeAssignRequest{Count: 1}) diff --git a/unmaintained/see_dat/see_dat.go b/unmaintained/see_dat/see_dat.go index 84a06c625..efc58e751 100644 --- a/unmaintained/see_dat/see_dat.go +++ b/unmaintained/see_dat/see_dat.go @@ -7,6 +7,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/storage" "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" ) var ( @@ -19,8 +20,8 @@ type VolumeFileScanner4SeeDat struct { version needle.Version } -func (scanner *VolumeFileScanner4SeeDat) VisitSuperBlock(superBlock storage.SuperBlock) error { - scanner.version = superBlock.Version() +func (scanner *VolumeFileScanner4SeeDat) VisitSuperBlock(superBlock super_block.SuperBlock) error { + scanner.version = superBlock.Version return nil } diff --git a/unmaintained/volume_tailer/volume_tailer.go b/unmaintained/volume_tailer/volume_tailer.go index f0ef51c09..3c2d36d22 100644 --- a/unmaintained/volume_tailer/volume_tailer.go +++ b/unmaintained/volume_tailer/volume_tailer.go @@ -25,7 +25,7 @@ func main() { flag.Parse() util2.LoadConfiguration("security", false) - grpcDialOption := security.LoadClientTLS(viper.Sub("grpc"), "client") + grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client") vid := needle.VolumeId(*volumeId) diff --git a/weed/command/backup.go b/weed/command/backup.go index 505de4ae6..eb2b5ba4a 100644 --- a/weed/command/backup.go +++ b/weed/command/backup.go @@ -5,8 +5,8 @@ import ( "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" "github.com/chrislusf/seaweedfs/weed/util" - "github.com/spf13/viper" "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/storage" @@ -64,7 +64,7 @@ var cmdBackup = &Command{ func runBackup(cmd *Command, args []string) bool { util.LoadConfiguration("security", false) - grpcDialOption := security.LoadClientTLS(viper.Sub("grpc"), "client") + grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client") if *s.volumeId == -1 { return false @@ -98,15 +98,15 @@ func runBackup(cmd *Command, args []string) bool { return true } } - var replication *storage.ReplicaPlacement + var replication *super_block.ReplicaPlacement if *s.replication != "" { - replication, err = storage.NewReplicaPlacementFromString(*s.replication) + replication, err = super_block.NewReplicaPlacementFromString(*s.replication) if err != nil { fmt.Printf("Error generate volume %d replication %s : %v\n", vid, *s.replication, err) return true } } else { - replication, err = 
storage.NewReplicaPlacementFromString(stats.Replication) + replication, err = super_block.NewReplicaPlacementFromString(stats.Replication) if err != nil { fmt.Printf("Error get volume %d replication %s : %v\n", vid, stats.Replication, err) return true @@ -119,7 +119,7 @@ func runBackup(cmd *Command, args []string) bool { } if v.SuperBlock.CompactionRevision < uint16(stats.CompactRevision) { - if err = v.Compact(0, 0); err != nil { + if err = v.Compact2(30 * 1024 * 1024 * 1024); err != nil { fmt.Printf("Compact Volume before synchronizing %v\n", err) return true } @@ -128,7 +128,7 @@ func runBackup(cmd *Command, args []string) bool { return true } v.SuperBlock.CompactionRevision = uint16(stats.CompactRevision) - v.DataFile().WriteAt(v.SuperBlock.Bytes(), 0) + v.DataBackend.WriteAt(v.SuperBlock.Bytes(), 0) } datSize, _, _ := v.FileStat() diff --git a/weed/command/benchmark.go b/weed/command/benchmark.go index 26be1fe3a..382e7c850 100644 --- a/weed/command/benchmark.go +++ b/weed/command/benchmark.go @@ -15,7 +15,6 @@ import ( "sync" "time" - "github.com/spf13/viper" "google.golang.org/grpc" "github.com/chrislusf/seaweedfs/weed/glog" @@ -109,7 +108,7 @@ var ( func runBenchmark(cmd *Command, args []string) bool { util.LoadConfiguration("security", false) - b.grpcDialOption = security.LoadClientTLS(viper.Sub("grpc"), "client") + b.grpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client") fmt.Printf("This is SeaweedFS version %s %s %s\n", util.VERSION, runtime.GOOS, runtime.GOARCH) if *b.maxCpu < 1 { diff --git a/weed/command/compact.go b/weed/command/compact.go index 4a54f5670..85313b749 100644 --- a/weed/command/compact.go +++ b/weed/command/compact.go @@ -17,6 +17,9 @@ var cmdCompact = &Command{ The compacted .dat file is stored as .cpd file. The compacted .idx file is stored as .cpx file. + For method=0, it compacts based on the .dat file, works if .idx file is corrupted. + For method=1, it compacts based on the .idx file, works if deletion happened but not written to .dat files. 
+ `, } @@ -47,7 +50,7 @@ func runCompact(cmd *Command, args []string) bool { glog.Fatalf("Compact Volume [ERROR] %s\n", err) } } else { - if err = v.Compact2(); err != nil { + if err = v.Compact2(preallocate); err != nil { glog.Fatalf("Compact Volume [ERROR] %s\n", err) } } diff --git a/weed/command/export.go b/weed/command/export.go index d3a765e09..8d664ad3b 100644 --- a/weed/command/export.go +++ b/weed/command/export.go @@ -4,6 +4,7 @@ import ( "archive/tar" "bytes" "fmt" + "io" "os" "path" "path/filepath" @@ -12,11 +13,11 @@ import ( "text/template" "time" - "io" - "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/storage" "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/needle_map" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" "github.com/chrislusf/seaweedfs/weed/storage/types" ) @@ -89,12 +90,12 @@ func printNeedle(vid needle.VolumeId, n *needle.Needle, version needle.Version, type VolumeFileScanner4Export struct { version needle.Version counter int - needleMap *storage.NeedleMap + needleMap *needle_map.MemDb vid needle.VolumeId } -func (scanner *VolumeFileScanner4Export) VisitSuperBlock(superBlock storage.SuperBlock) error { - scanner.version = superBlock.Version() +func (scanner *VolumeFileScanner4Export) VisitSuperBlock(superBlock super_block.SuperBlock) error { + scanner.version = superBlock.Version return nil } @@ -192,15 +193,10 @@ func runExport(cmd *Command, args []string) bool { fileName = *export.collection + "_" + fileName } vid := needle.VolumeId(*export.volumeId) - indexFile, err := os.OpenFile(path.Join(*export.dir, fileName+".idx"), os.O_RDONLY, 0644) - if err != nil { - glog.Fatalf("Create Volume Index [ERROR] %s\n", err) - } - defer indexFile.Close() - needleMap, err := storage.LoadBtreeNeedleMap(indexFile) - if err != nil { - glog.Fatalf("cannot load needle map from %s: %s", indexFile.Name(), err) + needleMap := needle_map.NewMemDb() + if err := needleMap.LoadFromIdx(path.Join(*export.dir, fileName+".idx")); err != nil { + glog.Fatalf("cannot load needle map from %s.idx: %s", fileName, err) } volumeFileScanner := &VolumeFileScanner4Export{ diff --git a/weed/command/filer.go b/weed/command/filer.go index b1ceb46f5..ea8392fac 100644 --- a/weed/command/filer.go +++ b/weed/command/filer.go @@ -6,14 +6,13 @@ import ( "strings" "time" - "github.com/chrislusf/seaweedfs/weed/security" - "github.com/spf13/viper" + "google.golang.org/grpc/reflection" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/server" "github.com/chrislusf/seaweedfs/weed/util" - "google.golang.org/grpc/reflection" ) var ( @@ -145,7 +144,7 @@ func (fo *FilerOptions) startFiler() { if err != nil { glog.Fatalf("failed to listen on grpc port %d: %v", grpcPort, err) } - grpcS := util.NewGrpcServer(security.LoadServerTLS(viper.Sub("grpc"), "filer")) + grpcS := util.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.filer")) filer_pb.RegisterSeaweedFilerServer(grpcS, fs) reflection.Register(grpcS) go grpcS.Serve(grpcL) diff --git a/weed/command/filer_copy.go b/weed/command/filer_copy.go index 9995cf6aa..e5979d786 100644 --- a/weed/command/filer_copy.go +++ b/weed/command/filer_copy.go @@ -14,13 +14,13 @@ import ( "sync" "time" + "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" 
"github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/util" "github.com/chrislusf/seaweedfs/weed/wdclient" - "github.com/spf13/viper" - "google.golang.org/grpc" ) var ( @@ -105,7 +105,7 @@ func runCopy(cmd *Command, args []string) bool { filerGrpcPort := filerPort + 10000 filerGrpcAddress := fmt.Sprintf("%s:%d", filerUrl.Hostname(), filerGrpcPort) - copy.grpcDialOption = security.LoadClientTLS(viper.Sub("grpc"), "client") + copy.grpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client") ctx := context.Background() @@ -331,7 +331,7 @@ func (worker *FileCopyWorker) uploadFileAsOne(ctx context.Context, task FileCopy }, } - if _, err := client.CreateEntry(ctx, request); err != nil { + if err := filer_pb.CreateEntry(ctx, client, request); err != nil { return fmt.Errorf("update fh: %v", err) } return nil @@ -378,7 +378,7 @@ func (worker *FileCopyWorker) uploadFileInChunks(ctx context.Context, task FileC uploadResult, err := operation.Upload(targetUrl, fileName+"-"+strconv.FormatInt(i+1, 10), io.NewSectionReader(f, i*chunkSize, chunkSize), - false, "application/octet-stream", nil, assignResult.Auth) + false, "", nil, assignResult.Auth) if err != nil { uploadError = fmt.Errorf("upload data %v to %s: %v\n", fileName, targetUrl, err) return @@ -435,7 +435,7 @@ func (worker *FileCopyWorker) uploadFileInChunks(ctx context.Context, task FileC }, } - if _, err := client.CreateEntry(ctx, request); err != nil { + if err := filer_pb.CreateEntry(ctx, client, request); err != nil { return fmt.Errorf("update fh: %v", err) } return nil @@ -466,7 +466,7 @@ func detectMimeType(f *os.File) string { func withFilerClient(ctx context.Context, filerAddress string, grpcDialOption grpc.DialOption, fn func(filer_pb.SeaweedFilerClient) error) error { - return util.WithCachedGrpcClient(ctx, func(clientConn *grpc.ClientConn) error { + return util.WithCachedGrpcClient(ctx, func(ctx context.Context, clientConn *grpc.ClientConn) error { client := filer_pb.NewSeaweedFilerClient(clientConn) return fn(client) }, filerAddress, grpcDialOption) diff --git a/weed/command/filer_replication.go b/weed/command/filer_replication.go index c6e7f5dba..737f0d24a 100644 --- a/weed/command/filer_replication.go +++ b/weed/command/filer_replication.go @@ -39,7 +39,7 @@ func runFilerReplicate(cmd *Command, args []string) bool { util.LoadConfiguration("security", false) util.LoadConfiguration("replication", true) util.LoadConfiguration("notification", true) - config := viper.GetViper() + config := util.GetViper() var notificationInput sub.NotificationInput @@ -47,8 +47,7 @@ func runFilerReplicate(cmd *Command, args []string) bool { for _, input := range sub.NotificationInputs { if config.GetBool("notification." + input.GetName() + ".enabled") { - viperSub := config.Sub("notification." 
+ input.GetName()) - if err := input.Initialize(viperSub); err != nil { + if err := input.Initialize(config, "notification."+input.GetName()+"."); err != nil { glog.Fatalf("Failed to initialize notification input for %s: %+v", input.GetName(), err) } @@ -66,10 +65,9 @@ func runFilerReplicate(cmd *Command, args []string) bool { // avoid recursive replication if config.GetBool("notification.source.filer.enabled") && config.GetBool("notification.sink.filer.enabled") { - sourceConfig, sinkConfig := config.Sub("source.filer"), config.Sub("sink.filer") - if sourceConfig.GetString("grpcAddress") == sinkConfig.GetString("grpcAddress") { - fromDir := sourceConfig.GetString("directory") - toDir := sinkConfig.GetString("directory") + if config.GetString("source.filer.grpcAddress") == config.GetString("sink.filer.grpcAddress") { + fromDir := config.GetString("source.filer.directory") + toDir := config.GetString("sink.filer.directory") if strings.HasPrefix(toDir, fromDir) { glog.Fatalf("recursive replication! source directory %s includes the sink directory %s", fromDir, toDir) } @@ -79,8 +77,7 @@ func runFilerReplicate(cmd *Command, args []string) bool { var dataSink sink.ReplicationSink for _, sk := range sink.Sinks { if config.GetBool("sink." + sk.GetName() + ".enabled") { - viperSub := config.Sub("sink." + sk.GetName()) - if err := sk.Initialize(viperSub); err != nil { + if err := sk.Initialize(config, "sink."+sk.GetName()+"."); err != nil { glog.Fatalf("Failed to initialize sink for %s: %+v", sk.GetName(), err) } @@ -98,7 +95,7 @@ func runFilerReplicate(cmd *Command, args []string) bool { return true } - replicator := replication.NewReplicator(config.Sub("source.filer"), dataSink) + replicator := replication.NewReplicator(config, "source.filer.", dataSink) for { key, m, err := notificationInput.ReceiveMessage() diff --git a/weed/command/fix.go b/weed/command/fix.go index 2fbbca5e6..8903595fa 100644 --- a/weed/command/fix.go +++ b/weed/command/fix.go @@ -8,6 +8,8 @@ import ( "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/storage" "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/needle_map" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" "github.com/chrislusf/seaweedfs/weed/storage/types" ) @@ -31,11 +33,11 @@ var ( type VolumeFileScanner4Fix struct { version needle.Version - nm *storage.NeedleMap + nm *needle_map.MemDb } -func (scanner *VolumeFileScanner4Fix) VisitSuperBlock(superBlock storage.SuperBlock) error { - scanner.version = superBlock.Version() +func (scanner *VolumeFileScanner4Fix) VisitSuperBlock(superBlock super_block.SuperBlock) error { + scanner.version = superBlock.Version return nil } @@ -46,11 +48,11 @@ func (scanner *VolumeFileScanner4Fix) ReadNeedleBody() bool { func (scanner *VolumeFileScanner4Fix) VisitNeedle(n *needle.Needle, offset int64, needleHeader, needleBody []byte) error { glog.V(2).Infof("key %d offset %d size %d disk_size %d gzip %v", n.Id, offset, n.Size, n.DiskSize(scanner.version), n.IsGzipped()) if n.Size > 0 && n.Size != types.TombstoneFileSize { - pe := scanner.nm.Put(n.Id, types.ToOffset(offset), n.Size) + pe := scanner.nm.Set(n.Id, types.ToOffset(offset), n.Size) glog.V(2).Infof("saved %d with error %v", n.Size, pe) } else { glog.V(2).Infof("skipping deleted file ...") - return scanner.nm.Delete(n.Id, types.ToOffset(offset)) + return scanner.nm.Delete(n.Id) } return nil } @@ -66,23 +68,21 @@ func runFix(cmd *Command, args []string) bool { baseFileName = 
*fixVolumeCollection + "_" + baseFileName } indexFileName := path.Join(*fixVolumePath, baseFileName+".idx") - indexFile, err := os.OpenFile(indexFileName, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) - if err != nil { - glog.Fatalf("Create Volume Index [ERROR] %s\n", err) - } - defer indexFile.Close() - nm := storage.NewBtreeNeedleMap(indexFile) - defer nm.Close() + nm := needle_map.NewMemDb() vid := needle.VolumeId(*fixVolumeId) scanner := &VolumeFileScanner4Fix{ nm: nm, } - err = storage.ScanVolumeFile(*fixVolumePath, *fixVolumeCollection, vid, storage.NeedleMapInMemory, scanner) - if err != nil { - glog.Fatalf("Export Volume File [ERROR] %s\n", err) + if err := storage.ScanVolumeFile(*fixVolumePath, *fixVolumeCollection, vid, storage.NeedleMapInMemory, scanner); err != nil { + glog.Fatalf("scan .dat File: %v", err) + os.Remove(indexFileName) + } + + if err := nm.SaveToIdx(indexFileName); err != nil { + glog.Fatalf("save to .idx File: %v", err) os.Remove(indexFileName) } diff --git a/weed/command/master.go b/weed/command/master.go index 3d33f4f7a..c4b11119b 100644 --- a/weed/command/master.go +++ b/weed/command/master.go @@ -8,14 +8,15 @@ import ( "strings" "github.com/chrislusf/raft/protobuf" + "github.com/gorilla/mux" + "google.golang.org/grpc/reflection" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/server" + "github.com/chrislusf/seaweedfs/weed/storage/backend" "github.com/chrislusf/seaweedfs/weed/util" - "github.com/gorilla/mux" - "github.com/spf13/viper" - "google.golang.org/grpc/reflection" ) var ( @@ -101,6 +102,8 @@ func runMaster(cmd *Command, args []string) bool { func startMaster(masterOption MasterOptions, masterWhiteList []string) { + backend.LoadConfiguration(util.GetViper()) + myMasterAddress, peers := checkPeers(*masterOption.ip, *masterOption.port, *masterOption.peers) r := mux.NewRouter() @@ -112,7 +115,7 @@ func startMaster(masterOption MasterOptions, masterWhiteList []string) { glog.Fatalf("Master startup error: %v", e) } // start raftServer - raftServer := weed_server.NewRaftServer(security.LoadClientTLS(viper.Sub("grpc"), "master"), + raftServer := weed_server.NewRaftServer(security.LoadClientTLS(util.GetViper(), "grpc.master"), peers, myMasterAddress, *masterOption.metaFolder, ms.Topo, *masterOption.pulseSeconds) if raftServer == nil { glog.Fatalf("please verify %s is writable, see https://github.com/chrislusf/seaweedfs/issues/717", *masterOption.metaFolder) @@ -126,7 +129,7 @@ func startMaster(masterOption MasterOptions, masterWhiteList []string) { glog.Fatalf("master failed to listen on grpc port %d: %v", grpcPort, err) } // Create your protocol servers. 
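The hunk below is the same configuration change repeated throughout this patch: `security.LoadServerTLS(viper.Sub("grpc"), "master")` becomes `security.LoadServerTLS(util.GetViper(), "grpc.master")`, i.e. the whole viper tree plus a full key path instead of a `Sub` tree plus a short key. A minimal standalone sketch of why that matters, assuming the `WEED_` environment-variable rules described in the scaffold help text later in this patch (the wiring here is illustrative, not a SeaweedFS API):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/spf13/viper"
)

func main() {
	// Whole-tree viper configured with the WEED_ env rules from the
	// scaffold help text: "WEED_" prefix, uppercase, '.' -> '_'.
	v := viper.GetViper()
	v.SetEnvPrefix("weed")
	v.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
	v.AutomaticEnv()

	// Looking up the full key path keeps env overrides visible: an
	// exported WEED_GRPC_MASTER_CERT is found here, whereas a sub-tree
	// created with v.Sub("grpc") would not see env-only values.
	fmt.Println(v.GetString("grpc.master.cert"))
}
```

This is presumably why every `LoadClientTLS`/`LoadServerTLS` and store `Initialize` call site in this patch switches from a `viper.Sub(...)` sub-tree to `util.GetViper()` plus a string prefix.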
- grpcS := util.NewGrpcServer(security.LoadServerTLS(viper.Sub("grpc"), "master")) + grpcS := util.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.master")) master_pb.RegisterSeaweedServer(grpcS, ms) protobuf.RegisterRaftServer(grpcS, raftServer) reflection.Register(grpcS) diff --git a/weed/command/mount.go b/weed/command/mount.go index 71c1a4387..f09b285f7 100644 --- a/weed/command/mount.go +++ b/weed/command/mount.go @@ -10,7 +10,7 @@ type MountOptions struct { filer *string filerMountRootPath *string dir *string - dirListingLimit *int + dirListCacheLimit *int64 collection *string replication *string ttlSec *int @@ -31,7 +31,7 @@ func init() { mountOptions.filer = cmdMount.Flag.String("filer", "localhost:8888", "weed filer location") mountOptions.filerMountRootPath = cmdMount.Flag.String("filer.path", "/", "mount this remote path from filer server") mountOptions.dir = cmdMount.Flag.String("dir", ".", "mount weed filer to this directory") - mountOptions.dirListingLimit = cmdMount.Flag.Int("dirListLimit", 100000, "limit directory listing size") + mountOptions.dirListCacheLimit = cmdMount.Flag.Int64("dirListCacheLimit", 1000000, "limit cache size to speed up directory long format listing") mountOptions.collection = cmdMount.Flag.String("collection", "", "collection to create the files") mountOptions.replication = cmdMount.Flag.String("replication", "", "replication(e.g. 000, 001) to create to files. If empty, let filer decide.") mountOptions.ttlSec = cmdMount.Flag.Int("ttl", 0, "file ttl in seconds") @@ -64,12 +64,12 @@ var cmdMount = &Command{ func parseFilerGrpcAddress(filer string) (filerGrpcAddress string, err error) { hostnameAndPort := strings.Split(filer, ":") if len(hostnameAndPort) != 2 { - return "", fmt.Errorf("The filer should have hostname:port format: %v", hostnameAndPort) + return "", fmt.Errorf("filer should have hostname:port format: %v", hostnameAndPort) } filerPort, parseErr := strconv.ParseUint(hostnameAndPort[1], 10, 64) if parseErr != nil { - return "", fmt.Errorf("The filer filer port parse error: %v", parseErr) + return "", fmt.Errorf("filer port parse error: %v", parseErr) } filerGrpcPort := int(filerPort) + 10000 diff --git a/weed/command/mount_darwin.go b/weed/command/mount_darwin.go index 632691e47..f0a5581e7 100644 --- a/weed/command/mount_darwin.go +++ b/weed/command/mount_darwin.go @@ -7,3 +7,7 @@ import ( func osSpecificMountOptions() []fuse.MountOption { return []fuse.MountOption{} } + +func checkMountPointAvailable(dir string) bool { + return true +} diff --git a/weed/command/mount_freebsd.go b/weed/command/mount_freebsd.go index 632691e47..f0a5581e7 100644 --- a/weed/command/mount_freebsd.go +++ b/weed/command/mount_freebsd.go @@ -7,3 +7,7 @@ import ( func osSpecificMountOptions() []fuse.MountOption { return []fuse.MountOption{} } + +func checkMountPointAvailable(dir string) bool { + return true +} diff --git a/weed/command/mount_linux.go b/weed/command/mount_linux.go index 7d94e5142..80a5f9da4 100644 --- a/weed/command/mount_linux.go +++ b/weed/command/mount_linux.go @@ -1,11 +1,157 @@ package command import ( + "bufio" + "fmt" + "io" + "os" + "strings" + "github.com/seaweedfs/fuse" ) +const ( + /* 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue + (1)(2)(3) (4) (5) (6) (7) (8) (9) (10) (11) + + (1) mount ID: unique identifier of the mount (may be reused after umount) + (2) parent ID: ID of parent (or of self for the top of the mount tree) + (3) major:minor: value of st_dev for files on filesystem + (4) root: root 
of the mount within the filesystem + (5) mount point: mount point relative to the process's root + (6) mount options: per mount options + (7) optional fields: zero or more fields of the form "tag[:value]" + (8) separator: marks the end of the optional fields + (9) filesystem type: name of filesystem of the form "type[.subtype]" + (10) mount source: filesystem specific information or "none" + (11) super options: per super block options*/ + mountinfoFormat = "%d %d %d:%d %s %s %s %s" +) + +// Info reveals information about a particular mounted filesystem. This +// struct is populated from the content in the /proc//mountinfo file. +type Info struct { + // ID is a unique identifier of the mount (may be reused after umount). + ID int + + // Parent indicates the ID of the mount parent (or of self for the top of the + // mount tree). + Parent int + + // Major indicates one half of the device ID which identifies the device class. + Major int + + // Minor indicates one half of the device ID which identifies a specific + // instance of device. + Minor int + + // Root of the mount within the filesystem. + Root string + + // Mountpoint indicates the mount point relative to the process's root. + Mountpoint string + + // Opts represents mount-specific options. + Opts string + + // Optional represents optional fields. + Optional string + + // Fstype indicates the type of filesystem, such as EXT3. + Fstype string + + // Source indicates filesystem specific information or "none". + Source string + + // VfsOpts represents per super block options. + VfsOpts string +} + +// Mounted determines if a specified mountpoint has been mounted. +// On Linux it looks at /proc/self/mountinfo and on Solaris at mnttab. +func mounted(mountPoint string) (bool, error) { + entries, err := parseMountTable() + if err != nil { + return false, err + } + + // Search the table for the mountPoint + for _, e := range entries { + if e.Mountpoint == mountPoint { + return true, nil + } + } + return false, nil +} + +// Parse /proc/self/mountinfo because comparing Dev and ino does not work from +// bind mounts +func parseMountTable() ([]*Info, error) { + f, err := os.Open("/proc/self/mountinfo") + if err != nil { + return nil, err + } + defer f.Close() + + return parseInfoFile(f) +} + +func parseInfoFile(r io.Reader) ([]*Info, error) { + var ( + s = bufio.NewScanner(r) + out []*Info + ) + + for s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + + var ( + p = &Info{} + text = s.Text() + optionalFields string + ) + + if _, err := fmt.Sscanf(text, mountinfoFormat, + &p.ID, &p.Parent, &p.Major, &p.Minor, + &p.Root, &p.Mountpoint, &p.Opts, &optionalFields); err != nil { + return nil, fmt.Errorf("Scanning '%s' failed: %s", text, err) + } + // Safe as mountinfo encodes mountpoints with spaces as \040. 
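The `parseInfoFile` hunk continues below; before it does, the parser is easy to sanity-check against the sample line quoted in the `mountinfoFormat` comment above. A hypothetical test (not part of the patch), assuming the `Info` struct and `parseInfoFile` compile as shown:

```go
package command

import (
	"strings"
	"testing"
)

// Feeds the documented sample mountinfo line through parseInfoFile and
// checks the fields on either side of the " - " separator.
func TestParseInfoFileSample(t *testing.T) {
	sample := "36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue\n"
	infos, err := parseInfoFile(strings.NewReader(sample))
	if err != nil {
		t.Fatalf("parse: %v", err)
	}
	if len(infos) != 1 || infos[0].Mountpoint != "/mnt2" {
		t.Fatalf("unexpected result: %+v", infos)
	}
	if infos[0].Fstype != "ext3" || infos[0].Source != "/dev/root" {
		t.Fatalf("bad post-separator fields: %+v", infos[0])
	}
}
```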
+ index := strings.Index(text, " - ") + postSeparatorFields := strings.Fields(text[index+3:]) + if len(postSeparatorFields) < 3 { + return nil, fmt.Errorf("Error found less than 3 fields post '-' in %q", text) + } + + if optionalFields != "-" { + p.Optional = optionalFields + } + + p.Fstype = postSeparatorFields[0] + p.Source = postSeparatorFields[1] + p.VfsOpts = strings.Join(postSeparatorFields[2:], " ") + out = append(out, p) + } + return out, nil +} + func osSpecificMountOptions() []fuse.MountOption { return []fuse.MountOption{ fuse.AllowNonEmptyMount(), } } + +func checkMountPointAvailable(dir string) bool { + mountPoint := dir + if mountPoint != "/" && strings.HasSuffix(mountPoint, "/") { + mountPoint = mountPoint[0 : len(mountPoint)-1] + } + + if mounted, err := mounted(mountPoint); err != nil || mounted { + return false + } + + return true +} diff --git a/weed/command/mount_std.go b/weed/command/mount_std.go index 6ca9bfdca..891810e61 100644 --- a/weed/command/mount_std.go +++ b/weed/command/mount_std.go @@ -12,12 +12,11 @@ import ( "strings" "time" - "github.com/chrislusf/seaweedfs/weed/security" "github.com/jacobsa/daemonize" - "github.com/spf13/viper" "github.com/chrislusf/seaweedfs/weed/filesys" "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/util" "github.com/seaweedfs/fuse" "github.com/seaweedfs/fuse/fs" @@ -43,13 +42,13 @@ func runMount(cmd *Command, args []string) bool { *mountOptions.chunkSizeLimitMB, *mountOptions.allowOthers, *mountOptions.ttlSec, - *mountOptions.dirListingLimit, + *mountOptions.dirListCacheLimit, os.FileMode(umask), ) } func RunMount(filer, filerMountRootPath, dir, collection, replication, dataCenter string, chunkSizeLimitMB int, - allowOthers bool, ttlSec int, dirListingLimit int, umask os.FileMode) bool { + allowOthers bool, ttlSec int, dirListCacheLimit int64, umask os.FileMode) bool { util.LoadConfiguration("security", false) @@ -88,12 +87,18 @@ func RunMount(filer, filerMountRootPath, dir, collection, replication, dataCente } } + // Ensure target mount point availability + if isValid := checkMountPointAvailable(dir); !isValid { + glog.Fatalf("Expected mount to still be active, target mount point: %s, please check!", dir) + return false + } + mountName := path.Base(dir) options := []fuse.MountOption{ fuse.VolumeName(mountName), - fuse.FSName("SeaweedFS"), - fuse.Subtype("SeaweedFS"), + fuse.FSName(filer + ":" + filerMountRootPath), + fuse.Subtype("seaweedfs"), fuse.NoAppleDouble(), fuse.NoAppleXattr(), fuse.NoBrowse(), @@ -116,9 +121,9 @@ func RunMount(filer, filerMountRootPath, dir, collection, replication, dataCente c, err := fuse.Mount(dir, options...) 
 	if err != nil {
-		glog.Fatal(err)
+		glog.V(0).Infof("mount: %v", err)
 		daemonize.SignalOutcome(err)
-		return false
+		return true
 	}
 
 	util.OnInterrupt(func() {
@@ -128,9 +133,9 @@ func RunMount(filer, filerMountRootPath, dir, collection, replication, dataCente
 
 	filerGrpcAddress, err := parseFilerGrpcAddress(filer)
 	if err != nil {
-		glog.Fatal(err)
+		glog.V(0).Infof("parseFilerGrpcAddress: %v", err)
 		daemonize.SignalOutcome(err)
-		return false
+		return true
 	}
 
 	mountRoot := filerMountRootPath
@@ -142,14 +147,14 @@ func RunMount(filer, filerMountRootPath, dir, collection, replication, dataCente
 	err = fs.Serve(c, filesys.NewSeaweedFileSystem(&filesys.Option{
 		FilerGrpcAddress:   filerGrpcAddress,
-		GrpcDialOption:     security.LoadClientTLS(viper.Sub("grpc"), "client"),
+		GrpcDialOption:     security.LoadClientTLS(util.GetViper(), "grpc.client"),
 		FilerMountRootPath: mountRoot,
 		Collection:         collection,
 		Replication:        replication,
 		TtlSec:             int32(ttlSec),
 		ChunkSizeLimit:     int64(chunkSizeLimitMB) * 1024 * 1024,
 		DataCenter:         dataCenter,
-		DirListingLimit:    dirListingLimit,
+		DirListCacheLimit:  dirListCacheLimit,
 		EntryCacheTtl:      3 * time.Second,
 		MountUid:           uid,
 		MountGid:           gid,
@@ -165,8 +170,9 @@ func RunMount(filer, filerMountRootPath, dir, collection, replication, dataCente
 	// check if the mount process has an error to report
 	<-c.Ready
 	if err := c.MountError; err != nil {
-		glog.Fatal(err)
+		glog.V(0).Infof("mount process: %v", err)
 		daemonize.SignalOutcome(err)
+		return true
 	}
 
 	return true
diff --git a/weed/command/s3.go b/weed/command/s3.go
index e004bb066..10a486657 100644
--- a/weed/command/s3.go
+++ b/weed/command/s3.go
@@ -1,18 +1,17 @@
 package command
 
 import (
+	"fmt"
 	"net/http"
 	"time"
 
 	"github.com/chrislusf/seaweedfs/weed/security"
-	"github.com/spf13/viper"
-	"fmt"
+	"github.com/gorilla/mux"
 
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/s3api"
 	"github.com/chrislusf/seaweedfs/weed/util"
-	"github.com/gorilla/mux"
 )
 
 var (
@@ -69,7 +68,7 @@ func (s3opt *S3Options) startS3Server() bool {
 		FilerGrpcAddress: filerGrpcAddress,
 		DomainName:       *s3opt.domainName,
 		BucketsPath:      *s3opt.filerBucketsPath,
-		GrpcDialOption:   security.LoadClientTLS(viper.Sub("grpc"), "client"),
+		GrpcDialOption:   security.LoadClientTLS(util.GetViper(), "grpc.client"),
 	})
 	if s3ApiServer_err != nil {
 		glog.Fatalf("S3 API Server startup error: %v", s3ApiServer_err)
diff --git a/weed/command/scaffold.go b/weed/command/scaffold.go
index 7a988cdcf..ab658735f 100644
--- a/weed/command/scaffold.go
+++ b/weed/command/scaffold.go
@@ -14,6 +14,14 @@ var cmdScaffold = &Command{
 	Short: "generate basic configuration files",
 	Long: `Generate filer.toml with all possible configurations for you to customize.
 
+	The options can also be overwritten by environment variables.
+	For example, the filer.toml mysql password can be overwritten by environment variable
+		export WEED_MYSQL_PASSWORD=some_password
+	Environment variable rules:
+		* Prefix with "WEED_"
+		* Uppercase the rest of the variable name.
+		* Replace '.' with '_'
+
 	`,
 }
 
@@ -59,14 +67,18 @@ const (
 # $HOME/.seaweedfs/filer.toml
 # /etc/seaweedfs/filer.toml
 
-[memory]
-# local in memory, mostly for testing purpose
-enabled = false
+####################################################
+# Customizable filer server options
+####################################################
+[filer.options]
+# with http DELETE, by default the filer would check whether a folder is empty.
+# recursive_delete will delete all sub folders and files, similar to "rm -Rf" +recursive_delete = false -[leveldb] -# local on disk, mostly for simple single-machine setup, fairly scalable -enabled = false -dir = "." # directory to store level db files + +#################################################### +# The following are filer store options +#################################################### [leveldb2] # local on disk, mostly for simple single-machine setup, fairly scalable @@ -74,10 +86,6 @@ dir = "." # directory to store level db files enabled = true dir = "." # directory to store level db files -#################################################### -# multiple filers on shared storage, fairly scalable -#################################################### - [mysql] # or tidb # CREATE TABLE IF NOT EXISTS filemeta ( # dirhash BIGINT COMMENT 'first 64 bits of MD5 hash value of directory field', @@ -95,6 +103,7 @@ password = "" database = "" # create or use an existing database connection_max_idle = 2 connection_max_open = 100 +interpolateParams = false [postgres] # or cockroachdb # CREATE TABLE IF NOT EXISTS filemeta ( @@ -144,6 +153,10 @@ addresses = [ "localhost:30006", ] password = "" +# allows reads from slave servers or the master, but all writes still go to the master +readOnly = true +# automatically use the closest Redis server for reads +routeByLatency = true [etcd] enabled = false @@ -346,5 +359,32 @@ scripts = """ """ sleep_minutes = 17 # sleep minutes between each script execution +[master.filer] +default_filer_url = "http://localhost:8888/" + +[master.sequencer] +type = "memory" # Choose [memory|etcd] type for storing the file id sequence +# when sequencer.type = etcd, set listen client urls of etcd cluster that store file id sequence +# example : http://127.0.0.1:2379,http://127.0.0.1:2389 +sequencer_etcd_urls = "http://127.0.0.1:2379" + + +# configurations for tiered cloud storage +# old volumes are transparently moved to cloud for cost efficiency +[storage.backend] + [storage.backend.s3.default] + enabled = false + aws_access_key_id = "" # if empty, loads from the shared credentials file (~/.aws/credentials). + aws_secret_access_key = "" # if empty, loads from the shared credentials file (~/.aws/credentials). + region = "us-east-2" + bucket = "your_bucket_name" # an existing bucket + +# create this number of logical volumes if no more writable volumes +[master.volume_growth] +count_1 = 7 # create 1 x 7 = 7 actual volumes +count_2 = 6 # create 2 x 6 = 12 actual volumes +count_3 = 3 # create 3 x 3 = 9 actual volumes +count_other = 1 # create n x 1 = n actual volumes + ` ) diff --git a/weed/command/scaffold_test.go b/weed/command/scaffold_test.go new file mode 100644 index 000000000..423dacc32 --- /dev/null +++ b/weed/command/scaffold_test.go @@ -0,0 +1,44 @@ +package command + +import ( + "bytes" + "fmt" + "testing" + + "github.com/spf13/viper" +) + +func TestReadingTomlConfiguration(t *testing.T) { + + viper.SetConfigType("toml") + + // any approach to require this configuration into your program. + var tomlExample = []byte(` +[database] +server = "192.168.1.1" +ports = [ 8001, 8001, 8002 ] +connection_max = 5000 +enabled = true + +[servers] + + # You can indent as you please. Tabs or spaces. TOML don't care. 
+ [servers.alpha] + ip = "10.0.0.1" + dc = "eqdc10" + + [servers.beta] + ip = "10.0.0.2" + dc = "eqdc10" + +`) + + viper.ReadConfig(bytes.NewBuffer(tomlExample)) + + fmt.Printf("database is %v\n", viper.Get("database")) + fmt.Printf("servers is %v\n", viper.GetStringMap("servers")) + + alpha := viper.Sub("servers.alpha") + + fmt.Printf("alpha ip is %v\n", alpha.GetString("ip")) +} diff --git a/weed/command/server.go b/weed/command/server.go index 87f404ed3..6aa68b6d2 100644 --- a/weed/command/server.go +++ b/weed/command/server.go @@ -89,6 +89,7 @@ func init() { serverOptions.v.fixJpgOrientation = cmdServer.Flag.Bool("volume.images.fix.orientation", false, "Adjust jpg orientation when uploading.") serverOptions.v.readRedirect = cmdServer.Flag.Bool("volume.read.redirect", true, "Redirect moved or non-local volumes.") serverOptions.v.compactionMBPerSecond = cmdServer.Flag.Int("volume.compactionMBps", 0, "limit compaction speed in mega bytes per second") + serverOptions.v.fileSizeLimitMB = cmdServer.Flag.Int("volume.fileSizeLimitMB", 256, "limit file size to avoid out of memory") serverOptions.v.publicUrl = cmdServer.Flag.String("volume.publicUrl", "", "publicly accessible address") s3Options.filerBucketsPath = cmdServer.Flag.String("s3.filer.dir.buckets", "/buckets", "folder on filer to store all buckets") diff --git a/weed/command/shell.go b/weed/command/shell.go index 91aa8770a..dcf70608f 100644 --- a/weed/command/shell.go +++ b/weed/command/shell.go @@ -2,14 +2,10 @@ package command import ( "fmt" - "net/url" - "strconv" - "strings" "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/shell" "github.com/chrislusf/seaweedfs/weed/util" - "github.com/spf13/viper" ) var ( @@ -34,10 +30,10 @@ var cmdShell = &Command{ func runShell(command *Command, args []string) bool { util.LoadConfiguration("security", false) - shellOptions.GrpcDialOption = security.LoadClientTLS(viper.Sub("grpc"), "client") + shellOptions.GrpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client") var filerPwdErr error - shellOptions.FilerHost, shellOptions.FilerPort, shellOptions.Directory, filerPwdErr = parseFilerUrl(*shellInitialFilerUrl) + shellOptions.FilerHost, shellOptions.FilerPort, shellOptions.Directory, filerPwdErr = util.ParseFilerUrl(*shellInitialFilerUrl) if filerPwdErr != nil { fmt.Printf("failed to parse url filer.url=%s : %v\n", *shellInitialFilerUrl, filerPwdErr) return false @@ -48,22 +44,3 @@ func runShell(command *Command, args []string) bool { return true } - -func parseFilerUrl(entryPath string) (filerServer string, filerPort int64, path string, err error) { - if !strings.HasPrefix(entryPath, "http://") && !strings.HasPrefix(entryPath, "https://") { - entryPath = "http://" + entryPath - } - - var u *url.URL - u, err = url.Parse(entryPath) - if err != nil { - return - } - filerServer = u.Hostname() - portString := u.Port() - if portString != "" { - filerPort, err = strconv.ParseInt(portString, 10, 32) - } - path = u.Path - return -} diff --git a/weed/command/upload.go b/weed/command/upload.go index 25e938d9b..d71046131 100644 --- a/weed/command/upload.go +++ b/weed/command/upload.go @@ -6,11 +6,9 @@ import ( "os" "path/filepath" + "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/util" - "github.com/spf13/viper" - - "github.com/chrislusf/seaweedfs/weed/operation" ) var ( @@ -63,7 +61,7 @@ var cmdUpload = &Command{ func runUpload(cmd *Command, args []string) bool { 
 	util.LoadConfiguration("security", false)
-	grpcDialOption := security.LoadClientTLS(viper.Sub("grpc"), "client")
+	grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
 
 	if len(args) == 0 {
 		if *upload.dir == "" {
diff --git a/weed/command/volume.go b/weed/command/volume.go
index 3c1aa2b50..9d665d143 100644
--- a/weed/command/volume.go
+++ b/weed/command/volume.go
@@ -1,6 +1,7 @@
 package command
 
 import (
+	"fmt"
 	"net/http"
 	"os"
 	"runtime"
@@ -9,15 +10,19 @@ import (
 	"strings"
 	"time"
 
-	"github.com/chrislusf/seaweedfs/weed/security"
 	"github.com/spf13/viper"
+	"google.golang.org/grpc"
+
+	"github.com/chrislusf/seaweedfs/weed/security"
+	"github.com/chrislusf/seaweedfs/weed/util/httpdown"
+
+	"google.golang.org/grpc/reflection"
 
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
 	"github.com/chrislusf/seaweedfs/weed/server"
 	"github.com/chrislusf/seaweedfs/weed/storage"
 	"github.com/chrislusf/seaweedfs/weed/util"
-	"google.golang.org/grpc/reflection"
 )
 
 var (
@@ -44,6 +49,7 @@ type VolumeServerOptions struct {
 	cpuProfile            *string
 	memProfile            *string
 	compactionMBPerSecond *int
+	fileSizeLimitMB       *int
 }
 
 func init() {
@@ -64,6 +70,7 @@ func init() {
 	v.cpuProfile = cmdVolume.Flag.String("cpuprofile", "", "cpu profile output file")
 	v.memProfile = cmdVolume.Flag.String("memprofile", "", "memory profile output file")
 	v.compactionMBPerSecond = cmdVolume.Flag.Int("compactionMBps", 0, "limit background compaction or copying speed in mega bytes per second")
+	v.fileSizeLimitMB = cmdVolume.Flag.Int("fileSizeLimitMB", 256, "limit file size to avoid out of memory")
 }
 
 var cmdVolume = &Command{
@@ -94,7 +101,7 @@ func runVolume(cmd *Command, args []string) bool {
 
 func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, volumeWhiteListOption string) {
 
-	//Set multiple folders and each folder's max volume count limit'
+	// Set multiple folders and each folder's max volume count limit
 	v.folders = strings.Split(volumeFolders, ",")
 	maxCountStrings := strings.Split(maxVolumeCounts, ",")
 	for _, maxString := range maxCountStrings {
@@ -113,7 +120,7 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
 		}
 	}
 
-	//security related white list configuration
+	// security related white list configuration
 	if volumeWhiteListOption != "" {
 		v.whiteList = strings.Split(volumeWhiteListOption, ",")
 	}
@@ -128,11 +135,10 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
 	if *v.publicUrl == "" {
 		*v.publicUrl = *v.ip + ":" + strconv.Itoa(*v.publicPort)
 	}
-	isSeperatedPublicPort := *v.publicPort != *v.port
 
 	volumeMux := http.NewServeMux()
 	publicVolumeMux := volumeMux
-	if isSeperatedPublicPort {
+	if v.isSeparatedPublicPort() {
 		publicVolumeMux = http.NewServeMux()
 	}
 
@@ -156,53 +162,134 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
 		v.whiteList,
 		*v.fixJpgOrientation, *v.readRedirect,
 		*v.compactionMBPerSecond,
+		*v.fileSizeLimitMB,
 	)
 
+	// starting grpc server
+	grpcS := v.startGrpcService(volumeServer)
+
+	// starting public http server
+	var publicHttpDown httpdown.Server
+	if v.isSeparatedPublicPort() {
+		publicHttpDown = v.startPublicHttpService(publicVolumeMux)
+		if nil == publicHttpDown {
+			glog.Fatalf("start public http service failed")
+		}
+	}
+
+	// starting the cluster http server
+	clusterHttpServer := v.startClusterHttpService(volumeMux)
+
+	stopChain := make(chan struct{})
+	util.OnInterrupt(func() {
+		fmt.Println("volume server has been killed")
+		var startTime time.Time
+
+		// first, stop the public http service to stop receiving new user requests
+		if nil != publicHttpDown {
+			startTime = time.Now()
+			if err := publicHttpDown.Stop(); err != nil {
+				glog.Warningf("stop the public http server failed, %v", err)
+			}
+			delta := time.Now().Sub(startTime).Nanoseconds() / 1e6
+			glog.V(0).Infof("stop public http server, elapsed %dms", delta)
+		}
+
+		startTime = time.Now()
+		if err := clusterHttpServer.Stop(); err != nil {
+			glog.Warningf("stop the cluster http server failed, %v", err)
+		}
+		delta := time.Now().Sub(startTime).Nanoseconds() / 1e6
+		glog.V(0).Infof("graceful stop cluster http server, elapsed [%d]", delta)
+
+		startTime = time.Now()
+		grpcS.GracefulStop()
+		delta = time.Now().Sub(startTime).Nanoseconds() / 1e6
+		glog.V(0).Infof("graceful stop gRPC, elapsed [%d]", delta)
+
+		startTime = time.Now()
+		volumeServer.Shutdown()
+		delta = time.Now().Sub(startTime).Nanoseconds() / 1e6
+		glog.V(0).Infof("stop volume server, elapsed [%d]", delta)
+
+		pprof.StopCPUProfile()
+
+		close(stopChain) // notify exit
+	})
+
+	select {
+	case <-stopChain:
+	}
+	glog.Warningf("the volume server exits.")
+}
+
+// check whether the public port is configured
+func (v VolumeServerOptions) isSeparatedPublicPort() bool {
+	return *v.publicPort != *v.port
+}
+
+func (v VolumeServerOptions) startGrpcService(vs volume_server_pb.VolumeServerServer) *grpc.Server {
+	grpcPort := *v.port + 10000
+	grpcL, err := util.NewListener(*v.bindIp+":"+strconv.Itoa(grpcPort), 0)
+	if err != nil {
+		glog.Fatalf("failed to listen on grpc port %d: %v", grpcPort, err)
+	}
+	grpcS := util.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.volume"))
+	volume_server_pb.RegisterVolumeServerServer(grpcS, vs)
+	reflection.Register(grpcS)
+	go func() {
+		if err := grpcS.Serve(grpcL); err != nil {
+			glog.Fatalf("start gRPC service failed, %s", err)
+		}
+	}()
+	return grpcS
+}
+
+func (v VolumeServerOptions) startPublicHttpService(handler http.Handler) httpdown.Server {
+	publicListeningAddress := *v.bindIp + ":" + strconv.Itoa(*v.publicPort)
+	glog.V(0).Infoln("Start Seaweed volume server", util.VERSION, "public at", publicListeningAddress)
+	publicListener, e := util.NewListener(publicListeningAddress, time.Duration(*v.idleConnectionTimeout)*time.Second)
+	if e != nil {
+		glog.Fatalf("Volume server listener error:%v", e)
+	}
+
+	pubHttp := httpdown.HTTP{StopTimeout: 5 * time.Minute, KillTimeout: 5 * time.Minute}
+	publicHttpDown := pubHttp.Serve(&http.Server{Handler: handler}, publicListener)
+	go func() {
+		if err := publicHttpDown.Wait(); err != nil {
+			glog.Errorf("public http down wait failed, %v", err)
+		}
+	}()
+
+	return publicHttpDown
+}
+
+func (v VolumeServerOptions) startClusterHttpService(handler http.Handler) httpdown.Server {
+	var (
+		certFile, keyFile string
+	)
+	if viper.GetString("https.volume.key") != "" {
+		certFile = viper.GetString("https.volume.cert")
+		keyFile = viper.GetString("https.volume.key")
+	}
+
+	listeningAddress := *v.bindIp + ":" + strconv.Itoa(*v.port)
+
 	glog.V(0).Infof("Start Seaweed volume server %s at %s", util.VERSION, listeningAddress)
 	listener, e := util.NewListener(listeningAddress, time.Duration(*v.idleConnectionTimeout)*time.Second)
 	if e != nil {
 		glog.Fatalf("Volume server listener error:%v", e)
 	}
-	if isSeperatedPublicPort {
-		publicListeningAddress := *v.bindIp + ":" + strconv.Itoa(*v.publicPort)
-		glog.V(0).Infoln("Start Seaweed volume server", util.VERSION, "public at", publicListeningAddress)
-		publicListener, e :=
util.NewListener(publicListeningAddress, time.Duration(*v.idleConnectionTimeout)*time.Second) - if e != nil { - glog.Fatalf("Volume server listener error:%v", e) - } - go func() { - if e := http.Serve(publicListener, publicVolumeMux); e != nil { - glog.Fatalf("Volume server fail to serve public: %v", e) - } - }() - } - util.OnInterrupt(func() { - volumeServer.Shutdown() - pprof.StopCPUProfile() - }) - - // starting grpc server - grpcPort := *v.port + 10000 - grpcL, err := util.NewListener(*v.bindIp+":"+strconv.Itoa(grpcPort), 0) - if err != nil { - glog.Fatalf("failed to listen on grpc port %d: %v", grpcPort, err) - } - grpcS := util.NewGrpcServer(security.LoadServerTLS(viper.Sub("grpc"), "volume")) - volume_server_pb.RegisterVolumeServerServer(grpcS, volumeServer) - reflection.Register(grpcS) - go grpcS.Serve(grpcL) - - if viper.GetString("https.volume.key") != "" { - if e := http.ServeTLS(listener, volumeMux, - viper.GetString("https.volume.cert"), viper.GetString("https.volume.key")); e != nil { + httpDown := httpdown.HTTP{ + KillTimeout: 5 * time.Minute, + StopTimeout: 5 * time.Minute, + CertFile: certFile, + KeyFile: keyFile} + clusterHttpServer := httpDown.Serve(&http.Server{Handler: handler}, listener) + go func() { + if e := clusterHttpServer.Wait(); e != nil { glog.Fatalf("Volume server fail to serve: %v", e) } - } else { - if e := http.Serve(listener, volumeMux); e != nil { - glog.Fatalf("Volume server fail to serve: %v", e) - } - } - + }() + return clusterHttpServer } diff --git a/weed/command/webdav.go b/weed/command/webdav.go index 371c4a9ad..0e6f89040 100644 --- a/weed/command/webdav.go +++ b/weed/command/webdav.go @@ -11,7 +11,6 @@ import ( "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/server" "github.com/chrislusf/seaweedfs/weed/util" - "github.com/spf13/viper" ) var ( @@ -75,7 +74,7 @@ func (wo *WebDavOption) startWebDav() bool { ws, webdavServer_err := weed_server.NewWebDavServer(&weed_server.WebDavOption{ Filer: *wo.filer, FilerGrpcAddress: filerGrpcAddress, - GrpcDialOption: security.LoadClientTLS(viper.Sub("grpc"), "client"), + GrpcDialOption: security.LoadClientTLS(util.GetViper(), "grpc.client"), Collection: *wo.collection, Uid: uid, Gid: gid, diff --git a/weed/filer2/abstract_sql/abstract_sql_store.go b/weed/filer2/abstract_sql/abstract_sql_store.go index 3e8554957..47fe507a1 100644 --- a/weed/filer2/abstract_sql/abstract_sql_store.go +++ b/weed/filer2/abstract_sql/abstract_sql_store.go @@ -7,16 +7,18 @@ import ( "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/util" ) type AbstractSqlStore struct { - DB *sql.DB - SqlInsert string - SqlUpdate string - SqlFind string - SqlDelete string - SqlListExclusive string - SqlListInclusive string + DB *sql.DB + SqlInsert string + SqlUpdate string + SqlFind string + SqlDelete string + SqlDeleteFolderChildren string + SqlListExclusive string + SqlListInclusive string } type TxOrDB interface { @@ -64,7 +66,7 @@ func (store *AbstractSqlStore) InsertEntry(ctx context.Context, entry *filer2.En return fmt.Errorf("encode %s: %s", entry.FullPath, err) } - res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlInsert, hashToLong(dir), name, dir, meta) + res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlInsert, util.HashStringToLong(dir), name, dir, meta) if err != nil { return fmt.Errorf("insert %s: %s", entry.FullPath, err) } @@ -84,7 +86,7 @@ func (store *AbstractSqlStore) UpdateEntry(ctx context.Context, 
entry *filer2.En return fmt.Errorf("encode %s: %s", entry.FullPath, err) } - res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlUpdate, meta, hashToLong(dir), name, dir) + res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlUpdate, meta, util.HashStringToLong(dir), name, dir) if err != nil { return fmt.Errorf("update %s: %s", entry.FullPath, err) } @@ -99,7 +101,7 @@ func (store *AbstractSqlStore) UpdateEntry(ctx context.Context, entry *filer2.En func (store *AbstractSqlStore) FindEntry(ctx context.Context, fullpath filer2.FullPath) (*filer2.Entry, error) { dir, name := fullpath.DirAndName() - row := store.getTxOrDB(ctx).QueryRowContext(ctx, store.SqlFind, hashToLong(dir), name, dir) + row := store.getTxOrDB(ctx).QueryRowContext(ctx, store.SqlFind, util.HashStringToLong(dir), name, dir) var data []byte if err := row.Scan(&data); err != nil { return nil, filer2.ErrNotFound @@ -119,7 +121,7 @@ func (store *AbstractSqlStore) DeleteEntry(ctx context.Context, fullpath filer2. dir, name := fullpath.DirAndName() - res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlDelete, hashToLong(dir), name, dir) + res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlDelete, util.HashStringToLong(dir), name, dir) if err != nil { return fmt.Errorf("delete %s: %s", fullpath, err) } @@ -132,6 +134,21 @@ func (store *AbstractSqlStore) DeleteEntry(ctx context.Context, fullpath filer2. return nil } +func (store *AbstractSqlStore) DeleteFolderChildren(ctx context.Context, fullpath filer2.FullPath) error { + + res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlDeleteFolderChildren, util.HashStringToLong(string(fullpath)), fullpath) + if err != nil { + return fmt.Errorf("deleteFolderChildren %s: %s", fullpath, err) + } + + _, err = res.RowsAffected() + if err != nil { + return fmt.Errorf("deleteFolderChildren %s but no rows affected: %s", fullpath, err) + } + + return nil +} + func (store *AbstractSqlStore) ListDirectoryEntries(ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer2.Entry, err error) { sqlText := store.SqlListExclusive @@ -139,7 +156,7 @@ func (store *AbstractSqlStore) ListDirectoryEntries(ctx context.Context, fullpat sqlText = store.SqlListInclusive } - rows, err := store.getTxOrDB(ctx).QueryContext(ctx, sqlText, hashToLong(string(fullpath)), startFileName, string(fullpath), limit) + rows, err := store.getTxOrDB(ctx).QueryContext(ctx, sqlText, util.HashStringToLong(string(fullpath)), startFileName, string(fullpath), limit) if err != nil { return nil, fmt.Errorf("list %s : %v", fullpath, err) } diff --git a/weed/filer2/abstract_sql/hashing.go b/weed/filer2/abstract_sql/hashing.go deleted file mode 100644 index 5c982c537..000000000 --- a/weed/filer2/abstract_sql/hashing.go +++ /dev/null @@ -1,32 +0,0 @@ -package abstract_sql - -import ( - "crypto/md5" - "io" -) - -// returns a 64 bit big int -func hashToLong(dir string) (v int64) { - h := md5.New() - io.WriteString(h, dir) - - b := h.Sum(nil) - - v += int64(b[0]) - v <<= 8 - v += int64(b[1]) - v <<= 8 - v += int64(b[2]) - v <<= 8 - v += int64(b[3]) - v <<= 8 - v += int64(b[4]) - v <<= 8 - v += int64(b[5]) - v <<= 8 - v += int64(b[6]) - v <<= 8 - v += int64(b[7]) - - return -} diff --git a/weed/filer2/cassandra/cassandra_store.go b/weed/filer2/cassandra/cassandra_store.go index 466be5bf3..f81ef946f 100644 --- a/weed/filer2/cassandra/cassandra_store.go +++ b/weed/filer2/cassandra/cassandra_store.go @@ -22,10 +22,10 @@ func (store *CassandraStore) GetName() 
string { return "cassandra" } -func (store *CassandraStore) Initialize(configuration util.Configuration) (err error) { +func (store *CassandraStore) Initialize(configuration util.Configuration, prefix string) (err error) { return store.initialize( - configuration.GetString("keyspace"), - configuration.GetStringSlice("hosts"), + configuration.GetString(prefix+"keyspace"), + configuration.GetStringSlice(prefix+"hosts"), ) } @@ -112,6 +112,17 @@ func (store *CassandraStore) DeleteEntry(ctx context.Context, fullpath filer2.Fu return nil } +func (store *CassandraStore) DeleteFolderChildren(ctx context.Context, fullpath filer2.FullPath) error { + + if err := store.session.Query( + "DELETE FROM filemeta WHERE directory=?", + fullpath).Exec(); err != nil { + return fmt.Errorf("delete %s : %v", fullpath, err) + } + + return nil +} + func (store *CassandraStore) ListDirectoryEntries(ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer2.Entry, err error) { diff --git a/weed/filer2/configuration.go b/weed/filer2/configuration.go index 7b05b53dc..a174117ea 100644 --- a/weed/filer2/configuration.go +++ b/weed/filer2/configuration.go @@ -17,8 +17,7 @@ func (f *Filer) LoadConfiguration(config *viper.Viper) { for _, store := range Stores { if config.GetBool(store.GetName() + ".enabled") { - viperSub := config.Sub(store.GetName()) - if err := store.Initialize(viperSub); err != nil { + if err := store.Initialize(config, store.GetName()+"."); err != nil { glog.Fatalf("Failed to initialize store for %s: %+v", store.GetName(), err) } diff --git a/weed/filer2/entry.go b/weed/filer2/entry.go index 3f8a19114..c901927bb 100644 --- a/weed/filer2/entry.go +++ b/weed/filer2/entry.go @@ -30,6 +30,7 @@ type Entry struct { FullPath Attr + Extended map[string][]byte // the following is for files Chunks []*filer_pb.FileChunk `json:"chunks,omitempty"` @@ -56,6 +57,7 @@ func (entry *Entry) ToProtoEntry() *filer_pb.Entry { IsDirectory: entry.IsDirectory(), Attributes: EntryAttributeToPb(entry), Chunks: entry.Chunks, + Extended: entry.Extended, } } diff --git a/weed/filer2/entry_codec.go b/weed/filer2/entry_codec.go index cf4627b74..3a2dc6134 100644 --- a/weed/filer2/entry_codec.go +++ b/weed/filer2/entry_codec.go @@ -1,18 +1,21 @@ package filer2 import ( + "bytes" + "fmt" "os" "time" - "fmt" - "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/golang/protobuf/proto" + + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" ) func (entry *Entry) EncodeAttributesAndChunks() ([]byte, error) { message := &filer_pb.Entry{ Attributes: EntryAttributeToPb(entry), Chunks: entry.Chunks, + Extended: entry.Extended, } return proto.Marshal(message) } @@ -27,6 +30,8 @@ func (entry *Entry) DecodeAttributesAndChunks(blob []byte) error { entry.Attr = PbToEntryAttribute(message.Attributes) + entry.Extended = message.Extended + entry.Chunks = message.Chunks return nil @@ -84,6 +89,10 @@ func EqualEntry(a, b *Entry) bool { return false } + if !eq(a.Extended, b.Extended) { + return false + } + for i := 0; i < len(a.Chunks); i++ { if !proto.Equal(a.Chunks[i], b.Chunks[i]) { return false @@ -91,3 +100,17 @@ func EqualEntry(a, b *Entry) bool { } return true } + +func eq(a, b map[string][]byte) bool { + if len(a) != len(b) { + return false + } + + for k, v := range a { + if w, ok := b[k]; !ok || !bytes.Equal(v, w) { + return false + } + } + + return true +} diff --git a/weed/filer2/etcd/etcd_store.go b/weed/filer2/etcd/etcd_store.go index 1b0f928d0..0f0c01426 100644 --- 
a/weed/filer2/etcd/etcd_store.go +++ b/weed/filer2/etcd/etcd_store.go @@ -28,13 +28,13 @@ func (store *EtcdStore) GetName() string { return "etcd" } -func (store *EtcdStore) Initialize(configuration weed_util.Configuration) (err error) { - servers := configuration.GetString("servers") +func (store *EtcdStore) Initialize(configuration weed_util.Configuration, prefix string) (err error) { + servers := configuration.GetString(prefix + "servers") if servers == "" { servers = "localhost:2379" } - timeout := configuration.GetString("timeout") + timeout := configuration.GetString(prefix + "timeout") if timeout == "" { timeout = "3s" } @@ -123,6 +123,16 @@ func (store *EtcdStore) DeleteEntry(ctx context.Context, fullpath filer2.FullPat return nil } +func (store *EtcdStore) DeleteFolderChildren(ctx context.Context, fullpath filer2.FullPath) (err error) { + directoryPrefix := genDirectoryKeyPrefix(fullpath, "") + + if _, err := store.client.Delete(ctx, string(directoryPrefix), clientv3.WithPrefix()); err != nil { + return fmt.Errorf("deleteFolderChildren %s : %v", fullpath, err) + } + + return nil +} + func (store *EtcdStore) ListDirectoryEntries( ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool, limit int, ) (entries []*filer2.Entry, err error) { diff --git a/weed/filer2/filechunks_test.go b/weed/filer2/filechunks_test.go index e75e60753..bb4a6c74d 100644 --- a/weed/filer2/filechunks_test.go +++ b/weed/filer2/filechunks_test.go @@ -331,6 +331,42 @@ func TestChunksReading(t *testing.T) { {Offset: 0, Size: 100, FileId: "asdf", LogicOffset: 100}, }, }, + // case 8: edge cases + { + Chunks: []*filer_pb.FileChunk{ + {Offset: 0, Size: 100, FileId: "abc", Mtime: 123}, + {Offset: 90, Size: 200, FileId: "asdf", Mtime: 134}, + {Offset: 190, Size: 300, FileId: "fsad", Mtime: 353}, + }, + Offset: 0, + Size: 300, + Expected: []*ChunkView{ + {Offset: 0, Size: 90, FileId: "abc", LogicOffset: 0}, + {Offset: 0, Size: 100, FileId: "asdf", LogicOffset: 90}, + {Offset: 0, Size: 110, FileId: "fsad", LogicOffset: 190}, + }, + }, + // case 9: edge cases + { + Chunks: []*filer_pb.FileChunk{ + {Offset: 0, Size: 43175947, FileId: "2,111fc2cbfac1", Mtime: 1}, + {Offset: 43175936, Size: 52981771 - 43175936, FileId: "2,112a36ea7f85", Mtime: 2}, + {Offset: 52981760, Size: 72564747 - 52981760, FileId: "4,112d5f31c5e7", Mtime: 3}, + {Offset: 72564736, Size: 133255179 - 72564736, FileId: "1,113245f0cdb6", Mtime: 4}, + {Offset: 133255168, Size: 137269259 - 133255168, FileId: "3,1141a70733b5", Mtime: 5}, + {Offset: 137269248, Size: 153578836 - 137269248, FileId: "1,114201d5bbdb", Mtime: 6}, + }, + Offset: 0, + Size: 153578836, + Expected: []*ChunkView{ + {Offset: 0, Size: 43175936, FileId: "2,111fc2cbfac1", LogicOffset: 0}, + {Offset: 0, Size: 52981760 - 43175936, FileId: "2,112a36ea7f85", LogicOffset: 43175936}, + {Offset: 0, Size: 72564736 - 52981760, FileId: "4,112d5f31c5e7", LogicOffset: 52981760}, + {Offset: 0, Size: 133255168 - 72564736, FileId: "1,113245f0cdb6", LogicOffset: 72564736}, + {Offset: 0, Size: 137269248 - 133255168, FileId: "3,1141a70733b5", LogicOffset: 133255168}, + {Offset: 0, Size: 153578836 - 137269248, FileId: "1,114201d5bbdb", LogicOffset: 137269248}, + }, + }, } for i, testcase := range testcases { diff --git a/weed/filer2/filer.go b/weed/filer2/filer.go index 672295dea..a0af942e0 100644 --- a/weed/filer2/filer.go +++ b/weed/filer2/filer.go @@ -3,18 +3,21 @@ package filer2 import ( "context" "fmt" - "google.golang.org/grpc" - "math" "os" "path/filepath" "strings" "time" + 
"google.golang.org/grpc" + + "github.com/karlseguin/ccache" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/wdclient" - "github.com/karlseguin/ccache" ) +const PaginationSize = 1024 * 256 + var ( OS_UID = uint32(os.Getuid()) OS_GID = uint32(os.Getgid()) @@ -32,7 +35,7 @@ func NewFiler(masters []string, grpcDialOption grpc.DialOption) *Filer { f := &Filer{ directoryCache: ccache.New(ccache.Configure().MaxSize(1000).ItemsToPrune(100)), MasterClient: wdclient.NewMasterClient(context.Background(), grpcDialOption, "filer", masters), - fileIdDeletionChan: make(chan string, 4096), + fileIdDeletionChan: make(chan string, PaginationSize), GrpcDialOption: grpcDialOption, } @@ -69,7 +72,7 @@ func (f *Filer) RollbackTransaction(ctx context.Context) error { return f.store.RollbackTransaction(ctx) } -func (f *Filer) CreateEntry(ctx context.Context, entry *Entry) error { +func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool) error { if string(entry.FullPath) == "/" { return nil @@ -93,7 +96,7 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry) error { glog.V(4).Infof("find uncached directory: %s", dirPath) dirEntry, _ = f.FindEntry(ctx, FullPath(dirPath)) } else { - glog.V(4).Infof("found cached directory: %s", dirPath) + // glog.V(4).Infof("found cached directory: %s", dirPath) } // no such existing directory @@ -117,6 +120,7 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry) error { mkdirErr := f.store.InsertEntry(ctx, dirEntry) if mkdirErr != nil { if _, err := f.FindEntry(ctx, FullPath(dirPath)); err == ErrNotFound { + glog.V(3).Infof("mkdir %s: %v", dirPath, mkdirErr) return fmt.Errorf("mkdir %s: %v", dirPath, mkdirErr) } } else { @@ -124,6 +128,7 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry) error { } } else if !dirEntry.IsDirectory() { + glog.Errorf("CreateEntry %s: %s should be a directory", entry.FullPath, dirPath) return fmt.Errorf("%s is a file", dirPath) } @@ -138,6 +143,7 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry) error { } if lastDirectoryEntry == nil { + glog.Errorf("CreateEntry %s: lastDirectoryEntry is nil", entry.FullPath) return fmt.Errorf("parent folder not found: %v", entry.FullPath) } @@ -151,12 +157,17 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry) error { oldEntry, _ := f.FindEntry(ctx, entry.FullPath) + glog.V(4).Infof("CreateEntry %s: old entry: %v exclusive:%v", entry.FullPath, oldEntry, o_excl) if oldEntry == nil { if err := f.store.InsertEntry(ctx, entry); err != nil { glog.Errorf("insert entry %s: %v", entry.FullPath, err) return fmt.Errorf("insert entry %s: %v", entry.FullPath, err) } } else { + if o_excl { + glog.V(3).Infof("EEXIST: entry %s already exists", entry.FullPath) + return fmt.Errorf("EEXIST: entry %s already exists", entry.FullPath) + } if err := f.UpdateEntry(ctx, oldEntry, entry); err != nil { glog.Errorf("update entry %s: %v", entry.FullPath, err) return fmt.Errorf("update entry %s: %v", entry.FullPath, err) @@ -167,6 +178,8 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry) error { f.deleteChunksIfNotNew(oldEntry, entry) + glog.V(4).Infof("CreateEntry %s: created", entry.FullPath) + return nil } @@ -203,67 +216,6 @@ func (f *Filer) FindEntry(ctx context.Context, p FullPath) (entry *Entry, err er return f.store.FindEntry(ctx, p) } -func (f *Filer) DeleteEntryMetaAndData(ctx context.Context, p FullPath, isRecursive bool, ignoreRecursiveError, shouldDeleteChunks bool) (err error) { - entry, err := 
f.FindEntry(ctx, p) - if err != nil { - return err - } - - if entry.IsDirectory() { - limit := int(1) - if isRecursive { - limit = math.MaxInt32 - } - lastFileName := "" - includeLastFile := false - for limit > 0 { - entries, err := f.ListDirectoryEntries(ctx, p, lastFileName, includeLastFile, 1024) - if err != nil { - glog.Errorf("list folder %s: %v", p, err) - return fmt.Errorf("list folder %s: %v", p, err) - } - - if len(entries) == 0 { - break - } - - if isRecursive { - for _, sub := range entries { - lastFileName = sub.Name() - err = f.DeleteEntryMetaAndData(ctx, sub.FullPath, isRecursive, ignoreRecursiveError, shouldDeleteChunks) - if err != nil && !ignoreRecursiveError { - return err - } - limit-- - if limit <= 0 { - break - } - } - } - - if len(entries) < 1024 { - break - } - } - - f.cacheDelDirectory(string(p)) - - } - - if shouldDeleteChunks { - f.DeleteChunks(p, entry.Chunks) - } - - if p == "/" { - return nil - } - glog.V(3).Infof("deleting entry %v", p) - - f.NotifyUpdateEvent(entry, nil, shouldDeleteChunks) - - return f.store.DeleteEntry(ctx, p) -} - func (f *Filer) ListDirectoryEntries(ctx context.Context, p FullPath, startFileName string, inclusive bool, limit int) ([]*Entry, error) { if strings.HasSuffix(string(p), "/") && len(p) > 1 { p = p[0 : len(p)-1] diff --git a/weed/filer2/filer_client_util.go b/weed/filer2/filer_client_util.go index 7e093eea2..af804b909 100644 --- a/weed/filer2/filer_client_util.go +++ b/weed/filer2/filer_client_util.go @@ -3,6 +3,8 @@ package filer2 import ( "context" "fmt" + "io" + "math" "strings" "sync" @@ -20,10 +22,10 @@ func VolumeId(fileId string) string { } type FilerClient interface { - WithFilerClient(ctx context.Context, fn func(filer_pb.SeaweedFilerClient) error) error + WithFilerClient(ctx context.Context, fn func(context.Context, filer_pb.SeaweedFilerClient) error) error } -func ReadIntoBuffer(ctx context.Context, filerClient FilerClient, fullFilePath string, buff []byte, chunkViews []*ChunkView, baseOffset int64) (totalRead int64, err error) { +func ReadIntoBuffer(ctx context.Context, filerClient FilerClient, fullFilePath FullPath, buff []byte, chunkViews []*ChunkView, baseOffset int64) (totalRead int64, err error) { var vids []string for _, chunkView := range chunkViews { vids = append(vids, VolumeId(chunkView.FileId)) @@ -31,7 +33,7 @@ func ReadIntoBuffer(ctx context.Context, filerClient FilerClient, fullFilePath s vid2Locations := make(map[string]*filer_pb.Locations) - err = filerClient.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + err = filerClient.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { glog.V(4).Infof("read fh lookup volume id locations: %v", vids) resp, err := client.LookupVolume(ctx, &filer_pb.LookupVolumeRequest{ @@ -91,68 +93,75 @@ func ReadIntoBuffer(ctx context.Context, filerClient FilerClient, fullFilePath s return } -func GetEntry(ctx context.Context, filerClient FilerClient, fullFilePath string) (entry *filer_pb.Entry, err error) { +func GetEntry(ctx context.Context, filerClient FilerClient, fullFilePath FullPath) (entry *filer_pb.Entry, err error) { - dir, name := FullPath(fullFilePath).DirAndName() + dir, name := fullFilePath.DirAndName() - err = filerClient.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + err = filerClient.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { request := &filer_pb.LookupDirectoryEntryRequest{ Directory: dir, Name: name, } - glog.V(3).Infof("read %s 
request: %v", fullFilePath, request) + // glog.V(3).Infof("read %s request: %v", fullFilePath, request) resp, err := client.LookupDirectoryEntry(ctx, request) if err != nil { if err == ErrNotFound || strings.Contains(err.Error(), ErrNotFound.Error()) { return nil } - glog.V(3).Infof("read %s attr %v: %v", fullFilePath, request, err) + glog.V(3).Infof("read %s %v: %v", fullFilePath, resp, err) return err } - if resp.Entry != nil { - entry = resp.Entry + if resp.Entry == nil { + // glog.V(3).Infof("read %s entry: %v", fullFilePath, entry) + return nil } + entry = resp.Entry return nil }) return } -func ReadDirAllEntries(ctx context.Context, filerClient FilerClient, fullDirPath string, fn func(entry *filer_pb.Entry)) (err error) { +func ReadDirAllEntries(ctx context.Context, filerClient FilerClient, fullDirPath FullPath, prefix string, fn func(entry *filer_pb.Entry, isLast bool)) (err error) { - err = filerClient.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { - - paginationLimit := 1024 + err = filerClient.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { lastEntryName := "" + request := &filer_pb.ListEntriesRequest{ + Directory: string(fullDirPath), + Prefix: prefix, + StartFromFileName: lastEntryName, + Limit: math.MaxUint32, + } + + glog.V(3).Infof("read directory: %v", request) + stream, err := client.ListEntries(ctx, request) + if err != nil { + return fmt.Errorf("list %s: %v", fullDirPath, err) + } + + var prevEntry *filer_pb.Entry for { - - request := &filer_pb.ListEntriesRequest{ - Directory: fullDirPath, - StartFromFileName: lastEntryName, - Limit: uint32(paginationLimit), + resp, recvErr := stream.Recv() + if recvErr != nil { + if recvErr == io.EOF { + if prevEntry != nil { + fn(prevEntry, true) + } + break + } else { + return recvErr + } } - - glog.V(3).Infof("read directory: %v", request) - resp, err := client.ListEntries(ctx, request) - if err != nil { - return fmt.Errorf("list %s: %v", fullDirPath, err) + if prevEntry != nil { + fn(prevEntry, false) } - - for _, entry := range resp.Entries { - fn(entry) - lastEntryName = entry.Name - } - - if len(resp.Entries) < paginationLimit { - break - } - + prevEntry = resp.Entry } return nil diff --git a/weed/filer2/filer_delete_entry.go b/weed/filer2/filer_delete_entry.go new file mode 100644 index 000000000..75a09e7ef --- /dev/null +++ b/weed/filer2/filer_delete_entry.go @@ -0,0 +1,102 @@ +package filer2 + +import ( + "context" + "fmt" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" +) + +func (f *Filer) DeleteEntryMetaAndData(ctx context.Context, p FullPath, isRecursive bool, ignoreRecursiveError, shouldDeleteChunks bool) (err error) { + if p == "/" { + return nil + } + + entry, findErr := f.FindEntry(ctx, p) + if findErr != nil { + return findErr + } + + var chunks []*filer_pb.FileChunk + chunks = append(chunks, entry.Chunks...) + if entry.IsDirectory() { + // delete the folder children, not including the folder itself + var dirChunks []*filer_pb.FileChunk + dirChunks, err = f.doBatchDeleteFolderMetaAndData(ctx, entry, isRecursive, ignoreRecursiveError, shouldDeleteChunks) + if err != nil { + return fmt.Errorf("delete directory %s: %v", p, err) + } + chunks = append(chunks, dirChunks...) 
+ f.cacheDelDirectory(string(p)) + } + // delete the file or folder + err = f.doDeleteEntryMetaAndData(ctx, entry, shouldDeleteChunks) + if err != nil { + return fmt.Errorf("delete file %s: %v", p, err) + } + + if shouldDeleteChunks { + go f.DeleteChunks(chunks) + } + + return nil +} + +func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry, isRecursive bool, ignoreRecursiveError, shouldDeleteChunks bool) (chunks []*filer_pb.FileChunk, err error) { + + lastFileName := "" + includeLastFile := false + for { + entries, err := f.ListDirectoryEntries(ctx, entry.FullPath, lastFileName, includeLastFile, PaginationSize) + if err != nil { + glog.Errorf("list folder %s: %v", entry.FullPath, err) + return nil, fmt.Errorf("list folder %s: %v", entry.FullPath, err) + } + if lastFileName == "" && !isRecursive && len(entries) > 0 { + // only for first iteration in the loop + return nil, fmt.Errorf("fail to delete non-empty folder: %s", entry.FullPath) + } + + for _, sub := range entries { + lastFileName = sub.Name() + var dirChunks []*filer_pb.FileChunk + if sub.IsDirectory() { + dirChunks, err = f.doBatchDeleteFolderMetaAndData(ctx, sub, isRecursive, ignoreRecursiveError, shouldDeleteChunks) + } + if err != nil && !ignoreRecursiveError { + return nil, err + } + if shouldDeleteChunks { + chunks = append(chunks, dirChunks...) + } + } + + if len(entries) < PaginationSize { + break + } + } + + f.cacheDelDirectory(string(entry.FullPath)) + + glog.V(3).Infof("deleting directory %v", entry.FullPath) + + if storeDeletionErr := f.store.DeleteFolderChildren(ctx, entry.FullPath); storeDeletionErr != nil { + return nil, fmt.Errorf("filer store delete: %v", storeDeletionErr) + } + f.NotifyUpdateEvent(entry, nil, shouldDeleteChunks) + + return chunks, nil +} + +func (f *Filer) doDeleteEntryMetaAndData(ctx context.Context, entry *Entry, shouldDeleteChunks bool) (err error) { + + glog.V(3).Infof("deleting entry %v", entry.FullPath) + + if storeDeletionErr := f.store.DeleteEntry(ctx, entry.FullPath); storeDeletionErr != nil { + return fmt.Errorf("filer store delete: %v", storeDeletionErr) + } + f.NotifyUpdateEvent(entry, nil, shouldDeleteChunks) + + return nil +} diff --git a/weed/filer2/filer_deletion.go b/weed/filer2/filer_deletion.go index 25e27e504..9937685f7 100644 --- a/weed/filer2/filer_deletion.go +++ b/weed/filer2/filer_deletion.go @@ -51,9 +51,8 @@ func (f *Filer) loopProcessingDeletion() { } } -func (f *Filer) DeleteChunks(fullpath FullPath, chunks []*filer_pb.FileChunk) { +func (f *Filer) DeleteChunks(chunks []*filer_pb.FileChunk) { for _, chunk := range chunks { - glog.V(3).Infof("deleting %s chunk %s", fullpath, chunk.String()) f.fileIdDeletionChan <- chunk.GetFileIdString() } } @@ -70,7 +69,7 @@ func (f *Filer) deleteChunksIfNotNew(oldEntry, newEntry *Entry) { return } if newEntry == nil { - f.DeleteChunks(oldEntry.FullPath, oldEntry.Chunks) + f.DeleteChunks(oldEntry.Chunks) } var toDelete []*filer_pb.FileChunk @@ -84,5 +83,5 @@ func (f *Filer) deleteChunksIfNotNew(oldEntry, newEntry *Entry) { toDelete = append(toDelete, oldChunk) } } - f.DeleteChunks(oldEntry.FullPath, toDelete) + f.DeleteChunks(toDelete) } diff --git a/weed/filer2/filerstore.go b/weed/filer2/filerstore.go index 8caa44ee2..ae25534ed 100644 --- a/weed/filer2/filerstore.go +++ b/weed/filer2/filerstore.go @@ -14,12 +14,13 @@ type FilerStore interface { // GetName gets the name to locate the configuration in filer.toml file GetName() string // Initialize initializes the file store - Initialize(configuration 
util.Configuration) error + Initialize(configuration util.Configuration, prefix string) error InsertEntry(context.Context, *Entry) error UpdateEntry(context.Context, *Entry) (err error) // err == filer2.ErrNotFound if not found FindEntry(context.Context, FullPath) (entry *Entry, err error) DeleteEntry(context.Context, FullPath) (err error) + DeleteFolderChildren(context.Context, FullPath) (err error) ListDirectoryEntries(ctx context.Context, dirPath FullPath, startFileName string, includeStartFile bool, limit int) ([]*Entry, error) BeginTransaction(ctx context.Context) (context.Context, error) @@ -46,8 +47,8 @@ func (fsw *FilerStoreWrapper) GetName() string { return fsw.actualStore.GetName() } -func (fsw *FilerStoreWrapper) Initialize(configuration util.Configuration) error { - return fsw.actualStore.Initialize(configuration) +func (fsw *FilerStoreWrapper) Initialize(configuration util.Configuration, prefix string) error { + return fsw.actualStore.Initialize(configuration, prefix) } func (fsw *FilerStoreWrapper) InsertEntry(ctx context.Context, entry *Entry) error { @@ -97,6 +98,16 @@ func (fsw *FilerStoreWrapper) DeleteEntry(ctx context.Context, fp FullPath) (err return fsw.actualStore.DeleteEntry(ctx, fp) } +func (fsw *FilerStoreWrapper) DeleteFolderChildren(ctx context.Context, fp FullPath) (err error) { + stats.FilerStoreCounter.WithLabelValues(fsw.actualStore.GetName(), "deleteFolderChildren").Inc() + start := time.Now() + defer func() { + stats.FilerStoreHistogram.WithLabelValues(fsw.actualStore.GetName(), "deleteFolderChildren").Observe(time.Since(start).Seconds()) + }() + + return fsw.actualStore.DeleteFolderChildren(ctx, fp) +} + func (fsw *FilerStoreWrapper) ListDirectoryEntries(ctx context.Context, dirPath FullPath, startFileName string, includeStartFile bool, limit int) ([]*Entry, error) { stats.FilerStoreCounter.WithLabelValues(fsw.actualStore.GetName(), "list").Inc() start := time.Now() diff --git a/weed/filer2/fullpath.go b/weed/filer2/fullpath.go index 191e51cf3..133069f93 100644 --- a/weed/filer2/fullpath.go +++ b/weed/filer2/fullpath.go @@ -3,6 +3,8 @@ package filer2 import ( "path/filepath" "strings" + + "github.com/chrislusf/seaweedfs/weed/util" ) type FullPath string @@ -34,3 +36,7 @@ func (fp FullPath) Child(name string) FullPath { } return FullPath(dir + "/" + name) } + +func (fp FullPath) AsInode() uint64 { + return uint64(util.HashStringToLong(string(fp))) +} diff --git a/weed/filer2/leveldb/leveldb_store.go b/weed/filer2/leveldb/leveldb_store.go index d00eba859..44e6ac0eb 100644 --- a/weed/filer2/leveldb/leveldb_store.go +++ b/weed/filer2/leveldb/leveldb_store.go @@ -5,12 +5,13 @@ import ( "context" "fmt" - "github.com/chrislusf/seaweedfs/weed/filer2" - "github.com/chrislusf/seaweedfs/weed/glog" - weed_util "github.com/chrislusf/seaweedfs/weed/util" "github.com/syndtr/goleveldb/leveldb" "github.com/syndtr/goleveldb/leveldb/opt" leveldb_util "github.com/syndtr/goleveldb/leveldb/util" + + "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/glog" + weed_util "github.com/chrislusf/seaweedfs/weed/util" ) const ( @@ -29,8 +30,8 @@ func (store *LevelDBStore) GetName() string { return "leveldb" } -func (store *LevelDBStore) Initialize(configuration weed_util.Configuration) (err error) { - dir := configuration.GetString("dir") +func (store *LevelDBStore) Initialize(configuration weed_util.Configuration, prefix string) (err error) { + dir := configuration.GetString(prefix + "dir") return store.initialize(dir) } @@ -123,6 +124,34 @@ func 
(store *LevelDBStore) DeleteEntry(ctx context.Context, fullpath filer2.Full return nil } +func (store *LevelDBStore) DeleteFolderChildren(ctx context.Context, fullpath filer2.FullPath) (err error) { + + batch := new(leveldb.Batch) + + directoryPrefix := genDirectoryKeyPrefix(fullpath, "") + iter := store.db.NewIterator(&leveldb_util.Range{Start: directoryPrefix}, nil) + for iter.Next() { + key := iter.Key() + if !bytes.HasPrefix(key, directoryPrefix) { + break + } + fileName := getNameFromKey(key) + if fileName == "" { + continue + } + batch.Delete([]byte(genKey(string(fullpath), fileName))) + } + iter.Release() + + err = store.db.Write(batch, nil) + + if err != nil { + return fmt.Errorf("delete %s : %v", fullpath, err) + } + + return nil +} + func (store *LevelDBStore) ListDirectoryEntries(ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer2.Entry, err error) { diff --git a/weed/filer2/leveldb/leveldb_store_test.go b/weed/filer2/leveldb/leveldb_store_test.go index 904de8c97..983e1cbe9 100644 --- a/weed/filer2/leveldb/leveldb_store_test.go +++ b/weed/filer2/leveldb/leveldb_store_test.go @@ -30,7 +30,7 @@ func TestCreateAndFind(t *testing.T) { }, } - if err := filer.CreateEntry(ctx, entry1); err != nil { + if err := filer.CreateEntry(ctx, entry1, false); err != nil { t.Errorf("create entry %v: %v", entry1.FullPath, err) return } diff --git a/weed/filer2/leveldb2/leveldb2_store.go b/weed/filer2/leveldb2/leveldb2_store.go index 4b47d2eb3..358d4d92a 100644 --- a/weed/filer2/leveldb2/leveldb2_store.go +++ b/weed/filer2/leveldb2/leveldb2_store.go @@ -8,12 +8,13 @@ import ( "io" "os" - "github.com/chrislusf/seaweedfs/weed/filer2" - "github.com/chrislusf/seaweedfs/weed/glog" - weed_util "github.com/chrislusf/seaweedfs/weed/util" "github.com/syndtr/goleveldb/leveldb" "github.com/syndtr/goleveldb/leveldb/opt" leveldb_util "github.com/syndtr/goleveldb/leveldb/util" + + "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/glog" + weed_util "github.com/chrislusf/seaweedfs/weed/util" ) func init() { @@ -29,8 +30,8 @@ func (store *LevelDB2Store) GetName() string { return "leveldb2" } -func (store *LevelDB2Store) Initialize(configuration weed_util.Configuration) (err error) { - dir := configuration.GetString("dir") +func (store *LevelDB2Store) Initialize(configuration weed_util.Configuration, prefix string) (err error) { + dir := configuration.GetString(prefix + "dir") return store.initialize(dir, 8) } @@ -134,6 +135,34 @@ func (store *LevelDB2Store) DeleteEntry(ctx context.Context, fullpath filer2.Ful return nil } +func (store *LevelDB2Store) DeleteFolderChildren(ctx context.Context, fullpath filer2.FullPath) (err error) { + directoryPrefix, partitionId := genDirectoryKeyPrefix(fullpath, "", store.dbCount) + + batch := new(leveldb.Batch) + + iter := store.dbs[partitionId].NewIterator(&leveldb_util.Range{Start: directoryPrefix}, nil) + for iter.Next() { + key := iter.Key() + if !bytes.HasPrefix(key, directoryPrefix) { + break + } + fileName := getNameFromKey(key) + if fileName == "" { + continue + } + batch.Delete(append(directoryPrefix, []byte(fileName)...)) + } + iter.Release() + + err = store.dbs[partitionId].Write(batch, nil) + + if err != nil { + return fmt.Errorf("delete %s : %v", fullpath, err) + } + + return nil +} + func (store *LevelDB2Store) ListDirectoryEntries(ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer2.Entry, err error) { diff 
--git a/weed/filer2/leveldb2/leveldb2_store_test.go b/weed/filer2/leveldb2/leveldb2_store_test.go index e28ef7dac..58637b7b6 100644 --- a/weed/filer2/leveldb2/leveldb2_store_test.go +++ b/weed/filer2/leveldb2/leveldb2_store_test.go @@ -30,7 +30,7 @@ func TestCreateAndFind(t *testing.T) { }, } - if err := filer.CreateEntry(ctx, entry1); err != nil { + if err := filer.CreateEntry(ctx, entry1, false); err != nil { t.Errorf("create entry %v: %v", entry1.FullPath, err) return } diff --git a/weed/filer2/memdb/memdb_store.go b/weed/filer2/memdb/memdb_store.go deleted file mode 100644 index 9c10a5472..000000000 --- a/weed/filer2/memdb/memdb_store.go +++ /dev/null @@ -1,132 +0,0 @@ -package memdb - -import ( - "context" - "fmt" - "github.com/chrislusf/seaweedfs/weed/filer2" - "github.com/chrislusf/seaweedfs/weed/util" - "github.com/google/btree" - "strings" - "sync" -) - -func init() { - filer2.Stores = append(filer2.Stores, &MemDbStore{}) -} - -type MemDbStore struct { - tree *btree.BTree - treeLock sync.Mutex -} - -type entryItem struct { - *filer2.Entry -} - -func (a entryItem) Less(b btree.Item) bool { - return strings.Compare(string(a.FullPath), string(b.(entryItem).FullPath)) < 0 -} - -func (store *MemDbStore) GetName() string { - return "memory" -} - -func (store *MemDbStore) Initialize(configuration util.Configuration) (err error) { - store.tree = btree.New(8) - return nil -} - -func (store *MemDbStore) BeginTransaction(ctx context.Context) (context.Context, error) { - return ctx, nil -} -func (store *MemDbStore) CommitTransaction(ctx context.Context) error { - return nil -} -func (store *MemDbStore) RollbackTransaction(ctx context.Context) error { - return nil -} - -func (store *MemDbStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) { - // println("inserting", entry.FullPath) - store.treeLock.Lock() - store.tree.ReplaceOrInsert(entryItem{entry}) - store.treeLock.Unlock() - return nil -} - -func (store *MemDbStore) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) { - if _, err = store.FindEntry(ctx, entry.FullPath); err != nil { - return fmt.Errorf("no such file %s : %v", entry.FullPath, err) - } - store.treeLock.Lock() - store.tree.ReplaceOrInsert(entryItem{entry}) - store.treeLock.Unlock() - return nil -} - -func (store *MemDbStore) FindEntry(ctx context.Context, fullpath filer2.FullPath) (entry *filer2.Entry, err error) { - item := store.tree.Get(entryItem{&filer2.Entry{FullPath: fullpath}}) - if item == nil { - return nil, filer2.ErrNotFound - } - entry = item.(entryItem).Entry - return entry, nil -} - -func (store *MemDbStore) DeleteEntry(ctx context.Context, fullpath filer2.FullPath) (err error) { - store.treeLock.Lock() - store.tree.Delete(entryItem{&filer2.Entry{FullPath: fullpath}}) - store.treeLock.Unlock() - return nil -} - -func (store *MemDbStore) ListDirectoryEntries(ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer2.Entry, err error) { - - startFrom := string(fullpath) - if startFileName != "" { - startFrom = startFrom + "/" + startFileName - } - - store.tree.AscendGreaterOrEqual(entryItem{&filer2.Entry{FullPath: filer2.FullPath(startFrom)}}, - func(item btree.Item) bool { - if limit <= 0 { - return false - } - entry := item.(entryItem).Entry - // println("checking", entry.FullPath) - - if entry.FullPath == fullpath { - // skipping the current directory - // println("skipping the folder", entry.FullPath) - return true - } - - dir, name := entry.FullPath.DirAndName() - if 
name == startFileName { - if inclusive { - limit-- - entries = append(entries, entry) - } - return true - } - - // only iterate the same prefix - if !strings.HasPrefix(string(entry.FullPath), string(fullpath)) { - // println("breaking from", entry.FullPath) - return false - } - - if dir != string(fullpath) { - // this could be items in deeper directories - // println("skipping deeper folder", entry.FullPath) - return true - } - // now process the directory items - // println("adding entry", entry.FullPath) - limit-- - entries = append(entries, entry) - return true - }, - ) - return entries, nil -} diff --git a/weed/filer2/memdb/memdb_store_test.go b/weed/filer2/memdb/memdb_store_test.go deleted file mode 100644 index 3fd806aeb..000000000 --- a/weed/filer2/memdb/memdb_store_test.go +++ /dev/null @@ -1,149 +0,0 @@ -package memdb - -import ( - "context" - "github.com/chrislusf/seaweedfs/weed/filer2" - "testing" -) - -func TestCreateAndFind(t *testing.T) { - filer := filer2.NewFiler(nil, nil) - store := &MemDbStore{} - store.Initialize(nil) - filer.SetStore(store) - filer.DisableDirectoryCache() - - ctx := context.Background() - - fullpath := filer2.FullPath("/home/chris/this/is/one/file1.jpg") - - entry1 := &filer2.Entry{ - FullPath: fullpath, - Attr: filer2.Attr{ - Mode: 0440, - Uid: 1234, - Gid: 5678, - }, - } - - if err := filer.CreateEntry(ctx, entry1); err != nil { - t.Errorf("create entry %v: %v", entry1.FullPath, err) - return - } - - entry, err := filer.FindEntry(ctx, fullpath) - - if err != nil { - t.Errorf("find entry: %v", err) - return - } - - if entry.FullPath != entry1.FullPath { - t.Errorf("find wrong entry: %v", entry.FullPath) - return - } - -} - -func TestCreateFileAndList(t *testing.T) { - filer := filer2.NewFiler(nil, nil) - store := &MemDbStore{} - store.Initialize(nil) - filer.SetStore(store) - filer.DisableDirectoryCache() - - ctx := context.Background() - - entry1 := &filer2.Entry{ - FullPath: filer2.FullPath("/home/chris/this/is/one/file1.jpg"), - Attr: filer2.Attr{ - Mode: 0440, - Uid: 1234, - Gid: 5678, - }, - } - - entry2 := &filer2.Entry{ - FullPath: filer2.FullPath("/home/chris/this/is/one/file2.jpg"), - Attr: filer2.Attr{ - Mode: 0440, - Uid: 1234, - Gid: 5678, - }, - } - - filer.CreateEntry(ctx, entry1) - filer.CreateEntry(ctx, entry2) - - // checking the 2 files - entries, err := filer.ListDirectoryEntries(ctx, filer2.FullPath("/home/chris/this/is/one/"), "", false, 100) - - if err != nil { - t.Errorf("list entries: %v", err) - return - } - - if len(entries) != 2 { - t.Errorf("list entries count: %v", len(entries)) - return - } - - if entries[0].FullPath != entry1.FullPath { - t.Errorf("find wrong entry 1: %v", entries[0].FullPath) - return - } - - if entries[1].FullPath != entry2.FullPath { - t.Errorf("find wrong entry 2: %v", entries[1].FullPath) - return - } - - // checking the offset - entries, err = filer.ListDirectoryEntries(ctx, filer2.FullPath("/home/chris/this/is/one/"), "file1.jpg", false, 100) - if len(entries) != 1 { - t.Errorf("list entries count: %v", len(entries)) - return - } - - // checking one upper directory - entries, _ = filer.ListDirectoryEntries(ctx, filer2.FullPath("/home/chris/this/is"), "", false, 100) - if len(entries) != 1 { - t.Errorf("list entries count: %v", len(entries)) - return - } - - // checking root directory - entries, _ = filer.ListDirectoryEntries(ctx, filer2.FullPath("/"), "", false, 100) - if len(entries) != 1 { - t.Errorf("list entries count: %v", len(entries)) - return - } - - // add file3 - file3Path := 
filer2.FullPath("/home/chris/this/is/file3.jpg") - entry3 := &filer2.Entry{ - FullPath: file3Path, - Attr: filer2.Attr{ - Mode: 0440, - Uid: 1234, - Gid: 5678, - }, - } - filer.CreateEntry(ctx, entry3) - - // checking one upper directory - entries, _ = filer.ListDirectoryEntries(ctx, filer2.FullPath("/home/chris/this/is"), "", false, 100) - if len(entries) != 2 { - t.Errorf("list entries count: %v", len(entries)) - return - } - - // delete file and count - filer.DeleteEntryMetaAndData(ctx, file3Path, false, false, false) - entries, _ = filer.ListDirectoryEntries(ctx, filer2.FullPath("/home/chris/this/is"), "", false, 100) - if len(entries) != 1 { - t.Errorf("list entries count: %v", len(entries)) - return - } - -} diff --git a/weed/filer2/mysql/mysql_store.go b/weed/filer2/mysql/mysql_store.go index e18299bd2..63d99cd9d 100644 --- a/weed/filer2/mysql/mysql_store.go +++ b/weed/filer2/mysql/mysql_store.go @@ -26,28 +26,35 @@ func (store *MysqlStore) GetName() string { return "mysql" } -func (store *MysqlStore) Initialize(configuration util.Configuration) (err error) { +func (store *MysqlStore) Initialize(configuration util.Configuration, prefix string) (err error) { return store.initialize( - configuration.GetString("username"), - configuration.GetString("password"), - configuration.GetString("hostname"), - configuration.GetInt("port"), - configuration.GetString("database"), - configuration.GetInt("connection_max_idle"), - configuration.GetInt("connection_max_open"), + configuration.GetString(prefix+"username"), + configuration.GetString(prefix+"password"), + configuration.GetString(prefix+"hostname"), + configuration.GetInt(prefix+"port"), + configuration.GetString(prefix+"database"), + configuration.GetInt(prefix+"connection_max_idle"), + configuration.GetInt(prefix+"connection_max_open"), + configuration.GetBool(prefix+"interpolateParams"), ) } -func (store *MysqlStore) initialize(user, password, hostname string, port int, database string, maxIdle, maxOpen int) (err error) { +func (store *MysqlStore) initialize(user, password, hostname string, port int, database string, maxIdle, maxOpen int, + interpolateParams bool) (err error) { store.SqlInsert = "INSERT INTO filemeta (dirhash,name,directory,meta) VALUES(?,?,?,?)" store.SqlUpdate = "UPDATE filemeta SET meta=? WHERE dirhash=? AND name=? AND directory=?" store.SqlFind = "SELECT meta FROM filemeta WHERE dirhash=? AND name=? AND directory=?" store.SqlDelete = "DELETE FROM filemeta WHERE dirhash=? AND name=? AND directory=?" + store.SqlDeleteFolderChildren = "DELETE FROM filemeta WHERE dirhash=? AND directory=?" store.SqlListExclusive = "SELECT NAME, meta FROM filemeta WHERE dirhash=? AND name>? AND directory=? ORDER BY NAME ASC LIMIT ?" store.SqlListInclusive = "SELECT NAME, meta FROM filemeta WHERE dirhash=? AND name>=? AND directory=? ORDER BY NAME ASC LIMIT ?" 
sqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, user, password, hostname, port, database) + if interpolateParams { + sqlUrl += "&interpolateParams=true" + } + var dbErr error store.DB, dbErr = sql.Open("mysql", sqlUrl) if dbErr != nil { diff --git a/weed/filer2/postgres/postgres_store.go b/weed/filer2/postgres/postgres_store.go index ffd3d1e01..27a0c2513 100644 --- a/weed/filer2/postgres/postgres_store.go +++ b/weed/filer2/postgres/postgres_store.go @@ -26,16 +26,16 @@ func (store *PostgresStore) GetName() string { return "postgres" } -func (store *PostgresStore) Initialize(configuration util.Configuration) (err error) { +func (store *PostgresStore) Initialize(configuration util.Configuration, prefix string) (err error) { return store.initialize( - configuration.GetString("username"), - configuration.GetString("password"), - configuration.GetString("hostname"), - configuration.GetInt("port"), - configuration.GetString("database"), - configuration.GetString("sslmode"), - configuration.GetInt("connection_max_idle"), - configuration.GetInt("connection_max_open"), + configuration.GetString(prefix+"username"), + configuration.GetString(prefix+"password"), + configuration.GetString(prefix+"hostname"), + configuration.GetInt(prefix+"port"), + configuration.GetString(prefix+"database"), + configuration.GetString(prefix+"sslmode"), + configuration.GetInt(prefix+"connection_max_idle"), + configuration.GetInt(prefix+"connection_max_open"), ) } @@ -45,6 +45,7 @@ func (store *PostgresStore) initialize(user, password, hostname string, port int store.SqlUpdate = "UPDATE filemeta SET meta=$1 WHERE dirhash=$2 AND name=$3 AND directory=$4" store.SqlFind = "SELECT meta FROM filemeta WHERE dirhash=$1 AND name=$2 AND directory=$3" store.SqlDelete = "DELETE FROM filemeta WHERE dirhash=$1 AND name=$2 AND directory=$3" + store.SqlDeleteFolderChildren = "DELETE FROM filemeta WHERE dirhash=$1 AND directory=$2" store.SqlListExclusive = "SELECT NAME, meta FROM filemeta WHERE dirhash=$1 AND name>$2 AND directory=$3 ORDER BY NAME ASC LIMIT $4" store.SqlListInclusive = "SELECT NAME, meta FROM filemeta WHERE dirhash=$1 AND name>=$2 AND directory=$3 ORDER BY NAME ASC LIMIT $4" diff --git a/weed/filer2/redis/redis_cluster_store.go b/weed/filer2/redis/redis_cluster_store.go index 11c315391..eaaecb740 100644 --- a/weed/filer2/redis/redis_cluster_store.go +++ b/weed/filer2/redis/redis_cluster_store.go @@ -18,17 +18,25 @@ func (store *RedisClusterStore) GetName() string { return "redis_cluster" } -func (store *RedisClusterStore) Initialize(configuration util.Configuration) (err error) { +func (store *RedisClusterStore) Initialize(configuration util.Configuration, prefix string) (err error) { + + configuration.SetDefault(prefix+"useReadOnly", true) + configuration.SetDefault(prefix+"routeByLatency", true) + return store.initialize( - configuration.GetStringSlice("addresses"), - configuration.GetString("password"), + configuration.GetStringSlice(prefix+"addresses"), + configuration.GetString(prefix+"password"), + configuration.GetBool(prefix+"useReadOnly"), + configuration.GetBool(prefix+"routeByLatency"), ) } -func (store *RedisClusterStore) initialize(addresses []string, password string) (err error) { +func (store *RedisClusterStore) initialize(addresses []string, password string, readOnly, routeByLatency bool) (err error) { store.Client = redis.NewClusterClient(&redis.ClusterOptions{ - Addrs: addresses, - Password: password, + Addrs: addresses, + Password: password, + ReadOnly: readOnly, + RouteByLatency: routeByLatency, }) 
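
Editor's note: `RedisClusterStore.Initialize` above calls `configuration.SetDefault` before reading `useReadOnly` and `routeByLatency`, so both options default to true yet stay overridable in `filer.toml`. A sketch of that default-then-read pattern, again with a map-backed stand-in for the real configuration type (the key names with a `"redis_cluster."` prefix are assumed for illustration):

```go
package main

import "fmt"

// conf is a map-backed stand-in; SetDefault only fills a key the user
// has not already set, which is what makes the defaults overridable.
type conf map[string]bool

func (c conf) SetDefault(key string, v bool) {
	if _, ok := c[key]; !ok {
		c[key] = v
	}
}

func (c conf) GetBool(key string) bool { return c[key] }

func main() {
	c := conf{"redis_cluster.routeByLatency": false} // user override
	c.SetDefault("redis_cluster.useReadOnly", true)
	c.SetDefault("redis_cluster.routeByLatency", true)
	fmt.Println(c.GetBool("redis_cluster.useReadOnly"))    // true (default applied)
	fmt.Println(c.GetBool("redis_cluster.routeByLatency")) // false (override kept)
}
```
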
return } diff --git a/weed/filer2/redis/redis_store.go b/weed/filer2/redis/redis_store.go index c56fa014c..9debdb070 100644 --- a/weed/filer2/redis/redis_store.go +++ b/weed/filer2/redis/redis_store.go @@ -18,11 +18,11 @@ func (store *RedisStore) GetName() string { return "redis" } -func (store *RedisStore) Initialize(configuration util.Configuration) (err error) { +func (store *RedisStore) Initialize(configuration util.Configuration, prefix string) (err error) { return store.initialize( - configuration.GetString("address"), - configuration.GetString("password"), - configuration.GetInt("database"), + configuration.GetString(prefix+"address"), + configuration.GetString(prefix+"password"), + configuration.GetInt(prefix+"database"), ) } diff --git a/weed/filer2/redis/universal_redis_store.go b/weed/filer2/redis/universal_redis_store.go index ce41d4d70..62257e91e 100644 --- a/weed/filer2/redis/universal_redis_store.go +++ b/weed/filer2/redis/universal_redis_store.go @@ -99,6 +99,24 @@ func (store *UniversalRedisStore) DeleteEntry(ctx context.Context, fullpath file return nil } +func (store *UniversalRedisStore) DeleteFolderChildren(ctx context.Context, fullpath filer2.FullPath) (err error) { + + members, err := store.Client.SMembers(genDirectoryListKey(string(fullpath))).Result() + if err != nil { + return fmt.Errorf("delete folder %s : %v", fullpath, err) + } + + for _, fileName := range members { + path := filer2.NewFullPath(string(fullpath), fileName) + _, err = store.Client.Del(string(path)).Result() + if err != nil { + return fmt.Errorf("delete %s in parent dir: %v", fullpath, err) + } + } + + return nil +} + func (store *UniversalRedisStore) ListDirectoryEntries(ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer2.Entry, err error) { diff --git a/weed/filer2/tikv/tikv_store.go b/weed/filer2/tikv/tikv_store.go index 8143130ab..24e05e3ad 100644 --- a/weed/filer2/tikv/tikv_store.go +++ b/weed/filer2/tikv/tikv_store.go @@ -1,3 +1,6 @@ +// +build !386 +// +build !arm + package tikv import ( @@ -27,8 +30,8 @@ func (store *TikvStore) GetName() string { return "tikv" } -func (store *TikvStore) Initialize(configuration weed_util.Configuration) (err error) { - pdAddr := configuration.GetString("pdAddress") +func (store *TikvStore) Initialize(configuration weed_util.Configuration, prefix string) (err error) { + pdAddr := configuration.GetString(prefix + "pdAddress") return store.initialize(pdAddr) } @@ -138,6 +141,38 @@ func (store *TikvStore) DeleteEntry(ctx context.Context, fullpath filer2.FullPat return nil } +func (store *TikvStore) DeleteFolderChildren(ctx context.Context, fullpath filer2.FullPath) (err error) { + + directoryPrefix := genDirectoryKeyPrefix(fullpath, "") + + tx := store.getTx(ctx) + + iter, err := tx.Iter(directoryPrefix, nil) + if err != nil { + return fmt.Errorf("deleteFolderChildren %s: %v", fullpath, err) + } + defer iter.Close() + for iter.Valid() { + key := iter.Key() + if !bytes.HasPrefix(key, directoryPrefix) { + break + } + fileName := getNameFromKey(key) + if fileName == "" { + iter.Next() + continue + } + + if err = tx.Delete(genKey(string(fullpath), fileName)); err != nil { + return fmt.Errorf("delete %s : %v", fullpath, err) + } + + iter.Next() + } + + return nil +} + func (store *TikvStore) ListDirectoryEntries(ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer2.Entry, err error) { diff --git a/weed/filer2/tikv/tikv_store_unsupported.go 
b/weed/filer2/tikv/tikv_store_unsupported.go new file mode 100644 index 000000000..daf29612e --- /dev/null +++ b/weed/filer2/tikv/tikv_store_unsupported.go @@ -0,0 +1,65 @@ +// +build 386 arm + +package tikv + +import ( + "context" + "fmt" + + "github.com/chrislusf/seaweedfs/weed/filer2" + weed_util "github.com/chrislusf/seaweedfs/weed/util" +) + +func init() { + filer2.Stores = append(filer2.Stores, &TikvStore{}) +} + +type TikvStore struct { +} + +func (store *TikvStore) GetName() string { + return "tikv" +} + +func (store *TikvStore) Initialize(configuration weed_util.Configuration, prefix string) (err error) { + return fmt.Errorf("not implemented for 32 bit computers") +} + +func (store *TikvStore) initialize(pdAddr string) (err error) { + return fmt.Errorf("not implemented for 32 bit computers") +} + +func (store *TikvStore) BeginTransaction(ctx context.Context) (context.Context, error) { + return nil, fmt.Errorf("not implemented for 32 bit computers") +} +func (store *TikvStore) CommitTransaction(ctx context.Context) error { + return fmt.Errorf("not implemented for 32 bit computers") +} +func (store *TikvStore) RollbackTransaction(ctx context.Context) error { + return fmt.Errorf("not implemented for 32 bit computers") +} + +func (store *TikvStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) { + return fmt.Errorf("not implemented for 32 bit computers") +} + +func (store *TikvStore) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) { + return fmt.Errorf("not implemented for 32 bit computers") +} + +func (store *TikvStore) FindEntry(ctx context.Context, fullpath filer2.FullPath) (entry *filer2.Entry, err error) { + return nil, fmt.Errorf("not implemented for 32 bit computers") +} + +func (store *TikvStore) DeleteEntry(ctx context.Context, fullpath filer2.FullPath) (err error) { + return fmt.Errorf("not implemented for 32 bit computers") +} + +func (store *TikvStore) DeleteFolderChildren(ctx context.Context, fullpath filer2.FullPath) (err error) { + return fmt.Errorf("not implemented for 32 bit computers") +} + +func (store *TikvStore) ListDirectoryEntries(ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool, + limit int) (entries []*filer2.Entry, err error) { + return nil, fmt.Errorf("not implemented for 32 bit computers") +} diff --git a/weed/filesys/dir.go b/weed/filesys/dir.go index 79cf45385..abe5a21a6 100644 --- a/weed/filesys/dir.go +++ b/weed/filesys/dir.go @@ -3,7 +3,7 @@ package filesys import ( "context" "os" - "path" + "strings" "time" "github.com/chrislusf/seaweedfs/weed/filer2" @@ -14,9 +14,9 @@ import ( ) type Dir struct { - Path string - wfs *WFS - attributes *filer_pb.FuseAttributes + Path string + wfs *WFS + entry *filer_pb.Entry } var _ = fs.Node(&Dir{}) @@ -27,50 +27,56 @@ var _ = fs.HandleReadDirAller(&Dir{}) var _ = fs.NodeRemover(&Dir{}) var _ = fs.NodeRenamer(&Dir{}) var _ = fs.NodeSetattrer(&Dir{}) +var _ = fs.NodeGetxattrer(&Dir{}) +var _ = fs.NodeSetxattrer(&Dir{}) +var _ = fs.NodeRemovexattrer(&Dir{}) +var _ = fs.NodeListxattrer(&Dir{}) +var _ = fs.NodeForgetter(&Dir{}) func (dir *Dir) Attr(ctx context.Context, attr *fuse.Attr) error { + glog.V(3).Infof("dir Attr %s, existing attr: %+v", dir.Path, attr) + // https://github.com/bazil/fuse/issues/196 attr.Valid = time.Second if dir.Path == dir.wfs.option.FilerMountRootPath { dir.setRootDirAttributes(attr) + glog.V(3).Infof("root dir Attr %s, attr: %+v", dir.Path, attr) return nil } - item := dir.wfs.listDirectoryEntriesCache.Get(dir.Path) - if 
item != nil && !item.Expired() { - entry := item.Value().(*filer_pb.Entry) - - attr.Mtime = time.Unix(entry.Attributes.Mtime, 0) - attr.Ctime = time.Unix(entry.Attributes.Crtime, 0) - attr.Mode = os.FileMode(entry.Attributes.FileMode) - attr.Gid = entry.Attributes.Gid - attr.Uid = entry.Attributes.Uid - - return nil - } - - entry, err := filer2.GetEntry(ctx, dir.wfs, dir.Path) - if err != nil { - glog.V(2).Infof("read dir %s attr: %v, error: %v", dir.Path, dir.attributes, err) + if err := dir.maybeLoadEntry(ctx); err != nil { + glog.V(3).Infof("dir Attr %s,err: %+v", dir.Path, err) return err } - dir.attributes = entry.Attributes - glog.V(2).Infof("dir %s: %v perm: %v", dir.Path, dir.attributes, os.FileMode(dir.attributes.FileMode)) + attr.Inode = filer2.FullPath(dir.Path).AsInode() + attr.Mode = os.FileMode(dir.entry.Attributes.FileMode) | os.ModeDir + attr.Mtime = time.Unix(dir.entry.Attributes.Mtime, 0) + attr.Ctime = time.Unix(dir.entry.Attributes.Crtime, 0) + attr.Gid = dir.entry.Attributes.Gid + attr.Uid = dir.entry.Attributes.Uid - attr.Mode = os.FileMode(dir.attributes.FileMode) | os.ModeDir - - attr.Mtime = time.Unix(dir.attributes.Mtime, 0) - attr.Ctime = time.Unix(dir.attributes.Crtime, 0) - attr.Gid = dir.attributes.Gid - attr.Uid = dir.attributes.Uid + glog.V(3).Infof("dir Attr %s, attr: %+v", dir.Path, attr) return nil } +func (dir *Dir) Getxattr(ctx context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error { + + glog.V(4).Infof("dir Getxattr %s", dir.Path) + + if err := dir.maybeLoadEntry(ctx); err != nil { + return err + } + + return getxattr(dir.entry, req, resp) +} + func (dir *Dir) setRootDirAttributes(attr *fuse.Attr) { + attr.Inode = 1 // filer2.FullPath(dir.Path).AsInode() + attr.Valid = time.Hour attr.Uid = dir.wfs.option.MountUid attr.Gid = dir.wfs.option.MountGid attr.Mode = dir.wfs.option.MountMode @@ -78,16 +84,25 @@ func (dir *Dir) setRootDirAttributes(attr *fuse.Attr) { attr.Ctime = dir.wfs.option.MountCtime attr.Mtime = dir.wfs.option.MountMtime attr.Atime = dir.wfs.option.MountMtime + attr.BlockSize = 1024 * 1024 } -func (dir *Dir) newFile(name string, entry *filer_pb.Entry) *File { - return &File{ - Name: name, - dir: dir, - wfs: dir.wfs, - entry: entry, - entryViewCache: nil, - } +func (dir *Dir) newFile(name string, entry *filer_pb.Entry) fs.Node { + return dir.wfs.getNode(filer2.NewFullPath(dir.Path, name), func() fs.Node { + return &File{ + Name: name, + dir: dir, + wfs: dir.wfs, + entry: entry, + entryViewCache: nil, + } + }) +} + +func (dir *Dir) newDirectory(fullpath filer2.FullPath, entry *filer_pb.Entry) fs.Node { + return dir.wfs.getNode(fullpath, func() fs.Node { + return &Dir{Path: string(fullpath), wfs: dir.wfs, entry: entry} + }) } func (dir *Dir) Create(ctx context.Context, req *fuse.CreateRequest, @@ -109,92 +124,102 @@ func (dir *Dir) Create(ctx context.Context, req *fuse.CreateRequest, TtlSec: dir.wfs.option.TtlSec, }, }, + OExcl: req.Flags&fuse.OpenExclusive != 0, } - glog.V(1).Infof("create: %v", request) + glog.V(1).Infof("create: %v", req.String()) - if request.Entry.IsDirectory { - if err := dir.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { - if _, err := client.CreateEntry(ctx, request); err != nil { - glog.V(0).Infof("create %s/%s: %v", dir.Path, req.Name, err) - return fuse.EIO + if err := dir.wfs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { + if err := filer_pb.CreateEntry(ctx, client, request); err != nil { + if 
strings.Contains(err.Error(), "EEXIST") { + return fuse.EEXIST } - return nil - }); err != nil { - return nil, nil, err + return fuse.EIO } + return nil + }); err != nil { + return nil, nil, err + } + var node fs.Node + if request.Entry.IsDirectory { + node = dir.newDirectory(filer2.NewFullPath(dir.Path, req.Name), request.Entry) + return node, nil, nil } - file := dir.newFile(req.Name, request.Entry) - if !request.Entry.IsDirectory { - file.isOpen = true - } + node = dir.newFile(req.Name, request.Entry) + file := node.(*File) + file.isOpen++ fh := dir.wfs.AcquireHandle(file, req.Uid, req.Gid) - fh.dirtyMetadata = true return file, fh, nil } func (dir *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, error) { - err := dir.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + newEntry := &filer_pb.Entry{ + Name: req.Name, + IsDirectory: true, + Attributes: &filer_pb.FuseAttributes{ + Mtime: time.Now().Unix(), + Crtime: time.Now().Unix(), + FileMode: uint32(req.Mode &^ dir.wfs.option.Umask), + Uid: req.Uid, + Gid: req.Gid, + }, + } + + err := dir.wfs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { request := &filer_pb.CreateEntryRequest{ Directory: dir.Path, - Entry: &filer_pb.Entry{ - Name: req.Name, - IsDirectory: true, - Attributes: &filer_pb.FuseAttributes{ - Mtime: time.Now().Unix(), - Crtime: time.Now().Unix(), - FileMode: uint32(req.Mode &^ dir.wfs.option.Umask), - Uid: req.Uid, - Gid: req.Gid, - }, - }, + Entry: newEntry, } glog.V(1).Infof("mkdir: %v", request) - if _, err := client.CreateEntry(ctx, request); err != nil { + if err := filer_pb.CreateEntry(ctx, client, request); err != nil { glog.V(0).Infof("mkdir %s/%s: %v", dir.Path, req.Name, err) - return fuse.EIO + return err } return nil }) if err == nil { - node := &Dir{Path: path.Join(dir.Path, req.Name), wfs: dir.wfs} + node := dir.newDirectory(filer2.NewFullPath(dir.Path, req.Name), newEntry) return node, nil } - return nil, err + return nil, fuse.EIO } func (dir *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.LookupResponse) (node fs.Node, err error) { - var entry *filer_pb.Entry - fullFilePath := path.Join(dir.Path, req.Name) + glog.V(4).Infof("dir Lookup %s: %s", dir.Path, req.Name) - item := dir.wfs.listDirectoryEntriesCache.Get(fullFilePath) - if item != nil && !item.Expired() { - entry = item.Value().(*filer_pb.Entry) - } + fullFilePath := filer2.NewFullPath(dir.Path, req.Name) + entry := dir.wfs.cacheGet(fullFilePath) if entry == nil { + // glog.V(3).Infof("dir Lookup cache miss %s", fullFilePath) entry, err = filer2.GetEntry(ctx, dir.wfs, fullFilePath) if err != nil { - return nil, err + glog.V(1).Infof("dir GetEntry %s: %v", fullFilePath, err) + return nil, fuse.ENOENT } + dir.wfs.cacheSet(fullFilePath, entry, 5*time.Minute) + } else { + glog.V(4).Infof("dir Lookup cache hit %s", fullFilePath) } if entry != nil { if entry.IsDirectory { - node = &Dir{Path: path.Join(dir.Path, req.Name), wfs: dir.wfs, attributes: entry.Attributes} + node = dir.newDirectory(fullFilePath, entry) } else { node = dir.newFile(req.Name, entry) } - resp.EntryValid = time.Duration(0) + // resp.EntryValid = time.Second + resp.Attr.Inode = fullFilePath.AsInode() + resp.Attr.Valid = time.Second resp.Attr.Mtime = time.Unix(entry.Attributes.Mtime, 0) resp.Attr.Ctime = time.Unix(entry.Attributes.Crtime, 0) resp.Attr.Mode = os.FileMode(entry.Attributes.FileMode) @@ -204,57 +229,32 @@ func (dir *Dir) Lookup(ctx context.Context, req 
*fuse.LookupRequest, resp *fuse. return node, nil } + glog.V(1).Infof("not found dir GetEntry %s: %v", fullFilePath, err) return nil, fuse.ENOENT } func (dir *Dir) ReadDirAll(ctx context.Context) (ret []fuse.Dirent, err error) { - err = dir.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + glog.V(3).Infof("dir ReadDirAll %s", dir.Path) - paginationLimit := 1024 - remaining := dir.wfs.option.DirListingLimit - - lastEntryName := "" - - for remaining >= 0 { - - request := &filer_pb.ListEntriesRequest{ - Directory: dir.Path, - StartFromFileName: lastEntryName, - Limit: uint32(paginationLimit), - } - - glog.V(4).Infof("read directory: %v", request) - resp, err := client.ListEntries(ctx, request) - if err != nil { - glog.V(0).Infof("list %s: %v", dir.Path, err) - return fuse.EIO - } - - cacheTtl := estimatedCacheTtl(len(resp.Entries)) - - for _, entry := range resp.Entries { - if entry.IsDirectory { - dirent := fuse.Dirent{Name: entry.Name, Type: fuse.DT_Dir} - ret = append(ret, dirent) - } else { - dirent := fuse.Dirent{Name: entry.Name, Type: fuse.DT_File} - ret = append(ret, dirent) - } - dir.wfs.listDirectoryEntriesCache.Set(path.Join(dir.Path, entry.Name), entry, cacheTtl) - lastEntryName = entry.Name - } - - remaining -= len(resp.Entries) - - if len(resp.Entries) < paginationLimit { - break - } + cacheTtl := 5 * time.Minute + readErr := filer2.ReadDirAllEntries(ctx, dir.wfs, filer2.FullPath(dir.Path), "", func(entry *filer_pb.Entry, isLast bool) { + fullpath := filer2.NewFullPath(dir.Path, entry.Name) + inode := fullpath.AsInode() + if entry.IsDirectory { + dirent := fuse.Dirent{Inode: inode, Name: entry.Name, Type: fuse.DT_Dir} + ret = append(ret, dirent) + } else { + dirent := fuse.Dirent{Inode: inode, Name: entry.Name, Type: fuse.DT_File} + ret = append(ret, dirent) } - - return nil + dir.wfs.cacheSet(fullpath, entry, cacheTtl) }) + if readErr != nil { + glog.V(0).Infof("list %s: %v", dir.Path, err) + return ret, fuse.EIO + } return ret, err } @@ -271,14 +271,17 @@ func (dir *Dir) Remove(ctx context.Context, req *fuse.RemoveRequest) error { func (dir *Dir) removeOneFile(ctx context.Context, req *fuse.RemoveRequest) error { - entry, err := filer2.GetEntry(ctx, dir.wfs, path.Join(dir.Path, req.Name)) + filePath := filer2.NewFullPath(dir.Path, req.Name) + entry, err := filer2.GetEntry(ctx, dir.wfs, filePath) if err != nil { return err } dir.wfs.deleteFileChunks(ctx, entry.Chunks) - return dir.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + dir.wfs.cacheDelete(filePath) + + return dir.wfs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { request := &filer_pb.DeleteEntryRequest{ Directory: dir.Path, @@ -289,12 +292,10 @@ func (dir *Dir) removeOneFile(ctx context.Context, req *fuse.RemoveRequest) erro glog.V(3).Infof("remove file: %v", request) _, err := client.DeleteEntry(ctx, request) if err != nil { - glog.V(3).Infof("remove file %s/%s: %v", dir.Path, req.Name, err) + glog.V(3).Infof("not found remove file %s/%s: %v", dir.Path, req.Name, err) return fuse.ENOENT } - dir.wfs.listDirectoryEntriesCache.Delete(path.Join(dir.Path, req.Name)) - return nil }) @@ -302,7 +303,9 @@ func (dir *Dir) removeOneFile(ctx context.Context, req *fuse.RemoveRequest) erro func (dir *Dir) removeFolder(ctx context.Context, req *fuse.RemoveRequest) error { - return dir.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + dir.wfs.cacheDelete(filer2.NewFullPath(dir.Path, req.Name)) + + return 
dir.wfs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { request := &filer_pb.DeleteEntryRequest{ Directory: dir.Path, @@ -313,12 +316,10 @@ func (dir *Dir) removeFolder(ctx context.Context, req *fuse.RemoveRequest) error glog.V(3).Infof("remove directory entry: %v", request) _, err := client.DeleteEntry(ctx, request) if err != nil { - glog.V(3).Infof("remove %s/%s: %v", dir.Path, req.Name, err) + glog.V(3).Infof("not found remove %s/%s: %v", dir.Path, req.Name, err) return fuse.ENOENT } - dir.wfs.listDirectoryEntriesCache.Delete(path.Join(dir.Path, req.Name)) - return nil }) @@ -326,66 +327,122 @@ func (dir *Dir) removeFolder(ctx context.Context, req *fuse.RemoveRequest) error func (dir *Dir) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) error { - if dir.attributes == nil { - return nil + glog.V(3).Infof("%v dir setattr %+v", dir.Path, req) + + if err := dir.maybeLoadEntry(ctx); err != nil { + return err } - glog.V(3).Infof("%v dir setattr %+v, fh=%d", dir.Path, req, req.Handle) if req.Valid.Mode() { - dir.attributes.FileMode = uint32(req.Mode) + dir.entry.Attributes.FileMode = uint32(req.Mode) } if req.Valid.Uid() { - dir.attributes.Uid = req.Uid + dir.entry.Attributes.Uid = req.Uid } if req.Valid.Gid() { - dir.attributes.Gid = req.Gid + dir.entry.Attributes.Gid = req.Gid } if req.Valid.Mtime() { - dir.attributes.Mtime = req.Mtime.Unix() + dir.entry.Attributes.Mtime = req.Mtime.Unix() } + dir.wfs.cacheDelete(filer2.FullPath(dir.Path)) + + return dir.saveEntry(ctx) + +} + +func (dir *Dir) Setxattr(ctx context.Context, req *fuse.SetxattrRequest) error { + + glog.V(4).Infof("dir Setxattr %s: %s", dir.Path, req.Name) + + if err := dir.maybeLoadEntry(ctx); err != nil { + return err + } + + if err := setxattr(dir.entry, req); err != nil { + return err + } + + dir.wfs.cacheDelete(filer2.FullPath(dir.Path)) + + return dir.saveEntry(ctx) + +} + +func (dir *Dir) Removexattr(ctx context.Context, req *fuse.RemovexattrRequest) error { + + glog.V(4).Infof("dir Removexattr %s: %s", dir.Path, req.Name) + + if err := dir.maybeLoadEntry(ctx); err != nil { + return err + } + + if err := removexattr(dir.entry, req); err != nil { + return err + } + + dir.wfs.cacheDelete(filer2.FullPath(dir.Path)) + + return dir.saveEntry(ctx) + +} + +func (dir *Dir) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse) error { + + glog.V(4).Infof("dir Listxattr %s", dir.Path) + + if err := dir.maybeLoadEntry(ctx); err != nil { + return err + } + + if err := listxattr(dir.entry, req, resp); err != nil { + return err + } + + return nil + +} + +func (dir *Dir) Forget() { + glog.V(3).Infof("Forget dir %s", dir.Path) + + dir.wfs.forgetNode(filer2.FullPath(dir.Path)) +} + +func (dir *Dir) maybeLoadEntry(ctx context.Context) error { + if dir.entry == nil { + parentDirPath, name := filer2.FullPath(dir.Path).DirAndName() + entry, err := dir.wfs.maybeLoadEntry(ctx, parentDirPath, name) + if err != nil { + return err + } + dir.entry = entry + } + return nil +} + +func (dir *Dir) saveEntry(ctx context.Context) error { + parentDir, name := filer2.FullPath(dir.Path).DirAndName() - return dir.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + + return dir.wfs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { request := &filer_pb.UpdateEntryRequest{ Directory: parentDir, - Entry: &filer_pb.Entry{ - Name: name, - Attributes: dir.attributes, - }, + Entry: dir.entry, 
} - glog.V(1).Infof("set attr directory entry: %v", request) + glog.V(1).Infof("save dir entry: %v", request) _, err := client.UpdateEntry(ctx, request) if err != nil { - glog.V(0).Infof("UpdateEntry %s: %v", dir.Path, err) + glog.V(0).Infof("UpdateEntry dir %s/%s: %v", parentDir, name, err) return fuse.EIO } - dir.wfs.listDirectoryEntriesCache.Delete(dir.Path) - return nil }) - -} - -func estimatedCacheTtl(numEntries int) time.Duration { - if numEntries < 100 { - // 30 ms per entry - return 3 * time.Second - } - if numEntries < 1000 { - // 10 ms per entry - return 10 * time.Second - } - if numEntries < 10000 { - // 10 ms per entry - return 100 * time.Second - } - - // 2 ms per entry - return time.Duration(numEntries*2) * time.Millisecond } diff --git a/weed/filesys/dir_link.go b/weed/filesys/dir_link.go index 94e443649..8b7ec7e89 100644 --- a/weed/filesys/dir_link.go +++ b/weed/filesys/dir_link.go @@ -35,8 +35,8 @@ func (dir *Dir) Symlink(ctx context.Context, req *fuse.SymlinkRequest) (fs.Node, }, } - err := dir.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { - if _, err := client.CreateEntry(ctx, request); err != nil { + err := dir.wfs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { + if err := filer_pb.CreateEntry(ctx, client, request); err != nil { glog.V(0).Infof("symlink %s/%s: %v", dir.Path, req.NewName, err) return fuse.EIO } @@ -51,7 +51,7 @@ func (dir *Dir) Symlink(ctx context.Context, req *fuse.SymlinkRequest) (fs.Node, func (file *File) Readlink(ctx context.Context, req *fuse.ReadlinkRequest) (string, error) { - if err := file.maybeLoadAttributes(ctx); err != nil { + if err := file.maybeLoadEntry(ctx); err != nil { return "", err } diff --git a/weed/filesys/dir_rename.go b/weed/filesys/dir_rename.go index e72a15758..4eb3c15b5 100644 --- a/weed/filesys/dir_rename.go +++ b/weed/filesys/dir_rename.go @@ -2,7 +2,9 @@ package filesys import ( "context" - "fmt" + + "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/fuse" "github.com/seaweedfs/fuse/fs" @@ -11,8 +13,9 @@ import ( func (dir *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDirectory fs.Node) error { newDir := newDirectory.(*Dir) + glog.V(4).Infof("dir Rename %s/%s => %s/%s", dir.Path, req.OldName, newDir.Path, req.NewName) - return dir.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + err := dir.wfs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { request := &filer_pb.AtomicRenameEntryRequest{ OldDirectory: dir.Path, @@ -23,11 +26,38 @@ func (dir *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDirector _, err := client.AtomicRenameEntry(ctx, request) if err != nil { - return fmt.Errorf("renaming %s/%s => %s/%s: %v", dir.Path, req.OldName, newDir.Path, req.NewName, err) + glog.V(0).Infof("dir Rename %s/%s => %s/%s : %v", dir.Path, req.OldName, newDir.Path, req.NewName, err) + return fuse.EIO } return nil }) + if err == nil { + newPath := filer2.NewFullPath(newDir.Path, req.NewName) + oldPath := filer2.NewFullPath(dir.Path, req.OldName) + dir.wfs.cacheDelete(newPath) + dir.wfs.cacheDelete(oldPath) + + oldFileNode := dir.wfs.getNode(oldPath, func() fs.Node { + return nil + }) + newDirNode := dir.wfs.getNode(filer2.FullPath(dir.Path), func() fs.Node { + return nil + }) + dir.wfs.forgetNode(newPath) + dir.wfs.forgetNode(oldPath) + if oldFileNode != nil && 
newDirNode != nil { + oldFile := oldFileNode.(*File) + oldFile.Name = req.NewName + oldFile.dir = newDirNode.(*Dir) + dir.wfs.getNode(newPath, func() fs.Node { + return oldFile + }) + + } + } + + return err } diff --git a/weed/filesys/dirty_page.go b/weed/filesys/dirty_page.go index baee412b2..5ff128323 100644 --- a/weed/filesys/dirty_page.go +++ b/weed/filesys/dirty_page.go @@ -4,8 +4,8 @@ import ( "bytes" "context" "fmt" + "io" "sync" - "sync/atomic" "time" "github.com/chrislusf/seaweedfs/weed/glog" @@ -15,28 +15,19 @@ import ( ) type ContinuousDirtyPages struct { - hasData bool - Offset int64 - Size int64 - Data []byte - f *File - lock sync.Mutex + intervals *ContinuousIntervals + f *File + lock sync.Mutex } func newDirtyPages(file *File) *ContinuousDirtyPages { return &ContinuousDirtyPages{ - Data: nil, - f: file, + intervals: &ContinuousIntervals{}, + f: file, } } func (pages *ContinuousDirtyPages) releaseResource() { - if pages.Data != nil { - pages.f.wfs.bufPool.Put(pages.Data) - pages.Data = nil - atomic.AddInt32(&counter, -1) - glog.V(3).Infof("%s/%s releasing resource %d", pages.f.dir.Path, pages.f.Name, counter) - } } var counter = int32(0) @@ -46,82 +37,44 @@ func (pages *ContinuousDirtyPages) AddPage(ctx context.Context, offset int64, da pages.lock.Lock() defer pages.lock.Unlock() - var chunk *filer_pb.FileChunk + glog.V(3).Infof("%s AddPage [%d,%d)", pages.f.fullpath(), offset, offset+int64(len(data))) if len(data) > int(pages.f.wfs.option.ChunkSizeLimit) { // this is more than what buffer can hold. return pages.flushAndSave(ctx, offset, data) } - if pages.Data == nil { - pages.Data = pages.f.wfs.bufPool.Get().([]byte) - atomic.AddInt32(&counter, 1) - glog.V(3).Infof("%s/%s acquire resource %d", pages.f.dir.Path, pages.f.Name, counter) - } + pages.intervals.AddInterval(data, offset) - if offset < pages.Offset || offset >= pages.Offset+int64(len(pages.Data)) || - pages.Offset+int64(len(pages.Data)) < offset+int64(len(data)) { - // if the data is out of range, - // or buffer is full if adding new data, - // flush current buffer and add new data + var chunk *filer_pb.FileChunk + var hasSavedData bool - // println("offset", offset, "size", len(data), "existing offset", pages.Offset, "size", pages.Size) - - if chunk, err = pages.saveExistingPagesToStorage(ctx); err == nil { - if chunk != nil { - glog.V(4).Infof("%s/%s add save [%d,%d)", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size)) - chunks = append(chunks, chunk) - } - } else { - glog.V(0).Infof("%s/%s add save [%d,%d): %v", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size), err) - return + if pages.intervals.TotalSize() > pages.f.wfs.option.ChunkSizeLimit { + chunk, hasSavedData, err = pages.saveExistingLargestPageToStorage(ctx) + if hasSavedData { + chunks = append(chunks, chunk) } - pages.Offset = offset - copy(pages.Data, data) - pages.Size = int64(len(data)) - return } - if offset != pages.Offset+pages.Size { - // when this happens, debug shows the data overlapping with existing data is empty - // the data is not just append - if offset == pages.Offset && int(pages.Size) < len(data) { - // glog.V(2).Infof("pages[%d,%d) pages.Data len=%v, data len=%d, pages.Size=%d", pages.Offset, pages.Offset+pages.Size, len(pages.Data), len(data), pages.Size) - copy(pages.Data[pages.Size:], data[pages.Size:]) - } else { - if pages.Size != 0 { - glog.V(1).Infof("%s/%s add page: pages [%d, %d) write [%d, %d)", pages.f.dir.Path, pages.f.Name, pages.Offset, pages.Offset+pages.Size, 
offset, offset+int64(len(data))) - } - return pages.flushAndSave(ctx, offset, data) - } - } else { - copy(pages.Data[offset-pages.Offset:], data) - } - - pages.Size = max(pages.Size, offset+int64(len(data))-pages.Offset) - return } func (pages *ContinuousDirtyPages) flushAndSave(ctx context.Context, offset int64, data []byte) (chunks []*filer_pb.FileChunk, err error) { var chunk *filer_pb.FileChunk + var newChunks []*filer_pb.FileChunk // flush existing - if chunk, err = pages.saveExistingPagesToStorage(ctx); err == nil { - if chunk != nil { - glog.V(4).Infof("%s/%s flush existing [%d,%d) to %s", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size), chunk.FileId) - chunks = append(chunks, chunk) + if newChunks, err = pages.saveExistingPagesToStorage(ctx); err == nil { + if newChunks != nil { + chunks = append(chunks, newChunks...) } } else { - glog.V(0).Infof("%s/%s failed to flush1 [%d,%d): %v", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size), err) return } - pages.Size = 0 - pages.Offset = 0 // flush the new page - if chunk, err = pages.saveToStorage(ctx, data, offset); err == nil { + if chunk, err = pages.saveToStorage(ctx, bytes.NewReader(data), offset, int64(len(data))); err == nil { if chunk != nil { glog.V(4).Infof("%s/%s flush big request [%d,%d) to %s", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size), chunk.FileId) chunks = append(chunks, chunk) @@ -134,40 +87,60 @@ func (pages *ContinuousDirtyPages) flushAndSave(ctx context.Context, offset int6 return } -func (pages *ContinuousDirtyPages) FlushToStorage(ctx context.Context) (chunk *filer_pb.FileChunk, err error) { +func (pages *ContinuousDirtyPages) FlushToStorage(ctx context.Context) (chunks []*filer_pb.FileChunk, err error) { pages.lock.Lock() defer pages.lock.Unlock() - if pages.Size == 0 { - return nil, nil - } + return pages.saveExistingPagesToStorage(ctx) +} - if chunk, err = pages.saveExistingPagesToStorage(ctx); err == nil { - pages.Size = 0 - pages.Offset = 0 - if chunk != nil { - glog.V(4).Infof("%s/%s flush [%d,%d)", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size)) +func (pages *ContinuousDirtyPages) saveExistingPagesToStorage(ctx context.Context) (chunks []*filer_pb.FileChunk, err error) { + + var hasSavedData bool + var chunk *filer_pb.FileChunk + + for { + + chunk, hasSavedData, err = pages.saveExistingLargestPageToStorage(ctx) + if !hasSavedData { + return chunks, err + } + + if err == nil { + chunks = append(chunks, chunk) + } else { + return } } + +} + +func (pages *ContinuousDirtyPages) saveExistingLargestPageToStorage(ctx context.Context) (chunk *filer_pb.FileChunk, hasSavedData bool, err error) { + + maxList := pages.intervals.RemoveLargestIntervalLinkedList() + if maxList == nil { + return nil, false, nil + } + + chunk, err = pages.saveToStorage(ctx, maxList.ToReader(), maxList.Offset(), maxList.Size()) + if err == nil { + hasSavedData = true + glog.V(3).Infof("%s saveToStorage [%d,%d) %s", pages.f.fullpath(), maxList.Offset(), maxList.Offset()+maxList.Size(), chunk.FileId) + } else { + glog.V(0).Infof("%s saveToStorage [%d,%d): %v", pages.f.fullpath(), maxList.Offset(), maxList.Offset()+maxList.Size(), err) + return + } + return } -func (pages *ContinuousDirtyPages) saveExistingPagesToStorage(ctx context.Context) (*filer_pb.FileChunk, error) { - - if pages.Size == 0 { - return nil, nil - } - - return pages.saveToStorage(ctx, pages.Data[:pages.Size], pages.Offset) -} - -func (pages 
*ContinuousDirtyPages) saveToStorage(ctx context.Context, buf []byte, offset int64) (*filer_pb.FileChunk, error) { +func (pages *ContinuousDirtyPages) saveToStorage(ctx context.Context, reader io.Reader, offset int64, size int64) (*filer_pb.FileChunk, error) { var fileId, host string var auth security.EncodedJwt - if err := pages.f.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + if err := pages.f.wfs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { request := &filer_pb.AssignVolumeRequest{ Count: 1, @@ -191,8 +164,7 @@ func (pages *ContinuousDirtyPages) saveToStorage(ctx context.Context, buf []byte } fileUrl := fmt.Sprintf("http://%s/%s", host, fileId) - bufReader := bytes.NewReader(buf) - uploadResult, err := operation.Upload(fileUrl, pages.f.Name, bufReader, false, "application/octet-stream", nil, auth) + uploadResult, err := operation.Upload(fileUrl, pages.f.Name, reader, false, "", nil, auth) if err != nil { glog.V(0).Infof("upload data %v to %s: %v", pages.f.Name, fileUrl, err) return nil, fmt.Errorf("upload data: %v", err) @@ -205,7 +177,7 @@ func (pages *ContinuousDirtyPages) saveToStorage(ctx context.Context, buf []byte return &filer_pb.FileChunk{ FileId: fileId, Offset: offset, - Size: uint64(len(buf)), + Size: uint64(size), Mtime: time.Now().UnixNano(), ETag: uploadResult.ETag, }, nil @@ -218,3 +190,18 @@ func max(x, y int64) int64 { } return y } +func min(x, y int64) int64 { + if x < y { + return x + } + return y +} + +func (pages *ContinuousDirtyPages) ReadDirtyData(ctx context.Context, data []byte, startOffset int64) (offset int64, size int) { + + pages.lock.Lock() + defer pages.lock.Unlock() + + return pages.intervals.ReadData(data, startOffset) + +} diff --git a/weed/filesys/dirty_page_interval.go b/weed/filesys/dirty_page_interval.go new file mode 100644 index 000000000..ec94c6df1 --- /dev/null +++ b/weed/filesys/dirty_page_interval.go @@ -0,0 +1,220 @@ +package filesys + +import ( + "bytes" + "io" + "math" +) + +type IntervalNode struct { + Data []byte + Offset int64 + Size int64 + Next *IntervalNode +} + +type IntervalLinkedList struct { + Head *IntervalNode + Tail *IntervalNode +} + +type ContinuousIntervals struct { + lists []*IntervalLinkedList +} + +func (list *IntervalLinkedList) Offset() int64 { + return list.Head.Offset +} +func (list *IntervalLinkedList) Size() int64 { + return list.Tail.Offset + list.Tail.Size - list.Head.Offset +} +func (list *IntervalLinkedList) addNodeToTail(node *IntervalNode) { + // glog.V(4).Infof("add to tail [%d,%d) + [%d,%d) => [%d,%d)", list.Head.Offset, list.Tail.Offset+list.Tail.Size, node.Offset, node.Offset+node.Size, list.Head.Offset, node.Offset+node.Size) + list.Tail.Next = node + list.Tail = node +} +func (list *IntervalLinkedList) addNodeToHead(node *IntervalNode) { + // glog.V(4).Infof("add to head [%d,%d) + [%d,%d) => [%d,%d)", node.Offset, node.Offset+node.Size, list.Head.Offset, list.Tail.Offset+list.Tail.Size, node.Offset, list.Tail.Offset+list.Tail.Size) + node.Next = list.Head + list.Head = node +} + +func (list *IntervalLinkedList) ReadData(buf []byte, start, stop int64) { + t := list.Head + for { + + nodeStart, nodeStop := max(start, t.Offset), min(stop, t.Offset+t.Size) + if nodeStart < nodeStop { + // glog.V(0).Infof("copying start=%d stop=%d t=[%d,%d) t.data=%d => bufSize=%d nodeStart=%d, nodeStop=%d", start, stop, t.Offset, t.Offset+t.Size, len(t.Data), len(buf), nodeStart, nodeStop) + copy(buf[nodeStart-start:], 
t.Data[nodeStart-t.Offset:nodeStop-t.Offset]) + } + + if t.Next == nil { + break + } + t = t.Next + } +} + +func (c *ContinuousIntervals) TotalSize() (total int64) { + for _, list := range c.lists { + total += list.Size() + } + return +} + +func subList(list *IntervalLinkedList, start, stop int64) *IntervalLinkedList { + var nodes []*IntervalNode + for t := list.Head; t != nil; t = t.Next { + nodeStart, nodeStop := max(start, t.Offset), min(stop, t.Offset+t.Size) + if nodeStart >= nodeStop { + // skip non overlapping IntervalNode + continue + } + nodes = append(nodes, &IntervalNode{ + Data: t.Data[nodeStart-t.Offset : nodeStop-t.Offset], + Offset: nodeStart, + Size: nodeStop - nodeStart, + Next: nil, + }) + } + for i := 1; i < len(nodes); i++ { + nodes[i-1].Next = nodes[i] + } + return &IntervalLinkedList{ + Head: nodes[0], + Tail: nodes[len(nodes)-1], + } +} + +func (c *ContinuousIntervals) AddInterval(data []byte, offset int64) { + + interval := &IntervalNode{Data: data, Offset: offset, Size: int64(len(data))} + + var newLists []*IntervalLinkedList + for _, list := range c.lists { + // if list is to the left of new interval, add to the new list + if list.Tail.Offset+list.Tail.Size <= interval.Offset { + newLists = append(newLists, list) + } + // if list is to the right of new interval, add to the new list + if interval.Offset+interval.Size <= list.Head.Offset { + newLists = append(newLists, list) + } + // if new interval overwrite the right part of the list + if list.Head.Offset < interval.Offset && interval.Offset < list.Tail.Offset+list.Tail.Size { + // create a new list of the left part of existing list + newLists = append(newLists, subList(list, list.Offset(), interval.Offset)) + } + // if new interval overwrite the left part of the list + if list.Head.Offset < interval.Offset+interval.Size && interval.Offset+interval.Size < list.Tail.Offset+list.Tail.Size { + // create a new list of the right part of existing list + newLists = append(newLists, subList(list, interval.Offset+interval.Size, list.Tail.Offset+list.Tail.Size)) + } + // skip anything that is fully overwritten by the new interval + } + + c.lists = newLists + // add the new interval to the lists, connecting neighbor lists + var prevList, nextList *IntervalLinkedList + + for _, list := range c.lists { + if list.Head.Offset == interval.Offset+interval.Size { + nextList = list + break + } + } + + for _, list := range c.lists { + if list.Head.Offset+list.Size() == offset { + list.addNodeToTail(interval) + prevList = list + break + } + } + + if prevList != nil && nextList != nil { + // glog.V(4).Infof("connecting [%d,%d) + [%d,%d) => [%d,%d)", prevList.Head.Offset, prevList.Tail.Offset+prevList.Tail.Size, nextList.Head.Offset, nextList.Tail.Offset+nextList.Tail.Size, prevList.Head.Offset, nextList.Tail.Offset+nextList.Tail.Size) + prevList.Tail.Next = nextList.Head + prevList.Tail = nextList.Tail + c.removeList(nextList) + } else if nextList != nil { + // add to head was not done when checking + nextList.addNodeToHead(interval) + } + if prevList == nil && nextList == nil { + c.lists = append(c.lists, &IntervalLinkedList{ + Head: interval, + Tail: interval, + }) + } + + return +} + +func (c *ContinuousIntervals) RemoveLargestIntervalLinkedList() *IntervalLinkedList { + var maxSize int64 + maxIndex := -1 + for k, list := range c.lists { + if maxSize <= list.Size() { + maxSize = list.Size() + maxIndex = k + } + } + if maxSize <= 0 { + return nil + } + + t := c.lists[maxIndex] + c.lists = append(c.lists[0:maxIndex], 
c.lists[maxIndex+1:]...) + return t + +} + +func (c *ContinuousIntervals) removeList(target *IntervalLinkedList) { + index := -1 + for k, list := range c.lists { + if list.Offset() == target.Offset() { + index = k + } + } + if index < 0 { + return + } + + c.lists = append(c.lists[0:index], c.lists[index+1:]...) + +} + +func (c *ContinuousIntervals) ReadData(data []byte, startOffset int64) (offset int64, size int) { + var minOffset int64 = math.MaxInt64 + var maxStop int64 + for _, list := range c.lists { + start := max(startOffset, list.Offset()) + stop := min(startOffset+int64(len(data)), list.Offset()+list.Size()) + if start <= stop { + list.ReadData(data[start-startOffset:], start, stop) + minOffset = min(minOffset, start) + maxStop = max(maxStop, stop) + } + } + + if minOffset == math.MaxInt64 { + return 0, 0 + } + + offset = minOffset + size = int(maxStop - offset) + return +} + +func (l *IntervalLinkedList) ToReader() io.Reader { + var readers []io.Reader + t := l.Head + readers = append(readers, bytes.NewReader(t.Data)) + for t.Next != nil { + t = t.Next + readers = append(readers, bytes.NewReader(t.Data)) + } + return io.MultiReader(readers...) +} diff --git a/weed/filesys/dirty_page_interval_test.go b/weed/filesys/dirty_page_interval_test.go new file mode 100644 index 000000000..184be2f3b --- /dev/null +++ b/weed/filesys/dirty_page_interval_test.go @@ -0,0 +1,72 @@ +package filesys + +import ( + "bytes" + "testing" +) + +func TestContinuousIntervals_AddIntervalAppend(t *testing.T) { + + c := &ContinuousIntervals{} + + // 25, 25, 25 + c.AddInterval(getBytes(25, 3), 0) + // _, _, 23, 23, 23, 23 + c.AddInterval(getBytes(23, 4), 2) + + expectedData(t, c, 0, 25, 25, 23, 23, 23, 23) + +} + +func TestContinuousIntervals_AddIntervalInnerOverwrite(t *testing.T) { + + c := &ContinuousIntervals{} + + // 25, 25, 25, 25, 25 + c.AddInterval(getBytes(25, 5), 0) + // _, _, 23, 23 + c.AddInterval(getBytes(23, 2), 2) + + expectedData(t, c, 0, 25, 25, 23, 23, 25) + +} + +func TestContinuousIntervals_AddIntervalFullOverwrite(t *testing.T) { + + c := &ContinuousIntervals{} + + // 25, + c.AddInterval(getBytes(25, 1), 0) + // _, _, _, _, 23, 23 + c.AddInterval(getBytes(23, 2), 4) + // _, _, _, 24, 24, 24, 24 + c.AddInterval(getBytes(24, 4), 3) + + // _, 22, 22 + c.AddInterval(getBytes(22, 2), 1) + + expectedData(t, c, 0, 25, 22, 22, 24, 24, 24, 24) + +} + +func expectedData(t *testing.T, c *ContinuousIntervals, offset int, data ...byte) { + start, stop := int64(offset), int64(offset+len(data)) + for _, list := range c.lists { + nodeStart, nodeStop := max(start, list.Head.Offset), min(stop, list.Head.Offset+list.Size()) + if nodeStart < nodeStop { + buf := make([]byte, nodeStop-nodeStart) + list.ReadData(buf, nodeStart, nodeStop) + if bytes.Compare(buf, data[nodeStart-start:nodeStop-start]) != 0 { + t.Errorf("expected %v actual %v", data[nodeStart-start:nodeStop-start], buf) + } + } + } +} + +func getBytes(content byte, length int) []byte { + data := make([]byte, length) + for i := 0; i < length; i++ { + data[i] = content + } + return data +} diff --git a/weed/filesys/file.go b/weed/filesys/file.go index 1b359ebbe..eccef4e58 100644 --- a/weed/filesys/file.go +++ b/weed/filesys/file.go @@ -3,7 +3,6 @@ package filesys import ( "context" "os" - "path/filepath" "sort" "time" @@ -20,6 +19,11 @@ var _ = fs.Node(&File{}) var _ = fs.NodeOpener(&File{}) var _ = fs.NodeFsyncer(&File{}) var _ = fs.NodeSetattrer(&File{}) +var _ = fs.NodeGetxattrer(&File{}) +var _ = fs.NodeSetxattrer(&File{}) +var _ = 
fs.NodeRemovexattrer(&File{}) +var _ = fs.NodeListxattrer(&File{}) +var _ = fs.NodeForgetter(&File{}) type File struct { Name string @@ -27,21 +31,32 @@ type File struct { wfs *WFS entry *filer_pb.Entry entryViewCache []filer2.VisibleInterval - isOpen bool + isOpen int } -func (file *File) fullpath() string { - return filepath.Join(file.dir.Path, file.Name) +func (file *File) fullpath() filer2.FullPath { + return filer2.NewFullPath(file.dir.Path, file.Name) } func (file *File) Attr(ctx context.Context, attr *fuse.Attr) error { - if err := file.maybeLoadAttributes(ctx); err != nil { - return err + glog.V(4).Infof("file Attr %s, open:%v, existing attr: %+v", file.fullpath(), file.isOpen, attr) + + if file.isOpen <= 0 { + if err := file.maybeLoadEntry(ctx); err != nil { + return err + } } + attr.Inode = file.fullpath().AsInode() + attr.Valid = time.Second attr.Mode = os.FileMode(file.entry.Attributes.FileMode) attr.Size = filer2.TotalSize(file.entry.Chunks) + if file.isOpen > 0 { + attr.Size = file.entry.Attributes.FileSize + glog.V(4).Infof("file Attr %s, open:%v, size: %d", file.fullpath(), file.isOpen, attr.Size) + } + attr.Crtime = time.Unix(file.entry.Attributes.Crtime, 0) attr.Mtime = time.Unix(file.entry.Attributes.Mtime, 0) attr.Gid = file.entry.Attributes.Gid attr.Uid = file.entry.Attributes.Uid @@ -52,11 +67,22 @@ func (file *File) Attr(ctx context.Context, attr *fuse.Attr) error { } +func (file *File) Getxattr(ctx context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error { + + // glog.V(4).Infof("file Getxattr %s", file.fullpath()) + + if err := file.maybeLoadEntry(ctx); err != nil { + return err + } + + return getxattr(file.entry, req, resp) +} + func (file *File) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.OpenResponse) (fs.Handle, error) { - glog.V(3).Infof("%v file open %+v", file.fullpath(), req) + glog.V(4).Infof("file %v open %+v", file.fullpath(), req) - file.isOpen = true + file.isOpen++ handle := file.wfs.AcquireHandle(file, req.Uid, req.Gid) @@ -70,17 +96,28 @@ func (file *File) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.Op func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) error { - if err := file.maybeLoadAttributes(ctx); err != nil { + glog.V(3).Infof("%v file setattr %+v, old:%+v", file.fullpath(), req, file.entry.Attributes) + + if err := file.maybeLoadEntry(ctx); err != nil { return err } - glog.V(3).Infof("%v file setattr %+v, old:%+v", file.fullpath(), req, file.entry.Attributes) if req.Valid.Size() { glog.V(3).Infof("%v file setattr set size=%v", file.fullpath(), req.Size) - if req.Size == 0 { + if req.Size < filer2.TotalSize(file.entry.Chunks) { // fmt.Printf("truncate %v \n", fullPath) - file.entry.Chunks = nil + var chunks []*filer_pb.FileChunk + for _, chunk := range file.entry.Chunks { + int64Size := int64(chunk.Size) + if chunk.Offset+int64Size > int64(req.Size) { + int64Size = int64(req.Size) - chunk.Offset + } + if int64Size > 0 { + chunks = append(chunks, chunk) + } + } + file.entry.Chunks = chunks file.entryViewCache = nil } file.entry.Attributes.FileSize = req.Size @@ -105,26 +142,65 @@ func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *f file.entry.Attributes.Mtime = req.Mtime.Unix() } - if file.isOpen { + if file.isOpen > 0 { return nil } - return file.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + file.wfs.cacheDelete(file.fullpath()) - request := &filer_pb.UpdateEntryRequest{ - Directory: 
file.dir.Path, - Entry: file.entry, - } + return file.saveEntry(ctx) - glog.V(1).Infof("set attr file entry: %v", request) - _, err := client.UpdateEntry(ctx, request) - if err != nil { - glog.V(0).Infof("UpdateEntry file %s/%s: %v", file.dir.Path, file.Name, err) - return fuse.EIO - } +} - return nil - }) +func (file *File) Setxattr(ctx context.Context, req *fuse.SetxattrRequest) error { + + glog.V(4).Infof("file Setxattr %s: %s", file.fullpath(), req.Name) + + if err := file.maybeLoadEntry(ctx); err != nil { + return err + } + + if err := setxattr(file.entry, req); err != nil { + return err + } + + file.wfs.cacheDelete(file.fullpath()) + + return file.saveEntry(ctx) + +} + +func (file *File) Removexattr(ctx context.Context, req *fuse.RemovexattrRequest) error { + + glog.V(4).Infof("file Removexattr %s: %s", file.fullpath(), req.Name) + + if err := file.maybeLoadEntry(ctx); err != nil { + return err + } + + if err := removexattr(file.entry, req); err != nil { + return err + } + + file.wfs.cacheDelete(file.fullpath()) + + return file.saveEntry(ctx) + +} + +func (file *File) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse) error { + + glog.V(4).Infof("file Listxattr %s", file.fullpath()) + + if err := file.maybeLoadEntry(ctx); err != nil { + return err + } + + if err := listxattr(file.entry, req, resp); err != nil { + return err + } + + return nil } @@ -136,50 +212,26 @@ func (file *File) Fsync(ctx context.Context, req *fuse.FsyncRequest) error { return nil } -func (file *File) maybeLoadAttributes(ctx context.Context) error { - if file.entry == nil || !file.isOpen { - item := file.wfs.listDirectoryEntriesCache.Get(file.fullpath()) - if item != nil && !item.Expired() { - entry := item.Value().(*filer_pb.Entry) +func (file *File) Forget() { + glog.V(3).Infof("Forget file %s/%s", file.dir.Path, file.Name) + + file.wfs.forgetNode(filer2.NewFullPath(file.dir.Path, file.Name)) + +} + +func (file *File) maybeLoadEntry(ctx context.Context) error { + if file.entry == nil || file.isOpen <= 0 { + entry, err := file.wfs.maybeLoadEntry(ctx, file.dir.Path, file.Name) + if err != nil { + return err + } + if entry != nil { file.setEntry(entry) - // glog.V(1).Infof("file attr read cached %v attributes", file.Name) - } else { - err := file.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { - - request := &filer_pb.LookupDirectoryEntryRequest{ - Name: file.Name, - Directory: file.dir.Path, - } - - resp, err := client.LookupDirectoryEntry(ctx, request) - if err != nil { - glog.V(3).Infof("file attr read file %v: %v", request, err) - return fuse.ENOENT - } - - file.setEntry(resp.Entry) - - glog.V(3).Infof("file attr %v %+v: %d", file.fullpath(), file.entry.Attributes, filer2.TotalSize(file.entry.Chunks)) - - // file.wfs.listDirectoryEntriesCache.Set(file.fullpath(), file.entry, file.wfs.option.EntryCacheTtl) - - return nil - }) - - if err != nil { - return err - } } } return nil } -func (file *File) addChunk(chunk *filer_pb.FileChunk) { - if chunk != nil { - file.addChunks([]*filer_pb.FileChunk{chunk}) - } -} - func (file *File) addChunks(chunks []*filer_pb.FileChunk) { sort.Slice(chunks, func(i, j int) bool { @@ -203,3 +255,22 @@ func (file *File) setEntry(entry *filer_pb.Entry) { file.entry = entry file.entryViewCache = filer2.NonOverlappingVisibleIntervals(file.entry.Chunks) } + +func (file *File) saveEntry(ctx context.Context) error { + return file.wfs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { + + 
request := &filer_pb.UpdateEntryRequest{ + Directory: file.dir.Path, + Entry: file.entry, + } + + glog.V(1).Infof("save file entry: %v", request) + _, err := client.UpdateEntry(ctx, request) + if err != nil { + glog.V(0).Infof("UpdateEntry file %s/%s: %v", file.dir.Path, file.Name, err) + return fuse.EIO + } + + return nil + }) +} diff --git a/weed/filesys/filehandle.go b/weed/filesys/filehandle.go index 1f4754dd1..cf253a7ed 100644 --- a/weed/filesys/filehandle.go +++ b/weed/filesys/filehandle.go @@ -7,10 +7,11 @@ import ( "path" "time" + "github.com/gabriel-vasile/mimetype" + "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" - "github.com/gabriel-vasile/mimetype" "github.com/seaweedfs/fuse" "github.com/seaweedfs/fuse/fs" ) @@ -50,60 +51,84 @@ func (fh *FileHandle) Read(ctx context.Context, req *fuse.ReadRequest, resp *fus glog.V(4).Infof("%s read fh %d: [%d,%d)", fh.f.fullpath(), fh.handle, req.Offset, req.Offset+int64(req.Size)) - // this value should come from the filer instead of the old f - if len(fh.f.entry.Chunks) == 0 { - glog.V(1).Infof("empty fh %v/%v", fh.f.dir.Path, fh.f.Name) - return nil - } - buff := make([]byte, req.Size) - if fh.f.entryViewCache == nil { - fh.f.entryViewCache = filer2.NonOverlappingVisibleIntervals(fh.f.entry.Chunks) + totalRead, err := fh.readFromChunks(ctx, buff, req.Offset) + if err == nil { + dirtyOffset, dirtySize := fh.readFromDirtyPages(ctx, buff, req.Offset) + if totalRead+req.Offset < dirtyOffset+int64(dirtySize) { + totalRead = dirtyOffset + int64(dirtySize) - req.Offset + } } - chunkViews := filer2.ViewFromVisibleIntervals(fh.f.entryViewCache, req.Offset, req.Size) - - totalRead, err := filer2.ReadIntoBuffer(ctx, fh.f.wfs, fh.f.fullpath(), buff, chunkViews, req.Offset) - resp.Data = buff[:totalRead] if err != nil { glog.Errorf("file handle read %s: %v", fh.f.fullpath(), err) + return fuse.EIO } return err } +func (fh *FileHandle) readFromDirtyPages(ctx context.Context, buff []byte, startOffset int64) (offset int64, size int) { + return fh.dirtyPages.ReadDirtyData(ctx, buff, startOffset) +} + +func (fh *FileHandle) readFromChunks(ctx context.Context, buff []byte, offset int64) (int64, error) { + + // this value should come from the filer instead of the old f + if len(fh.f.entry.Chunks) == 0 { + glog.V(1).Infof("empty fh %v", fh.f.fullpath()) + return 0, nil + } + + if fh.f.entryViewCache == nil { + fh.f.entryViewCache = filer2.NonOverlappingVisibleIntervals(fh.f.entry.Chunks) + } + + chunkViews := filer2.ViewFromVisibleIntervals(fh.f.entryViewCache, offset, len(buff)) + + totalRead, err := filer2.ReadIntoBuffer(ctx, fh.f.wfs, fh.f.fullpath(), buff, chunkViews, offset) + + if err != nil { + glog.Errorf("file handle read %s: %v", fh.f.fullpath(), err) + } + + return totalRead, err +} + // Write to the file handle func (fh *FileHandle) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) error { // write the request to volume servers - glog.V(4).Infof("%+v/%v write fh %d: [%d,%d)", fh.f.dir.Path, fh.f.Name, fh.handle, req.Offset, req.Offset+int64(len(req.Data))) + fh.f.entry.Attributes.FileSize = uint64(max(req.Offset+int64(len(req.Data)), int64(fh.f.entry.Attributes.FileSize))) + // glog.V(0).Infof("%v write [%d,%d)", fh.f.fullpath(), req.Offset, req.Offset+int64(len(req.Data))) chunks, err := fh.dirtyPages.AddPage(ctx, req.Offset, req.Data) if err != nil { - glog.Errorf("%+v/%v write fh %d: [%d,%d): %v", fh.f.dir.Path, fh.f.Name, 
fh.handle, req.Offset, req.Offset+int64(len(req.Data)), err) - return fmt.Errorf("write %s/%s at [%d,%d): %v", fh.f.dir.Path, fh.f.Name, req.Offset, req.Offset+int64(len(req.Data)), err) + glog.Errorf("%v write fh %d: [%d,%d): %v", fh.f.fullpath(), fh.handle, req.Offset, req.Offset+int64(len(req.Data)), err) + return fuse.EIO } resp.Size = len(req.Data) if req.Offset == 0 { // detect mime type - var possibleExt string - fh.contentType, possibleExt = mimetype.Detect(req.Data) - if ext := path.Ext(fh.f.Name); ext != possibleExt { + detectedMIME := mimetype.Detect(req.Data) + fh.contentType = detectedMIME.String() + if ext := path.Ext(fh.f.Name); ext != detectedMIME.Extension() { fh.contentType = mime.TypeByExtension(ext) } fh.dirtyMetadata = true } - fh.f.addChunks(chunks) - if len(chunks) > 0 { + + fh.f.addChunks(chunks) + fh.dirtyMetadata = true } @@ -114,11 +139,12 @@ func (fh *FileHandle) Release(ctx context.Context, req *fuse.ReleaseRequest) err glog.V(4).Infof("%v release fh %d", fh.f.fullpath(), fh.handle) - fh.dirtyPages.releaseResource() + fh.f.isOpen-- - fh.f.wfs.ReleaseHandle(fh.f.fullpath(), fuse.HandleID(fh.handle)) - - fh.f.isOpen = false + if fh.f.isOpen <= 0 { + fh.dirtyPages.releaseResource() + fh.f.wfs.ReleaseHandle(fh.f.fullpath(), fuse.HandleID(fh.handle)) + } return nil } @@ -128,19 +154,22 @@ func (fh *FileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) error { // send the data to the OS glog.V(4).Infof("%s fh %d flush %v", fh.f.fullpath(), fh.handle, req) - chunk, err := fh.dirtyPages.FlushToStorage(ctx) + chunks, err := fh.dirtyPages.FlushToStorage(ctx) if err != nil { - glog.Errorf("flush %s/%s: %v", fh.f.dir.Path, fh.f.Name, err) - return fmt.Errorf("flush %s/%s: %v", fh.f.dir.Path, fh.f.Name, err) + glog.Errorf("flush %s: %v", fh.f.fullpath(), err) + return fuse.EIO } - fh.f.addChunk(chunk) + fh.f.addChunks(chunks) + if len(chunks) > 0 { + fh.dirtyMetadata = true + } if !fh.dirtyMetadata { return nil } - return fh.f.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + err = fh.f.wfs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { if fh.f.entry.Attributes != nil { fh.f.entry.Attributes.Mime = fh.contentType @@ -156,25 +185,36 @@ func (fh *FileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) error { Entry: fh.f.entry, } - glog.V(3).Infof("%s/%s set chunks: %v", fh.f.dir.Path, fh.f.Name, len(fh.f.entry.Chunks)) + glog.V(3).Infof("%s set chunks: %v", fh.f.fullpath(), len(fh.f.entry.Chunks)) for i, chunk := range fh.f.entry.Chunks { - glog.V(3).Infof("%s/%s chunks %d: %v [%d,%d)", fh.f.dir.Path, fh.f.Name, i, chunk.FileId, chunk.Offset, chunk.Offset+int64(chunk.Size)) + glog.V(3).Infof("%s chunks %d: %v [%d,%d)", fh.f.fullpath(), i, chunk.FileId, chunk.Offset, chunk.Offset+int64(chunk.Size)) } chunks, garbages := filer2.CompactFileChunks(fh.f.entry.Chunks) fh.f.entry.Chunks = chunks // fh.f.entryViewCache = nil - if _, err := client.CreateEntry(ctx, request); err != nil { + if err := filer_pb.CreateEntry(ctx, client, request); err != nil { glog.Errorf("update fh: %v", err) return fmt.Errorf("update fh: %v", err) } fh.f.wfs.deleteFileChunks(ctx, garbages) for i, chunk := range garbages { - glog.V(3).Infof("garbage %s/%s chunks %d: %v [%d,%d)", fh.f.dir.Path, fh.f.Name, i, chunk.FileId, chunk.Offset, chunk.Offset+int64(chunk.Size)) + glog.V(3).Infof("garbage %s chunks %d: %v [%d,%d)", fh.f.fullpath(), i, chunk.FileId, chunk.Offset, chunk.Offset+int64(chunk.Size)) } return nil }) + + if 
err == nil { + fh.dirtyMetadata = false + } + + if err != nil { + glog.Errorf("%v fh %d flush: %v", fh.f.fullpath(), fh.handle, err) + return fuse.EIO + } + + return nil } diff --git a/weed/filesys/wfs.go b/weed/filesys/wfs.go index 1bd9b5cc9..4807e367b 100644 --- a/weed/filesys/wfs.go +++ b/weed/filesys/wfs.go @@ -5,16 +5,19 @@ import ( "fmt" "math" "os" + "strings" "sync" "time" + "github.com/karlseguin/ccache" + "google.golang.org/grpc" + + "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/util" - "github.com/karlseguin/ccache" "github.com/seaweedfs/fuse" "github.com/seaweedfs/fuse/fs" - "google.golang.org/grpc" ) type Option struct { @@ -26,7 +29,7 @@ type Option struct { TtlSec int32 ChunkSizeLimit int64 DataCenter string - DirListingLimit int + DirListCacheLimit int64 EntryCacheTtl time.Duration Umask os.FileMode @@ -44,13 +47,19 @@ type WFS struct { option *Option listDirectoryEntriesCache *ccache.Cache - // contains all open handles + // contains all open handles, protected by handlesLock + handlesLock sync.Mutex handles []*FileHandle - pathToHandleIndex map[string]int - pathToHandleLock sync.Mutex - bufPool sync.Pool + pathToHandleIndex map[filer2.FullPath]int + + bufPool sync.Pool stats statsCache + + // nodes, protected by nodesLock + nodesLock sync.Mutex + nodes map[uint64]fs.Node + root fs.Node } type statsCache struct { filer_pb.StatisticsResponse @@ -60,36 +69,53 @@ type statsCache struct { func NewSeaweedFileSystem(option *Option) *WFS { wfs := &WFS{ option: option, - listDirectoryEntriesCache: ccache.New(ccache.Configure().MaxSize(1024 * 8).ItemsToPrune(100)), - pathToHandleIndex: make(map[string]int), + listDirectoryEntriesCache: ccache.New(ccache.Configure().MaxSize(option.DirListCacheLimit * 3).ItemsToPrune(100)), + pathToHandleIndex: make(map[filer2.FullPath]int), bufPool: sync.Pool{ New: func() interface{} { return make([]byte, option.ChunkSizeLimit) }, }, + nodes: make(map[uint64]fs.Node), } + wfs.root = &Dir{Path: wfs.option.FilerMountRootPath, wfs: wfs} + return wfs } func (wfs *WFS) Root() (fs.Node, error) { - return &Dir{Path: wfs.option.FilerMountRootPath, wfs: wfs}, nil + return wfs.root, nil } -func (wfs *WFS) WithFilerClient(ctx context.Context, fn func(filer_pb.SeaweedFilerClient) error) error { +func (wfs *WFS) WithFilerClient(ctx context.Context, fn func(context.Context, filer_pb.SeaweedFilerClient) error) error { - return util.WithCachedGrpcClient(ctx, func(grpcConnection *grpc.ClientConn) error { + err := util.WithCachedGrpcClient(ctx, func(ctx2 context.Context, grpcConnection *grpc.ClientConn) error { client := filer_pb.NewSeaweedFilerClient(grpcConnection) - return fn(client) + return fn(ctx2, client) }, wfs.option.FilerGrpcAddress, wfs.option.GrpcDialOption) + if err == nil { + return nil + } + if strings.Contains(err.Error(), "context canceled") { + glog.V(2).Infoln("retry context canceled request...") + return util.WithCachedGrpcClient(context.Background(), func(ctx2 context.Context, grpcConnection *grpc.ClientConn) error { + client := filer_pb.NewSeaweedFilerClient(grpcConnection) + return fn(ctx2, client) + }, wfs.option.FilerGrpcAddress, wfs.option.GrpcDialOption) + } + return err + } func (wfs *WFS) AcquireHandle(file *File, uid, gid uint32) (fileHandle *FileHandle) { - wfs.pathToHandleLock.Lock() - defer wfs.pathToHandleLock.Unlock() fullpath := file.fullpath() + glog.V(4).Infof("%s AcquireHandle uid=%d gid=%d", fullpath, 
uid, gid) + + wfs.handlesLock.Lock() + defer wfs.handlesLock.Unlock() index, found := wfs.pathToHandleIndex[fullpath] if found && wfs.handles[index] != nil { @@ -103,24 +129,24 @@ func (wfs *WFS) AcquireHandle(file *File, uid, gid uint32) (fileHandle *FileHand wfs.handles[i] = fileHandle fileHandle.handle = uint64(i) wfs.pathToHandleIndex[fullpath] = i - glog.V(4).Infoln(fullpath, "reuse fileHandle id", fileHandle.handle) + glog.V(4).Infof("%s reuse fh %d", fullpath, fileHandle.handle) return } } wfs.handles = append(wfs.handles, fileHandle) fileHandle.handle = uint64(len(wfs.handles) - 1) - glog.V(2).Infoln(fullpath, "new fileHandle id", fileHandle.handle) wfs.pathToHandleIndex[fullpath] = int(fileHandle.handle) + glog.V(4).Infof("%s new fh %d", fullpath, fileHandle.handle) return } -func (wfs *WFS) ReleaseHandle(fullpath string, handleId fuse.HandleID) { - wfs.pathToHandleLock.Lock() - defer wfs.pathToHandleLock.Unlock() +func (wfs *WFS) ReleaseHandle(fullpath filer2.FullPath, handleId fuse.HandleID) { + wfs.handlesLock.Lock() + defer wfs.handlesLock.Unlock() - glog.V(4).Infof("%s releasing handle id %d current handles length %d", fullpath, handleId, len(wfs.handles)) + glog.V(4).Infof("%s ReleaseHandle id %d current handles length %d", fullpath, handleId, len(wfs.handles)) delete(wfs.pathToHandleIndex, fullpath) if int(handleId) < len(wfs.handles) { wfs.handles[int(handleId)] = nil @@ -136,7 +162,7 @@ func (wfs *WFS) Statfs(ctx context.Context, req *fuse.StatfsRequest, resp *fuse. if wfs.stats.lastChecked < time.Now().Unix()-20 { - err := wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + err := wfs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { request := &filer_pb.StatisticsRequest{ Collection: wfs.option.Collection, @@ -190,3 +216,44 @@ func (wfs *WFS) Statfs(ctx context.Context, req *fuse.StatfsRequest, resp *fuse. 
return nil } + +func (wfs *WFS) cacheGet(path filer2.FullPath) *filer_pb.Entry { + item := wfs.listDirectoryEntriesCache.Get(string(path)) + if item != nil && !item.Expired() { + return item.Value().(*filer_pb.Entry) + } + return nil +} +func (wfs *WFS) cacheSet(path filer2.FullPath, entry *filer_pb.Entry, ttl time.Duration) { + if entry == nil { + wfs.listDirectoryEntriesCache.Delete(string(path)) + } else { + wfs.listDirectoryEntriesCache.Set(string(path), entry, ttl) + } +} +func (wfs *WFS) cacheDelete(path filer2.FullPath) { + wfs.listDirectoryEntriesCache.Delete(string(path)) +} + +func (wfs *WFS) getNode(fullpath filer2.FullPath, fn func() fs.Node) fs.Node { + wfs.nodesLock.Lock() + defer wfs.nodesLock.Unlock() + + node, found := wfs.nodes[fullpath.AsInode()] + if found { + return node + } + node = fn() + if node != nil { + wfs.nodes[fullpath.AsInode()] = node + } + return node +} + +func (wfs *WFS) forgetNode(fullpath filer2.FullPath) { + wfs.nodesLock.Lock() + defer wfs.nodesLock.Unlock() + + delete(wfs.nodes, fullpath.AsInode()) + +} diff --git a/weed/filesys/wfs_deletion.go b/weed/filesys/wfs_deletion.go index 6e586b7df..cce0c792c 100644 --- a/weed/filesys/wfs_deletion.go +++ b/weed/filesys/wfs_deletion.go @@ -20,7 +20,7 @@ func (wfs *WFS) deleteFileChunks(ctx context.Context, chunks []*filer_pb.FileChu fileIds = append(fileIds, chunk.GetFileIdString()) } - wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + wfs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { deleteFileIds(ctx, wfs.option.GrpcDialOption, client, fileIds) return nil }) @@ -50,7 +50,10 @@ func deleteFileIds(ctx context.Context, grpcDialOption grpc.DialOption, client f VolumeId: vid, Locations: nil, } - locations := resp.LocationsMap[vid] + locations, found := resp.LocationsMap[vid] + if !found { + continue + } for _, loc := range locations.Locations { lr.Locations = append(lr.Locations, operation.Location{ Url: loc.Url, diff --git a/weed/filesys/xattr.go b/weed/filesys/xattr.go new file mode 100644 index 000000000..9dfb491fd --- /dev/null +++ b/weed/filesys/xattr.go @@ -0,0 +1,144 @@ +package filesys + +import ( + "context" + "strings" + + "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/seaweedfs/fuse" +) + +func getxattr(entry *filer_pb.Entry, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error { + + if entry == nil { + return fuse.ErrNoXattr + } + if entry.Extended == nil { + return fuse.ErrNoXattr + } + data, found := entry.Extended[req.Name] + if !found { + return fuse.ErrNoXattr + } + if req.Position < uint32(len(data)) { + size := req.Size + if req.Position+size >= uint32(len(data)) { + size = uint32(len(data)) - req.Position + } + if size == 0 { + resp.Xattr = data[req.Position:] + } else { + resp.Xattr = data[req.Position : req.Position+size] + } + } + + return nil + +} + +func setxattr(entry *filer_pb.Entry, req *fuse.SetxattrRequest) error { + + if entry == nil { + return fuse.EIO + } + + if entry.Extended == nil { + entry.Extended = make(map[string][]byte) + } + data, _ := entry.Extended[req.Name] + + newData := make([]byte, int(req.Position)+len(req.Xattr)) + + copy(newData, data) + + copy(newData[int(req.Position):], req.Xattr) + + entry.Extended[req.Name] = newData + + return nil + +} + +func removexattr(entry *filer_pb.Entry, req *fuse.RemovexattrRequest) error { + + if entry == nil { + return fuse.ErrNoXattr + } + + 
if entry.Extended == nil { + return fuse.ErrNoXattr + } + + _, found := entry.Extended[req.Name] + + if !found { + return fuse.ErrNoXattr + } + + delete(entry.Extended, req.Name) + + return nil + +} + +func listxattr(entry *filer_pb.Entry, req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse) error { + + if entry == nil { + return fuse.EIO + } + + for k := range entry.Extended { + resp.Append(k) + } + + size := req.Size + if req.Position+size >= uint32(len(resp.Xattr)) { + size = uint32(len(resp.Xattr)) - req.Position + } + + if size == 0 { + resp.Xattr = resp.Xattr[req.Position:] + } else { + resp.Xattr = resp.Xattr[req.Position : req.Position+size] + } + + return nil + +} + +func (wfs *WFS) maybeLoadEntry(ctx context.Context, dir, name string) (entry *filer_pb.Entry, err error) { + + fullpath := filer2.NewFullPath(dir, name) + entry = wfs.cacheGet(fullpath) + if entry != nil { + return + } + // glog.V(3).Infof("read entry cache miss %s", fullpath) + + err = wfs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { + + request := &filer_pb.LookupDirectoryEntryRequest{ + Name: name, + Directory: dir, + } + + resp, err := client.LookupDirectoryEntry(ctx, request) + if err != nil || resp == nil || resp.Entry == nil { + if err == filer2.ErrNotFound || strings.Contains(err.Error(), filer2.ErrNotFound.Error()) { + glog.V(3).Infof("file attr read not found file %v: %v", request, err) + return fuse.ENOENT + } + glog.V(3).Infof("attr read %v: %v", request, err) + return fuse.EIO + } + + entry = resp.Entry + wfs.cacheSet(fullpath, entry, wfs.option.EntryCacheTtl) + + return nil + }) + + return +} diff --git a/weed/notification/aws_sqs/aws_sqs_pub.go b/weed/notification/aws_sqs/aws_sqs_pub.go index c1af7f27a..d881049dd 100644 --- a/weed/notification/aws_sqs/aws_sqs_pub.go +++ b/weed/notification/aws_sqs/aws_sqs_pub.go @@ -27,24 +27,24 @@ func (k *AwsSqsPub) GetName() string { return "aws_sqs" } -func (k *AwsSqsPub) Initialize(configuration util.Configuration) (err error) { - glog.V(0).Infof("filer.notification.aws_sqs.region: %v", configuration.GetString("region")) - glog.V(0).Infof("filer.notification.aws_sqs.sqs_queue_name: %v", configuration.GetString("sqs_queue_name")) +func (k *AwsSqsPub) Initialize(configuration util.Configuration, prefix string) (err error) { + glog.V(0).Infof("filer.notification.aws_sqs.region: %v", configuration.GetString(prefix+"region")) + glog.V(0).Infof("filer.notification.aws_sqs.sqs_queue_name: %v", configuration.GetString(prefix+"sqs_queue_name")) return k.initialize( - configuration.GetString("aws_access_key_id"), - configuration.GetString("aws_secret_access_key"), - configuration.GetString("region"), - configuration.GetString("sqs_queue_name"), + configuration.GetString(prefix+"aws_access_key_id"), + configuration.GetString(prefix+"aws_secret_access_key"), + configuration.GetString(prefix+"region"), + configuration.GetString(prefix+"sqs_queue_name"), ) } -func (k *AwsSqsPub) initialize(awsAccessKeyId, aswSecretAccessKey, region, queueName string) (err error) { +func (k *AwsSqsPub) initialize(awsAccessKeyId, awsSecretAccessKey, region, queueName string) (err error) { config := &aws.Config{ Region: aws.String(region), } - if awsAccessKeyId != "" && aswSecretAccessKey != "" { - config.Credentials = credentials.NewStaticCredentials(awsAccessKeyId, aswSecretAccessKey, "") + if awsAccessKeyId != "" && awsSecretAccessKey != "" { + config.Credentials = credentials.NewStaticCredentials(awsAccessKeyId, awsSecretAccessKey, "") } sess, 
err := session.NewSession(config) diff --git a/weed/notification/configuration.go b/weed/notification/configuration.go index 7f8765cc3..36211692c 100644 --- a/weed/notification/configuration.go +++ b/weed/notification/configuration.go @@ -11,7 +11,7 @@ type MessageQueue interface { // GetName gets the name to locate the configuration in filer.toml file GetName() string // Initialize initializes the file store - Initialize(configuration util.Configuration) error + Initialize(configuration util.Configuration, prefix string) error SendMessage(key string, message proto.Message) error } @@ -21,7 +21,7 @@ var ( Queue MessageQueue ) -func LoadConfiguration(config *viper.Viper) { +func LoadConfiguration(config *viper.Viper, prefix string) { if config == nil { return @@ -30,9 +30,8 @@ func LoadConfiguration(config *viper.Viper) { validateOneEnabledQueue(config) for _, queue := range MessageQueues { - if config.GetBool(queue.GetName() + ".enabled") { - viperSub := config.Sub(queue.GetName()) - if err := queue.Initialize(viperSub); err != nil { + if config.GetBool(prefix + queue.GetName() + ".enabled") { + if err := queue.Initialize(config, prefix+queue.GetName()+"."); err != nil { glog.Fatalf("Failed to initialize notification for %s: %+v", queue.GetName(), err) } diff --git a/weed/notification/gocdk_pub_sub/gocdk_pub_sub.go b/weed/notification/gocdk_pub_sub/gocdk_pub_sub.go index ebf44ea6f..706261b3a 100644 --- a/weed/notification/gocdk_pub_sub/gocdk_pub_sub.go +++ b/weed/notification/gocdk_pub_sub/gocdk_pub_sub.go @@ -18,12 +18,13 @@ import ( "context" "fmt" - "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/notification" - "github.com/chrislusf/seaweedfs/weed/util" "github.com/golang/protobuf/proto" "gocloud.dev/pubsub" _ "gocloud.dev/pubsub/awssnssqs" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/notification" + "github.com/chrislusf/seaweedfs/weed/util" // _ "gocloud.dev/pubsub/azuresb" _ "gocloud.dev/pubsub/gcppubsub" _ "gocloud.dev/pubsub/natspubsub" @@ -43,8 +44,8 @@ func (k *GoCDKPubSub) GetName() string { return "gocdk_pub_sub" } -func (k *GoCDKPubSub) Initialize(config util.Configuration) error { - k.topicURL = config.GetString("topic_url") +func (k *GoCDKPubSub) Initialize(configuration util.Configuration, prefix string) error { + k.topicURL = configuration.GetString(prefix + "topic_url") glog.V(0).Infof("notification.gocdk_pub_sub.topic_url: %v", k.topicURL) topic, err := pubsub.OpenTopic(context.Background(), k.topicURL) if err != nil { diff --git a/weed/notification/google_pub_sub/google_pub_sub.go b/weed/notification/google_pub_sub/google_pub_sub.go index 7b26bfe38..363a86eb6 100644 --- a/weed/notification/google_pub_sub/google_pub_sub.go +++ b/weed/notification/google_pub_sub/google_pub_sub.go @@ -25,13 +25,13 @@ func (k *GooglePubSub) GetName() string { return "google_pub_sub" } -func (k *GooglePubSub) Initialize(configuration util.Configuration) (err error) { - glog.V(0).Infof("notification.google_pub_sub.project_id: %v", configuration.GetString("project_id")) - glog.V(0).Infof("notification.google_pub_sub.topic: %v", configuration.GetString("topic")) +func (k *GooglePubSub) Initialize(configuration util.Configuration, prefix string) (err error) { + glog.V(0).Infof("notification.google_pub_sub.project_id: %v", configuration.GetString(prefix+"project_id")) + glog.V(0).Infof("notification.google_pub_sub.topic: %v", configuration.GetString(prefix+"topic")) return k.initialize( - 
configuration.GetString("google_application_credentials"), - configuration.GetString("project_id"), - configuration.GetString("topic"), + configuration.GetString(prefix+"google_application_credentials"), + configuration.GetString(prefix+"project_id"), + configuration.GetString(prefix+"topic"), ) } diff --git a/weed/notification/kafka/kafka_queue.go b/weed/notification/kafka/kafka_queue.go index 830709a51..8d83b5892 100644 --- a/weed/notification/kafka/kafka_queue.go +++ b/weed/notification/kafka/kafka_queue.go @@ -21,12 +21,12 @@ func (k *KafkaQueue) GetName() string { return "kafka" } -func (k *KafkaQueue) Initialize(configuration util.Configuration) (err error) { - glog.V(0).Infof("filer.notification.kafka.hosts: %v\n", configuration.GetStringSlice("hosts")) - glog.V(0).Infof("filer.notification.kafka.topic: %v\n", configuration.GetString("topic")) +func (k *KafkaQueue) Initialize(configuration util.Configuration, prefix string) (err error) { + glog.V(0).Infof("filer.notification.kafka.hosts: %v\n", configuration.GetStringSlice(prefix+"hosts")) + glog.V(0).Infof("filer.notification.kafka.topic: %v\n", configuration.GetString(prefix+"topic")) return k.initialize( - configuration.GetStringSlice("hosts"), - configuration.GetString("topic"), + configuration.GetStringSlice(prefix+"hosts"), + configuration.GetString(prefix+"topic"), ) } @@ -76,7 +76,7 @@ func (k *KafkaQueue) handleError() { for { err := <-k.producer.Errors() if err != nil { - glog.Errorf("producer message error, partition:%d offset:%d key:%v valus:%s error(%v) topic:%s", err.Msg.Partition, err.Msg.Offset, err.Msg.Key, err.Msg.Value, err.Err, k.topic) + glog.Errorf("producer message error, partition:%d offset:%d key:%v value:%s error(%v) topic:%s", err.Msg.Partition, err.Msg.Offset, err.Msg.Key, err.Msg.Value, err.Err, k.topic) } } } diff --git a/weed/notification/log/log_queue.go b/weed/notification/log/log_queue.go index dcc038dfc..1ca4786a1 100644 --- a/weed/notification/log/log_queue.go +++ b/weed/notification/log/log_queue.go @@ -18,7 +18,7 @@ func (k *LogQueue) GetName() string { return "log" } -func (k *LogQueue) Initialize(configuration util.Configuration) (err error) { +func (k *LogQueue) Initialize(configuration util.Configuration, prefix string) (err error) { return nil } diff --git a/weed/operation/assign_file_id.go b/weed/operation/assign_file_id.go index 4c50eaa26..b67d8b708 100644 --- a/weed/operation/assign_file_id.go +++ b/weed/operation/assign_file_id.go @@ -11,13 +11,14 @@ import ( ) type VolumeAssignRequest struct { - Count uint64 - Replication string - Collection string - Ttl string - DataCenter string - Rack string - DataNode string + Count uint64 + Replication string + Collection string + Ttl string + DataCenter string + Rack string + DataNode string + WritableVolumeCount uint32 } type AssignResult struct { @@ -43,16 +44,17 @@ func Assign(server string, grpcDialOption grpc.DialOption, primaryRequest *Volum continue } - lastError = WithMasterServerClient(server, grpcDialOption, func(masterClient master_pb.SeaweedClient) error { + lastError = WithMasterServerClient(server, grpcDialOption, func(ctx context.Context, masterClient master_pb.SeaweedClient) error { req := &master_pb.AssignRequest{ - Count: primaryRequest.Count, - Replication: primaryRequest.Replication, - Collection: primaryRequest.Collection, - Ttl: primaryRequest.Ttl, - DataCenter: primaryRequest.DataCenter, - Rack: primaryRequest.Rack, - DataNode: primaryRequest.DataNode, + Count: primaryRequest.Count, + Replication: 
primaryRequest.Replication, + Collection: primaryRequest.Collection, + Ttl: primaryRequest.Ttl, + DataCenter: primaryRequest.DataCenter, + Rack: primaryRequest.Rack, + DataNode: primaryRequest.DataNode, + WritableVolumeCount: primaryRequest.WritableVolumeCount, } resp, grpcErr := masterClient.Assign(context.Background(), req) if grpcErr != nil { diff --git a/weed/operation/delete_content.go b/weed/operation/delete_content.go index 6d84be76f..95bbde9f9 100644 --- a/weed/operation/delete_content.go +++ b/weed/operation/delete_content.go @@ -4,12 +4,14 @@ import ( "context" "errors" "fmt" - "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" - "google.golang.org/grpc" "net/http" "strings" "sync" + + "google.golang.org/grpc" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" ) type DeleteResult struct { @@ -94,7 +96,7 @@ func DeleteFilesWithLookupVolumeId(grpcDialOption grpc.DialOption, fileIds []str if deleteResults, deleteErr := DeleteFilesAtOneVolumeServer(server, grpcDialOption, fidList); deleteErr != nil { err = deleteErr - } else { + } else if deleteResults != nil { resultChan <- deleteResults } @@ -107,7 +109,7 @@ func DeleteFilesWithLookupVolumeId(grpcDialOption grpc.DialOption, fileIds []str ret = append(ret, result...) } - glog.V(0).Infof("deleted %d items", len(ret)) + glog.V(1).Infof("deleted %d items", len(ret)) return ret, err } @@ -115,7 +117,7 @@ func DeleteFilesWithLookupVolumeId(grpcDialOption grpc.DialOption, fileIds []str // DeleteFilesAtOneVolumeServer deletes a list of files that is on one volume server via gRpc func DeleteFilesAtOneVolumeServer(volumeServer string, grpcDialOption grpc.DialOption, fileIds []string) (ret []*volume_server_pb.DeleteResult, err error) { - err = WithVolumeServerClient(volumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + err = WithVolumeServerClient(volumeServer, grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error { req := &volume_server_pb.BatchDeleteRequest{ FileIds: fileIds, diff --git a/weed/operation/grpc_client.go b/weed/operation/grpc_client.go index f6b2b69e9..e7ee2d2ba 100644 --- a/weed/operation/grpc_client.go +++ b/weed/operation/grpc_client.go @@ -12,7 +12,7 @@ import ( "strings" ) -func WithVolumeServerClient(volumeServer string, grpcDialOption grpc.DialOption, fn func(volume_server_pb.VolumeServerClient) error) error { +func WithVolumeServerClient(volumeServer string, grpcDialOption grpc.DialOption, fn func(context.Context, volume_server_pb.VolumeServerClient) error) error { ctx := context.Background() @@ -21,9 +21,9 @@ func WithVolumeServerClient(volumeServer string, grpcDialOption grpc.DialOption, return err } - return util.WithCachedGrpcClient(ctx, func(grpcConnection *grpc.ClientConn) error { + return util.WithCachedGrpcClient(ctx, func(ctx2 context.Context, grpcConnection *grpc.ClientConn) error { client := volume_server_pb.NewVolumeServerClient(grpcConnection) - return fn(client) + return fn(ctx2, client) }, grpcAddress, grpcDialOption) } @@ -38,7 +38,7 @@ func toVolumeServerGrpcAddress(volumeServer string) (grpcAddress string, err err return fmt.Sprintf("%s:%d", volumeServer[0:sepIndex], port+10000), nil } -func WithMasterServerClient(masterServer string, grpcDialOption grpc.DialOption, fn func(masterClient master_pb.SeaweedClient) error) error { +func WithMasterServerClient(masterServer string, grpcDialOption 
grpc.DialOption, fn func(ctx2 context.Context, masterClient master_pb.SeaweedClient) error) error { ctx := context.Background() @@ -47,9 +47,9 @@ func WithMasterServerClient(masterServer string, grpcDialOption grpc.DialOption, return fmt.Errorf("failed to parse master grpc %v: %v", masterServer, parseErr) } - return util.WithCachedGrpcClient(ctx, func(grpcConnection *grpc.ClientConn) error { + return util.WithCachedGrpcClient(ctx, func(ctx2 context.Context, grpcConnection *grpc.ClientConn) error { client := master_pb.NewSeaweedClient(grpcConnection) - return fn(client) + return fn(ctx2, client) }, masterGrpcAddress, grpcDialOption) } diff --git a/weed/operation/lookup.go b/weed/operation/lookup.go index d0773e7fd..78769ac5a 100644 --- a/weed/operation/lookup.go +++ b/weed/operation/lookup.go @@ -99,12 +99,12 @@ func LookupVolumeIds(server string, grpcDialOption grpc.DialOption, vids []strin //only query unknown_vids - err := WithMasterServerClient(server, grpcDialOption, func(masterClient master_pb.SeaweedClient) error { + err := WithMasterServerClient(server, grpcDialOption, func(ctx context.Context, masterClient master_pb.SeaweedClient) error { req := &master_pb.LookupVolumeRequest{ VolumeIds: unknown_vids, } - resp, grpcErr := masterClient.LookupVolume(context.Background(), req) + resp, grpcErr := masterClient.LookupVolume(ctx, req) if grpcErr != nil { return grpcErr } diff --git a/weed/operation/stats.go b/weed/operation/stats.go index b69a33750..3e6327f19 100644 --- a/weed/operation/stats.go +++ b/weed/operation/stats.go @@ -9,9 +9,9 @@ import ( func Statistics(server string, grpcDialOption grpc.DialOption, req *master_pb.StatisticsRequest) (resp *master_pb.StatisticsResponse, err error) { - err = WithMasterServerClient(server, grpcDialOption, func(masterClient master_pb.SeaweedClient) error { + err = WithMasterServerClient(server, grpcDialOption, func(ctx context.Context, masterClient master_pb.SeaweedClient) error { - grpcResponse, grpcErr := masterClient.Statistics(context.Background(), req) + grpcResponse, grpcErr := masterClient.Statistics(ctx, req) if grpcErr != nil { return grpcErr } diff --git a/weed/operation/submit.go b/weed/operation/submit.go index bdf59d966..62f067430 100644 --- a/weed/operation/submit.go +++ b/weed/operation/submit.go @@ -203,7 +203,7 @@ func upload_one_chunk(filename string, reader io.Reader, master, ) (size uint32, e error) { glog.V(4).Info("Uploading part ", filename, " to ", fileUrl, "...") uploadResult, uploadError := Upload(fileUrl, filename, reader, false, - "application/octet-stream", nil, jwt) + "", nil, jwt) if uploadError != nil { return 0, uploadError } diff --git a/weed/operation/sync_volume.go b/weed/operation/sync_volume.go index 5562f12ab..4b39ad544 100644 --- a/weed/operation/sync_volume.go +++ b/weed/operation/sync_volume.go @@ -8,9 +8,9 @@ import ( func GetVolumeSyncStatus(server string, grpcDialOption grpc.DialOption, vid uint32) (resp *volume_server_pb.VolumeSyncStatusResponse, err error) { - WithVolumeServerClient(server, grpcDialOption, func(client volume_server_pb.VolumeServerClient) error { + WithVolumeServerClient(server, grpcDialOption, func(ctx context.Context, client volume_server_pb.VolumeServerClient) error { - resp, err = client.VolumeSyncStatus(context.Background(), &volume_server_pb.VolumeSyncStatusRequest{ + resp, err = client.VolumeSyncStatus(ctx, &volume_server_pb.VolumeSyncStatusRequest{ VolumeId: vid, }) return nil diff --git a/weed/operation/tail_volume.go b/weed/operation/tail_volume.go index b53f18ce1..1e8b0a16e 
100644 --- a/weed/operation/tail_volume.go +++ b/weed/operation/tail_volume.go @@ -26,9 +26,9 @@ func TailVolume(master string, grpcDialOption grpc.DialOption, vid needle.Volume } func TailVolumeFromSource(volumeServer string, grpcDialOption grpc.DialOption, vid needle.VolumeId, sinceNs uint64, idleTimeoutSeconds int, fn func(n *needle.Needle) error) error { - return WithVolumeServerClient(volumeServer, grpcDialOption, func(client volume_server_pb.VolumeServerClient) error { + return WithVolumeServerClient(volumeServer, grpcDialOption, func(ctx context.Context, client volume_server_pb.VolumeServerClient) error { - stream, err := client.VolumeTailSender(context.Background(), &volume_server_pb.VolumeTailSenderRequest{ + stream, err := client.VolumeTailSender(ctx, &volume_server_pb.VolumeTailSenderRequest{ VolumeId: uint32(vid), SinceNs: sinceNs, IdleTimeoutSeconds: uint32(idleTimeoutSeconds), diff --git a/weed/pb/filer.proto b/weed/pb/filer.proto index 18ccca44f..6357d971f 100644 --- a/weed/pb/filer.proto +++ b/weed/pb/filer.proto @@ -12,7 +12,7 @@ service SeaweedFiler { rpc LookupDirectoryEntry (LookupDirectoryEntryRequest) returns (LookupDirectoryEntryResponse) { } - rpc ListEntries (ListEntriesRequest) returns (ListEntriesResponse) { + rpc ListEntries (ListEntriesRequest) returns (stream ListEntriesResponse) { } rpc CreateEntry (CreateEntryRequest) returns (CreateEntryResponse) { @@ -64,7 +64,7 @@ message ListEntriesRequest { } message ListEntriesResponse { - repeated Entry entries = 1; + Entry entry = 1; } message Entry { @@ -123,9 +123,11 @@ message FuseAttributes { message CreateEntryRequest { string directory = 1; Entry entry = 2; + bool o_excl = 3; } message CreateEntryResponse { + string error = 1; } message UpdateEntryRequest { diff --git a/weed/pb/filer_pb/filer.pb.go b/weed/pb/filer_pb/filer.pb.go index 89541d6f3..01b3e8d90 100644 --- a/weed/pb/filer_pb/filer.pb.go +++ b/weed/pb/filer_pb/filer.pb.go @@ -151,7 +151,7 @@ func (m *ListEntriesRequest) GetLimit() uint32 { } type ListEntriesResponse struct { - Entries []*Entry `protobuf:"bytes,1,rep,name=entries" json:"entries,omitempty"` + Entry *Entry `protobuf:"bytes,1,opt,name=entry" json:"entry,omitempty"` } func (m *ListEntriesResponse) Reset() { *m = ListEntriesResponse{} } @@ -159,9 +159,9 @@ func (m *ListEntriesResponse) String() string { return proto.CompactT func (*ListEntriesResponse) ProtoMessage() {} func (*ListEntriesResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } -func (m *ListEntriesResponse) GetEntries() []*Entry { +func (m *ListEntriesResponse) GetEntry() *Entry { if m != nil { - return m.Entries + return m.Entry } return nil } @@ -497,6 +497,7 @@ func (m *FuseAttributes) GetSymlinkTarget() string { type CreateEntryRequest struct { Directory string `protobuf:"bytes,1,opt,name=directory" json:"directory,omitempty"` Entry *Entry `protobuf:"bytes,2,opt,name=entry" json:"entry,omitempty"` + OExcl bool `protobuf:"varint,3,opt,name=o_excl,json=oExcl" json:"o_excl,omitempty"` } func (m *CreateEntryRequest) Reset() { *m = CreateEntryRequest{} } @@ -518,7 +519,15 @@ func (m *CreateEntryRequest) GetEntry() *Entry { return nil } +func (m *CreateEntryRequest) GetOExcl() bool { + if m != nil { + return m.OExcl + } + return false +} + type CreateEntryResponse struct { + Error string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"` } func (m *CreateEntryResponse) Reset() { *m = CreateEntryResponse{} } @@ -526,6 +535,13 @@ func (m *CreateEntryResponse) String() string { return proto.CompactT 
func (*CreateEntryResponse) ProtoMessage() {} func (*CreateEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } +func (m *CreateEntryResponse) GetError() string { + if m != nil { + return m.Error + } + return "" +} + type UpdateEntryRequest struct { Directory string `protobuf:"bytes,1,opt,name=directory" json:"directory,omitempty"` Entry *Entry `protobuf:"bytes,2,opt,name=entry" json:"entry,omitempty"` @@ -1036,7 +1052,7 @@ const _ = grpc.SupportPackageIsVersion4 type SeaweedFilerClient interface { LookupDirectoryEntry(ctx context.Context, in *LookupDirectoryEntryRequest, opts ...grpc.CallOption) (*LookupDirectoryEntryResponse, error) - ListEntries(ctx context.Context, in *ListEntriesRequest, opts ...grpc.CallOption) (*ListEntriesResponse, error) + ListEntries(ctx context.Context, in *ListEntriesRequest, opts ...grpc.CallOption) (SeaweedFiler_ListEntriesClient, error) CreateEntry(ctx context.Context, in *CreateEntryRequest, opts ...grpc.CallOption) (*CreateEntryResponse, error) UpdateEntry(ctx context.Context, in *UpdateEntryRequest, opts ...grpc.CallOption) (*UpdateEntryResponse, error) DeleteEntry(ctx context.Context, in *DeleteEntryRequest, opts ...grpc.CallOption) (*DeleteEntryResponse, error) @@ -1065,13 +1081,36 @@ func (c *seaweedFilerClient) LookupDirectoryEntry(ctx context.Context, in *Looku return out, nil } -func (c *seaweedFilerClient) ListEntries(ctx context.Context, in *ListEntriesRequest, opts ...grpc.CallOption) (*ListEntriesResponse, error) { - out := new(ListEntriesResponse) - err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/ListEntries", in, out, c.cc, opts...) +func (c *seaweedFilerClient) ListEntries(ctx context.Context, in *ListEntriesRequest, opts ...grpc.CallOption) (SeaweedFiler_ListEntriesClient, error) { + stream, err := grpc.NewClientStream(ctx, &_SeaweedFiler_serviceDesc.Streams[0], c.cc, "/filer_pb.SeaweedFiler/ListEntries", opts...) 
if err != nil { return nil, err } - return out, nil + x := &seaweedFilerListEntriesClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type SeaweedFiler_ListEntriesClient interface { + Recv() (*ListEntriesResponse, error) + grpc.ClientStream +} + +type seaweedFilerListEntriesClient struct { + grpc.ClientStream +} + +func (x *seaweedFilerListEntriesClient) Recv() (*ListEntriesResponse, error) { + m := new(ListEntriesResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil } func (c *seaweedFilerClient) CreateEntry(ctx context.Context, in *CreateEntryRequest, opts ...grpc.CallOption) (*CreateEntryResponse, error) { @@ -1159,7 +1198,7 @@ func (c *seaweedFilerClient) GetFilerConfiguration(ctx context.Context, in *GetF type SeaweedFilerServer interface { LookupDirectoryEntry(context.Context, *LookupDirectoryEntryRequest) (*LookupDirectoryEntryResponse, error) - ListEntries(context.Context, *ListEntriesRequest) (*ListEntriesResponse, error) + ListEntries(*ListEntriesRequest, SeaweedFiler_ListEntriesServer) error CreateEntry(context.Context, *CreateEntryRequest) (*CreateEntryResponse, error) UpdateEntry(context.Context, *UpdateEntryRequest) (*UpdateEntryResponse, error) DeleteEntry(context.Context, *DeleteEntryRequest) (*DeleteEntryResponse, error) @@ -1193,22 +1232,25 @@ func _SeaweedFiler_LookupDirectoryEntry_Handler(srv interface{}, ctx context.Con return interceptor(ctx, in, info, handler) } -func _SeaweedFiler_ListEntries_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListEntriesRequest) - if err := dec(in); err != nil { - return nil, err +func _SeaweedFiler_ListEntries_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(ListEntriesRequest) + if err := stream.RecvMsg(m); err != nil { + return err } - if interceptor == nil { - return srv.(SeaweedFilerServer).ListEntries(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/filer_pb.SeaweedFiler/ListEntries", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SeaweedFilerServer).ListEntries(ctx, req.(*ListEntriesRequest)) - } - return interceptor(ctx, in, info, handler) + return srv.(SeaweedFilerServer).ListEntries(m, &seaweedFilerListEntriesServer{stream}) +} + +type SeaweedFiler_ListEntriesServer interface { + Send(*ListEntriesResponse) error + grpc.ServerStream +} + +type seaweedFilerListEntriesServer struct { + grpc.ServerStream +} + +func (x *seaweedFilerListEntriesServer) Send(m *ListEntriesResponse) error { + return x.ServerStream.SendMsg(m) } func _SeaweedFiler_CreateEntry_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { @@ -1381,10 +1423,6 @@ var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{ MethodName: "LookupDirectoryEntry", Handler: _SeaweedFiler_LookupDirectoryEntry_Handler, }, - { - MethodName: "ListEntries", - Handler: _SeaweedFiler_ListEntries_Handler, - }, { MethodName: "CreateEntry", Handler: _SeaweedFiler_CreateEntry_Handler, @@ -1422,113 +1460,121 @@ var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{ Handler: _SeaweedFiler_GetFilerConfiguration_Handler, }, }, - Streams: []grpc.StreamDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "ListEntries", + Handler: 
_SeaweedFiler_ListEntries_Handler,
+			ServerStreams: true,
+		},
+	},
 	Metadata: "filer.proto",
 }

 func init() { proto.RegisterFile("filer.proto", fileDescriptor0) }

 var fileDescriptor0 = []byte{
-	// 1608 bytes of a gzipped FileDescriptorProto
-	// ... (gzipped descriptor bytes omitted for brevity) ...
+	// 1633 bytes of a gzipped FileDescriptorProto
+	// ... (gzipped descriptor bytes omitted for brevity) ...
 }
diff --git a/weed/pb/filer_pb/filer_pb_helper.go b/weed/pb/filer_pb/filer_pb_helper.go
index 5c40332e6..b2ffacc01 100644
--- a/weed/pb/filer_pb/filer_pb_helper.go
+++ b/weed/pb/filer_pb/filer_pb_helper.go
@@ -1,6 +1,9 @@
 package filer_pb

 import (
+	"context"
+	"fmt"
+
 	"github.com/chrislusf/seaweedfs/weed/storage/needle"
 )

@@ -67,3 +70,16 @@ func AfterEntryDeserialization(chunks []*FileChunk) {
 	}
 }
+
+// CreateEntry calls the filer's CreateEntry RPC and reports a failure both
+// when the call itself errors and when the response carries a filer-side error.
+func CreateEntry(ctx context.Context, client SeaweedFilerClient, request *CreateEntryRequest) error {
+	resp, err := client.CreateEntry(ctx, request)
+	if err != nil {
+		return fmt.Errorf("CreateEntry: %v", err)
+	}
+	if resp.Error != "" {
+		return fmt.Errorf("CreateEntry: %v", resp.Error)
+	}
+	return nil
+}
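The helper above folds the two failure modes of the RPC into one error return, so call sites no longer need to check both. A minimal sketch of a caller; the filer address, dial options, and entry fields are illustrative assumptions, not part of this change:

```go
package main

import (
	"context"
	"log"

	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"google.golang.org/grpc"
)

func main() {
	// Assumed filer gRPC endpoint; adjust for the actual deployment.
	conn, err := grpc.Dial("localhost:18888", grpc.WithInsecure())
	if err != nil {
		log.Fatalf("dial filer: %v", err)
	}
	defer conn.Close()

	client := filer_pb.NewSeaweedFilerClient(conn)

	// One call site now covers both transport errors and resp.Error.
	if err := filer_pb.CreateEntry(context.Background(), client, &filer_pb.CreateEntryRequest{
		Directory: "/buckets/demo",
		Entry:     &filer_pb.Entry{Name: "hello.txt"},
	}); err != nil {
		log.Fatalf("create entry: %v", err)
	}
}
```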
diff --git a/weed/pb/master.proto b/weed/pb/master.proto
index 80231a2d8..9b1e884c7 100644
--- a/weed/pb/master.proto
+++ b/weed/pb/master.proto
@@ -58,6 +58,7 @@ message HeartbeatResponse {
     string leader = 2;
     string metrics_address = 3;
     uint32 metrics_interval_seconds = 4;
+    repeated StorageBackend storage_backends = 5;
 }

 message VolumeInformationMessage {
@@ -73,6 +74,8 @@ message VolumeInformationMessage {
     uint32 ttl = 10;
     uint32 compact_revision = 11;
     int64 modified_at_second = 12;
+    string remote_storage_name = 13;
+    string remote_storage_key = 14;
 }

 message VolumeShortInformationMessage {
@@ -89,6 +92,12 @@ message VolumeEcShardInformationMessage {
     uint32 ec_index_bits = 3;
 }

+message StorageBackend {
+    string type = 1;
+    string id = 2;
+    map<string, string> properties = 3;
+}
+
 message Empty {
 }

@@ -140,6 +149,7 @@ message AssignRequest {
     string rack = 6;
     string data_node = 7;
     uint32 memory_map_max_size_mb = 8;
+    uint32 Writable_volume_count = 9;
 }
 message AssignResponse {
     string fid = 1;
@@ -200,6 +210,7 @@ message DataNodeInfo {
     uint64 active_volume_count = 5;
     repeated VolumeInformationMessage volume_infos = 6;
     repeated VolumeEcShardInformationMessage ec_shard_infos = 7;
+    uint64 remote_volume_count = 8;
 }
 message RackInfo {
     string id = 1;
@@ -208,6 +219,7 @@ message RackInfo {
     uint64 free_volume_count = 4;
     uint64 active_volume_count = 5;
     repeated DataNodeInfo data_node_infos = 6;
+    uint64 remote_volume_count = 7;
 }
 message DataCenterInfo {
     string id = 1;
@@ -216,6 +228,7 @@ message DataCenterInfo {
     uint64 free_volume_count = 4;
     uint64 active_volume_count = 5;
     repeated RackInfo rack_infos = 6;
+    uint64 remote_volume_count = 7;
 }
 message TopologyInfo {
     string id = 1;
@@ -224,6 +237,7 @@ message TopologyInfo {
     uint64 free_volume_count = 4;
     uint64 active_volume_count = 5;
     repeated DataCenterInfo data_center_infos = 6;
+    uint64 remote_volume_count = 7;
 }
 message VolumeListRequest {
 }
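Taken together, these proto changes wire tiered storage through the master: the master can advertise its configured backends in heartbeat responses, a volume can record which remote backend (and under what key) holds its data, and every level of the topology carries a rollup of remote volume counts. A sketch of the new messages in use; the backend type, id, property keys, and key naming convention are assumptions for illustration only:

```go
package main

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
)

func main() {
	// A heartbeat response advertising one configured backend (values assumed).
	resp := &master_pb.HeartbeatResponse{
		StorageBackends: []*master_pb.StorageBackend{{
			Type: "s3",
			Id:   "s3.default",
			Properties: map[string]string{
				"region": "us-east-2",
				"bucket": "seaweedfs-warm",
			},
		}},
	}

	// A volume whose data has been offloaded records where it went.
	vol := &master_pb.VolumeInformationMessage{
		Id:                7,
		RemoteStorageName: "s3.default", // matches a StorageBackend id (assumed convention)
		RemoteStorageKey:  "7.dat",      // object key on that backend (assumed)
	}

	fmt.Println(resp.GetStorageBackends()[0].GetId(), vol.GetRemoteStorageName())
}
```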
diff --git a/weed/pb/master_pb/master.pb.go b/weed/pb/master_pb/master.pb.go
index 880d39de5..ea4362c92 100644
--- a/weed/pb/master_pb/master.pb.go
+++ b/weed/pb/master_pb/master.pb.go
@@ -14,6 +14,7 @@ It has these top-level messages:
 	VolumeInformationMessage
 	VolumeShortInformationMessage
 	VolumeEcShardInformationMessage
+	StorageBackend
 	Empty
 	SuperBlockExtra
 	KeepConnectedRequest
@@ -204,10 +205,11 @@ func (m *Heartbeat) GetHasNoEcShards() bool {
 }

 type HeartbeatResponse struct {
-	VolumeSizeLimit        uint64 `protobuf:"varint,1,opt,name=volume_size_limit,json=volumeSizeLimit" json:"volume_size_limit,omitempty"`
-	Leader                 string `protobuf:"bytes,2,opt,name=leader" json:"leader,omitempty"`
-	MetricsAddress         string `protobuf:"bytes,3,opt,name=metrics_address,json=metricsAddress" json:"metrics_address,omitempty"`
-	MetricsIntervalSeconds uint32 `protobuf:"varint,4,opt,name=metrics_interval_seconds,json=metricsIntervalSeconds" json:"metrics_interval_seconds,omitempty"`
+	VolumeSizeLimit        uint64            `protobuf:"varint,1,opt,name=volume_size_limit,json=volumeSizeLimit" json:"volume_size_limit,omitempty"`
+	Leader                 string            `protobuf:"bytes,2,opt,name=leader" json:"leader,omitempty"`
+	MetricsAddress         string            `protobuf:"bytes,3,opt,name=metrics_address,json=metricsAddress" json:"metrics_address,omitempty"`
+	MetricsIntervalSeconds uint32            `protobuf:"varint,4,opt,name=metrics_interval_seconds,json=metricsIntervalSeconds" json:"metrics_interval_seconds,omitempty"`
+	StorageBackends        []*StorageBackend `protobuf:"bytes,5,rep,name=storage_backends,json=storageBackends" json:"storage_backends,omitempty"`
 }

 func (m *HeartbeatResponse) Reset() { *m = HeartbeatResponse{} }
@@ -243,19 +245,28 @@ func (m *HeartbeatResponse) GetMetricsIntervalSeconds() uint32 {
 	return 0
 }

+func (m *HeartbeatResponse) GetStorageBackends() []*StorageBackend {
+	if m != nil {
+		return m.StorageBackends
+	}
+	return nil
+}
+
 type VolumeInformationMessage struct {
-	Id               uint32 `protobuf:"varint,1,opt,name=id" json:"id,omitempty"`
-	Size             uint64 `protobuf:"varint,2,opt,name=size" json:"size,omitempty"`
-	Collection       string `protobuf:"bytes,3,opt,name=collection" json:"collection,omitempty"`
-	FileCount        uint64 `protobuf:"varint,4,opt,name=file_count,json=fileCount" json:"file_count,omitempty"`
-	DeleteCount      uint64 `protobuf:"varint,5,opt,name=delete_count,json=deleteCount" json:"delete_count,omitempty"`
-	DeletedByteCount uint64 `protobuf:"varint,6,opt,name=deleted_byte_count,json=deletedByteCount" json:"deleted_byte_count,omitempty"`
-	ReadOnly         bool   `protobuf:"varint,7,opt,name=read_only,json=readOnly" json:"read_only,omitempty"`
-	ReplicaPlacement uint32 `protobuf:"varint,8,opt,name=replica_placement,json=replicaPlacement" json:"replica_placement,omitempty"`
-	Version          uint32 `protobuf:"varint,9,opt,name=version" json:"version,omitempty"`
-	Ttl              uint32 `protobuf:"varint,10,opt,name=ttl" json:"ttl,omitempty"`
-	CompactRevision  uint32 `protobuf:"varint,11,opt,name=compact_revision,json=compactRevision" json:"compact_revision,omitempty"`
-	ModifiedAtSecond int64  `protobuf:"varint,12,opt,name=modified_at_second,json=modifiedAtSecond" json:"modified_at_second,omitempty"`
+	Id                uint32 `protobuf:"varint,1,opt,name=id" json:"id,omitempty"`
+	Size              uint64 `protobuf:"varint,2,opt,name=size" json:"size,omitempty"`
+	Collection        string `protobuf:"bytes,3,opt,name=collection" json:"collection,omitempty"`
+	FileCount         uint64 `protobuf:"varint,4,opt,name=file_count,json=fileCount" json:"file_count,omitempty"`
+	DeleteCount       uint64 `protobuf:"varint,5,opt,name=delete_count,json=deleteCount" json:"delete_count,omitempty"`
+	DeletedByteCount  uint64 `protobuf:"varint,6,opt,name=deleted_byte_count,json=deletedByteCount" json:"deleted_byte_count,omitempty"`
+	ReadOnly          bool   `protobuf:"varint,7,opt,name=read_only,json=readOnly" json:"read_only,omitempty"`
+	ReplicaPlacement  uint32 `protobuf:"varint,8,opt,name=replica_placement,json=replicaPlacement" json:"replica_placement,omitempty"`
+	Version           uint32 `protobuf:"varint,9,opt,name=version" json:"version,omitempty"`
+	Ttl               uint32 `protobuf:"varint,10,opt,name=ttl" json:"ttl,omitempty"`
+	CompactRevision   uint32 `protobuf:"varint,11,opt,name=compact_revision,json=compactRevision" json:"compact_revision,omitempty"`
+	ModifiedAtSecond  int64  `protobuf:"varint,12,opt,name=modified_at_second,json=modifiedAtSecond" json:"modified_at_second,omitempty"`
+	RemoteStorageName string `protobuf:"bytes,13,opt,name=remote_storage_name,json=remoteStorageName" json:"remote_storage_name,omitempty"`
+	RemoteStorageKey  string `protobuf:"bytes,14,opt,name=remote_storage_key,json=remoteStorageKey" json:"remote_storage_key,omitempty"`
 }

 func (m *VolumeInformationMessage) Reset() { *m = VolumeInformationMessage{} }
@@ -347,6 +358,20 @@ func (m *VolumeInformationMessage) GetModifiedAtSecond() int64 {
 	return 0
 }

+func (m *VolumeInformationMessage) GetRemoteStorageName() string {
+	if m != nil {
+		return m.RemoteStorageName
+	}
+	return ""
+}
+
+func (m *VolumeInformationMessage) GetRemoteStorageKey() string {
+	if m != nil {
+		return m.RemoteStorageKey
+	}
+	return ""
+}
+
 type VolumeShortInformationMessage struct {
 	Id         uint32 `protobuf:"varint,1,opt,name=id" json:"id,omitempty"`
 	Collection string `protobuf:"bytes,3,opt,name=collection" json:"collection,omitempty"`
@@ -427,13 +452,45 @@ func (m *VolumeEcShardInformationMessage) GetEcIndexBits() uint32 {
 	return 0
 }

+type StorageBackend struct {
+	Type       string            `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"`
+	Id         string            `protobuf:"bytes,2,opt,name=id" json:"id,omitempty"`
+	Properties map[string]string `protobuf:"bytes,3,rep,name=properties" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+}
+
+func (m *StorageBackend) Reset()                    { *m = StorageBackend{} }
+func (m *StorageBackend) String() string            { return proto.CompactTextString(m) }
+func (*StorageBackend) ProtoMessage()               {}
+func (*StorageBackend) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
+
+func (m *StorageBackend) GetType() string {
+	if m != nil {
+		return m.Type
+	}
+	return ""
+}
+
+func (m *StorageBackend) GetId() string {
+	if m != nil {
+		return m.Id
+	}
+	return ""
+}
+
+func (m *StorageBackend) GetProperties() map[string]string {
+	if m != nil {
+		return m.Properties
+	}
+	return nil
+}
+
 type Empty struct {
 }

 func (m *Empty) Reset()         { *m = Empty{} }
 func (m *Empty) String() string { return proto.CompactTextString(m) }
 func (*Empty) ProtoMessage()    {}
-func (*Empty) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
+func (*Empty) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }

 type SuperBlockExtra struct {
 	ErasureCoding *SuperBlockExtra_ErasureCoding `protobuf:"bytes,1,opt,name=erasure_coding,json=erasureCoding" json:"erasure_coding,omitempty"`
@@ -442,7 +499,7 @@ type SuperBlockExtra struct {
 func (m *SuperBlockExtra) Reset()         { *m = SuperBlockExtra{} }
 func (m *SuperBlockExtra) String() string { return proto.CompactTextString(m) }
 func (*SuperBlockExtra) ProtoMessage()    {}
-func (*SuperBlockExtra) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
+func (*SuperBlockExtra) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }

 func (m *SuperBlockExtra) GetErasureCoding() *SuperBlockExtra_ErasureCoding {
 	if m != nil {
@@ -461,7 +518,7 @@ func (m *SuperBlockExtra_ErasureCoding) Reset() { *m = SuperBlockExtra_E
 func (m *SuperBlockExtra_ErasureCoding) String() string { return proto.CompactTextString(m) }
 func (*SuperBlockExtra_ErasureCoding) ProtoMessage()    {}
 func (*SuperBlockExtra_ErasureCoding) Descriptor() ([]byte, []int) {
-	return fileDescriptor0, []int{6, 0}
+	return fileDescriptor0, []int{7, 0}
 }

 func (m *SuperBlockExtra_ErasureCoding) GetData() uint32 {
@@ -492,7 +549,7 @@ type KeepConnectedRequest struct {
 func (m *KeepConnectedRequest) Reset()         { *m = KeepConnectedRequest{} }
 func (m *KeepConnectedRequest) String() string { return proto.CompactTextString(m) }
 func (*KeepConnectedRequest) ProtoMessage()    {}
-func (*KeepConnectedRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
+func (*KeepConnectedRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }

 func (m *KeepConnectedRequest) GetName() string {
 	if m != nil {
@@ -512,7 +569,7 @@ type VolumeLocation struct {
 func (m *VolumeLocation) Reset()         { *m = VolumeLocation{} }
 func (m *VolumeLocation) String() string { return proto.CompactTextString(m) }
 func (*VolumeLocation) ProtoMessage()    {}
-func (*VolumeLocation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
+func (*VolumeLocation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} }

 func (m *VolumeLocation) GetUrl() string {
 	if m != nil {
@@ -557,7 +614,7 @@ type LookupVolumeRequest struct {
 func (m *LookupVolumeRequest) Reset()         { *m = LookupVolumeRequest{} }
 func (m *LookupVolumeRequest) String() string { return proto.CompactTextString(m) }
 func (*LookupVolumeRequest) ProtoMessage()    {}
-func (*LookupVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} }
+func (*LookupVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} }

 func (m *LookupVolumeRequest) GetVolumeIds() []string {
 	if m != nil {
@@ -580,7 +637,7 @@ type LookupVolumeResponse struct {
 func (m *LookupVolumeResponse) Reset()         { *m = LookupVolumeResponse{} }
 func (m *LookupVolumeResponse) String() string { return proto.CompactTextString(m) }
 func (*LookupVolumeResponse) ProtoMessage()    {}
-func (*LookupVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} }
+func (*LookupVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} }

 func (m *LookupVolumeResponse) GetVolumeIdLocations() []*LookupVolumeResponse_VolumeIdLocation {
 	if m != nil {
@@ -599,7 +656,7 @@ func (m *LookupVolumeResponse_VolumeIdLocation) Reset() { *m = LookupVol
 func (m *LookupVolumeResponse_VolumeIdLocation) String() string { return proto.CompactTextString(m) }
 func (*LookupVolumeResponse_VolumeIdLocation) ProtoMessage() {}
 func (*LookupVolumeResponse_VolumeIdLocation) Descriptor() ([]byte, []int) {
-	return fileDescriptor0, []int{10, 0}
+	return fileDescriptor0, []int{11, 0}
 }

 func (m *LookupVolumeResponse_VolumeIdLocation) GetVolumeId() string {
@@ -631,7 +688,7 @@ type Location struct {
 func (m *Location) Reset()         { *m = Location{} }
 func (m *Location) String() string { return proto.CompactTextString(m) }
 func (*Location) ProtoMessage()    {}
-func (*Location) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} }
+func (*Location) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} }

 func (m *Location) GetUrl() string {
 	if m != nil {
@@ -648,20 +705,21 @@ func (m *Location) GetPublicUrl() string {
 }

 type AssignRequest struct {
-	Count              uint64 `protobuf:"varint,1,opt,name=count" json:"count,omitempty"`
-	Replication        string `protobuf:"bytes,2,opt,name=replication" json:"replication,omitempty"`
-	Collection         string `protobuf:"bytes,3,opt,name=collection" json:"collection,omitempty"`
-	Ttl                string `protobuf:"bytes,4,opt,name=ttl" json:"ttl,omitempty"`
-	DataCenter         string `protobuf:"bytes,5,opt,name=data_center,json=dataCenter" json:"data_center,omitempty"`
-	Rack               string `protobuf:"bytes,6,opt,name=rack" json:"rack,omitempty"`
-	DataNode           string `protobuf:"bytes,7,opt,name=data_node,json=dataNode" json:"data_node,omitempty"`
-	MemoryMapMaxSizeMb uint32 `protobuf:"varint,8,opt,name=memory_map_max_size_mb,json=MemoryMapMaxSizeMb" json:"memory_map_max_size_mb,omitempty"`
+	Count               uint64 `protobuf:"varint,1,opt,name=count" json:"count,omitempty"`
+	Replication         string `protobuf:"bytes,2,opt,name=replication" json:"replication,omitempty"`
+	Collection          string `protobuf:"bytes,3,opt,name=collection" json:"collection,omitempty"`
+	Ttl                 string `protobuf:"bytes,4,opt,name=ttl" json:"ttl,omitempty"`
+	DataCenter          string `protobuf:"bytes,5,opt,name=data_center,json=dataCenter" json:"data_center,omitempty"`
+	Rack                string `protobuf:"bytes,6,opt,name=rack" json:"rack,omitempty"`
+	DataNode            string `protobuf:"bytes,7,opt,name=data_node,json=dataNode" json:"data_node,omitempty"`
+	MemoryMapMaxSizeMb  uint32 `protobuf:"varint,8,opt,name=memory_map_max_size_mb,json=memoryMapMaxSizeMb" json:"memory_map_max_size_mb,omitempty"`
+	WritableVolumeCount uint32 `protobuf:"varint,9,opt,name=Writable_volume_count,json=WritableVolumeCount" json:"Writable_volume_count,omitempty"`
 }

 func (m *AssignRequest) Reset()         { *m = AssignRequest{} }
 func (m *AssignRequest) String() string { return proto.CompactTextString(m) }
 func (*AssignRequest) ProtoMessage()    {}
-func (*AssignRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} }
+func (*AssignRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} }

 func (m *AssignRequest) GetCount() uint64 {
 	if m != nil {
@@ -719,6 +777,13 @@ func (m *AssignRequest) GetMemoryMapMaxSizeMb() uint32 {
 	return 0
 }

+func (m *AssignRequest) GetWritableVolumeCount() uint32 {
+	if m != nil {
+		return m.WritableVolumeCount
+	}
+	return 0
+}
+
 type AssignResponse struct {
 	Fid string `protobuf:"bytes,1,opt,name=fid" json:"fid,omitempty"`
 	Url string `protobuf:"bytes,2,opt,name=url" json:"url,omitempty"`
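The new Writable_volume_count field lets a client hint how many writable volumes the master should keep available when assigning a file id. A sketch of an Assign call, assuming the master's generated Seaweed gRPC client; the address and parameter values are assumptions:

```go
package main

import (
	"context"
	"log"

	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
	"google.golang.org/grpc"
)

func main() {
	// Assumed master gRPC endpoint; adjust for the actual deployment.
	conn, err := grpc.Dial("localhost:19333", grpc.WithInsecure())
	if err != nil {
		log.Fatalf("dial master: %v", err)
	}
	defer conn.Close()

	client := master_pb.NewSeaweedClient(conn)
	resp, err := client.Assign(context.Background(), &master_pb.AssignRequest{
		Count:               1,
		Replication:         "000",
		WritableVolumeCount: 4, // hint: keep 4 writable volumes ready (new field)
	})
	if err != nil {
		log.Fatalf("assign: %v", err)
	}
	if resp.Error != "" {
		log.Fatalf("assign: %v", resp.Error)
	}
	log.Printf("assigned fid %s on %s", resp.Fid, resp.Url)
}
```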
@@ -731,7 +796,7 @@ type AssignResponse struct {
 func (m *AssignResponse) Reset()         { *m = AssignResponse{} }
 func (m *AssignResponse) String() string { return proto.CompactTextString(m) }
 func (*AssignResponse) ProtoMessage()    {}
-func (*AssignResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} }
+func (*AssignResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} }

 func (m *AssignResponse) GetFid() string {
 	if m != nil {
@@ -784,7 +849,7 @@ type StatisticsRequest struct {
 func (m *StatisticsRequest) Reset()         { *m = StatisticsRequest{} }
 func (m *StatisticsRequest) String() string { return proto.CompactTextString(m) }
 func (*StatisticsRequest) ProtoMessage()    {}
-func (*StatisticsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} }
+func (*StatisticsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} }

 func (m *StatisticsRequest) GetReplication() string {
 	if m != nil {
@@ -819,7 +884,7 @@ type StatisticsResponse struct {
 func (m *StatisticsResponse) Reset()         { *m = StatisticsResponse{} }
 func (m *StatisticsResponse) String() string { return proto.CompactTextString(m) }
 func (*StatisticsResponse) ProtoMessage()    {}
-func (*StatisticsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} }
+func (*StatisticsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} }

 func (m *StatisticsResponse) GetReplication() string {
 	if m != nil {
@@ -871,7 +936,7 @@ type StorageType struct {
 func (m *StorageType) Reset()         { *m = StorageType{} }
 func (m *StorageType) String() string { return proto.CompactTextString(m) }
 func (*StorageType) ProtoMessage()    {}
-func (*StorageType) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} }
+func (*StorageType) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} }

 func (m *StorageType) GetReplication() string {
 	if m != nil {
@@ -894,7 +959,7 @@ type Collection struct {
 func (m *Collection) Reset()         { *m = Collection{} }
 func (m *Collection) String() string { return proto.CompactTextString(m) }
 func (*Collection) ProtoMessage()    {}
-func (*Collection) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} }
+func (*Collection) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} }

 func (m *Collection) GetName() string {
 	if m != nil {
@@ -911,7 +976,7 @@ type CollectionListRequest struct {
 func (m *CollectionListRequest) Reset()         { *m = CollectionListRequest{} }
 func (m *CollectionListRequest) String() string { return proto.CompactTextString(m) }
 func (*CollectionListRequest) ProtoMessage()    {}
-func (*CollectionListRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} }
+func (*CollectionListRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} }

 func (m *CollectionListRequest) GetIncludeNormalVolumes() bool {
 	if m != nil {
@@ -934,7 +999,7 @@ type CollectionListResponse struct {
 func (m *CollectionListResponse) Reset()         { *m = CollectionListResponse{} }
 func (m *CollectionListResponse) String() string { return proto.CompactTextString(m) }
 func (*CollectionListResponse) ProtoMessage()    {}
-func (*CollectionListResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} }
+func (*CollectionListResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} }

 func (m *CollectionListResponse) GetCollections() []*Collection {
 	if m != nil {
@@ -950,7 +1015,7 @@ type CollectionDeleteRequest struct {
 func (m *CollectionDeleteRequest) Reset()         { *m = CollectionDeleteRequest{} }
 func (m *CollectionDeleteRequest) String() string { return proto.CompactTextString(m) }
 func (*CollectionDeleteRequest) ProtoMessage()    {}
-func (*CollectionDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} }
+func (*CollectionDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} }

 func (m *CollectionDeleteRequest) GetName() string {
 	if m != nil {
@@ -965,7 +1030,7 @@ type CollectionDeleteResponse struct {
 }

 func (m *CollectionDeleteResponse) Reset()         { *m = CollectionDeleteResponse{} }
 func (m *CollectionDeleteResponse) String() string { return proto.CompactTextString(m) }
 func (*CollectionDeleteResponse) ProtoMessage()    {}
-func (*CollectionDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} }
+func (*CollectionDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} }

 //
 // volume related
@@ -978,12 +1043,13 @@ type DataNodeInfo struct {
 	ActiveVolumeCount uint64                             `protobuf:"varint,5,opt,name=active_volume_count,json=activeVolumeCount" json:"active_volume_count,omitempty"`
 	VolumeInfos       []*VolumeInformationMessage        `protobuf:"bytes,6,rep,name=volume_infos,json=volumeInfos" json:"volume_infos,omitempty"`
 	EcShardInfos      []*VolumeEcShardInformationMessage `protobuf:"bytes,7,rep,name=ec_shard_infos,json=ecShardInfos" json:"ec_shard_infos,omitempty"`
+	RemoteVolumeCount uint64                             `protobuf:"varint,8,opt,name=remote_volume_count,json=remoteVolumeCount" json:"remote_volume_count,omitempty"`
 }

 func (m *DataNodeInfo) Reset()         { *m = DataNodeInfo{} }
 func (m *DataNodeInfo) String() string { return proto.CompactTextString(m) }
 func (*DataNodeInfo) ProtoMessage()    {}
-func (*DataNodeInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} }
+func (*DataNodeInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} }

 func (m *DataNodeInfo) GetId() string {
 	if m != nil {
@@ -1034,6 +1100,13 @@ func (m *DataNodeInfo) GetEcShardInfos() []*VolumeEcShardInformationMessage {
 	return nil
 }

+func (m *DataNodeInfo) GetRemoteVolumeCount() uint64 {
+	if m != nil {
+		return m.RemoteVolumeCount
+	}
+	return 0
+}
+
 type RackInfo struct {
 	Id          string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
 	VolumeCount uint64 `protobuf:"varint,2,opt,name=volume_count,json=volumeCount" json:"volume_count,omitempty"`
@@ -1041,12 +1114,13 @@ type RackInfo struct {
 	FreeVolumeCount   uint64          `protobuf:"varint,4,opt,name=free_volume_count,json=freeVolumeCount" json:"free_volume_count,omitempty"`
 	ActiveVolumeCount uint64          `protobuf:"varint,5,opt,name=active_volume_count,json=activeVolumeCount" json:"active_volume_count,omitempty"`
 	DataNodeInfos     []*DataNodeInfo `protobuf:"bytes,6,rep,name=data_node_infos,json=dataNodeInfos" json:"data_node_infos,omitempty"`
+	RemoteVolumeCount uint64          `protobuf:"varint,7,opt,name=remote_volume_count,json=remoteVolumeCount" json:"remote_volume_count,omitempty"`
 }

 func (m *RackInfo) Reset()         { *m = RackInfo{} }
 func (m *RackInfo) String() string { return proto.CompactTextString(m) }
 func (*RackInfo) ProtoMessage()    {}
-func (*RackInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} }
+func (*RackInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} }

 func (m *RackInfo) GetId() string {
 	if m != nil {
@@ -1090,6 +1164,13 @@ func (m *RackInfo) GetDataNodeInfos() []*DataNodeInfo {
 	return nil
 }

+func (m *RackInfo) GetRemoteVolumeCount() uint64 {
+	if m != nil {
+		return m.RemoteVolumeCount
+	}
+	return 0
+}
+
 type DataCenterInfo struct {
 	Id          string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
 	VolumeCount uint64 `protobuf:"varint,2,opt,name=volume_count,json=volumeCount" json:"volume_count,omitempty"`
@@ -1097,12 +1178,13 @@ type DataCenterInfo struct {
 	FreeVolumeCount   uint64      `protobuf:"varint,4,opt,name=free_volume_count,json=freeVolumeCount" json:"free_volume_count,omitempty"`
 	ActiveVolumeCount uint64      `protobuf:"varint,5,opt,name=active_volume_count,json=activeVolumeCount" json:"active_volume_count,omitempty"`
 	RackInfos         []*RackInfo `protobuf:"bytes,6,rep,name=rack_infos,json=rackInfos" json:"rack_infos,omitempty"`
+	RemoteVolumeCount uint64      `protobuf:"varint,7,opt,name=remote_volume_count,json=remoteVolumeCount" json:"remote_volume_count,omitempty"`
 }

 func (m *DataCenterInfo) Reset()         { *m = DataCenterInfo{} }
 func (m *DataCenterInfo) String() string { return proto.CompactTextString(m) }
 func (*DataCenterInfo) ProtoMessage()    {}
-func (*DataCenterInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} }
+func (*DataCenterInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} }

 func (m *DataCenterInfo) GetId() string {
 	if m != nil {
@@ -1146,6 +1228,13 @@ func (m *DataCenterInfo) GetRackInfos() []*RackInfo {
 	return nil
 }

+func (m *DataCenterInfo) GetRemoteVolumeCount() uint64 {
+	if m != nil {
+		return m.RemoteVolumeCount
+	}
+	return 0
+}
+
 type TopologyInfo struct {
 	Id          string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
 	VolumeCount uint64 `protobuf:"varint,2,opt,name=volume_count,json=volumeCount" json:"volume_count,omitempty"`
@@ -1153,12 +1242,13 @@ type TopologyInfo struct {
 	FreeVolumeCount   uint64            `protobuf:"varint,4,opt,name=free_volume_count,json=freeVolumeCount" json:"free_volume_count,omitempty"`
 	ActiveVolumeCount uint64            `protobuf:"varint,5,opt,name=active_volume_count,json=activeVolumeCount" json:"active_volume_count,omitempty"`
 	DataCenterInfos   []*DataCenterInfo `protobuf:"bytes,6,rep,name=data_center_infos,json=dataCenterInfos" json:"data_center_infos,omitempty"`
+	RemoteVolumeCount uint64            `protobuf:"varint,7,opt,name=remote_volume_count,json=remoteVolumeCount" json:"remote_volume_count,omitempty"`
 }

 func (m *TopologyInfo) Reset()         { *m = TopologyInfo{} }
 func (m *TopologyInfo) String() string { return proto.CompactTextString(m) }
 func (*TopologyInfo) ProtoMessage()    {}
-func (*TopologyInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} }
+func (*TopologyInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} }

 func (m *TopologyInfo) GetId() string {
 	if m != nil {
@@ -1202,13 +1292,20 @@ func (m *TopologyInfo) GetDataCenterInfos() []*DataCenterInfo {
 	return nil
 }

+func (m *TopologyInfo) GetRemoteVolumeCount() uint64 {
+	if m != nil {
+		return m.RemoteVolumeCount
+	}
+	return 0
+}
+
 type VolumeListRequest struct {
 }

 func (m *VolumeListRequest) Reset()         { *m = VolumeListRequest{} }
 func (m *VolumeListRequest) String() string { return proto.CompactTextString(m) }
 func (*VolumeListRequest) ProtoMessage()    {}
-func (*VolumeListRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} }
+func (*VolumeListRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} }

 type VolumeListResponse struct {
 	TopologyInfo *TopologyInfo `protobuf:"bytes,1,opt,name=topology_info,json=topologyInfo" json:"topology_info,omitempty"`
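Each level of the topology now carries the same remote_volume_count rollup. Recomputing the total from the leaves shows what the precomputed fields represent; this walk is illustrative only, not code from the patch:

```go
package main

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
)

// remoteVolumeTotal walks the topology down to the data nodes; the sum it
// returns is what TopologyInfo.RemoteVolumeCount is expected to carry.
func remoteVolumeTotal(t *master_pb.TopologyInfo) uint64 {
	var total uint64
	for _, dc := range t.GetDataCenterInfos() {
		for _, rack := range dc.GetRackInfos() {
			for _, dn := range rack.GetDataNodeInfos() {
				total += dn.GetRemoteVolumeCount()
			}
		}
	}
	return total
}

func main() {
	topo := &master_pb.TopologyInfo{
		DataCenterInfos: []*master_pb.DataCenterInfo{{
			RackInfos: []*master_pb.RackInfo{{
				DataNodeInfos: []*master_pb.DataNodeInfo{{RemoteVolumeCount: 3}},
			}},
		}},
	}
	fmt.Println(remoteVolumeTotal(topo)) // 3
}
```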
@@ -1218,7 +1315,7 @@ type VolumeListResponse struct {
 func (m *VolumeListResponse) Reset()         { *m = VolumeListResponse{} }
 func (m *VolumeListResponse) String() string { return proto.CompactTextString(m) }
 func (*VolumeListResponse) ProtoMessage()    {}
-func (*VolumeListResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} }
+func (*VolumeListResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} }

 func (m *VolumeListResponse) GetTopologyInfo() *TopologyInfo {
 	if m != nil {
@@ -1241,7 +1338,7 @@ type LookupEcVolumeRequest struct {
 func (m *LookupEcVolumeRequest) Reset()         { *m = LookupEcVolumeRequest{} }
 func (m *LookupEcVolumeRequest) String() string { return proto.CompactTextString(m) }
 func (*LookupEcVolumeRequest) ProtoMessage()    {}
-func (*LookupEcVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} }
+func (*LookupEcVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} }

 func (m *LookupEcVolumeRequest) GetVolumeId() uint32 {
 	if m != nil {
@@ -1258,7 +1355,7 @@ type LookupEcVolumeResponse struct {
 func (m *LookupEcVolumeResponse) Reset()         { *m = LookupEcVolumeResponse{} }
 func (m *LookupEcVolumeResponse) String() string { return proto.CompactTextString(m) }
 func (*LookupEcVolumeResponse) ProtoMessage()    {}
-func (*LookupEcVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} }
+func (*LookupEcVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} }

 func (m *LookupEcVolumeResponse) GetVolumeId() uint32 {
 	if m != nil {
@@ -1285,7 +1382,7 @@ func (m *LookupEcVolumeResponse_EcShardIdLocation) Reset() {
 func (m *LookupEcVolumeResponse_EcShardIdLocation) String() string { return proto.CompactTextString(m) }
 func (*LookupEcVolumeResponse_EcShardIdLocation) ProtoMessage() {}
 func (*LookupEcVolumeResponse_EcShardIdLocation) Descriptor() ([]byte, []int) {
-	return fileDescriptor0, []int{29, 0}
+	return fileDescriptor0, []int{30, 0}
 }

 func (m *LookupEcVolumeResponse_EcShardIdLocation) GetShardId() uint32 {
@@ -1308,7 +1405,7 @@ type GetMasterConfigurationRequest struct {
 func (m *GetMasterConfigurationRequest) Reset()         { *m = GetMasterConfigurationRequest{} }
 func (m *GetMasterConfigurationRequest) String() string { return proto.CompactTextString(m) }
 func (*GetMasterConfigurationRequest) ProtoMessage()    {}
-func (*GetMasterConfigurationRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} }
+func (*GetMasterConfigurationRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} }

 type GetMasterConfigurationResponse struct {
 	MetricsAddress string `protobuf:"bytes,1,opt,name=metrics_address,json=metricsAddress" json:"metrics_address,omitempty"`
@@ -1318,7 +1415,7 @@ type GetMasterConfigurationResponse struct {
 func (m *GetMasterConfigurationResponse) Reset()         { *m = GetMasterConfigurationResponse{} }
 func (m *GetMasterConfigurationResponse) String() string { return proto.CompactTextString(m) }
 func (*GetMasterConfigurationResponse) ProtoMessage()    {}
-func (*GetMasterConfigurationResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} }
+func (*GetMasterConfigurationResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} }

 func (m *GetMasterConfigurationResponse) GetMetricsAddress() string {
 	if m != nil {
@@ -1340,6 +1437,7 @@ func init() {
 	proto.RegisterType((*VolumeInformationMessage)(nil), "master_pb.VolumeInformationMessage")
 	proto.RegisterType((*VolumeShortInformationMessage)(nil), "master_pb.VolumeShortInformationMessage")
 	proto.RegisterType((*VolumeEcShardInformationMessage)(nil), "master_pb.VolumeEcShardInformationMessage")
+	proto.RegisterType((*StorageBackend)(nil), "master_pb.StorageBackend")
 	proto.RegisterType((*Empty)(nil), "master_pb.Empty")
 	proto.RegisterType((*SuperBlockExtra)(nil), "master_pb.SuperBlockExtra")
 	proto.RegisterType((*SuperBlockExtra_ErasureCoding)(nil), "master_pb.SuperBlockExtra.ErasureCoding")
@@ -1809,125 +1907,137 @@ var _Seaweed_serviceDesc = grpc.ServiceDesc{

 func init() { proto.RegisterFile("master.proto", fileDescriptor0) }

 var fileDescriptor0 = []byte{
-	// 1920 bytes of a gzipped FileDescriptorProto
-	// ... (gzipped descriptor bytes omitted for brevity) ...
+	// 2102 bytes of a gzipped FileDescriptorProto
+	// ... (gzipped descriptor bytes omitted for brevity) ...
 }
0xe0, 0xba, 0x74, 0xff, 0x50, 0x08, 0xdb, 0x53, 0xb8, 0x77, 0x4d, 0x21, 0xb8, + 0x26, 0xd6, 0xca, 0x42, 0xac, 0x36, 0x74, 0x88, 0xe7, 0x06, 0xb1, 0x4f, 0x2e, 0xdd, 0x49, 0xc0, + 0xe4, 0x15, 0xed, 0x38, 0x2d, 0xe2, 0x1d, 0x70, 0xd9, 0xe3, 0x80, 0x51, 0xfb, 0x1b, 0x0b, 0xba, + 0xe5, 0x3b, 0xc4, 0x6f, 0x01, 0x9b, 0xa5, 0x44, 0xf5, 0x4d, 0xf1, 0xad, 0x96, 0xae, 0xa8, 0x4e, + 0xea, 0xa3, 0x03, 0x80, 0x34, 0x4b, 0x52, 0x92, 0xb1, 0x80, 0x70, 0xbf, 0xfc, 0x5a, 0x3e, 0x58, + 0x79, 0x2d, 0x77, 0x9e, 0xe7, 0xb6, 0xfb, 0x31, 0xcb, 0x66, 0x8e, 0x31, 0x79, 0xf4, 0x29, 0xf4, + 0xe6, 0xd4, 0x1c, 0x1d, 0x9e, 0x55, 0x19, 0x00, 0xff, 0x44, 0x9b, 0xb0, 0x76, 0x81, 0xc3, 0x29, + 0x51, 0x21, 0xc8, 0xc1, 0x2f, 0x2a, 0x9f, 0x58, 0xf6, 0x3a, 0xac, 0xed, 0x47, 0x29, 0x9b, 0xf1, + 0x9d, 0xf4, 0x8e, 0xa7, 0x29, 0xc9, 0x1e, 0x87, 0x89, 0x77, 0xbe, 0x7f, 0xc9, 0x32, 0x8c, 0xbe, + 0x84, 0x2e, 0xc9, 0x30, 0x9d, 0x66, 0xfc, 0x56, 0xf9, 0x41, 0x7c, 0x2a, 0x7c, 0x96, 0x5b, 0xd2, + 0xdc, 0x9c, 0x9d, 0x7d, 0x39, 0x61, 0x4f, 0xd8, 0x3b, 0x1d, 0x62, 0x0e, 0x47, 0xbf, 0x85, 0x4e, + 0x49, 0xcf, 0xc1, 0xe2, 0x0d, 0x5c, 0x65, 0x45, 0x7c, 0xf3, 0xa2, 0x99, 0xe2, 0x2c, 0x60, 0x33, + 0x45, 0x34, 0xd4, 0x88, 0x97, 0x0a, 0x55, 0x78, 0x03, 0x5f, 0x82, 0xd6, 0x71, 0x9a, 0x52, 0x72, + 0xe0, 0x53, 0xfb, 0x21, 0x6c, 0x3e, 0x25, 0x24, 0xdd, 0x4b, 0xe2, 0x98, 0x78, 0x8c, 0xf8, 0x0e, + 0xf9, 0xc3, 0x94, 0x50, 0xc6, 0x97, 0x10, 0x77, 0x42, 0xe5, 0x83, 0x7f, 0xdb, 0x7f, 0xb7, 0xa0, + 0x2b, 0x8f, 0xcb, 0xb3, 0xc4, 0x13, 0x87, 0x84, 0x83, 0xc6, 0x19, 0x8c, 0x02, 0x6d, 0x9a, 0x85, + 0x73, 0xd4, 0xa6, 0x32, 0x4f, 0x6d, 0xee, 0x40, 0x43, 0xf4, 0xfe, 0x22, 0x98, 0x75, 0xde, 0xce, + 0x03, 0x9f, 0x16, 0x55, 0xcb, 0x97, 0xea, 0x9a, 0x50, 0xb7, 0x74, 0x7b, 0xe6, 0x26, 0x45, 0x67, + 0x58, 0x33, 0x3b, 0x83, 0xfd, 0x12, 0x36, 0x9e, 0x25, 0xc9, 0xf9, 0x34, 0x95, 0xe1, 0xe9, 0x4d, + 0x94, 0xf7, 0x6e, 0x8d, 0xab, 0x3c, 0x96, 0x7c, 0xef, 0xd7, 0x1d, 0x65, 0xfb, 0x7f, 0x16, 0x6c, + 0x96, 0xdd, 0xaa, 0x66, 0xf6, 0x7b, 0xd8, 0xc8, 0xfd, 0xba, 0xa1, 0xc2, 0x42, 0x2e, 0xd0, 0xda, + 0xfd, 0xd0, 0x48, 0xf3, 0xb2, 0xd9, 0x9a, 0x20, 0xf9, 0x1a, 0x44, 0x67, 0x70, 0x31, 0x27, 0xa1, + 0xa3, 0x4b, 0xe8, 0xcf, 0x9b, 0xf1, 0x22, 0x9c, 0xaf, 0xaa, 0x10, 0x6f, 0xe8, 0x99, 0xe8, 0x67, + 0xd0, 0x2c, 0x02, 0xa9, 0x88, 0x40, 0x36, 0x4a, 0x81, 0xa8, 0xb5, 0x0a, 0x2b, 0x7e, 0xbc, 0x49, + 0x96, 0x25, 0x99, 0x2a, 0x38, 0x72, 0x60, 0xff, 0x12, 0x1a, 0xdf, 0x3b, 0xbb, 0xf6, 0xbf, 0x2a, + 0xd0, 0x79, 0x44, 0x69, 0x70, 0x1a, 0xeb, 0x14, 0x6c, 0xc2, 0x9a, 0x6c, 0x2d, 0xb2, 0xd7, 0xcb, + 0x01, 0x1a, 0x43, 0x4b, 0xd5, 0x2d, 0x03, 0x7a, 0x53, 0x74, 0x6d, 0x49, 0x54, 0xb5, 0xac, 0x26, + 0x43, 0xe3, 0xdd, 0x62, 0x8e, 0xe8, 0xae, 0xad, 0x24, 0xba, 0x75, 0x83, 0xe8, 0xbe, 0x0d, 0x4d, + 0x31, 0x29, 0x4e, 0x7c, 0xa2, 0x18, 0x70, 0x83, 0x0b, 0x8e, 0x12, 0x9f, 0xa0, 0x5d, 0xd8, 0x8a, + 0x48, 0x94, 0x64, 0x33, 0x37, 0xc2, 0xa9, 0xcb, 0x79, 0xb6, 0xe0, 0x2e, 0xd1, 0x44, 0xd5, 0x5e, + 0x24, 0xb5, 0x87, 0x38, 0x3d, 0xc4, 0x97, 0x9c, 0xbe, 0x1c, 0x4e, 0xd0, 0x2e, 0xdc, 0xfe, 0x2a, + 0x0b, 0x18, 0x9e, 0x84, 0xa4, 0xcc, 0xdf, 0x65, 0x2d, 0xde, 0xd0, 0x4a, 0x83, 0xc4, 0xdb, 0x7f, + 0xb3, 0xa0, 0xab, 0x51, 0x53, 0x27, 0xac, 0x0f, 0xd5, 0x93, 0x3c, 0xcb, 0xfc, 0x53, 0xe7, 0xa2, + 0xb2, 0x2a, 0x17, 0x0b, 0x8f, 0x88, 0x1c, 0xf9, 0x9a, 0x89, 0x7c, 0x9e, 0xf4, 0x35, 0x23, 0xe9, + 0x1c, 0x1a, 0x3c, 0x65, 0x67, 0x1a, 0x1a, 0xfe, 0x6d, 0x9f, 0xc2, 0xe0, 0x98, 0x61, 0x16, 0x50, + 0x16, 0x78, 0x54, 0xa7, 0x73, 0x2e, 0x71, 0xd6, 0x75, 0x89, 0xab, 0xac, 0x4a, 0x5c, 0x35, 0x4f, + 0x9c, 0xfd, 0x1f, 0x0b, 0x90, 0xb9, 0x92, 
0x82, 0xe0, 0x07, 0x58, 0x8a, 0x43, 0xc6, 0x12, 0xc6, + 0xd9, 0x20, 0x67, 0x5c, 0x8a, 0x37, 0x09, 0x09, 0x4f, 0x1f, 0x3f, 0x0d, 0x53, 0x4a, 0x7c, 0xa9, + 0x95, 0xa4, 0xa9, 0xc1, 0x05, 0x42, 0x59, 0xe6, 0x5c, 0xf5, 0x39, 0xce, 0x65, 0x3f, 0x82, 0x96, + 0xea, 0x3f, 0x2f, 0x79, 0xef, 0xba, 0x3e, 0x7a, 0x15, 0x5d, 0xa5, 0x00, 0x62, 0x0c, 0xb0, 0x57, + 0x44, 0xbf, 0xac, 0x02, 0xff, 0x11, 0x6e, 0x17, 0x16, 0xcf, 0x02, 0xca, 0x74, 0x5e, 0x3e, 0x86, + 0xad, 0x20, 0xf6, 0xc2, 0xa9, 0x4f, 0xdc, 0x98, 0x77, 0xf0, 0x30, 0x7f, 0xbc, 0x58, 0x82, 0xad, + 0x6d, 0x2a, 0xed, 0x91, 0x50, 0xea, 0x47, 0xcc, 0x07, 0x80, 0xf4, 0x2c, 0xe2, 0xe5, 0x33, 0x2a, + 0x62, 0x46, 0x5f, 0x69, 0xf6, 0x3d, 0x65, 0x6d, 0xbf, 0x80, 0xad, 0xf9, 0xc5, 0x55, 0xaa, 0x7e, + 0x0e, 0xad, 0x02, 0x76, 0x5d, 0x07, 0x6f, 0x1b, 0xe5, 0xa7, 0x98, 0xe7, 0x98, 0x96, 0xf6, 0x4f, + 0xe0, 0xad, 0x42, 0xf5, 0x44, 0x14, 0xfa, 0xab, 0x1a, 0xd0, 0x08, 0x86, 0x8b, 0xe6, 0x32, 0x06, + 0xfb, 0xaf, 0x55, 0x68, 0x3f, 0x51, 0x37, 0x97, 0xd3, 0x18, 0x83, 0xb8, 0x48, 0xf6, 0x70, 0x1f, + 0xda, 0xa5, 0x0b, 0x29, 0xf9, 0x76, 0xeb, 0xc2, 0x78, 0x4d, 0x2f, 0x7b, 0x77, 0x57, 0x85, 0xd9, + 0xfc, 0xbb, 0xfb, 0x21, 0x0c, 0x4e, 0x32, 0x42, 0x16, 0x9f, 0xe8, 0x35, 0xa7, 0xc7, 0x15, 0xa6, + 0xed, 0x0e, 0x6c, 0x60, 0x8f, 0x05, 0x17, 0x73, 0xd6, 0xf2, 0x7c, 0x0d, 0xa4, 0xca, 0xb4, 0xff, + 0x3c, 0x0f, 0x34, 0x88, 0x4f, 0x12, 0x3a, 0xac, 0x7f, 0xf7, 0x27, 0xb6, 0xda, 0x0d, 0xd7, 0x50, + 0xf4, 0x1c, 0xba, 0xfa, 0xa9, 0xa6, 0x3c, 0xad, 0xdf, 0xf8, 0x19, 0xd8, 0x26, 0x85, 0x8a, 0x1a, + 0xbc, 0xb9, 0xb4, 0x93, 0x86, 0xdc, 0x89, 0x54, 0x99, 0x85, 0xed, 0xdf, 0x15, 0x68, 0x38, 0xd8, + 0x3b, 0x7f, 0xb3, 0xf3, 0xf1, 0x19, 0xf4, 0xf2, 0x1e, 0x51, 0x4a, 0xc9, 0x5b, 0x06, 0x90, 0xe6, + 0xd1, 0x73, 0x3a, 0xbe, 0x31, 0x5a, 0x09, 0xdb, 0xfa, 0x2a, 0xd8, 0xfe, 0x59, 0x81, 0xee, 0x93, + 0xbc, 0x6f, 0xbd, 0xd9, 0xe0, 0xed, 0x02, 0xf0, 0x46, 0x5b, 0xc2, 0xcd, 0x24, 0x26, 0xfa, 0x78, + 0x38, 0xcd, 0x4c, 0x7d, 0xdd, 0x1c, 0xaf, 0x6f, 0x2a, 0xd0, 0x7e, 0x99, 0xa4, 0x49, 0x98, 0x9c, + 0xce, 0xde, 0x6c, 0xb4, 0xf6, 0x61, 0x60, 0x70, 0x98, 0x12, 0x68, 0x77, 0xe6, 0x0e, 0x5b, 0x71, + 0x38, 0x9c, 0x9e, 0x5f, 0x1a, 0xdf, 0x1c, 0xc0, 0x0d, 0x18, 0x28, 0x5e, 0x5f, 0xb4, 0x14, 0xfb, + 0xcf, 0x16, 0x20, 0x53, 0xaa, 0x6a, 0xfd, 0xaf, 0xa0, 0xc3, 0x14, 0xd6, 0x22, 0x3e, 0xf5, 0xb8, + 0x31, 0xef, 0x82, 0x99, 0x0b, 0xa7, 0xcd, 0xcc, 0xcc, 0xfc, 0x14, 0x36, 0x17, 0x7e, 0x06, 0xe2, + 0x84, 0x4a, 0x66, 0x64, 0x30, 0xf7, 0x4b, 0xd0, 0xe1, 0xc4, 0xfe, 0x18, 0x6e, 0x4b, 0x12, 0xad, + 0xfb, 0x90, 0xee, 0x0f, 0x0b, 0x6c, 0xb8, 0x53, 0xb0, 0x61, 0xfb, 0x5b, 0x0b, 0xb6, 0xe6, 0xa7, + 0xa9, 0xf8, 0xaf, 0x9a, 0x87, 0x30, 0x20, 0x55, 0x2f, 0x4d, 0x5e, 0x2f, 0xe9, 0xf4, 0x47, 0x0b, + 0xbc, 0x7e, 0xde, 0xf7, 0x8e, 0xae, 0xa3, 0x05, 0xb5, 0xef, 0xd3, 0xb2, 0x80, 0x8e, 0x30, 0x0c, + 0x16, 0xcc, 0xf8, 0xab, 0x48, 0xaf, 0xab, 0x62, 0x5a, 0x57, 0x13, 0xbf, 0x07, 0xb1, 0xb7, 0xef, + 0xc1, 0xdd, 0x2f, 0x08, 0x3b, 0x14, 0x36, 0x7b, 0x49, 0x7c, 0x12, 0x9c, 0x4e, 0x33, 0x69, 0x54, + 0xa4, 0xf6, 0xdd, 0x55, 0x16, 0x0a, 0xa6, 0x25, 0xbf, 0xb5, 0x59, 0x37, 0xfe, 0xad, 0xad, 0x72, + 0xd5, 0x6f, 0x6d, 0xbb, 0xff, 0xad, 0xc3, 0xfa, 0x31, 0xc1, 0xaf, 0x09, 0xe1, 0x4f, 0xfb, 0xce, + 0x31, 0x89, 0xfd, 0xe2, 0x57, 0xf4, 0x4d, 0x63, 0x8f, 0xb9, 0x74, 0xf4, 0xce, 0x32, 0x69, 0x4e, + 0x01, 0x6e, 0x6d, 0x5b, 0x1f, 0x5a, 0xe8, 0x05, 0x74, 0x4a, 0x2f, 0x5a, 0x74, 0xcf, 0x98, 0xb4, + 0xec, 0xad, 0x3b, 0xba, 0xb3, 0xd0, 0x10, 0x35, 0xaa, 0xb9, 0xcb, 0xb6, 0xf9, 0x92, 0x43, 0xef, + 0xae, 0x7c, 0xe2, 0x49, 0x87, 0xf7, 0xae, 0x79, 0x02, 0xda, 0xb7, 
0xd0, 0x67, 0x50, 0x97, 0x94,
+	0x1f, 0x0d, 0x0d, 0xe3, 0xd2, 0xdb, 0xa9, 0x14, 0x57, 0xf9, 0x7d, 0x60, 0xdf, 0x42, 0x4f, 0x01,
+	0x0a, 0xd2, 0x8c, 0xde, 0x29, 0xfd, 0x0c, 0x32, 0xc7, 0xda, 0x47, 0x77, 0x57, 0x68, 0x73, 0x67,
+	0x5f, 0x41, 0xb7, 0x4c, 0xed, 0xd0, 0x78, 0x29, 0x7b, 0x33, 0xea, 0xc3, 0xe8, 0xfe, 0x15, 0x16,
+	0xb9, 0xe3, 0xdf, 0x41, 0x7f, 0x9e, 0xb1, 0x21, 0x7b, 0xe9, 0xc4, 0x12, 0xfb, 0x1b, 0xbd, 0x77,
+	0xa5, 0x8d, 0x09, 0x42, 0x51, 0xa2, 0x4a, 0x20, 0x2c, 0xd4, 0xb3, 0x12, 0x08, 0x8b, 0x75, 0x4d,
+	0x82, 0x50, 0xbe, 0xd7, 0x25, 0x10, 0x96, 0x56, 0xa1, 0x12, 0x08, 0xcb, 0x8b, 0x82, 0x7d, 0x0b,
+	0x25, 0xb0, 0xb5, 0xfc, 0xb6, 0x21, 0xf3, 0x27, 0xa1, 0x2b, 0xaf, 0xec, 0xe8, 0xc1, 0x77, 0xb0,
+	0xd4, 0x0b, 0x4e, 0xea, 0xe2, 0x2f, 0xaa, 0x8f, 0xfe, 0x1f, 0x00, 0x00, 0xff, 0xff, 0x6c, 0x64,
+	0x5c, 0xbc, 0xb2, 0x1a, 0x00, 0x00,
 }
diff --git a/weed/pb/proto_read_write_test.go b/weed/pb/proto_read_write_test.go
new file mode 100644
index 000000000..7f6444ab5
--- /dev/null
+++ b/weed/pb/proto_read_write_test.go
@@ -0,0 +1,43 @@
+package pb
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
+	"github.com/golang/protobuf/jsonpb"
+)
+
+func TestJsonpMarshalUnmarshal(t *testing.T) {
+
+	tv := &volume_server_pb.RemoteFile{
+		BackendType: "aws",
+		BackendId:   "",
+		FileSize:    12,
+	}
+
+	m := jsonpb.Marshaler{
+		EmitDefaults: true,
+		Indent:       "  ",
+	}
+
+	if text, err := m.MarshalToString(tv); err != nil {
+		fmt.Printf("marshal error: %v\n", err)
+	} else {
+		fmt.Printf("marshalled: %s\n", text)
+	}
+
+	rawJson := `{
+		"backendType":"aws",
+		"backendId":"temp",
+		"FileSize":12
+	}`
+
+	tv1 := &volume_server_pb.RemoteFile{}
+	if err := jsonpb.UnmarshalString(rawJson, tv1); err != nil {
+		fmt.Printf("unmarshal error: %v\n", err)
+	}
+
+	fmt.Printf("unmarshalled: %+v\n", tv1)
+
+}
diff --git a/weed/pb/volume_info.go b/weed/pb/volume_info.go
new file mode 100644
index 000000000..c4f733f5c
--- /dev/null
+++ b/weed/pb/volume_info.go
@@ -0,0 +1,76 @@
+package pb
+
+import (
+	"bytes"
+	"fmt"
+	"io/ioutil"
+
+	_ "github.com/chrislusf/seaweedfs/weed/storage/backend/s3_backend"
+	"github.com/chrislusf/seaweedfs/weed/util"
+
+	"github.com/golang/protobuf/jsonpb"
+
+	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
+)
+
+// MaybeLoadVolumeInfo loads the file data as *volume_server_pb.VolumeInfo; the returned volumeInfo will not be nil
+func MaybeLoadVolumeInfo(fileName string) (*volume_server_pb.VolumeInfo, bool, error) {
+
+	volumeInfo := &volume_server_pb.VolumeInfo{}
+
+	glog.V(1).Infof("maybeLoadVolumeInfo checks %s", fileName)
+	if exists, canRead, _, _, _ := util.CheckFile(fileName); !exists || !canRead {
+		if !exists {
+			return volumeInfo, false, nil
+		}
+		if !canRead {
+			glog.Warningf("can not read %s", fileName)
+			return volumeInfo, false, fmt.Errorf("can not read %s", fileName)
+		}
+		return volumeInfo, false, nil
+	}
+
+	glog.V(1).Infof("maybeLoadVolumeInfo reads %s", fileName)
+	tierData, readErr := ioutil.ReadFile(fileName)
+	if readErr != nil {
+		glog.Warningf("fail to read %s : %v", fileName, readErr)
+		return volumeInfo, false, fmt.Errorf("fail to read %s : %v", fileName, readErr)
+	}
+
+	glog.V(1).Infof("maybeLoadVolumeInfo Unmarshal volume info %v", fileName)
+	if err := jsonpb.Unmarshal(bytes.NewReader(tierData), volumeInfo); err != nil {
+		glog.Warningf("unmarshal error: %v", err)
+		return volumeInfo, false, fmt.Errorf("unmarshal error: %v", err)
+	}
+
+	if len(volumeInfo.GetFiles()) == 0 {
+		return volumeInfo, false, nil
+	}
+
+	return volumeInfo, true, nil
+}
+
+func SaveVolumeInfo(fileName string, volumeInfo *volume_server_pb.VolumeInfo) error {
+
+	if exists, _, canWrite, _, _ := util.CheckFile(fileName); exists && !canWrite {
+		return fmt.Errorf("%s not writable", fileName)
+	}
+
+	m := jsonpb.Marshaler{
+		EmitDefaults: true,
+		Indent:       "  ",
+	}
+
+	text, marshalErr := m.MarshalToString(volumeInfo)
+	if marshalErr != nil {
+		return fmt.Errorf("marshal to %s: %v", fileName, marshalErr)
+	}
+
+	writeErr := ioutil.WriteFile(fileName, []byte(text), 0755)
+	if writeErr != nil {
+		return fmt.Errorf("fail to write %s : %v", fileName, writeErr)
+	}
+
+	return nil
+}
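A minimal sketch of how the two helpers above round-trip a tiered-storage sidecar file. The path "/data/7.vif", the backend id, and the object key are hypothetical illustration values; only pb.SaveVolumeInfo, pb.MaybeLoadVolumeInfo, and the generated volume_server_pb types come from this change set:

package main

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/pb"
	"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
)

func main() {
	// Describe a volume .dat file that has been offloaded to a remote backend.
	vi := &volume_server_pb.VolumeInfo{
		Version:     2,     // illustrative value
		Replication: "000", // illustrative value
		Files: []*volume_server_pb.RemoteFile{{
			BackendType: "aws",     // mirrors the test fixture above
			BackendId:   "default", // hypothetical backend id
			Key:         "7.dat",   // hypothetical remote object key
			FileSize:    1073741824,
		}},
	}
	// SaveVolumeInfo persists jsonpb text; MaybeLoadVolumeInfo reads it back
	// and reports found=true only when at least one remote file is listed.
	if err := pb.SaveVolumeInfo("/data/7.vif", vi); err != nil {
		fmt.Println("save:", err)
		return
	}
	loaded, found, err := pb.MaybeLoadVolumeInfo("/data/7.vif")
	fmt.Printf("found=%v err=%v remote files=%d\n", found, err, len(loaded.GetFiles()))
}

Because MaybeLoadVolumeInfo never returns a nil VolumeInfo, callers can rely on the boolean to distinguish "no sidecar" from "sidecar with remote files" without a nil check.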
diff --git a/weed/pb/volume_server.proto b/weed/pb/volume_server.proto
index 3a5874c02..405d41e9c 100644
--- a/weed/pb/volume_server.proto
+++ b/weed/pb/volume_server.proto
@@ -35,6 +35,8 @@ service VolumeServer {
     }
     rpc VolumeMarkReadonly (VolumeMarkReadonlyRequest) returns (VolumeMarkReadonlyResponse) {
     }
+    rpc VolumeConfigure (VolumeConfigureRequest) returns (VolumeConfigureResponse) {
+    }
 
     // copy the .idx .dat files, and mount this volume
     rpc VolumeCopy (VolumeCopyRequest) returns (VolumeCopyResponse) {
@@ -66,6 +68,14 @@ service VolumeServer {
     }
     rpc VolumeEcBlobDelete (VolumeEcBlobDeleteRequest) returns (VolumeEcBlobDeleteResponse) {
     }
+    rpc VolumeEcShardsToVolume (VolumeEcShardsToVolumeRequest) returns (VolumeEcShardsToVolumeResponse) {
+    }
+
+    // tiered storage
+    rpc VolumeTierMoveDatToRemote (VolumeTierMoveDatToRemoteRequest) returns (stream VolumeTierMoveDatToRemoteResponse) {
+    }
+    rpc VolumeTierMoveDatFromRemote (VolumeTierMoveDatFromRemoteRequest) returns (stream VolumeTierMoveDatFromRemoteResponse) {
+    }
 
     // query
     rpc Query (QueryRequest) returns (stream QueriedStripe) {
@@ -181,6 +191,14 @@ message VolumeMarkReadonlyRequest {
 message VolumeMarkReadonlyResponse {
 }
 
+message VolumeConfigureRequest {
+    uint32 volume_id = 1;
+    string replication = 2;
+}
+message VolumeConfigureResponse {
+    string error = 1;
+}
+
 message VolumeCopyRequest {
     uint32 volume_id = 1;
     string collection = 2;
@@ -199,6 +217,7 @@ message CopyFileRequest {
     uint64 stop_offset = 4;
     string collection = 5;
     bool is_ec_volume = 6;
+    bool ignore_source_file_not_found = 7;
 }
 message CopyFileResponse {
     bytes file_content = 1;
@@ -245,6 +264,8 @@ message VolumeEcShardsCopyRequest {
     repeated uint32 shard_ids = 3;
     bool copy_ecx_file = 4;
     string source_data_node = 5;
+    bool copy_ecj_file = 6;
+    bool copy_vif_file = 7;
 }
 message VolumeEcShardsCopyResponse {
 }
@@ -293,6 +314,13 @@ message VolumeEcBlobDeleteRequest {
 message VolumeEcBlobDeleteResponse {
 }
 
+message VolumeEcShardsToVolumeRequest {
+    uint32 volume_id = 1;
+    string collection = 2;
+}
+message VolumeEcShardsToVolumeResponse {
+}
+
 message ReadVolumeFileStatusRequest {
     uint32 volume_id = 1;
 }
@@ -324,6 +352,44 @@ message MemStatus {
     uint64 stack = 7;
 }
 
+// tiered storage on volume servers
+message RemoteFile {
+    string backend_type = 1;
+    string backend_id = 2;
+    string key = 3;
+    uint64 offset = 4;
+    uint64 file_size = 5;
+    uint64 modified_time = 6;
+    string extension = 7;
+}
+message VolumeInfo {
+    repeated RemoteFile files = 1;
+    uint32 version = 2;
+    string replication = 3;
+}
+
+message VolumeTierMoveDatToRemoteRequest {
+    uint32 volume_id = 1;
+    string collection = 2;
+    string destination_backend_name = 3;
+    bool keep_local_dat_file = 4;
+}
+message VolumeTierMoveDatToRemoteResponse {
+    int64 processed = 1;
+    float processedPercentage = 2;
+}
+
+message 
VolumeTierMoveDatFromRemoteRequest { + uint32 volume_id = 1; + string collection = 2; + bool keep_remote_dat_file = 3; +} +message VolumeTierMoveDatFromRemoteResponse { + int64 processed = 1; + float processedPercentage = 2; +} + +// select on volume servers message QueryRequest { repeated string selections = 1; repeated string from_file_ids = 2; @@ -338,17 +404,17 @@ message QueryRequest { // NONE | GZIP | BZIP2 string compression_type = 1; message CSVInput { - string file_header_info = 1; // Valid values: NONE | USE | IGNORE - string record_delimiter = 2; // Default: \n - string field_delimiter = 3; // Default: , - string quote_charactoer = 4; // Default: " + string file_header_info = 1; // Valid values: NONE | USE | IGNORE + string record_delimiter = 2; // Default: \n + string field_delimiter = 3; // Default: , + string quote_charactoer = 4; // Default: " string quote_escape_character = 5; // Default: " - string comments = 6; // Default: # + string comments = 6; // Default: # // If true, records might contain record delimiters within quote characters - bool allow_quoted_record_delimiter = 7; // default False. + bool allow_quoted_record_delimiter = 7; // default False. } message JSONInput { - string type = 1; // Valid values: DOCUMENT | LINES + string type = 1; // Valid values: DOCUMENT | LINES } message ParquetInput { } @@ -361,10 +427,10 @@ message QueryRequest { message OutputSerialization { message CSVOutput { - string quote_fields = 1; // Valid values: ALWAYS | ASNEEDED - string record_delimiter = 2; // Default: \n - string field_delimiter = 3; // Default: , - string quote_charactoer = 4; // Default: " + string quote_fields = 1; // Valid values: ALWAYS | ASNEEDED + string record_delimiter = 2; // Default: \n + string field_delimiter = 3; // Default: , + string quote_charactoer = 4; // Default: " string quote_escape_character = 5; // Default: " } message JSONOutput { diff --git a/weed/pb/volume_server_pb/volume_server.pb.go b/weed/pb/volume_server_pb/volume_server.pb.go index f8af827a6..2a8f91bc5 100644 --- a/weed/pb/volume_server_pb/volume_server.pb.go +++ b/weed/pb/volume_server_pb/volume_server.pb.go @@ -37,6 +37,8 @@ It has these top-level messages: VolumeDeleteResponse VolumeMarkReadonlyRequest VolumeMarkReadonlyResponse + VolumeConfigureRequest + VolumeConfigureResponse VolumeCopyRequest VolumeCopyResponse CopyFileRequest @@ -61,10 +63,18 @@ It has these top-level messages: VolumeEcShardReadResponse VolumeEcBlobDeleteRequest VolumeEcBlobDeleteResponse + VolumeEcShardsToVolumeRequest + VolumeEcShardsToVolumeResponse ReadVolumeFileStatusRequest ReadVolumeFileStatusResponse DiskStatus MemStatus + RemoteFile + VolumeInfo + VolumeTierMoveDatToRemoteRequest + VolumeTierMoveDatToRemoteResponse + VolumeTierMoveDatFromRemoteRequest + VolumeTierMoveDatFromRemoteResponse QueryRequest QueriedStripe */ @@ -320,7 +330,7 @@ type AllocateVolumeRequest struct { Preallocate int64 `protobuf:"varint,3,opt,name=preallocate" json:"preallocate,omitempty"` Replication string `protobuf:"bytes,4,opt,name=replication" json:"replication,omitempty"` Ttl string `protobuf:"bytes,5,opt,name=ttl" json:"ttl,omitempty"` - MemoryMapMaxSizeMb uint32 `protobuf:"varint,6,opt,name=memory_map_max_size_mb,json=MemoryMapMaxSizeMb" json:"memory_map_max_size_mb,omitempty"` + MemoryMapMaxSizeMb uint32 `protobuf:"varint,6,opt,name=memory_map_max_size_mb,json=memoryMapMaxSizeMb" json:"memory_map_max_size_mb,omitempty"` } func (m *AllocateVolumeRequest) Reset() { *m = AllocateVolumeRequest{} } @@ -594,6 +604,46 @@ func (m 
*VolumeMarkReadonlyResponse) String() string { return proto.C func (*VolumeMarkReadonlyResponse) ProtoMessage() {} func (*VolumeMarkReadonlyResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} } +type VolumeConfigureRequest struct { + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` + Replication string `protobuf:"bytes,2,opt,name=replication" json:"replication,omitempty"` +} + +func (m *VolumeConfigureRequest) Reset() { *m = VolumeConfigureRequest{} } +func (m *VolumeConfigureRequest) String() string { return proto.CompactTextString(m) } +func (*VolumeConfigureRequest) ProtoMessage() {} +func (*VolumeConfigureRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} } + +func (m *VolumeConfigureRequest) GetVolumeId() uint32 { + if m != nil { + return m.VolumeId + } + return 0 +} + +func (m *VolumeConfigureRequest) GetReplication() string { + if m != nil { + return m.Replication + } + return "" +} + +type VolumeConfigureResponse struct { + Error string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"` +} + +func (m *VolumeConfigureResponse) Reset() { *m = VolumeConfigureResponse{} } +func (m *VolumeConfigureResponse) String() string { return proto.CompactTextString(m) } +func (*VolumeConfigureResponse) ProtoMessage() {} +func (*VolumeConfigureResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} } + +func (m *VolumeConfigureResponse) GetError() string { + if m != nil { + return m.Error + } + return "" +} + type VolumeCopyRequest struct { VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"` @@ -605,7 +655,7 @@ type VolumeCopyRequest struct { func (m *VolumeCopyRequest) Reset() { *m = VolumeCopyRequest{} } func (m *VolumeCopyRequest) String() string { return proto.CompactTextString(m) } func (*VolumeCopyRequest) ProtoMessage() {} -func (*VolumeCopyRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} } +func (*VolumeCopyRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} } func (m *VolumeCopyRequest) GetVolumeId() uint32 { if m != nil { @@ -649,7 +699,7 @@ type VolumeCopyResponse struct { func (m *VolumeCopyResponse) Reset() { *m = VolumeCopyResponse{} } func (m *VolumeCopyResponse) String() string { return proto.CompactTextString(m) } func (*VolumeCopyResponse) ProtoMessage() {} -func (*VolumeCopyResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} } +func (*VolumeCopyResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} } func (m *VolumeCopyResponse) GetLastAppendAtNs() uint64 { if m != nil { @@ -659,18 +709,19 @@ func (m *VolumeCopyResponse) GetLastAppendAtNs() uint64 { } type CopyFileRequest struct { - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` - Ext string `protobuf:"bytes,2,opt,name=ext" json:"ext,omitempty"` - CompactionRevision uint32 `protobuf:"varint,3,opt,name=compaction_revision,json=compactionRevision" json:"compaction_revision,omitempty"` - StopOffset uint64 `protobuf:"varint,4,opt,name=stop_offset,json=stopOffset" json:"stop_offset,omitempty"` - Collection string `protobuf:"bytes,5,opt,name=collection" json:"collection,omitempty"` - IsEcVolume bool `protobuf:"varint,6,opt,name=is_ec_volume,json=isEcVolume" json:"is_ec_volume,omitempty"` + VolumeId uint32 
`protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` + Ext string `protobuf:"bytes,2,opt,name=ext" json:"ext,omitempty"` + CompactionRevision uint32 `protobuf:"varint,3,opt,name=compaction_revision,json=compactionRevision" json:"compaction_revision,omitempty"` + StopOffset uint64 `protobuf:"varint,4,opt,name=stop_offset,json=stopOffset" json:"stop_offset,omitempty"` + Collection string `protobuf:"bytes,5,opt,name=collection" json:"collection,omitempty"` + IsEcVolume bool `protobuf:"varint,6,opt,name=is_ec_volume,json=isEcVolume" json:"is_ec_volume,omitempty"` + IgnoreSourceFileNotFound bool `protobuf:"varint,7,opt,name=ignore_source_file_not_found,json=ignoreSourceFileNotFound" json:"ignore_source_file_not_found,omitempty"` } func (m *CopyFileRequest) Reset() { *m = CopyFileRequest{} } func (m *CopyFileRequest) String() string { return proto.CompactTextString(m) } func (*CopyFileRequest) ProtoMessage() {} -func (*CopyFileRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} } +func (*CopyFileRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} } func (m *CopyFileRequest) GetVolumeId() uint32 { if m != nil { @@ -714,6 +765,13 @@ func (m *CopyFileRequest) GetIsEcVolume() bool { return false } +func (m *CopyFileRequest) GetIgnoreSourceFileNotFound() bool { + if m != nil { + return m.IgnoreSourceFileNotFound + } + return false +} + type CopyFileResponse struct { FileContent []byte `protobuf:"bytes,1,opt,name=file_content,json=fileContent,proto3" json:"file_content,omitempty"` } @@ -721,7 +779,7 @@ type CopyFileResponse struct { func (m *CopyFileResponse) Reset() { *m = CopyFileResponse{} } func (m *CopyFileResponse) String() string { return proto.CompactTextString(m) } func (*CopyFileResponse) ProtoMessage() {} -func (*CopyFileResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} } +func (*CopyFileResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{33} } func (m *CopyFileResponse) GetFileContent() []byte { if m != nil { @@ -739,7 +797,7 @@ type VolumeTailSenderRequest struct { func (m *VolumeTailSenderRequest) Reset() { *m = VolumeTailSenderRequest{} } func (m *VolumeTailSenderRequest) String() string { return proto.CompactTextString(m) } func (*VolumeTailSenderRequest) ProtoMessage() {} -func (*VolumeTailSenderRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} } +func (*VolumeTailSenderRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{34} } func (m *VolumeTailSenderRequest) GetVolumeId() uint32 { if m != nil { @@ -771,7 +829,7 @@ type VolumeTailSenderResponse struct { func (m *VolumeTailSenderResponse) Reset() { *m = VolumeTailSenderResponse{} } func (m *VolumeTailSenderResponse) String() string { return proto.CompactTextString(m) } func (*VolumeTailSenderResponse) ProtoMessage() {} -func (*VolumeTailSenderResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{33} } +func (*VolumeTailSenderResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{35} } func (m *VolumeTailSenderResponse) GetNeedleHeader() []byte { if m != nil { @@ -804,7 +862,7 @@ type VolumeTailReceiverRequest struct { func (m *VolumeTailReceiverRequest) Reset() { *m = VolumeTailReceiverRequest{} } func (m *VolumeTailReceiverRequest) String() string { return proto.CompactTextString(m) } func (*VolumeTailReceiverRequest) ProtoMessage() {} -func (*VolumeTailReceiverRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{34} } +func 
(*VolumeTailReceiverRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{36} } func (m *VolumeTailReceiverRequest) GetVolumeId() uint32 { if m != nil { @@ -840,7 +898,7 @@ type VolumeTailReceiverResponse struct { func (m *VolumeTailReceiverResponse) Reset() { *m = VolumeTailReceiverResponse{} } func (m *VolumeTailReceiverResponse) String() string { return proto.CompactTextString(m) } func (*VolumeTailReceiverResponse) ProtoMessage() {} -func (*VolumeTailReceiverResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{35} } +func (*VolumeTailReceiverResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{37} } type VolumeEcShardsGenerateRequest struct { VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` @@ -850,7 +908,7 @@ type VolumeEcShardsGenerateRequest struct { func (m *VolumeEcShardsGenerateRequest) Reset() { *m = VolumeEcShardsGenerateRequest{} } func (m *VolumeEcShardsGenerateRequest) String() string { return proto.CompactTextString(m) } func (*VolumeEcShardsGenerateRequest) ProtoMessage() {} -func (*VolumeEcShardsGenerateRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{36} } +func (*VolumeEcShardsGenerateRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{38} } func (m *VolumeEcShardsGenerateRequest) GetVolumeId() uint32 { if m != nil { @@ -872,7 +930,7 @@ type VolumeEcShardsGenerateResponse struct { func (m *VolumeEcShardsGenerateResponse) Reset() { *m = VolumeEcShardsGenerateResponse{} } func (m *VolumeEcShardsGenerateResponse) String() string { return proto.CompactTextString(m) } func (*VolumeEcShardsGenerateResponse) ProtoMessage() {} -func (*VolumeEcShardsGenerateResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{37} } +func (*VolumeEcShardsGenerateResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{39} } type VolumeEcShardsRebuildRequest struct { VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` @@ -882,7 +940,7 @@ type VolumeEcShardsRebuildRequest struct { func (m *VolumeEcShardsRebuildRequest) Reset() { *m = VolumeEcShardsRebuildRequest{} } func (m *VolumeEcShardsRebuildRequest) String() string { return proto.CompactTextString(m) } func (*VolumeEcShardsRebuildRequest) ProtoMessage() {} -func (*VolumeEcShardsRebuildRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{38} } +func (*VolumeEcShardsRebuildRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{40} } func (m *VolumeEcShardsRebuildRequest) GetVolumeId() uint32 { if m != nil { @@ -905,7 +963,7 @@ type VolumeEcShardsRebuildResponse struct { func (m *VolumeEcShardsRebuildResponse) Reset() { *m = VolumeEcShardsRebuildResponse{} } func (m *VolumeEcShardsRebuildResponse) String() string { return proto.CompactTextString(m) } func (*VolumeEcShardsRebuildResponse) ProtoMessage() {} -func (*VolumeEcShardsRebuildResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{39} } +func (*VolumeEcShardsRebuildResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{41} } func (m *VolumeEcShardsRebuildResponse) GetRebuiltShardIds() []uint32 { if m != nil { @@ -920,12 +978,14 @@ type VolumeEcShardsCopyRequest struct { ShardIds []uint32 `protobuf:"varint,3,rep,packed,name=shard_ids,json=shardIds" json:"shard_ids,omitempty"` CopyEcxFile bool `protobuf:"varint,4,opt,name=copy_ecx_file,json=copyEcxFile" json:"copy_ecx_file,omitempty"` SourceDataNode string 
`protobuf:"bytes,5,opt,name=source_data_node,json=sourceDataNode" json:"source_data_node,omitempty"` + CopyEcjFile bool `protobuf:"varint,6,opt,name=copy_ecj_file,json=copyEcjFile" json:"copy_ecj_file,omitempty"` + CopyVifFile bool `protobuf:"varint,7,opt,name=copy_vif_file,json=copyVifFile" json:"copy_vif_file,omitempty"` } func (m *VolumeEcShardsCopyRequest) Reset() { *m = VolumeEcShardsCopyRequest{} } func (m *VolumeEcShardsCopyRequest) String() string { return proto.CompactTextString(m) } func (*VolumeEcShardsCopyRequest) ProtoMessage() {} -func (*VolumeEcShardsCopyRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{40} } +func (*VolumeEcShardsCopyRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{42} } func (m *VolumeEcShardsCopyRequest) GetVolumeId() uint32 { if m != nil { @@ -962,13 +1022,27 @@ func (m *VolumeEcShardsCopyRequest) GetSourceDataNode() string { return "" } +func (m *VolumeEcShardsCopyRequest) GetCopyEcjFile() bool { + if m != nil { + return m.CopyEcjFile + } + return false +} + +func (m *VolumeEcShardsCopyRequest) GetCopyVifFile() bool { + if m != nil { + return m.CopyVifFile + } + return false +} + type VolumeEcShardsCopyResponse struct { } func (m *VolumeEcShardsCopyResponse) Reset() { *m = VolumeEcShardsCopyResponse{} } func (m *VolumeEcShardsCopyResponse) String() string { return proto.CompactTextString(m) } func (*VolumeEcShardsCopyResponse) ProtoMessage() {} -func (*VolumeEcShardsCopyResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{41} } +func (*VolumeEcShardsCopyResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{43} } type VolumeEcShardsDeleteRequest struct { VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` @@ -979,7 +1053,7 @@ type VolumeEcShardsDeleteRequest struct { func (m *VolumeEcShardsDeleteRequest) Reset() { *m = VolumeEcShardsDeleteRequest{} } func (m *VolumeEcShardsDeleteRequest) String() string { return proto.CompactTextString(m) } func (*VolumeEcShardsDeleteRequest) ProtoMessage() {} -func (*VolumeEcShardsDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{42} } +func (*VolumeEcShardsDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{44} } func (m *VolumeEcShardsDeleteRequest) GetVolumeId() uint32 { if m != nil { @@ -1008,7 +1082,7 @@ type VolumeEcShardsDeleteResponse struct { func (m *VolumeEcShardsDeleteResponse) Reset() { *m = VolumeEcShardsDeleteResponse{} } func (m *VolumeEcShardsDeleteResponse) String() string { return proto.CompactTextString(m) } func (*VolumeEcShardsDeleteResponse) ProtoMessage() {} -func (*VolumeEcShardsDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{43} } +func (*VolumeEcShardsDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{45} } type VolumeEcShardsMountRequest struct { VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` @@ -1019,7 +1093,7 @@ type VolumeEcShardsMountRequest struct { func (m *VolumeEcShardsMountRequest) Reset() { *m = VolumeEcShardsMountRequest{} } func (m *VolumeEcShardsMountRequest) String() string { return proto.CompactTextString(m) } func (*VolumeEcShardsMountRequest) ProtoMessage() {} -func (*VolumeEcShardsMountRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{44} } +func (*VolumeEcShardsMountRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{46} } func (m *VolumeEcShardsMountRequest) 
GetVolumeId() uint32 { if m != nil { @@ -1048,7 +1122,7 @@ type VolumeEcShardsMountResponse struct { func (m *VolumeEcShardsMountResponse) Reset() { *m = VolumeEcShardsMountResponse{} } func (m *VolumeEcShardsMountResponse) String() string { return proto.CompactTextString(m) } func (*VolumeEcShardsMountResponse) ProtoMessage() {} -func (*VolumeEcShardsMountResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{45} } +func (*VolumeEcShardsMountResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{47} } type VolumeEcShardsUnmountRequest struct { VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` @@ -1058,7 +1132,7 @@ type VolumeEcShardsUnmountRequest struct { func (m *VolumeEcShardsUnmountRequest) Reset() { *m = VolumeEcShardsUnmountRequest{} } func (m *VolumeEcShardsUnmountRequest) String() string { return proto.CompactTextString(m) } func (*VolumeEcShardsUnmountRequest) ProtoMessage() {} -func (*VolumeEcShardsUnmountRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{46} } +func (*VolumeEcShardsUnmountRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{48} } func (m *VolumeEcShardsUnmountRequest) GetVolumeId() uint32 { if m != nil { @@ -1080,7 +1154,7 @@ type VolumeEcShardsUnmountResponse struct { func (m *VolumeEcShardsUnmountResponse) Reset() { *m = VolumeEcShardsUnmountResponse{} } func (m *VolumeEcShardsUnmountResponse) String() string { return proto.CompactTextString(m) } func (*VolumeEcShardsUnmountResponse) ProtoMessage() {} -func (*VolumeEcShardsUnmountResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{47} } +func (*VolumeEcShardsUnmountResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{49} } type VolumeEcShardReadRequest struct { VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` @@ -1093,7 +1167,7 @@ type VolumeEcShardReadRequest struct { func (m *VolumeEcShardReadRequest) Reset() { *m = VolumeEcShardReadRequest{} } func (m *VolumeEcShardReadRequest) String() string { return proto.CompactTextString(m) } func (*VolumeEcShardReadRequest) ProtoMessage() {} -func (*VolumeEcShardReadRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{48} } +func (*VolumeEcShardReadRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{50} } func (m *VolumeEcShardReadRequest) GetVolumeId() uint32 { if m != nil { @@ -1138,7 +1212,7 @@ type VolumeEcShardReadResponse struct { func (m *VolumeEcShardReadResponse) Reset() { *m = VolumeEcShardReadResponse{} } func (m *VolumeEcShardReadResponse) String() string { return proto.CompactTextString(m) } func (*VolumeEcShardReadResponse) ProtoMessage() {} -func (*VolumeEcShardReadResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{49} } +func (*VolumeEcShardReadResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{51} } func (m *VolumeEcShardReadResponse) GetData() []byte { if m != nil { @@ -1164,7 +1238,7 @@ type VolumeEcBlobDeleteRequest struct { func (m *VolumeEcBlobDeleteRequest) Reset() { *m = VolumeEcBlobDeleteRequest{} } func (m *VolumeEcBlobDeleteRequest) String() string { return proto.CompactTextString(m) } func (*VolumeEcBlobDeleteRequest) ProtoMessage() {} -func (*VolumeEcBlobDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{50} } +func (*VolumeEcBlobDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{52} } func (m 
*VolumeEcBlobDeleteRequest) GetVolumeId() uint32 { if m != nil { @@ -1200,7 +1274,39 @@ type VolumeEcBlobDeleteResponse struct { func (m *VolumeEcBlobDeleteResponse) Reset() { *m = VolumeEcBlobDeleteResponse{} } func (m *VolumeEcBlobDeleteResponse) String() string { return proto.CompactTextString(m) } func (*VolumeEcBlobDeleteResponse) ProtoMessage() {} -func (*VolumeEcBlobDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{51} } +func (*VolumeEcBlobDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{53} } + +type VolumeEcShardsToVolumeRequest struct { + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"` +} + +func (m *VolumeEcShardsToVolumeRequest) Reset() { *m = VolumeEcShardsToVolumeRequest{} } +func (m *VolumeEcShardsToVolumeRequest) String() string { return proto.CompactTextString(m) } +func (*VolumeEcShardsToVolumeRequest) ProtoMessage() {} +func (*VolumeEcShardsToVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{54} } + +func (m *VolumeEcShardsToVolumeRequest) GetVolumeId() uint32 { + if m != nil { + return m.VolumeId + } + return 0 +} + +func (m *VolumeEcShardsToVolumeRequest) GetCollection() string { + if m != nil { + return m.Collection + } + return "" +} + +type VolumeEcShardsToVolumeResponse struct { +} + +func (m *VolumeEcShardsToVolumeResponse) Reset() { *m = VolumeEcShardsToVolumeResponse{} } +func (m *VolumeEcShardsToVolumeResponse) String() string { return proto.CompactTextString(m) } +func (*VolumeEcShardsToVolumeResponse) ProtoMessage() {} +func (*VolumeEcShardsToVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{55} } type ReadVolumeFileStatusRequest struct { VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` @@ -1209,7 +1315,7 @@ type ReadVolumeFileStatusRequest struct { func (m *ReadVolumeFileStatusRequest) Reset() { *m = ReadVolumeFileStatusRequest{} } func (m *ReadVolumeFileStatusRequest) String() string { return proto.CompactTextString(m) } func (*ReadVolumeFileStatusRequest) ProtoMessage() {} -func (*ReadVolumeFileStatusRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{52} } +func (*ReadVolumeFileStatusRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{56} } func (m *ReadVolumeFileStatusRequest) GetVolumeId() uint32 { if m != nil { @@ -1232,7 +1338,7 @@ type ReadVolumeFileStatusResponse struct { func (m *ReadVolumeFileStatusResponse) Reset() { *m = ReadVolumeFileStatusResponse{} } func (m *ReadVolumeFileStatusResponse) String() string { return proto.CompactTextString(m) } func (*ReadVolumeFileStatusResponse) ProtoMessage() {} -func (*ReadVolumeFileStatusResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{53} } +func (*ReadVolumeFileStatusResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{57} } func (m *ReadVolumeFileStatusResponse) GetVolumeId() uint32 { if m != nil { @@ -1300,7 +1406,7 @@ type DiskStatus struct { func (m *DiskStatus) Reset() { *m = DiskStatus{} } func (m *DiskStatus) String() string { return proto.CompactTextString(m) } func (*DiskStatus) ProtoMessage() {} -func (*DiskStatus) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{54} } +func (*DiskStatus) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{58} } func (m *DiskStatus) GetDir() string { if m != nil { @@ -1343,7 
+1449,7 @@ type MemStatus struct {
 func (m *MemStatus) Reset() { *m = MemStatus{} }
 func (m *MemStatus) String() string { return proto.CompactTextString(m) }
 func (*MemStatus) ProtoMessage() {}
-func (*MemStatus) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{55} }
+func (*MemStatus) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{59} }
 
 func (m *MemStatus) GetGoroutines() int32 {
 	if m != nil {
@@ -1394,6 +1500,232 @@ func (m *MemStatus) GetStack() uint64 {
 	return 0
 }
 
+// tiered storage on volume servers
+type RemoteFile struct {
+	BackendType string `protobuf:"bytes,1,opt,name=backend_type,json=backendType" json:"backend_type,omitempty"`
+	BackendId string `protobuf:"bytes,2,opt,name=backend_id,json=backendId" json:"backend_id,omitempty"`
+	Key string `protobuf:"bytes,3,opt,name=key" json:"key,omitempty"`
+	Offset uint64 `protobuf:"varint,4,opt,name=offset" json:"offset,omitempty"`
+	FileSize uint64 `protobuf:"varint,5,opt,name=file_size,json=fileSize" json:"file_size,omitempty"`
+	ModifiedTime uint64 `protobuf:"varint,6,opt,name=modified_time,json=modifiedTime" json:"modified_time,omitempty"`
+	Extension string `protobuf:"bytes,7,opt,name=extension" json:"extension,omitempty"`
+}
+
+func (m *RemoteFile) Reset() { *m = RemoteFile{} }
+func (m *RemoteFile) String() string { return proto.CompactTextString(m) }
+func (*RemoteFile) ProtoMessage() {}
+func (*RemoteFile) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{60} }
+
+func (m *RemoteFile) GetBackendType() string {
+	if m != nil {
+		return m.BackendType
+	}
+	return ""
+}
+
+func (m *RemoteFile) GetBackendId() string {
+	if m != nil {
+		return m.BackendId
+	}
+	return ""
+}
+
+func (m *RemoteFile) GetKey() string {
+	if m != nil {
+		return m.Key
+	}
+	return ""
+}
+
+func (m *RemoteFile) GetOffset() uint64 {
+	if m != nil {
+		return m.Offset
+	}
+	return 0
+}
+
+func (m *RemoteFile) GetFileSize() uint64 {
+	if m != nil {
+		return m.FileSize
+	}
+	return 0
+}
+
+func (m *RemoteFile) GetModifiedTime() uint64 {
+	if m != nil {
+		return m.ModifiedTime
+	}
+	return 0
+}
+
+func (m *RemoteFile) GetExtension() string {
+	if m != nil {
+		return m.Extension
+	}
+	return ""
+}
+
+type VolumeInfo struct {
+	Files []*RemoteFile `protobuf:"bytes,1,rep,name=files" json:"files,omitempty"`
+	Version uint32 `protobuf:"varint,2,opt,name=version" json:"version,omitempty"`
+	Replication string `protobuf:"bytes,3,opt,name=replication" json:"replication,omitempty"`
+}
+
+func (m *VolumeInfo) Reset() { *m = VolumeInfo{} }
+func (m *VolumeInfo) String() string { return proto.CompactTextString(m) }
+func (*VolumeInfo) ProtoMessage() {}
+func (*VolumeInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{61} }
+
+func (m *VolumeInfo) GetFiles() []*RemoteFile {
+	if m != nil {
+		return m.Files
+	}
+	return nil
+}
+
+func (m *VolumeInfo) GetVersion() uint32 {
+	if m != nil {
+		return m.Version
+	}
+	return 0
+}
+
+func (m *VolumeInfo) GetReplication() string {
+	if m != nil {
+		return m.Replication
+	}
+	return ""
+}
+
+type VolumeTierMoveDatToRemoteRequest struct {
+	VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
+	Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"`
+	DestinationBackendName string `protobuf:"bytes,3,opt,name=destination_backend_name,json=destinationBackendName" json:"destination_backend_name,omitempty"`
+	KeepLocalDatFile bool `protobuf:"varint,4,opt,name=keep_local_dat_file,json=keepLocalDatFile"
json:"keep_local_dat_file,omitempty"` +} + +func (m *VolumeTierMoveDatToRemoteRequest) Reset() { *m = VolumeTierMoveDatToRemoteRequest{} } +func (m *VolumeTierMoveDatToRemoteRequest) String() string { return proto.CompactTextString(m) } +func (*VolumeTierMoveDatToRemoteRequest) ProtoMessage() {} +func (*VolumeTierMoveDatToRemoteRequest) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{62} +} + +func (m *VolumeTierMoveDatToRemoteRequest) GetVolumeId() uint32 { + if m != nil { + return m.VolumeId + } + return 0 +} + +func (m *VolumeTierMoveDatToRemoteRequest) GetCollection() string { + if m != nil { + return m.Collection + } + return "" +} + +func (m *VolumeTierMoveDatToRemoteRequest) GetDestinationBackendName() string { + if m != nil { + return m.DestinationBackendName + } + return "" +} + +func (m *VolumeTierMoveDatToRemoteRequest) GetKeepLocalDatFile() bool { + if m != nil { + return m.KeepLocalDatFile + } + return false +} + +type VolumeTierMoveDatToRemoteResponse struct { + Processed int64 `protobuf:"varint,1,opt,name=processed" json:"processed,omitempty"` + ProcessedPercentage float32 `protobuf:"fixed32,2,opt,name=processedPercentage" json:"processedPercentage,omitempty"` +} + +func (m *VolumeTierMoveDatToRemoteResponse) Reset() { *m = VolumeTierMoveDatToRemoteResponse{} } +func (m *VolumeTierMoveDatToRemoteResponse) String() string { return proto.CompactTextString(m) } +func (*VolumeTierMoveDatToRemoteResponse) ProtoMessage() {} +func (*VolumeTierMoveDatToRemoteResponse) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{63} +} + +func (m *VolumeTierMoveDatToRemoteResponse) GetProcessed() int64 { + if m != nil { + return m.Processed + } + return 0 +} + +func (m *VolumeTierMoveDatToRemoteResponse) GetProcessedPercentage() float32 { + if m != nil { + return m.ProcessedPercentage + } + return 0 +} + +type VolumeTierMoveDatFromRemoteRequest struct { + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"` + KeepRemoteDatFile bool `protobuf:"varint,3,opt,name=keep_remote_dat_file,json=keepRemoteDatFile" json:"keep_remote_dat_file,omitempty"` +} + +func (m *VolumeTierMoveDatFromRemoteRequest) Reset() { *m = VolumeTierMoveDatFromRemoteRequest{} } +func (m *VolumeTierMoveDatFromRemoteRequest) String() string { return proto.CompactTextString(m) } +func (*VolumeTierMoveDatFromRemoteRequest) ProtoMessage() {} +func (*VolumeTierMoveDatFromRemoteRequest) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{64} +} + +func (m *VolumeTierMoveDatFromRemoteRequest) GetVolumeId() uint32 { + if m != nil { + return m.VolumeId + } + return 0 +} + +func (m *VolumeTierMoveDatFromRemoteRequest) GetCollection() string { + if m != nil { + return m.Collection + } + return "" +} + +func (m *VolumeTierMoveDatFromRemoteRequest) GetKeepRemoteDatFile() bool { + if m != nil { + return m.KeepRemoteDatFile + } + return false +} + +type VolumeTierMoveDatFromRemoteResponse struct { + Processed int64 `protobuf:"varint,1,opt,name=processed" json:"processed,omitempty"` + ProcessedPercentage float32 `protobuf:"fixed32,2,opt,name=processedPercentage" json:"processedPercentage,omitempty"` +} + +func (m *VolumeTierMoveDatFromRemoteResponse) Reset() { *m = VolumeTierMoveDatFromRemoteResponse{} } +func (m *VolumeTierMoveDatFromRemoteResponse) String() string { return proto.CompactTextString(m) } +func (*VolumeTierMoveDatFromRemoteResponse) ProtoMessage() {} +func 
(*VolumeTierMoveDatFromRemoteResponse) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{65} +} + +func (m *VolumeTierMoveDatFromRemoteResponse) GetProcessed() int64 { + if m != nil { + return m.Processed + } + return 0 +} + +func (m *VolumeTierMoveDatFromRemoteResponse) GetProcessedPercentage() float32 { + if m != nil { + return m.ProcessedPercentage + } + return 0 +} + +// select on volume servers type QueryRequest struct { Selections []string `protobuf:"bytes,1,rep,name=selections" json:"selections,omitempty"` FromFileIds []string `protobuf:"bytes,2,rep,name=from_file_ids,json=fromFileIds" json:"from_file_ids,omitempty"` @@ -1405,7 +1737,7 @@ type QueryRequest struct { func (m *QueryRequest) Reset() { *m = QueryRequest{} } func (m *QueryRequest) String() string { return proto.CompactTextString(m) } func (*QueryRequest) ProtoMessage() {} -func (*QueryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{56} } +func (*QueryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{66} } func (m *QueryRequest) GetSelections() []string { if m != nil { @@ -1451,7 +1783,7 @@ type QueryRequest_Filter struct { func (m *QueryRequest_Filter) Reset() { *m = QueryRequest_Filter{} } func (m *QueryRequest_Filter) String() string { return proto.CompactTextString(m) } func (*QueryRequest_Filter) ProtoMessage() {} -func (*QueryRequest_Filter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{56, 0} } +func (*QueryRequest_Filter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{66, 0} } func (m *QueryRequest_Filter) GetField() string { if m != nil { @@ -1486,7 +1818,7 @@ func (m *QueryRequest_InputSerialization) Reset() { *m = QueryRequest_In func (m *QueryRequest_InputSerialization) String() string { return proto.CompactTextString(m) } func (*QueryRequest_InputSerialization) ProtoMessage() {} func (*QueryRequest_InputSerialization) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{56, 1} + return fileDescriptor0, []int{66, 1} } func (m *QueryRequest_InputSerialization) GetCompressionType() string { @@ -1534,7 +1866,7 @@ func (m *QueryRequest_InputSerialization_CSVInput) Reset() { func (m *QueryRequest_InputSerialization_CSVInput) String() string { return proto.CompactTextString(m) } func (*QueryRequest_InputSerialization_CSVInput) ProtoMessage() {} func (*QueryRequest_InputSerialization_CSVInput) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{56, 1, 0} + return fileDescriptor0, []int{66, 1, 0} } func (m *QueryRequest_InputSerialization_CSVInput) GetFileHeaderInfo() string { @@ -1596,7 +1928,7 @@ func (m *QueryRequest_InputSerialization_JSONInput) Reset() { func (m *QueryRequest_InputSerialization_JSONInput) String() string { return proto.CompactTextString(m) } func (*QueryRequest_InputSerialization_JSONInput) ProtoMessage() {} func (*QueryRequest_InputSerialization_JSONInput) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{56, 1, 1} + return fileDescriptor0, []int{66, 1, 1} } func (m *QueryRequest_InputSerialization_JSONInput) GetType() string { @@ -1617,7 +1949,7 @@ func (m *QueryRequest_InputSerialization_ParquetInput) String() string { } func (*QueryRequest_InputSerialization_ParquetInput) ProtoMessage() {} func (*QueryRequest_InputSerialization_ParquetInput) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{56, 1, 2} + return fileDescriptor0, []int{66, 1, 2} } type QueryRequest_OutputSerialization struct { @@ -1629,7 +1961,7 @@ func (m *QueryRequest_OutputSerialization) Reset() { 
*m = QueryRequest_O func (m *QueryRequest_OutputSerialization) String() string { return proto.CompactTextString(m) } func (*QueryRequest_OutputSerialization) ProtoMessage() {} func (*QueryRequest_OutputSerialization) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{56, 2} + return fileDescriptor0, []int{66, 2} } func (m *QueryRequest_OutputSerialization) GetCsvOutput() *QueryRequest_OutputSerialization_CSVOutput { @@ -1662,7 +1994,7 @@ func (m *QueryRequest_OutputSerialization_CSVOutput) String() string { } func (*QueryRequest_OutputSerialization_CSVOutput) ProtoMessage() {} func (*QueryRequest_OutputSerialization_CSVOutput) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{56, 2, 0} + return fileDescriptor0, []int{66, 2, 0} } func (m *QueryRequest_OutputSerialization_CSVOutput) GetQuoteFields() string { @@ -1712,7 +2044,7 @@ func (m *QueryRequest_OutputSerialization_JSONOutput) String() string { } func (*QueryRequest_OutputSerialization_JSONOutput) ProtoMessage() {} func (*QueryRequest_OutputSerialization_JSONOutput) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{56, 2, 1} + return fileDescriptor0, []int{66, 2, 1} } func (m *QueryRequest_OutputSerialization_JSONOutput) GetRecordDelimiter() string { @@ -1729,7 +2061,7 @@ type QueriedStripe struct { func (m *QueriedStripe) Reset() { *m = QueriedStripe{} } func (m *QueriedStripe) String() string { return proto.CompactTextString(m) } func (*QueriedStripe) ProtoMessage() {} -func (*QueriedStripe) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{57} } +func (*QueriedStripe) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{67} } func (m *QueriedStripe) GetRecords() []byte { if m != nil { @@ -1767,6 +2099,8 @@ func init() { proto.RegisterType((*VolumeDeleteResponse)(nil), "volume_server_pb.VolumeDeleteResponse") proto.RegisterType((*VolumeMarkReadonlyRequest)(nil), "volume_server_pb.VolumeMarkReadonlyRequest") proto.RegisterType((*VolumeMarkReadonlyResponse)(nil), "volume_server_pb.VolumeMarkReadonlyResponse") + proto.RegisterType((*VolumeConfigureRequest)(nil), "volume_server_pb.VolumeConfigureRequest") + proto.RegisterType((*VolumeConfigureResponse)(nil), "volume_server_pb.VolumeConfigureResponse") proto.RegisterType((*VolumeCopyRequest)(nil), "volume_server_pb.VolumeCopyRequest") proto.RegisterType((*VolumeCopyResponse)(nil), "volume_server_pb.VolumeCopyResponse") proto.RegisterType((*CopyFileRequest)(nil), "volume_server_pb.CopyFileRequest") @@ -1791,10 +2125,18 @@ func init() { proto.RegisterType((*VolumeEcShardReadResponse)(nil), "volume_server_pb.VolumeEcShardReadResponse") proto.RegisterType((*VolumeEcBlobDeleteRequest)(nil), "volume_server_pb.VolumeEcBlobDeleteRequest") proto.RegisterType((*VolumeEcBlobDeleteResponse)(nil), "volume_server_pb.VolumeEcBlobDeleteResponse") + proto.RegisterType((*VolumeEcShardsToVolumeRequest)(nil), "volume_server_pb.VolumeEcShardsToVolumeRequest") + proto.RegisterType((*VolumeEcShardsToVolumeResponse)(nil), "volume_server_pb.VolumeEcShardsToVolumeResponse") proto.RegisterType((*ReadVolumeFileStatusRequest)(nil), "volume_server_pb.ReadVolumeFileStatusRequest") proto.RegisterType((*ReadVolumeFileStatusResponse)(nil), "volume_server_pb.ReadVolumeFileStatusResponse") proto.RegisterType((*DiskStatus)(nil), "volume_server_pb.DiskStatus") proto.RegisterType((*MemStatus)(nil), "volume_server_pb.MemStatus") + proto.RegisterType((*RemoteFile)(nil), "volume_server_pb.RemoteFile") + proto.RegisterType((*VolumeInfo)(nil), "volume_server_pb.VolumeInfo") + 
proto.RegisterType((*VolumeTierMoveDatToRemoteRequest)(nil), "volume_server_pb.VolumeTierMoveDatToRemoteRequest") + proto.RegisterType((*VolumeTierMoveDatToRemoteResponse)(nil), "volume_server_pb.VolumeTierMoveDatToRemoteResponse") + proto.RegisterType((*VolumeTierMoveDatFromRemoteRequest)(nil), "volume_server_pb.VolumeTierMoveDatFromRemoteRequest") + proto.RegisterType((*VolumeTierMoveDatFromRemoteResponse)(nil), "volume_server_pb.VolumeTierMoveDatFromRemoteResponse") proto.RegisterType((*QueryRequest)(nil), "volume_server_pb.QueryRequest") proto.RegisterType((*QueryRequest_Filter)(nil), "volume_server_pb.QueryRequest.Filter") proto.RegisterType((*QueryRequest_InputSerialization)(nil), "volume_server_pb.QueryRequest.InputSerialization") @@ -1832,6 +2174,7 @@ type VolumeServerClient interface { VolumeUnmount(ctx context.Context, in *VolumeUnmountRequest, opts ...grpc.CallOption) (*VolumeUnmountResponse, error) VolumeDelete(ctx context.Context, in *VolumeDeleteRequest, opts ...grpc.CallOption) (*VolumeDeleteResponse, error) VolumeMarkReadonly(ctx context.Context, in *VolumeMarkReadonlyRequest, opts ...grpc.CallOption) (*VolumeMarkReadonlyResponse, error) + VolumeConfigure(ctx context.Context, in *VolumeConfigureRequest, opts ...grpc.CallOption) (*VolumeConfigureResponse, error) // copy the .idx .dat files, and mount this volume VolumeCopy(ctx context.Context, in *VolumeCopyRequest, opts ...grpc.CallOption) (*VolumeCopyResponse, error) ReadVolumeFileStatus(ctx context.Context, in *ReadVolumeFileStatusRequest, opts ...grpc.CallOption) (*ReadVolumeFileStatusResponse, error) @@ -1847,6 +2190,10 @@ type VolumeServerClient interface { VolumeEcShardsUnmount(ctx context.Context, in *VolumeEcShardsUnmountRequest, opts ...grpc.CallOption) (*VolumeEcShardsUnmountResponse, error) VolumeEcShardRead(ctx context.Context, in *VolumeEcShardReadRequest, opts ...grpc.CallOption) (VolumeServer_VolumeEcShardReadClient, error) VolumeEcBlobDelete(ctx context.Context, in *VolumeEcBlobDeleteRequest, opts ...grpc.CallOption) (*VolumeEcBlobDeleteResponse, error) + VolumeEcShardsToVolume(ctx context.Context, in *VolumeEcShardsToVolumeRequest, opts ...grpc.CallOption) (*VolumeEcShardsToVolumeResponse, error) + // tiered storage + VolumeTierMoveDatToRemote(ctx context.Context, in *VolumeTierMoveDatToRemoteRequest, opts ...grpc.CallOption) (VolumeServer_VolumeTierMoveDatToRemoteClient, error) + VolumeTierMoveDatFromRemote(ctx context.Context, in *VolumeTierMoveDatFromRemoteRequest, opts ...grpc.CallOption) (VolumeServer_VolumeTierMoveDatFromRemoteClient, error) // query Query(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (VolumeServer_QueryClient, error) } @@ -1999,6 +2346,15 @@ func (c *volumeServerClient) VolumeMarkReadonly(ctx context.Context, in *VolumeM return out, nil } +func (c *volumeServerClient) VolumeConfigure(ctx context.Context, in *VolumeConfigureRequest, opts ...grpc.CallOption) (*VolumeConfigureResponse, error) { + out := new(VolumeConfigureResponse) + err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeConfigure", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *volumeServerClient) VolumeCopy(ctx context.Context, in *VolumeCopyRequest, opts ...grpc.CallOption) (*VolumeCopyResponse, error) { out := new(VolumeCopyResponse) err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeCopy", in, out, c.cc, opts...) 
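Before the streaming client plumbing below, a sketch of driving the new server-streaming tier-move RPC from the generated client. The server address, volume id, and backend name are hypothetical; the request and response fields are exactly the ones added in this diff:

package main

import (
	"context"
	"fmt"
	"io"
	"log"

	"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
	"google.golang.org/grpc"
)

func main() {
	// Hypothetical volume server gRPC address.
	conn, err := grpc.Dial("localhost:18080", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := volume_server_pb.NewVolumeServerClient(conn)
	stream, err := client.VolumeTierMoveDatToRemote(context.Background(),
		&volume_server_pb.VolumeTierMoveDatToRemoteRequest{
			VolumeId:               7,            // hypothetical volume
			Collection:             "",           // default collection
			DestinationBackendName: "s3.default", // hypothetical backend name
			KeepLocalDatFile:       false,        // drop the local .dat once uploaded
		})
	if err != nil {
		log.Fatal(err)
	}
	// The server streams progress updates until the move completes.
	for {
		resp, recvErr := stream.Recv()
		if recvErr == io.EOF {
			return
		}
		if recvErr != nil {
			log.Fatal(recvErr)
		}
		fmt.Printf("moved %d bytes (%.1f%%)\n", resp.Processed, resp.ProcessedPercentage)
	}
}

VolumeTierMoveDatFromRemote is consumed the same way, with KeepRemoteDatFile controlling whether the remote copy is retained after download.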
@@ -2185,8 +2541,81 @@ func (c *volumeServerClient) VolumeEcBlobDelete(ctx context.Context, in *VolumeE return out, nil } +func (c *volumeServerClient) VolumeEcShardsToVolume(ctx context.Context, in *VolumeEcShardsToVolumeRequest, opts ...grpc.CallOption) (*VolumeEcShardsToVolumeResponse, error) { + out := new(VolumeEcShardsToVolumeResponse) + err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsToVolume", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *volumeServerClient) VolumeTierMoveDatToRemote(ctx context.Context, in *VolumeTierMoveDatToRemoteRequest, opts ...grpc.CallOption) (VolumeServer_VolumeTierMoveDatToRemoteClient, error) { + stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[4], c.cc, "/volume_server_pb.VolumeServer/VolumeTierMoveDatToRemote", opts...) + if err != nil { + return nil, err + } + x := &volumeServerVolumeTierMoveDatToRemoteClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type VolumeServer_VolumeTierMoveDatToRemoteClient interface { + Recv() (*VolumeTierMoveDatToRemoteResponse, error) + grpc.ClientStream +} + +type volumeServerVolumeTierMoveDatToRemoteClient struct { + grpc.ClientStream +} + +func (x *volumeServerVolumeTierMoveDatToRemoteClient) Recv() (*VolumeTierMoveDatToRemoteResponse, error) { + m := new(VolumeTierMoveDatToRemoteResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *volumeServerClient) VolumeTierMoveDatFromRemote(ctx context.Context, in *VolumeTierMoveDatFromRemoteRequest, opts ...grpc.CallOption) (VolumeServer_VolumeTierMoveDatFromRemoteClient, error) { + stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[5], c.cc, "/volume_server_pb.VolumeServer/VolumeTierMoveDatFromRemote", opts...) + if err != nil { + return nil, err + } + x := &volumeServerVolumeTierMoveDatFromRemoteClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type VolumeServer_VolumeTierMoveDatFromRemoteClient interface { + Recv() (*VolumeTierMoveDatFromRemoteResponse, error) + grpc.ClientStream +} + +type volumeServerVolumeTierMoveDatFromRemoteClient struct { + grpc.ClientStream +} + +func (x *volumeServerVolumeTierMoveDatFromRemoteClient) Recv() (*VolumeTierMoveDatFromRemoteResponse, error) { + m := new(VolumeTierMoveDatFromRemoteResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + func (c *volumeServerClient) Query(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (VolumeServer_QueryClient, error) { - stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[4], c.cc, "/volume_server_pb.VolumeServer/Query", opts...) + stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[6], c.cc, "/volume_server_pb.VolumeServer/Query", opts...) 
if err != nil { return nil, err } @@ -2234,6 +2663,7 @@ type VolumeServerServer interface { VolumeUnmount(context.Context, *VolumeUnmountRequest) (*VolumeUnmountResponse, error) VolumeDelete(context.Context, *VolumeDeleteRequest) (*VolumeDeleteResponse, error) VolumeMarkReadonly(context.Context, *VolumeMarkReadonlyRequest) (*VolumeMarkReadonlyResponse, error) + VolumeConfigure(context.Context, *VolumeConfigureRequest) (*VolumeConfigureResponse, error) // copy the .idx .dat files, and mount this volume VolumeCopy(context.Context, *VolumeCopyRequest) (*VolumeCopyResponse, error) ReadVolumeFileStatus(context.Context, *ReadVolumeFileStatusRequest) (*ReadVolumeFileStatusResponse, error) @@ -2249,6 +2679,10 @@ type VolumeServerServer interface { VolumeEcShardsUnmount(context.Context, *VolumeEcShardsUnmountRequest) (*VolumeEcShardsUnmountResponse, error) VolumeEcShardRead(*VolumeEcShardReadRequest, VolumeServer_VolumeEcShardReadServer) error VolumeEcBlobDelete(context.Context, *VolumeEcBlobDeleteRequest) (*VolumeEcBlobDeleteResponse, error) + VolumeEcShardsToVolume(context.Context, *VolumeEcShardsToVolumeRequest) (*VolumeEcShardsToVolumeResponse, error) + // tiered storage + VolumeTierMoveDatToRemote(*VolumeTierMoveDatToRemoteRequest, VolumeServer_VolumeTierMoveDatToRemoteServer) error + VolumeTierMoveDatFromRemote(*VolumeTierMoveDatFromRemoteRequest, VolumeServer_VolumeTierMoveDatFromRemoteServer) error // query Query(*QueryRequest, VolumeServer_QueryServer) error } @@ -2494,6 +2928,24 @@ func _VolumeServer_VolumeMarkReadonly_Handler(srv interface{}, ctx context.Conte return interceptor(ctx, in, info, handler) } +func _VolumeServer_VolumeConfigure_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VolumeConfigureRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VolumeServerServer).VolumeConfigure(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/volume_server_pb.VolumeServer/VolumeConfigure", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VolumeServerServer).VolumeConfigure(ctx, req.(*VolumeConfigureRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _VolumeServer_VolumeCopy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(VolumeCopyRequest) if err := dec(in); err != nil { @@ -2737,6 +3189,66 @@ func _VolumeServer_VolumeEcBlobDelete_Handler(srv interface{}, ctx context.Conte return interceptor(ctx, in, info, handler) } +func _VolumeServer_VolumeEcShardsToVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VolumeEcShardsToVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VolumeServerServer).VolumeEcShardsToVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/volume_server_pb.VolumeServer/VolumeEcShardsToVolume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VolumeServerServer).VolumeEcShardsToVolume(ctx, req.(*VolumeEcShardsToVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _VolumeServer_VolumeTierMoveDatToRemote_Handler(srv interface{}, stream grpc.ServerStream) error { + m := 
new(VolumeTierMoveDatToRemoteRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(VolumeServerServer).VolumeTierMoveDatToRemote(m, &volumeServerVolumeTierMoveDatToRemoteServer{stream}) +} + +type VolumeServer_VolumeTierMoveDatToRemoteServer interface { + Send(*VolumeTierMoveDatToRemoteResponse) error + grpc.ServerStream +} + +type volumeServerVolumeTierMoveDatToRemoteServer struct { + grpc.ServerStream +} + +func (x *volumeServerVolumeTierMoveDatToRemoteServer) Send(m *VolumeTierMoveDatToRemoteResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _VolumeServer_VolumeTierMoveDatFromRemote_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(VolumeTierMoveDatFromRemoteRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(VolumeServerServer).VolumeTierMoveDatFromRemote(m, &volumeServerVolumeTierMoveDatFromRemoteServer{stream}) +} + +type VolumeServer_VolumeTierMoveDatFromRemoteServer interface { + Send(*VolumeTierMoveDatFromRemoteResponse) error + grpc.ServerStream +} + +type volumeServerVolumeTierMoveDatFromRemoteServer struct { + grpc.ServerStream +} + +func (x *volumeServerVolumeTierMoveDatFromRemoteServer) Send(m *VolumeTierMoveDatFromRemoteResponse) error { + return x.ServerStream.SendMsg(m) +} + func _VolumeServer_Query_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(QueryRequest) if err := stream.RecvMsg(m); err != nil { @@ -2810,6 +3322,10 @@ var _VolumeServer_serviceDesc = grpc.ServiceDesc{ MethodName: "VolumeMarkReadonly", Handler: _VolumeServer_VolumeMarkReadonly_Handler, }, + { + MethodName: "VolumeConfigure", + Handler: _VolumeServer_VolumeConfigure_Handler, + }, { MethodName: "VolumeCopy", Handler: _VolumeServer_VolumeCopy_Handler, @@ -2850,6 +3366,10 @@ var _VolumeServer_serviceDesc = grpc.ServiceDesc{ MethodName: "VolumeEcBlobDelete", Handler: _VolumeServer_VolumeEcBlobDelete_Handler, }, + { + MethodName: "VolumeEcShardsToVolume", + Handler: _VolumeServer_VolumeEcShardsToVolume_Handler, + }, }, Streams: []grpc.StreamDesc{ { @@ -2872,6 +3392,16 @@ var _VolumeServer_serviceDesc = grpc.ServiceDesc{ Handler: _VolumeServer_VolumeEcShardRead_Handler, ServerStreams: true, }, + { + StreamName: "VolumeTierMoveDatToRemote", + Handler: _VolumeServer_VolumeTierMoveDatToRemote_Handler, + ServerStreams: true, + }, + { + StreamName: "VolumeTierMoveDatFromRemote", + Handler: _VolumeServer_VolumeTierMoveDatFromRemote_Handler, + ServerStreams: true, + }, { StreamName: "Query", Handler: _VolumeServer_Query_Handler, @@ -2884,162 +3414,190 @@ var _VolumeServer_serviceDesc = grpc.ServiceDesc{ func init() { proto.RegisterFile("volume_server.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 2503 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xd4, 0x1a, 0x4d, 0x77, 0x1c, 0x47, - 0x51, 0xeb, 0x5d, 0x49, 0xbb, 0xb5, 0x2b, 0x4b, 0x6e, 0xc9, 0xd2, 0x7a, 0x6c, 0xc9, 0xca, 0xe4, - 0xc3, 0xb2, 0x9d, 0xc8, 0x8e, 0x02, 0x24, 0x24, 0x04, 0xb0, 0x65, 0x1b, 0x4c, 0x62, 0x99, 0x8c, - 0x1c, 0x13, 0x70, 0x1e, 0xf3, 0x5a, 0x33, 0x2d, 0x6b, 0xd0, 0xcc, 0xf4, 0x78, 0xa6, 0x47, 0xd6, - 0xfa, 0xc1, 0x29, 0x5c, 0xf9, 0x01, 0x9c, 0x39, 0x71, 0xe1, 0xca, 0x0f, 0xe0, 0xc2, 0x0f, 0x80, - 0x2b, 0x17, 0xce, 0x1c, 0xb8, 0xf1, 0x1e, 0x17, 0x5e, 0x7f, 0xcc, 0xec, 0x7c, 0x6a, 0x47, 0xb1, - 0xdf, 0xe3, 0xe5, 0xd6, 0x53, 0x5d, 0x1f, 0x5d, 0xd5, 0x55, 0xd5, 0xd5, 0xd5, 0x03, 0x8b, 0x47, - 0xd4, 0x8d, 0x3d, 0x62, 0x46, 0x24, 0x3c, 0x22, 0xe1, 0x66, 0x10, 0x52, 
0x46, 0xd1, 0x42, 0x0e, - 0x68, 0x06, 0x7b, 0xfa, 0x0d, 0x40, 0xb7, 0x31, 0xb3, 0x0e, 0xee, 0x10, 0x97, 0x30, 0x62, 0x90, - 0x67, 0x31, 0x89, 0x18, 0xba, 0x00, 0xdd, 0x7d, 0xc7, 0x25, 0xa6, 0x63, 0x47, 0xc3, 0xd6, 0x7a, - 0x7b, 0xa3, 0x67, 0xcc, 0xf2, 0xef, 0xfb, 0x76, 0xa4, 0x3f, 0x84, 0xc5, 0x1c, 0x41, 0x14, 0x50, - 0x3f, 0x22, 0xe8, 0x03, 0x98, 0x0d, 0x49, 0x14, 0xbb, 0x4c, 0x12, 0xf4, 0xb7, 0xd6, 0x36, 0x8b, - 0xb2, 0x36, 0x53, 0x92, 0xd8, 0x65, 0x46, 0x82, 0xae, 0x7f, 0xd5, 0x82, 0x41, 0x76, 0x06, 0xad, - 0xc0, 0xac, 0x12, 0x3e, 0x6c, 0xad, 0xb7, 0x36, 0x7a, 0xc6, 0x8c, 0x94, 0x8d, 0x96, 0x61, 0x26, - 0x62, 0x98, 0xc5, 0xd1, 0xf0, 0xcc, 0x7a, 0x6b, 0x63, 0xda, 0x50, 0x5f, 0x68, 0x09, 0xa6, 0x49, - 0x18, 0xd2, 0x70, 0xd8, 0x16, 0xe8, 0xf2, 0x03, 0x21, 0xe8, 0x44, 0xce, 0x0b, 0x32, 0xec, 0xac, - 0xb7, 0x36, 0xe6, 0x0c, 0x31, 0x46, 0x43, 0x98, 0x3d, 0x22, 0x61, 0xe4, 0x50, 0x7f, 0x38, 0x2d, - 0xc0, 0xc9, 0xa7, 0x3e, 0x0b, 0xd3, 0x77, 0xbd, 0x80, 0x8d, 0xf4, 0xf7, 0x61, 0xf8, 0x18, 0x5b, - 0x71, 0xec, 0x3d, 0x16, 0xcb, 0xdf, 0x3e, 0x20, 0xd6, 0x61, 0x62, 0x96, 0x8b, 0xd0, 0x53, 0x4a, - 0xa9, 0xb5, 0xcd, 0x19, 0x5d, 0x09, 0xb8, 0x6f, 0xeb, 0x3f, 0x84, 0x0b, 0x15, 0x84, 0xca, 0x3c, - 0xaf, 0xc3, 0xdc, 0x53, 0x1c, 0xee, 0xe1, 0xa7, 0xc4, 0x0c, 0x31, 0x73, 0xa8, 0xa0, 0x6e, 0x19, - 0x03, 0x05, 0x34, 0x38, 0x4c, 0x7f, 0x02, 0x5a, 0x8e, 0x03, 0xf5, 0x02, 0x6c, 0xb1, 0x26, 0xc2, - 0xd1, 0x3a, 0xf4, 0x83, 0x90, 0x60, 0xd7, 0xa5, 0x16, 0x66, 0x44, 0xd8, 0xa7, 0x6d, 0x64, 0x41, - 0xfa, 0x2a, 0x5c, 0xac, 0x64, 0x2e, 0x17, 0xa8, 0x7f, 0x50, 0x58, 0x3d, 0xf5, 0x3c, 0xa7, 0x91, - 0x68, 0xfd, 0x52, 0x69, 0xd5, 0x82, 0x52, 0xf1, 0xfd, 0x6e, 0x61, 0xd6, 0x25, 0xd8, 0x8f, 0x83, - 0x46, 0x8c, 0x8b, 0x2b, 0x4e, 0x48, 0x53, 0xce, 0x2b, 0xd2, 0x6d, 0xb6, 0xa9, 0xeb, 0x12, 0x8b, - 0x39, 0xd4, 0x4f, 0xd8, 0xae, 0x01, 0x58, 0x29, 0x50, 0x39, 0x51, 0x06, 0xa2, 0x6b, 0x30, 0x2c, - 0x93, 0x2a, 0xb6, 0xff, 0x68, 0xc1, 0xf9, 0x5b, 0xca, 0x68, 0x52, 0x70, 0xa3, 0x0d, 0xc8, 0x8b, - 0x3c, 0x53, 0x14, 0x59, 0xdc, 0xa0, 0x76, 0x69, 0x83, 0x38, 0x46, 0x48, 0x02, 0xd7, 0xb1, 0xb0, - 0x60, 0xd1, 0x11, 0x2c, 0xb2, 0x20, 0xb4, 0x00, 0x6d, 0xc6, 0x5c, 0xe1, 0xb9, 0x3d, 0x83, 0x0f, - 0xd1, 0x16, 0x2c, 0x7b, 0xc4, 0xa3, 0xe1, 0xc8, 0xf4, 0x70, 0x60, 0x7a, 0xf8, 0xd8, 0xe4, 0x6e, - 0x6e, 0x7a, 0x7b, 0xc3, 0x19, 0xb1, 0x3e, 0x24, 0x67, 0x1f, 0xe0, 0xe0, 0x01, 0x3e, 0xde, 0x75, - 0x5e, 0x90, 0x07, 0x7b, 0xfa, 0x10, 0x96, 0x8b, 0xfa, 0x29, 0xd5, 0xbf, 0x03, 0x2b, 0x12, 0xb2, - 0x3b, 0xf2, 0xad, 0x5d, 0x11, 0x5b, 0x8d, 0x36, 0xea, 0xbf, 0x2d, 0x18, 0x96, 0x09, 0x95, 0xe7, - 0xbf, 0xac, 0xd5, 0x4e, 0x6d, 0x93, 0xcb, 0xd0, 0x67, 0xd8, 0x71, 0x4d, 0xba, 0xbf, 0x1f, 0x11, - 0x26, 0x0c, 0xd1, 0x31, 0x80, 0x83, 0x1e, 0x0a, 0x08, 0xba, 0x0a, 0x0b, 0x96, 0xf4, 0x7e, 0x33, - 0x24, 0x47, 0x8e, 0xc8, 0x06, 0xb3, 0x62, 0x61, 0xf3, 0x56, 0x12, 0x15, 0x12, 0x8c, 0x74, 0x98, - 0x73, 0xec, 0x63, 0x53, 0xa4, 0x23, 0x91, 0x4c, 0xba, 0x82, 0x5b, 0xdf, 0xb1, 0x8f, 0xef, 0x39, - 0x2e, 0xe1, 0x16, 0xd5, 0x1f, 0xc3, 0x25, 0xa9, 0xfc, 0x7d, 0xdf, 0x0a, 0x89, 0x47, 0x7c, 0x86, - 0xdd, 0x6d, 0x1a, 0x8c, 0x1a, 0xb9, 0xcd, 0x05, 0xe8, 0x46, 0x8e, 0x6f, 0x11, 0xd3, 0x97, 0x49, - 0xad, 0x63, 0xcc, 0x8a, 0xef, 0x9d, 0x48, 0xbf, 0x0d, 0xab, 0x35, 0x7c, 0x95, 0x65, 0x5f, 0x83, - 0x81, 0x58, 0x98, 0x45, 0x7d, 0x46, 0x7c, 0x26, 0x78, 0x0f, 0x8c, 0x3e, 0x87, 0x6d, 0x4b, 0x90, - 0xfe, 0x2e, 0x20, 0xc9, 0xe3, 0x01, 0x8d, 0xfd, 0x66, 0xe1, 0x7c, 0x1e, 0x16, 0x73, 0x24, 0xca, - 0x37, 0xde, 0x83, 0x25, 0x09, 0xfe, 0xdc, 0xf7, 0x1a, 0xf3, 0x5a, 0x81, 0xf3, 0x05, 0x22, 0xc5, - 
0x6d, 0x2b, 0x11, 0x92, 0x3f, 0x76, 0x4e, 0x64, 0xb6, 0x9c, 0xac, 0x20, 0x7f, 0xf2, 0x88, 0xcc, - 0x25, 0x17, 0x8c, 0xc3, 0x43, 0x83, 0x60, 0x9b, 0xfa, 0xee, 0xa8, 0x71, 0xe6, 0xaa, 0xa0, 0x54, - 0x7c, 0xff, 0xd4, 0x82, 0x73, 0x49, 0x4a, 0x6b, 0xb8, 0x9b, 0xa7, 0x74, 0xe7, 0x76, 0xad, 0x3b, - 0x77, 0xc6, 0xee, 0xbc, 0x01, 0x0b, 0x11, 0x8d, 0x43, 0x8b, 0x98, 0x36, 0x66, 0xd8, 0xf4, 0xa9, - 0x4d, 0x94, 0xb7, 0x9f, 0x95, 0xf0, 0x3b, 0x98, 0xe1, 0x1d, 0x6a, 0x13, 0xfd, 0x07, 0xc9, 0x66, - 0xe7, 0xbc, 0xe4, 0x2a, 0x9c, 0x73, 0x71, 0xc4, 0x4c, 0x1c, 0x04, 0xc4, 0xb7, 0x4d, 0xcc, 0xb8, - 0xab, 0xb5, 0x84, 0xab, 0x9d, 0xe5, 0x13, 0xb7, 0x04, 0xfc, 0x16, 0xdb, 0x89, 0xf4, 0xbf, 0xb5, - 0x60, 0x9e, 0xd3, 0x72, 0xd7, 0x6e, 0xa4, 0xef, 0x02, 0xb4, 0xc9, 0x31, 0x53, 0x8a, 0xf2, 0x21, - 0xba, 0x01, 0x8b, 0x2a, 0x86, 0x1c, 0xea, 0x8f, 0xc3, 0xab, 0x2d, 0xb3, 0xd1, 0x78, 0x2a, 0x8d, - 0xb0, 0xcb, 0xd0, 0x8f, 0x18, 0x0d, 0x92, 0x68, 0xed, 0xc8, 0x68, 0xe5, 0x20, 0x15, 0xad, 0x79, - 0x9b, 0x4e, 0x57, 0xd8, 0x74, 0xe0, 0x44, 0x26, 0xb1, 0x4c, 0xb9, 0x2a, 0x11, 0xef, 0x5d, 0x03, - 0x9c, 0xe8, 0xae, 0x25, 0xad, 0xa1, 0x7f, 0x1b, 0x16, 0xc6, 0x5a, 0x35, 0x8f, 0x9d, 0xaf, 0x5a, - 0x49, 0x3a, 0x7c, 0x84, 0x1d, 0x77, 0x97, 0xf8, 0x36, 0x09, 0x5f, 0x32, 0xa6, 0xd1, 0x4d, 0x58, - 0x72, 0x6c, 0x97, 0x98, 0xcc, 0xf1, 0x08, 0x8d, 0x99, 0x19, 0x11, 0x8b, 0xfa, 0x76, 0x94, 0xd8, - 0x87, 0xcf, 0x3d, 0x92, 0x53, 0xbb, 0x72, 0x46, 0xff, 0x6d, 0x9a, 0x5b, 0xb3, 0xab, 0x18, 0x57, - 0x15, 0x3e, 0x21, 0x9c, 0xe1, 0x01, 0xc1, 0x36, 0x09, 0x95, 0x1a, 0x03, 0x09, 0xfc, 0xb1, 0x80, - 0x71, 0x0b, 0x2b, 0xa4, 0x3d, 0x6a, 0x8f, 0xc4, 0x8a, 0x06, 0x06, 0x48, 0xd0, 0x6d, 0x6a, 0x8f, - 0x44, 0x92, 0x8b, 0x4c, 0xe1, 0x24, 0xd6, 0x41, 0xec, 0x1f, 0x8a, 0xd5, 0x74, 0x8d, 0xbe, 0x13, - 0x7d, 0x8a, 0x23, 0xb6, 0xcd, 0x41, 0xfa, 0x9f, 0x5b, 0x49, 0x94, 0xf1, 0x65, 0x18, 0xc4, 0x22, - 0xce, 0xd1, 0xff, 0xc1, 0x1c, 0x9c, 0x42, 0x45, 0x43, 0xae, 0xba, 0x54, 0x01, 0x83, 0xe4, 0x9c, - 0x3a, 0x8b, 0xc4, 0xcc, 0x38, 0xc8, 0xf3, 0x0b, 0x57, 0x41, 0xfe, 0x65, 0x92, 0x64, 0xef, 0x5a, - 0xbb, 0x07, 0x38, 0xb4, 0xa3, 0x1f, 0x11, 0x9f, 0x84, 0x98, 0xbd, 0x92, 0x43, 0x5f, 0x5f, 0x87, - 0xb5, 0x3a, 0xee, 0x4a, 0xfe, 0x93, 0xe4, 0xf0, 0x48, 0x30, 0x0c, 0xb2, 0x17, 0x3b, 0xae, 0xfd, - 0x4a, 0xc4, 0x7f, 0x52, 0x54, 0x2e, 0x65, 0xae, 0xfc, 0xe7, 0x1a, 0x9c, 0x0b, 0x05, 0x88, 0x99, - 0x11, 0x47, 0x48, 0xeb, 0xfd, 0x39, 0x63, 0x5e, 0x4d, 0x08, 0x42, 0x5e, 0xf7, 0xff, 0x25, 0xf5, - 0x80, 0x84, 0xdb, 0x2b, 0x4b, 0x8b, 0x17, 0xa1, 0x37, 0x16, 0xdf, 0x16, 0xe2, 0xbb, 0x91, 0x92, - 0xcb, 0xbd, 0xd3, 0xa2, 0xc1, 0xc8, 0x24, 0x96, 0x3c, 0x87, 0xc5, 0x56, 0x77, 0x8d, 0x3e, 0x07, - 0xde, 0xb5, 0xc4, 0x31, 0x7c, 0x8a, 0x1c, 0x99, 0x7a, 0x43, 0x5e, 0x09, 0xb5, 0x1b, 0xcf, 0xe1, - 0x62, 0x7e, 0xb6, 0xf9, 0xf1, 0xf4, 0x52, 0x4a, 0xea, 0x6b, 0x45, 0x37, 0x28, 0x9c, 0x71, 0x47, - 0xc5, 0x65, 0x37, 0x3e, 0xcf, 0x5f, 0x6e, 0x5d, 0xab, 0x45, 0x83, 0xe4, 0x8b, 0x82, 0x2f, 0x8a, - 0xcb, 0x3e, 0x45, 0x71, 0x70, 0xb2, 0xe0, 0xcb, 0x45, 0xd7, 0x2d, 0x56, 0x10, 0xbf, 0x4f, 0xf3, - 0xa2, 0xc2, 0xe0, 0xe7, 0x77, 0xe3, 0x7c, 0xa4, 0xe4, 0x0a, 0x73, 0xcc, 0x19, 0xb3, 0x4a, 0x2c, - 0xbf, 0x60, 0xaa, 0x73, 0x48, 0xd6, 0xe7, 0xea, 0x2b, 0x77, 0x95, 0x6c, 0xab, 0xab, 0x64, 0x72, - 0x45, 0x3e, 0x24, 0x23, 0xe1, 0x6b, 0x1d, 0x79, 0x45, 0xfe, 0x84, 0x8c, 0xf4, 0x9d, 0x42, 0xa4, - 0xc8, 0xa5, 0xa9, 0x98, 0x43, 0xd0, 0xe1, 0x4e, 0xaa, 0x52, 0xb5, 0x18, 0xa3, 0x55, 0x00, 0x27, - 0x32, 0x6d, 0xb1, 0xe7, 0x72, 0x51, 0x5d, 0xa3, 0xe7, 0x28, 0x27, 0xb0, 0xf5, 0xdf, 0x65, 0x42, - 0xef, 0xb6, 0x4b, 0xf7, 
0x5e, 0xa1, 0x57, 0x66, 0xb5, 0x68, 0xe7, 0xb4, 0xc8, 0xde, 0x95, 0x3b, - 0xf9, 0xbb, 0x72, 0x26, 0x88, 0xb2, 0xcb, 0x51, 0x3b, 0xf3, 0x21, 0x5c, 0xe4, 0x0a, 0x4b, 0x0c, - 0x51, 0x25, 0x37, 0xbf, 0x49, 0xfc, 0xeb, 0x0c, 0x5c, 0xaa, 0x26, 0x6e, 0x72, 0x9b, 0xf8, 0x08, - 0xb4, 0xb4, 0x5a, 0xe7, 0x47, 0x4a, 0xc4, 0xb0, 0x17, 0xa4, 0x87, 0x8a, 0x3c, 0x7b, 0x56, 0x54, - 0xe9, 0xfe, 0x28, 0x99, 0x4f, 0x4e, 0x96, 0x52, 0xa9, 0xdf, 0x2e, 0x95, 0xfa, 0x5c, 0x80, 0x8d, - 0x59, 0x9d, 0x00, 0x59, 0xbb, 0xac, 0xd8, 0x98, 0xd5, 0x09, 0x48, 0x89, 0x85, 0x00, 0xe9, 0x35, - 0x7d, 0x85, 0x2f, 0x04, 0xac, 0x02, 0xa8, 0xb2, 0x24, 0xf6, 0x93, 0xab, 0x4b, 0x4f, 0x16, 0x25, - 0xb1, 0x5f, 0x5b, 0x5d, 0xcd, 0xd6, 0x56, 0x57, 0xf9, 0xed, 0xef, 0x96, 0x4e, 0x88, 0x2f, 0x00, - 0xee, 0x38, 0xd1, 0xa1, 0x34, 0x32, 0x2f, 0xe7, 0x6c, 0x27, 0x54, 0xf7, 0x65, 0x3e, 0xe4, 0x10, - 0xec, 0xba, 0xca, 0x74, 0x7c, 0xc8, 0xdd, 0x37, 0x8e, 0x88, 0xad, 0xac, 0x23, 0xc6, 0x1c, 0xb6, - 0x1f, 0x12, 0xa2, 0x0c, 0x20, 0xc6, 0xfa, 0x1f, 0x5a, 0xd0, 0x7b, 0x40, 0x3c, 0xc5, 0x79, 0x0d, - 0xe0, 0x29, 0x0d, 0x69, 0xcc, 0x1c, 0x9f, 0xc8, 0xea, 0x73, 0xda, 0xc8, 0x40, 0xbe, 0xbe, 0x1c, - 0x11, 0x9a, 0xc4, 0xdd, 0x57, 0xc6, 0x14, 0x63, 0x0e, 0x3b, 0x20, 0x38, 0x50, 0xf6, 0x13, 0x63, - 0xb4, 0x04, 0xd3, 0x11, 0xc3, 0xd6, 0xa1, 0x30, 0x56, 0xc7, 0x90, 0x1f, 0xfa, 0x7f, 0x06, 0x30, - 0xf8, 0x2c, 0x26, 0xe1, 0x28, 0xd3, 0x39, 0x88, 0x88, 0xb2, 0x4e, 0xd2, 0xfa, 0xca, 0x40, 0xf8, - 0x26, 0xee, 0x87, 0xd4, 0x33, 0xd3, 0xee, 0xd8, 0x19, 0x81, 0xd2, 0xe7, 0xc0, 0x7b, 0xb2, 0x43, - 0x86, 0x3e, 0x86, 0x99, 0x7d, 0xc7, 0x65, 0x44, 0xf6, 0xa3, 0xfa, 0x5b, 0x6f, 0x96, 0x3b, 0x61, - 0x59, 0x99, 0x9b, 0xf7, 0x04, 0xb2, 0xa1, 0x88, 0xd0, 0x1e, 0x2c, 0x3a, 0x7e, 0x20, 0xaa, 0xa1, - 0xd0, 0xc1, 0xae, 0xf3, 0x62, 0x7c, 0xf7, 0xed, 0x6f, 0xbd, 0x3b, 0x81, 0xd7, 0x7d, 0x4e, 0xb9, - 0x9b, 0x25, 0x34, 0x90, 0x53, 0x82, 0x21, 0x02, 0x4b, 0x34, 0x66, 0x65, 0x21, 0xd3, 0x42, 0xc8, - 0xd6, 0x04, 0x21, 0x0f, 0x05, 0x69, 0x5e, 0xca, 0x22, 0x2d, 0x03, 0xb5, 0x1d, 0x98, 0x91, 0xca, - 0x71, 0xf3, 0xef, 0x3b, 0xc4, 0x4d, 0x3a, 0x7a, 0xf2, 0x83, 0xa7, 0x18, 0x1a, 0x90, 0x10, 0xfb, - 0xb6, 0x4a, 0x4d, 0xc9, 0x27, 0xc7, 0x3f, 0xc2, 0x6e, 0x4c, 0x92, 0x96, 0x9e, 0xf8, 0xd0, 0xfe, - 0x3e, 0x0d, 0xa8, 0xac, 0x61, 0x72, 0xa1, 0x0f, 0x49, 0xc4, 0x9d, 0xde, 0x64, 0xa3, 0x80, 0x28, - 0x39, 0xf3, 0x19, 0xf8, 0xa3, 0x51, 0x40, 0xd0, 0xcf, 0xa0, 0x67, 0x45, 0x47, 0xa6, 0x30, 0x89, - 0x90, 0xd9, 0xdf, 0xfa, 0xf0, 0xd4, 0x26, 0xdd, 0xdc, 0xde, 0x7d, 0x2c, 0xa0, 0x46, 0xd7, 0x8a, - 0x8e, 0xc4, 0x08, 0xfd, 0x02, 0xe0, 0x57, 0x11, 0xf5, 0x15, 0x67, 0xb9, 0xf1, 0x1f, 0x9d, 0x9e, - 0xf3, 0x4f, 0x76, 0x1f, 0xee, 0x48, 0xd6, 0x3d, 0xce, 0x4e, 0xf2, 0xb6, 0x60, 0x2e, 0xc0, 0xe1, - 0xb3, 0x98, 0x30, 0xc5, 0x5e, 0xfa, 0xc2, 0xf7, 0x4f, 0xcf, 0xfe, 0xa7, 0x92, 0x8d, 0x94, 0x30, - 0x08, 0x32, 0x5f, 0xda, 0x5f, 0xcf, 0x40, 0x37, 0xd1, 0x8b, 0x17, 0x54, 0xc2, 0xc3, 0xe5, 0xb5, - 0xc2, 0x74, 0xfc, 0x7d, 0xaa, 0x2c, 0x7a, 0x96, 0xc3, 0xe5, 0xcd, 0xe2, 0xbe, 0xbf, 0x4f, 0xb9, - 0xed, 0x43, 0x62, 0xd1, 0xd0, 0xe6, 0xc7, 0x97, 0xe3, 0x39, 0xdc, 0xed, 0xe5, 0x5e, 0xce, 0x4b, - 0xf8, 0x9d, 0x04, 0x8c, 0xae, 0xc0, 0xbc, 0xd8, 0xf6, 0x0c, 0x66, 0x3b, 0xe1, 0x49, 0xdc, 0x0c, - 0xe2, 0x55, 0x58, 0x78, 0x16, 0x53, 0x46, 0x4c, 0xeb, 0x00, 0x87, 0xd8, 0x62, 0x34, 0x2d, 0xf0, - 0xe7, 0x05, 0x7c, 0x3b, 0x05, 0xa3, 0x6f, 0xc1, 0xb2, 0x44, 0x25, 0x91, 0x85, 0x83, 0x94, 0x82, - 0x84, 0xaa, 0xfe, 0x5b, 0x12, 0xb3, 0x77, 0xc5, 0xe4, 0x76, 0x32, 0x87, 0x34, 0xe8, 0x5a, 0xd4, - 0xf3, 0x88, 0xcf, 0x22, 0x91, 0x24, 0x7a, 0x46, 
0xfa, 0x8d, 0x6e, 0xc1, 0x2a, 0x76, 0x5d, 0xfa, - 0xdc, 0x14, 0x94, 0xb6, 0x59, 0xd2, 0x6e, 0x56, 0x1c, 0xcf, 0x9a, 0x40, 0xfa, 0x4c, 0xe0, 0x18, - 0x79, 0x45, 0xb5, 0xcb, 0xd0, 0x4b, 0xf7, 0x91, 0x27, 0xa3, 0x8c, 0x43, 0x8a, 0xb1, 0x76, 0x16, - 0x06, 0xd9, 0x9d, 0xd0, 0xfe, 0xdd, 0x86, 0xc5, 0x8a, 0xa0, 0x42, 0x4f, 0x00, 0xb8, 0xb7, 0xca, - 0xd0, 0x52, 0xee, 0xfa, 0xbd, 0xd3, 0x07, 0x27, 0xf7, 0x57, 0x09, 0x36, 0xb8, 0xf7, 0xcb, 0x21, - 0xfa, 0x25, 0xf4, 0x85, 0xc7, 0x2a, 0xee, 0xd2, 0x65, 0x3f, 0xfe, 0x1a, 0xdc, 0xb9, 0xae, 0x8a, - 0xbd, 0x88, 0x01, 0x39, 0xd6, 0xfe, 0xd9, 0x82, 0x5e, 0x2a, 0x98, 0x5f, 0xb8, 0xe5, 0x46, 0x89, - 0xbd, 0x8e, 0x94, 0x39, 0xfa, 0x02, 0x76, 0x4f, 0x80, 0xbe, 0x91, 0xae, 0xa4, 0xbd, 0x0f, 0x30, - 0xd6, 0xbf, 0x52, 0x85, 0x56, 0xa5, 0x0a, 0xfa, 0x55, 0x98, 0xe3, 0x96, 0x75, 0x88, 0xbd, 0xcb, - 0x42, 0x27, 0x10, 0x6f, 0x13, 0x12, 0x27, 0x52, 0xb5, 0x61, 0xf2, 0xb9, 0xf5, 0xc7, 0x15, 0x18, - 0x64, 0xef, 0xb4, 0xe8, 0x4b, 0xe8, 0x67, 0xde, 0x60, 0xd0, 0x1b, 0xe5, 0x4d, 0x2b, 0xbf, 0xe9, - 0x68, 0x6f, 0x4e, 0xc0, 0x52, 0xe5, 0xdb, 0x14, 0xf2, 0xe1, 0x5c, 0xe9, 0x21, 0x03, 0x5d, 0x2b, - 0x53, 0xd7, 0x3d, 0x93, 0x68, 0xd7, 0x1b, 0xe1, 0xa6, 0xf2, 0x18, 0x2c, 0x56, 0xbc, 0x4c, 0xa0, - 0xb7, 0x27, 0x70, 0xc9, 0xbd, 0x8e, 0x68, 0xef, 0x34, 0xc4, 0x4e, 0xa5, 0x3e, 0x03, 0x54, 0x7e, - 0xb6, 0x40, 0xd7, 0x27, 0xb2, 0x19, 0x3f, 0x8b, 0x68, 0x6f, 0x37, 0x43, 0xae, 0x55, 0x54, 0x3e, - 0x68, 0x4c, 0x54, 0x34, 0xf7, 0x64, 0x32, 0x51, 0xd1, 0xc2, 0x2b, 0xc9, 0x14, 0x3a, 0x84, 0x85, - 0xe2, 0x63, 0x07, 0xba, 0x5a, 0xf7, 0x38, 0x57, 0x7a, 0x4b, 0xd1, 0xae, 0x35, 0x41, 0x4d, 0x85, - 0x11, 0x38, 0x9b, 0x7f, 0x5c, 0x40, 0x57, 0xca, 0xf4, 0x95, 0xcf, 0x2b, 0xda, 0xc6, 0x64, 0xc4, - 0xac, 0x4e, 0xc5, 0x07, 0x87, 0x2a, 0x9d, 0x6a, 0x5e, 0x33, 0xaa, 0x74, 0xaa, 0x7b, 0xbf, 0xd0, - 0xa7, 0xd0, 0xaf, 0x93, 0x2e, 0x76, 0xa1, 0x11, 0x8f, 0x36, 0xeb, 0xd8, 0x54, 0xbf, 0x04, 0x68, - 0x37, 0x1a, 0xe3, 0x27, 0xb2, 0x6f, 0xb6, 0x78, 0xac, 0x67, 0xfa, 0xf1, 0x55, 0xb1, 0x5e, 0xee, - 0xf0, 0x57, 0xc5, 0x7a, 0x55, 0x53, 0x7f, 0x0a, 0xed, 0xc1, 0x5c, 0xae, 0x43, 0x8f, 0xde, 0xaa, - 0xa3, 0xcc, 0x5f, 0xed, 0xb5, 0x2b, 0x13, 0xf1, 0x52, 0x19, 0x66, 0x92, 0xbd, 0x54, 0xba, 0xaa, - 0x5d, 0x5c, 0x3e, 0x5f, 0xbd, 0x35, 0x09, 0x2d, 0x17, 0xca, 0xa5, 0x3e, 0x7e, 0x65, 0x28, 0xd7, - 0xbd, 0x13, 0x54, 0x86, 0x72, 0xfd, 0xd3, 0xc0, 0x14, 0xfa, 0x39, 0xc0, 0xb8, 0xd7, 0x8e, 0x5e, - 0xaf, 0xa3, 0xce, 0xee, 0xfe, 0x1b, 0x27, 0x23, 0xa5, 0xac, 0x9f, 0xc3, 0x52, 0xd5, 0x15, 0x18, - 0x55, 0x04, 0xfe, 0x09, 0xf7, 0x6c, 0x6d, 0xb3, 0x29, 0x7a, 0x2a, 0xf8, 0x73, 0xe8, 0x26, 0x7d, - 0x72, 0xf4, 0x5a, 0x99, 0xba, 0xf0, 0x32, 0xa0, 0xe9, 0x27, 0xa1, 0x64, 0x1c, 0xd8, 0x4b, 0x62, - 0x75, 0xdc, 0xc0, 0xae, 0x8f, 0xd5, 0x52, 0xab, 0xbd, 0x3e, 0x56, 0xcb, 0xfd, 0x70, 0x21, 0x2e, - 0x75, 0x86, 0x6c, 0xbf, 0xb7, 0xde, 0x19, 0x2a, 0xda, 0xd9, 0xf5, 0xce, 0x50, 0xd9, 0x42, 0x9e, - 0x42, 0xbf, 0x81, 0xe5, 0xea, 0x36, 0x2f, 0xaa, 0x8d, 0xf8, 0x9a, 0x76, 0xb3, 0x76, 0xb3, 0x39, - 0x41, 0x2a, 0xfe, 0x45, 0x92, 0x9f, 0x0a, 0x6d, 0xde, 0xfa, 0xfc, 0x54, 0xdd, 0x6c, 0xd6, 0x6e, - 0x34, 0xc6, 0x2f, 0x87, 0x5e, 0xb6, 0x9f, 0x5a, 0x6f, 0xed, 0x8a, 0xd6, 0x71, 0xbd, 0xb5, 0x2b, - 0x5b, 0xb4, 0x22, 0x3e, 0xaa, 0x7a, 0xa5, 0x55, 0xf1, 0x71, 0x42, 0x33, 0x57, 0xdb, 0x6c, 0x8a, - 0x9e, 0x3b, 0xbe, 0xcb, 0xcd, 0x50, 0x34, 0x71, 0xfd, 0xb9, 0xcc, 0xfc, 0x4e, 0x43, 0xec, 0xfa, - 0xdd, 0x4d, 0x32, 0xf5, 0x44, 0x05, 0x0a, 0x19, 0xfb, 0x46, 0x63, 0xfc, 0x54, 0x76, 0x90, 0xbc, - 0x80, 0x66, 0x1a, 0x99, 0xe8, 0xda, 0x04, 0x3e, 0x99, 0x46, 0xac, 0x76, 
0xbd, 0x11, 0x6e, 0x55, - 0xf4, 0x66, 0x5b, 0x8b, 0x27, 0xf9, 0x53, 0xa9, 0x1f, 0x7a, 0x92, 0x3f, 0x55, 0x74, 0x2b, 0xa7, - 0xd0, 0xa7, 0x30, 0x2d, 0xae, 0x38, 0x68, 0xed, 0xe4, 0xbb, 0x8f, 0x76, 0xb9, 0x7a, 0x3e, 0xad, - 0xe0, 0xb9, 0x02, 0x7b, 0x33, 0xe2, 0x47, 0xab, 0xf7, 0xfe, 0x17, 0x00, 0x00, 0xff, 0xff, 0x02, - 0xd4, 0xce, 0xae, 0x7f, 0x25, 0x00, 0x00, + // 2959 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xd4, 0x5a, 0x4b, 0x73, 0xdc, 0xc6, + 0x11, 0xe6, 0x72, 0xf9, 0xd8, 0xed, 0x5d, 0x8a, 0xd4, 0x90, 0xa6, 0xd6, 0x20, 0x25, 0xd1, 0x90, + 0x1f, 0xa4, 0x6c, 0x91, 0x32, 0x6d, 0xc7, 0x8e, 0x1d, 0x3b, 0x91, 0x28, 0x29, 0x51, 0x6c, 0x51, + 0x36, 0x28, 0xcb, 0x4e, 0xec, 0x0a, 0x0a, 0x04, 0x66, 0x45, 0x98, 0x00, 0x06, 0x02, 0x66, 0x69, + 0xae, 0xca, 0x39, 0x39, 0x87, 0x54, 0xa5, 0x92, 0x43, 0x2a, 0x97, 0x9c, 0x73, 0xf7, 0x35, 0x7f, + 0xc1, 0x7f, 0x20, 0x55, 0x39, 0xe5, 0x92, 0x73, 0x0e, 0xb9, 0xa5, 0x2a, 0x97, 0xd4, 0xbc, 0xb0, + 0x78, 0x72, 0x41, 0x8b, 0xa9, 0x54, 0x6e, 0x83, 0x9e, 0x9e, 0xee, 0x99, 0x9e, 0xee, 0x9e, 0xe9, + 0xf9, 0x00, 0x8b, 0x47, 0xc4, 0x1b, 0xf8, 0xd8, 0x8c, 0x71, 0x74, 0x84, 0xa3, 0xcd, 0x30, 0x22, + 0x94, 0xa0, 0x85, 0x0c, 0xd1, 0x0c, 0xf7, 0xf5, 0x2d, 0x40, 0x37, 0x2d, 0x6a, 0x1f, 0xdc, 0xc2, + 0x1e, 0xa6, 0xd8, 0xc0, 0x8f, 0x07, 0x38, 0xa6, 0xe8, 0x59, 0x68, 0xf5, 0x5d, 0x0f, 0x9b, 0xae, + 0x13, 0xf7, 0x1a, 0x6b, 0xcd, 0xf5, 0xb6, 0x31, 0xcb, 0xbe, 0xef, 0x3a, 0xb1, 0x7e, 0x1f, 0x16, + 0x33, 0x03, 0xe2, 0x90, 0x04, 0x31, 0x46, 0x6f, 0xc1, 0x6c, 0x84, 0xe3, 0x81, 0x47, 0xc5, 0x80, + 0xce, 0xf6, 0xa5, 0xcd, 0xbc, 0xae, 0xcd, 0x64, 0xc8, 0xc0, 0xa3, 0x86, 0x62, 0xd7, 0xbf, 0x6e, + 0x40, 0x37, 0xdd, 0x83, 0x2e, 0xc0, 0xac, 0x54, 0xde, 0x6b, 0xac, 0x35, 0xd6, 0xdb, 0xc6, 0x8c, + 0xd0, 0x8d, 0x96, 0x61, 0x26, 0xa6, 0x16, 0x1d, 0xc4, 0xbd, 0xc9, 0xb5, 0xc6, 0xfa, 0xb4, 0x21, + 0xbf, 0xd0, 0x12, 0x4c, 0xe3, 0x28, 0x22, 0x51, 0xaf, 0xc9, 0xd9, 0xc5, 0x07, 0x42, 0x30, 0x15, + 0xbb, 0x4f, 0x70, 0x6f, 0x6a, 0xad, 0xb1, 0x3e, 0x67, 0xf0, 0x36, 0xea, 0xc1, 0xec, 0x11, 0x8e, + 0x62, 0x97, 0x04, 0xbd, 0x69, 0x4e, 0x56, 0x9f, 0xfa, 0x2c, 0x4c, 0xdf, 0xf6, 0x43, 0x3a, 0xd4, + 0xdf, 0x84, 0xde, 0x43, 0xcb, 0x1e, 0x0c, 0xfc, 0x87, 0x7c, 0xfa, 0x3b, 0x07, 0xd8, 0x3e, 0x54, + 0x66, 0x59, 0x81, 0xb6, 0x5c, 0x94, 0x9c, 0xdb, 0x9c, 0xd1, 0x12, 0x84, 0xbb, 0x8e, 0xfe, 0x23, + 0x78, 0xb6, 0x64, 0xa0, 0x34, 0xcf, 0x15, 0x98, 0x7b, 0x64, 0x45, 0xfb, 0xd6, 0x23, 0x6c, 0x46, + 0x16, 0x75, 0x09, 0x1f, 0xdd, 0x30, 0xba, 0x92, 0x68, 0x30, 0x9a, 0xfe, 0x19, 0x68, 0x19, 0x09, + 0xc4, 0x0f, 0x2d, 0x9b, 0xd6, 0x51, 0x8e, 0xd6, 0xa0, 0x13, 0x46, 0xd8, 0xf2, 0x3c, 0x62, 0x5b, + 0x14, 0x73, 0xfb, 0x34, 0x8d, 0x34, 0x49, 0xbf, 0x08, 0x2b, 0xa5, 0xc2, 0xc5, 0x04, 0xf5, 0xb7, + 0x72, 0xb3, 0x27, 0xbe, 0xef, 0xd6, 0x52, 0xad, 0xaf, 0x16, 0x66, 0xcd, 0x47, 0x4a, 0xb9, 0xdf, + 0xcf, 0xf5, 0x7a, 0xd8, 0x0a, 0x06, 0x61, 0x2d, 0xc1, 0xf9, 0x19, 0xab, 0xa1, 0x89, 0xe4, 0x0b, + 0xc2, 0x6d, 0x76, 0x88, 0xe7, 0x61, 0x9b, 0xba, 0x24, 0x50, 0x62, 0x2f, 0x01, 0xd8, 0x09, 0x51, + 0x3a, 0x51, 0x8a, 0xa2, 0x6b, 0xd0, 0x2b, 0x0e, 0x95, 0x62, 0xff, 0xd6, 0x80, 0x67, 0x6e, 0x48, + 0xa3, 0x09, 0xc5, 0xb5, 0x36, 0x20, 0xab, 0x72, 0x32, 0xaf, 0x32, 0xbf, 0x41, 0xcd, 0xc2, 0x06, + 0x31, 0x8e, 0x08, 0x87, 0x9e, 0x6b, 0x5b, 0x5c, 0xc4, 0x14, 0x17, 0x91, 0x26, 0xa1, 0x05, 0x68, + 0x52, 0xea, 0x71, 0xcf, 0x6d, 0x1b, 0xac, 0x89, 0xb6, 0x61, 0xd9, 0xc7, 0x3e, 0x89, 0x86, 0xa6, + 0x6f, 0x85, 0xa6, 0x6f, 0x1d, 0x9b, 0xcc, 0xcd, 0x4d, 0x7f, 0xbf, 0x37, 0xc3, 0xe7, 0x87, 0x44, + 0xef, 
0x3d, 0x2b, 0xbc, 0x67, 0x1d, 0xef, 0xb9, 0x4f, 0xf0, 0xbd, 0x7d, 0xbd, 0x07, 0xcb, 0xf9, + 0xf5, 0xc9, 0xa5, 0x7f, 0x0f, 0x2e, 0x08, 0xca, 0xde, 0x30, 0xb0, 0xf7, 0x78, 0x6c, 0xd5, 0xda, + 0xa8, 0x7f, 0x37, 0xa0, 0x57, 0x1c, 0x28, 0x3d, 0xff, 0x69, 0xad, 0x76, 0x6a, 0x9b, 0x5c, 0x86, + 0x0e, 0xb5, 0x5c, 0xcf, 0x24, 0xfd, 0x7e, 0x8c, 0x29, 0x37, 0xc4, 0x94, 0x01, 0x8c, 0x74, 0x9f, + 0x53, 0xd0, 0x06, 0x2c, 0xd8, 0xc2, 0xfb, 0xcd, 0x08, 0x1f, 0xb9, 0x3c, 0x1b, 0xcc, 0xf2, 0x89, + 0xcd, 0xdb, 0x2a, 0x2a, 0x04, 0x19, 0xe9, 0x30, 0xe7, 0x3a, 0xc7, 0x26, 0x4f, 0x47, 0x3c, 0x99, + 0xb4, 0xb8, 0xb4, 0x8e, 0xeb, 0x1c, 0xdf, 0x71, 0x3d, 0xcc, 0x2c, 0xaa, 0x3f, 0x84, 0x55, 0xb1, + 0xf8, 0xbb, 0x81, 0x1d, 0x61, 0x1f, 0x07, 0xd4, 0xf2, 0x76, 0x48, 0x38, 0xac, 0xe5, 0x36, 0xcf, + 0x42, 0x2b, 0x76, 0x03, 0x1b, 0x9b, 0x81, 0x48, 0x6a, 0x53, 0xc6, 0x2c, 0xff, 0xde, 0x8d, 0xf5, + 0x9b, 0x70, 0xb1, 0x42, 0xae, 0xb4, 0xec, 0x73, 0xd0, 0xe5, 0x13, 0xb3, 0x49, 0x40, 0x71, 0x40, + 0xb9, 0xec, 0xae, 0xd1, 0x61, 0xb4, 0x1d, 0x41, 0xd2, 0x5f, 0x05, 0x24, 0x64, 0xdc, 0x23, 0x83, + 0xa0, 0x5e, 0x38, 0x3f, 0x03, 0x8b, 0x99, 0x21, 0xd2, 0x37, 0x5e, 0x83, 0x25, 0x41, 0xfe, 0x38, + 0xf0, 0x6b, 0xcb, 0xba, 0x00, 0xcf, 0xe4, 0x06, 0x49, 0x69, 0xdb, 0x4a, 0x49, 0xf6, 0xd8, 0x39, + 0x51, 0xd8, 0xb2, 0x9a, 0x41, 0xf6, 0xe4, 0xe1, 0x99, 0x4b, 0x4c, 0xd8, 0x8a, 0x0e, 0x0d, 0x6c, + 0x39, 0x24, 0xf0, 0x86, 0xb5, 0x33, 0x57, 0xc9, 0x48, 0x29, 0xf7, 0x13, 0x58, 0x56, 0x19, 0x2d, + 0xe8, 0xbb, 0x8f, 0x06, 0x11, 0xae, 0x9b, 0x89, 0xd3, 0x2e, 0x3b, 0x59, 0x70, 0x59, 0x7d, 0x4b, + 0x85, 0x59, 0x4a, 0xb0, 0xdc, 0xd2, 0xe4, 0x24, 0x6b, 0xa4, 0x4e, 0x32, 0xfd, 0x9b, 0x06, 0x9c, + 0x57, 0x23, 0x6a, 0xfa, 0xd5, 0x29, 0x03, 0xab, 0x59, 0x19, 0x58, 0x53, 0xa3, 0xc0, 0x5a, 0x87, + 0x85, 0x98, 0x0c, 0x22, 0x1b, 0x9b, 0x8e, 0x45, 0x2d, 0x33, 0x20, 0x0e, 0x96, 0x71, 0x77, 0x4e, + 0xd0, 0x6f, 0x59, 0xd4, 0xda, 0x25, 0x0e, 0xd6, 0x7f, 0xa8, 0xdc, 0x2e, 0xe3, 0xaf, 0x1b, 0x70, + 0xde, 0xb3, 0x62, 0x6a, 0x5a, 0x61, 0x88, 0x03, 0xc7, 0xb4, 0x28, 0x73, 0xfa, 0x06, 0x77, 0xfa, + 0x73, 0xac, 0xe3, 0x06, 0xa7, 0xdf, 0xa0, 0xbb, 0xb1, 0xfe, 0x87, 0x49, 0x98, 0x67, 0x63, 0x59, + 0x90, 0xd5, 0x5a, 0xef, 0x02, 0x34, 0xf1, 0x31, 0x95, 0x0b, 0x65, 0x4d, 0xb4, 0x05, 0x8b, 0x32, + 0x9a, 0x5d, 0x12, 0x8c, 0x02, 0xbd, 0x29, 0xf2, 0xe2, 0xa8, 0x2b, 0x89, 0xf5, 0xcb, 0xd0, 0x89, + 0x29, 0x09, 0x55, 0xde, 0x98, 0x12, 0x79, 0x83, 0x91, 0x64, 0xde, 0xc8, 0xda, 0x74, 0xba, 0xc4, + 0xa6, 0x5d, 0x37, 0x36, 0xb1, 0x6d, 0x8a, 0x59, 0xf1, 0xcc, 0xd3, 0x32, 0xc0, 0x8d, 0x6f, 0xdb, + 0xc2, 0x1a, 0xe8, 0x3d, 0x58, 0x75, 0x1f, 0x05, 0x24, 0xc2, 0xa6, 0x34, 0x24, 0x8f, 0xdf, 0x80, + 0x50, 0xb3, 0x4f, 0x06, 0x81, 0xc3, 0xb3, 0x50, 0xcb, 0xe8, 0x09, 0x9e, 0x3d, 0xce, 0xc2, 0x2c, + 0xb0, 0x4b, 0xe8, 0x1d, 0xd6, 0xaf, 0xbf, 0x01, 0x0b, 0x23, 0xab, 0xd4, 0xcf, 0x02, 0x5f, 0x37, + 0x94, 0xc7, 0x3d, 0xb0, 0x5c, 0x6f, 0x0f, 0x07, 0x0e, 0x8e, 0x9e, 0x32, 0x3b, 0xa1, 0xeb, 0xb0, + 0xe4, 0x3a, 0x1e, 0x36, 0xa9, 0xeb, 0x63, 0x32, 0xa0, 0x66, 0x8c, 0x6d, 0x12, 0x38, 0xb1, 0xb2, + 0x2f, 0xeb, 0x7b, 0x20, 0xba, 0xf6, 0x44, 0x8f, 0xfe, 0xab, 0xe4, 0x94, 0x48, 0xcf, 0x62, 0x74, + 0x3f, 0x0a, 0x30, 0x66, 0x02, 0x0f, 0xb0, 0xe5, 0xe0, 0x48, 0x2e, 0xa3, 0x2b, 0x88, 0x3f, 0xe1, + 0x34, 0xb6, 0x43, 0x92, 0x69, 0x9f, 0x38, 0x43, 0x3e, 0xa3, 0xae, 0x01, 0x82, 0x74, 0x93, 0x38, + 0x43, 0x9e, 0xae, 0x63, 0x93, 0x3b, 0x99, 0x7d, 0x30, 0x08, 0x0e, 0xf9, 0x6c, 0x5a, 0x46, 0xc7, + 0x8d, 0x3f, 0xb0, 0x62, 0xba, 0xc3, 0x48, 0xfa, 0x9f, 0x1b, 0x2a, 0x5f, 0xb0, 0x69, 0x18, 0xd8, + 0xc6, 0xee, 0xd1, 0xff, 0xc0, 
0x1c, 0x6c, 0x84, 0x74, 0x82, 0xcc, 0x3d, 0x59, 0x06, 0x1c, 0x12, + 0x7d, 0xf2, 0x54, 0xe5, 0x3d, 0xa3, 0x74, 0x95, 0x9d, 0xb8, 0x4c, 0x57, 0x9f, 0xab, 0xe3, 0xe2, + 0xb6, 0xbd, 0x77, 0x60, 0x45, 0x4e, 0xfc, 0x63, 0x1c, 0xe0, 0xc8, 0xa2, 0x67, 0x72, 0x7d, 0xd1, + 0xd7, 0xe0, 0x52, 0x95, 0x74, 0xa9, 0xff, 0x33, 0x75, 0x0c, 0x2a, 0x0e, 0x03, 0xef, 0x0f, 0x5c, + 0xcf, 0x39, 0x13, 0xf5, 0xef, 0xe7, 0x17, 0x97, 0x08, 0x97, 0xfe, 0x73, 0x15, 0xce, 0x47, 0x9c, + 0x44, 0xcd, 0x98, 0x31, 0x24, 0x95, 0xcb, 0x9c, 0x31, 0x2f, 0x3b, 0xf8, 0x40, 0x56, 0xc1, 0xfc, + 0x66, 0x52, 0x79, 0x80, 0x92, 0x76, 0x66, 0x69, 0x75, 0x05, 0xda, 0x23, 0xf5, 0x4d, 0xae, 0xbe, + 0x15, 0x4b, 0xbd, 0xcc, 0x3b, 0x6d, 0x12, 0x0e, 0x4d, 0x6c, 0x8b, 0x1b, 0x05, 0xdf, 0xea, 0x96, + 0xd1, 0x61, 0xc4, 0xdb, 0x36, 0xbf, 0x50, 0xd4, 0xcf, 0xb1, 0x29, 0x69, 0x5f, 0x08, 0x69, 0x33, + 0x69, 0x69, 0x5f, 0x70, 0x69, 0x8a, 0xe7, 0xc8, 0xed, 0x0b, 0x9e, 0xd9, 0x11, 0xcf, 0x43, 0xb7, + 0xcf, 0x78, 0x46, 0x5e, 0x95, 0x35, 0x86, 0xdc, 0xd5, 0x2f, 0x61, 0x25, 0xdb, 0x5b, 0xff, 0xc0, + 0x7e, 0x2a, 0x63, 0xe9, 0x97, 0xf2, 0xee, 0x94, 0x3b, 0xf5, 0x8f, 0xf2, 0xd3, 0xae, 0x7d, 0xc3, + 0x79, 0xba, 0x79, 0x5d, 0xcc, 0x1b, 0x24, 0x7b, 0x4d, 0xfa, 0x34, 0x3f, 0xed, 0x53, 0x5c, 0x97, + 0x4e, 0x56, 0x7c, 0x39, 0x1f, 0x02, 0xf9, 0x3b, 0xd5, 0x1f, 0x93, 0xfc, 0x2a, 0x39, 0xd8, 0x8d, + 0xa6, 0x76, 0x5e, 0x93, 0x7a, 0xb9, 0x39, 0xe6, 0x8c, 0x59, 0xa9, 0x96, 0x95, 0xdc, 0xf2, 0x3c, + 0x14, 0x15, 0x8b, 0xfc, 0xca, 0x14, 0xd7, 0x4d, 0x59, 0x5c, 0xab, 0x47, 0x83, 0x43, 0x3c, 0xe4, + 0x3e, 0x3b, 0x25, 0x1e, 0x0d, 0xde, 0xc7, 0x43, 0x7d, 0x37, 0x17, 0x71, 0x62, 0x6a, 0x32, 0x76, + 0x11, 0x4c, 0x31, 0x67, 0x97, 0x29, 0x9f, 0xb7, 0xd1, 0x45, 0x00, 0x37, 0x36, 0x1d, 0xbe, 0xe7, + 0x62, 0x52, 0x2d, 0xa3, 0xed, 0x4a, 0x27, 0x70, 0xf4, 0xdf, 0x36, 0x46, 0x02, 0x6f, 0x7a, 0x64, + 0xff, 0x0c, 0xbd, 0x32, 0xbd, 0x8a, 0x66, 0x66, 0x15, 0xe9, 0xd7, 0x83, 0xa9, 0xec, 0xeb, 0x41, + 0x2a, 0x88, 0xd2, 0xd3, 0xa9, 0x4a, 0xcd, 0x0f, 0xc8, 0xd9, 0x55, 0x96, 0xc5, 0xd4, 0x3c, 0x92, + 0x2e, 0xf5, 0xbf, 0x0d, 0x2b, 0xcc, 0xe0, 0x82, 0xca, 0xeb, 0x96, 0xfa, 0xb5, 0xdd, 0x3f, 0x26, + 0x61, 0xb5, 0x7c, 0x70, 0x9d, 0xfa, 0xee, 0x1d, 0xd0, 0x92, 0xfa, 0x89, 0x1d, 0x8d, 0x31, 0xb5, + 0xfc, 0x30, 0x39, 0x1c, 0xc5, 0x19, 0x7a, 0x41, 0x16, 0x53, 0x0f, 0x54, 0xbf, 0x3a, 0x21, 0x0b, + 0xc5, 0x57, 0xb3, 0x50, 0x7c, 0x31, 0x05, 0x8e, 0x45, 0xab, 0x14, 0x88, 0x3b, 0xdc, 0x05, 0xc7, + 0xa2, 0x55, 0x0a, 0x92, 0xc1, 0x5c, 0x81, 0xf0, 0xda, 0x8e, 0xe4, 0xe7, 0x0a, 0x2e, 0x02, 0xc8, + 0xeb, 0xd5, 0x20, 0x50, 0xc5, 0x64, 0x5b, 0x5c, 0xae, 0x06, 0x41, 0xe5, 0x2d, 0x73, 0xb6, 0xf2, + 0x96, 0x99, 0xdd, 0xcd, 0x56, 0x61, 0x37, 0x3f, 0x05, 0xb8, 0xe5, 0xc6, 0x87, 0xc2, 0xc8, 0xec, + 0x5a, 0xeb, 0xb8, 0xaa, 0x1a, 0x60, 0x4d, 0x46, 0xb1, 0x3c, 0x4f, 0x9a, 0x8e, 0x35, 0x59, 0xf8, + 0x0c, 0x62, 0xec, 0x48, 0xeb, 0xf0, 0x36, 0xa3, 0xf5, 0x23, 0x8c, 0xa5, 0x01, 0x78, 0x5b, 0xff, + 0x53, 0x03, 0xda, 0xf7, 0xb0, 0x2f, 0x25, 0x5f, 0x02, 0x78, 0x44, 0x22, 0x32, 0xa0, 0x6e, 0x80, + 0xc5, 0x2d, 0x7c, 0xda, 0x48, 0x51, 0xbe, 0xbb, 0x1e, 0x9e, 0x1a, 0xb0, 0xd7, 0x97, 0xc6, 0xe4, + 0x6d, 0x46, 0x3b, 0xc0, 0x56, 0x28, 0xed, 0xc7, 0xdb, 0xac, 0xd6, 0x89, 0xa9, 0x65, 0x1f, 0x72, + 0x63, 0x4d, 0x19, 0xe2, 0x43, 0xff, 0x6b, 0x03, 0xc0, 0xc0, 0x3e, 0xa1, 0xdc, 0xd7, 0xd8, 0xed, + 0x76, 0xdf, 0xb2, 0x0f, 0x59, 0xbd, 0x40, 0x87, 0x21, 0x96, 0x96, 0xe8, 0x48, 0xda, 0x83, 0x61, + 0xc8, 0x77, 0x48, 0xb1, 0xc8, 0xfc, 0xd5, 0x36, 0xda, 0x92, 0x22, 0x2a, 0x03, 0x15, 0xca, 0x6d, + 0x83, 0x35, 0x53, 0x39, 0x4d, 0x4c, 0x5b, 0xe5, 0xb4, 
0x15, 0x68, 0xe7, 0x5d, 0x81, 0xa7, 0x02, + 0xee, 0x07, 0x57, 0x60, 0xce, 0x27, 0x8e, 0xdb, 0x77, 0xb1, 0xc3, 0x1d, 0x4d, 0x2e, 0xa5, 0xab, + 0x88, 0xcc, 0xb9, 0xd0, 0x2a, 0xb4, 0xf1, 0x31, 0xc5, 0x41, 0xe2, 0x03, 0x6d, 0x63, 0x44, 0xd0, + 0xbf, 0x02, 0x50, 0x05, 0x7d, 0x9f, 0xa0, 0x6d, 0x98, 0x66, 0xc2, 0xd5, 0x73, 0xe9, 0x6a, 0xf1, + 0xb9, 0x74, 0x64, 0x06, 0x43, 0xb0, 0xa6, 0x13, 0xd0, 0x64, 0x26, 0x01, 0x8d, 0xaf, 0xe7, 0xf4, + 0x6f, 0x1b, 0xb0, 0x26, 0xaf, 0x8f, 0x2e, 0x8e, 0xee, 0x91, 0x23, 0x76, 0x95, 0x78, 0x40, 0x84, + 0x92, 0x33, 0xc9, 0x9c, 0x6f, 0x41, 0xcf, 0xc1, 0x31, 0x75, 0x03, 0xae, 0xd0, 0x54, 0x9b, 0x12, + 0x58, 0x3e, 0x96, 0x13, 0x5a, 0x4e, 0xf5, 0xdf, 0x14, 0xdd, 0xbb, 0x96, 0x8f, 0xd1, 0x35, 0x58, + 0x3c, 0xc4, 0x38, 0x34, 0x3d, 0x62, 0x5b, 0x9e, 0xa9, 0x62, 0x52, 0xde, 0x8f, 0x16, 0x58, 0xd7, + 0x07, 0xac, 0xe7, 0x96, 0x88, 0x4b, 0x3d, 0x86, 0xe7, 0x4e, 0x58, 0x89, 0xcc, 0x4b, 0xab, 0xd0, + 0x0e, 0x23, 0x62, 0xe3, 0x98, 0xf9, 0x6c, 0x83, 0x1f, 0x53, 0x23, 0x02, 0xba, 0x0e, 0x8b, 0xc9, + 0xc7, 0x87, 0x38, 0xb2, 0x71, 0x40, 0xad, 0x47, 0xe2, 0xdd, 0x74, 0xd2, 0x28, 0xeb, 0xd2, 0x7f, + 0xdf, 0x00, 0xbd, 0xa0, 0xf5, 0x4e, 0x44, 0xfc, 0x33, 0xb4, 0xe0, 0x16, 0x2c, 0x71, 0x3b, 0x44, + 0x5c, 0xe4, 0xc8, 0x10, 0xa2, 0x8c, 0x39, 0xcf, 0xfa, 0x84, 0x36, 0x65, 0x89, 0x01, 0x5c, 0x39, + 0x71, 0x4e, 0xff, 0x25, 0x5b, 0xfc, 0xab, 0x0b, 0xdd, 0x8f, 0x06, 0x38, 0x1a, 0xa6, 0x1e, 0x5c, + 0x63, 0x2c, 0x57, 0xa1, 0x10, 0x83, 0x14, 0x85, 0x65, 0xda, 0x7e, 0x44, 0x7c, 0x33, 0x01, 0x15, + 0x26, 0x39, 0x4b, 0x87, 0x11, 0xef, 0x08, 0x60, 0x01, 0xbd, 0x0b, 0x33, 0x7d, 0xd7, 0xa3, 0x58, + 0x3c, 0xe3, 0x77, 0xb6, 0x5f, 0x28, 0x46, 0x44, 0x5a, 0xe7, 0xe6, 0x1d, 0xce, 0x6c, 0xc8, 0x41, + 0x68, 0x1f, 0x16, 0xdd, 0x20, 0xe4, 0xa5, 0x57, 0xe4, 0x5a, 0x9e, 0xfb, 0x64, 0xf4, 0x64, 0xd8, + 0xd9, 0x7e, 0x75, 0x8c, 0xac, 0xbb, 0x6c, 0xe4, 0x5e, 0x7a, 0xa0, 0x81, 0xdc, 0x02, 0x0d, 0x61, + 0x58, 0x22, 0x03, 0x5a, 0x54, 0x32, 0xcd, 0x95, 0x6c, 0x8f, 0x51, 0x72, 0x9f, 0x0f, 0xcd, 0x6a, + 0x59, 0x24, 0x45, 0xa2, 0xb6, 0x0b, 0x33, 0x62, 0x71, 0x2c, 0x47, 0xf6, 0x5d, 0xec, 0x29, 0x20, + 0x44, 0x7c, 0xb0, 0x34, 0x40, 0x42, 0x1c, 0x59, 0x81, 0x4a, 0x77, 0xea, 0x93, 0xf1, 0x1f, 0x59, + 0xde, 0x40, 0xc5, 0x9b, 0xf8, 0xd0, 0xfe, 0x32, 0x0d, 0xa8, 0xb8, 0x42, 0xf5, 0x0e, 0x1a, 0xe1, + 0x98, 0xa5, 0x90, 0x74, 0x7e, 0x9d, 0x4f, 0xd1, 0x79, 0x8e, 0xfd, 0x04, 0xda, 0x76, 0x7c, 0x64, + 0x72, 0x93, 0x70, 0x9d, 0x9d, 0xed, 0xb7, 0x4f, 0x6d, 0xd2, 0xcd, 0x9d, 0xbd, 0x87, 0x9c, 0x6a, + 0xb4, 0xec, 0xf8, 0x88, 0xb7, 0xd0, 0xcf, 0x01, 0xbe, 0x88, 0x49, 0x20, 0x25, 0x8b, 0x8d, 0x7f, + 0xe7, 0xf4, 0x92, 0x7f, 0xba, 0x77, 0x7f, 0x57, 0x88, 0x6e, 0x33, 0x71, 0x42, 0xb6, 0x0d, 0x73, + 0xa1, 0x15, 0x3d, 0x1e, 0x60, 0x2a, 0xc5, 0x0b, 0x5f, 0x78, 0xef, 0xf4, 0xe2, 0x3f, 0x14, 0x62, + 0x84, 0x86, 0x6e, 0x98, 0xfa, 0xd2, 0xbe, 0x9d, 0x84, 0x96, 0x5a, 0x17, 0xab, 0xde, 0xb8, 0x87, + 0x8b, 0x37, 0x0c, 0xd3, 0x0d, 0xfa, 0x44, 0x5a, 0xf4, 0x1c, 0xa3, 0x8b, 0x67, 0x0c, 0x9e, 0xfd, + 0x37, 0x60, 0x21, 0xc2, 0x36, 0x89, 0x1c, 0x76, 0xc7, 0x75, 0x7d, 0x97, 0xb9, 0xbd, 0xd8, 0xcb, + 0x79, 0x41, 0xbf, 0xa5, 0xc8, 0xe8, 0x25, 0x98, 0xe7, 0xdb, 0x9e, 0xe2, 0x6c, 0x2a, 0x99, 0xd8, + 0x4b, 0x31, 0x6e, 0xc0, 0xc2, 0xe3, 0x01, 0xcb, 0x1b, 0xf6, 0x81, 0x15, 0x59, 0x36, 0x25, 0xc9, + 0x6b, 0xc2, 0x3c, 0xa7, 0xef, 0x24, 0x64, 0xf4, 0x3a, 0x2c, 0x0b, 0x56, 0x1c, 0xdb, 0x56, 0x98, + 0x8c, 0xc0, 0x91, 0x2c, 0x36, 0x97, 0x78, 0xef, 0x6d, 0xde, 0xb9, 0xa3, 0xfa, 0x90, 0x06, 0x2d, + 0x9b, 0xf8, 0x3e, 0x0e, 0x68, 0xcc, 0x8f, 0xbf, 0xb6, 0x91, 0x7c, 0xa3, 0x1b, 
0x70, 0xd1, 0xf2, + 0x3c, 0xf2, 0xa5, 0xc9, 0x47, 0x3a, 0x66, 0x61, 0x75, 0xa2, 0xf4, 0xd4, 0x38, 0xd3, 0x47, 0x9c, + 0xc7, 0xc8, 0x2e, 0x54, 0xbb, 0x0c, 0xed, 0x64, 0x1f, 0xd9, 0x8d, 0x21, 0xe5, 0x90, 0xbc, 0xad, + 0x9d, 0x83, 0x6e, 0x7a, 0x27, 0xb4, 0x7f, 0x36, 0x61, 0xb1, 0x24, 0xa8, 0xd0, 0x67, 0x00, 0xcc, + 0x5b, 0x45, 0x68, 0x49, 0x77, 0xfd, 0xc1, 0xe9, 0x83, 0x93, 0xf9, 0xab, 0x20, 0x1b, 0xcc, 0xfb, + 0x45, 0x13, 0xfd, 0x02, 0x3a, 0xdc, 0x63, 0xa5, 0x74, 0xe1, 0xb2, 0xef, 0x7e, 0x07, 0xe9, 0x6c, + 0xad, 0x52, 0x3c, 0x8f, 0x01, 0xd1, 0xd6, 0xfe, 0xde, 0x80, 0x76, 0xa2, 0x98, 0xdd, 0x7f, 0xc4, + 0x46, 0xf1, 0xbd, 0x8e, 0xd5, 0xfd, 0x87, 0xd3, 0xee, 0x70, 0xd2, 0xff, 0xa5, 0x2b, 0x69, 0x6f, + 0x02, 0x8c, 0xd6, 0x5f, 0xba, 0x84, 0x46, 0xe9, 0x12, 0xf4, 0x0d, 0x98, 0x63, 0x96, 0x75, 0xb1, + 0xb3, 0x47, 0x23, 0x37, 0xe4, 0x90, 0xae, 0xe0, 0x89, 0x65, 0x01, 0xa9, 0x3e, 0xb7, 0xbf, 0x59, + 0x81, 0x6e, 0xfa, 0x01, 0x0d, 0x7d, 0x0e, 0x9d, 0x14, 0x74, 0x8d, 0x9e, 0x2f, 0x6e, 0x5a, 0x11, + 0x0a, 0xd7, 0x5e, 0x18, 0xc3, 0x25, 0x6b, 0xac, 0x09, 0x14, 0xc0, 0xf9, 0x02, 0xfe, 0x8b, 0xae, + 0x16, 0x47, 0x57, 0xa1, 0xcb, 0xda, 0xcb, 0xb5, 0x78, 0x13, 0x7d, 0x14, 0x16, 0x4b, 0x00, 0x5d, + 0xf4, 0xca, 0x18, 0x29, 0x19, 0x50, 0x59, 0xbb, 0x56, 0x93, 0x3b, 0xd1, 0xfa, 0x18, 0x50, 0x11, + 0xed, 0x45, 0x2f, 0x8f, 0x15, 0x33, 0x42, 0x93, 0xb5, 0x57, 0xea, 0x31, 0x57, 0x2e, 0x54, 0xe0, + 0xc0, 0x63, 0x17, 0x9a, 0x41, 0x9a, 0xc7, 0x2e, 0x34, 0x07, 0x2e, 0x4f, 0xa0, 0x43, 0x58, 0xc8, + 0x63, 0xc4, 0x68, 0xa3, 0xea, 0x9f, 0x86, 0x02, 0x04, 0xad, 0x5d, 0xad, 0xc3, 0x9a, 0x28, 0xc3, + 0x70, 0x2e, 0x8b, 0xc9, 0xa2, 0x97, 0x8a, 0xe3, 0x4b, 0x51, 0x69, 0x6d, 0x7d, 0x3c, 0x63, 0x7a, + 0x4d, 0x79, 0x9c, 0xb6, 0x6c, 0x4d, 0x15, 0x20, 0x70, 0xd9, 0x9a, 0xaa, 0x60, 0x5f, 0x7d, 0x02, + 0x7d, 0xa5, 0xc0, 0xbf, 0x1c, 0x7e, 0x89, 0x36, 0xab, 0xc4, 0x94, 0x03, 0xa8, 0xda, 0x56, 0x6d, + 0x7e, 0xa5, 0xfb, 0x7a, 0x83, 0xc5, 0x7a, 0x0a, 0xc6, 0x2c, 0x8b, 0xf5, 0x22, 0x30, 0x5a, 0x16, + 0xeb, 0x65, 0x58, 0xe8, 0x04, 0xda, 0x87, 0xb9, 0x0c, 0xb0, 0x89, 0x5e, 0xac, 0x1a, 0x99, 0x7d, + 0xff, 0xd3, 0x5e, 0x1a, 0xcb, 0x97, 0xe8, 0x30, 0x55, 0xf6, 0x92, 0xe9, 0xaa, 0x72, 0x72, 0xd9, + 0x7c, 0xf5, 0xe2, 0x38, 0xb6, 0x4c, 0x28, 0x17, 0xe0, 0xcf, 0xd2, 0x50, 0xae, 0x82, 0x57, 0x4b, + 0x43, 0xb9, 0x1a, 0x51, 0x9d, 0x40, 0x07, 0x30, 0x9f, 0x83, 0x3e, 0xd1, 0x7a, 0x95, 0x88, 0x3c, + 0xec, 0xaa, 0x6d, 0xd4, 0xe0, 0x4c, 0x34, 0xfd, 0x4c, 0x15, 0xdb, 0xdc, 0xe5, 0xae, 0x54, 0x0f, + 0x1d, 0xf9, 0xd9, 0xf3, 0x27, 0x33, 0x25, 0xa2, 0xbf, 0x84, 0xa5, 0xb2, 0x17, 0x31, 0x74, 0xad, + 0xac, 0x84, 0xaf, 0x7c, 0x76, 0xd3, 0x36, 0xeb, 0xb2, 0x27, 0x8a, 0x3f, 0x86, 0x96, 0x82, 0xff, + 0xd0, 0x73, 0xc5, 0xd1, 0x39, 0xc0, 0x54, 0xd3, 0x4f, 0x62, 0x49, 0x85, 0x8a, 0xaf, 0xb2, 0xc2, + 0x08, 0x97, 0xab, 0xce, 0x0a, 0x05, 0x04, 0xb1, 0x3a, 0x2b, 0x14, 0x61, 0x3e, 0xae, 0x2e, 0x71, + 0xbb, 0x34, 0x8c, 0x55, 0xed, 0x76, 0x25, 0x28, 0x5d, 0xb5, 0xdb, 0x95, 0x22, 0x63, 0x13, 0xe8, + 0x97, 0x0a, 0xca, 0xcf, 0xa3, 0x57, 0xa8, 0x32, 0xb7, 0x54, 0xa0, 0x68, 0xda, 0xf5, 0xfa, 0x03, + 0x12, 0xf5, 0x4f, 0x54, 0x26, 0xcc, 0xa1, 0x57, 0xd5, 0x99, 0xb0, 0x1c, 0x43, 0xd3, 0xb6, 0x6a, + 0xf3, 0x17, 0x83, 0x3c, 0x0d, 0xef, 0x54, 0x5b, 0xbb, 0x04, 0x11, 0xab, 0xb6, 0x76, 0x29, 0x62, + 0xc4, 0xe3, 0xa3, 0x0c, 0xba, 0x29, 0x8b, 0x8f, 0x13, 0xb0, 0x25, 0x6d, 0xb3, 0x2e, 0x7b, 0xe6, + 0xa2, 0x50, 0xc4, 0x66, 0xd0, 0xd8, 0xf9, 0x67, 0xce, 0x80, 0x6b, 0x35, 0xb9, 0xab, 0x77, 0x57, + 0x9d, 0x09, 0x63, 0x17, 0x90, 0x3b, 0x1b, 0xb6, 0x6a, 0xf3, 0x27, 0xba, 0x43, 0xf5, 0x63, 0x48, + 0x0a, 
0x57, 0x41, 0x57, 0xc7, 0xc8, 0x49, 0xe1, 0x42, 0xda, 0xcb, 0xb5, 0x78, 0xcb, 0xa2, 0x37, + 0x8d, 0x74, 0x9c, 0xe4, 0x4f, 0x05, 0x78, 0xe6, 0x24, 0x7f, 0x2a, 0x01, 0x4f, 0x4a, 0xa2, 0x57, + 0x01, 0x1c, 0xe3, 0xa3, 0x37, 0x07, 0xb4, 0x8c, 0x8f, 0xde, 0x02, 0x76, 0x32, 0x81, 0x7e, 0x3d, + 0xfa, 0x61, 0xa0, 0xf8, 0xdc, 0x88, 0xb6, 0x2b, 0x53, 0x51, 0xe5, 0x2b, 0xab, 0xf6, 0xda, 0xa9, + 0xc6, 0xa4, 0x8c, 0xff, 0xbb, 0x86, 0x42, 0x1f, 0x4b, 0xdf, 0xfb, 0xd0, 0xeb, 0x35, 0x04, 0x17, + 0x9e, 0x2c, 0xb5, 0x37, 0x4e, 0x39, 0x2a, 0x35, 0xa1, 0x0f, 0x60, 0x9a, 0xd7, 0xb9, 0xe8, 0xd2, + 0xc9, 0x05, 0xb0, 0x76, 0xb9, 0xbc, 0x3f, 0x29, 0xe3, 0x98, 0xb4, 0xfd, 0x19, 0xfe, 0x93, 0xf2, + 0x6b, 0xff, 0x09, 0x00, 0x00, 0xff, 0xff, 0x66, 0x23, 0x9f, 0xad, 0xbb, 0x2c, 0x00, 0x00, } diff --git a/weed/pb/volume_server_pb/volume_server_helper.go b/weed/pb/volume_server_pb/volume_server_helper.go new file mode 100644 index 000000000..356be27ff --- /dev/null +++ b/weed/pb/volume_server_pb/volume_server_helper.go @@ -0,0 +1,5 @@ +package volume_server_pb + +func (m *RemoteFile) BackendName() string { + return m.BackendType + "." + m.BackendId +} diff --git a/weed/replication/replicator.go b/weed/replication/replicator.go index 7353cdc91..a0ef6591c 100644 --- a/weed/replication/replicator.go +++ b/weed/replication/replicator.go @@ -18,10 +18,10 @@ type Replicator struct { source *source.FilerSource } -func NewReplicator(sourceConfig util.Configuration, dataSink sink.ReplicationSink) *Replicator { +func NewReplicator(sourceConfig util.Configuration, configPrefix string, dataSink sink.ReplicationSink) *Replicator { source := &source.FilerSource{} - source.Initialize(sourceConfig) + source.Initialize(sourceConfig, configPrefix) dataSink.SetSourceFiler(source) diff --git a/weed/replication/sink/azuresink/azure_sink.go b/weed/replication/sink/azuresink/azure_sink.go index 6381908a1..a0b1a41ab 100644 --- a/weed/replication/sink/azuresink/azure_sink.go +++ b/weed/replication/sink/azuresink/azure_sink.go @@ -35,12 +35,12 @@ func (g *AzureSink) GetSinkToDirectory() string { return g.dir } -func (g *AzureSink) Initialize(configuration util.Configuration) error { +func (g *AzureSink) Initialize(configuration util.Configuration, prefix string) error { return g.initialize( - configuration.GetString("account_name"), - configuration.GetString("account_key"), - configuration.GetString("container"), - configuration.GetString("directory"), + configuration.GetString(prefix+"account_name"), + configuration.GetString(prefix+"account_key"), + configuration.GetString(prefix+"container"), + configuration.GetString(prefix+"directory"), ) } diff --git a/weed/replication/sink/b2sink/b2_sink.go b/weed/replication/sink/b2sink/b2_sink.go index 35c2230fa..8c80a64bd 100644 --- a/weed/replication/sink/b2sink/b2_sink.go +++ b/weed/replication/sink/b2sink/b2_sink.go @@ -31,12 +31,12 @@ func (g *B2Sink) GetSinkToDirectory() string { return g.dir } -func (g *B2Sink) Initialize(configuration util.Configuration) error { +func (g *B2Sink) Initialize(configuration util.Configuration, prefix string) error { return g.initialize( - configuration.GetString("b2_account_id"), - configuration.GetString("b2_master_application_key"), - configuration.GetString("bucket"), - configuration.GetString("directory"), + configuration.GetString(prefix+"b2_account_id"), + configuration.GetString(prefix+"b2_master_application_key"), + configuration.GetString(prefix+"bucket"), + configuration.GetString(prefix+"directory"), ) } diff --git 
a/weed/replication/sink/filersink/fetch_write.go b/weed/replication/sink/filersink/fetch_write.go index 97e9671a3..26c055da5 100644 --- a/weed/replication/sink/filersink/fetch_write.go +++ b/weed/replication/sink/filersink/fetch_write.go @@ -63,7 +63,7 @@ func (fs *FilerSink) fetchAndWrite(ctx context.Context, sourceChunk *filer_pb.Fi var host string var auth security.EncodedJwt - if err := fs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + if err := fs.withFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { request := &filer_pb.AssignVolumeRequest{ Count: 1, @@ -104,11 +104,11 @@ func (fs *FilerSink) fetchAndWrite(ctx context.Context, sourceChunk *filer_pb.Fi return } -func (fs *FilerSink) withFilerClient(ctx context.Context, fn func(filer_pb.SeaweedFilerClient) error) error { +func (fs *FilerSink) withFilerClient(ctx context.Context, fn func(context.Context, filer_pb.SeaweedFilerClient) error) error { - return util.WithCachedGrpcClient(ctx, func(grpcConnection *grpc.ClientConn) error { + return util.WithCachedGrpcClient(ctx, func(ctx context.Context, grpcConnection *grpc.ClientConn) error { client := filer_pb.NewSeaweedFilerClient(grpcConnection) - return fn(client) + return fn(ctx, client) }, fs.grpcAddress, fs.grpcDialOption) } diff --git a/weed/replication/sink/filersink/filer_sink.go b/weed/replication/sink/filersink/filer_sink.go index f99c7fdf6..de99fbe1c 100644 --- a/weed/replication/sink/filersink/filer_sink.go +++ b/weed/replication/sink/filersink/filer_sink.go @@ -3,10 +3,11 @@ package filersink import ( "context" "fmt" - "github.com/chrislusf/seaweedfs/weed/security" - "github.com/spf13/viper" + "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" @@ -38,13 +39,13 @@ func (fs *FilerSink) GetSinkToDirectory() string { return fs.dir } -func (fs *FilerSink) Initialize(configuration util.Configuration) error { +func (fs *FilerSink) Initialize(configuration util.Configuration, prefix string) error { return fs.initialize( - configuration.GetString("grpcAddress"), - configuration.GetString("directory"), - configuration.GetString("replication"), - configuration.GetString("collection"), - configuration.GetInt("ttlSec"), + configuration.GetString(prefix+"grpcAddress"), + configuration.GetString(prefix+"directory"), + configuration.GetString(prefix+"replication"), + configuration.GetString(prefix+"collection"), + configuration.GetInt(prefix+"ttlSec"), ) } @@ -59,12 +60,12 @@ func (fs *FilerSink) initialize(grpcAddress string, dir string, fs.replication = replication fs.collection = collection fs.ttlSec = int32(ttlSec) - fs.grpcDialOption = security.LoadClientTLS(viper.Sub("grpc"), "client") + fs.grpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client") return nil } func (fs *FilerSink) DeleteEntry(ctx context.Context, key string, isDirectory, deleteIncludeChunks bool) error { - return fs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + return fs.withFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { dir, name := filer2.FullPath(key).DirAndName() @@ -87,7 +88,7 @@ func (fs *FilerSink) DeleteEntry(ctx context.Context, key string, isDirectory, d func (fs *FilerSink) CreateEntry(ctx context.Context, key string, entry *filer_pb.Entry) error { - return fs.withFilerClient(ctx, func(client 
filer_pb.SeaweedFilerClient) error { + return fs.withFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { dir, name := filer2.FullPath(key).DirAndName() @@ -124,7 +125,7 @@ func (fs *FilerSink) CreateEntry(ctx context.Context, key string, entry *filer_p } glog.V(1).Infof("create: %v", request) - if _, err := client.CreateEntry(ctx, request); err != nil { + if err := filer_pb.CreateEntry(ctx, client, request); err != nil { glog.V(0).Infof("create entry %s: %v", key, err) return fmt.Errorf("create entry %s: %v", key, err) } @@ -139,7 +140,7 @@ func (fs *FilerSink) UpdateEntry(ctx context.Context, key string, oldEntry *file // read existing entry var existingEntry *filer_pb.Entry - err = fs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + err = fs.withFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { request := &filer_pb.LookupDirectoryEntryRequest{ Directory: dir, @@ -191,7 +192,7 @@ func (fs *FilerSink) UpdateEntry(ctx context.Context, key string, oldEntry *file } // save updated meta data - return true, fs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + return true, fs.withFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { request := &filer_pb.UpdateEntryRequest{ Directory: newParentPath, diff --git a/weed/replication/sink/gcssink/gcs_sink.go b/weed/replication/sink/gcssink/gcs_sink.go index abd7c49b9..5aa978ab8 100644 --- a/weed/replication/sink/gcssink/gcs_sink.go +++ b/weed/replication/sink/gcssink/gcs_sink.go @@ -34,11 +34,11 @@ func (g *GcsSink) GetSinkToDirectory() string { return g.dir } -func (g *GcsSink) Initialize(configuration util.Configuration) error { +func (g *GcsSink) Initialize(configuration util.Configuration, prefix string) error { return g.initialize( - configuration.GetString("google_application_credentials"), - configuration.GetString("bucket"), - configuration.GetString("directory"), + configuration.GetString(prefix+"google_application_credentials"), + configuration.GetString(prefix+"bucket"), + configuration.GetString(prefix+"directory"), ) } diff --git a/weed/replication/sink/replication_sink.go b/weed/replication/sink/replication_sink.go index dd54f0005..208bbdf87 100644 --- a/weed/replication/sink/replication_sink.go +++ b/weed/replication/sink/replication_sink.go @@ -9,7 +9,7 @@ import ( type ReplicationSink interface { GetName() string - Initialize(configuration util.Configuration) error + Initialize(configuration util.Configuration, prefix string) error DeleteEntry(ctx context.Context, key string, isDirectory, deleteIncludeChunks bool) error CreateEntry(ctx context.Context, key string, entry *filer_pb.Entry) error UpdateEntry(ctx context.Context, key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) diff --git a/weed/replication/sink/s3sink/s3_sink.go b/weed/replication/sink/s3sink/s3_sink.go index d5cad3541..e4e097c0f 100644 --- a/weed/replication/sink/s3sink/s3_sink.go +++ b/weed/replication/sink/s3sink/s3_sink.go @@ -39,16 +39,16 @@ func (s3sink *S3Sink) GetSinkToDirectory() string { return s3sink.dir } -func (s3sink *S3Sink) Initialize(configuration util.Configuration) error { - glog.V(0).Infof("sink.s3.region: %v", configuration.GetString("region")) - glog.V(0).Infof("sink.s3.bucket: %v", configuration.GetString("bucket")) - glog.V(0).Infof("sink.s3.directory: %v", configuration.GetString("directory")) +func 
(s3sink *S3Sink) Initialize(configuration util.Configuration, prefix string) error { + glog.V(0).Infof("sink.s3.region: %v", configuration.GetString(prefix+"region")) + glog.V(0).Infof("sink.s3.bucket: %v", configuration.GetString(prefix+"bucket")) + glog.V(0).Infof("sink.s3.directory: %v", configuration.GetString(prefix+"directory")) return s3sink.initialize( - configuration.GetString("aws_access_key_id"), - configuration.GetString("aws_secret_access_key"), - configuration.GetString("region"), - configuration.GetString("bucket"), - configuration.GetString("directory"), + configuration.GetString(prefix+"aws_access_key_id"), + configuration.GetString(prefix+"aws_secret_access_key"), + configuration.GetString(prefix+"region"), + configuration.GetString(prefix+"bucket"), + configuration.GetString(prefix+"directory"), ) } @@ -56,7 +56,7 @@ func (s3sink *S3Sink) SetSourceFiler(s *source.FilerSource) { s3sink.filerSource = s } -func (s3sink *S3Sink) initialize(awsAccessKeyId, aswSecretAccessKey, region, bucket, dir string) error { +func (s3sink *S3Sink) initialize(awsAccessKeyId, awsSecretAccessKey, region, bucket, dir string) error { s3sink.region = region s3sink.bucket = bucket s3sink.dir = dir @@ -64,8 +64,8 @@ func (s3sink *S3Sink) initialize(awsAccessKeyId, aswSecretAccessKey, region, buc config := &aws.Config{ Region: aws.String(s3sink.region), } - if awsAccessKeyId != "" && aswSecretAccessKey != "" { - config.Credentials = credentials.NewStaticCredentials(awsAccessKeyId, aswSecretAccessKey, "") + if awsAccessKeyId != "" && awsSecretAccessKey != "" { + config.Credentials = credentials.NewStaticCredentials(awsAccessKeyId, awsSecretAccessKey, "") } sess, err := session.NewSession(config) diff --git a/weed/replication/source/filer_source.go b/weed/replication/source/filer_source.go index d7b5ebc4d..c3ea44671 100644 --- a/weed/replication/source/filer_source.go +++ b/weed/replication/source/filer_source.go @@ -3,13 +3,14 @@ package source import ( "context" "fmt" - "github.com/chrislusf/seaweedfs/weed/security" - "github.com/spf13/viper" - "google.golang.org/grpc" "io" "net/http" "strings" + "google.golang.org/grpc" + + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/util" @@ -25,17 +26,17 @@ type FilerSource struct { Dir string } -func (fs *FilerSource) Initialize(configuration util.Configuration) error { +func (fs *FilerSource) Initialize(configuration util.Configuration, prefix string) error { return fs.initialize( - configuration.GetString("grpcAddress"), - configuration.GetString("directory"), + configuration.GetString(prefix+"grpcAddress"), + configuration.GetString(prefix+"directory"), ) } func (fs *FilerSource) initialize(grpcAddress string, dir string) (err error) { fs.grpcAddress = grpcAddress fs.Dir = dir - fs.grpcDialOption = security.LoadClientTLS(viper.Sub("grpc"), "client") + fs.grpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client") return nil } @@ -45,7 +46,7 @@ func (fs *FilerSource) LookupFileId(ctx context.Context, part string) (fileUrl s vid := volumeId(part) - err = fs.withFilerClient(ctx, fs.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + err = fs.withFilerClient(ctx, fs.grpcDialOption, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { glog.V(4).Infof("read lookup volume id locations: %v", vid) resp, err := client.LookupVolume(ctx, &filer_pb.LookupVolumeRequest{ @@ -89,11 +90,11 @@ func 
(fs *FilerSource) ReadPart(ctx context.Context, part string) (filename stri return filename, header, readCloser, err } -func (fs *FilerSource) withFilerClient(ctx context.Context, grpcDialOption grpc.DialOption, fn func(filer_pb.SeaweedFilerClient) error) error { +func (fs *FilerSource) withFilerClient(ctx context.Context, grpcDialOption grpc.DialOption, fn func(context.Context, filer_pb.SeaweedFilerClient) error) error { - return util.WithCachedGrpcClient(ctx, func(grpcConnection *grpc.ClientConn) error { + return util.WithCachedGrpcClient(ctx, func(ctx2 context.Context, grpcConnection *grpc.ClientConn) error { client := filer_pb.NewSeaweedFilerClient(grpcConnection) - return fn(client) + return fn(ctx2, client) }, fs.grpcAddress, fs.grpcDialOption) } diff --git a/weed/replication/sub/notification_aws_sqs.go b/weed/replication/sub/notification_aws_sqs.go index f0100f4de..06869e619 100644 --- a/weed/replication/sub/notification_aws_sqs.go +++ b/weed/replication/sub/notification_aws_sqs.go @@ -27,24 +27,24 @@ func (k *AwsSqsInput) GetName() string { return "aws_sqs" } -func (k *AwsSqsInput) Initialize(configuration util.Configuration) error { - glog.V(0).Infof("replication.notification.aws_sqs.region: %v", configuration.GetString("region")) - glog.V(0).Infof("replication.notification.aws_sqs.sqs_queue_name: %v", configuration.GetString("sqs_queue_name")) +func (k *AwsSqsInput) Initialize(configuration util.Configuration, prefix string) error { + glog.V(0).Infof("replication.notification.aws_sqs.region: %v", configuration.GetString(prefix+"region")) + glog.V(0).Infof("replication.notification.aws_sqs.sqs_queue_name: %v", configuration.GetString(prefix+"sqs_queue_name")) return k.initialize( - configuration.GetString("aws_access_key_id"), - configuration.GetString("aws_secret_access_key"), - configuration.GetString("region"), - configuration.GetString("sqs_queue_name"), + configuration.GetString(prefix+"aws_access_key_id"), + configuration.GetString(prefix+"aws_secret_access_key"), + configuration.GetString(prefix+"region"), + configuration.GetString(prefix+"sqs_queue_name"), ) } -func (k *AwsSqsInput) initialize(awsAccessKeyId, aswSecretAccessKey, region, queueName string) (err error) { +func (k *AwsSqsInput) initialize(awsAccessKeyId, awsSecretAccessKey, region, queueName string) (err error) { config := &aws.Config{ Region: aws.String(region), } - if awsAccessKeyId != "" && aswSecretAccessKey != "" { - config.Credentials = credentials.NewStaticCredentials(awsAccessKeyId, aswSecretAccessKey, "") + if awsAccessKeyId != "" && awsSecretAccessKey != "" { + config.Credentials = credentials.NewStaticCredentials(awsAccessKeyId, awsSecretAccessKey, "") } sess, err := session.NewSession(config) diff --git a/weed/replication/sub/notification_gocdk_pub_sub.go b/weed/replication/sub/notification_gocdk_pub_sub.go index eddba9ff8..9726096e5 100644 --- a/weed/replication/sub/notification_gocdk_pub_sub.go +++ b/weed/replication/sub/notification_gocdk_pub_sub.go @@ -27,8 +27,8 @@ func (k *GoCDKPubSubInput) GetName() string { return "gocdk_pub_sub" } -func (k *GoCDKPubSubInput) Initialize(config util.Configuration) error { - subURL := config.GetString("sub_url") +func (k *GoCDKPubSubInput) Initialize(configuration util.Configuration, prefix string) error { + subURL := configuration.GetString(prefix + "sub_url") glog.V(0).Infof("notification.gocdk_pub_sub.sub_url: %v", subURL) sub, err := pubsub.OpenSubscription(context.Background(), subURL) if err != nil { diff --git 
a/weed/replication/sub/notification_google_pub_sub.go b/weed/replication/sub/notification_google_pub_sub.go index ad6b42a2e..a950bb42b 100644 --- a/weed/replication/sub/notification_google_pub_sub.go +++ b/weed/replication/sub/notification_google_pub_sub.go @@ -27,13 +27,13 @@ func (k *GooglePubSubInput) GetName() string { return "google_pub_sub" } -func (k *GooglePubSubInput) Initialize(configuration util.Configuration) error { - glog.V(0).Infof("notification.google_pub_sub.project_id: %v", configuration.GetString("project_id")) - glog.V(0).Infof("notification.google_pub_sub.topic: %v", configuration.GetString("topic")) +func (k *GooglePubSubInput) Initialize(configuration util.Configuration, prefix string) error { + glog.V(0).Infof("notification.google_pub_sub.project_id: %v", configuration.GetString(prefix+"project_id")) + glog.V(0).Infof("notification.google_pub_sub.topic: %v", configuration.GetString(prefix+"topic")) return k.initialize( - configuration.GetString("google_application_credentials"), - configuration.GetString("project_id"), - configuration.GetString("topic"), + configuration.GetString(prefix+"google_application_credentials"), + configuration.GetString(prefix+"project_id"), + configuration.GetString(prefix+"topic"), ) } diff --git a/weed/replication/sub/notification_kafka.go b/weed/replication/sub/notification_kafka.go index 1a86a8307..fa9cfad9b 100644 --- a/weed/replication/sub/notification_kafka.go +++ b/weed/replication/sub/notification_kafka.go @@ -28,14 +28,14 @@ func (k *KafkaInput) GetName() string { return "kafka" } -func (k *KafkaInput) Initialize(configuration util.Configuration) error { - glog.V(0).Infof("replication.notification.kafka.hosts: %v\n", configuration.GetStringSlice("hosts")) - glog.V(0).Infof("replication.notification.kafka.topic: %v\n", configuration.GetString("topic")) +func (k *KafkaInput) Initialize(configuration util.Configuration, prefix string) error { + glog.V(0).Infof("replication.notification.kafka.hosts: %v\n", configuration.GetStringSlice(prefix+"hosts")) + glog.V(0).Infof("replication.notification.kafka.topic: %v\n", configuration.GetString(prefix+"topic")) return k.initialize( - configuration.GetStringSlice("hosts"), - configuration.GetString("topic"), - configuration.GetString("offsetFile"), - configuration.GetInt("offsetSaveIntervalSeconds"), + configuration.GetStringSlice(prefix+"hosts"), + configuration.GetString(prefix+"topic"), + configuration.GetString(prefix+"offsetFile"), + configuration.GetInt(prefix+"offsetSaveIntervalSeconds"), ) } diff --git a/weed/replication/sub/notifications.go b/weed/replication/sub/notifications.go index 66fbef824..8a2668f98 100644 --- a/weed/replication/sub/notifications.go +++ b/weed/replication/sub/notifications.go @@ -9,7 +9,7 @@ type NotificationInput interface { // GetName gets the name to locate the configuration in sync.toml file GetName() string // Initialize initializes the file store - Initialize(configuration util.Configuration) error + Initialize(configuration util.Configuration, prefix string) error ReceiveMessage() (key string, message *filer_pb.EventNotification, err error) } diff --git a/weed/s3api/filer_multipart.go b/weed/s3api/filer_multipart.go index 3bf4aafac..d3bde66ee 100644 --- a/weed/s3api/filer_multipart.go +++ b/weed/s3api/filer_multipart.go @@ -14,7 +14,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" - "github.com/satori/go.uuid" + "github.com/google/uuid" ) type 
InitiateMultipartUploadResult struct { @@ -23,7 +23,7 @@ type InitiateMultipartUploadResult struct { } func (s3a *S3ApiServer) createMultipartUpload(ctx context.Context, input *s3.CreateMultipartUploadInput) (output *InitiateMultipartUploadResult, code ErrorCode) { - uploadId, _ := uuid.NewV4() + uploadId, _ := uuid.NewRandom() uploadIdString := uploadId.String() if err := s3a.mkdir(ctx, s3a.genUploadsFolder(*input.Bucket), uploadIdString, func(entry *filer_pb.Entry) { diff --git a/weed/s3api/filer_util.go b/weed/s3api/filer_util.go index b93b603e2..2fceacd2a 100644 --- a/weed/s3api/filer_util.go +++ b/weed/s3api/filer_util.go @@ -3,6 +3,7 @@ package s3api import ( "context" "fmt" + "io" "os" "strings" "time" @@ -36,7 +37,7 @@ func (s3a *S3ApiServer) mkdir(ctx context.Context, parentDirectoryPath string, d } glog.V(1).Infof("mkdir: %v", request) - if _, err := client.CreateEntry(ctx, request); err != nil { + if err := filer_pb.CreateEntry(ctx, client, request); err != nil { glog.V(0).Infof("mkdir %v: %v", request, err) return fmt.Errorf("mkdir %s/%s: %v", parentDirectoryPath, dirName, err) } @@ -67,7 +68,7 @@ func (s3a *S3ApiServer) mkFile(ctx context.Context, parentDirectoryPath string, } glog.V(1).Infof("create file: %s/%s", parentDirectoryPath, fileName) - if _, err := client.CreateEntry(ctx, request); err != nil { + if err := filer_pb.CreateEntry(ctx, client, request); err != nil { glog.V(0).Infof("create file %v:%v", request, err) return fmt.Errorf("create file %s/%s: %v", parentDirectoryPath, fileName, err) } @@ -89,13 +90,25 @@ func (s3a *S3ApiServer) list(ctx context.Context, parentDirectoryPath, prefix, s } glog.V(4).Infof("read directory: %v", request) - resp, err := client.ListEntries(ctx, request) + stream, err := client.ListEntries(ctx, request) if err != nil { glog.V(0).Infof("read directory %v: %v", request, err) return fmt.Errorf("list dir %v: %v", parentDirectoryPath, err) } - entries = resp.Entries + for { + resp, recvErr := stream.Recv() + if recvErr != nil { + if recvErr == io.EOF { + break + } else { + return recvErr + } + } + + entries = append(entries, resp.Entry) + + } return nil }) diff --git a/weed/s3api/s3api_errors.go b/weed/s3api/s3api_errors.go index 7ba55ed28..96f8d9fd6 100644 --- a/weed/s3api/s3api_errors.go +++ b/weed/s3api/s3api_errors.go @@ -41,6 +41,8 @@ const ( ErrInvalidPartNumberMarker ErrInvalidPart ErrInternalError + ErrInvalidCopyDest + ErrInvalidCopySource ErrNotImplemented ) @@ -118,6 +120,18 @@ var errorCodeResponse = map[ErrorCode]APIError{ Description: "One or more of the specified parts could not be found. 
The part may not have been uploaded, or the specified entity tag may not match the part's entity tag.", HTTPStatusCode: http.StatusBadRequest, }, + + ErrInvalidCopyDest: { + Code: "InvalidRequest", + Description: "This copy request is illegal because it is trying to copy an object to itself without changing the object's metadata, storage class, website redirect location or encryption attributes.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidCopySource: { + Code: "InvalidArgument", + Description: "Copy Source must mention the source bucket and key: sourcebucket/sourcekey.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrNotImplemented: { Code: "NotImplemented", Description: "A header you provided implies functionality that is not implemented", diff --git a/weed/s3api/s3api_handlers.go b/weed/s3api/s3api_handlers.go index 127be07e3..602f03e5c 100644 --- a/weed/s3api/s3api_handlers.go +++ b/weed/s3api/s3api_handlers.go @@ -39,7 +39,7 @@ func encodeResponse(response interface{}) []byte { func (s3a *S3ApiServer) withFilerClient(ctx context.Context, fn func(filer_pb.SeaweedFilerClient) error) error { - return util.WithCachedGrpcClient(ctx, func(grpcConnection *grpc.ClientConn) error { + return util.WithCachedGrpcClient(ctx, func(ctx context.Context, grpcConnection *grpc.ClientConn) error { client := filer_pb.NewSeaweedFilerClient(grpcConnection) return fn(client) }, s3a.option.FilerGrpcAddress, s3a.option.GrpcDialOption) diff --git a/weed/s3api/s3api_object_copy_handlers.go b/weed/s3api/s3api_object_copy_handlers.go new file mode 100644 index 000000000..5e0fa5de1 --- /dev/null +++ b/weed/s3api/s3api_object_copy_handlers.go @@ -0,0 +1,149 @@ +package s3api + +import ( + "fmt" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "github.com/gorilla/mux" + + "github.com/chrislusf/seaweedfs/weed/util" +) + +func (s3a *S3ApiServer) CopyObjectHandler(w http.ResponseWriter, r *http.Request) { + + vars := mux.Vars(r) + dstBucket := vars["bucket"] + dstObject := getObject(vars) + + // Copy source path. + cpSrcPath, err := url.QueryUnescape(r.Header.Get("X-Amz-Copy-Source")) + if err != nil { + // Save unescaped string as is. + cpSrcPath = r.Header.Get("X-Amz-Copy-Source") + } + + srcBucket, srcObject := pathToBucketAndObject(cpSrcPath) + // If source object is empty or bucket is empty, reply back invalid copy source. 
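The copy handlers split the X-Amz-Copy-Source header with the pathToBucketAndObject helper defined further down in this file. A quick standalone sketch of the expected splits (the main wrapper is only for illustration; the helper body is copied from the patch):

```go
package main

import (
	"fmt"
	"strings"
)

// pathToBucketAndObject mirrors the helper added in s3api_object_copy_handlers.go.
func pathToBucketAndObject(path string) (bucket, object string) {
	path = strings.TrimPrefix(path, "/")
	parts := strings.SplitN(path, "/", 2)
	if len(parts) == 2 {
		return parts[0], "/" + parts[1]
	}
	return parts[0], "/"
}

func main() {
	for _, src := range []string{"/srcbucket/dir/key", "srcbucket/key", "srcbucket"} {
		b, o := pathToBucketAndObject(src)
		fmt.Printf("%-20s -> bucket=%q object=%q\n", src, b, o)
	}
}
```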
+ if srcObject == "" || srcBucket == "" { + writeErrorResponse(w, ErrInvalidCopySource, r.URL) + return + } + + if srcBucket == dstBucket && srcObject == dstObject { + writeErrorResponse(w, ErrInvalidCopySource, r.URL) + return + } + + dstUrl := fmt.Sprintf("http://%s%s/%s%s?collection=%s", + s3a.option.Filer, s3a.option.BucketsPath, dstBucket, dstObject, dstBucket) + srcUrl := fmt.Sprintf("http://%s%s/%s%s", + s3a.option.Filer, s3a.option.BucketsPath, srcBucket, srcObject) + + _, _, dataReader, err := util.DownloadFile(srcUrl) + if err != nil { + writeErrorResponse(w, ErrInvalidCopySource, r.URL) + return + } + + etag, errCode := s3a.putToFiler(r, dstUrl, dataReader) + + if errCode != ErrNone { + writeErrorResponse(w, errCode, r.URL) + return + } + + setEtag(w, etag) + + response := CopyObjectResult{ + ETag: etag, + LastModified: time.Now(), + } + + writeSuccessResponseXML(w, encodeResponse(response)) + +} + +func pathToBucketAndObject(path string) (bucket, object string) { + path = strings.TrimPrefix(path, "/") + parts := strings.SplitN(path, "/", 2) + if len(parts) == 2 { + return parts[0], "/" + parts[1] + } + return parts[0], "/" +} + +type CopyPartResult struct { + LastModified time.Time `xml:"LastModified"` + ETag string `xml:"ETag"` +} + +func (s3a *S3ApiServer) CopyObjectPartHandler(w http.ResponseWriter, r *http.Request) { + // https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html + // https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html + vars := mux.Vars(r) + dstBucket := vars["bucket"] + // dstObject := getObject(vars) + + // Copy source path. + cpSrcPath, err := url.QueryUnescape(r.Header.Get("X-Amz-Copy-Source")) + if err != nil { + // Save unescaped string as is. + cpSrcPath = r.Header.Get("X-Amz-Copy-Source") + } + + srcBucket, srcObject := pathToBucketAndObject(cpSrcPath) + // If source object is empty or bucket is empty, reply back invalid copy source. 
+ if srcObject == "" || srcBucket == "" { + writeErrorResponse(w, ErrInvalidCopySource, r.URL) + return + } + + uploadID := r.URL.Query().Get("uploadId") + partIDString := r.URL.Query().Get("partNumber") + + partID, err := strconv.Atoi(partIDString) + if err != nil { + writeErrorResponse(w, ErrInvalidPart, r.URL) + return + } + + // check partID with maximum part ID for multipart objects + if partID > 10000 { + writeErrorResponse(w, ErrInvalidMaxParts, r.URL) + return + } + + rangeHeader := r.Header.Get("x-amz-copy-source-range") + + dstUrl := fmt.Sprintf("http://%s%s/%s/%04d.part?collection=%s", + s3a.option.Filer, s3a.genUploadsFolder(dstBucket), uploadID, partID-1, dstBucket) + srcUrl := fmt.Sprintf("http://%s%s/%s%s", + s3a.option.Filer, s3a.option.BucketsPath, srcBucket, srcObject) + + dataReader, err := util.ReadUrlAsReaderCloser(srcUrl, rangeHeader) + if err != nil { + writeErrorResponse(w, ErrInvalidCopySource, r.URL) + return + } + + etag, errCode := s3a.putToFiler(r, dstUrl, dataReader) + + if errCode != ErrNone { + writeErrorResponse(w, errCode, r.URL) + return + } + + setEtag(w, etag) + + response := CopyPartResult{ + ETag: etag, + LastModified: time.Now(), + } + + writeSuccessResponseXML(w, encodeResponse(response)) + +} diff --git a/weed/s3api/s3api_object_handlers.go b/weed/s3api/s3api_object_handlers.go index 44e93d297..8dc733eb9 100644 --- a/weed/s3api/s3api_object_handlers.go +++ b/weed/s3api/s3api_object_handlers.go @@ -9,9 +9,10 @@ import ( "net/http" "strings" + "github.com/gorilla/mux" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/server" - "github.com/gorilla/mux" ) var ( diff --git a/weed/s3api/s3api_objects_list_handlers.go b/weed/s3api/s3api_objects_list_handlers.go index 1fc8b6b37..aa6849cbd 100644 --- a/weed/s3api/s3api_objects_list_handlers.go +++ b/weed/s3api/s3api_objects_list_handlers.go @@ -3,6 +3,7 @@ package s3api import ( "context" "fmt" + "io" "net/http" "net/url" "path/filepath" @@ -107,7 +108,7 @@ func (s3a *S3ApiServer) listFilerEntries(ctx context.Context, bucket, originalPr InclusiveStartFrom: false, } - resp, err := client.ListEntries(ctx, request) + stream, err := client.ListEntries(ctx, request) if err != nil { return fmt.Errorf("list buckets: %v", err) } @@ -117,7 +118,18 @@ func (s3a *S3ApiServer) listFilerEntries(ctx context.Context, bucket, originalPr var counter int var lastEntryName string var isTruncated bool - for _, entry := range resp.Entries { + + for { + resp, recvErr := stream.Recv() + if recvErr != nil { + if recvErr == io.EOF { + break + } else { + return recvErr + } + } + + entry := resp.Entry counter++ if counter > maxKeys { isTruncated = true @@ -143,6 +155,7 @@ func (s3a *S3ApiServer) listFilerEntries(ctx context.Context, bucket, originalPr StorageClass: "STANDARD", }) } + } response = ListBucketResult{ diff --git a/weed/s3api/s3api_server.go b/weed/s3api/s3api_server.go index 24458592d..2233c8384 100644 --- a/weed/s3api/s3api_server.go +++ b/weed/s3api/s3api_server.go @@ -1,12 +1,6 @@ package s3api import ( - _ "github.com/chrislusf/seaweedfs/weed/filer2/cassandra" - _ "github.com/chrislusf/seaweedfs/weed/filer2/leveldb" - _ "github.com/chrislusf/seaweedfs/weed/filer2/memdb" - _ "github.com/chrislusf/seaweedfs/weed/filer2/mysql" - _ "github.com/chrislusf/seaweedfs/weed/filer2/postgres" - _ "github.com/chrislusf/seaweedfs/weed/filer2/redis" "github.com/gorilla/mux" "google.golang.org/grpc" "net/http" @@ -50,6 +44,8 @@ func (s3a *S3ApiServer) registerRouter(router *mux.Router) { // HeadBucket 
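With the CopyObject and CopyObjectPart routes registered later in this patch (see s3api_server.go), server-side copy becomes reachable from stock S3 tooling. A hedged sketch using aws-sdk-go; the endpoint, port, bucket names, and path-style setting are assumptions about a local gateway, not part of the patch:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// assumption: a SeaweedFS S3 gateway listening on localhost:8333
	sess := session.Must(session.NewSession(&aws.Config{
		Region:           aws.String("us-east-1"),
		Endpoint:         aws.String("http://localhost:8333"),
		S3ForcePathStyle: aws.Bool(true),
	}))
	svc := s3.New(sess)

	// exercises the CopyObject route:
	// PUT /dstbucket/dstkey with X-Amz-Copy-Source: srcbucket/srckey
	out, err := svc.CopyObject(&s3.CopyObjectInput{
		Bucket:     aws.String("dstbucket"),
		Key:        aws.String("dstkey"),
		CopySource: aws.String("srcbucket/srckey"),
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(aws.StringValue(out.CopyObjectResult.ETag))
}
```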
bucket.Methods("HEAD").HandlerFunc(s3a.HeadBucketHandler) + // CopyObjectPart + bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(s3a.CopyObjectPartHandler).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}") // PutObjectPart bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(s3a.PutObjectPartHandler).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}") // CompleteMultipartUpload @@ -63,6 +59,8 @@ func (s3a *S3ApiServer) registerRouter(router *mux.Router) { // ListMultipartUploads bucket.Methods("GET").HandlerFunc(s3a.ListMultipartUploadsHandler).Queries("uploads", "") + // CopyObject + bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(s3a.CopyObjectHandler) // PutObject bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(s3a.PutObjectHandler) // PutBucket @@ -83,11 +81,6 @@ func (s3a *S3ApiServer) registerRouter(router *mux.Router) { // DeleteMultipleObjects bucket.Methods("POST").HandlerFunc(s3a.DeleteMultipleObjectsHandler).Queries("delete", "") /* - // CopyObject - bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(s3a.CopyObjectHandler) - - // CopyObjectPart - bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(s3a.CopyObjectPartHandler).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}") // not implemented // GetBucketLocation diff --git a/weed/s3api/s3api_test.go b/weed/s3api/s3api_test.go new file mode 100644 index 000000000..026766beb --- /dev/null +++ b/weed/s3api/s3api_test.go @@ -0,0 +1,32 @@ +package s3api + +import ( + "testing" + "time" +) + +func TestCopyObjectResponse(t *testing.T) { + + // https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html + + response := CopyObjectResult{ + ETag: "12345678", + LastModified: time.Now(), + } + + println(string(encodeResponse(response))) + +} + +func TestCopyPartResponse(t *testing.T) { + + // https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html + + response := CopyPartResult{ + ETag: "12345678", + LastModified: time.Now(), + } + + println(string(encodeResponse(response))) + +} diff --git a/weed/security/tls.go b/weed/security/tls.go index e81ba4831..f4f525ede 100644 --- a/weed/security/tls.go +++ b/weed/security/tls.go @@ -22,7 +22,7 @@ func LoadServerTLS(config *viper.Viper, component string) grpc.ServerOption { glog.Errorf("load cert/key error: %v", err) return nil } - caCert, err := ioutil.ReadFile(config.GetString("ca")) + caCert, err := ioutil.ReadFile(config.GetString(component + ".ca")) if err != nil { glog.Errorf("read ca cert file error: %v", err) return nil @@ -49,7 +49,7 @@ func LoadClientTLS(config *viper.Viper, component string) grpc.DialOption { glog.Errorf("load cert/key error: %v", err) return grpc.WithInsecure() } - caCert, err := ioutil.ReadFile(config.GetString("ca")) + caCert, err := ioutil.ReadFile(config.GetString(component + ".ca")) if err != nil { glog.Errorf("read ca cert file error: %v", err) return grpc.WithInsecure() diff --git a/weed/sequence/etcd_sequencer.go b/weed/sequence/etcd_sequencer.go new file mode 100644 index 000000000..1fc378640 --- /dev/null +++ b/weed/sequence/etcd_sequencer.go @@ -0,0 +1,296 @@ +package sequence + +/* +Note : +(1) store the sequence in the ETCD cluster, and local file(sequence.dat) +(2) batch get the sequences from ETCD cluster, and store the 
max sequence id in the local file
+(3) the sequence range is [currentSeqId, maxSeqId); when currentSeqId >= maxSeqId, fetch a new maxSeqId.
+*/
+
+import (
+  "context"
+  "fmt"
+  "sync"
+  "time"
+
+  "io"
+  "os"
+  "strconv"
+  "strings"
+
+  "github.com/chrislusf/seaweedfs/weed/glog"
+  "go.etcd.io/etcd/client"
+)
+
+const (
+  // EtcdKeyPrefix = "/seaweedfs"
+  EtcdKeySequence          = "/master/sequence"
+  EtcdContextTimeoutSecond = 100 * time.Second
+  DefaultEtcdSteps  uint64 = 500 // internal counter
+  SequencerFileName        = "sequencer.dat"
+  FileMaxSequenceLength    = 128
+)
+
+type EtcdSequencer struct {
+  sequenceLock sync.Mutex
+
+  // available sequence range : [currentSeqId, maxSeqId)
+  currentSeqId uint64
+  maxSeqId     uint64
+
+  keysAPI client.KeysAPI
+  seqFile *os.File
+}
+
+func NewEtcdSequencer(etcdUrls string, metaFolder string) (*EtcdSequencer, error) {
+  file, err := openSequenceFile(metaFolder + "/" + SequencerFileName)
+  if nil != err {
+    return nil, fmt.Errorf("open sequence file failed, %v", err)
+  }
+
+  cli, err := client.New(client.Config{
+    Endpoints: strings.Split(etcdUrls, ","),
+    Username:  "",
+    Password:  "",
+  })
+  if err != nil {
+    return nil, err
+  }
+  keysApi := client.NewKeysAPI(cli)
+
+  // TODO: the current sequence id in the local file is not used
+  maxValue, _, err := readSequenceFile(file)
+  if err != nil {
+    return nil, fmt.Errorf("read sequence from file failed, %v", err)
+  }
+  glog.V(4).Infof("read sequence from file : %d", maxValue)
+
+  newSeq, err := setMaxSequenceToEtcd(keysApi, maxValue)
+  if err != nil {
+    return nil, err
+  }
+
+  sequencer := &EtcdSequencer{maxSeqId: newSeq,
+    currentSeqId: newSeq,
+    keysAPI:      keysApi,
+    seqFile:      file,
+  }
+  return sequencer, nil
+}
+
+func (es *EtcdSequencer) NextFileId(count uint64) uint64 {
+  es.sequenceLock.Lock()
+  defer es.sequenceLock.Unlock()
+
+  if (es.currentSeqId + count) >= es.maxSeqId {
+    reqSteps := DefaultEtcdSteps
+    if count > DefaultEtcdSteps {
+      reqSteps += count
+    }
+    maxId, err := batchGetSequenceFromEtcd(es.keysAPI, reqSteps)
+    glog.V(4).Infof("get max sequence id from etcd, %d", maxId)
+    if err != nil {
+      glog.Error(err)
+      return 0
+    }
+    es.currentSeqId, es.maxSeqId = maxId-reqSteps, maxId
+    glog.V(4).Infof("current id : %d, max id : %d", es.currentSeqId, es.maxSeqId)
+
+    if err := writeSequenceFile(es.seqFile, es.maxSeqId, es.currentSeqId); err != nil {
+      glog.Errorf("flush sequence to file failed, %v", err)
+    }
+  }
+
+  ret := es.currentSeqId
+  es.currentSeqId += count
+  return ret
+}
+
+/**
+Instead of collecting the max value from the volume servers,
+the max value is saved in the local file and the ETCD cluster.
+*/
+func (es *EtcdSequencer) SetMax(seenValue uint64) {
+  es.sequenceLock.Lock()
+  defer es.sequenceLock.Unlock()
+  if seenValue > es.maxSeqId {
+    maxId, err := setMaxSequenceToEtcd(es.keysAPI, seenValue)
+    if err != nil {
+      glog.Errorf("set Etcd Max sequence failed : %v", err)
+      return
+    }
+    es.currentSeqId, es.maxSeqId = maxId, maxId
+
+    if err := writeSequenceFile(es.seqFile, maxId, maxId); err != nil {
+      glog.Errorf("flush sequence to file failed, %v", err)
+    }
+  }
+}
+
+func (es *EtcdSequencer) GetMax() uint64 {
+  return es.maxSeqId
+}
+
+func (es *EtcdSequencer) Peek() uint64 {
+  return es.currentSeqId
+}
+
+func batchGetSequenceFromEtcd(kvApi client.KeysAPI, step uint64) (uint64, error) {
+  if step <= 0 {
+    return 0, fmt.Errorf("the step must be at least 1")
+  }
+
+  ctx, cancel := context.WithTimeout(context.Background(), EtcdContextTimeoutSecond)
+  var endSeqValue uint64 = 0
+  defer
cancel()
+  for {
+    getResp, err := kvApi.Get(ctx, EtcdKeySequence, &client.GetOptions{Recursive: false, Quorum: true})
+    if err != nil {
+      return 0, err
+    }
+    if getResp.Node == nil {
+      continue
+    }
+
+    prevValue := getResp.Node.Value
+    prevSeqValue, err := strconv.ParseUint(prevValue, 10, 64)
+    if err != nil {
+      return 0, fmt.Errorf("get sequence from etcd failed, %v", err)
+    }
+    endSeqValue = prevSeqValue + step
+    endSeqStr := strconv.FormatUint(endSeqValue, 10)
+
+    _, err = kvApi.Set(ctx, EtcdKeySequence, endSeqStr, &client.SetOptions{PrevValue: prevValue})
+    if err == nil {
+      break
+    }
+    glog.Error(err)
+  }
+
+  return endSeqValue, nil
+}
+
+/**
+Update the value of the key EtcdKeySequence in the ETCD cluster with the parameter maxSeq.
+When the value of the key EtcdKeySequence is equal to or larger than the parameter maxSeq,
+return the value of EtcdKeySequence in the ETCD cluster;
+when the value of EtcdKeySequence is less than the parameter maxSeq,
+return the value of the parameter maxSeq.
+*/
+func setMaxSequenceToEtcd(kvApi client.KeysAPI, maxSeq uint64) (uint64, error) {
+  maxSeqStr := strconv.FormatUint(maxSeq, 10)
+  ctx, cancel := context.WithTimeout(context.Background(), EtcdContextTimeoutSecond)
+  defer cancel()
+
+  for {
+    getResp, err := kvApi.Get(ctx, EtcdKeySequence, &client.GetOptions{Recursive: false, Quorum: true})
+    if err != nil {
+      if ce, ok := err.(client.Error); ok && (ce.Code == client.ErrorCodeKeyNotFound) {
+        _, err := kvApi.Create(ctx, EtcdKeySequence, maxSeqStr)
+        if err == nil {
+          continue
+        }
+        if ce, ok = err.(client.Error); ok && (ce.Code == client.ErrorCodeNodeExist) {
+          continue
+        }
+        return 0, err
+      } else {
+        return 0, err
+      }
+    }
+
+    if getResp.Node == nil {
+      continue
+    }
+    prevSeqStr := getResp.Node.Value
+    prevSeq, err := strconv.ParseUint(prevSeqStr, 10, 64)
+    if err != nil {
+      return 0, err
+    }
+    if prevSeq >= maxSeq {
+      return prevSeq, nil
+    }
+
+    _, err = kvApi.Set(ctx, EtcdKeySequence, maxSeqStr, &client.SetOptions{PrevValue: prevSeqStr})
+    if err != nil {
+      return 0, err
+    }
+  }
+}
+
+func openSequenceFile(file string) (*os.File, error) {
+  _, err := os.Stat(file)
+  if os.IsNotExist(err) {
+    fid, err := os.OpenFile(file, os.O_RDWR|os.O_CREATE, 0644)
+    if err != nil {
+      return nil, err
+    }
+    if err := writeSequenceFile(fid, 1, 0); err != nil {
+      return nil, err
+    }
+    return fid, nil
+  } else {
+    return os.OpenFile(file, os.O_RDWR|os.O_CREATE, 0644)
+  }
+}
+
+/*
+read the sequence and the step from the sequence file
+*/
+func readSequenceFile(file *os.File) (uint64, uint64, error) {
+  sequence := make([]byte, FileMaxSequenceLength)
+  size, err := file.ReadAt(sequence, 0)
+  if (err != nil) && (err != io.EOF) {
+    err := fmt.Errorf("cannot read file %s, %v", file.Name(), err)
+    return 0, 0, err
+  }
+  sequence = sequence[0:size]
+  seqs := strings.Split(string(sequence), ":")
+  maxId, err := strconv.ParseUint(seqs[0], 10, 64)
+  if err != nil {
+    return 0, 0, fmt.Errorf("parse sequence from file failed, %v", err)
+  }
+
+  if len(seqs) > 1 {
+    step, err := strconv.ParseUint(seqs[1], 10, 64)
+    if err != nil {
+      return 0, 0, fmt.Errorf("parse sequence from file failed, %v", err)
+    }
+    return maxId, step, nil
+  }
+
+  return maxId, 0, nil
+}
+
+/**
+write the sequence to the sequence file
+(the step parameter is currently ignored; the sequence value is written for both fields)
+*/
+func writeSequenceFile(file *os.File, sequence, step uint64) error {
+  _ = step // step is not persisted; the sequence value is written twice
+  seqStr := fmt.Sprintf("%d:%d", sequence, sequence)
+  if _, err := file.Seek(0, 0); err != nil {
+    err = fmt.Errorf("cannot seek to the beginning of %s: %v", file.Name(), err)
+    return err
+
}
+  if err := file.Truncate(0); err != nil {
+    return fmt.Errorf("truncate sequence file failed : %v", err)
+  }
+  if _, err := file.WriteString(seqStr); err != nil {
+    return fmt.Errorf("write file %s failed, %v", file.Name(), err)
+  }
+  if err := file.Sync(); err != nil {
+    return fmt.Errorf("flush file %s failed, %v", file.Name(), err)
+  }
+  return nil
+}
+
+// the UT helper method
+// func deleteEtcdKey(kvApi client.KeysAPI, key string) error {
+//   ctx, cancel := context.WithTimeout(context.Background(), EtcdContextTimeoutSecond)
+//   defer cancel()
+//   _, err := kvApi.Delete(ctx, key, &client.DeleteOptions{Dir: false})
+//   if err != nil {
+//     return err
+//   }
+//   return nil
+// }
diff --git a/weed/sequence/memory_sequencer.go b/weed/sequence/memory_sequencer.go
index d727dc723..e20c29cc7 100644
--- a/weed/sequence/memory_sequencer.go
+++ b/weed/sequence/memory_sequencer.go
@@ -15,12 +15,12 @@ func NewMemorySequencer() (m *MemorySequencer) {
   return
 }
 
-func (m *MemorySequencer) NextFileId(count uint64) (uint64, uint64) {
+func (m *MemorySequencer) NextFileId(count uint64) uint64 {
   m.sequenceLock.Lock()
   defer m.sequenceLock.Unlock()
   ret := m.counter
-  m.counter += uint64(count)
-  return ret, count
+  m.counter += count
+  return ret
 }
 
 func (m *MemorySequencer) SetMax(seenValue uint64) {
diff --git a/weed/sequence/sequence.go b/weed/sequence/sequence.go
index fbdc3b8ef..2258d001b 100644
--- a/weed/sequence/sequence.go
+++ b/weed/sequence/sequence.go
@@ -1,7 +1,7 @@
 package sequence
 
 type Sequencer interface {
-  NextFileId(count uint64) (uint64, uint64)
+  NextFileId(count uint64) uint64
   SetMax(uint64)
   Peek() uint64
 }
diff --git a/weed/server/common.go b/weed/server/common.go
index d50c283f2..31a9a73b8 100644
--- a/weed/server/common.go
+++ b/weed/server/common.go
@@ -11,17 +11,18 @@ import (
   "strings"
   "time"
 
-  "github.com/chrislusf/seaweedfs/weed/storage/needle"
   "google.golang.org/grpc"
 
   "github.com/chrislusf/seaweedfs/weed/glog"
   "github.com/chrislusf/seaweedfs/weed/operation"
   "github.com/chrislusf/seaweedfs/weed/stats"
+  "github.com/chrislusf/seaweedfs/weed/storage/needle"
   "github.com/chrislusf/seaweedfs/weed/util"
 
-  _ "github.com/chrislusf/seaweedfs/weed/statik"
   "github.com/gorilla/mux"
   statik "github.com/rakyll/statik/fs"
+
+  _ "github.com/chrislusf/seaweedfs/weed/statik"
 )
 
 var serverStats *stats.ServerStats
@@ -76,7 +77,8 @@ func writeJson(w http.ResponseWriter, r *http.Request, httpStatus int, obj inter
 // wrapper for writeJson - just logs errors
 func writeJsonQuiet(w http.ResponseWriter, r *http.Request, httpStatus int, obj interface{}) {
   if err := writeJson(w, r, httpStatus, obj); err != nil {
-    glog.V(0).Infof("error writing JSON %+v status %d: %v", obj, httpStatus, err)
+    glog.V(0).Infof("error writing JSON status %d: %v", httpStatus, err)
+    glog.V(1).Infof("JSON content: %+v", obj)
   }
 }
 func writeJsonError(w http.ResponseWriter, r *http.Request, httpStatus int, err error) {
@@ -97,7 +99,7 @@ func submitForClientHandler(w http.ResponseWriter, r *http.Request, masterUrl st
   }
 
   debug("parsing upload file...")
-  fname, data, mimeType, pairMap, isGzipped, originalDataSize, lastModified, _, _, pe := needle.ParseUpload(r)
+  fname, data, mimeType, pairMap, isGzipped, originalDataSize, lastModified, _, _, pe := needle.ParseUpload(r, 256*1024*1024)
   if pe != nil {
     writeJsonError(w, r, http.StatusBadRequest, pe)
     return
@@ -115,6 +117,7 @@ func submitForClientHandler(w http.ResponseWriter, r *http.Request, masterUrl st
   }
   ar := &operation.VolumeAssignRequest{
     Count: count,
+    DataCenter:
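Both sequencer implementations now satisfy the slimmed-down Sequencer contract above: NextFileId(n) returns only the first id of a contiguous block of n ids, i.e. the caller owns the half-open range [ret, ret+n). A reduced sketch of that contract; the type here re-states the patched MemorySequencer purely for self-containment:

```go
package main

import (
	"fmt"
	"sync"
)

// memorySequencer re-states the patched MemorySequencer behavior:
// NextFileId(n) hands out the half-open id range [ret, ret+n).
type memorySequencer struct {
	mu      sync.Mutex
	counter uint64
}

func (m *memorySequencer) NextFileId(count uint64) uint64 {
	m.mu.Lock()
	defer m.mu.Unlock()
	ret := m.counter
	m.counter += count
	return ret
}

func main() {
	seq := &memorySequencer{counter: 1}
	start := seq.NextFileId(3) // allocates ids 1, 2, 3
	fmt.Printf("allocated [%d, %d)\n", start, start+3)
}
```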
r.FormValue("dataCenter"), Replication: r.FormValue("replication"), Collection: r.FormValue("collection"), Ttl: r.FormValue("ttl"), diff --git a/weed/server/filer_grpc_server.go b/weed/server/filer_grpc_server.go index 9c35e8846..03954a58c 100644 --- a/weed/server/filer_grpc_server.go +++ b/weed/server/filer_grpc_server.go @@ -19,7 +19,11 @@ import ( func (fs *FilerServer) LookupDirectoryEntry(ctx context.Context, req *filer_pb.LookupDirectoryEntryRequest) (*filer_pb.LookupDirectoryEntryResponse, error) { entry, err := fs.filer.FindEntry(ctx, filer2.FullPath(filepath.ToSlash(filepath.Join(req.Directory, req.Name)))) + if err == filer2.ErrNotFound { + return &filer_pb.LookupDirectoryEntryResponse{}, nil + } if err != nil { + glog.V(3).Infof("LookupDirectoryEntry %s: %+v, ", filepath.Join(req.Directory, req.Name), err) return nil, err } @@ -29,27 +33,32 @@ func (fs *FilerServer) LookupDirectoryEntry(ctx context.Context, req *filer_pb.L IsDirectory: entry.IsDirectory(), Attributes: filer2.EntryAttributeToPb(entry), Chunks: entry.Chunks, + Extended: entry.Extended, }, }, nil } -func (fs *FilerServer) ListEntries(ctx context.Context, req *filer_pb.ListEntriesRequest) (*filer_pb.ListEntriesResponse, error) { +func (fs *FilerServer) ListEntries(req *filer_pb.ListEntriesRequest, stream filer_pb.SeaweedFiler_ListEntriesServer) error { limit := int(req.Limit) if limit == 0 { limit = fs.option.DirListingLimit } - resp := &filer_pb.ListEntriesResponse{} + paginationLimit := filer2.PaginationSize + if limit < paginationLimit { + paginationLimit = limit + } + lastFileName := req.StartFromFileName includeLastFile := req.InclusiveStartFrom for limit > 0 { - entries, err := fs.filer.ListDirectoryEntries(ctx, filer2.FullPath(req.Directory), lastFileName, includeLastFile, 1024) + entries, err := fs.filer.ListDirectoryEntries(stream.Context(), filer2.FullPath(req.Directory), lastFileName, includeLastFile, paginationLimit) if err != nil { - return nil, err + return err } if len(entries) == 0 { - return resp, nil + return nil } includeLastFile = false @@ -64,22 +73,30 @@ func (fs *FilerServer) ListEntries(ctx context.Context, req *filer_pb.ListEntrie } } - resp.Entries = append(resp.Entries, &filer_pb.Entry{ - Name: entry.Name(), - IsDirectory: entry.IsDirectory(), - Chunks: entry.Chunks, - Attributes: filer2.EntryAttributeToPb(entry), - }) + if err := stream.Send(&filer_pb.ListEntriesResponse{ + Entry: &filer_pb.Entry{ + Name: entry.Name(), + IsDirectory: entry.IsDirectory(), + Chunks: entry.Chunks, + Attributes: filer2.EntryAttributeToPb(entry), + Extended: entry.Extended, + }, + }); err != nil { + return err + } limit-- + if limit == 0 { + return nil + } } - if len(resp.Entries) < 1024 { + if len(entries) < paginationLimit { break } } - return resp, nil + return nil } func (fs *FilerServer) LookupVolume(ctx context.Context, req *filer_pb.LookupVolumeRequest) (*filer_pb.LookupVolumeResponse, error) { @@ -115,24 +132,31 @@ func (fs *FilerServer) LookupVolume(ctx context.Context, req *filer_pb.LookupVol func (fs *FilerServer) CreateEntry(ctx context.Context, req *filer_pb.CreateEntryRequest) (resp *filer_pb.CreateEntryResponse, err error) { + resp = &filer_pb.CreateEntryResponse{} + fullpath := filer2.FullPath(filepath.ToSlash(filepath.Join(req.Directory, req.Entry.Name))) chunks, garbages := filer2.CompactFileChunks(req.Entry.Chunks) if req.Entry.Attributes == nil { - return nil, fmt.Errorf("can not create entry with empty attributes") + glog.V(3).Infof("CreateEntry %s: nil attributes", 
filepath.Join(req.Directory, req.Entry.Name)) + resp.Error = fmt.Sprintf("can not create entry with empty attributes") + return } - err = fs.filer.CreateEntry(ctx, &filer2.Entry{ + createErr := fs.filer.CreateEntry(ctx, &filer2.Entry{ FullPath: fullpath, Attr: filer2.PbToEntryAttribute(req.Entry.Attributes), Chunks: chunks, - }) + }, req.OExcl) - if err == nil { - fs.filer.DeleteChunks(fullpath, garbages) + if createErr == nil { + fs.filer.DeleteChunks(garbages) + } else { + glog.V(3).Infof("CreateEntry %s: %v", filepath.Join(req.Directory, req.Entry.Name), createErr) + resp.Error = createErr.Error() } - return &filer_pb.CreateEntryResponse{}, err + return } func (fs *FilerServer) UpdateEntry(ctx context.Context, req *filer_pb.UpdateEntryRequest) (*filer_pb.UpdateEntryResponse, error) { @@ -151,12 +175,14 @@ func (fs *FilerServer) UpdateEntry(ctx context.Context, req *filer_pb.UpdateEntr newEntry := &filer2.Entry{ FullPath: filer2.FullPath(filepath.ToSlash(filepath.Join(req.Directory, req.Entry.Name))), Attr: entry.Attr, + Extended: req.Entry.Extended, Chunks: chunks, } - glog.V(3).Infof("updating %s: %+v, chunks %d: %v => %+v, chunks %d: %v", + glog.V(3).Infof("updating %s: %+v, chunks %d: %v => %+v, chunks %d: %v, extended: %v => %v", fullpath, entry.Attr, len(entry.Chunks), entry.Chunks, - req.Entry.Attributes, len(req.Entry.Chunks), req.Entry.Chunks) + req.Entry.Attributes, len(req.Entry.Chunks), req.Entry.Chunks, + entry.Extended, req.Entry.Extended) if req.Entry.Attributes != nil { if req.Entry.Attributes.Mtime != 0 { @@ -178,8 +204,10 @@ func (fs *FilerServer) UpdateEntry(ctx context.Context, req *filer_pb.UpdateEntr } if err = fs.filer.UpdateEntry(ctx, entry, newEntry); err == nil { - fs.filer.DeleteChunks(entry.FullPath, unusedChunks) - fs.filer.DeleteChunks(entry.FullPath, garbages) + fs.filer.DeleteChunks(unusedChunks) + fs.filer.DeleteChunks(garbages) + } else { + glog.V(3).Infof("UpdateEntry %s: %v", filepath.Join(req.Directory, req.Entry.Name), err) } fs.filer.NotifyUpdateEvent(entry, newEntry, true) @@ -224,9 +252,11 @@ func (fs *FilerServer) AssignVolume(ctx context.Context, req *filer_pb.AssignVol } assignResult, err := operation.Assign(fs.filer.GetMaster(), fs.grpcDialOption, assignRequest, altRequest) if err != nil { + glog.V(3).Infof("AssignVolume: %v", err) return nil, fmt.Errorf("assign volume: %v", err) } if assignResult.Error != "" { + glog.V(3).Infof("AssignVolume error: %v", assignResult.Error) return nil, fmt.Errorf("assign volume result: %v", assignResult.Error) } diff --git a/weed/server/filer_grpc_server_rename.go b/weed/server/filer_grpc_server_rename.go index dfa59e7fe..0669a26f1 100644 --- a/weed/server/filer_grpc_server_rename.go +++ b/weed/server/filer_grpc_server_rename.go @@ -107,7 +107,7 @@ func (fs *FilerServer) moveSelfEntry(ctx context.Context, oldParent filer2.FullP Attr: entry.Attr, Chunks: entry.Chunks, } - createErr := fs.filer.CreateEntry(ctx, newEntry) + createErr := fs.filer.CreateEntry(ctx, newEntry, false) if createErr != nil { return createErr } diff --git a/weed/server/filer_server.go b/weed/server/filer_server.go index 2cf26b1bb..72cca1f6f 100644 --- a/weed/server/filer_server.go +++ b/weed/server/filer_server.go @@ -7,18 +7,18 @@ import ( "os" "time" + "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" "github.com/chrislusf/seaweedfs/weed/stats" "github.com/chrislusf/seaweedfs/weed/util" - "google.golang.org/grpc" "github.com/chrislusf/seaweedfs/weed/filer2" _ 
"github.com/chrislusf/seaweedfs/weed/filer2/cassandra" _ "github.com/chrislusf/seaweedfs/weed/filer2/etcd" _ "github.com/chrislusf/seaweedfs/weed/filer2/leveldb" _ "github.com/chrislusf/seaweedfs/weed/filer2/leveldb2" - _ "github.com/chrislusf/seaweedfs/weed/filer2/memdb" _ "github.com/chrislusf/seaweedfs/weed/filer2/mysql" _ "github.com/chrislusf/seaweedfs/weed/filer2/postgres" _ "github.com/chrislusf/seaweedfs/weed/filer2/redis" @@ -31,7 +31,6 @@ import ( _ "github.com/chrislusf/seaweedfs/weed/notification/kafka" _ "github.com/chrislusf/seaweedfs/weed/notification/log" "github.com/chrislusf/seaweedfs/weed/security" - "github.com/spf13/viper" ) type FilerOption struct { @@ -46,6 +45,7 @@ type FilerOption struct { DefaultLevelDbDir string DisableHttp bool Port int + recursiveDelete bool } type FilerServer struct { @@ -59,7 +59,7 @@ func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, option *FilerOption) fs = &FilerServer{ option: option, - grpcDialOption: security.LoadClientTLS(viper.Sub("grpc"), "filer"), + grpcDialOption: security.LoadClientTLS(util.GetViper(), "grpc.filer"), } if len(option.Masters) == 0 { @@ -70,7 +70,7 @@ func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, option *FilerOption) go fs.filer.KeepConnectedToMaster() - v := viper.GetViper() + v := util.GetViper() if !util.LoadConfiguration("filer", false) { v.Set("leveldb2.enabled", true) v.Set("leveldb2.dir", option.DefaultLevelDbDir) @@ -81,9 +81,10 @@ func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, option *FilerOption) } util.LoadConfiguration("notification", false) + fs.option.recursiveDelete = v.GetBool("filer.options.recursive_delete") fs.filer.LoadConfiguration(v) - notification.LoadConfiguration(v.Sub("notification")) + notification.LoadConfiguration(v, "notification.") handleStaticResources(defaultMux) if !option.DisableHttp { @@ -121,8 +122,8 @@ func maybeStartMetrics(fs *FilerServer, option *FilerOption) { } func readFilerConfiguration(grpcDialOption grpc.DialOption, masterGrpcAddress string) (metricsAddress string, metricsIntervalSec int, err error) { - err = operation.WithMasterServerClient(masterGrpcAddress, grpcDialOption, func(masterClient master_pb.SeaweedClient) error { - resp, err := masterClient.GetMasterConfiguration(context.Background(), &master_pb.GetMasterConfigurationRequest{}) + err = operation.WithMasterServerClient(masterGrpcAddress, grpcDialOption, func(ctx context.Context, masterClient master_pb.SeaweedClient) error { + resp, err := masterClient.GetMasterConfiguration(ctx, &master_pb.GetMasterConfigurationRequest{}) if err != nil { return fmt.Errorf("get master %s configuration: %v", masterGrpcAddress, err) } diff --git a/weed/server/filer_server_handlers_write.go b/weed/server/filer_server_handlers_write.go index 5d95a5d7e..4707f1011 100644 --- a/weed/server/filer_server_handlers_write.go +++ b/weed/server/filer_server_handlers_write.go @@ -32,7 +32,7 @@ var ( type FilerPostResult struct { Name string `json:"name,omitempty"` - Size uint32 `json:"size,omitempty"` + Size int64 `json:"size,omitempty"` Error string `json:"error,omitempty"` Fid string `json:"fid,omitempty"` Url string `json:"url,omitempty"` @@ -130,7 +130,7 @@ func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) { // send back post result reply := FilerPostResult{ Name: ret.Name, - Size: ret.Size, + Size: int64(ret.Size), Error: ret.Error, Fid: fileId, Url: urlLocation, @@ -149,6 +149,16 @@ func (fs *FilerServer) updateFilerStore(ctx context.Context, r *http.Request, w 
stats.FilerRequestHistogram.WithLabelValues("postStoreWrite").Observe(time.Since(start).Seconds()) }() + modeStr := r.URL.Query().Get("mode") + if modeStr == "" { + modeStr = "0660" + } + mode, err := strconv.ParseUint(modeStr, 8, 32) + if err != nil { + glog.Errorf("Invalid mode format: %s, use 0660 by default", modeStr) + mode = 0660 + } + path := r.URL.Path if strings.HasSuffix(path, "/") { if ret.Name != "" { @@ -165,7 +175,7 @@ func (fs *FilerServer) updateFilerStore(ctx context.Context, r *http.Request, w Attr: filer2.Attr{ Mtime: time.Now(), Crtime: crTime, - Mode: 0660, + Mode: os.FileMode(mode), Uid: OS_UID, Gid: OS_GID, Replication: replication, @@ -183,8 +193,8 @@ func (fs *FilerServer) updateFilerStore(ctx context.Context, r *http.Request, w entry.Attr.Mime = mime.TypeByExtension(ext) } // glog.V(4).Infof("saving %s => %+v", path, entry) - if dbErr := fs.filer.CreateEntry(ctx, entry); dbErr != nil { - fs.filer.DeleteChunks(entry.FullPath, entry.Chunks) + if dbErr := fs.filer.CreateEntry(ctx, entry, false); dbErr != nil { + fs.filer.DeleteChunks(entry.Chunks) glog.V(0).Infof("failing to write %s to filer server : %v", path, dbErr) writeJsonError(w, r, http.StatusInternalServerError, dbErr) err = dbErr @@ -270,15 +280,26 @@ func (fs *FilerServer) uploadToVolumeServer(r *http.Request, u *url.URL, auth se // curl -X DELETE http://localhost:8888/path/to // curl -X DELETE http://localhost:8888/path/to?recursive=true // curl -X DELETE http://localhost:8888/path/to?recursive=true&ignoreRecursiveError=true +// curl -X DELETE http://localhost:8888/path/to?recursive=true&skipChunkDeletion=true func (fs *FilerServer) DeleteHandler(w http.ResponseWriter, r *http.Request) { isRecursive := r.FormValue("recursive") == "true" + if !isRecursive && fs.option.recursiveDelete { + if r.FormValue("recursive") != "false" { + isRecursive = true + } + } ignoreRecursiveError := r.FormValue("ignoreRecursiveError") == "true" + skipChunkDeletion := r.FormValue("skipChunkDeletion") == "true" - err := fs.filer.DeleteEntryMetaAndData(context.Background(), filer2.FullPath(r.URL.Path), isRecursive, ignoreRecursiveError, true) + err := fs.filer.DeleteEntryMetaAndData(context.Background(), filer2.FullPath(r.URL.Path), isRecursive, ignoreRecursiveError, !skipChunkDeletion) if err != nil { glog.V(1).Infoln("deleting", r.URL.Path, ":", err.Error()) - writeJsonError(w, r, http.StatusInternalServerError, err) + httpStatus := http.StatusInternalServerError + if err == filer2.ErrNotFound { + httpStatus = http.StatusNotFound + } + writeJsonError(w, r, httpStatus, err) return } diff --git a/weed/server/filer_server_handlers_write_autochunk.go b/weed/server/filer_server_handlers_write_autochunk.go index 492b55943..25c0a4b4d 100644 --- a/weed/server/filer_server_handlers_write_autochunk.go +++ b/weed/server/filer_server_handlers_write_autochunk.go @@ -1,10 +1,8 @@ package weed_server import ( - "bytes" "context" "io" - "io/ioutil" "net/http" "path" "strconv" @@ -92,66 +90,47 @@ func (fs *FilerServer) doAutoChunk(ctx context.Context, w http.ResponseWriter, r var fileChunks []*filer_pb.FileChunk - totalBytesRead := int64(0) - tmpBufferSize := int32(1024 * 1024) - tmpBuffer := bytes.NewBuffer(make([]byte, 0, tmpBufferSize)) - chunkBuf := make([]byte, chunkSize+tmpBufferSize, chunkSize+tmpBufferSize) // chunk size plus a little overflow - chunkBufOffset := int32(0) chunkOffset := int64(0) - writtenChunks := 0 - filerResult = &FilerPostResult{ - Name: fileName, - } + for chunkOffset < contentLength { + limitedReader := 
io.LimitReader(part1, int64(chunkSize)) - for totalBytesRead < contentLength { - tmpBuffer.Reset() - bytesRead, readErr := io.CopyN(tmpBuffer, part1, int64(tmpBufferSize)) - readFully := readErr != nil && readErr == io.EOF - tmpBuf := tmpBuffer.Bytes() - bytesToCopy := tmpBuf[0:int(bytesRead)] - - copy(chunkBuf[chunkBufOffset:chunkBufOffset+int32(bytesRead)], bytesToCopy) - chunkBufOffset = chunkBufOffset + int32(bytesRead) - - if chunkBufOffset >= chunkSize || readFully || (chunkBufOffset > 0 && bytesRead == 0) { - writtenChunks = writtenChunks + 1 - fileId, urlLocation, auth, assignErr := fs.assignNewFileInfo(w, r, replication, collection, dataCenter) - if assignErr != nil { - return nil, assignErr - } - - // upload the chunk to the volume server - chunkName := fileName + "_chunk_" + strconv.FormatInt(int64(len(fileChunks)+1), 10) - uploadErr := fs.doUpload(urlLocation, w, r, chunkBuf[0:chunkBufOffset], chunkName, "application/octet-stream", fileId, auth) - if uploadErr != nil { - return nil, uploadErr - } - - // Save to chunk manifest structure - fileChunks = append(fileChunks, - &filer_pb.FileChunk{ - FileId: fileId, - Offset: chunkOffset, - Size: uint64(chunkBufOffset), - Mtime: time.Now().UnixNano(), - }, - ) - - // reset variables for the next chunk - chunkBufOffset = 0 - chunkOffset = totalBytesRead + int64(bytesRead) + // assign one file id for one chunk + fileId, urlLocation, auth, assignErr := fs.assignNewFileInfo(w, r, replication, collection, dataCenter) + if assignErr != nil { + return nil, assignErr } - totalBytesRead = totalBytesRead + int64(bytesRead) + // upload the chunk to the volume server + chunkName := fileName + "_chunk_" + strconv.FormatInt(int64(len(fileChunks)+1), 10) + uploadedSize, uploadErr := fs.doUpload(urlLocation, w, r, limitedReader, chunkName, "", fileId, auth) + if uploadErr != nil { + return nil, uploadErr + } - if bytesRead == 0 || readFully { + // if last chunk exhausted the reader exactly at the border + if uploadedSize == 0 { break } - if readErr != nil { - return nil, readErr + // Save to chunk manifest structure + fileChunks = append(fileChunks, + &filer_pb.FileChunk{ + FileId: fileId, + Offset: chunkOffset, + Size: uint64(uploadedSize), + Mtime: time.Now().UnixNano(), + }, + ) + + glog.V(4).Infof("uploaded %s chunk %d to %s [%d,%d) of %d", fileName, len(fileChunks), fileId, chunkOffset, chunkOffset+int64(uploadedSize), contentLength) + + // if last chunk was not at full chunk size, but already exhausted the reader + if uploadedSize < int64(chunkSize) { + break } + // reset variables for the next chunk + chunkOffset = chunkOffset + int64(uploadedSize) } path := r.URL.Path @@ -176,8 +155,14 @@ func (fs *FilerServer) doAutoChunk(ctx context.Context, w http.ResponseWriter, r }, Chunks: fileChunks, } - if dbErr := fs.filer.CreateEntry(ctx, entry); dbErr != nil { - fs.filer.DeleteChunks(entry.FullPath, entry.Chunks) + + filerResult = &FilerPostResult{ + Name: fileName, + Size: chunkOffset, + } + + if dbErr := fs.filer.CreateEntry(ctx, entry, false); dbErr != nil { + fs.filer.DeleteChunks(entry.Chunks) replyerr = dbErr filerResult.Error = dbErr.Error() glog.V(0).Infof("failing to write %s to filer server : %v", path, dbErr) @@ -188,7 +173,7 @@ func (fs *FilerServer) doAutoChunk(ctx context.Context, w http.ResponseWriter, r } func (fs *FilerServer) doUpload(urlLocation string, w http.ResponseWriter, r *http.Request, - chunkBuf []byte, fileName string, contentType string, fileId string, auth security.EncodedJwt) (err error) { + limitedReader io.Reader, 
fileName string, contentType string, fileId string, auth security.EncodedJwt) (size int64, err error) { stats.FilerRequestCounter.WithLabelValues("postAutoChunkUpload").Inc() start := time.Now() @@ -196,13 +181,9 @@ func (fs *FilerServer) doUpload(urlLocation string, w http.ResponseWriter, r *ht stats.FilerRequestHistogram.WithLabelValues("postAutoChunkUpload").Observe(time.Since(start).Seconds()) }() - ioReader := ioutil.NopCloser(bytes.NewBuffer(chunkBuf)) - uploadResult, uploadError := operation.Upload(urlLocation, fileName, ioReader, false, contentType, nil, auth) - if uploadResult != nil { - glog.V(0).Infoln("Chunk upload result. Name:", uploadResult.Name, "Fid:", fileId, "Size:", uploadResult.Size) - } + uploadResult, uploadError := operation.Upload(urlLocation, fileName, limitedReader, false, contentType, nil, auth) if uploadError != nil { - err = uploadError + return 0, uploadError } - return + return int64(uploadResult.Size), nil } diff --git a/weed/server/filer_ui/breadcrumb.go b/weed/server/filer_ui/breadcrumb.go index 55a1909a8..2f0df7f91 100644 --- a/weed/server/filer_ui/breadcrumb.go +++ b/weed/server/filer_ui/breadcrumb.go @@ -14,10 +14,14 @@ func ToBreadcrumb(fullpath string) (crumbs []Breadcrumb) { parts := strings.Split(fullpath, "/") for i := 0; i < len(parts); i++ { - crumbs = append(crumbs, Breadcrumb{ - Name: parts[i] + "/", + crumb := Breadcrumb{ + Name: parts[i] + " /", Link: "/" + filepath.ToSlash(filepath.Join(parts[0:i+1]...)), - }) + } + if !strings.HasSuffix(crumb.Link, "/") { + crumb.Link += "/" + } + crumbs = append(crumbs, crumb) } return diff --git a/weed/server/filer_ui/templates.go b/weed/server/filer_ui/templates.go index 884798936..e532b27e2 100644 --- a/weed/server/filer_ui/templates.go +++ b/weed/server/filer_ui/templates.go @@ -50,7 +50,7 @@ var StatusTpl = template.Must(template.New("status").Funcs(funcMap).Parse(`
{{ range $entry := .Breadcrumbs }} - + {{ $entry.Name }} {{ end }} @@ -78,20 +78,19 @@ var StatusTpl = template.Must(template.New("status").Funcs(funcMap).Parse(` {{end}} - + {{if $entry.IsDirectory}} {{else}} - {{ $entry.Mime }} + {{ $entry.Mime }}  {{end}} - + {{if $entry.IsDirectory}} {{else}} - {{ $entry.Size | humanizeBytes }} -     + {{ $entry.Size | humanizeBytes }}  {{end}} - + {{ $entry.Timestamp.Format "2006-01-02 15:04" }} diff --git a/weed/server/master_grpc_server.go b/weed/server/master_grpc_server.go index 82a190e39..fcfd98f7b 100644 --- a/weed/server/master_grpc_server.go +++ b/weed/server/master_grpc_server.go @@ -9,6 +9,7 @@ import ( "github.com/chrislusf/raft" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/storage/backend" "github.com/chrislusf/seaweedfs/weed/storage/needle" "github.com/chrislusf/seaweedfs/weed/topology" "google.golang.org/grpc/peer" @@ -76,7 +77,10 @@ func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServ int64(heartbeat.MaxVolumeCount)) glog.V(0).Infof("added volume server %v:%d", heartbeat.GetIp(), heartbeat.GetPort()) if err := stream.Send(&master_pb.HeartbeatResponse{ - VolumeSizeLimit: uint64(ms.option.VolumeSizeLimitMB) * 1024 * 1024, + VolumeSizeLimit: uint64(ms.option.VolumeSizeLimitMB) * 1024 * 1024, + MetricsAddress: ms.option.MetricsAddress, + MetricsIntervalSeconds: uint32(ms.option.MetricsIntervalSec), + StorageBackends: backend.ToPbStorageBackends(), }); err != nil { glog.Warningf("SendHeartbeat.Send volume size to %s:%d %v", dn.Ip, dn.Port, err) return err @@ -164,9 +168,7 @@ func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServ return err } if err := stream.Send(&master_pb.HeartbeatResponse{ - Leader: newLeader, - MetricsAddress: ms.option.MetricsAddress, - MetricsIntervalSeconds: uint32(ms.option.MetricsIntervalSec), + Leader: newLeader, }); err != nil { glog.Warningf("SendHeartbeat.Send response to to %s:%d %v", dn.Ip, dn.Port, err) return err diff --git a/weed/server/master_grpc_server_collection.go b/weed/server/master_grpc_server_collection.go index f8e0785f6..f02b0f242 100644 --- a/weed/server/master_grpc_server_collection.go +++ b/weed/server/master_grpc_server_collection.go @@ -57,8 +57,8 @@ func (ms *MasterServer) doDeleteNormalCollection(collectionName string) error { } for _, server := range collection.ListVolumeServers() { - err := operation.WithVolumeServerClient(server.Url(), ms.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error { - _, deleteErr := client.DeleteCollection(context.Background(), &volume_server_pb.DeleteCollectionRequest{ + err := operation.WithVolumeServerClient(server.Url(), ms.grpcDialOption, func(ctx context.Context, client volume_server_pb.VolumeServerClient) error { + _, deleteErr := client.DeleteCollection(ctx, &volume_server_pb.DeleteCollectionRequest{ Collection: collectionName, }) return deleteErr @@ -77,8 +77,8 @@ func (ms *MasterServer) doDeleteEcCollection(collectionName string) error { listOfEcServers := ms.Topo.ListEcServersByCollection(collectionName) for _, server := range listOfEcServers { - err := operation.WithVolumeServerClient(server, ms.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error { - _, deleteErr := client.DeleteCollection(context.Background(), &volume_server_pb.DeleteCollectionRequest{ + err := operation.WithVolumeServerClient(server, ms.grpcDialOption, func(ctx context.Context, client 
volume_server_pb.VolumeServerClient) error { + _, deleteErr := client.DeleteCollection(ctx, &volume_server_pb.DeleteCollectionRequest{ Collection: collectionName, }) return deleteErr diff --git a/weed/server/master_grpc_server_volume.go b/weed/server/master_grpc_server_volume.go index 0580acf76..856c07890 100644 --- a/weed/server/master_grpc_server_volume.go +++ b/weed/server/master_grpc_server_volume.go @@ -5,10 +5,11 @@ import ( "fmt" "github.com/chrislusf/raft" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" "github.com/chrislusf/seaweedfs/weed/security" - "github.com/chrislusf/seaweedfs/weed/storage" "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" "github.com/chrislusf/seaweedfs/weed/topology" ) @@ -52,7 +53,7 @@ func (ms *MasterServer) Assign(ctx context.Context, req *master_pb.AssignRequest if req.Replication == "" { req.Replication = ms.option.DefaultReplicaPlacement } - replicaPlacement, err := storage.NewReplicaPlacementFromString(req.Replication) + replicaPlacement, err := super_block.NewReplicaPlacementFromString(req.Replication) if err != nil { return nil, err } @@ -78,7 +79,7 @@ func (ms *MasterServer) Assign(ctx context.Context, req *master_pb.AssignRequest } ms.vgLock.Lock() if !ms.Topo.HasWritableVolume(option) { - if _, err = ms.vg.AutomaticGrowByType(option, ms.grpcDialOption, ms.Topo); err != nil { + if _, err = ms.vg.AutomaticGrowByType(option, ms.grpcDialOption, ms.Topo, int(req.WritableVolumeCount)); err != nil { ms.vgLock.Unlock() return nil, fmt.Errorf("Cannot grow volume group! %v", err) } @@ -108,7 +109,7 @@ func (ms *MasterServer) Statistics(ctx context.Context, req *master_pb.Statistic if req.Replication == "" { req.Replication = ms.option.DefaultReplicaPlacement } - replicaPlacement, err := storage.NewReplicaPlacementFromString(req.Replication) + replicaPlacement, err := super_block.NewReplicaPlacementFromString(req.Replication) if err != nil { return nil, err } diff --git a/weed/server/master_server.go b/weed/server/master_server.go index cde583560..b3cc310e6 100644 --- a/weed/server/master_server.go +++ b/weed/server/master_server.go @@ -14,6 +14,9 @@ import ( "time" "github.com/chrislusf/raft" + "github.com/gorilla/mux" + "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" "github.com/chrislusf/seaweedfs/weed/security" @@ -22,9 +25,11 @@ import ( "github.com/chrislusf/seaweedfs/weed/topology" "github.com/chrislusf/seaweedfs/weed/util" "github.com/chrislusf/seaweedfs/weed/wdclient" - "github.com/gorilla/mux" - "github.com/spf13/viper" - "google.golang.org/grpc" +) + +const ( + SequencerType = "master.sequencer.type" + SequencerEtcdUrls = "master.sequencer.sequencer_etcd_urls" ) type MasterOption struct { @@ -64,7 +69,7 @@ type MasterServer struct { func NewMasterServer(r *mux.Router, option *MasterOption, peers []string) *MasterServer { - v := viper.GetViper() + v := util.GetViper() signingKey := v.GetString("jwt.signing.key") v.SetDefault("jwt.signing.expires_after_seconds", 10) expiresAfterSec := v.GetInt("jwt.signing.expires_after_seconds") @@ -78,7 +83,7 @@ func NewMasterServer(r *mux.Router, option *MasterOption, peers []string) *Maste preallocateSize = int64(option.VolumeSizeLimitMB) * (1 << 20) } - grpcDialOption := security.LoadClientTLS(v.Sub("grpc"), "master") + grpcDialOption := security.LoadClientTLS(v, "grpc.master") ms := &MasterServer{ option: option, preallocateSize: preallocateSize, @@ -87,7 +92,11 @@ func 
NewMasterServer(r *mux.Router, option *MasterOption, peers []string) *Maste
   MasterClient: wdclient.NewMasterClient(context.Background(), grpcDialOption, "master", peers),
 }
 ms.bounedLeaderChan = make(chan int, 16)
-  seq := sequence.NewMemorySequencer()
+
+  seq := ms.createSequencer(option)
+  if nil == seq {
+    glog.Fatalf("create sequencer failed.")
+  }
 ms.Topo = topology.NewTopology("topo", seq, uint64(ms.option.VolumeSizeLimitMB)*1024*1024, ms.option.PulseSeconds)
 ms.vg = topology.NewDefaultVolumeGrowth()
 glog.V(0).Infoln("Volume Size Limit is", ms.option.VolumeSizeLimitMB, "MB")
@@ -165,33 +174,41 @@ func (ms *MasterServer) proxyToLeader(f func(w http.ResponseWriter, r *http.Requ
     proxy.Transport = util.Transport
     proxy.ServeHTTP(w, r)
   } else {
-    //drop it to the floor
-    //writeJsonError(w, r, errors.New(ms.Topo.RaftServer.Name()+" does not know Leader yet:"+ms.Topo.RaftServer.Leader()))
+    // drop it to the floor
+    // writeJsonError(w, r, errors.New(ms.Topo.RaftServer.Name()+" does not know Leader yet:"+ms.Topo.RaftServer.Leader()))
   }
 }
 }
 
 func (ms *MasterServer) startAdminScripts() {
-  v := viper.GetViper()
-  adminScripts := v.GetString("master.maintenance.scripts")
-  v.SetDefault("master.maintenance.sleep_minutes", 17)
-  sleepMinutes := v.GetInt("master.maintenance.sleep_minutes")
+  var err error
+  v := util.GetViper()
+  adminScripts := v.GetString("master.maintenance.scripts")
 
 glog.V(0).Infof("adminScripts:\n%v", adminScripts)
 if adminScripts == "" {
   return
 }
+  v.SetDefault("master.maintenance.sleep_minutes", 17)
+  sleepMinutes := v.GetInt("master.maintenance.sleep_minutes")
+
+  v.SetDefault("master.filer.default_filer_url", "http://localhost:8888/")
+  filerURL := v.GetString("master.filer.default_filer_url")
+
 scriptLines := strings.Split(adminScripts, "\n")
 
 masterAddress := "localhost:" + strconv.Itoa(ms.option.Port)
 
 var shellOptions shell.ShellOptions
-  shellOptions.GrpcDialOption = security.LoadClientTLS(viper.Sub("grpc"), "master")
+  shellOptions.GrpcDialOption = security.LoadClientTLS(v, "grpc.master")
 shellOptions.Masters = &masterAddress
-  shellOptions.FilerHost = "localhost"
-  shellOptions.FilerPort = 8888
-  shellOptions.Directory = "/"
+
+  shellOptions.FilerHost, shellOptions.FilerPort, shellOptions.Directory, err = util.ParseFilerUrl(filerURL)
+  if err != nil {
+    glog.V(0).Infof("failed to parse master.filer.default_filer_url=%s : %v\n", filerURL, err)
+    return
+  }
 
 commandEnv := shell.NewCommandEnv(shellOptions)
@@ -230,3 +247,24 @@
   }
 }()
 }
+
+func (ms *MasterServer) createSequencer(option *MasterOption) sequence.Sequencer {
+  var seq sequence.Sequencer
+  v := util.GetViper()
+  seqType := strings.ToLower(v.GetString(SequencerType))
+  glog.V(1).Infof("[%s] : [%s]", SequencerType, seqType)
+  switch strings.ToLower(seqType) {
+  case "etcd":
+    var err error
+    urls := v.GetString(SequencerEtcdUrls)
+    glog.V(0).Infof("[%s] : [%s]", SequencerEtcdUrls, urls)
+    seq, err = sequence.NewEtcdSequencer(urls, option.MetaFolder)
+    if err != nil {
+      glog.Error(err)
+      seq = nil
+    }
+  default:
+    seq = sequence.NewMemorySequencer()
+  }
+  return seq
+}
diff --git a/weed/server/master_server_handlers.go b/weed/server/master_server_handlers.go
index c10f9a5b7..514d86800 100644
--- a/weed/server/master_server_handlers.go
+++ b/weed/server/master_server_handlers.go
@@ -65,11 +65,17 @@ func (ms *MasterServer) findVolumeLocation(collection, vid string) operation.Loo
 var err error
 if ms.Topo.IsLeader() {
   volumeId, newVolumeIdErr := needle.NewVolumeId(vid)
-
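createSequencer reads its keys through viper, so the sequencer choice lives in master.toml alongside the maintenance settings above. A sketch of the corresponding section; the key names come from the SequencerType and SequencerEtcdUrls constants above, while the values and surrounding entries are illustrative defaults, not taken from the patch:

```toml
[master.maintenance]
scripts = ""
sleep_minutes = 17

[master.filer]
default_filer_url = "http://localhost:8888/"

[master.sequencer]
type = "etcd"   # any other value falls back to the in-memory sequencer
sequencer_etcd_urls = "http://127.0.0.1:2379"
```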
machines := ms.Topo.Lookup(collection, volumeId) - for _, loc := range machines { - locations = append(locations, operation.Location{Url: loc.Url(), PublicUrl: loc.PublicUrl}) + if newVolumeIdErr != nil { + err = fmt.Errorf("Unknown volume id %s", vid) + } else { + machines := ms.Topo.Lookup(collection, volumeId) + for _, loc := range machines { + locations = append(locations, operation.Location{Url: loc.Url(), PublicUrl: loc.PublicUrl}) + } + if locations == nil { + err = fmt.Errorf("volume id %s not found", vid) + } } - err = newVolumeIdErr } else { machines, getVidLocationsErr := ms.MasterClient.GetVidLocations(vid) for _, loc := range machines { @@ -94,6 +100,11 @@ func (ms *MasterServer) dirAssignHandler(w http.ResponseWriter, r *http.Request) requestedCount = 1 } + writableVolumeCount, e := strconv.Atoi(r.FormValue("writableVolumeCount")) + if e != nil { + writableVolumeCount = 0 + } + option, err := ms.getVolumeGrowOption(r) if err != nil { writeJsonQuiet(w, r, http.StatusNotAcceptable, operation.AssignResult{Error: err.Error()}) @@ -108,7 +119,7 @@ func (ms *MasterServer) dirAssignHandler(w http.ResponseWriter, r *http.Request) ms.vgLock.Lock() defer ms.vgLock.Unlock() if !ms.Topo.HasWritableVolume(option) { - if _, err = ms.vg.AutomaticGrowByType(option, ms.grpcDialOption, ms.Topo); err != nil { + if _, err = ms.vg.AutomaticGrowByType(option, ms.grpcDialOption, ms.Topo, writableVolumeCount); err != nil { writeJsonError(w, r, http.StatusInternalServerError, fmt.Errorf("Cannot grow volume group! %v", err)) return diff --git a/weed/server/master_server_handlers_admin.go b/weed/server/master_server_handlers_admin.go index 486bf31f4..44a04cb86 100644 --- a/weed/server/master_server_handlers_admin.go +++ b/weed/server/master_server_handlers_admin.go @@ -10,22 +10,23 @@ import ( "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" - "github.com/chrislusf/seaweedfs/weed/storage" "github.com/chrislusf/seaweedfs/weed/storage/backend/memory_map" "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" "github.com/chrislusf/seaweedfs/weed/topology" "github.com/chrislusf/seaweedfs/weed/util" ) func (ms *MasterServer) collectionDeleteHandler(w http.ResponseWriter, r *http.Request) { - collection, ok := ms.Topo.FindCollection(r.FormValue("collection")) + collectionName := r.FormValue("collection") + collection, ok := ms.Topo.FindCollection(collectionName) if !ok { - writeJsonError(w, r, http.StatusBadRequest, fmt.Errorf("collection %s does not exist", r.FormValue("collection"))) + writeJsonError(w, r, http.StatusBadRequest, fmt.Errorf("collection %s does not exist", collectionName)) return } for _, server := range collection.ListVolumeServers() { - err := operation.WithVolumeServerClient(server.Url(), ms.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error { - _, deleteErr := client.DeleteCollection(context.Background(), &volume_server_pb.DeleteCollectionRequest{ + err := operation.WithVolumeServerClient(server.Url(), ms.grpcDialOption, func(ctx context.Context, client volume_server_pb.VolumeServerClient) error { + _, deleteErr := client.DeleteCollection(ctx, &volume_server_pb.DeleteCollectionRequest{ Collection: collection.Name, }) return deleteErr @@ -35,7 +36,10 @@ func (ms *MasterServer) collectionDeleteHandler(w http.ResponseWriter, r *http.R return } } - ms.Topo.DeleteCollection(r.FormValue("collection")) + 
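// ---------------------------------------------------------------------------
// Aside: dirAssignHandler above now reads an optional writableVolumeCount
// form value and forwards it to AutomaticGrowByType, so a client can ask the
// master to grow more than the default number of volumes when none are
// writable. A sketch of calling the endpoint, assuming a master on the
// default localhost:9333; the JSON fields mirror operation.AssignResult.
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	// count: how many file ids to reserve.
	// writableVolumeCount: how many volumes to create if the master has to
	// grow the topology first (0 keeps the built-in default).
	resp, err := http.Get("http://localhost:9333/dir/assign?count=1&writableVolumeCount=4")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(string(body)) // e.g. {"fid":"3,01637037d6","url":...}
}
// ---------------------------------------------------------------------------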
ms.Topo.DeleteCollection(collectionName) + + w.WriteHeader(http.StatusNoContent) + return } func (ms *MasterServer) dirStatusHandler(w http.ResponseWriter, r *http.Request) { @@ -53,6 +57,7 @@ func (ms *MasterServer) volumeVacuumHandler(w http.ResponseWriter, r *http.Reque gcThreshold, err = strconv.ParseFloat(gcString, 32) if err != nil { glog.V(0).Infof("garbageThreshold %s is not a valid float number: %v", gcString, err) + writeJsonError(w, r, http.StatusNotAcceptable, fmt.Errorf("garbageThreshold %s is not a valid float number", gcString)) return } } @@ -140,7 +145,7 @@ func (ms *MasterServer) getVolumeGrowOption(r *http.Request) (*topology.VolumeGr if replicationString == "" { replicationString = ms.option.DefaultReplicaPlacement } - replicaPlacement, err := storage.NewReplicaPlacementFromString(replicationString) + replicaPlacement, err := super_block.NewReplicaPlacementFromString(replicationString) if err != nil { return nil, err } diff --git a/weed/server/volume_grpc_admin.go b/weed/server/volume_grpc_admin.go index c631d2535..43987b748 100644 --- a/weed/server/volume_grpc_admin.go +++ b/weed/server/volume_grpc_admin.go @@ -2,10 +2,12 @@ package weed_server import ( "context" + "fmt" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" ) func (vs *VolumeServer) DeleteCollection(ctx context.Context, req *volume_server_pb.DeleteCollectionRequest) (*volume_server_pb.DeleteCollectionResponse, error) { @@ -96,6 +98,41 @@ func (vs *VolumeServer) VolumeDelete(ctx context.Context, req *volume_server_pb. } +func (vs *VolumeServer) VolumeConfigure(ctx context.Context, req *volume_server_pb.VolumeConfigureRequest) (*volume_server_pb.VolumeConfigureResponse, error) { + + resp := &volume_server_pb.VolumeConfigureResponse{} + + // check replication format + if _, err := super_block.NewReplicaPlacementFromString(req.Replication); err != nil { + resp.Error = fmt.Sprintf("volume configure replication %v: %v", req, err) + return resp, nil + } + + // unmount + if err := vs.store.UnmountVolume(needle.VolumeId(req.VolumeId)); err != nil { + glog.Errorf("volume configure unmount %v: %v", req, err) + resp.Error = fmt.Sprintf("volume configure unmount %v: %v", req, err) + return resp, nil + } + + // modify the volume info file + if err := vs.store.ConfigureVolume(needle.VolumeId(req.VolumeId), req.Replication); err != nil { + glog.Errorf("volume configure %v: %v", req, err) + resp.Error = fmt.Sprintf("volume configure %v: %v", req, err) + return resp, nil + } + + // mount + if err := vs.store.MountVolume(needle.VolumeId(req.VolumeId)); err != nil { + glog.Errorf("volume configure mount %v: %v", req, err) + resp.Error = fmt.Sprintf("volume configure mount %v: %v", req, err) + return resp, nil + } + + return resp, nil + +} + func (vs *VolumeServer) VolumeMarkReadonly(ctx context.Context, req *volume_server_pb.VolumeMarkReadonlyRequest) (*volume_server_pb.VolumeMarkReadonlyResponse, error) { resp := &volume_server_pb.VolumeMarkReadonlyResponse{} diff --git a/weed/server/volume_grpc_client_to_master.go b/weed/server/volume_grpc_client_to_master.go index 731675b48..dc47c2884 100644 --- a/weed/server/volume_grpc_client_to_master.go +++ b/weed/server/volume_grpc_client_to_master.go @@ -5,15 +5,17 @@ import ( "net" "time" - "github.com/chrislusf/seaweedfs/weed/security" - "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding" - 
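// ---------------------------------------------------------------------------
// Aside: VolumeConfigure above follows an unmount / modify / remount sequence
// so the volume's info file is never rewritten while the volume is serving
// traffic. A minimal sketch of that pattern under a hypothetical store API;
// the real methods are UnmountVolume, ConfigureVolume and MountVolume, and on
// a mid-sequence error the RPC reports it and leaves the volume unmounted.
package main

import "fmt"

type store struct{ mounted map[int]bool }

func (s *store) unmount(vid int) error { s.mounted[vid] = false; return nil }
func (s *store) mount(vid int) error   { s.mounted[vid] = true; return nil }

func (s *store) rewriteVolumeInfo(vid int, replication string) error {
	if s.mounted[vid] {
		return fmt.Errorf("volume %d is still mounted", vid)
	}
	fmt.Printf("volume %d: replication set to %s\n", vid, replication)
	return nil
}

func configure(s *store, vid int, replication string) error {
	if err := s.unmount(vid); err != nil {
		return err
	}
	if err := s.rewriteVolumeInfo(vid, replication); err != nil {
		return err // volume stays unmounted; the caller sees the error
	}
	return s.mount(vid)
}

func main() {
	s := &store{mounted: map[int]bool{7: true}}
	fmt.Println(configure(s, 7, "001")) // <nil>
}
// ---------------------------------------------------------------------------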
"github.com/spf13/viper" "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/storage/backend" + "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding" + + "golang.org/x/net/context" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" "github.com/chrislusf/seaweedfs/weed/util" - "golang.org/x/net/context" ) func (vs *VolumeServer) GetMaster() string { @@ -25,7 +27,7 @@ func (vs *VolumeServer) heartbeat() { vs.store.SetDataCenter(vs.dataCenter) vs.store.SetRack(vs.rack) - grpcDialOption := security.LoadClientTLS(viper.Sub("grpc"), "volume") + grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.volume") var err error var newLeader string @@ -90,6 +92,9 @@ func (vs *VolumeServer) doHeartbeat(ctx context.Context, masterNode, masterGrpcA vs.MetricsAddress = in.GetMetricsAddress() vs.MetricsIntervalSec = int(in.GetMetricsIntervalSeconds()) } + if len(in.StorageBackends) > 0 { + backend.LoadFromPbStorageBackends(in.StorageBackends) + } } }() diff --git a/weed/server/volume_grpc_copy.go b/weed/server/volume_grpc_copy.go index 711a3ebad..6d74f8171 100644 --- a/weed/server/volume_grpc_copy.go +++ b/weed/server/volume_grpc_copy.go @@ -20,7 +20,7 @@ import ( const BufferSizeLimit = 1024 * 1024 * 2 -// VolumeCopy copy the .idx .dat files, and mount the volume +// VolumeCopy copy the .idx .dat .vif files, and mount the volume func (vs *VolumeServer) VolumeCopy(ctx context.Context, req *volume_server_pb.VolumeCopyRequest) (*volume_server_pb.VolumeCopyResponse, error) { v := vs.store.GetVolume(needle.VolumeId(req.VolumeId)) @@ -41,7 +41,7 @@ func (vs *VolumeServer) VolumeCopy(ctx context.Context, req *volume_server_pb.Vo // confirm size and timestamp var volFileInfoResp *volume_server_pb.ReadVolumeFileStatusResponse var volumeFileName, idxFileName, datFileName string - err := operation.WithVolumeServerClient(req.SourceDataNode, vs.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error { + err := operation.WithVolumeServerClient(req.SourceDataNode, vs.grpcDialOption, func(ctx context.Context, client volume_server_pb.VolumeServerClient) error { var err error volFileInfoResp, err = client.ReadVolumeFileStatus(ctx, &volume_server_pb.ReadVolumeFileStatusRequest{ @@ -55,11 +55,15 @@ func (vs *VolumeServer) VolumeCopy(ctx context.Context, req *volume_server_pb.Vo // println("source:", volFileInfoResp.String()) // copy ecx file - if err := vs.doCopyFile(ctx, client, false, req.Collection, req.VolumeId, volFileInfoResp.CompactionRevision, volFileInfoResp.IdxFileSize, volumeFileName, ".idx", false); err != nil { + if err := vs.doCopyFile(ctx, client, false, req.Collection, req.VolumeId, volFileInfoResp.CompactionRevision, volFileInfoResp.IdxFileSize, volumeFileName, ".idx", false, false); err != nil { return err } - if err := vs.doCopyFile(ctx, client, false, req.Collection, req.VolumeId, volFileInfoResp.CompactionRevision, volFileInfoResp.DatFileSize, volumeFileName, ".dat", false); err != nil { + if err := vs.doCopyFile(ctx, client, false, req.Collection, req.VolumeId, volFileInfoResp.CompactionRevision, volFileInfoResp.DatFileSize, volumeFileName, ".dat", false, true); err != nil { + return err + } + + if err := vs.doCopyFile(ctx, client, false, req.Collection, req.VolumeId, volFileInfoResp.CompactionRevision, volFileInfoResp.DatFileSize, volumeFileName, ".vif", false, true); err != nil { return err } @@ -70,12 +74,9 @@ func (vs *VolumeServer) VolumeCopy(ctx context.Context, 
req *volume_server_pb.Vo datFileName = volumeFileName + ".dat" if err != nil && volumeFileName != "" { - if idxFileName != "" { - os.Remove(idxFileName) - } - if datFileName != "" { - os.Remove(datFileName) - } + os.Remove(idxFileName) + os.Remove(datFileName) + os.Remove(volumeFileName + ".vif") return nil, err } @@ -95,15 +96,16 @@ func (vs *VolumeServer) VolumeCopy(ctx context.Context, req *volume_server_pb.Vo } func (vs *VolumeServer) doCopyFile(ctx context.Context, client volume_server_pb.VolumeServerClient, isEcVolume bool, collection string, vid uint32, - compactRevision uint32, stopOffset uint64, baseFileName, ext string, isAppend bool) error { + compactRevision uint32, stopOffset uint64, baseFileName, ext string, isAppend bool, ignoreSourceFileNotFound bool) error { copyFileClient, err := client.CopyFile(ctx, &volume_server_pb.CopyFileRequest{ - VolumeId: vid, - Ext: ext, - CompactionRevision: compactRevision, - StopOffset: stopOffset, - Collection: collection, - IsEcVolume: isEcVolume, + VolumeId: vid, + Ext: ext, + CompactionRevision: compactRevision, + StopOffset: stopOffset, + Collection: collection, + IsEcVolume: isEcVolume, + IgnoreSourceFileNotFound: ignoreSourceFileNotFound, }) if err != nil { return fmt.Errorf("failed to start copying volume %d %s file: %v", vid, ext, err) @@ -213,6 +215,9 @@ func (vs *VolumeServer) CopyFile(req *volume_server_pb.CopyFileRequest, stream v } } if fileName == "" { + if req.IgnoreSourceFileNotFound { + return nil + } return fmt.Errorf("CopyFile not found ec volume id %d", req.VolumeId) } } @@ -221,6 +226,9 @@ func (vs *VolumeServer) CopyFile(req *volume_server_pb.CopyFileRequest, stream v file, err := os.Open(fileName) if err != nil { + if req.IgnoreSourceFileNotFound && err == os.ErrNotExist { + return nil + } return err } defer file.Close() diff --git a/weed/server/volume_grpc_copy_incremental.go b/weed/server/volume_grpc_copy_incremental.go index f56fbeef4..6d6c3daa3 100644 --- a/weed/server/volume_grpc_copy_incremental.go +++ b/weed/server/volume_grpc_copy_incremental.go @@ -4,9 +4,9 @@ import ( "context" "fmt" "io" - "os" "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" + "github.com/chrislusf/seaweedfs/weed/storage/backend" "github.com/chrislusf/seaweedfs/weed/storage/needle" ) @@ -30,7 +30,7 @@ func (vs *VolumeServer) VolumeIncrementalCopy(req *volume_server_pb.VolumeIncrem startOffset := foundOffset.ToAcutalOffset() buf := make([]byte, 1024*1024*2) - return sendFileContent(v.DataFile(), buf, startOffset, int64(stopOffset), stream) + return sendFileContent(v.DataBackend, buf, startOffset, int64(stopOffset), stream) } @@ -47,10 +47,10 @@ func (vs *VolumeServer) VolumeSyncStatus(ctx context.Context, req *volume_server } -func sendFileContent(datFile *os.File, buf []byte, startOffset, stopOffset int64, stream volume_server_pb.VolumeServer_VolumeIncrementalCopyServer) error { +func sendFileContent(datBackend backend.BackendStorageFile, buf []byte, startOffset, stopOffset int64, stream volume_server_pb.VolumeServer_VolumeIncrementalCopyServer) error { var blockSizeLimit = int64(len(buf)) for i := int64(0); i < stopOffset-startOffset; i += blockSizeLimit { - n, readErr := datFile.ReadAt(buf, startOffset+i) + n, readErr := datBackend.ReadAt(buf, startOffset+i) if readErr == nil || readErr == io.EOF { resp := &volume_server_pb.VolumeIncrementalCopyResponse{} resp.FileContent = buf[:int64(n)] diff --git a/weed/server/volume_grpc_erasure_coding.go b/weed/server/volume_grpc_erasure_coding.go index 8140a06f6..256e7c447 100644 --- 
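// ---------------------------------------------------------------------------
// Aside: VolumeCopy above now also fetches the optional .vif file, and
// doCopyFile grew an ignoreSourceFileNotFound flag so copying does not fail
// for volumes created before .vif existed. A sketch of that tolerant-open
// pattern; os.IsNotExist is the robust check here because os.Open wraps the
// error in *os.PathError rather than returning the bare os.ErrNotExist
// sentinel.
package main

import (
	"fmt"
	"os"
)

// openOptional opens a file, treating a missing file as a clean no-op when
// the caller marked it optional.
func openOptional(name string, optional bool) (*os.File, error) {
	f, err := os.Open(name)
	if err != nil {
		if optional && os.IsNotExist(err) {
			return nil, nil // nothing to copy, and that is fine
		}
		return nil, err
	}
	return f, nil
}

func main() {
	f, err := openOptional("/tmp/does-not-exist.vif", true)
	fmt.Println(f, err) // <nil> <nil>
}
// ---------------------------------------------------------------------------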
a/weed/server/volume_grpc_erasure_coding.go +++ b/weed/server/volume_grpc_erasure_coding.go @@ -8,10 +8,12 @@ import ( "math" "os" "path" + "path/filepath" "strings" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb" "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" "github.com/chrislusf/seaweedfs/weed/storage" "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding" @@ -24,7 +26,7 @@ import ( Steps to apply erasure coding to .dat .idx files 0. ensure the volume is readonly -1. client call VolumeEcShardsGenerate to generate the .ecx and .ec01~.ec14 files +1. client call VolumeEcShardsGenerate to generate the .ecx and .ec00 ~ .ec13 files 2. client ask master for possible servers to hold the ec files, at least 4 servers 3. client call VolumeEcShardsCopy on above target servers to copy ec files from the source server 4. target servers report the new ec files to the master @@ -33,7 +35,7 @@ Steps to apply erasure coding to .dat .idx files */ -// VolumeEcShardsGenerate generates the .ecx and .ec01 ~ .ec14 files +// VolumeEcShardsGenerate generates the .ecx and .ec00 ~ .ec13 files func (vs *VolumeServer) VolumeEcShardsGenerate(ctx context.Context, req *volume_server_pb.VolumeEcShardsGenerateRequest) (*volume_server_pb.VolumeEcShardsGenerateResponse, error) { v := vs.store.GetVolume(needle.VolumeId(req.VolumeId)) @@ -47,19 +49,24 @@ func (vs *VolumeServer) VolumeEcShardsGenerate(ctx context.Context, req *volume_ } // write .ecx file - if err := erasure_coding.WriteSortedEcxFile(baseFileName); err != nil { - return nil, fmt.Errorf("WriteSortedEcxFile %s: %v", baseFileName, err) + if err := erasure_coding.WriteSortedFileFromIdx(baseFileName, ".ecx"); err != nil { + return nil, fmt.Errorf("WriteSortedFileFromIdx %s: %v", baseFileName, err) } - // write .ec01 ~ .ec14 files + // write .ec00 ~ .ec13 files if err := erasure_coding.WriteEcFiles(baseFileName); err != nil { return nil, fmt.Errorf("WriteEcFiles %s: %v", baseFileName, err) } + // write .vif files + if err := pb.SaveVolumeInfo(baseFileName+".vif", &volume_server_pb.VolumeInfo{Version: uint32(v.Version())}); err != nil { + return nil, fmt.Errorf("WriteEcFiles %s: %v", baseFileName, err) + } + return &volume_server_pb.VolumeEcShardsGenerateResponse{}, nil } -// VolumeEcShardsRebuild generates the any of the missing .ec01 ~ .ec14 files +// VolumeEcShardsRebuild generates the any of the missing .ec00 ~ .ec13 files func (vs *VolumeServer) VolumeEcShardsRebuild(ctx context.Context, req *volume_server_pb.VolumeEcShardsRebuildRequest) (*volume_server_pb.VolumeEcShardsRebuildResponse, error) { baseFileName := erasure_coding.EcShardBaseFileName(req.Collection, int(req.VolumeId)) @@ -68,7 +75,7 @@ func (vs *VolumeServer) VolumeEcShardsRebuild(ctx context.Context, req *volume_s for _, location := range vs.store.Locations { if util.FileExists(path.Join(location.Directory, baseFileName+".ecx")) { - // write .ec01 ~ .ec14 files + // write .ec00 ~ .ec13 files baseFileName = path.Join(location.Directory, baseFileName) if generatedShardIds, err := erasure_coding.RebuildEcFiles(baseFileName); err != nil { return nil, fmt.Errorf("RebuildEcFiles %s: %v", baseFileName, err) @@ -99,27 +106,36 @@ func (vs *VolumeServer) VolumeEcShardsCopy(ctx context.Context, req *volume_serv baseFileName := storage.VolumeFileName(location.Directory, req.Collection, int(req.VolumeId)) - err := operation.WithVolumeServerClient(req.SourceDataNode, vs.grpcDialOption, func(client 
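// ---------------------------------------------------------------------------
// Aside: the .ec00 ~ .ec13 naming reflects the 10.4 Reed-Solomon layout, ten
// data shards plus four parity shards, so any ten of the fourteen files can
// rebuild the volume. A minimal sketch with the github.com/klauspost/reedsolomon
// library that SeaweedFS's erasure_coding package builds on; WriteEcFiles
// additionally slices the volume into large blocks rather than encoding it in
// one piece, so treat this as the shape of the math, not the on-disk format.
package main

import (
	"fmt"

	"github.com/klauspost/reedsolomon"
)

func main() {
	enc, err := reedsolomon.New(10, 4) // 10 data shards, 4 parity shards
	if err != nil {
		panic(err)
	}
	data := make([]byte, 10*1024) // stand-in for one block of the .dat file
	shards, err := enc.Split(data)
	if err != nil {
		panic(err)
	}
	if err := enc.Encode(shards); err != nil { // fills shards[10..13]
		panic(err)
	}
	ok, _ := enc.Verify(shards)
	fmt.Println("14 shards, parity verified:", ok)
}
// ---------------------------------------------------------------------------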
volume_server_pb.VolumeServerClient) error { + err := operation.WithVolumeServerClient(req.SourceDataNode, vs.grpcDialOption, func(ctx context.Context, client volume_server_pb.VolumeServerClient) error { // copy ec data slices for _, shardId := range req.ShardIds { - if err := vs.doCopyFile(ctx, client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, baseFileName, erasure_coding.ToExt(int(shardId)), false); err != nil { + if err := vs.doCopyFile(ctx, client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, baseFileName, erasure_coding.ToExt(int(shardId)), false, false); err != nil { return err } } - if !req.CopyEcxFile { + if req.CopyEcxFile { + + // copy ecx file + if err := vs.doCopyFile(ctx, client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, baseFileName, ".ecx", false, false); err != nil { + return err + } return nil } - // copy ecx file - if err := vs.doCopyFile(ctx, client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, baseFileName, ".ecx", false); err != nil { - return err + if req.CopyEcjFile { + // copy ecj file + if err := vs.doCopyFile(ctx, client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, baseFileName, ".ecj", true, true); err != nil { + return err + } } - // copy ecj file - if err := vs.doCopyFile(ctx, client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, baseFileName, ".ecj", true); err != nil { - return err + if req.CopyVifFile { + // copy vif file + if err := vs.doCopyFile(ctx, client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, baseFileName, ".vif", false, true); err != nil { + return err + } } return nil @@ -137,6 +153,8 @@ func (vs *VolumeServer) VolumeEcShardsDelete(ctx context.Context, req *volume_se baseFilename := erasure_coding.EcShardBaseFileName(req.Collection, int(req.VolumeId)) + glog.V(0).Infof("ec volume %d shard delete %v", req.VolumeId, req.ShardIds) + found := false for _, location := range vs.store.Locations { if util.FileExists(path.Join(location.Directory, baseFilename+".ecx")) { @@ -153,21 +171,27 @@ func (vs *VolumeServer) VolumeEcShardsDelete(ctx context.Context, req *volume_se return nil, nil } - // check whether to delete the ecx file also + // check whether to delete the .ecx and .ecj file also hasEcxFile := false + hasIdxFile := false existingShardCount := 0 + bName := filepath.Base(baseFilename) for _, location := range vs.store.Locations { fileInfos, err := ioutil.ReadDir(location.Directory) if err != nil { continue } for _, fileInfo := range fileInfos { - if fileInfo.Name() == baseFilename+".ecx" { + if fileInfo.Name() == bName+".ecx" || fileInfo.Name() == bName+".ecj" { hasEcxFile = true continue } - if strings.HasPrefix(fileInfo.Name(), baseFilename+".ec") { + if fileInfo.Name() == bName+".idx" { + hasIdxFile = true + continue + } + if strings.HasPrefix(fileInfo.Name(), bName+".ec") { existingShardCount++ } } @@ -181,6 +205,10 @@ func (vs *VolumeServer) VolumeEcShardsDelete(ctx context.Context, req *volume_se return nil, err } } + if !hasIdxFile { + // .vif is used for ec volumes and normal volumes + os.Remove(baseFilename + ".vif") + } return &volume_server_pb.VolumeEcShardsDeleteResponse{}, nil } @@ -252,9 +280,14 @@ func (vs *VolumeServer) VolumeEcShardRead(req *volume_server_pb.VolumeEcShardRea startOffset, bytesToRead := req.Offset, req.Size for bytesToRead > 0 { - bytesread, err := ecShard.ReadAt(buffer, startOffset) + // min of bytesToRead and bufSize + bufferSize := bufSize + if 
bufferSize > bytesToRead { + bufferSize = bytesToRead + } + bytesread, err := ecShard.ReadAt(buffer[0:bufferSize], startOffset) - // println(fileName, "read", bytesread, "bytes, with target", bytesToRead) + // println("read", ecShard.FileName(), "startOffset", startOffset, bytesread, "bytes, with target", bufferSize) if bytesread > 0 { if int64(bytesread) > bytesToRead { @@ -268,6 +301,7 @@ func (vs *VolumeServer) VolumeEcShardRead(req *volume_server_pb.VolumeEcShardRea return err } + startOffset += int64(bytesread) bytesToRead -= int64(bytesread) } @@ -311,3 +345,35 @@ func (vs *VolumeServer) VolumeEcBlobDelete(ctx context.Context, req *volume_serv return resp, nil } + +// VolumeEcShardsToVolume generates the .idx, .dat files from .ecx, .ecj and .ec01 ~ .ec14 files +func (vs *VolumeServer) VolumeEcShardsToVolume(ctx context.Context, req *volume_server_pb.VolumeEcShardsToVolumeRequest) (*volume_server_pb.VolumeEcShardsToVolumeResponse, error) { + + v, found := vs.store.FindEcVolume(needle.VolumeId(req.VolumeId)) + if !found { + return nil, fmt.Errorf("ec volume %d not found", req.VolumeId) + } + baseFileName := v.FileName() + + if v.Collection != req.Collection { + return nil, fmt.Errorf("existing collection:%v unexpected input: %v", v.Collection, req.Collection) + } + + // calculate .dat file size + datFileSize, err := erasure_coding.FindDatFileSize(baseFileName) + if err != nil { + return nil, fmt.Errorf("FindDatFileSize %s: %v", baseFileName, err) + } + + // write .dat file from .ec00 ~ .ec09 files + if err := erasure_coding.WriteDatFile(baseFileName, datFileSize); err != nil { + return nil, fmt.Errorf("WriteEcFiles %s: %v", baseFileName, err) + } + + // write .idx file from .ecx and .ecj files + if err := erasure_coding.WriteIdxFileFromEcIndex(baseFileName); err != nil { + return nil, fmt.Errorf("WriteIdxFileFromEcIndex %s: %v", baseFileName, err) + } + + return &volume_server_pb.VolumeEcShardsToVolumeResponse{}, nil +} diff --git a/weed/server/volume_grpc_tail.go b/weed/server/volume_grpc_tail.go index cb0d320ad..c26d6ed8f 100644 --- a/weed/server/volume_grpc_tail.go +++ b/weed/server/volume_grpc_tail.go @@ -10,6 +10,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" "github.com/chrislusf/seaweedfs/weed/storage" "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" ) func (vs *VolumeServer) VolumeTailSender(req *volume_server_pb.VolumeTailSenderRequest, stream volume_server_pb.VolumeServer_VolumeTailSenderServer) error { @@ -71,7 +72,7 @@ func sendNeedlesSince(stream volume_server_pb.VolumeServer_VolumeTailSenderServe stream: stream, } - err = storage.ScanVolumeFileFrom(v.Version(), v.DataFile(), foundOffset.ToAcutalOffset(), scanner) + err = storage.ScanVolumeFileFrom(v.Version(), v.DataBackend, foundOffset.ToAcutalOffset(), scanner) return scanner.lastProcessedTimestampNs, err @@ -101,7 +102,7 @@ type VolumeFileScanner4Tailing struct { lastProcessedTimestampNs uint64 } -func (scanner *VolumeFileScanner4Tailing) VisitSuperBlock(superBlock storage.SuperBlock) error { +func (scanner *VolumeFileScanner4Tailing) VisitSuperBlock(superBlock super_block.SuperBlock) error { return nil } diff --git a/weed/server/volume_grpc_tier_download.go b/weed/server/volume_grpc_tier_download.go new file mode 100644 index 000000000..7b3982e40 --- /dev/null +++ b/weed/server/volume_grpc_tier_download.go @@ -0,0 +1,85 @@ +package weed_server + +import ( + "fmt" + "time" + + 
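// ---------------------------------------------------------------------------
// Aside: the VolumeEcShardRead loop above fixes two subtle bugs at once: the
// read size is now capped at the bytes still owed to the caller, and
// startOffset advances after every read. A distilled sketch of that loop
// shape over any io.ReaderAt; without the offset advance the old loop kept
// re-reading the same block.
package main

import (
	"bytes"
	"fmt"
	"io"
)

func readRange(r io.ReaderAt, offset, size int64, buf []byte) ([]byte, error) {
	var out []byte
	for size > 0 {
		n := int64(len(buf))
		if n > size { // min(len(buf), remaining bytes)
			n = size
		}
		read, err := r.ReadAt(buf[:n], offset)
		if read > 0 {
			out = append(out, buf[:read]...)
			offset += int64(read) // advance, or we loop on the same block
			size -= int64(read)
		}
		if err == io.EOF {
			break
		}
		if err != nil {
			return out, err
		}
	}
	return out, nil
}

func main() {
	src := bytes.NewReader([]byte("0123456789abcdef"))
	got, _ := readRange(src, 4, 8, make([]byte, 3))
	fmt.Println(string(got)) // 456789ab
}
// ---------------------------------------------------------------------------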
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" + "github.com/chrislusf/seaweedfs/weed/storage/backend" + "github.com/chrislusf/seaweedfs/weed/storage/needle" +) + +// VolumeTierMoveDatFromRemote copy dat file from a remote tier to local volume server +func (vs *VolumeServer) VolumeTierMoveDatFromRemote(req *volume_server_pb.VolumeTierMoveDatFromRemoteRequest, stream volume_server_pb.VolumeServer_VolumeTierMoveDatFromRemoteServer) error { + + // find existing volume + v := vs.store.GetVolume(needle.VolumeId(req.VolumeId)) + if v == nil { + return fmt.Errorf("volume %d not found", req.VolumeId) + } + + // verify the collection + if v.Collection != req.Collection { + return fmt.Errorf("existing collection:%v unexpected input: %v", v.Collection, req.Collection) + } + + // locate the disk file + storageName, storageKey := v.RemoteStorageNameKey() + if storageName == "" || storageKey == "" { + return fmt.Errorf("volume %d is already on local disk", req.VolumeId) + } + + // check whether the local .dat already exists + _, ok := v.DataBackend.(*backend.DiskFile) + if ok { + return fmt.Errorf("volume %d is already on local disk", req.VolumeId) + } + + // check valid storage backend type + backendStorage, found := backend.BackendStorages[storageName] + if !found { + var keys []string + for key := range backend.BackendStorages { + keys = append(keys, key) + } + return fmt.Errorf("remote storage %s not found from suppported: %v", storageName, keys) + } + + startTime := time.Now() + fn := func(progressed int64, percentage float32) error { + now := time.Now() + if now.Sub(startTime) < time.Second { + return nil + } + startTime = now + return stream.Send(&volume_server_pb.VolumeTierMoveDatFromRemoteResponse{ + Processed: progressed, + ProcessedPercentage: percentage, + }) + } + // copy the data file + _, err := backendStorage.DownloadFile(v.FileName()+".dat", storageKey, fn) + if err != nil { + return fmt.Errorf("backend %s copy file %s: %v", storageName, v.FileName()+".dat", err) + } + + if req.KeepRemoteDatFile { + return nil + } + + // remove remote file + if err := backendStorage.DeleteFile(storageKey); err != nil { + return fmt.Errorf("volume %d fail to delete remote file %s: %v", v.Id, storageKey, err) + } + + // forget remote file + v.GetVolumeInfo().Files = v.GetVolumeInfo().Files[1:] + if err := v.SaveVolumeInfo(); err != nil { + return fmt.Errorf("volume %d fail to save remote file info: %v", v.Id, err) + } + + v.DataBackend.Close() + v.DataBackend = nil + + return nil +} diff --git a/weed/server/volume_grpc_tier_upload.go b/weed/server/volume_grpc_tier_upload.go new file mode 100644 index 000000000..c9694df59 --- /dev/null +++ b/weed/server/volume_grpc_tier_upload.go @@ -0,0 +1,100 @@ +package weed_server + +import ( + "fmt" + "os" + "time" + + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" + "github.com/chrislusf/seaweedfs/weed/storage/backend" + "github.com/chrislusf/seaweedfs/weed/storage/needle" +) + +// VolumeTierMoveDatToRemote copy dat file to a remote tier +func (vs *VolumeServer) VolumeTierMoveDatToRemote(req *volume_server_pb.VolumeTierMoveDatToRemoteRequest, stream volume_server_pb.VolumeServer_VolumeTierMoveDatToRemoteServer) error { + + // find existing volume + v := vs.store.GetVolume(needle.VolumeId(req.VolumeId)) + if v == nil { + return fmt.Errorf("volume %d not found", req.VolumeId) + } + + // verify the collection + if v.Collection != req.Collection { + return fmt.Errorf("existing collection:%v unexpected input: %v", v.Collection, req.Collection) + } + 
+ // locate the disk file + diskFile, ok := v.DataBackend.(*backend.DiskFile) + if !ok { + return fmt.Errorf("volume %d is not on local disk", req.VolumeId) + } + + // check valid storage backend type + backendStorage, found := backend.BackendStorages[req.DestinationBackendName] + if !found { + var keys []string + for key := range backend.BackendStorages { + keys = append(keys, key) + } + return fmt.Errorf("destination %s not found, suppported: %v", req.DestinationBackendName, keys) + } + + // check whether the existing backend storage is the same as requested + // if same, skip + backendType, backendId := backend.BackendNameToTypeId(req.DestinationBackendName) + for _, remoteFile := range v.GetVolumeInfo().GetFiles() { + if remoteFile.BackendType == backendType && remoteFile.BackendId == backendId { + return fmt.Errorf("destination %s already exists", req.DestinationBackendName) + } + } + + startTime := time.Now() + fn := func(progressed int64, percentage float32) error { + now := time.Now() + if now.Sub(startTime) < time.Second { + return nil + } + startTime = now + return stream.Send(&volume_server_pb.VolumeTierMoveDatToRemoteResponse{ + Processed: progressed, + ProcessedPercentage: percentage, + }) + } + + // remember the file original source + attributes := make(map[string]string) + attributes["volumeId"] = v.Id.String() + attributes["collection"] = v.Collection + attributes["ext"] = ".dat" + // copy the data file + key, size, err := backendStorage.CopyFile(diskFile.File, attributes, fn) + if err != nil { + return fmt.Errorf("backend %s copy file %s: %v", req.DestinationBackendName, diskFile.Name(), err) + } + + // save the remote file to volume tier info + v.GetVolumeInfo().Files = append(v.GetVolumeInfo().GetFiles(), &volume_server_pb.RemoteFile{ + BackendType: backendType, + BackendId: backendId, + Key: key, + Offset: 0, + FileSize: uint64(size), + ModifiedTime: uint64(time.Now().Unix()), + Extension: ".dat", + }) + + if err := v.SaveVolumeInfo(); err != nil { + return fmt.Errorf("volume %d fail to save remote file info: %v", v.Id, err) + } + + if err := v.LoadRemoteFile(); err != nil { + return fmt.Errorf("volume %d fail to load remote file: %v", v.Id, err) + } + + if !req.KeepLocalDatFile { + os.Remove(v.FileName() + ".dat") + } + + return nil +} diff --git a/weed/server/volume_server.go b/weed/server/volume_server.go index 6cf654738..0fdcf662a 100644 --- a/weed/server/volume_server.go +++ b/weed/server/volume_server.go @@ -4,13 +4,14 @@ import ( "fmt" "net/http" - "github.com/chrislusf/seaweedfs/weed/stats" "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/stats" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/storage" - "github.com/spf13/viper" ) type VolumeServer struct { @@ -29,6 +30,7 @@ type VolumeServer struct { compactionBytePerSecond int64 MetricsAddress string MetricsIntervalSec int + fileSizeLimitBytes int64 } func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string, @@ -41,9 +43,10 @@ func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string, fixJpgOrientation bool, readRedirect bool, compactionMBPerSecond int, + fileSizeLimitMB int, ) *VolumeServer { - v := viper.GetViper() + v := util.GetViper() signingKey := v.GetString("jwt.signing.key") v.SetDefault("jwt.signing.expires_after_seconds", 10) expiresAfterSec := v.GetInt("jwt.signing.expires_after_seconds") @@ -60,8 +63,9 @@ func NewVolumeServer(adminMux, 
publicMux *http.ServeMux, ip string, needleMapKind: needleMapKind, FixJpgOrientation: fixJpgOrientation, ReadRedirect: readRedirect, - grpcDialOption: security.LoadClientTLS(viper.Sub("grpc"), "volume"), + grpcDialOption: security.LoadClientTLS(util.GetViper(), "grpc.volume"), compactionBytePerSecond: int64(compactionMBPerSecond) * 1024 * 1024, + fileSizeLimitBytes: int64(fileSizeLimitMB) * 1024 * 1024, } vs.SeedMasterNodes = masterNodes vs.store = storage.NewStore(vs.grpcDialOption, port, ip, publicUrl, folders, maxCounts, vs.needleMapKind) diff --git a/weed/server/volume_server_handlers_admin.go b/weed/server/volume_server_handlers_admin.go index 25b6582f7..1938a34c4 100644 --- a/weed/server/volume_server_handlers_admin.go +++ b/weed/server/volume_server_handlers_admin.go @@ -12,7 +12,7 @@ import ( func (vs *VolumeServer) statusHandler(w http.ResponseWriter, r *http.Request) { m := make(map[string]interface{}) m["Version"] = util.VERSION - m["Volumes"] = vs.store.Status() + m["Volumes"] = vs.store.VolumeInfos() writeJsonQuiet(w, r, http.StatusOK, m) } diff --git a/weed/server/volume_server_handlers_read.go b/weed/server/volume_server_handlers_read.go index cd11356b9..d89d13a0d 100644 --- a/weed/server/volume_server_handlers_read.go +++ b/weed/server/volume_server_handlers_read.go @@ -54,7 +54,7 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request) return } - glog.V(4).Infoln("volume", volumeId, "reading", n) + // glog.V(4).Infoln("volume", volumeId, "reading", n) hasVolume := vs.store.HasVolume(volumeId) _, hasEcVolume := vs.store.FindEcVolume(volumeId) if !hasVolume && !hasEcVolume { @@ -88,7 +88,7 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request) } else if hasEcVolume { count, err = vs.store.ReadEcShardNeedle(context.Background(), volumeId, n) } - glog.V(4).Infoln("read bytes", count, "error", err) + // glog.V(4).Infoln("read bytes", count, "error", err) if err != nil || count < 0 { glog.V(0).Infof("read %s isNormalVolume %v error: %v", r.URL.Path, hasVolume, err) w.WriteHeader(http.StatusNotFound) diff --git a/weed/server/volume_server_handlers_ui.go b/weed/server/volume_server_handlers_ui.go index 852f0b751..8d35c9c8b 100644 --- a/weed/server/volume_server_handlers_ui.go +++ b/weed/server/volume_server_handlers_ui.go @@ -8,6 +8,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" ui "github.com/chrislusf/seaweedfs/weed/server/volume_server_ui" "github.com/chrislusf/seaweedfs/weed/stats" + "github.com/chrislusf/seaweedfs/weed/storage" "github.com/chrislusf/seaweedfs/weed/util" ) @@ -20,19 +21,30 @@ func (vs *VolumeServer) uiStatusHandler(w http.ResponseWriter, r *http.Request) ds = append(ds, stats.NewDiskStatus(dir)) } } + volumeInfos := vs.store.VolumeInfos() + var normalVolumeInfos, remoteVolumeInfos []*storage.VolumeInfo + for _, vinfo := range volumeInfos { + if vinfo.IsRemote() { + remoteVolumeInfos = append(remoteVolumeInfos, vinfo) + } else { + normalVolumeInfos = append(normalVolumeInfos, vinfo) + } + } args := struct { - Version string - Masters []string - Volumes interface{} - EcVolumes interface{} - DiskStatuses interface{} - Stats interface{} - Counters *stats.ServerStats + Version string + Masters []string + Volumes interface{} + EcVolumes interface{} + RemoteVolumes interface{} + DiskStatuses interface{} + Stats interface{} + Counters *stats.ServerStats }{ util.VERSION, vs.SeedMasterNodes, - vs.store.Status(), + normalVolumeInfos, vs.store.EcVolumes(), + remoteVolumeInfos, ds, infos, 
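// ---------------------------------------------------------------------------
// Aside: the fileSizeLimitMB option threaded through NewVolumeServer above is
// converted once to bytes and handed to CreateNeedleFromRequest, so oversized
// uploads are rejected while the body is read instead of after it has been
// buffered. A sketch of such a guard, assuming the check happens on the
// request body stream; the real needle parsing is more involved.
package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

func readWithLimit(r io.Reader, limitBytes int64) ([]byte, error) {
	// read one byte past the limit so "exactly at" and "over" are distinguishable
	data, err := ioutil.ReadAll(io.LimitReader(r, limitBytes+1))
	if err != nil {
		return nil, err
	}
	if int64(len(data)) > limitBytes {
		return nil, fmt.Errorf("file over the limit of %d bytes", limitBytes)
	}
	return data, nil
}

func main() {
	_, err := readWithLimit(strings.NewReader("0123456789"), 4)
	fmt.Println(err) // file over the limit of 4 bytes
}
// ---------------------------------------------------------------------------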
serverStats, diff --git a/weed/server/volume_server_handlers_write.go b/weed/server/volume_server_handlers_write.go index db8fcb555..cd35255e5 100644 --- a/weed/server/volume_server_handlers_write.go +++ b/weed/server/volume_server_handlers_write.go @@ -43,7 +43,7 @@ func (vs *VolumeServer) PostHandler(w http.ResponseWriter, r *http.Request) { return } - needle, originalSize, ne := needle.CreateNeedleFromRequest(r, vs.FixJpgOrientation) + needle, originalSize, ne := needle.CreateNeedleFromRequest(r, vs.FixJpgOrientation, vs.fileSizeLimitBytes) if ne != nil { writeJsonError(w, r, http.StatusBadRequest, ne) return @@ -51,10 +51,14 @@ func (vs *VolumeServer) PostHandler(w http.ResponseWriter, r *http.Request) { ret := operation.UploadResult{} _, isUnchanged, writeError := topology.ReplicatedWrite(vs.GetMaster(), vs.store, volumeId, needle, r) - httpStatus := http.StatusCreated - if isUnchanged { - httpStatus = http.StatusNotModified + + // http 304 status code does not allow body + if writeError == nil && isUnchanged { + w.WriteHeader(http.StatusNotModified) + return } + + httpStatus := http.StatusCreated if writeError != nil { httpStatus = http.StatusInternalServerError ret.Error = writeError.Error() diff --git a/weed/server/volume_server_ui/templates.go b/weed/server/volume_server_ui/templates.go index eafc0aaeb..81496b1de 100644 --- a/weed/server/volume_server_ui/templates.go +++ b/weed/server/volume_server_ui/templates.go @@ -107,10 +107,11 @@ var StatusTpl = template.Must(template.New("status").Funcs(funcMap).Parse(` Id Collection - Size + Data Size Files Trash TTL + ReadOnly @@ -122,6 +123,37 @@ var StatusTpl = template.Must(template.New("status").Funcs(funcMap).Parse(`{{ .FileCount }} {{ .DeleteCount }} / {{.DeletedByteCount}} Bytes {{ .Ttl }} + {{ .ReadOnly }} + + {{ end }} + + +
+  <div class="row">
+    <h2>Remote Volumes</h2>
+    <table class="table table-striped">
+      <thead>
+      <tr>
+        <th>Id</th>
+        <th>Collection</th>
+        <th>Size</th>
+        <th>Files</th>
+        <th>Trash</th>
+        <th>Remote</th>
+        <th>Key</th>
+      </tr>
+      </thead>
+      <tbody>
+      {{ range .RemoteVolumes }}
+      <tr>
+        <td>{{ .Id }}</td>
+        <td>{{ .Collection }}</td>
+        <td>{{ .Size }} Bytes</td>
+        <td>{{ .FileCount }}</td>
+        <td>{{ .DeleteCount }} / {{ .DeletedByteCount }} Bytes</td>
+        <td>{{ .RemoteStorageName }}</td>
+        <td>{{ .RemoteStorageKey }}</td>
+      </tr>
+      {{ end }}
+      </tbody>
+    </table>
+  </div>
diff --git a/weed/server/webdav_server.go b/weed/server/webdav_server.go
index 151b48a78..d75869f30 100644
--- a/weed/server/webdav_server.go
+++ b/weed/server/webdav_server.go
@@ -10,16 +10,16 @@ import (
 	"strings"
 	"time"
 
+	"golang.org/x/net/webdav"
+	"google.golang.org/grpc"
+
 	"github.com/chrislusf/seaweedfs/weed/operation"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/util"
-	"golang.org/x/net/webdav"
-	"google.golang.org/grpc"
 
 	"github.com/chrislusf/seaweedfs/weed/filer2"
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/security"
-	"github.com/spf13/viper"
 )
 
 type WebDavOption struct {
@@ -47,7 +47,7 @@ func NewWebDavServer(option *WebDavOption) (ws *WebDavServer, err error) {
 
 	ws = &WebDavServer{
 		option:         option,
-		grpcDialOption: security.LoadClientTLS(viper.Sub("grpc"), "filer"),
+		grpcDialOption: security.LoadClientTLS(util.GetViper(), "grpc.filer"),
 		Handler: &webdav.Handler{
 			FileSystem: fs,
 			LockSystem: webdav.NewMemLS(),
@@ -96,11 +96,11 @@ func NewWebDavFileSystem(option *WebDavOption) (webdav.FileSystem, error) {
 	}, nil
 }
 
-func (fs *WebDavFileSystem) WithFilerClient(ctx context.Context, fn func(filer_pb.SeaweedFilerClient) error) error {
+func (fs *WebDavFileSystem) WithFilerClient(ctx context.Context, fn func(context.Context, filer_pb.SeaweedFilerClient) error) error {
 
-	return util.WithCachedGrpcClient(ctx, func(grpcConnection *grpc.ClientConn) error {
+	return util.WithCachedGrpcClient(ctx, func(ctx2 context.Context, grpcConnection *grpc.ClientConn) error {
 		client := filer_pb.NewSeaweedFilerClient(grpcConnection)
-		return fn(client)
+		return fn(ctx2, client)
 	}, fs.option.FilerGrpcAddress, fs.option.GrpcDialOption)
 }
 
@@ -135,7 +135,7 @@ func (fs *WebDavFileSystem) Mkdir(ctx context.Context, fullDirPath string, perm
 		return os.ErrExist
 	}
 
-	return fs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
+	return fs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error {
 		dir, name := filer2.FullPath(fullDirPath).DirAndName()
 		request := &filer_pb.CreateEntryRequest{
 			Directory: dir,
@@ -153,7 +153,7 @@ func (fs *WebDavFileSystem) Mkdir(ctx context.Context, fullDirPath string, perm
 		}
 
 		glog.V(1).Infof("mkdir: %v", request)
-		if _, err := client.CreateEntry(ctx, request); err != nil {
+		if err := filer_pb.CreateEntry(ctx, client, request); err != nil {
 			return fmt.Errorf("mkdir %s/%s: %v", dir, name, err)
 		}
 
@@ -163,7 +163,7 @@ func (fs *WebDavFileSystem) Mkdir(ctx context.Context, fullDirPath string, perm
 
 func (fs *WebDavFileSystem) OpenFile(ctx context.Context, fullFilePath string, flag int, perm os.FileMode) (webdav.File, error) {
 
-	glog.V(2).Infof("WebDavFileSystem.OpenFile %v", fullFilePath)
+	glog.V(2).Infof("WebDavFileSystem.OpenFile %v %x", fullFilePath, flag)
 
 	var err error
 	if fullFilePath, err = clearName(fullFilePath); err != nil {
@@ -175,12 +175,6 @@ func (fs *WebDavFileSystem) OpenFile(ctx context.Context, fullFilePath string, f
 	if strings.HasSuffix(fullFilePath, "/") {
 		return nil, os.ErrInvalid
 	}
-	// based directory should be exists.
- dir, _ := path.Split(fullFilePath) - _, err := fs.stat(ctx, dir) - if err != nil { - return nil, os.ErrInvalid - } _, err = fs.stat(ctx, fullFilePath) if err == nil { if flag&os.O_EXCL != 0 { @@ -190,8 +184,8 @@ func (fs *WebDavFileSystem) OpenFile(ctx context.Context, fullFilePath string, f } dir, name := filer2.FullPath(fullFilePath).DirAndName() - err = fs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { - if _, err := client.CreateEntry(ctx, &filer_pb.CreateEntryRequest{ + err = fs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { + if err := filer_pb.CreateEntry(ctx, client, &filer_pb.CreateEntryRequest{ Directory: dir, Entry: &filer_pb.Entry{ Name: name, @@ -255,7 +249,7 @@ func (fs *WebDavFileSystem) removeAll(ctx context.Context, fullFilePath string) //_, err = fs.db.Exec(`delete from filesystem where fullFilePath = ?`, fullFilePath) } dir, name := filer2.FullPath(fullFilePath).DirAndName() - err = fs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + err = fs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { request := &filer_pb.DeleteEntryRequest{ Directory: dir, @@ -314,7 +308,7 @@ func (fs *WebDavFileSystem) Rename(ctx context.Context, oldName, newName string) oldDir, oldBaseName := filer2.FullPath(oldName).DirAndName() newDir, newBaseName := filer2.FullPath(newName).DirAndName() - return fs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + return fs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { request := &filer_pb.AtomicRenameEntryRequest{ OldDirectory: oldDir, @@ -339,8 +333,10 @@ func (fs *WebDavFileSystem) stat(ctx context.Context, fullFilePath string) (os.F return nil, err } + fullpath := filer2.FullPath(fullFilePath) + var fi FileInfo - entry, err := filer2.GetEntry(ctx, fs, fullFilePath) + entry, err := filer2.GetEntry(ctx, fs, fullpath) if entry == nil { return nil, os.ErrNotExist } @@ -348,14 +344,12 @@ func (fs *WebDavFileSystem) stat(ctx context.Context, fullFilePath string) (os.F return nil, err } fi.size = int64(filer2.TotalSize(entry.GetChunks())) - fi.name = fullFilePath + fi.name = string(fullpath) fi.mode = os.FileMode(entry.Attributes.FileMode) fi.modifiledTime = time.Unix(entry.Attributes.Mtime, 0) fi.isDirectory = entry.IsDirectory - _, fi.name = path.Split(path.Clean(fi.name)) - if fi.name == "" { - fi.name = "/" + if fi.name == "/" { fi.modifiledTime = time.Now() fi.isDirectory = true } @@ -376,7 +370,7 @@ func (f *WebDavFile) Write(buf []byte) (int, error) { var err error ctx := context.Background() if f.entry == nil { - f.entry, err = filer2.GetEntry(ctx, f.fs, f.name) + f.entry, err = filer2.GetEntry(ctx, f.fs, filer2.FullPath(f.name)) } if f.entry == nil { @@ -389,7 +383,7 @@ func (f *WebDavFile) Write(buf []byte) (int, error) { var fileId, host string var auth security.EncodedJwt - if err = f.fs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + if err = f.fs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { request := &filer_pb.AssignVolumeRequest{ Count: 1, @@ -412,7 +406,7 @@ func (f *WebDavFile) Write(buf []byte) (int, error) { fileUrl := fmt.Sprintf("http://%s/%s", host, fileId) bufReader := bytes.NewReader(buf) - uploadResult, err := operation.Upload(fileUrl, f.name, bufReader, false, "application/octet-stream", nil, auth) + uploadResult, err := operation.Upload(fileUrl, f.name, bufReader, 
false, "", nil, auth) if err != nil { glog.V(0).Infof("upload data %v to %s: %v", f.name, fileUrl, err) return 0, fmt.Errorf("upload data: %v", err) @@ -433,7 +427,7 @@ func (f *WebDavFile) Write(buf []byte) (int, error) { f.entry.Chunks = append(f.entry.Chunks, chunk) dir, _ := filer2.FullPath(f.name).DirAndName() - err = f.fs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + err = f.fs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { f.entry.Attributes.Mtime = time.Now().Unix() request := &filer_pb.UpdateEntryRequest{ @@ -448,9 +442,11 @@ func (f *WebDavFile) Write(buf []byte) (int, error) { return nil }) - if err != nil { + if err == nil { + glog.V(3).Infof("WebDavFileSystem.Write %v: written [%d,%d)", f.name, f.off, f.off+int64(len(buf))) f.off += int64(len(buf)) } + return len(buf), err } @@ -472,7 +468,7 @@ func (f *WebDavFile) Read(p []byte) (readSize int, err error) { ctx := context.Background() if f.entry == nil { - f.entry, err = filer2.GetEntry(ctx, f.fs, f.name) + f.entry, err = filer2.GetEntry(ctx, f.fs, filer2.FullPath(f.name)) } if f.entry == nil { return 0, err @@ -488,16 +484,19 @@ func (f *WebDavFile) Read(p []byte) (readSize int, err error) { } chunkViews := filer2.ViewFromVisibleIntervals(f.entryViewCache, f.off, len(p)) - totalRead, err := filer2.ReadIntoBuffer(ctx, f.fs, f.name, p, chunkViews, f.off) + totalRead, err := filer2.ReadIntoBuffer(ctx, f.fs, filer2.FullPath(f.name), p, chunkViews, f.off) if err != nil { return 0, err } readSize = int(totalRead) + glog.V(3).Infof("WebDavFileSystem.Read %v: [%d,%d)", f.name, f.off, f.off+totalRead) + f.off += totalRead if readSize == 0 { return 0, io.EOF } + return } @@ -506,12 +505,9 @@ func (f *WebDavFile) Readdir(count int) (ret []os.FileInfo, err error) { glog.V(2).Infof("WebDavFileSystem.Readdir %v count %d", f.name, count) ctx := context.Background() - dir := f.name - if dir != "/" && strings.HasSuffix(dir, "/") { - dir = dir[:len(dir)-1] - } + dir, _ := filer2.FullPath(f.name).DirAndName() - err = filer2.ReadDirAllEntries(ctx, f.fs, dir, func(entry *filer_pb.Entry) { + err = filer2.ReadDirAllEntries(ctx, f.fs, filer2.FullPath(dir), "", func(entry *filer_pb.Entry, isLast bool) { fi := FileInfo{ size: int64(filer2.TotalSize(entry.GetChunks())), name: entry.Name, diff --git a/weed/shell/command_ec_balance.go b/weed/shell/command_ec_balance.go index 47ae7bad3..96599372e 100644 --- a/weed/shell/command_ec_balance.go +++ b/weed/shell/command_ec_balance.go @@ -207,7 +207,7 @@ func doDeduplicateEcShards(ctx context.Context, commandEnv *CommandEnv, collecti if len(ecNodes) <= 1 { continue } - sortEcNodes(ecNodes) + sortEcNodesByFreeslotsAscending(ecNodes) fmt.Printf("ec shard %d.%d has %d copies, keeping %v\n", vid, shardId, len(ecNodes), ecNodes[0].info.Id) if !applyBalancing { continue @@ -266,6 +266,10 @@ func doBalanceEcShardsAcrossRacks(ctx context.Context, commandEnv *CommandEnv, c for shardId, ecNode := range ecShardsToMove { rackId := pickOneRack(racks, rackToShardCount, averageShardsPerEcRack) + if rackId == "" { + fmt.Printf("ec shard %d.%d at %s can not find a destination rack\n", vid, shardId, ecNode.info.Id) + continue + } var possibleDestinationEcNodes []*EcNode for _, n := range racks[rackId].ecNodes { possibleDestinationEcNodes = append(possibleDestinationEcNodes, n) @@ -436,10 +440,9 @@ func doBalanceEcRack(ctx context.Context, commandEnv *CommandEnv, ecRack *EcRack return nil } -func pickOneEcNodeAndMoveOneShard(ctx context.Context, commandEnv 
*CommandEnv, expectedTotalEcShards int, existingLocation *EcNode, collection string, vid needle.VolumeId, shardId erasure_coding.ShardId, possibleDestinationEcNodes []*EcNode, applyBalancing bool) error { +func pickOneEcNodeAndMoveOneShard(ctx context.Context, commandEnv *CommandEnv, averageShardsPerEcNode int, existingLocation *EcNode, collection string, vid needle.VolumeId, shardId erasure_coding.ShardId, possibleDestinationEcNodes []*EcNode, applyBalancing bool) error { - sortEcNodes(possibleDestinationEcNodes) - averageShardsPerEcNode := ceilDivide(expectedTotalEcShards, len(possibleDestinationEcNodes)) + sortEcNodesByFreeslotsDecending(possibleDestinationEcNodes) for _, destEcNode := range possibleDestinationEcNodes { if destEcNode.info.Id == existingLocation.info.Id { diff --git a/weed/shell/command_ec_common.go b/weed/shell/command_ec_common.go index d0fe16a68..e187d5a3b 100644 --- a/weed/shell/command_ec_common.go +++ b/weed/shell/command_ec_common.go @@ -22,7 +22,7 @@ func moveMountedShardToEcNode(ctx context.Context, commandEnv *CommandEnv, exist if applyBalancing { // ask destination node to copy shard and the ecx file from source node, and mount it - copiedShardIds, err = oneServerCopyAndMountEcShardsFromSource(ctx, commandEnv.option.GrpcDialOption, destinationEcNode, uint32(shardId), 1, vid, collection, existingLocation.info.Id) + copiedShardIds, err = oneServerCopyAndMountEcShardsFromSource(ctx, commandEnv.option.GrpcDialOption, destinationEcNode, []uint32{uint32(shardId)}, vid, collection, existingLocation.info.Id) if err != nil { return err } @@ -51,16 +51,12 @@ func moveMountedShardToEcNode(ctx context.Context, commandEnv *CommandEnv, exist } func oneServerCopyAndMountEcShardsFromSource(ctx context.Context, grpcDialOption grpc.DialOption, - targetServer *EcNode, startFromShardId uint32, shardCount int, + targetServer *EcNode, shardIdsToCopy []uint32, volumeId needle.VolumeId, collection string, existingLocation string) (copiedShardIds []uint32, err error) { - var shardIdsToCopy []uint32 - for shardId := startFromShardId; shardId < startFromShardId+uint32(shardCount); shardId++ { - shardIdsToCopy = append(shardIdsToCopy, shardId) - } fmt.Printf("allocate %d.%v %s => %s\n", volumeId, shardIdsToCopy, existingLocation, targetServer.info.Id) - err = operation.WithVolumeServerClient(targetServer.info.Id, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + err = operation.WithVolumeServerClient(targetServer.info.Id, grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error { if targetServer.info.Id != existingLocation { @@ -70,6 +66,8 @@ func oneServerCopyAndMountEcShardsFromSource(ctx context.Context, grpcDialOption Collection: collection, ShardIds: shardIdsToCopy, CopyEcxFile: true, + CopyEcjFile: true, + CopyVifFile: true, SourceDataNode: existingLocation, }) if copyErr != nil { @@ -112,12 +110,18 @@ func eachDataNode(topo *master_pb.TopologyInfo, fn func(dc string, rack RackId, } } -func sortEcNodes(ecNodes []*EcNode) { +func sortEcNodesByFreeslotsDecending(ecNodes []*EcNode) { sort.Slice(ecNodes, func(i, j int) bool { return ecNodes[i].freeEcSlot > ecNodes[j].freeEcSlot }) } +func sortEcNodesByFreeslotsAscending(ecNodes []*EcNode) { + sort.Slice(ecNodes, func(i, j int) bool { + return ecNodes[i].freeEcSlot < ecNodes[j].freeEcSlot + }) +} + type CandidateEcNode struct { ecNode *EcNode shardCount int @@ -156,7 +160,7 @@ func countShards(ecShardInfos []*master_pb.VolumeEcShardInformationMessage) 
(cou } func countFreeShardSlots(dn *master_pb.DataNodeInfo) (count int) { - return int(dn.FreeVolumeCount)*10 - countShards(dn.EcShardInfos) + return int(dn.MaxVolumeCount-dn.ActiveVolumeCount)*erasure_coding.DataShardsCount - countShards(dn.EcShardInfos) } type RackId string @@ -191,18 +195,18 @@ func collectEcNodes(ctx context.Context, commandEnv *CommandEnv, selectedDataCen if selectedDataCenter != "" && selectedDataCenter != dc { return } - if freeEcSlots := countFreeShardSlots(dn); freeEcSlots > 0 { - ecNodes = append(ecNodes, &EcNode{ - info: dn, - dc: dc, - rack: rack, - freeEcSlot: int(freeEcSlots), - }) - totalFreeEcSlots += freeEcSlots - } + + freeEcSlots := countFreeShardSlots(dn) + ecNodes = append(ecNodes, &EcNode{ + info: dn, + dc: dc, + rack: rack, + freeEcSlot: int(freeEcSlots), + }) + totalFreeEcSlots += freeEcSlots }) - sortEcNodes(ecNodes) + sortEcNodesByFreeslotsDecending(ecNodes) return } @@ -212,7 +216,7 @@ func sourceServerDeleteEcShards(ctx context.Context, grpcDialOption grpc.DialOpt fmt.Printf("delete %d.%v from %s\n", volumeId, toBeDeletedShardIds, sourceLocation) - return operation.WithVolumeServerClient(sourceLocation, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + return operation.WithVolumeServerClient(sourceLocation, grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error { _, deleteErr := volumeServerClient.VolumeEcShardsDelete(ctx, &volume_server_pb.VolumeEcShardsDeleteRequest{ VolumeId: uint32(volumeId), Collection: collection, @@ -228,7 +232,7 @@ func unmountEcShards(ctx context.Context, grpcDialOption grpc.DialOption, fmt.Printf("unmount %d.%v from %s\n", volumeId, toBeUnmountedhardIds, sourceLocation) - return operation.WithVolumeServerClient(sourceLocation, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + return operation.WithVolumeServerClient(sourceLocation, grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error { _, deleteErr := volumeServerClient.VolumeEcShardsUnmount(ctx, &volume_server_pb.VolumeEcShardsUnmountRequest{ VolumeId: uint32(volumeId), ShardIds: toBeUnmountedhardIds, @@ -242,7 +246,7 @@ func mountEcShards(ctx context.Context, grpcDialOption grpc.DialOption, fmt.Printf("mount %d.%v on %s\n", volumeId, toBeMountedhardIds, sourceLocation) - return operation.WithVolumeServerClient(sourceLocation, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + return operation.WithVolumeServerClient(sourceLocation, grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error { _, mountErr := volumeServerClient.VolumeEcShardsMount(ctx, &volume_server_pb.VolumeEcShardsMountRequest{ VolumeId: uint32(volumeId), Collection: collection, diff --git a/weed/shell/command_ec_decode.go b/weed/shell/command_ec_decode.go new file mode 100644 index 000000000..8a705a5ae --- /dev/null +++ b/weed/shell/command_ec_decode.go @@ -0,0 +1,265 @@ +package shell + +import ( + "context" + "flag" + "fmt" + "io" + + "google.golang.org/grpc" + + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" + "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding" + "github.com/chrislusf/seaweedfs/weed/storage/needle" +) + +func init() { + Commands = append(Commands, &commandEcDecode{}) +} + +type commandEcDecode struct { +} + 
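// ---------------------------------------------------------------------------
// Aside: countFreeShardSlots above changes the estimate of how many ec shards
// a data node can still take: instead of FreeVolumeCount*10 it now computes
// (MaxVolumeCount - ActiveVolumeCount) * DataShardsCount - existing shards,
// i.e. every free volume slot is assumed to hold one volume's worth of data
// shards. A worked example of the arithmetic, with DataShardsCount = 10 per
// the 10.4 scheme.
package main

import "fmt"

func freeShardSlots(maxVolumes, activeVolumes, existingShards int) int {
	const dataShardsCount = 10 // 10 data shards per volume in 10.4 erasure coding
	return (maxVolumes-activeVolumes)*dataShardsCount - existingShards
}

func main() {
	// 8 volume slots, 5 in use, 12 ec shards already hosted:
	// (8-5)*10 - 12 = 18 more shards fit on this node.
	fmt.Println(freeShardSlots(8, 5, 12)) // 18
}
// ---------------------------------------------------------------------------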
+func (c *commandEcDecode) Name() string {
+	return "ec.decode"
+}
+
+func (c *commandEcDecode) Help() string {
+	return `decode an erasure coded volume into a normal volume
+
+	ec.decode [-collection=""] [-volumeId=<volume_id>]
+
+`
+}
+
+func (c *commandEcDecode) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+
+	decodeCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+	volumeId := decodeCommand.Int("volumeId", 0, "the volume id")
+	collection := decodeCommand.String("collection", "", "the collection name")
+	if err = decodeCommand.Parse(args); err != nil {
+		return nil
+	}
+
+	ctx := context.Background()
+	vid := needle.VolumeId(*volumeId)
+
+	// collect topology information
+	topologyInfo, err := collectTopologyInfo(ctx, commandEnv)
+	if err != nil {
+		return err
+	}
+
+	// volumeId is provided
+	if vid != 0 {
+		return doEcDecode(ctx, commandEnv, topologyInfo, *collection, vid)
+	}
+
+	// apply to all volumes in the collection
+	volumeIds := collectEcShardIds(topologyInfo, *collection)
+	fmt.Printf("ec decode volumes: %v\n", volumeIds)
+	for _, vid := range volumeIds {
+		if err = doEcDecode(ctx, commandEnv, topologyInfo, *collection, vid); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func doEcDecode(ctx context.Context, commandEnv *CommandEnv, topoInfo *master_pb.TopologyInfo, collection string, vid needle.VolumeId) (err error) {
+	// find volume location
+	nodeToEcIndexBits := collectEcNodeShardBits(topoInfo, vid)
+
+	fmt.Printf("ec volume %d shard locations: %+v\n", vid, nodeToEcIndexBits)
+
+	// collect ec shards to the server with most space
+	targetNodeLocation, err := collectEcShards(ctx, commandEnv, nodeToEcIndexBits, collection, vid)
+	if err != nil {
+		return fmt.Errorf("collectEcShards for volume %d: %v", vid, err)
+	}
+
+	// generate a normal volume
+	err = generateNormalVolume(ctx, commandEnv.option.GrpcDialOption, needle.VolumeId(vid), collection, targetNodeLocation)
+	if err != nil {
+		return fmt.Errorf("generate normal volume %d on %s: %v", vid, targetNodeLocation, err)
+	}
+
+	// delete the previous ec shards
+	err = mountVolumeAndDeleteEcShards(ctx, commandEnv.option.GrpcDialOption, collection, targetNodeLocation, nodeToEcIndexBits, vid)
+	if err != nil {
+		return fmt.Errorf("delete ec shards for volume %d: %v", vid, err)
+	}
+
+	return nil
+}
+
+func mountVolumeAndDeleteEcShards(ctx context.Context, grpcDialOption grpc.DialOption, collection, targetNodeLocation string, nodeToEcIndexBits map[string]erasure_coding.ShardBits, vid needle.VolumeId) error {
+
+	// mount volume
+	if err := operation.WithVolumeServerClient(targetNodeLocation, grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error {
+		_, mountErr := volumeServerClient.VolumeMount(ctx, &volume_server_pb.VolumeMountRequest{
+			VolumeId: uint32(vid),
+		})
+		return mountErr
+	}); err != nil {
+		return fmt.Errorf("mountVolumeAndDeleteEcShards mount volume %d on %s: %v", vid, targetNodeLocation, err)
+	}
+
+	// unmount ec shards
+	for location, ecIndexBits := range nodeToEcIndexBits {
+		fmt.Printf("unmount ec volume %d on %s has shards: %+v\n", vid, location, ecIndexBits.ShardIds())
+		err := unmountEcShards(ctx, grpcDialOption, vid, location, ecIndexBits.ToUint32Slice())
+		if err != nil {
+			return fmt.Errorf("mountVolumeAndDeleteEcShards unmount ec volume %d on %s: %v", vid, location, err)
+		}
+	}
+	// delete ec shards
+	for location, ecIndexBits := range nodeToEcIndexBits {
+		fmt.Printf("delete ec volume %d on %s has shards: %+v\n",
vid, location, ecIndexBits.ShardIds()) + err := sourceServerDeleteEcShards(ctx, grpcDialOption, collection, vid, location, ecIndexBits.ToUint32Slice()) + if err != nil { + return fmt.Errorf("mountVolumeAndDeleteEcShards delete ec volume %d on %s: %v", vid, location, err) + } + } + + return nil +} + +func generateNormalVolume(ctx context.Context, grpcDialOption grpc.DialOption, vid needle.VolumeId, collection string, sourceVolumeServer string) error { + + fmt.Printf("generateNormalVolume from ec volume %d on %s\n", vid, sourceVolumeServer) + + err := operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error { + _, genErr := volumeServerClient.VolumeEcShardsToVolume(ctx, &volume_server_pb.VolumeEcShardsToVolumeRequest{ + VolumeId: uint32(vid), + Collection: collection, + }) + return genErr + }) + + return err + +} + +func collectEcShards(ctx context.Context, commandEnv *CommandEnv, nodeToEcIndexBits map[string]erasure_coding.ShardBits, collection string, vid needle.VolumeId) (targetNodeLocation string, err error) { + + maxShardCount := 0 + var existingEcIndexBits erasure_coding.ShardBits + for loc, ecIndexBits := range nodeToEcIndexBits { + toBeCopiedShardCount := ecIndexBits.MinusParityShards().ShardIdCount() + if toBeCopiedShardCount > maxShardCount { + maxShardCount = toBeCopiedShardCount + targetNodeLocation = loc + existingEcIndexBits = ecIndexBits + } + } + + fmt.Printf("collectEcShards: ec volume %d collect shards to %s from: %+v\n", vid, targetNodeLocation, nodeToEcIndexBits) + + var copiedEcIndexBits erasure_coding.ShardBits + for loc, ecIndexBits := range nodeToEcIndexBits { + if loc == targetNodeLocation { + continue + } + + needToCopyEcIndexBits := ecIndexBits.Minus(existingEcIndexBits).MinusParityShards() + if needToCopyEcIndexBits.ShardIdCount() == 0 { + continue + } + + err = operation.WithVolumeServerClient(targetNodeLocation, commandEnv.option.GrpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error { + + fmt.Printf("copy %d.%v %s => %s\n", vid, needToCopyEcIndexBits.ShardIds(), loc, targetNodeLocation) + + _, copyErr := volumeServerClient.VolumeEcShardsCopy(ctx, &volume_server_pb.VolumeEcShardsCopyRequest{ + VolumeId: uint32(vid), + Collection: collection, + ShardIds: needToCopyEcIndexBits.ToUint32Slice(), + CopyEcxFile: false, + CopyEcjFile: true, + CopyVifFile: true, + SourceDataNode: loc, + }) + if copyErr != nil { + return fmt.Errorf("copy %d.%v %s => %s : %v", vid, needToCopyEcIndexBits.ShardIds(), loc, targetNodeLocation, copyErr) + } + + return nil + }) + + if err != nil { + break + } + + copiedEcIndexBits = copiedEcIndexBits.Plus(needToCopyEcIndexBits) + + } + + nodeToEcIndexBits[targetNodeLocation] = existingEcIndexBits.Plus(copiedEcIndexBits) + + return targetNodeLocation, err + +} + +func collectTopologyInfo(ctx context.Context, commandEnv *CommandEnv) (topoInfo *master_pb.TopologyInfo, err error) { + + var resp *master_pb.VolumeListResponse + err = commandEnv.MasterClient.WithClient(ctx, func(client master_pb.SeaweedClient) error { + resp, err = client.VolumeList(ctx, &master_pb.VolumeListRequest{}) + return err + }) + if err != nil { + return + } + + return resp.TopologyInfo, nil + +} + +func collectEcShardInfos(topoInfo *master_pb.TopologyInfo, selectedCollection string, vid needle.VolumeId) (ecShardInfos []*master_pb.VolumeEcShardInformationMessage) { + + eachDataNode(topoInfo, func(dc string, rack RackId, dn
*master_pb.DataNodeInfo) { + for _, v := range dn.EcShardInfos { + if v.Collection == selectedCollection && v.Id == uint32(vid) { + ecShardInfos = append(ecShardInfos, v) + } + } + }) + + return +} + +func collectEcShardIds(topoInfo *master_pb.TopologyInfo, selectedCollection string) (vids []needle.VolumeId) { + + vidMap := make(map[uint32]bool) + eachDataNode(topoInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) { + for _, v := range dn.EcShardInfos { + if v.Collection == selectedCollection { + vidMap[v.Id] = true + } + } + }) + + for vid := range vidMap { + vids = append(vids, needle.VolumeId(vid)) + } + + return +} + +func collectEcNodeShardBits(topoInfo *master_pb.TopologyInfo, vid needle.VolumeId) map[string]erasure_coding.ShardBits { + + nodeToEcIndexBits := make(map[string]erasure_coding.ShardBits) + eachDataNode(topoInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) { + for _, v := range dn.EcShardInfos { + if v.Id == uint32(vid) { + nodeToEcIndexBits[dn.Id] = erasure_coding.ShardBits(v.EcIndexBits) + } + } + }) + + return nodeToEcIndexBits +} diff --git a/weed/shell/command_ec_encode.go b/weed/shell/command_ec_encode.go index f07cb93f9..587b59388 100644 --- a/weed/shell/command_ec_encode.go +++ b/weed/shell/command_ec_encode.go @@ -8,13 +8,14 @@ import ( "sync" "time" + "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding" "github.com/chrislusf/seaweedfs/weed/storage/needle" "github.com/chrislusf/seaweedfs/weed/wdclient" - "google.golang.org/grpc" ) func init() { @@ -92,10 +93,12 @@ func doEcEncode(ctx context.Context, commandEnv *CommandEnv, collection string, return fmt.Errorf("volume %d not found", vid) } + // fmt.Printf("found ec %d shards on %v\n", vid, locations) + // mark the volume as readonly err = markVolumeReadonly(ctx, commandEnv.option.GrpcDialOption, needle.VolumeId(vid), locations) if err != nil { - return fmt.Errorf("generate ec shards for volume %d on %s: %v", vid, locations[0].Url, err) + return fmt.Errorf("mark volume %d as readonly on %s: %v", vid, locations[0].Url, err) } // generate ec shards @@ -117,7 +120,7 @@ func markVolumeReadonly(ctx context.Context, grpcDialOption grpc.DialOption, vol for _, location := range locations { - err := operation.WithVolumeServerClient(location.Url, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + err := operation.WithVolumeServerClient(location.Url, grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error { _, markErr := volumeServerClient.VolumeMarkReadonly(ctx, &volume_server_pb.VolumeMarkReadonlyRequest{ VolumeId: uint32(volumeId), }) @@ -135,7 +138,7 @@ func markVolumeReadonly(ctx context.Context, grpcDialOption grpc.DialOption, vol func generateEcShards(ctx context.Context, grpcDialOption grpc.DialOption, volumeId needle.VolumeId, collection string, sourceVolumeServer string) error { - err := operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + err := operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error { _, genErr := volumeServerClient.VolumeEcShardsGenerate(ctx, &volume_server_pb.VolumeEcShardsGenerateRequest{ VolumeId: 
uint32(volumeId), Collection: collection, @@ -163,10 +166,10 @@ func spreadEcShards(ctx context.Context, commandEnv *CommandEnv, volumeId needle } // calculate how many shards to allocate for these servers - allocated := balancedEcDistribution(allocatedDataNodes) + allocatedEcIds := balancedEcDistribution(allocatedDataNodes) // ask the data nodes to copy from the source volume server - copiedShardIds, err := parallelCopyEcShardsFromSource(ctx, commandEnv.option.GrpcDialOption, allocatedDataNodes, allocated, volumeId, collection, existingLocations[0]) + copiedShardIds, err := parallelCopyEcShardsFromSource(ctx, commandEnv.option.GrpcDialOption, allocatedDataNodes, allocatedEcIds, volumeId, collection, existingLocations[0]) if err != nil { return err } @@ -196,31 +199,29 @@ func spreadEcShards(ctx context.Context, commandEnv *CommandEnv, volumeId needle } func parallelCopyEcShardsFromSource(ctx context.Context, grpcDialOption grpc.DialOption, - targetServers []*EcNode, allocated []int, + targetServers []*EcNode, allocatedEcIds [][]uint32, volumeId needle.VolumeId, collection string, existingLocation wdclient.Location) (actuallyCopied []uint32, err error) { // parallelize shardIdChan := make(chan []uint32, len(targetServers)) var wg sync.WaitGroup - startFromShardId := uint32(0) for i, server := range targetServers { - if allocated[i] <= 0 { + if len(allocatedEcIds[i]) <= 0 { continue } wg.Add(1) - go func(server *EcNode, startFromShardId uint32, shardCount int) { + go func(server *EcNode, allocatedEcShardIds []uint32) { defer wg.Done() copiedShardIds, copyErr := oneServerCopyAndMountEcShardsFromSource(ctx, grpcDialOption, server, - startFromShardId, shardCount, volumeId, collection, existingLocation.Url) + allocatedEcShardIds, volumeId, collection, existingLocation.Url) if copyErr != nil { err = copyErr } else { shardIdChan <- copiedShardIds server.addEcVolumeShards(volumeId, collection, copiedShardIds) } - }(server, startFromShardId, allocated[i]) - startFromShardId += uint32(allocated[i]) + }(server, allocatedEcIds[i]) } wg.Wait() close(shardIdChan) @@ -236,18 +237,18 @@ func parallelCopyEcShardsFromSource(ctx context.Context, grpcDialOption grpc.Dia return } -func balancedEcDistribution(servers []*EcNode) (allocated []int) { - allocated = make([]int, len(servers)) - allocatedCount := 0 - for allocatedCount < erasure_coding.TotalShardsCount { - for i, server := range servers { - if server.freeEcSlot-allocated[i] > 0 { - allocated[i] += 1 - allocatedCount += 1 - } - if allocatedCount >= erasure_coding.TotalShardsCount { - break - } +func balancedEcDistribution(servers []*EcNode) (allocated [][]uint32) { + allocated = make([][]uint32, len(servers)) + allocatedShardIdIndex := uint32(0) + serverIndex := 0 + for allocatedShardIdIndex < erasure_coding.TotalShardsCount { + if servers[serverIndex].freeEcSlot > 0 { + allocated[serverIndex] = append(allocated[serverIndex], allocatedShardIdIndex) + allocatedShardIdIndex++ + } + serverIndex++ + if serverIndex >= len(servers) { + serverIndex = 0 } } @@ -281,7 +282,7 @@ func collectVolumeIdsForEcEncode(ctx context.Context, commandEnv *CommandEnv, se } }) - for vid, _ := range vidMap { + for vid := range vidMap { vids = append(vids, needle.VolumeId(vid)) } diff --git a/weed/shell/command_ec_rebuild.go b/weed/shell/command_ec_rebuild.go index 63b7c4088..600a8cb45 100644 --- a/weed/shell/command_ec_rebuild.go +++ b/weed/shell/command_ec_rebuild.go @@ -111,7 +111,7 @@ func rebuildEcVolumes(commandEnv *CommandEnv, allEcNodes []*EcNode, collection s return 
fmt.Errorf("ec volume %d is unrepairable with %d shards\n", vid, shardCount) } - sortEcNodes(allEcNodes) + sortEcNodesByFreeslotsDecending(allEcNodes) if allEcNodes[0].freeEcSlot < erasure_coding.TotalShardsCount { return fmt.Errorf("disk space is not enough") @@ -170,7 +170,7 @@ func rebuildOneEcVolume(ctx context.Context, commandEnv *CommandEnv, rebuilder * func generateMissingShards(ctx context.Context, grpcDialOption grpc.DialOption, collection string, volumeId needle.VolumeId, sourceLocation string) (rebuiltShardIds []uint32, err error) { - err = operation.WithVolumeServerClient(sourceLocation, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + err = operation.WithVolumeServerClient(sourceLocation, grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error { resp, rebultErr := volumeServerClient.VolumeEcShardsRebuild(ctx, &volume_server_pb.VolumeEcShardsRebuildRequest{ VolumeId: uint32(volumeId), Collection: collection, @@ -209,12 +209,14 @@ func prepareDataToRecover(ctx context.Context, commandEnv *CommandEnv, rebuilder var copyErr error if applyBalancing { - copyErr = operation.WithVolumeServerClient(rebuilder.info.Id, commandEnv.option.GrpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + copyErr = operation.WithVolumeServerClient(rebuilder.info.Id, commandEnv.option.GrpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error { _, copyErr := volumeServerClient.VolumeEcShardsCopy(ctx, &volume_server_pb.VolumeEcShardsCopyRequest{ VolumeId: uint32(volumeId), Collection: collection, ShardIds: []uint32{uint32(shardId)}, CopyEcxFile: needEcxFile, + CopyEcjFile: needEcxFile, + CopyVifFile: needEcxFile, SourceDataNode: ecNodes[0].info.Id, }) return copyErr diff --git a/weed/shell/command_ec_test.go b/weed/shell/command_ec_test.go index 9e578ed28..c233d25d0 100644 --- a/weed/shell/command_ec_test.go +++ b/weed/shell/command_ec_test.go @@ -2,12 +2,25 @@ package shell import ( "context" + "fmt" "testing" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" "github.com/chrislusf/seaweedfs/weed/storage/needle" ) +func TestCommandEcDistribution(t *testing.T) { + + allEcNodes := []*EcNode{ + newEcNode("dc1", "rack1", "dn1", 100), + newEcNode("dc1", "rack2", "dn2", 100), + } + + allocated := balancedEcDistribution(allEcNodes) + + fmt.Printf("allocated: %+v", allocated) +} + func TestCommandEcBalanceSmall(t *testing.T) { allEcNodes := []*EcNode{ diff --git a/weed/shell/command_fs_cat.go b/weed/shell/command_fs_cat.go index 66ced46c5..238dee7f9 100644 --- a/weed/shell/command_fs_cat.go +++ b/weed/shell/command_fs_cat.go @@ -24,12 +24,8 @@ func (c *commandFsCat) Name() string { func (c *commandFsCat) Help() string { return `stream the file content on to the screen - fs.cat /dir/ fs.cat /dir/file_name - fs.cat /dir/file_prefix - fs.cat http://:/dir/ fs.cat http://:/dir/file_name - fs.cat http://:/dir/file_prefix ` } @@ -50,7 +46,7 @@ func (c *commandFsCat) Do(args []string, commandEnv *CommandEnv, writer io.Write dir, name := filer2.FullPath(path).DirAndName() - return commandEnv.withFilerClient(ctx, filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error { + return commandEnv.withFilerClient(ctx, filerServer, filerPort, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { request := &filer_pb.LookupDirectoryEntryRequest{ Name: name, diff --git a/weed/shell/command_fs_du.go b/weed/shell/command_fs_du.go index 
5e634c82a..d6ea51d0c 100644 --- a/weed/shell/command_fs_du.go +++ b/weed/shell/command_fs_du.go @@ -3,11 +3,13 @@ package shell import ( "context" "fmt" + "io" + + "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/util" - "google.golang.org/grpc" - "io" ) func init() { @@ -43,62 +45,11 @@ func (c *commandFsDu) Do(args []string, commandEnv *CommandEnv, writer io.Writer path = path + "/" } + var blockCount, byteCount uint64 dir, name := filer2.FullPath(path).DirAndName() + blockCount, byteCount, err = duTraverseDirectory(ctx, writer, commandEnv.getFilerClient(filerServer, filerPort), dir, name) - return commandEnv.withFilerClient(ctx, filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error { - - _, _, err = paginateDirectory(ctx, writer, client, dir, name, 1000) - - return err - - }) - -} - -func paginateDirectory(ctx context.Context, writer io.Writer, client filer_pb.SeaweedFilerClient, dir, name string, paginateSize int) (blockCount uint64, byteCount uint64, err error) { - - paginatedCount := -1 - startFromFileName := "" - - for paginatedCount == -1 || paginatedCount == paginateSize { - resp, listErr := client.ListEntries(ctx, &filer_pb.ListEntriesRequest{ - Directory: dir, - Prefix: name, - StartFromFileName: startFromFileName, - InclusiveStartFrom: false, - Limit: uint32(paginateSize), - }) - if listErr != nil { - err = listErr - return - } - - paginatedCount = len(resp.Entries) - - for _, entry := range resp.Entries { - if entry.IsDirectory { - subDir := fmt.Sprintf("%s/%s", dir, entry.Name) - if dir == "/" { - subDir = "/" + entry.Name - } - numBlock, numByte, err := paginateDirectory(ctx, writer, client, subDir, "", paginateSize) - if err == nil { - blockCount += numBlock - byteCount += numByte - } - } else { - blockCount += uint64(len(entry.Chunks)) - byteCount += filer2.TotalSize(entry.Chunks) - } - startFromFileName = entry.Name - - if name != "" && !entry.IsDirectory { - fmt.Fprintf(writer, "block:%4d\tbyte:%10d\t%s/%s\n", blockCount, byteCount, dir, name) - } - } - } - - if name == "" { + if name == "" && err == nil { fmt.Fprintf(writer, "block:%4d\tbyte:%10d\t%s\n", blockCount, byteCount, dir) } @@ -106,12 +57,54 @@ func paginateDirectory(ctx context.Context, writer io.Writer, client filer_pb.Se } -func (env *CommandEnv) withFilerClient(ctx context.Context, filerServer string, filerPort int64, fn func(filer_pb.SeaweedFilerClient) error) error { +func duTraverseDirectory(ctx context.Context, writer io.Writer, filerClient filer2.FilerClient, dir, name string) (blockCount uint64, byteCount uint64, err error) { + + err = filer2.ReadDirAllEntries(ctx, filerClient, filer2.FullPath(dir), name, func(entry *filer_pb.Entry, isLast bool) { + if entry.IsDirectory { + subDir := fmt.Sprintf("%s/%s", dir, entry.Name) + if dir == "/" { + subDir = "/" + entry.Name + } + numBlock, numByte, err := duTraverseDirectory(ctx, writer, filerClient, subDir, "") + if err == nil { + blockCount += numBlock + byteCount += numByte + } + } else { + blockCount += uint64(len(entry.Chunks)) + byteCount += filer2.TotalSize(entry.Chunks) + } + + if name != "" && !entry.IsDirectory { + fmt.Fprintf(writer, "block:%4d\tbyte:%10d\t%s/%s\n", blockCount, byteCount, dir, name) + } + }) + return +} + +func (env *CommandEnv) withFilerClient(ctx context.Context, filerServer string, filerPort int64, fn func(context.Context, filer_pb.SeaweedFilerClient) error) error { filerGrpcAddress := 
fmt.Sprintf("%s:%d", filerServer, filerPort+10000) - return util.WithCachedGrpcClient(ctx, func(grpcConnection *grpc.ClientConn) error { + return util.WithCachedGrpcClient(ctx, func(ctx2 context.Context, grpcConnection *grpc.ClientConn) error { client := filer_pb.NewSeaweedFilerClient(grpcConnection) - return fn(client) + return fn(ctx2, client) }, filerGrpcAddress, env.option.GrpcDialOption) } + +type commandFilerClient struct { + env *CommandEnv + filerServer string + filerPort int64 +} + +func (env *CommandEnv) getFilerClient(filerServer string, filerPort int64) *commandFilerClient { + return &commandFilerClient{ + env: env, + filerServer: filerServer, + filerPort: filerPort, + } +} +func (c *commandFilerClient) WithFilerClient(ctx context.Context, fn func(context.Context, filer_pb.SeaweedFilerClient) error) error { + return c.env.withFilerClient(ctx, c.filerServer, c.filerPort, fn) +} diff --git a/weed/shell/command_fs_ls.go b/weed/shell/command_fs_ls.go index 6979635e1..0c63f71fa 100644 --- a/weed/shell/command_fs_ls.go +++ b/weed/shell/command_fs_ls.go @@ -3,13 +3,14 @@ package shell import ( "context" "fmt" - "github.com/chrislusf/seaweedfs/weed/filer2" - "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "io" "os" "os/user" "strconv" "strings" + + "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" ) func init() { @@ -66,83 +67,51 @@ func (c *commandFsLs) Do(args []string, commandEnv *CommandEnv, writer io.Writer } dir, name := filer2.FullPath(path).DirAndName() - - return commandEnv.withFilerClient(ctx, filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error { - - return paginateOneDirectory(ctx, writer, client, dir, name, 1000, isLongFormat, showHidden) - - }) - -} - -func paginateOneDirectory(ctx context.Context, writer io.Writer, client filer_pb.SeaweedFilerClient, dir, name string, paginateSize int, isLongFormat, showHidden bool) (err error) { - entryCount := 0 - paginatedCount := -1 - startFromFileName := "" - for paginatedCount == -1 || paginatedCount == paginateSize { - resp, listErr := client.ListEntries(ctx, &filer_pb.ListEntriesRequest{ - Directory: dir, - Prefix: name, - StartFromFileName: startFromFileName, - InclusiveStartFrom: false, - Limit: uint32(paginateSize), - }) - if listErr != nil { - err = listErr + err = filer2.ReadDirAllEntries(ctx, commandEnv.getFilerClient(filerServer, filerPort), filer2.FullPath(dir), name, func(entry *filer_pb.Entry, isLast bool) { + + if !showHidden && strings.HasPrefix(entry.Name, ".") { return } - paginatedCount = len(resp.Entries) + entryCount++ - for _, entry := range resp.Entries { - - if !showHidden && strings.HasPrefix(entry.Name, ".") { - continue + if isLongFormat { + fileMode := os.FileMode(entry.Attributes.FileMode) + userName, groupNames := entry.Attributes.UserName, entry.Attributes.GroupName + if userName == "" { + if user, userErr := user.LookupId(strconv.Itoa(int(entry.Attributes.Uid))); userErr == nil { + userName = user.Username + } + } + groupName := "" + if len(groupNames) > 0 { + groupName = groupNames[0] + } + if groupName == "" { + if group, groupErr := user.LookupGroupId(strconv.Itoa(int(entry.Attributes.Gid))); groupErr == nil { + groupName = group.Name + } } - entryCount++ - - if isLongFormat { - fileMode := os.FileMode(entry.Attributes.FileMode) - userName, groupNames := entry.Attributes.UserName, entry.Attributes.GroupName - if userName == "" { - if user, userErr := user.LookupId(strconv.Itoa(int(entry.Attributes.Uid))); userErr == nil { - userName 
= user.Username - } - } - groupName := "" - if len(groupNames) > 0 { - groupName = groupNames[0] - } - if groupName == "" { - if group, groupErr := user.LookupGroupId(strconv.Itoa(int(entry.Attributes.Gid))); groupErr == nil { - groupName = group.Name - } - } - - if dir == "/" { - // just for printing - dir = "" - } - fmt.Fprintf(writer, "%s %3d %s %s %6d %s/%s\n", - fileMode, len(entry.Chunks), - userName, groupName, - filer2.TotalSize(entry.Chunks), dir, entry.Name) - } else { - fmt.Fprintf(writer, "%s\n", entry.Name) + if dir == "/" { + // just for printing + dir = "" } - - startFromFileName = entry.Name - + fmt.Fprintf(writer, "%s %3d %s %s %6d %s/%s\n", + fileMode, len(entry.Chunks), + userName, groupName, + filer2.TotalSize(entry.Chunks), dir, entry.Name) + } else { + fmt.Fprintf(writer, "%s\n", entry.Name) } - } - if isLongFormat { + }) + + if isLongFormat && err == nil { fmt.Fprintf(writer, "total %d\n", entryCount) } return - } diff --git a/weed/shell/command_fs_meta_cat.go b/weed/shell/command_fs_meta_cat.go new file mode 100644 index 000000000..9980f67a2 --- /dev/null +++ b/weed/shell/command_fs_meta_cat.go @@ -0,0 +1,75 @@ +package shell + +import ( + "context" + "fmt" + "io" + + "github.com/golang/protobuf/jsonpb" + + "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" +) + +func init() { + Commands = append(Commands, &commandFsMetaCat{}) +} + +type commandFsMetaCat struct { +} + +func (c *commandFsMetaCat) Name() string { + return "fs.meta.cat" +} + +func (c *commandFsMetaCat) Help() string { + return `print out the meta data content for a file or directory + + fs.meta.cat /dir/ + fs.meta.cat /dir/file_name + fs.meta.cat http://<filer_server>:<port>/dir/ + fs.meta.cat http://<filer_server>:<port>/dir/file_name +` +} + +func (c *commandFsMetaCat) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { + + input := findInputDirectory(args) + + filerServer, filerPort, path, err := commandEnv.parseUrl(input) + if err != nil { + return err + } + + ctx := context.Background() + + dir, name := filer2.FullPath(path).DirAndName() + + return commandEnv.withFilerClient(ctx, filerServer, filerPort, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { + + request := &filer_pb.LookupDirectoryEntryRequest{ + Name: name, + Directory: dir, + } + respLookupEntry, err := client.LookupDirectoryEntry(ctx, request) + if err != nil { + return err + } + + m := jsonpb.Marshaler{ + EmitDefaults: true, + Indent: " ", + } + + text, marshalErr := m.MarshalToString(respLookupEntry.Entry) + if marshalErr != nil { + return fmt.Errorf("marshal meta: %v", marshalErr) + } + + fmt.Fprintf(writer, "%s\n", text) + + return nil + + }) + +} diff --git a/weed/shell/command_fs_meta_load.go b/weed/shell/command_fs_meta_load.go index 5ea8de9f5..8f2ef95e3 100644 --- a/weed/shell/command_fs_meta_load.go +++ b/weed/shell/command_fs_meta_load.go @@ -55,7 +55,7 @@ func (c *commandFsMetaLoad) Do(args []string, commandEnv *CommandEnv, writer io. ctx := context.Background() - err = commandEnv.withFilerClient(ctx, filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error { + err = commandEnv.withFilerClient(ctx, filerServer, filerPort, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { sizeBuf := make([]byte, 4) @@ -80,7 +80,7 @@ func (c *commandFsMetaLoad) Do(args []string, commandEnv *CommandEnv, writer io.
return err } - if _, err = client.CreateEntry(ctx, &filer_pb.CreateEntryRequest{ + if err := filer_pb.CreateEntry(ctx, client, &filer_pb.CreateEntryRequest{ Directory: fullEntry.Dir, Entry: fullEntry.Entry, }); err != nil { diff --git a/weed/shell/command_fs_meta_notify.go b/weed/shell/command_fs_meta_notify.go index 13b272fbf..e2b2d22cc 100644 --- a/weed/shell/command_fs_meta_notify.go +++ b/weed/shell/command_fs_meta_notify.go @@ -9,7 +9,6 @@ import ( "github.com/chrislusf/seaweedfs/weed/notification" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/util" - "github.com/spf13/viper" ) func init() { @@ -41,38 +40,38 @@ func (c *commandFsMetaNotify) Do(args []string, commandEnv *CommandEnv, writer i } util.LoadConfiguration("notification", true) - v := viper.GetViper() - notification.LoadConfiguration(v.Sub("notification")) + v := util.GetViper() + notification.LoadConfiguration(v, "notification.") ctx := context.Background() - return commandEnv.withFilerClient(ctx, filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error { + var dirCount, fileCount uint64 - var dirCount, fileCount uint64 + err = doTraverseBFS(ctx, writer, commandEnv.getFilerClient(filerServer, filerPort), filer2.FullPath(path), func(parentPath filer2.FullPath, entry *filer_pb.Entry) { - err = doTraverse(ctx, writer, client, filer2.FullPath(path), func(parentPath filer2.FullPath, entry *filer_pb.Entry) error { - - if entry.IsDirectory { - dirCount++ - } else { - fileCount++ - } - - return notification.Queue.SendMessage( - string(parentPath.Child(entry.Name)), - &filer_pb.EventNotification{ - NewEntry: entry, - }, - ) - - }) - - if err == nil { - fmt.Fprintf(writer, "\ntotal notified %d directories, %d files\n", dirCount, fileCount) + if entry.IsDirectory { + dirCount++ + } else { + fileCount++ } - return err + notifyErr := notification.Queue.SendMessage( + string(parentPath.Child(entry.Name)), + &filer_pb.EventNotification{ + NewEntry: entry, + }, + ) + + if notifyErr != nil { + fmt.Fprintf(writer, "fail to notify new entry event for %s: %v\n", parentPath.Child(entry.Name), notifyErr) + } }) + if err == nil { + fmt.Fprintf(writer, "\ntotal notified %d directories, %d files\n", dirCount, fileCount) + } + + return err + } diff --git a/weed/shell/command_fs_meta_save.go b/weed/shell/command_fs_meta_save.go index e710fe297..178c826d5 100644 --- a/weed/shell/command_fs_meta_save.go +++ b/weed/shell/command_fs_meta_save.go @@ -6,12 +6,15 @@ import ( "fmt" "io" "os" + "sync" + "sync/atomic" "time" + "github.com/golang/protobuf/proto" + "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/util" - "github.com/golang/protobuf/proto" ) func init() { @@ -51,114 +54,132 @@ func (c *commandFsMetaSave) Do(args []string, commandEnv *CommandEnv, writer io. 
return nil } - filerServer, filerPort, path, err := commandEnv.parseUrl(findInputDirectory(fsMetaSaveCommand.Args())) - if err != nil { - return err + filerServer, filerPort, path, parseErr := commandEnv.parseUrl(findInputDirectory(fsMetaSaveCommand.Args())) + if parseErr != nil { + return parseErr } ctx := context.Background() - return commandEnv.withFilerClient(ctx, filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error { + t := time.Now() + fileName := *outputFileName + if fileName == "" { + fileName = fmt.Sprintf("%s-%d-%4d%02d%02d-%02d%02d%02d.meta", + filerServer, filerPort, t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second()) + } - t := time.Now() - fileName := *outputFileName - if fileName == "" { - fileName = fmt.Sprintf("%s-%d-%4d%02d%02d-%02d%02d%02d.meta", - filerServer, filerPort, t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second()) - } - - dst, err := os.OpenFile(fileName, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) - if err != nil { - return nil - } - defer dst.Close() - - var dirCount, fileCount uint64 + dst, openErr := os.OpenFile(fileName, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) + if openErr != nil { + return fmt.Errorf("failed to create file %s: %v", fileName, openErr) + } + defer dst.Close() + var wg sync.WaitGroup + wg.Add(1) + outputChan := make(chan []byte, 1024) + go func() { sizeBuf := make([]byte, 4) - - err = doTraverse(ctx, writer, client, filer2.FullPath(path), func(parentPath filer2.FullPath, entry *filer_pb.Entry) error { - - protoMessage := &filer_pb.FullEntry{ - Dir: string(parentPath), - Entry: entry, - } - - bytes, err := proto.Marshal(protoMessage) - if err != nil { - return fmt.Errorf("marshall error: %v", err) - } - - util.Uint32toBytes(sizeBuf, uint32(len(bytes))) - + for b := range outputChan { + util.Uint32toBytes(sizeBuf, uint32(len(b))) dst.Write(sizeBuf) - dst.Write(bytes) + dst.Write(b) + } + wg.Done() + }() - if entry.IsDirectory { - dirCount++ - } else { - fileCount++ - } + var dirCount, fileCount uint64 - if *verbose { - println(parentPath.Child(entry.Name)) - } + err = doTraverseBFS(ctx, writer, commandEnv.getFilerClient(filerServer, filerPort), filer2.FullPath(path), func(parentPath filer2.FullPath, entry *filer_pb.Entry) { - return nil - - }) - - if err == nil { - fmt.Fprintf(writer, "\ntotal %d directories, %d files", dirCount, fileCount) - fmt.Fprintf(writer, "\nmeta data for http://%s:%d%s is saved to %s\n", filerServer, filerPort, path, fileName) + protoMessage := &filer_pb.FullEntry{ + Dir: string(parentPath), + Entry: entry, } - return err - - }) - -} -func doTraverse(ctx context.Context, writer io.Writer, client filer_pb.SeaweedFilerClient, parentPath filer2.FullPath, fn func(parentPath filer2.FullPath, entry *filer_pb.Entry) error) (err error) { - - paginatedCount := -1 - startFromFileName := "" - paginateSize := 1000 - - for paginatedCount == -1 || paginatedCount == paginateSize { - resp, listErr := client.ListEntries(ctx, &filer_pb.ListEntriesRequest{ - Directory: string(parentPath), - Prefix: "", - StartFromFileName: startFromFileName, - InclusiveStartFrom: false, - Limit: uint32(paginateSize), - }) - if listErr != nil { - err = listErr + bytes, err := proto.Marshal(protoMessage) + if err != nil { + fmt.Fprintf(writer, "marshall error: %v\n", err) return } - paginatedCount = len(resp.Entries) - - for _, entry := range resp.Entries { - - if err = fn(parentPath, entry); err != nil { - return err - } - - if entry.IsDirectory { - subDir := fmt.Sprintf("%s/%s", parentPath, entry.Name) - if 
parentPath == "/" { - subDir = "/" + entry.Name - } - if err = doTraverse(ctx, writer, client, filer2.FullPath(subDir), fn); err != nil { - return err - } - } - startFromFileName = entry.Name + outputChan <- bytes + if entry.IsDirectory { + atomic.AddUint64(&dirCount, 1) + } else { + atomic.AddUint64(&fileCount, 1) } + + if *verbose { + println(parentPath.Child(entry.Name)) + } + + }) + + close(outputChan) + + wg.Wait() + + if err == nil { + fmt.Fprintf(writer, "total %d directories, %d files\n", dirCount, fileCount) + fmt.Fprintf(writer, "meta data for http://%s:%d%s is saved to %s\n", filerServer, filerPort, path, fileName) } - return + return err + +} +func doTraverseBFS(ctx context.Context, writer io.Writer, filerClient filer2.FilerClient, + parentPath filer2.FullPath, fn func(parentPath filer2.FullPath, entry *filer_pb.Entry)) (err error) { + + K := 5 + + var jobQueueWg sync.WaitGroup + queue := util.NewQueue() + jobQueueWg.Add(1) + queue.Enqueue(parentPath) + var isTerminating bool + + for i := 0; i < K; i++ { + go func() { + for { + if isTerminating { + break + } + t := queue.Dequeue() + if t == nil { + time.Sleep(329 * time.Millisecond) + continue + } + dir := t.(filer2.FullPath) + processErr := processOneDirectory(ctx, writer, filerClient, dir, queue, &jobQueueWg, fn) + if processErr != nil { + err = processErr + } + jobQueueWg.Done() + } + }() + } + jobQueueWg.Wait() + isTerminating = true + return +} + +func processOneDirectory(ctx context.Context, writer io.Writer, filerClient filer2.FilerClient, + parentPath filer2.FullPath, queue *util.Queue, jobQueueWg *sync.WaitGroup, + fn func(parentPath filer2.FullPath, entry *filer_pb.Entry)) (err error) { + + return filer2.ReadDirAllEntries(ctx, filerClient, parentPath, "", func(entry *filer_pb.Entry, isLast bool) { + + fn(parentPath, entry) + + if entry.IsDirectory { + subDir := fmt.Sprintf("%s/%s", parentPath, entry.Name) + if parentPath == "/" { + subDir = "/" + entry.Name + } + jobQueueWg.Add(1) + queue.Enqueue(filer2.FullPath(subDir)) + } + }) } diff --git a/weed/shell/command_fs_mv.go b/weed/shell/command_fs_mv.go index 67606ab53..e77755921 100644 --- a/weed/shell/command_fs_mv.go +++ b/weed/shell/command_fs_mv.go @@ -53,7 +53,7 @@ func (c *commandFsMv) Do(args []string, commandEnv *CommandEnv, writer io.Writer destinationDir, destinationName := filer2.FullPath(destinationPath).DirAndName() - return commandEnv.withFilerClient(ctx, filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error { + return commandEnv.withFilerClient(ctx, filerServer, filerPort, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { // collect destination entry info destinationRequest := &filer_pb.LookupDirectoryEntryRequest{ diff --git a/weed/shell/command_fs_tree.go b/weed/shell/command_fs_tree.go index 8474e43ea..8660030e3 100644 --- a/weed/shell/command_fs_tree.go +++ b/weed/shell/command_fs_tree.go @@ -3,10 +3,11 @@ package shell import ( "context" "fmt" - "github.com/chrislusf/seaweedfs/weed/filer2" - "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "io" "strings" + + "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" ) func init() { @@ -38,77 +39,44 @@ func (c *commandFsTree) Do(args []string, commandEnv *CommandEnv, writer io.Writ ctx := context.Background() - return commandEnv.withFilerClient(ctx, filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error { + dirCount, fCount, terr := treeTraverseDirectory(ctx, writer, commandEnv.getFilerClient(filerServer, 
filerPort), filer2.FullPath(dir), name, newPrefix(), -1) - dirCount, fCount, terr := treeTraverseDirectory(ctx, writer, client, dir, name, newPrefix(), -1) - - if terr == nil { - fmt.Fprintf(writer, "%d directories, %d files\n", dirCount, fCount) - } - - return terr - - }) - -} -func treeTraverseDirectory(ctx context.Context, writer io.Writer, client filer_pb.SeaweedFilerClient, dir, name string, prefix *Prefix, level int) (directoryCount, fileCount int64, err error) { - - paginatedCount := -1 - startFromFileName := "" - paginateSize := 1000 - - for paginatedCount == -1 || paginatedCount == paginateSize { - resp, listErr := client.ListEntries(ctx, &filer_pb.ListEntriesRequest{ - Directory: dir, - Prefix: name, - StartFromFileName: startFromFileName, - InclusiveStartFrom: false, - Limit: uint32(paginateSize), - }) - if listErr != nil { - err = listErr - return - } - - paginatedCount = len(resp.Entries) - if paginatedCount > 0 { - prefix.addMarker(level) - } - - for i, entry := range resp.Entries { - - if level < 0 && name != "" { - if entry.Name != name { - break - } - } - - // 0.1% wrong prefix here, but fixing it would need to paginate to the next batch first - isLast := paginatedCount < paginateSize && i == paginatedCount-1 - fmt.Fprintf(writer, "%s%s\n", prefix.getPrefix(level, isLast), entry.Name) - - if entry.IsDirectory { - directoryCount++ - subDir := fmt.Sprintf("%s/%s", dir, entry.Name) - if dir == "/" { - subDir = "/" + entry.Name - } - dirCount, fCount, terr := treeTraverseDirectory(ctx, writer, client, subDir, "", prefix, level+1) - directoryCount += dirCount - fileCount += fCount - err = terr - } else { - fileCount++ - } - startFromFileName = entry.Name - - } + if terr == nil { + fmt.Fprintf(writer, "%d directories, %d files\n", dirCount, fCount) } - return + return terr } +func treeTraverseDirectory(ctx context.Context, writer io.Writer, filerClient filer2.FilerClient, dir filer2.FullPath, name string, prefix *Prefix, level int) (directoryCount, fileCount int64, err error) { + + prefix.addMarker(level) + + err = filer2.ReadDirAllEntries(ctx, filerClient, dir, name, func(entry *filer_pb.Entry, isLast bool) { + if level < 0 && name != "" { + if entry.Name != name { + return + } + } + + fmt.Fprintf(writer, "%s%s\n", prefix.getPrefix(level, isLast), entry.Name) + + if entry.IsDirectory { + directoryCount++ + subDir := dir.Child(entry.Name) + dirCount, fCount, terr := treeTraverseDirectory(ctx, writer, filerClient, subDir, "", prefix, level+1) + directoryCount += dirCount + fileCount += fCount + err = terr + } else { + fileCount++ + } + + }) + return +} + type Prefix struct { markers map[int]bool } diff --git a/weed/shell/command_volume_balance.go b/weed/shell/command_volume_balance.go index d7ef0d005..bed4f4306 100644 --- a/weed/shell/command_volume_balance.go +++ b/weed/shell/command_volume_balance.go @@ -27,7 +27,7 @@ func (c *commandVolumeBalance) Name() string { func (c *commandVolumeBalance) Help() string { return `balance all volumes among volume servers - volume.balance [-c ALL|EACH_COLLECTION|<collection_name>] [-force] [-dataCenter=<data_center_name>] + volume.balance [-collection ALL|EACH_COLLECTION|<collection_name>] [-force] [-dataCenter=<data_center_name>] Algorithm: @@ -79,8 +79,10 @@ func (c *commandVolumeBalance) Do(args []string, commandEnv *CommandEnv, writer } typeToNodes := collectVolumeServersByType(resp.TopologyInfo, *dc) - for _, volumeServers := range typeToNodes { + + for maxVolumeCount, volumeServers := range typeToNodes { if len(volumeServers) < 2 { + fmt.Printf("only 1 node is configured with max %d volumes, skipping
balancing\n", maxVolumeCount) continue } if *collection == "EACH_COLLECTION" { @@ -93,8 +95,8 @@ func (c *commandVolumeBalance) Do(args []string, commandEnv *CommandEnv, writer return err } } - } else if *collection == "ALL" { - if err = balanceVolumeServers(commandEnv, volumeServers, resp.VolumeSizeLimitMb*1024*1024, "ALL", *applyBalancing); err != nil { + } else if *collection == "ALL_COLLECTIONS" { + if err = balanceVolumeServers(commandEnv, volumeServers, resp.VolumeSizeLimitMb*1024*1024, "ALL_COLLECTIONS", *applyBalancing); err != nil { return err } } else { @@ -108,6 +110,7 @@ func (c *commandVolumeBalance) Do(args []string, commandEnv *CommandEnv, writer } func balanceVolumeServers(commandEnv *CommandEnv, dataNodeInfos []*master_pb.DataNodeInfo, volumeSizeLimit uint64, collection string, applyBalancing bool) error { + var nodes []*Node for _, dn := range dataNodeInfos { nodes = append(nodes, &Node{ @@ -118,7 +121,7 @@ func balanceVolumeServers(commandEnv *CommandEnv, dataNodeInfos []*master_pb.Dat // balance writable volumes for _, n := range nodes { n.selectVolumes(func(v *master_pb.VolumeInformationMessage) bool { - if collection != "ALL" { + if collection != "ALL_COLLECTIONS" { if v.Collection != collection { return false } @@ -133,7 +136,7 @@ func balanceVolumeServers(commandEnv *CommandEnv, dataNodeInfos []*master_pb.Dat // balance readable volumes for _, n := range nodes { n.selectVolumes(func(v *master_pb.VolumeInformationMessage) bool { - if collection != "ALL" { + if collection != "ALL_COLLECTIONS" { if v.Collection != collection { return false } diff --git a/weed/shell/command_volume_configure_replication.go b/weed/shell/command_volume_configure_replication.go new file mode 100644 index 000000000..6000d0de0 --- /dev/null +++ b/weed/shell/command_volume_configure_replication.go @@ -0,0 +1,105 @@ +package shell + +import ( + "context" + "errors" + "flag" + "fmt" + "io" + + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" +) + +func init() { + Commands = append(Commands, &commandVolumeConfigureReplication{}) +} + +type commandVolumeConfigureReplication struct { +} + +func (c *commandVolumeConfigureReplication) Name() string { + return "volume.configure.replication" +} + +func (c *commandVolumeConfigureReplication) Help() string { + return `change volume replication value + + This command changes a volume replication value. It should be followed by volume.fix.replication. 
+ +` +} + +func (c *commandVolumeConfigureReplication) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { + + configureReplicationCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError) + volumeIdInt := configureReplicationCommand.Int("volumeId", 0, "the volume id") + replicationString := configureReplicationCommand.String("replication", "", "the intended replication value") + if err = configureReplicationCommand.Parse(args); err != nil { + return nil + } + + if *replicationString == "" { + return fmt.Errorf("empty replication value") + } + + replicaPlacement, err := super_block.NewReplicaPlacementFromString(*replicationString) + if err != nil { + return fmt.Errorf("replication format: %v", err) + } + replicaPlacementInt32 := uint32(replicaPlacement.Byte()) + + var resp *master_pb.VolumeListResponse + ctx := context.Background() + err = commandEnv.MasterClient.WithClient(ctx, func(client master_pb.SeaweedClient) error { + resp, err = client.VolumeList(ctx, &master_pb.VolumeListRequest{}) + return err + }) + if err != nil { + return err + } + + vid := needle.VolumeId(*volumeIdInt) + + // find all data nodes with volumes that needs replication change + var allLocations []location + eachDataNode(resp.TopologyInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) { + loc := newLocation(dc, string(rack), dn) + for _, v := range dn.VolumeInfos { + if v.Id == uint32(vid) && v.ReplicaPlacement != replicaPlacementInt32 { + allLocations = append(allLocations, loc) + continue + } + } + }) + + if len(allLocations) == 0 { + return fmt.Errorf("no volume needs change") + } + + for _, dst := range allLocations { + err := operation.WithVolumeServerClient(dst.dataNode.Id, commandEnv.option.GrpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error { + resp, configureErr := volumeServerClient.VolumeConfigure(ctx, &volume_server_pb.VolumeConfigureRequest{ + VolumeId: uint32(vid), + Replication: replicaPlacement.String(), + }) + if configureErr != nil { + return configureErr + } + if resp.Error != "" { + return errors.New(resp.Error) + } + return nil + }) + + if err != nil { + return err + } + + } + + return nil +} diff --git a/weed/shell/command_volume_fix_replication.go b/weed/shell/command_volume_fix_replication.go index 4c7a794c0..7a1a77cbe 100644 --- a/weed/shell/command_volume_fix_replication.go +++ b/weed/shell/command_volume_fix_replication.go @@ -3,13 +3,14 @@ package shell import ( "context" "fmt" - "github.com/chrislusf/seaweedfs/weed/operation" - "github.com/chrislusf/seaweedfs/weed/pb/master_pb" - "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" - "github.com/chrislusf/seaweedfs/weed/storage" "io" "math/rand" "sort" + + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" ) func init() { @@ -78,7 +79,7 @@ func (c *commandVolumeFixReplication) Do(args []string, commandEnv *CommandEnv, underReplicatedVolumeLocations := make(map[uint32][]location) for vid, locations := range replicatedVolumeLocations { volumeInfo := replicatedVolumeInfo[vid] - replicaPlacement, _ := storage.NewReplicaPlacementFromByte(byte(volumeInfo.ReplicaPlacement)) + replicaPlacement, _ := super_block.NewReplicaPlacementFromByte(byte(volumeInfo.ReplicaPlacement)) if replicaPlacement.GetCopyCount() > len(locations) { underReplicatedVolumeLocations[vid] = locations } @@ -97,7 
+98,7 @@ func (c *commandVolumeFixReplication) Do(args []string, commandEnv *CommandEnv, for vid, locations := range underReplicatedVolumeLocations { volumeInfo := replicatedVolumeInfo[vid] - replicaPlacement, _ := storage.NewReplicaPlacementFromByte(byte(volumeInfo.ReplicaPlacement)) + replicaPlacement, _ := super_block.NewReplicaPlacementFromByte(byte(volumeInfo.ReplicaPlacement)) foundNewLocation := false for _, dst := range allLocations { // check whether data nodes satisfy the constraints @@ -112,7 +113,7 @@ func (c *commandVolumeFixReplication) Do(args []string, commandEnv *CommandEnv, break } - err := operation.WithVolumeServerClient(dst.dataNode.Id, commandEnv.option.GrpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + err := operation.WithVolumeServerClient(dst.dataNode.Id, commandEnv.option.GrpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error { _, replicateErr := volumeServerClient.VolumeCopy(ctx, &volume_server_pb.VolumeCopyRequest{ VolumeId: volumeInfo.Id, SourceDataNode: sourceNode.dataNode.Id, @@ -145,7 +146,7 @@ func keepDataNodesSorted(dataNodes []location) { }) } -func satisfyReplicaPlacement(replicaPlacement *storage.ReplicaPlacement, existingLocations []location, possibleLocation location) bool { +func satisfyReplicaPlacement(replicaPlacement *super_block.ReplicaPlacement, existingLocations []location, possibleLocation location) bool { existingDataCenters := make(map[string]bool) existingRacks := make(map[string]bool) diff --git a/weed/shell/command_volume_list.go b/weed/shell/command_volume_list.go index 91b5a0d32..c6c79d150 100644 --- a/weed/shell/command_volume_list.go +++ b/weed/shell/command_volume_list.go @@ -46,7 +46,7 @@ func (c *commandVolumeList) Do(args []string, commandEnv *CommandEnv, writer io. 
} func writeTopologyInfo(writer io.Writer, t *master_pb.TopologyInfo, volumeSizeLimitMb uint64) statistics { - fmt.Fprintf(writer, "Topology volume:%d/%d active:%d free:%d volumeSizeLimit:%d MB\n", t.VolumeCount, t.MaxVolumeCount, t.ActiveVolumeCount, t.FreeVolumeCount, volumeSizeLimitMb) + fmt.Fprintf(writer, "Topology volume:%d/%d active:%d free:%d remote:%d volumeSizeLimit:%d MB\n", t.VolumeCount, t.MaxVolumeCount, t.ActiveVolumeCount, t.FreeVolumeCount, t.RemoteVolumeCount, volumeSizeLimitMb) sort.Slice(t.DataCenterInfos, func(i, j int) bool { return t.DataCenterInfos[i].Id < t.DataCenterInfos[j].Id }) @@ -58,7 +58,7 @@ func writeTopologyInfo(writer io.Writer, t *master_pb.TopologyInfo, volumeSizeLi return s } func writeDataCenterInfo(writer io.Writer, t *master_pb.DataCenterInfo) statistics { - fmt.Fprintf(writer, " DataCenter %s volume:%d/%d active:%d free:%d\n", t.Id, t.VolumeCount, t.MaxVolumeCount, t.ActiveVolumeCount, t.FreeVolumeCount) + fmt.Fprintf(writer, " DataCenter %s volume:%d/%d active:%d free:%d remote:%d\n", t.Id, t.VolumeCount, t.MaxVolumeCount, t.ActiveVolumeCount, t.FreeVolumeCount, t.RemoteVolumeCount) var s statistics sort.Slice(t.RackInfos, func(i, j int) bool { return t.RackInfos[i].Id < t.RackInfos[j].Id @@ -70,7 +70,7 @@ func writeDataCenterInfo(writer io.Writer, t *master_pb.DataCenterInfo) statisti return s } func writeRackInfo(writer io.Writer, t *master_pb.RackInfo) statistics { - fmt.Fprintf(writer, " Rack %s volume:%d/%d active:%d free:%d\n", t.Id, t.VolumeCount, t.MaxVolumeCount, t.ActiveVolumeCount, t.FreeVolumeCount) + fmt.Fprintf(writer, " Rack %s volume:%d/%d active:%d free:%d remote:%d\n", t.Id, t.VolumeCount, t.MaxVolumeCount, t.ActiveVolumeCount, t.FreeVolumeCount, t.RemoteVolumeCount) var s statistics sort.Slice(t.DataNodeInfos, func(i, j int) bool { return t.DataNodeInfos[i].Id < t.DataNodeInfos[j].Id @@ -82,7 +82,7 @@ func writeRackInfo(writer io.Writer, t *master_pb.RackInfo) statistics { return s } func writeDataNodeInfo(writer io.Writer, t *master_pb.DataNodeInfo) statistics { - fmt.Fprintf(writer, " DataNode %s volume:%d/%d active:%d free:%d\n", t.Id, t.VolumeCount, t.MaxVolumeCount, t.ActiveVolumeCount, t.FreeVolumeCount) + fmt.Fprintf(writer, " DataNode %s volume:%d/%d active:%d free:%d remote:%d\n", t.Id, t.VolumeCount, t.MaxVolumeCount, t.ActiveVolumeCount, t.FreeVolumeCount, t.RemoteVolumeCount) var s statistics sort.Slice(t.VolumeInfos, func(i, j int) bool { return t.VolumeInfos[i].Id < t.VolumeInfos[j].Id diff --git a/weed/shell/command_volume_mount.go b/weed/shell/command_volume_mount.go index 50a307492..21bc342b4 100644 --- a/weed/shell/command_volume_mount.go +++ b/weed/shell/command_volume_mount.go @@ -51,7 +51,7 @@ func (c *commandVolumeMount) Do(args []string, commandEnv *CommandEnv, writer io } func mountVolume(ctx context.Context, grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer string) (err error) { - return operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + return operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error { _, mountErr := volumeServerClient.VolumeMount(ctx, &volume_server_pb.VolumeMountRequest{ VolumeId: uint32(volumeId), }) diff --git a/weed/shell/command_volume_move.go b/weed/shell/command_volume_move.go index 08d87c988..2e39c0600 100644 --- a/weed/shell/command_volume_move.go +++ 
b/weed/shell/command_volume_move.go @@ -25,7 +25,7 @@ func (c *commandVolumeMove) Name() string { } func (c *commandVolumeMove) Help() string { - return ` move a live volume from one volume server to another volume server + return `move a live volume from one volume server to another volume server volume.move @@ -88,7 +88,7 @@ func LiveMoveVolume(ctx context.Context, grpcDialOption grpc.DialOption, volumeI func copyVolume(ctx context.Context, grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer, targetVolumeServer string) (lastAppendAtNs uint64, err error) { - err = operation.WithVolumeServerClient(targetVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + err = operation.WithVolumeServerClient(targetVolumeServer, grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error { resp, replicateErr := volumeServerClient.VolumeCopy(ctx, &volume_server_pb.VolumeCopyRequest{ VolumeId: uint32(volumeId), SourceDataNode: sourceVolumeServer, @@ -104,7 +104,7 @@ func tailVolume(ctx context.Context, grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer, targetVolumeServer string, lastAppendAtNs uint64, idleTimeout time.Duration) (err error) { - return operation.WithVolumeServerClient(targetVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + return operation.WithVolumeServerClient(targetVolumeServer, grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error { _, replicateErr := volumeServerClient.VolumeTailReceiver(ctx, &volume_server_pb.VolumeTailReceiverRequest{ VolumeId: uint32(volumeId), SinceNs: lastAppendAtNs, @@ -117,7 +117,7 @@ func tailVolume(ctx context.Context, grpcDialOption grpc.DialOption, volumeId ne } func deleteVolume(ctx context.Context, grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer string) (err error) { - return operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + return operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error { _, deleteErr := volumeServerClient.VolumeDelete(ctx, &volume_server_pb.VolumeDeleteRequest{ VolumeId: uint32(volumeId), }) diff --git a/weed/shell/command_volume_tier_download.go b/weed/shell/command_volume_tier_download.go new file mode 100644 index 000000000..0f1a1bb6e --- /dev/null +++ b/weed/shell/command_volume_tier_download.go @@ -0,0 +1,167 @@ +package shell + +import ( + "context" + "flag" + "fmt" + "io" + + "google.golang.org/grpc" + + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" + "github.com/chrislusf/seaweedfs/weed/storage/needle" +) + +func init() { + Commands = append(Commands, &commandVolumeTierDownload{}) +} + +type commandVolumeTierDownload struct { +} + +func (c *commandVolumeTierDownload) Name() string { + return "volume.tier.download" +} + +func (c *commandVolumeTierDownload) Help() string { + return `download the dat file of a volume from a remote tier + + volume.tier.download [-collection=""] + volume.tier.download [-collection=""] -volumeId=<volume_id> + + e.g.: + volume.tier.download -volumeId=7 +
volume.tier.download -volumeId=7 + + This command will download the dat file of a volume from a remote tier to a volume server in the local cluster. + +` +} + +func (c *commandVolumeTierDownload) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { + + tierCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError) + volumeId := tierCommand.Int("volumeId", 0, "the volume id") + collection := tierCommand.String("collection", "", "the collection name") + if err = tierCommand.Parse(args); err != nil { + return nil + } + + ctx := context.Background() + vid := needle.VolumeId(*volumeId) + + // collect topology information + topologyInfo, err := collectTopologyInfo(ctx, commandEnv) + if err != nil { + return err + } + + // volumeId is provided + if vid != 0 { + return doVolumeTierDownload(ctx, commandEnv, writer, *collection, vid) + } + + // apply to all volumes in the collection that have been moved to a remote tier + volumeIds := collectRemoteVolumes(topologyInfo, *collection) + fmt.Printf("tier download volumes: %v\n", volumeIds) + for _, vid := range volumeIds { + if err = doVolumeTierDownload(ctx, commandEnv, writer, *collection, vid); err != nil { + return err + } + } + + return nil +} + +func collectRemoteVolumes(topoInfo *master_pb.TopologyInfo, selectedCollection string) (vids []needle.VolumeId) { + + vidMap := make(map[uint32]bool) + eachDataNode(topoInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) { + for _, v := range dn.VolumeInfos { + if v.Collection == selectedCollection && v.RemoteStorageKey != "" && v.RemoteStorageName != "" { + vidMap[v.Id] = true + } + } + }) + + for vid := range vidMap { + vids = append(vids, needle.VolumeId(vid)) + } + + return +} + +func doVolumeTierDownload(ctx context.Context, commandEnv *CommandEnv, writer io.Writer, collection string, vid needle.VolumeId) (err error) { + // find volume location + locations, found := commandEnv.MasterClient.GetLocations(uint32(vid)) + if !found { + return fmt.Errorf("volume %d not found", vid) + } + + // TODO parallelize this + for _, loc := range locations { + // copy the .dat file from remote tier to local + err = downloadDatFromRemoteTier(ctx, commandEnv.option.GrpcDialOption, writer, needle.VolumeId(vid), collection, loc.Url) + if err != nil { + return fmt.Errorf("download dat file for volume %d to %s: %v", vid, loc.Url, err) + } + } + + return nil +} + +func downloadDatFromRemoteTier(ctx context.Context, grpcDialOption grpc.DialOption, writer io.Writer, volumeId needle.VolumeId, collection string, targetVolumeServer string) error { + + err := operation.WithVolumeServerClient(targetVolumeServer, grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error { + stream, downloadErr := volumeServerClient.VolumeTierMoveDatFromRemote(ctx, &volume_server_pb.VolumeTierMoveDatFromRemoteRequest{ + VolumeId: uint32(volumeId), + Collection: collection, + }) + if downloadErr != nil { + return downloadErr + } + + var lastProcessed int64 + for { + resp, recvErr := stream.Recv() + if recvErr != nil { + if recvErr == io.EOF { + break + } else { + return recvErr + } + } + + // bytes transferred since the previous progress message, shown in MB + processingSpeed := float64(resp.Processed-lastProcessed) / 1024.0 / 1024.0 + + fmt.Fprintf(writer, "downloaded %.2f%%, %d bytes, %.2fMB/s\n", resp.ProcessedPercentage, resp.Processed, processingSpeed) + + lastProcessed = resp.Processed + } + + _, unmountErr := volumeServerClient.VolumeUnmount(ctx, &volume_server_pb.VolumeUnmountRequest{ + VolumeId:
+      VolumeId: uint32(volumeId),
+    })
+    if unmountErr != nil {
+      return unmountErr
+    }
+
+    _, mountErr := volumeServerClient.VolumeMount(ctx, &volume_server_pb.VolumeMountRequest{
+      VolumeId: uint32(volumeId),
+    })
+    if mountErr != nil {
+      return mountErr
+    }
+
+    return nil
+  })
+
+  return err
+
+}
diff --git a/weed/shell/command_volume_tier_upload.go b/weed/shell/command_volume_tier_upload.go
new file mode 100644
index 000000000..20da1187c
--- /dev/null
+++ b/weed/shell/command_volume_tier_upload.go
@@ -0,0 +1,148 @@
+package shell
+
+import (
+  "context"
+  "flag"
+  "fmt"
+  "io"
+  "time"
+
+  "google.golang.org/grpc"
+
+  "github.com/chrislusf/seaweedfs/weed/operation"
+  "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
+  "github.com/chrislusf/seaweedfs/weed/storage/needle"
+)
+
+func init() {
+  Commands = append(Commands, &commandVolumeTierUpload{})
+}
+
+type commandVolumeTierUpload struct {
+}
+
+func (c *commandVolumeTierUpload) Name() string {
+  return "volume.tier.upload"
+}
+
+func (c *commandVolumeTierUpload) Help() string {
+  return `upload the dat file of a volume to a remote tier
+
+  volume.tier.upload [-collection=""] [-fullPercent=95] [-quietFor=1h]
+  volume.tier.upload [-collection=""] -volumeId=<volume_id> -dest=<storage_backend> [-keepLocalDatFile]
+
+  e.g.:
+  volume.tier.upload -volumeId=7 -dest=s3
+  volume.tier.upload -volumeId=7 -dest=s3.default
+
+  The <storage_backend> is defined in master.toml.
+  For example, "s3.default" in [storage.backend.s3.default]
+
+  This command will move the dat file of a volume to a remote tier.
+
+  SeaweedFS enables scalable and fast local access to lots of files,
+  while cloud storage is slower but more cost-efficient. How can the two be combined?
+
+  Usually the data follows the 80/20 rule: only 20% of the data is frequently accessed.
+  We can offload the old volumes to the cloud.
+
+  With this, SeaweedFS can be both fast and scalable, with virtually unlimited storage space.
+  Just add more local SeaweedFS volume servers to increase the throughput.
+
+  The index file is still local, and the same O(1) disk read is applied to the remote file.
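+
+  A sketch of the matching master.toml entry (the keys mirror what the s3 backend
+  code in this change reads; the values are illustrative placeholders):
+
+  [storage.backend.s3.default]
+  enabled = true
+  aws_access_key_id = ""     # if empty, the AWS default credential chain is used
+  aws_secret_access_key = ""
+  region = "us-east-2"
+  bucket = "your_bucket_name"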
+
+`
+}
+
+func (c *commandVolumeTierUpload) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+
+  tierCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+  volumeId := tierCommand.Int("volumeId", 0, "the volume id")
+  collection := tierCommand.String("collection", "", "the collection name")
+  fullPercentage := tierCommand.Float64("fullPercent", 95, "only upload volumes that have reached this percentage of the max volume size")
+  quietPeriod := tierCommand.Duration("quietFor", 24*time.Hour, "select volumes with no writes for this period")
+  dest := tierCommand.String("dest", "", "the target tier name")
+  keepLocalDatFile := tierCommand.Bool("keepLocalDatFile", false, "whether to keep the local dat file after uploading")
+  if err = tierCommand.Parse(args); err != nil {
+    return nil
+  }
+
+  ctx := context.Background()
+  vid := needle.VolumeId(*volumeId)
+
+  // volumeId is provided
+  if vid != 0 {
+    return doVolumeTierUpload(ctx, commandEnv, writer, *collection, vid, *dest, *keepLocalDatFile)
+  }
+
+  // apply to all volumes in the collection
+  // reusing collectVolumeIdsForEcEncode for now
+  volumeIds, err := collectVolumeIdsForEcEncode(ctx, commandEnv, *collection, *fullPercentage, *quietPeriod)
+  if err != nil {
+    return err
+  }
+  fmt.Fprintf(writer, "tier upload volumes: %v\n", volumeIds)
+  for _, vid := range volumeIds {
+    if err = doVolumeTierUpload(ctx, commandEnv, writer, *collection, vid, *dest, *keepLocalDatFile); err != nil {
+      return err
+    }
+  }
+
+  return nil
+}
+
+func doVolumeTierUpload(ctx context.Context, commandEnv *CommandEnv, writer io.Writer, collection string, vid needle.VolumeId, dest string, keepLocalDatFile bool) (err error) {
+  // find volume location
+  locations, found := commandEnv.MasterClient.GetLocations(uint32(vid))
+  if !found {
+    return fmt.Errorf("volume %d not found", vid)
+  }
+
+  err = markVolumeReadonly(ctx, commandEnv.option.GrpcDialOption, needle.VolumeId(vid), locations)
+  if err != nil {
+    return fmt.Errorf("mark volume %d as readonly on %s: %v", vid, locations[0].Url, err)
+  }
+
+  // copy the .dat file to remote tier
+  err = uploadDatToRemoteTier(ctx, commandEnv.option.GrpcDialOption, writer, needle.VolumeId(vid), collection, locations[0].Url, dest, keepLocalDatFile)
+  if err != nil {
+    return fmt.Errorf("copy dat file for volume %d on %s to %s: %v", vid, locations[0].Url, dest, err)
+  }
+
+  return nil
+}
+
+func uploadDatToRemoteTier(ctx context.Context, grpcDialOption grpc.DialOption, writer io.Writer, volumeId needle.VolumeId, collection string, sourceVolumeServer string, dest string, keepLocalDatFile bool) error {
+
+  err := operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error {
+    stream, copyErr := volumeServerClient.VolumeTierMoveDatToRemote(ctx, &volume_server_pb.VolumeTierMoveDatToRemoteRequest{
+      VolumeId:               uint32(volumeId),
+      Collection:             collection,
+      DestinationBackendName: dest,
+      KeepLocalDatFile:       keepLocalDatFile,
+    })
+    // check the RPC error before reading from the stream; calling Recv on a failed (nil) stream would panic
+    if copyErr != nil {
+      return copyErr
+    }
+
+    var lastProcessed int64
+    for {
+      resp, recvErr := stream.Recv()
+      if recvErr != nil {
+        if recvErr == io.EOF {
+          break
+        } else {
+          return recvErr
+        }
+      }
+
+      processingSpeed := float64(resp.Processed-lastProcessed) / 1024.0 / 1024.0
+
+      fmt.Fprintf(writer, "copied %.2f%%, %d bytes, %.2fMB/s\n", resp.ProcessedPercentage, resp.Processed, processingSpeed)
+
+      lastProcessed = resp.Processed
+    }
+
+    return nil
+  })
+
+  return err
+
+}
diff --git a/weed/shell/command_volume_unmount.go b/weed/shell/command_volume_unmount.go
index
8096f34d8..826258dfb 100644 --- a/weed/shell/command_volume_unmount.go +++ b/weed/shell/command_volume_unmount.go @@ -51,7 +51,7 @@ func (c *commandVolumeUnmount) Do(args []string, commandEnv *CommandEnv, writer } func unmountVolume(ctx context.Context, grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer string) (err error) { - return operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + return operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error { _, unmountErr := volumeServerClient.VolumeUnmount(ctx, &volume_server_pb.VolumeUnmountRequest{ VolumeId: uint32(volumeId), }) diff --git a/weed/shell/commands.go b/weed/shell/commands.go index b642ec253..f1fcb62d4 100644 --- a/weed/shell/commands.go +++ b/weed/shell/commands.go @@ -9,10 +9,11 @@ import ( "strconv" "strings" + "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/wdclient" - "google.golang.org/grpc" ) type ShellOptions struct { @@ -69,28 +70,21 @@ func (ce *CommandEnv) checkDirectory(ctx context.Context, filerServer string, fi dir, name := filer2.FullPath(path).DirAndName() - return ce.withFilerClient(ctx, filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error { + return ce.withFilerClient(ctx, filerServer, filerPort, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { - resp, listErr := client.ListEntries(ctx, &filer_pb.ListEntriesRequest{ - Directory: dir, - Prefix: name, - StartFromFileName: name, - InclusiveStartFrom: true, - Limit: 1, + resp, lookupErr := client.LookupDirectoryEntry(ctx, &filer_pb.LookupDirectoryEntryRequest{ + Directory: dir, + Name: name, }) - if listErr != nil { - return listErr + if lookupErr != nil { + return lookupErr } - if len(resp.Entries) == 0 { + if resp.Entry == nil { return fmt.Errorf("entry not found") } - if resp.Entries[0].Name != name { - return fmt.Errorf("not a valid directory, found %s", resp.Entries[0].Name) - } - - if !resp.Entries[0].IsDirectory { + if !resp.Entry.IsDirectory { return fmt.Errorf("not a directory") } diff --git a/weed/storage/backend/backend.go b/weed/storage/backend/backend.go new file mode 100644 index 000000000..6941ca5a1 --- /dev/null +++ b/weed/storage/backend/backend.go @@ -0,0 +1,135 @@ +package backend + +import ( + "io" + "os" + "strings" + "time" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" + "github.com/spf13/viper" +) + +type BackendStorageFile interface { + io.ReaderAt + io.WriterAt + Truncate(off int64) error + io.Closer + GetStat() (datSize int64, modTime time.Time, err error) + Name() string +} + +type BackendStorage interface { + ToProperties() map[string]string + NewStorageFile(key string, tierInfo *volume_server_pb.VolumeInfo) BackendStorageFile + CopyFile(f *os.File, attributes map[string]string, fn func(progressed int64, percentage float32) error) (key string, size int64, err error) + DownloadFile(fileName string, key string, fn func(progressed int64, percentage float32) error) (size int64, err error) + DeleteFile(key string) (err error) +} + +type StringProperties interface { + GetString(key string) string +} +type StorageType string +type BackendStorageFactory interface { + StorageType() 
StorageType + BuildStorage(configuration StringProperties, configPrefix string, id string) (BackendStorage, error) +} + +var ( + BackendStorageFactories = make(map[StorageType]BackendStorageFactory) + BackendStorages = make(map[string]BackendStorage) +) + +// used by master to load remote storage configurations +func LoadConfiguration(config *viper.Viper) { + + StorageBackendPrefix := "storage.backend" + + for backendTypeName := range config.GetStringMap(StorageBackendPrefix) { + backendStorageFactory, found := BackendStorageFactories[StorageType(backendTypeName)] + if !found { + glog.Fatalf("backend storage type %s not found", backendTypeName) + } + for backendStorageId := range config.GetStringMap(StorageBackendPrefix + "." + backendTypeName) { + if !config.GetBool(StorageBackendPrefix + "." + backendTypeName + "." + backendStorageId + ".enabled") { + continue + } + backendStorage, buildErr := backendStorageFactory.BuildStorage(config, + StorageBackendPrefix+"."+backendTypeName+"."+backendStorageId+".", backendStorageId) + if buildErr != nil { + glog.Fatalf("fail to create backend storage %s.%s", backendTypeName, backendStorageId) + } + BackendStorages[backendTypeName+"."+backendStorageId] = backendStorage + if backendStorageId == "default" { + BackendStorages[backendTypeName] = backendStorage + } + } + } + +} + +// used by volume server to receive remote storage configurations from master +func LoadFromPbStorageBackends(storageBackends []*master_pb.StorageBackend) { + + for _, storageBackend := range storageBackends { + backendStorageFactory, found := BackendStorageFactories[StorageType(storageBackend.Type)] + if !found { + glog.Warningf("storage type %s not found", storageBackend.Type) + continue + } + backendStorage, buildErr := backendStorageFactory.BuildStorage(newProperties(storageBackend.Properties), "", storageBackend.Id) + if buildErr != nil { + glog.Fatalf("fail to create backend storage %s.%s", storageBackend.Type, storageBackend.Id) + } + BackendStorages[storageBackend.Type+"."+storageBackend.Id] = backendStorage + if storageBackend.Id == "default" { + BackendStorages[storageBackend.Type] = backendStorage + } + } +} + +type Properties struct { + m map[string]string +} + +func newProperties(m map[string]string) *Properties { + return &Properties{m: m} +} + +func (p *Properties) GetString(key string) string { + if v, found := p.m[key]; found { + return v + } + return "" +} + +func ToPbStorageBackends() (backends []*master_pb.StorageBackend) { + for sName, s := range BackendStorages { + sType, sId := BackendNameToTypeId(sName) + if sType == "" { + continue + } + backends = append(backends, &master_pb.StorageBackend{ + Type: sType, + Id: sId, + Properties: s.ToProperties(), + }) + } + return +} + +func BackendNameToTypeId(backendName string) (backendType, backendId string) { + parts := strings.Split(backendName, ".") + if len(parts) == 1 { + return backendName, "default" + } + if len(parts) != 2 { + return + } + + backendType, backendId = parts[0], parts[1] + return +} diff --git a/weed/storage/backend/disk_file.go b/weed/storage/backend/disk_file.go new file mode 100644 index 000000000..c4b3caffb --- /dev/null +++ b/weed/storage/backend/disk_file.go @@ -0,0 +1,50 @@ +package backend + +import ( + "os" + "time" +) + +var ( + _ BackendStorageFile = &DiskFile{} +) + +type DiskFile struct { + File *os.File + fullFilePath string +} + +func NewDiskFile(f *os.File) *DiskFile { + return &DiskFile{ + fullFilePath: f.Name(), + File: f, + } +} + +func (df *DiskFile) ReadAt(p []byte, off 
int64) (n int, err error) {
+  return df.File.ReadAt(p, off)
+}
+
+func (df *DiskFile) WriteAt(p []byte, off int64) (n int, err error) {
+  return df.File.WriteAt(p, off)
+}
+
+func (df *DiskFile) Truncate(off int64) error {
+  return df.File.Truncate(off)
+}
+
+func (df *DiskFile) Close() error {
+  return df.File.Close()
+}
+
+func (df *DiskFile) GetStat() (datSize int64, modTime time.Time, err error) {
+  stat, e := df.File.Stat()
+  if e == nil {
+    return stat.Size(), stat.ModTime(), nil
+  }
+  // return the Stat error, not the always-nil named return value
+  return 0, time.Time{}, e
+}
+
+func (df *DiskFile) Name() string {
+  return df.fullFilePath
+}
diff --git a/weed/storage/backend/memory_map/memory_map.go b/weed/storage/backend/memory_map/memory_map.go
index e940fcc0e..5dc7ba33d 100644
--- a/weed/storage/backend/memory_map/memory_map.go
+++ b/weed/storage/backend/memory_map/memory_map.go
@@ -21,8 +21,6 @@ type MemoryMap struct {
 	End_of_file int64
 }
 
-var FileMemoryMap = make(map[string]*MemoryMap)
-
 func ReadMemoryMapMaxSizeMb(memoryMapMaxSizeMbString string) (uint32, error) {
 	if memoryMapMaxSizeMbString == "" {
 		return 0, nil
diff --git a/weed/storage/backend/memory_map/memory_map_backend.go b/weed/storage/backend/memory_map/memory_map_backend.go
new file mode 100644
index 000000000..03e7308d0
--- /dev/null
+++ b/weed/storage/backend/memory_map/memory_map_backend.go
@@ -0,0 +1,60 @@
+package memory_map
+
+import (
+  "os"
+  "time"
+
+  "github.com/chrislusf/seaweedfs/weed/storage/backend"
+)
+
+var (
+  _ backend.BackendStorageFile = &MemoryMappedFile{}
+)
+
+type MemoryMappedFile struct {
+  mm *MemoryMap
+}
+
+func NewMemoryMappedFile(f *os.File, memoryMapSizeMB uint32) *MemoryMappedFile {
+  mmf := &MemoryMappedFile{
+    mm: new(MemoryMap),
+  }
+  mmf.mm.CreateMemoryMap(f, 1024*1024*uint64(memoryMapSizeMB))
+  return mmf
+}
+
+func (mmf *MemoryMappedFile) ReadAt(p []byte, off int64) (n int, err error) {
+  readBytes, e := mmf.mm.ReadMemory(uint64(off), uint64(len(p)))
+  if e != nil {
+    return 0, e
+  }
+  // TODO avoid the extra copy
+  copy(p, readBytes)
+  return len(readBytes), nil
+}
+
+func (mmf *MemoryMappedFile) WriteAt(p []byte, off int64) (n int, err error) {
+  mmf.mm.WriteMemory(uint64(off), uint64(len(p)), p)
+  return len(p), nil
+}
+
+func (mmf *MemoryMappedFile) Truncate(off int64) error {
+  return nil
+}
+
+func (mmf *MemoryMappedFile) Close() error {
+  mmf.mm.DeleteFileAndMemoryMap()
+  return nil
+}
+
+func (mmf *MemoryMappedFile) GetStat() (datSize int64, modTime time.Time, err error) {
+  stat, e := mmf.mm.File.Stat()
+  if e == nil {
+    return mmf.mm.End_of_file + 1, stat.ModTime(), nil
+  }
+  // return the Stat error, not the always-nil named return value
+  return 0, time.Time{}, e
+}
+
+func (mmf *MemoryMappedFile) Name() string {
+  return mmf.mm.File.Name()
+}
diff --git a/weed/storage/backend/s3_backend/s3_backend.go b/weed/storage/backend/s3_backend/s3_backend.go
new file mode 100644
index 000000000..8d71861c2
--- /dev/null
+++ b/weed/storage/backend/s3_backend/s3_backend.go
@@ -0,0 +1,177 @@
+package s3_backend
+
+import (
+  "fmt"
+  "io"
+  "os"
+  "strings"
+  "time"
+
+  "github.com/aws/aws-sdk-go/service/s3"
+  "github.com/aws/aws-sdk-go/service/s3/s3iface"
+  "github.com/google/uuid"
+
+  "github.com/chrislusf/seaweedfs/weed/glog"
+  "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
+  "github.com/chrislusf/seaweedfs/weed/storage/backend"
+)
+
+func init() {
+  backend.BackendStorageFactories["s3"] = &S3BackendFactory{}
+}
+
+type S3BackendFactory struct {
+}
+
+func (factory *S3BackendFactory) StorageType() backend.StorageType {
+  return backend.StorageType("s3")
+}
+func (factory *S3BackendFactory) BuildStorage(configuration backend.StringProperties, configPrefix string, id string) (backend.BackendStorage, error) {
+  return newS3BackendStorage(configuration, configPrefix, id)
+}
+
+type S3BackendStorage struct {
+  id                    string
+  aws_access_key_id     string
+  aws_secret_access_key string
+  region                string
+  bucket                string
+  conn                  s3iface.S3API
+}
+
+func newS3BackendStorage(configuration backend.StringProperties, configPrefix string, id string) (s *S3BackendStorage, err error) {
+  s = &S3BackendStorage{}
+  s.id = id
+  s.aws_access_key_id = configuration.GetString(configPrefix + "aws_access_key_id")
+  s.aws_secret_access_key = configuration.GetString(configPrefix + "aws_secret_access_key")
+  s.region = configuration.GetString(configPrefix + "region")
+  s.bucket = configuration.GetString(configPrefix + "bucket")
+  s.conn, err = createSession(s.aws_access_key_id, s.aws_secret_access_key, s.region)
+
+  glog.V(0).Infof("created backend storage s3.%s for region %s bucket %s", s.id, s.region, s.bucket)
+  return
+}
+
+func (s *S3BackendStorage) ToProperties() map[string]string {
+  m := make(map[string]string)
+  m["aws_access_key_id"] = s.aws_access_key_id
+  m["aws_secret_access_key"] = s.aws_secret_access_key
+  m["region"] = s.region
+  m["bucket"] = s.bucket
+  return m
+}
+
+func (s *S3BackendStorage) NewStorageFile(key string, tierInfo *volume_server_pb.VolumeInfo) backend.BackendStorageFile {
+  if strings.HasPrefix(key, "/") {
+    key = key[1:]
+  }
+
+  f := &S3BackendStorageFile{
+    backendStorage: s,
+    key:            key,
+    tierInfo:       tierInfo,
+  }
+
+  return f
+}
+
+func (s *S3BackendStorage) CopyFile(f *os.File, attributes map[string]string, fn func(progressed int64, percentage float32) error) (key string, size int64, err error) {
+  randomUuid, _ := uuid.NewRandom()
+  key = randomUuid.String()
+
+  glog.V(1).Infof("copying dat file of %s to remote s3.%s as %s", f.Name(), s.id, key)
+
+  size, err = uploadToS3(s.conn, f.Name(), s.bucket, key, attributes, fn)
+
+  return
+}
+
+func (s *S3BackendStorage) DownloadFile(fileName string, key string, fn func(progressed int64, percentage float32) error) (size int64, err error) {
+
+  glog.V(1).Infof("download dat file of %s from remote s3.%s as %s", fileName, s.id, key)
+
+  size, err = downloadFromS3(s.conn, fileName, s.bucket, key, fn)
+
+  return
+}
+
+func (s *S3BackendStorage) DeleteFile(key string) (err error) {
+
+  glog.V(1).Infof("delete dat file %s from remote", key)
+
+  err = deleteFromS3(s.conn, s.bucket, key)
+
+  return
+}
+
+type S3BackendStorageFile struct {
+  backendStorage *S3BackendStorage
+  key            string
+  tierInfo       *volume_server_pb.VolumeInfo
+}
+
+func (s3backendStorageFile S3BackendStorageFile) ReadAt(p []byte, off int64) (n int, err error) {
+
+  bytesRange := fmt.Sprintf("bytes=%d-%d", off, off+int64(len(p))-1)
+
+  getObjectOutput, getObjectErr := s3backendStorageFile.backendStorage.conn.GetObject(&s3.GetObjectInput{
+    Bucket: &s3backendStorageFile.backendStorage.bucket,
+    Key:    &s3backendStorageFile.key,
+    Range:  &bytesRange,
+  })
+
+  if getObjectErr != nil {
+    return 0, fmt.Errorf("bucket %s GetObject %s: %v", s3backendStorageFile.backendStorage.bucket, s3backendStorageFile.key, getObjectErr)
+  }
+  defer getObjectOutput.Body.Close()
+
+  glog.V(4).Infof("read %s %s", s3backendStorageFile.key, bytesRange)
+  glog.V(4).Infof("content range: %s, contentLength: %d", *getObjectOutput.ContentRange, *getObjectOutput.ContentLength)
+
+  // the ranged GET body may arrive in several chunks; io.ReadFull accumulates the
+  // total so that n reports all bytes read, not just the size of the last chunk
+  n, err = io.ReadFull(getObjectOutput.Body, p)
+  if err == io.EOF {
+    err = nil
+  }
+
+  return
+}
+
+func (s3backendStorageFile S3BackendStorageFile) WriteAt(p []byte, off int64) (n int, err error) {
+  panic("not implemented")
+}
+
+func (s3backendStorageFile S3BackendStorageFile) Truncate(off int64) error {
+  panic("not implemented")
+}
+
+func (s3backendStorageFile S3BackendStorageFile) Close() error {
+  return nil
+}
+
+func (s3backendStorageFile S3BackendStorageFile) GetStat() (datSize int64, modTime time.Time, err error) {
+
+  files := s3backendStorageFile.tierInfo.GetFiles()
+
+  if len(files) == 0 {
+    err = fmt.Errorf("remote file info not found")
+    return
+  }
+
+  datSize = int64(files[0].FileSize)
+  modTime = time.Unix(int64(files[0].ModifiedTime), 0)
+
+  return
+}
+
+func (s3backendStorageFile S3BackendStorageFile) Name() string {
+  return s3backendStorageFile.key
+}
diff --git a/weed/storage/backend/s3_backend/s3_download.go b/weed/storage/backend/s3_backend/s3_download.go
new file mode 100644
index 000000000..dbc28446a
--- /dev/null
+++ b/weed/storage/backend/s3_backend/s3_download.go
@@ -0,0 +1,98 @@
+package s3_backend
+
+import (
+  "fmt"
+  "os"
+  "sync/atomic"
+
+  "github.com/aws/aws-sdk-go/aws"
+  "github.com/aws/aws-sdk-go/service/s3"
+  "github.com/aws/aws-sdk-go/service/s3/s3iface"
+  "github.com/aws/aws-sdk-go/service/s3/s3manager"
+
+  "github.com/chrislusf/seaweedfs/weed/glog"
+)
+
+func downloadFromS3(sess s3iface.S3API, destFileName string, sourceBucket string, sourceKey string,
+  fn func(progressed int64, percentage float32) error) (fileSize int64, err error) {
+
+  fileSize, err = getFileSize(sess, sourceBucket, sourceKey)
+  if err != nil {
+    return
+  }
+
+  // open the destination file
+  f, err := os.OpenFile(destFileName, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
+  if err != nil {
+    return 0, fmt.Errorf("failed to open file %q, %v", destFileName, err)
+  }
+  defer f.Close()
+
+  // Create a downloader with the session and custom options
+  downloader := s3manager.NewDownloaderWithClient(sess, func(u *s3manager.Downloader) {
+    u.PartSize = int64(64 * 1024 * 1024)
+    u.Concurrency = 5
+  })
+
+  fileWriter := &s3DownloadProgressedWriter{
+    fp:      f,
+    size:    fileSize,
+    written: 0,
+    fn:      fn,
+  }
+
+  // Download the file from S3.
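+  // Note: the s3manager downloader fetches parts concurrently (Concurrency = 5
+  // above) and hands each part to the writer via WriteAt, so the file is not
+  // written sequentially; the s3DownloadProgressedWriter below therefore tracks
+  // only a cumulative byte count rather than a file position.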
+  fileSize, err = downloader.Download(fileWriter, &s3.GetObjectInput{
+    Bucket: aws.String(sourceBucket),
+    Key:    aws.String(sourceKey),
+  })
+  if err != nil {
+    return fileSize, fmt.Errorf("failed to download file %s: %v", destFileName, err)
+  }
+
+  glog.V(1).Infof("downloaded file %s", destFileName)
+
+  return
+}
+
+// adapted from https://github.com/aws/aws-sdk-go/pull/1868
+// and https://petersouter.xyz/s3-download-progress-bar-in-golang/
+type s3DownloadProgressedWriter struct {
+  fp      *os.File
+  size    int64
+  written int64
+  fn      func(progressed int64, percentage float32) error
+}
+
+func (w *s3DownloadProgressedWriter) WriteAt(p []byte, off int64) (int, error) {
+  n, err := w.fp.WriteAt(p, off)
+  if err != nil {
+    return n, err
+  }
+
+  // accumulate the number of bytes written so far and report progress to the callback
+  atomic.AddInt64(&w.written, int64(n))
+
+  if w.fn != nil {
+    written := w.written
+    if err := w.fn(written, float32(written*100)/float32(w.size)); err != nil {
+      return n, err
+    }
+  }
+
+  return n, err
+}
+
+func getFileSize(svc s3iface.S3API, bucket string, key string) (filesize int64, err error) {
+  params := &s3.HeadObjectInput{
+    Bucket: aws.String(bucket),
+    Key:    aws.String(key),
+  }
+
+  resp, err := svc.HeadObject(params)
+  if err != nil {
+    return 0, err
+  }
+
+  return *resp.ContentLength, nil
+}
diff --git a/weed/storage/backend/s3_backend/s3_sessions.go b/weed/storage/backend/s3_backend/s3_sessions.go
new file mode 100644
index 000000000..5fdbcb66b
--- /dev/null
+++ b/weed/storage/backend/s3_backend/s3_sessions.go
@@ -0,0 +1,62 @@
+package s3_backend
+
+import (
+  "fmt"
+  "sync"
+
+  "github.com/aws/aws-sdk-go/aws"
+  "github.com/aws/aws-sdk-go/aws/credentials"
+  "github.com/aws/aws-sdk-go/aws/session"
+  "github.com/aws/aws-sdk-go/service/s3"
+  "github.com/aws/aws-sdk-go/service/s3/s3iface"
+)
+
+var (
+  s3Sessions   = make(map[string]s3iface.S3API)
+  sessionsLock sync.RWMutex
+)
+
+func getSession(region string) (s3iface.S3API, bool) {
+  sessionsLock.RLock()
+  defer sessionsLock.RUnlock()
+
+  sess, found := s3Sessions[region]
+  return sess, found
+}
+
+func createSession(awsAccessKeyId, awsSecretAccessKey, region string) (s3iface.S3API, error) {
+
+  sessionsLock.Lock()
+  defer sessionsLock.Unlock()
+
+  if t, found := s3Sessions[region]; found {
+    return t, nil
+  }
+
+  config := &aws.Config{
+    Region: aws.String(region),
+  }
+  // if no static credentials are configured, fall back to the default AWS credential chain
+  if awsAccessKeyId != "" && awsSecretAccessKey != "" {
+    config.Credentials = credentials.NewStaticCredentials(awsAccessKeyId, awsSecretAccessKey, "")
+  }
+
+  sess, err := session.NewSession(config)
+  if err != nil {
+    return nil, fmt.Errorf("create aws session in region %s: %v", region, err)
+  }
+
+  t := s3.New(sess)
+
+  s3Sessions[region] = t
+
+  return t, nil
+
+}
+
+func deleteFromS3(sess s3iface.S3API, sourceBucket string, sourceKey string) (err error) {
+  _, err = sess.DeleteObject(&s3.DeleteObjectInput{
+    Bucket: aws.String(sourceBucket),
+    Key:    aws.String(sourceKey),
+  })
+  return err
+}
diff --git a/weed/storage/backend/s3_backend/s3_upload.go b/weed/storage/backend/s3_backend/s3_upload.go
new file mode 100644
index 000000000..500a85590
--- /dev/null
+++ b/weed/storage/backend/s3_backend/s3_upload.go
@@ -0,0 +1,114 @@
+package s3_backend
+
+import (
+  "fmt"
+  "os"
+  "sync/atomic"
+
+  "github.com/aws/aws-sdk-go/aws"
+  "github.com/aws/aws-sdk-go/service/s3/s3iface"
+  "github.com/aws/aws-sdk-go/service/s3/s3manager"
+
+  "github.com/chrislusf/seaweedfs/weed/glog"
+)
+
+func uploadToS3(sess s3iface.S3API, filename string, destBucket string, destKey string,
+  attributes map[string]string,
+  fn func(progressed int64, percentage float32) error) (fileSize int64, err error) {
+
+  // open the source file
+  f, err := os.Open(filename)
+  if err != nil {
+    return 0, fmt.Errorf("failed to open file %q, %v", filename, err)
+  }
+  defer f.Close()
+
+  info, err := f.Stat()
+  if err != nil {
+    return 0, fmt.Errorf("failed to stat file %q, %v", filename, err)
+  }
+
+  fileSize = info.Size()
+
+  // start at 64MB parts and grow the part size to keep the part count near 1,000
+  // (S3 requires parts of at least 5MB and allows at most 10,000 parts per upload)
+  partSize := int64(64 * 1024 * 1024)
+  for partSize*1000 < fileSize {
+    partSize *= 4
+  }
+
+  // Create an uploader with the session and custom options
+  uploader := s3manager.NewUploaderWithClient(sess, func(u *s3manager.Uploader) {
+    u.PartSize = partSize
+    u.Concurrency = 5
+  })
+
+  fileReader := &s3UploadProgressedReader{
+    fp:   f,
+    size: fileSize,
+    read: -fileSize,
+    fn:   fn,
+  }
+
+  // process tagging
+  tags := ""
+  for k, v := range attributes {
+    if len(tags) > 0 {
+      tags = tags + "&"
+    }
+    tags = tags + k + "=" + v
+  }
+
+  // Upload the file to S3.
+  var result *s3manager.UploadOutput
+  result, err = uploader.Upload(&s3manager.UploadInput{
+    Bucket:               aws.String(destBucket),
+    Key:                  aws.String(destKey),
+    Body:                 fileReader,
+    ACL:                  aws.String("private"),
+    ServerSideEncryption: aws.String("AES256"),
+    StorageClass:         aws.String("STANDARD_IA"),
+    Tagging:              aws.String(tags),
+  })
+
+  // the upload failed; surface the error
+  if err != nil {
+    return 0, fmt.Errorf("failed to upload file %s: %v", filename, err)
+  }
+  glog.V(1).Infof("file %s uploaded to %s", filename, result.Location)
+
+  return
+}
+
+// adapted from https://github.com/aws/aws-sdk-go/pull/1868
+type s3UploadProgressedReader struct {
+  fp   *os.File
+  size int64
+  read int64
+  fn   func(progressed int64, percentage float32) error
+}
+
+func (r *s3UploadProgressedReader) Read(p []byte) (int, error) {
+  return r.fp.Read(p)
+}
+
+func (r *s3UploadProgressedReader) ReadAt(p []byte, off int64) (int, error) {
+  n, err := r.fp.ReadAt(p, off)
+  if err != nil {
+    return n, err
+  }
+
+  // accumulate the number of bytes read so far and report progress to the callback
+  atomic.AddInt64(&r.read, int64(n))
+
+  if r.fn != nil {
+    read := r.read
+    if err := r.fn(read, float32(read*100)/float32(r.size)); err != nil {
+      return n, err
+    }
+  }
+
+  return n, err
+}
+
+func (r *s3UploadProgressedReader) Seek(offset int64, whence int) (int64, error) {
+  return r.fp.Seek(offset, whence)
+}
diff --git a/weed/storage/disk_location.go b/weed/storage/disk_location.go
index c7faa57a6..a12a68cbc 100644
--- a/weed/storage/disk_location.go
+++ b/weed/storage/disk_location.go
@@ -1,13 +1,12 @@
 package storage
 
 import (
+	"fmt"
 	"io/ioutil"
 	"os"
 	"strings"
 	"sync"
 
-	"fmt"
-
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
 	"github.com/chrislusf/seaweedfs/weed/storage/needle"
@@ -17,7 +16,7 @@ type DiskLocation struct {
 	Directory      string
 	MaxVolumeCount int
 	volumes        map[needle.VolumeId]*Volume
-	sync.RWMutex
+	volumesLock    sync.RWMutex
 
 	// erasure coding
 	ecVolumes map[needle.VolumeId]*erasure_coding.EcVolume
@@ -33,8 +32,8 @@ func NewDiskLocation(dir string, maxVolumeCount int) *DiskLocation {
 
 func (l *DiskLocation) volumeIdFromPath(dir os.FileInfo) (needle.VolumeId, string, error) {
 	name := dir.Name()
-	if !dir.IsDir() && strings.HasSuffix(name, ".dat") {
-		base := name[:len(name)-len(".dat")]
+	if !dir.IsDir() && strings.HasSuffix(name, ".idx") {
+		base := name[:len(name)-len(".idx")]
 		collection, volumeId, err :=
parseCollectionVolumeId(base) return volumeId, collection, err } @@ -53,17 +52,17 @@ func parseCollectionVolumeId(base string) (collection string, vid needle.VolumeI func (l *DiskLocation) loadExistingVolume(fileInfo os.FileInfo, needleMapKind NeedleMapType) { name := fileInfo.Name() - if !fileInfo.IsDir() && strings.HasSuffix(name, ".dat") { + if !fileInfo.IsDir() && strings.HasSuffix(name, ".idx") { vid, collection, err := l.volumeIdFromPath(fileInfo) if err == nil { - l.RLock() + l.volumesLock.RLock() _, found := l.volumes[vid] - l.RUnlock() + l.volumesLock.RUnlock() if !found { if v, e := NewVolume(l.Directory, collection, vid, needleMapKind, nil, nil, 0, 0); e == nil { - l.Lock() + l.volumesLock.Lock() l.volumes[vid] = v - l.Unlock() + l.volumesLock.Unlock() size, _, _ := v.FileStat() glog.V(0).Infof("data file %s, replicaPlacement=%s v=%d size=%d ttl=%s", l.Directory+"/"+name, v.ReplicaPlacement, v.Version(), size, v.Ttl.String()) @@ -71,7 +70,6 @@ func (l *DiskLocation) loadExistingVolume(fileInfo os.FileInfo, needleMapKind Ne } else { glog.V(0).Infof("new volume %s error %s", name, e) } - } } } @@ -115,29 +113,46 @@ func (l *DiskLocation) loadExistingVolumes(needleMapKind NeedleMapType) { func (l *DiskLocation) DeleteCollectionFromDiskLocation(collection string) (e error) { - l.Lock() - for k, v := range l.volumes { - if v.Collection == collection { - e = l.deleteVolumeById(k) - if e != nil { - l.Unlock() - return - } - } - } - l.Unlock() + l.volumesLock.Lock() + delVolsMap := l.unmountVolumeByCollection(collection) + l.volumesLock.Unlock() l.ecVolumesLock.Lock() - for k, v := range l.ecVolumes { - if v.Collection == collection { - e = l.deleteEcVolumeById(k) - if e != nil { - l.ecVolumesLock.Unlock() - return + delEcVolsMap := l.unmountEcVolumeByCollection(collection) + l.ecVolumesLock.Unlock() + + errChain := make(chan error, 2) + var wg sync.WaitGroup + wg.Add(2) + go func() { + for _, v := range delVolsMap { + if err := v.Destroy(); err != nil { + errChain <- err } } + wg.Done() + }() + + go func() { + for _, v := range delEcVolsMap { + v.Destroy() + } + wg.Done() + }() + + go func() { + wg.Wait() + close(errChain) + }() + + errBuilder := strings.Builder{} + for err := range errChain { + errBuilder.WriteString(err.Error()) + errBuilder.WriteString("; ") + } + if errBuilder.Len() > 0 { + e = fmt.Errorf(errBuilder.String()) } - l.ecVolumesLock.Unlock() return } @@ -156,22 +171,16 @@ func (l *DiskLocation) deleteVolumeById(vid needle.VolumeId) (e error) { } func (l *DiskLocation) LoadVolume(vid needle.VolumeId, needleMapKind NeedleMapType) bool { - if fileInfos, err := ioutil.ReadDir(l.Directory); err == nil { - for _, fileInfo := range fileInfos { - volId, _, err := l.volumeIdFromPath(fileInfo) - if vid == volId && err == nil { - l.loadExistingVolume(fileInfo, needleMapKind) - return true - } - } + if fileInfo, found := l.LocateVolume(vid); found { + l.loadExistingVolume(fileInfo, needleMapKind) + return true } - return false } func (l *DiskLocation) DeleteVolume(vid needle.VolumeId) error { - l.Lock() - defer l.Unlock() + l.volumesLock.Lock() + defer l.volumesLock.Unlock() _, ok := l.volumes[vid] if !ok { @@ -181,8 +190,8 @@ func (l *DiskLocation) DeleteVolume(vid needle.VolumeId) error { } func (l *DiskLocation) UnloadVolume(vid needle.VolumeId) error { - l.Lock() - defer l.Unlock() + l.volumesLock.Lock() + defer l.volumesLock.Unlock() v, ok := l.volumes[vid] if !ok { @@ -193,34 +202,48 @@ func (l *DiskLocation) UnloadVolume(vid needle.VolumeId) error { return nil } +func (l 
*DiskLocation) unmountVolumeByCollection(collectionName string) map[needle.VolumeId]*Volume { + deltaVols := make(map[needle.VolumeId]*Volume, 0) + for k, v := range l.volumes { + if v.Collection == collectionName && !v.isCompacting { + deltaVols[k] = v + } + } + + for k := range deltaVols { + delete(l.volumes, k) + } + return deltaVols +} + func (l *DiskLocation) SetVolume(vid needle.VolumeId, volume *Volume) { - l.Lock() - defer l.Unlock() + l.volumesLock.Lock() + defer l.volumesLock.Unlock() l.volumes[vid] = volume } func (l *DiskLocation) FindVolume(vid needle.VolumeId) (*Volume, bool) { - l.RLock() - defer l.RUnlock() + l.volumesLock.RLock() + defer l.volumesLock.RUnlock() v, ok := l.volumes[vid] return v, ok } func (l *DiskLocation) VolumesLen() int { - l.RLock() - defer l.RUnlock() + l.volumesLock.RLock() + defer l.volumesLock.RUnlock() return len(l.volumes) } func (l *DiskLocation) Close() { - l.Lock() + l.volumesLock.Lock() for _, v := range l.volumes { v.Close() } - l.Unlock() + l.volumesLock.Unlock() l.ecVolumesLock.Lock() for _, ecVolume := range l.ecVolumes { @@ -230,3 +253,16 @@ func (l *DiskLocation) Close() { return } + +func (l *DiskLocation) LocateVolume(vid needle.VolumeId) (os.FileInfo, bool) { + if fileInfos, err := ioutil.ReadDir(l.Directory); err == nil { + for _, fileInfo := range fileInfos { + volId, _, err := l.volumeIdFromPath(fileInfo) + if vid == volId && err == nil { + return fileInfo, true + } + } + } + + return nil, false +} diff --git a/weed/storage/disk_location_ec.go b/weed/storage/disk_location_ec.go index ba0824c6d..f6c44e966 100644 --- a/weed/storage/disk_location_ec.go +++ b/weed/storage/disk_location_ec.go @@ -169,3 +169,17 @@ func (l *DiskLocation) deleteEcVolumeById(vid needle.VolumeId) (e error) { delete(l.ecVolumes, vid) return } + +func (l *DiskLocation) unmountEcVolumeByCollection(collectionName string) map[needle.VolumeId]*erasure_coding.EcVolume { + deltaVols := make(map[needle.VolumeId]*erasure_coding.EcVolume, 0) + for k, v := range l.ecVolumes { + if v.Collection == collectionName { + deltaVols[k] = v + } + } + + for k, _ := range deltaVols { + delete(l.ecVolumes, k) + } + return deltaVols +} diff --git a/weed/storage/erasure_coding/ec_decoder.go b/weed/storage/erasure_coding/ec_decoder.go new file mode 100644 index 000000000..ae77cee3f --- /dev/null +++ b/weed/storage/erasure_coding/ec_decoder.go @@ -0,0 +1,198 @@ +package erasure_coding + +import ( + "fmt" + "io" + "os" + + "github.com/chrislusf/seaweedfs/weed/storage/backend" + "github.com/chrislusf/seaweedfs/weed/storage/idx" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/needle_map" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" + "github.com/chrislusf/seaweedfs/weed/storage/types" +) + +// write .idx file from .ecx and .ecj files +func WriteIdxFileFromEcIndex(baseFileName string) (err error) { + + ecxFile, openErr := os.OpenFile(baseFileName+".ecx", os.O_RDONLY, 0644) + if openErr != nil { + return fmt.Errorf("cannot open ec index %s.ecx: %v", baseFileName, openErr) + } + defer ecxFile.Close() + + idxFile, openErr := os.OpenFile(baseFileName+".idx", os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) + if openErr != nil { + return fmt.Errorf("cannot open %s.idx: %v", baseFileName, openErr) + } + defer idxFile.Close() + + io.Copy(idxFile, ecxFile) + + err = iterateEcjFile(baseFileName, func(key types.NeedleId) error { + + bytes := needle_map.ToBytes(key, types.Offset{}, types.TombstoneFileSize) + idxFile.Write(bytes) + + return 
nil
+  })
+
+  return err
+}
+
+// FindDatFileSize calculates the .dat file size from the entry with the max offset;
+// there may be extra deletion entries after that one, but deletions do not grow the file
+func FindDatFileSize(baseFileName string) (datSize int64, err error) {
+
+  version, err := readEcVolumeVersion(baseFileName)
+  if err != nil {
+    return 0, fmt.Errorf("read ec volume %s version: %v", baseFileName, err)
+  }
+
+  err = iterateEcxFile(baseFileName, func(key types.NeedleId, offset types.Offset, size uint32) error {
+
+    if size == types.TombstoneFileSize {
+      return nil
+    }
+
+    entryStopOffset := offset.ToAcutalOffset() + needle.GetActualSize(size, version)
+    if datSize < entryStopOffset {
+      datSize = entryStopOffset
+    }
+
+    return nil
+  })
+
+  return
+}
+
+func readEcVolumeVersion(baseFileName string) (version needle.Version, err error) {
+
+  // find volume version
+  datFile, err := os.OpenFile(baseFileName+".ec00", os.O_RDONLY, 0644)
+  if err != nil {
+    return 0, fmt.Errorf("open ec volume %s superblock: %v", baseFileName, err)
+  }
+  datBackend := backend.NewDiskFile(datFile)
+
+  superBlock, err := super_block.ReadSuperBlock(datBackend)
+  datBackend.Close()
+  if err != nil {
+    return 0, fmt.Errorf("read ec volume %s superblock: %v", baseFileName, err)
+  }
+
+  return superBlock.Version, nil
+
+}
+
+func iterateEcxFile(baseFileName string, processNeedleFn func(key types.NeedleId, offset types.Offset, size uint32) error) error {
+  ecxFile, openErr := os.OpenFile(baseFileName+".ecx", os.O_RDONLY, 0644)
+  if openErr != nil {
+    return fmt.Errorf("cannot open ec index %s.ecx: %v", baseFileName, openErr)
+  }
+  defer ecxFile.Close()
+
+  buf := make([]byte, types.NeedleMapEntrySize)
+  for {
+    n, err := ecxFile.Read(buf)
+    if n != types.NeedleMapEntrySize {
+      if err == io.EOF {
+        return nil
+      }
+      return err
+    }
+    key, offset, size := idx.IdxFileEntry(buf)
+    if processNeedleFn != nil {
+      err = processNeedleFn(key, offset, size)
+    }
+    if err != nil {
+      if err != io.EOF {
+        return err
+      }
+      return nil
+    }
+  }
+
+}
+
+func iterateEcjFile(baseFileName string, processNeedleFn func(key types.NeedleId) error) error {
+  ecjFile, openErr := os.OpenFile(baseFileName+".ecj", os.O_RDONLY, 0644)
+  if openErr != nil {
+    return fmt.Errorf("cannot open ec journal %s.ecj: %v", baseFileName, openErr)
+  }
+  defer ecjFile.Close()
+
+  buf := make([]byte, types.NeedleIdSize)
+  for {
+    n, err := ecjFile.Read(buf)
+    if n != types.NeedleIdSize {
+      if err == io.EOF {
+        return nil
+      }
+      return err
+    }
+    if processNeedleFn != nil {
+      err = processNeedleFn(types.BytesToNeedleId(buf))
+    }
+    if err != nil {
+      if err == io.EOF {
+        return nil
+      }
+      return err
+    }
+  }
+
+}
+
+// WriteDatFile generates the .dat file from the .ec00 ~ .ec09 data shard files
+func WriteDatFile(baseFileName string, datFileSize int64) error {
+
+  datFile, openErr := os.OpenFile(baseFileName+".dat", os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
+  if openErr != nil {
+    return fmt.Errorf("cannot write volume %s.dat: %v", baseFileName, openErr)
+  }
+  defer datFile.Close()
+
+  inputFiles := make([]*os.File, DataShardsCount)
+
+  for shardId := 0; shardId < DataShardsCount; shardId++ {
+    shardFileName := baseFileName + ToExt(shardId)
+    inputFiles[shardId], openErr = os.OpenFile(shardFileName, os.O_RDONLY, 0)
+    if openErr != nil {
+      return openErr
+    }
+    defer inputFiles[shardId].Close()
+  }
+
+  for datFileSize >= DataShardsCount*ErasureCodingLargeBlockSize {
+    for shardId := 0; shardId < DataShardsCount; shardId++ {
+      w, err := io.CopyN(datFile, inputFiles[shardId], ErasureCodingLargeBlockSize)
+      if w
!= ErasureCodingLargeBlockSize { + return fmt.Errorf("copy %s large block %d: %v", baseFileName, shardId, err) + } + datFileSize -= ErasureCodingLargeBlockSize + } + } + + for datFileSize > 0 { + for shardId := 0; shardId < DataShardsCount; shardId++ { + toRead := min(datFileSize, ErasureCodingSmallBlockSize) + w, err := io.CopyN(datFile, inputFiles[shardId], toRead) + if w != toRead { + return fmt.Errorf("copy %s small block %d: %v", baseFileName, shardId, err) + } + datFileSize -= toRead + } + } + + return nil +} + +func min(x, y int64) int64 { + if x > y { + return y + } + return x +} diff --git a/weed/storage/erasure_coding/ec_encoder.go b/weed/storage/erasure_coding/ec_encoder.go index 97010a1ed..9e2edf57d 100644 --- a/weed/storage/erasure_coding/ec_encoder.go +++ b/weed/storage/erasure_coding/ec_encoder.go @@ -5,12 +5,13 @@ import ( "io" "os" + "github.com/klauspost/reedsolomon" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/storage/idx" "github.com/chrislusf/seaweedfs/weed/storage/needle_map" "github.com/chrislusf/seaweedfs/weed/storage/types" "github.com/chrislusf/seaweedfs/weed/util" - "github.com/klauspost/reedsolomon" ) const ( @@ -21,35 +22,35 @@ const ( ErasureCodingSmallBlockSize = 1024 * 1024 // 1MB ) -// WriteSortedEcxFile generates .ecx file from existing .idx file +// WriteSortedFileFromIdx generates .ecx file from existing .idx file // all keys are sorted in ascending order -func WriteSortedEcxFile(baseFileName string) (e error) { +func WriteSortedFileFromIdx(baseFileName string, ext string) (e error) { - cm, err := readCompactMap(baseFileName) + nm, err := readNeedleMap(baseFileName) if err != nil { - return fmt.Errorf("readCompactMap: %v", err) + return fmt.Errorf("readNeedleMap: %v", err) } - ecxFile, err := os.OpenFile(baseFileName+".ecx", os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644) + ecxFile, err := os.OpenFile(baseFileName+ext, os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644) if err != nil { return fmt.Errorf("failed to open ecx file: %v", err) } defer ecxFile.Close() - err = cm.AscendingVisit(func(value needle_map.NeedleValue) error { + err = nm.AscendingVisit(func(value needle_map.NeedleValue) error { bytes := value.ToBytes() _, writeErr := ecxFile.Write(bytes) return writeErr }) if err != nil { - return fmt.Errorf("failed to visit ecx file: %v", err) + return fmt.Errorf("failed to visit idx file: %v", err) } return nil } -// WriteEcFiles generates .ec01 ~ .ec14 files +// WriteEcFiles generates .ec00 ~ .ec13 files func WriteEcFiles(baseFileName string) error { return generateEcFiles(baseFileName, 256*1024, ErasureCodingLargeBlockSize, ErasureCodingSmallBlockSize) } @@ -280,14 +281,14 @@ func rebuildEcFiles(shardHasData []bool, inputFiles []*os.File, outputFiles []*o } -func readCompactMap(baseFileName string) (*needle_map.CompactMap, error) { +func readNeedleMap(baseFileName string) (*needle_map.MemDb, error) { indexFile, err := os.OpenFile(baseFileName+".idx", os.O_RDONLY, 0644) if err != nil { return nil, fmt.Errorf("cannot read Volume Index %s.idx: %v", baseFileName, err) } defer indexFile.Close() - cm := needle_map.NewCompactMap() + cm := needle_map.NewMemDb() err = idx.WalkIndexFile(indexFile, func(key types.NeedleId, offset types.Offset, size uint32) error { if !offset.IsZero() && size != types.TombstoneFileSize { cm.Set(key, offset, size) diff --git a/weed/storage/erasure_coding/ec_test.go b/weed/storage/erasure_coding/ec_test.go index 57df09525..b2c94cfd7 100644 --- a/weed/storage/erasure_coding/ec_test.go +++ 
b/weed/storage/erasure_coding/ec_test.go @@ -26,14 +26,14 @@ func TestEncodingDecoding(t *testing.T) { t.Logf("generateEcFiles: %v", err) } - err = WriteSortedEcxFile(baseFileName) + err = WriteSortedFileFromIdx(baseFileName, ".ecx") if err != nil { - t.Logf("WriteSortedEcxFile: %v", err) + t.Logf("WriteSortedFileFromIdx: %v", err) } err = validateFiles(baseFileName) if err != nil { - t.Logf("WriteSortedEcxFile: %v", err) + t.Logf("WriteSortedFileFromIdx: %v", err) } removeGeneratedFiles(baseFileName) @@ -41,9 +41,9 @@ func TestEncodingDecoding(t *testing.T) { } func validateFiles(baseFileName string) error { - cm, err := readCompactMap(baseFileName) + nm, err := readNeedleMap(baseFileName) if err != nil { - return fmt.Errorf("readCompactMap: %v", err) + return fmt.Errorf("readNeedleMap: %v", err) } datFile, err := os.OpenFile(baseFileName+".dat", os.O_RDONLY, 0) @@ -60,7 +60,7 @@ func validateFiles(baseFileName string) error { ecFiles, err := openEcFiles(baseFileName, true) defer closeEcFiles(ecFiles) - err = cm.AscendingVisit(func(value needle_map.NeedleValue) error { + err = nm.AscendingVisit(func(value needle_map.NeedleValue) error { return assertSame(datFile, fi.Size(), ecFiles, value.Offset, value.Size) }) if err != nil { diff --git a/weed/storage/erasure_coding/ec_volume.go b/weed/storage/erasure_coding/ec_volume.go index bcae164ca..3d9aa2cff 100644 --- a/weed/storage/erasure_coding/ec_volume.go +++ b/weed/storage/erasure_coding/ec_volume.go @@ -9,7 +9,9 @@ import ( "sync" "time" + "github.com/chrislusf/seaweedfs/weed/pb" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" "github.com/chrislusf/seaweedfs/weed/storage/idx" "github.com/chrislusf/seaweedfs/weed/storage/needle" "github.com/chrislusf/seaweedfs/weed/storage/types" @@ -56,6 +58,14 @@ func NewEcVolume(dir string, collection string, vid needle.VolumeId) (ev *EcVolu return nil, fmt.Errorf("cannot open ec volume journal %s.ecj: %v", baseFileName, err) } + // read volume info + ev.Version = needle.Version3 + if volumeInfo, found, _ := pb.MaybeLoadVolumeInfo(baseFileName + ".vif"); found { + ev.Version = needle.Version(volumeInfo.Version) + } else { + pb.SaveVolumeInfo(baseFileName+".vif", &volume_server_pb.VolumeInfo{Version: uint32(ev.Version)}) + } + ev.ShardLocations = make(map[ShardId][]string) return @@ -126,6 +136,7 @@ func (ev *EcVolume) Destroy() { } os.Remove(ev.FileName() + ".ecx") os.Remove(ev.FileName() + ".ecj") + os.Remove(ev.FileName() + ".vif") } func (ev *EcVolume) FileName() string { @@ -186,10 +197,10 @@ func (ev *EcVolume) LocateEcShardNeedle(needleId types.NeedleId, version needle. 
} func (ev *EcVolume) FindNeedleFromEcx(needleId types.NeedleId) (offset types.Offset, size uint32, err error) { - return searchNeedleFromEcx(ev.ecxFile, ev.ecxFileSize, needleId, nil) + return SearchNeedleFromSortedIndex(ev.ecxFile, ev.ecxFileSize, needleId, nil) } -func searchNeedleFromEcx(ecxFile *os.File, ecxFileSize int64, needleId types.NeedleId, processNeedleFn func(file *os.File, offset int64) error) (offset types.Offset, size uint32, err error) { +func SearchNeedleFromSortedIndex(ecxFile *os.File, ecxFileSize int64, needleId types.NeedleId, processNeedleFn func(file *os.File, offset int64) error) (offset types.Offset, size uint32, err error) { var key types.NeedleId buf := make([]byte, types.NeedleMapEntrySize) l, h := int64(0), ecxFileSize/types.NeedleMapEntrySize diff --git a/weed/storage/erasure_coding/ec_volume_delete.go b/weed/storage/erasure_coding/ec_volume_delete.go index 04102ec9e..822a9e923 100644 --- a/weed/storage/erasure_coding/ec_volume_delete.go +++ b/weed/storage/erasure_coding/ec_volume_delete.go @@ -10,15 +10,15 @@ import ( ) var ( - markNeedleDeleted = func(file *os.File, offset int64) error { + MarkNeedleDeleted = func(file *os.File, offset int64) error { b := make([]byte, types.SizeSize) util.Uint32toBytes(b, types.TombstoneFileSize) n, err := file.WriteAt(b, offset+types.NeedleIdSize+types.OffsetSize) if err != nil { - return fmt.Errorf("ecx write error: %v", err) + return fmt.Errorf("sorted needle write error: %v", err) } if n != types.SizeSize { - return fmt.Errorf("ecx written %d bytes, expecting %d", n, types.SizeSize) + return fmt.Errorf("sorted needle written %d bytes, expecting %d", n, types.SizeSize) } return nil } @@ -26,7 +26,7 @@ var ( func (ev *EcVolume) DeleteNeedleFromEcx(needleId types.NeedleId) (err error) { - _, _, err = searchNeedleFromEcx(ev.ecxFile, ev.ecxFileSize, needleId, markNeedleDeleted) + _, _, err = SearchNeedleFromSortedIndex(ev.ecxFile, ev.ecxFileSize, needleId, MarkNeedleDeleted) if err != nil { if err == NotFoundError { @@ -81,7 +81,7 @@ func RebuildEcxFile(baseFileName string) error { needleId := types.BytesToNeedleId(buf) - _, _, err = searchNeedleFromEcx(ecxFile, ecxFileSize, needleId, markNeedleDeleted) + _, _, err = SearchNeedleFromSortedIndex(ecxFile, ecxFileSize, needleId, MarkNeedleDeleted) if err != nil && err != NotFoundError { ecxFile.Close() diff --git a/weed/storage/erasure_coding/ec_volume_info.go b/weed/storage/erasure_coding/ec_volume_info.go index c9e85c662..8ff65bb0f 100644 --- a/weed/storage/erasure_coding/ec_volume_info.go +++ b/weed/storage/erasure_coding/ec_volume_info.go @@ -81,6 +81,15 @@ func (b ShardBits) ShardIds() (ret []ShardId) { return } +func (b ShardBits) ToUint32Slice() (ret []uint32) { + for i := uint32(0); i < TotalShardsCount; i++ { + if b.HasShardId(ShardId(i)) { + ret = append(ret, i) + } + } + return +} + func (b ShardBits) ShardIdCount() (count int) { for count = 0; b > 0; count++ { b &= b - 1 @@ -95,3 +104,10 @@ func (b ShardBits) Minus(other ShardBits) ShardBits { func (b ShardBits) Plus(other ShardBits) ShardBits { return b | other } + +func (b ShardBits) MinusParityShards() ShardBits { + for i := DataShardsCount; i < TotalShardsCount; i++ { + b = b.RemoveShardId(ShardId(i)) + } + return b +} diff --git a/weed/storage/needle/needle.go b/weed/storage/needle/needle.go index 2f03ba87b..494cc138e 100644 --- a/weed/storage/needle/needle.go +++ b/weed/storage/needle/needle.go @@ -3,13 +3,13 @@ package needle import ( "encoding/json" "fmt" + "io" + "io/ioutil" "net/http" "strconv" "strings" 
"time" - "io/ioutil" - "github.com/chrislusf/seaweedfs/weed/images" . "github.com/chrislusf/seaweedfs/weed/storage/types" ) @@ -50,7 +50,7 @@ func (n *Needle) String() (str string) { return } -func ParseUpload(r *http.Request) ( +func ParseUpload(r *http.Request, sizeLimit int64) ( fileName string, data []byte, mimeType string, pairMap map[string]string, isGzipped bool, originalDataSize int, modifiedTime uint64, ttl *TTL, isChunkedFile bool, e error) { pairMap = make(map[string]string) @@ -61,13 +61,17 @@ func ParseUpload(r *http.Request) ( } if r.Method == "POST" { - fileName, data, mimeType, isGzipped, originalDataSize, isChunkedFile, e = parseMultipart(r) + fileName, data, mimeType, isGzipped, originalDataSize, isChunkedFile, e = parseMultipart(r, sizeLimit) } else { isGzipped = false mimeType = r.Header.Get("Content-Type") fileName = "" - data, e = ioutil.ReadAll(r.Body) + data, e = ioutil.ReadAll(io.LimitReader(r.Body, sizeLimit+1)) originalDataSize = len(data) + if e == io.EOF || int64(originalDataSize) == sizeLimit+1 { + io.Copy(ioutil.Discard, r.Body) + } + r.Body.Close() } if e != nil { return @@ -78,11 +82,11 @@ func ParseUpload(r *http.Request) ( return } -func CreateNeedleFromRequest(r *http.Request, fixJpgOrientation bool) (n *Needle, originalSize int, e error) { +func CreateNeedleFromRequest(r *http.Request, fixJpgOrientation bool, sizeLimit int64) (n *Needle, originalSize int, e error) { var pairMap map[string]string fname, mimeType, isGzipped, isChunkedFile := "", "", false, false n = new(Needle) - fname, n.Data, mimeType, pairMap, isGzipped, originalSize, n.LastModified, n.Ttl, isChunkedFile, e = ParseUpload(r) + fname, n.Data, mimeType, pairMap, isGzipped, originalSize, n.LastModified, n.Ttl, isChunkedFile, e = ParseUpload(r, sizeLimit) if e != nil { return } diff --git a/weed/storage/needle/needle_parse_multipart.go b/weed/storage/needle/needle_parse_multipart.go index 8be1a1da4..8c9032f5f 100644 --- a/weed/storage/needle/needle_parse_multipart.go +++ b/weed/storage/needle/needle_parse_multipart.go @@ -1,9 +1,7 @@ package needle import ( - "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/util" - + "fmt" "io" "io/ioutil" "mime" @@ -11,9 +9,12 @@ import ( "path" "strconv" "strings" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/util" ) -func parseMultipart(r *http.Request) ( +func parseMultipart(r *http.Request, sizeLimit int64) ( fileName string, data []byte, mimeType string, isGzipped bool, originalDataSize int, isChunkedFile bool, e error) { defer func() { if e != nil && r.Body != nil { @@ -41,11 +42,15 @@ func parseMultipart(r *http.Request) ( fileName = path.Base(fileName) } - data, e = ioutil.ReadAll(part) + data, e = ioutil.ReadAll(io.LimitReader(part, sizeLimit+1)) if e != nil { glog.V(0).Infoln("Reading Content [ERROR]", e) return } + if len(data) == int(sizeLimit)+1 { + e = fmt.Errorf("file over the limited %d bytes", sizeLimit) + return + } //if the filename is empty string, do a search on the other multi-part items for fileName == "" { @@ -58,12 +63,16 @@ func parseMultipart(r *http.Request) ( //found the first multi-part has filename if fName != "" { - data2, fe2 := ioutil.ReadAll(part2) + data2, fe2 := ioutil.ReadAll(io.LimitReader(part2, sizeLimit+1)) if fe2 != nil { glog.V(0).Infoln("Reading Content [ERROR]", fe2) e = fe2 return } + if len(data) == int(sizeLimit)+1 { + e = fmt.Errorf("file over the limited %d bytes", sizeLimit) + return + } //update data = data2 diff --git 
a/weed/storage/needle/needle_read_write.go b/weed/storage/needle/needle_read_write.go index 04308a8a7..7f8aa4823 100644 --- a/weed/storage/needle/needle_read_write.go +++ b/weed/storage/needle/needle_read_write.go @@ -4,12 +4,10 @@ import ( "errors" "fmt" "io" - "os" - "math" "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/storage/backend/memory_map" + "github.com/chrislusf/seaweedfs/weed/storage/backend" . "github.com/chrislusf/seaweedfs/weed/storage/types" "github.com/chrislusf/seaweedfs/weed/util" ) @@ -127,53 +125,39 @@ func (n *Needle) prepareWriteBuffer(version Version) ([]byte, uint32, int64, err return writeBytes, 0, 0, fmt.Errorf("Unsupported Version! (%d)", version) } -func (n *Needle) Append(w *os.File, version Version) (offset uint64, size uint32, actualSize int64, err error) { +func (n *Needle) Append(w backend.BackendStorageFile, version Version) (offset uint64, size uint32, actualSize int64, err error) { - mMap, exists := memory_map.FileMemoryMap[w.Name()] - if !exists { - if end, e := w.Seek(0, io.SeekEnd); e == nil { - defer func(w *os.File, off int64) { - if err != nil { - if te := w.Truncate(end); te != nil { - glog.V(0).Infof("Failed to truncate %s back to %d with error: %v", w.Name(), end, te) - } + if end, _, e := w.GetStat(); e == nil { + defer func(w backend.BackendStorageFile, off int64) { + if err != nil { + if te := w.Truncate(end); te != nil { + glog.V(0).Infof("Failed to truncate %s back to %d with error: %v", w.Name(), end, te) } - }(w, end) - offset = uint64(end) - } else { - err = fmt.Errorf("Cannot Read Current Volume Position: %v", e) - return - } + } + }(w, end) + offset = uint64(end) } else { - offset = uint64(mMap.End_of_file + 1) + err = fmt.Errorf("Cannot Read Current Volume Position: %v", e) + return } bytesToWrite, size, actualSize, err := n.prepareWriteBuffer(version) if err == nil { - if exists { - mMap.WriteMemory(offset, uint64(len(bytesToWrite)), bytesToWrite) - } else { - _, err = w.WriteAt(bytesToWrite, int64(offset)) - } + _, err = w.WriteAt(bytesToWrite, int64(offset)) } return offset, size, actualSize, err } -func ReadNeedleBlob(r *os.File, offset int64, size uint32, version Version) (dataSlice []byte, err error) { +func ReadNeedleBlob(r backend.BackendStorageFile, offset int64, size uint32, version Version) (dataSlice []byte, err error) { dataSize := GetActualSize(size, version) dataSlice = make([]byte, int(dataSize)) - mMap, exists := memory_map.FileMemoryMap[r.Name()] - if exists { - dataSlice, err := mMap.ReadMemory(uint64(offset), uint64(dataSize)) - return dataSlice, err - } else { - _, err = r.ReadAt(dataSlice, offset) - return dataSlice, err - } + _, err = r.ReadAt(dataSlice, offset) + return dataSlice, err + } // ReadBytes hydrates the needle from the bytes buffer, with only n.Id is set. @@ -207,7 +191,7 @@ func (n *Needle) ReadBytes(bytes []byte, offset int64, size uint32, version Vers } // ReadData hydrates the needle from the file, with only n.Id is set. 
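+// (the reader is now any backend.BackendStorageFile, so this same code path can
+// serve local disk files, memory-mapped files, and remote-tier files such as S3)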
-func (n *Needle) ReadData(r *os.File, offset int64, size uint32, version Version) (err error) { +func (n *Needle) ReadData(r backend.BackendStorageFile, offset int64, size uint32, version Version) (err error) { bytes, err := ReadNeedleBlob(r, offset, size, version) if err != nil { return err @@ -282,24 +266,17 @@ func (n *Needle) readNeedleDataVersion2(bytes []byte) (err error) { return nil } -func ReadNeedleHeader(r *os.File, version Version, offset int64) (n *Needle, bytes []byte, bodyLength int64, err error) { +func ReadNeedleHeader(r backend.BackendStorageFile, version Version, offset int64) (n *Needle, bytes []byte, bodyLength int64, err error) { n = new(Needle) if version == Version1 || version == Version2 || version == Version3 { bytes = make([]byte, NeedleHeaderSize) - mMap, exists := memory_map.FileMemoryMap[r.Name()] - if exists { - bytes, err = mMap.ReadMemory(uint64(offset), NeedleHeaderSize) - if err != nil { - return nil, bytes, 0, err - } - } else { - var count int - count, err = r.ReadAt(bytes, offset) - if count <= 0 || err != nil { - return nil, bytes, 0, err - } + var count int + count, err = r.ReadAt(bytes, offset) + if count <= 0 || err != nil { + return nil, bytes, 0, err } + n.ParseNeedleHeader(bytes) bodyLength = NeedleBodyLength(n.Size, version) } @@ -324,7 +301,7 @@ func NeedleBodyLength(needleSize uint32, version Version) int64 { //n should be a needle already read the header //the input stream will read until next file entry -func (n *Needle) ReadNeedleBody(r *os.File, version Version, offset int64, bodyLength int64) (bytes []byte, err error) { +func (n *Needle) ReadNeedleBody(r backend.BackendStorageFile, version Version, offset int64, bodyLength int64) (bytes []byte, err error) { if bodyLength <= 0 { return nil, nil diff --git a/weed/storage/needle/needle_read_write_test.go b/weed/storage/needle/needle_read_write_test.go index 4c507f9e6..47582dd26 100644 --- a/weed/storage/needle/needle_read_write_test.go +++ b/weed/storage/needle/needle_read_write_test.go @@ -5,6 +5,7 @@ import ( "os" "testing" + "github.com/chrislusf/seaweedfs/weed/storage/backend" "github.com/chrislusf/seaweedfs/weed/storage/types" ) @@ -54,7 +55,10 @@ func TestAppend(t *testing.T) { os.Remove(tempFile.Name()) }() - offset, _, _, _ := n.Append(tempFile, CurrentVersion) + datBackend := backend.NewDiskFile(tempFile) + defer datBackend.Close() + + offset, _, _, _ := n.Append(datBackend, CurrentVersion) if offset != uint64(fileSize) { t.Errorf("Fail to Append Needle.") } diff --git a/weed/storage/needle/volume_ttl.go b/weed/storage/needle/volume_ttl.go index 4a169870d..179057876 100644 --- a/weed/storage/needle/volume_ttl.go +++ b/weed/storage/needle/volume_ttl.go @@ -69,6 +69,9 @@ func (t *TTL) ToBytes(output []byte) { } func (t *TTL) ToUint32() (output uint32) { + if t == nil || t.Count == 0 { + return 0 + } output = uint32(t.Count) << 8 output += uint32(t.Unit) return output diff --git a/weed/storage/needle_map/btree_map.go b/weed/storage/needle_map/btree_map.go deleted file mode 100644 index a26c5e068..000000000 --- a/weed/storage/needle_map/btree_map.go +++ /dev/null @@ -1,53 +0,0 @@ -package needle_map - -import ( - . 
"github.com/chrislusf/seaweedfs/weed/storage/types" - "github.com/google/btree" -) - -//This map assumes mostly inserting increasing keys -type BtreeMap struct { - tree *btree.BTree -} - -func NewBtreeMap() *BtreeMap { - return &BtreeMap{ - tree: btree.New(32), - } -} - -func (cm *BtreeMap) Set(key NeedleId, offset Offset, size uint32) (oldOffset Offset, oldSize uint32) { - found := cm.tree.ReplaceOrInsert(NeedleValue{key, offset, size}) - if found != nil { - old := found.(NeedleValue) - return old.Offset, old.Size - } - return -} - -func (cm *BtreeMap) Delete(key NeedleId) (oldSize uint32) { - found := cm.tree.Delete(NeedleValue{key, Offset{}, 0}) - if found != nil { - old := found.(NeedleValue) - return old.Size - } - return -} -func (cm *BtreeMap) Get(key NeedleId) (*NeedleValue, bool) { - found := cm.tree.Get(NeedleValue{key, Offset{}, 0}) - if found != nil { - old := found.(NeedleValue) - return &old, true - } - return nil, false -} - -// Visit visits all entries or stop if any error when visiting -func (cm *BtreeMap) AscendingVisit(visit func(NeedleValue) error) (ret error) { - cm.tree.Ascend(func(item btree.Item) bool { - needle := item.(NeedleValue) - ret = visit(needle) - return ret == nil - }) - return ret -} diff --git a/weed/storage/needle_map/compact_map_test.go b/weed/storage/needle_map/compact_map_test.go index 3bad85727..7eea3969a 100644 --- a/weed/storage/needle_map/compact_map_test.go +++ b/weed/storage/needle_map/compact_map_test.go @@ -8,7 +8,14 @@ import ( func TestOverflow2(t *testing.T) { m := NewCompactMap() - m.Set(NeedleId(150088), ToOffset(8), 3000073) + _, oldSize := m.Set(NeedleId(150088), ToOffset(8), 3000073) + if oldSize != 0 { + t.Fatalf("expecting no previous data") + } + _, oldSize = m.Set(NeedleId(150088), ToOffset(8), 3000073) + if oldSize != 3000073 { + t.Fatalf("expecting previous data size is %d, not %d", 3000073, oldSize) + } m.Set(NeedleId(150073), ToOffset(8), 3000073) m.Set(NeedleId(150089), ToOffset(8), 3000073) m.Set(NeedleId(150076), ToOffset(8), 3000073) diff --git a/weed/storage/needle_map/memdb.go b/weed/storage/needle_map/memdb.go new file mode 100644 index 000000000..9eb4d9f56 --- /dev/null +++ b/weed/storage/needle_map/memdb.go @@ -0,0 +1,115 @@ +package needle_map + +import ( + "fmt" + "os" + + "github.com/syndtr/goleveldb/leveldb" + "github.com/syndtr/goleveldb/leveldb/opt" + "github.com/syndtr/goleveldb/leveldb/storage" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/storage/idx" + . 
"github.com/chrislusf/seaweedfs/weed/storage/types" + "github.com/chrislusf/seaweedfs/weed/util" +) + +//This map uses in memory level db +type MemDb struct { + db *leveldb.DB +} + +func NewMemDb() *MemDb { + opts := &opt.Options{} + + var err error + t := &MemDb{} + if t.db, err = leveldb.Open(storage.NewMemStorage(), opts); err != nil { + glog.V(0).Infof("MemDb fails to open: %v", err) + return nil + } + + return t +} + +func (cm *MemDb) Set(key NeedleId, offset Offset, size uint32) error { + + bytes := ToBytes(key, offset, size) + + if err := cm.db.Put(bytes[0:NeedleIdSize], bytes[NeedleIdSize:NeedleIdSize+OffsetSize+SizeSize], nil); err != nil { + return fmt.Errorf("failed to write temp leveldb: %v", err) + } + return nil +} + +func (cm *MemDb) Delete(key NeedleId) error { + bytes := make([]byte, NeedleIdSize) + NeedleIdToBytes(bytes, key) + return cm.db.Delete(bytes, nil) + +} +func (cm *MemDb) Get(key NeedleId) (*NeedleValue, bool) { + bytes := make([]byte, NeedleIdSize) + NeedleIdToBytes(bytes[0:NeedleIdSize], key) + data, err := cm.db.Get(bytes, nil) + if err != nil || len(data) != OffsetSize+SizeSize { + return nil, false + } + offset := BytesToOffset(data[0:OffsetSize]) + size := util.BytesToUint32(data[OffsetSize : OffsetSize+SizeSize]) + return &NeedleValue{Key: key, Offset: offset, Size: size}, true +} + +// Visit visits all entries or stop if any error when visiting +func (cm *MemDb) AscendingVisit(visit func(NeedleValue) error) (ret error) { + iter := cm.db.NewIterator(nil, nil) + for iter.Next() { + key := BytesToNeedleId(iter.Key()) + data := iter.Value() + offset := BytesToOffset(data[0:OffsetSize]) + size := util.BytesToUint32(data[OffsetSize : OffsetSize+SizeSize]) + + needle := NeedleValue{Key: key, Offset: offset, Size: size} + ret = visit(needle) + if ret != nil { + return + } + } + iter.Release() + ret = iter.Error() + + return +} + +func (cm *MemDb) SaveToIdx(idxName string) (ret error) { + idxFile, err := os.OpenFile(idxName, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + return + } + defer idxFile.Close() + + return cm.AscendingVisit(func(value NeedleValue) error { + if value.Offset.IsZero() || value.Size == TombstoneFileSize { + return nil + } + _, err := idxFile.Write(value.ToBytes()) + return err + }) + +} + +func (cm *MemDb) LoadFromIdx(idxName string) (ret error) { + idxFile, err := os.OpenFile(idxName, os.O_RDONLY, 0644) + if err != nil { + return + } + defer idxFile.Close() + + return idx.WalkIndexFile(idxFile, func(key NeedleId, offset Offset, size uint32) error { + if offset.IsZero() || size == TombstoneFileSize { + return cm.Delete(key) + } + return cm.Set(key, offset, size) + }) + +} diff --git a/weed/storage/needle_map_leveldb.go b/weed/storage/needle_map_leveldb.go index ef8571e83..3bb258559 100644 --- a/weed/storage/needle_map_leveldb.go +++ b/weed/storage/needle_map_leveldb.go @@ -128,8 +128,17 @@ func (m *LevelDbNeedleMap) Delete(key NeedleId, offset Offset) error { } func (m *LevelDbNeedleMap) Close() { - m.indexFile.Close() - m.db.Close() + indexFileName := m.indexFile.Name() + if err := m.indexFile.Sync(); err != nil { + glog.Warningf("sync file %s failed: %v", indexFileName, err) + } + if err := m.indexFile.Close(); err != nil { + glog.Warningf("close index file %s failed: %v", indexFileName, err) + } + + if err := m.db.Close(); err != nil { + glog.Warningf("close levelDB failed: %v", err) + } } func (m *LevelDbNeedleMap) Destroy() error { diff --git a/weed/storage/needle_map_memory.go b/weed/storage/needle_map_memory.go index 
ee639a7e6..84197912f 100644 --- a/weed/storage/needle_map_memory.go +++ b/weed/storage/needle_map_memory.go @@ -22,24 +22,11 @@ func NewCompactNeedleMap(file *os.File) *NeedleMap { return nm } -func NewBtreeNeedleMap(file *os.File) *NeedleMap { - nm := &NeedleMap{ - m: needle_map.NewBtreeMap(), - } - nm.indexFile = file - return nm -} - func LoadCompactNeedleMap(file *os.File) (*NeedleMap, error) { nm := NewCompactNeedleMap(file) return doLoading(file, nm) } -func LoadBtreeNeedleMap(file *os.File) (*NeedleMap, error) { - nm := NewBtreeNeedleMap(file) - return doLoading(file, nm) -} - func doLoading(file *os.File, nm *NeedleMap) (*NeedleMap, error) { e := idx.WalkIndexFile(file, func(key NeedleId, offset Offset, size uint32) error { nm.MaybeSetMaxFileKey(key) @@ -47,14 +34,12 @@ func doLoading(file *os.File, nm *NeedleMap) (*NeedleMap, error) { nm.FileCounter++ nm.FileByteCounter = nm.FileByteCounter + uint64(size) oldOffset, oldSize := nm.m.Set(NeedleId(key), offset, size) - // glog.V(3).Infoln("reading key", key, "offset", offset*NeedlePaddingSize, "size", size, "oldSize", oldSize) if !oldOffset.IsZero() && oldSize != TombstoneFileSize { nm.DeletionCounter++ nm.DeletionByteCounter = nm.DeletionByteCounter + uint64(oldSize) } } else { oldSize := nm.m.Delete(NeedleId(key)) - // glog.V(3).Infoln("removing key", key, "offset", offset*NeedlePaddingSize, "size", size, "oldSize", oldSize) nm.DeletionCounter++ nm.DeletionByteCounter = nm.DeletionByteCounter + uint64(oldSize) } @@ -79,6 +64,10 @@ func (nm *NeedleMap) Delete(key NeedleId, offset Offset) error { return nm.appendToIndexFile(key, offset, TombstoneFileSize) } func (nm *NeedleMap) Close() { + indexFileName := nm.indexFile.Name() + if err := nm.indexFile.Sync(); err != nil { + glog.Warningf("sync file %s failed, %v", indexFileName, err) + } _ = nm.indexFile.Close() } func (nm *NeedleMap) Destroy() error { diff --git a/weed/storage/needle_map_metric_test.go b/weed/storage/needle_map_metric_test.go index 539f83a87..ae2177a30 100644 --- a/weed/storage/needle_map_metric_test.go +++ b/weed/storage/needle_map_metric_test.go @@ -1,17 +1,18 @@ package storage import ( - "github.com/chrislusf/seaweedfs/weed/glog" - . "github.com/chrislusf/seaweedfs/weed/storage/types" "io/ioutil" "math/rand" "testing" + + "github.com/chrislusf/seaweedfs/weed/glog" + . "github.com/chrislusf/seaweedfs/weed/storage/types" ) func TestFastLoadingNeedleMapMetrics(t *testing.T) { idxFile, _ := ioutil.TempFile("", "tmp.idx") - nm := NewBtreeNeedleMap(idxFile) + nm := NewCompactNeedleMap(idxFile) for i := 0; i < 10000; i++ { nm.Put(Uint64ToNeedleId(uint64(i+1)), Uint32ToOffset(uint32(0)), uint32(1)) diff --git a/weed/storage/needle_map_sorted_file.go b/weed/storage/needle_map_sorted_file.go new file mode 100644 index 000000000..e6f9258f3 --- /dev/null +++ b/weed/storage/needle_map_sorted_file.go @@ -0,0 +1,105 @@ +package storage + +import ( + "os" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding" + "github.com/chrislusf/seaweedfs/weed/storage/needle_map" + . 
"github.com/chrislusf/seaweedfs/weed/storage/types" +) + +type SortedFileNeedleMap struct { + baseNeedleMapper + baseFileName string + dbFile *os.File + dbFileSize int64 +} + +func NewSortedFileNeedleMap(baseFileName string, indexFile *os.File) (m *SortedFileNeedleMap, err error) { + m = &SortedFileNeedleMap{baseFileName: baseFileName} + m.indexFile = indexFile + fileName := baseFileName + ".sdx" + if !isSortedFileFresh(fileName, indexFile) { + glog.V(0).Infof("Start to Generate %s from %s", fileName, indexFile.Name()) + erasure_coding.WriteSortedFileFromIdx(baseFileName, ".sdx") + glog.V(0).Infof("Finished Generating %s from %s", fileName, indexFile.Name()) + } + glog.V(1).Infof("Opening %s...", fileName) + + if m.dbFile, err = os.Open(baseFileName + ".sdx"); err != nil { + return + } + dbStat, _ := m.dbFile.Stat() + m.dbFileSize = dbStat.Size() + glog.V(1).Infof("Loading %s...", indexFile.Name()) + mm, indexLoadError := newNeedleMapMetricFromIndexFile(indexFile) + if indexLoadError != nil { + return nil, indexLoadError + } + m.mapMetric = *mm + return +} + +func isSortedFileFresh(dbFileName string, indexFile *os.File) bool { + // normally we always write to index file first + dbFile, err := os.Open(dbFileName) + if err != nil { + return false + } + defer dbFile.Close() + dbStat, dbStatErr := dbFile.Stat() + indexStat, indexStatErr := indexFile.Stat() + if dbStatErr != nil || indexStatErr != nil { + glog.V(0).Infof("Can not stat file: %v and %v", dbStatErr, indexStatErr) + return false + } + + return dbStat.ModTime().After(indexStat.ModTime()) +} + +func (m *SortedFileNeedleMap) Get(key NeedleId) (element *needle_map.NeedleValue, ok bool) { + offset, size, err := erasure_coding.SearchNeedleFromSortedIndex(m.dbFile, m.dbFileSize, key, nil) + ok = err == nil + return &needle_map.NeedleValue{Key: key, Offset: offset, Size: size}, ok + +} + +func (m *SortedFileNeedleMap) Put(key NeedleId, offset Offset, size uint32) error { + return os.ErrInvalid +} + +func (m *SortedFileNeedleMap) Delete(key NeedleId, offset Offset) error { + + _, size, err := erasure_coding.SearchNeedleFromSortedIndex(m.dbFile, m.dbFileSize, key, nil) + + if err != nil { + if err == erasure_coding.NotFoundError { + return nil + } + return err + } + + if size == TombstoneFileSize { + return nil + } + + // write to index file first + if err := m.appendToIndexFile(key, offset, TombstoneFileSize); err != nil { + return err + } + _, _, err = erasure_coding.SearchNeedleFromSortedIndex(m.dbFile, m.dbFileSize, key, erasure_coding.MarkNeedleDeleted) + + return err +} + +func (m *SortedFileNeedleMap) Close() { + m.indexFile.Close() + m.dbFile.Close() +} + +func (m *SortedFileNeedleMap) Destroy() error { + m.Close() + os.Remove(m.indexFile.Name()) + return os.Remove(m.baseFileName + ".sdx") +} diff --git a/weed/storage/store.go b/weed/storage/store.go index 66dd021ff..2d02e2f80 100644 --- a/weed/storage/store.go +++ b/weed/storage/store.go @@ -2,14 +2,19 @@ package storage import ( "fmt" + "path/filepath" + "strings" "sync/atomic" + "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" "github.com/chrislusf/seaweedfs/weed/stats" "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" . 
"github.com/chrislusf/seaweedfs/weed/storage/types" - "google.golang.org/grpc" ) const ( @@ -60,7 +65,7 @@ func NewStore(grpcDialOption grpc.DialOption, port int, ip, publicUrl string, di return } func (s *Store) AddVolume(volumeId needle.VolumeId, collection string, needleMapKind NeedleMapType, replicaPlacement string, ttlString string, preallocate int64, MemoryMapMaxSizeMb uint32) error { - rt, e := NewReplicaPlacementFromString(replicaPlacement) + rt, e := super_block.NewReplicaPlacementFromString(replicaPlacement) if e != nil { return e } @@ -101,14 +106,14 @@ func (s *Store) FindFreeLocation() (ret *DiskLocation) { } return ret } -func (s *Store) addVolume(vid needle.VolumeId, collection string, needleMapKind NeedleMapType, replicaPlacement *ReplicaPlacement, ttl *needle.TTL, preallocate int64, MemoryMapMaxSizeMb uint32) error { +func (s *Store) addVolume(vid needle.VolumeId, collection string, needleMapKind NeedleMapType, replicaPlacement *super_block.ReplicaPlacement, ttl *needle.TTL, preallocate int64, memoryMapMaxSizeMb uint32) error { if s.findVolume(vid) != nil { return fmt.Errorf("Volume Id %d already exists!", vid) } if location := s.FindFreeLocation(); location != nil { glog.V(0).Infof("In dir %s adds volume:%v collection:%s replicaPlacement:%v ttl:%v", location.Directory, vid, collection, replicaPlacement, ttl) - if volume, err := NewVolume(location.Directory, collection, vid, needleMapKind, replicaPlacement, ttl, preallocate, MemoryMapMaxSizeMb); err == nil { + if volume, err := NewVolume(location.Directory, collection, vid, needleMapKind, replicaPlacement, ttl, preallocate, memoryMapMaxSizeMb); err == nil { location.SetVolume(vid, volume) glog.V(0).Infof("add volume %d", vid) s.NewVolumesChan <- master_pb.VolumeShortInformationMessage{ @@ -126,10 +131,10 @@ func (s *Store) addVolume(vid needle.VolumeId, collection string, needleMapKind return fmt.Errorf("No more free space left") } -func (s *Store) Status() []*VolumeInfo { +func (s *Store) VolumeInfos() []*VolumeInfo { var stats []*VolumeInfo for _, location := range s.Locations { - location.RLock() + location.volumesLock.RLock() for k, v := range location.volumes { s := &VolumeInfo{ Id: needle.VolumeId(k), @@ -140,13 +145,14 @@ func (s *Store) Status() []*VolumeInfo { FileCount: int(v.FileCount()), DeleteCount: int(v.DeletedCount()), DeletedByteCount: v.DeletedSize(), - ReadOnly: v.readOnly, + ReadOnly: v.noWriteOrDelete || v.noWriteCanDelete, Ttl: v.Ttl, CompactRevision: uint32(v.CompactionRevision), } + s.RemoteStorageName, s.RemoteStorageKey = v.RemoteStorageNameKey() stats = append(stats, s) } - location.RUnlock() + location.volumesLock.RUnlock() } sortVolumeInfos(stats) return stats @@ -165,8 +171,9 @@ func (s *Store) CollectHeartbeat() *master_pb.Heartbeat { var maxFileKey NeedleId collectionVolumeSize := make(map[string]uint64) for _, location := range s.Locations { + var deleteVids []needle.VolumeId maxVolumeCount = maxVolumeCount + location.MaxVolumeCount - location.Lock() + location.volumesLock.RLock() for _, v := range location.volumes { if maxFileKey < v.MaxFileKey() { maxFileKey = v.MaxFileKey() @@ -175,8 +182,7 @@ func (s *Store) CollectHeartbeat() *master_pb.Heartbeat { volumeMessages = append(volumeMessages, v.ToVolumeInformationMessage()) } else { if v.expiredLongEnough(MAX_TTL_VOLUME_REMOVAL_DELAY) { - location.deleteVolumeById(v.Id) - glog.V(0).Infoln("volume", v.Id, "is deleted.") + deleteVids = append(deleteVids, v.Id) } else { glog.V(0).Infoln("volume", v.Id, "is expired.") } @@ -184,7 +190,17 @@ 
func (s *Store) CollectHeartbeat() *master_pb.Heartbeat { fileSize, _, _ := v.FileStat() collectionVolumeSize[v.Collection] += fileSize } - location.Unlock() + location.volumesLock.RUnlock() + + if len(deleteVids) > 0 { + // delete expired volumes. + location.volumesLock.Lock() + for _, vid := range deleteVids { + location.deleteVolumeById(vid) + glog.V(0).Infoln("volume", vid, "is deleted.") + } + location.volumesLock.Unlock() + } } for col, size := range collectionVolumeSize { @@ -213,11 +229,11 @@ func (s *Store) Close() { func (s *Store) WriteVolumeNeedle(i needle.VolumeId, n *needle.Needle) (size uint32, isUnchanged bool, err error) { if v := s.findVolume(i); v != nil { - if v.readOnly { + if v.noWriteOrDelete || v.noWriteCanDelete { err = fmt.Errorf("volume %d is read only", i) return } - if MaxPossibleVolumeSize >= v.ContentSize()+uint64(needle.GetActualSize(size, v.version)) { + if MaxPossibleVolumeSize >= v.ContentSize()+uint64(needle.GetActualSize(size, v.Version())) { _, size, isUnchanged, err = v.writeNeedle(n) } else { err = fmt.Errorf("volume size limit %d exceeded! current size is %d", s.GetVolumeSizeLimit(), v.ContentSize()) @@ -231,10 +247,10 @@ func (s *Store) WriteVolumeNeedle(i needle.VolumeId, n *needle.Needle) (size uin func (s *Store) DeleteVolumeNeedle(i needle.VolumeId, n *needle.Needle) (uint32, error) { if v := s.findVolume(i); v != nil { - if v.readOnly { + if v.noWriteOrDelete { return 0, fmt.Errorf("volume %d is read only", i) } - if MaxPossibleVolumeSize >= v.ContentSize()+uint64(needle.GetActualSize(0, v.version)) { + if MaxPossibleVolumeSize >= v.ContentSize()+uint64(needle.GetActualSize(0, v.Version())) { return v.deleteNeedle(n) } else { return 0, fmt.Errorf("volume size limit %d exceeded! current size is %d", s.GetVolumeSizeLimit(), v.ContentSize()) @@ -263,7 +279,7 @@ func (s *Store) MarkVolumeReadonly(i needle.VolumeId) error { if v == nil { return fmt.Errorf("volume %d not found", i) } - v.readOnly = true + v.noWriteOrDelete = true return nil } @@ -333,6 +349,31 @@ func (s *Store) DeleteVolume(i needle.VolumeId) error { return fmt.Errorf("volume %d not found on disk", i) } +func (s *Store) ConfigureVolume(i needle.VolumeId, replication string) error { + + for _, location := range s.Locations { + fileInfo, found := location.LocateVolume(i) + if !found { + continue + } + // load, modify, save + baseFileName := strings.TrimSuffix(fileInfo.Name(), filepath.Ext(fileInfo.Name())) + vifFile := filepath.Join(location.Directory, baseFileName + ".vif") + volumeInfo, _, err := pb.MaybeLoadVolumeInfo(vifFile) + if err != nil { + return fmt.Errorf("volume %d fail to load vif", i) + } + volumeInfo.Replication = replication + err = pb.SaveVolumeInfo(vifFile, volumeInfo) + if err != nil { + return fmt.Errorf("volume %d fail to save vif", i) + } + return nil + } + + return fmt.Errorf("volume %d not found on disk", i) +} + func (s *Store) SetVolumeSizeLimit(x uint64) { atomic.StoreUint64(&s.volumeSizeLimit, x) } diff --git a/weed/storage/store_ec.go b/weed/storage/store_ec.go index 8271324cf..47e061d05 100644 --- a/weed/storage/store_ec.go +++ b/weed/storage/store_ec.go @@ -8,6 +8,8 @@ import ( "sync" "time" + "github.com/klauspost/reedsolomon" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" @@ -16,7 +18,6 @@ import ( "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding" "github.com/chrislusf/seaweedfs/weed/storage/needle" 
"github.com/chrislusf/seaweedfs/weed/storage/types" - "github.com/klauspost/reedsolomon" ) func (s *Store) CollectErasureCodingHeartbeat() *master_pb.Heartbeat { @@ -119,15 +120,7 @@ func (s *Store) ReadEcShardNeedle(ctx context.Context, vid needle.VolumeId, n *n for _, location := range s.Locations { if localEcVolume, found := location.FindEcVolume(vid); found { - // read the volume version - for localEcVolume.Version == 0 { - err := s.readEcVolumeVersion(ctx, vid, localEcVolume) - time.Sleep(1357 * time.Millisecond) - glog.V(0).Infof("ReadEcShardNeedle vid %d version:%v: %v", vid, localEcVolume.Version, err) - } - version := localEcVolume.Version - - offset, size, intervals, err := localEcVolume.LocateEcShardNeedle(n.Id, version) + offset, size, intervals, err := localEcVolume.LocateEcShardNeedle(n.Id, localEcVolume.Version) if err != nil { return 0, fmt.Errorf("locate in local ec volume: %v", err) } @@ -148,7 +141,7 @@ func (s *Store) ReadEcShardNeedle(ctx context.Context, vid needle.VolumeId, n *n return 0, fmt.Errorf("ec entry %s is deleted", n.Id) } - err = n.ReadBytes(bytes, offset.ToAcutalOffset(), size, version) + err = n.ReadBytes(bytes, offset.ToAcutalOffset(), size, localEcVolume.Version) if err != nil { return 0, fmt.Errorf("readbytes: %v", err) } @@ -159,22 +152,6 @@ func (s *Store) ReadEcShardNeedle(ctx context.Context, vid needle.VolumeId, n *n return 0, fmt.Errorf("ec shard %d not found", vid) } -func (s *Store) readEcVolumeVersion(ctx context.Context, vid needle.VolumeId, ecVolume *erasure_coding.EcVolume) (err error) { - - interval := erasure_coding.Interval{ - BlockIndex: 0, - InnerBlockOffset: 0, - Size: _SuperBlockSize, - IsLargeBlock: true, // it could be large block, but ok in this place - LargeBlockRowsCount: 0, - } - data, _, err := s.readEcShardIntervals(ctx, vid, 0, ecVolume, []erasure_coding.Interval{interval}) - if err == nil { - ecVolume.Version = needle.Version(data[0]) - } - return -} - func (s *Store) readEcShardIntervals(ctx context.Context, vid needle.VolumeId, needleId types.NeedleId, ecVolume *erasure_coding.EcVolume, intervals []erasure_coding.Interval) (data []byte, is_deleted bool, err error) { if err = s.cachedLookupEcShardLocations(ctx, ecVolume); err != nil { @@ -253,7 +230,7 @@ func (s *Store) cachedLookupEcShardLocations(ctx context.Context, ecVolume *eras glog.V(3).Infof("lookup and cache ec volume %d locations", ecVolume.VolumeId) - err = operation.WithMasterServerClient(s.MasterAddress, s.grpcDialOption, func(masterClient master_pb.SeaweedClient) error { + err = operation.WithMasterServerClient(s.MasterAddress, s.grpcDialOption, func(ctx context.Context, masterClient master_pb.SeaweedClient) error { req := &master_pb.LookupEcVolumeRequest{ VolumeId: uint32(ecVolume.VolumeId), } @@ -288,7 +265,7 @@ func (s *Store) readRemoteEcShardInterval(ctx context.Context, sourceDataNodes [ } for _, sourceDataNode := range sourceDataNodes { - glog.V(4).Infof("read remote ec shard %d.%d from %s", vid, shardId, sourceDataNode) + glog.V(3).Infof("read remote ec shard %d.%d from %s", vid, shardId, sourceDataNode) n, is_deleted, err = s.doReadRemoteEcShardInterval(ctx, sourceDataNode, needleId, vid, shardId, buf, offset) if err == nil { return @@ -301,7 +278,7 @@ func (s *Store) readRemoteEcShardInterval(ctx context.Context, sourceDataNodes [ func (s *Store) doReadRemoteEcShardInterval(ctx context.Context, sourceDataNode string, needleId types.NeedleId, vid needle.VolumeId, shardId erasure_coding.ShardId, buf []byte, offset int64) (n int, is_deleted bool, err 
error) { - err = operation.WithVolumeServerClient(sourceDataNode, s.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error { + err = operation.WithVolumeServerClient(sourceDataNode, s.grpcDialOption, func(ctx context.Context, client volume_server_pb.VolumeServerClient) error { // copy data slice shardReadClient, err := client.VolumeEcShardRead(ctx, &volume_server_pb.VolumeEcShardReadRequest{ @@ -340,7 +317,7 @@ func (s *Store) doReadRemoteEcShardInterval(ctx context.Context, sourceDataNode } func (s *Store) recoverOneRemoteEcShardInterval(ctx context.Context, needleId types.NeedleId, ecVolume *erasure_coding.EcVolume, shardIdToRecover erasure_coding.ShardId, buf []byte, offset int64) (n int, is_deleted bool, err error) { - glog.V(4).Infof("recover ec shard %d.%d from other locations", ecVolume.VolumeId, shardIdToRecover) + glog.V(3).Infof("recover ec shard %d.%d from other locations", ecVolume.VolumeId, shardIdToRecover) enc, err := reedsolomon.New(erasure_coding.DataShardsCount, erasure_coding.ParityShardsCount) if err != nil { diff --git a/weed/storage/store_ec_delete.go b/weed/storage/store_ec_delete.go index e027d2887..2ac907f6c 100644 --- a/weed/storage/store_ec_delete.go +++ b/weed/storage/store_ec_delete.go @@ -87,7 +87,7 @@ func (s *Store) doDeleteNeedleFromRemoteEcShardServers(ctx context.Context, shar func (s *Store) doDeleteNeedleFromRemoteEcShard(ctx context.Context, sourceDataNode string, vid needle.VolumeId, collection string, version needle.Version, needleId types.NeedleId) error { - return operation.WithVolumeServerClient(sourceDataNode, s.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error { + return operation.WithVolumeServerClient(sourceDataNode, s.grpcDialOption, func(ctx context.Context, client volume_server_pb.VolumeServerClient) error { // copy data slice _, err := client.VolumeEcBlobDelete(ctx, &volume_server_pb.VolumeEcBlobDeleteRequest{ diff --git a/weed/storage/store_vacuum.go b/weed/storage/store_vacuum.go index b1f1a6277..e94d9b516 100644 --- a/weed/storage/store_vacuum.go +++ b/weed/storage/store_vacuum.go @@ -16,7 +16,8 @@ func (s *Store) CheckCompactVolume(volumeId needle.VolumeId) (float64, error) { } func (s *Store) CompactVolume(vid needle.VolumeId, preallocate int64, compactionBytePerSecond int64) error { if v := s.findVolume(vid); v != nil { - return v.Compact(preallocate, compactionBytePerSecond) + return v.Compact2(preallocate) // compactionBytePerSecond + // return v.Compact(preallocate, compactionBytePerSecond) } return fmt.Errorf("volume id %d is not found during compact", vid) } diff --git a/weed/storage/replica_placement.go b/weed/storage/super_block/replica_placement.go similarity index 98% rename from weed/storage/replica_placement.go rename to weed/storage/super_block/replica_placement.go index c1aca52eb..fcccbba7d 100644 --- a/weed/storage/replica_placement.go +++ b/weed/storage/super_block/replica_placement.go @@ -1,4 +1,4 @@ -package storage +package super_block import ( "errors" diff --git a/weed/storage/replica_placement_test.go b/weed/storage/super_block/replica_placement_test.go similarity index 93% rename from weed/storage/replica_placement_test.go rename to weed/storage/super_block/replica_placement_test.go index 7968af7cb..7742ba548 100644 --- a/weed/storage/replica_placement_test.go +++ b/weed/storage/super_block/replica_placement_test.go @@ -1,4 +1,4 @@ -package storage +package super_block import ( "testing" diff --git a/weed/storage/super_block/super_block.go 
b/weed/storage/super_block/super_block.go new file mode 100644 index 000000000..f48cd0bdc --- /dev/null +++ b/weed/storage/super_block/super_block.go @@ -0,0 +1,69 @@ +package super_block + +import ( + "github.com/golang/protobuf/proto" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/util" +) + +const ( + SuperBlockSize = 8 +) + +/* +* Super block currently has 8 bytes allocated for each volume. +* Byte 0: version, 1 or 2 +* Byte 1: Replica Placement strategy, 000, 001, 002, 010, etc +* Byte 2 and byte 3: Time to live. See TTL for definition +* Byte 4 and byte 5: The number of times the volume has been compacted. +* Rest bytes: Reserved + */ +type SuperBlock struct { + Version needle.Version + ReplicaPlacement *ReplicaPlacement + Ttl *needle.TTL + CompactionRevision uint16 + Extra *master_pb.SuperBlockExtra + ExtraSize uint16 +} + +func (s *SuperBlock) BlockSize() int { + switch s.Version { + case needle.Version2, needle.Version3: + return SuperBlockSize + int(s.ExtraSize) + } + return SuperBlockSize +} + +func (s *SuperBlock) Bytes() []byte { + header := make([]byte, SuperBlockSize) + header[0] = byte(s.Version) + header[1] = s.ReplicaPlacement.Byte() + s.Ttl.ToBytes(header[2:4]) + util.Uint16toBytes(header[4:6], s.CompactionRevision) + + if s.Extra != nil { + extraData, err := proto.Marshal(s.Extra) + if err != nil { + glog.Fatalf("cannot marshal super block extra %+v: %v", s.Extra, err) + } + extraSize := len(extraData) + if extraSize > 256*256-2 { + // reserve a couple of bits for future extension + glog.Fatalf("super block extra size is %d bigger than %d", extraSize, 256*256-2) + } + s.ExtraSize = uint16(extraSize) + util.Uint16toBytes(header[6:8], s.ExtraSize) + + header = append(header, extraData...) 
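+		// the marshaled extra data rides immediately after the fixed 8-byte
+		// header, which is why BlockSize() reports SuperBlockSize + ExtraSize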
+	}
+
+	return header
+}
+
+func (s *SuperBlock) Initialized() bool {
+	return s.ReplicaPlacement != nil && s.Ttl != nil
+}
diff --git a/weed/storage/super_block/super_block_read.go.go b/weed/storage/super_block/super_block_read.go.go
new file mode 100644
index 000000000..9eb12e116
--- /dev/null
+++ b/weed/storage/super_block/super_block_read.go.go
@@ -0,0 +1,48 @@
+package super_block
+
+import (
+	"fmt"
+
+	"github.com/golang/protobuf/proto"
+
+	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+	"github.com/chrislusf/seaweedfs/weed/storage/backend"
+	"github.com/chrislusf/seaweedfs/weed/storage/needle"
+	"github.com/chrislusf/seaweedfs/weed/util"
+)
+
+// ReadSuperBlock reads from the data file and loads it into the volume's super block
+func ReadSuperBlock(datBackend backend.BackendStorageFile) (superBlock SuperBlock, err error) {
+
+	header := make([]byte, SuperBlockSize)
+	if _, e := datBackend.ReadAt(header, 0); e != nil {
+		err = fmt.Errorf("cannot read volume %s super block: %v", datBackend.Name(), e)
+		return
+	}
+
+	superBlock.Version = needle.Version(header[0])
+	if superBlock.ReplicaPlacement, err = NewReplicaPlacementFromByte(header[1]); err != nil {
+		err = fmt.Errorf("cannot read replica type: %s", err.Error())
+		return
+	}
+	superBlock.Ttl = needle.LoadTTLFromBytes(header[2:4])
+	superBlock.CompactionRevision = util.BytesToUint16(header[4:6])
+	superBlock.ExtraSize = util.BytesToUint16(header[6:8])
+
+	if superBlock.ExtraSize > 0 {
+		// read the extra bytes that follow the fixed header
+		extraData := make([]byte, int(superBlock.ExtraSize))
+		if _, e := datBackend.ReadAt(extraData, SuperBlockSize); e != nil {
+			err = fmt.Errorf("cannot read volume %s super block extra: %v", datBackend.Name(), e)
+			return
+		}
+		superBlock.Extra = &master_pb.SuperBlockExtra{}
+		err = proto.Unmarshal(extraData, superBlock.Extra)
+		if err != nil {
+			err = fmt.Errorf("cannot parse volume %s super block extra: %v", datBackend.Name(), err)
+			return
+		}
+	}
+
+	return
+}
diff --git a/weed/storage/volume_super_block_test.go b/weed/storage/super_block/super_block_test.go
similarity index 86%
rename from weed/storage/volume_super_block_test.go
rename to weed/storage/super_block/super_block_test.go
index 06ad8a5d3..25699070d 100644
--- a/weed/storage/volume_super_block_test.go
+++ b/weed/storage/super_block/super_block_test.go
@@ -1,4 +1,4 @@
-package storage
+package super_block
 
 import (
 	"testing"
@@ -10,7 +10,7 @@ func TestSuperBlockReadWrite(t *testing.T) {
 	rp, _ := NewReplicaPlacementFromByte(byte(001))
 	ttl, _ := needle.ReadTTL("15d")
 	s := &SuperBlock{
-		version:          needle.CurrentVersion,
+		Version:          needle.CurrentVersion,
 		ReplicaPlacement: rp,
 		Ttl:              ttl,
 	}
diff --git a/weed/storage/volume.go b/weed/storage/volume.go
index 6084b4df0..acede66bf 100644
--- a/weed/storage/volume.go
+++ b/weed/storage/volume.go
@@ -2,18 +2,19 @@ package storage
 
 import (
 	"fmt"
-
-	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
-	"github.com/chrislusf/seaweedfs/weed/stats"
-	"github.com/chrislusf/seaweedfs/weed/storage/needle"
-	"github.com/chrislusf/seaweedfs/weed/storage/types"
-
-	"os"
 	"path"
 	"strconv"
 	"sync"
 	"time"
 
+	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+	"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
+	"github.com/chrislusf/seaweedfs/weed/stats"
+	"github.com/chrislusf/seaweedfs/weed/storage/backend"
+	"github.com/chrislusf/seaweedfs/weed/storage/needle"
+	"github.com/chrislusf/seaweedfs/weed/storage/super_block"
+	"github.com/chrislusf/seaweedfs/weed/storage/types"
+
 	"github.com/chrislusf/seaweedfs/weed/glog"
 )
@@ -21,15 +22,17 @@ type Volume struct {
 	Id            needle.VolumeId
 	dir           string
 	Collection    string
-	dataFile      *os.File
+	DataBackend   backend.BackendStorageFile
 	nm            NeedleMapper
 	needleMapKind NeedleMapType
-
readOnly bool + noWriteOrDelete bool // if readonly, either noWriteOrDelete or noWriteCanDelete + noWriteCanDelete bool // if readonly, either noWriteOrDelete or noWriteCanDelete + hasRemoteFile bool // if the volume has a remote file MemoryMapMaxSizeMb uint32 - SuperBlock + super_block.SuperBlock - dataFileAccessLock sync.Mutex + dataFileAccessLock sync.RWMutex lastModifiedTsSeconds uint64 //unix time in seconds lastAppendAtNs uint64 //unix time in nanoseconds @@ -37,18 +40,20 @@ type Volume struct { lastCompactRevision uint16 isCompacting bool + + volumeInfo *volume_server_pb.VolumeInfo } -func NewVolume(dirname string, collection string, id needle.VolumeId, needleMapKind NeedleMapType, replicaPlacement *ReplicaPlacement, ttl *needle.TTL, preallocate int64, MemoryMapMaxSizeMb uint32) (v *Volume, e error) { +func NewVolume(dirname string, collection string, id needle.VolumeId, needleMapKind NeedleMapType, replicaPlacement *super_block.ReplicaPlacement, ttl *needle.TTL, preallocate int64, memoryMapMaxSizeMb uint32) (v *Volume, e error) { // if replicaPlacement is nil, the superblock will be loaded from disk - v = &Volume{dir: dirname, Collection: collection, Id: id, MemoryMapMaxSizeMb: MemoryMapMaxSizeMb} - v.SuperBlock = SuperBlock{ReplicaPlacement: replicaPlacement, Ttl: ttl} + v = &Volume{dir: dirname, Collection: collection, Id: id, MemoryMapMaxSizeMb: memoryMapMaxSizeMb} + v.SuperBlock = super_block.SuperBlock{ReplicaPlacement: replicaPlacement, Ttl: ttl} v.needleMapKind = needleMapKind e = v.load(true, true, needleMapKind, preallocate) return } func (v *Volume) String() string { - return fmt.Sprintf("Id:%v, dir:%s, Collection:%s, dataFile:%v, nm:%v, readOnly:%v", v.Id, v.dir, v.Collection, v.dataFile, v.nm, v.readOnly) + return fmt.Sprintf("Id:%v, dir:%s, Collection:%s, dataFile:%v, nm:%v, noWrite:%v canDelete:%v", v.Id, v.dir, v.Collection, v.DataBackend, v.nm, v.noWriteOrDelete || v.noWriteCanDelete, v.noWriteCanDelete) } func VolumeFileName(dir string, collection string, id int) (fileName string) { @@ -63,33 +68,33 @@ func VolumeFileName(dir string, collection string, id int) (fileName string) { func (v *Volume) FileName() (fileName string) { return VolumeFileName(v.dir, v.Collection, int(v.Id)) } -func (v *Volume) DataFile() *os.File { - return v.dataFile -} func (v *Volume) Version() needle.Version { - return v.SuperBlock.Version() + if v.volumeInfo.Version != 0 { + v.SuperBlock.Version = needle.Version(v.volumeInfo.Version) + } + return v.SuperBlock.Version } func (v *Volume) FileStat() (datSize uint64, idxSize uint64, modTime time.Time) { - v.dataFileAccessLock.Lock() - defer v.dataFileAccessLock.Unlock() + v.dataFileAccessLock.RLock() + defer v.dataFileAccessLock.RUnlock() - if v.dataFile == nil { + if v.DataBackend == nil { return } - stat, e := v.dataFile.Stat() + datFileSize, modTime, e := v.DataBackend.GetStat() if e == nil { - return uint64(stat.Size()), v.nm.IndexFileSize(), stat.ModTime() + return uint64(datFileSize), v.nm.IndexFileSize(), modTime } - glog.V(0).Infof("Failed to read file size %s %v", v.dataFile.Name(), e) + glog.V(0).Infof("Failed to read file size %s %v", v.DataBackend.Name(), e) return // -1 causes integer overflow and the volume to become unwritable. 
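	// returning zero values on a stat failure is deliberate: a -1 size would
	// wrap around in the unsigned return and make the volume look unwritable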
} func (v *Volume) ContentSize() uint64 { - v.dataFileAccessLock.Lock() - defer v.dataFileAccessLock.Unlock() + v.dataFileAccessLock.RLock() + defer v.dataFileAccessLock.RUnlock() if v.nm == nil { return 0 } @@ -97,8 +102,8 @@ func (v *Volume) ContentSize() uint64 { } func (v *Volume) DeletedSize() uint64 { - v.dataFileAccessLock.Lock() - defer v.dataFileAccessLock.Unlock() + v.dataFileAccessLock.RLock() + defer v.dataFileAccessLock.RUnlock() if v.nm == nil { return 0 } @@ -106,8 +111,8 @@ func (v *Volume) DeletedSize() uint64 { } func (v *Volume) FileCount() uint64 { - v.dataFileAccessLock.Lock() - defer v.dataFileAccessLock.Unlock() + v.dataFileAccessLock.RLock() + defer v.dataFileAccessLock.RUnlock() if v.nm == nil { return 0 } @@ -115,8 +120,8 @@ func (v *Volume) FileCount() uint64 { } func (v *Volume) DeletedCount() uint64 { - v.dataFileAccessLock.Lock() - defer v.dataFileAccessLock.Unlock() + v.dataFileAccessLock.RLock() + defer v.dataFileAccessLock.RUnlock() if v.nm == nil { return 0 } @@ -124,8 +129,8 @@ func (v *Volume) DeletedCount() uint64 { } func (v *Volume) MaxFileKey() types.NeedleId { - v.dataFileAccessLock.Lock() - defer v.dataFileAccessLock.Unlock() + v.dataFileAccessLock.RLock() + defer v.dataFileAccessLock.RUnlock() if v.nm == nil { return 0 } @@ -133,8 +138,8 @@ func (v *Volume) MaxFileKey() types.NeedleId { } func (v *Volume) IndexFileSize() uint64 { - v.dataFileAccessLock.Lock() - defer v.dataFileAccessLock.Unlock() + v.dataFileAccessLock.RLock() + defer v.dataFileAccessLock.RUnlock() if v.nm == nil { return 0 } @@ -149,9 +154,9 @@ func (v *Volume) Close() { v.nm.Close() v.nm = nil } - if v.dataFile != nil { - _ = v.dataFile.Close() - v.dataFile = nil + if v.DataBackend != nil { + _ = v.DataBackend.Close() + v.DataBackend = nil stats.VolumeServerVolumeCounter.WithLabelValues(v.Collection, "volume").Dec() } } @@ -203,18 +208,32 @@ func (v *Volume) expiredLongEnough(maxDelayMinutes uint32) bool { func (v *Volume) ToVolumeInformationMessage() *master_pb.VolumeInformationMessage { size, _, modTime := v.FileStat() - return &master_pb.VolumeInformationMessage{ + volumInfo := &master_pb.VolumeInformationMessage{ Id: uint32(v.Id), Size: size, Collection: v.Collection, - FileCount: uint64(v.FileCount()), - DeleteCount: uint64(v.DeletedCount()), + FileCount: v.FileCount(), + DeleteCount: v.DeletedCount(), DeletedByteCount: v.DeletedSize(), - ReadOnly: v.readOnly, + ReadOnly: v.noWriteOrDelete, ReplicaPlacement: uint32(v.ReplicaPlacement.Byte()), Version: uint32(v.Version()), Ttl: v.Ttl.ToUint32(), CompactRevision: uint32(v.SuperBlock.CompactionRevision), ModifiedAtSecond: modTime.Unix(), } + + volumInfo.RemoteStorageName, volumInfo.RemoteStorageKey = v.RemoteStorageNameKey() + + return volumInfo +} + +func (v *Volume) RemoteStorageNameKey() (storageName, storageKey string) { + if v.volumeInfo == nil { + return + } + if len(v.volumeInfo.GetFiles()) == 0 { + return + } + return v.volumeInfo.GetFiles()[0].BackendName(), v.volumeInfo.GetFiles()[0].GetKey() } diff --git a/weed/storage/volume_backup.go b/weed/storage/volume_backup.go index f48ccbb68..3763d5515 100644 --- a/weed/storage/volume_backup.go +++ b/weed/storage/volume_backup.go @@ -6,21 +6,23 @@ import ( "io" "os" + "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" "github.com/chrislusf/seaweedfs/weed/storage/idx" "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" . 
"github.com/chrislusf/seaweedfs/weed/storage/types" - "google.golang.org/grpc" ) func (v *Volume) GetVolumeSyncStatus() *volume_server_pb.VolumeSyncStatusResponse { - v.dataFileAccessLock.Lock() - defer v.dataFileAccessLock.Unlock() + v.dataFileAccessLock.RLock() + defer v.dataFileAccessLock.RUnlock() var syncStatus = &volume_server_pb.VolumeSyncStatusResponse{} - if stat, err := v.dataFile.Stat(); err == nil { - syncStatus.TailOffset = uint64(stat.Size()) + if datSize, _, err := v.DataBackend.GetStat(); err == nil { + syncStatus.TailOffset = uint64(datSize) } syncStatus.Collection = v.Collection syncStatus.IdxFileSize = v.nm.IndexFileSize() @@ -62,15 +64,15 @@ update needle map when receiving new .dat bytes. But seems not necessary now.) func (v *Volume) IncrementalBackup(volumeServer string, grpcDialOption grpc.DialOption) error { - ctx := context.Background() - startFromOffset, _, _ := v.FileStat() appendAtNs, err := v.findLastAppendAtNs() if err != nil { return err } - err = operation.WithVolumeServerClient(volumeServer, grpcDialOption, func(client volume_server_pb.VolumeServerClient) error { + writeOffset := int64(startFromOffset) + + err = operation.WithVolumeServerClient(volumeServer, grpcDialOption, func(ctx context.Context, client volume_server_pb.VolumeServerClient) error { stream, err := client.VolumeIncrementalCopy(ctx, &volume_server_pb.VolumeIncrementalCopyRequest{ VolumeId: uint32(v.Id), @@ -80,8 +82,6 @@ func (v *Volume) IncrementalBackup(volumeServer string, grpcDialOption grpc.Dial return err } - v.dataFile.Seek(int64(startFromOffset), io.SeekStart) - for { resp, recvErr := stream.Recv() if recvErr != nil { @@ -92,10 +92,11 @@ func (v *Volume) IncrementalBackup(volumeServer string, grpcDialOption grpc.Dial } } - _, writeErr := v.dataFile.Write(resp.FileContent) + n, writeErr := v.DataBackend.WriteAt(resp.FileContent, writeOffset) if writeErr != nil { return writeErr } + writeOffset += int64(n) } return nil @@ -107,7 +108,7 @@ func (v *Volume) IncrementalBackup(volumeServer string, grpcDialOption grpc.Dial } // add to needle map - return ScanVolumeFileFrom(v.version, v.dataFile, int64(startFromOffset), &VolumeFileScanner4GenIdx{v: v}) + return ScanVolumeFileFrom(v.Version(), v.DataBackend, int64(startFromOffset), &VolumeFileScanner4GenIdx{v: v}) } @@ -153,11 +154,11 @@ func (v *Volume) locateLastAppendEntry() (Offset, error) { func (v *Volume) readAppendAtNs(offset Offset) (uint64, error) { - n, _, bodyLength, err := needle.ReadNeedleHeader(v.dataFile, v.SuperBlock.version, offset.ToAcutalOffset()) + n, _, bodyLength, err := needle.ReadNeedleHeader(v.DataBackend, v.SuperBlock.Version, offset.ToAcutalOffset()) if err != nil { return 0, fmt.Errorf("ReadNeedleHeader: %v", err) } - _, err = n.ReadNeedleBody(v.dataFile, v.SuperBlock.version, offset.ToAcutalOffset()+int64(NeedleHeaderSize), bodyLength) + _, err = n.ReadNeedleBody(v.DataBackend, v.SuperBlock.Version, offset.ToAcutalOffset()+int64(NeedleHeaderSize), bodyLength) if err != nil { return 0, fmt.Errorf("ReadNeedleBody offset %d, bodyLength %d: %v", offset.ToAcutalOffset(), bodyLength, err) } @@ -243,7 +244,7 @@ type VolumeFileScanner4GenIdx struct { v *Volume } -func (scanner *VolumeFileScanner4GenIdx) VisitSuperBlock(superBlock SuperBlock) error { +func (scanner *VolumeFileScanner4GenIdx) VisitSuperBlock(superBlock super_block.SuperBlock) error { return nil } diff --git a/weed/storage/volume_checking.go b/weed/storage/volume_checking.go index 8f930546f..a65c2a3ff 100644 --- a/weed/storage/volume_checking.go +++ 
b/weed/storage/volume_checking.go @@ -4,6 +4,7 @@ import ( "fmt" "os" + "github.com/chrislusf/seaweedfs/weed/storage/backend" "github.com/chrislusf/seaweedfs/weed/storage/idx" "github.com/chrislusf/seaweedfs/weed/storage/needle" . "github.com/chrislusf/seaweedfs/weed/storage/types" @@ -29,7 +30,7 @@ func CheckVolumeDataIntegrity(v *Volume, indexFile *os.File) (lastAppendAtNs uin if size == TombstoneFileSize { size = 0 } - if lastAppendAtNs, e = verifyNeedleIntegrity(v.dataFile, v.Version(), offset.ToAcutalOffset(), key, size); e != nil { + if lastAppendAtNs, e = verifyNeedleIntegrity(v.DataBackend, v.Version(), offset.ToAcutalOffset(), key, size); e != nil { return lastAppendAtNs, fmt.Errorf("verifyNeedleIntegrity %s failed: %v", indexFile.Name(), e) } return @@ -54,7 +55,7 @@ func readIndexEntryAtOffset(indexFile *os.File, offset int64) (bytes []byte, err return } -func verifyNeedleIntegrity(datFile *os.File, v needle.Version, offset int64, key NeedleId, size uint32) (lastAppendAtNs uint64, err error) { +func verifyNeedleIntegrity(datFile backend.BackendStorageFile, v needle.Version, offset int64, key NeedleId, size uint32) (lastAppendAtNs uint64, err error) { n := new(needle.Needle) if err = n.ReadData(datFile, offset, size, v); err != nil { return n.AppendAtNs, err diff --git a/weed/storage/volume_create.go b/weed/storage/volume_create.go index ef58e5871..ffcb246a4 100644 --- a/weed/storage/volume_create.go +++ b/weed/storage/volume_create.go @@ -6,12 +6,16 @@ import ( "os" "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/storage/backend" ) -func createVolumeFile(fileName string, preallocate int64, memoryMapSizeMB uint32) (*os.File, error) { +func createVolumeFile(fileName string, preallocate int64, memoryMapSizeMB uint32) (backend.BackendStorageFile, error) { file, e := os.OpenFile(fileName, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644) + if e != nil { + return nil, e + } if preallocate > 0 { glog.V(0).Infof("Preallocated disk space for %s is not supported", fileName) } - return file, e + return backend.NewDiskFile(file), nil } diff --git a/weed/storage/volume_create_linux.go b/weed/storage/volume_create_linux.go index d9dfc3862..ee599ac32 100644 --- a/weed/storage/volume_create_linux.go +++ b/weed/storage/volume_create_linux.go @@ -7,13 +7,17 @@ import ( "syscall" "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/storage/backend" ) -func createVolumeFile(fileName string, preallocate int64, memoryMapSizeMB uint32) (*os.File, error) { +func createVolumeFile(fileName string, preallocate int64, memoryMapSizeMB uint32) (backend.BackendStorageFile, error) { file, e := os.OpenFile(fileName, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644) + if e != nil { + return nil, e + } if preallocate != 0 { syscall.Fallocate(int(file.Fd()), 1, 0, preallocate) glog.V(0).Infof("Preallocated %d bytes disk space for %s", preallocate, fileName) } - return file, e + return backend.NewDiskFile(file), nil } diff --git a/weed/storage/volume_create_windows.go b/weed/storage/volume_create_windows.go index 12826f613..e1c0b961f 100644 --- a/weed/storage/volume_create_windows.go +++ b/weed/storage/volume_create_windows.go @@ -3,36 +3,31 @@ package storage import ( - "os" - "github.com/chrislusf/seaweedfs/weed/storage/backend/memory_map" "golang.org/x/sys/windows" "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/storage/backend" "github.com/chrislusf/seaweedfs/weed/storage/backend/memory_map/os_overloads" ) -func 
createVolumeFile(fileName string, preallocate int64, memoryMapSizeMB uint32) (*os.File, error) { - - mMap, exists := memory_map.FileMemoryMap[fileName] - if !exists { - - if preallocate > 0 { - glog.V(0).Infof("Preallocated disk space for %s is not supported", fileName) - } - - if memoryMapSizeMB > 0 { - file, e := os_overloads.OpenFile(fileName, windows.O_RDWR|windows.O_CREAT, 0644, true) - memory_map.FileMemoryMap[fileName] = new(memory_map.MemoryMap) - - new_mMap := memory_map.FileMemoryMap[fileName] - new_mMap.CreateMemoryMap(file, 1024*1024*uint64(memoryMapSizeMB)) - return file, e - } else { - file, e := os_overloads.OpenFile(fileName, windows.O_RDWR|windows.O_CREAT|windows.O_TRUNC, 0644, false) - return file, e - } - } else { - return mMap.File, nil +func createVolumeFile(fileName string, preallocate int64, memoryMapSizeMB uint32) (backend.BackendStorageFile, error) { + if preallocate > 0 { + glog.V(0).Infof("Preallocated disk space for %s is not supported", fileName) } + + if memoryMapSizeMB > 0 { + file, e := os_overloads.OpenFile(fileName, windows.O_RDWR|windows.O_CREAT, 0644, true) + if e != nil { + return nil, e + } + return memory_map.NewMemoryMappedFile(file, memoryMapSizeMB), nil + } else { + file, e := os_overloads.OpenFile(fileName, windows.O_RDWR|windows.O_CREAT|windows.O_TRUNC, 0644, false) + if e != nil { + return nil, e + } + return backend.NewDiskFile(file), nil + } + } diff --git a/weed/storage/volume_info.go b/weed/storage/volume_info.go index 111058b6e..313818cde 100644 --- a/weed/storage/volume_info.go +++ b/weed/storage/volume_info.go @@ -6,37 +6,42 @@ import ( "github.com/chrislusf/seaweedfs/weed/pb/master_pb" "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" ) type VolumeInfo struct { - Id needle.VolumeId - Size uint64 - ReplicaPlacement *ReplicaPlacement - Ttl *needle.TTL - Collection string - Version needle.Version - FileCount int - DeleteCount int - DeletedByteCount uint64 - ReadOnly bool - CompactRevision uint32 - ModifiedAtSecond int64 + Id needle.VolumeId + Size uint64 + ReplicaPlacement *super_block.ReplicaPlacement + Ttl *needle.TTL + Collection string + Version needle.Version + FileCount int + DeleteCount int + DeletedByteCount uint64 + ReadOnly bool + CompactRevision uint32 + ModifiedAtSecond int64 + RemoteStorageName string + RemoteStorageKey string } func NewVolumeInfo(m *master_pb.VolumeInformationMessage) (vi VolumeInfo, err error) { vi = VolumeInfo{ - Id: needle.VolumeId(m.Id), - Size: m.Size, - Collection: m.Collection, - FileCount: int(m.FileCount), - DeleteCount: int(m.DeleteCount), - DeletedByteCount: m.DeletedByteCount, - ReadOnly: m.ReadOnly, - Version: needle.Version(m.Version), - CompactRevision: m.CompactRevision, - ModifiedAtSecond: m.ModifiedAtSecond, + Id: needle.VolumeId(m.Id), + Size: m.Size, + Collection: m.Collection, + FileCount: int(m.FileCount), + DeleteCount: int(m.DeleteCount), + DeletedByteCount: m.DeletedByteCount, + ReadOnly: m.ReadOnly, + Version: needle.Version(m.Version), + CompactRevision: m.CompactRevision, + ModifiedAtSecond: m.ModifiedAtSecond, + RemoteStorageName: m.RemoteStorageName, + RemoteStorageKey: m.RemoteStorageKey, } - rp, e := NewReplicaPlacementFromByte(byte(m.ReplicaPlacement)) + rp, e := super_block.NewReplicaPlacementFromByte(byte(m.ReplicaPlacement)) if e != nil { return vi, e } @@ -51,7 +56,7 @@ func NewVolumeInfoFromShort(m *master_pb.VolumeShortInformationMessage) (vi Volu Collection: m.Collection, Version: needle.Version(m.Version), } 
- rp, e := NewReplicaPlacementFromByte(byte(m.ReplicaPlacement)) + rp, e := super_block.NewReplicaPlacementFromByte(byte(m.ReplicaPlacement)) if e != nil { return vi, e } @@ -60,6 +65,10 @@ func NewVolumeInfoFromShort(m *master_pb.VolumeShortInformationMessage) (vi Volu return vi, nil } +func (vi VolumeInfo) IsRemote() bool { + return vi.RemoteStorageName != "" +} + func (vi VolumeInfo) String() string { return fmt.Sprintf("Id:%d, Size:%d, ReplicaPlacement:%s, Collection:%s, Version:%v, FileCount:%d, DeleteCount:%d, DeletedByteCount:%d, ReadOnly:%v", vi.Id, vi.Size, vi.ReplicaPlacement, vi.Collection, vi.Version, vi.FileCount, vi.DeleteCount, vi.DeletedByteCount, vi.ReadOnly) @@ -67,18 +76,20 @@ func (vi VolumeInfo) String() string { func (vi VolumeInfo) ToVolumeInformationMessage() *master_pb.VolumeInformationMessage { return &master_pb.VolumeInformationMessage{ - Id: uint32(vi.Id), - Size: uint64(vi.Size), - Collection: vi.Collection, - FileCount: uint64(vi.FileCount), - DeleteCount: uint64(vi.DeleteCount), - DeletedByteCount: vi.DeletedByteCount, - ReadOnly: vi.ReadOnly, - ReplicaPlacement: uint32(vi.ReplicaPlacement.Byte()), - Version: uint32(vi.Version), - Ttl: vi.Ttl.ToUint32(), - CompactRevision: vi.CompactRevision, - ModifiedAtSecond: vi.ModifiedAtSecond, + Id: uint32(vi.Id), + Size: uint64(vi.Size), + Collection: vi.Collection, + FileCount: uint64(vi.FileCount), + DeleteCount: uint64(vi.DeleteCount), + DeletedByteCount: vi.DeletedByteCount, + ReadOnly: vi.ReadOnly, + ReplicaPlacement: uint32(vi.ReplicaPlacement.Byte()), + Version: uint32(vi.Version), + Ttl: vi.Ttl.ToUint32(), + CompactRevision: vi.CompactRevision, + ModifiedAtSecond: vi.ModifiedAtSecond, + RemoteStorageName: vi.RemoteStorageName, + RemoteStorageKey: vi.RemoteStorageKey, } } diff --git a/weed/storage/volume_loading.go b/weed/storage/volume_loading.go index be58588f2..6b42fc452 100644 --- a/weed/storage/volume_loading.go +++ b/weed/storage/volume_loading.go @@ -3,142 +3,148 @@ package storage import ( "fmt" "os" - "time" - "github.com/chrislusf/seaweedfs/weed/stats" - "github.com/chrislusf/seaweedfs/weed/storage/needle" "github.com/syndtr/goleveldb/leveldb/opt" "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/stats" + "github.com/chrislusf/seaweedfs/weed/storage/backend" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" + "github.com/chrislusf/seaweedfs/weed/util" ) -func loadVolumeWithoutIndex(dirname string, collection string, id needle.VolumeId, needleMapKind NeedleMapType) (v *Volume, e error) { +func loadVolumeWithoutIndex(dirname string, collection string, id needle.VolumeId, needleMapKind NeedleMapType) (v *Volume, err error) { v = &Volume{dir: dirname, Collection: collection, Id: id} - v.SuperBlock = SuperBlock{} + v.SuperBlock = super_block.SuperBlock{} v.needleMapKind = needleMapKind - e = v.load(false, false, needleMapKind, 0) + err = v.load(false, false, needleMapKind, 0) return } -func (v *Volume) load(alsoLoadIndex bool, createDatIfMissing bool, needleMapKind NeedleMapType, preallocate int64) error { - var e error +func (v *Volume) load(alsoLoadIndex bool, createDatIfMissing bool, needleMapKind NeedleMapType, preallocate int64) (err error) { fileName := v.FileName() alreadyHasSuperBlock := false - if exists, canRead, canWrite, modifiedTime, fileSize := checkFile(fileName + ".dat"); exists { + hasVolumeInfoFile := v.maybeLoadVolumeInfo() && v.volumeInfo.Version != 0 + + if v.HasRemoteFile() { + 
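+		// a remote-backed volume is locally read-only: new writes are rejected,
+		// but deletes can still be recorded, hence noWriteCanDelete below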
v.noWriteCanDelete = true + v.noWriteOrDelete = false + glog.V(0).Infof("loading volume %d from remote %v", v.Id, v.volumeInfo.Files) + v.LoadRemoteFile() + alreadyHasSuperBlock = true + } else if exists, canRead, canWrite, modifiedTime, fileSize := util.CheckFile(fileName + ".dat"); exists { + // open dat file if !canRead { return fmt.Errorf("cannot read Volume Data file %s.dat", fileName) } + var dataFile *os.File if canWrite { - v.dataFile, e = os.OpenFile(fileName+".dat", os.O_RDWR|os.O_CREATE, 0644) + dataFile, err = os.OpenFile(fileName+".dat", os.O_RDWR|os.O_CREATE, 0644) } else { glog.V(0).Infoln("opening " + fileName + ".dat in READONLY mode") - v.dataFile, e = os.Open(fileName + ".dat") - v.readOnly = true + dataFile, err = os.Open(fileName + ".dat") + v.noWriteOrDelete = true } v.lastModifiedTsSeconds = uint64(modifiedTime.Unix()) - if fileSize >= _SuperBlockSize { + if fileSize >= super_block.SuperBlockSize { alreadyHasSuperBlock = true } + v.DataBackend = backend.NewDiskFile(dataFile) } else { if createDatIfMissing { - v.dataFile, e = createVolumeFile(fileName+".dat", preallocate, v.MemoryMapMaxSizeMb) + v.DataBackend, err = createVolumeFile(fileName+".dat", preallocate, v.MemoryMapMaxSizeMb) } else { return fmt.Errorf("Volume Data file %s.dat does not exist.", fileName) } } - if e != nil { - if !os.IsPermission(e) { - return fmt.Errorf("cannot load Volume Data %s.dat: %v", fileName, e) + if err != nil { + if !os.IsPermission(err) { + return fmt.Errorf("cannot load Volume Data %s.dat: %v", fileName, err) } else { - return fmt.Errorf("load data file %s.dat: %v", fileName, e) + return fmt.Errorf("load data file %s.dat: %v", fileName, err) } } if alreadyHasSuperBlock { - e = v.readSuperBlock() + err = v.readSuperBlock() } else { if !v.SuperBlock.Initialized() { return fmt.Errorf("volume %s.dat not initialized", fileName) } - e = v.maybeWriteSuperBlock() + err = v.maybeWriteSuperBlock() } - if e == nil && alsoLoadIndex { + if err == nil && alsoLoadIndex { var indexFile *os.File - if v.readOnly { - glog.V(1).Infoln("open to read file", fileName+".idx") - if indexFile, e = os.OpenFile(fileName+".idx", os.O_RDONLY, 0644); e != nil { - return fmt.Errorf("cannot read Volume Index %s.idx: %v", fileName, e) + if v.noWriteOrDelete { + glog.V(0).Infoln("open to read file", fileName+".idx") + if indexFile, err = os.OpenFile(fileName+".idx", os.O_RDONLY, 0644); err != nil { + return fmt.Errorf("cannot read Volume Index %s.idx: %v", fileName, err) } } else { glog.V(1).Infoln("open to write file", fileName+".idx") - if indexFile, e = os.OpenFile(fileName+".idx", os.O_RDWR|os.O_CREATE, 0644); e != nil { - return fmt.Errorf("cannot write Volume Index %s.idx: %v", fileName, e) + if indexFile, err = os.OpenFile(fileName+".idx", os.O_RDWR|os.O_CREATE, 0644); err != nil { + return fmt.Errorf("cannot write Volume Index %s.idx: %v", fileName, err) } } - if v.lastAppendAtNs, e = CheckVolumeDataIntegrity(v, indexFile); e != nil { - v.readOnly = true - glog.V(0).Infof("volumeDataIntegrityChecking failed %v", e) + if v.lastAppendAtNs, err = CheckVolumeDataIntegrity(v, indexFile); err != nil { + v.noWriteOrDelete = true + glog.V(0).Infof("volumeDataIntegrityChecking failed %v", err) } - switch needleMapKind { - case NeedleMapInMemory: - glog.V(0).Infoln("loading index", fileName+".idx", "to memory readonly", v.readOnly) - if v.nm, e = LoadCompactNeedleMap(indexFile); e != nil { - glog.V(0).Infof("loading index %s to memory error: %v", fileName+".idx", e) + + if v.noWriteOrDelete || v.noWriteCanDelete { + 
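+			// read-only volumes load the pre-sorted .sdx needle index; a stale
+			// .sdx is regenerated from the .idx file inside NewSortedFileNeedleMap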
if v.nm, err = NewSortedFileNeedleMap(fileName, indexFile); err != nil { + glog.V(0).Infof("loading sorted db %s error: %v", fileName+".sdx", err) } - case NeedleMapLevelDb: - glog.V(0).Infoln("loading leveldb", fileName+".ldb") - opts := &opt.Options{ - BlockCacheCapacity: 2 * 1024 * 1024, // default value is 8MiB - WriteBuffer: 1 * 1024 * 1024, // default value is 4MiB - CompactionTableSizeMultiplier: 10, // default value is 1 - } - if v.nm, e = NewLevelDbNeedleMap(fileName+".ldb", indexFile, opts); e != nil { - glog.V(0).Infof("loading leveldb %s error: %v", fileName+".ldb", e) - } - case NeedleMapLevelDbMedium: - glog.V(0).Infoln("loading leveldb medium", fileName+".ldb") - opts := &opt.Options{ - BlockCacheCapacity: 4 * 1024 * 1024, // default value is 8MiB - WriteBuffer: 2 * 1024 * 1024, // default value is 4MiB - CompactionTableSizeMultiplier: 10, // default value is 1 - } - if v.nm, e = NewLevelDbNeedleMap(fileName+".ldb", indexFile, opts); e != nil { - glog.V(0).Infof("loading leveldb %s error: %v", fileName+".ldb", e) - } - case NeedleMapLevelDbLarge: - glog.V(0).Infoln("loading leveldb large", fileName+".ldb") - opts := &opt.Options{ - BlockCacheCapacity: 8 * 1024 * 1024, // default value is 8MiB - WriteBuffer: 4 * 1024 * 1024, // default value is 4MiB - CompactionTableSizeMultiplier: 10, // default value is 1 - } - if v.nm, e = NewLevelDbNeedleMap(fileName+".ldb", indexFile, opts); e != nil { - glog.V(0).Infof("loading leveldb %s error: %v", fileName+".ldb", e) + } else { + switch needleMapKind { + case NeedleMapInMemory: + glog.V(0).Infoln("loading index", fileName+".idx", "to memory") + if v.nm, err = LoadCompactNeedleMap(indexFile); err != nil { + glog.V(0).Infof("loading index %s to memory error: %v", fileName+".idx", err) + } + case NeedleMapLevelDb: + glog.V(0).Infoln("loading leveldb", fileName+".ldb") + opts := &opt.Options{ + BlockCacheCapacity: 2 * 1024 * 1024, // default value is 8MiB + WriteBuffer: 1 * 1024 * 1024, // default value is 4MiB + CompactionTableSizeMultiplier: 10, // default value is 1 + } + if v.nm, err = NewLevelDbNeedleMap(fileName+".ldb", indexFile, opts); err != nil { + glog.V(0).Infof("loading leveldb %s error: %v", fileName+".ldb", err) + } + case NeedleMapLevelDbMedium: + glog.V(0).Infoln("loading leveldb medium", fileName+".ldb") + opts := &opt.Options{ + BlockCacheCapacity: 4 * 1024 * 1024, // default value is 8MiB + WriteBuffer: 2 * 1024 * 1024, // default value is 4MiB + CompactionTableSizeMultiplier: 10, // default value is 1 + } + if v.nm, err = NewLevelDbNeedleMap(fileName+".ldb", indexFile, opts); err != nil { + glog.V(0).Infof("loading leveldb %s error: %v", fileName+".ldb", err) + } + case NeedleMapLevelDbLarge: + glog.V(0).Infoln("loading leveldb large", fileName+".ldb") + opts := &opt.Options{ + BlockCacheCapacity: 8 * 1024 * 1024, // default value is 8MiB + WriteBuffer: 4 * 1024 * 1024, // default value is 4MiB + CompactionTableSizeMultiplier: 10, // default value is 1 + } + if v.nm, err = NewLevelDbNeedleMap(fileName+".ldb", indexFile, opts); err != nil { + glog.V(0).Infof("loading leveldb %s error: %v", fileName+".ldb", err) + } } } } + if !hasVolumeInfoFile { + v.volumeInfo.Version = uint32(v.SuperBlock.Version) + v.SaveVolumeInfo() + } + stats.VolumeServerVolumeCounter.WithLabelValues(v.Collection, "volume").Inc() - return e -} - -func checkFile(filename string) (exists, canRead, canWrite bool, modTime time.Time, fileSize int64) { - exists = true - fi, err := os.Stat(filename) - if os.IsNotExist(err) { - exists = false - return - 
} - if fi.Mode()&0400 != 0 { - canRead = true - } - if fi.Mode()&0200 != 0 { - canWrite = true - } - modTime = fi.ModTime() - fileSize = fi.Size() - return + return err } diff --git a/weed/storage/volume_read_write.go b/weed/storage/volume_read_write.go index 7c9539021..ac6154cef 100644 --- a/weed/storage/volume_read_write.go +++ b/weed/storage/volume_read_write.go @@ -9,8 +9,9 @@ import ( "time" "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/storage/backend/memory_map" + "github.com/chrislusf/seaweedfs/weed/storage/backend" "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" . "github.com/chrislusf/seaweedfs/weed/storage/types" ) @@ -26,7 +27,7 @@ func (v *Volume) isFileUnchanged(n *needle.Needle) bool { nv, ok := v.nm.Get(n.Id) if ok && !nv.Offset.IsZero() && nv.Size != TombstoneFileSize { oldNeedle := new(needle.Needle) - err := oldNeedle.ReadData(v.dataFile, nv.Offset.ToAcutalOffset(), nv.Size, v.Version()) + err := oldNeedle.ReadData(v.DataBackend, nv.Offset.ToAcutalOffset(), nv.Size, v.Version()) if err != nil { glog.V(0).Infof("Failed to check updated file at offset %d size %d: %v", nv.Offset.ToAcutalOffset(), nv.Size, err) return false @@ -45,28 +46,25 @@ func (v *Volume) Destroy() (err error) { err = fmt.Errorf("volume %d is compacting", v.Id) return } - mMap, exists := memory_map.FileMemoryMap[v.dataFile.Name()] - if exists { - mMap.DeleteFileAndMemoryMap() - delete(memory_map.FileMemoryMap, v.dataFile.Name()) + storageName, storageKey := v.RemoteStorageNameKey() + if v.HasRemoteFile() && storageName != "" && storageKey != "" { + if backendStorage, found := backend.BackendStorages[storageName]; found { + backendStorage.DeleteFile(storageKey) + } } - v.Close() os.Remove(v.FileName() + ".dat") os.Remove(v.FileName() + ".idx") + os.Remove(v.FileName() + ".vif") + os.Remove(v.FileName() + ".sdx") os.Remove(v.FileName() + ".cpd") os.Remove(v.FileName() + ".cpx") - os.Remove(v.FileName() + ".ldb") - os.Remove(v.FileName() + ".bdb") + os.RemoveAll(v.FileName() + ".ldb") return } func (v *Volume) writeNeedle(n *needle.Needle) (offset uint64, size uint32, isUnchanged bool, err error) { - glog.V(4).Infof("writing needle %s", needle.NewFileIdFromNeedle(v.Id, n).String()) - if v.readOnly { - err = fmt.Errorf("%s is read-only", v.dataFile.Name()) - return - } + // glog.V(4).Infof("writing needle %s", needle.NewFileIdFromNeedle(v.Id, n).String()) v.dataFileAccessLock.Lock() defer v.dataFileAccessLock.Unlock() if v.isFileUnchanged(n) { @@ -83,7 +81,7 @@ func (v *Volume) writeNeedle(n *needle.Needle) (offset uint64, size uint32, isUn // check whether existing needle cookie matches nv, ok := v.nm.Get(n.Id) if ok { - existingNeedle, _, _, existingNeedleReadErr := needle.ReadNeedleHeader(v.dataFile, v.Version(), nv.Offset.ToAcutalOffset()) + existingNeedle, _, _, existingNeedleReadErr := needle.ReadNeedleHeader(v.DataBackend, v.Version(), nv.Offset.ToAcutalOffset()) if existingNeedleReadErr != nil { err = fmt.Errorf("reading existing needle: %v", existingNeedleReadErr) return @@ -97,7 +95,7 @@ func (v *Volume) writeNeedle(n *needle.Needle) (offset uint64, size uint32, isUn // append to dat file n.AppendAtNs = uint64(time.Now().UnixNano()) - if offset, size, _, err = n.Append(v.dataFile, v.Version()); err != nil { + if offset, size, _, err = n.Append(v.DataBackend, v.Version()); err != nil { return } v.lastAppendAtNs = n.AppendAtNs @@ -116,9 +114,6 @@ func (v *Volume) writeNeedle(n *needle.Needle) (offset 
uint64, size uint32, isUn func (v *Volume) deleteNeedle(n *needle.Needle) (uint32, error) { glog.V(4).Infof("delete needle %s", needle.NewFileIdFromNeedle(v.Id, n).String()) - if v.readOnly { - return 0, fmt.Errorf("%s is read-only", v.dataFile.Name()) - } v.dataFileAccessLock.Lock() defer v.dataFileAccessLock.Unlock() nv, ok := v.nm.Get(n.Id) @@ -127,7 +122,7 @@ func (v *Volume) deleteNeedle(n *needle.Needle) (uint32, error) { size := nv.Size n.Data = nil n.AppendAtNs = uint64(time.Now().UnixNano()) - offset, _, _, err := n.Append(v.dataFile, v.Version()) + offset, _, _, err := n.Append(v.DataBackend, v.Version()) if err != nil { return size, err } @@ -142,8 +137,8 @@ func (v *Volume) deleteNeedle(n *needle.Needle) (uint32, error) { // read fills in Needle content by looking up n.Id from NeedleMapper func (v *Volume) readNeedle(n *needle.Needle) (int, error) { - v.dataFileAccessLock.Lock() - defer v.dataFileAccessLock.Unlock() + v.dataFileAccessLock.RLock() + defer v.dataFileAccessLock.RUnlock() nv, ok := v.nm.Get(n.Id) if !ok || nv.Offset.IsZero() { @@ -155,7 +150,7 @@ func (v *Volume) readNeedle(n *needle.Needle) (int, error) { if nv.Size == 0 { return 0, nil } - err := n.ReadData(v.dataFile, nv.Offset.ToAcutalOffset(), nv.Size, v.Version()) + err := n.ReadData(v.DataBackend, nv.Offset.ToAcutalOffset(), nv.Size, v.Version()) if err != nil { return 0, err } @@ -177,7 +172,7 @@ func (v *Volume) readNeedle(n *needle.Needle) (int, error) { } type VolumeFileScanner interface { - VisitSuperBlock(SuperBlock) error + VisitSuperBlock(super_block.SuperBlock) error ReadNeedleBody() bool VisitNeedle(n *needle.Needle, offset int64, needleHeader, needleBody []byte) error } @@ -189,8 +184,10 @@ func ScanVolumeFile(dirname string, collection string, id needle.VolumeId, if v, err = loadVolumeWithoutIndex(dirname, collection, id, needleMapKind); err != nil { return fmt.Errorf("failed to load volume %d: %v", id, err) } - if err = volumeFileScanner.VisitSuperBlock(v.SuperBlock); err != nil { - return fmt.Errorf("failed to process volume %d super block: %v", id, err) + if v.volumeInfo.Version == 0 { + if err = volumeFileScanner.VisitSuperBlock(v.SuperBlock); err != nil { + return fmt.Errorf("failed to process volume %d super block: %v", id, err) + } } defer v.Close() @@ -198,21 +195,21 @@ func ScanVolumeFile(dirname string, collection string, id needle.VolumeId, offset := int64(v.SuperBlock.BlockSize()) - return ScanVolumeFileFrom(version, v.dataFile, offset, volumeFileScanner) + return ScanVolumeFileFrom(version, v.DataBackend, offset, volumeFileScanner) } -func ScanVolumeFileFrom(version needle.Version, dataFile *os.File, offset int64, volumeFileScanner VolumeFileScanner) (err error) { - n, nh, rest, e := needle.ReadNeedleHeader(dataFile, version, offset) +func ScanVolumeFileFrom(version needle.Version, datBackend backend.BackendStorageFile, offset int64, volumeFileScanner VolumeFileScanner) (err error) { + n, nh, rest, e := needle.ReadNeedleHeader(datBackend, version, offset) if e != nil { if e == io.EOF { return nil } - return fmt.Errorf("cannot read %s at offset %d: %v", dataFile.Name(), offset, e) + return fmt.Errorf("cannot read %s at offset %d: %v", datBackend.Name(), offset, e) } for n != nil { var needleBody []byte if volumeFileScanner.ReadNeedleBody() { - if needleBody, err = n.ReadNeedleBody(dataFile, version, offset+NeedleHeaderSize, rest); err != nil { + if needleBody, err = n.ReadNeedleBody(datBackend, version, offset+NeedleHeaderSize, rest); err != nil { glog.V(0).Infof("cannot read needle 
body: %v", err) //err = fmt.Errorf("cannot read needle body: %v", err) //return @@ -228,7 +225,7 @@ func ScanVolumeFileFrom(version needle.Version, dataFile *os.File, offset int64, } offset += NeedleHeaderSize + rest glog.V(4).Infof("==> new entry offset %d", offset) - if n, nh, rest, err = needle.ReadNeedleHeader(dataFile, version, offset); err != nil { + if n, nh, rest, err = needle.ReadNeedleHeader(datBackend, version, offset); err != nil { if err == io.EOF { return nil } diff --git a/weed/storage/volume_super_block.go b/weed/storage/volume_super_block.go index cb34a2347..1d7f35595 100644 --- a/weed/storage/volume_super_block.go +++ b/weed/storage/volume_super_block.go @@ -4,150 +4,45 @@ import ( "fmt" "os" - "github.com/chrislusf/seaweedfs/weed/storage/backend/memory_map" - "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/storage/backend" "github.com/chrislusf/seaweedfs/weed/storage/needle" - "github.com/chrislusf/seaweedfs/weed/util" - "github.com/golang/protobuf/proto" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" ) -const ( - _SuperBlockSize = 8 -) - -/* -* Super block currently has 8 bytes allocated for each volume. -* Byte 0: version, 1 or 2 -* Byte 1: Replica Placement strategy, 000, 001, 002, 010, etc -* Byte 2 and byte 3: Time to live. See TTL for definition -* Byte 4 and byte 5: The number of times the volume has been compacted. -* Rest bytes: Reserved - */ -type SuperBlock struct { - version needle.Version - ReplicaPlacement *ReplicaPlacement - Ttl *needle.TTL - CompactionRevision uint16 - Extra *master_pb.SuperBlockExtra - extraSize uint16 -} - -func (s *SuperBlock) BlockSize() int { - switch s.version { - case needle.Version2, needle.Version3: - return _SuperBlockSize + int(s.extraSize) - } - return _SuperBlockSize -} - -func (s *SuperBlock) Version() needle.Version { - return s.version -} -func (s *SuperBlock) Bytes() []byte { - header := make([]byte, _SuperBlockSize) - header[0] = byte(s.version) - header[1] = s.ReplicaPlacement.Byte() - s.Ttl.ToBytes(header[2:4]) - util.Uint16toBytes(header[4:6], s.CompactionRevision) - - if s.Extra != nil { - extraData, err := proto.Marshal(s.Extra) - if err != nil { - glog.Fatalf("cannot marshal super block extra %+v: %v", s.Extra, err) - } - extraSize := len(extraData) - if extraSize > 256*256-2 { - // reserve a couple of bits for future extension - glog.Fatalf("super block extra size is %d bigger than %d", extraSize, 256*256-2) - } - s.extraSize = uint16(extraSize) - util.Uint16toBytes(header[6:8], s.extraSize) - - header = append(header, extraData...) - } - - return header -} - -func (s *SuperBlock) Initialized() bool { - return s.ReplicaPlacement != nil && s.Ttl != nil -} - func (v *Volume) maybeWriteSuperBlock() error { - mMap, exists := memory_map.FileMemoryMap[v.dataFile.Name()] - if exists { - if mMap.End_of_file == -1 { - v.SuperBlock.version = needle.CurrentVersion - mMap.WriteMemory(0, uint64(len(v.SuperBlock.Bytes())), v.SuperBlock.Bytes()) - } - return nil - } else { - stat, e := v.dataFile.Stat() - if e != nil { - glog.V(0).Infof("failed to stat datafile %s: %v", v.dataFile.Name(), e) - return e - } - if stat.Size() == 0 { - v.SuperBlock.version = needle.CurrentVersion - _, e = v.dataFile.WriteAt(v.SuperBlock.Bytes(), 0) - if e != nil && os.IsPermission(e) { - //read-only, but zero length - recreate it! 
- if v.dataFile, e = os.Create(v.dataFile.Name()); e == nil { - if _, e = v.dataFile.Write(v.SuperBlock.Bytes()); e == nil { - v.readOnly = false - } + datSize, _, e := v.DataBackend.GetStat() + if e != nil { + glog.V(0).Infof("failed to stat datafile %s: %v", v.DataBackend.Name(), e) + return e + } + if datSize == 0 { + v.SuperBlock.Version = needle.CurrentVersion + _, e = v.DataBackend.WriteAt(v.SuperBlock.Bytes(), 0) + if e != nil && os.IsPermission(e) { + //read-only, but zero length - recreate it! + var dataFile *os.File + if dataFile, e = os.Create(v.DataBackend.Name()); e == nil { + v.DataBackend = backend.NewDiskFile(dataFile) + if _, e = v.DataBackend.WriteAt(v.SuperBlock.Bytes(), 0); e == nil { + v.noWriteOrDelete = false + v.noWriteCanDelete = false } } } - return e } + return e } func (v *Volume) readSuperBlock() (err error) { - v.SuperBlock, err = super_block.ReadSuperBlock(v.DataBackend) wait + v.SuperBlock, err = super_block.ReadSuperBlock(v.DataBackend) + if v.volumeInfo != nil && v.volumeInfo.Replication != "" { + if replication, err := super_block.NewReplicaPlacementFromString(v.volumeInfo.Replication); err != nil { + return fmt.Errorf("error parsing volume %d replication %s: %v", v.Id, v.volumeInfo.Replication, err) + } else { + v.SuperBlock.ReplicaPlacement = replication + } + } return err } - -// ReadSuperBlock reads from data file and load it into volume's super block -func ReadSuperBlock(dataFile *os.File) (superBlock SuperBlock, err error) { - - header := make([]byte, _SuperBlockSize) - mMap, exists := memory_map.FileMemoryMap[dataFile.Name()] - if exists { - header, err = mMap.ReadMemory(0, _SuperBlockSize) - if err != nil { - err = fmt.Errorf("cannot read volume %s super block: %v", dataFile.Name(), err) - return - } - } else { - if _, e := dataFile.ReadAt(header, 0); e != nil { - err = fmt.Errorf("cannot read volume %s super block: %v", dataFile.Name(), e) - return - } - } - - superBlock.version = needle.Version(header[0]) - if superBlock.ReplicaPlacement, err = NewReplicaPlacementFromByte(header[1]); err != nil { - err = fmt.Errorf("cannot read replica type: %s", err.Error()) - return - } - superBlock.Ttl = needle.LoadTTLFromBytes(header[2:4]) - superBlock.CompactionRevision = util.BytesToUint16(header[4:6]) - superBlock.extraSize = util.BytesToUint16(header[6:8]) - - if superBlock.extraSize > 0 { - // read more - extraData := make([]byte, int(superBlock.extraSize)) - superBlock.Extra = &master_pb.SuperBlockExtra{} - err = proto.Unmarshal(extraData, superBlock.Extra) - if err != nil { - err = fmt.Errorf("cannot read volume %s super block extra: %v", dataFile.Name(), err) - return - } - } - - return -} diff --git a/weed/storage/volume_tier.go b/weed/storage/volume_tier.go new file mode 100644 index 000000000..fd7b08654 --- /dev/null +++ b/weed/storage/volume_tier.go @@ -0,0 +1,50 @@ +package storage + +import ( + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" + "github.com/chrislusf/seaweedfs/weed/storage/backend" + _ "github.com/chrislusf/seaweedfs/weed/storage/backend/s3_backend" +) + +func (v *Volume) GetVolumeInfo() *volume_server_pb.VolumeInfo { + return v.volumeInfo +} + +func (v *Volume) maybeLoadVolumeInfo() (found bool) { + + v.volumeInfo, v.hasRemoteFile, _ = pb.MaybeLoadVolumeInfo(v.FileName() + ".vif") + + if v.hasRemoteFile { + glog.V(0).Infof("volume %d is tiered to %s as %s and read only", v.Id, + v.volumeInfo.Files[0].BackendName(), v.volumeInfo.Files[0].Key) + } + + return + +}
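A note on the new volume_tier.go: the .vif file is what ties a local volume to its cloud-tier copy. maybeLoadVolumeInfo() reads it at load time, and the helpers that follow swap the volume's DataBackend over to the remote store. A minimal sketch of the intended wiring at volume-load time (the surrounding load function is assumed, not shown in this hunk):

    // sketch only: how a tiered volume ends up reading from the cloud
    v.maybeLoadVolumeInfo()
    if v.HasRemoteFile() {
        // the .dat content lives in a remote backend such as S3;
        // route all further reads through it instead of local disk
        if err := v.LoadRemoteFile(); err != nil {
            return fmt.Errorf("load remote file for volume %d: %v", v.Id, err)
        }
    }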
+ +func (v *Volume) HasRemoteFile() bool { + return v.hasRemoteFile +} + +func (v *Volume) LoadRemoteFile() error { + tierFile := v.volumeInfo.GetFiles()[0] + backendStorage := backend.BackendStorages[tierFile.BackendName()] + + if v.DataBackend != nil { + v.DataBackend.Close() + } + + v.DataBackend = backendStorage.NewStorageFile(tierFile.Key, v.volumeInfo) + return nil +} + +func (v *Volume) SaveVolumeInfo() error { + + tierFileName := v.FileName() + ".vif" + + return pb.SaveVolumeInfo(tierFileName, v.volumeInfo) + +} diff --git a/weed/storage/volume_vacuum.go b/weed/storage/volume_vacuum.go index 73314f022..523b37e34 100644 --- a/weed/storage/volume_vacuum.go +++ b/weed/storage/volume_vacuum.go @@ -7,9 +7,11 @@ import ( "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/stats" + "github.com/chrislusf/seaweedfs/weed/storage/backend" idx2 "github.com/chrislusf/seaweedfs/weed/storage/idx" "github.com/chrislusf/seaweedfs/weed/storage/needle" "github.com/chrislusf/seaweedfs/weed/storage/needle_map" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" . "github.com/chrislusf/seaweedfs/weed/storage/types" "github.com/chrislusf/seaweedfs/weed/util" ) @@ -18,101 +20,113 @@ func (v *Volume) garbageLevel() float64 { if v.ContentSize() == 0 { return 0 } - return float64(v.DeletedSize()) / float64(v.ContentSize()) + deletedSize := v.DeletedSize() + fileSize := v.ContentSize() + if v.DeletedCount() > 0 && v.DeletedSize() == 0 { + // this happens for .sdx converted back to normal .idx + // where deleted entry size is missing + datFileSize, _, _ := v.FileStat() + deletedSize = datFileSize - fileSize - super_block.SuperBlockSize + fileSize = datFileSize + } + return float64(deletedSize) / float64(fileSize) } +// compact a volume based on deletions in .dat files func (v *Volume) Compact(preallocate int64, compactionBytePerSecond int64) error { - if v.MemoryMapMaxSizeMb > 0 { //it makes no sense to compact in memory - glog.V(3).Infof("Compacting volume %d ...", v.Id) - //no need to lock for copy on write - //v.accessLock.Lock() - //defer v.accessLock.Unlock() - //glog.V(3).Infof("Got Compaction lock...") - v.isCompacting = true - defer func() { - v.isCompacting = false - }() - - filePath := v.FileName() - v.lastCompactIndexOffset = v.IndexFileSize() - v.lastCompactRevision = v.SuperBlock.CompactionRevision - glog.V(3).Infof("creating copies for volume %d ,last offset %d...", v.Id, v.lastCompactIndexOffset) - return v.copyDataAndGenerateIndexFile(filePath+".cpd", filePath+".cpx", preallocate, compactionBytePerSecond) - } else { + if v.MemoryMapMaxSizeMb != 0 { //it makes no sense to compact in memory return nil } + glog.V(3).Infof("Compacting volume %d ...", v.Id) + //no need to lock for copy on write + //v.accessLock.Lock() + //defer v.accessLock.Unlock() + //glog.V(3).Infof("Got Compaction lock...") + v.isCompacting = true + defer func() { + v.isCompacting = false + }() + + filePath := v.FileName() + v.lastCompactIndexOffset = v.IndexFileSize() + v.lastCompactRevision = v.SuperBlock.CompactionRevision + glog.V(3).Infof("creating copies for volume %d ,last offset %d...", v.Id, v.lastCompactIndexOffset) + return v.copyDataAndGenerateIndexFile(filePath+".cpd", filePath+".cpx", preallocate, compactionBytePerSecond) } -func (v *Volume) Compact2() error { +// compact a volume based on deletions in .idx files +func (v *Volume) Compact2(preallocate int64) error { - if v.MemoryMapMaxSizeMb > 0 { //it makes no sense to compact in memory - glog.V(3).Infof("Compact2 
volume %d ...", v.Id) - - v.isCompacting = true - defer func() { - v.isCompacting = false - }() - - filePath := v.FileName() - glog.V(3).Infof("creating copies for volume %d ...", v.Id) - return v.copyDataBasedOnIndexFile(filePath+".cpd", filePath+".cpx") - } else { + if v.MemoryMapMaxSizeMb != 0 { //it makes no sense to compact in memory return nil } + glog.V(3).Infof("Compact2 volume %d ...", v.Id) + + v.isCompacting = true + defer func() { + v.isCompacting = false + }() + + filePath := v.FileName() + v.lastCompactIndexOffset = v.IndexFileSize() + v.lastCompactRevision = v.SuperBlock.CompactionRevision + glog.V(3).Infof("creating copies for volume %d ...", v.Id) + return copyDataBasedOnIndexFile(filePath+".dat", filePath+".idx", filePath+".cpd", filePath+".cpx", v.SuperBlock, v.Version(), preallocate) } func (v *Volume) CommitCompact() error { - if v.MemoryMapMaxSizeMb > 0 { //it makes no sense to compact in memory - glog.V(0).Infof("Committing volume %d vacuuming...", v.Id) + if v.MemoryMapMaxSizeMb != 0 { //it makes no sense to compact in memory + return nil + } + glog.V(0).Infof("Committing volume %d vacuuming...", v.Id) - v.isCompacting = true - defer func() { - v.isCompacting = false - }() + v.isCompacting = true + defer func() { + v.isCompacting = false + }() - v.dataFileAccessLock.Lock() - defer v.dataFileAccessLock.Unlock() + v.dataFileAccessLock.Lock() + defer v.dataFileAccessLock.Unlock() - glog.V(3).Infof("Got volume %d committing lock...", v.Id) - v.nm.Close() - if err := v.dataFile.Close(); err != nil { - glog.V(0).Infof("fail to close volume %d", v.Id) - } - v.dataFile = nil - stats.VolumeServerVolumeCounter.WithLabelValues(v.Collection, "volume").Dec() + glog.V(3).Infof("Got volume %d committing lock...", v.Id) + v.nm.Close() + if err := v.DataBackend.Close(); err != nil { + glog.V(0).Infof("fail to close volume %d", v.Id) + } + v.DataBackend = nil + stats.VolumeServerVolumeCounter.WithLabelValues(v.Collection, "volume").Dec() - var e error - if e = v.makeupDiff(v.FileName()+".cpd", v.FileName()+".cpx", v.FileName()+".dat", v.FileName()+".idx"); e != nil { - glog.V(0).Infof("makeupDiff in CommitCompact volume %d failed %v", v.Id, e) - e = os.Remove(v.FileName() + ".cpd") - if e != nil { - return e - } - e = os.Remove(v.FileName() + ".cpx") - if e != nil { - return e - } - } else { - var e error - if e = os.Rename(v.FileName()+".cpd", v.FileName()+".dat"); e != nil { - return fmt.Errorf("rename %s: %v", v.FileName()+".cpd", e) - } - if e = os.Rename(v.FileName()+".cpx", v.FileName()+".idx"); e != nil { - return fmt.Errorf("rename %s: %v", v.FileName()+".cpx", e) - } - } - - //glog.V(3).Infof("Pretending to be vacuuming...") - //time.Sleep(20 * time.Second) - - os.RemoveAll(v.FileName() + ".ldb") - os.RemoveAll(v.FileName() + ".bdb") - - glog.V(3).Infof("Loading volume %d commit file...", v.Id) - if e = v.load(true, false, v.needleMapKind, 0); e != nil { + var e error + if e = v.makeupDiff(v.FileName()+".cpd", v.FileName()+".cpx", v.FileName()+".dat", v.FileName()+".idx"); e != nil { + glog.V(0).Infof("makeupDiff in CommitCompact volume %d failed %v", v.Id, e) + e = os.Remove(v.FileName() + ".cpd") + if e != nil { return e } + e = os.Remove(v.FileName() + ".cpx") + if e != nil { + return e + } + } else { + var e error + if e = os.Rename(v.FileName()+".cpd", v.FileName()+".dat"); e != nil { + return fmt.Errorf("rename %s: %v", v.FileName()+".cpd", e) + } + if e = os.Rename(v.FileName()+".cpx", v.FileName()+".idx"); e != nil { + return fmt.Errorf("rename %s: %v", 
v.FileName()+".cpx", e) + } + } + + //glog.V(3).Infof("Pretending to be vacuuming...") + //time.Sleep(20 * time.Second) + + os.RemoveAll(v.FileName() + ".ldb") + os.RemoveAll(v.FileName() + ".bdb") + + glog.V(3).Infof("Loading volume %d commit file...", v.Id) + if e = v.load(true, false, v.needleMapKind, 0); e != nil { + return e } return nil } @@ -131,14 +145,15 @@ func (v *Volume) cleanupCompact() error { return nil } -func fetchCompactRevisionFromDatFile(file *os.File) (compactRevision uint16, err error) { - superBlock, err := ReadSuperBlock(file) +func fetchCompactRevisionFromDatFile(datBackend backend.BackendStorageFile) (compactRevision uint16, err error) { + superBlock, err := super_block.ReadSuperBlock(datBackend) if err != nil { return 0, err } return superBlock.CompactionRevision, nil } +// if old .dat and .idx files are updated, this func tries to apply the same changes to new files accordingly func (v *Volume) makeupDiff(newDatFileName, newIdxFileName, oldDatFileName, oldIdxFileName string) (err error) { var indexSize int64 @@ -146,8 +161,10 @@ func (v *Volume) makeupDiff(newDatFileName, newIdxFileName, oldDatFileName, oldI defer oldIdxFile.Close() oldDatFile, err := os.Open(oldDatFileName) - defer oldDatFile.Close() + oldDatBackend := backend.NewDiskFile(oldDatFile) + defer oldDatBackend.Close() + // skip if the old .idx file has not changed if indexSize, err = verifyIndexFileIntegrity(oldIdxFile); err != nil { return fmt.Errorf("verifyIndexFileIntegrity %s failed: %v", oldIdxFileName, err) } @@ -155,7 +172,8 @@ func (v *Volume) makeupDiff(newDatFileName, newIdxFileName, oldDatFileName, oldI return nil } - oldDatCompactRevision, err := fetchCompactRevisionFromDatFile(oldDatFile) + // fail if the old .dat file has changed to a new revision + oldDatCompactRevision, err := fetchCompactRevisionFromDatFile(oldDatBackend) if err != nil { return fmt.Errorf("fetchCompactRevisionFromDatFile src %s failed: %v", oldDatFile.Name(), err) } @@ -196,7 +214,8 @@ func (v *Volume) makeupDiff(newDatFileName, newIdxFileName, oldDatFileName, oldI if dst, err = os.OpenFile(newDatFileName, os.O_RDWR, 0644); err != nil { return fmt.Errorf("open dat file %s failed: %v", newDatFileName, err) } - defer dst.Close() + dstDatBackend := backend.NewDiskFile(dst) + defer dstDatBackend.Close() if idx, err = os.OpenFile(newIdxFileName, os.O_RDWR, 0644); err != nil { return fmt.Errorf("open idx file %s failed: %v", newIdxFileName, err) @@ -204,7 +223,7 @@ func (v *Volume) makeupDiff(newDatFileName, newIdxFileName, oldDatFileName, oldI defer idx.Close() var newDatCompactRevision uint16 - newDatCompactRevision, err = fetchCompactRevisionFromDatFile(dst) + newDatCompactRevision, err = fetchCompactRevisionFromDatFile(dstDatBackend) if err != nil { return fmt.Errorf("fetchCompactRevisionFromDatFile dst %s failed: %v", dst.Name(), err) } @@ -235,7 +254,7 @@ func (v *Volume) makeupDiff(newDatFileName, newIdxFileName, oldDatFileName, oldI //even the needle cache in memory is hit, the need_bytes is correct glog.V(4).Infof("file %d offset %d size %d", key, increIdxEntry.offset.ToAcutalOffset(), increIdxEntry.size) var needleBytes []byte - needleBytes, err = needle.ReadNeedleBlob(oldDatFile, increIdxEntry.offset.ToAcutalOffset(), increIdxEntry.size, v.Version()) + needleBytes, err = needle.ReadNeedleBlob(oldDatBackend, increIdxEntry.offset.ToAcutalOffset(), increIdxEntry.size, v.Version()) if err != nil { return fmt.Errorf("ReadNeedleBlob %s key %d offset %d size %d failed: %v", oldDatFile.Name(), key, 
increIdxEntry.offset.ToAcutalOffset(), increIdxEntry.size, err) } @@ -247,7 +266,7 @@ func (v *Volume) makeupDiff(newDatFileName, newIdxFileName, oldDatFileName, oldI fakeDelNeedle.Id = key fakeDelNeedle.Cookie = 0x12345678 fakeDelNeedle.AppendAtNs = uint64(time.Now().UnixNano()) - _, _, _, err = fakeDelNeedle.Append(dst, v.Version()) + _, _, _, err = fakeDelNeedle.Append(dstDatBackend, v.Version()) if err != nil { return fmt.Errorf("append deleted %d failed: %v", key, err) } @@ -267,17 +286,17 @@ func (v *Volume) makeupDiff(newDatFileName, newIdxFileName, oldDatFileName, oldI type VolumeFileScanner4Vacuum struct { version needle.Version v *Volume - dst *os.File - nm *NeedleMap + dstBackend backend.BackendStorageFile + nm *needle_map.MemDb newOffset int64 now uint64 writeThrottler *util.WriteThrottler } -func (scanner *VolumeFileScanner4Vacuum) VisitSuperBlock(superBlock SuperBlock) error { - scanner.version = superBlock.Version() +func (scanner *VolumeFileScanner4Vacuum) VisitSuperBlock(superBlock super_block.SuperBlock) error { + scanner.version = superBlock.Version superBlock.CompactionRevision++ - _, err := scanner.dst.Write(superBlock.Bytes()) + _, err := scanner.dstBackend.WriteAt(superBlock.Bytes(), 0) scanner.newOffset = int64(superBlock.BlockSize()) return err @@ -293,10 +312,10 @@ func (scanner *VolumeFileScanner4Vacuum) VisitNeedle(n *needle.Needle, offset in nv, ok := scanner.v.nm.Get(n.Id) glog.V(4).Infoln("needle expected offset ", offset, "ok", ok, "nv", nv) if ok && nv.Offset.ToAcutalOffset() == offset && nv.Size > 0 && nv.Size != TombstoneFileSize { - if err := scanner.nm.Put(n.Id, ToOffset(scanner.newOffset), n.Size); err != nil { + if err := scanner.nm.Set(n.Id, ToOffset(scanner.newOffset), n.Size); err != nil { return fmt.Errorf("cannot put needle: %s", err) } - if _, _, _, err := n.Append(scanner.dst, scanner.v.Version()); err != nil { + if _, _, _, err := n.Append(scanner.dstBackend, scanner.v.Version()); err != nil { return fmt.Errorf("cannot append needle: %s", err) } delta := n.DiskSize(scanner.version) @@ -309,88 +328,88 @@ func (scanner *VolumeFileScanner4Vacuum) VisitNeedle(n *needle.Needle, offset in func (v *Volume) copyDataAndGenerateIndexFile(dstName, idxName string, preallocate int64, compactionBytePerSecond int64) (err error) { var ( - dst, idx *os.File + dst backend.BackendStorageFile ) - if dst, err = createVolumeFile(dstName, preallocate, v.MemoryMapMaxSizeMb); err != nil { + if dst, err = createVolumeFile(dstName, preallocate, 0); err != nil { return } defer dst.Close() - if idx, err = os.OpenFile(idxName, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644); err != nil { - return - } - defer idx.Close() + nm := needle_map.NewMemDb() scanner := &VolumeFileScanner4Vacuum{ v: v, now: uint64(time.Now().Unix()), - nm: NewBtreeNeedleMap(idx), - dst: dst, + nm: nm, + dstBackend: dst, writeThrottler: util.NewWriteThrottler(compactionBytePerSecond), } err = ScanVolumeFile(v.dir, v.Collection, v.Id, v.needleMapKind, scanner) + if err != nil { + return err + } + + err = nm.SaveToIdx(idxName) return }
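Both compaction paths now stage the rewritten index in an in-memory needle map (needle_map.MemDb) and only materialize the index file once the data copy has finished, instead of writing a btree-backed .idx alongside the copy. A minimal sketch of the pattern, using the MemDb methods as they appear in this diff (the file name and the single needle shown are illustrative):

    // sketch: stage index entries in memory, persist them once at the end
    nm := needle_map.NewMemDb()
    // for every live needle copied into the .cpd file, record its new location
    if err := nm.Set(n.Id, ToOffset(newOffset), n.Size); err != nil {
        return err
    }
    // after the scan completes, write all entries out in .idx format
    return nm.SaveToIdx(filePath + ".cpx")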
-func (v *Volume) copyDataBasedOnIndexFile(dstName, idxName string) (err error) { +func copyDataBasedOnIndexFile(srcDatName, srcIdxName, dstDatName, datIdxName string, sb super_block.SuperBlock, version needle.Version, preallocate int64) (err error) { var ( - dst, idx, oldIndexFile *os.File + srcDatBackend, dstDatBackend backend.BackendStorageFile + dataFile *os.File ) - if dst, err = os.OpenFile(dstName, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644); err != nil { + if dstDatBackend, err = createVolumeFile(dstDatName, preallocate, 0); err != nil { return } - defer dst.Close() + defer dstDatBackend.Close() - if idx, err = os.OpenFile(idxName, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644); err != nil { + oldNm := needle_map.NewMemDb() + newNm := needle_map.NewMemDb() + if err = oldNm.LoadFromIdx(srcIdxName); err != nil { return } - defer idx.Close() - - if oldIndexFile, err = os.OpenFile(v.FileName()+".idx", os.O_RDONLY, 0644); err != nil { - return + if dataFile, err = os.Open(srcDatName); err != nil { + return err } - defer oldIndexFile.Close() + srcDatBackend = backend.NewDiskFile(dataFile) - nm := NewBtreeNeedleMap(idx) now := uint64(time.Now().Unix()) - v.SuperBlock.CompactionRevision++ - dst.Write(v.SuperBlock.Bytes()) - newOffset := int64(v.SuperBlock.BlockSize()) + sb.CompactionRevision++ + dstDatBackend.WriteAt(sb.Bytes(), 0) + newOffset := int64(sb.BlockSize()) + + oldNm.AscendingVisit(func(value needle_map.NeedleValue) error { + + offset, size := value.Offset, value.Size - idx2.WalkIndexFile(oldIndexFile, func(key NeedleId, offset Offset, size uint32) error { if offset.IsZero() || size == TombstoneFileSize { return nil } - nv, ok := v.nm.Get(key) - if !ok { - return nil - } - n := new(needle.Needle) - err := n.ReadData(v.dataFile, offset.ToAcutalOffset(), size, v.Version()) + err := n.ReadData(srcDatBackend, offset.ToAcutalOffset(), size, version) if err != nil { return nil } - if n.HasTtl() && now >= n.LastModified+uint64(v.Ttl.Minutes()*60) { return nil } + if n.HasTtl() && now >= n.LastModified+uint64(sb.Ttl.Minutes()*60) { return nil } - glog.V(4).Infoln("needle expected offset ", offset, "ok", ok, "nv", nv) - if nv.Offset == offset && nv.Size > 0 { - if err = nm.Put(n.Id, ToOffset(newOffset), n.Size); err != nil { - return fmt.Errorf("cannot put needle: %s", err) - } - if _, _, _, err = n.Append(dst, v.Version()); err != nil { - return fmt.Errorf("cannot append needle: %s", err) - } - newOffset += n.DiskSize(v.Version()) - glog.V(3).Infoln("saving key", n.Id, "volume offset", offset, "=>", newOffset, "data_size", n.Size) + if err = newNm.Set(n.Id, ToOffset(newOffset), n.Size); err != nil { + return fmt.Errorf("cannot put needle: %s", err) } + if _, _, _, err = n.Append(dstDatBackend, sb.Version); err != nil { + return fmt.Errorf("cannot append needle: %s", err) + } + newOffset += n.DiskSize(version) + glog.V(3).Infoln("saving key", n.Id, "volume offset", offset, "=>", newOffset, "data_size", n.Size) + return nil }) + + return newNm.SaveToIdx(datIdxName) } diff --git a/weed/storage/volume_vacuum_test.go b/weed/storage/volume_vacuum_test.go index ba1e59f2c..95f43d6ec 100644 --- a/weed/storage/volume_vacuum_test.go +++ b/weed/storage/volume_vacuum_test.go @@ -8,6 +8,7 @@ import ( "time" "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" "github.com/chrislusf/seaweedfs/weed/storage/types" ) @@ -46,7 +47,7 @@ func TestMakeDiff(t *testing.T) { v := new(Volume) //lastCompactIndexOffset value is the index file size before step 4 v.lastCompactIndexOffset = 96 - v.SuperBlock.version = 0x2 + v.SuperBlock.Version = 0x2 /* err := v.makeupDiff( "/yourpath/1.cpd", @@ -68,7 +69,7 @@ func TestCompaction(t *testing.T) { } defer os.RemoveAll(dir) // clean up - v, err := NewVolume(dir, "", 1, NeedleMapInMemory, &ReplicaPlacement{}, &needle.TTL{}, 0, 0) + v, err := NewVolume(dir, "", 1, NeedleMapInMemory, &super_block.ReplicaPlacement{}, &needle.TTL{}, 0, 0) if err != nil { t.Fatalf("volume creation: %v", err) } @@ 
-83,7 +84,7 @@ func TestCompaction(t *testing.T) { } startTime := time.Now() - v.Compact(0, 1024*1024) + v.Compact2(0) speed := float64(v.ContentSize()) / time.Now().Sub(startTime).Seconds() t.Logf("compaction speed: %.2f bytes/s", speed) diff --git a/weed/topology/allocate_volume.go b/weed/topology/allocate_volume.go index e5dc48652..6ca987bc5 100644 --- a/weed/topology/allocate_volume.go +++ b/weed/topology/allocate_volume.go @@ -15,7 +15,7 @@ type AllocateVolumeResult struct { func AllocateVolume(dn *DataNode, grpcDialOption grpc.DialOption, vid needle.VolumeId, option *VolumeGrowOption) error { - return operation.WithVolumeServerClient(dn.Url(), grpcDialOption, func(client volume_server_pb.VolumeServerClient) error { + return operation.WithVolumeServerClient(dn.Url(), grpcDialOption, func(ctx context.Context, client volume_server_pb.VolumeServerClient) error { _, deleteErr := client.AllocateVolume(context.Background(), &volume_server_pb.AllocateVolumeRequest{ VolumeId: uint32(vid), diff --git a/weed/topology/collection.go b/weed/topology/collection.go index f6b728ec9..7a611d904 100644 --- a/weed/topology/collection.go +++ b/weed/topology/collection.go @@ -3,8 +3,8 @@ package topology import ( "fmt" - "github.com/chrislusf/seaweedfs/weed/storage" "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" "github.com/chrislusf/seaweedfs/weed/util" ) @@ -24,7 +24,7 @@ func (c *Collection) String() string { return fmt.Sprintf("Name:%s, volumeSizeLimit:%d, storageType2VolumeLayout:%v", c.Name, c.volumeSizeLimit, c.storageType2VolumeLayout) } -func (c *Collection) GetOrCreateVolumeLayout(rp *storage.ReplicaPlacement, ttl *needle.TTL) *VolumeLayout { +func (c *Collection) GetOrCreateVolumeLayout(rp *super_block.ReplicaPlacement, ttl *needle.TTL) *VolumeLayout { keyString := rp.String() if ttl != nil { keyString += ttl.String() diff --git a/weed/topology/data_center.go b/weed/topology/data_center.go index 640cb1937..dc3accb71 100644 --- a/weed/topology/data_center.go +++ b/weed/topology/data_center.go @@ -48,6 +48,7 @@ func (dc *DataCenter) ToDataCenterInfo() *master_pb.DataCenterInfo { MaxVolumeCount: uint64(dc.GetMaxVolumeCount()), FreeVolumeCount: uint64(dc.FreeSpace()), ActiveVolumeCount: uint64(dc.GetActiveVolumeCount()), + RemoteVolumeCount: uint64(dc.GetRemoteVolumeCount()), } for _, c := range dc.Children() { rack := c.(*Rack) diff --git a/weed/topology/data_node.go b/weed/topology/data_node.go index 3e72ccdbf..617341e54 100644 --- a/weed/topology/data_node.go +++ b/weed/topology/data_node.go @@ -2,14 +2,13 @@ package topology import ( "fmt" + "strconv" "sync" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding" "github.com/chrislusf/seaweedfs/weed/storage/needle" - "strconv" - "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/storage" ) @@ -44,15 +43,26 @@ func (dn *DataNode) String() string { func (dn *DataNode) AddOrUpdateVolume(v storage.VolumeInfo) (isNew bool) { dn.Lock() defer dn.Unlock() - if _, ok := dn.volumes[v.Id]; !ok { + if oldV, ok := dn.volumes[v.Id]; !ok { dn.volumes[v.Id] = v dn.UpAdjustVolumeCountDelta(1) + if v.IsRemote() { + dn.UpAdjustRemoteVolumeCountDelta(1) + } if !v.ReadOnly { dn.UpAdjustActiveVolumeCountDelta(1) } dn.UpAdjustMaxVolumeId(v.Id) isNew = true } else { + if oldV.IsRemote() != v.IsRemote() { + if v.IsRemote() { + dn.UpAdjustRemoteVolumeCountDelta(1) + } + if oldV.IsRemote() { + 
dn.UpAdjustRemoteVolumeCountDelta(-1) + } + } dn.volumes[v.Id] = v } return @@ -70,7 +80,12 @@ func (dn *DataNode) UpdateVolumes(actualVolumes []storage.VolumeInfo) (newVolume delete(dn.volumes, vid) deletedVolumes = append(deletedVolumes, v) dn.UpAdjustVolumeCountDelta(-1) - dn.UpAdjustActiveVolumeCountDelta(-1) + if v.IsRemote() { + dn.UpAdjustRemoteVolumeCountDelta(-1) + } + if !v.ReadOnly { + dn.UpAdjustActiveVolumeCountDelta(-1) + } } } dn.Unlock() @@ -88,7 +103,12 @@ func (dn *DataNode) DeltaUpdateVolumes(newlVolumes, deletedVolumes []storage.Vol for _, v := range deletedVolumes { delete(dn.volumes, v.Id) dn.UpAdjustVolumeCountDelta(-1) - dn.UpAdjustActiveVolumeCountDelta(-1) + if v.IsRemote() { + dn.UpAdjustRemoteVolumeCountDelta(-1) + } + if !v.ReadOnly { + dn.UpAdjustActiveVolumeCountDelta(-1) + } } dn.Unlock() for _, v := range newlVolumes { @@ -160,6 +180,7 @@ func (dn *DataNode) ToDataNodeInfo() *master_pb.DataNodeInfo { MaxVolumeCount: uint64(dn.GetMaxVolumeCount()), FreeVolumeCount: uint64(dn.FreeSpace()), ActiveVolumeCount: uint64(dn.GetActiveVolumeCount()), + RemoteVolumeCount: uint64(dn.GetRemoteVolumeCount()), } for _, v := range dn.GetVolumes() { m.VolumeInfos = append(m.VolumeInfos, v.ToVolumeInformationMessage()) diff --git a/weed/topology/node.go b/weed/topology/node.go index b2808f589..572a89d4d 100644 --- a/weed/topology/node.go +++ b/weed/topology/node.go @@ -20,6 +20,7 @@ type Node interface { ReserveOneVolume(r int64) (*DataNode, error) UpAdjustMaxVolumeCountDelta(maxVolumeCountDelta int64) UpAdjustVolumeCountDelta(volumeCountDelta int64) + UpAdjustRemoteVolumeCountDelta(remoteVolumeCountDelta int64) UpAdjustEcShardCountDelta(ecShardCountDelta int64) UpAdjustActiveVolumeCountDelta(activeVolumeCountDelta int64) UpAdjustMaxVolumeId(vid needle.VolumeId) @@ -27,6 +28,7 @@ type Node interface { GetVolumeCount() int64 GetEcShardCount() int64 GetActiveVolumeCount() int64 + GetRemoteVolumeCount() int64 GetMaxVolumeCount() int64 GetMaxVolumeId() needle.VolumeId SetParent(Node) @@ -44,6 +46,7 @@ type Node interface { } type NodeImpl struct { volumeCount int64 + remoteVolumeCount int64 activeVolumeCount int64 ecShardCount int64 maxVolumeCount int64 @@ -132,10 +135,11 @@ func (n *NodeImpl) Id() NodeId { return n.id } func (n *NodeImpl) FreeSpace() int64 { + freeVolumeSlotCount := n.maxVolumeCount + n.remoteVolumeCount - n.volumeCount if n.ecShardCount > 0 { - return n.maxVolumeCount - n.volumeCount - n.ecShardCount/erasure_coding.DataShardsCount - 1 + freeVolumeSlotCount = freeVolumeSlotCount - n.ecShardCount/erasure_coding.DataShardsCount - 1 } - return n.maxVolumeCount - n.volumeCount + return freeVolumeSlotCount } func (n *NodeImpl) SetParent(node Node) { n.parent = node @@ -191,6 +195,12 @@ func (n *NodeImpl) UpAdjustVolumeCountDelta(volumeCountDelta int64) { //can be n n.parent.UpAdjustVolumeCountDelta(volumeCountDelta) } } +func (n *NodeImpl) UpAdjustRemoteVolumeCountDelta(remoteVolumeCountDelta int64) { //can be negative + atomic.AddInt64(&n.remoteVolumeCount, remoteVolumeCountDelta) + if n.parent != nil { + n.parent.UpAdjustRemoteVolumeCountDelta(remoteVolumeCountDelta) + } +} func (n *NodeImpl) UpAdjustEcShardCountDelta(ecShardCountDelta int64) { //can be negative atomic.AddInt64(&n.ecShardCount, ecShardCountDelta) if n.parent != nil { @@ -220,6 +230,9 @@ func (n *NodeImpl) GetVolumeCount() int64 { return n.volumeCount } func (n *NodeImpl) GetEcShardCount() int64 { return n.ecShardCount } +func (n *NodeImpl) GetRemoteVolumeCount() int64 { + return n.remoteVolumeCount +} func (n *NodeImpl) GetActiveVolumeCount() int64 { return n.activeVolumeCount } @@ -235,6 +248,7 @@ func (n *NodeImpl) LinkChildNode(node Node) { n.UpAdjustMaxVolumeCountDelta(node.GetMaxVolumeCount()) n.UpAdjustMaxVolumeId(node.GetMaxVolumeId()) n.UpAdjustVolumeCountDelta(node.GetVolumeCount()) + n.UpAdjustRemoteVolumeCountDelta(node.GetRemoteVolumeCount()) n.UpAdjustEcShardCountDelta(node.GetEcShardCount()) n.UpAdjustActiveVolumeCountDelta(node.GetActiveVolumeCount()) node.SetParent(n) @@ -250,6 +264,7 @@ func (n *NodeImpl) UnlinkChildNode(nodeId NodeId) { node.SetParent(nil) delete(n.children, node.Id()) n.UpAdjustVolumeCountDelta(-node.GetVolumeCount()) + n.UpAdjustRemoteVolumeCountDelta(-node.GetRemoteVolumeCount()) n.UpAdjustEcShardCountDelta(-node.GetEcShardCount()) n.UpAdjustActiveVolumeCountDelta(-node.GetActiveVolumeCount()) n.UpAdjustMaxVolumeCountDelta(-node.GetMaxVolumeCount())
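The FreeSpace() change above is what makes cloud tiering hand capacity back: a node's free write slots become maxVolumeCount + remoteVolumeCount - volumeCount, so a volume whose .dat file has been offloaded to a remote backend no longer consumes a local slot, while EC shards are still charged against the total in groups of DataShardsCount. A small worked example with assumed counts (the numbers are illustrative, not from this diff):

    // sketch: free-slot arithmetic from FreeSpace(), with assumed numbers
    maxVolumeCount := int64(100)   // configured max volumes on this node
    volumeCount := int64(80)       // volumes currently assigned to it
    remoteVolumeCount := int64(30) // of those, 30 are tiered to the cloud
    free := maxVolumeCount + remoteVolumeCount - volumeCount // == 50
    // the 30 tiered volumes return their local slots; the old formula
    // (maxVolumeCount - volumeCount) would have reported only 20
    _ = free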
diff --git a/weed/topology/rack.go b/weed/topology/rack.go index 932c1a804..1921c0c05 100644 --- a/weed/topology/rack.go +++ b/weed/topology/rack.go @@ -67,6 +67,7 @@ func (r *Rack) ToRackInfo() *master_pb.RackInfo { MaxVolumeCount: uint64(r.GetMaxVolumeCount()), FreeVolumeCount: uint64(r.FreeSpace()), ActiveVolumeCount: uint64(r.GetActiveVolumeCount()), + RemoteVolumeCount: uint64(r.GetRemoteVolumeCount()), } for _, c := range r.Children() { dn := c.(*DataNode) diff --git a/weed/topology/store_replicate.go b/weed/topology/store_replicate.go index d21c4d210..b195b48ed 100644 --- a/weed/topology/store_replicate.go +++ b/weed/topology/store_replicate.go @@ -25,58 +25,61 @@ func ReplicatedWrite(masterNode string, s *storage.Store, //check JWT jwt := security.GetJwt(r) + var remoteLocations []operation.Location + if r.FormValue("type") != "replicate" { + remoteLocations, err = getWritableRemoteReplications(s, volumeId, masterNode) + if err != nil { + glog.V(0).Infoln(err) + return + } + } + size, isUnchanged, err = s.WriteVolumeNeedle(volumeId, n) if err != nil { err = fmt.Errorf("failed to write to local disk: %v", err) + glog.V(0).Infoln(err) return } - needToReplicate := !s.HasVolume(volumeId) - needToReplicate = needToReplicate || s.GetVolume(volumeId).NeedToReplicate() - if !needToReplicate { - needToReplicate = s.GetVolume(volumeId).NeedToReplicate() - } - if needToReplicate { //send to other replica locations - if r.FormValue("type") != "replicate" { - - if err = distributedOperation(masterNode, s, volumeId, func(location operation.Location) error { - u := url.URL{ - Scheme: "http", - Host: location.Url, - Path: r.URL.Path, - } - q := url.Values{ - "type": {"replicate"}, - "ttl": {n.Ttl.String()}, - } - if n.LastModified > 0 { - q.Set("ts", strconv.FormatUint(n.LastModified, 10)) - } - if n.IsChunkedManifest() { - q.Set("cm", "true") - } - u.RawQuery = q.Encode() - - pairMap := make(map[string]string) - if n.HasPairs() { - tmpMap := make(map[string]string) - err := json.Unmarshal(n.Pairs, &tmpMap) - if err != nil { - glog.V(0).Infoln("Unmarshal pairs error:", err) - } - for k, v := range tmpMap { - pairMap[needle.PairNamePrefix+k] = v - } - } - - _, err := operation.Upload(u.String(), - string(n.Name), bytes.NewReader(n.Data), n.IsGzipped(), string(n.Mime), - pairMap, jwt) - return err - }); err != nil { - size = 0 - err = fmt.Errorf("failed to write to replicas for volume %d: %v", volumeId, err) + if len(remoteLocations) > 0 { //send to other replica locations + if err = distributedOperation(remoteLocations, s, func(location operation.Location) error { + u := url.URL{ + Scheme: "http", + Host: location.Url, + Path: 
r.URL.Path, } + q := url.Values{ + "type": {"replicate"}, + "ttl": {n.Ttl.String()}, + } + if n.LastModified > 0 { + q.Set("ts", strconv.FormatUint(n.LastModified, 10)) + } + if n.IsChunkedManifest() { + q.Set("cm", "true") + } + u.RawQuery = q.Encode() + + pairMap := make(map[string]string) + if n.HasPairs() { + tmpMap := make(map[string]string) + err := json.Unmarshal(n.Pairs, &tmpMap) + if err != nil { + glog.V(0).Infoln("Unmarshal pairs error:", err) + } + for k, v := range tmpMap { + pairMap[needle.PairNamePrefix+k] = v + } + } + + _, err := operation.Upload(u.String(), + string(n.Name), bytes.NewReader(n.Data), n.IsGzipped(), string(n.Mime), + pairMap, jwt) + return err + }); err != nil { + size = 0 + err = fmt.Errorf("failed to write to replicas for volume %d: %v", volumeId, err) + glog.V(0).Infoln(err) } } return @@ -84,31 +87,34 @@ func ReplicatedWrite(masterNode string, s *storage.Store, func ReplicatedDelete(masterNode string, store *storage.Store, volumeId needle.VolumeId, n *needle.Needle, - r *http.Request) (uint32, error) { + r *http.Request) (size uint32, err error) { //check JWT jwt := security.GetJwt(r) - ret, err := store.DeleteVolumeNeedle(volumeId, n) - if err != nil { - glog.V(0).Infoln("delete error:", err) - return ret, err - } - - needToReplicate := !store.HasVolume(volumeId) - if !needToReplicate && ret > 0 { - needToReplicate = store.GetVolume(volumeId).NeedToReplicate() - } - if needToReplicate { //send to other replica locations - if r.FormValue("type") != "replicate" { - if err = distributedOperation(masterNode, store, volumeId, func(location operation.Location) error { - return util.Delete("http://"+location.Url+r.URL.Path+"?type=replicate", string(jwt)) - }); err != nil { - ret = 0 - } + var remoteLocations []operation.Location + if r.FormValue("type") != "replicate" { + remoteLocations, err = getWritableRemoteReplications(store, volumeId, masterNode) + if err != nil { + glog.V(0).Infoln(err) + return } } - return ret, err + + size, err = store.DeleteVolumeNeedle(volumeId, n) + if err != nil { + glog.V(0).Infoln("delete error:", err) + return + } + + if len(remoteLocations) > 0 { //send to other replica locations + if err = distributedOperation(remoteLocations, store, func(location operation.Location) error { + return util.Delete("http://"+location.Url+r.URL.Path+"?type=replicate", string(jwt)) + }); err != nil { + size = 0 + } + } + return } type DistributedOperationResult map[string]error @@ -131,32 +137,44 @@ type RemoteResult struct { Error error } -func distributedOperation(masterNode string, store *storage.Store, volumeId needle.VolumeId, op func(location operation.Location) error) error { - if lookupResult, lookupErr := operation.Lookup(masterNode, volumeId.String()); lookupErr == nil { - length := 0 - selfUrl := (store.Ip + ":" + strconv.Itoa(store.Port)) - results := make(chan RemoteResult) - for _, location := range lookupResult.Locations { - if location.Url != selfUrl { - length++ - go func(location operation.Location, results chan RemoteResult) { - results <- RemoteResult{location.Url, op(location)} - }(location, results) - } - } - ret := DistributedOperationResult(make(map[string]error)) - for i := 0; i < length; i++ { - result := <-results - ret[result.Host] = result.Error - } - if volume := store.GetVolume(volumeId); volume != nil { - if length+1 < volume.ReplicaPlacement.GetCopyCount() { - return fmt.Errorf("replicating opetations [%d] is less than volume's replication copy count [%d]", length+1, volume.ReplicaPlacement.GetCopyCount()) - } - 
} - return ret.Error() - } else { - glog.V(0).Infoln() - return fmt.Errorf("Failed to lookup for %d: %v", volumeId, lookupErr) +func distributedOperation(locations []operation.Location, store *storage.Store, op func(location operation.Location) error) error { + length := len(locations) + results := make(chan RemoteResult) + for _, location := range locations { + go func(location operation.Location, results chan RemoteResult) { + results <- RemoteResult{location.Url, op(location)} + }(location, results) } + ret := DistributedOperationResult(make(map[string]error)) + for i := 0; i < length; i++ { + result := <-results + ret[result.Host] = result.Error + } + + return ret.Error() +} + +func getWritableRemoteReplications(s *storage.Store, volumeId needle.VolumeId, masterNode string) ( + remoteLocations []operation.Location, err error) { + copyCount := s.GetVolume(volumeId).ReplicaPlacement.GetCopyCount() + if copyCount > 1 { + if lookupResult, lookupErr := operation.Lookup(masterNode, volumeId.String()); lookupErr == nil { + if len(lookupResult.Locations) < copyCount { + err = fmt.Errorf("replicating operations [%d] is less than volume's replication copy count [%d]", + len(lookupResult.Locations), copyCount) + return + } + selfUrl := s.Ip + ":" + strconv.Itoa(s.Port) + for _, location := range lookupResult.Locations { + if location.Url != selfUrl { + remoteLocations = append(remoteLocations, location) + } + } + } else { + err = fmt.Errorf("failed to lookup for %d: %v", volumeId, lookupErr) + return + } + } + + return } diff --git a/weed/topology/topology.go b/weed/topology/topology.go index ea0769248..fbf998707 100644 --- a/weed/topology/topology.go +++ b/weed/topology/topology.go @@ -7,11 +7,13 @@ import ( "sync" "github.com/chrislusf/raft" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" "github.com/chrislusf/seaweedfs/weed/sequence" "github.com/chrislusf/seaweedfs/weed/storage" "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" "github.com/chrislusf/seaweedfs/weed/util" ) @@ -58,7 +60,12 @@ func NewTopology(id string, seq sequence.Sequencer, volumeSizeLimit uint64, puls func (t *Topology) IsLeader() bool { if t.RaftServer != nil { - return t.RaftServer.State() == raft.Leader + if t.RaftServer.State() == raft.Leader { + return true + } + if t.RaftServer.Leader() == "" { + return true + } } return false } @@ -73,7 +80,7 @@ func (t *Topology) Leader() (string, error) { if l == "" { // We are a single node cluster, we are the leader - return t.RaftServer.Name(), errors.New("Raft Server not initialized!") + return t.RaftServer.Name(), nil } return l, nil } @@ -125,11 +132,11 @@ func (t *Topology) PickForWrite(count uint64, option *VolumeGrowOption) (string, if datanodes.Length() == 0 { return "", 0, nil, fmt.Errorf("no writable volumes available for collection:%s replication:%s ttl:%s", option.Collection, option.ReplicaPlacement.String(), option.Ttl.String()) } - fileId, count := t.Sequence.NextFileId(count) + fileId := t.Sequence.NextFileId(count) return needle.NewFileId(*vid, fileId, rand.Uint32()).String(), count, datanodes.Head(), nil } -func (t *Topology) GetVolumeLayout(collectionName string, rp *storage.ReplicaPlacement, ttl *needle.TTL) *VolumeLayout { +func (t *Topology) GetVolumeLayout(collectionName string, rp *super_block.ReplicaPlacement, ttl *needle.TTL) *VolumeLayout { return t.collectionMap.Get(collectionName, func() interface{} { return NewCollection(collectionName, 
t.volumeSizeLimit) }).(*Collection).GetOrCreateVolumeLayout(rp, ttl) @@ -150,7 +157,7 @@ func (t *Topology) ListCollections(includeNormalVolumes, includeEcVolumes bool) t.ecShardMapLock.RUnlock() } - for k, _ := range mapOfCollections { + for k := range mapOfCollections { ret = append(ret, k) } return ret diff --git a/weed/topology/topology_event_handling.go b/weed/topology/topology_event_handling.go index 041351492..068bd401e 100644 --- a/weed/topology/topology_event_handling.go +++ b/weed/topology/topology_event_handling.go @@ -59,6 +59,7 @@ func (t *Topology) UnRegisterDataNode(dn *DataNode) { vl.SetVolumeUnavailable(dn, v.Id) } dn.UpAdjustVolumeCountDelta(-dn.GetVolumeCount()) + dn.UpAdjustRemoteVolumeCountDelta(-dn.GetRemoteVolumeCount()) dn.UpAdjustActiveVolumeCountDelta(-dn.GetActiveVolumeCount()) dn.UpAdjustMaxVolumeCountDelta(-dn.GetMaxVolumeCount()) if dn.Parent() != nil { diff --git a/weed/topology/topology_map.go b/weed/topology/topology_map.go index 37a88c9ed..73c55d77d 100644 --- a/weed/topology/topology_map.go +++ b/weed/topology/topology_map.go @@ -23,7 +23,7 @@ func (t *Topology) ToMap() interface{} { } } } - m["layouts"] = layouts + m["Layouts"] = layouts return m } @@ -85,6 +85,7 @@ func (t *Topology) ToTopologyInfo() *master_pb.TopologyInfo { MaxVolumeCount: uint64(t.GetMaxVolumeCount()), FreeVolumeCount: uint64(t.FreeSpace()), ActiveVolumeCount: uint64(t.GetActiveVolumeCount()), + RemoteVolumeCount: uint64(t.GetRemoteVolumeCount()), } for _, c := range t.Children() { dc := c.(*DataCenter) diff --git a/weed/topology/topology_test.go b/weed/topology/topology_test.go index 8f79ad684..e7676ccf7 100644 --- a/weed/topology/topology_test.go +++ b/weed/topology/topology_test.go @@ -5,6 +5,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/sequence" "github.com/chrislusf/seaweedfs/weed/storage" "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" "testing" ) @@ -94,7 +95,7 @@ func TestHandlingVolumeServerHeartbeat(t *testing.T) { []*master_pb.VolumeShortInformationMessage{newVolumeShortMessage}, nil, dn) - rp, _ := storage.NewReplicaPlacementFromString("000") + rp, _ := super_block.NewReplicaPlacementFromString("000") layout := topo.GetVolumeLayout("", rp, needle.EMPTY_TTL) assert(t, "writables after repeated add", len(layout.writables), volumeCount) @@ -154,7 +155,7 @@ func TestAddRemoveVolume(t *testing.T) { DeletedByteCount: 45, ReadOnly: false, Version: needle.CurrentVersion, - ReplicaPlacement: &storage.ReplicaPlacement{}, + ReplicaPlacement: &super_block.ReplicaPlacement{}, Ttl: needle.EMPTY_TTL, } diff --git a/weed/topology/topology_vacuum.go b/weed/topology/topology_vacuum.go index 37a6a30b9..e7dbf9b1e 100644 --- a/weed/topology/topology_vacuum.go +++ b/weed/topology/topology_vacuum.go @@ -13,20 +13,26 @@ import ( "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" ) -func batchVacuumVolumeCheck(grpcDialOption grpc.DialOption, vl *VolumeLayout, vid needle.VolumeId, locationlist *VolumeLocationList, garbageThreshold float64) bool { - ch := make(chan bool, locationlist.Length()) +func batchVacuumVolumeCheck(grpcDialOption grpc.DialOption, vl *VolumeLayout, vid needle.VolumeId, + locationlist *VolumeLocationList, garbageThreshold float64) (*VolumeLocationList, bool) { + ch := make(chan int, locationlist.Length()) + errCount := int32(0) for index, dn := range locationlist.list { go func(index int, url string, vid needle.VolumeId) { - err := operation.WithVolumeServerClient(url, grpcDialOption, 
func(volumeServerClient volume_server_pb.VolumeServerClient) error { - resp, err := volumeServerClient.VacuumVolumeCheck(context.Background(), &volume_server_pb.VacuumVolumeCheckRequest{ + err := operation.WithVolumeServerClient(url, grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error { + resp, err := volumeServerClient.VacuumVolumeCheck(ctx, &volume_server_pb.VacuumVolumeCheckRequest{ VolumeId: uint32(vid), }) if err != nil { - ch <- false + atomic.AddInt32(&errCount, 1) + ch <- -1 return err } - isNeeded := resp.GarbageRatio > garbageThreshold - ch <- isNeeded + if resp.GarbageRatio >= garbageThreshold { + ch <- index + } else { + ch <- -1 + } return nil }) if err != nil { @@ -34,19 +40,21 @@ func batchVacuumVolumeCheck(grpcDialOption grpc.DialOption, vl *VolumeLayout, vi } }(index, dn.Url(), vid) } - isCheckSuccess := true - for _ = range locationlist.list { + vacuumLocationList := NewVolumeLocationList() + for range locationlist.list { select { - case canVacuum := <-ch: - isCheckSuccess = isCheckSuccess && canVacuum + case index := <-ch: + if index != -1 { + vacuumLocationList.list = append(vacuumLocationList.list, locationlist.list[index]) + } case <-time.After(30 * time.Minute): - isCheckSuccess = false - break + return vacuumLocationList, false } } - return isCheckSuccess + return vacuumLocationList, errCount == 0 && len(vacuumLocationList.list) > 0 } -func batchVacuumVolumeCompact(grpcDialOption grpc.DialOption, vl *VolumeLayout, vid needle.VolumeId, locationlist *VolumeLocationList, preallocate int64) bool { +func batchVacuumVolumeCompact(grpcDialOption grpc.DialOption, vl *VolumeLayout, vid needle.VolumeId, + locationlist *VolumeLocationList, preallocate int64) bool { vl.accessLock.Lock() vl.removeFromWritable(vid) vl.accessLock.Unlock() @@ -55,8 +63,8 @@ func batchVacuumVolumeCompact(grpcDialOption grpc.DialOption, vl *VolumeLayout, for index, dn := range locationlist.list { go func(index int, url string, vid needle.VolumeId) { glog.V(0).Infoln(index, "Start vacuuming", vid, "on", url) - err := operation.WithVolumeServerClient(url, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { - _, err := volumeServerClient.VacuumVolumeCompact(context.Background(), &volume_server_pb.VacuumVolumeCompactRequest{ + err := operation.WithVolumeServerClient(url, grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error { + _, err := volumeServerClient.VacuumVolumeCompact(ctx, &volume_server_pb.VacuumVolumeCompactRequest{ VolumeId: uint32(vid), }) return err @@ -71,13 +79,12 @@ func batchVacuumVolumeCompact(grpcDialOption grpc.DialOption, vl *VolumeLayout, }(index, dn.Url(), vid) } isVacuumSuccess := true - for _ = range locationlist.list { + for range locationlist.list { select { case canCommit := <-ch: isVacuumSuccess = isVacuumSuccess && canCommit case <-time.After(30 * time.Minute): - isVacuumSuccess = false - break + return false } } return isVacuumSuccess @@ -86,8 +93,8 @@ func batchVacuumVolumeCommit(grpcDialOption grpc.DialOption, vl *VolumeLayout, v isCommitSuccess := true for _, dn := range locationlist.list { glog.V(0).Infoln("Start Committing vacuum", vid, "on", dn.Url()) - err := operation.WithVolumeServerClient(dn.Url(), grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { - _, err := volumeServerClient.VacuumVolumeCommit(context.Background(), &volume_server_pb.VacuumVolumeCommitRequest{ + err := 
operation.WithVolumeServerClient(dn.Url(), grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error { + _, err := volumeServerClient.VacuumVolumeCommit(ctx, &volume_server_pb.VacuumVolumeCommitRequest{ VolumeId: uint32(vid), }) return err @@ -107,8 +114,8 @@ func batchVacuumVolumeCommit(grpcDialOption grpc.DialOption, vl *VolumeLayout, v func batchVacuumVolumeCleanup(grpcDialOption grpc.DialOption, vl *VolumeLayout, vid needle.VolumeId, locationlist *VolumeLocationList) { for _, dn := range locationlist.list { glog.V(0).Infoln("Start cleaning up", vid, "on", dn.Url()) - err := operation.WithVolumeServerClient(dn.Url(), grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { - _, err := volumeServerClient.VacuumVolumeCleanup(context.Background(), &volume_server_pb.VacuumVolumeCleanupRequest{ + err := operation.WithVolumeServerClient(dn.Url(), grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error { + _, err := volumeServerClient.VacuumVolumeCleanup(ctx, &volume_server_pb.VacuumVolumeCleanupRequest{ VolumeId: uint32(vid), }) return err @@ -165,11 +172,12 @@ func vacuumOneVolumeLayout(grpcDialOption grpc.DialOption, volumeLayout *VolumeL } glog.V(2).Infof("check vacuum on collection:%s volume:%d", c.Name, vid) - if batchVacuumVolumeCheck(grpcDialOption, volumeLayout, vid, locationList, garbageThreshold) { - if batchVacuumVolumeCompact(grpcDialOption, volumeLayout, vid, locationList, preallocate) { - batchVacuumVolumeCommit(grpcDialOption, volumeLayout, vid, locationList) + if vacuumLocationList, needVacuum := batchVacuumVolumeCheck( + grpcDialOption, volumeLayout, vid, locationList, garbageThreshold); needVacuum { + if batchVacuumVolumeCompact(grpcDialOption, volumeLayout, vid, vacuumLocationList, preallocate) { + batchVacuumVolumeCommit(grpcDialOption, volumeLayout, vid, vacuumLocationList) } else { - batchVacuumVolumeCleanup(grpcDialOption, volumeLayout, vid, locationList) + batchVacuumVolumeCleanup(grpcDialOption, volumeLayout, vid, vacuumLocationList) } } } diff --git a/weed/topology/volume_growth.go b/weed/topology/volume_growth.go index f21ab45ef..781a34ba3 100644 --- a/weed/topology/volume_growth.go +++ b/weed/topology/volume_growth.go @@ -6,6 +6,9 @@ import ( "sync" "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" + "github.com/chrislusf/seaweedfs/weed/util" + + "google.golang.org/grpc" "github.com/chrislusf/seaweedfs/weed/glog" @@ -22,7 +25,7 @@ This package is created to resolve these replica placement issues: type VolumeGrowOption struct { Collection string - ReplicaPlacement *storage.ReplicaPlacement + ReplicaPlacement *super_block.ReplicaPlacement Ttl *needle.TTL Prealloacte int64 DataCenter string @@ -46,21 +49,29 @@ func NewDefaultVolumeGrowth() *VolumeGrowth { // one replication type may need rp.GetCopyCount() actual volumes // given copyCount, how many logical volumes to create func (vg *VolumeGrowth) findVolumeCount(copyCount int) (count int) { + v := util.GetViper() + v.SetDefault("master.volume_growth.copy_1", 7) + v.SetDefault("master.volume_growth.copy_2", 6) + v.SetDefault("master.volume_growth.copy_3", 3) + v.SetDefault("master.volume_growth.copy_other", 1) switch copyCount { case 1: - count = 7 + count = v.GetInt("master.volume_growth.copy_1") case 2: - count = 6 + count = v.GetInt("master.volume_growth.copy_2") case 3: - count = 3 + count = v.GetInt("master.volume_growth.copy_3") default: - count = 1 + count = v.GetInt("master.volume_growth.copy_other") } return }
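findVolumeCount now takes its growth batch sizes from configuration instead of hardcoding 7/6/3/1, and util.GetViper (further down in this diff) layers WEED_-prefixed environment variables on top, with dots in key names mapped to underscores. A hedged sketch of reading an override, matching the keys above (the value 9 and the log line are illustrative; viper resolves environment, then config file, then the SetDefault values):

    // sketch only: WEED_MASTER_VOLUME_GROWTH_COPY_1=9 weed master
    // would make single-copy growth allocate 9 volumes per request
    v := util.GetViper()
    v.SetDefault("master.volume_growth.copy_1", 7)
    glog.V(0).Infof("growing %d volumes per copy_1 request", v.GetInt("master.volume_growth.copy_1"))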
default: - count = 1 + count = v.GetInt("master.volume_growth.copy_other") } return } -func (vg *VolumeGrowth) AutomaticGrowByType(option *VolumeGrowOption, grpcDialOption grpc.DialOption, topo *Topology) (count int, err error) { - count, err = vg.GrowByCountAndType(grpcDialOption, vg.findVolumeCount(option.ReplicaPlacement.GetCopyCount()), option, topo) +func (vg *VolumeGrowth) AutomaticGrowByType(option *VolumeGrowOption, grpcDialOption grpc.DialOption, topo *Topology, targetCount int) (count int, err error) { + if targetCount == 0 { + targetCount = vg.findVolumeCount(option.ReplicaPlacement.GetCopyCount()) + } + count, err = vg.GrowByCountAndType(grpcDialOption, targetCount, option, topo) if count > 0 && count%option.ReplicaPlacement.GetCopyCount() == 0 { return count, nil } diff --git a/weed/topology/volume_growth_test.go b/weed/topology/volume_growth_test.go index 3573365fd..e3c5cc580 100644 --- a/weed/topology/volume_growth_test.go +++ b/weed/topology/volume_growth_test.go @@ -8,6 +8,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/sequence" "github.com/chrislusf/seaweedfs/weed/storage" "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" ) var topologyLayout = ` @@ -113,7 +114,7 @@ func setup(topologyLayout string) *Topology { func TestFindEmptySlotsForOneVolume(t *testing.T) { topo := setup(topologyLayout) vg := NewDefaultVolumeGrowth() - rp, _ := storage.NewReplicaPlacementFromString("002") + rp, _ := super_block.NewReplicaPlacementFromString("002") volumeGrowOption := &VolumeGrowOption{ Collection: "", ReplicaPlacement: rp, diff --git a/weed/topology/volume_layout.go b/weed/topology/volume_layout.go index 799cbca62..7633b28be 100644 --- a/weed/topology/volume_layout.go +++ b/weed/topology/volume_layout.go @@ -10,11 +10,12 @@ import ( "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/storage" "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" ) // mapping from volume to its locations, inverted from server to volume type VolumeLayout struct { - rp *storage.ReplicaPlacement + rp *super_block.ReplicaPlacement ttl *needle.TTL vid2location map[needle.VolumeId]*VolumeLocationList writables []needle.VolumeId // transient array of writable volume id @@ -30,7 +31,7 @@ type VolumeLayoutStats struct { FileCount uint64 } -func NewVolumeLayout(rp *storage.ReplicaPlacement, ttl *needle.TTL, volumeSizeLimit uint64) *VolumeLayout { +func NewVolumeLayout(rp *super_block.ReplicaPlacement, ttl *needle.TTL, volumeSizeLimit uint64) *VolumeLayout { return &VolumeLayout{ rp: rp, ttl: ttl, diff --git a/weed/util/bytes.go b/weed/util/bytes.go index dfa4ae665..9c7e5e2cb 100644 --- a/weed/util/bytes.go +++ b/weed/util/bytes.go @@ -1,5 +1,10 @@ package util +import ( + "crypto/md5" + "io" +) + // big endian func BytesToUint64(b []byte) (v uint64) { @@ -43,3 +48,29 @@ func Uint16toBytes(b []byte, v uint16) { func Uint8toBytes(b []byte, v uint8) { b[0] = byte(v) } + +// returns a 64 bit big int +func HashStringToLong(dir string) (v int64) { + h := md5.New() + io.WriteString(h, dir) + + b := h.Sum(nil) + + v += int64(b[0]) + v <<= 8 + v += int64(b[1]) + v <<= 8 + v += int64(b[2]) + v <<= 8 + v += int64(b[3]) + v <<= 8 + v += int64(b[4]) + v <<= 8 + v += int64(b[5]) + v <<= 8 + v += int64(b[6]) + v <<= 8 + v += int64(b[7]) + + return +} diff --git a/weed/util/compression.go b/weed/util/compression.go index c6c9423e2..6072df632 100644 --- 
a/weed/util/compression.go +++ b/weed/util/compression.go @@ -60,7 +60,7 @@ func UnGzipData(input []byte) ([]byte, error) { // images switch ext { - case ".svg", ".bmp": + case ".svg", ".bmp", ".wav": return true, true } if strings.HasPrefix(mtype, "image/") { @@ -87,6 +87,14 @@ func UnGzipData(input []byte) ([]byte, error) { if strings.HasSuffix(mtype, "script") { return true, true } + + } + + if strings.HasPrefix(mtype, "audio/") { + switch strings.TrimPrefix(mtype, "audio/") { + case "wave", "wav", "x-wav", "x-pn-wav": + return true, true + } } return false, false diff --git a/weed/util/config.go b/weed/util/config.go index 1ea833d1f..dfbfdbd82 100644 --- a/weed/util/config.go +++ b/weed/util/config.go @@ -1,17 +1,19 @@ package util import ( - "github.com/chrislusf/seaweedfs/weed/glog" + "strings" + "github.com/spf13/viper" + + "github.com/chrislusf/seaweedfs/weed/glog" ) type Configuration interface { GetString(key string) string GetBool(key string) bool GetInt(key string) int - GetInt64(key string) int64 - GetFloat64(key string) float64 GetStringSlice(key string) []string + SetDefault(key string, value interface{}) } func LoadConfiguration(configFileName string, required bool) (loaded bool) { @@ -28,10 +30,7 @@ func LoadConfiguration(configFileName string, required bool) (loaded bool) { glog.V(0).Infof("Reading %s: %v", viper.ConfigFileUsed(), err) if required { glog.Fatalf("Failed to load %s.toml file from current directory, or $HOME/.seaweedfs/, or /etc/seaweedfs/"+ - "\n\nPlease follow this example and add a filer.toml file to "+ - "current directory, or $HOME/.seaweedfs/, or /etc/seaweedfs/:\n"+ - " https://github.com/chrislusf/seaweedfs/blob/master/weed/%s.toml\n"+ - "\nOr use this command to generate the default toml file\n"+ + "\n\nPlease use this command to generate the default %s.toml file\n"+ " weed scaffold -config=%s -output=.\n\n\n", configFileName, configFileName, configFileName) } else { @@ -40,5 +39,12 @@ func LoadConfiguration(configFileName string, required bool) (loaded bool) { } return true - +} + +func GetViper() *viper.Viper { + v := viper.GetViper() + v.AutomaticEnv() + v.SetEnvPrefix("weed") + v.SetEnvKeyReplacer(strings.NewReplacer(".", "_")) + return v } diff --git a/weed/util/constants.go b/weed/util/constants.go index 4f16a783c..3d61b2006 100644 --- a/weed/util/constants.go +++ b/weed/util/constants.go @@ -5,5 +5,5 @@ import ( ) var ( - VERSION = fmt.Sprintf("%s %d.%d", sizeLimit, 1, 44) + VERSION = fmt.Sprintf("%s %d.%d", sizeLimit, 1, 53) ) diff --git a/weed/util/file_util.go b/weed/util/file_util.go index 78add6724..bef9f7cd6 100644 --- a/weed/util/file_util.go +++ b/weed/util/file_util.go @@ -3,6 +3,7 @@ package util import ( "errors" "os" + "time" "github.com/chrislusf/seaweedfs/weed/glog" ) @@ -40,3 +41,21 @@ func FileExists(filename string) bool { return true } + +func CheckFile(filename string) (exists, canRead, canWrite bool, modTime time.Time, fileSize int64) { + exists = true + fi, err := os.Stat(filename) + if os.IsNotExist(err) { + exists = false + return + } + if fi.Mode()&0400 != 0 { + canRead = true + } + if fi.Mode()&0200 != 0 { + canWrite = true + } + modTime = fi.ModTime() + fileSize = fi.Size() + return +} diff --git a/weed/util/grpc_client_server.go b/weed/util/grpc_client_server.go index 31497ad35..7e396342b 100644 --- a/weed/util/grpc_client_server.go +++ b/weed/util/grpc_client_server.go @@ -57,14 +57,21 @@ func GrpcDial(ctx context.Context, address string, opts ...grpc.DialOption) (*gr return grpc.DialContext(ctx, address, 
options...) } -func WithCachedGrpcClient(ctx context.Context, fn func(*grpc.ClientConn) error, address string, opts ...grpc.DialOption) error { +func WithCachedGrpcClient(ctx context.Context, fn func(context.Context, *grpc.ClientConn) error, address string, opts ...grpc.DialOption) error { grpcClientsLock.Lock() existingConnection, found := grpcClients[address] if found { grpcClientsLock.Unlock() - return fn(existingConnection) + err := fn(ctx, existingConnection) + if err != nil { + grpcClientsLock.Lock() + delete(grpcClients, address) + grpcClientsLock.Unlock() + existingConnection.Close() + } + return err } grpcConnection, err := GrpcDial(ctx, address, opts...) @@ -76,7 +83,7 @@ func WithCachedGrpcClient(ctx context.Context, fn func(*grpc.ClientConn) error, grpcClients[address] = grpcConnection grpcClientsLock.Unlock() - err = fn(grpcConnection) + err = fn(ctx, grpcConnection) if err != nil { grpcClientsLock.Lock() delete(grpcClients, address) diff --git a/weed/util/http_util.go b/weed/util/http_util.go index 79a442a56..08007a038 100644 --- a/weed/util/http_util.go +++ b/weed/util/http_util.go @@ -11,6 +11,8 @@ import ( "net/http" "net/url" "strings" + + "github.com/chrislusf/seaweedfs/weed/glog" ) var ( @@ -28,7 +30,7 @@ func init() { } func PostBytes(url string, body []byte) ([]byte, error) { - r, err := client.Post(url, "application/octet-stream", bytes.NewReader(body)) + r, err := client.Post(url, "", bytes.NewReader(body)) if err != nil { return nil, fmt.Errorf("Post to %s: %v", url, err) } @@ -187,11 +189,14 @@ func NormalizeUrl(url string) string { return "http://" + url } -func ReadUrl(fileUrl string, offset int64, size int, buf []byte, isReadRange bool) (n int64, e error) { +func ReadUrl(fileUrl string, offset int64, size int, buf []byte, isReadRange bool) (int64, error) { - req, _ := http.NewRequest("GET", fileUrl, nil) + req, err := http.NewRequest("GET", fileUrl, nil) + if err != nil { + return 0, err + } if isReadRange { - req.Header.Add("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+int64(size))) + req.Header.Add("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+int64(size)-1)) } else { req.Header.Set("Accept-Encoding", "gzip") } @@ -207,7 +212,8 @@ func ReadUrl(fileUrl string, offset int64, size int, buf []byte, isReadRange boo } var reader io.ReadCloser - switch r.Header.Get("Content-Encoding") { + contentEncoding := r.Header.Get("Content-Encoding") + switch contentEncoding { case "gzip": reader, err = gzip.NewReader(r.Body) defer reader.Close() @@ -215,29 +221,42 @@ func ReadUrl(fileUrl string, offset int64, size int, buf []byte, isReadRange boo reader = r.Body } - var i, m int + var ( + i, m int + n int64 + ) + // refers to https://github.com/golang/go/blob/master/src/bytes/buffer.go#L199 + // commit id c170b14c2c1cfb2fd853a37add92a82fd6eb4318 for { m, err = reader.Read(buf[i:]) - if m == 0 { - return - } i += m n += int64(m) if err == io.EOF { return n, nil } - if e != nil { - return n, e + if err != nil { + return n, err + } + if n == int64(len(buf)) { + break } } - + // drains the response body to avoid memory leak + data, _ := ioutil.ReadAll(reader) + if len(data) != 0 { + glog.V(1).Infof("%s reader has remaining %d bytes", contentEncoding, len(data)) + } + return n, err } -func ReadUrlAsStream(fileUrl string, offset int64, size int, fn func(data []byte)) (n int64, e error) { +func ReadUrlAsStream(fileUrl string, offset int64, size int, fn func(data []byte)) (int64, error) { - req, _ := http.NewRequest("GET", fileUrl, nil) - req.Header.Add("Range", 
fmt.Sprintf("bytes=%d-%d", offset, offset+int64(size))) + req, err := http.NewRequest("GET", fileUrl, nil) + if err != nil { + return 0, err + } + req.Header.Add("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+int64(size)-1)) r, err := client.Do(req) if err != nil { @@ -248,22 +267,43 @@ func ReadUrlAsStream(fileUrl string, offset int64, size int, fn func(data []byte return 0, fmt.Errorf("%s: %s", fileUrl, r.Status) } - var m int + var ( + m int + n int64 + ) buf := make([]byte, 64*1024) for { m, err = r.Body.Read(buf) - if m == 0 { - return - } fn(buf[:m]) n += int64(m) if err == io.EOF { return n, nil } - if e != nil { - return n, e + if err != nil { + return n, err } } } + +func ReadUrlAsReaderCloser(fileUrl string, rangeHeader string) (io.ReadCloser, error) { + + req, err := http.NewRequest("GET", fileUrl, nil) + if err != nil { + return nil, err + } + if rangeHeader != "" { + req.Header.Add("Range", rangeHeader) + } + + r, err := client.Do(req) + if err != nil { + return nil, err + } + if r.StatusCode >= 400 { + return nil, fmt.Errorf("%s: %s", fileUrl, r.Status) + } + + return r.Body, nil +} diff --git a/weed/util/httpdown/http_down.go b/weed/util/httpdown/http_down.go new file mode 100644 index 000000000..5cbd9611c --- /dev/null +++ b/weed/util/httpdown/http_down.go @@ -0,0 +1,395 @@ +// Package httpdown provides http.ConnState enabled graceful termination of +// http.Server. +// based on github.com/facebookarchive/httpdown, who's licence is MIT-licence, +// we add a feature of supporting for http TLS +package httpdown + +import ( + "crypto/tls" + "fmt" + "net" + "net/http" + "os" + "os/signal" + "sync" + "syscall" + "time" + + "github.com/facebookgo/clock" + "github.com/facebookgo/stats" +) + +const ( + defaultStopTimeout = time.Minute + defaultKillTimeout = time.Minute +) + +// A Server allows encapsulates the process of accepting new connections and +// serving them, and gracefully shutting down the listener without dropping +// active connections. +type Server interface { + // Wait waits for the serving loop to finish. This will happen when Stop is + // called, at which point it returns no error, or if there is an error in the + // serving loop. You must call Wait after calling Serve or ListenAndServe. + Wait() error + + // Stop stops the listener. It will block until all connections have been + // closed. + Stop() error +} + +// HTTP defines the configuration for serving a http.Server. Multiple calls to +// Serve or ListenAndServe can be made on the same HTTP instance. The default +// timeouts of 1 minute each result in a maximum of 2 minutes before a Stop() +// returns. +type HTTP struct { + // StopTimeout is the duration before we begin force closing connections. + // Defaults to 1 minute. + StopTimeout time.Duration + + // KillTimeout is the duration before which we completely give up and abort + // even though we still have connected clients. This is useful when a large + // number of client connections exist and closing them can take a long time. + // Note, this is in addition to the StopTimeout. Defaults to 1 minute. + KillTimeout time.Duration + + // Stats is optional. If provided, it will be used to record various metrics. + Stats stats.Client + + // Clock allows for testing timing related functionality. Do not specify this + // in production code. + Clock clock.Clock + + // when set CertFile and KeyFile, the httpDown will start a http with TLS. 
+ // Files containing a certificate and matching private key for the + // server must be provided if neither the Server's + // TLSConfig.Certificates nor TLSConfig.GetCertificate are populated. + // If the certificate is signed by a certificate authority, the + // certFile should be the concatenation of the server's certificate, + // any intermediates, and the CA's certificate. + CertFile, KeyFile string +} + +// Serve provides the low-level API which is useful if you're creating your own +// net.Listener. +func (h HTTP) Serve(s *http.Server, l net.Listener) Server { + stopTimeout := h.StopTimeout + if stopTimeout == 0 { + stopTimeout = defaultStopTimeout + } + killTimeout := h.KillTimeout + if killTimeout == 0 { + killTimeout = defaultKillTimeout + } + klock := h.Clock + if klock == nil { + klock = clock.New() + } + + ss := &server{ + stopTimeout: stopTimeout, + killTimeout: killTimeout, + stats: h.Stats, + clock: klock, + oldConnState: s.ConnState, + listener: l, + server: s, + serveDone: make(chan struct{}), + serveErr: make(chan error, 1), + new: make(chan net.Conn), + active: make(chan net.Conn), + idle: make(chan net.Conn), + closed: make(chan net.Conn), + stop: make(chan chan struct{}), + kill: make(chan chan struct{}), + certFile: h.CertFile, + keyFile: h.KeyFile, + } + s.ConnState = ss.connState + go ss.manage() + go ss.serve() + return ss +} + +// ListenAndServe returns a Server for the given http.Server. It is equivalent +// to ListenAndServe from the standard library, but returns immediately. +// Requests will be accepted in a background goroutine. If the http.Server has +// a non-nil TLSConfig, a TLS enabled listener will be setup. +func (h HTTP) ListenAndServe(s *http.Server) (Server, error) { + addr := s.Addr + if addr == "" { + if s.TLSConfig == nil { + addr = ":http" + } else { + addr = ":https" + } + } + l, err := net.Listen("tcp", addr) + if err != nil { + stats.BumpSum(h.Stats, "listen.error", 1) + return nil, err + } + if s.TLSConfig != nil { + l = tls.NewListener(l, s.TLSConfig) + } + return h.Serve(s, l), nil +} + +// server manages the serving process and allows for gracefully stopping it. +type server struct { + stopTimeout time.Duration + killTimeout time.Duration + stats stats.Client + clock clock.Clock + + oldConnState func(net.Conn, http.ConnState) + server *http.Server + serveDone chan struct{} + serveErr chan error + listener net.Listener + + new chan net.Conn + active chan net.Conn + idle chan net.Conn + closed chan net.Conn + stop chan chan struct{} + kill chan chan struct{} + + stopOnce sync.Once + stopErr error + + certFile, keyFile string +} + +func (s *server) connState(c net.Conn, cs http.ConnState) { + if s.oldConnState != nil { + s.oldConnState(c, cs) + } + + switch cs { + case http.StateNew: + s.new <- c + case http.StateActive: + s.active <- c + case http.StateIdle: + s.idle <- c + case http.StateHijacked, http.StateClosed: + s.closed <- c + } +} + +func (s *server) manage() { + defer func() { + close(s.new) + close(s.active) + close(s.idle) + close(s.closed) + close(s.stop) + close(s.kill) + }() + + var stopDone chan struct{} + + conns := map[net.Conn]http.ConnState{} + var countNew, countActive, countIdle float64 + + // decConn decrements the count associated with the current state of the + // given connection. 
+ decConn := func(c net.Conn) { + switch conns[c] { + default: + panic(fmt.Errorf("unknown existing connection: %s", c)) + case http.StateNew: + countNew-- + case http.StateActive: + countActive-- + case http.StateIdle: + countIdle-- + } + } + + // setup a ticker to report various values every minute. if we don't have a + // Stats implementation provided, we Stop it so it never ticks. + statsTicker := s.clock.Ticker(time.Minute) + if s.stats == nil { + statsTicker.Stop() + } + + for { + select { + case <-statsTicker.C: + // we'll only get here when s.stats is not nil + s.stats.BumpAvg("http-state.new", countNew) + s.stats.BumpAvg("http-state.active", countActive) + s.stats.BumpAvg("http-state.idle", countIdle) + s.stats.BumpAvg("http-state.total", countNew+countActive+countIdle) + case c := <-s.new: + conns[c] = http.StateNew + countNew++ + case c := <-s.active: + decConn(c) + countActive++ + + conns[c] = http.StateActive + case c := <-s.idle: + decConn(c) + countIdle++ + + conns[c] = http.StateIdle + + // if we're already stopping, close it + if stopDone != nil { + c.Close() + } + case c := <-s.closed: + stats.BumpSum(s.stats, "conn.closed", 1) + decConn(c) + delete(conns, c) + + // if we're waiting to stop and are all empty, we just closed the last + // connection and we're done. + if stopDone != nil && len(conns) == 0 { + close(stopDone) + return + } + case stopDone = <-s.stop: + // if we're already all empty, we're already done + if len(conns) == 0 { + close(stopDone) + return + } + + // close current idle connections right away + for c, cs := range conns { + if cs == http.StateIdle { + c.Close() + } + } + + // continue the loop and wait for all the ConnState updates which will + // eventually close(stopDone) and return from this goroutine. + + case killDone := <-s.kill: + // force close all connections + stats.BumpSum(s.stats, "kill.conn.count", float64(len(conns))) + for c := range conns { + c.Close() + } + + // don't block the kill. + close(killDone) + + // continue the loop and we wait for all the ConnState updates and will + // return from this goroutine when we're all done. otherwise we'll try to + // send those ConnState updates on closed channels. 
+ + } + } +} + +func (s *server) serve() { + stats.BumpSum(s.stats, "serve", 1) + if s.certFile == "" && s.keyFile == "" { + s.serveErr <- s.server.Serve(s.listener) + } else { + s.serveErr <- s.server.ServeTLS(s.listener, s.certFile, s.keyFile) + } + close(s.serveDone) + close(s.serveErr) +} + +func (s *server) Wait() error { + if err := <-s.serveErr; !isUseOfClosedError(err) { + return err + } + return nil +} + +func (s *server) Stop() error { + s.stopOnce.Do(func() { + defer stats.BumpTime(s.stats, "stop.time").End() + stats.BumpSum(s.stats, "stop", 1) + + // first disable keep-alive for new connections + s.server.SetKeepAlivesEnabled(false) + + // then close the listener so new connections can't connect come thru + closeErr := s.listener.Close() + <-s.serveDone + + // then trigger the background goroutine to stop and wait for it + stopDone := make(chan struct{}) + s.stop <- stopDone + + // wait for stop + select { + case <-stopDone: + case <-s.clock.After(s.stopTimeout): + defer stats.BumpTime(s.stats, "kill.time").End() + stats.BumpSum(s.stats, "kill", 1) + + // stop timed out, wait for kill + killDone := make(chan struct{}) + s.kill <- killDone + select { + case <-killDone: + case <-s.clock.After(s.killTimeout): + // kill timed out, give up + stats.BumpSum(s.stats, "kill.timeout", 1) + } + } + + if closeErr != nil && !isUseOfClosedError(closeErr) { + stats.BumpSum(s.stats, "listener.close.error", 1) + s.stopErr = closeErr + } + }) + return s.stopErr +} + +func isUseOfClosedError(err error) bool { + if err == nil { + return false + } + if opErr, ok := err.(*net.OpError); ok { + err = opErr.Err + } + return err.Error() == "use of closed network connection" +} + +// ListenAndServe is a convenience function to serve and wait for a SIGTERM +// or SIGINT before shutting down. 
+func ListenAndServe(s *http.Server, hd *HTTP) error { + if hd == nil { + hd = &HTTP{} + } + hs, err := hd.ListenAndServe(s) + if err != nil { + return err + } + + waiterr := make(chan error, 1) + go func() { + defer close(waiterr) + waiterr <- hs.Wait() + }() + + signals := make(chan os.Signal, 10) + signal.Notify(signals, syscall.SIGTERM, syscall.SIGINT) + + select { + case err := <-waiterr: + if err != nil { + return err + } + case <-signals: + signal.Stop(signals) + if err := hs.Stop(); err != nil { + return err + } + if err := <-waiterr; err != nil { + return err + } + } + return nil +} diff --git a/weed/util/parse.go b/weed/util/parse.go index 0a8317c19..6593d43b6 100644 --- a/weed/util/parse.go +++ b/weed/util/parse.go @@ -1,7 +1,9 @@ package util import ( + "net/url" "strconv" + "strings" ) func ParseInt(text string, defaultValue int) int { @@ -24,3 +26,22 @@ func ParseUint64(text string, defaultValue uint64) uint64 { } return count } + +func ParseFilerUrl(entryPath string) (filerServer string, filerPort int64, path string, err error) { + if !strings.HasPrefix(entryPath, "http://") && !strings.HasPrefix(entryPath, "https://") { + entryPath = "http://" + entryPath + } + + var u *url.URL + u, err = url.Parse(entryPath) + if err != nil { + return + } + filerServer = u.Hostname() + portString := u.Port() + if portString != "" { + filerPort, err = strconv.ParseInt(portString, 10, 32) + } + path = u.Path + return +} diff --git a/weed/util/queue.go b/weed/util/queue.go new file mode 100644 index 000000000..1e6211e0d --- /dev/null +++ b/weed/util/queue.go @@ -0,0 +1,61 @@ +package util + +import "sync" + +type node struct { + data interface{} + next *node +} + +type Queue struct { + head *node + tail *node + count int + sync.RWMutex +} + +func NewQueue() *Queue { + q := &Queue{} + return q +} + +func (q *Queue) Len() int { + q.RLock() + defer q.RUnlock() + return q.count +} + +func (q *Queue) Enqueue(item interface{}) { + q.Lock() + defer q.Unlock() + + n := &node{data: item} + + if q.tail == nil { + q.tail = n + q.head = n + } else { + q.tail.next = n + q.tail = n + } + q.count++ +} + +func (q *Queue) Dequeue() interface{} { + q.Lock() + defer q.Unlock() + + if q.head == nil { + return nil + } + + n := q.head + q.head = n.next + + if q.head == nil { + q.tail = nil + } + q.count-- + + return n.data +} diff --git a/weed/wdclient/masterclient.go b/weed/wdclient/masterclient.go index 6ba668ade..30b0cf160 100644 --- a/weed/wdclient/masterclient.go +++ b/weed/wdclient/masterclient.go @@ -91,7 +91,7 @@ func (mc *MasterClient) tryConnectToMaster(master string) (nextHintedLeader stri // maybe the leader is changed if volumeLocation.Leader != "" { - glog.V(1).Infof("redirected to leader %v", volumeLocation.Leader) + glog.V(0).Infof("redirected to leader %v", volumeLocation.Leader) nextHintedLeader = volumeLocation.Leader return nil } @@ -125,9 +125,9 @@ func withMasterClient(ctx context.Context, master string, grpcDialOption grpc.Di return fmt.Errorf("failed to parse master grpc %v: %v", master, parseErr) } - return util.WithCachedGrpcClient(ctx, func(grpcConnection *grpc.ClientConn) error { + return util.WithCachedGrpcClient(ctx, func(ctx2 context.Context, grpcConnection *grpc.ClientConn) error { client := master_pb.NewSeaweedClient(grpcConnection) - return fn(ctx, client) + return fn(ctx2, client) }, masterGrpcAddress, grpcDialOption) }
IdCollectionSizeFilesTrashRemoteKey
{{ .Id }}{{ .Collection }}{{ .Size }} Bytes{{ .FileCount }}{{ .DeleteCount }} / {{.DeletedByteCount}} Bytes{{ .RemoteStorageName }}{{ .RemoteStorageKey }}