Mirror of https://github.com/seaweedfs/seaweedfs.git, synced 2024-11-29 05:49:03 +08:00

Merge remote-tracking branch 'upstream/master'

Commit 7cc07655d4

.github/workflows/go.yml (vendored; 2 lines changed)
@@ -34,4 +34,4 @@ jobs:
       run: cd weed; go build -v .

     - name: Test
-      run: cd weed; go test -v .
+      run: cd weed; go test -v ./...
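
The only functional change in this workflow widens the test run from the single weed package to every package beneath it; the two invocations differ as follows (standard Go tooling behavior):

```bash
cd weed
go test -v .      # before: tests only the package rooted at ./weed
go test -v ./...  # after: tests that package plus all sub-packages, recursively
```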
.travis.yml (file name inferred from content; the header was not captured)
@@ -1,9 +1,8 @@
-sudo: false
 language: go
 go:
-  - 1.12.x
   - 1.13.x
   - 1.14.x
+  - 1.15.x

 before_install:
   - export PATH=/home/travis/gopath/bin:$PATH
@@ -45,4 +44,4 @@ deploy:
   on:
     tags: true
     repo: chrislusf/seaweedfs
-    go: 1.14.x
+    go: 1.15.x
Makefile (13 lines changed)
@@ -36,11 +36,14 @@ deps:
 build: deps
 	go build $(GO_FLAGS) -ldflags "$(LDFLAGS)" -o $(BINARY) $(SOURCE_DIR)

 install: deps
 	go install $(GO_FLAGS) -ldflags "$(LDFLAGS)" $(SOURCE_DIR)

 linux: deps
 	mkdir -p linux
 	GOOS=linux GOARCH=amd64 go build $(GO_FLAGS) -ldflags "$(LDFLAGS)" -o linux/$(BINARY) $(SOURCE_DIR)

-release: deps windows_build darwin_build linux_build bsd_build 5_byte_linux_build 5_byte_darwin_build 5_byte_windows_build
+release: deps windows_build darwin_build linux_build bsd_build 5_byte_linux_build 5_byte_arm64_build 5_byte_darwin_build 5_byte_windows_build

 ##### LINUX BUILDS #####
 5_byte_linux_build:
@@ -55,6 +58,14 @@ release: deps windows_build darwin_build linux_build bsd_build 5_byte_linux_buil
 	$(call build_large,windows,amd64,.exe)
 	$(call zip_large,windows,amd64,.exe)

+5_byte_arm_build: $(sources)
+	$(call build_large,linux,arm,)
+	$(call tar_large,linux,arm)
+
+5_byte_arm64_build: $(sources)
+	$(call build_large,linux,arm64,)
+	$(call tar_large,linux,arm64)
+
 linux_build: build/linux_arm.tar.gz build/linux_arm64.tar.gz build/linux_386.tar.gz build/linux_amd64.tar.gz

 build/linux_386.tar.gz: $(sources)
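
For orientation, the 5_byte targets build the large-volume (5-byte offset) binaries; judging from docker/Dockerfile.go_build_large later in this diff, that corresponds to the 5BytesOffset build tag. A hypothetical flattened form of `$(call build_large,linux,arm64,)` — the actual build_large macro is defined elsewhere in the Makefile — might be:

```bash
# Hypothetical expansion; paths and LDFLAGS follow the Makefile's conventions.
GOOS=linux GOARCH=arm64 go build -tags 5BytesOffset \
  -ldflags "$LDFLAGS" -o build/linux_arm64/weed ./weed
```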
README.md (75 lines changed)
@@ -4,7 +4,7 @@
 [![Build Status](https://travis-ci.org/chrislusf/seaweedfs.svg?branch=master)](https://travis-ci.org/chrislusf/seaweedfs)
 [![GoDoc](https://godoc.org/github.com/chrislusf/seaweedfs/weed?status.svg)](https://godoc.org/github.com/chrislusf/seaweedfs/weed)
 [![Wiki](https://img.shields.io/badge/docs-wiki-blue.svg)](https://github.com/chrislusf/seaweedfs/wiki)
-[![Docker Pulls](https://img.shields.io/docker/pulls/chrislusf/seaweedfs.svg?maxAge=604800)](https://hub.docker.com/r/chrislusf/seaweedfs/)
+[![Docker Pulls](https://img.shields.io/docker/pulls/chrislusf/seaweedfs.svg?maxAge=4800)](https://hub.docker.com/r/chrislusf/seaweedfs/)

 ![SeaweedFS Logo](https://raw.githubusercontent.com/chrislusf/seaweedfs/master/note/seaweedfs.png)

@@ -90,7 +90,7 @@ There is only 40 bytes of disk storage overhead for each file's metadata. It is

 SeaweedFS started by implementing [Facebook's Haystack design paper](http://www.usenix.org/event/osdi10/tech/full_papers/Beaver.pdf). Also, SeaweedFS implements erasure coding with ideas from [f4: Facebook’s Warm BLOB Storage System](https://www.usenix.org/system/files/conference/osdi14/osdi14-paper-muralidhar.pdf)

-On top of the object store, optional [Filer] can support directories and POSIX attributes. Filer is a separate linearly-scalable stateless server with customizable metadata stores, e.g., MySql, Postgres, Mongodb, Redis, Etcd, Cassandra, LevelDB, MemSql, TiDB, TiKV, CockroachDB, etc.
+On top of the object store, optional [Filer] can support directories and POSIX attributes. Filer is a separate linearly-scalable stateless server with customizable metadata stores, e.g., MySql, Postgres, Mongodb, Redis, Cassandra, Elastic Search, LevelDB, MemSql, TiDB, Etcd, CockroachDB, etc.

 [Back to TOC](#table-of-contents)
@@ -112,19 +112,23 @@ On top of the object store, optional [Filer] can support directories and POSIX a
 [Back to TOC](#table-of-contents)

 ## Filer Features ##
-* [filer server][Filer] provide "normal" directories and files via http.
-* [mount filer][Mount] to read and write files directly as a local directory via FUSE.
-* [Amazon S3 compatible API][AmazonS3API] to access files with S3 tooling.
-* [Hadoop Compatible File System][Hadoop] to access files from Hadoop/Spark/Flink/etc jobs.
-* [Async Backup To Cloud][BackupToCloud] has extremely fast local access and backups to Amazon S3, Google Cloud Storage, Azure, BackBlaze.
-* [WebDAV] access as a mapped drive on Mac and Windows, or from mobile devices.
+* [Filer server][Filer] provides "normal" directories and files via http.
+* [Super Large Files][SuperLargeFiles] stores large or super large files in tens of TB.
+* [Mount filer][Mount] reads and writes files directly as a local directory via FUSE.
+* [Active-Active Replication][ActiveActiveAsyncReplication] enables asynchronous one-way or two-way cross cluster continuous replication.
+* [Amazon S3 compatible API][AmazonS3API] accesses files with S3 tooling.
+* [Hadoop Compatible File System][Hadoop] accesses files from Hadoop/Spark/Flink/etc or even runs HBase.
+* [Async Replication To Cloud][BackupToCloud] has extremely fast local access and backups to Amazon S3, Google Cloud Storage, Azure, BackBlaze.
+* [WebDAV] accesses as a mapped drive on Mac and Windows, or from mobile devices.
 * [AES256-GCM Encrypted Storage][FilerDataEncryption] safely stores the encrypted data.
-* [File TTL][FilerTTL] automatically purge file metadata and actual file data.
+* [File TTL][FilerTTL] automatically purges file metadata and actual file data.
+* [Kubernetes CSI Driver][SeaweedFsCsiDriver] A Container Storage Interface (CSI) Driver. [![Docker Pulls](https://img.shields.io/docker/pulls/chrislusf/seaweedfs-csi-driver.svg?maxAge=4800)](https://hub.docker.com/r/chrislusf/seaweedfs-csi-driver/)

 [Filer]: https://github.com/chrislusf/seaweedfs/wiki/Directories-and-Files
-[Mount]: https://github.com/chrislusf/seaweedfs/wiki/Mount
+[SuperLargeFiles]: https://github.com/chrislusf/seaweedfs/wiki/Data-Structure-for-Large-Files
+[Mount]: https://github.com/chrislusf/seaweedfs/wiki/FUSE-Mount
 [AmazonS3API]: https://github.com/chrislusf/seaweedfs/wiki/Amazon-S3-API
-[BackupToCloud]: https://github.com/chrislusf/seaweedfs/wiki/Backup-to-Cloud
+[BackupToCloud]: https://github.com/chrislusf/seaweedfs/wiki/Async-Replication-to-Cloud
 [Hadoop]: https://github.com/chrislusf/seaweedfs/wiki/Hadoop-Compatible-File-System
 [WebDAV]: https://github.com/chrislusf/seaweedfs/wiki/WebDAV
 [ErasureCoding]: https://github.com/chrislusf/seaweedfs/wiki/Erasure-coding-for-warm-storage
@@ -132,6 +136,8 @@ On top of the object store, optional [Filer] can support directories and POSIX a
 [FilerDataEncryption]: https://github.com/chrislusf/seaweedfs/wiki/Filer-Data-Encryption
 [FilerTTL]: https://github.com/chrislusf/seaweedfs/wiki/Filer-Stores
 [VolumeServerTTL]: https://github.com/chrislusf/seaweedfs/wiki/Store-file-with-a-Time-To-Live
+[SeaweedFsCsiDriver]: https://github.com/seaweedfs/seaweedfs-csi-driver
+[ActiveActiveAsyncReplication]: https://github.com/chrislusf/seaweedfs/wiki/Filer-Active-Active-cross-cluster-continuous-synchronization

 [Back to TOC](#table-of-contents)
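
A minimal sketch of exercising the filer-server and FUSE-mount features locally, assuming a master is already running on localhost:9333 and default ports are kept (8888/18888 match the filer ports exposed by the Dockerfiles below); the mount point is a hypothetical example:

```bash
weed filer -master=localhost:9333                # serve directories/files over HTTP on :8888
weed mount -filer=localhost:8888 -dir=/mnt/weed  # FUSE-mount the filer at /mnt/weed
```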
@@ -343,6 +349,8 @@ Most other distributed file systems seem more complicated than necessary.

 SeaweedFS is meant to be fast and simple, in both setup and operation. If you do not understand how it works when you reach here, we've failed! Please raise an issue with any questions or update this file with clarifications.

+SeaweedFS is constantly moving forward. Same with other systems. These comparisons can be outdated quickly. Please help to keep them updated.
+
 [Back to TOC](#table-of-contents)

 ### Compared to HDFS ###
@@ -361,16 +369,17 @@ The architectures are mostly the same. SeaweedFS aims to store and read files fa

 * SeaweedFS optimizes for small files, ensuring O(1) disk seek operation, and can also handle large files.
 * SeaweedFS statically assigns a volume id for a file. Locating file content becomes just a lookup of the volume id, which can be easily cached.
-* SeaweedFS Filer metadata store can be any well-known and proven data stores, e.g., Cassandra, Mongodb, Redis, Etcd, MySql, Postgres, MemSql, TiDB, CockroachDB, etc, and is easy to customized.
+* SeaweedFS Filer metadata store can be any well-known and proven data stores, e.g., Cassandra, Mongodb, Redis, Elastic Search, MySql, Postgres, MemSql, TiDB, CockroachDB, Etcd etc, and is easy to customized.
 * SeaweedFS Volume server also communicates directly with clients via HTTP, supporting range queries, direct uploads, etc.

-| System | File Meta | File Content Read| POSIX | REST API | Optimized for small files |
+| System | File Metadata | File Content Read| POSIX | REST API | Optimized for large number of small files |
 | ------------- | ------------------------------- | ---------------- | ------ | -------- | ------------------------- |
 | SeaweedFS | lookup volume id, cacheable | O(1) disk seek | | Yes | Yes |
 | SeaweedFS Filer| Linearly Scalable, Customizable | O(1) disk seek | FUSE | Yes | Yes |
 | GlusterFS | hashing | | FUSE, NFS | | |
 | Ceph | hashing + rules | | FUSE | Yes | |
 | MooseFS | in memory | | FUSE | | No |
+| MinIO | separate meta file for each file | | | Yes | No |

 [Back to TOC](#table-of-contents)
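
To make the static volume-id lookup concrete: a file id such as `3,01637037d6` (illustrative value) starts with the volume id, and the master answers location queries over plain HTTP. A hedged sketch against a default local master on :9333, using the master's standard `/dir/assign` and `/dir/lookup` endpoints:

```bash
curl "http://localhost:9333/dir/assign"            # returns e.g. {"fid":"3,01637037d6", ...}
curl "http://localhost:9333/dir/lookup?volumeId=3" # cacheable: volume id -> volume server URL
```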
@@ -402,7 +411,7 @@ Ceph uses CRUSH hashing to automatically manage the data placement. SeaweedFS pl

 SeaweedFS is optimized for small files. Small files are stored as one continuous block of content, with at most 8 unused bytes between files. Small file access is O(1) disk read.

-SeaweedFS Filer uses off-the-shelf stores, such as MySql, Postgres, Mongodb, Redis, Etcd, Cassandra, MemSql, TiDB, CockroachCB, to manage file directories. These stores are proven, scalable, and easier to manage.
+SeaweedFS Filer uses off-the-shelf stores, such as MySql, Postgres, Mongodb, Redis, Elastic Search, Cassandra, MemSql, TiDB, CockroachCB, Etcd, to manage file directories. These stores are proven, scalable, and easier to manage.

 | SeaweedFS | comparable to Ceph | advantage |
 | ------------- | ------------- | ---------------- |
@@ -412,6 +421,22 @@ SeaweedFS Filer uses off-the-shelf stores, such as MySql, Postgres, Mongodb, Red

 [Back to TOC](#table-of-contents)

+### Compared to MinIO ###
+
+MinIO follows AWS S3 closely and is ideal for testing for S3 API. It has good UI, policies, versionings, etc. SeaweedFS is trying to catch up here. It is also possible to put MinIO as a gateway in front of SeaweedFS later.
+
+MinIO metadata are in simple files. Each file write will incur meta file writes.
+
+MinIO does not have optimization for large number of small files.
+
+MinIO has multiple disk IO to read one file. SeaweedFS has O(1) disk reads, even for erasure coded files.
+
+MinIO has full-time erasure coding. SeaweedFS uses replication on hot data for faster speed and optionally applies erasure coding on warm data.
+
+MinIO does not have POSIX-like API support.
+
+MinIO has specific requirements on storage layout. It is not flexible to adjust capacity. In SeaweedFS, just start one volume server pointing to the master. That's all.
+
 ## Dev Plan ##

 More tools and documentation, on how to maintain and scale the system. For example, how to move volumes, automatically balancing data, how to grow volumes, how to check system status, etc.
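
As a hedged illustration of the capacity point in the MinIO comparison above — growing a cluster really is one command, assuming a master already running on localhost:9333 (the data directory is a hypothetical example):

```bash
weed volume -dir=/data/vol1 -mserver=localhost:9333 -port=8080
```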
@@ -438,30 +463,18 @@ https://golang.org/doc/install
 make sure you set up your $GOPATH

-Step 2: also you may need to install Mercurial by following the instructions at:
-
-http://mercurial.selenic.com/downloads
-
+Step 2: checkout this repo:
+```bash
+git clone https://github.com/chrislusf/seaweedfs.git
+```
 Step 3: download, compile, and install the project by executing the following command

 ```bash
 go get github.com/chrislusf/seaweedfs/weed
+make install
 ```

 Once this is done, you will find the executable "weed" in your `$GOPATH/bin` directory

-Note:
-* If you got into this problem, try to `rm -Rf $GOPATH/src/go.etcd.io/etcd/vendor/golang.org/x/net/trace` and build again.
-```
-panic: /debug/requests is already registered. You may have two independent copies of golang.org/x/net/trace in your binary, trying to maintain separate state. This may involve a vendored copy of golang.org/x/net/trace.
-```
-
-Step 4: after you modify your code locally, you could start a local build by calling `go install` under
-
-```
-$GOPATH/src/github.com/chrislusf/seaweedfs/weed
-```
-
 [Back to TOC](#table-of-contents)

 ## Disk Related Topics ##
docker/Dockerfile (file name inferred from content)
@@ -1,15 +1,19 @@
-FROM frolvlad/alpine-glibc
+FROM alpine

-# Supercronic install settings
-ENV SUPERCRONIC_URL=https://github.com/aptible/supercronic/releases/download/v0.1.8/supercronic-linux-amd64 \
-    SUPERCRONIC=supercronic-linux-amd64 \
-    SUPERCRONIC_SHA1SUM=be43e64c45acd6ec4fce5831e03759c89676a0ea
-
-# Install SeaweedFS and Supercronic ( for cron job mode )
 # Tried to use curl only (curl -o /tmp/linux_amd64.tar.gz ...), however it turned out that the following tar command failed with "gzip: stdin: not in gzip format"
-RUN apk add --no-cache --virtual build-dependencies --update wget curl ca-certificates && \
-    wget -P /tmp https://github.com/$(curl -s -L https://github.com/chrislusf/seaweedfs/releases/latest | egrep -o 'chrislusf/seaweedfs/releases/download/.*/linux_amd64.tar.gz') && \
-    tar -C /usr/bin/ -xzvf /tmp/linux_amd64.tar.gz && \
+RUN \
+    ARCH=$(if [ $(uname -m) == "x86_64" ] && [ $(getconf LONG_BIT) == "64" ]; then echo "amd64"; \
+        elif [ $(uname -m) == "x86_64" ] && [ $(getconf LONG_BIT) == "32" ]; then echo "386"; \
+        elif [ $(uname -m) == "aarch64" ]; then echo "arm64"; \
+        elif [ $(uname -m) == "armv7l" ]; then echo "arm"; \
+        elif [ $(uname -m) == "armv6l" ]; then echo "arm"; fi;) && \
+    echo "Building for $ARCH" 1>&2 && \
+    SUPERCRONIC_SHA1SUM=$(echo $ARCH | sed 's/386/e0126b0102b9f388ecd55714358e3ad60d0cebdb/g' | sed 's/amd64/5ddf8ea26b56d4a7ff6faecdd8966610d5cb9d85/g' | sed 's/arm64/e2714c43e7781bf1579c85aa61259245f56dbba1/g' | sed 's/arm/47481c3341bc3a1ae91a728e0cc63c8e6d3791ad/g') && \
+    SUPERCRONIC_URL=https://github.com/aptible/supercronic/releases/download/v0.1.9/supercronic-linux-$ARCH && \
+    SUPERCRONIC=supercronic-linux-$ARCH && \
+    # Install SeaweedFS and Supercronic ( for cron job mode )
+    apk add --no-cache --virtual build-dependencies --update wget curl ca-certificates && \
+    wget -P /tmp https://github.com/$(curl -s -L https://github.com/chrislusf/seaweedfs/releases/latest | egrep -o "chrislusf/seaweedfs/releases/download/.*/linux_$ARCH.tar.gz") && \
+    tar -C /usr/bin/ -xzvf /tmp/linux_$ARCH.tar.gz && \
 curl -fsSLO "$SUPERCRONIC_URL" && \
 echo "${SUPERCRONIC_SHA1SUM} ${SUPERCRONIC}" | sha1sum -c - && \
 chmod +x "$SUPERCRONIC" && \
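
For readers skimming the new RUN block: it maps `uname -m` output to a Go architecture name before downloading the matching release tarball and Supercronic binary. A simplified standalone sketch of the same mapping (it omits the 32-bit-userland-on-x86_64 case the Dockerfile also handles):

```bash
case "$(uname -m)" in
  x86_64)        echo amd64 ;;  # 64-bit x86
  aarch64)       echo arm64 ;;  # 64-bit ARM
  armv7l|armv6l) echo arm   ;;  # 32-bit ARM
esac
```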
docker/Dockerfile.go_build (file name inferred; Dockerfile.go_build_large below is its 5BytesOffset twin)
@@ -2,7 +2,9 @@ FROM frolvlad/alpine-glibc as builder
 RUN apk add git go g++
 RUN mkdir -p /go/src/github.com/chrislusf/
 RUN git clone https://github.com/chrislusf/seaweedfs /go/src/github.com/chrislusf/seaweedfs
-RUN cd /go/src/github.com/chrislusf/seaweedfs/weed && go install
+RUN cd /go/src/github.com/chrislusf/seaweedfs/weed \
+    && export LDFLAGS="-X github.com/chrislusf/seaweedfs/weed/util.COMMIT=$(git rev-parse --short HEAD)" \
+    && go install -ldflags "${LDFLAGS}"

 FROM alpine AS final
 LABEL author="Chris Lu"
docker/Dockerfile.go_build_large (new file, 37 lines)
@@ -0,0 +1,37 @@
+FROM frolvlad/alpine-glibc as builder
+RUN apk add git go g++
+RUN mkdir -p /go/src/github.com/chrislusf/
+RUN git clone https://github.com/chrislusf/seaweedfs /go/src/github.com/chrislusf/seaweedfs
+RUN cd /go/src/github.com/chrislusf/seaweedfs/weed \
+    && export LDFLAGS="-X github.com/chrislusf/seaweedfs/weed/util.COMMIT=$(git rev-parse --short HEAD)" \
+    && go install -tags 5BytesOffset -ldflags "${LDFLAGS}"
+
+FROM alpine AS final
+LABEL author="Chris Lu"
+COPY --from=builder /root/go/bin/weed /usr/bin/
+RUN mkdir -p /etc/seaweedfs
+COPY --from=builder /go/src/github.com/chrislusf/seaweedfs/docker/filer.toml /etc/seaweedfs/filer.toml
+COPY --from=builder /go/src/github.com/chrislusf/seaweedfs/docker/entrypoint.sh /entrypoint.sh
+
+# volume server grpc port
+EXPOSE 18080
+# volume server http port
+EXPOSE 8080
+# filer server grpc port
+EXPOSE 18888
+# filer server http port
+EXPOSE 8888
+# master server shared grpc port
+EXPOSE 19333
+# master server shared http port
+EXPOSE 9333
+# s3 server http port
+EXPOSE 8333
+
+RUN mkdir -p /data/filerldb2
+
+VOLUME /data
+
+RUN chmod +x /entrypoint.sh
+
+ENTRYPOINT ["/entrypoint.sh"]
docker/README.md (file name inferred from content)
@@ -27,3 +27,13 @@ docker-compose -f seaweedfs-dev-compose.yml -p seaweedfs up
 cd $GOPATH/src/github.com/chrislusf/seaweedfs/docker
 make
 ```
+
+## Build and push a multiarch build
+
+Make sure that `docker buildx` is supported (might be an experimental docker feature)
+```bash
+BUILDER=$(docker buildx create --driver docker-container --use)
+docker buildx build --pull --push --platform linux/386,linux/amd64,linux/arm64,linux/arm/v7,linux/arm/v6 . -t chrislusf/seaweedfs
+docker buildx stop $BUILDER
+```
docker/entrypoint.sh (file name inferred from content and the COPY lines above)
@@ -1,36 +1,60 @@
 #!/bin/sh

+isArgPassed() {
+  arg="$1"
+  argWithEqualSign="$1="
+  shift
+  while [ $# -gt 0 ]; do
+    passedArg="$1"
+    shift
+    case $passedArg in
+      $arg)
+        return 0
+        ;;
+      $argWithEqualSign*)
+        return 0
+        ;;
+    esac
+  done
+  return 1
+}
+
 case "$1" in

   'master')
     ARGS="-mdir=/data -volumePreallocate -volumeSizeLimitMB=1024"
-    exec /usr/bin/weed $@ $ARGS
+    shift
+    exec /usr/bin/weed master $ARGS $@
     ;;

   'volume')
     ARGS="-dir=/data -max=0"
-    if [[ $@ == *"-max="* ]]; then
+    if isArgPassed "-max" "$@"; then
       ARGS="-dir=/data"
     fi
-    exec /usr/bin/weed $@ $ARGS
+    shift
+    exec /usr/bin/weed volume $ARGS $@
     ;;

   'server')
     ARGS="-dir=/data -volume.max=0 -master.volumePreallocate -master.volumeSizeLimitMB=1024"
-    if [[ $@ == *"-volume.max="* ]]; then
+    if isArgPassed "-volume.max" "$@"; then
       ARGS="-dir=/data -master.volumePreallocate -master.volumeSizeLimitMB=1024"
     fi
-    exec /usr/bin/weed $@ $ARGS
+    shift
+    exec /usr/bin/weed server $ARGS $@
     ;;

   'filer')
     ARGS=""
-    exec /usr/bin/weed $@ $ARGS
+    shift
+    exec /usr/bin/weed filer $ARGS $@
     ;;

   's3')
     ARGS="-domainName=$S3_DOMAIN_NAME -key.file=$S3_KEY_FILE -cert.file=$S3_CERT_FILE"
-    exec /usr/bin/weed $@ $ARGS
+    shift
+    exec /usr/bin/weed s3 $ARGS $@
    ;;

   'cronjob')
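
The new isArgPassed helper exists because the alpine image's /bin/sh is not bash, so the old `[[ $@ == *"-max="* ]]` pattern is unavailable; it scans the positional arguments POSIX-portably. A quick hedged usage check outside the entrypoint (argument values are hypothetical):

```bash
set -- volume -max=10 -dir=/tmp  # simulate the container's arguments
if isArgPassed "-max" "$@"; then
  echo "-max was passed"         # matches both '-max 10' and '-max=10'
fi
```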
(local-cluster docker-compose file; file name not captured)
@@ -6,25 +6,25 @@ services:
     ports:
       - 9333:9333
       - 19333:19333
-    command: "master -ip=master0 -port=9333 -peers=master0:9333,master1:9334,master2:9335"
+    command: "master -ip=master0 -port=9333 -peers=master0:9333,master1:9334,master2:9335 -mdir=/data/m1"
   master1:
     image: chrislusf/seaweedfs:local
     ports:
       - 9334:9334
       - 19334:19334
-    command: "master -ip=master1 -port=9334 -peers=master0:9333,master1:9334,master2:9335"
+    command: "master -ip=master1 -port=9334 -peers=master0:9333,master1:9334,master2:9335 -mdir=/data/m2"
   master2:
     image: chrislusf/seaweedfs:local
     ports:
       - 9335:9335
       - 19335:19335
-    command: "master -ip=master2 -port=9335 -peers=master0:9333,master1:9334,master2:9335"
+    command: "master -ip=master2 -port=9335 -peers=master0:9333,master1:9334,master2:9335 -mdir=/data/m3"
   volume:
     image: chrislusf/seaweedfs:local
     ports:
       - 8080:8080
       - 18080:18080
-    command: 'volume -mserver="master0:9333,master1:9334,master2:9335" -port=8080 -ip=volume'
+    command: 'volume -mserver="master0:9333,master1:9334,master2:9335" -port=8080 -ip=volume -publicUrl=localhost:8080'
     depends_on:
       - master0
       - master1
go.mod (47 lines changed)
@@ -6,17 +6,13 @@ require (
 	cloud.google.com/go v0.44.3
 	github.com/Azure/azure-pipeline-go v0.2.2 // indirect
 	github.com/Azure/azure-storage-blob-go v0.8.0
-	github.com/DataDog/zstd v1.4.1 // indirect
 	github.com/OneOfOne/xxhash v1.2.2
 	github.com/Shopify/sarama v1.23.1
-	github.com/aws/aws-sdk-go v1.23.13
+	github.com/aws/aws-sdk-go v1.33.5
 	github.com/buraksezer/consistent v0.0.0-20191006190839-693edf70fd72
 	github.com/cespare/xxhash v1.1.0
-	github.com/chrislusf/raft v0.0.0-20190225081310-10d6e2182d92
-	github.com/coreos/bbolt v1.3.3 // indirect
-	github.com/coreos/etcd v3.3.15+incompatible // indirect
+	github.com/chrislusf/raft v1.0.2-0.20201002174524-b13c3bfdb011
 	github.com/coreos/go-semver v0.3.0 // indirect
 	github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f // indirect
 	github.com/dgrijalva/jwt-go v3.2.0+incompatible
 	github.com/disintegration/imaging v1.6.2
 	github.com/dustin/go-humanize v1.0.0
@@ -28,21 +24,21 @@ require (
 	github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 // indirect
 	github.com/frankban/quicktest v1.7.2 // indirect
 	github.com/go-redis/redis v6.15.7+incompatible
-	github.com/go-sql-driver/mysql v1.4.1
+	github.com/go-sql-driver/mysql v1.5.0
 	github.com/gocql/gocql v0.0.0-20190829130954-e163eff7a8c6
 	github.com/gogo/protobuf v1.2.2-0.20190730201129-28a6bbf47e48 // indirect
-	github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 // indirect
-	github.com/golang/protobuf v1.3.2
+	github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6
+	github.com/golang/protobuf v1.4.2
 	github.com/google/btree v1.0.0
 	github.com/google/uuid v1.1.1
-	github.com/gorilla/mux v1.7.3
+	github.com/gorilla/mux v1.7.4
 	github.com/gorilla/websocket v1.4.1 // indirect
 	github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4 // indirect
 	github.com/grpc-ecosystem/grpc-gateway v1.11.0 // indirect
 	github.com/hashicorp/golang-lru v0.5.3 // indirect
 	github.com/jcmturner/gofork v1.0.0 // indirect
+	github.com/json-iterator/go v1.1.10
 	github.com/karlseguin/ccache v2.0.3+incompatible
 	github.com/karlseguin/expect v1.0.1 // indirect
 	github.com/klauspost/compress v1.10.9
 	github.com/klauspost/cpuid v1.2.1 // indirect
 	github.com/klauspost/crc32 v1.2.0
 	github.com/klauspost/reedsolomon v1.9.2
@@ -52,6 +48,7 @@ require (
 	github.com/mattn/go-ieproxy v0.0.0-20190805055040-f9202b1cfdeb // indirect
 	github.com/mattn/go-runewidth v0.0.4 // indirect
 	github.com/nats-io/nats-server/v2 v2.0.4 // indirect
+	github.com/olivere/elastic/v7 v7.0.19
 	github.com/onsi/ginkgo v1.10.1 // indirect
 	github.com/onsi/gomega v1.7.0 // indirect
 	github.com/peterh/liner v1.1.0
@@ -60,41 +57,43 @@ require (
 	github.com/prometheus/procfs v0.0.4 // indirect
 	github.com/rakyll/statik v0.1.7
 	github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563 // indirect
-	github.com/seaweedfs/fuse v0.0.0-20190510212405-310228904eff
+	github.com/seaweedfs/fuse v1.0.7
 	github.com/seaweedfs/goexif v1.0.2
+	github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e
 	github.com/spaolacci/murmur3 v1.1.0 // indirect
 	github.com/spf13/afero v1.2.2 // indirect
 	github.com/spf13/jwalterweatherman v1.1.0 // indirect
 	github.com/spf13/viper v1.4.0
 	github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271 // indirect
-	github.com/stretchr/testify v1.3.0
+	github.com/stretchr/testify v1.6.1
 	github.com/syndtr/goleveldb v1.0.0
 	github.com/tidwall/gjson v1.3.2
 	github.com/tidwall/match v1.0.1
 	github.com/valyala/bytebufferpool v1.0.0
 	github.com/willf/bitset v1.1.10 // indirect
 	github.com/willf/bloom v2.0.3+incompatible
 	github.com/wsxiaoys/terminal v0.0.0-20160513160801-0940f3fc43a0 // indirect
-	go.etcd.io/bbolt v1.3.3 // indirect
+	go.etcd.io/etcd v3.3.15+incompatible
 	go.mongodb.org/mongo-driver v1.3.2
 	go.uber.org/multierr v1.2.0 // indirect
 	gocloud.dev v0.16.0
 	gocloud.dev/pubsub/natspubsub v0.16.0
 	gocloud.dev/pubsub/rabbitpubsub v0.16.0
 	golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7 // indirect
 	golang.org/x/image v0.0.0-20200119044424-58c23975cae1 // indirect
-	golang.org/x/net v0.0.0-20190909003024-a7b16738d86b
-	golang.org/x/sys v0.0.0-20190910064555-bbd175535a8b
-	golang.org/x/tools v0.0.0-20190911022129-16c5e0f7d110
+	golang.org/x/net v0.0.0-20200202094626-16171245cfb2
+	golang.org/x/sync v0.0.0-20200930132711-30421366ff76 // indirect
+	golang.org/x/sys v0.0.0-20201022201747-fb209a7c41cd
+	golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5
 	google.golang.org/api v0.9.0
 	google.golang.org/appengine v1.6.2 // indirect
 	google.golang.org/genproto v0.0.0-20190905072037-92dd089d5514 // indirect
-	google.golang.org/grpc v1.26.0
+	google.golang.org/grpc v1.29.1
+	google.golang.org/protobuf v1.24.0
 	gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect
 	gopkg.in/jcmturner/goidentity.v3 v3.0.0 // indirect
 	gopkg.in/jcmturner/gokrb5.v7 v7.3.0 // indirect
 	gopkg.in/karlseguin/expect.v1 v1.0.1 // indirect
 	sigs.k8s.io/yaml v1.1.0 // indirect
 )

 replace github.com/satori/go.uuid v1.2.0 => github.com/satori/go.uuid v0.0.0-20181028125025-b2ce2384e17b
 // replace github.com/seaweedfs/fuse => /Users/chris/go/src/github.com/seaweedfs/fuse
 // replace github.com/chrislusf/raft => /Users/chris/go/src/github.com/chrislusf/raft
+
+replace go.etcd.io/etcd => go.etcd.io/etcd v0.5.0-alpha.5.0.20200425165423-262c93980547
go.sum (167 lines changed; entries shown without add/remove markers, as captured)
@@ -32,8 +32,6 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/DataDog/zstd v1.3.6-0.20190409195224-796139022798 h1:2T/jmrHeTezcCM58lvEQXs0UpQJCo5SoGAcg+mbSTIg=
github.com/DataDog/zstd v1.3.6-0.20190409195224-796139022798/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
github.com/DataDog/zstd v1.4.1 h1:3oxKN3wbHibqx897utPC2LTQU4J+IHWWJO+glkAkpFM=
github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
github.com/GoogleCloudPlatform/cloudsql-proxy v0.0.0-20190605020000-c4ba1fdf4d36/go.mod h1:aJ4qN3TfrelA6NZ6AXsXRfmEVaYin3EDbSPJrKS8OXo=
github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
@@ -47,13 +45,14 @@ github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5
github.com/aws/aws-sdk-go v1.15.27/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
github.com/aws/aws-sdk-go v1.19.18/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.19.45/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.23.13 h1:l/NG+mgQFRGG3dsFzEj0jw9JIs/zYdtU6MXhY1WIDmM=
github.com/aws/aws-sdk-go v1.23.13/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.33.5 h1:p2fr1ryvNTU6avUWLI+/H7FGv0TBIjzVM5WDgXBBv4U=
github.com/aws/aws-sdk-go v1.33.5/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932 h1:mXoPYz/Ul5HYEDvkta6I8/rnYM5gSdSV2tJ6XbZuEtY=
github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k=
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY=
@@ -66,27 +65,27 @@ github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/chrislusf/raft v0.0.0-20190225081310-10d6e2182d92 h1:lM9SFsh0EPXkyJyrTJqLZPAIJBtNFP6LNkYXu2MnSZI=
github.com/chrislusf/raft v0.0.0-20190225081310-10d6e2182d92/go.mod h1:4jyiUCD5y548+yKW+oiHtccBiMaLCCbFBpK2t7X4eUo=
github.com/chrislusf/raft v1.0.2-0.20201002174524-b13c3bfdb011 h1:vN1GvfLgDg8kIPCdhuVKAjlYpxG1B86jiKejB6MC/Q0=
github.com/chrislusf/raft v1.0.2-0.20201002174524-b13c3bfdb011/go.mod h1:Ep5DP+mJSosjfKiix1uU7Lc2Df/SX4oGJEpZlXH5l68=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa h1:OaNxuTZr7kxeODyLWsRMC+OD03aFUH+mW6r2d+MWa5Y=
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
github.com/coreos/bbolt v1.3.2 h1:wZwiHHUieZCquLkDL0B8UhzreNWsPHooDAG3q34zk0s=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/bbolt v1.3.3 h1:n6AiVyVRKQFNb6mJlwESEvvLoDyiTzXX7ORAUlkeBdY=
github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible h1:jFneRYjIvLMLhDLCzuTuU4rSJUjRplcJQ7pD7MnhC04=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/etcd v3.3.15+incompatible h1:+9RjdC18gMxNQVvSiXvObLu29mOFmkgdsB4cRTlV+EE=
github.com/coreos/etcd v3.3.15+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-semver v0.2.0 h1:3Jm3tLmsgAYcjC+4Up7hJrFBPr+n7rAqYeSw/SZazuY=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f h1:JOrtw2xFKzlg+cbHpyrpLDmnN1HqhBfnX7WDiW7eG2c=
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd/v22 v22.0.0 h1:XJIw/+VlJ+87J+doOxznsAWIdmWuViOVhkQamW5YV28=
github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -97,6 +96,7 @@ github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8
github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
github.com/disintegration/imaging v1.6.2 h1:w1LecBlG2Lnp8B3jk5zSuNqd7b4DXhcjwek1ei82L+c=
github.com/disintegration/imaging v1.6.2/go.mod h1:44/5580QXChDfwIclfc/PCwrr44amcmDAg8hxG0Ewe4=
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/eapache/go-resiliency v1.1.0 h1:1NtRmCAqadE2FN4ZcN6g90TP3uk8cg9rn9eNK2197aU=
@@ -107,7 +107,9 @@ github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc=
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a h1:yDWHCSQ40h88yih2JAcL6Ls/kVkSE8GFACTGVnMPruw=
github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a/go.mod h1:7Ga40egUymuWXxAe151lTNnCv97MddSOVsjpPPkityA=
@@ -121,6 +123,7 @@ github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 h1:7HZCaLC5+BZpm
github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fortytw2/leaktest v1.2.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw=
github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
github.com/frankban/quicktest v1.7.2 h1:2QxQoC1TS09S7fhCPsrvqYdvP1H5M1P1ih5ABm3BTYk=
github.com/frankban/quicktest v1.7.2/go.mod h1:jaStnuzAqU1AJdCO0l53JDCJrVDKcS03DbaAcR7Ks/o=
@@ -136,6 +139,8 @@ github.com/go-redis/redis v6.15.7+incompatible h1:3skhDh95XQMpnqeqNftPkQD9jL9e5e
github.com/go-redis/redis v6.15.7+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA=
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs=
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0=
@@ -164,6 +169,7 @@ github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/V
github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw=
github.com/gocql/gocql v0.0.0-20190829130954-e163eff7a8c6 h1:P66kRWyEoIx6URKgAC3ijx9jo9gEid7bEhLQ/Z0G65A=
github.com/gocql/gocql v0.0.0-20190829130954-e163eff7a8c6/go.mod h1:Q7Sru5153KG8D9zwueuQJB3ccJf9/bIwF/x8b3oKgT8=
github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
@@ -171,6 +177,7 @@ github.com/gogo/protobuf v1.2.2-0.20190730201129-28a6bbf47e48 h1:X+zN6RZXsvnrSJa
github.com/gogo/protobuf v1.2.2-0.20190730201129-28a6bbf47e48/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef h1:veQD95Isof8w9/WXiA+pa3tz3fJXkt5B7QaRBrM62gk=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM39fZuwSd1LwSqqSW0hOdXCYYDX0R3I=
@@ -182,6 +189,16 @@ github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1 h1:ZFgWrT+bLgsYPirOnRfKLYJLvssAegOj/hgyMFdJZe0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
@@ -194,6 +211,10 @@ github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-replayers/grpcreplay v0.1.0 h1:eNb1y9rZFmY4ax45uEEECSa8fsxGRU+8Bil52ASAwic=
github.com/google/go-replayers/grpcreplay v0.1.0/go.mod h1:8Ig2Idjpr6gifRd6pNVggX6TC1Zw6Jx74AKp7QNH2QE=
github.com/google/go-replayers/httpreplay v0.1.0 h1:AX7FUb4BjrrzNvblr/OlgwrmFiep6soj5K2QSDW7BGk=
@@ -204,7 +225,9 @@ github.com/google/martian v2.1.1-0.20190517191504-25dcb96d9e51+incompatible h1:x
github.com/google/martian v2.1.1-0.20190517191504-25dcb96d9e51+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/subcommands v1.0.1/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/wire v0.3.0 h1:imGQZGEVEHpje5056+K+cgdO72p0LQv2xIIFXNGUf60=
@@ -214,8 +237,9 @@ github.com/googleapis/gax-go v2.0.2+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw=
github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/mux v1.7.4 h1:VuZ8uybHlWmqV03+zRzdwKL4tUnIp1MAQtp1mIFE1bc=
github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q=
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.1 h1:q7AeDBpnBk8AogcD4DSag/Ukw/KV+YhzLj2bP5HvKCM=
@@ -230,6 +254,7 @@ github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t
github.com/grpc-ecosystem/grpc-gateway v1.9.0 h1:bM6ZAFZmc/wPFaRDi0d5L7hGEZEx/2u+Tmr2evNHDiI=
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.9.2/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.11.0 h1:aT5ISUniaOTErogCQ+4pGoYNBB6rm6Fq3g1v8QwYGas=
github.com/grpc-ecosystem/grpc-gateway v1.11.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8=
@@ -239,8 +264,6 @@ github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.3 h1:YPkqC67at8FYaadspW/6uE0COsBxS2656RLEr8Bppgk=
github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
@@ -253,12 +276,16 @@ github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/U
github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmespath/go-jmespath v0.3.0 h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2yCeUc=
github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik=
github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo=
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/karlseguin/ccache v2.0.3+incompatible h1:j68C9tWOROiOLWTS/kCGg9IcJG+ACqn5+0+t8Oh83UU=
@@ -272,6 +299,8 @@ github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQL
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.9.5 h1:U+CaK85mrNNb4k8BNOfgJtJ/gr6kswUCFj6miSzVC6M=
github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.10.9 h1:pPRt1Z78crspaHISkpSSHjDlx+Tt9suHe519dsI0vF4=
github.com/klauspost/compress v1.10.9/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/cpuid v1.2.1 h1:vJi+O/nMdFt0vqm8NZBI6wzALWdA2X+egi0ogNyrC/w=
github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/crc32 v1.2.0 h1:0VuyqOCruD33/lJ/ojXNvzVyl8Zr5zdTmj9l9qLZ86I=
@@ -297,15 +326,20 @@ github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDe
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mailru/easyjson v0.7.1 h1:mdxE1MF9o53iCb2Ghj1VfWvh7ZOwHpnVG/xwXrV90U8=
github.com/mailru/easyjson v0.7.1/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE=
github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149 h1:HfxbT6/JcvIljmERptWhwa8XzP7H3T+Z2N26gTsaDaA=
github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
github.com/mattn/go-ieproxy v0.0.0-20190805055040-f9202b1cfdeb h1:hXqqXzQtJbENrsb+rsIqkVqcg4FUJL0SQFGw08Dgivw=
github.com/mattn/go-ieproxy v0.0.0-20190805055040-f9202b1cfdeb/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.3 h1:a+kO+98RDGEfo6asOGMmpodZq4FNtnGP54yps8BzLR4=
github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.4 h1:2BvfKmzob6Bmd4YsL0zygOqfdFnK7GR4QL06Do4/p7Y=
@@ -338,6 +372,9 @@ github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxzi
github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
github.com/olivere/elastic/v7 v7.0.19 h1:w4F6JpqOISadhYf/n0NR1cNj73xHqh4pzPwD1Gkidts=
github.com/olivere/elastic/v7 v7.0.19/go.mod h1:4Jqt5xvjqpjCqgnTcHwl3j8TLs8mvoOK8NYgo/qEOu4=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
@@ -347,6 +384,7 @@ github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pelletier/go-toml v1.4.0 h1:u3Z1r+oOXJIkxqw34zVhyPgjBsm6X2wn21NWs/HfSeg=
@@ -359,6 +397,8 @@ github.com/pierrec/lz4 v2.2.7+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
@@ -399,6 +439,10 @@ github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/seaweedfs/fuse v0.0.0-20190510212405-310228904eff h1:uLd5zBvf5OA67wcVRePHrFt60bR4LSskaVhgVwyk0Jg=
github.com/seaweedfs/fuse v0.0.0-20190510212405-310228904eff/go.mod h1:cubdLmQFqEUZ9vNJrznhgc3m3VMAJi/nY2Ix2axXkG0=
github.com/seaweedfs/fuse v1.0.6 h1:htaOrJvqCxX6EL9q+APl0fFbA8AHgm0OyQpDAAVEjWU=
github.com/seaweedfs/fuse v1.0.6/go.mod h1:W7ubwr1l7KQsMeUpxFFOFOSxUL/ucTRMAlVYs4xdfQ8=
github.com/seaweedfs/fuse v1.0.7 h1:tESMXhI3gXzN+dlWsCUrkIZDiWA4dZX18rQMoqmvazw=
github.com/seaweedfs/fuse v1.0.7/go.mod h1:W7ubwr1l7KQsMeUpxFFOFOSxUL/ucTRMAlVYs4xdfQ8=
github.com/seaweedfs/goexif v1.0.2 h1:p+rTXYdQ2mgxd+1JaTrQ9N8DvYuw9UH9xgYmJ+Bb29E=
github.com/seaweedfs/goexif v1.0.2/go.mod h1:MrKs5LK0HXdffrdCZrW3OIMegL2xXpC6ThLyXMyjdrk=
github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo=
@@ -407,6 +451,11 @@ github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPx
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e h1:MRM5ITcdelLK2j1vwZ3Je0FKVCfqOLp5zO6trqMLYs0=
github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e/go.mod h1:XV66xRDqSt+GTGFMVlhk3ULuV0y9ZmzeVGR4mloJI3M=
github.com/smartystreets/assertions v1.1.1/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo=
github.com/smartystreets/go-aws-auth v0.0.0-20180515143844-0c1422d1fdb9/go.mod h1:SnhjPscd9TpLiy1LpzGSKh3bXCfxxXuqd9xmQJy3slM=
github.com/smartystreets/gunit v1.3.4/go.mod h1:ZjM1ozSIMJlAz/ay4SG8PeKF00ckUp+zMHZXV9/bvak=
github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
@@ -423,6 +472,7 @@ github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/viper v1.4.0 h1:yXHLWeravcrgGyFSyCgdYpXQ9dR9c/WED3pg1RhxqEU=
@@ -439,6 +489,12 @@ github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoH
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE=
github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ=
github.com/tidwall/gjson v1.3.2 h1:+7p3qQFaH3fOMXAJSrdZwGKcOO/lYdGS0HqGhPqDdTI=
@@ -447,10 +503,14 @@ github.com/tidwall/match v1.0.1 h1:PnKP62LPNxHKTwvHHZZzdOAOCtsJTjo6dZLCwpKm5xc=
github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E=
github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4=
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/ugorji/go v1.1.4 h1:j4s+tAvLfL3bZyefP2SEWmhBzmuIlH/eqNuPdFPgngw=
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/willf/bitset v1.1.10 h1:NotGKqX0KwQ72NUzqrjZq5ipPNDQex9lo3WpaS8L2sc=
github.com/willf/bitset v1.1.10/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
github.com/willf/bloom v2.0.3+incompatible h1:QDacWdqcAUI1MPOwIQZRy9kOR7yxfyEmxX8Wdm2/JPA=
@@ -467,24 +527,32 @@ github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
go.etcd.io/bbolt v1.3.2 h1:Z/90sZLPOeCy2PwprqkFa25PdkusRzaj9P8zm/KNyvk=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/etcd v3.3.15+incompatible h1:0VpOVCF6EFnJptt8Jh0EWEHO4j2fepyV1fpu9xz/UoQ=
go.etcd.io/etcd v3.3.15+incompatible/go.mod h1:yaeTdrJi5lOmYerz05bd8+V7KubZs8YSFZfzsF9A6aI=
go.etcd.io/bbolt v1.3.4 h1:hi1bXHMVrlQh6WwxAy+qZCV/SYIlqo+Ushwdpa4tAKg=
go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
|
||||
go.etcd.io/etcd v0.5.0-alpha.5.0.20200425165423-262c93980547 h1:s71VGheLtWmCYsnNjf+s7XE8HsrZnd3EYGrLGWVm7nY=
|
||||
go.etcd.io/etcd v0.5.0-alpha.5.0.20200425165423-262c93980547/go.mod h1:YoUyTScD3Vcv2RBm3eGVOq7i1ULiz3OuXoQFWOirmAM=
|
||||
go.mongodb.org/mongo-driver v1.3.2 h1:IYppNjEV/C+/3VPbhHVxQ4t04eVW0cLp0/pNdW++6Ug=
|
||||
go.mongodb.org/mongo-driver v1.3.2/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE=
|
||||
go.opencensus.io v0.15.0/go.mod h1:UffZAU+4sDEINUGP/B7UfBBkq4fqLu9zXAX7ke6CHW0=
|
||||
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
||||
go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4=
|
||||
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
|
||||
go.opencensus.io v0.22.4 h1:LYy1Hy3MJdrCdMwwzxA/dRok4ejH+RwNGbuoD9fCjto=
|
||||
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU=
|
||||
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk=
|
||||
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
|
||||
go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI=
|
||||
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
||||
go.uber.org/multierr v1.2.0 h1:6I+W7f5VwC5SV9dNrZ3qXrDB9mD0dyGOi/ZJmYw03T4=
|
||||
go.uber.org/multierr v1.2.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
||||
go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A=
|
||||
go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
|
||||
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4=
|
||||
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
|
||||
go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM=
|
||||
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
go.uber.org/zap v1.14.1 h1:nYDKopTbvAPq/NrUVZwT15y2lpROBiLLyoRTbXOYWOo=
|
||||
go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc=
|
||||
gocloud.dev v0.16.0 h1:hWeaQWxamGerwsU7B9xSWvUjx0p7TwG8fcHro2TzbbM=
|
||||
gocloud.dev v0.16.0/go.mod h1:xWGXD8t7bEhqPIuyAUFyXV9qHn+PvEY2F2GKPh7i/O0=
|
||||
gocloud.dev/pubsub/natspubsub v0.16.0 h1:MoBGXULDzb1fVaZsGWO5cUCgr6yoI/DHhau8OPGaGEI=
|
||||
@ -496,12 +564,13 @@ golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnf
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
|
||||
golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
|
||||
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5 h1:58fnuSXlxZmFdJyvtTFVmVhcMLU6v5fEb/ok4wyqtNU=
|
||||
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7 h1:0hQKqeLdqlt5iIwVOBErRisrHJAN57yOiPRQItI20fU=
|
||||
golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc h1:c0o/qxkaO2LF5t6fQrT4b5hzyggAkLLlCUjqfRxd8Q4=
|
||||
golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
||||
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067 h1:KYGJGHOQy8oSi1fDlSpcZF0+juKwk/hEMv5SiwHogR0=
|
||||
@ -514,7 +583,10 @@ golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvx
|
||||
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs=
|
||||
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
|
||||
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
@ -531,8 +603,9 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR
|
||||
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190909003024-a7b16738d86b h1:XfVGCX+0T4WOStkaOsJRllbsiImhB2jgVBGc9L0lPGc=
|
||||
golang.org/x/net v0.0.0-20190909003024-a7b16738d86b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200202094626-16171245cfb2 h1:CCH4IOTTfewWjGOlSp+zGcjutRKlBEZQ6wTn8ozI/nI=
|
||||
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
@ -545,6 +618,8 @@ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJ
|
||||
golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20200930132711-30421366ff76 h1:JnxiSYT3Nm0BT2a8CyvYyM6cnrWpidecD1UuSYbhKm0=
|
||||
golang.org/x/sync v0.0.0-20200930132711-30421366ff76/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
@ -566,12 +641,15 @@ golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0 h1:HyfiK1WMnHj5FXFXatD+Qs1A/
|
||||
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190910064555-bbd175535a8b h1:3S2h5FadpNr0zUUCVZjlKIEYF+KaX/OBplTGo89CYHI=
|
||||
golang.org/x/sys v0.0.0-20190910064555-bbd175535a8b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5 h1:LfCXLvNmTYH9kEmVgqbnsWfruoXZIrh4YBgqVHtDvw0=
|
||||
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201022201747-fb209a7c41cd h1:WgqgiQvkiZWz7XLhphjt2GI2GcGCTIZs9jqXMWmH+oc=
|
||||
golang.org/x/sys v0.0.0-20201022201747-fb209a7c41cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
@ -592,12 +670,16 @@ golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBn
|
||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190911022129-16c5e0f7d110 h1:6S6bidS7O4yAwA5ORRbRIjvNQ9tGbLd5e+LRIaTeVDQ=
|
||||
golang.org/x/tools v0.0.0-20190911022129-16c5e0f7d110/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5 h1:hKsoRgsbwY1NafxrwTs+k64bikrLBkAgPir1TNCj3Zs=
|
||||
golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
|
||||
google.golang.org/api v0.5.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
|
||||
google.golang.org/api v0.6.0/go.mod h1:btoxGiFvQNVUZQ8W08zLtrVS08CNpINPEfxXxgJL1Q4=
|
||||
@ -622,17 +704,33 @@ google.golang.org/genproto v0.0.0-20190508193815-b515fa19cec8/go.mod h1:VzzqZJRn
|
||||
google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s=
|
||||
google.golang.org/genproto v0.0.0-20190620144150-6af8c5fc6601/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s=
|
||||
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20190905072037-92dd089d5514 h1:oFSK4421fpCKRrpzIpybyBVWyht05NegY9+L/3TLAZs=
|
||||
google.golang.org/genproto v0.0.0-20190905072037-92dd089d5514/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY=
|
||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
google.golang.org/grpc v1.23.0 h1:AzbTB6ux+okLTzP8Ru1Xs41C303zdcfEht7MQnYJt5A=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
|
||||
google.golang.org/grpc v1.26.0 h1:2dTRdpdFEEhJYQD8EMLB61nnrzSCTbG38PhqdhvOltg=
|
||||
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.27.0 h1:rRYRFMVgRv6E0D70Skyfsr28tDXIuuPZyWGMPdMcnXg=
|
||||
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.29.1 h1:EC2SB8S04d2r73uptxphDSUG+kTKVgjRPF+N3xpxRB4=
|
||||
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
|
||||
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
||||
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA=
|
||||
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
@ -640,6 +738,7 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
|
||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
@ -666,10 +765,14 @@ gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bl
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM=
|
||||
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
||||
pack.ag/amqp v0.11.2/go.mod h1:4/cbmt4EJXSKlG6LCfWHoqmN0uFdy5i/+YFz+fTfhV4=
|
||||
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
||||
sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
|
||||
|
@ -1,4 +1,4 @@
|
||||
apiVersion: v1
|
||||
description: SeaweedFS
|
||||
name: seaweedfs
|
||||
version: 1.81
|
||||
version: 2.07
|
@ -111,4 +111,18 @@ Inject extra environment vars in the format key:value, if populated
|
||||
{{- $tag := .Values.global.imageTag | toString -}}
|
||||
{{- printf "%s%s%s:%s" $registryName $repositoryName $name $tag -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/* Return the proper cronjob image */}}
|
||||
{{- define "cronjob.image" -}}
|
||||
{{- if .Values.cronjob.imageOverride -}}
|
||||
{{- $imageOverride := .Values.cronjob.imageOverride -}}
|
||||
{{- printf "%s" $imageOverride -}}
|
||||
{{- else -}}
|
||||
{{- $registryName := default .Values.image.registry .Values.global.localRegistry | toString -}}
|
||||
{{- $repositoryName := .Values.image.repository | toString -}}
|
||||
{{- $name := .Values.global.imageName | toString -}}
|
||||
{{- $tag := .Values.global.imageTag | toString -}}
|
||||
{{- printf "%s%s%s:%s" $registryName $repositoryName $name $tag -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
54
k8s/seaweedfs/templates/cronjob.yaml
Normal file
@ -0,0 +1,54 @@
{{- if .Values.cronjob }}
{{- if .Values.cronjob.enabled }}
apiVersion: batch/v1beta1
kind: CronJob
metadata:
  name: {{ include "seaweedfs.fullname" . }}-cronjob
spec:
  schedule: "{{ .Values.cronjob.schedule }}"
  concurrencyPolicy: Forbid
  failedJobsHistoryLimit: 2
  successfulJobsHistoryLimit: 2
  jobTemplate:
    spec:
      backoffLimit: 2
      template:
        spec:
          {{- with .Values.cronjob.nodeSelector }}
          nodeSelector:
            {{- toYaml . | nindent 12 }}
          {{- end }}
          {{- with .Values.cronjob.tolerations }}
          tolerations:
            {{- toYaml . | nindent 12 }}
          {{- end }}
          restartPolicy: OnFailure
          containers:
            - name: shell
              image: {{ template "cronjob.image" . }}
              imagePullPolicy: {{ default "IfNotPresent" .Values.global.imagePullPolicy }}
              resources:
                {{- toYaml .Values.cronjob.resources | nindent 16 }}
              command:
                - sh
                - -c
                - |
                  set -ex
                  echo -e "lock\n\
                  volume.balance -force\
                  {{ if .Values.volume.dataCenter }} -dataCenter {{ .Values.volume.dataCenter }}{{ end }}\
                  {{ if .Values.cronjob.collection }} -collection {{ .Values.cronjob.collection }}{{ end }}\n\
                  volume.fix.replication\nunlock\n" | \
                  /usr/bin/weed shell \
                  {{- if .Values.cronjob.master }}
                  -master {{ .Values.cronjob.master }} \
                  {{- else }}
                  -master {{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}.svc:{{ .Values.master.port }} \
                  {{- end }}
                  {{- if .Values.cronjob.filer }}
                  -filer {{ .Values.cronjob.filer }}
                  {{- else }}
                  -filer {{ template "seaweedfs.name" . }}-filer.{{ .Release.Namespace }}.svc:{{ .Values.filer.port }}
                  {{- end }}
{{- end }}
{{- end }}
@ -17,6 +17,12 @@ spec:
|
||||
port: {{ .Values.filer.grpcPort }}
|
||||
targetPort: {{ .Values.filer.grpcPort }}
|
||||
protocol: TCP
|
||||
{{- if .Values.filer.metricsPort }}
|
||||
- name: "swfs-filer-metrics"
|
||||
port: {{ .Values.filer.metricsPort }}
|
||||
targetPort: {{ .Values.filer.metricsPort }}
|
||||
protocol: TCP
|
||||
{{- end }}
|
||||
selector:
|
||||
app: {{ template "seaweedfs.name" . }}
|
||||
component: filer
|
18
k8s/seaweedfs/templates/filer-servicemonitor.yaml
Normal file
@ -0,0 +1,18 @@
{{- if .Values.filer.metricsPort }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: {{ template "seaweedfs.name" . }}-filer
  namespace: {{ .Release.Namespace }}
  labels:
    app: {{ template "seaweedfs.name" . }}
    component: filer
spec:
  endpoints:
  - interval: 30s
    port: swfs-filer-metrics
    scrapeTimeout: 5s
  selector:
    app: {{ template "seaweedfs.name" . }}
    component: filer
{{- end }}
@ -99,6 +99,12 @@ spec:
|
||||
{{- end }}
|
||||
filer \
|
||||
-port={{ .Values.filer.port }} \
|
||||
{{- if .Values.filer.metricsPort }}
|
||||
-metricsPort {{ .Values.filer.metricsPort }} \
|
||||
{{- end }}
|
||||
{{- if .Values.filer.redirectOnRead }}
|
||||
-redirectOnRead \
|
||||
{{- end }}
|
||||
{{- if .Values.filer.disableHttp }}
|
||||
-disableHttp \
|
||||
{{- end }}
|
||||
@ -106,7 +112,24 @@ spec:
|
||||
-disableDirListing \
|
||||
{{- end }}
|
||||
-dirListLimit={{ .Values.filer.dirListLimit }} \
|
||||
{{- if .Values.global.enableReplication }}
|
||||
-defaultReplicaPlacement={{ .Values.global.replicationPlacement }} \
|
||||
{{- else }}
|
||||
-defaultReplicaPlacement={{ .Values.filer.defaultReplicaPlacement }} \
|
||||
{{- end }}
|
||||
{{- if .Values.filer.maxMB }}
|
||||
-maxMB={{ .Values.filer.maxMB }} \
|
||||
{{- end }}
|
||||
{{- if .Values.filer.encryptVolumeData }}
|
||||
-encryptVolumeData \
|
||||
{{- end }}
|
||||
-ip=${POD_IP} \
|
||||
{{- if gt (.Values.filer.replicas | int) 1 }}
|
||||
-peers={{ range $index := until (.Values.filer.replicas | int) }}${SEAWEEDFS_FULLNAME}-filer-{{ $index }}.${SEAWEEDFS_FULLNAME}-filer:{{ $.Values.filer.port }}{{ if lt $index (sub ($.Values.filer.replicas | int) 1) }},{{ end }}{{ end }}
|
||||
{{- end }}
|
||||
-master={{ range $index := until (.Values.master.replicas | int) }}${SEAWEEDFS_FULLNAME}-master-{{ $index }}.${SEAWEEDFS_FULLNAME}-master:{{ $.Values.master.port }}{{ if lt $index (sub ($.Values.master.replicas | int) 1) }},{{ end }}{{ end }}
|
||||
{{- if or (.Values.global.enableSecurity) (.Values.filer.extraVolumeMounts) }}
|
||||
volumeMounts:
|
||||
@ -149,6 +172,7 @@ spec:
|
||||
periodSeconds: 15
|
||||
successThreshold: 1
|
||||
failureThreshold: 100
|
||||
timeoutSeconds: 3
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /
|
||||
@ -158,6 +182,7 @@ spec:
|
||||
periodSeconds: 30
|
||||
successThreshold: 1
|
||||
failureThreshold: 5
|
||||
timeoutSeconds: 3
|
||||
{{- if .Values.filer.resources }}
|
||||
resources:
|
||||
{{ tpl .Values.filer.resources . | nindent 12 | trim }}
|
||||
|
@ -70,6 +70,12 @@ spec:
|
||||
fieldPath: metadata.namespace
|
||||
- name: SEAWEEDFS_FULLNAME
|
||||
value: "{{ template "seaweedfs.name" . }}"
|
||||
{{- if .Values.master.extraEnvironmentVars }}
|
||||
{{- range $key, $value := .Values.master.extraEnvironmentVars }}
|
||||
- name: {{ $key }}
|
||||
value: {{ $value | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
command:
|
||||
- "/bin/sh"
|
||||
- "-ec"
|
||||
@ -84,6 +90,11 @@ spec:
|
||||
-port={{ .Values.master.port }} \
|
||||
-mdir=/data \
|
||||
-ip.bind={{ .Values.master.ipBind }} \
|
||||
{{- if .Values.global.enableReplication }}
|
||||
-defaultReplication={{ .Values.global.replicationPlacement }} \
|
||||
{{- else }}
|
||||
-defaultReplication={{ .Values.master.defaultReplication }} \
|
||||
{{- end }}
|
||||
{{- if .Values.master.volumePreallocate }}
|
||||
-volumePreallocate \
|
||||
{{- end }}
|
||||
@ -94,6 +105,15 @@ spec:
|
||||
{{- if .Values.master.disableHttp }}
|
||||
-disableHttp \
|
||||
{{- end }}
|
||||
{{- if .Values.master.pulseSeconds }}
|
||||
-pulseSeconds={{ .Values.master.pulseSeconds }} \
|
||||
{{- end }}
|
||||
{{- if .Values.master.garbageThreshold }}
|
||||
-garbageThreshold={{ .Values.master.garbageThreshold }} \
|
||||
{{- end }}
|
||||
{{- if .Values.master.metricsIntervalSec }}
|
||||
-metrics.intervalSeconds={{ .Values.master.metricsIntervalSec }} \
|
||||
{{- end }}
|
||||
-ip=${POD_NAME}.${SEAWEEDFS_FULLNAME}-master \
|
||||
-peers={{ range $index := until (.Values.master.replicas | int) }}${SEAWEEDFS_FULLNAME}-master-{{ $index }}.${SEAWEEDFS_FULLNAME}-master:{{ $.Values.master.port }}{{ if lt $index (sub ($.Values.master.replicas | int) 1) }},{{ end }}{{ end }}
|
||||
volumeMounts:
|
||||
@ -133,19 +153,21 @@ spec:
|
||||
path: /cluster/status
|
||||
port: {{ .Values.master.port }}
|
||||
scheme: HTTP
|
||||
initialDelaySeconds: 5
|
||||
periodSeconds: 15
|
||||
initialDelaySeconds: 10
|
||||
periodSeconds: 45
|
||||
successThreshold: 2
|
||||
failureThreshold: 100
|
||||
timeoutSeconds: 5
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /cluster/status
|
||||
port: {{ .Values.master.port }}
|
||||
scheme: HTTP
|
||||
initialDelaySeconds: 20
|
||||
periodSeconds: 10
|
||||
periodSeconds: 30
|
||||
successThreshold: 1
|
||||
failureThreshold: 6
|
||||
failureThreshold: 4
|
||||
timeoutSeconds: 5
|
||||
{{- if .Values.master.resources }}
|
||||
resources:
|
||||
{{ tpl .Values.master.resources . | nindent 12 | trim }}
|
||||
|
@ -71,6 +71,9 @@ spec:
|
||||
{{- end }}
|
||||
s3 \
|
||||
-port={{ .Values.s3.port }} \
|
||||
{{- if .Values.s3.metricsPort }}
|
||||
-metricsPort {{ .Values.s3.metricsPort }} \
|
||||
{{- end }}
|
||||
{{- if .Values.global.enableSecurity }}
|
||||
-cert.file=/usr/local/share/ca-certificates/client/tls.crt \
|
||||
-key.file=/usr/local/share/ca-certificates/client/tls.key \
|
||||
@ -116,6 +119,7 @@ spec:
|
||||
periodSeconds: 15
|
||||
successThreshold: 1
|
||||
failureThreshold: 100
|
||||
timeoutSeconds: 3
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /
|
||||
@ -125,6 +129,7 @@ spec:
|
||||
periodSeconds: 60
|
||||
successThreshold: 1
|
||||
failureThreshold: 20
|
||||
timeoutSeconds: 3
|
||||
{{- if .Values.s3.resources }}
|
||||
resources:
|
||||
{{ tpl .Values.s3.resources . | nindent 12 | trim }}
|
||||
|
@ -12,6 +12,12 @@ spec:
|
||||
port: {{ .Values.s3.port }}
|
||||
targetPort: {{ .Values.s3.port }}
|
||||
protocol: TCP
|
||||
{{- if .Values.s3.metricsPort }}
|
||||
- name: "swfs-s3-metrics"
|
||||
port: {{ .Values.s3.metricsPort }}
targetPort: {{ .Values.s3.metricsPort }}
protocol: TCP
{{- end }}
|
||||
selector:
|
||||
app: {{ template "seaweedfs.name" . }}
|
||||
component: s3
|
18
k8s/seaweedfs/templates/s3-servicemonitor.yaml
Normal file
@ -0,0 +1,18 @@
{{- if .Values.s3.metricsPort }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: {{ template "seaweedfs.name" . }}-s3
  namespace: {{ .Release.Namespace }}
  labels:
    app: {{ template "seaweedfs.name" . }}
    component: s3
spec:
  endpoints:
  - interval: 30s
    port: swfs-s3-metrics
    scrapeTimeout: 5s
  selector:
    app: {{ template "seaweedfs.name" . }}
    component: s3
{{- end }}
@ -91,7 +91,7 @@ data:
|
||||
"thresholds": [],
|
||||
"timeFrom": null,
|
||||
"timeShift": null,
|
||||
"title": "Filer Request Duration 95th percentile",
|
||||
"title": "Filer Request Duration 80th percentile",
|
||||
"tooltip": {
|
||||
"msResolution": true,
|
||||
"shared": true,
|
||||
@ -1349,4 +1349,4 @@ data:
|
||||
"title": "SeaweedFS",
|
||||
"version": 3
|
||||
}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
@ -17,6 +17,12 @@ spec:
|
||||
port: {{ .Values.volume.grpcPort }}
|
||||
targetPort: {{ .Values.volume.grpcPort }}
|
||||
protocol: TCP
|
||||
{{- if .Values.volume.metricsPort }}
- name: "swfs-volume-metrics"
port: {{ .Values.volume.metricsPort }}
targetPort: {{ .Values.volume.metricsPort }}
protocol: TCP
{{- end }}
|
||||
selector:
|
||||
app: {{ template "seaweedfs.name" . }}
|
||||
component: volume
|
18
k8s/seaweedfs/templates/volume-servicemonitor.yaml
Normal file
@ -0,0 +1,18 @@
{{- if .Values.volume.metricsPort }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: {{ template "seaweedfs.name" . }}-volume
  namespace: {{ .Release.Namespace }}
  labels:
    app: {{ template "seaweedfs.name" . }}
    component: volume
spec:
  endpoints:
  - interval: 30s
    port: swfs-volume-metrics
    scrapeTimeout: 5s
  selector:
    app: {{ template "seaweedfs.name" . }}
    component: volume
{{- end }}
@ -12,6 +12,7 @@ metadata:
|
||||
spec:
|
||||
serviceName: {{ template "seaweedfs.name" . }}-volume
|
||||
replicas: {{ .Values.volume.replicas }}
|
||||
podManagementPolicy: Parallel
|
||||
selector:
|
||||
matchLabels:
|
||||
app: {{ template "seaweedfs.name" . }}
|
||||
@ -33,7 +34,7 @@ spec:
|
||||
restartPolicy: {{ default .Values.global.restartPolicy .Values.volume.restartPolicy }}
|
||||
{{- if .Values.volume.tolerations }}
|
||||
tolerations:
|
||||
{{ tpl .Values.volume.tolerations . | nindent 8 | trim }}
|
||||
{{- end }}
|
||||
{{- if .Values.global.imagePullSecrets }}
|
||||
imagePullSecrets:
|
||||
@ -62,7 +63,7 @@ spec:
|
||||
fieldRef:
|
||||
fieldPath: status.hostIP
|
||||
- name: SEAWEEDFS_FULLNAME
|
||||
value: "{{ template "seaweedfs.name" . }}"
|
||||
command:
|
||||
- "/bin/sh"
|
||||
- "-ec"
|
||||
@ -75,6 +76,9 @@ spec:
|
||||
{{- end }}
|
||||
volume \
|
||||
-port={{ .Values.volume.port }} \
|
||||
{{- if .Values.volume.metricsPort }}
|
||||
-metricsPort {{ .Values.volume.metricsPort }} \
|
||||
{{- end }}
|
||||
-dir={{ .Values.volume.dir }} \
|
||||
-max={{ .Values.volume.maxVolumes }} \
|
||||
{{- if .Values.volume.rack }}
|
||||
@ -91,6 +95,16 @@ spec:
|
||||
{{- if .Values.volume.imagesFixOrientation }}
|
||||
-images.fix.orientation \
|
||||
{{- end }}
|
||||
{{- if .Values.volume.pulseSeconds }}
|
||||
-pulseSeconds={{ .Values.volume.pulseSeconds }} \
|
||||
{{- end }}
|
||||
{{- if .Values.volume.index }}
|
||||
-index={{ .Values.volume.index }} \
|
||||
{{- end }}
|
||||
{{- if .Values.volume.fileSizeLimitMB }}
|
||||
-fileSizeLimitMB={{ .Values.volume.fileSizeLimitMB }} \
|
||||
{{- end }}
|
||||
-minFreeSpacePercent={{ .Values.volume.minFreeSpacePercent }} \
|
||||
-ip=${POD_NAME}.${SEAWEEDFS_FULLNAME}-volume \
|
||||
-compactionMBps={{ .Values.volume.compactionMBps }} \
|
||||
-mserver={{ range $index := until (.Values.master.replicas | int) }}${SEAWEEDFS_FULLNAME}-master-{{ $index }}.${SEAWEEDFS_FULLNAME}-master:{{ $.Values.master.port }}{{ if lt $index (sub ($.Values.master.replicas | int) 1) }},{{ end }}{{ end }}
|
||||
@ -131,19 +145,21 @@ spec:
|
||||
path: /status
|
||||
port: {{ .Values.volume.port }}
|
||||
scheme: HTTP
|
||||
initialDelaySeconds: 5
|
||||
periodSeconds: 15
|
||||
initialDelaySeconds: 15
|
||||
periodSeconds: 90
|
||||
successThreshold: 1
|
||||
failureThreshold: 100
|
||||
timeoutSeconds: 5
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /status
|
||||
port: {{ .Values.volume.port }}
|
||||
scheme: HTTP
|
||||
initialDelaySeconds: 20
|
||||
periodSeconds: 30
|
||||
periodSeconds: 90
|
||||
successThreshold: 1
|
||||
failureThreshold: 10
|
||||
failureThreshold: 4
|
||||
timeoutSeconds: 5
|
||||
{{- if .Values.volume.resources }}
|
||||
resources:
|
||||
{{ tpl .Values.volume.resources . | nindent 12 | trim }}
|
||||
|
@ -4,7 +4,7 @@ global:
|
||||
registry: ""
|
||||
repository: ""
|
||||
imageName: chrislusf/seaweedfs
|
||||
imageTag: "1.81"
|
||||
imageTag: "2.07"
|
||||
imagePullPolicy: IfNotPresent
|
||||
imagePullSecrets: imagepullsecret
|
||||
restartPolicy: Always
|
||||
@ -14,6 +14,13 @@ global:
|
||||
enabled: false
|
||||
gatewayHost: null
|
||||
gatewayPort: null
|
||||
# if enabled, use global.replicationPlacement to override the master defaultReplication and filer defaultReplicaPlacement settings
|
||||
enableReplication: false
|
||||
# replication type is XYZ:
|
||||
# X number of replica in other data centers
|
||||
# Y number of replica in other racks in the same data center
|
||||
# Z number of replica in other servers in the same rack
|
||||
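# e.g. "001" keeps one extra copy on another server in the same rack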
replicationPlacement: "001"
|
||||
|
||||
image:
|
||||
registry: ""
|
||||
@ -31,8 +38,20 @@ master:
|
||||
grpcPort: 19333
|
||||
ipBind: "0.0.0.0"
|
||||
volumePreallocate: false
|
||||
# Master stops directing writes to oversized volumes
|
||||
volumeSizeLimitMB: 30000
|
||||
loggingOverrideLevel: null
|
||||
# number of seconds between heartbeats, default 5
|
||||
pulseSeconds: null
|
||||
# threshold to vacuum and reclaim space, default 0.3 (30%)
|
||||
garbageThreshold: null
|
||||
# Prometheus push interval in seconds, default 15
|
||||
metricsIntervalSec: 15
|
||||
# replication type is XYZ:
|
||||
# X number of replica in other data centers
|
||||
# Y number of replica in other racks in the same data center
|
||||
# Z number of replica in other servers in the same rack
|
||||
defaultReplication: "000"
|
||||
|
||||
# Disable http request, only gRpc operations are allowed
|
||||
disableHttp: false
|
||||
@ -87,6 +106,11 @@ master:
|
||||
# ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
|
||||
priorityClassName: ""
|
||||
|
||||
extraEnvironmentVars:
|
||||
WEED_MASTER_VOLUME_GROWTH_COPY_1: 7
|
||||
WEED_MASTER_VOLUME_GROWTH_COPY_2: 6
|
||||
WEED_MASTER_VOLUME_GROWTH_COPY_3: 3
|
||||
WEED_MASTER_VOLUME_GROWTH_COPY_OTHER: 1
|
||||
|
||||
volume:
|
||||
enabled: true
|
||||
@ -97,12 +121,22 @@ volume:
|
||||
restartPolicy: null
|
||||
port: 8080
|
||||
grpcPort: 18080
|
||||
metricsPort: 9327
|
||||
ipBind: "0.0.0.0"
|
||||
replicas: 1
|
||||
loggingOverrideLevel: null
|
||||
# number of seconds between heartbeats, must be smaller than or equal to the master's setting
|
||||
pulseSeconds: null
|
||||
# choose [memory|leveldb|leveldbMedium|leveldbLarge] mode to balance memory and performance, default memory
|
||||
index: null
|
||||
# limit file size to avoid out-of-memory errors, default 256MB
|
||||
fileSizeLimitMB: null
|
||||
# minimum free disk space (in percent); if free disk space drops below this value, all volumes are marked read-only
|
||||
minFreeSpacePercent: 1
|
||||
|
||||
|
||||
# limit background compaction or copying speed in mega bytes per second
|
||||
compactionMBps: "40"
|
||||
compactionMBps: "50"
|
||||
|
||||
# Directories to store data files. dir[,dir]... (default "/tmp")
|
||||
dir: "/data"
|
||||
@ -176,7 +210,22 @@ filer:
|
||||
replicas: 1
|
||||
port: 8888
|
||||
grpcPort: 18888
|
||||
metricsPort: 9327
|
||||
loggingOverrideLevel: null
|
||||
# replication type is XYZ:
|
||||
# X number of replica in other data centers
|
||||
# Y number of replica in other racks in the same data center
|
||||
# Z number of replica in other servers in the same rack
|
||||
defaultReplicaPlacement: "000"
|
||||
# turn off directory listing
|
||||
disableDirListing: false
|
||||
# split files larger than this limit (in MB), default 32
|
||||
maxMB: null
|
||||
# encrypt data on volume servers
|
||||
encryptVolumeData: false
|
||||
|
||||
# Whether proxy or redirect to volume server during file GET request
|
||||
redirectOnRead: false
|
||||
|
||||
# Limit sub dir listing size (default 100000)
|
||||
dirListLimit: 100000
|
||||
@ -237,11 +286,6 @@ filer:
|
||||
# ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
|
||||
priorityClassName: ""
|
||||
|
||||
dbSchema:
|
||||
imageName: db-schema
|
||||
imageTag: "development"
|
||||
imageOverride: ""
|
||||
|
||||
# extraEnvironmentVars is a list of extra environment variables to set on the stateful set.
|
||||
extraEnvironmentVars:
|
||||
WEED_MYSQL_ENABLED: "true"
|
||||
@ -260,6 +304,8 @@ filer:
|
||||
WEED_FILER_BUCKETS_FOLDER: "/buckets"
|
||||
# directories under this folder will be store message queue data
|
||||
WEED_FILER_QUEUES_FOLDER: "/queues"
|
||||
# WEED_FILER_OPTIONS_BUCKETS_FSYNC is a list of bucket names for which all write requests use fsync=true
|
||||
WEED_FILER_OPTIONS_BUCKETS_FSYNC: []
|
||||
|
||||
s3:
|
||||
enabled: true
|
||||
@ -269,6 +315,7 @@ s3:
|
||||
restartPolicy: null
|
||||
replicas: 1
|
||||
port: 8333
|
||||
metricsPort: 9327
|
||||
loggingOverrideLevel: null
|
||||
|
||||
# Suffix of the host name, {bucket}.{domainName}
|
||||
@ -300,6 +347,19 @@ s3:
|
||||
# ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
|
||||
priorityClassName: ""
|
||||
|
||||
cronjob:
|
||||
enabled: false
|
||||
schedule: "*/7 * * * *"
|
||||
resources: null
|
||||
# balance all volumes among volume servers
|
||||
# ALL|EACH_COLLECTION|<collection_name>
|
||||
collection: ""
|
||||
master: ""
|
||||
filer: ""
|
||||
tolerations: ""
|
||||
nodeSelector: |
|
||||
sw-backend: "true"
|
||||
|
||||
certificates:
|
||||
commonName: "SeaweedFS CA"
|
||||
ipAddresses: []
|
||||
|
@ -5,7 +5,7 @@
|
||||
|
||||
<groupId>com.github.chrislusf</groupId>
|
||||
<artifactId>seaweedfs-client</artifactId>
|
||||
<version>1.2.8</version>
|
||||
<version>1.5.2</version>
|
||||
|
||||
<parent>
|
||||
<groupId>org.sonatype.oss</groupId>
|
||||
@ -65,7 +65,7 @@
|
||||
<dependency>
|
||||
<groupId>junit</groupId>
|
||||
<artifactId>junit</artifactId>
|
||||
<version>4.12</version>
|
||||
<version>4.13.1</version>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
</dependencies>
|
||||
|
170
other/java/client/pom.xml.deploy
Normal file
@ -0,0 +1,170 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
|
||||
<groupId>com.github.chrislusf</groupId>
|
||||
<artifactId>seaweedfs-client</artifactId>
|
||||
<version>1.5.2</version>
|
||||
|
||||
<parent>
|
||||
<groupId>org.sonatype.oss</groupId>
|
||||
<artifactId>oss-parent</artifactId>
|
||||
<version>9</version>
|
||||
</parent>
|
||||
|
||||
<properties>
|
||||
<protobuf.version>3.9.1</protobuf.version>
|
||||
<!-- follow https://github.com/grpc/grpc-java -->
|
||||
<grpc.version>1.23.0</grpc.version>
|
||||
<guava.version>28.0-jre</guava.version>
|
||||
</properties>
|
||||
|
||||
<dependencies>
|
||||
<dependency>
|
||||
<groupId>com.moandjiezana.toml</groupId>
|
||||
<artifactId>toml4j</artifactId>
|
||||
<version>0.7.2</version>
|
||||
</dependency>
|
||||
<!-- https://mvnrepository.com/artifact/com.google.protobuf/protobuf-java -->
|
||||
<dependency>
|
||||
<groupId>com.google.protobuf</groupId>
|
||||
<artifactId>protobuf-java</artifactId>
|
||||
<version>${protobuf.version}</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>com.google.guava</groupId>
|
||||
<artifactId>guava</artifactId>
|
||||
<version>${guava.version}</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>io.grpc</groupId>
|
||||
<artifactId>grpc-netty-shaded</artifactId>
|
||||
<version>${grpc.version}</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>io.grpc</groupId>
|
||||
<artifactId>grpc-protobuf</artifactId>
|
||||
<version>${grpc.version}</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>io.grpc</groupId>
|
||||
<artifactId>grpc-stub</artifactId>
|
||||
<version>${grpc.version}</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.slf4j</groupId>
|
||||
<artifactId>slf4j-api</artifactId>
|
||||
<version>1.7.25</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.httpcomponents</groupId>
|
||||
<artifactId>httpmime</artifactId>
|
||||
<version>4.5.6</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>junit</groupId>
|
||||
<artifactId>junit</artifactId>
|
||||
<version>4.12</version>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
</dependencies>
|
||||
|
||||
<distributionManagement>
|
||||
<snapshotRepository>
|
||||
<id>ossrh</id>
|
||||
<url>https://oss.sonatype.org/content/repositories/snapshots</url>
|
||||
</snapshotRepository>
|
||||
</distributionManagement>
|
||||
<build>
|
||||
<extensions>
|
||||
<extension>
|
||||
<groupId>kr.motd.maven</groupId>
|
||||
<artifactId>os-maven-plugin</artifactId>
|
||||
<version>1.6.2</version>
|
||||
</extension>
|
||||
</extensions>
|
||||
<plugins>
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-compiler-plugin</artifactId>
|
||||
<configuration>
|
||||
<source>8</source>
|
||||
<target>8</target>
|
||||
</configuration>
|
||||
</plugin>
|
||||
<plugin>
|
||||
<groupId>org.xolstice.maven.plugins</groupId>
|
||||
<artifactId>protobuf-maven-plugin</artifactId>
|
||||
<version>0.6.1</version>
|
||||
<configuration>
|
||||
<protocArtifact>com.google.protobuf:protoc:${protobuf.version}:exe:${os.detected.classifier}
|
||||
</protocArtifact>
|
||||
<pluginId>grpc-java</pluginId>
|
||||
<pluginArtifact>io.grpc:protoc-gen-grpc-java:${grpc.version}:exe:${os.detected.classifier}
|
||||
</pluginArtifact>
|
||||
</configuration>
|
||||
<executions>
|
||||
<execution>
|
||||
<goals>
|
||||
<goal>compile</goal>
|
||||
<goal>compile-custom</goal>
|
||||
</goals>
|
||||
</execution>
|
||||
</executions>
|
||||
</plugin>
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-gpg-plugin</artifactId>
|
||||
<version>1.5</version>
|
||||
<executions>
|
||||
<execution>
|
||||
<id>sign-artifacts</id>
|
||||
<phase>verify</phase>
|
||||
<goals>
|
||||
<goal>sign</goal>
|
||||
</goals>
|
||||
</execution>
|
||||
</executions>
|
||||
</plugin>
|
||||
<plugin>
|
||||
<groupId>org.sonatype.plugins</groupId>
|
||||
<artifactId>nexus-staging-maven-plugin</artifactId>
|
||||
<version>1.6.7</version>
|
||||
<extensions>true</extensions>
|
||||
<configuration>
|
||||
<serverId>ossrh</serverId>
|
||||
<nexusUrl>https://oss.sonatype.org/</nexusUrl>
|
||||
<autoReleaseAfterClose>true</autoReleaseAfterClose>
|
||||
</configuration>
|
||||
</plugin>
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-source-plugin</artifactId>
|
||||
<version>2.2.1</version>
|
||||
<executions>
|
||||
<execution>
|
||||
<id>attach-sources</id>
|
||||
<goals>
|
||||
<goal>jar-no-fork</goal>
|
||||
</goals>
|
||||
</execution>
|
||||
</executions>
|
||||
</plugin>
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-javadoc-plugin</artifactId>
|
||||
<version>2.9.1</version>
|
||||
<executions>
|
||||
<execution>
|
||||
<id>attach-javadocs</id>
|
||||
<goals>
|
||||
<goal>jar</goal>
|
||||
</goals>
|
||||
</execution>
|
||||
</executions>
|
||||
</plugin>
|
||||
</plugins>
|
||||
</build>
|
||||
|
||||
</project>
|
@ -5,7 +5,7 @@
|
||||
|
||||
<groupId>com.github.chrislusf</groupId>
|
||||
<artifactId>seaweedfs-client</artifactId>
|
||||
<version>1.2.8</version>
|
||||
<version>1.5.2</version>
|
||||
|
||||
<parent>
|
||||
<groupId>org.sonatype.oss</groupId>
|
||||
@ -65,7 +65,7 @@
|
||||
<dependency>
|
||||
<groupId>junit</groupId>
|
||||
<artifactId>junit</artifactId>
|
||||
<version>4.12</version>
|
||||
<version>4.13.1</version>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
</dependencies>
|
||||
|
@ -0,0 +1,41 @@
|
package seaweedfs.client;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;

public class ByteBufferPool {

    private static final int MIN_BUFFER_SIZE = 8 * 1024 * 1024;
    private static final Logger LOG = LoggerFactory.getLogger(ByteBufferPool.class);

    private static final List<ByteBuffer> bufferList = new ArrayList<>();

    public static synchronized ByteBuffer request(int bufferSize) {
        if (bufferSize < MIN_BUFFER_SIZE) {
            bufferSize = MIN_BUFFER_SIZE;
        }
        LOG.debug("requested new buffer {}", bufferSize);
        if (bufferList.isEmpty()) {
            return ByteBuffer.allocate(bufferSize);
        }
        ByteBuffer buffer = bufferList.remove(bufferList.size() - 1);
        if (buffer.capacity() >= bufferSize) {
            return buffer;
        }
        LOG.info("add new buffer from {} to {}", buffer.capacity(), bufferSize);
        bufferList.add(0, buffer);
        return ByteBuffer.allocate(bufferSize);
    }

    public static synchronized void release(ByteBuffer obj) {
        obj.clear();
        bufferList.add(0, obj);
    }

}
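A minimal usage sketch (the calling code below is hypothetical, not part of this commit): a buffer is requested per write and must be handed back via release(), otherwise the pool never grows.

    byte[] data = new byte[1024];
    ByteBuffer buf = ByteBufferPool.request(data.length); // rounded up to the 8 MB minimum
    try {
        buf.put(data);
    } finally {
        ByteBufferPool.release(buf); // clears the buffer and returns it to the pool
    }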
@ -7,20 +7,30 @@ import java.util.concurrent.TimeUnit;
|
||||
|
||||
public class ChunkCache {
|
||||
|
||||
private final Cache<String, byte[]> cache;
|
||||
private Cache<String, byte[]> cache = null;
|
||||
|
||||
public ChunkCache(int maxEntries) {
|
||||
if (maxEntries == 0) {
|
||||
return;
|
||||
}
|
||||
this.cache = CacheBuilder.newBuilder()
|
||||
.maximumSize(maxEntries)
|
||||
.weakValues()
|
||||
.expireAfterAccess(1, TimeUnit.HOURS)
|
||||
.build();
|
||||
}
|
||||
|
||||
public byte[] getChunk(String fileId) {
|
||||
if (this.cache == null) {
|
||||
return null;
|
||||
}
|
||||
return this.cache.getIfPresent(fileId);
|
||||
}
|
||||
|
||||
public void setChunk(String fileId, byte[] data) {
|
||||
if (this.cache == null) {
|
||||
return;
|
||||
}
|
||||
this.cache.put(fileId, data);
|
||||
}
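A short illustration of the new null guard (hypothetical caller code): constructing the cache with maxEntries == 0 leaves the Guava cache unset, so both getChunk and setChunk degrade to no-ops.

    ChunkCache disabled = new ChunkCache(0);
    disabled.setChunk("3,1637037d96ea7fb", new byte[]{1, 2, 3}); // silently ignored
    assert disabled.getChunk("3,1637037d96ea7fb") == null;       // always a cache miss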
|
||||
|
||||
|
@ -0,0 +1,140 @@
package seaweedfs.client;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

public class FileChunkManifest {

    private static final Logger LOG = LoggerFactory.getLogger(FileChunkManifest.class);

    private static final int mergeFactor = 1000;

    public static boolean hasChunkManifest(List<FilerProto.FileChunk> chunks) {
        for (FilerProto.FileChunk chunk : chunks) {
            if (chunk.getIsChunkManifest()) {
                return true;
            }
        }
        return false;
    }

    public static List<FilerProto.FileChunk> resolveChunkManifest(
            final FilerGrpcClient filerGrpcClient, List<FilerProto.FileChunk> chunks) throws IOException {

        List<FilerProto.FileChunk> dataChunks = new ArrayList<>();

        for (FilerProto.FileChunk chunk : chunks) {
            if (!chunk.getIsChunkManifest()) {
                dataChunks.add(chunk);
                continue;
            }

            // IsChunkManifest
            LOG.debug("fetching chunk manifest:{}", chunk);
            byte[] data = fetchChunk(filerGrpcClient, chunk);
            FilerProto.FileChunkManifest m = FilerProto.FileChunkManifest.newBuilder().mergeFrom(data).build();
            List<FilerProto.FileChunk> resolvedChunks = new ArrayList<>();
            for (FilerProto.FileChunk t : m.getChunksList()) {
                // avoid deprecated chunk.getFileId()
                resolvedChunks.add(t.toBuilder().setFileId(FilerClient.toFileId(t.getFid())).build());
            }
            dataChunks.addAll(resolveChunkManifest(filerGrpcClient, resolvedChunks));
        }

        return dataChunks;
    }

    private static byte[] fetchChunk(final FilerGrpcClient filerGrpcClient, FilerProto.FileChunk chunk) throws IOException {

        String vid = "" + chunk.getFid().getVolumeId();
        FilerProto.Locations locations = filerGrpcClient.vidLocations.get(vid);
        if (locations == null) {
            FilerProto.LookupVolumeRequest.Builder lookupRequest = FilerProto.LookupVolumeRequest.newBuilder();
            lookupRequest.addVolumeIds(vid);
            FilerProto.LookupVolumeResponse lookupResponse = filerGrpcClient
                    .getBlockingStub().lookupVolume(lookupRequest.build());
            locations = lookupResponse.getLocationsMapMap().get(vid);
            filerGrpcClient.vidLocations.put(vid, locations);
            LOG.debug("fetchChunk vid:{} locations:{}", vid, locations);
        }

        SeaweedRead.ChunkView chunkView = new SeaweedRead.ChunkView(
                FilerClient.toFileId(chunk.getFid()), // avoid deprecated chunk.getFileId()
                0,
                -1,
                0,
                true,
                chunk.getCipherKey().toByteArray(),
                chunk.getIsCompressed());

        byte[] chunkData = SeaweedRead.chunkCache.getChunk(chunkView.fileId);
        if (chunkData == null) {
            LOG.debug("doFetchFullChunkData:{}", chunkView);
            chunkData = SeaweedRead.doFetchFullChunkData(chunkView, locations);
        }
        if (chunk.getIsChunkManifest()) {
            LOG.debug("chunk {} size {}", chunkView.fileId, chunkData.length);
            SeaweedRead.chunkCache.setChunk(chunkView.fileId, chunkData);
        }

        return chunkData;

    }

    public static List<FilerProto.FileChunk> maybeManifestize(
            final FilerGrpcClient filerGrpcClient, List<FilerProto.FileChunk> inputChunks, String parentDirectory) throws IOException {
        // the return variable
        List<FilerProto.FileChunk> chunks = new ArrayList<>();

        List<FilerProto.FileChunk> dataChunks = new ArrayList<>();
        for (FilerProto.FileChunk chunk : inputChunks) {
            if (!chunk.getIsChunkManifest()) {
                dataChunks.add(chunk);
            } else {
                chunks.add(chunk);
            }
        }

        int remaining = dataChunks.size();
        for (int i = 0; i + mergeFactor < dataChunks.size(); i += mergeFactor) {
            FilerProto.FileChunk chunk = mergeIntoManifest(filerGrpcClient, dataChunks.subList(i, i + mergeFactor), parentDirectory);
            chunks.add(chunk);
            remaining -= mergeFactor;
        }

        // remaining
        for (int i = dataChunks.size() - remaining; i < dataChunks.size(); i++) {
            chunks.add(dataChunks.get(i));
        }
        return chunks;
    }

    private static FilerProto.FileChunk mergeIntoManifest(final FilerGrpcClient filerGrpcClient, List<FilerProto.FileChunk> dataChunks, String parentDirectory) throws IOException {
        // create and serialize the manifest
        dataChunks = FilerClient.beforeEntrySerialization(dataChunks);
        FilerProto.FileChunkManifest.Builder m = FilerProto.FileChunkManifest.newBuilder().addAllChunks(dataChunks);
        byte[] data = m.build().toByteArray();

        long minOffset = Long.MAX_VALUE;
        long maxOffset = -1;
        for (FilerProto.FileChunk chunk : dataChunks) {
            minOffset = Math.min(minOffset, chunk.getOffset());
            maxOffset = Math.max(maxOffset, chunk.getSize() + chunk.getOffset());
        }

        FilerProto.FileChunk.Builder manifestChunk = SeaweedWrite.writeChunk(
                filerGrpcClient.getReplication(),
                filerGrpcClient,
                minOffset,
                data, 0, data.length, parentDirectory);
        manifestChunk.setIsChunkManifest(true);
        manifestChunk.setSize(maxOffset - minOffset);
        return manifestChunk.build();

    }

}
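A hedged usage sketch (filerGrpcClient and entryChunks are placeholder variables, and the calls must run where IOException is handled): on the write path long chunk lists are collapsed into manifest chunks, and on the read path manifests are expanded back into plain data chunks.

    // write path: every 1000 data chunks collapse into one manifest chunk
    List<FilerProto.FileChunk> stored =
            FileChunkManifest.maybeManifestize(filerGrpcClient, entryChunks, "/buckets/example");

    // read path: manifest chunks are fetched, cached, and expanded recursively
    List<FilerProto.FileChunk> dataChunks =
            FileChunkManifest.resolveChunkManifest(filerGrpcClient, stored);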
@ -1,5 +1,6 @@
package seaweedfs.client;

import com.google.common.base.Strings;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@ -24,6 +25,67 @@ public class FilerClient {
        this.filerGrpcClient = filerGrpcClient;
    }

    public static String toFileId(FilerProto.FileId fid) {
        if (fid == null) {
            return null;
        }
        return String.format("%d,%x%08x", fid.getVolumeId(), fid.getFileKey(), fid.getCookie());
    }

    public static FilerProto.FileId toFileIdObject(String fileIdStr) {
        if (fileIdStr == null || fileIdStr.length() == 0) {
            return null;
        }
        int commaIndex = fileIdStr.lastIndexOf(',');
        String volumeIdStr = fileIdStr.substring(0, commaIndex);
        String fileKeyStr = fileIdStr.substring(commaIndex + 1, fileIdStr.length() - 8);
        String cookieStr = fileIdStr.substring(fileIdStr.length() - 8);

        return FilerProto.FileId.newBuilder()
                .setVolumeId(Integer.parseInt(volumeIdStr))
                .setFileKey(Long.parseLong(fileKeyStr, 16))
                .setCookie((int) Long.parseLong(cookieStr, 16))
                .build();
    }

    public static List<FilerProto.FileChunk> beforeEntrySerialization(List<FilerProto.FileChunk> chunks) {
        List<FilerProto.FileChunk> cleanedChunks = new ArrayList<>();
        for (FilerProto.FileChunk chunk : chunks) {
            FilerProto.FileChunk.Builder chunkBuilder = chunk.toBuilder();
            chunkBuilder.clearFileId();
            chunkBuilder.clearSourceFileId();
            chunkBuilder.setFid(toFileIdObject(chunk.getFileId()));
            FilerProto.FileId sourceFid = toFileIdObject(chunk.getSourceFileId());
            if (sourceFid != null) {
                chunkBuilder.setSourceFid(sourceFid);
            }
            cleanedChunks.add(chunkBuilder.build());
        }
        return cleanedChunks;
    }

    public static FilerProto.Entry afterEntryDeserialization(FilerProto.Entry entry) {
        if (entry.getChunksList().size() <= 0) {
            return entry;
        }
        String fileId = entry.getChunks(0).getFileId();
        if (fileId != null && fileId.length() != 0) {
            return entry;
        }
        FilerProto.Entry.Builder entryBuilder = entry.toBuilder();
        entryBuilder.clearChunks();
        for (FilerProto.FileChunk chunk : entry.getChunksList()) {
            FilerProto.FileChunk.Builder chunkBuilder = chunk.toBuilder();
            chunkBuilder.setFileId(toFileId(chunk.getFid()));
            String sourceFileId = toFileId(chunk.getSourceFid());
            if (sourceFileId != null) {
                chunkBuilder.setSourceFileId(sourceFileId);
            }
            entryBuilder.addChunks(chunkBuilder);
        }
        return entryBuilder.build();
    }

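The two helpers above define the string form of a file id that the rest of the client relies on. A minimal round-trip sketch (the id values are made up):

    FilerProto.FileId fid = FilerProto.FileId.newBuilder()
            .setVolumeId(3)
            .setFileKey(0x2ecL)
            .setCookie(0x637037d6)
            .build();
    String s = FilerClient.toFileId(fid);            // "3,2ec637037d6"
    FilerProto.FileId back = FilerClient.toFileIdObject(s);
    // The cookie is always rendered as exactly 8 hex digits (%08x), which is
    // what allows toFileIdObject to split the key and cookie from the right.
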
    public boolean mkdirs(String path, int mode) {
        String currentUser = System.getProperty("user.name");
        return mkdirs(path, mode, 0, 0, currentUser, new String[]{});
@ -156,7 +218,7 @@ public class FilerClient {
        List<FilerProto.Entry> results = new ArrayList<FilerProto.Entry>();
        String lastFileName = "";
        for (int limit = Integer.MAX_VALUE; limit > 0; ) {
            List<FilerProto.Entry> t = listEntries(path, "", lastFileName, 1024);
            List<FilerProto.Entry> t = listEntries(path, "", lastFileName, 1024, false);
            if (t == null) {
                break;
            }
@ -173,17 +235,18 @@ public class FilerClient {
        return results;
    }

    public List<FilerProto.Entry> listEntries(String path, String entryPrefix, String lastEntryName, int limit) {
    public List<FilerProto.Entry> listEntries(String path, String entryPrefix, String lastEntryName, int limit, boolean includeLastEntry) {
        Iterator<FilerProto.ListEntriesResponse> iter = filerGrpcClient.getBlockingStub().listEntries(FilerProto.ListEntriesRequest.newBuilder()
                .setDirectory(path)
                .setPrefix(entryPrefix)
                .setStartFromFileName(lastEntryName)
                .setInclusiveStartFrom(includeLastEntry)
                .setLimit(limit)
                .build());
        List<FilerProto.Entry> entries = new ArrayList<>();
        while (iter.hasNext()) {
            FilerProto.ListEntriesResponse resp = iter.next();
            entries.add(fixEntryAfterReading(resp.getEntry()));
            entries.add(afterEntryDeserialization(resp.getEntry()));
        }
        return entries;
    }
@ -198,7 +261,7 @@ public class FilerClient {
            if (entry == null) {
                return null;
            }
            return fixEntryAfterReading(entry);
            return afterEntryDeserialization(entry);
        } catch (Exception e) {
            if (e.getMessage().indexOf("filer: no entry is found in filer store") > 0) {
                return null;
@ -208,18 +271,22 @@ public class FilerClient {
        }
    }


    public boolean createEntry(String parent, FilerProto.Entry entry) {
        try {
            filerGrpcClient.getBlockingStub().createEntry(FilerProto.CreateEntryRequest.newBuilder()
            FilerProto.CreateEntryResponse createEntryResponse =
                    filerGrpcClient.getBlockingStub().createEntry(FilerProto.CreateEntryRequest.newBuilder()
                            .setDirectory(parent)
                            .setEntry(entry)
                            .build());
            if (Strings.isNullOrEmpty(createEntryResponse.getError())) {
                return true;
            }
            LOG.warn("createEntry {}/{} error: {}", parent, entry.getName(), createEntryResponse.getError());
            return false;
        } catch (Exception e) {
            LOG.warn("createEntry {}/{}: {}", parent, entry.getName(), e);
            return false;
        }
        return true;
    }

    public boolean updateEntry(String parent, FilerProto.Entry entry) {
@ -229,7 +296,7 @@ public class FilerClient {
                    .setEntry(entry)
                    .build());
        } catch (Exception e) {
            LOG.warn("createEntry {}/{}: {}", parent, entry.getName(), e);
            LOG.warn("updateEntry {}/{}: {}", parent, entry.getName(), e);
            return false;
        }
        return true;
@ -266,24 +333,4 @@ public class FilerClient {
        return true;
    }

    private FilerProto.Entry fixEntryAfterReading(FilerProto.Entry entry) {
        if (entry.getChunksList().size() <= 0) {
            return entry;
        }
        String fileId = entry.getChunks(0).getFileId();
        if (fileId != null && fileId.length() != 0) {
            return entry;
        }
        FilerProto.Entry.Builder entryBuilder = entry.toBuilder();
        entryBuilder.clearChunks();
        for (FilerProto.FileChunk chunk : entry.getChunksList()) {
            FilerProto.FileChunk.Builder chunkBuilder = chunk.toBuilder();
            FilerProto.FileId fid = chunk.getFid();
            fileId = String.format("%d,%d%x", fid.getVolumeId(), fid.getFileKey(), fid.getCookie());
            chunkBuilder.setFileId(fileId);
            entryBuilder.addChunks(chunkBuilder);
        }
        return entryBuilder.build();
    }

}
@ -9,6 +9,8 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import javax.net.ssl.SSLException;
import java.util.Map;
import java.util.HashMap;
import java.util.concurrent.TimeUnit;

public class FilerGrpcClient {
@ -24,6 +26,7 @@ public class FilerGrpcClient {
        }
    }

    public final Map<String, FilerProto.Locations> vidLocations = new HashMap<>();
    private final ManagedChannel channel;
    private final SeaweedFilerGrpc.SeaweedFilerBlockingStub blockingStub;
    private final SeaweedFilerGrpc.SeaweedFilerStub asyncStub;
@ -39,8 +42,10 @@ public class FilerGrpcClient {
    public FilerGrpcClient(String host, int grpcPort, SslContext sslContext) {

        this(sslContext == null ?
                ManagedChannelBuilder.forAddress(host, grpcPort).usePlaintext() :
                ManagedChannelBuilder.forAddress(host, grpcPort).usePlaintext()
                        .maxInboundMessageSize(1024 * 1024 * 1024) :
                NettyChannelBuilder.forAddress(host, grpcPort)
                        .maxInboundMessageSize(1024 * 1024 * 1024)
                        .negotiationType(NegotiationType.TLS)
                        .sslContext(sslContext));

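The builder changes above matter because grpc-java caps inbound messages at 4 MiB by default, so a large LookupVolume or ListEntries response would otherwise fail with RESOURCE_EXHAUSTED. A minimal sketch of the same idea in isolation (host and port are placeholders):

    ManagedChannel channel = ManagedChannelBuilder
            .forAddress("localhost", 18888)            // hypothetical filer gRPC address
            .usePlaintext()
            .maxInboundMessageSize(1024 * 1024 * 1024) // 1 GiB, as in the diff
            .build();
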
@ -18,14 +18,18 @@ public class Gzip {
        return compressed;
    }

    public static byte[] decompress(byte[] compressed) throws IOException {
        ByteArrayInputStream bis = new ByteArrayInputStream(compressed);
        GZIPInputStream gis = new GZIPInputStream(bis);
        return readAll(gis);
    public static byte[] decompress(byte[] compressed) {
        try {
            ByteArrayInputStream bis = new ByteArrayInputStream(compressed);
            GZIPInputStream gis = new GZIPInputStream(bis);
            return readAll(gis);
        } catch (Exception e) {
            return compressed;
        }
    }

    private static byte[] readAll(InputStream input) throws IOException {
        try( ByteArrayOutputStream output = new ByteArrayOutputStream()){
        try (ByteArrayOutputStream output = new ByteArrayOutputStream()) {
            byte[] buffer = new byte[4096];
            int n;
            while (-1 != (n = input.read(buffer))) {
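With the new signature, decompress swallows the stream error and hands back the input unchanged, so callers can pass possibly-compressed bytes through it unconditionally. A small usage sketch (assuming the enclosing method declares throws IOException for compress):

    byte[] plain = "hello".getBytes();
    byte[] roundTrip = Gzip.decompress(Gzip.compress(plain)); // back to "hello"
    byte[] passThrough = Gzip.decompress(plain);              // not gzipped: returned as-is
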
@ -1,16 +1,16 @@
package seaweedfs.client;

import org.apache.http.Header;
import org.apache.http.HeaderElement;
import org.apache.http.HttpEntity;
import org.apache.http.HttpHeaders;
import org.apache.http.HttpResponse;
import org.apache.http.client.HttpClient;
import org.apache.http.client.entity.GzipDecompressingEntity;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.DefaultHttpClient;
import org.apache.http.util.EntityUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.Closeable;
import java.io.IOException;
import java.util.*;

@ -18,12 +18,12 @@ public class SeaweedRead {

    private static final Logger LOG = LoggerFactory.getLogger(SeaweedRead.class);

    static ChunkCache chunkCache = new ChunkCache(1000);
    static ChunkCache chunkCache = new ChunkCache(4);

    // returns bytesRead
    public static long read(FilerGrpcClient filerGrpcClient, List<VisibleInterval> visibleIntervals,
                            final long position, final byte[] buffer, final int bufferOffset,
                            final int bufferLength) throws IOException {
                            final int bufferLength, final long fileSize) throws IOException {

        List<ChunkView> chunkViews = viewFromVisibles(visibleIntervals, position, bufferLength);

@ -40,67 +40,128 @@ public class SeaweedRead {

        //TODO parallel this
        long readCount = 0;
        int startOffset = bufferOffset;
        long startOffset = position;
        for (ChunkView chunkView : chunkViews) {

            if (startOffset < chunkView.logicOffset) {
                long gap = chunkView.logicOffset - startOffset;
                LOG.debug("zero [{},{})", startOffset, startOffset + gap);
                readCount += gap;
                startOffset += gap;
            }

            FilerProto.Locations locations = vid2Locations.get(parseVolumeId(chunkView.fileId));
            if (locations.getLocationsCount() == 0) {
            if (locations == null || locations.getLocationsCount() == 0) {
                LOG.error("failed to locate {}", chunkView.fileId);
                // log here!
                return 0;
            }

            int len = readChunkView(position, buffer, startOffset, chunkView, locations);
            int len = readChunkView(startOffset, buffer, bufferOffset + readCount, chunkView, locations);

            LOG.debug("read [{},{}) {} size {}", startOffset, startOffset + len, chunkView.fileId, chunkView.size);

            readCount += len;
            startOffset += len;

        }

        long limit = Math.min(bufferOffset + bufferLength, fileSize);

        if (startOffset < limit) {
            long gap = limit - startOffset;
            LOG.debug("zero2 [{},{})", startOffset, startOffset + gap);
            readCount += gap;
            startOffset += gap;
        }

        return readCount;
    }

    private static int readChunkView(long position, byte[] buffer, int startOffset, ChunkView chunkView, FilerProto.Locations locations) throws IOException {
    private static int readChunkView(long startOffset, byte[] buffer, long bufOffset, ChunkView chunkView, FilerProto.Locations locations) throws IOException {

        byte[] chunkData = chunkCache.getChunk(chunkView.fileId);

        if (chunkData == null) {
            chunkData = doFetchFullChunkData(chunkView, locations);
            chunkCache.setChunk(chunkView.fileId, chunkData);
        }

        int len = (int) chunkView.size;
        LOG.debug("readChunkView fid:{} chunkData.length:{} chunkView.offset:{} buffer.length:{} startOffset:{} len:{}",
                chunkView.fileId, chunkData.length, chunkView.offset, buffer.length, startOffset, len);
        System.arraycopy(chunkData, (int) chunkView.offset, buffer, startOffset, len);

        chunkCache.setChunk(chunkView.fileId, chunkData);
        LOG.debug("readChunkView fid:{} chunkData.length:{} chunkView.offset:{} chunkView[{};{}) buf[{},{})/{} startOffset:{}",
                chunkView.fileId, chunkData.length, chunkView.offset, chunkView.logicOffset, chunkView.logicOffset + chunkView.size, bufOffset, bufOffset + len, buffer.length, startOffset);
        System.arraycopy(chunkData, (int) (startOffset - chunkView.logicOffset + chunkView.offset), buffer, (int) bufOffset, len);

        return len;
    }

    private static byte[] doFetchFullChunkData(ChunkView chunkView, FilerProto.Locations locations) throws IOException {

        HttpClient client = new DefaultHttpClient();
        HttpGet request = new HttpGet(
                String.format("http://%s/%s", locations.getLocations(0).getUrl(), chunkView.fileId));

        request.setHeader(HttpHeaders.ACCEPT_ENCODING, "");
    public static byte[] doFetchFullChunkData(ChunkView chunkView, FilerProto.Locations locations) throws IOException {

        byte[] data = null;

        try {
            HttpResponse response = client.execute(request);
            HttpEntity entity = response.getEntity();

            data = EntityUtils.toByteArray(entity);

        } finally {
            if (client instanceof Closeable) {
                Closeable t = (Closeable) client;
                t.close();
        IOException lastException = null;
        for (long waitTime = 1000L; waitTime < 10 * 1000; waitTime += waitTime / 2) {
            for (FilerProto.Location location : locations.getLocationsList()) {
                String url = String.format("http://%s/%s", location.getUrl(), chunkView.fileId);
                try {
                    data = doFetchOneFullChunkData(chunkView, url);
                    lastException = null;
                    break;
                } catch (IOException ioe) {
                    LOG.debug("doFetchFullChunkData {} :{}", url, ioe);
                    lastException = ioe;
                }
            }
            if (data != null) {
                break;
            }
            try {
                Thread.sleep(waitTime);
            } catch (InterruptedException e) {
            }
        }

        if (chunkView.isGzipped) {
            data = Gzip.decompress(data);
        if (lastException != null) {
            throw lastException;
        }

        LOG.debug("doFetchFullChunkData fid:{} chunkData.length:{}", chunkView.fileId, data.length);

        return data;

    }

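The retry loop above grows its sleep by half each round (waitTime += waitTime / 2), trying every replica location before sleeping. Printed out, the schedule is six rounds:

    // 1000, 1500, 2250, 3375, 5062, 7593 ms, then the loop exits at >= 10s.
    for (long waitTime = 1000L; waitTime < 10 * 1000; waitTime += waitTime / 2) {
        System.out.println(waitTime);
    }
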
    public static byte[] doFetchOneFullChunkData(ChunkView chunkView, String url) throws IOException {

        HttpGet request = new HttpGet(url);

        request.setHeader(HttpHeaders.ACCEPT_ENCODING, "gzip");

        byte[] data = null;

        CloseableHttpResponse response = SeaweedUtil.getClosableHttpClient().execute(request);

        try {
            HttpEntity entity = response.getEntity();

            Header contentEncodingHeader = entity.getContentEncoding();

            if (contentEncodingHeader != null) {
                HeaderElement[] encodings = contentEncodingHeader.getElements();
                for (int i = 0; i < encodings.length; i++) {
                    if (encodings[i].getName().equalsIgnoreCase("gzip")) {
                        entity = new GzipDecompressingEntity(entity);
                        break;
                    }
                }
            }

            data = EntityUtils.toByteArray(entity);

            EntityUtils.consume(entity);

        } finally {
            response.close();
            request.releaseConnection();
        }

        if (chunkView.cipherKey != null && chunkView.cipherKey.length != 0) {
@ -111,6 +172,12 @@ public class SeaweedRead {
            }
        }

        if (chunkView.isCompressed) {
            data = Gzip.decompress(data);
        }

        LOG.debug("doFetchOneFullChunkData url:{} chunkData.length:{}", url, data.length);

        return data;

    }
@ -120,29 +187,40 @@ public class SeaweedRead {

        long stop = offset + size;
        for (VisibleInterval chunk : visibleIntervals) {
            if (chunk.start <= offset && offset < chunk.stop && offset < stop) {
            long chunkStart = Math.max(offset, chunk.start);
            long chunkStop = Math.min(stop, chunk.stop);
            if (chunkStart < chunkStop) {
                boolean isFullChunk = chunk.isFullChunk && chunk.start == offset && chunk.stop <= stop;
                views.add(new ChunkView(
                        chunk.fileId,
                        offset - chunk.start,
                        Math.min(chunk.stop, stop) - offset,
                        offset,
                        chunkStart - chunk.start + chunk.chunkOffset,
                        chunkStop - chunkStart,
                        chunkStart,
                        isFullChunk,
                        chunk.cipherKey,
                        chunk.isGzipped
                        chunk.isCompressed
                ));
                offset = Math.min(chunk.stop, stop);
            }
        }
        return views;
    }

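A worked example of the clipping math above, with made-up numbers: suppose one visible interval covers file range [100, 300) with chunkOffset 0, and the caller asks for [250, 400):

    long offset = 250, stop = 400;                    // requested file range
    long vStart = 100, vStop = 300, vChunkOffset = 0; // the visible interval
    long chunkStart = Math.max(offset, vStart);       // 250
    long chunkStop = Math.min(stop, vStop);           // 300
    long offsetInChunk = chunkStart - vStart + vChunkOffset; // read at byte 150 of the chunk
    long size = chunkStop - chunkStart;               // 50 bytes, logicOffset 250
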
    public static List<VisibleInterval> nonOverlappingVisibleIntervals(List<FilerProto.FileChunk> chunkList) {
    public static List<VisibleInterval> nonOverlappingVisibleIntervals(
            final FilerGrpcClient filerGrpcClient, List<FilerProto.FileChunk> chunkList) throws IOException {

        chunkList = FileChunkManifest.resolveChunkManifest(filerGrpcClient, chunkList);

        FilerProto.FileChunk[] chunks = chunkList.toArray(new FilerProto.FileChunk[0]);
        Arrays.sort(chunks, new Comparator<FilerProto.FileChunk>() {
            @Override
            public int compare(FilerProto.FileChunk a, FilerProto.FileChunk b) {
                return (int) (a.getMtime() - b.getMtime());
                // if just a.getMtime() - b.getMtime(), it will overflow!
                if (a.getMtime() < b.getMtime()) {
                    return -1;
                } else if (a.getMtime() > b.getMtime()) {
                    return 1;
                }
                return 0;
            }
        });

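The comment in the new comparator is worth spelling out: casting a long difference to int silently wraps once two mtimes differ by more than Integer.MAX_VALUE, which can reverse the sort order. A two-line demonstration:

    long a = 0L, b = 3_000_000_000L;        // b is newer by ~3e9
    System.out.println((int) (a - b));      // 1294967296, wrongly positive ("a after b")
    System.out.println(Long.compare(a, b)); // -1, the correct ordering
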
@ -163,9 +241,10 @@ public class SeaweedRead {
                    chunk.getOffset() + chunk.getSize(),
                    chunk.getFileId(),
                    chunk.getMtime(),
                    0,
                    true,
                    chunk.getCipherKey().toByteArray(),
                    chunk.getIsGzipped()
                    chunk.getIsCompressed()
            );

            // easy cases to speed up
@ -185,9 +264,10 @@ public class SeaweedRead {
                        chunk.getOffset(),
                        v.fileId,
                        v.modifiedTime,
                        v.chunkOffset,
                        false,
                        v.cipherKey,
                        v.isGzipped
                        v.isCompressed
                ));
            }
            long chunkStop = chunk.getOffset() + chunk.getSize();
@ -197,9 +277,10 @@ public class SeaweedRead {
                        v.stop,
                        v.fileId,
                        v.modifiedTime,
                        v.chunkOffset + (chunkStop - v.start),
                        false,
                        v.cipherKey,
                        v.isGzipped
                        v.isCompressed
                ));
            }
            if (chunkStop <= v.start || v.stop <= chunk.getOffset()) {
@ -229,6 +310,10 @@ public class SeaweedRead {
        return fileId;
    }

    public static long fileSize(FilerProto.Entry entry) {
        return Math.max(totalSize(entry.getChunksList()), entry.getAttributes().getFileSize());
    }

    public static long totalSize(List<FilerProto.FileChunk> chunksList) {
        long size = 0;
        for (FilerProto.FileChunk chunk : chunksList) {
@ -245,18 +330,20 @@ public class SeaweedRead {
        public final long stop;
        public final long modifiedTime;
        public final String fileId;
        public final long chunkOffset;
        public final boolean isFullChunk;
        public final byte[] cipherKey;
        public final boolean isGzipped;
        public final boolean isCompressed;

        public VisibleInterval(long start, long stop, String fileId, long modifiedTime, boolean isFullChunk, byte[] cipherKey, boolean isGzipped) {
        public VisibleInterval(long start, long stop, String fileId, long modifiedTime, long chunkOffset, boolean isFullChunk, byte[] cipherKey, boolean isCompressed) {
            this.start = start;
            this.stop = stop;
            this.modifiedTime = modifiedTime;
            this.fileId = fileId;
            this.chunkOffset = chunkOffset;
            this.isFullChunk = isFullChunk;
            this.cipherKey = cipherKey;
            this.isGzipped = isGzipped;
            this.isCompressed = isCompressed;
        }

        @Override
@ -268,7 +355,7 @@ public class SeaweedRead {
                    ", fileId='" + fileId + '\'' +
                    ", isFullChunk=" + isFullChunk +
                    ", cipherKey=" + Arrays.toString(cipherKey) +
                    ", isGzipped=" + isGzipped +
                    ", isCompressed=" + isCompressed +
                    '}';
        }
    }
@ -280,16 +367,16 @@ public class SeaweedRead {
        public final long logicOffset;
        public final boolean isFullChunk;
        public final byte[] cipherKey;
        public final boolean isGzipped;
        public final boolean isCompressed;

        public ChunkView(String fileId, long offset, long size, long logicOffset, boolean isFullChunk, byte[] cipherKey, boolean isGzipped) {
        public ChunkView(String fileId, long offset, long size, long logicOffset, boolean isFullChunk, byte[] cipherKey, boolean isCompressed) {
            this.fileId = fileId;
            this.offset = offset;
            this.size = size;
            this.logicOffset = logicOffset;
            this.isFullChunk = isFullChunk;
            this.cipherKey = cipherKey;
            this.isGzipped = isGzipped;
            this.isCompressed = isCompressed;
        }

        @Override
@ -301,7 +388,7 @@ public class SeaweedRead {
                    ", logicOffset=" + logicOffset +
                    ", isFullChunk=" + isFullChunk +
                    ", cipherKey=" + Arrays.toString(cipherKey) +
                    ", isGzipped=" + isGzipped +
                    ", isCompressed=" + isCompressed +
                    '}';
        }
    }

@ -0,0 +1,30 @@
package seaweedfs.client;

import org.apache.http.impl.DefaultConnectionReuseStrategy;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.DefaultConnectionKeepAliveStrategy;
import org.apache.http.impl.client.HttpClientBuilder;
import org.apache.http.impl.conn.PoolingHttpClientConnectionManager;

public class SeaweedUtil {

    static PoolingHttpClientConnectionManager cm = new PoolingHttpClientConnectionManager();
    static CloseableHttpClient httpClient;

    static {
        // Increase max total connection to 200
        cm.setMaxTotal(200);
        // Increase default max connection per route to 20
        cm.setDefaultMaxPerRoute(20);

        httpClient = HttpClientBuilder.create()
                .setConnectionManager(cm)
                .setConnectionReuseStrategy(DefaultConnectionReuseStrategy.INSTANCE)
                .setKeepAliveStrategy(DefaultConnectionKeepAliveStrategy.INSTANCE)
                .build();
    }

    public static CloseableHttpClient getClosableHttpClient() {
        return httpClient;
    }
}
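The rest of this commit switches from per-request DefaultHttpClient instances to this shared pooled client. The usage pattern is to close the response and release the request, but never close the client itself, e.g. (URL is a placeholder):

    HttpGet request = new HttpGet("http://127.0.0.1:8080/3,2ec637037d6");
    CloseableHttpResponse response = SeaweedUtil.getClosableHttpClient().execute(request);
    try {
        byte[] body = EntityUtils.toByteArray(response.getEntity());
    } finally {
        response.close();
        request.releaseConnection();
    }
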
@ -1,35 +1,54 @@
package seaweedfs.client;

import com.google.protobuf.ByteString;
import org.apache.http.HttpResponse;
import org.apache.http.client.HttpClient;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.entity.mime.HttpMultipartMode;
import org.apache.http.entity.mime.MultipartEntityBuilder;
import org.apache.http.impl.client.DefaultHttpClient;
import org.apache.http.util.EntityUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.ByteArrayInputStream;
import java.io.Closeable;
import java.io.IOException;
import java.io.InputStream;
import java.security.SecureRandom;
import java.util.List;

public class SeaweedWrite {

    private static SecureRandom random = new SecureRandom();
    private static final Logger LOG = LoggerFactory.getLogger(SeaweedWrite.class);

    private static final SecureRandom random = new SecureRandom();

    public static void writeData(FilerProto.Entry.Builder entry,
                                 final String replication,
                                 final FilerGrpcClient filerGrpcClient,
                                 final long offset,
                                 final byte[] bytes,
                                 final long bytesOffset, final long bytesLength) throws IOException {
                                 final long bytesOffset, final long bytesLength,
                                 final String path) throws IOException {
        FilerProto.FileChunk.Builder chunkBuilder = writeChunk(
                replication, filerGrpcClient, offset, bytes, bytesOffset, bytesLength, path);
        synchronized (entry) {
            entry.addChunks(chunkBuilder);
        }
    }

    public static FilerProto.FileChunk.Builder writeChunk(final String replication,
                                                          final FilerGrpcClient filerGrpcClient,
                                                          final long offset,
                                                          final byte[] bytes,
                                                          final long bytesOffset,
                                                          final long bytesLength,
                                                          final String path) throws IOException {
        FilerProto.AssignVolumeResponse response = filerGrpcClient.getBlockingStub().assignVolume(
                FilerProto.AssignVolumeRequest.newBuilder()
                        .setCollection(filerGrpcClient.getCollection())
                        .setReplication(replication == null ? filerGrpcClient.getReplication() : replication)
                        .setDataCenter("")
                        .setTtlSec(0)
                        .setPath(path)
                        .build());
        String fileId = response.getFileId();
        String url = response.getUrl();
@ -45,28 +64,32 @@ public class SeaweedWrite {

        String etag = multipartUpload(targetUrl, auth, bytes, bytesOffset, bytesLength, cipherKey);

        // cache fileId ~ bytes
        SeaweedRead.chunkCache.setChunk(fileId, bytes);
        LOG.debug("write file chunk {} size {}", targetUrl, bytesLength);

        entry.addChunks(FilerProto.FileChunk.newBuilder()
        return FilerProto.FileChunk.newBuilder()
                .setFileId(fileId)
                .setOffset(offset)
                .setSize(bytesLength)
                .setMtime(System.currentTimeMillis() / 10000L)
                .setETag(etag)
                .setCipherKey(cipherKeyString)
        );

                .setCipherKey(cipherKeyString);
    }

    public static void writeMeta(final FilerGrpcClient filerGrpcClient,
                                 final String parentDirectory, final FilerProto.Entry.Builder entry) {
        filerGrpcClient.getBlockingStub().createEntry(
                FilerProto.CreateEntryRequest.newBuilder()
                        .setDirectory(parentDirectory)
                        .setEntry(entry)
                        .build()
        );
                                 final String parentDirectory,
                                 final FilerProto.Entry.Builder entry) throws IOException {

        synchronized (entry) {
            List<FilerProto.FileChunk> chunks = FileChunkManifest.maybeManifestize(filerGrpcClient, entry.getChunksList(), parentDirectory);
            entry.clearChunks();
            entry.addAllChunks(chunks);
            filerGrpcClient.getBlockingStub().createEntry(
                    FilerProto.CreateEntryRequest.newBuilder()
                            .setDirectory(parentDirectory)
                            .setEntry(entry)
                            .build()
            );
        }
    }

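Putting the refactored pieces together, a hedged sketch of the new write path using only the signatures visible in this diff (directory, replication, and content are made up, and filerGrpcClient is assumed to be a connected client):

    FilerProto.Entry.Builder entry = FilerProto.Entry.newBuilder().setName("hello.txt");
    byte[] bytes = "hello world".getBytes();
    // writeChunk uploads the bytes and returns a chunk builder; writeData appends
    // it to the entry under the entry lock, so concurrent writers can share one builder.
    SeaweedWrite.writeData(entry, "000", filerGrpcClient, 0, bytes, 0, bytes.length, "/buckets/demo/hello.txt");
    // writeMeta first folds long chunk lists into manifest chunks, then persists the entry.
    SeaweedWrite.writeMeta(filerGrpcClient, "/buckets/demo", entry);
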
    private static String multipartUpload(String targetUrl,
@ -75,8 +98,6 @@ public class SeaweedWrite {
                                          final long bytesOffset, final long bytesLength,
                                          byte[] cipherKey) throws IOException {

        HttpClient client = new DefaultHttpClient();

        InputStream inputStream = null;
        if (cipherKey == null || cipherKey.length == 0) {
            inputStream = new ByteArrayInputStream(bytes, (int) bytesOffset, (int) bytesLength);
@ -99,8 +120,9 @@ public class SeaweedWrite {
                .addBinaryBody("upload", inputStream)
                .build());

        CloseableHttpResponse response = SeaweedUtil.getClosableHttpClient().execute(post);

        try {
            HttpResponse response = client.execute(post);

            String etag = response.getLastHeader("ETag").getValue();

@ -108,12 +130,12 @@ public class SeaweedWrite {
                etag = etag.substring(1, etag.length() - 1);
            }

            EntityUtils.consume(response.getEntity());

            return etag;
        } finally {
            if (client instanceof Closeable) {
                Closeable t = (Closeable) client;
                t.close();
            }
            response.close();
            post.releaseConnection();
        }

    }

@ -2,6 +2,7 @@ syntax = "proto3";

package filer_pb;

option go_package = "github.com/chrislusf/seaweedfs/weed/pb/filer_pb";
option java_package = "seaweedfs.client";
option java_outer_classname = "FilerProto";

@ -36,6 +37,9 @@ service SeaweedFiler {
    rpc LookupVolume (LookupVolumeRequest) returns (LookupVolumeResponse) {
    }

    rpc CollectionList (CollectionListRequest) returns (CollectionListResponse) {
    }

    rpc DeleteCollection (DeleteCollectionRequest) returns (DeleteCollectionResponse) {
    }

@ -48,12 +52,21 @@ service SeaweedFiler {
    rpc SubscribeMetadata (SubscribeMetadataRequest) returns (stream SubscribeMetadataResponse) {
    }

    rpc SubscribeLocalMetadata (SubscribeMetadataRequest) returns (stream SubscribeMetadataResponse) {
    }

    rpc KeepConnected (stream KeepConnectedRequest) returns (stream KeepConnectedResponse) {
    }

    rpc LocateBroker (LocateBrokerRequest) returns (LocateBrokerResponse) {
    }

    rpc KvGet (KvGetRequest) returns (KvGetResponse) {
    }

    rpc KvPut (KvPutRequest) returns (KvPutResponse) {
    }

}

//////////////////////////////////////////////////
@ -85,6 +98,8 @@ message Entry {
    repeated FileChunk chunks = 3;
    FuseAttributes attributes = 4;
    map<string, bytes> extended = 5;
    bytes hard_link_id = 7;
    int32 hard_link_counter = 8; // only exists in hard link meta data
}

message FullEntry {
@ -97,6 +112,8 @@ message EventNotification {
    Entry new_entry = 2;
    bool delete_chunks = 3;
    string new_parent_path = 4;
    bool is_from_other_cluster = 5;
    repeated int32 signatures = 6;
}

message FileChunk {
@ -109,7 +126,12 @@ message FileChunk {
    FileId fid = 7;
    FileId source_fid = 8;
    bytes cipher_key = 9;
    bool is_gzipped = 10;
    bool is_compressed = 10;
    bool is_chunk_manifest = 11; // content is a list of FileChunks
}

message FileChunkManifest {
    repeated FileChunk chunks = 1;
}

message FileId {
@ -139,6 +161,8 @@ message CreateEntryRequest {
    string directory = 1;
    Entry entry = 2;
    bool o_excl = 3;
    bool is_from_other_cluster = 4;
    repeated int32 signatures = 5;
}

message CreateEntryResponse {
@ -148,6 +172,8 @@ message CreateEntryResponse {
message UpdateEntryRequest {
    string directory = 1;
    Entry entry = 2;
    bool is_from_other_cluster = 3;
    repeated int32 signatures = 4;
}
message UpdateEntryResponse {
}
@ -167,6 +193,8 @@ message DeleteEntryRequest {
    bool is_delete_data = 4;
    bool is_recursive = 5;
    bool ignore_recursive_error = 6;
    bool is_from_other_cluster = 7;
    repeated int32 signatures = 8;
}

message DeleteEntryResponse {
@ -189,7 +217,8 @@ message AssignVolumeRequest {
    string replication = 3;
    int32 ttl_sec = 4;
    string data_center = 5;
    string parent_path = 6;
    string path = 6;
    string rack = 7;
}

message AssignVolumeResponse {
@ -219,6 +248,16 @@ message LookupVolumeResponse {
    map<string, Locations> locations_map = 1;
}

message Collection {
    string name = 1;
}
message CollectionListRequest {
    bool include_normal_volumes = 1;
    bool include_ec_volumes = 2;
}
message CollectionListResponse {
    repeated Collection collections = 1;
}
message DeleteCollectionRequest {
    string collection = 1;
}
@ -249,12 +288,16 @@ message GetFilerConfigurationResponse {
    uint32 max_mb = 4;
    string dir_buckets = 5;
    bool cipher = 7;
    int32 signature = 8;
    string metrics_address = 9;
    int32 metrics_interval_sec = 10;
}

message SubscribeMetadataRequest {
    string client_name = 1;
    string path_prefix = 2;
    int64 since_ns = 3;
    int32 signature = 4;
}
message SubscribeMetadataResponse {
    string directory = 1;
@ -289,3 +332,19 @@ message LocateBrokerResponse {
    }
    repeated Resource resources = 2;
}

// Key-Value operations
message KvGetRequest {
    bytes key = 1;
}
message KvGetResponse {
    bytes value = 1;
    string error = 2;
}
message KvPutRequest {
    bytes key = 1;
    bytes value = 2;
}
message KvPutResponse {
    string error = 1;
}
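A short sketch of the new key-value RPCs from the Java side; the stub method names assume the usual grpc-java lowerCamelCase mapping of KvPut/KvGet, and filerGrpcClient is a connected client:

    FilerProto.KvPutResponse put = filerGrpcClient.getBlockingStub().kvPut(
            FilerProto.KvPutRequest.newBuilder()
                    .setKey(ByteString.copyFromUtf8("color"))
                    .setValue(ByteString.copyFromUtf8("blue"))
                    .build());
    FilerProto.KvGetResponse get = filerGrpcClient.getBlockingStub().kvGet(
            FilerProto.KvGetRequest.newBuilder()
                    .setKey(ByteString.copyFromUtf8("color"))
                    .build());
    // get.getValue() holds the stored bytes; get.getError() is non-empty on failure.
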
@ -3,13 +3,14 @@ package seaweedfs.client;
import org.junit.Assert;
import org.junit.Test;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

public class SeaweedReadTest {

    @Test
    public void testNonOverlappingVisibleIntervals() {
    public void testNonOverlappingVisibleIntervals() throws IOException {
        List<FilerProto.FileChunk> chunks = new ArrayList<>();
        chunks.add(FilerProto.FileChunk.newBuilder()
                .setFileId("aaa")
@ -24,7 +25,7 @@ public class SeaweedReadTest {
                .setMtime(2000)
                .build());

        List<SeaweedRead.VisibleInterval> visibleIntervals = SeaweedRead.nonOverlappingVisibleIntervals(chunks);
        List<SeaweedRead.VisibleInterval> visibleIntervals = SeaweedRead.nonOverlappingVisibleIntervals(null, chunks);
        for (SeaweedRead.VisibleInterval visibleInterval : visibleIntervals) {
            System.out.println("visible:" + visibleInterval);
        }

@ -15,8 +15,8 @@
            <plugin>
                <artifactId>maven-compiler-plugin</artifactId>
                <configuration>
                    <source>7</source>
                    <target>7</target>
                    <source>8</source>
                    <target>8</target>
                </configuration>
            </plugin>
            <plugin>
@ -120,6 +120,180 @@
            </plugin>
        </plugins>
    </build>
    <dependencies>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-client</artifactId>
            <version>2.9.2</version>
            <scope>provided</scope>
            <exclusions>
                <exclusion>
                    <artifactId>hadoop-hdfs-client</artifactId>
                    <groupId>org.apache.hadoop</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>hadoop-mapreduce-client-app</artifactId>
                    <groupId>org.apache.hadoop</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>hadoop-yarn-api</artifactId>
                    <groupId>org.apache.hadoop</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>hadoop-mapreduce-client-core</artifactId>
                    <groupId>org.apache.hadoop</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>hadoop-mapreduce-client-jobclient</artifactId>
                    <groupId>org.apache.hadoop</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>hadoop-annotations</artifactId>
                    <groupId>org.apache.hadoop</groupId>
                </exclusion>
            </exclusions>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-common</artifactId>
            <version>2.9.2</version>
            <scope>provided</scope>
            <exclusions>
                <exclusion>
                    <artifactId>commons-cli</artifactId>
                    <groupId>commons-cli</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>commons-math3</artifactId>
                    <groupId>org.apache.commons</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>xmlenc</artifactId>
                    <groupId>xmlenc</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>commons-io</artifactId>
                    <groupId>commons-io</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>commons-net</artifactId>
                    <groupId>commons-net</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>commons-collections</artifactId>
                    <groupId>commons-collections</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>servlet-api</artifactId>
                    <groupId>javax.servlet</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>jetty</artifactId>
                    <groupId>org.mortbay.jetty</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>jetty-util</artifactId>
                    <groupId>org.mortbay.jetty</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>jetty-sslengine</artifactId>
                    <groupId>org.mortbay.jetty</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>jsp-api</artifactId>
                    <groupId>javax.servlet.jsp</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>jersey-core</artifactId>
                    <groupId>com.sun.jersey</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>jersey-json</artifactId>
                    <groupId>com.sun.jersey</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>jersey-server</artifactId>
                    <groupId>com.sun.jersey</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>log4j</artifactId>
                    <groupId>log4j</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>jets3t</artifactId>
                    <groupId>net.java.dev.jets3t</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>commons-lang</artifactId>
                    <groupId>commons-lang</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>commons-configuration</artifactId>
                    <groupId>commons-configuration</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>commons-lang3</artifactId>
                    <groupId>org.apache.commons</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>slf4j-log4j12</artifactId>
                    <groupId>org.slf4j</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>jackson-core-asl</artifactId>
                    <groupId>org.codehaus.jackson</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>jackson-mapper-asl</artifactId>
                    <groupId>org.codehaus.jackson</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>avro</artifactId>
                    <groupId>org.apache.avro</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>hadoop-auth</artifactId>
                    <groupId>org.apache.hadoop</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>jsch</artifactId>
                    <groupId>com.jcraft</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>curator-client</artifactId>
                    <groupId>org.apache.curator</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>curator-recipes</artifactId>
                    <groupId>org.apache.curator</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>htrace-core4</artifactId>
                    <groupId>org.apache.htrace</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>zookeeper</artifactId>
                    <groupId>org.apache.zookeeper</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>commons-compress</artifactId>
                    <groupId>org.apache.commons</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>stax2-api</artifactId>
                    <groupId>org.codehaus.woodstox</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>woodstox-core</artifactId>
                    <groupId>com.fasterxml.woodstox</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>hadoop-annotations</artifactId>
                    <groupId>org.apache.hadoop</groupId>
                </exclusion>
            </exclusions>
        </dependency>
    </dependencies>
    <distributionManagement>
        <snapshotRepository>
            <id>ossrh</id>
@ -127,7 +301,7 @@
        </snapshotRepository>
    </distributionManagement>
    <properties>
        <seaweedfs.client.version>1.2.8</seaweedfs.client.version>
        <seaweedfs.client.version>1.5.2</seaweedfs.client.version>
        <hadoop.version>2.9.2</hadoop.version>
    </properties>
</project>
@ -5,7 +5,7 @@
    <modelVersion>4.0.0</modelVersion>

    <properties>
        <seaweedfs.client.version>1.2.8</seaweedfs.client.version>
        <seaweedfs.client.version>1.5.2</seaweedfs.client.version>
        <hadoop.version>2.9.2</hadoop.version>
    </properties>

@ -31,8 +31,8 @@
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-compiler-plugin</artifactId>
                <configuration>
                    <source>7</source>
                    <target>7</target>
                    <source>8</source>
                    <target>8</target>
                </configuration>
            </plugin>
            <plugin>
@ -147,6 +147,7 @@
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-client</artifactId>
            <version>${hadoop.version}</version>
            <scope>provided</scope>
        </dependency>
        <dependency>
            <groupId>com.github.chrislusf</groupId>
@ -157,6 +158,7 @@
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-common</artifactId>
            <version>${hadoop.version}</version>
            <scope>provided</scope>
        </dependency>
    </dependencies>

@ -1,137 +0,0 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package seaweed.hdfs;

import java.util.concurrent.CountDownLatch;

class ReadBuffer {

    private SeaweedInputStream stream;
    private long offset;                 // offset within the file for the buffer
    private int length;                  // actual length, set after the buffer is filled
    private int requestedLength;         // requested length of the read
    private byte[] buffer;               // the buffer itself
    private int bufferindex = -1;        // index in the buffers array in Buffer manager
    private ReadBufferStatus status;     // status of the buffer
    private CountDownLatch latch = null; // signaled when the buffer is done reading, so any client
    // waiting on this buffer gets unblocked

    // fields to help with eviction logic
    private long timeStamp = 0;          // tick at which buffer became available to read
    private boolean isFirstByteConsumed = false;
    private boolean isLastByteConsumed = false;
    private boolean isAnyByteConsumed = false;

    public SeaweedInputStream getStream() {
        return stream;
    }

    public void setStream(SeaweedInputStream stream) {
        this.stream = stream;
    }

    public long getOffset() {
        return offset;
    }

    public void setOffset(long offset) {
        this.offset = offset;
    }

    public int getLength() {
        return length;
    }

    public void setLength(int length) {
        this.length = length;
    }

    public int getRequestedLength() {
        return requestedLength;
    }

    public void setRequestedLength(int requestedLength) {
        this.requestedLength = requestedLength;
    }

    public byte[] getBuffer() {
        return buffer;
    }

    public void setBuffer(byte[] buffer) {
        this.buffer = buffer;
    }

    public int getBufferindex() {
        return bufferindex;
    }

    public void setBufferindex(int bufferindex) {
        this.bufferindex = bufferindex;
    }

    public ReadBufferStatus getStatus() {
        return status;
    }

    public void setStatus(ReadBufferStatus status) {
        this.status = status;
    }

    public CountDownLatch getLatch() {
        return latch;
    }

    public void setLatch(CountDownLatch latch) {
        this.latch = latch;
    }

    public long getTimeStamp() {
        return timeStamp;
    }

    public void setTimeStamp(long timeStamp) {
        this.timeStamp = timeStamp;
    }

    public boolean isFirstByteConsumed() {
        return isFirstByteConsumed;
    }

    public void setFirstByteConsumed(boolean isFirstByteConsumed) {
        this.isFirstByteConsumed = isFirstByteConsumed;
    }

    public boolean isLastByteConsumed() {
        return isLastByteConsumed;
    }

    public void setLastByteConsumed(boolean isLastByteConsumed) {
        this.isLastByteConsumed = isLastByteConsumed;
    }

    public boolean isAnyByteConsumed() {
        return isAnyByteConsumed;
    }

    public void setAnyByteConsumed(boolean isAnyByteConsumed) {
        this.isAnyByteConsumed = isAnyByteConsumed;
    }

}
@ -1,394 +0,0 @@
|
||||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
* <p>
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* <p>
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package seaweed.hdfs;
|
||||
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import java.util.Collection;
|
||||
import java.util.LinkedList;
|
||||
import java.util.Queue;
|
||||
import java.util.Stack;
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
|
||||
/**
|
||||
* The Read Buffer Manager for Rest AbfsClient.
|
||||
*/
|
||||
final class ReadBufferManager {
|
||||
private static final Logger LOGGER = LoggerFactory.getLogger(ReadBufferManager.class);
|
||||
|
||||
private static final int NUM_BUFFERS = 16;
|
||||
private static final int BLOCK_SIZE = 4 * 1024 * 1024;
|
||||
private static final int NUM_THREADS = 8;
|
||||
private static final int THRESHOLD_AGE_MILLISECONDS = 3000; // have to see if 3 seconds is a good threshold
|
||||
|
||||
private Thread[] threads = new Thread[NUM_THREADS];
|
||||
private byte[][] buffers; // array of byte[] buffers, to hold the data that is read
|
||||
private Stack<Integer> freeList = new Stack<>(); // indices in buffers[] array that are available
|
||||
|
||||
private Queue<ReadBuffer> readAheadQueue = new LinkedList<>(); // queue of requests that are not picked up by any worker thread yet
|
||||
private LinkedList<ReadBuffer> inProgressList = new LinkedList<>(); // requests being processed by worker threads
|
||||
private LinkedList<ReadBuffer> completedReadList = new LinkedList<>(); // buffers available for reading
|
||||
private static final ReadBufferManager BUFFER_MANAGER; // singleton, initialized in static initialization block
|
||||
|
||||
static {
|
||||
BUFFER_MANAGER = new ReadBufferManager();
|
||||
BUFFER_MANAGER.init();
|
||||
}
|
||||
|
||||
static ReadBufferManager getBufferManager() {
|
||||
return BUFFER_MANAGER;
|
||||
}
|
||||
|
||||
private void init() {
|
||||
buffers = new byte[NUM_BUFFERS][];
|
||||
for (int i = 0; i < NUM_BUFFERS; i++) {
|
||||
buffers[i] = new byte[BLOCK_SIZE]; // same buffers are reused. The byte array never goes back to GC
|
||||
freeList.add(i);
|
||||
}
|
||||
for (int i = 0; i < NUM_THREADS; i++) {
|
||||
Thread t = new Thread(new ReadBufferWorker(i));
|
||||
t.setDaemon(true);
|
||||
threads[i] = t;
|
||||
t.setName("SeaweedFS-prefetch-" + i);
|
||||
t.start();
|
||||
}
|
||||
ReadBufferWorker.UNLEASH_WORKERS.countDown();
|
||||
}
|
||||
|
||||
// hide instance constructor
|
||||
private ReadBufferManager() {
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
*
|
||||
* SeaweedInputStream-facing methods
|
||||
*
|
||||
*/
|
||||
|
||||
|
||||
/**
|
||||
* {@link SeaweedInputStream} calls this method to queue read-aheads.
|
||||
*
|
||||
* @param stream The {@link SeaweedInputStream} for which to do the read-ahead
|
||||
* @param requestedOffset The offset in the file which shoukd be read
|
||||
* @param requestedLength The length to read
|
||||
*/
|
||||
void queueReadAhead(final SeaweedInputStream stream, final long requestedOffset, final int requestedLength) {
|
||||
if (LOGGER.isTraceEnabled()) {
|
||||
LOGGER.trace("Start Queueing readAhead for {} offset {} length {}",
|
||||
stream.getPath(), requestedOffset, requestedLength);
|
||||
}
|
||||
ReadBuffer buffer;
|
||||
synchronized (this) {
|
||||
if (isAlreadyQueued(stream, requestedOffset)) {
|
||||
return; // already queued, do not queue again
|
||||
}
|
||||
if (freeList.isEmpty() && !tryEvict()) {
|
||||
return; // no buffers available, cannot queue anything
|
||||
}
|
||||
|
||||
buffer = new ReadBuffer();
|
||||
buffer.setStream(stream);
|
||||
buffer.setOffset(requestedOffset);
|
||||
buffer.setLength(0);
|
||||
buffer.setRequestedLength(requestedLength);
|
||||
buffer.setStatus(ReadBufferStatus.NOT_AVAILABLE);
|
||||
buffer.setLatch(new CountDownLatch(1));
|
||||
|
||||
Integer bufferIndex = freeList.pop(); // will return a value, since we have checked size > 0 already
|
||||
|
||||
buffer.setBuffer(buffers[bufferIndex]);
|
||||
buffer.setBufferindex(bufferIndex);
|
||||
readAheadQueue.add(buffer);
|
||||
notifyAll();
|
||||
}
|
||||
if (LOGGER.isTraceEnabled()) {
|
||||
LOGGER.trace("Done q-ing readAhead for file {} offset {} buffer idx {}",
|
||||
stream.getPath(), requestedOffset, buffer.getBufferindex());
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* {@link SeaweedInputStream} calls this method read any bytes already available in a buffer (thereby saving a
|
||||
* remote read). This returns the bytes if the data already exists in buffer. If there is a buffer that is reading
|
||||
* the requested offset, then this method blocks until that read completes. If the data is queued in a read-ahead
|
||||
* but not picked up by a worker thread yet, then it cancels that read-ahead and reports cache miss. This is because
|
||||
* depending on worker thread availability, the read-ahead may take a while - the calling thread can do it's own
|
||||
* read to get the data faster (copmared to the read waiting in queue for an indeterminate amount of time).
|
||||
*
|
||||
* @param stream the file to read bytes for
|
||||
* @param position the offset in the file to do a read for
|
||||
* @param length the length to read
|
||||
* @param buffer the buffer to read data into. Note that the buffer will be written into from offset 0.
|
||||
* @return the number of bytes read
|
||||
*/
|
||||
int getBlock(final SeaweedInputStream stream, final long position, final int length, final byte[] buffer) {
|
||||
// not synchronized, so have to be careful with locking
|
||||
if (LOGGER.isTraceEnabled()) {
|
||||
LOGGER.trace("getBlock for file {} position {} thread {}",
|
||||
stream.getPath(), position, Thread.currentThread().getName());
|
||||
}
|
||||
|
||||
waitForProcess(stream, position);
|
||||
|
||||
int bytesRead = 0;
|
||||
synchronized (this) {
|
||||
bytesRead = getBlockFromCompletedQueue(stream, position, length, buffer);
|
||||
}
|
||||
if (bytesRead > 0) {
|
||||
if (LOGGER.isTraceEnabled()) {
|
||||
LOGGER.trace("Done read from Cache for {} position {} length {}",
|
||||
stream.getPath(), position, bytesRead);
|
||||
}
|
||||
return bytesRead;
|
||||
}
|
||||
|
||||
// otherwise, just say we got nothing - calling thread can do its own read
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
*
|
||||
* Internal methods
|
||||
*
|
||||
*/
|
||||
|
||||
private void waitForProcess(final SeaweedInputStream stream, final long position) {
|
||||
ReadBuffer readBuf;
|
||||
synchronized (this) {
|
||||
clearFromReadAheadQueue(stream, position);
|
||||
readBuf = getFromList(inProgressList, stream, position);
|
||||
}
|
||||
if (readBuf != null) { // if in in-progress queue, then block for it
|
||||
try {
|
||||
if (LOGGER.isTraceEnabled()) {
|
||||
LOGGER.trace("got a relevant read buffer for file {} offset {} buffer idx {}",
|
||||
stream.getPath(), readBuf.getOffset(), readBuf.getBufferindex());
|
||||
}
|
||||
readBuf.getLatch().await(); // blocking wait on the caller stream's thread
|
||||
// Note on correctness: readBuf gets out of inProgressList only in 1 place: after worker thread
|
||||
// is done processing it (in doneReading). There, the latch is set after removing the buffer from
|
||||
// inProgressList. So this latch is safe to be outside the synchronized block.
|
||||
// Putting it in synchronized would result in a deadlock, since this thread would be holding the lock
|
||||
// while waiting, so no one will be able to change any state. If this becomes more complex in the future,
|
||||
// then the latch cane be removed and replaced with wait/notify whenever inProgressList is touched.
|
||||
} catch (InterruptedException ex) {
|
||||
Thread.currentThread().interrupt();
|
||||
}
|
||||
if (LOGGER.isTraceEnabled()) {
|
||||
LOGGER.trace("latch done for file {} buffer idx {} length {}",
|
||||
stream.getPath(), readBuf.getBufferindex(), readBuf.getLength());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* If any buffer in the completedlist can be reclaimed then reclaim it and return the buffer to free list.
|
||||
* The objective is to find just one buffer - there is no advantage to evicting more than one.
|
||||
*
|
||||
* @return whether the eviction succeeeded - i.e., were we able to free up one buffer
|
||||
*/
|
||||
private synchronized boolean tryEvict() {
|
||||
ReadBuffer nodeToEvict = null;
|
||||
if (completedReadList.size() <= 0) {
|
||||
return false; // there are no evict-able buffers
|
||||
}
|
||||
|
||||
// first, try buffers where all bytes have been consumed (approximated as first and last bytes consumed)
|
||||
for (ReadBuffer buf : completedReadList) {
|
||||
if (buf.isFirstByteConsumed() && buf.isLastByteConsumed()) {
|
||||
nodeToEvict = buf;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (nodeToEvict != null) {
|
||||
return evict(nodeToEvict);
|
||||
}
|
||||
|
||||
// next, try buffers where any bytes have been consumed (may be a bad idea? have to experiment and see)
|
||||
for (ReadBuffer buf : completedReadList) {
|
||||
if (buf.isAnyByteConsumed()) {
|
||||
nodeToEvict = buf;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (nodeToEvict != null) {
|
||||
return evict(nodeToEvict);
|
||||
}
|
||||
|
||||
// next, try any old nodes that have not been consumed
|
||||
long earliestBirthday = Long.MAX_VALUE;
|
||||
for (ReadBuffer buf : completedReadList) {
|
||||
if (buf.getTimeStamp() < earliestBirthday) {
|
||||
nodeToEvict = buf;
|
||||
earliestBirthday = buf.getTimeStamp();
|
||||
}
|
||||
}
|
||||
if ((currentTimeMillis() - earliestBirthday > THRESHOLD_AGE_MILLISECONDS) && (nodeToEvict != null)) {
|
||||
return evict(nodeToEvict);
|
||||
}
|
||||
|
||||
// nothing can be evicted
|
||||
return false;
|
||||
}
|
||||
|
||||
    private boolean evict(final ReadBuffer buf) {
        freeList.push(buf.getBufferindex());
        completedReadList.remove(buf);
        if (LOGGER.isTraceEnabled()) {
            LOGGER.trace("Evicting buffer idx {}; was used for file {} offset {} length {}",
                    buf.getBufferindex(), buf.getStream().getPath(), buf.getOffset(), buf.getLength());
        }
        return true;
    }

    private boolean isAlreadyQueued(final SeaweedInputStream stream, final long requestedOffset) {
        // returns true if any part of the buffer is already queued
        return (isInList(readAheadQueue, stream, requestedOffset)
                || isInList(inProgressList, stream, requestedOffset)
                || isInList(completedReadList, stream, requestedOffset));
    }

    private boolean isInList(final Collection<ReadBuffer> list, final SeaweedInputStream stream, final long requestedOffset) {
        return (getFromList(list, stream, requestedOffset) != null);
    }

    private ReadBuffer getFromList(final Collection<ReadBuffer> list, final SeaweedInputStream stream, final long requestedOffset) {
        for (ReadBuffer buffer : list) {
            if (buffer.getStream() == stream) {
                if (buffer.getStatus() == ReadBufferStatus.AVAILABLE
                        && requestedOffset >= buffer.getOffset()
                        && requestedOffset < buffer.getOffset() + buffer.getLength()) {
                    return buffer;
                } else if (requestedOffset >= buffer.getOffset()
                        && requestedOffset < buffer.getOffset() + buffer.getRequestedLength()) {
                    return buffer;
                }
            }
        }
        return null;
    }

    private void clearFromReadAheadQueue(final SeaweedInputStream stream, final long requestedOffset) {
        ReadBuffer buffer = getFromList(readAheadQueue, stream, requestedOffset);
        if (buffer != null) {
            readAheadQueue.remove(buffer);
            notifyAll(); // lock is held in calling method
            freeList.push(buffer.getBufferindex());
        }
    }

    private int getBlockFromCompletedQueue(final SeaweedInputStream stream, final long position, final int length,
                                           final byte[] buffer) {
        ReadBuffer buf = getFromList(completedReadList, stream, position);
        if (buf == null || position >= buf.getOffset() + buf.getLength()) {
            return 0;
        }
        int cursor = (int) (position - buf.getOffset());
        int availableLengthInBuffer = buf.getLength() - cursor;
        int lengthToCopy = Math.min(length, availableLengthInBuffer);
        System.arraycopy(buf.getBuffer(), cursor, buffer, 0, lengthToCopy);
        if (cursor == 0) {
            buf.setFirstByteConsumed(true);
        }
        if (cursor + lengthToCopy == buf.getLength()) {
            buf.setLastByteConsumed(true);
        }
        buf.setAnyByteConsumed(true);
        return lengthToCopy;
    }
    /*
     *
     * ReadBufferWorker-thread-facing methods
     *
     */

    /**
     * ReadBufferWorker thread calls this to get the next buffer that it should work on.
     *
     * @return {@link ReadBuffer}
     * @throws InterruptedException if thread is interrupted
     */
    ReadBuffer getNextBlockToRead() throws InterruptedException {
        ReadBuffer buffer = null;
        synchronized (this) {
            //buffer = readAheadQueue.take();  // blocking method
            while (readAheadQueue.size() == 0) {
                wait();
            }
            buffer = readAheadQueue.remove();
            notifyAll();
            if (buffer == null) {
                return null;            // should never happen
            }
            buffer.setStatus(ReadBufferStatus.READING_IN_PROGRESS);
            inProgressList.add(buffer);
        }
        if (LOGGER.isTraceEnabled()) {
            LOGGER.trace("ReadBufferWorker picked file {} for offset {}",
                    buffer.getStream().getPath(), buffer.getOffset());
        }
        return buffer;
    }

    /**
     * ReadBufferWorker thread calls this method to post completion.
     *
     * @param buffer            the buffer whose read was completed
     * @param result            the {@link ReadBufferStatus} after the read operation in the worker thread
     * @param bytesActuallyRead the number of bytes that the worker thread was actually able to read
     */
    void doneReading(final ReadBuffer buffer, final ReadBufferStatus result, final int bytesActuallyRead) {
        if (LOGGER.isTraceEnabled()) {
            LOGGER.trace("ReadBufferWorker completed file {} for offset {} bytes {}",
                    buffer.getStream().getPath(), buffer.getOffset(), bytesActuallyRead);
        }
        synchronized (this) {
            inProgressList.remove(buffer);
            if (result == ReadBufferStatus.AVAILABLE && bytesActuallyRead > 0) {
                buffer.setStatus(ReadBufferStatus.AVAILABLE);
                buffer.setTimeStamp(currentTimeMillis());
                buffer.setLength(bytesActuallyRead);
                completedReadList.add(buffer);
            } else {
                freeList.push(buffer.getBufferindex());
                // buffer should go out of scope after the end of the calling method in ReadBufferWorker, and eligible for GC
            }
        }
        // outside the synchronized, since anyone receiving a wake-up from the latch must see safe-published results
        buffer.getLatch().countDown(); // wake up waiting threads (if any)
    }
    /**
     * Similar to System.currentTimeMillis, except implemented with System.nanoTime().
     * System.currentTimeMillis can go backwards when system clock is changed (e.g., with NTP time synchronization),
     * making it unsuitable for measuring time intervals. nanoTime is strictly monotonically increasing per CPU core.
     * Note: it is not monotonic across sockets, and even within a CPU, it's only the
     * more recent parts which share a clock across all cores.
     *
     * @return current time in milliseconds
     */
    private long currentTimeMillis() {
        return System.nanoTime() / 1000 / 1000;
    }
}
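// Editor's note: a minimal sketch of interval timing with the monotonic source
// wrapped above (illustrative only). With System.currentTimeMillis(), an NTP
// step could make the elapsed value negative; with System.nanoTime() it cannot:
//
//     long startMillis = System.nanoTime() / 1000 / 1000;
//     // ... do work ...
//     long elapsedMillis = System.nanoTime() / 1000 / 1000 - startMillis; // never negative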
@ -1,70 +0,0 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package seaweed.hdfs;

import java.util.concurrent.CountDownLatch;

class ReadBufferWorker implements Runnable {

    protected static final CountDownLatch UNLEASH_WORKERS = new CountDownLatch(1);
    private int id;

    ReadBufferWorker(final int id) {
        this.id = id;
    }

    /**
     * return the ID of ReadBufferWorker.
     */
    public int getId() {
        return this.id;
    }

    /**
     * Waits until a buffer becomes available in ReadAheadQueue.
     * Once a buffer becomes available, reads the file specified in it and then posts results back to buffer manager.
     * Rinse and repeat. Forever.
     */
    public void run() {
        try {
            UNLEASH_WORKERS.await();
        } catch (InterruptedException ex) {
            Thread.currentThread().interrupt();
        }
        ReadBufferManager bufferManager = ReadBufferManager.getBufferManager();
        ReadBuffer buffer;
        while (true) {
            try {
                buffer = bufferManager.getNextBlockToRead(); // blocks, until a buffer is available for this thread
            } catch (InterruptedException ex) {
                Thread.currentThread().interrupt();
                return;
            }
            if (buffer != null) {
                try {
                    // do the actual read, from the file.
                    int bytesRead = buffer.getStream().readRemote(buffer.getOffset(), buffer.getBuffer(), 0, buffer.getRequestedLength());
                    bufferManager.doneReading(buffer, ReadBufferStatus.AVAILABLE, bytesRead); // post result back to ReadBufferManager
                } catch (Exception ex) {
                    bufferManager.doneReading(buffer, ReadBufferStatus.READ_FAILED, 0);
                }
            }
        }
    }
}
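// Editor's note: a minimal sketch of how these workers are started, mirroring
// ReadBufferManager.init() as it appears later in this diff (illustrative only):
//
//     for (int i = 0; i < NUM_THREADS; i++) {
//         Thread t = new Thread(new ReadBufferWorker(i));
//         t.setDaemon(true);                        // do not keep the JVM alive
//         t.setName("SeaweedFS-prefetch-" + i);
//         t.start();                                // each worker blocks on UNLEASH_WORKERS
//     }
//     ReadBufferWorker.UNLEASH_WORKERS.countDown(); // release all workers together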
@ -18,12 +18,18 @@

package seaweed.hdfs;

/**
 * The ReadBufferStatus for Rest AbfsClient
 */
public enum ReadBufferStatus {
    NOT_AVAILABLE,       // buffers sitting in readaheadqueue have this status
    READING_IN_PROGRESS, // reading is in progress on this buffer. Buffer should be in inProgressList
    AVAILABLE,           // data is available in buffer. It should be in completedList
    READ_FAILED          // read completed, but failed.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.DelegateToFileSystem;

import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;

public class SeaweedAbstractFileSystem extends DelegateToFileSystem {

    SeaweedAbstractFileSystem(final URI uri, final Configuration conf)
            throws IOException, URISyntaxException {
        super(uri, new SeaweedFileSystem(), conf, "seaweedfs", false);
    }

}
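// Editor's note: a hedged sketch of how a DelegateToFileSystem subclass like the
// one above is usually bound for Hadoop's FileContext API. The key names follow
// Hadoop's fs.AbstractFileSystem.<scheme>.impl / fs.<scheme>.impl conventions and
// are an assumption here, not something this diff itself configures:
//
//     Configuration conf = new Configuration();
//     conf.set("fs.AbstractFileSystem.seaweedfs.impl", "seaweed.hdfs.SeaweedAbstractFileSystem");
//     conf.set("fs.seaweedfs.impl", "seaweed.hdfs.SeaweedFileSystem");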
@ -5,31 +5,29 @@ import org.apache.hadoop.fs.*;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.Progressable;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import seaweedfs.client.FilerProto;

import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.URI;
import java.util.EnumSet;
import java.util.List;
import java.util.Map;

import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
public class SeaweedFileSystem extends FileSystem {

public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {

    public static final int FS_SEAWEED_DEFAULT_PORT = 8888;
    public static final String FS_SEAWEED_FILER_HOST = "fs.seaweed.filer.host";
    public static final String FS_SEAWEED_FILER_PORT = "fs.seaweed.filer.port";
    public static final int FS_SEAWEED_DEFAULT_PORT = 8888;
    public static final String FS_SEAWEED_BUFFER_SIZE = "fs.seaweed.buffer.size";
    public static final int FS_SEAWEED_DEFAULT_BUFFER_SIZE = 4 * 1024 * 1024;

    private static final Logger LOG = LoggerFactory.getLogger(SeaweedFileSystem.class);
    private static int BUFFER_SIZE = 16 * 1024 * 1024;

    private URI uri;
    private Path workingDirectory = new Path("/");
@ -60,12 +58,10 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
        port = (port == -1) ? FS_SEAWEED_DEFAULT_PORT : port;
        conf.setInt(FS_SEAWEED_FILER_PORT, port);

        conf.setInt(IO_FILE_BUFFER_SIZE_KEY, BUFFER_SIZE);

        setConf(conf);
        this.uri = uri;

        seaweedFileSystemStore = new SeaweedFileSystemStore(host, port);
        seaweedFileSystemStore = new SeaweedFileSystemStore(host, port, conf);

    }
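    // Editor's note: a minimal usage sketch for the configuration keys declared
    // above (host value is a placeholder; illustrative only):
    //
    //     Configuration conf = new Configuration();
    //     conf.set(SeaweedFileSystem.FS_SEAWEED_FILER_HOST, "filer.example.com");
    //     conf.setInt(SeaweedFileSystem.FS_SEAWEED_FILER_PORT, 8888);              // FS_SEAWEED_DEFAULT_PORT
    //     conf.setInt(SeaweedFileSystem.FS_SEAWEED_BUFFER_SIZE, 4 * 1024 * 1024);  // FS_SEAWEED_DEFAULT_BUFFER_SIZE
    //     FileSystem fs = FileSystem.get(URI.create("seaweedfs://filer.example.com:8888/"), conf);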
@ -77,8 +73,9 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
        path = qualify(path);

        try {
            InputStream inputStream = seaweedFileSystemStore.openFileForRead(path, statistics, bufferSize);
            return new FSDataInputStream(inputStream);
            int seaweedBufferSize = this.getConf().getInt(FS_SEAWEED_BUFFER_SIZE, FS_SEAWEED_DEFAULT_BUFFER_SIZE);
            FSInputStream inputStream = seaweedFileSystemStore.openFileForRead(path, statistics);
            return new FSDataInputStream(new BufferedFSInputStream(inputStream, 4 * seaweedBufferSize));
        } catch (Exception ex) {
            LOG.warn("open path: {} bufferSize:{}", path, bufferSize, ex);
            return null;
@ -95,7 +92,8 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {

        try {
            String replicaPlacement = String.format("%03d", replication - 1);
            OutputStream outputStream = seaweedFileSystemStore.createFile(path, overwrite, permission, bufferSize, replicaPlacement);
            int seaweedBufferSize = this.getConf().getInt(FS_SEAWEED_BUFFER_SIZE, FS_SEAWEED_DEFAULT_BUFFER_SIZE);
            OutputStream outputStream = seaweedFileSystemStore.createFile(path, overwrite, permission, seaweedBufferSize, replicaPlacement);
            return new FSDataOutputStream(outputStream, statistics);
        } catch (Exception ex) {
            LOG.warn("create path: {} bufferSize:{} blockSize:{}", path, bufferSize, blockSize, ex);
@ -105,8 +103,9 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {

    /**
     * {@inheritDoc}
     *
     * @throws FileNotFoundException if the parent directory is not present -or
     * is not a directory.
     * is not a directory.
     */
    @Override
    public FSDataOutputStream createNonRecursive(Path path,
@ -123,9 +122,10 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
                throw new FileAlreadyExistsException("Not a directory: " + parent);
            }
        }
        int seaweedBufferSize = this.getConf().getInt(FS_SEAWEED_BUFFER_SIZE, FS_SEAWEED_DEFAULT_BUFFER_SIZE);
        return create(path, permission,
                flags.contains(CreateFlag.OVERWRITE), bufferSize,
                replication, blockSize, progress);
                replication, seaweedBufferSize, progress);
    }
    @Override
@ -135,7 +135,8 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {

        path = qualify(path);
        try {
            OutputStream outputStream = seaweedFileSystemStore.createFile(path, false, null, bufferSize, "");
            int seaweedBufferSize = this.getConf().getInt(FS_SEAWEED_BUFFER_SIZE, FS_SEAWEED_DEFAULT_BUFFER_SIZE);
            OutputStream outputStream = seaweedFileSystemStore.createFile(path, false, null, seaweedBufferSize, "");
            return new FSDataOutputStream(outputStream, statistics);
        } catch (Exception ex) {
            LOG.warn("append path: {} bufferSize:{}", path, bufferSize, ex);
@ -144,7 +145,7 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
    }

    @Override
    public boolean rename(Path src, Path dst) {
    public boolean rename(Path src, Path dst) throws IOException {

        LOG.debug("rename path: {} => {}", src, dst);

@ -155,12 +156,13 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
        if (src.equals(dst)) {
            return true;
        }
        FileStatus dstFileStatus = getFileStatus(dst);
        FilerProto.Entry entry = seaweedFileSystemStore.lookupEntry(dst);

        String sourceFileName = src.getName();
        Path adjustedDst = dst;

        if (dstFileStatus != null) {
        if (entry != null) {
            FileStatus dstFileStatus = getFileStatus(dst);
            String sourceFileName = src.getName();
            if (!dstFileStatus.isDirectory()) {
                return false;
            }
@ -175,18 +177,20 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
    }

    @Override
    public boolean delete(Path path, boolean recursive) {
    public boolean delete(Path path, boolean recursive) throws IOException {

        LOG.debug("delete path: {} recursive:{}", path, recursive);

        path = qualify(path);

        FileStatus fileStatus = getFileStatus(path);
        FilerProto.Entry entry = seaweedFileSystemStore.lookupEntry(path);

        if (fileStatus == null) {
        if (entry == null) {
            return true;
        }

        FileStatus fileStatus = getFileStatus(path);

        return seaweedFileSystemStore.deleteEntries(path, fileStatus.isDirectory(), recursive);

    }
@ -222,9 +226,9 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {

        path = qualify(path);

        FileStatus fileStatus = getFileStatus(path);
        FilerProto.Entry entry = seaweedFileSystemStore.lookupEntry(path);

        if (fileStatus == null) {
        if (entry == null) {

            UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
            return seaweedFileSystemStore.createDirectory(path, currentUser,
@ -233,6 +237,8 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {

        }

        FileStatus fileStatus = getFileStatus(path);

        if (fileStatus.isDirectory()) {
            return true;
        } else {
@ -241,7 +247,7 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
    }

    @Override
    public FileStatus getFileStatus(Path path) {
    public FileStatus getFileStatus(Path path) throws IOException {

        LOG.debug("getFileStatus path: {}", path);

@ -335,9 +341,7 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {

    @Override
    public void createSymlink(final Path target, final Path link,
                              final boolean createParent) throws AccessControlException,
            FileAlreadyExistsException, FileNotFoundException,
            ParentNotDirectoryException, UnsupportedFileSystemException,
                              final boolean createParent) throws
            IOException {
        // Supporting filesystems should override this method
        throw new UnsupportedOperationException(
@ -1,5 +1,7 @@
package seaweed.hdfs;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@ -7,30 +9,31 @@ import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.security.UserGroupInformation;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import seaweedfs.client.FilerClient;
import seaweedfs.client.FilerGrpcClient;
import seaweedfs.client.FilerProto;
import seaweedfs.client.SeaweedRead;
import seaweedfs.client.*;

import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import static seaweed.hdfs.SeaweedFileSystem.FS_SEAWEED_BUFFER_SIZE;
import static seaweed.hdfs.SeaweedFileSystem.FS_SEAWEED_DEFAULT_BUFFER_SIZE;

public class SeaweedFileSystemStore {

    private static final Logger LOG = LoggerFactory.getLogger(SeaweedFileSystemStore.class);

    private FilerGrpcClient filerGrpcClient;
    private FilerClient filerClient;
    private Configuration conf;

    public SeaweedFileSystemStore(String host, int port) {
    public SeaweedFileSystemStore(String host, int port, Configuration conf) {
        int grpcPort = 10000 + port;
        filerGrpcClient = new FilerGrpcClient(host, grpcPort);
        filerClient = new FilerClient(filerGrpcClient);
        this.conf = conf;
    }
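    // Editor's note: the constructor above derives the filer's gRPC port as
    // HTTP port + 10000. A minimal sketch (host and port are placeholders,
    // illustrative only):
    //
    //     // filer HTTP on 8888  ->  filer gRPC on 18888
    //     SeaweedFileSystemStore store = new SeaweedFileSystemStore("localhost", 8888, new Configuration());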
    public static String getParentDirectory(Path path) {
@ -61,7 +64,7 @@ public class SeaweedFileSystemStore {
        );
    }

    public FileStatus[] listEntries(final Path path) {
    public FileStatus[] listEntries(final Path path) throws IOException {
        LOG.debug("listEntries path: {}", path);

        FileStatus pathStatus = getFileStatus(path);
@ -89,11 +92,11 @@ public class SeaweedFileSystemStore {

    }

    public FileStatus getFileStatus(final Path path) {
    public FileStatus getFileStatus(final Path path) throws IOException {

        FilerProto.Entry entry = lookupEntry(path);
        if (entry == null) {
            return null;
            throw new FileNotFoundException("File does not exist: " + path);
        }
        LOG.debug("doGetFileStatus path:{} entry:{}", path, entry);

@ -123,10 +126,10 @@ public class SeaweedFileSystemStore {

    private FileStatus doGetFileStatus(Path path, FilerProto.Entry entry) {
        FilerProto.FuseAttributes attributes = entry.getAttributes();
        long length = SeaweedRead.totalSize(entry.getChunksList());
        long length = SeaweedRead.fileSize(entry);
        boolean isDir = entry.getIsDirectory();
        int block_replication = 1;
        int blocksize = 512;
        int blocksize = this.conf.getInt(FS_SEAWEED_BUFFER_SIZE, FS_SEAWEED_DEFAULT_BUFFER_SIZE);
        long modification_time = attributes.getMtime() * 1000; // milliseconds
        long access_time = 0;
        FsPermission permission = FsPermission.createImmutable((short) attributes.getFileMode());
@ -136,7 +139,7 @@ public class SeaweedFileSystemStore {
            modification_time, access_time, permission, owner, group, null, path);
    }

    private FilerProto.Entry lookupEntry(Path path) {
    public FilerProto.Entry lookupEntry(Path path) {

        return filerClient.lookupEntry(getParentDirectory(path), path.getName());

@ -184,7 +187,7 @@ public class SeaweedFileSystemStore {
            entry.mergeFrom(existingEntry);
            entry.getAttributesBuilder().setMtime(now);
            LOG.debug("createFile merged entry path:{} entry:{} from:{}", path, entry, existingEntry);
            writePosition = SeaweedRead.totalSize(existingEntry.getChunksList());
            writePosition = SeaweedRead.fileSize(existingEntry);
            replication = existingEntry.getAttributes().getReplication();
        }
    }
@ -201,18 +204,17 @@ public class SeaweedFileSystemStore {
                .clearGroupName()
                .addAllGroupName(Arrays.asList(userGroupInformation.getGroupNames()))
            );
            SeaweedWrite.writeMeta(filerGrpcClient, getParentDirectory(path), entry);
        }

        return new SeaweedOutputStream(filerGrpcClient, path, entry, writePosition, bufferSize, replication);

    }
    public InputStream openFileForRead(final Path path, FileSystem.Statistics statistics,
                                       int bufferSize) throws IOException {
    public FSInputStream openFileForRead(final Path path, FileSystem.Statistics statistics) throws IOException {

        LOG.debug("openFileForRead path:{} bufferSize:{}", path, bufferSize);
        LOG.debug("openFileForRead path:{}", path);

        int readAheadQueueDepth = 2;
        FilerProto.Entry entry = lookupEntry(path);

        if (entry == null) {
@ -222,9 +224,7 @@ public class SeaweedFileSystemStore {
        return new SeaweedInputStream(filerGrpcClient,
            statistics,
            path.toUri().getPath(),
            entry,
            bufferSize,
            readAheadQueueDepth);
            entry);
    }

    public void setOwner(Path path, String owner, String group) {
@ -2,7 +2,6 @@ package seaweed.hdfs;

// based on org.apache.hadoop.fs.azurebfs.services.AbfsInputStream

import com.google.common.base.Preconditions;
import org.apache.hadoop.fs.FSExceptionMessages;
import org.apache.hadoop.fs.FSInputStream;
import org.apache.hadoop.fs.FileSystem.Statistics;
@ -26,36 +25,23 @@ public class SeaweedInputStream extends FSInputStream {
    private final FilerProto.Entry entry;
    private final List<SeaweedRead.VisibleInterval> visibleIntervalList;
    private final long contentLength;
    private final int bufferSize; // default buffer size
    private final int readAheadQueueDepth;         // initialized in constructor
    private final boolean readAheadEnabled; // whether enable readAhead;

    private byte[] buffer = null;            // will be initialized on first use
    private long position = 0;  // cursor of the file

    private long fCursor = 0;  // cursor of buffer within file - offset of next byte to read from remote server
    private long fCursorAfterLastRead = -1;
    private int bCursor = 0;   // cursor of read within buffer - offset of next byte to be returned from buffer
    private int limit = 0;     // offset of next byte to be read into buffer from service (i.e., upper marker+1
    //                                                      of valid bytes in buffer)
    private boolean closed = false;

    public SeaweedInputStream(
        final FilerGrpcClient filerGrpcClient,
        final Statistics statistics,
        final String path,
        final FilerProto.Entry entry,
        final int bufferSize,
        final int readAheadQueueDepth) {
            final FilerGrpcClient filerGrpcClient,
            final Statistics statistics,
            final String path,
            final FilerProto.Entry entry) throws IOException {
        this.filerGrpcClient = filerGrpcClient;
        this.statistics = statistics;
        this.path = path;
        this.entry = entry;
        this.contentLength = SeaweedRead.totalSize(entry.getChunksList());
        this.bufferSize = bufferSize;
        this.readAheadQueueDepth = (readAheadQueueDepth >= 0) ? readAheadQueueDepth : Runtime.getRuntime().availableProcessors();
        this.readAheadEnabled = true;
        this.contentLength = SeaweedRead.fileSize(entry);

        this.visibleIntervalList = SeaweedRead.nonOverlappingVisibleIntervals(entry.getChunksList());
        this.visibleIntervalList = SeaweedRead.nonOverlappingVisibleIntervals(filerGrpcClient, entry.getChunksList());

        LOG.debug("new path:{} entry:{} visibleIntervalList:{}", path, entry, visibleIntervalList);
@ -78,122 +64,7 @@ public class SeaweedInputStream extends FSInputStream {

    @Override
    public synchronized int read(final byte[] b, final int off, final int len) throws IOException {
        int currentOff = off;
        int currentLen = len;
        int lastReadBytes;
        int totalReadBytes = 0;
        do {
            lastReadBytes = readOneBlock(b, currentOff, currentLen);
            if (lastReadBytes > 0) {
                currentOff += lastReadBytes;
                currentLen -= lastReadBytes;
                totalReadBytes += lastReadBytes;
            }
            if (currentLen <= 0 || currentLen > b.length - currentOff) {
                break;
            }
        } while (lastReadBytes > 0);
        return totalReadBytes > 0 ? totalReadBytes : lastReadBytes;
    }

    private int readOneBlock(final byte[] b, final int off, final int len) throws IOException {
        if (closed) {
            throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
        }

        Preconditions.checkNotNull(b);

        if (len == 0) {
            return 0;
        }

        if (this.available() == 0) {
            return -1;
        }

        if (off < 0 || len < 0 || len > b.length - off) {
            throw new IndexOutOfBoundsException();
        }

        //If buffer is empty, then fill the buffer.
        if (bCursor == limit) {
            //If EOF, then return -1
            if (fCursor >= contentLength) {
                return -1;
            }

            long bytesRead = 0;
            //reset buffer to initial state - i.e., throw away existing data
            bCursor = 0;
            limit = 0;
            if (buffer == null) {
                buffer = new byte[bufferSize];
            }

            // Enable readAhead when reading sequentially
            if (-1 == fCursorAfterLastRead || fCursorAfterLastRead == fCursor || b.length >= bufferSize) {
                bytesRead = readInternal(fCursor, buffer, 0, bufferSize, false);
            } else {
                bytesRead = readInternal(fCursor, buffer, 0, b.length, true);
            }

            if (bytesRead == -1) {
                return -1;
            }

            limit += bytesRead;
            fCursor += bytesRead;
            fCursorAfterLastRead = fCursor;
        }

        //If there is anything in the buffer, then return lesser of (requested bytes) and (bytes in buffer)
        //(bytes returned may be less than requested)
        int bytesRemaining = limit - bCursor;
        int bytesToRead = Math.min(len, bytesRemaining);
        System.arraycopy(buffer, bCursor, b, off, bytesToRead);
        bCursor += bytesToRead;
        if (statistics != null) {
            statistics.incrementBytesRead(bytesToRead);
        }
        return bytesToRead;
    }

    private int readInternal(final long position, final byte[] b, final int offset, final int length,
                             final boolean bypassReadAhead) throws IOException {
        if (readAheadEnabled && !bypassReadAhead) {
            // try reading from read-ahead
            if (offset != 0) {
                throw new IllegalArgumentException("readahead buffers cannot have non-zero buffer offsets");
            }
            int receivedBytes;

            // queue read-aheads
            int numReadAheads = this.readAheadQueueDepth;
            long nextSize;
            long nextOffset = position;
            while (numReadAheads > 0 && nextOffset < contentLength) {
                nextSize = Math.min((long) bufferSize, contentLength - nextOffset);
                ReadBufferManager.getBufferManager().queueReadAhead(this, nextOffset, (int) nextSize);
                nextOffset = nextOffset + nextSize;
                numReadAheads--;
            }

            // try reading from buffers first
            receivedBytes = ReadBufferManager.getBufferManager().getBlock(this, position, length, b);
            if (receivedBytes > 0) {
                return receivedBytes;
            }

            // got nothing from read-ahead, do our own read now
            receivedBytes = readRemote(position, b, offset, length);
            return receivedBytes;
        } else {
            return readRemote(position, b, offset, length);
        }
    }

    int readRemote(long position, byte[] b, int offset, int length) throws IOException {
        if (position < 0) {
            throw new IllegalArgumentException("attempting to read from negative offset");
        }
@ -203,21 +74,30 @@ public class SeaweedInputStream extends FSInputStream {
        if (b == null) {
            throw new IllegalArgumentException("null byte array passed in to read() method");
        }
        if (offset >= b.length) {
        if (off >= b.length) {
            throw new IllegalArgumentException("offset greater than length of array");
        }
        if (length < 0) {
        if (len < 0) {
            throw new IllegalArgumentException("requested read length is less than zero");
        }
        if (length > (b.length - offset)) {
        if (len > (b.length - off)) {
            throw new IllegalArgumentException("requested read length is more than will fit after requested offset in buffer");
        }

        long bytesRead = SeaweedRead.read(filerGrpcClient, visibleIntervalList, position, b, offset, length);
        long bytesRead = SeaweedRead.read(this.filerGrpcClient, this.visibleIntervalList, this.position, b, off, len, SeaweedRead.fileSize(entry));
        if (bytesRead > Integer.MAX_VALUE) {
            throw new IOException("Unexpected Content-Length");
        }

        if (bytesRead > 0) {
            this.position += bytesRead;
            if (statistics != null) {
                statistics.incrementBytesRead(bytesRead);
            }
        }

        return (int) bytesRead;

    }
    /**
@ -239,17 +119,8 @@ public class SeaweedInputStream extends FSInputStream {
            throw new EOFException(FSExceptionMessages.CANNOT_SEEK_PAST_EOF);
        }

        if (n >= fCursor - limit && n <= fCursor) { // within buffer
            bCursor = (int) (n - (fCursor - limit));
            return;
        }
        this.position = n;

        // next read will read from here
        fCursor = n;

        //invalidate buffer
        limit = 0;
        bCursor = 0;
    }

    @Override
@ -257,20 +128,19 @@ public class SeaweedInputStream extends FSInputStream {
        if (closed) {
            throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
        }
        long currentPos = getPos();
        if (currentPos == contentLength) {
        if (this.position == contentLength) {
            if (n > 0) {
                throw new EOFException(FSExceptionMessages.CANNOT_SEEK_PAST_EOF);
            }
        }
        long newPos = currentPos + n;
        long newPos = this.position + n;
        if (newPos < 0) {
            newPos = 0;
            n = newPos - currentPos;
            n = newPos - this.position;
        }
        if (newPos > contentLength) {
            newPos = contentLength;
            n = newPos - currentPos;
            n = newPos - this.position;
        }
        seek(newPos);
        return n;
@ -289,11 +159,11 @@ public class SeaweedInputStream extends FSInputStream {
    public synchronized int available() throws IOException {
        if (closed) {
            throw new IOException(
                FSExceptionMessages.STREAM_IS_CLOSED);
                    FSExceptionMessages.STREAM_IS_CLOSED);
        }
        final long remaining = this.contentLength - this.getPos();
        return remaining <= Integer.MAX_VALUE
            ? (int) remaining : Integer.MAX_VALUE;
                ? (int) remaining : Integer.MAX_VALUE;
    }

    /**
@ -321,7 +191,7 @@ public class SeaweedInputStream extends FSInputStream {
        if (closed) {
            throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
        }
        return fCursor - limit + bCursor;
        return position;
    }

    /**
@ -338,7 +208,6 @@ public class SeaweedInputStream extends FSInputStream {
    @Override
    public synchronized void close() throws IOException {
        closed = true;
        buffer = null; // de-reference the buffer so it can be GC'ed sooner
    }

    /**
@ -7,6 +7,7 @@ import org.apache.hadoop.fs.FSExceptionMessages;
import org.apache.hadoop.fs.Path;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import seaweedfs.client.ByteBufferPool;
import seaweedfs.client.FilerGrpcClient;
import seaweedfs.client.FilerProto;
import seaweedfs.client.SeaweedWrite;
@ -14,6 +15,7 @@ import seaweedfs.client.SeaweedWrite;
import java.io.IOException;
import java.io.InterruptedIOException;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.util.concurrent.*;

import static seaweed.hdfs.SeaweedFileSystemStore.getParentDirectory;
@ -28,16 +30,16 @@ public class SeaweedOutputStream extends OutputStream {
    private final int maxConcurrentRequestCount;
    private final ThreadPoolExecutor threadExecutor;
    private final ExecutorCompletionService<Void> completionService;
    private FilerProto.Entry.Builder entry;
    private final FilerProto.Entry.Builder entry;
    private final boolean supportFlush = false; // true;
    private final ConcurrentLinkedDeque<WriteOperation> writeOperations;
    private long position;
    private boolean closed;
    private boolean supportFlush = true;
    private volatile IOException lastError;
    private long lastFlushOffset;
    private long lastTotalAppendOffset = 0;
    private byte[] buffer;
    private int bufferIndex;
    private ConcurrentLinkedDeque<WriteOperation> writeOperations;
    private ByteBuffer buffer;
    private long outputIndex;
    private String replication = "000";

    public SeaweedOutputStream(FilerGrpcClient filerGrpcClient, final Path path, FilerProto.Entry.Builder entry,
@ -50,18 +52,18 @@ public class SeaweedOutputStream extends OutputStream {
        this.lastError = null;
        this.lastFlushOffset = 0;
        this.bufferSize = bufferSize;
        this.buffer = new byte[bufferSize];
        this.bufferIndex = 0;
        this.buffer = ByteBufferPool.request(bufferSize);
        this.outputIndex = 0;
        this.writeOperations = new ConcurrentLinkedDeque<>();

        this.maxConcurrentRequestCount = 4 * Runtime.getRuntime().availableProcessors();
        this.maxConcurrentRequestCount = Runtime.getRuntime().availableProcessors();

        this.threadExecutor
            = new ThreadPoolExecutor(maxConcurrentRequestCount,
            maxConcurrentRequestCount,
            10L,
            TimeUnit.SECONDS,
            new LinkedBlockingQueue<Runnable>());
            = new ThreadPoolExecutor(maxConcurrentRequestCount,
                maxConcurrentRequestCount,
                120L,
                TimeUnit.SECONDS,
                new LinkedBlockingQueue<Runnable>());
        this.completionService = new ExecutorCompletionService<>(this.threadExecutor);

        this.entry = entry;
@ -69,9 +71,6 @@ public class SeaweedOutputStream extends OutputStream {
    }

    private synchronized void flushWrittenBytesToServiceInternal(final long offset) throws IOException {

        LOG.debug("SeaweedWrite.writeMeta path: {} entry:{}", path, entry);

        try {
            SeaweedWrite.writeMeta(filerGrpcClient, getParentDirectory(path), entry);
        } catch (Exception ex) {
@ -87,7 +86,7 @@ public class SeaweedOutputStream extends OutputStream {

    @Override
    public synchronized void write(final byte[] data, final int off, final int length)
        throws IOException {
            throws IOException {
        maybeThrowLastError();

        Preconditions.checkArgument(data != null, "null data");
@ -96,25 +95,29 @@ public class SeaweedOutputStream extends OutputStream {
            throw new IndexOutOfBoundsException();
        }

        // System.out.println(path + " write [" + (outputIndex + off) + "," + ((outputIndex + off) + length) + ")");

        int currentOffset = off;
        int writableBytes = bufferSize - bufferIndex;
        int writableBytes = bufferSize - buffer.position();
        int numberOfBytesToWrite = length;

        while (numberOfBytesToWrite > 0) {
            if (writableBytes <= numberOfBytesToWrite) {
                System.arraycopy(data, currentOffset, buffer, bufferIndex, writableBytes);
                bufferIndex += writableBytes;
                writeCurrentBufferToService();
                currentOffset += writableBytes;
                numberOfBytesToWrite = numberOfBytesToWrite - writableBytes;
            } else {
                System.arraycopy(data, currentOffset, buffer, bufferIndex, numberOfBytesToWrite);
                bufferIndex += numberOfBytesToWrite;
                numberOfBytesToWrite = 0;

            if (numberOfBytesToWrite < writableBytes) {
                buffer.put(data, currentOffset, numberOfBytesToWrite);
                outputIndex += numberOfBytesToWrite;
                break;
            }

            writableBytes = bufferSize - bufferIndex;
            // System.out.println(path + " [" + (outputIndex + currentOffset) + "," + ((outputIndex + currentOffset) + writableBytes) + ") " + buffer.capacity());
            buffer.put(data, currentOffset, writableBytes);
            outputIndex += writableBytes;
            currentOffset += writableBytes;
            writeCurrentBufferToService();
            numberOfBytesToWrite = numberOfBytesToWrite - writableBytes;
            writableBytes = bufferSize - buffer.position();
        }

    }
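    // Editor's note (illustrative only): the fill-then-flush loop above, reduced to its
    // core. Assumes a hypothetical flush(buf) callback that uploads the full buffer and
    // returns a fresh one, standing in for writeCurrentBufferToService().
    private static ByteBuffer putAll(ByteBuffer buf, byte[] data, java.util.function.Function<ByteBuffer, ByteBuffer> flush) {
        int off = 0;
        while (off < data.length) {
            int writable = Math.min(buf.remaining(), data.length - off);
            buf.put(data, off, writable);  // accumulate into the current buffer
            off += writable;
            if (!buf.hasRemaining()) {
                buf = flush.apply(buf);    // buffer full: upload it and get a fresh one
            }
        }
        return buf;
    }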
    /**
@ -150,8 +153,9 @@ public class SeaweedOutputStream extends OutputStream {
            threadExecutor.shutdown();
        } finally {
            lastError = new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
            ByteBufferPool.release(buffer);
            buffer = null;
            bufferIndex = 0;
            outputIndex = 0;
            closed = true;
            writeOperations.clear();
            if (!threadExecutor.isShutdown()) {
@ -161,35 +165,39 @@ public class SeaweedOutputStream extends OutputStream {
    }

    private synchronized void writeCurrentBufferToService() throws IOException {
        if (bufferIndex == 0) {
        if (buffer.position() == 0) {
            return;
        }

        final byte[] bytes = buffer;
        final int bytesLength = bufferIndex;
        position += submitWriteBufferToService(buffer, position);

        buffer = new byte[bufferSize];
        bufferIndex = 0;
        final long offset = position;
        position += bytesLength;
        buffer = ByteBufferPool.request(bufferSize);

        if (threadExecutor.getQueue().size() >= maxConcurrentRequestCount * 2) {
    }

    private synchronized int submitWriteBufferToService(final ByteBuffer bufferToWrite, final long writePosition) throws IOException {

        bufferToWrite.flip();
        int bytesLength = bufferToWrite.limit() - bufferToWrite.position();

        if (threadExecutor.getQueue().size() >= maxConcurrentRequestCount) {
            waitForTaskToComplete();
        }

        final Future<Void> job = completionService.submit(new Callable<Void>() {
            @Override
            public Void call() throws Exception {
                // originally: client.append(path, offset, bytes, 0, bytesLength);
                SeaweedWrite.writeData(entry, replication, filerGrpcClient, offset, bytes, 0, bytesLength);
                return null;
            }
        final Future<Void> job = completionService.submit(() -> {
            // System.out.println(path + " is going to save [" + (writePosition) + "," + ((writePosition) + bytesLength) + ")");
            SeaweedWrite.writeData(entry, replication, filerGrpcClient, writePosition, bufferToWrite.array(), bufferToWrite.position(), bufferToWrite.limit(), path.toUri().getPath());
            // System.out.println(path + " saved [" + (writePosition) + "," + ((writePosition) + bytesLength) + ")");
            ByteBufferPool.release(bufferToWrite);
            return null;
        });

        writeOperations.add(new WriteOperation(job, offset, bytesLength));
        writeOperations.add(new WriteOperation(job, writePosition, bytesLength));

        // Try to shrink the queue
        shrinkWriteOperationQueue();

        return bytesLength;

    }

    private void waitForTaskToComplete() throws IOException {
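    // Editor's note: a minimal sketch of the ByteBuffer.flip() step used in
    // submitWriteBufferToService above (illustrative only). flip() converts a
    // buffer that was being filled into one ready to be drained, so
    // limit() - position() is exactly the byte count to upload:
    //
    //     ByteBuffer buf = ByteBuffer.allocate(8);
    //     buf.put(new byte[]{1, 2, 3});
    //     buf.flip();
    //     assert buf.limit() - buf.position() == 3;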
@ -15,8 +15,8 @@
            <plugin>
                <artifactId>maven-compiler-plugin</artifactId>
                <configuration>
                    <source>7</source>
                    <target>7</target>
                    <source>8</source>
                    <target>8</target>
                </configuration>
            </plugin>
            <plugin>
@ -120,6 +120,188 @@
            </plugin>
        </plugins>
    </build>
    <dependencies>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-client</artifactId>
            <version>3.1.1</version>
            <scope>provided</scope>
            <exclusions>
                <exclusion>
                    <artifactId>hadoop-hdfs-client</artifactId>
                    <groupId>org.apache.hadoop</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>hadoop-yarn-api</artifactId>
                    <groupId>org.apache.hadoop</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>hadoop-yarn-client</artifactId>
                    <groupId>org.apache.hadoop</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>hadoop-mapreduce-client-core</artifactId>
                    <groupId>org.apache.hadoop</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>hadoop-mapreduce-client-jobclient</artifactId>
                    <groupId>org.apache.hadoop</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>hadoop-annotations</artifactId>
                    <groupId>org.apache.hadoop</groupId>
                </exclusion>
            </exclusions>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-common</artifactId>
            <version>3.1.1</version>
            <scope>provided</scope>
            <exclusions>
                <exclusion>
                    <artifactId>commons-cli</artifactId>
                    <groupId>commons-cli</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>commons-math3</artifactId>
                    <groupId>org.apache.commons</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>commons-io</artifactId>
                    <groupId>commons-io</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>commons-net</artifactId>
                    <groupId>commons-net</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>commons-collections</artifactId>
                    <groupId>commons-collections</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>javax.servlet-api</artifactId>
                    <groupId>javax.servlet</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>jetty-server</artifactId>
                    <groupId>org.eclipse.jetty</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>jetty-util</artifactId>
                    <groupId>org.eclipse.jetty</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>jetty-servlet</artifactId>
                    <groupId>org.eclipse.jetty</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>jetty-webapp</artifactId>
                    <groupId>org.eclipse.jetty</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>jsp-api</artifactId>
                    <groupId>javax.servlet.jsp</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>jersey-core</artifactId>
                    <groupId>com.sun.jersey</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>jersey-servlet</artifactId>
                    <groupId>com.sun.jersey</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>jersey-json</artifactId>
                    <groupId>com.sun.jersey</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>jersey-server</artifactId>
                    <groupId>com.sun.jersey</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>log4j</artifactId>
                    <groupId>log4j</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>commons-lang</artifactId>
                    <groupId>commons-lang</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>commons-beanutils</artifactId>
                    <groupId>commons-beanutils</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>commons-configuration2</artifactId>
                    <groupId>org.apache.commons</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>commons-lang3</artifactId>
                    <groupId>org.apache.commons</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>slf4j-log4j12</artifactId>
                    <groupId>org.slf4j</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>avro</artifactId>
                    <groupId>org.apache.avro</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>re2j</artifactId>
                    <groupId>com.google.re2j</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>hadoop-auth</artifactId>
                    <groupId>org.apache.hadoop</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>jsch</artifactId>
                    <groupId>com.jcraft</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>curator-client</artifactId>
                    <groupId>org.apache.curator</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>curator-recipes</artifactId>
                    <groupId>org.apache.curator</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>htrace-core4</artifactId>
                    <groupId>org.apache.htrace</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>zookeeper</artifactId>
                    <groupId>org.apache.zookeeper</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>commons-compress</artifactId>
                    <groupId>org.apache.commons</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>kerb-simplekdc</artifactId>
                    <groupId>org.apache.kerby</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>jackson-databind</artifactId>
                    <groupId>com.fasterxml.jackson.core</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>stax2-api</artifactId>
                    <groupId>org.codehaus.woodstox</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>woodstox-core</artifactId>
                    <groupId>com.fasterxml.woodstox</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>hadoop-annotations</artifactId>
                    <groupId>org.apache.hadoop</groupId>
                </exclusion>
            </exclusions>
        </dependency>
    </dependencies>
    <distributionManagement>
        <snapshotRepository>
            <id>ossrh</id>
@ -127,7 +309,7 @@
        </snapshotRepository>
    </distributionManagement>
    <properties>
        <seaweedfs.client.version>1.2.8</seaweedfs.client.version>
        <seaweedfs.client.version>1.5.2</seaweedfs.client.version>
        <hadoop.version>3.1.1</hadoop.version>
    </properties>
</project>
@ -5,7 +5,7 @@
    <modelVersion>4.0.0</modelVersion>

    <properties>
        <seaweedfs.client.version>1.2.8</seaweedfs.client.version>
        <seaweedfs.client.version>1.5.2</seaweedfs.client.version>
        <hadoop.version>3.1.1</hadoop.version>
    </properties>

@ -31,8 +31,8 @@
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-compiler-plugin</artifactId>
                <configuration>
                    <source>7</source>
                    <target>7</target>
                    <source>8</source>
                    <target>8</target>
                </configuration>
            </plugin>
            <plugin>
@ -147,6 +147,7 @@
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-client</artifactId>
            <version>${hadoop.version}</version>
            <scope>provided</scope>
        </dependency>
        <dependency>
            <groupId>com.github.chrislusf</groupId>
@ -157,6 +158,7 @@
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-common</artifactId>
            <version>${hadoop.version}</version>
            <scope>provided</scope>
        </dependency>
    </dependencies>

@ -1,137 +0,0 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package seaweed.hdfs;

import java.util.concurrent.CountDownLatch;

class ReadBuffer {

    private SeaweedInputStream stream;
    private long offset;                   // offset within the file for the buffer
    private int length;                    // actual length, set after the buffer is filled
    private int requestedLength;           // requested length of the read
    private byte[] buffer;                 // the buffer itself
    private int bufferindex = -1;          // index in the buffers array in Buffer manager
    private ReadBufferStatus status;       // status of the buffer
    private CountDownLatch latch = null;   // signaled when the buffer is done reading, so any client
    // waiting on this buffer gets unblocked

    // fields to help with eviction logic
    private long timeStamp = 0;  // tick at which buffer became available to read
    private boolean isFirstByteConsumed = false;
    private boolean isLastByteConsumed = false;
    private boolean isAnyByteConsumed = false;

    public SeaweedInputStream getStream() {
        return stream;
    }

    public void setStream(SeaweedInputStream stream) {
        this.stream = stream;
    }

    public long getOffset() {
        return offset;
    }

    public void setOffset(long offset) {
        this.offset = offset;
    }

    public int getLength() {
        return length;
    }

    public void setLength(int length) {
        this.length = length;
    }

    public int getRequestedLength() {
        return requestedLength;
    }

    public void setRequestedLength(int requestedLength) {
        this.requestedLength = requestedLength;
    }

    public byte[] getBuffer() {
        return buffer;
    }

    public void setBuffer(byte[] buffer) {
        this.buffer = buffer;
    }

    public int getBufferindex() {
        return bufferindex;
    }

    public void setBufferindex(int bufferindex) {
        this.bufferindex = bufferindex;
    }

    public ReadBufferStatus getStatus() {
        return status;
    }

    public void setStatus(ReadBufferStatus status) {
        this.status = status;
    }

    public CountDownLatch getLatch() {
        return latch;
    }

    public void setLatch(CountDownLatch latch) {
        this.latch = latch;
    }

    public long getTimeStamp() {
        return timeStamp;
    }

    public void setTimeStamp(long timeStamp) {
        this.timeStamp = timeStamp;
    }

    public boolean isFirstByteConsumed() {
        return isFirstByteConsumed;
    }

    public void setFirstByteConsumed(boolean isFirstByteConsumed) {
        this.isFirstByteConsumed = isFirstByteConsumed;
    }

    public boolean isLastByteConsumed() {
        return isLastByteConsumed;
    }

    public void setLastByteConsumed(boolean isLastByteConsumed) {
        this.isLastByteConsumed = isLastByteConsumed;
    }

    public boolean isAnyByteConsumed() {
        return isAnyByteConsumed;
    }

    public void setAnyByteConsumed(boolean isAnyByteConsumed) {
        this.isAnyByteConsumed = isAnyByteConsumed;
    }

}
@ -1,394 +0,0 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package seaweed.hdfs;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.Collection;
import java.util.LinkedList;
import java.util.Queue;
import java.util.Stack;
import java.util.concurrent.CountDownLatch;

/**
 * The Read Buffer Manager for Rest AbfsClient.
 */
final class ReadBufferManager {
    private static final Logger LOGGER = LoggerFactory.getLogger(ReadBufferManager.class);

    private static final int NUM_BUFFERS = 16;
    private static final int BLOCK_SIZE = 4 * 1024 * 1024;
    private static final int NUM_THREADS = 8;
    private static final int THRESHOLD_AGE_MILLISECONDS = 3000; // have to see if 3 seconds is a good threshold

    private Thread[] threads = new Thread[NUM_THREADS];
    private byte[][] buffers;    // array of byte[] buffers, to hold the data that is read
    private Stack<Integer> freeList = new Stack<>();   // indices in buffers[] array that are available

    private Queue<ReadBuffer> readAheadQueue = new LinkedList<>(); // queue of requests that are not picked up by any worker thread yet
    private LinkedList<ReadBuffer> inProgressList = new LinkedList<>(); // requests being processed by worker threads
    private LinkedList<ReadBuffer> completedReadList = new LinkedList<>(); // buffers available for reading
    private static final ReadBufferManager BUFFER_MANAGER; // singleton, initialized in static initialization block

    static {
        BUFFER_MANAGER = new ReadBufferManager();
        BUFFER_MANAGER.init();
    }

    static ReadBufferManager getBufferManager() {
        return BUFFER_MANAGER;
    }

    private void init() {
        buffers = new byte[NUM_BUFFERS][];
        for (int i = 0; i < NUM_BUFFERS; i++) {
            buffers[i] = new byte[BLOCK_SIZE];  // same buffers are reused. The byte array never goes back to GC
            freeList.add(i);
        }
        for (int i = 0; i < NUM_THREADS; i++) {
            Thread t = new Thread(new ReadBufferWorker(i));
            t.setDaemon(true);
            threads[i] = t;
            t.setName("SeaweedFS-prefetch-" + i);
            t.start();
        }
        ReadBufferWorker.UNLEASH_WORKERS.countDown();
    }

    // hide instance constructor
    private ReadBufferManager() {
    }


    /*
     *
     * SeaweedInputStream-facing methods
     *
     */


    /**
     * {@link SeaweedInputStream} calls this method to queue read-aheads.
     *
     * @param stream          The {@link SeaweedInputStream} for which to do the read-ahead
     * @param requestedOffset The offset in the file which should be read
     * @param requestedLength The length to read
     */
    void queueReadAhead(final SeaweedInputStream stream, final long requestedOffset, final int requestedLength) {
        if (LOGGER.isTraceEnabled()) {
            LOGGER.trace("Start Queueing readAhead for {} offset {} length {}",
                    stream.getPath(), requestedOffset, requestedLength);
        }
        ReadBuffer buffer;
        synchronized (this) {
            if (isAlreadyQueued(stream, requestedOffset)) {
                return; // already queued, do not queue again
            }
            if (freeList.isEmpty() && !tryEvict()) {
                return; // no buffers available, cannot queue anything
            }

            buffer = new ReadBuffer();
            buffer.setStream(stream);
            buffer.setOffset(requestedOffset);
            buffer.setLength(0);
            buffer.setRequestedLength(requestedLength);
            buffer.setStatus(ReadBufferStatus.NOT_AVAILABLE);
            buffer.setLatch(new CountDownLatch(1));

            Integer bufferIndex = freeList.pop();  // will return a value, since we have checked size > 0 already

            buffer.setBuffer(buffers[bufferIndex]);
            buffer.setBufferindex(bufferIndex);
            readAheadQueue.add(buffer);
            notifyAll();
        }
        if (LOGGER.isTraceEnabled()) {
            LOGGER.trace("Done q-ing readAhead for file {} offset {} buffer idx {}",
                    stream.getPath(), requestedOffset, buffer.getBufferindex());
        }
    }
/**
|
||||
* {@link SeaweedInputStream} calls this method read any bytes already available in a buffer (thereby saving a
|
||||
* remote read). This returns the bytes if the data already exists in buffer. If there is a buffer that is reading
|
||||
* the requested offset, then this method blocks until that read completes. If the data is queued in a read-ahead
|
||||
* but not picked up by a worker thread yet, then it cancels that read-ahead and reports cache miss. This is because
|
||||
* depending on worker thread availability, the read-ahead may take a while - the calling thread can do it's own
|
||||
* read to get the data faster (copmared to the read waiting in queue for an indeterminate amount of time).
|
||||
*
|
||||
* @param stream the file to read bytes for
|
||||
* @param position the offset in the file to do a read for
|
||||
* @param length the length to read
|
||||
* @param buffer the buffer to read data into. Note that the buffer will be written into from offset 0.
|
||||
* @return the number of bytes read
|
||||
*/
|
||||
int getBlock(final SeaweedInputStream stream, final long position, final int length, final byte[] buffer) {
|
||||
// not synchronized, so have to be careful with locking
|
||||
if (LOGGER.isTraceEnabled()) {
|
||||
LOGGER.trace("getBlock for file {} position {} thread {}",
|
||||
stream.getPath(), position, Thread.currentThread().getName());
|
||||
}
|
||||
|
||||
waitForProcess(stream, position);
|
||||
|
||||
int bytesRead = 0;
|
||||
synchronized (this) {
|
||||
bytesRead = getBlockFromCompletedQueue(stream, position, length, buffer);
|
||||
}
|
||||
if (bytesRead > 0) {
|
||||
if (LOGGER.isTraceEnabled()) {
|
||||
LOGGER.trace("Done read from Cache for {} position {} length {}",
|
||||
stream.getPath(), position, bytesRead);
|
||||
}
|
||||
return bytesRead;
|
||||
}
|
||||
|
||||
// otherwise, just say we got nothing - calling thread can do its own read
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
*
|
||||
* Internal methods
|
||||
*
|
||||
*/
|
||||
|
||||
private void waitForProcess(final SeaweedInputStream stream, final long position) {
|
||||
ReadBuffer readBuf;
|
||||
synchronized (this) {
|
||||
clearFromReadAheadQueue(stream, position);
|
||||
readBuf = getFromList(inProgressList, stream, position);
|
||||
}
|
||||
if (readBuf != null) { // if in in-progress queue, then block for it
|
||||
try {
|
||||
if (LOGGER.isTraceEnabled()) {
|
||||
LOGGER.trace("got a relevant read buffer for file {} offset {} buffer idx {}",
|
||||
stream.getPath(), readBuf.getOffset(), readBuf.getBufferindex());
|
||||
}
|
||||
readBuf.getLatch().await(); // blocking wait on the caller stream's thread
|
||||
// Note on correctness: readBuf gets out of inProgressList only in 1 place: after worker thread
|
||||
// is done processing it (in doneReading). There, the latch is set after removing the buffer from
|
||||
// inProgressList. So this latch is safe to be outside the synchronized block.
|
||||
// Putting it in synchronized would result in a deadlock, since this thread would be holding the lock
|
||||
// while waiting, so no one will be able to change any state. If this becomes more complex in the future,
|
||||
// then the latch cane be removed and replaced with wait/notify whenever inProgressList is touched.
|
||||
} catch (InterruptedException ex) {
|
||||
Thread.currentThread().interrupt();
|
||||
}
|
||||
if (LOGGER.isTraceEnabled()) {
|
||||
LOGGER.trace("latch done for file {} buffer idx {} length {}",
|
||||
stream.getPath(), readBuf.getBufferindex(), readBuf.getLength());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* If any buffer in the completedlist can be reclaimed then reclaim it and return the buffer to free list.
|
||||
* The objective is to find just one buffer - there is no advantage to evicting more than one.
|
||||
*
|
||||
* @return whether the eviction succeeeded - i.e., were we able to free up one buffer
|
||||
*/
|
||||
private synchronized boolean tryEvict() {
|
||||
ReadBuffer nodeToEvict = null;
|
||||
if (completedReadList.size() <= 0) {
|
||||
return false; // there are no evict-able buffers
|
||||
}
|
||||
|
||||
// first, try buffers where all bytes have been consumed (approximated as first and last bytes consumed)
|
||||
for (ReadBuffer buf : completedReadList) {
|
||||
if (buf.isFirstByteConsumed() && buf.isLastByteConsumed()) {
|
||||
nodeToEvict = buf;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (nodeToEvict != null) {
|
||||
return evict(nodeToEvict);
|
||||
}
|
||||
|
||||
// next, try buffers where any bytes have been consumed (may be a bad idea? have to experiment and see)
|
||||
for (ReadBuffer buf : completedReadList) {
|
||||
if (buf.isAnyByteConsumed()) {
|
||||
nodeToEvict = buf;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (nodeToEvict != null) {
|
||||
return evict(nodeToEvict);
|
||||
}
|
||||
|
||||
// next, try any old nodes that have not been consumed
|
||||
long earliestBirthday = Long.MAX_VALUE;
|
||||
for (ReadBuffer buf : completedReadList) {
|
||||
if (buf.getTimeStamp() < earliestBirthday) {
|
||||
nodeToEvict = buf;
|
||||
earliestBirthday = buf.getTimeStamp();
|
||||
}
|
||||
}
|
||||
if ((currentTimeMillis() - earliestBirthday > THRESHOLD_AGE_MILLISECONDS) && (nodeToEvict != null)) {
|
||||
return evict(nodeToEvict);
|
||||
}
|
||||
|
||||
// nothing can be evicted
|
||||
return false;
|
||||
}
|
||||
|
||||
private boolean evict(final ReadBuffer buf) {
|
||||
freeList.push(buf.getBufferindex());
|
||||
completedReadList.remove(buf);
|
||||
if (LOGGER.isTraceEnabled()) {
|
||||
LOGGER.trace("Evicting buffer idx {}; was used for file {} offset {} length {}",
|
||||
buf.getBufferindex(), buf.getStream().getPath(), buf.getOffset(), buf.getLength());
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
private boolean isAlreadyQueued(final SeaweedInputStream stream, final long requestedOffset) {
|
||||
// returns true if any part of the buffer is already queued
|
||||
return (isInList(readAheadQueue, stream, requestedOffset)
|
||||
|| isInList(inProgressList, stream, requestedOffset)
|
||||
|| isInList(completedReadList, stream, requestedOffset));
|
||||
}
|
||||
|
||||
private boolean isInList(final Collection<ReadBuffer> list, final SeaweedInputStream stream, final long requestedOffset) {
|
||||
return (getFromList(list, stream, requestedOffset) != null);
|
||||
}
|
||||
|
||||
private ReadBuffer getFromList(final Collection<ReadBuffer> list, final SeaweedInputStream stream, final long requestedOffset) {
|
||||
for (ReadBuffer buffer : list) {
|
||||
if (buffer.getStream() == stream) {
|
||||
if (buffer.getStatus() == ReadBufferStatus.AVAILABLE
|
||||
&& requestedOffset >= buffer.getOffset()
|
||||
&& requestedOffset < buffer.getOffset() + buffer.getLength()) {
|
||||
return buffer;
|
||||
} else if (requestedOffset >= buffer.getOffset()
|
||||
&& requestedOffset < buffer.getOffset() + buffer.getRequestedLength()) {
|
||||
return buffer;
|
||||
}
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
private void clearFromReadAheadQueue(final SeaweedInputStream stream, final long requestedOffset) {
|
||||
ReadBuffer buffer = getFromList(readAheadQueue, stream, requestedOffset);
|
||||
if (buffer != null) {
|
||||
readAheadQueue.remove(buffer);
|
||||
notifyAll(); // lock is held in calling method
|
||||
freeList.push(buffer.getBufferindex());
|
||||
}
|
||||
}
|
||||
|
||||
private int getBlockFromCompletedQueue(final SeaweedInputStream stream, final long position, final int length,
|
||||
final byte[] buffer) {
|
||||
ReadBuffer buf = getFromList(completedReadList, stream, position);
|
||||
if (buf == null || position >= buf.getOffset() + buf.getLength()) {
|
||||
return 0;
|
||||
}
|
||||
int cursor = (int) (position - buf.getOffset());
|
||||
int availableLengthInBuffer = buf.getLength() - cursor;
|
||||
int lengthToCopy = Math.min(length, availableLengthInBuffer);
|
||||
System.arraycopy(buf.getBuffer(), cursor, buffer, 0, lengthToCopy);
|
||||
if (cursor == 0) {
|
||||
buf.setFirstByteConsumed(true);
|
||||
}
|
||||
if (cursor + lengthToCopy == buf.getLength()) {
|
||||
buf.setLastByteConsumed(true);
|
||||
}
|
||||
buf.setAnyByteConsumed(true);
|
||||
return lengthToCopy;
|
||||
}
|
||||
|
||||
/*
|
||||
*
|
||||
* ReadBufferWorker-thread-facing methods
|
||||
*
|
||||
*/
|
||||
|
||||
/**
|
||||
* ReadBufferWorker thread calls this to get the next buffer that it should work on.
|
||||
*
|
||||
* @return {@link ReadBuffer}
|
||||
* @throws InterruptedException if thread is interrupted
|
||||
*/
|
||||
ReadBuffer getNextBlockToRead() throws InterruptedException {
|
||||
ReadBuffer buffer = null;
|
||||
synchronized (this) {
|
||||
//buffer = readAheadQueue.take(); // blocking method
|
||||
while (readAheadQueue.size() == 0) {
|
||||
wait();
|
||||
}
|
||||
buffer = readAheadQueue.remove();
|
||||
notifyAll();
|
||||
if (buffer == null) {
|
||||
return null; // should never happen
|
||||
}
|
||||
buffer.setStatus(ReadBufferStatus.READING_IN_PROGRESS);
|
||||
inProgressList.add(buffer);
|
||||
}
|
||||
if (LOGGER.isTraceEnabled()) {
|
||||
LOGGER.trace("ReadBufferWorker picked file {} for offset {}",
|
||||
buffer.getStream().getPath(), buffer.getOffset());
|
||||
}
|
||||
return buffer;
|
||||
}
|
||||
|
||||
/**
|
||||
* ReadBufferWorker thread calls this method to post completion.
|
||||
*
|
||||
* @param buffer the buffer whose read was completed
|
||||
* @param result the {@link ReadBufferStatus} after the read operation in the worker thread
|
||||
* @param bytesActuallyRead the number of bytes that the worker thread was actually able to read
|
||||
*/
|
||||
void doneReading(final ReadBuffer buffer, final ReadBufferStatus result, final int bytesActuallyRead) {
|
||||
if (LOGGER.isTraceEnabled()) {
|
||||
LOGGER.trace("ReadBufferWorker completed file {} for offset {} bytes {}",
|
||||
buffer.getStream().getPath(), buffer.getOffset(), bytesActuallyRead);
|
||||
}
|
||||
synchronized (this) {
|
||||
inProgressList.remove(buffer);
|
||||
if (result == ReadBufferStatus.AVAILABLE && bytesActuallyRead > 0) {
|
||||
buffer.setStatus(ReadBufferStatus.AVAILABLE);
|
||||
buffer.setTimeStamp(currentTimeMillis());
|
||||
buffer.setLength(bytesActuallyRead);
|
||||
completedReadList.add(buffer);
|
||||
} else {
|
||||
freeList.push(buffer.getBufferindex());
|
||||
// buffer should go out of scope after the end of the calling method in ReadBufferWorker, and eligible for GC
|
||||
}
|
||||
}
|
||||
//outside the synchronized, since anyone receiving a wake-up from the latch must see safe-published results
|
||||
buffer.getLatch().countDown(); // wake up waiting threads (if any)
|
||||
}
|
||||
|
||||
/**
|
||||
* Similar to System.currentTimeMillis, except implemented with System.nanoTime().
|
||||
* System.currentTimeMillis can go backwards when system clock is changed (e.g., with NTP time synchronization),
|
||||
* making it unsuitable for measuring time intervals. nanotime is strictly monotonically increasing per CPU core.
|
||||
* Note: it is not monotonic across Sockets, and even within a CPU, its only the
|
||||
* more recent parts which share a clock across all cores.
|
||||
*
|
||||
* @return current time in milliseconds
|
||||
*/
|
||||
private long currentTimeMillis() {
|
||||
return System.nanoTime() / 1000 / 1000;
|
||||
}
|
||||
}
|
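For orientation, here is a minimal sketch of how a caller such as SeaweedInputStream is expected to drive this manager: queue a read-ahead for the next block, probe getBlock() for cached bytes, and fall back to its own remote read on a miss. The offset and size values are illustrative only, not part of this commit:

// Hypothetical caller-side flow (illustrative values):
// SeaweedInputStream stream = ...;
// long offset = 0;
// int blockSize = 4 * 1024 * 1024;
// ReadBufferManager manager = ReadBufferManager.getBufferManager();
// manager.queueReadAhead(stream, offset + blockSize, blockSize); // prefetch the next block
// byte[] dest = new byte[blockSize];
// int got = manager.getBlock(stream, offset, blockSize, dest);   // returns 0 on a cache miss
// if (got == 0) {
//     got = stream.readRemote(offset, dest, 0, blockSize);       // caller does its own read
// }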
@ -1,70 +0,0 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package seaweed.hdfs;

import java.util.concurrent.CountDownLatch;

class ReadBufferWorker implements Runnable {

    protected static final CountDownLatch UNLEASH_WORKERS = new CountDownLatch(1);
    private int id;

    ReadBufferWorker(final int id) {
        this.id = id;
    }

    /**
     * return the ID of ReadBufferWorker.
     */
    public int getId() {
        return this.id;
    }

    /**
     * Waits until a buffer becomes available in ReadAheadQueue.
     * Once a buffer becomes available, reads the file specified in it and then posts results back to buffer manager.
     * Rinse and repeat. Forever.
     */
    public void run() {
        try {
            UNLEASH_WORKERS.await();
        } catch (InterruptedException ex) {
            Thread.currentThread().interrupt();
        }
        ReadBufferManager bufferManager = ReadBufferManager.getBufferManager();
        ReadBuffer buffer;
        while (true) {
            try {
                buffer = bufferManager.getNextBlockToRead(); // blocks, until a buffer is available for this thread
            } catch (InterruptedException ex) {
                Thread.currentThread().interrupt();
                return;
            }
            if (buffer != null) {
                try {
                    // do the actual read, from the file.
                    int bytesRead = buffer.getStream().readRemote(buffer.getOffset(), buffer.getBuffer(), 0, buffer.getRequestedLength());
                    bufferManager.doneReading(buffer, ReadBufferStatus.AVAILABLE, bytesRead); // post result back to ReadBufferManager
                } catch (Exception ex) {
                    bufferManager.doneReading(buffer, ReadBufferStatus.READ_FAILED, 0);
                }
            }
        }
    }
}
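The UNLEASH_WORKERS field above is a standard CountDownLatch start gate: every worker parks on await() until initialization finishes and countDown() opens the gate once. A minimal standalone sketch of the pattern, with hypothetical names:

import java.util.concurrent.CountDownLatch;

class StartGateDemo {
    static final CountDownLatch GATE = new CountDownLatch(1);

    public static void main(String[] args) {
        for (int i = 0; i < 4; i++) {
            new Thread(() -> {
                try {
                    GATE.await(); // every worker blocks here until the gate opens
                } catch (InterruptedException ex) {
                    Thread.currentThread().interrupt();
                }
                // ... the worker loop would start here ...
            }).start();
        }
        GATE.countDown(); // open the gate once shared state is fully initialized
    }
}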
@ -18,12 +18,18 @@

package seaweed.hdfs;

/**
 * The ReadBufferStatus for Rest AbfsClient
 */
public enum ReadBufferStatus {
    NOT_AVAILABLE,       // buffers sitting in readaheadqueue have this status
    READING_IN_PROGRESS, // reading is in progress on this buffer. Buffer should be in inProgressList
    AVAILABLE,           // data is available in buffer. It should be in completedList
    READ_FAILED          // read completed, but failed.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.DelegateToFileSystem;

import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;

public class SeaweedAbstractFileSystem extends DelegateToFileSystem {

    SeaweedAbstractFileSystem(final URI uri, final Configuration conf)
        throws IOException, URISyntaxException {
        super(uri, new SeaweedFileSystem(), conf, "seaweedfs", false);
    }

}
@ -5,31 +5,29 @@ import org.apache.hadoop.fs.*;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.Progressable;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import seaweedfs.client.FilerProto;

import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.URI;
import java.util.EnumSet;
import java.util.List;
import java.util.Map;

import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;

public class SeaweedFileSystem extends FileSystem {

    public static final int FS_SEAWEED_DEFAULT_PORT = 8888;
    public static final String FS_SEAWEED_FILER_HOST = "fs.seaweed.filer.host";
    public static final String FS_SEAWEED_FILER_PORT = "fs.seaweed.filer.port";
    public static final int FS_SEAWEED_DEFAULT_PORT = 8888;
    public static final String FS_SEAWEED_BUFFER_SIZE = "fs.seaweed.buffer.size";
    public static final int FS_SEAWEED_DEFAULT_BUFFER_SIZE = 4 * 1024 * 1024;

    private static final Logger LOG = LoggerFactory.getLogger(SeaweedFileSystem.class);
    private static int BUFFER_SIZE = 16 * 1024 * 1024;

    private URI uri;
    private Path workingDirectory = new Path("/");
@ -60,12 +58,10 @@ public class SeaweedFileSystem extends FileSystem {
        port = (port == -1) ? FS_SEAWEED_DEFAULT_PORT : port;
        conf.setInt(FS_SEAWEED_FILER_PORT, port);

        conf.setInt(IO_FILE_BUFFER_SIZE_KEY, BUFFER_SIZE);

        setConf(conf);
        this.uri = uri;

        seaweedFileSystemStore = new SeaweedFileSystemStore(host, port);
        seaweedFileSystemStore = new SeaweedFileSystemStore(host, port, conf);

    }
@ -77,8 +73,9 @@ public class SeaweedFileSystem extends FileSystem {
        path = qualify(path);

        try {
            InputStream inputStream = seaweedFileSystemStore.openFileForRead(path, statistics, bufferSize);
            return new FSDataInputStream(inputStream);
            int seaweedBufferSize = this.getConf().getInt(FS_SEAWEED_BUFFER_SIZE, FS_SEAWEED_DEFAULT_BUFFER_SIZE);
            FSInputStream inputStream = seaweedFileSystemStore.openFileForRead(path, statistics);
            return new FSDataInputStream(new BufferedFSInputStream(inputStream, 4 * seaweedBufferSize));
        } catch (Exception ex) {
            LOG.warn("open path: {} bufferSize:{}", path, bufferSize, ex);
            return null;
@ -95,7 +92,8 @@ public class SeaweedFileSystem extends FileSystem {

        try {
            String replicaPlacement = String.format("%03d", replication - 1);
            OutputStream outputStream = seaweedFileSystemStore.createFile(path, overwrite, permission, bufferSize, replicaPlacement);
            int seaweedBufferSize = this.getConf().getInt(FS_SEAWEED_BUFFER_SIZE, FS_SEAWEED_DEFAULT_BUFFER_SIZE);
            OutputStream outputStream = seaweedFileSystemStore.createFile(path, overwrite, permission, seaweedBufferSize, replicaPlacement);
            return new FSDataOutputStream(outputStream, statistics);
        } catch (Exception ex) {
            LOG.warn("create path: {} bufferSize:{} blockSize:{}", path, bufferSize, blockSize, ex);
@ -105,8 +103,9 @@ public class SeaweedFileSystem extends FileSystem {

    /**
     * {@inheritDoc}
     *
     * @throws FileNotFoundException if the parent directory is not present -or
     * is not a directory.
     *                               is not a directory.
     */
    @Override
    public FSDataOutputStream createNonRecursive(Path path,
@ -123,9 +122,10 @@ public class SeaweedFileSystem extends FileSystem {
                throw new FileAlreadyExistsException("Not a directory: " + parent);
            }
        }
        int seaweedBufferSize = this.getConf().getInt(FS_SEAWEED_BUFFER_SIZE, FS_SEAWEED_DEFAULT_BUFFER_SIZE);
        return create(path, permission,
            flags.contains(CreateFlag.OVERWRITE), bufferSize,
            replication, blockSize, progress);
            replication, seaweedBufferSize, progress);
    }

    @Override
@ -135,7 +135,8 @@ public class SeaweedFileSystem extends FileSystem {

        path = qualify(path);
        try {
            OutputStream outputStream = seaweedFileSystemStore.createFile(path, false, null, bufferSize, "");
            int seaweedBufferSize = this.getConf().getInt(FS_SEAWEED_BUFFER_SIZE, FS_SEAWEED_DEFAULT_BUFFER_SIZE);
            OutputStream outputStream = seaweedFileSystemStore.createFile(path, false, null, seaweedBufferSize, "");
            return new FSDataOutputStream(outputStream, statistics);
        } catch (Exception ex) {
            LOG.warn("append path: {} bufferSize:{}", path, bufferSize, ex);
@ -144,7 +145,7 @@ public class SeaweedFileSystem extends FileSystem {
    }

    @Override
    public boolean rename(Path src, Path dst) {
    public boolean rename(Path src, Path dst) throws IOException {

        LOG.debug("rename path: {} => {}", src, dst);

@ -155,12 +156,13 @@ public class SeaweedFileSystem extends FileSystem {
        if (src.equals(dst)) {
            return true;
        }
        FileStatus dstFileStatus = getFileStatus(dst);
        FilerProto.Entry entry = seaweedFileSystemStore.lookupEntry(dst);

        String sourceFileName = src.getName();
        Path adjustedDst = dst;

        if (dstFileStatus != null) {
        if (entry != null) {
            FileStatus dstFileStatus = getFileStatus(dst);
            String sourceFileName = src.getName();
            if (!dstFileStatus.isDirectory()) {
                return false;
            }
@ -175,18 +177,20 @@ public class SeaweedFileSystem extends FileSystem {
    }

    @Override
    public boolean delete(Path path, boolean recursive) {
    public boolean delete(Path path, boolean recursive) throws IOException {

        LOG.debug("delete path: {} recursive:{}", path, recursive);

        path = qualify(path);

        FileStatus fileStatus = getFileStatus(path);
        FilerProto.Entry entry = seaweedFileSystemStore.lookupEntry(path);

        if (fileStatus == null) {
        if (entry == null) {
            return true;
        }

        FileStatus fileStatus = getFileStatus(path);

        return seaweedFileSystemStore.deleteEntries(path, fileStatus.isDirectory(), recursive);

    }
@ -222,9 +226,9 @@ public class SeaweedFileSystem extends FileSystem {

        path = qualify(path);

        FileStatus fileStatus = getFileStatus(path);
        FilerProto.Entry entry = seaweedFileSystemStore.lookupEntry(path);

        if (fileStatus == null) {
        if (entry == null) {

            UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
            return seaweedFileSystemStore.createDirectory(path, currentUser,
@ -233,6 +237,8 @@ public class SeaweedFileSystem extends FileSystem {

        }

        FileStatus fileStatus = getFileStatus(path);

        if (fileStatus.isDirectory()) {
            return true;
        } else {
@ -241,7 +247,7 @@ public class SeaweedFileSystem extends FileSystem {
    }

    @Override
    public FileStatus getFileStatus(Path path) {
    public FileStatus getFileStatus(Path path) throws IOException {

        LOG.debug("getFileStatus path: {}", path);

@ -335,9 +341,7 @@ public class SeaweedFileSystem extends FileSystem {

    @Override
    public void createSymlink(final Path target, final Path link,
                              final boolean createParent) throws AccessControlException,
        FileAlreadyExistsException, FileNotFoundException,
        ParentNotDirectoryException, UnsupportedFileSystemException,
                              final boolean createParent) throws
        IOException {
        // Supporting filesystems should override this method
        throw new UnsupportedOperationException(
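The repeated getConf().getInt(FS_SEAWEED_BUFFER_SIZE, FS_SEAWEED_DEFAULT_BUFFER_SIZE) calls above follow the usual Hadoop Configuration pattern introduced by this change. A minimal sketch of how a deployment might override the buffer size; the property name comes from this diff, while the 8 MB value and class name are illustrative:

import org.apache.hadoop.conf.Configuration;

public class BufferSizeDemo {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // "fs.seaweed.buffer.size" is FS_SEAWEED_BUFFER_SIZE; 8 MB here is only an example.
        conf.setInt("fs.seaweed.buffer.size", 8 * 1024 * 1024);
        // Falls back to the 4 MB default when the property is unset.
        int seaweedBufferSize = conf.getInt("fs.seaweed.buffer.size", 4 * 1024 * 1024);
        System.out.println("effective buffer size: " + seaweedBufferSize);
    }
}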
@ -1,5 +1,7 @@
package seaweed.hdfs;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@ -7,30 +9,31 @@ import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.security.UserGroupInformation;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import seaweedfs.client.FilerClient;
import seaweedfs.client.FilerGrpcClient;
import seaweedfs.client.FilerProto;
import seaweedfs.client.SeaweedRead;
import seaweedfs.client.*;

import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import static seaweed.hdfs.SeaweedFileSystem.FS_SEAWEED_BUFFER_SIZE;
import static seaweed.hdfs.SeaweedFileSystem.FS_SEAWEED_DEFAULT_BUFFER_SIZE;

public class SeaweedFileSystemStore {

    private static final Logger LOG = LoggerFactory.getLogger(SeaweedFileSystemStore.class);

    private FilerGrpcClient filerGrpcClient;
    private FilerClient filerClient;
    private Configuration conf;

    public SeaweedFileSystemStore(String host, int port) {
    public SeaweedFileSystemStore(String host, int port, Configuration conf) {
        int grpcPort = 10000 + port;
        filerGrpcClient = new FilerGrpcClient(host, grpcPort);
        filerClient = new FilerClient(filerGrpcClient);
        this.conf = conf;
    }

    public static String getParentDirectory(Path path) {
@ -61,7 +64,7 @@ public class SeaweedFileSystemStore {
        );
    }

    public FileStatus[] listEntries(final Path path) {
    public FileStatus[] listEntries(final Path path) throws IOException {
        LOG.debug("listEntries path: {}", path);

        FileStatus pathStatus = getFileStatus(path);
@ -89,11 +92,11 @@ public class SeaweedFileSystemStore {

    }

    public FileStatus getFileStatus(final Path path) {
    public FileStatus getFileStatus(final Path path) throws IOException {

        FilerProto.Entry entry = lookupEntry(path);
        if (entry == null) {
            return null;
            throw new FileNotFoundException("File does not exist: " + path);
        }
        LOG.debug("doGetFileStatus path:{} entry:{}", path, entry);

@ -123,10 +126,10 @@ public class SeaweedFileSystemStore {

    private FileStatus doGetFileStatus(Path path, FilerProto.Entry entry) {
        FilerProto.FuseAttributes attributes = entry.getAttributes();
        long length = SeaweedRead.totalSize(entry.getChunksList());
        long length = SeaweedRead.fileSize(entry);
        boolean isDir = entry.getIsDirectory();
        int block_replication = 1;
        int blocksize = 512;
        int blocksize = this.conf.getInt(FS_SEAWEED_BUFFER_SIZE, FS_SEAWEED_DEFAULT_BUFFER_SIZE);
        long modification_time = attributes.getMtime() * 1000; // milliseconds
        long access_time = 0;
        FsPermission permission = FsPermission.createImmutable((short) attributes.getFileMode());
@ -136,7 +139,7 @@ public class SeaweedFileSystemStore {
            modification_time, access_time, permission, owner, group, null, path);
    }

    private FilerProto.Entry lookupEntry(Path path) {
    public FilerProto.Entry lookupEntry(Path path) {

        return filerClient.lookupEntry(getParentDirectory(path), path.getName());

@ -184,7 +187,7 @@ public class SeaweedFileSystemStore {
                entry.mergeFrom(existingEntry);
                entry.getAttributesBuilder().setMtime(now);
                LOG.debug("createFile merged entry path:{} entry:{} from:{}", path, entry, existingEntry);
                writePosition = SeaweedRead.totalSize(existingEntry.getChunksList());
                writePosition = SeaweedRead.fileSize(existingEntry);
                replication = existingEntry.getAttributes().getReplication();
            }
        }
@ -201,18 +204,17 @@ public class SeaweedFileSystemStore {
                .clearGroupName()
                .addAllGroupName(Arrays.asList(userGroupInformation.getGroupNames()))
            );
            SeaweedWrite.writeMeta(filerGrpcClient, getParentDirectory(path), entry);
        }

        return new SeaweedOutputStream(filerGrpcClient, path, entry, writePosition, bufferSize, replication);

    }

    public InputStream openFileForRead(final Path path, FileSystem.Statistics statistics,
                                       int bufferSize) throws IOException {
    public FSInputStream openFileForRead(final Path path, FileSystem.Statistics statistics) throws IOException {

        LOG.debug("openFileForRead path:{} bufferSize:{}", path, bufferSize);
        LOG.debug("openFileForRead path:{}", path);

        int readAheadQueueDepth = 2;
        FilerProto.Entry entry = lookupEntry(path);

        if (entry == null) {
@ -222,9 +224,7 @@ public class SeaweedFileSystemStore {
        return new SeaweedInputStream(filerGrpcClient,
            statistics,
            path.toUri().getPath(),
            entry,
            bufferSize,
            readAheadQueueDepth);
            entry);
    }

    public void setOwner(Path path, String owner, String group) {
@ -2,7 +2,6 @@ package seaweed.hdfs;

// based on org.apache.hadoop.fs.azurebfs.services.AbfsInputStream

import com.google.common.base.Preconditions;
import org.apache.hadoop.fs.FSExceptionMessages;
import org.apache.hadoop.fs.FSInputStream;
import org.apache.hadoop.fs.FileSystem.Statistics;
@ -26,36 +25,23 @@ public class SeaweedInputStream extends FSInputStream {
    private final FilerProto.Entry entry;
    private final List<SeaweedRead.VisibleInterval> visibleIntervalList;
    private final long contentLength;
    private final int bufferSize; // default buffer size
    private final int readAheadQueueDepth; // initialized in constructor
    private final boolean readAheadEnabled; // whether enable readAhead;

    private byte[] buffer = null; // will be initialized on first use
    private long position = 0; // cursor of the file

    private long fCursor = 0; // cursor of buffer within file - offset of next byte to read from remote server
    private long fCursorAfterLastRead = -1;
    private int bCursor = 0; // cursor of read within buffer - offset of next byte to be returned from buffer
    private int limit = 0; // offset of next byte to be read into buffer from service (i.e., upper marker+1
    // of valid bytes in buffer)
    private boolean closed = false;

    public SeaweedInputStream(
        final FilerGrpcClient filerGrpcClient,
        final Statistics statistics,
        final String path,
        final FilerProto.Entry entry,
        final int bufferSize,
        final int readAheadQueueDepth) {
        final FilerGrpcClient filerGrpcClient,
        final Statistics statistics,
        final String path,
        final FilerProto.Entry entry) throws IOException {
        this.filerGrpcClient = filerGrpcClient;
        this.statistics = statistics;
        this.path = path;
        this.entry = entry;
        this.contentLength = SeaweedRead.totalSize(entry.getChunksList());
        this.bufferSize = bufferSize;
        this.readAheadQueueDepth = (readAheadQueueDepth >= 0) ? readAheadQueueDepth : Runtime.getRuntime().availableProcessors();
        this.readAheadEnabled = true;
        this.contentLength = SeaweedRead.fileSize(entry);

        this.visibleIntervalList = SeaweedRead.nonOverlappingVisibleIntervals(entry.getChunksList());
        this.visibleIntervalList = SeaweedRead.nonOverlappingVisibleIntervals(filerGrpcClient, entry.getChunksList());

        LOG.debug("new path:{} entry:{} visibleIntervalList:{}", path, entry, visibleIntervalList);

@ -78,122 +64,7 @@ public class SeaweedInputStream extends FSInputStream {

    @Override
    public synchronized int read(final byte[] b, final int off, final int len) throws IOException {
        int currentOff = off;
        int currentLen = len;
        int lastReadBytes;
        int totalReadBytes = 0;
        do {
            lastReadBytes = readOneBlock(b, currentOff, currentLen);
            if (lastReadBytes > 0) {
                currentOff += lastReadBytes;
                currentLen -= lastReadBytes;
                totalReadBytes += lastReadBytes;
            }
            if (currentLen <= 0 || currentLen > b.length - currentOff) {
                break;
            }
        } while (lastReadBytes > 0);
        return totalReadBytes > 0 ? totalReadBytes : lastReadBytes;
    }

    private int readOneBlock(final byte[] b, final int off, final int len) throws IOException {
        if (closed) {
            throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
        }

        Preconditions.checkNotNull(b);

        if (len == 0) {
            return 0;
        }

        if (this.available() == 0) {
            return -1;
        }

        if (off < 0 || len < 0 || len > b.length - off) {
            throw new IndexOutOfBoundsException();
        }

        //If buffer is empty, then fill the buffer.
        if (bCursor == limit) {
            //If EOF, then return -1
            if (fCursor >= contentLength) {
                return -1;
            }

            long bytesRead = 0;
            //reset buffer to initial state - i.e., throw away existing data
            bCursor = 0;
            limit = 0;
            if (buffer == null) {
                buffer = new byte[bufferSize];
            }

            // Enable readAhead when reading sequentially
            if (-1 == fCursorAfterLastRead || fCursorAfterLastRead == fCursor || b.length >= bufferSize) {
                bytesRead = readInternal(fCursor, buffer, 0, bufferSize, false);
            } else {
                bytesRead = readInternal(fCursor, buffer, 0, b.length, true);
            }

            if (bytesRead == -1) {
                return -1;
            }

            limit += bytesRead;
            fCursor += bytesRead;
            fCursorAfterLastRead = fCursor;
        }

        //If there is anything in the buffer, then return lesser of (requested bytes) and (bytes in buffer)
        //(bytes returned may be less than requested)
        int bytesRemaining = limit - bCursor;
        int bytesToRead = Math.min(len, bytesRemaining);
        System.arraycopy(buffer, bCursor, b, off, bytesToRead);
        bCursor += bytesToRead;
        if (statistics != null) {
            statistics.incrementBytesRead(bytesToRead);
        }
        return bytesToRead;
    }

    private int readInternal(final long position, final byte[] b, final int offset, final int length,
                             final boolean bypassReadAhead) throws IOException {
        if (readAheadEnabled && !bypassReadAhead) {
            // try reading from read-ahead
            if (offset != 0) {
                throw new IllegalArgumentException("readahead buffers cannot have non-zero buffer offsets");
            }
            int receivedBytes;

            // queue read-aheads
            int numReadAheads = this.readAheadQueueDepth;
            long nextSize;
            long nextOffset = position;
            while (numReadAheads > 0 && nextOffset < contentLength) {
                nextSize = Math.min((long) bufferSize, contentLength - nextOffset);
                ReadBufferManager.getBufferManager().queueReadAhead(this, nextOffset, (int) nextSize);
                nextOffset = nextOffset + nextSize;
                numReadAheads--;
            }

            // try reading from buffers first
            receivedBytes = ReadBufferManager.getBufferManager().getBlock(this, position, length, b);
            if (receivedBytes > 0) {
                return receivedBytes;
            }

            // got nothing from read-ahead, do our own read now
            receivedBytes = readRemote(position, b, offset, length);
            return receivedBytes;
        } else {
            return readRemote(position, b, offset, length);
        }
    }

    int readRemote(long position, byte[] b, int offset, int length) throws IOException {
        if (position < 0) {
            throw new IllegalArgumentException("attempting to read from negative offset");
        }
@ -203,21 +74,30 @@ public class SeaweedInputStream extends FSInputStream {
        if (b == null) {
            throw new IllegalArgumentException("null byte array passed in to read() method");
        }
        if (offset >= b.length) {
        if (off >= b.length) {
            throw new IllegalArgumentException("offset greater than length of array");
        }
        if (length < 0) {
        if (len < 0) {
            throw new IllegalArgumentException("requested read length is less than zero");
        }
        if (length > (b.length - offset)) {
        if (len > (b.length - off)) {
            throw new IllegalArgumentException("requested read length is more than will fit after requested offset in buffer");
        }

        long bytesRead = SeaweedRead.read(filerGrpcClient, visibleIntervalList, position, b, offset, length);
        long bytesRead = SeaweedRead.read(this.filerGrpcClient, this.visibleIntervalList, this.position, b, off, len, SeaweedRead.fileSize(entry));
        if (bytesRead > Integer.MAX_VALUE) {
            throw new IOException("Unexpected Content-Length");
        }

        if (bytesRead > 0) {
            this.position += bytesRead;
            if (statistics != null) {
                statistics.incrementBytesRead(bytesRead);
            }
        }

        return (int) bytesRead;

    }

    /**
@ -239,17 +119,8 @@ public class SeaweedInputStream extends FSInputStream {
            throw new EOFException(FSExceptionMessages.CANNOT_SEEK_PAST_EOF);
        }

        if (n >= fCursor - limit && n <= fCursor) { // within buffer
            bCursor = (int) (n - (fCursor - limit));
            return;
        }
        this.position = n;

        // next read will read from here
        fCursor = n;

        //invalidate buffer
        limit = 0;
        bCursor = 0;
    }

    @Override
@ -257,20 +128,19 @@ public class SeaweedInputStream extends FSInputStream {
        if (closed) {
            throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
        }
        long currentPos = getPos();
        if (currentPos == contentLength) {
        if (this.position == contentLength) {
            if (n > 0) {
                throw new EOFException(FSExceptionMessages.CANNOT_SEEK_PAST_EOF);
            }
        }
        long newPos = currentPos + n;
        long newPos = this.position + n;
        if (newPos < 0) {
            newPos = 0;
            n = newPos - currentPos;
            n = newPos - this.position;
        }
        if (newPos > contentLength) {
            newPos = contentLength;
            n = newPos - currentPos;
            n = newPos - this.position;
        }
        seek(newPos);
        return n;
@ -289,11 +159,11 @@ public class SeaweedInputStream extends FSInputStream {
    public synchronized int available() throws IOException {
        if (closed) {
            throw new IOException(
                FSExceptionMessages.STREAM_IS_CLOSED);
                FSExceptionMessages.STREAM_IS_CLOSED);
        }
        final long remaining = this.contentLength - this.getPos();
        return remaining <= Integer.MAX_VALUE
            ? (int) remaining : Integer.MAX_VALUE;
            ? (int) remaining : Integer.MAX_VALUE;
    }

    /**
@ -321,7 +191,7 @@ public class SeaweedInputStream extends FSInputStream {
        if (closed) {
            throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
        }
        return fCursor - limit + bCursor;
        return position;
    }

    /**
@ -338,7 +208,6 @@ public class SeaweedInputStream extends FSInputStream {
    @Override
    public synchronized void close() throws IOException {
        closed = true;
        buffer = null; // de-reference the buffer so it can be GC'ed sooner
    }

    /**
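Taken together with the SeaweedFileSystem.open() change earlier in this commit, the effect is that buffering moves out of SeaweedInputStream (which now only tracks a single position cursor) and into Hadoop's stock wrapper. A comment sketch of the resulting composition, as the diff itself wires it up:

// After this change, buffering is layered outside the stream instead of inside it:
// FSInputStream raw = seaweedFileSystemStore.openFileForRead(path, statistics); // no internal buffer
// return new FSDataInputStream(new BufferedFSInputStream(raw, 4 * seaweedBufferSize));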
@ -9,6 +9,7 @@ import org.apache.hadoop.fs.StreamCapabilities;
import org.apache.hadoop.fs.Syncable;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import seaweedfs.client.ByteBufferPool;
import seaweedfs.client.FilerGrpcClient;
import seaweedfs.client.FilerProto;
import seaweedfs.client.SeaweedWrite;
@ -16,14 +17,9 @@ import seaweedfs.client.SeaweedWrite;
import java.io.IOException;
import java.io.InterruptedIOException;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.util.Locale;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentLinkedDeque;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.Future;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.*;

import static seaweed.hdfs.SeaweedFileSystemStore.getParentDirectory;

@ -37,16 +33,16 @@ public class SeaweedOutputStream extends OutputStream implements Syncable, Strea
    private final int maxConcurrentRequestCount;
    private final ThreadPoolExecutor threadExecutor;
    private final ExecutorCompletionService<Void> completionService;
    private FilerProto.Entry.Builder entry;
    private final FilerProto.Entry.Builder entry;
    private final boolean supportFlush = false; // true;
    private final ConcurrentLinkedDeque<WriteOperation> writeOperations;
    private long position;
    private boolean closed;
    private boolean supportFlush = true;
    private volatile IOException lastError;
    private long lastFlushOffset;
    private long lastTotalAppendOffset = 0;
    private byte[] buffer;
    private int bufferIndex;
    private ConcurrentLinkedDeque<WriteOperation> writeOperations;
    private ByteBuffer buffer;
    private long outputIndex;
    private String replication = "000";

    public SeaweedOutputStream(FilerGrpcClient filerGrpcClient, final Path path, FilerProto.Entry.Builder entry,
@ -59,18 +55,18 @@ public class SeaweedOutputStream extends OutputStream implements Syncable, Strea
        this.lastError = null;
        this.lastFlushOffset = 0;
        this.bufferSize = bufferSize;
        this.buffer = new byte[bufferSize];
        this.bufferIndex = 0;
        this.buffer = ByteBufferPool.request(bufferSize);
        this.outputIndex = 0;
        this.writeOperations = new ConcurrentLinkedDeque<>();

        this.maxConcurrentRequestCount = 4 * Runtime.getRuntime().availableProcessors();
        this.maxConcurrentRequestCount = Runtime.getRuntime().availableProcessors();

        this.threadExecutor
            = new ThreadPoolExecutor(maxConcurrentRequestCount,
            maxConcurrentRequestCount,
            10L,
            TimeUnit.SECONDS,
            new LinkedBlockingQueue<Runnable>());
            = new ThreadPoolExecutor(maxConcurrentRequestCount,
            maxConcurrentRequestCount,
            120L,
            TimeUnit.SECONDS,
            new LinkedBlockingQueue<Runnable>());
        this.completionService = new ExecutorCompletionService<>(this.threadExecutor);

        this.entry = entry;
@ -78,9 +74,6 @@ public class SeaweedOutputStream extends OutputStream implements Syncable, Strea
    }

    private synchronized void flushWrittenBytesToServiceInternal(final long offset) throws IOException {

        LOG.debug("SeaweedWrite.writeMeta path: {} entry:{}", path, entry);

        try {
            SeaweedWrite.writeMeta(filerGrpcClient, getParentDirectory(path), entry);
        } catch (Exception ex) {
@ -96,7 +89,7 @@ public class SeaweedOutputStream extends OutputStream implements Syncable, Strea

    @Override
    public synchronized void write(final byte[] data, final int off, final int length)
        throws IOException {
        throws IOException {
        maybeThrowLastError();

        Preconditions.checkArgument(data != null, "null data");
@ -105,25 +98,29 @@ public class SeaweedOutputStream extends OutputStream implements Syncable, Strea
            throw new IndexOutOfBoundsException();
        }

        // System.out.println(path + " write [" + (outputIndex + off) + "," + ((outputIndex + off) + length) + ")");

        int currentOffset = off;
        int writableBytes = bufferSize - bufferIndex;
        int writableBytes = bufferSize - buffer.position();
        int numberOfBytesToWrite = length;

        while (numberOfBytesToWrite > 0) {
            if (writableBytes <= numberOfBytesToWrite) {
                System.arraycopy(data, currentOffset, buffer, bufferIndex, writableBytes);
                bufferIndex += writableBytes;
                writeCurrentBufferToService();
                currentOffset += writableBytes;
                numberOfBytesToWrite = numberOfBytesToWrite - writableBytes;
            } else {
                System.arraycopy(data, currentOffset, buffer, bufferIndex, numberOfBytesToWrite);
                bufferIndex += numberOfBytesToWrite;
                numberOfBytesToWrite = 0;

            if (numberOfBytesToWrite < writableBytes) {
                buffer.put(data, currentOffset, numberOfBytesToWrite);
                outputIndex += numberOfBytesToWrite;
                break;
            }

            writableBytes = bufferSize - bufferIndex;
            // System.out.println(path + " [" + (outputIndex + currentOffset) + "," + ((outputIndex + currentOffset) + writableBytes) + ") " + buffer.capacity());
            buffer.put(data, currentOffset, writableBytes);
            outputIndex += writableBytes;
            currentOffset += writableBytes;
            writeCurrentBufferToService();
            numberOfBytesToWrite = numberOfBytesToWrite - writableBytes;
            writableBytes = bufferSize - buffer.position();
        }

    }

    /**
@ -202,8 +199,9 @@ public class SeaweedOutputStream extends OutputStream implements Syncable, Strea
            threadExecutor.shutdown();
        } finally {
            lastError = new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
            ByteBufferPool.release(buffer);
            buffer = null;
            bufferIndex = 0;
            outputIndex = 0;
            closed = true;
            writeOperations.clear();
            if (!threadExecutor.isShutdown()) {
@ -213,35 +211,39 @@ public class SeaweedOutputStream extends OutputStream implements Syncable, Strea
    }

    private synchronized void writeCurrentBufferToService() throws IOException {
        if (bufferIndex == 0) {
        if (buffer.position() == 0) {
            return;
        }

        final byte[] bytes = buffer;
        final int bytesLength = bufferIndex;
        position += submitWriteBufferToService(buffer, position);

        buffer = new byte[bufferSize];
        bufferIndex = 0;
        final long offset = position;
        position += bytesLength;
        buffer = ByteBufferPool.request(bufferSize);

        if (threadExecutor.getQueue().size() >= maxConcurrentRequestCount * 2) {
    }

    private synchronized int submitWriteBufferToService(final ByteBuffer bufferToWrite, final long writePosition) throws IOException {

        bufferToWrite.flip();
        int bytesLength = bufferToWrite.limit() - bufferToWrite.position();

        if (threadExecutor.getQueue().size() >= maxConcurrentRequestCount) {
            waitForTaskToComplete();
        }

        final Future<Void> job = completionService.submit(new Callable<Void>() {
            @Override
            public Void call() throws Exception {
                // originally: client.append(path, offset, bytes, 0, bytesLength);
                SeaweedWrite.writeData(entry, replication, filerGrpcClient, offset, bytes, 0, bytesLength);
                return null;
            }
        final Future<Void> job = completionService.submit(() -> {
            // System.out.println(path + " is going to save [" + (writePosition) + "," + ((writePosition) + bytesLength) + ")");
            SeaweedWrite.writeData(entry, replication, filerGrpcClient, writePosition, bufferToWrite.array(), bufferToWrite.position(), bufferToWrite.limit(), path.toUri().getPath());
            // System.out.println(path + " saved [" + (writePosition) + "," + ((writePosition) + bytesLength) + ")");
            ByteBufferPool.release(bufferToWrite);
            return null;
        });

        writeOperations.add(new WriteOperation(job, offset, bytesLength));
        writeOperations.add(new WriteOperation(job, writePosition, bytesLength));

        // Try to shrink the queue
        shrinkWriteOperationQueue();

        return bytesLength;

    }

    private void waitForTaskToComplete() throws IOException {
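The new submitWriteBufferToService() relies on ByteBuffer's fill-then-flip protocol: put() advances the position while filling, and flip() resets the position to 0 and sets the limit to the old position so the filled bytes can be drained. A minimal standalone sketch of those semantics (hypothetical class name, illustrative values):

import java.nio.ByteBuffer;

public class FlipDemo {
    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(8);
        buf.put(new byte[]{1, 2, 3});  // fill phase: position=3, limit=8
        buf.flip();                    // drain phase: position=0, limit=3
        int bytesLength = buf.limit() - buf.position(); // 3, as computed in the diff
        System.out.println("bytes to write: " + bytesLength);
    }
}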
@ -28,7 +28,7 @@
        <dependency>
            <groupId>junit</groupId>
            <artifactId>junit</artifactId>
            <version>3.8.1</version>
            <version>4.13.1</version>
            <scope>test</scope>
        </dependency>
    </dependencies>
1856
other/metrics/grafana_seaweedfs.json
Normal file
File diff suppressed because it is too large
2362
other/metrics/grafana_seaweedfs_k8s.json
Normal file
File diff suppressed because it is too large
58
test/random_access/pom.xml
Normal file
@ -0,0 +1,58 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <groupId>com.seaweedfs.test</groupId>
    <artifactId>random_access</artifactId>
    <packaging>jar</packaging>
    <version>1.0-SNAPSHOT</version>

    <properties>
        <guava.version>28.0-jre</guava.version>
    </properties>

    <dependencies>
        <dependency>
            <groupId>com.google.guava</groupId>
            <artifactId>guava</artifactId>
            <version>${guava.version}</version>
        </dependency>
        <dependency>
            <groupId>org.slf4j</groupId>
            <artifactId>slf4j-api</artifactId>
            <version>1.7.25</version>
        </dependency>
        <dependency>
            <groupId>junit</groupId>
            <artifactId>junit</artifactId>
            <version>4.13.1</version>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>com.esotericsoftware.kryo</groupId>
            <artifactId>kryo</artifactId>
            <version>2.24.0</version>
        </dependency>
    </dependencies>

    <build>
        <extensions>
            <extension>
                <groupId>kr.motd.maven</groupId>
                <artifactId>os-maven-plugin</artifactId>
                <version>1.6.2</version>
            </extension>
        </extensions>
        <plugins>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-compiler-plugin</artifactId>
                <configuration>
                    <source>8</source>
                    <target>8</target>
                </configuration>
            </plugin>
        </plugins>
    </build>

</project>
@ -0,0 +1,753 @@
/*
 * Copyright 2010 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package seaweedfs.client.btree;

import com.google.common.collect.ImmutableSet;
import seaweedfs.client.btree.serialize.Serializer;
import seaweedfs.client.btree.serialize.kryo.KryoBackedDecoder;
import seaweedfs.client.btree.serialize.kryo.KryoBackedEncoder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;

// todo - stream serialised value to file
// todo - handle hash collisions (properly, this time)
// todo - don't store null links to child blocks in leaf index blocks
// todo - align block boundaries
// todo - thread safety control
// todo - merge small values into a single data block
// todo - discard when file corrupt
// todo - include data directly in index entry when serializer can guarantee small fixed sized data
// todo - free list leaks disk space
// todo - merge adjacent free blocks
// todo - use more efficient lookup for free block with nearest size
@SuppressWarnings("unchecked")
public class BTreePersistentIndexedCache<K, V> {
    private static final Logger LOGGER = LoggerFactory.getLogger(BTreePersistentIndexedCache.class);
    private final File cacheFile;
    private final KeyHasher<K> keyHasher;
    private final Serializer<V> serializer;
    private final short maxChildIndexEntries;
    private final int minIndexChildNodes;
    private final StateCheckBlockStore store;
    private HeaderBlock header;

    public BTreePersistentIndexedCache(File cacheFile, Serializer<K> keySerializer, Serializer<V> valueSerializer) {
        this(cacheFile, keySerializer, valueSerializer, (short) 512, 512);
    }

    public BTreePersistentIndexedCache(File cacheFile, Serializer<K> keySerializer, Serializer<V> valueSerializer,
                                       short maxChildIndexEntries, int maxFreeListEntries) {
        this.cacheFile = cacheFile;
        this.keyHasher = new KeyHasher<K>(keySerializer);
        this.serializer = valueSerializer;
        this.maxChildIndexEntries = maxChildIndexEntries;
        this.minIndexChildNodes = maxChildIndexEntries / 2;
        BlockStore cachingStore = new CachingBlockStore(new FileBackedBlockStore(cacheFile), ImmutableSet.of(IndexBlock.class, FreeListBlockStore.FreeListBlock.class));
        this.store = new StateCheckBlockStore(new FreeListBlockStore(cachingStore, maxFreeListEntries));
        try {
            open();
        } catch (Exception e) {
            throw new UncheckedIOException(String.format("Could not open %s.", this), e);
        }
    }

    @Override
    public String toString() {
        return "cache " + cacheFile.getName() + " (" + cacheFile + ")";
    }

    private void open() throws Exception {
        LOGGER.debug("Opening {}", this);
        try {
            doOpen();
        } catch (CorruptedCacheException e) {
            rebuild();
        }
    }

    private void doOpen() throws Exception {
        BlockStore.Factory factory = new BlockStore.Factory() {
            @Override
            public Object create(Class<? extends BlockPayload> type) {
                if (type == HeaderBlock.class) {
                    return new HeaderBlock();
                }
                if (type == IndexBlock.class) {
                    return new IndexBlock();
                }
                if (type == DataBlock.class) {
                    return new DataBlock();
                }
                throw new UnsupportedOperationException();
            }
        };
        Runnable initAction = new Runnable() {
            @Override
            public void run() {
                header = new HeaderBlock();
                store.write(header);
                header.index.newRoot();
                store.flush();
            }
        };

        store.open(initAction, factory);
        header = store.readFirst(HeaderBlock.class);
    }

    public V get(K key) {
        try {
            try {
                DataBlock block = header.getRoot().get(key);
                if (block != null) {
                    return block.getValue();
                }
                return null;
            } catch (CorruptedCacheException e) {
                rebuild();
                return null;
            }
        } catch (Exception e) {
            throw new UncheckedIOException(String.format("Could not read entry '%s' from %s.", key, this), e);
        }
    }

    public void put(K key, V value) {
        try {
            long hashCode = keyHasher.getHashCode(key);
            Lookup lookup = header.getRoot().find(hashCode);
            DataBlock newBlock = null;
            if (lookup.entry != null) {
                DataBlock block = store.read(lookup.entry.dataBlock, DataBlock.class);
                DataBlockUpdateResult updateResult = block.useNewValue(value);
                if (updateResult.isFailed()) {
                    store.remove(block);
                    newBlock = new DataBlock(value, updateResult.getSerializedValue());
                }
            } else {
                newBlock = new DataBlock(value);
            }
            if (newBlock != null) {
                store.write(newBlock);
                lookup.indexBlock.put(hashCode, newBlock.getPos());
            }
            store.flush();
        } catch (Exception e) {
            throw new UncheckedIOException(String.format("Could not add entry '%s' to %s.", key, this), e);
        }
    }

    public void remove(K key) {
        try {
            Lookup lookup = header.getRoot().find(key);
            if (lookup.entry == null) {
                return;
            }
            lookup.indexBlock.remove(lookup.entry);
            DataBlock block = store.read(lookup.entry.dataBlock, DataBlock.class);
            store.remove(block);
            store.flush();
        } catch (Exception e) {
            throw new UncheckedIOException(String.format("Could not remove entry '%s' from %s.", key, this), e);
        }
    }

    private IndexBlock load(BlockPointer pos, IndexRoot root, IndexBlock parent, int index) {
        IndexBlock block = store.read(pos, IndexBlock.class);
        block.root = root;
        block.parent = parent;
        block.parentEntryIndex = index;
        return block;
    }

    public void reset() {
        close();
        try {
            open();
        } catch (Exception e) {
            throw new UncheckedIOException(e);
        }
    }

    public void close() {
        LOGGER.debug("Closing {}", this);
|
||||
try {
|
||||
store.close();
|
||||
} catch (Exception e) {
|
||||
throw new UncheckedIOException(e);
|
||||
}
|
||||
}
|
||||
|
||||
public boolean isOpen() {
|
||||
return store.isOpen();
|
||||
}
|
||||
|
||||
private void rebuild() {
|
||||
LOGGER.warn("{} is corrupt. Discarding.", this);
|
||||
try {
|
||||
clear();
|
||||
} catch (Exception e) {
|
||||
LOGGER.warn("{} couldn't be rebuilt. Closing.", this);
|
||||
close();
|
||||
}
|
||||
}
|
||||
|
||||
public void verify() {
|
||||
try {
|
||||
doVerify();
|
||||
} catch (Exception e) {
|
||||
throw new UncheckedIOException(String.format("Some problems were found when checking the integrity of %s.",
|
||||
this), e);
|
||||
}
|
||||
}
|
||||
|
||||
private void doVerify() throws Exception {
|
||||
List<BlockPayload> blocks = new ArrayList<BlockPayload>();
|
||||
|
||||
HeaderBlock header = store.readFirst(HeaderBlock.class);
|
||||
blocks.add(header);
|
||||
verifyTree(header.getRoot(), "", blocks, Long.MAX_VALUE, true);
|
||||
|
||||
Collections.sort(blocks, new Comparator<BlockPayload>() {
|
||||
@Override
|
||||
public int compare(BlockPayload block, BlockPayload block1) {
|
||||
return block.getPos().compareTo(block1.getPos());
|
||||
}
|
||||
});
|
||||
|
||||
for (int i = 0; i < blocks.size() - 1; i++) {
|
||||
Block b1 = blocks.get(i).getBlock();
|
||||
Block b2 = blocks.get(i + 1).getBlock();
|
||||
if (b1.getPos().getPos() + b1.getSize() > b2.getPos().getPos()) {
|
||||
throw new IOException(String.format("%s overlaps with %s", b1, b2));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private void verifyTree(IndexBlock current, String prefix, Collection<BlockPayload> blocks, long maxValue,
|
||||
boolean loadData) throws Exception {
|
||||
blocks.add(current);
|
||||
|
||||
if (!prefix.equals("") && current.entries.size() < maxChildIndexEntries / 2) {
|
||||
throw new IOException(String.format("Too few entries found in %s", current));
|
||||
}
|
||||
if (current.entries.size() > maxChildIndexEntries) {
|
||||
throw new IOException(String.format("Too many entries found in %s", current));
|
||||
}
|
||||
|
||||
boolean isLeaf = current.entries.size() == 0 || current.entries.get(0).childIndexBlock.isNull();
|
||||
if (isLeaf ^ current.tailPos.isNull()) {
|
||||
throw new IOException(String.format("Mismatched leaf/tail-node in %s", current));
|
||||
}
|
||||
|
||||
long min = Long.MIN_VALUE;
|
||||
for (IndexEntry entry : current.entries) {
|
||||
if (isLeaf ^ entry.childIndexBlock.isNull()) {
|
||||
throw new IOException(String.format("Mismatched leaf/non-leaf entry in %s", current));
|
||||
}
|
||||
if (entry.hashCode >= maxValue || entry.hashCode <= min) {
|
||||
throw new IOException(String.format("Out-of-order key in %s", current));
|
||||
}
|
||||
min = entry.hashCode;
|
||||
if (!entry.childIndexBlock.isNull()) {
|
||||
IndexBlock child = store.read(entry.childIndexBlock, IndexBlock.class);
|
||||
verifyTree(child, " " + prefix, blocks, entry.hashCode, loadData);
|
||||
}
|
||||
if (loadData) {
|
||||
DataBlock block = store.read(entry.dataBlock, DataBlock.class);
|
||||
blocks.add(block);
|
||||
}
|
||||
}
|
||||
if (!current.tailPos.isNull()) {
|
||||
IndexBlock tail = store.read(current.tailPos, IndexBlock.class);
|
||||
verifyTree(tail, " " + prefix, blocks, maxValue, loadData);
|
||||
}
|
||||
}
|
||||
|
||||
public void clear() {
|
||||
store.clear();
|
||||
close();
|
||||
try {
|
||||
doOpen();
|
||||
} catch (Exception e) {
|
||||
throw new UncheckedIOException(e);
|
||||
}
|
||||
}
|
||||
|
||||
private class IndexRoot {
|
||||
private BlockPointer rootPos = BlockPointer.start();
|
||||
private HeaderBlock owner;
|
||||
|
||||
private IndexRoot(HeaderBlock owner) {
|
||||
this.owner = owner;
|
||||
}
|
||||
|
||||
public void setRootPos(BlockPointer rootPos) {
|
||||
this.rootPos = rootPos;
|
||||
store.write(owner);
|
||||
}
|
||||
|
||||
public IndexBlock getRoot() {
|
||||
return load(rootPos, this, null, 0);
|
||||
}
|
||||
|
||||
public IndexBlock newRoot() {
|
||||
IndexBlock block = new IndexBlock();
|
||||
store.write(block);
|
||||
setRootPos(block.getPos());
|
||||
return block;
|
||||
}
|
||||
}
|
||||
|
||||
private class HeaderBlock extends BlockPayload {
|
||||
private IndexRoot index;
|
||||
|
||||
private HeaderBlock() {
|
||||
index = new IndexRoot(this);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected byte getType() {
|
||||
return 0x55;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected int getSize() {
|
||||
return Block.LONG_SIZE + Block.SHORT_SIZE;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void read(DataInputStream instr) throws Exception {
|
||||
index.rootPos = BlockPointer.pos(instr.readLong());
|
||||
|
||||
short actualChildIndexEntries = instr.readShort();
|
||||
if (actualChildIndexEntries != maxChildIndexEntries) {
|
||||
throw blockCorruptedException();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void write(DataOutputStream outstr) throws Exception {
|
||||
outstr.writeLong(index.rootPos.getPos());
|
||||
outstr.writeShort(maxChildIndexEntries);
|
||||
}
|
||||
|
||||
public IndexBlock getRoot() throws Exception {
|
||||
return index.getRoot();
|
||||
}
|
||||
}
|
||||
|
||||
private class IndexBlock extends BlockPayload {
|
||||
private final List<IndexEntry> entries = new ArrayList<IndexEntry>();
|
||||
private BlockPointer tailPos = BlockPointer.start();
|
||||
// Transient fields
|
||||
private IndexBlock parent;
|
||||
private int parentEntryIndex;
|
||||
private IndexRoot root;
|
||||
|
||||
@Override
|
||||
protected byte getType() {
|
||||
return 0x77;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected int getSize() {
|
||||
return Block.INT_SIZE + Block.LONG_SIZE + (3 * Block.LONG_SIZE) * maxChildIndexEntries;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void read(DataInputStream instr) throws IOException {
|
||||
int count = instr.readInt();
|
||||
entries.clear();
|
||||
for (int i = 0; i < count; i++) {
|
||||
IndexEntry entry = new IndexEntry();
|
||||
entry.hashCode = instr.readLong();
|
||||
entry.dataBlock = BlockPointer.pos(instr.readLong());
|
||||
entry.childIndexBlock = BlockPointer.pos(instr.readLong());
|
||||
entries.add(entry);
|
||||
}
|
||||
tailPos = BlockPointer.pos(instr.readLong());
|
||||
}
|
||||
|
||||
@Override
|
||||
public void write(DataOutputStream outstr) throws IOException {
|
||||
outstr.writeInt(entries.size());
|
||||
for (IndexEntry entry : entries) {
|
||||
outstr.writeLong(entry.hashCode);
|
||||
outstr.writeLong(entry.dataBlock.getPos());
|
||||
outstr.writeLong(entry.childIndexBlock.getPos());
|
||||
}
|
||||
outstr.writeLong(tailPos.getPos());
|
||||
}
|
||||
|
||||
public void put(long hashCode, BlockPointer pos) throws Exception {
|
||||
int index = Collections.binarySearch(entries, new IndexEntry(hashCode));
|
||||
IndexEntry entry;
|
||||
if (index >= 0) {
|
||||
entry = entries.get(index);
|
||||
} else {
|
||||
assert tailPos.isNull();
|
||||
entry = new IndexEntry();
|
||||
entry.hashCode = hashCode;
|
||||
entry.childIndexBlock = BlockPointer.start();
|
||||
index = -index - 1;
|
||||
entries.add(index, entry);
|
||||
}
|
||||
|
||||
entry.dataBlock = pos;
|
||||
store.write(this);
|
||||
|
||||
maybeSplit();
|
||||
}
|
||||
|
||||
private void maybeSplit() throws Exception {
|
||||
if (entries.size() > maxChildIndexEntries) {
|
||||
int splitPos = entries.size() / 2;
|
||||
IndexEntry splitEntry = entries.remove(splitPos);
|
||||
if (parent == null) {
|
||||
parent = root.newRoot();
|
||||
}
|
||||
IndexBlock sibling = new IndexBlock();
|
||||
store.write(sibling);
|
||||
List<IndexEntry> siblingEntries = entries.subList(splitPos, entries.size());
|
||||
sibling.entries.addAll(siblingEntries);
|
||||
siblingEntries.clear();
|
||||
sibling.tailPos = tailPos;
|
||||
tailPos = splitEntry.childIndexBlock;
|
||||
splitEntry.childIndexBlock = BlockPointer.start();
|
||||
parent.add(this, splitEntry, sibling);
|
||||
}
|
||||
}
|
||||
|
||||
private void add(IndexBlock left, IndexEntry entry, IndexBlock right) throws Exception {
|
||||
int index = left.parentEntryIndex;
|
||||
if (index < entries.size()) {
|
||||
IndexEntry parentEntry = entries.get(index);
|
||||
assert parentEntry.childIndexBlock.equals(left.getPos());
|
||||
parentEntry.childIndexBlock = right.getPos();
|
||||
} else {
|
||||
assert index == entries.size() && (tailPos.isNull() || tailPos.equals(left.getPos()));
|
||||
tailPos = right.getPos();
|
||||
}
|
||||
entries.add(index, entry);
|
||||
entry.childIndexBlock = left.getPos();
|
||||
store.write(this);
|
||||
|
||||
maybeSplit();
|
||||
}
|
||||
|
||||
public DataBlock get(K key) throws Exception {
|
||||
Lookup lookup = find(key);
|
||||
if (lookup.entry == null) {
|
||||
return null;
|
||||
}
|
||||
|
||||
return store.read(lookup.entry.dataBlock, DataBlock.class);
|
||||
}
|
||||
|
||||
public Lookup find(K key) throws Exception {
|
||||
long checksum = keyHasher.getHashCode(key);
|
||||
return find(checksum);
|
||||
}
|
||||
|
||||
private Lookup find(long hashCode) throws Exception {
|
||||
int index = Collections.binarySearch(entries, new IndexEntry(hashCode));
|
||||
if (index >= 0) {
|
||||
return new Lookup(this, entries.get(index));
|
||||
}
|
||||
|
||||
index = -index - 1;
|
||||
BlockPointer childBlockPos;
|
||||
if (index == entries.size()) {
|
||||
childBlockPos = tailPos;
|
||||
} else {
|
||||
childBlockPos = entries.get(index).childIndexBlock;
|
||||
}
|
||||
if (childBlockPos.isNull()) {
|
||||
return new Lookup(this, null);
|
||||
}
|
||||
|
||||
IndexBlock childBlock = load(childBlockPos, root, this, index);
|
||||
return childBlock.find(hashCode);
|
||||
}
|
||||
|
||||
public void remove(IndexEntry entry) throws Exception {
|
||||
int index = entries.indexOf(entry);
|
||||
assert index >= 0;
|
||||
entries.remove(index);
|
||||
store.write(this);
|
||||
|
||||
if (entry.childIndexBlock.isNull()) {
|
||||
maybeMerge();
|
||||
} else {
|
||||
// Not a leaf node. Move up an entry from a leaf node, then possibly merge the leaf node
|
||||
IndexBlock leafBlock = load(entry.childIndexBlock, root, this, index);
|
||||
leafBlock = leafBlock.findHighestLeaf();
|
||||
IndexEntry highestEntry = leafBlock.entries.remove(leafBlock.entries.size() - 1);
|
||||
highestEntry.childIndexBlock = entry.childIndexBlock;
|
||||
entries.add(index, highestEntry);
|
||||
store.write(leafBlock);
|
||||
leafBlock.maybeMerge();
|
||||
}
|
||||
}
|
||||
|
||||
private void maybeMerge() throws Exception {
|
||||
if (parent == null) {
|
||||
// This is the root block. Can have any number of children <= maxChildIndexEntries
|
||||
if (entries.size() == 0 && !tailPos.isNull()) {
|
||||
// This is an empty root block, discard it
|
||||
header.index.setRootPos(tailPos);
|
||||
store.remove(this);
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
// This is not the root block. Must have children >= minIndexChildNodes
|
||||
if (entries.size() >= minIndexChildNodes) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Attempt to merge with the left sibling
|
||||
IndexBlock left = parent.getPrevious(this);
|
||||
if (left != null) {
|
||||
assert entries.size() + left.entries.size() <= maxChildIndexEntries * 2;
|
||||
if (left.entries.size() > minIndexChildNodes) {
|
||||
// There are enough entries in this block and the left sibling to make up 2 blocks, so redistribute
|
||||
// the entries evenly between them
|
||||
left.mergeFrom(this);
|
||||
left.maybeSplit();
|
||||
return;
|
||||
} else {
|
||||
// There are only enough entries to make up 1 block, so move the entries of the left sibling into
|
||||
// this block and discard the left sibling. Might also need to merge the parent
|
||||
left.mergeFrom(this);
|
||||
parent.maybeMerge();
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
// Attempt to merge with the right sibling
|
||||
IndexBlock right = parent.getNext(this);
|
||||
if (right != null) {
|
||||
assert entries.size() + right.entries.size() <= maxChildIndexEntries * 2;
|
||||
if (right.entries.size() > minIndexChildNodes) {
|
||||
// There are enough entries in this block and the right sibling to make up 2 blocks, so redistribute
|
||||
// the entries evenly between them
|
||||
mergeFrom(right);
|
||||
maybeSplit();
|
||||
return;
|
||||
} else {
|
||||
// There are only enough entries to make up 1 block, so move the entries of the right sibling into
|
||||
// this block and discard this block. Might also need to merge the parent
|
||||
mergeFrom(right);
|
||||
parent.maybeMerge();
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
// Should not happen
|
||||
throw new IllegalStateException(String.format("%s does not have any siblings.", getBlock()));
|
||||
}
|
||||
|
||||
private void mergeFrom(IndexBlock right) throws Exception {
|
||||
IndexEntry newChildEntry = parent.entries.remove(parentEntryIndex);
|
||||
if (right.getPos().equals(parent.tailPos)) {
|
||||
parent.tailPos = getPos();
|
||||
} else {
|
||||
IndexEntry newParentEntry = parent.entries.get(parentEntryIndex);
|
||||
assert newParentEntry.childIndexBlock.equals(right.getPos());
|
||||
newParentEntry.childIndexBlock = getPos();
|
||||
}
|
||||
entries.add(newChildEntry);
|
||||
entries.addAll(right.entries);
|
||||
newChildEntry.childIndexBlock = tailPos;
|
||||
tailPos = right.tailPos;
|
||||
store.write(parent);
|
||||
store.write(this);
|
||||
store.remove(right);
|
||||
}
|
||||
|
||||
private IndexBlock getNext(IndexBlock indexBlock) throws Exception {
|
||||
int index = indexBlock.parentEntryIndex + 1;
|
||||
if (index > entries.size()) {
|
||||
return null;
|
||||
}
|
||||
if (index == entries.size()) {
|
||||
return load(tailPos, root, this, index);
|
||||
}
|
||||
return load(entries.get(index).childIndexBlock, root, this, index);
|
||||
}
|
||||
|
||||
private IndexBlock getPrevious(IndexBlock indexBlock) throws Exception {
|
||||
int index = indexBlock.parentEntryIndex - 1;
|
||||
if (index < 0) {
|
||||
return null;
|
||||
}
|
||||
return load(entries.get(index).childIndexBlock, root, this, index);
|
||||
}
|
||||
|
||||
private IndexBlock findHighestLeaf() throws Exception {
|
||||
if (tailPos.isNull()) {
|
||||
return this;
|
||||
}
|
||||
return load(tailPos, root, this, entries.size()).findHighestLeaf();
|
||||
}
|
||||
}
|
||||
|
||||
private static class IndexEntry implements Comparable<IndexEntry> {
|
||||
long hashCode;
|
||||
BlockPointer dataBlock;
|
||||
BlockPointer childIndexBlock;
|
||||
|
||||
private IndexEntry() {
|
||||
}
|
||||
|
||||
private IndexEntry(long hashCode) {
|
||||
this.hashCode = hashCode;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int compareTo(IndexEntry indexEntry) {
|
||||
if (hashCode > indexEntry.hashCode) {
|
||||
return 1;
|
||||
}
|
||||
if (hashCode < indexEntry.hashCode) {
|
||||
return -1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
private class Lookup {
|
||||
final IndexBlock indexBlock;
|
||||
final IndexEntry entry;
|
||||
|
||||
private Lookup(IndexBlock indexBlock, IndexEntry entry) {
|
||||
this.indexBlock = indexBlock;
|
||||
this.entry = entry;
|
||||
}
|
||||
}
|
||||
|
||||
private class DataBlock extends BlockPayload {
|
||||
private int size;
|
||||
private StreamByteBuffer buffer;
|
||||
private V value;
|
||||
|
||||
private DataBlock() {
|
||||
}
|
||||
|
||||
public DataBlock(V value) throws Exception {
|
||||
this.value = value;
|
||||
setValue(value);
|
||||
size = buffer.totalBytesUnread();
|
||||
}
|
||||
|
||||
public DataBlock(V value, StreamByteBuffer buffer) throws Exception {
|
||||
this.value = value;
|
||||
this.buffer = buffer;
|
||||
size = buffer.totalBytesUnread();
|
||||
}
|
||||
|
||||
public void setValue(V value) throws Exception {
|
||||
buffer = StreamByteBuffer.createWithChunkSizeInDefaultRange(size);
|
||||
KryoBackedEncoder encoder = new KryoBackedEncoder(buffer.getOutputStream());
|
||||
serializer.write(encoder, value);
|
||||
encoder.flush();
|
||||
}
|
||||
|
||||
public V getValue() throws Exception {
|
||||
if (value == null) {
|
||||
value = serializer.read(new KryoBackedDecoder(buffer.getInputStream()));
|
||||
buffer = null;
|
||||
}
|
||||
return value;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected byte getType() {
|
||||
return 0x33;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected int getSize() {
|
||||
return 2 * Block.INT_SIZE + size;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void read(DataInputStream instr) throws Exception {
|
||||
size = instr.readInt();
|
||||
int bytes = instr.readInt();
|
||||
buffer = StreamByteBuffer.of(instr, bytes);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void write(DataOutputStream outstr) throws Exception {
|
||||
outstr.writeInt(size);
|
||||
outstr.writeInt(buffer.totalBytesUnread());
|
||||
buffer.writeTo(outstr);
|
||||
buffer = null;
|
||||
}
|
||||
|
||||
public DataBlockUpdateResult useNewValue(V value) throws Exception {
|
||||
setValue(value);
|
||||
boolean ok = buffer.totalBytesUnread() <= size;
|
||||
if (ok) {
|
||||
this.value = value;
|
||||
store.write(this);
|
||||
return DataBlockUpdateResult.success();
|
||||
} else {
|
||||
return DataBlockUpdateResult.failed(buffer);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private static class DataBlockUpdateResult {
|
||||
private static final DataBlockUpdateResult SUCCESS = new DataBlockUpdateResult(true, null);
|
||||
private final boolean success;
|
||||
private final StreamByteBuffer serializedValue;
|
||||
|
||||
private DataBlockUpdateResult(boolean success, StreamByteBuffer serializedValue) {
|
||||
this.success = success;
|
||||
this.serializedValue = serializedValue;
|
||||
}
|
||||
|
||||
static DataBlockUpdateResult success() {
|
||||
return SUCCESS;
|
||||
}
|
||||
|
||||
static DataBlockUpdateResult failed(StreamByteBuffer serializedValue) {
|
||||
return new DataBlockUpdateResult(false, serializedValue);
|
||||
}
|
||||
|
||||
public boolean isFailed() {
|
||||
return !success;
|
||||
}
|
||||
|
||||
public StreamByteBuffer getSerializedValue() {
|
||||
return serializedValue;
|
||||
}
|
||||
}
|
||||
}
|
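// Usage sketch (not part of the original source): a minimal round trip through the cache,
// assuming a hypothetical StringSerializer that implements
// seaweedfs.client.btree.serialize.Serializer<String>. Keys are hashed to longs by KeyHasher,
// so lookups walk the on-disk B-tree by hash code rather than by the key itself.
//
//     BTreePersistentIndexedCache<String, String> cache =
//             new BTreePersistentIndexedCache<String, String>(
//                     new File("/tmp/btree.bin"), new StringSerializer(), new StringSerializer());
//     cache.put("greeting", "hello");   // writes a DataBlock, then links it into the index
//     String v = cache.get("greeting"); // finds the IndexEntry by hash, then reads the DataBlock
//     cache.remove("greeting");         // unlinks the entry; the DataBlock returns to the free list
//     cache.close();                    // flushes dirty blocks and closes the backing file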
@ -0,0 +1,59 @@
/*
 * Copyright 2009 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package seaweedfs.client.btree;

public abstract class Block {
    static final int LONG_SIZE = 8;
    static final int INT_SIZE = 4;
    static final int SHORT_SIZE = 2;

    private BlockPayload payload;

    protected Block(BlockPayload payload) {
        this.payload = payload;
        payload.setBlock(this);
    }

    public BlockPayload getPayload() {
        return payload;
    }

    protected void detach() {
        payload.setBlock(null);
        payload = null;
    }

    public abstract BlockPointer getPos();

    public abstract int getSize();

    public abstract RuntimeException blockCorruptedException();

    @Override
    public String toString() {
        return payload.getClass().getSimpleName() + " " + getPos();
    }

    public BlockPointer getNextPos() {
        return BlockPointer.pos(getPos().getPos() + getSize());
    }

    public abstract boolean hasPos();

    public abstract void setPos(BlockPointer pos);

    public abstract void setSize(int size);
}
@ -0,0 +1,51 @@
/*
 * Copyright 2009 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package seaweedfs.client.btree;

import java.io.DataInputStream;
import java.io.DataOutputStream;

public abstract class BlockPayload {
    private Block block;

    public Block getBlock() {
        return block;
    }

    public void setBlock(Block block) {
        this.block = block;
    }

    public BlockPointer getPos() {
        return getBlock().getPos();
    }

    public BlockPointer getNextPos() {
        return getBlock().getNextPos();
    }

    protected abstract int getSize();

    protected abstract byte getType();

    protected abstract void read(DataInputStream inputStream) throws Exception;

    protected abstract void write(DataOutputStream outputStream) throws Exception;

    protected RuntimeException blockCorruptedException() {
        return getBlock().blockCorruptedException();
    }
}
@ -0,0 +1,75 @@
/*
 * Copyright 2009 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package seaweedfs.client.btree;

import com.google.common.primitives.Longs;

public class BlockPointer implements Comparable<BlockPointer> {

    private static final BlockPointer NULL = new BlockPointer(-1);

    public static BlockPointer start() {
        return NULL;
    }

    public static BlockPointer pos(long pos) {
        if (pos < -1) {
            throw new CorruptedCacheException("block pointer must be >= -1, but was " + pos);
        }
        if (pos == -1) {
            return NULL;
        }
        return new BlockPointer(pos);
    }

    private final long pos;

    private BlockPointer(long pos) {
        this.pos = pos;
    }

    public boolean isNull() {
        return pos < 0;
    }

    public long getPos() {
        return pos;
    }

    @Override
    public String toString() {
        return String.valueOf(pos);
    }

    @Override
    public boolean equals(Object obj) {
        if (obj == null || obj.getClass() != getClass()) {
            return false;
        }
        BlockPointer other = (BlockPointer) obj;
        return pos == other.pos;
    }

    @Override
    public int hashCode() {
        return Longs.hashCode(pos);
    }

    @Override
    public int compareTo(BlockPointer o) {
        return Longs.compare(pos, o.pos);
    }
}
@ -0,0 +1,68 @@
/*
 * Copyright 2009 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package seaweedfs.client.btree;

public interface BlockStore {
    /**
     * Opens this store, calling the given action if the store is empty.
     */
    void open(Runnable initAction, Factory factory);

    /**
     * Closes this store.
     */
    void close();

    /**
     * Discards all blocks from this store.
     */
    void clear();

    /**
     * Removes the given block from this store.
     */
    void remove(BlockPayload block);

    /**
     * Reads the first block from this store.
     */
    <T extends BlockPayload> T readFirst(Class<T> payloadType);

    /**
     * Reads a block from this store.
     */
    <T extends BlockPayload> T read(BlockPointer pos, Class<T> payloadType);

    /**
     * Writes a block to this store, adding the block if required.
     */
    void write(BlockPayload block);

    /**
     * Adds a new block to this store. Allocates space for the block, but does not write the contents of the block
     * until {@link #write(BlockPayload)} is called.
     */
    void attach(BlockPayload block);

    /**
     * Flushes any pending updates for this store.
     */
    void flush();

    interface Factory {
        Object create(Class<? extends BlockPayload> type);
    }
}
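// Composition note (not part of the original source): BlockStore implementations are stacked
// as decorators. The BTreePersistentIndexedCache constructor above builds roughly this chain
// (StateCheckBlockStore is referenced there but not shown in this hunk):
//
//     BlockStore raw     = new FileBackedBlockStore(cacheFile);             // blocks on disk
//     BlockStore cached  = new CachingBlockStore(raw, cacheableTypes);      // write-back cache
//     BlockStore freeing = new FreeListBlockStore(cached, maxFreeEntries);  // space reuse
//
// open() takes an initAction that runs only when the store is empty, plus a Factory that
// instantiates the correct BlockPayload subclass for each block type on read.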
@ -0,0 +1,30 @@
/*
 * Copyright 2018 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package seaweedfs.client.btree;

import java.nio.Buffer;

public class BufferCaster {
    /**
     * Without this cast, when the code compiled by Java 9+ is executed on Java 8, it will throw
     * java.lang.NoSuchMethodError: Method flip()Ljava/nio/ByteBuffer; does not exist in class java.nio.ByteBuffer
     */
    @SuppressWarnings("RedundantCast")
    public static <T extends Buffer> Buffer cast(T byteBuffer) {
        return (Buffer) byteBuffer;
    }
}
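// Usage sketch (not part of the original source): code compiled on JDK 9+ binds buf.flip()
// to the covariant ByteBuffer.flip() override, whose descriptor does not exist on Java 8.
// Casting to the Buffer supertype first keeps the Java 8-compatible call site:
//
//     ByteBuffer buf = ByteBuffer.allocate(16);
//     buf.put((byte) 1);
//     BufferCaster.cast(buf).flip(); // works on both Java 8 and Java 9+, unlike buf.flip()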
@ -0,0 +1,74 @@
/*
 * Copyright 2014 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package seaweedfs.client.btree;

import com.google.common.io.CountingInputStream;

import java.io.BufferedInputStream;
import java.io.DataInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.RandomAccessFile;

/**
 * Allows a stream of bytes to be read from a particular location of some backing byte stream.
 */
class ByteInput {
    private final RandomAccessFile file;
    private final ResettableBufferedInputStream bufferedInputStream;
    private CountingInputStream countingInputStream;

    public ByteInput(RandomAccessFile file) {
        this.file = file;
        bufferedInputStream = new ResettableBufferedInputStream(new RandomAccessFileInputStream(file));
    }

    /**
     * Starts reading from the given offset.
     */
    public DataInputStream start(long offset) throws IOException {
        file.seek(offset);
        bufferedInputStream.clear();
        countingInputStream = new CountingInputStream(bufferedInputStream);
        return new DataInputStream(countingInputStream);
    }

    /**
     * Returns the number of bytes read since {@link #start(long)} was called.
     */
    public long getBytesRead() {
        return countingInputStream.getCount();
    }

    /**
     * Finishes reading, resetting any buffered state.
     */
    public void done() {
        countingInputStream = null;
    }

    private static class ResettableBufferedInputStream extends BufferedInputStream {
        ResettableBufferedInputStream(InputStream input) {
            super(input);
        }

        void clear() {
            count = 0;
            pos = 0;
        }
    }
}
@ -0,0 +1,74 @@
/*
 * Copyright 2014 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package seaweedfs.client.btree;

import com.google.common.io.CountingOutputStream;

import java.io.BufferedOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.io.RandomAccessFile;

/**
 * Allows a stream of bytes to be written to a particular location of some backing byte stream.
 */
class ByteOutput {
    private final RandomAccessFile file;
    private final ResettableBufferedOutputStream bufferedOutputStream;
    private CountingOutputStream countingOutputStream;

    public ByteOutput(RandomAccessFile file) {
        this.file = file;
        bufferedOutputStream = new ResettableBufferedOutputStream(new RandomAccessFileOutputStream(file));
    }

    /**
     * Starts writing to the given offset. Can be beyond the current length of the file.
     */
    public DataOutputStream start(long offset) throws IOException {
        file.seek(offset);
        bufferedOutputStream.clear();
        countingOutputStream = new CountingOutputStream(bufferedOutputStream);
        return new DataOutputStream(countingOutputStream);
    }

    /**
     * Returns the number of bytes written since {@link #start(long)} was called.
     */
    public long getBytesWritten() {
        return countingOutputStream.getCount();
    }

    /**
     * Finishes writing, flushing and resetting any buffered state.
     */
    public void done() throws IOException {
        countingOutputStream.flush();
        countingOutputStream = null;
    }

    private static class ResettableBufferedOutputStream extends BufferedOutputStream {
        ResettableBufferedOutputStream(OutputStream output) {
            super(output);
        }

        void clear() {
            count = 0;
        }
    }
}
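// Usage sketch (not part of the original source): ByteInput and ByteOutput wrap a shared
// RandomAccessFile and are used in matched start()/done() pairs, as
// FileBackedBlockStore.BlockImpl does further below when writing a block:
//
//     DataOutputStream os = output.start(blockPos); // seek, then reset the buffered stream
//     os.writeByte(type);
//     os.writeInt(payloadSize);
//     // ... write the payload ...
//     long written = output.getBytesWritten();      // bytes counted since start()
//     os.writeInt((int) written);                   // trailing integrity count
//     output.done();                                // flush and drop the counting stream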
@ -0,0 +1,129 @@
/*
 * Copyright 2009 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package seaweedfs.client.btree;

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.common.collect.ImmutableSet;

import javax.annotation.Nullable;
import java.util.Collection;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.Map;

public class CachingBlockStore implements BlockStore {
    private final BlockStore store;
    private final Map<BlockPointer, BlockPayload> dirty = new LinkedHashMap<BlockPointer, BlockPayload>();
    private final Cache<BlockPointer, BlockPayload> indexBlockCache = CacheBuilder.newBuilder().maximumSize(100).concurrencyLevel(1).build();
    private final ImmutableSet<Class<? extends BlockPayload>> cacheableBlockTypes;

    public CachingBlockStore(BlockStore store, Collection<Class<? extends BlockPayload>> cacheableBlockTypes) {
        this.store = store;
        this.cacheableBlockTypes = ImmutableSet.copyOf(cacheableBlockTypes);
    }

    @Override
    public void open(Runnable initAction, Factory factory) {
        store.open(initAction, factory);
    }

    @Override
    public void close() {
        flush();
        indexBlockCache.invalidateAll();
        store.close();
    }

    @Override
    public void clear() {
        dirty.clear();
        indexBlockCache.invalidateAll();
        store.clear();
    }

    @Override
    public void flush() {
        Iterator<BlockPayload> iterator = dirty.values().iterator();
        while (iterator.hasNext()) {
            BlockPayload block = iterator.next();
            iterator.remove();
            store.write(block);
        }
        store.flush();
    }

    @Override
    public void attach(BlockPayload block) {
        store.attach(block);
    }

    @Override
    public void remove(BlockPayload block) {
        dirty.remove(block.getPos());
        if (isCacheable(block)) {
            indexBlockCache.invalidate(block.getPos());
        }
        store.remove(block);
    }

    @Override
    public <T extends BlockPayload> T readFirst(Class<T> payloadType) {
        T block = store.readFirst(payloadType);
        maybeCache(block);
        return block;
    }

    @Override
    public <T extends BlockPayload> T read(BlockPointer pos, Class<T> payloadType) {
        T block = payloadType.cast(dirty.get(pos));
        if (block != null) {
            return block;
        }
        block = maybeGetFromCache(pos, payloadType);
        if (block != null) {
            return block;
        }
        block = store.read(pos, payloadType);
        maybeCache(block);
        return block;
    }

    @Nullable
    private <T extends BlockPayload> T maybeGetFromCache(BlockPointer pos, Class<T> payloadType) {
        if (cacheableBlockTypes.contains(payloadType)) {
            return payloadType.cast(indexBlockCache.getIfPresent(pos));
        }
        return null;
    }

    @Override
    public void write(BlockPayload block) {
        store.attach(block);
        maybeCache(block);
        dirty.put(block.getPos(), block);
    }

    private <T extends BlockPayload> void maybeCache(T block) {
        if (isCacheable(block)) {
            indexBlockCache.put(block.getPos(), block);
        }
    }

    private <T extends BlockPayload> boolean isCacheable(T block) {
        return cacheableBlockTypes.contains(block.getClass());
    }
}
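// Behaviour note (not part of the original source): this is a write-back decorator. write()
// only attaches the block and records it in the insertion-ordered dirty map; nothing reaches
// the underlying store until flush() drains that map. read() consults dirty blocks first,
// then the index-block cache (only for the types named in the constructor), then the store.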
@ -0,0 +1,22 @@
/*
 * Copyright 2009 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package seaweedfs.client.btree;

class CorruptedCacheException extends RuntimeException {
    CorruptedCacheException(String message) {
        super(message);
    }
}
@ -0,0 +1,274 @@
/*
 * Copyright 2009 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package seaweedfs.client.btree;

import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.RandomAccessFile;

public class FileBackedBlockStore implements BlockStore {
    private final File cacheFile;
    private RandomAccessFile file;
    private ByteOutput output;
    private ByteInput input;
    private long nextBlock;
    private Factory factory;
    private long currentFileSize;

    public FileBackedBlockStore(File cacheFile) {
        this.cacheFile = cacheFile;
    }

    @Override
    public String toString() {
        return "cache '" + cacheFile + "'";
    }

    @Override
    public void open(Runnable runnable, Factory factory) {
        this.factory = factory;
        try {
            cacheFile.getParentFile().mkdirs();
            file = openRandomAccessFile();
            output = new ByteOutput(file);
            input = new ByteInput(file);
            currentFileSize = file.length();
            nextBlock = currentFileSize;
            if (currentFileSize == 0) {
                runnable.run();
            }
        } catch (IOException e) {
            throw new UncheckedIOException(e);
        }
    }

    private RandomAccessFile openRandomAccessFile() throws FileNotFoundException {
        try {
            return randomAccessFile("rw");
        } catch (FileNotFoundException e) {
            return randomAccessFile("r");
        }
    }

    private RandomAccessFile randomAccessFile(String mode) throws FileNotFoundException {
        return new RandomAccessFile(cacheFile, mode);
    }

    @Override
    public void close() {
        try {
            file.close();
        } catch (IOException e) {
            throw new UncheckedIOException(e);
        }
    }

    @Override
    public void clear() {
        try {
            file.setLength(0);
            currentFileSize = 0;
        } catch (IOException e) {
            throw new UncheckedIOException(e);
        }
        nextBlock = 0;
    }

    @Override
    public void attach(BlockPayload block) {
        if (block.getBlock() == null) {
            block.setBlock(new BlockImpl(block));
        }
    }

    @Override
    public void remove(BlockPayload block) {
        BlockImpl blockImpl = (BlockImpl) block.getBlock();
        blockImpl.detach();
    }

    @Override
    public void flush() {
    }

    @Override
    public <T extends BlockPayload> T readFirst(Class<T> payloadType) {
        return read(BlockPointer.pos(0), payloadType);
    }

    @Override
    public <T extends BlockPayload> T read(BlockPointer pos, Class<T> payloadType) {
        assert !pos.isNull();
        try {
            T payload = payloadType.cast(factory.create(payloadType));
            BlockImpl block = new BlockImpl(payload, pos);
            block.read();
            return payload;
        } catch (CorruptedCacheException e) {
            throw e;
        } catch (Exception e) {
            throw new UncheckedIOException(e);
        }
    }

    @Override
    public void write(BlockPayload block) {
        BlockImpl blockImpl = (BlockImpl) block.getBlock();
        try {
            blockImpl.write();
        } catch (CorruptedCacheException e) {
            throw e;
        } catch (Exception e) {
            throw new UncheckedIOException(e);
        }
    }

    private long alloc(long length) {
        long pos = nextBlock;
        nextBlock += length;
        return pos;
    }

    private final class BlockImpl extends Block {
        private static final int HEADER_SIZE = 1 + INT_SIZE; // type, payload size
        private static final int TAIL_SIZE = INT_SIZE;

        private BlockPointer pos;
        private int payloadSize;

        private BlockImpl(BlockPayload payload, BlockPointer pos) {
            this(payload);
            setPos(pos);
        }

        public BlockImpl(BlockPayload payload) {
            super(payload);
            pos = null;
            payloadSize = -1;
        }

        @Override
        public boolean hasPos() {
            return pos != null;
        }

        @Override
        public BlockPointer getPos() {
            if (pos == null) {
                pos = BlockPointer.pos(alloc(getSize()));
            }
            return pos;
        }

        @Override
        public void setPos(BlockPointer pos) {
            assert this.pos == null && !pos.isNull();
            this.pos = pos;
        }

        @Override
        public int getSize() {
            if (payloadSize < 0) {
                payloadSize = getPayload().getSize();
            }
            return payloadSize + HEADER_SIZE + TAIL_SIZE;
        }

        @Override
        public void setSize(int size) {
            int newPayloadSize = size - HEADER_SIZE - TAIL_SIZE;
            assert newPayloadSize >= payloadSize;
            payloadSize = newPayloadSize;
        }

        public void write() throws Exception {
            long pos = getPos().getPos();

            DataOutputStream outputStream = output.start(pos);

            BlockPayload payload = getPayload();

            // Write header
            outputStream.writeByte(payload.getType());
            outputStream.writeInt(payloadSize);
            long finalSize = pos + HEADER_SIZE + TAIL_SIZE + payloadSize;

            // Write body
            payload.write(outputStream);

            // Write count
            long bytesWritten = output.getBytesWritten();
            if (bytesWritten > Integer.MAX_VALUE) {
                throw new IllegalArgumentException("Block payload exceeds maximum size");
            }
            outputStream.writeInt((int) bytesWritten);
            output.done();

            // System.out.println(String.format("wrote [%d,%d)", pos, pos + bytesWritten + 4));

            // Pad
            if (currentFileSize < finalSize) {
                // System.out.println(String.format("pad length %d => %d", currentFileSize, finalSize));
                file.setLength(finalSize);
                currentFileSize = finalSize;
            }
        }

        public void read() throws Exception {
            long pos = getPos().getPos();
            assert pos >= 0;
            if (pos + HEADER_SIZE >= currentFileSize) {
                throw blockCorruptedException();
            }

            DataInputStream inputStream = input.start(pos);

            BlockPayload payload = getPayload();

            // Read header
            byte type = inputStream.readByte();
            if (type != payload.getType()) {
                throw blockCorruptedException();
            }

            // Read body
            payloadSize = inputStream.readInt();
            if (pos + HEADER_SIZE + TAIL_SIZE + payloadSize > currentFileSize) {
                throw blockCorruptedException();
            }
            payload.read(inputStream);

            // Read and verify count
            long actualCount = input.getBytesRead();
            long count = inputStream.readInt();
            if (actualCount != count) {
                System.out.println(String.format("read expected %d actual %d, pos %d payloadSize %d currentFileSize %d", count, actualCount, pos, payloadSize, currentFileSize));
                throw blockCorruptedException();
            }
            input.done();
        }

        @Override
        public RuntimeException blockCorruptedException() {
            return new CorruptedCacheException(String.format("Corrupted %s found in %s.", this,
                    FileBackedBlockStore.this));
        }
    }

}
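// Layout note (not part of the original source): as BlockImpl.write()/read() above show,
// each block on disk is laid out as
//
//     [ 1-byte type | 4-byte payload size | payload | 4-byte tail count ]
//
// The tail records how many bytes were written from the start of the block through the end
// of the payload; read() compares it with the bytes actually consumed and throws
// CorruptedCacheException on any mismatch, which triggers a cache rebuild higher up.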
@ -0,0 +1,283 @@
|
||||
/*
|
||||
* Copyright 2009 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package seaweedfs.client.btree;
|
||||
|
||||
import java.io.DataInputStream;
|
||||
import java.io.DataOutputStream;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
|
||||
public class FreeListBlockStore implements BlockStore {
|
||||
private final BlockStore store;
|
||||
private final BlockStore freeListStore;
|
||||
private final int maxBlockEntries;
|
||||
private FreeListBlock freeListBlock;
|
||||
|
||||
public FreeListBlockStore(BlockStore store, int maxBlockEntries) {
|
||||
this.store = store;
|
||||
freeListStore = this;
|
||||
this.maxBlockEntries = maxBlockEntries;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void open(final Runnable initAction, final Factory factory) {
|
||||
Runnable freeListInitAction = new Runnable() {
|
||||
@Override
|
||||
public void run() {
|
||||
freeListBlock = new FreeListBlock();
|
||||
store.write(freeListBlock);
|
||||
store.flush();
|
||||
initAction.run();
|
||||
}
|
||||
};
|
||||
Factory freeListFactory = new Factory() {
|
||||
@Override
|
||||
public Object create(Class<? extends BlockPayload> type) {
|
||||
if (type == FreeListBlock.class) {
|
||||
return new FreeListBlock();
|
||||
}
|
||||
return factory.create(type);
|
||||
}
|
||||
};
|
||||
|
||||
store.open(freeListInitAction, freeListFactory);
|
||||
freeListBlock = store.readFirst(FreeListBlock.class);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void close() {
|
||||
freeListBlock = null;
|
||||
store.close();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void clear() {
|
||||
store.clear();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void remove(BlockPayload block) {
|
||||
Block container = block.getBlock();
|
||||
store.remove(block);
|
||||
freeListBlock.add(container.getPos(), container.getSize());
|
||||
}
|
||||
|
||||
@Override
|
||||
public <T extends BlockPayload> T readFirst(Class<T> payloadType) {
|
||||
return store.read(freeListBlock.getNextPos(), payloadType);
|
||||
}
|
||||
|
||||
@Override
|
||||
public <T extends BlockPayload> T read(BlockPointer pos, Class<T> payloadType) {
|
||||
return store.read(pos, payloadType);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void write(BlockPayload block) {
|
||||
attach(block);
|
||||
store.write(block);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void attach(BlockPayload block) {
|
||||
store.attach(block);
|
||||
freeListBlock.alloc(block.getBlock());
|
||||
}
|
||||
|
||||
@Override
|
||||
public void flush() {
|
||||
store.flush();
|
||||
}
|
||||
|
||||
private void verify() {
|
||||
FreeListBlock block = store.readFirst(FreeListBlock.class);
|
||||
verify(block, Integer.MAX_VALUE);
|
||||
}
|
||||
|
||||
private void verify(FreeListBlock block, int maxValue) {
|
||||
if (block.largestInNextBlock > maxValue) {
|
||||
throw new RuntimeException("corrupt free list");
|
||||
}
|
||||
int current = 0;
|
||||
for (FreeListEntry entry : block.entries) {
|
||||
if (entry.size > maxValue) {
|
||||
throw new RuntimeException("corrupt free list");
|
||||
}
|
||||
if (entry.size < block.largestInNextBlock) {
|
||||
throw new RuntimeException("corrupt free list");
|
||||
}
|
||||
if (entry.size < current) {
|
||||
throw new RuntimeException("corrupt free list");
|
||||
}
|
||||
current = entry.size;
|
||||
}
|
||||
if (!block.nextBlock.isNull()) {
|
||||
            verify(store.read(block.nextBlock, FreeListBlock.class), block.largestInNextBlock);
        }
    }

    public class FreeListBlock extends BlockPayload {
        private List<FreeListEntry> entries = new ArrayList<FreeListEntry>();
        private int largestInNextBlock;
        private BlockPointer nextBlock = BlockPointer.start();
        // Transient fields
        private FreeListBlock prev;
        private FreeListBlock next;

        @Override
        protected int getSize() {
            return Block.LONG_SIZE + Block.INT_SIZE + Block.INT_SIZE + maxBlockEntries * (Block.LONG_SIZE
                    + Block.INT_SIZE);
        }

        @Override
        protected byte getType() {
            return 0x44;
        }

        @Override
        protected void read(DataInputStream inputStream) throws Exception {
            nextBlock = BlockPointer.pos(inputStream.readLong());
            largestInNextBlock = inputStream.readInt();
            int count = inputStream.readInt();
            for (int i = 0; i < count; i++) {
                BlockPointer pos = BlockPointer.pos(inputStream.readLong());
                int size = inputStream.readInt();
                entries.add(new FreeListEntry(pos, size));
            }
        }

        @Override
        protected void write(DataOutputStream outputStream) throws Exception {
            outputStream.writeLong(nextBlock.getPos());
            outputStream.writeInt(largestInNextBlock);
            outputStream.writeInt(entries.size());
            for (FreeListEntry entry : entries) {
                outputStream.writeLong(entry.pos.getPos());
                outputStream.writeInt(entry.size);
            }
        }

        public void add(BlockPointer pos, int size) {
            assert !pos.isNull() && size >= 0;
            if (size == 0) {
                return;
            }

            if (size < largestInNextBlock) {
                FreeListBlock next = getNextBlock();
                next.add(pos, size);
                return;
            }

            FreeListEntry entry = new FreeListEntry(pos, size);
            int index = Collections.binarySearch(entries, entry);
            if (index < 0) {
                index = -index - 1;
            }
            entries.add(index, entry);

            if (entries.size() > maxBlockEntries) {
                FreeListBlock newBlock = new FreeListBlock();
                newBlock.largestInNextBlock = largestInNextBlock;
                newBlock.nextBlock = nextBlock;
                newBlock.prev = this;
                newBlock.next = next;
                next = newBlock;

                List<FreeListEntry> newBlockEntries = entries.subList(0, entries.size() / 2);
                newBlock.entries.addAll(newBlockEntries);
                newBlockEntries.clear();
                largestInNextBlock = newBlock.entries.get(newBlock.entries.size() - 1).size;
                freeListStore.write(newBlock);
                nextBlock = newBlock.getPos();
            }

            freeListStore.write(this);
        }

        private FreeListBlock getNextBlock() {
            if (next == null) {
                next = freeListStore.read(nextBlock, FreeListBlock.class);
                next.prev = this;
            }
            return next;
        }

        public void alloc(Block block) {
            if (block.hasPos()) {
                return;
            }

            int requiredSize = block.getSize();

            if (entries.isEmpty() || requiredSize <= largestInNextBlock) {
                if (nextBlock.isNull()) {
                    return;
                }
                getNextBlock().alloc(block);
                return;
            }

            int index = Collections.binarySearch(entries, new FreeListEntry(null, requiredSize));
            if (index < 0) {
                index = -index - 1;
            }
            if (index == entries.size()) {
                // Largest free block is too small
                return;
            }

            FreeListEntry entry = entries.remove(index);
            block.setPos(entry.pos);
            block.setSize(entry.size);
            freeListStore.write(this);

            if (entries.size() == 0 && prev != null) {
                prev.nextBlock = nextBlock;
                prev.largestInNextBlock = largestInNextBlock;
                prev.next = next;
                if (next != null) {
                    next.prev = prev;
                }
                freeListStore.write(prev);
                freeListStore.remove(this);
            }
        }
    }

    private static class FreeListEntry implements Comparable<FreeListEntry> {
        final BlockPointer pos;
        final int size;

        private FreeListEntry(BlockPointer pos, int size) {
            this.pos = pos;
            this.size = size;
        }

        @Override
        public int compareTo(FreeListEntry o) {
            if (size > o.size) {
                return 1;
            }
            if (size < o.size) {
                return -1;
            }
            return 0;
        }
    }
}
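The add() method above keeps each block's entries sorted by size and relies on the Collections.binarySearch insertion-point convention: a negative return value encodes where a missing key would go. A minimal standalone sketch of that idiom, independent of the block-store classes above:

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public class SortedInsertDemo {
    public static void main(String[] args) {
        List<Integer> sizes = new ArrayList<>(List.of(10, 20, 40));
        int size = 30;
        // binarySearch returns -(insertionPoint) - 1 when the key is absent,
        // so -index - 1 recovers the position that keeps the list sorted.
        int index = Collections.binarySearch(sizes, size);
        if (index < 0) {
            index = -index - 1;
        }
        sizes.add(index, size);
        System.out.println(sizes); // [10, 20, 30, 40]
    }
}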
@ -0,0 +1,75 @@
/*
 * Copyright 2014 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package seaweedfs.client.btree;

import seaweedfs.client.btree.serialize.Serializer;
import seaweedfs.client.btree.serialize.kryo.KryoBackedEncoder;

import java.io.IOException;
import java.io.OutputStream;
import java.math.BigInteger;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;

class KeyHasher<K> {
    private final Serializer<K> serializer;
    private final MessageDigestStream digestStream = new MessageDigestStream();
    private final KryoBackedEncoder encoder = new KryoBackedEncoder(digestStream);

    public KeyHasher(Serializer<K> serializer) {
        this.serializer = serializer;
    }

    long getHashCode(K key) throws Exception {
        serializer.write(encoder, key);
        encoder.flush();
        return digestStream.getChecksum();
    }

    private static class MessageDigestStream extends OutputStream {
        MessageDigest messageDigest;

        private MessageDigestStream() {
            try {
                messageDigest = MessageDigest.getInstance("MD5");
            } catch (NoSuchAlgorithmException e) {
                throw UncheckedException.throwAsUncheckedException(e);
            }
        }

        @Override
        public void write(int b) throws IOException {
            messageDigest.update((byte) b);
        }

        @Override
        public void write(byte[] b) throws IOException {
            messageDigest.update(b);
        }

        @Override
        public void write(byte[] b, int off, int len) throws IOException {
            messageDigest.update(b, off, len);
        }

        long getChecksum() {
            byte[] digest = messageDigest.digest();
            assert digest.length == 16;
            return new BigInteger(digest).longValue();
        }
    }
}
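A hedged usage sketch for KeyHasher: the Serializer shape below (read/write against Decoder/Encoder, both from seaweedfs.client.btree.serialize) is inferred from DefaultSerializer later in this change, and the fragment assumes it runs in the same package, since KeyHasher is package-private.

Serializer<String> stringSerializer = new Serializer<String>() {
    @Override
    public String read(Decoder decoder) throws Exception {
        return decoder.readString();
    }

    @Override
    public void write(Encoder encoder, String value) throws Exception {
        encoder.writeString(value);
    }
};
KeyHasher<String> hasher = new KeyHasher<String>(stringSerializer);
long hash = hasher.getHashCode("some/btree/key"); // 64 bits taken from the key's MD5 digest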
@ -0,0 +1,54 @@
/*
 * Copyright 2013 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package seaweedfs.client.btree;

import java.io.IOException;
import java.io.InputStream;
import java.io.RandomAccessFile;

/**
 * Reads from a {@link RandomAccessFile}. Each operation reads from and advances the current position of the file.
 *
 * <p>Closing this stream does not close the underlying file.
 */
public class RandomAccessFileInputStream extends InputStream {
    private final RandomAccessFile file;

    public RandomAccessFileInputStream(RandomAccessFile file) {
        this.file = file;
    }

    @Override
    public long skip(long n) throws IOException {
        file.seek(file.getFilePointer() + n);
        return n;
    }

    @Override
    public int read(byte[] bytes) throws IOException {
        return file.read(bytes);
    }

    @Override
    public int read() throws IOException {
        return file.read();
    }

    @Override
    public int read(byte[] bytes, int offset, int length) throws IOException {
        return file.read(bytes, offset, length);
    }
}
@ -0,0 +1,48 @@
/*
 * Copyright 2013 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package seaweedfs.client.btree;

import java.io.IOException;
import java.io.OutputStream;
import java.io.RandomAccessFile;

/**
 * Writes to a {@link RandomAccessFile}. Each operation writes to and advances the current position of the file.
 *
 * <p>Closing this stream does not close the underlying file. Flushing this stream does nothing.
 */
public class RandomAccessFileOutputStream extends OutputStream {
    private final RandomAccessFile file;

    public RandomAccessFileOutputStream(RandomAccessFile file) {
        this.file = file;
    }

    @Override
    public void write(int i) throws IOException {
        file.write(i);
    }

    @Override
    public void write(byte[] bytes) throws IOException {
        file.write(bytes);
    }

    @Override
    public void write(byte[] bytes, int offset, int length) throws IOException {
        file.write(bytes, offset, length);
    }
}
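Taken together, the two adapters above let ordinary stream code drive a single shared RandomAccessFile cursor. A small sketch (the file name is hypothetical, and the btree classes are assumed imported from seaweedfs.client.btree):

import java.io.RandomAccessFile;

public class RafStreamDemo {
    public static void main(String[] args) throws Exception {
        try (RandomAccessFile file = new RandomAccessFile("demo.bin", "rw")) {
            RandomAccessFileOutputStream out = new RandomAccessFileOutputStream(file);
            out.write(new byte[]{1, 2, 3, 4}); // advances the file position to 4

            file.seek(0); // both streams share the file's single position
            RandomAccessFileInputStream in = new RandomAccessFileInputStream(file);
            byte[] buf = new byte[4];
            in.read(buf); // reads back {1, 2, 3, 4}
        }
    }
}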
@ -0,0 +1,87 @@
/*
 * Copyright 2009 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package seaweedfs.client.btree;

public class StateCheckBlockStore implements BlockStore {
    private final BlockStore blockStore;
    private boolean open;

    public StateCheckBlockStore(BlockStore blockStore) {
        this.blockStore = blockStore;
    }

    @Override
    public void open(Runnable initAction, Factory factory) {
        assert !open;
        open = true;
        blockStore.open(initAction, factory);
    }

    public boolean isOpen() {
        return open;
    }

    @Override
    public void close() {
        if (!open) {
            return;
        }
        open = false;
        blockStore.close();
    }

    @Override
    public void clear() {
        assert open;
        blockStore.clear();
    }

    @Override
    public void remove(BlockPayload block) {
        assert open;
        blockStore.remove(block);
    }

    @Override
    public <T extends BlockPayload> T readFirst(Class<T> payloadType) {
        assert open;
        return blockStore.readFirst(payloadType);
    }

    @Override
    public <T extends BlockPayload> T read(BlockPointer pos, Class<T> payloadType) {
        assert open;
        return blockStore.read(pos, payloadType);
    }

    @Override
    public void write(BlockPayload block) {
        assert open;
        blockStore.write(block);
    }

    @Override
    public void attach(BlockPayload block) {
        assert open;
        blockStore.attach(block);
    }

    @Override
    public void flush() {
        assert open;
        blockStore.flush();
    }
}
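StateCheckBlockStore is a plain decorator: it adds open/close state assertions in front of any BlockStore. A hedged fragment, where fileBackedStore, blockFactory, and block stand in for a concrete store, its Factory, and a payload (all hypothetical here); the asserts only fire when the JVM runs with -ea:

StateCheckBlockStore store = new StateCheckBlockStore(fileBackedStore);
store.open(() -> { /* one-time init when the backing file is new */ }, blockFactory);
store.write(block); // asserts the store is open before delegating
store.close();
store.close();      // safe: close() returns immediately when already closed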
@ -0,0 +1,526 @@
/*
 * Copyright 2016 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package seaweedfs.client.btree;

import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.nio.CharBuffer;
import java.nio.charset.CharacterCodingException;
import java.nio.charset.Charset;
import java.nio.charset.CharsetDecoder;
import java.nio.charset.CoderResult;
import java.nio.charset.CodingErrorAction;
import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;

/**
 * An in-memory buffer that provides OutputStream and InputStream interfaces.
 *
 * This is more efficient than using ByteArrayOutputStream/ByteArrayInputStream.
 *
 * Reading the buffer will clear the buffer.
 * This is not thread-safe; it is intended to be used by a single Thread.
 */
public class StreamByteBuffer {
    private static final int DEFAULT_CHUNK_SIZE = 4096;
    private static final int MAX_CHUNK_SIZE = 1024 * 1024;
    private LinkedList<StreamByteBufferChunk> chunks = new LinkedList<StreamByteBufferChunk>();
    private StreamByteBufferChunk currentWriteChunk;
    private StreamByteBufferChunk currentReadChunk;
    private int chunkSize;
    private int nextChunkSize;
    private int maxChunkSize;
    private StreamByteBufferOutputStream output;
    private StreamByteBufferInputStream input;
    private int totalBytesUnreadInList;

    public StreamByteBuffer() {
        this(DEFAULT_CHUNK_SIZE);
    }

    public StreamByteBuffer(int chunkSize) {
        this.chunkSize = chunkSize;
        this.nextChunkSize = chunkSize;
        this.maxChunkSize = Math.max(chunkSize, MAX_CHUNK_SIZE);
        currentWriteChunk = new StreamByteBufferChunk(nextChunkSize);
        output = new StreamByteBufferOutputStream();
        input = new StreamByteBufferInputStream();
    }

    public static StreamByteBuffer of(InputStream inputStream) throws IOException {
        StreamByteBuffer buffer = new StreamByteBuffer(chunkSizeInDefaultRange(inputStream.available()));
        buffer.readFully(inputStream);
        return buffer;
    }

    public static StreamByteBuffer of(InputStream inputStream, int len) throws IOException {
        StreamByteBuffer buffer = new StreamByteBuffer(chunkSizeInDefaultRange(len));
        buffer.readFrom(inputStream, len);
        return buffer;
    }

    public static StreamByteBuffer createWithChunkSizeInDefaultRange(int value) {
        return new StreamByteBuffer(chunkSizeInDefaultRange(value));
    }

    static int chunkSizeInDefaultRange(int value) {
        return valueInRange(value, DEFAULT_CHUNK_SIZE, MAX_CHUNK_SIZE);
    }

    private static int valueInRange(int value, int min, int max) {
        return Math.min(Math.max(value, min), max);
    }

    public OutputStream getOutputStream() {
        return output;
    }

    public InputStream getInputStream() {
        return input;
    }

    public void writeTo(OutputStream target) throws IOException {
        while (prepareRead() != -1) {
            currentReadChunk.writeTo(target);
        }
    }

    public void readFrom(InputStream inputStream, int len) throws IOException {
        int bytesLeft = len;
        while (bytesLeft > 0) {
            int spaceLeft = allocateSpace();
            int limit = Math.min(spaceLeft, bytesLeft);
            int readBytes = currentWriteChunk.readFrom(inputStream, limit);
            if (readBytes == -1) {
                throw new EOFException("Unexpected EOF");
            }
            bytesLeft -= readBytes;
        }
    }

    public void readFully(InputStream inputStream) throws IOException {
        while (true) {
            int len = allocateSpace();
            int readBytes = currentWriteChunk.readFrom(inputStream, len);
            if (readBytes == -1) {
                break;
            }
        }
    }

    public byte[] readAsByteArray() {
        byte[] buf = new byte[totalBytesUnread()];
        input.readImpl(buf, 0, buf.length);
        return buf;
    }

    public List<byte[]> readAsListOfByteArrays() {
        List<byte[]> listOfByteArrays = new ArrayList<byte[]>(chunks.size() + 1);
        byte[] buf;
        while ((buf = input.readNextBuffer()) != null) {
            if (buf.length > 0) {
                listOfByteArrays.add(buf);
            }
        }
        return listOfByteArrays;
    }

    public String readAsString(String encoding) {
        Charset charset = Charset.forName(encoding);
        return readAsString(charset);
    }

    public String readAsString() {
        return readAsString(Charset.defaultCharset());
    }

    public String readAsString(Charset charset) {
        try {
            return doReadAsString(charset);
        } catch (CharacterCodingException e) {
            throw new UncheckedIOException(e);
        }
    }

    private String doReadAsString(Charset charset) throws CharacterCodingException {
        int unreadSize = totalBytesUnread();
        if (unreadSize > 0) {
            return readAsCharBuffer(charset).toString();
        }
        return "";
    }

    private CharBuffer readAsCharBuffer(Charset charset) throws CharacterCodingException {
        CharsetDecoder decoder = charset.newDecoder().onMalformedInput(
                CodingErrorAction.REPLACE).onUnmappableCharacter(
                CodingErrorAction.REPLACE);
        CharBuffer charbuffer = CharBuffer.allocate(totalBytesUnread());
        ByteBuffer buf = null;
        boolean wasUnderflow = false;
        ByteBuffer nextBuf = null;
        boolean needsFlush = false;
        while (hasRemaining(nextBuf) || hasRemaining(buf) || prepareRead() != -1) {
            if (hasRemaining(buf)) {
                // handle decoding underflow, multi-byte unicode character at buffer chunk boundary
                if (!wasUnderflow) {
                    throw new IllegalStateException("Unexpected state. Buffer has remaining bytes without underflow in decoding.");
                }
                if (!hasRemaining(nextBuf) && prepareRead() != -1) {
                    nextBuf = currentReadChunk.readToNioBuffer();
                }
                // copy one by one until the underflow has been resolved
                buf = ByteBuffer.allocate(buf.remaining() + 1).put(buf);
                buf.put(nextBuf.get());
                BufferCaster.cast(buf).flip();
            } else {
                if (hasRemaining(nextBuf)) {
                    buf = nextBuf;
                } else if (prepareRead() != -1) {
                    buf = currentReadChunk.readToNioBuffer();
                    if (!hasRemaining(buf)) {
                        throw new IllegalStateException("Unexpected state. Buffer is empty.");
                    }
                }
                nextBuf = null;
            }
            boolean endOfInput = !hasRemaining(nextBuf) && prepareRead() == -1;
            int bufRemainingBefore = buf.remaining();
            CoderResult result = decoder.decode(buf, charbuffer, false);
            if (bufRemainingBefore > buf.remaining()) {
                needsFlush = true;
            }
            if (endOfInput) {
                result = decoder.decode(ByteBuffer.allocate(0), charbuffer, true);
                if (!result.isUnderflow()) {
                    result.throwException();
                }
                break;
            }
            wasUnderflow = result.isUnderflow();
        }
        if (needsFlush) {
            CoderResult result = decoder.flush(charbuffer);
            if (!result.isUnderflow()) {
                result.throwException();
            }
        }
        clear();
        // push back remaining bytes of multi-byte unicode character
        while (hasRemaining(buf)) {
            byte b = buf.get();
            try {
                getOutputStream().write(b);
            } catch (IOException e) {
                throw new UncheckedIOException(e);
            }
        }
        BufferCaster.cast(charbuffer).flip();
        return charbuffer;
    }

    private boolean hasRemaining(ByteBuffer nextBuf) {
        return nextBuf != null && nextBuf.hasRemaining();
    }

    public int totalBytesUnread() {
        int total = totalBytesUnreadInList;
        if (currentReadChunk != null) {
            total += currentReadChunk.bytesUnread();
        }
        if (currentWriteChunk != currentReadChunk && currentWriteChunk != null) {
            total += currentWriteChunk.bytesUnread();
        }
        return total;
    }

    protected int allocateSpace() {
        int spaceLeft = currentWriteChunk.spaceLeft();
        if (spaceLeft == 0) {
            addChunk(currentWriteChunk);
            currentWriteChunk = new StreamByteBufferChunk(nextChunkSize);
            if (nextChunkSize < maxChunkSize) {
                nextChunkSize = Math.min(nextChunkSize * 2, maxChunkSize);
            }
            spaceLeft = currentWriteChunk.spaceLeft();
        }
        return spaceLeft;
    }

    protected int prepareRead() {
        int bytesUnread = (currentReadChunk != null) ? currentReadChunk.bytesUnread() : 0;
        if (bytesUnread == 0) {
            if (!chunks.isEmpty()) {
                currentReadChunk = chunks.removeFirst();
                bytesUnread = currentReadChunk.bytesUnread();
                totalBytesUnreadInList -= bytesUnread;
            } else if (currentReadChunk != currentWriteChunk) {
                currentReadChunk = currentWriteChunk;
                bytesUnread = currentReadChunk.bytesUnread();
            } else {
                bytesUnread = -1;
            }
        }
        return bytesUnread;
    }

    public static StreamByteBuffer of(List<byte[]> listOfByteArrays) {
        StreamByteBuffer buffer = new StreamByteBuffer();
        buffer.addChunks(listOfByteArrays);
        return buffer;
    }

    private void addChunks(List<byte[]> listOfByteArrays) {
        for (byte[] buf : listOfByteArrays) {
            addChunk(new StreamByteBufferChunk(buf));
        }
    }

    private void addChunk(StreamByteBufferChunk chunk) {
        chunks.add(chunk);
        totalBytesUnreadInList += chunk.bytesUnread();
    }

    static class StreamByteBufferChunk {
        private int pointer;
        private byte[] buffer;
        private int size;
        private int used;

        public StreamByteBufferChunk(int size) {
            this.size = size;
            buffer = new byte[size];
        }

        public StreamByteBufferChunk(byte[] buf) {
            this.size = buf.length;
            this.buffer = buf;
            this.used = buf.length;
        }

        public ByteBuffer readToNioBuffer() {
            if (pointer < used) {
                ByteBuffer result;
                if (pointer > 0 || used < size) {
                    result = ByteBuffer.wrap(buffer, pointer, used - pointer);
                } else {
                    result = ByteBuffer.wrap(buffer);
                }
                pointer = used;
                return result;
            }

            return null;
        }

        public boolean write(byte b) {
            if (used < size) {
                buffer[used++] = b;
                return true;
            }

            return false;
        }

        public void write(byte[] b, int off, int len) {
            System.arraycopy(b, off, buffer, used, len);
            used = used + len;
        }

        public void read(byte[] b, int off, int len) {
            System.arraycopy(buffer, pointer, b, off, len);
            pointer = pointer + len;
        }

        public void writeTo(OutputStream target) throws IOException {
            if (pointer < used) {
                target.write(buffer, pointer, used - pointer);
                pointer = used;
            }
        }

        public void reset() {
            pointer = 0;
        }

        public int bytesUsed() {
            return used;
        }

        public int bytesUnread() {
            return used - pointer;
        }

        public int read() {
            if (pointer < used) {
                return buffer[pointer++] & 0xff;
            }

            return -1;
        }

        public int spaceLeft() {
            return size - used;
        }

        public int readFrom(InputStream inputStream, int len) throws IOException {
            int readBytes = inputStream.read(buffer, used, len);
            if (readBytes > 0) {
                used += readBytes;
            }
            return readBytes;
        }

        public void clear() {
            used = pointer = 0;
        }

        public byte[] readBuffer() {
            if (used == buffer.length && pointer == 0) {
                pointer = used;
                return buffer;
            } else if (pointer < used) {
                byte[] buf = new byte[used - pointer];
                read(buf, 0, used - pointer);
                return buf;
            } else {
                return new byte[0];
            }
        }
    }

    class StreamByteBufferOutputStream extends OutputStream {
        private boolean closed;

        @Override
        public void write(byte[] b, int off, int len) throws IOException {
            if (b == null) {
                throw new NullPointerException();
            }

            if ((off < 0) || (off > b.length) || (len < 0)
                    || ((off + len) > b.length) || ((off + len) < 0)) {
                throw new IndexOutOfBoundsException();
            }

            if (len == 0) {
                return;
            }

            int bytesLeft = len;
            int currentOffset = off;
            while (bytesLeft > 0) {
                int spaceLeft = allocateSpace();
                int writeBytes = Math.min(spaceLeft, bytesLeft);
                currentWriteChunk.write(b, currentOffset, writeBytes);
                bytesLeft -= writeBytes;
                currentOffset += writeBytes;
            }
        }

        @Override
        public void close() throws IOException {
            closed = true;
        }

        public boolean isClosed() {
            return closed;
        }

        @Override
        public void write(int b) throws IOException {
            allocateSpace();
            currentWriteChunk.write((byte) b);
        }

        public StreamByteBuffer getBuffer() {
            return StreamByteBuffer.this;
        }
    }

    class StreamByteBufferInputStream extends InputStream {
        @Override
        public int read() throws IOException {
            prepareRead();
            return currentReadChunk.read();
        }

        @Override
        public int read(byte[] b, int off, int len) throws IOException {
            return readImpl(b, off, len);
        }

        int readImpl(byte[] b, int off, int len) {
            if (b == null) {
                throw new NullPointerException();
            }

            if ((off < 0) || (off > b.length) || (len < 0)
                    || ((off + len) > b.length) || ((off + len) < 0)) {
                throw new IndexOutOfBoundsException();
            }

            if (len == 0) {
                return 0;
            }

            int bytesLeft = len;
            int currentOffset = off;
            int bytesUnread = prepareRead();
            int totalBytesRead = 0;
            while (bytesLeft > 0 && bytesUnread != -1) {
                int readBytes = Math.min(bytesUnread, bytesLeft);
                currentReadChunk.read(b, currentOffset, readBytes);
                bytesLeft -= readBytes;
                currentOffset += readBytes;
                totalBytesRead += readBytes;
                bytesUnread = prepareRead();
            }
            if (totalBytesRead > 0) {
                return totalBytesRead;
            }

            return -1;
        }

        @Override
        public int available() throws IOException {
            return totalBytesUnread();
        }

        public StreamByteBuffer getBuffer() {
            return StreamByteBuffer.this;
        }

        public byte[] readNextBuffer() {
            if (prepareRead() != -1) {
                return currentReadChunk.readBuffer();
            }
            return null;
        }
    }

    public void clear() {
        chunks.clear();
        currentReadChunk = null;
        totalBytesUnreadInList = 0;
        currentWriteChunk.clear();
    }
}
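A short usage sketch for StreamByteBuffer (same package assumed); note that reading drains the buffer, so a second readAsString() here would return an empty string:

import java.nio.charset.StandardCharsets;

public class StreamByteBufferDemo {
    public static void main(String[] args) throws Exception {
        StreamByteBuffer buffer = new StreamByteBuffer();
        buffer.getOutputStream().write("hello, world".getBytes(StandardCharsets.UTF_8));
        // Reading clears the buffer, per the class javadoc.
        String text = buffer.readAsString(StandardCharsets.UTF_8);
        System.out.println(text); // hello, world
    }
}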
@ -0,0 +1,88 @@
/*
 * Copyright 2010 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package seaweedfs.client.btree;

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.util.concurrent.Callable;

/**
 * Wraps a checked exception. Carries no other context.
 */
public final class UncheckedException extends RuntimeException {
    private UncheckedException(Throwable cause) {
        super(cause);
    }

    private UncheckedException(String message, Throwable cause) {
        super(message, cause);
    }

    /**
     * Note: always throws the failure in some form. The return value is to keep the compiler happy.
     */
    public static RuntimeException throwAsUncheckedException(Throwable t) {
        return throwAsUncheckedException(t, false);
    }

    /**
     * Note: always throws the failure in some form. The return value is to keep the compiler happy.
     */
    public static RuntimeException throwAsUncheckedException(Throwable t, boolean preserveMessage) {
        if (t instanceof InterruptedException) {
            Thread.currentThread().interrupt();
        }
        if (t instanceof RuntimeException) {
            throw (RuntimeException) t;
        }
        if (t instanceof Error) {
            throw (Error) t;
        }
        if (t instanceof IOException) {
            if (preserveMessage) {
                throw new UncheckedIOException(t.getMessage(), t);
            } else {
                throw new UncheckedIOException(t);
            }
        }
        if (preserveMessage) {
            throw new UncheckedException(t.getMessage(), t);
        } else {
            throw new UncheckedException(t);
        }
    }

    public static <T> T callUnchecked(Callable<T> callable) {
        try {
            return callable.call();
        } catch (Exception e) {
            throw throwAsUncheckedException(e);
        }
    }

    /**
     * Unwraps the passed InvocationTargetException, making the exception stack cleaner without losing information.
     *
     * Note: always throws the failure in some form. The return value is to keep the compiler happy.
     *
     * @param e to be unwrapped
     * @return an instance of RuntimeException based on the target exception of the parameter.
     */
    public static RuntimeException unwrapAndRethrow(InvocationTargetException e) {
        return UncheckedException.throwAsUncheckedException(e.getTargetException());
    }
}
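A typical use of callUnchecked is adapting a checked-exception API to a context that cannot declare one; a hedged fragment (hypothetical file name, Files.readString requires Java 11+):

// Wraps the checked IOException from Files.readString into an unchecked exception,
// so this expression can be used where checked exceptions are not allowed.
String content = UncheckedException.callUnchecked(() ->
        java.nio.file.Files.readString(java.nio.file.Paths.get("notes.txt")));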
@ -0,0 +1,36 @@
/*
 * Copyright 2012 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package seaweedfs.client.btree;

/**
 * <code>UncheckedIOException</code> is used to wrap an {@link java.io.IOException} into an unchecked exception.
 */
public class UncheckedIOException extends RuntimeException {
    public UncheckedIOException() {
    }

    public UncheckedIOException(String message) {
        super(message);
    }

    public UncheckedIOException(String message, Throwable cause) {
        super(message, cause);
    }

    public UncheckedIOException(Throwable cause) {
        super(cause);
    }
}
@ -0,0 +1,133 @@
/*
 * Copyright 2013 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package seaweedfs.client.btree.serialize;

import javax.annotation.Nullable;
import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;

public abstract class AbstractDecoder implements Decoder {
    private DecoderStream stream;

    @Override
    public InputStream getInputStream() {
        if (stream == null) {
            stream = new DecoderStream();
        }
        return stream;
    }

    @Override
    public void readBytes(byte[] buffer) throws IOException {
        readBytes(buffer, 0, buffer.length);
    }

    @Override
    public byte[] readBinary() throws EOFException, IOException {
        int size = readSmallInt();
        byte[] result = new byte[size];
        readBytes(result);
        return result;
    }

    @Override
    public int readSmallInt() throws EOFException, IOException {
        return readInt();
    }

    @Override
    public long readSmallLong() throws EOFException, IOException {
        return readLong();
    }

    @Nullable
    @Override
    public Integer readNullableSmallInt() throws IOException {
        if (readBoolean()) {
            return readSmallInt();
        } else {
            return null;
        }
    }

    @Override
    public String readNullableString() throws EOFException, IOException {
        if (readBoolean()) {
            return readString();
        } else {
            return null;
        }
    }

    @Override
    public void skipBytes(long count) throws EOFException, IOException {
        long remaining = count;
        while (remaining > 0) {
            long skipped = maybeSkip(remaining);
            if (skipped <= 0) {
                break;
            }
            remaining -= skipped;
        }
        if (remaining > 0) {
            throw new EOFException();
        }
    }

    @Override
    public <T> T decodeChunked(DecodeAction<Decoder, T> decodeAction) throws EOFException, Exception {
        throw new UnsupportedOperationException();
    }

    @Override
    public void skipChunked() throws EOFException, IOException {
        throw new UnsupportedOperationException();
    }

    protected abstract int maybeReadBytes(byte[] buffer, int offset, int count) throws IOException;

    protected abstract long maybeSkip(long count) throws IOException;

    private class DecoderStream extends InputStream {
        byte[] buffer = new byte[1];

        @Override
        public long skip(long n) throws IOException {
            return maybeSkip(n);
        }

        @Override
        public int read() throws IOException {
            int read = maybeReadBytes(buffer, 0, 1);
            if (read <= 0) {
                return read;
            }
            return buffer[0] & 0xff;
        }

        @Override
        public int read(byte[] buffer) throws IOException {
            return maybeReadBytes(buffer, 0, buffer.length);
        }

        @Override
        public int read(byte[] buffer, int offset, int count) throws IOException {
            return maybeReadBytes(buffer, offset, count);
        }
    }
}
@ -0,0 +1,101 @@
/*
 * Copyright 2013 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package seaweedfs.client.btree.serialize;

import javax.annotation.Nullable;
import java.io.IOException;
import java.io.OutputStream;

public abstract class AbstractEncoder implements Encoder {
    private EncoderStream stream;

    @Override
    public OutputStream getOutputStream() {
        if (stream == null) {
            stream = new EncoderStream();
        }
        return stream;
    }

    @Override
    public void writeBytes(byte[] bytes) throws IOException {
        writeBytes(bytes, 0, bytes.length);
    }

    @Override
    public void writeBinary(byte[] bytes) throws IOException {
        writeBinary(bytes, 0, bytes.length);
    }

    @Override
    public void writeBinary(byte[] bytes, int offset, int count) throws IOException {
        writeSmallInt(count);
        writeBytes(bytes, offset, count);
    }

    @Override
    public void encodeChunked(EncodeAction<Encoder> writeAction) throws Exception {
        throw new UnsupportedOperationException();
    }

    @Override
    public void writeSmallInt(int value) throws IOException {
        writeInt(value);
    }

    @Override
    public void writeSmallLong(long value) throws IOException {
        writeLong(value);
    }

    @Override
    public void writeNullableSmallInt(@Nullable Integer value) throws IOException {
        if (value == null) {
            writeBoolean(false);
        } else {
            writeBoolean(true);
            writeSmallInt(value);
        }
    }

    @Override
    public void writeNullableString(@Nullable CharSequence value) throws IOException {
        if (value == null) {
            writeBoolean(false);
        } else {
            writeBoolean(true);
            writeString(value.toString());
        }
    }

    private class EncoderStream extends OutputStream {
        @Override
        public void write(byte[] buffer) throws IOException {
            writeBytes(buffer);
        }

        @Override
        public void write(byte[] buffer, int offset, int length) throws IOException {
            writeBytes(buffer, offset, length);
        }

        @Override
        public void write(int b) throws IOException {
            writeByte((byte) b);
        }
    }
}
@ -0,0 +1,40 @@
/*
 * Copyright 2016 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package seaweedfs.client.btree.serialize;

import com.google.common.base.Objects;

/**
 * This abstract class provides a sensible default implementation for {@code Serializer} equality. This equality
 * implementation is required to enable cache instance reuse within the same Gradle runtime. Serializers are used
 * as cache parameters, which need to be compared to determine cache compatibility.
 */
public abstract class AbstractSerializer<T> implements Serializer<T> {
    @Override
    public boolean equals(Object obj) {
        if (obj == null) {
            return false;
        }

        return Objects.equal(obj.getClass(), getClass());
    }

    @Override
    public int hashCode() {
        return Objects.hashCode(getClass());
    }
}
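Because equality is class-based, two independently constructed serializers of the same type compare equal, which is what lets a persisted cache be reopened and recognized as compatible. A small sketch using DefaultSerializer (defined later in this change):

Serializer<String> a = new DefaultSerializer<String>();
Serializer<String> b = new DefaultSerializer<String>();
assert a.equals(b) && a.hashCode() == b.hashCode(); // same class and (default) classloader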
@ -0,0 +1,79 @@
/*
 * Copyright 2012 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package seaweedfs.client.btree.serialize;

import javax.annotation.Nullable;

public abstract class Cast {

    /**
     * Casts the given object to the given type, providing a better error message than the default.
     *
     * The standard {@link Class#cast(Object)} method produces unsatisfactory error messages on some platforms
     * when it fails. All this method does is provide a better, consistent, error message.
     *
     * This should be used whenever there is a chance the cast could fail. If in doubt, use this.
     *
     * @param outputType The type to cast the input to
     * @param object The object to be cast (must not be {@code null})
     * @param <O> The type to be cast to
     * @param <I> The type of the object to be cast
     * @return The input object, cast to the output type
     */
    public static <O, I> O cast(Class<O> outputType, I object) {
        try {
            return outputType.cast(object);
        } catch (ClassCastException e) {
            throw new ClassCastException(String.format(
                    "Failed to cast object %s of type %s to target type %s", object, object.getClass().getName(), outputType.getName()
            ));
        }
    }

    /**
     * Casts the given object to the given type, providing a better error message than the default.
     *
     * The standard {@link Class#cast(Object)} method produces unsatisfactory error messages on some platforms
     * when it fails. All this method does is provide a better, consistent, error message.
     *
     * This should be used whenever there is a chance the cast could fail. If in doubt, use this.
     *
     * @param outputType The type to cast the input to
     * @param object The object to be cast
     * @param <O> The type to be cast to
     * @param <I> The type of the object to be cast
     * @return The input object, cast to the output type
     */
    @Nullable
    public static <O, I> O castNullable(Class<O> outputType, @Nullable I object) {
        if (object == null) {
            return null;
        }
        return cast(outputType, object);
    }

    @SuppressWarnings("unchecked")
    @Nullable
    public static <T> T uncheckedCast(@Nullable Object object) {
        return (T) object;
    }

    @SuppressWarnings("unchecked")
    public static <T> T uncheckedNonnullCast(Object object) {
        return (T) object;
    }
}
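A short usage sketch for the casting helpers above:

Object value = "a string in disguise";
String s = Cast.cast(String.class, value);          // succeeds
Integer n = Cast.castNullable(Integer.class, null); // returns null without throwing
// Cast.cast(Integer.class, value) would throw:
// ClassCastException: Failed to cast object ... of type java.lang.String to target type java.lang.Integer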
@ -0,0 +1,43 @@
/*
 * Copyright 2010 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package seaweedfs.client.btree.serialize;

import java.io.IOException;
import java.io.InputStream;
import java.io.ObjectInputStream;
import java.io.ObjectStreamClass;

public class ClassLoaderObjectInputStream extends ObjectInputStream {
    private final ClassLoader loader;

    public ClassLoaderObjectInputStream(InputStream in, ClassLoader loader) throws IOException {
        super(in);
        this.loader = loader;
    }

    public ClassLoader getClassLoader() {
        return loader;
    }

    @Override
    protected Class<?> resolveClass(ObjectStreamClass desc) throws IOException, ClassNotFoundException {
        try {
            return Class.forName(desc.getName(), false, loader);
        } catch (ClassNotFoundException e) {
            return super.resolveClass(desc);
        }
    }
}
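A hedged fragment showing the intended use: deserialize with a caller-chosen classloader, falling back to the stream's default resolution only when that loader cannot find the class (hypothetical file name; the call site must handle IOException and ClassNotFoundException):

try (ClassLoaderObjectInputStream in = new ClassLoaderObjectInputStream(
        new java.io.FileInputStream("entry.ser"),
        Thread.currentThread().getContextClassLoader())) {
    Object entry = in.readObject(); // resolved against the context classloader first
}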
@ -0,0 +1,140 @@
/*
 * Copyright 2013 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package seaweedfs.client.btree.serialize;

import javax.annotation.Nullable;
import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;

/**
 * Provides a way to decode structured data from a backing byte stream. Implementations may buffer incoming bytes read
 * from the backing stream prior to decoding.
 */
public interface Decoder {
    /**
     * Returns an InputStream which can be used to read raw bytes.
     */
    InputStream getInputStream();

    /**
     * Reads a signed 64 bit long value. Can read any value that was written using {@link Encoder#writeLong(long)}.
     *
     * @throws EOFException when the end of the byte stream is reached before the long value can be fully read.
     */
    long readLong() throws EOFException, IOException;

    /**
     * Reads a signed 64 bit long value. Can read any value that was written using {@link Encoder#writeSmallLong(long)}.
     *
     * @throws EOFException when the end of the byte stream is reached before the long value can be fully read.
     */
    long readSmallLong() throws EOFException, IOException;

    /**
     * Reads a signed 32 bit int value. Can read any value that was written using {@link Encoder#writeInt(int)}.
     *
     * @throws EOFException when the end of the byte stream is reached before the int value can be fully read.
     */
    int readInt() throws EOFException, IOException;

    /**
     * Reads a signed 32 bit int value. Can read any value that was written using {@link Encoder#writeSmallInt(int)}.
     *
     * @throws EOFException when the end of the byte stream is reached before the int value can be fully read.
     */
    int readSmallInt() throws EOFException, IOException;

    /**
     * Reads a nullable signed 32 bit int value.
     *
     * @see #readSmallInt()
     */
    @Nullable
    Integer readNullableSmallInt() throws EOFException, IOException;

    /**
     * Reads a boolean value. Can read any value that was written using {@link Encoder#writeBoolean(boolean)}.
     *
     * @throws EOFException when the end of the byte stream is reached before the boolean value can be fully read.
     */
    boolean readBoolean() throws EOFException, IOException;

    /**
     * Reads a non-null string value. Can read any value that was written using {@link Encoder#writeString(CharSequence)}.
     *
     * @throws EOFException when the end of the byte stream is reached before the string can be fully read.
     */
    String readString() throws EOFException, IOException;

    /**
     * Reads a nullable string value. Can read any value that was written using {@link Encoder#writeNullableString(CharSequence)}.
     *
     * @throws EOFException when the end of the byte stream is reached before the string can be fully read.
     */
    @Nullable
    String readNullableString() throws EOFException, IOException;

    /**
     * Reads a byte value. Can read any byte value that was written using one of the raw byte methods on {@link Encoder}, such as {@link Encoder#writeByte(byte)} or {@link Encoder#getOutputStream()}.
     *
     * @throws EOFException when the end of the byte stream is reached.
     */
    byte readByte() throws EOFException, IOException;

    /**
     * Reads bytes into the given buffer, filling the buffer. Can read any byte values that were written using one of the raw byte methods on {@link Encoder}, such as {@link
     * Encoder#writeBytes(byte[])} or {@link Encoder#getOutputStream()}.
     *
     * @throws EOFException when the end of the byte stream is reached before the buffer is full.
     */
    void readBytes(byte[] buffer) throws EOFException, IOException;

    /**
     * Reads the specified number of bytes into the given buffer. Can read any byte values that were written using one of the raw byte methods on {@link Encoder}, such as {@link
     * Encoder#writeBytes(byte[])} or {@link Encoder#getOutputStream()}.
     *
     * @throws EOFException when the end of the byte stream is reached before the specified number of bytes were read.
     */
    void readBytes(byte[] buffer, int offset, int count) throws EOFException, IOException;

    /**
     * Reads a byte array. Can read any byte array written using {@link Encoder#writeBinary(byte[])} or {@link Encoder#writeBinary(byte[], int, int)}.
     *
     * @throws EOFException when the end of the byte stream is reached before the byte array was fully read.
     */
    byte[] readBinary() throws EOFException, IOException;

    /**
     * Skips the given number of bytes. Can skip over any byte values that were written using one of the raw byte methods on {@link Encoder}.
     */
    void skipBytes(long count) throws EOFException, IOException;

    /**
     * Reads a byte stream written using {@link Encoder#encodeChunked(Encoder.EncodeAction)}.
     */
    <T> T decodeChunked(DecodeAction<Decoder, T> decodeAction) throws EOFException, Exception;

    /**
     * Skips over a byte stream written using {@link Encoder#encodeChunked(Encoder.EncodeAction)}, discarding its content.
     */
    void skipChunked() throws EOFException, IOException;

    interface DecodeAction<IN, OUT> {
        OUT read(IN source) throws Exception;
    }
}
@ -0,0 +1,73 @@
/*
 * Copyright 2009 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package seaweedfs.client.btree.serialize;

import com.google.common.base.Objects;

import java.io.IOException;
import java.io.ObjectOutputStream;
import java.io.StreamCorruptedException;

public class DefaultSerializer<T> extends AbstractSerializer<T> {
    private ClassLoader classLoader;

    public DefaultSerializer() {
        classLoader = getClass().getClassLoader();
    }

    public DefaultSerializer(ClassLoader classLoader) {
        this.classLoader = classLoader != null ? classLoader : getClass().getClassLoader();
    }

    public ClassLoader getClassLoader() {
        return classLoader;
    }

    public void setClassLoader(ClassLoader classLoader) {
        this.classLoader = classLoader;
    }

    @Override
    public T read(Decoder decoder) throws Exception {
        try {
            return Cast.uncheckedNonnullCast(new ClassLoaderObjectInputStream(decoder.getInputStream(), classLoader).readObject());
        } catch (StreamCorruptedException e) {
            return null;
        }
    }

    @Override
    public void write(Encoder encoder, T value) throws IOException {
        ObjectOutputStream objectStr = new ObjectOutputStream(encoder.getOutputStream());
        objectStr.writeObject(value);
        objectStr.flush();
    }

    @Override
    public boolean equals(Object obj) {
        if (!super.equals(obj)) {
            return false;
        }

        DefaultSerializer<?> rhs = (DefaultSerializer<?>) obj;
        return Objects.equal(classLoader, rhs.classLoader);
    }

    @Override
    public int hashCode() {
        return Objects.hashCode(super.hashCode(), classLoader);
    }
}
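A round-trip sketch through an in-memory buffer. KryoBackedEncoder is used as in KeyHasher above; KryoBackedDecoder is assumed to be its counterpart taking an InputStream (an assumption, as that class is not shown in this change). The fragment would sit inside a method declaring throws Exception:

StreamByteBuffer buffer = new StreamByteBuffer();
DefaultSerializer<java.util.Date> serializer = new DefaultSerializer<java.util.Date>();

KryoBackedEncoder encoder = new KryoBackedEncoder(buffer.getOutputStream());
serializer.write(encoder, new java.util.Date());
encoder.flush();

KryoBackedDecoder decoder = new KryoBackedDecoder(buffer.getInputStream()); // assumed API
java.util.Date copy = serializer.read(decoder);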
@ -0,0 +1,110 @@
|
||||
/*
|
||||
* Copyright 2013 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package seaweedfs.client.btree.serialize;

import javax.annotation.Nullable;
import java.io.IOException;
import java.io.OutputStream;

/**
 * Provides a way to encode structured data to a backing byte stream. Implementations may buffer outgoing encoded bytes prior
 * to writing to the backing byte stream.
 */
public interface Encoder {
    /**
     * Returns an {@link OutputStream} that can be used to write raw bytes to the stream.
     */
    OutputStream getOutputStream();

    /**
     * Writes a raw byte value to the stream.
     */
    void writeByte(byte value) throws IOException;

    /**
     * Writes the given raw bytes to the stream. Does not encode any length information.
     */
    void writeBytes(byte[] bytes) throws IOException;

    /**
     * Writes the given raw bytes to the stream. Does not encode any length information.
     */
    void writeBytes(byte[] bytes, int offset, int count) throws IOException;

    /**
     * Writes the given byte array to the stream. Encodes the bytes and length information.
     */
    void writeBinary(byte[] bytes) throws IOException;

    /**
     * Writes the given byte array to the stream. Encodes the bytes and length information.
     */
    void writeBinary(byte[] bytes, int offset, int count) throws IOException;

    /**
     * Appends an encoded stream to this stream. Encodes the stream as a series of chunks with length information.
     */
    void encodeChunked(EncodeAction<Encoder> writeAction) throws Exception;

    /**
     * Writes a signed 64 bit long value. The implementation may encode the value as a variable number of bytes, not necessarily as 8 bytes.
     */
    void writeLong(long value) throws IOException;

    /**
     * Writes a signed 64 bit long value whose value is likely to be small and positive but may not be. The implementation may encode the value in a way that is more efficient for small positive values.
     */
    void writeSmallLong(long value) throws IOException;

    /**
     * Writes a signed 32 bit int value. The implementation may encode the value as a variable number of bytes, not necessarily as 4 bytes.
     */
    void writeInt(int value) throws IOException;

    /**
     * Writes a signed 32 bit int value whose value is likely to be small and positive but may not be. The implementation may encode the value in a way that is more efficient for small positive values.
     */
    void writeSmallInt(int value) throws IOException;

    /**
     * Writes a nullable signed 32 bit int value whose value is likely to be small and positive but may not be.
     *
     * @see #writeSmallInt(int)
     */
    void writeNullableSmallInt(@Nullable Integer value) throws IOException;

    /**
     * Writes a boolean value.
     */
    void writeBoolean(boolean value) throws IOException;

    /**
     * Writes a non-null string value.
     */
    void writeString(CharSequence value) throws IOException;

    /**
     * Writes a nullable string value.
     */
    void writeNullableString(@Nullable CharSequence value) throws IOException;

    interface EncodeAction<T> {
        void write(T target) throws Exception;
    }
}
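
A quick usage sketch, assuming the KryoBackedEncoder added later in this change; the class, stream, and variable names are illustrative and not part of the patch:

import seaweedfs.client.btree.serialize.kryo.KryoBackedEncoder;

import java.io.ByteArrayOutputStream;

public class EncoderUsageSketch {
    public static void main(String[] args) throws Exception {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        try (KryoBackedEncoder encoder = new KryoBackedEncoder(out)) {
            encoder.writeSmallInt(42);         // variable-length; compact for small positive values
            encoder.writeString("hello");      // must be non-null
            encoder.writeNullableString(null); // null is allowed here
        }
        // out.toByteArray() now holds the encoded bytes.
    }
}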
@ -0,0 +1,31 @@
/*
 * Copyright 2013 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package seaweedfs.client.btree.serialize;

import java.io.Flushable;
import java.io.IOException;

/**
 * Represents an {@link Encoder} that buffers encoded data prior to writing to the backing stream.
 */
public interface FlushableEncoder extends Encoder, Flushable {
    /**
     * Ensures that all buffered data has been written to the backing stream. Does not flush the backing stream.
     */
    @Override
    void flush() throws IOException;
}
@ -0,0 +1,28 @@
/*
 * Copyright 2012 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package seaweedfs.client.btree.serialize;

import java.io.EOFException;

public interface ObjectReader<T> {
    /**
     * Reads the next object from the stream.
     *
     * @throws EOFException When the next object cannot be fully read due to reaching the end of stream.
     */
    T read() throws EOFException, Exception;
}
@ -0,0 +1,21 @@
/*
 * Copyright 2012 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package seaweedfs.client.btree.serialize;

public interface ObjectWriter<T> {
    void write(T value) throws Exception;
}
@ -0,0 +1,33 @@
/*
 * Copyright 2009 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package seaweedfs.client.btree.serialize;

import java.io.EOFException;

public interface Serializer<T> {
    /**
     * Reads the next object from the given stream. The implementation must not perform any buffering, so that it reads only those bytes from the input stream that are
     * required to deserialize the next object.
     *
     * @throws EOFException When the next object cannot be fully read due to reaching the end of stream.
     */
    T read(Decoder decoder) throws EOFException, Exception;

    /**
     * Writes the given object to the given stream. The implementation must not perform any buffering.
     */
    void write(Encoder encoder, T value) throws Exception;
}
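
For illustration only, a minimal Serializer under these interfaces might look like the sketch below; the class name is hypothetical and it assumes Decoder exposes a readLong() mirroring Encoder.writeLong():

import seaweedfs.client.btree.serialize.Decoder;
import seaweedfs.client.btree.serialize.Encoder;
import seaweedfs.client.btree.serialize.Serializer;

/** Hypothetical sketch: reads and writes a boxed long, with no buffering of its own. */
public class LongValueSerializer implements Serializer<Long> {
    @Override
    public Long read(Decoder decoder) throws Exception {
        return decoder.readLong(); // consumes exactly the bytes written by write()
    }

    @Override
    public void write(Encoder encoder, Long value) throws Exception {
        encoder.writeLong(value); // symmetric with read()
    }
}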
@ -0,0 +1,33 @@
/*
 * Copyright 2012 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package seaweedfs.client.btree.serialize;

/**
 * Implementations must allow concurrent reading and writing, so that a thread can read and a thread can write at the same time.
 * Implementations do not need to support multiple read threads or multiple write threads.
 */
public interface StatefulSerializer<T> {
    /**
     * Should not perform any buffering.
     */
    ObjectReader<T> newReader(Decoder decoder);

    /**
     * Should not perform any buffering.
     */
    ObjectWriter<T> newWriter(Encoder encoder);
}
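
A speculative sketch of a StatefulSerializer implementation: each reader/writer pair may carry its own state (here, none), and neither buffers on its own. The class name is illustrative:

import seaweedfs.client.btree.serialize.Decoder;
import seaweedfs.client.btree.serialize.Encoder;
import seaweedfs.client.btree.serialize.ObjectReader;
import seaweedfs.client.btree.serialize.ObjectWriter;
import seaweedfs.client.btree.serialize.StatefulSerializer;

/** Hypothetical sketch: stateless in practice, but shows the reader/writer factory shape. */
public class BooleanStatefulSerializer implements StatefulSerializer<Boolean> {
    @Override
    public ObjectReader<Boolean> newReader(Decoder decoder) {
        return decoder::readBoolean; // one reader per decoding thread
    }

    @Override
    public ObjectWriter<Boolean> newWriter(Encoder encoder) {
        return encoder::writeBoolean; // one writer per encoding thread
    }
}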
@ -0,0 +1,210 @@
/*
 * Copyright 2013 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package seaweedfs.client.btree.serialize.kryo;

import com.esotericsoftware.kryo.KryoException;
import com.esotericsoftware.kryo.io.Input;
import seaweedfs.client.btree.serialize.AbstractDecoder;
import seaweedfs.client.btree.serialize.Decoder;

import java.io.Closeable;
import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;

/**
 * Note that this decoder uses buffering, so it will attempt to read beyond the end of the encoded data. This means you should
 * use this type only when this decoder will be used to decode the entire stream.
 */
public class KryoBackedDecoder extends AbstractDecoder implements Decoder, Closeable {
    private final Input input;
    private final InputStream inputStream;
    private long extraSkipped;
    private KryoBackedDecoder nested;

    public KryoBackedDecoder(InputStream inputStream) {
        this(inputStream, 4096);
    }

    public KryoBackedDecoder(InputStream inputStream, int bufferSize) {
        this.inputStream = inputStream;
        input = new Input(this.inputStream, bufferSize);
    }

    @Override
    protected int maybeReadBytes(byte[] buffer, int offset, int count) {
        return input.read(buffer, offset, count);
    }

    @Override
    protected long maybeSkip(long count) throws IOException {
        // Work around some bugs in Input.skip()
        int remaining = input.limit() - input.position();
        if (remaining == 0) {
            long skipped = inputStream.skip(count);
            if (skipped > 0) {
                extraSkipped += skipped;
            }
            return skipped;
        } else if (count <= remaining) {
            input.setPosition(input.position() + (int) count);
            return count;
        } else {
            input.setPosition(input.limit());
            return remaining;
        }
    }

    private RuntimeException maybeEndOfStream(KryoException e) throws EOFException {
        if (e.getMessage().equals("Buffer underflow.")) {
            throw (EOFException) (new EOFException().initCause(e));
        }
        throw e;
    }

    @Override
    public byte readByte() throws EOFException {
        try {
            return input.readByte();
        } catch (KryoException e) {
            throw maybeEndOfStream(e);
        }
    }

    @Override
    public void readBytes(byte[] buffer, int offset, int count) throws EOFException {
        try {
            input.readBytes(buffer, offset, count);
        } catch (KryoException e) {
            throw maybeEndOfStream(e);
        }
    }

    @Override
    public long readLong() throws EOFException {
        try {
            return input.readLong();
        } catch (KryoException e) {
            throw maybeEndOfStream(e);
        }
    }

    @Override
    public long readSmallLong() throws EOFException, IOException {
        try {
            return input.readLong(true);
        } catch (KryoException e) {
            throw maybeEndOfStream(e);
        }
    }

    @Override
    public int readInt() throws EOFException {
        try {
            return input.readInt();
        } catch (KryoException e) {
            throw maybeEndOfStream(e);
        }
    }

    @Override
    public int readSmallInt() throws EOFException {
        try {
            return input.readInt(true);
        } catch (KryoException e) {
            throw maybeEndOfStream(e);
        }
    }

    @Override
    public boolean readBoolean() throws EOFException {
        try {
            return input.readBoolean();
        } catch (KryoException e) {
            throw maybeEndOfStream(e);
        }
    }

    @Override
    public String readString() throws EOFException {
        return readNullableString();
    }

    @Override
    public String readNullableString() throws EOFException {
        try {
            return input.readString();
        } catch (KryoException e) {
            throw maybeEndOfStream(e);
        }
    }

    @Override
    public void skipChunked() throws EOFException, IOException {
        while (true) {
            int count = readSmallInt();
            if (count == 0) {
                break;
            }
            skipBytes(count);
        }
    }

    @Override
    public <T> T decodeChunked(DecodeAction<Decoder, T> decodeAction) throws EOFException, Exception {
        if (nested == null) {
            nested = new KryoBackedDecoder(new InputStream() {
                @Override
                public int read() throws IOException {
                    throw new UnsupportedOperationException();
                }

                @Override
                public int read(byte[] buffer, int offset, int length) throws IOException {
                    int count = readSmallInt();
                    if (count == 0) {
                        // End of stream has been reached
                        return -1;
                    }
                    if (count > length) {
                        // For now, assume same size buffers used to read and write
                        throw new UnsupportedOperationException();
                    }
                    readBytes(buffer, offset, count);
                    return count;
                }
            });
        }
        T value = decodeAction.read(nested);
        if (readSmallInt() != 0) {
            throw new IllegalStateException("Expecting the end of nested stream.");
        }
        return value;
    }

    /**
     * Returns the total number of bytes consumed by this decoder. Some additional bytes may also be buffered by this decoder but have not been consumed.
     */
    public long getReadPosition() {
        return input.total() + extraSkipped;
    }

    @Override
    public void close() throws IOException {
        input.close();
    }
}
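
A hedged sketch of end-of-stream handling with this decoder: Kryo's "Buffer underflow." failure is rethrown as EOFException, so a caller can loop until the stream is exhausted. The class name and empty input are illustrative only:

import seaweedfs.client.btree.serialize.kryo.KryoBackedDecoder;

import java.io.ByteArrayInputStream;
import java.io.EOFException;

public class ReadUntilEofSketch {
    public static void main(String[] args) throws Exception {
        byte[] encoded = new byte[0]; // assume previously written by a matching encoder
        try (KryoBackedDecoder decoder = new KryoBackedDecoder(new ByteArrayInputStream(encoded))) {
            while (true) {
                try {
                    System.out.println(decoder.readSmallLong());
                } catch (EOFException e) {
                    break; // reached the end of the encoded data
                }
            }
        }
    }
}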
@ -0,0 +1,134 @@
/*
 * Copyright 2013 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package seaweedfs.client.btree.serialize.kryo;

import com.esotericsoftware.kryo.io.Output;
import seaweedfs.client.btree.serialize.AbstractEncoder;
import seaweedfs.client.btree.serialize.Encoder;
import seaweedfs.client.btree.serialize.FlushableEncoder;

import javax.annotation.Nullable;
import java.io.Closeable;
import java.io.IOException;
import java.io.OutputStream;

public class KryoBackedEncoder extends AbstractEncoder implements FlushableEncoder, Closeable {
    private final Output output;
    private KryoBackedEncoder nested;

    public KryoBackedEncoder(OutputStream outputStream) {
        this(outputStream, 4096);
    }

    public KryoBackedEncoder(OutputStream outputStream, int bufferSize) {
        output = new Output(outputStream, bufferSize);
    }

    @Override
    public void writeByte(byte value) {
        output.writeByte(value);
    }

    @Override
    public void writeBytes(byte[] bytes, int offset, int count) {
        output.writeBytes(bytes, offset, count);
    }

    @Override
    public void writeLong(long value) {
        output.writeLong(value);
    }

    @Override
    public void writeSmallLong(long value) {
        output.writeLong(value, true);
    }

    @Override
    public void writeInt(int value) {
        output.writeInt(value);
    }

    @Override
    public void writeSmallInt(int value) {
        output.writeInt(value, true);
    }

    @Override
    public void writeBoolean(boolean value) {
        output.writeBoolean(value);
    }

    @Override
    public void writeString(CharSequence value) {
        if (value == null) {
            throw new IllegalArgumentException("Cannot encode a null string.");
        }
        output.writeString(value);
    }

    @Override
    public void writeNullableString(@Nullable CharSequence value) {
        output.writeString(value);
    }

    @Override
    public void encodeChunked(EncodeAction<Encoder> writeAction) throws Exception {
        if (nested == null) {
            nested = new KryoBackedEncoder(new OutputStream() {
                @Override
                public void write(byte[] buffer, int offset, int length) {
                    if (length == 0) {
                        return;
                    }
                    writeSmallInt(length);
                    writeBytes(buffer, offset, length);
                }

                @Override
                public void write(byte[] buffer) throws IOException {
                    write(buffer, 0, buffer.length);
                }

                @Override
                public void write(int b) {
                    throw new UnsupportedOperationException();
                }
            });
        }
        writeAction.write(nested);
        nested.flush();
        writeSmallInt(0);
    }

    /**
     * Returns the total number of bytes written by this encoder, some of which may still be buffered.
     */
    public long getWritePosition() {
        return output.total();
    }

    @Override
    public void flush() {
        output.flush();
    }

    @Override
    public void close() {
        output.close();
    }
}
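
To see the chunked framing end to end, here is a speculative round-trip sketch: encodeChunked writes length-prefixed chunks terminated by a zero length, and decodeChunked consumes them through a nested decoder. It assumes EncodeAction and Decoder.DecodeAction are single-method interfaces usable as lambdas, and that both sides use the default buffer size:

import seaweedfs.client.btree.serialize.kryo.KryoBackedDecoder;
import seaweedfs.client.btree.serialize.kryo.KryoBackedEncoder;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;

public class ChunkedRoundTripSketch {
    public static void main(String[] args) throws Exception {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        try (KryoBackedEncoder encoder = new KryoBackedEncoder(bytes)) {
            encoder.encodeChunked(nested -> nested.writeString("payload"));
        }
        try (KryoBackedDecoder decoder = new KryoBackedDecoder(new ByteArrayInputStream(bytes.toByteArray()))) {
            String payload = decoder.decodeChunked(nested -> nested.readString());
            System.out.println(payload); // prints "payload"
        }
    }
}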
Some files were not shown because too many files have changed in this diff.