commit d861cbd81b
Merge branch 'master' of https://github.com/bingoohuang/seaweedfs
.github/ISSUE_TEMPLATE/bug_report.md (vendored, 2 changes)
@@ -8,6 +8,8 @@ assignees: ''
---

Sponsors SeaweedFS via Patreon https://www.patreon.com/seaweedfs
Report issues here. Ask questions here https://stackoverflow.com/questions/tagged/seaweedfs
Please ask questions in https://github.com/chrislusf/seaweedfs/discussions

example of a good issue report:
https://github.com/chrislusf/seaweedfs/issues/1005

.github/workflows/cleanup.yml (vendored, new file, 22 lines)
@@ -0,0 +1,22 @@
name: Cleanup

on:
  push:
    branches: [ master ]

jobs:

  build:
    name: Build
    runs-on: ubuntu-latest

    steps:

    - name: Delete old release assets
      uses: mknejp/delete-release-assets@v1
      with:
        token: ${{ github.token }}
        tag: dev
        fail-if-no-assets: false
        assets: |
          weed-*

.github/workflows/go.yml (vendored, new file, 37 lines)
@@ -0,0 +1,37 @@
name: Go

on:
  push:
    branches: [ master ]
  pull_request:
    branches: [ master ]

jobs:

  build:
    name: Build
    runs-on: ubuntu-latest
    steps:

    - name: Set up Go 1.x
      uses: actions/setup-go@v2
      with:
        go-version: ^1.13
      id: go

    - name: Check out code into the Go module directory
      uses: actions/checkout@v2

    - name: Get dependencies
      run: |
        cd weed; go get -v -t -d ./...
        if [ -f Gopkg.toml ]; then
            curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh
            dep ensure
        fi

    - name: Build
      run: cd weed; go build -v .

    - name: Test
      run: cd weed; go test -v ./...
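A note on the CI steps: every command runs inside the `weed/` module directory, and the `Gopkg.toml` branch is a legacy fallback for the retired `dep` tool; on a modules-based checkout that file is absent, so the branch is skipped.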
.github/workflows/release.yml (vendored, new file, 64 lines)
@@ -0,0 +1,64 @@
name: Release

on:
  push:
    branches: [ master ]

jobs:

  build:
    name: Build
    runs-on: ubuntu-latest
    strategy:
      matrix:
        goos: [linux, windows, darwin, freebsd ]
        goarch: [amd64, arm]
        exclude:
          - goarch: arm
            goos: darwin
          - goarch: arm
            goos: windows

    steps:

    - name: Check out code into the Go module directory
      uses: actions/checkout@v2

    - name: Wait for the deletion
      uses: jakejarvis/wait-action@master
      with:
        time: '30s'

    - name: Set BUILD_TIME env
      run: echo BUILD_TIME=$(date -u +%Y-%m-%d-%H-%M) >> ${GITHUB_ENV}

    - name: Go Release Binaries
      uses: wangyoucao577/go-release-action@v1.14
      with:
        github_token: ${{ secrets.GITHUB_TOKEN }}
        goos: ${{ matrix.goos }}
        goarch: ${{ matrix.goarch }}
        release_tag: dev
        overwrite: true
        pre_command: export CGO_ENABLED=0
        build_flags: -tags 5BytesOffset # optional, default is
        ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}}
        # Where to run `go build .`
        project_path: weed
        binary_name: weed-large-disk
        asset_name: "weed-large-disk-${{ env.BUILD_TIME }}-${{ matrix.goos }}-${{ matrix.goarch }}"

    - name: Go Release Binaries
      uses: wangyoucao577/go-release-action@v1.14
      with:
        github_token: ${{ secrets.GITHUB_TOKEN }}
        goos: ${{ matrix.goos }}
        goarch: ${{ matrix.goarch }}
        release_tag: dev
        overwrite: true
        pre_command: export CGO_ENABLED=0
        ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}}
        # Where to run `go build .`
        project_path: weed
        binary_name: weed
        asset_name: "weed-${{ env.BUILD_TIME }}-${{ matrix.goos }}-${{ matrix.goarch }}"
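For orientation: the matrix expands to 4 goos × 2 goarch = 8 combinations, and the two `exclude` entries (darwin/arm, windows/arm) drop it to 6 jobs, each publishing a regular `weed` and a `weed-large-disk` binary to the rolling `dev` release. The 30-second wait gives the Cleanup workflow above time to delete the previous `weed-*` assets first.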
.travis.yml
@@ -1,9 +1,8 @@
sudo: false
language: go
go:
-  - 1.11.x
-  - 1.12.x
-  - 1.13.x
+  - 1.15.x
+  - 1.16.x

before_install:
  - export PATH=/home/travis/gopath/bin:$PATH
@@ -45,4 +44,4 @@ deploy:
  on:
    tags: true
    repo: chrislusf/seaweedfs
-    go: 1.13.x
+    go: 1.16.x

Makefile (24 changes)
@@ -8,11 +8,14 @@ appname := weed

sources := $(wildcard *.go)

-build = CGO_ENABLED=0 GOOS=$(1) GOARCH=$(2) go build -ldflags "-extldflags -static" -o build/$(appname)$(3) $(SOURCE_DIR)
+COMMIT ?= $(shell git rev-parse --short HEAD)
+LDFLAGS ?= -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${COMMIT}
+
+build = CGO_ENABLED=0 GOOS=$(1) GOARCH=$(2) go build -ldflags "-extldflags -static $(LDFLAGS)" -o build/$(appname)$(3) $(SOURCE_DIR)
tar = cd build && tar -cvzf $(1)_$(2).tar.gz $(appname)$(3) && rm $(appname)$(3)
zip = cd build && zip $(1)_$(2).zip $(appname)$(3) && rm $(appname)$(3)

-build_large = CGO_ENABLED=0 GOOS=$(1) GOARCH=$(2) go build -tags 5BytesOffset -ldflags "-extldflags -static" -o build/$(appname)$(3) $(SOURCE_DIR)
+build_large = CGO_ENABLED=0 GOOS=$(1) GOARCH=$(2) go build -tags 5BytesOffset -ldflags "-extldflags -static $(LDFLAGS)" -o build/$(appname)$(3) $(SOURCE_DIR)
tar_large = cd build && tar -cvzf $(1)_$(2)_large_disk.tar.gz $(appname)$(3) && rm $(appname)$(3)
zip_large = cd build && zip $(1)_$(2)_large_disk.zip $(appname)$(3) && rm $(appname)$(3)
@@ -31,13 +34,16 @@ deps:
	rm -rf /home/travis/gopath/src/go.etcd.io/etcd/vendor/golang.org/x/net/trace

build: deps
-	go build $(GO_FLAGS) -o $(BINARY) $(SOURCE_DIR)
+	go build $(GO_FLAGS) -ldflags "$(LDFLAGS)" -o $(BINARY) $(SOURCE_DIR)
+
+install: deps
+	go install $(GO_FLAGS) -ldflags "$(LDFLAGS)" $(SOURCE_DIR)

linux: deps
	mkdir -p linux
-	GOOS=linux GOARCH=amd64 go build $(GO_FLAGS) -o linux/$(BINARY) $(SOURCE_DIR)
+	GOOS=linux GOARCH=amd64 go build $(GO_FLAGS) -ldflags "$(LDFLAGS)" -o linux/$(BINARY) $(SOURCE_DIR)

-release: deps windows_build darwin_build linux_build bsd_build 5_byte_linux_build 5_byte_darwin_build 5_byte_windows_build
+release: deps windows_build darwin_build linux_build bsd_build 5_byte_linux_build 5_byte_arm64_build 5_byte_darwin_build 5_byte_windows_build

##### LINUX BUILDS #####
5_byte_linux_build:

@@ -52,6 +58,14 @@ release: deps windows_build darwin_build linux_build bsd_build 5_byte_linux_buil
	$(call build_large,windows,amd64,.exe)
	$(call zip_large,windows,amd64,.exe)

+5_byte_arm_build: $(sources)
+	$(call build_large,linux,arm,)
+	$(call tar_large,linux,arm)
+
+5_byte_arm64_build: $(sources)
+	$(call build_large,linux,arm64,)
+	$(call tar_large,linux,arm64)
+
linux_build: build/linux_arm.tar.gz build/linux_arm64.tar.gz build/linux_386.tar.gz build/linux_amd64.tar.gz

build/linux_386.tar.gz: $(sources)
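The `-X` linker flag used throughout this commit (Makefile, workflows, Dockerfiles) only works because the target is a plain package-level string variable. A minimal sketch of the consuming side, assuming this shape for the variable (the import path `weed/util` is taken from the flags above; the rest of the file is not shown in this diff):

```go
// Sketch of github.com/chrislusf/seaweedfs/weed/util (assumed shape).
package util

// COMMIT is empty in the source tree; release builds stamp it via:
//   go build -ldflags "-X github.com/chrislusf/seaweedfs/weed/util.COMMIT=<short sha>"
var COMMIT string
```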
README.md (246 changes)
@@ -1,14 +1,18 @@
# SeaweedFS

[![Slack](https://img.shields.io/badge/slack-purple)](https://join.slack.com/t/seaweedfs/shared_invite/enQtMzI4MTMwMjU2MzA3LTEyYzZmZWYzOGQ3MDJlZWMzYmI0OTE4OTJiZjJjODBmMzUxNmYwODg0YjY3MTNlMjBmZDQ1NzQ5NDJhZWI2ZmY)
[![Twitter](https://img.shields.io/twitter/follow/seaweedfs.svg?style=social&label=Follow)](https://twitter.com/intent/follow?screen_name=seaweedfs)
[![Build Status](https://travis-ci.org/chrislusf/seaweedfs.svg?branch=master)](https://travis-ci.org/chrislusf/seaweedfs)
[![GoDoc](https://godoc.org/github.com/chrislusf/seaweedfs/weed?status.svg)](https://godoc.org/github.com/chrislusf/seaweedfs/weed)
[![Wiki](https://img.shields.io/badge/docs-wiki-blue.svg)](https://github.com/chrislusf/seaweedfs/wiki)
-[![Docker Pulls](https://img.shields.io/docker/pulls/chrislusf/seaweedfs.svg?maxAge=604800)](https://hub.docker.com/r/chrislusf/seaweedfs/)
+[![Docker Pulls](https://img.shields.io/docker/pulls/chrislusf/seaweedfs.svg?maxAge=4800)](https://hub.docker.com/r/chrislusf/seaweedfs/)
+[![SeaweedFS on Maven Central](https://img.shields.io/maven-central/v/com.github.chrislusf/seaweedfs-client)](https://search.maven.org/search?q=g:com.github.chrislusf)

![SeaweedFS Logo](https://raw.githubusercontent.com/chrislusf/seaweedfs/master/note/seaweedfs.png)

-<h2 align="center">Supporting SeaweedFS</h2>
+<h2 align="center"><a href="https://www.patreon.com/seaweedfs">Sponsor SeaweedFS via Patreon</a></h2>

SeaweedFS is an independent Apache-licensed open source project with its ongoing development made
possible entirely thanks to the support of these awesome [backers](https://github.com/chrislusf/seaweedfs/blob/master/backers.md).
@@ -17,8 +21,6 @@ If you'd like to grow SeaweedFS even stronger, please consider joining our

Your support will be really appreciated by me and other supporters!

-<h3 align="center"><a href="https://www.patreon.com/seaweedfs">Sponsor SeaweedFS via Patreon</a></h3>
-
<!--
<h4 align="center">Platinum</h4>
@@ -27,41 +29,32 @@ Your support will be really appreciated by me and other supporters!
      Add your name or icon here
    </a>
  </p>

  <h4 align="center">Gold</h4>

  <table>
    <tbody>
      <tr>
        <td align="center" valign="middle">
          <a href="" target="_blank">
            Add your name or icon here
          </a>
        </td>
      </tr>
      <tr></tr>
    </tbody>
  </table>

-->

### Gold Sponsors
![shuguang](https://raw.githubusercontent.com/chrislusf/seaweedfs/master/note/shuguang.png)

---

- [Download Binaries for different platforms](https://github.com/chrislusf/seaweedfs/releases/latest)
- [SeaweedFS on Slack](https://join.slack.com/t/seaweedfs/shared_invite/enQtMzI4MTMwMjU2MzA3LTEyYzZmZWYzOGQ3MDJlZWMzYmI0OTE4OTJiZjJjODBmMzUxNmYwODg0YjY3MTNlMjBmZDQ1NzQ5NDJhZWI2ZmY)
- [SeaweedFS on Twitter](https://twitter.com/SeaweedFS)
- [SeaweedFS Mailing List](https://groups.google.com/d/forum/seaweedfs)
- [Wiki Documentation](https://github.com/chrislusf/seaweedfs/wiki)
- [SeaweedFS White Paper](https://github.com/chrislusf/seaweedfs/wiki/SeaweedFS_Architecture.pdf)
- [SeaweedFS Introduction Slides](https://www.slideshare.net/chrislusf/seaweedfs-introduction)

Table of Contents
=================

* [Quick Start](#quick-start)
* [Introduction](#introduction)
* [Features](#features)
  * [Additional Features](#additional-features)
  * [Filer Features](#filer-features)
-* [Example Usage](#example-usage)
+* [Example: Using Seaweed Object Store](#example-Using-Seaweed-Object-Store)
* [Architecture](#architecture)
* [Compared to Other File Systems](#compared-to-other-file-systems)
  * [Compared to HDFS](#compared-to-hdfs)
@@ -74,6 +67,13 @@ Table of Contents
* [Benchmark](#Benchmark)
* [License](#license)

+## Quick Start ##
+* Download the latest binary from https://github.com/chrislusf/seaweedfs/releases and unzip a single binary file `weed` or `weed.exe`
+* Run `weed server -dir=/some/data/dir -s3` to start one master, one volume server, one filer, and one S3 gateway.
+
+Also, to increase capacity, just add more volume servers by running `weed volume -dir="/some/data/dir2" -mserver="<master_host>:9333" -port=8081` locally, or on a different machine, or on thousands of machines. That is it!
+
## Introduction ##

SeaweedFS is a simple and highly scalable distributed file system. There are two objectives:
@@ -81,17 +81,34 @@ SeaweedFS is a simple and highly scalable distributed file system. There are two
1. to store billions of files!
2. to serve the files fast!

-SeaweedFS started as an Object Store to handle small files efficiently. Instead of managing all file metadata in a central master, the central master only manages file volumes, and it lets these volume servers manage files and their metadata. This relieves concurrency pressure from the central master and spreads file metadata into volume servers, allowing faster file access (just one disk read operation).
+SeaweedFS started as an Object Store to handle small files efficiently.
+Instead of managing all file metadata in a central master,
+the central master only manages volumes on volume servers,
+and these volume servers manage files and their metadata.
+This relieves concurrency pressure from the central master and spreads file metadata into volume servers,
+allowing faster file access (O(1), usually just one disk read operation).

-There is only 40 bytes of disk storage overhead for each file's metadata. It is so simple with O(1) disk reads that you are welcome to challenge the performance with your actual use cases.
+SeaweedFS can transparently integrate with the cloud.
+With hot data on local cluster, and warm data on the cloud with O(1) access time,
+SeaweedFS can achieve both fast local access time and elastic cloud storage capacity.
+What's more, the cloud storage access API cost is minimized.
+Faster and Cheaper than direct cloud storage!
+Signup for future managed SeaweedFS cluster offering at "seaweedfilesystem at gmail dot com".

-SeaweedFS started by implementing [Facebook's Haystack design paper](http://www.usenix.org/event/osdi10/tech/full_papers/Beaver.pdf). Also, SeaweedFS implements erasure coding with ideas from [f4: Facebook’s Warm BLOB Storage System](https://www.usenix.org/system/files/conference/osdi14/osdi14-paper-muralidhar.pdf)
+There is only 40 bytes of disk storage overhead for each file's metadata.
+It is so simple with O(1) disk reads that you are welcome to challenge the performance with your actual use cases.

-SeaweedFS can work very well with just the object store. [[Filer]] can then be added later to support directories and POSIX attributes. Filer is a separate linearly-scalable stateless server with customizable metadata stores, e.g., MySql/Postgres/Redis/Etcd/Cassandra/LevelDB/MemSql/TiDB/CockroachDB.
+SeaweedFS started by implementing [Facebook's Haystack design paper](http://www.usenix.org/event/osdi10/tech/full_papers/Beaver.pdf).
+Also, SeaweedFS implements erasure coding with ideas from
+[f4: Facebook’s Warm BLOB Storage System](https://www.usenix.org/system/files/conference/osdi14/osdi14-paper-muralidhar.pdf), and has a lot of similarities with [Facebook’s Tectonic Filesystem](https://www.usenix.org/system/files/fast21-pan.pdf)

[Back to TOC](#table-of-contents)

+On top of the object store, optional [Filer] can support directories and POSIX attributes.
+Filer is a separate linearly-scalable stateless server with customizable metadata stores,
+e.g., MySql, Postgres, Redis, Cassandra, HBase, Mongodb, Elastic Search, LevelDB, RocksDB, MemSql, TiDB, Etcd, CockroachDB, etc.

## Features ##

+For any distributed key value stores, the large values can be offloaded to SeaweedFS.
+With the fast access speed and linearly scalable capacity,
+SeaweedFS can work as a distributed [Key-Large-Value store][KeyLargeValueStore].

[Back to TOC](#table-of-contents)
@@ -100,35 +117,57 @@ SeaweedFS can work very well with just the object store. [[Filer]] can then be a
* Automatic master servers failover - no single point of failure (SPOF).
* Automatic Gzip compression depending on file mime type.
* Automatic compaction to reclaim disk space after deletion or update.
* Servers in the same cluster can have different disk spaces, file systems, OS etc.
-* Adding/Removing servers does **not** cause any data re-balancing.
* Optionally fix the orientation for jpeg pictures.
* [Automatic entry TTL expiration][VolumeServerTTL].
* Any server with some disk spaces can add to the total storage space.
+* Adding/Removing servers does **not** cause any data re-balancing unless triggered by admin commands.
* Optional picture resizing.
* Support ETag, Accept-Range, Last-Modified, etc.
-* Support in-memory/leveldb/boltdb/btree mode tuning for memory/performance balance.
+* Support in-memory/leveldb/readonly mode tuning for memory/performance balance.
+* Support rebalancing the writable and readonly volumes.
+* [Customizable Multiple Storage Tiers][TieredStorage]: Customizable storage disk types to balance performance and cost.
+* [Transparent cloud integration][CloudTier]: unlimited capacity via tiered cloud storage for warm data.
+* [Erasure Coding for warm storage][ErasureCoding] Rack-Aware 10.4 erasure coding reduces storage cost and increases availability.

[Back to TOC](#table-of-contents)

## Filer Features ##
-* [filer server][Filer] provide "normal" directories and files via http.
-* [mount filer][Mount] to read and write files directly as a local directory via FUSE.
-* [Amazon S3 compatible API][AmazonS3API] to access files with S3 tooling.
-* [Erasure Coding for warm storage][ErasureCoding] Rack-Aware 10.4 erasure coding reduces storage cost and increases availability.
-* [Hadoop Compatible File System][Hadoop] to access files from Hadoop/Spark/Flink/etc jobs.
-* [Async Backup To Cloud][BackupToCloud] has extremely fast local access and backups to Amazon S3, Google Cloud Storage, Azure, BackBlaze.
-* [WebDAV] access as a mapped drive on Mac and Windows, or from mobile devices.
+* [Filer server][Filer] provides "normal" directories and files via http.
+* [File TTL][FilerTTL] automatically expires file metadata and actual file data.
+* [Mount filer][Mount] reads and writes files directly as a local directory via FUSE.
+* [Filer Store Replication][FilerStoreReplication] enables HA for filer meta data stores.
+* [Active-Active Replication][ActiveActiveAsyncReplication] enables asynchronous one-way or two-way cross cluster continuous replication.
+* [Amazon S3 compatible API][AmazonS3API] accesses files with S3 tooling.
+* [Hadoop Compatible File System][Hadoop] accesses files from Hadoop/Spark/Flink/etc or even runs HBase.
+* [Async Replication To Cloud][BackupToCloud] has extremely fast local access and backups to Amazon S3, Google Cloud Storage, Azure, BackBlaze.
+* [WebDAV] accesses as a mapped drive on Mac and Windows, or from mobile devices.
+* [AES256-GCM Encrypted Storage][FilerDataEncryption] safely stores the encrypted data.
+* [Super Large Files][SuperLargeFiles] stores large or super large files in tens of TB.

+## Kubernetes ##
+* [Kubernetes CSI Driver][SeaweedFsCsiDriver] A Container Storage Interface (CSI) Driver. [![Docker Pulls](https://img.shields.io/docker/pulls/chrislusf/seaweedfs-csi-driver.svg?maxAge=4800)](https://hub.docker.com/r/chrislusf/seaweedfs-csi-driver/)
+* [SeaweedFS Operator](https://github.com/seaweedfs/seaweedfs-operator)

[Filer]: https://github.com/chrislusf/seaweedfs/wiki/Directories-and-Files
-[Mount]: https://github.com/chrislusf/seaweedfs/wiki/Mount
+[SuperLargeFiles]: https://github.com/chrislusf/seaweedfs/wiki/Data-Structure-for-Large-Files
+[Mount]: https://github.com/chrislusf/seaweedfs/wiki/FUSE-Mount
[AmazonS3API]: https://github.com/chrislusf/seaweedfs/wiki/Amazon-S3-API
-[BackupToCloud]: https://github.com/chrislusf/seaweedfs/wiki/Backup-to-Cloud
+[BackupToCloud]: https://github.com/chrislusf/seaweedfs/wiki/Async-Replication-to-Cloud
[Hadoop]: https://github.com/chrislusf/seaweedfs/wiki/Hadoop-Compatible-File-System
[WebDAV]: https://github.com/chrislusf/seaweedfs/wiki/WebDAV
[ErasureCoding]: https://github.com/chrislusf/seaweedfs/wiki/Erasure-coding-for-warm-storage
+[TieredStorage]: https://github.com/chrislusf/seaweedfs/wiki/Tiered-Storage
+[CloudTier]: https://github.com/chrislusf/seaweedfs/wiki/Cloud-Tier
+[FilerDataEncryption]: https://github.com/chrislusf/seaweedfs/wiki/Filer-Data-Encryption
+[FilerTTL]: https://github.com/chrislusf/seaweedfs/wiki/Filer-Stores
[VolumeServerTTL]: https://github.com/chrislusf/seaweedfs/wiki/Store-file-with-a-Time-To-Live
+[SeaweedFsCsiDriver]: https://github.com/seaweedfs/seaweedfs-csi-driver
+[ActiveActiveAsyncReplication]: https://github.com/chrislusf/seaweedfs/wiki/Filer-Active-Active-cross-cluster-continuous-synchronization
+[FilerStoreReplication]: https://github.com/chrislusf/seaweedfs/wiki/Filer-Store-Replication
+[KeyLargeValueStore]: https://github.com/chrislusf/seaweedfs/wiki/Filer-as-a-Key-Large-Value-Store

[Back to TOC](#table-of-contents)
-## Example Usage ##
+## Example: Using Seaweed Object Store ##

By default, the master node runs on port 9333, and the volume nodes run on port 8080.
Let's start one master node, and two volume nodes on port 8080 and 8081. Ideally, they should be started from different machines. We'll use localhost as an example.
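As a reader aid for the workflow this section introduces: a minimal Go sketch of the assign-then-upload cycle against a local master. The `/dir/assign` endpoint and the `fid`/`url`/`publicUrl` response fields are the documented ones; everything else (file name, content, error handling) is illustrative.

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"mime/multipart"
	"net/http"
)

type assignResult struct {
	Fid       string `json:"fid"`
	Url       string `json:"url"`
	PublicUrl string `json:"publicUrl"`
}

func main() {
	// 1. ask the master for a file id plus a volume server location
	resp, err := http.Get("http://localhost:9333/dir/assign")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	var a assignResult
	if err := json.NewDecoder(resp.Body).Decode(&a); err != nil {
		panic(err)
	}

	// 2. upload the content to the assigned volume server under that fid
	var buf bytes.Buffer
	w := multipart.NewWriter(&buf)
	part, _ := w.CreateFormFile("file", "hello.txt")
	part.Write([]byte("hello SeaweedFS"))
	w.Close()
	req, _ := http.NewRequest(http.MethodPost, "http://"+a.Url+"/"+a.Fid, &buf)
	req.Header.Set("Content-Type", w.FormDataContentType())
	if _, err := http.DefaultClient.Do(req); err != nil {
		panic(err)
	}
	fmt.Println("stored as", a.Fid, "readable at http://"+a.PublicUrl+"/"+a.Fid)
}
```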
@@ -318,6 +357,16 @@ Each individual file size is limited to the volume size.

All file meta information stored on a volume server is readable from memory without disk access. Each file takes just a 16-byte map entry of <64bit key, 32bit offset, 32bit size>. Of course, each map entry has its own space cost for the map. But usually the disk space runs out before the memory does.
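The 16-byte entry described above maps naturally onto a fixed-size Go struct. This is a sketch, not code from the repo; field names are illustrative, and the offset is assumed to be stored in 8-byte units (which matches the "at most 8 unused bytes between files" note later in this README):

```go
package needle // illustrative package name

// needleMapEntry sketches the per-file entry in a volume server's in-memory
// index, per the README: 8-byte key + 4-byte offset + 4-byte size = 16 bytes.
type needleMapEntry struct {
	Key    uint64 // file key within the volume
	Offset uint32 // needle offset in the volume file, in 8-byte units
	Size   uint32 // needle size in bytes
}
```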

### Tiered Storage to the cloud ###

The local volume servers are much faster, while cloud storages have elastic capacity and are actually more cost-efficient if not accessed often (usually free to upload, but relatively costly to access). With the append-only structure and O(1) access time, SeaweedFS can take advantage of both local and cloud storage by offloading the warm data to the cloud.

Usually hot data are fresh and warm data are old. SeaweedFS puts the newly created volumes on local servers, and optionally uploads the older volumes to the cloud. If the older data are accessed less often, this literally gives you unlimited capacity with limited local servers, and still fast access for new data.

With the O(1) access time, the network latency cost is kept at a minimum.

If the hot/warm data is split as 20/80, with 20 servers you can achieve the storage capacity of 100 servers. That's a cost saving of 80%! Or you can repurpose the 80 servers to store new data also, and get 5X storage throughput.

[Back to TOC](#table-of-contents)

## Compared to Other File Systems ##
@@ -326,6 +375,8 @@ Most other distributed file systems seem more complicated than necessary.

SeaweedFS is meant to be fast and simple, in both setup and operation. If you do not understand how it works when you reach here, we've failed! Please raise an issue with any questions or update this file with clarifications.

+SeaweedFS is constantly moving forward. Same with other systems. These comparisons can be outdated quickly. Please help to keep them updated.
+
[Back to TOC](#table-of-contents)

### Compared to HDFS ###
@@ -344,15 +395,17 @@ The architectures are mostly the same. SeaweedFS aims to store and read files fa

* SeaweedFS optimizes for small files, ensuring O(1) disk seek operation, and can also handle large files.
* SeaweedFS statically assigns a volume id for a file. Locating file content becomes just a lookup of the volume id, which can be easily cached (see the sketch after this list).
-* SeaweedFS Filer metadata store can be any well-known and proven data stores, e.g., Cassandra, Redis, Etcd, MySql, Postgres, MemSql, TiDB, CockroachDB, etc, and is easy to customized.
+* SeaweedFS Filer metadata store can be any well-known and proven data stores, e.g., Redis, Cassandra, HBase, Mongodb, Elastic Search, MySql, Postgres, MemSql, TiDB, CockroachDB, Etcd etc, and is easy to customized.
* SeaweedFS Volume server also communicates directly with clients via HTTP, supporting range queries, direct uploads, etc.
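To make the cacheability point concrete, here is a small illustrative Go helper (not from the repo) that splits a SeaweedFS file id of the form `<volume id>,<file key + cookie in hex>`, e.g. `3,01637037d6`; only the part before the comma needs a location lookup, and that mapping is what clients cache:

```go
package main

import (
	"fmt"
	"strings"
)

// volumeIdOf returns the volume-id part of a fid like "3,01637037d6".
// Hypothetical helper for illustration; the repo has its own fid types.
func volumeIdOf(fid string) (string, error) {
	i := strings.Index(fid, ",")
	if i < 0 {
		return "", fmt.Errorf("malformed fid: %q", fid)
	}
	return fid[:i], nil
}

func main() {
	v, err := volumeIdOf("3,01637037d6")
	if err != nil {
		panic(err)
	}
	fmt.Println(v) // "3": resolve this to volume server locations once, then cache
}
```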
-| System | File Meta | File Content Read | POSIX | REST API | Optimized for small files |
+| System | File Metadata | File Content Read | POSIX | REST API | Optimized for large number of small files |
| ------------- | ------------------------------- | ---------------- | ------ | -------- | ------------------------- |
| SeaweedFS | lookup volume id, cacheable | O(1) disk seek | | Yes | Yes |
| SeaweedFS Filer | Linearly Scalable, Customizable | O(1) disk seek | FUSE | Yes | Yes |
| GlusterFS | hashing | | FUSE, NFS | | |
| Ceph | hashing + rules | | FUSE | Yes | |
| MooseFS | in memory | | FUSE | | No |
| MinIO | separate meta file for each file | | | Yes | No |

[Back to TOC](#table-of-contents)
@@ -364,6 +417,14 @@ GlusterFS hashes the path and filename into ids, and assigned to virtual volumes

[Back to TOC](#table-of-contents)

+### Compared to MooseFS ###
+
+MooseFS chooses to neglect the small file issue. From the moosefs 3.0 manual, "even a small file will occupy 64KiB plus additionally 4KiB of checksums and 1KiB for the header", because it "was initially designed for keeping large amounts (like several thousands) of very big files".
+
+MooseFS Master Server keeps all meta data in memory. Same issue as HDFS namenode.
+
+[Back to TOC](#table-of-contents)
+
### Compared to Ceph ###

Ceph can be setup similar to SeaweedFS as a key->blob store. It is much more complicated, with the need to support layers on top of it. [Here is a more detailed comparison](https://github.com/chrislusf/seaweedfs/issues/120)
@@ -372,11 +433,11 @@ SeaweedFS has a centralized master group to look up free volumes, while Ceph use

Same as SeaweedFS, Ceph is also based on the object store RADOS. Ceph is rather complicated, with mixed reviews.

-Ceph uses CRUSH hashing to automatically manage the data placement. SeaweedFS places data by assigned volumes.
+Ceph uses CRUSH hashing to automatically manage data placement, which is efficient to locate the data. But the data has to be placed according to the CRUSH algorithm: any wrong configuration can cause data loss. SeaweedFS places data by assigning it to any writable volume; if a write to one volume fails, just pick another volume to write to. Adding more volumes is as simple as it can be.

SeaweedFS is optimized for small files. Small files are stored as one continuous block of content, with at most 8 unused bytes between files. Small file access is O(1) disk read.

-SeaweedFS Filer uses off-the-shelf stores, such as MySql, Postgres, Redis, Etcd, Cassandra, MemSql, TiDB, CockroachCB, to manage file directories. There are proven, scalable, and easier to manage.
+SeaweedFS Filer uses off-the-shelf stores, such as MySql, Postgres, Mongodb, Redis, Elastic Search, Cassandra, HBase, MemSql, TiDB, CockroachDB, Etcd, to manage file directories. These stores are proven, scalable, and easier to manage.

| SeaweedFS | comparable to Ceph | advantage |
| ------------- | ------------- | ---------------- |
@@ -386,19 +447,31 @@ SeaweedFS Filer uses off-the-shelf stores, such as MySql, Postgres, Redis, Etcd,

[Back to TOC](#table-of-contents)

### Compared to MinIO ###

MinIO follows AWS S3 closely and is ideal for testing the S3 API. It has good UI, policies, versioning, etc. SeaweedFS is trying to catch up here. It is also possible to put MinIO as a gateway in front of SeaweedFS later.

MinIO metadata are in simple files. Each file write will incur extra writes to the corresponding meta file.

MinIO does not have optimization for lots of small files. The files are simply stored as-is on local disks.
Plus the extra meta file and shards for erasure coding, this only amplifies the LOSF problem.

MinIO has multiple disk IO to read one file. SeaweedFS has O(1) disk reads, even for erasure coded files.

MinIO has full-time erasure coding. SeaweedFS uses replication on hot data for faster speed and optionally applies erasure coding on warm data.

MinIO does not have POSIX-like API support.

MinIO has specific requirements on storage layout. It is not flexible to adjust capacity. In SeaweedFS, just start one volume server pointing to the master. That's all.

## Dev Plan ##

-More tools and documentation, on how to maintain and scale the system. For example, how to move volumes, automatically balancing data, how to grow volumes, how to check system status, etc.
-Other key features include: Erasure Encoding, JWT security.
+* More tools and documentation, on how to manage and scale the system.
+* Read and write stream data.
+* Support structured data.

This is a super exciting project! And we need helpers and [support](https://www.patreon.com/seaweedfs)!

BTW, we suggest running the code style check script `util/gostd` before you push your branch to remote; it will make SeaweedFS easier to review, maintain, and develop:

```
$ ./util/gostd
```

[Back to TOC](#table-of-contents)

## Installation Guide ##
@@ -412,24 +485,18 @@ https://golang.org/doc/install

make sure you set up your $GOPATH

-Step 2: also you may need to install Mercurial by following the instructions at:
-
-http://mercurial.selenic.com/downloads
-
+Step 2: checkout this repo:
+```bash
+git clone https://github.com/chrislusf/seaweedfs.git
+```
Step 3: download, compile, and install the project by executing the following command

```bash
-go get github.com/chrislusf/seaweedfs/weed
+make install
```

Once this is done, you will find the executable "weed" in your `$GOPATH/bin` directory

Step 4: after you modify your code locally, you could start a local build by calling `go install` under

```
$GOPATH/src/github.com/chrislusf/seaweedfs/weed
```

[Back to TOC](#table-of-contents)

## Disk Related Topics ##
@@ -451,50 +518,49 @@ My Own Unscientific Single Machine Results on Mac Book with Solid State Disk, CP

Write 1 million 1KB files:
```
Concurrency Level:      16
-Time taken for tests:   88.796 seconds
+Time taken for tests:   66.753 seconds
Complete requests:      1048576
Failed requests:        0
-Total transferred:      1106764659 bytes
-Requests per second:    11808.87 [#/sec]
-Transfer rate:          12172.05 [Kbytes/sec]
+Total transferred:      1106789009 bytes
+Requests per second:    15708.23 [#/sec]
+Transfer rate:          16191.69 [Kbytes/sec]

Connection Times (ms)
              min      avg        max      std
-Total:        0.2      1.3       44.8      0.9
+Total:        0.3      1.0       84.3      0.9

Percentage of the requests served within a certain time (ms)
-   50%      1.1 ms
-   66%      1.3 ms
-   75%      1.5 ms
-   80%      1.7 ms
-   90%      2.1 ms
-   95%      2.6 ms
-   98%      3.7 ms
-   99%      4.6 ms
-  100%     44.8 ms
+   50%      0.8 ms
+   66%      1.0 ms
+   75%      1.1 ms
+   80%      1.2 ms
+   90%      1.4 ms
+   95%      1.7 ms
+   98%      2.1 ms
+   99%      2.6 ms
+  100%     84.3 ms
```

Randomly read 1 million files:
```
Concurrency Level:      16
-Time taken for tests:   34.263 seconds
+Time taken for tests:   22.301 seconds
Complete requests:      1048576
Failed requests:        0
-Total transferred:      1106762945 bytes
-Requests per second:    30603.34 [#/sec]
-Transfer rate:          31544.49 [Kbytes/sec]
+Total transferred:      1106812873 bytes
+Requests per second:    47019.38 [#/sec]
+Transfer rate:          48467.57 [Kbytes/sec]

Connection Times (ms)
              min      avg        max      std
-Total:        0.0      0.5       20.7      0.7
+Total:        0.0      0.3       54.1      0.2

Percentage of the requests served within a certain time (ms)
-   50%      0.4 ms
-   75%      0.5 ms
-   95%      0.6 ms
-   98%      0.8 ms
-   99%      1.2 ms
-  100%     20.7 ms
+   50%      0.3 ms
+   90%      0.4 ms
+   98%      0.6 ms
+   99%      0.7 ms
+  100%     54.1 ms
```
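For context: the concurrency level (16) and request count (1048576 files of 1KB) match the defaults of the bundled `weed benchmark` tool, which is presumably how both the old and the refreshed numbers above were produced.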
[Back to TOC](#table-of-contents)

@@ -513,6 +579,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

+The text of this page is available for modification and reuse under the terms of the Creative Commons Attribution-Sharealike 3.0 Unported License and the GNU Free Documentation License (unversioned, with no invariant sections, front-cover texts, or back-cover texts).
+
[Back to TOC](#table-of-contents)

## Stargazers over time ##
backers.md
@@ -1,3 +1,4 @@
<h1 align="center">Sponsors & Backers</h1>

- [Become a backer or sponsor on Patreon](https://www.patreon.com/seaweedfs).

@@ -6,8 +7,10 @@
- [4Sight Imaging](https://www.4sightimaging.com/)
- [Evercam Camera Management Software](https://evercam.io/)
+- [Admiral](https://getadmiral.com)

<h2 align="center">Backers</h2>

- [ColorfulClouds Tech Co. Ltd.](https://caiyunai.com/)
- [Haravan - Ecommerce Platform](https://www.haravan.com)
+- PeterCxy - Creator of Shelter App
docker/Dockerfile
@@ -1,15 +1,23 @@
-FROM frolvlad/alpine-glibc
+FROM alpine

-# Supercronic install settings
-ENV SUPERCRONIC_URL=https://github.com/aptible/supercronic/releases/download/v0.1.8/supercronic-linux-amd64 \
-    SUPERCRONIC=supercronic-linux-amd64 \
-    SUPERCRONIC_SHA1SUM=be43e64c45acd6ec4fce5831e03759c89676a0ea
+# 'latest' or 'dev'
+ARG RELEASE=latest

-# Install SeaweedFS and Supercronic ( for cron job mode )
-# Tried to use curl only (curl -o /tmp/linux_amd64.tar.gz ...), however it turned out that the following tar command failed with "gzip: stdin: not in gzip format"
-RUN apk add --no-cache --virtual build-dependencies --update wget curl ca-certificates && \
-    wget -P /tmp https://github.com/$(curl -s -L https://github.com/chrislusf/seaweedfs/releases/latest | egrep -o 'chrislusf/seaweedfs/releases/download/.*/linux_amd64.tar.gz') && \
-    tar -C /usr/bin/ -xzvf /tmp/linux_amd64.tar.gz && \
+RUN \
+    ARCH=$(if [ $(uname -m) == "x86_64" ] && [ $(getconf LONG_BIT) == "64" ]; then echo "amd64"; \
+        elif [ $(uname -m) == "x86_64" ] && [ $(getconf LONG_BIT) == "32" ]; then echo "386"; \
+        elif [ $(uname -m) == "aarch64" ]; then echo "arm64"; \
+        elif [ $(uname -m) == "armv7l" ]; then echo "arm"; \
+        elif [ $(uname -m) == "armv6l" ]; then echo "arm"; fi;) && \
+    echo "Building for $ARCH" 1>&2 && \
+    SUPERCRONIC_SHA1SUM=$(echo $ARCH | sed 's/386/e0126b0102b9f388ecd55714358e3ad60d0cebdb/g' | sed 's/amd64/5ddf8ea26b56d4a7ff6faecdd8966610d5cb9d85/g' | sed 's/arm64/e2714c43e7781bf1579c85aa61259245f56dbba1/g' | sed 's/arm/47481c3341bc3a1ae91a728e0cc63c8e6d3791ad/g') && \
+    SUPERCRONIC_URL=https://github.com/aptible/supercronic/releases/download/v0.1.9/supercronic-linux-$ARCH && \
+    SUPERCRONIC=supercronic-linux-$ARCH && \
+    # Install SeaweedFS and Supercronic ( for cron job mode )
+    apk add --no-cache --virtual build-dependencies --update wget curl ca-certificates && \
+    apk add fuse && \
+    wget -P /tmp https://github.com/$(curl -s -L https://github.com/chrislusf/seaweedfs/releases/${RELEASE} | egrep -o "chrislusf/seaweedfs/releases/download/.*/linux_$ARCH.tar.gz") && \
+    tar -C /usr/bin/ -xzvf /tmp/linux_$ARCH.tar.gz && \
    curl -fsSLO "$SUPERCRONIC_URL" && \
    echo "${SUPERCRONIC_SHA1SUM} ${SUPERCRONIC}" | sha1sum -c - && \
    chmod +x "$SUPERCRONIC" && \

@@ -32,6 +40,8 @@ EXPOSE 19333
EXPOSE 9333
# s3 server http port
EXPOSE 8333
+# webdav server http port
+EXPOSE 7333

RUN mkdir -p /data/filerldb2
docker/Dockerfile.go_build
@@ -1,5 +1,20 @@
-FROM golang:latest
-RUN go get github.com/chrislusf/seaweedfs/weed
+FROM frolvlad/alpine-glibc as builder
+RUN apk add git go g++ fuse
+RUN mkdir -p /go/src/github.com/chrislusf/
+RUN git clone https://github.com/chrislusf/seaweedfs /go/src/github.com/chrislusf/seaweedfs
+ARG BRANCH=${BRANCH:-master}
+RUN cd /go/src/github.com/chrislusf/seaweedfs && git checkout $BRANCH
+RUN cd /go/src/github.com/chrislusf/seaweedfs/weed \
+    && export LDFLAGS="-X github.com/chrislusf/seaweedfs/weed/util.COMMIT=$(git rev-parse --short HEAD)" \
+    && CGO_ENABLED=0 go install -ldflags "-extldflags -static ${LDFLAGS}"
+
+FROM alpine AS final
+LABEL author="Chris Lu"
+COPY --from=builder /root/go/bin/weed /usr/bin/
+RUN mkdir -p /etc/seaweedfs
+COPY --from=builder /go/src/github.com/chrislusf/seaweedfs/docker/filer.toml /etc/seaweedfs/filer.toml
+COPY --from=builder /go/src/github.com/chrislusf/seaweedfs/docker/entrypoint.sh /entrypoint.sh
+RUN apk add fuse # for weed mount

# volume server grpc port
EXPOSE 18080

@@ -15,15 +30,13 @@ EXPOSE 19333
EXPOSE 9333
# s3 server http port
EXPOSE 8333
+# webdav server http port
+EXPOSE 7333

RUN mkdir -p /data/filerldb2

VOLUME /data

-RUN mkdir -p /etc/seaweedfs
-RUN cp /go/src/github.com/chrislusf/seaweedfs/docker/filer.toml /etc/seaweedfs/filer.toml
-RUN cp /go/src/github.com/chrislusf/seaweedfs/docker/entrypoint.sh /entrypoint.sh
RUN chmod +x /entrypoint.sh
-RUN cp /go/bin/weed /usr/bin/

ENTRYPOINT ["/entrypoint.sh"]
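To build this image from an arbitrary branch, something like `docker build --build-arg BRANCH=<branch> -t chrislusf/seaweedfs:dev -f Dockerfile.go_build .` from the `docker/` directory should work; the file name `Dockerfile.go_build` is inferred here, since this mirror crops the per-file header.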
docker/Dockerfile.go_build_large (new file, 42 lines)
@@ -0,0 +1,42 @@
FROM frolvlad/alpine-glibc as builder
RUN apk add git go g++ fuse
RUN mkdir -p /go/src/github.com/chrislusf/
RUN git clone https://github.com/chrislusf/seaweedfs /go/src/github.com/chrislusf/seaweedfs
ARG BRANCH=${BRANCH:-master}
RUN cd /go/src/github.com/chrislusf/seaweedfs && git checkout $BRANCH
RUN cd /go/src/github.com/chrislusf/seaweedfs/weed \
    && export LDFLAGS="-X github.com/chrislusf/seaweedfs/weed/util.COMMIT=$(git rev-parse --short HEAD)" \
    && CGO_ENABLED=0 go install -tags 5BytesOffset -ldflags "-extldflags -static ${LDFLAGS}"

FROM alpine AS final
LABEL author="Chris Lu"
COPY --from=builder /root/go/bin/weed /usr/bin/
RUN mkdir -p /etc/seaweedfs
COPY --from=builder /go/src/github.com/chrislusf/seaweedfs/docker/filer.toml /etc/seaweedfs/filer.toml
COPY --from=builder /go/src/github.com/chrislusf/seaweedfs/docker/entrypoint.sh /entrypoint.sh
RUN apk add fuse # for weed mount

# volume server grpc port
EXPOSE 18080
# volume server http port
EXPOSE 8080
# filer server grpc port
EXPOSE 18888
# filer server http port
EXPOSE 8888
# master server shared grpc port
EXPOSE 19333
# master server shared http port
EXPOSE 9333
# s3 server http port
EXPOSE 8333
# webdav server http port
EXPOSE 7333

RUN mkdir -p /data/filerldb2

VOLUME /data

RUN chmod +x /entrypoint.sh

ENTRYPOINT ["/entrypoint.sh"]
docker/Dockerfile.local (new file, 32 lines)
@@ -0,0 +1,32 @@
FROM alpine AS final
LABEL author="Chris Lu"
COPY ./weed /usr/bin/
RUN mkdir -p /etc/seaweedfs
COPY ./filer.toml /etc/seaweedfs/filer.toml
COPY ./entrypoint.sh /entrypoint.sh
RUN apk add fuse # for weed mount

# volume server grpc port
EXPOSE 18080
# volume server http port
EXPOSE 8080
# filer server grpc port
EXPOSE 18888
# filer server http port
EXPOSE 8888
# master server shared grpc port
EXPOSE 19333
# master server shared http port
EXPOSE 9333
# s3 server http port
EXPOSE 8333
# webdav server http port
EXPOSE 7333

RUN mkdir -p /data/filerldb2

VOLUME /data

RUN chmod +x /entrypoint.sh

ENTRYPOINT ["/entrypoint.sh"]
docker/Dockerfile.s3tests (new file, 31 lines)
@@ -0,0 +1,31 @@
FROM ubuntu:20.04

RUN DEBIAN_FRONTEND=noninteractive apt-get update && \
    DEBIAN_FRONTEND=noninteractive apt-get upgrade -y && \
    DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
    git \
    sudo \
    debianutils \
    python3-pip \
    python3-virtualenv \
    python3-dev \
    libevent-dev \
    libffi-dev \
    libxml2-dev \
    libxslt-dev \
    zlib1g-dev && \
    DEBIAN_FRONTEND=noninteractive apt-get clean && \
    rm -rf /var/lib/apt/lists/* && \
    git clone https://github.com/ceph/s3-tests.git /opt/s3-tests

WORKDIR /opt/s3-tests
RUN ./bootstrap

ENV \
    NOSETESTS_EXCLUDE="" \
    NOSETESTS_ATTR="" \
    NOSETESTS_OPTIONS="" \
    S3TEST_CONF="/s3test.conf"

ENTRYPOINT ["/bin/bash", "-c"]
CMD ["sleep 10 && exec ./virtualenv/bin/nosetests ${NOSETESTS_OPTIONS-} ${NOSETESTS_ATTR:+-a $NOSETESTS_ATTR} ${NOSETESTS_EXCLUDE:+-e $NOSETESTS_EXCLUDE}"]
docker/Makefile (new file, 63 lines)
@@ -0,0 +1,63 @@
all: gen

.PHONY : gen

gen: dev

binary:
	cd ../weed; CGO_ENABLED=0 GOOS=linux go build -ldflags "-extldflags -static"; mv weed ../docker/

build: binary
	docker build --no-cache -t chrislusf/seaweedfs:local -f Dockerfile.local .
	rm ./weed

s3tests_build:
	docker build --no-cache -t chrislusf/ceph-s3-tests:local -f Dockerfile.s3tests .

dev: build
	docker-compose -f compose/local-dev-compose.yml -p seaweedfs up

dev_tls: build certstrap
	ENV_FILE="tls.env" docker-compose -f compose/local-dev-compose.yml -p seaweedfs up

dev_mount: build
	docker-compose -f compose/local-mount-compose.yml -p seaweedfs up

profile_mount: build
	docker-compose -f compose/local-mount-profile-compose.yml -p seaweedfs up

k8s: build
	docker-compose -f compose/local-k8s-compose.yml -p seaweedfs up

dev_registry: build
	docker-compose -f compose/local-registry-compose.yml -p seaweedfs up

dev_replicate: build
	docker-compose -f compose/local-replicate-compose.yml -p seaweedfs up

cluster: build
	docker-compose -f compose/local-cluster-compose.yml -p seaweedfs up

2clusters: build
	docker-compose -f compose/local-clusters-compose.yml -p seaweedfs up

s3tests: build s3tests_build
	docker-compose -f compose/local-s3tests-compose.yml -p seaweedfs up

filer_etcd: build
	docker stack deploy -c compose/swarm-etcd.yml fs

clean:
	rm ./weed

certstrap:
	go get github.com/square/certstrap
	certstrap --depot-path compose/tls init --passphrase "" --common-name "SeaweedFS CA" || true
	certstrap --depot-path compose/tls request-cert --passphrase "" --common-name volume01.dev || true
	certstrap --depot-path compose/tls request-cert --passphrase "" --common-name master01.dev || true
	certstrap --depot-path compose/tls request-cert --passphrase "" --common-name filer01.dev || true
	certstrap --depot-path compose/tls request-cert --passphrase "" --common-name client01.dev || true
	certstrap --depot-path compose/tls sign --CA "SeaweedFS CA" volume01.dev || true
	certstrap --depot-path compose/tls sign --CA "SeaweedFS CA" master01.dev || true
	certstrap --depot-path compose/tls sign --CA "SeaweedFS CA" filer01.dev || true
	certstrap --depot-path compose/tls sign --CA "SeaweedFS CA" client01.dev || true
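In short: `make dev` (the default via `gen`) compiles a static `weed` binary on the host, bakes it into a `chrislusf/seaweedfs:local` image with `Dockerfile.local`, and starts `compose/local-dev-compose.yml`; each other target pairs that same local image with a different compose file.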
docker/README.md
@@ -11,11 +11,29 @@ docker-compose -f seaweedfs-compose.yml -p seaweedfs up

```

-## Development
+## Try latest tip

+```bash
+wget https://raw.githubusercontent.com/chrislusf/seaweedfs/master/docker/seaweedfs-dev-compose.yml
+
+docker-compose -f seaweedfs-dev-compose.yml -p seaweedfs up
+```
+
+## Local Development

```bash
cd $GOPATH/src/github.com/chrislusf/seaweedfs/docker

-docker-compose -f dev-compose.yml -p seaweedfs up
+make
```

+## Build and push a multiarch build
+
+Make sure that `docker buildx` is supported (might be an experimental docker feature)
+```bash
+BUILDER=$(docker buildx create --driver docker-container --use)
+docker buildx build --pull --push --platform linux/386,linux/amd64,linux/arm64,linux/arm/v7,linux/arm/v6 . -t chrislusf/seaweedfs
+docker buildx stop $BUILDER
+```
docker/compose/dev.env (new empty file)
docker/compose/local-cluster-compose.yml (new file, 75 lines)
@@ -0,0 +1,75 @@
version: '2'

services:
  master0:
    image: chrislusf/seaweedfs:local
    ports:
      - 9333:9333
      - 19333:19333
    command: "master -ip=master0 -port=9333 -peers=master0:9333,master1:9334,master2:9335 -mdir=/data/m1"
  master1:
    image: chrislusf/seaweedfs:local
    ports:
      - 9334:9334
      - 19334:19334
    command: "master -ip=master1 -port=9334 -peers=master0:9333,master1:9334,master2:9335 -mdir=/data/m2"
  master2:
    image: chrislusf/seaweedfs:local
    ports:
      - 9335:9335
      - 19335:19335
    command: "master -ip=master2 -port=9335 -peers=master0:9333,master1:9334,master2:9335 -mdir=/data/m3"
  volume1:
    image: chrislusf/seaweedfs:local
    ports:
      - 8080:8080
      - 18080:18080
    command: 'volume -mserver="master0:9333,master1:9334,master2:9335" -port=8080 -ip=volume1 -publicUrl=localhost:8080 -preStopSeconds=1 -disk=ssd1'
    depends_on:
      - master0
      - master1
      - master2
  volume2:
    image: chrislusf/seaweedfs:local
    ports:
      - 8082:8082
      - 18082:18082
    command: 'volume -mserver="master0:9333,master1:9334,master2:9335" -port=8082 -ip=volume2 -publicUrl=localhost:8082 -preStopSeconds=1 -disk=ssd1'
    depends_on:
      - master0
      - master1
      - master2
  volume3:
    image: chrislusf/seaweedfs:local
    ports:
      - 8083:8083
      - 18083:18083
    command: 'volume -mserver="master0:9333,master1:9334,master2:9335" -port=8083 -ip=volume3 -publicUrl=localhost:8083 -preStopSeconds=1'
    depends_on:
      - master0
      - master1
      - master2
  filer:
    image: chrislusf/seaweedfs:local
    ports:
      - 8888:8888
      - 18888:18888
    command: 'filer -master="master0:9333,master1:9334,master2:9335"'
    depends_on:
      - master0
      - master1
      - master2
      - volume1
      - volume2
  s3:
    image: chrislusf/seaweedfs:local
    ports:
      - 8333:8333
    command: 's3 -filer="filer:8888"'
    depends_on:
      - master0
      - master1
      - master2
      - volume1
      - volume2
      - filer
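The three masters here form a Raft group via the shared `-peers` list (one leader, quorum of two), and every volume server, the filer, and the S3 gateway list all three in `-mserver`/`-master`, so a leader change does not require any config change.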
docker/compose/local-clusters-compose.yml (new file, 28 lines)
@@ -0,0 +1,28 @@
version: '2'

services:
  server1:
    image: chrislusf/seaweedfs:local
    ports:
      - 9333:9333
      - 19333:19333
      - 8084:8080
      - 18084:18080
      - 8888:8888
      - 18888:18888
    command: "server -ip=server1 -filer -volume.max=0 -master.volumeSizeLimitMB=1024 -volume.preStopSeconds=1"
    volumes:
      - ./master-cloud.toml:/etc/seaweedfs/master.toml
    depends_on:
      - server2
  server2:
    image: chrislusf/seaweedfs:local
    ports:
      - 9334:9333
      - 19334:19333
      - 8085:8080
      - 18085:18080
      - 8889:8888
      - 18889:18888
      - 8334:8333
    command: "server -ip=server2 -filer -s3 -volume.max=0 -master.volumeSizeLimitMB=1024 -volume.preStopSeconds=1"
docker/compose/local-dev-compose.yml (new file, 67 lines)
@@ -0,0 +1,67 @@
version: '2'

services:
  master:
    image: chrislusf/seaweedfs:local
    ports:
      - 9333:9333
      - 19333:19333
    command: "-v=1 master -ip=master"
    volumes:
      - ./tls:/etc/seaweedfs/tls
    env_file:
      - ${ENV_FILE:-dev.env}
  volume:
    image: chrislusf/seaweedfs:local
    ports:
      - 8080:8080
      - 18080:18080
    command: "-v=1 volume -mserver=master:9333 -port=8080 -ip=volume -preStopSeconds=1"
    depends_on:
      - master
    volumes:
      - ./tls:/etc/seaweedfs/tls
    env_file:
      - ${ENV_FILE:-dev.env}
  filer:
    image: chrislusf/seaweedfs:local
    ports:
      - 8111:8111
      - 8888:8888
      - 18888:18888
    command: '-v=1 filer -master="master:9333" -iam'
    depends_on:
      - master
      - volume
    volumes:
      - ./tls:/etc/seaweedfs/tls
    env_file:
      - ${ENV_FILE:-dev.env}
  s3:
    image: chrislusf/seaweedfs:local
    ports:
      - 8333:8333
    command: '-v=1 s3 -filer="filer:8888"'
    depends_on:
      - master
      - volume
      - filer
    volumes:
      - ./tls:/etc/seaweedfs/tls
    env_file:
      - ${ENV_FILE:-dev.env}
  mount:
    image: chrislusf/seaweedfs:local
    privileged: true
    cap_add:
      - SYS_ADMIN
    mem_limit: 4096m
    command: '-v=4 mount -filer="filer:8888" -dirAutoCreate -dir=/mnt/seaweedfs -cacheCapacityMB=100 -concurrentWriters=128'
    volumes:
      - ./tls:/etc/seaweedfs/tls
    env_file:
      - ${ENV_FILE:-dev.env}
    depends_on:
      - master
      - volume
      - filer
docker/compose/local-k8s-compose.yml (new file, 94 lines)
@@ -0,0 +1,94 @@
version: '2'

services:
  master:
    image: chrislusf/seaweedfs:local
    ports:
      - 9333:9333
      - 19333:19333
    command: "master -ip=master"
  volume:
    image: chrislusf/seaweedfs:local
    ports:
      - 8080:8080
      - 18080:18080
    command: "volume -mserver=master:9333 -port=8080 -ip=volume"
    depends_on:
      - master
  mysql:
    image: percona/percona-server:5.7
    ports:
      - 3306:3306
    volumes:
      - ./seaweedfs.sql:/docker-entrypoint-initdb.d/seaweedfs.sql
    environment:
      - MYSQL_ROOT_PASSWORD=secret
      - MYSQL_DATABASE=seaweedfs
      - MYSQL_PASSWORD=secret
      - MYSQL_USER=seaweedfs
  filer:
    image: chrislusf/seaweedfs:local
    ports:
      - 8888:8888
      - 18888:18888
    environment:
      - WEED_MYSQL_HOSTNAME=mysql
      - WEED_MYSQL_PORT=3306
      - WEED_MYSQL_DATABASE=seaweedfs
      - WEED_MYSQL_USERNAME=seaweedfs
      - WEED_MYSQL_PASSWORD=secret
      - WEED_MYSQL_ENABLED=true
      - WEED_MYSQL_CONNECTION_MAX_IDLE=5
      - WEED_MYSQL_CONNECTION_MAX_OPEN=75
      # "refresh" connection every 10 minutes, eliminating mysql closing "old" connections
      - WEED_MYSQL_CONNECTION_MAX_LIFETIME_SECONDS=600
      # enable usage of memsql as filer backend
      - WEED_MYSQL_INTERPOLATEPARAMS=true
      - WEED_LEVELDB2_ENABLED=false
    command: '-v 9 filer -master="master:9333"'
    depends_on:
      - master
      - volume
      - mysql
  ingress:
    image: jwilder/nginx-proxy:alpine
    ports:
      - "80:80"
    volumes:
      - /var/run/docker.sock:/tmp/docker.sock:ro
      - ./nginx/proxy.conf:/etc/nginx/proxy.conf
  s3:
    image: chrislusf/seaweedfs:local
    ports:
      - 8333:8333
    command: '-v 9 s3 -filer="filer:8888"'
    depends_on:
      - master
      - volume
      - filer
    environment:
      - VIRTUAL_HOST=ingress
      - VIRTUAL_PORT=8333
  registry:
    image: registry:2
    environment:
      REGISTRY_HTTP_ADDR: "0.0.0.0:5001" # seaweedfs s3
      REGISTRY_LOG_LEVEL: "debug"
      REGISTRY_STORAGE: "s3"
      REGISTRY_STORAGE_S3_REGION: "us-east-1"
      REGISTRY_STORAGE_S3_REGIONENDPOINT: "http://ingress"
      REGISTRY_STORAGE_S3_BUCKET: "registry"
      REGISTRY_STORAGE_S3_ACCESSKEY: "some_access_key1"
      REGISTRY_STORAGE_S3_SECRETKEY: "some_secret_key1"
      REGISTRY_STORAGE_S3_V4AUTH: "true"
      REGISTRY_STORAGE_S3_SECURE: "false"
      REGISTRY_STORAGE_S3_SKIPVERIFY: "true"
      REGISTRY_STORAGE_S3_ROOTDIRECTORY: "/"
      REGISTRY_STORAGE_DELETE_ENABLED: "true"
      REGISTRY_STORAGE_REDIRECT_DISABLE: "true"
      REGISTRY_VALIDATION_DISABLED: "true"
    ports:
      - 5001:5001
    depends_on:
      - s3
      - ingress
docker/compose/local-minio-gateway-compose.yml (new file, 50 lines)
@@ -0,0 +1,50 @@
version: '2'

services:
  master:
    image: chrislusf/seaweedfs:local
    ports:
      - 9333:9333
      - 19333:19333
    command: "master -ip=master -volumeSizeLimitMB=1024"
  volume:
    image: chrislusf/seaweedfs:local
    ports:
      - 8080:8080
      - 18080:18080
    command: "volume -mserver=master:9333 -port=8080 -ip=volume -max=0 -preStopSeconds=1"
    depends_on:
      - master
  s3:
    image: chrislusf/seaweedfs:local
    ports:
      - 8888:8888
      - 18888:18888
      - 8333:8333
    command: '-v 1 filer -master="master:9333" -s3 -s3.config=/etc/seaweedfs/s3.json -s3.port=8333'
    volumes:
      - ./s3.json:/etc/seaweedfs/s3.json
    depends_on:
      - master
      - volume
  minio-gateway-s3:
    image: minio/minio
    ports:
      - 9000:9000
    command: 'minio gateway s3 http://s3:8333'
    restart: on-failure
    environment:
      MINIO_ACCESS_KEY: "some_access_key1"
      MINIO_SECRET_KEY: "some_secret_key1"
    depends_on:
      - s3
  minio-warp:
    image: minio/warp
    command: 'mixed --duration=5m --obj.size=3mb --autoterm'
    restart: on-failure
    environment:
      WARP_HOST: "minio-gateway-s3:9000"
      WARP_ACCESS_KEY: "some_access_key1"
      WARP_SECRET_KEY: "some_secret_key1"
    depends_on:
      - minio-gateway-s3
46
docker/compose/local-mount-compose.yml
Normal file
46
docker/compose/local-mount-compose.yml
Normal file
@ -0,0 +1,46 @@
|
||||
version: '2'
|
||||
|
||||
services:
|
||||
master:
|
||||
image: chrislusf/seaweedfs:local
|
||||
ports:
|
||||
- 9333:9333
|
||||
- 19333:19333
|
||||
command: "master -ip=master"
|
||||
volume:
|
||||
image: chrislusf/seaweedfs:local
|
||||
ports:
|
||||
- 7455:8080
|
||||
- 9325:9325
|
||||
command: 'volume -mserver="master:9333" -port=8080 -metricsPort=9325 -preStopSeconds=1 -publicUrl=localhost:7455'
|
||||
depends_on:
|
||||
- master
|
||||
filer:
|
||||
image: chrislusf/seaweedfs:local
|
||||
ports:
|
||||
- 8888:8888
|
||||
- 18888:18888
|
||||
- 9326:9326
|
||||
command: 'filer -master="master:9333" -metricsPort=9326'
|
||||
tty: true
|
||||
stdin_open: true
|
||||
depends_on:
|
||||
- master
|
||||
- volume
|
||||
mount_1:
|
||||
image: chrislusf/seaweedfs:local
|
||||
privileged: true
|
||||
entrypoint: '/bin/sh -c "mkdir -p t1 && mkdir -p cache/t1 && weed -v=4 mount -filer=filer:8888 -cacheDir=./cache/t1 -dir=./t1 -filer.path=/c1 -volumeServerAccess=filerProxy"'
|
||||
depends_on:
|
||||
- master
|
||||
- volume
|
||||
- filer
|
||||
mount_2:
|
||||
image: chrislusf/seaweedfs:local
|
||||
privileged: true
|
||||
entrypoint: '/bin/sh -c "mkdir -p t2 && mkdir -p cache/t2 && weed -v=4 mount -filer=filer:8888 -cacheDir=./cache/t2 -dir=./t2 -filer.path=/c1 -volumeServerAcess=publicUrl"'
|
||||
depends_on:
|
||||
- master
|
||||
- volume
|
||||
- filer
|
||||
- mount_1
|
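A quick way to check that the two FUSE mounts see the same filer path (/c1); the exact service/container names and working directory depend on the compose project, so treat this as a sketch:
```
docker-compose -f docker/compose/local-mount-compose.yml up -d
# write through one mount, read through the other
docker-compose -f docker/compose/local-mount-compose.yml exec mount_1 sh -c 'echo hello > t1/x'
docker-compose -f docker/compose/local-mount-compose.yml exec mount_2 cat t2/x
```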
47
docker/compose/local-mount-profile-compose.yml
Normal file
@ -0,0 +1,47 @@
|
||||
version: '2'
|
||||
|
||||
services:
|
||||
master:
|
||||
image: chrislusf/seaweedfs:local
|
||||
ports:
|
||||
- 9333:9333
|
||||
- 19333:19333
|
||||
command: "master -ip=master"
|
||||
volume:
|
||||
image: chrislusf/seaweedfs:local
|
||||
ports:
|
||||
- 7455:8080
|
||||
- 9325:9325
|
||||
volumes:
|
||||
- /Volumes/mobile_disk/99:/data
|
||||
command: 'volume -mserver="master:9333" -port=8080 -metricsPort=9325 -preStopSeconds=1 -publicUrl=localhost:7455'
|
||||
depends_on:
|
||||
- master
|
||||
filer:
|
||||
image: chrislusf/seaweedfs:local
|
||||
ports:
|
||||
- 8888:8888
|
||||
- 18888:18888
|
||||
- 9326:9326
|
||||
volumes:
|
||||
- /Volumes/mobile_disk/99:/data
|
||||
command: 'filer -master="master:9333" -metricsPort=9326'
|
||||
tty: true
|
||||
stdin_open: true
|
||||
depends_on:
|
||||
- master
|
||||
- volume
|
||||
mount:
|
||||
image: chrislusf/seaweedfs:local
|
||||
privileged: true
|
||||
cap_add:
|
||||
- SYS_ADMIN
|
||||
devices:
|
||||
- fuse
|
||||
volumes:
|
||||
- /Volumes/mobile_disk/99:/data
|
||||
entrypoint: '/bin/sh -c "mkdir -p t1 && weed mount -filer=filer:8888 -dir=./t1 -cacheCapacityMB=0 -memprofile=/data/mount.mem.pprof"'
|
||||
depends_on:
|
||||
- master
|
||||
- volume
|
||||
- filer
|
85
docker/compose/local-registry-compose.yml
Normal file
@ -0,0 +1,85 @@
|
||||
version: '2'
|
||||
|
||||
services:
|
||||
master:
|
||||
image: chrislusf/seaweedfs:local
|
||||
ports:
|
||||
- 9333:9333
|
||||
- 19333:19333
|
||||
command: "master -ip=master -volumeSizeLimitMB=1024"
|
||||
volume:
|
||||
image: chrislusf/seaweedfs:local
|
||||
ports:
|
||||
- 8080:8080
|
||||
- 18080:18080
|
||||
command: "volume -mserver=master:9333 -port=8080 -ip=volume -max=0 -preStopSeconds=1"
|
||||
depends_on:
|
||||
- master
|
||||
s3:
|
||||
image: chrislusf/seaweedfs:local
|
||||
ports:
|
||||
- 8888:8888
|
||||
- 18888:18888
|
||||
- 8333:8333
|
||||
command: '-v 9 filer -master="master:9333" -s3 -s3.config=/etc/seaweedfs/s3.json -s3.port=8333'
|
||||
volumes:
|
||||
- ./s3.json:/etc/seaweedfs/s3.json
|
||||
depends_on:
|
||||
- master
|
||||
- volume
|
||||
minio:
|
||||
image: minio/minio
|
||||
ports:
|
||||
- 9000:9000
|
||||
command: 'minio server /data'
|
||||
environment:
|
||||
MINIO_ACCESS_KEY: "some_access_key1"
|
||||
MINIO_SECRET_KEY: "some_secret_key1"
|
||||
depends_on:
|
||||
- master
|
||||
registry1:
|
||||
image: registry:2
|
||||
environment:
|
||||
REGISTRY_HTTP_ADDR: "0.0.0.0:5001" # seaweedfs s3
|
||||
REGISTRY_LOG_LEVEL: "debug"
|
||||
REGISTRY_STORAGE: "s3"
|
||||
REGISTRY_STORAGE_S3_REGION: "us-east-1"
|
||||
REGISTRY_STORAGE_S3_REGIONENDPOINT: "http://s3:8333"
|
||||
REGISTRY_STORAGE_S3_BUCKET: "registry"
|
||||
REGISTRY_STORAGE_S3_ACCESSKEY: "some_access_key1"
|
||||
REGISTRY_STORAGE_S3_SECRETKEY: "some_secret_key1"
|
||||
REGISTRY_STORAGE_S3_V4AUTH: "true"
|
||||
REGISTRY_STORAGE_S3_SECURE: "false"
|
||||
REGISTRY_STORAGE_S3_SKIPVERIFY: "true"
|
||||
REGISTRY_STORAGE_S3_ROOTDIRECTORY: "/"
|
||||
REGISTRY_STORAGE_DELETE_ENABLED: "true"
|
||||
REGISTRY_STORAGE_REDIRECT_DISABLE: "true"
|
||||
REGISTRY_VALIDATION_DISABLED: "true"
|
||||
ports:
|
||||
- 5001:5001
|
||||
depends_on:
|
||||
- s3
|
||||
- minio
|
||||
registry2:
|
||||
image: registry:2
|
||||
environment:
|
||||
REGISTRY_HTTP_ADDR: "0.0.0.0:5002" # minio
|
||||
REGISTRY_LOG_LEVEL: "debug"
|
||||
REGISTRY_STORAGE: "s3"
|
||||
REGISTRY_STORAGE_S3_REGION: "us-east-1"
|
||||
REGISTRY_STORAGE_S3_REGIONENDPOINT: "http://minio:9000"
|
||||
REGISTRY_STORAGE_S3_BUCKET: "registry"
|
||||
REGISTRY_STORAGE_S3_ACCESSKEY: "some_access_key1"
|
||||
REGISTRY_STORAGE_S3_SECRETKEY: "some_secret_key1"
|
||||
REGISTRY_STORAGE_S3_V4AUTH: "true"
|
||||
REGISTRY_STORAGE_S3_SECURE: "false"
|
||||
REGISTRY_STORAGE_S3_SKIPVERIFY: "true"
|
||||
REGISTRY_STORAGE_S3_ROOTDIRECTORY: "/"
|
||||
REGISTRY_STORAGE_DELETE_ENABLED: "true"
|
||||
REGISTRY_STORAGE_REDIRECT_DISABLE: "true"
|
||||
REGISTRY_VALIDATION_DISABLED: "true"
|
||||
ports:
|
||||
- 5002:5002
|
||||
depends_on:
|
||||
- s3
|
||||
- minio
|
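A smoke test for the two registries above (registry1 backed by SeaweedFS S3 on port 5001, registry2 backed by MinIO on 5002); the "registry" bucket is assumed to exist already, e.g. created with the aws CLI as shown further below:
```
docker pull busybox
docker tag busybox localhost:5001/busybox && docker push localhost:5001/busybox   # seaweedfs-backed
docker tag busybox localhost:5002/busybox && docker push localhost:5002/busybox   # minio-backed
```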
61
docker/compose/local-replicate-compose.yml
Normal file
@ -0,0 +1,61 @@
|
||||
version: '2'
|
||||
|
||||
services:
|
||||
master:
|
||||
image: chrislusf/seaweedfs:local
|
||||
ports:
|
||||
- 9333:9333
|
||||
- 19333:19333
|
||||
command: "master -ip=master"
|
||||
volume:
|
||||
image: chrislusf/seaweedfs:local
|
||||
ports:
|
||||
- 8080:8080
|
||||
- 18080:18080
|
||||
command: "volume -mserver=master:9333 -port=8080 -ip=volume -preStopSeconds=1"
|
||||
depends_on:
|
||||
- master
|
||||
filer:
|
||||
image: chrislusf/seaweedfs:local
|
||||
ports:
|
||||
- 8888:8888
|
||||
- 18888:18888
|
||||
command: '-v=9 filer -master="master:9333"'
|
||||
restart: on-failure
|
||||
volumes:
|
||||
- ./notification.toml:/etc/seaweedfs/notification.toml
|
||||
depends_on:
|
||||
- master
|
||||
- volume
|
||||
- rabbitmq
|
||||
- replicate
|
||||
environment:
|
||||
RABBIT_SERVER_URL: "amqp://guest:guest@rabbitmq:5672/"
|
||||
replicate:
|
||||
image: chrislusf/seaweedfs:local
|
||||
command: '-v=9 filer.replicate'
|
||||
restart: on-failure
|
||||
volumes:
|
||||
- ./notification.toml:/etc/seaweedfs/notification.toml
|
||||
- ./replication.toml:/etc/seaweedfs/replication.toml
|
||||
depends_on:
|
||||
- rabbitmq
|
||||
environment:
|
||||
RABBIT_SERVER_URL: "amqp://guest:guest@rabbitmq:5672/"
|
||||
s3:
|
||||
image: chrislusf/seaweedfs:local
|
||||
ports:
|
||||
- 8333:8333
|
||||
command: 's3 -filer="filer:8888"'
|
||||
depends_on:
|
||||
- master
|
||||
- volume
|
||||
- filer
|
||||
rabbitmq:
|
||||
image: rabbitmq:3.8.10-management-alpine
|
||||
ports:
|
||||
- 5672:5672
|
||||
- 15671:15671
|
||||
- 15672:15672
|
||||
environment:
|
||||
RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS: "-rabbit log_levels [{connection,error},{queue,debug}]"
|
45
docker/compose/local-s3tests-compose.yml
Normal file
@ -0,0 +1,45 @@
|
||||
version: '2'
|
||||
|
||||
services:
|
||||
master:
|
||||
image: chrislusf/seaweedfs:local
|
||||
ports:
|
||||
- 9333:9333
|
||||
- 19333:19333
|
||||
command: "master -ip=master -volumeSizeLimitMB=16"
|
||||
environment:
|
||||
WEED_MASTER_VOLUME_GROWTH_COPY_1: 1
|
||||
WEED_MASTER_VOLUME_GROWTH_COPY_OTHER: 1
|
||||
volume:
|
||||
image: chrislusf/seaweedfs:local
|
||||
ports:
|
||||
- 8080:8080
|
||||
- 18080:18080
|
||||
command: "volume -mserver=master:9333 -port=8080 -ip=volume -preStopSeconds=1"
|
||||
depends_on:
|
||||
- master
|
||||
s3:
|
||||
image: chrislusf/seaweedfs:local
|
||||
ports:
|
||||
- 8888:8888
|
||||
- 18888:18888
|
||||
- 8000:8000
|
||||
command: 'filer -master="master:9333" -s3 -s3.config=/etc/seaweedfs/s3.json -s3.port=8000'
|
||||
volumes:
|
||||
- ./s3.json:/etc/seaweedfs/s3.json
|
||||
depends_on:
|
||||
- master
|
||||
- volume
|
||||
s3tests:
|
||||
image: chrislusf/ceph-s3-tests:local
|
||||
volumes:
|
||||
- ./s3tests.conf:/opt/s3-tests/s3tests.conf
|
||||
environment:
|
||||
S3TEST_CONF: "s3tests.conf"
|
||||
NOSETESTS_OPTIONS: "--verbose --logging-level=ERROR --with-xunit --failure-detail s3tests_boto3.functional.test_s3"
|
||||
NOSETESTS_ATTR: "!tagging,!fails_on_aws,!encryption,!bucket-policy,!versioning,!fails_on_rgw,!bucket-policy,!fails_with_subdomain,!policy_status,!object-lock,!lifecycle,!cors,!user-policy"
|
||||
NOSETESTS_EXCLUDE: "(bucket_list_delimiter_basic|bucket_listv2_delimiter_basic|bucket_listv2_encoding_basic|bucket_list_encoding_basic|bucket_list_delimiter_prefix|bucket_listv2_delimiter_prefix_ends_with_delimiter|bucket_list_delimiter_prefix_ends_with_delimiter|bucket_list_delimiter_alt|bucket_listv2_delimiter_alt|bucket_list_delimiter_prefix_underscore|bucket_list_delimiter_percentage|bucket_listv2_delimiter_percentage|bucket_list_delimiter_whitespace|bucket_listv2_delimiter_whitespace|bucket_list_delimiter_dot|bucket_listv2_delimiter_dot|bucket_list_delimiter_unreadable|bucket_listv2_delimiter_unreadable|bucket_listv2_fetchowner_defaultempty|bucket_listv2_fetchowner_empty|bucket_list_prefix_delimiter_alt|bucket_listv2_prefix_delimiter_alt|bucket_list_prefix_delimiter_prefix_not_exist|bucket_listv2_prefix_delimiter_prefix_not_exist|bucket_list_prefix_delimiter_delimiter_not_exist|bucket_listv2_prefix_delimiter_delimiter_not_exist|bucket_list_prefix_delimiter_prefix_delimiter_not_exist|bucket_listv2_prefix_delimiter_prefix_delimiter_not_exist|bucket_list_maxkeys_none|bucket_listv2_maxkeys_none|bucket_list_maxkeys_invalid|bucket_listv2_continuationtoken_empty|bucket_list_return_data|bucket_list_objects_anonymous|bucket_listv2_objects_anonymous|bucket_notexist|bucketv2_notexist|bucket_delete_nonempty|bucket_concurrent_set_canned_acl|object_write_to_nonexist_bucket|object_requestid_matches_header_on_error|object_write_cache_control|object_write_expires|object_set_get_metadata_none_to_good|object_set_get_metadata_none_to_empty|object_set_get_metadata_overwrite_to_empty|post_object_anonymous_request|post_object_authenticated_request|post_object_authenticated_no_content_type|post_object_authenticated_request_bad_access_key|post_object_set_success_code|post_object_set_invalid_success_code|post_object_upload_larger_than_chunk|post_object_set_key_from_filename|post_object_ignored_header|post_object_case_insensitive_condition_fields|post_object_escaped_field_values|post_object_success_redirect_action|post_object_invalid_signature|post_object_invalid_access_key|post_object_missing_policy_condition|post_object_user_specified_header|post_object_request_missing_policy_specified_field|post_object_expired_policy|post_object_invalid_request_field_value|get_object_ifmatch_failed|get_object_ifunmodifiedsince_good|put_object_ifmatch_failed|object_raw_get_bucket_gone|object_delete_key_bucket_gone|object_raw_get_bucket_acl|object_raw_get_object_acl|object_raw_response_headers|object_raw_authenticated_bucket_gone|object_raw_get_x_amz_expires_out_max_range|object_raw_get_x_amz_expires_out_positive_range|object_anon_put_write_access|object_raw_put_authenticated_expired|bucket_create_exists|bucket_create_naming_bad_short_one|bucket_create_naming_bad_short_two|bucket_get_location|bucket_acl_default|bucket_acl_canned|bucket_acl_canned_publicreadwrite|bucket_acl_canned_authenticatedread|object_acl_default|object_acl_canned_during_create|object_acl_canned|object_acl_canned_publicreadwrite|object_acl_canned_authenticatedread|object_acl_canned_bucketownerread|object_acl_canned_bucketownerfullcontrol|object_acl_full_control_verify_attributes|bucket_acl_canned_private_to_private|bucket_acl_grant_nonexist_user|bucket_acl_no_grants|bucket_acl_grant_email_not_exist|bucket_acl_revoke_all|bucket_recreate_not_overriding|object_copy_verify_contenttype|object_copy_to_itself_with_metadata|object_copy_not_owned_bucket|object_copy_not_owned_object_bucket|object_copy_retaining_metadata|object_copy_replacing_metadata|multipart_upload_empty|multipart_copy_invalid_range|multipart_copy_special_names|multipart_upload_resend_part|multipart_upload_size_too_small|abort_multipart_upload_not_found|multipart_upload_missing_part|multipart_upload_incorrect_etag|100_continue|ranged_request_invalid_range|ranged_request_empty_object|access_bucket)"
|
||||
depends_on:
|
||||
- master
|
||||
- volume
|
||||
- s3
|
30
docker/compose/master-cloud.toml
Normal file
@ -0,0 +1,30 @@
|
||||
|
||||
# Put this file to one of the location, with descending priority
|
||||
# ./master.toml
|
||||
# $HOME/.seaweedfs/master.toml
|
||||
# /etc/seaweedfs/master.toml
|
||||
# this file is read by master
|
||||
|
||||
[master.maintenance]
|
||||
# periodically run these scripts; this is the same as running them from 'weed shell'
|
||||
scripts = """
|
||||
lock
|
||||
ec.encode -fullPercent=95 -quietFor=1h
|
||||
ec.rebuild -force
|
||||
ec.balance -force
|
||||
volume.balance -force
|
||||
volume.fix.replication
|
||||
unlock
|
||||
"""
|
||||
sleep_minutes = 17 # sleep minutes between each script execution
|
||||
|
||||
# configurations for tiered cloud storage
|
||||
# old volumes are transparently moved to cloud for cost efficiency
|
||||
[storage.backend]
|
||||
[storage.backend.s3.default]
|
||||
enabled = true
|
||||
aws_access_key_id = "any" # if empty, loads from the shared credentials file (~/.aws/credentials).
|
||||
aws_secret_access_key = "any" # if empty, loads from the shared credentials file (~/.aws/credentials).
|
||||
region = "us-east-2"
|
||||
bucket = "volume_bucket" # an existing bucket
|
||||
endpoint = "http://server2:8333"
|
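One way to feed this file to a master, mounted at one of the locations listed at the top (a sketch; the s3 endpoint server2:8333 and the pre-existing volume_bucket are assumptions from the config above):
```
docker run -d --name master -p 9333:9333 \
  -v "$PWD/master-cloud.toml:/etc/seaweedfs/master.toml" \
  chrislusf/seaweedfs master
```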
17
docker/compose/notification.toml
Normal file
@ -0,0 +1,17 @@
|
||||
[notification.log]
|
||||
# this is only for debugging purposes and does not work with "weed filer.replicate"
|
||||
enabled = false
|
||||
|
||||
|
||||
[notification.gocdk_pub_sub]
|
||||
# The Go Cloud Development Kit (https://gocloud.dev).
|
||||
# PubSub API (https://godoc.org/gocloud.dev/pubsub).
|
||||
# Supports AWS SNS/SQS, Azure Service Bus, Google PubSub, NATS and RabbitMQ.
|
||||
enabled = true
|
||||
# This URL will Dial the RabbitMQ server at the URL in the environment
|
||||
# variable RABBIT_SERVER_URL and open the exchange "myexchange".
|
||||
# The exchange must have already been created by some other means, like
|
||||
# the RabbitMQ management plugin. Create the exchange (here "swexchange", type fanout) and the queue ("swqueue"), then
|
||||
# create the binding swexchange => swqueue
|
||||
topic_url = "rabbit://swexchange"
|
||||
sub_url = "rabbit://swqueue"
|
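The topic_url/sub_url above only name the exchange and queue; the broker address comes from the environment, as in this sketch:
```
export RABBIT_SERVER_URL="amqp://guest:guest@rabbitmq:5672/"
weed filer -master="master:9333"   # picks up notification.toml from the working dir or /etc/seaweedfs
```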
11
docker/compose/replication.toml
Normal file
@ -0,0 +1,11 @@
|
||||
[source.filer]
|
||||
enabled = true
|
||||
grpcAddress = "filer:18888"
|
||||
# all files under this directory tree are replicated.
|
||||
# this is not a directory on your hard drive, but on your filer.
|
||||
# i.e., all files with this "prefix" are sent to notification message queue.
|
||||
directory = "/buckets"
|
||||
|
||||
[sink.local_incremental]
|
||||
enabled = true
|
||||
directory = "/data"
|
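filer.replicate needs both notification.toml (where to read events from) and replication.toml (source and sink); a minimal invocation sketch matching the compose file above:
```
export RABBIT_SERVER_URL="amqp://guest:guest@rabbitmq:5672/"
weed -v=9 filer.replicate   # reads notification.toml and replication.toml from the working dir or /etc/seaweedfs
```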
105
docker/compose/s3.json
Normal file
@ -0,0 +1,105 @@
|
||||
{
|
||||
"identities": [
|
||||
{
|
||||
"name": "anonymous",
|
||||
"actions": [
|
||||
"Read"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "some_admin_user",
|
||||
"credentials": [
|
||||
{
|
||||
"accessKey": "some_access_key1",
|
||||
"secretKey": "some_secret_key1"
|
||||
}
|
||||
],
|
||||
"actions": [
|
||||
"Admin",
|
||||
"Read",
|
||||
"List",
|
||||
"Tagging",
|
||||
"Write"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "s3_tests",
|
||||
"credentials": [
|
||||
{
|
||||
"accessKey": "ABCDEFGHIJKLMNOPQRST",
|
||||
"secretKey": "abcdefghijklmnopqrstuvwxyzabcdefghijklmn"
|
||||
},
|
||||
{
|
||||
"accessKey": "0555b35654ad1656d804",
|
||||
"secretKey": "h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q=="
|
||||
}
|
||||
],
|
||||
"actions": [
|
||||
"Admin",
|
||||
"Read",
|
||||
"List",
|
||||
"Tagging",
|
||||
"Write"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "s3_tests_alt",
|
||||
"credentials": [
|
||||
{
|
||||
"accessKey": "NOPQRSTUVWXYZABCDEFG",
|
||||
"secretKey": "nopqrstuvwxyzabcdefghijklmnabcdefghijklm"
|
||||
}
|
||||
],
|
||||
"actions": [
|
||||
"Admin",
|
||||
"Read",
|
||||
"List",
|
||||
"Tagging",
|
||||
"Write"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "s3_tests_tenant",
|
||||
"credentials": [
|
||||
{
|
||||
"accessKey": "HIJKLMNOPQRSTUVWXYZA",
|
||||
"secretKey": "opqrstuvwxyzabcdefghijklmnopqrstuvwxyzab"
|
||||
}
|
||||
],
|
||||
"actions": [
|
||||
"Admin",
|
||||
"Read",
|
||||
"List",
|
||||
"Tagging",
|
||||
"Write"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "some_read_only_user",
|
||||
"credentials": [
|
||||
{
|
||||
"accessKey": "some_access_key2",
|
||||
"secretKey": "some_secret_key2"
|
||||
}
|
||||
],
|
||||
"actions": [
|
||||
"Read"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "some_normal_user",
|
||||
"credentials": [
|
||||
{
|
||||
"accessKey": "some_access_key3",
|
||||
"secretKey": "some_secret_key3"
|
||||
}
|
||||
],
|
||||
"actions": [
|
||||
"Read",
|
||||
"List",
|
||||
"Tagging",
|
||||
"Write"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
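These identities can be exercised with any S3 client; for example, with the aws CLI against the gateway port used in the compose files above (8333 here, an assumption):
```
export AWS_ACCESS_KEY_ID=some_access_key1
export AWS_SECRET_ACCESS_KEY=some_secret_key1
aws --endpoint-url http://localhost:8333 s3 mb s3://registry
aws --endpoint-url http://localhost:8333 s3 ls
```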
70
docker/compose/s3tests.conf
Normal file
@ -0,0 +1,70 @@
|
||||
[DEFAULT]
|
||||
## this section is just used for host, port and bucket_prefix
|
||||
|
||||
# host set for rgw in vstart.sh
|
||||
host = s3
|
||||
|
||||
# port set for rgw in vstart.sh
|
||||
port = 8000
|
||||
|
||||
## say "False" to disable TLS
|
||||
is_secure = False
|
||||
|
||||
[fixtures]
|
||||
## all the buckets created will start with this prefix;
|
||||
## {random} will be filled with random characters to pad
|
||||
## the prefix to 30 characters long, and avoid collisions
|
||||
bucket prefix = yournamehere-{random}-
|
||||
|
||||
[s3 main]
|
||||
# main display_name set in vstart.sh
|
||||
display_name = M. Tester
|
||||
|
||||
# main user_id set in vstart.sh
|
||||
user_id = testid
|
||||
|
||||
# main email set in vstart.sh
|
||||
email = tester@ceph.com
|
||||
|
||||
# zonegroup api_name for bucket location
|
||||
api_name = default
|
||||
|
||||
## main AWS access key
|
||||
access_key = 0555b35654ad1656d804
|
||||
|
||||
## main AWS secret key
|
||||
secret_key = h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q==
|
||||
|
||||
## replace with key id obtained when secret is created, or delete if KMS not tested
|
||||
#kms_keyid = 01234567-89ab-cdef-0123-456789abcdef
|
||||
|
||||
[s3 alt]
|
||||
# alt display_name set in vstart.sh
|
||||
display_name = john.doe
|
||||
## alt email set in vstart.sh
|
||||
email = john.doe@example.com
|
||||
|
||||
# alt user_id set in vstart.sh
|
||||
user_id = 56789abcdef0123456789abcdef0123456789abcdef0123456789abcdef01234
|
||||
|
||||
# alt AWS access key set in vstart.sh
|
||||
access_key = NOPQRSTUVWXYZABCDEFG
|
||||
|
||||
# alt AWS secret key set in vstart.sh
|
||||
secret_key = nopqrstuvwxyzabcdefghijklmnabcdefghijklm
|
||||
|
||||
[s3 tenant]
|
||||
# tenant display_name set in vstart.sh
|
||||
display_name = testx$tenanteduser
|
||||
|
||||
# tenant user_id set in vstart.sh
|
||||
user_id = 9876543210abcdef0123456789abcdef0123456789abcdef0123456789abcdef
|
||||
|
||||
# tenant AWS access key set in vstart.sh
|
||||
access_key = HIJKLMNOPQRSTUVWXYZA
|
||||
|
||||
# tenant AWS secret key set in vstart.sh
|
||||
secret_key = opqrstuvwxyzabcdefghijklmnopqrstuvwxyzab
|
||||
|
||||
# tenant email set in vstart.sh
|
||||
email = tenanteduser@example.com
|
84
docker/compose/swarm-etcd.yml
Normal file
@ -0,0 +1,84 @@
|
||||
# 2021-01-30 16:25:30
|
||||
version: '3.8'
|
||||
|
||||
services:
|
||||
|
||||
etcd:
|
||||
image: gasparekatapy/etcd
|
||||
networks:
|
||||
- net
|
||||
deploy:
|
||||
mode: replicated
|
||||
replicas: 3
|
||||
|
||||
master:
|
||||
image: chrislusf/seaweedfs:local
|
||||
environment:
|
||||
WEED_MASTER_FILER_DEFAULT: "filer:8888"
|
||||
WEED_MASTER_SEQUENCER_TYPE: "raft"
|
||||
ports:
|
||||
- "9333:9333"
|
||||
- "19333:19333"
|
||||
networks:
|
||||
- net
|
||||
command:
|
||||
- 'master'
|
||||
- '-resumeState=true'
|
||||
- '-ip=master'
|
||||
- '-port=9333'
|
||||
deploy:
|
||||
mode: replicated
|
||||
replicas: 1
|
||||
|
||||
filer:
|
||||
image: chrislusf/seaweedfs:local
|
||||
environment:
|
||||
WEED_LEVELDB2_ENABLED: "false"
|
||||
WEED_ETCD_ENABLED: "true"
|
||||
WEED_ETCD_SERVERS: "etcd:2379"
|
||||
ports:
|
||||
- target: 8888
|
||||
published: 8888
|
||||
protocol: tcp
|
||||
mode: host
|
||||
- target: 18888
|
||||
published: 18888
|
||||
protocol: tcp
|
||||
mode: host
|
||||
networks:
|
||||
- net
|
||||
command:
|
||||
- 'filer'
|
||||
- '-ip=filer'
|
||||
- '-port=8888'
|
||||
- '-port.readonly=28888'
|
||||
- '-master=master:9333'
|
||||
- '-disableDirListing=true'
|
||||
deploy:
|
||||
mode: replicated
|
||||
replicas: 1
|
||||
|
||||
volume:
|
||||
image: chrislusf/seaweedfs:local
|
||||
ports:
|
||||
- target: 8080
|
||||
published: 8080
|
||||
protocol: tcp
|
||||
mode: host
|
||||
- target: 18080
|
||||
published: 18080
|
||||
protocol: tcp
|
||||
mode: host
|
||||
networks:
|
||||
- net
|
||||
command:
|
||||
- 'volume'
|
||||
- '-mserver=master:9333'
|
||||
- '-port=8080'
|
||||
deploy:
|
||||
mode: global
|
||||
|
||||
###########################################################################
|
||||
|
||||
networks:
|
||||
net:
|
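Unlike the other compose files, this one targets swarm mode (version 3.8 with deploy/replicas), so it is deployed as a stack:
```
docker swarm init   # once per cluster, if the node is not already in a swarm
docker stack deploy -c docker/compose/swarm-etcd.yml seaweedfs
docker stack services seaweedfs
```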
14
docker/compose/tls.env
Normal file
@ -0,0 +1,14 @@
|
||||
WEED_GRPC_CA=/etc/seaweedfs/tls/SeaweedFS_CA.crt
|
||||
WEED_GRPC_ALLOWED_WILDCARD_DOMAIN=".dev"
|
||||
WEED_GRPC_MASTER_CERT=/etc/seaweedfs/tls/master01.dev.crt
|
||||
WEED_GRPC_MASTER_KEY=/etc/seaweedfs/tls/master01.dev.key
|
||||
WEED_GRPC_VOLUME_CERT=/etc/seaweedfs/tls/volume01.dev.crt
|
||||
WEED_GRPC_VOLUME_KEY=/etc/seaweedfs/tls/volume01.dev.key
|
||||
WEED_GRPC_FILER_CERT=/etc/seaweedfs/tls/filer01.dev.crt
|
||||
WEED_GRPC_FILER_KEY=/etc/seaweedfs/tls/filer01.dev.key
|
||||
WEED_GRPC_CLIENT_CERT=/etc/seaweedfs/tls/client01.dev.crt
|
||||
WEED_GRPC_CLIENT_KEY=/etc/seaweedfs/tls/client01.dev.key
|
||||
WEED_GRPC_MASTER_ALLOWED_COMMONNAMES="volume01.dev,master01.dev,filer01.dev,client01.dev"
|
||||
WEED_GRPC_VOLUME_ALLOWED_COMMONNAMES="volume01.dev,master01.dev,filer01.dev,client01.dev"
|
||||
WEED_GRPC_FILER_ALLOWED_COMMONNAMES="volume01.dev,master01.dev,filer01.dev,client01.dev"
|
||||
WEED_GRPC_CLIENT_ALLOWED_COMMONNAMES="volume01.dev,master01.dev,filer01.dev,client01.dev"
|
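This env file can be injected into any weed container together with the certificate directory; the ./tls path holding the referenced .crt/.key files is an assumption:
```
docker run --env-file docker/compose/tls.env \
  -v "$PWD/tls:/etc/seaweedfs/tls:ro" \
  chrislusf/seaweedfs master
```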
@ -1,43 +0,0 @@
|
||||
version: '2'
|
||||
|
||||
services:
|
||||
master:
|
||||
build:
|
||||
context: .
|
||||
dockerfile: Dockerfile.go_build
|
||||
ports:
|
||||
- 9333:9333
|
||||
- 19333:19333
|
||||
command: "master -ip=master"
|
||||
volume:
|
||||
build:
|
||||
context: .
|
||||
dockerfile: Dockerfile.go_build
|
||||
ports:
|
||||
- 8080:8080
|
||||
- 18080:18080
|
||||
command: 'volume -max=5 -mserver="master:9333" -port=8080'
|
||||
depends_on:
|
||||
- master
|
||||
filer:
|
||||
build:
|
||||
context: .
|
||||
dockerfile: Dockerfile.go_build
|
||||
ports:
|
||||
- 8888:8888
|
||||
- 18888:18888
|
||||
command: 'filer -master="master:9333"'
|
||||
depends_on:
|
||||
- master
|
||||
- volume
|
||||
s3:
|
||||
build:
|
||||
context: .
|
||||
dockerfile: Dockerfile.go_build
|
||||
ports:
|
||||
- 8333:8333
|
||||
command: 's3 -filer="filer:8888"'
|
||||
depends_on:
|
||||
- master
|
||||
- volume
|
||||
- filer
|
@ -1,55 +1,68 @@
|
||||
#!/bin/sh
|
||||
|
||||
isArgPassed() {
|
||||
arg="$1"
|
||||
argWithEqualSign="$1="
|
||||
shift
|
||||
while [ $# -gt 0 ]; do
|
||||
passedArg="$1"
|
||||
shift
|
||||
case $passedArg in
|
||||
$arg)
|
||||
return 0
|
||||
;;
|
||||
$argWithEqualSign*)
|
||||
return 0
|
||||
;;
|
||||
esac
|
||||
done
|
||||
return 1
|
||||
}
|
||||
|
||||
case "$1" in
|
||||
|
||||
'master')
|
||||
ARGS="-mdir /data"
|
||||
# Is this instance linked with an other master? (Docker commandline "--link master1:master")
|
||||
if [ -n "$MASTER_PORT_9333_TCP_ADDR" ] ; then
|
||||
ARGS="$ARGS -peers=$MASTER_PORT_9333_TCP_ADDR:$MASTER_PORT_9333_TCP_PORT"
|
||||
fi
|
||||
exec /usr/bin/weed $@ $ARGS
|
||||
ARGS="-mdir=/data -volumePreallocate -volumeSizeLimitMB=1024"
|
||||
shift
|
||||
exec /usr/bin/weed master $ARGS $@
|
||||
;;
|
||||
|
||||
'volume')
|
||||
ARGS="-ip `hostname -i` -dir /data"
|
||||
# Is this instance linked with a master? (Docker commandline "--link master1:master")
|
||||
if [ -n "$MASTER_PORT_9333_TCP_ADDR" ] ; then
|
||||
ARGS="$ARGS -mserver=$MASTER_PORT_9333_TCP_ADDR:$MASTER_PORT_9333_TCP_PORT"
|
||||
fi
|
||||
exec /usr/bin/weed $@ $ARGS
|
||||
ARGS="-dir=/data -max=0"
|
||||
if isArgPassed "-max" "$@"; then
|
||||
ARGS="-dir=/data"
|
||||
fi
|
||||
shift
|
||||
exec /usr/bin/weed volume $ARGS $@
|
||||
;;
|
||||
|
||||
'server')
|
||||
ARGS="-ip `hostname -i` -dir /data"
|
||||
if [ -n "$MASTER_PORT_9333_TCP_ADDR" ] ; then
|
||||
ARGS="$ARGS -master.peers=$MASTER_PORT_9333_TCP_ADDR:$MASTER_PORT_9333_TCP_PORT"
|
||||
fi
|
||||
exec /usr/bin/weed $@ $ARGS
|
||||
ARGS="-dir=/data -volume.max=0 -master.volumePreallocate -master.volumeSizeLimitMB=1024"
|
||||
if isArgPassed "-volume.max" "$@"; then
|
||||
ARGS="-dir=/data -master.volumePreallocate -master.volumeSizeLimitMB=1024"
|
||||
fi
|
||||
shift
|
||||
exec /usr/bin/weed server $ARGS $@
|
||||
;;
|
||||
|
||||
'filer')
|
||||
ARGS=""
|
||||
if [ -n "$MASTER_PORT_9333_TCP_ADDR" ] ; then
|
||||
ARGS="$ARGS -master=$MASTER_PORT_9333_TCP_ADDR:$MASTER_PORT_9333_TCP_PORT"
|
||||
fi
|
||||
exec /usr/bin/weed $@ $ARGS
|
||||
shift
|
||||
exec /usr/bin/weed filer $ARGS $@
|
||||
;;
|
||||
|
||||
's3')
|
||||
ARGS="-domainName=$S3_DOMAIN_NAME -key.file=$S3_KEY_FILE -cert.file=$S3_CERT_FILE"
|
||||
if [ -n "$FILER_PORT_8888_TCP_ADDR" ] ; then
|
||||
ARGS="$ARGS -filer=$FILER_PORT_8888_TCP_ADDR:$FILER_PORT_8888_TCP_PORT"
|
||||
fi
|
||||
exec /usr/bin/weed $@ $ARGS
|
||||
shift
|
||||
exec /usr/bin/weed s3 $ARGS $@
|
||||
;;
|
||||
|
||||
'cronjob')
|
||||
MASTER=${WEED_MASTER-localhost:9333}
|
||||
FIX_REPLICATION_CRON_SCHEDULE=${CRON_SCHEDULE-*/7 * * * * *}
|
||||
echo "$FIX_REPLICATION_CRON_SCHEDULE" 'echo "volume.fix.replication" | weed shell -master='$MASTER > /crontab
|
||||
echo "$FIX_REPLICATION_CRON_SCHEDULE" 'echo "lock; volume.fix.replication; unlock" | weed shell -master='$MASTER > /crontab
|
||||
BALANCING_CRON_SCHEDULE=${CRON_SCHEDULE-25 * * * * *}
|
||||
echo "$BALANCING_CRON_SCHEDULE" 'echo "volume.balance -c ALL -force" | weed shell -master='$MASTER >> /crontab
|
||||
echo "$BALANCING_CRON_SCHEDULE" 'echo "lock; volume.balance -collection ALL_COLLECTIONS -force; unlock" | weed shell -master='$MASTER >> /crontab
|
||||
echo "Running Crontab:"
|
||||
cat /crontab
|
||||
exec supercronic /crontab
|
||||
|
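With the rewritten entrypoint above, the subcommand is shifted off and the defaults are prepended, while isArgPassed drops a default when the user supplies the same flag. A sketch of how one invocation resolves:
```
docker run chrislusf/seaweedfs volume -max=10 -mserver=master:9333
# resolves to: exec /usr/bin/weed volume -dir=/data -max=10 -mserver=master:9333
# (the default -max=0 is dropped because -max was passed on the command line)
```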
30
docker/nginx/proxy.conf
Normal file
@ -0,0 +1,30 @@
|
||||
# HTTP 1.1 support
|
||||
proxy_http_version 1.1;
|
||||
#proxy_buffering off;
|
||||
proxy_set_header Host $http_host;
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_set_header Connection $proxy_connection;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto $proxy_x_forwarded_proto;
|
||||
proxy_set_header X-Forwarded-Ssl $proxy_x_forwarded_ssl;
|
||||
proxy_set_header X-Forwarded-Port $proxy_x_forwarded_port;
|
||||
|
||||
# Mitigate httpoxy attack (see README for details)
|
||||
proxy_set_header Proxy "";
|
||||
|
||||
# aws default max_concurrent_requests 10
|
||||
# aws default multipart_threshold 8MB
|
||||
proxy_buffering on; # buffer GET responses; the "X-Accel-Buffering" response header can enable or disable buffering per response
|
||||
proxy_buffers 64 1m; # buffers used for reading a response from the proxied server, for a single connection
|
||||
proxy_buffer_size 8k; # size of the buffer for the first part of the response from the proxied server
|
||||
proxy_busy_buffers_size 2m;
|
||||
|
||||
proxy_request_buffering on; # PUT buffering
|
||||
client_body_buffer_size 64m; # buffer size for reading client request body
|
||||
client_max_body_size 64m;
|
||||
|
||||
proxy_next_upstream error timeout non_idempotent http_500; # retry on the next upstream even for non-idempotent (PUT) requests
|
||||
proxy_connect_timeout 200ms;
|
||||
proxy_read_timeout 3s; #timeout is set only between two successive read operations
|
||||
proxy_send_timeout 3s; #timeout is set only between two successive write operations
|
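These settings are mounted into the jwilder/nginx-proxy "ingress" container defined in the registry compose section earlier, which routes by Host header (VIRTUAL_HOST=ingress). A quick routing check, assuming port 80 is published as in that compose file:
```
# an S3 XML error response (rather than a 503 from nginx-proxy) means routing works
curl -s -H "Host: ingress" http://localhost/ | head
```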
13
docker/prometheus/prometheus.yml
Normal file
@ -0,0 +1,13 @@
|
||||
global:
|
||||
scrape_interval: 30s
|
||||
scrape_timeout: 10s
|
||||
|
||||
scrape_configs:
|
||||
- job_name: services
|
||||
metrics_path: /metrics
|
||||
static_configs:
|
||||
- targets:
|
||||
- 'prometheus:9090'
|
||||
- 'volume:9325'
|
||||
- 'filer:9326'
|
||||
- 's3:9327'
|
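The targets correspond to the -metricsPort flags wired into the compose services; with the stack up, the endpoints can be checked directly (ports as mapped in the compose file below):
```
curl -s http://localhost:9325/metrics | head   # volume server metrics
curl -s http://localhost:9000/targets          # prometheus UI, host port 9000 -> container 9090
```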
@ -4,28 +4,30 @@ services:
|
||||
master:
|
||||
image: chrislusf/seaweedfs # use a remote image
|
||||
ports:
|
||||
- 9333:9333
|
||||
- 19333:19333
|
||||
- 9333:9333
|
||||
- 19333:19333
|
||||
command: "master -ip=master"
|
||||
volume:
|
||||
image: chrislusf/seaweedfs # use a remote image
|
||||
ports:
|
||||
- 8080:8080
|
||||
- 18080:18080
|
||||
command: 'volume -max=15 -mserver="master:9333" -port=8080'
|
||||
- 8080:8080
|
||||
- 18080:18080
|
||||
- 9325:9325
|
||||
command: 'volume -mserver="master:9333" -port=8080 -metricsPort=9325'
|
||||
depends_on:
|
||||
- master
|
||||
- master
|
||||
filer:
|
||||
image: chrislusf/seaweedfs # use a remote image
|
||||
ports:
|
||||
- 8888:8888
|
||||
- 18888:18888
|
||||
command: 'filer -master="master:9333"'
|
||||
- 8888:8888
|
||||
- 18888:18888
|
||||
- 9326:9326
|
||||
command: 'filer -master="master:9333" -metricsPort=9326'
|
||||
tty: true
|
||||
stdin_open: true
|
||||
depends_on:
|
||||
- master
|
||||
- volume
|
||||
- master
|
||||
- volume
|
||||
cronjob:
|
||||
image: chrislusf/seaweedfs # use a remote image
|
||||
command: 'cronjob'
|
||||
@ -34,14 +36,33 @@ services:
|
||||
CRON_SCHEDULE: '*/2 * * * * *' # Default: '*/5 * * * * *'
|
||||
WEED_MASTER: master:9333 # Default: localhost:9333
|
||||
depends_on:
|
||||
- master
|
||||
- volume
|
||||
- master
|
||||
- volume
|
||||
s3:
|
||||
image: chrislusf/seaweedfs # use a remote image
|
||||
ports:
|
||||
- 8333:8333
|
||||
command: 's3 -filer="filer:8888"'
|
||||
- 8333:8333
|
||||
- 9327:9327
|
||||
command: 's3 -filer="filer:8888" -metricsPort=9327'
|
||||
depends_on:
|
||||
- master
|
||||
- volume
|
||||
- filer
|
||||
- master
|
||||
- volume
|
||||
- filer
|
||||
webdav:
|
||||
image: chrislusf/seaweedfs # use a remote image
|
||||
ports:
|
||||
- 7333:7333
|
||||
command: 'webdav -filer="filer:8888"'
|
||||
depends_on:
|
||||
- master
|
||||
- volume
|
||||
- filer
|
||||
prometheus:
|
||||
image: prom/prometheus:v2.21.0
|
||||
ports:
|
||||
- 9000:9090
|
||||
volumes:
|
||||
- ./prometheus:/etc/prometheus
|
||||
command: --web.enable-lifecycle --config.file=/etc/prometheus/prometheus.yml
|
||||
depends_on:
|
||||
- s3
|
||||
|
44
docker/seaweedfs-dev-compose.yml
Normal file
@ -0,0 +1,44 @@
|
||||
version: '2'
|
||||
|
||||
services:
|
||||
master:
|
||||
image: chrislusf/seaweedfs:dev # use a remote dev image
|
||||
ports:
|
||||
- 9333:9333
|
||||
- 19333:19333
|
||||
command: "master -ip=master"
|
||||
volume:
|
||||
image: chrislusf/seaweedfs:dev # use a remote dev image
|
||||
ports:
|
||||
- 8080:8080
|
||||
- 18080:18080
|
||||
command: 'volume -mserver="master:9333" -port=8080 -ip=volume'
|
||||
depends_on:
|
||||
- master
|
||||
filer:
|
||||
image: chrislusf/seaweedfs:dev # use a remote dev image
|
||||
ports:
|
||||
- 8888:8888
|
||||
- 18888:18888
|
||||
command: 'filer -master="master:9333"'
|
||||
depends_on:
|
||||
- master
|
||||
- volume
|
||||
s3:
|
||||
image: chrislusf/seaweedfs:dev # use a remote dev image
|
||||
ports:
|
||||
- 8333:8333
|
||||
command: 's3 -filer="filer:8888"'
|
||||
depends_on:
|
||||
- master
|
||||
- volume
|
||||
- filer
|
||||
webdav:
|
||||
image: chrislusf/seaweedfs:dev # use a remote dev image
|
||||
ports:
|
||||
- 7333:7333
|
||||
command: 'webdav -filer="filer:8888"'
|
||||
depends_on:
|
||||
- master
|
||||
- volume
|
||||
- filer
|
12
docker/seaweedfs.sql
Normal file
@ -0,0 +1,12 @@
|
||||
CREATE DATABASE IF NOT EXISTS seaweedfs;
|
||||
CREATE USER IF NOT EXISTS 'seaweedfs'@'%' IDENTIFIED BY 'secret';
|
||||
GRANT ALL PRIVILEGES ON seaweedfs.* TO 'seaweedfs'@'%';
|
||||
FLUSH PRIVILEGES;
|
||||
USE seaweedfs;
|
||||
CREATE TABLE IF NOT EXISTS filemeta (
|
||||
dirhash BIGINT COMMENT 'first 64 bits of MD5 hash value of directory field',
|
||||
name VARCHAR(1000) COMMENT 'directory or file name',
|
||||
directory TEXT COMMENT 'full path to parent directory',
|
||||
meta LONGBLOB,
|
||||
PRIMARY KEY (dirhash, name)
|
||||
) DEFAULT CHARSET=utf8;
|
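A sketch of loading this schema into a local mysql before pointing the filer at it (the connection settings then go into filer.toml or the corresponding WEED_MYSQL_* environment overrides; the exact variable names are an assumption):
```
mysql -h 127.0.0.1 -u root -p < docker/seaweedfs.sql
```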
117
go.mod
@ -3,97 +3,106 @@ module github.com/chrislusf/seaweedfs
|
||||
go 1.12
|
||||
|
||||
require (
|
||||
cloud.google.com/go v0.44.3
|
||||
cloud.google.com/go v0.58.0 // indirect
|
||||
cloud.google.com/go/pubsub v1.3.1
|
||||
cloud.google.com/go/storage v1.9.0
|
||||
github.com/Azure/azure-amqp-common-go/v2 v2.1.0 // indirect
|
||||
github.com/Azure/azure-pipeline-go v0.2.2 // indirect
|
||||
github.com/Azure/azure-storage-blob-go v0.8.0
|
||||
github.com/DataDog/zstd v1.4.1 // indirect
|
||||
github.com/Azure/azure-storage-blob-go v0.9.0
|
||||
github.com/OneOfOne/xxhash v1.2.2
|
||||
github.com/Shopify/sarama v1.23.1
|
||||
github.com/aws/aws-sdk-go v1.23.13
|
||||
github.com/chrislusf/raft v0.0.0-20190225081310-10d6e2182d92
|
||||
github.com/coreos/etcd v3.3.15+incompatible // indirect
|
||||
github.com/aws/aws-sdk-go v1.34.30
|
||||
github.com/buraksezer/consistent v0.0.0-20191006190839-693edf70fd72
|
||||
github.com/bwmarrin/snowflake v0.3.0
|
||||
github.com/cespare/xxhash v1.1.0
|
||||
github.com/chrislusf/raft v1.0.6
|
||||
github.com/coreos/go-semver v0.3.0 // indirect
|
||||
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f // indirect
|
||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible
|
||||
github.com/disintegration/imaging v1.6.1
|
||||
github.com/disintegration/imaging v1.6.2
|
||||
github.com/dustin/go-humanize v1.0.0
|
||||
github.com/eapache/go-resiliency v1.2.0 // indirect
|
||||
github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a
|
||||
github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c // indirect
|
||||
github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 // indirect
|
||||
github.com/facebookgo/stats v0.0.0-20151006221625-1b76add642e4
|
||||
github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 // indirect
|
||||
github.com/fclairamb/ftpserverlib v0.8.0
|
||||
github.com/frankban/quicktest v1.7.2 // indirect
|
||||
github.com/gabriel-vasile/mimetype v1.0.0
|
||||
github.com/go-redis/redis v6.15.2+incompatible
|
||||
github.com/go-sql-driver/mysql v1.4.1
|
||||
github.com/go-errors/errors v1.1.1 // indirect
|
||||
github.com/go-redis/redis/v8 v8.4.4
|
||||
github.com/go-sql-driver/mysql v1.5.0
|
||||
github.com/gocql/gocql v0.0.0-20190829130954-e163eff7a8c6
|
||||
github.com/gogo/protobuf v1.2.2-0.20190730201129-28a6bbf47e48 // indirect
|
||||
github.com/golang/protobuf v1.3.2
|
||||
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e
|
||||
github.com/golang/protobuf v1.4.2
|
||||
github.com/google/btree v1.0.0
|
||||
github.com/google/uuid v1.1.1
|
||||
github.com/gorilla/mux v1.7.3
|
||||
github.com/gorilla/mux v1.7.4
|
||||
github.com/gorilla/websocket v1.4.1 // indirect
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.11.0 // indirect
|
||||
github.com/hashicorp/golang-lru v0.5.3 // indirect
|
||||
github.com/jacobsa/daemonize v0.0.0-20160101105449-e460293e890f
|
||||
github.com/jcmturner/gofork v1.0.0 // indirect
|
||||
github.com/karlseguin/ccache v2.0.3+incompatible
|
||||
github.com/karlseguin/expect v1.0.1 // indirect
|
||||
github.com/jinzhu/copier v0.2.8
|
||||
github.com/json-iterator/go v1.1.10
|
||||
github.com/karlseguin/ccache v2.0.3+incompatible // indirect
|
||||
github.com/karlseguin/ccache/v2 v2.0.7
|
||||
github.com/klauspost/compress v1.10.9 // indirect
|
||||
github.com/klauspost/cpuid v1.2.1 // indirect
|
||||
github.com/klauspost/crc32 v1.2.0
|
||||
github.com/klauspost/reedsolomon v1.9.2
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect
|
||||
github.com/kurin/blazer v0.5.3
|
||||
github.com/lib/pq v1.2.0
|
||||
github.com/lib/pq v1.10.0
|
||||
github.com/lunixbochs/vtclean v1.0.0 // indirect
|
||||
github.com/magiconair/properties v1.8.1 // indirect
|
||||
github.com/mattn/go-ieproxy v0.0.0-20190805055040-f9202b1cfdeb // indirect
|
||||
github.com/mattn/go-colorable v0.1.2 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.4 // indirect
|
||||
github.com/nats-io/nats-server/v2 v2.0.4 // indirect
|
||||
github.com/onsi/ginkgo v1.10.1 // indirect
|
||||
github.com/onsi/gomega v1.7.0 // indirect
|
||||
github.com/opentracing/opentracing-go v1.1.0 // indirect
|
||||
github.com/pelletier/go-toml v1.4.0 // indirect
|
||||
github.com/olivere/elastic/v7 v7.0.19
|
||||
github.com/peterh/liner v1.1.0
|
||||
github.com/pierrec/lz4 v2.2.7+incompatible // indirect
|
||||
github.com/pingcap/kvproto v0.0.0-20191022073741-81b2a2d9e986 // indirect
|
||||
github.com/pingcap/tidb v1.1.0-beta.0.20191023070859-58fc7d44f73b
|
||||
github.com/pingcap/tipb v0.0.0-20191022094114-a2e8c3fa634b // indirect
|
||||
github.com/prometheus/client_golang v1.1.0
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 // indirect
|
||||
github.com/prometheus/procfs v0.0.4 // indirect
|
||||
github.com/rakyll/statik v0.1.6
|
||||
github.com/prometheus/client_golang v1.3.0
|
||||
github.com/rakyll/statik v0.1.7
|
||||
github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563 // indirect
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237 // indirect
|
||||
github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd
|
||||
github.com/seaweedfs/fuse v0.0.0-20190510212405-310228904eff
|
||||
github.com/sirupsen/logrus v1.4.2 // indirect
|
||||
github.com/seaweedfs/fuse v1.1.4
|
||||
github.com/seaweedfs/goexif v1.0.2
|
||||
github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e
|
||||
github.com/spaolacci/murmur3 v1.1.0 // indirect
|
||||
github.com/spf13/afero v1.2.2 // indirect
|
||||
github.com/spf13/jwalterweatherman v1.1.0 // indirect
|
||||
github.com/spf13/viper v1.4.0
|
||||
github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271 // indirect
|
||||
github.com/stretchr/testify v1.4.0 // indirect
|
||||
github.com/streadway/amqp v0.0.0-20200108173154-1c71cc93ed71
|
||||
github.com/stretchr/testify v1.6.1
|
||||
github.com/syndtr/goleveldb v1.0.0
|
||||
github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c
|
||||
github.com/tidwall/gjson v1.3.2
|
||||
github.com/tidwall/match v1.0.1
|
||||
github.com/uber-go/atomic v1.4.0 // indirect
|
||||
github.com/uber/jaeger-client-go v2.17.0+incompatible // indirect
|
||||
github.com/uber/jaeger-lib v2.0.0+incompatible // indirect
|
||||
github.com/tsuna/gohbase v0.0.0-20201125011725-348991136365
|
||||
github.com/valyala/bytebufferpool v1.0.0
|
||||
github.com/valyala/fasthttp v1.20.0
|
||||
github.com/viant/assertly v0.5.4 // indirect
|
||||
github.com/viant/ptrie v0.3.0
|
||||
github.com/viant/toolbox v0.33.2 // indirect
|
||||
github.com/willf/bitset v1.1.10 // indirect
|
||||
github.com/willf/bloom v2.0.3+incompatible
|
||||
github.com/wsxiaoys/terminal v0.0.0-20160513160801-0940f3fc43a0 // indirect
|
||||
go.etcd.io/etcd v3.3.15+incompatible
|
||||
gocloud.dev v0.16.0
|
||||
gocloud.dev/pubsub/natspubsub v0.16.0
|
||||
gocloud.dev/pubsub/rabbitpubsub v0.16.0
|
||||
golang.org/x/image v0.0.0-20190829233526-b3c06291d021 // indirect
|
||||
golang.org/x/net v0.0.0-20190909003024-a7b16738d86b
|
||||
golang.org/x/sys v0.0.0-20190910064555-bbd175535a8b
|
||||
golang.org/x/tools v0.0.0-20190911022129-16c5e0f7d110
|
||||
google.golang.org/api v0.9.0
|
||||
google.golang.org/appengine v1.6.2 // indirect
|
||||
google.golang.org/grpc v1.23.0
|
||||
go.mongodb.org/mongo-driver v1.3.2
|
||||
gocloud.dev v0.20.0
|
||||
gocloud.dev/pubsub/natspubsub v0.20.0
|
||||
gocloud.dev/pubsub/rabbitpubsub v0.20.0
|
||||
golang.org/x/image v0.0.0-20200119044424-58c23975cae1 // indirect
|
||||
golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
|
||||
golang.org/x/sys v0.0.0-20201022201747-fb209a7c41cd
|
||||
golang.org/x/tools v0.0.0-20200608174601-1b747fd94509
|
||||
google.golang.org/api v0.26.0
|
||||
google.golang.org/grpc v1.29.1
|
||||
google.golang.org/protobuf v1.24.0
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect
|
||||
gopkg.in/jcmturner/goidentity.v3 v3.0.0 // indirect
|
||||
gopkg.in/jcmturner/gokrb5.v7 v7.3.0 // indirect
|
||||
gopkg.in/karlseguin/expect.v1 v1.0.1 // indirect
|
||||
sigs.k8s.io/yaml v1.1.0 // indirect
|
||||
)
|
||||
|
||||
replace github.com/satori/go.uuid v1.2.0 => github.com/satori/go.uuid v0.0.0-20181028125025-b2ce2384e17b
|
||||
// replace github.com/seaweedfs/fuse => /Users/chris/go/src/github.com/seaweedfs/fuse
|
||||
// replace github.com/chrislusf/raft => /Users/chris/go/src/github.com/chrislusf/raft
|
||||
|
||||
replace go.etcd.io/etcd => go.etcd.io/etcd v0.5.0-alpha.5.0.20200425165423-262c93980547
|
||||
|
48
k8s/README.md
Normal file
@ -0,0 +1,48 @@
|
||||
## SEAWEEDFS - helm chart (2.x)
|
||||
|
||||
### info:
|
||||
* master/filer/volume are stateful sets with anti-affinity on the hostname,
|
||||
so your deployment will be spread/HA.
|
||||
* the chart uses memsql (mysql-compatible) as the filer backend to enable HA (multiple filer instances)
|
||||
and the backup/HA capabilities memsql can provide.
|
||||
* the mysql user/password are created in a k8s secret (secret-seaweedfs-db.yaml) and injected into the filer
|
||||
with ENV.
|
||||
* cert config exists and can be enabled, but has not been tested.
|
||||
|
||||
### prerequisites
|
||||
kubernetes nodes need labels which define which node (host) will run which pod.
|
||||
|
||||
s3/filer/master needs the label **sw-backend=true**
|
||||
|
||||
volume needs the label **sw-volume=true**
|
||||
|
||||
to label a node to be able to run all pod types in k8s:
|
||||
```
|
||||
kubectl label node YOUR_NODE_NAME sw-volume=true,sw-backend=true
|
||||
```
|
||||
|
||||
in a production k8s deployment you will want each pod to run on a different host,
|
||||
especially the volume servers & the masters. currently all pods (master/volume/filer)
|
||||
have an anti-affinity rule to disallow running multiple pods of the same type on the same host.
|
||||
if you still want to run multiple pods of the same type (master/volume/filer) on the same host
|
||||
please set/update the corresponding affinity rule in values.yaml to an empty one:
|
||||
|
||||
```affinity: ""```
|
||||
|
||||
### PVC - storage class ###
|
||||
|
||||
the volume stateful set now supports K8S PVCs; the current example uses
|
||||
the simple local-path-provisioner from Rancher (included with k3d / k3s)
|
||||
https://github.com/rancher/local-path-provisioner
|
||||
|
||||
you can use ANY storage class you like, just update the correct storage-class
|
||||
for your deployment.
|
||||
|
||||
### current instances config (AIO):
|
||||
1 instance for each type (master/filer+s3/volume)
|
||||
|
||||
you can update the replica count for each node type in values.yaml,
|
||||
but you may need to add more nodes with the corresponding labels.
|
||||
|
||||
most of the configuration is available through values.yaml
|
||||
|
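After labeling nodes as described above, a typical install sketch (helm 3 syntax; the release name and label selector are assumptions):
```
kubectl label node YOUR_NODE_NAME sw-volume=true,sw-backend=true
helm install seaweedfs ./k8s/seaweedfs
kubectl get pods -l app=seaweedfs
```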
22
k8s/seaweedfs/.helmignore
Normal file
@ -0,0 +1,22 @@
|
||||
# Patterns to ignore when building packages.
|
||||
# This supports shell glob matching, relative path matching, and
|
||||
# negation (prefixed with !). Only one pattern per line.
|
||||
.DS_Store
|
||||
# Common VCS dirs
|
||||
.git/
|
||||
.gitignore
|
||||
.bzr/
|
||||
.bzrignore
|
||||
.hg/
|
||||
.hgignore
|
||||
.svn/
|
||||
# Common backup files
|
||||
*.swp
|
||||
*.bak
|
||||
*.tmp
|
||||
*~
|
||||
# Various IDEs
|
||||
.project
|
||||
.idea/
|
||||
*.tmproj
|
||||
.vscode/
|
5
k8s/seaweedfs/Chart.yaml
Normal file
@ -0,0 +1,5 @@
|
||||
apiVersion: v1
|
||||
description: SeaweedFS
|
||||
name: seaweedfs
|
||||
appVersion: "2.41"
|
||||
version: 2.41
|
1856
k8s/seaweedfs/dashboards/seaweedfs-grafana-dashboard.json
Normal file
File diff suppressed because it is too large
151
k8s/seaweedfs/templates/_helpers.tpl
Normal file
@ -0,0 +1,151 @@
|
||||
{{/*
|
||||
Create a default fully qualified app name.
|
||||
We truncate at 63 chars because some Kubernetes name fields are limited to
|
||||
this (by the DNS naming spec). If release name contains chart name it will
|
||||
be used as a full name.
|
||||
*/}}
|
||||
{{- define "seaweedfs.fullname" -}}
|
||||
{{- if .Values.fullnameOverride -}}
|
||||
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
|
||||
{{- else -}}
|
||||
{{- $name := default .Chart.Name .Values.nameOverride -}}
|
||||
{{- if contains $name .Release.Name -}}
|
||||
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
|
||||
{{- else -}}
|
||||
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create chart name and version as used by the chart label.
|
||||
*/}}
|
||||
{{- define "seaweedfs.chart" -}}
|
||||
{{- printf "%s-helm" .Chart.Name | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Expand the name of the chart.
|
||||
*/}}
|
||||
{{- define "seaweedfs.name" -}}
|
||||
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Inject extra environment vars in the format key:value, if populated
|
||||
*/}}
|
||||
{{- define "seaweedfs.extraEnvironmentVars" -}}
|
||||
{{- if .extraEnvironmentVars -}}
|
||||
{{- range $key, $value := .extraEnvironmentVars }}
|
||||
- name: {{ $key }}
|
||||
value: {{ $value | quote }}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/* Return the proper filer image */}}
|
||||
{{- define "filer.image" -}}
|
||||
{{- if .Values.filer.imageOverride -}}
|
||||
{{- $imageOverride := .Values.filer.imageOverride -}}
|
||||
{{- printf "%s" $imageOverride -}}
|
||||
{{- else -}}
|
||||
{{- $registryName := default .Values.image.registry .Values.global.localRegistry | toString -}}
|
||||
{{- $repositoryName := .Values.image.repository | toString -}}
|
||||
{{- $name := .Values.global.imageName | toString -}}
|
||||
{{- $tag := .Chart.AppVersion | toString -}}
|
||||
{{- printf "%s%s%s:%s" $registryName $repositoryName $name $tag -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/* Return the proper dbSchema image */}}
|
||||
{{- define "filer.dbSchema.image" -}}
|
||||
{{- if .Values.filer.dbSchema.imageOverride -}}
|
||||
{{- $imageOverride := .Values.filer.dbSchema.imageOverride -}}
|
||||
{{- printf "%s" $imageOverride -}}
|
||||
{{- else -}}
|
||||
{{- $registryName := default .Values.global.registry .Values.global.localRegistry | toString -}}
|
||||
{{- $repositoryName := .Values.global.repository | toString -}}
|
||||
{{- $name := .Values.filer.dbSchema.imageName | toString -}}
|
||||
{{- $tag := .Values.filer.dbSchema.imageTag | toString -}}
|
||||
{{- printf "%s%s%s:%s" $registryName $repositoryName $name $tag -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/* Return the proper master image */}}
|
||||
{{- define "master.image" -}}
|
||||
{{- if .Values.master.imageOverride -}}
|
||||
{{- $imageOverride := .Values.master.imageOverride -}}
|
||||
{{- printf "%s" $imageOverride -}}
|
||||
{{- else -}}
|
||||
{{- $registryName := default .Values.image.registry .Values.global.localRegistry | toString -}}
|
||||
{{- $repositoryName := .Values.image.repository | toString -}}
|
||||
{{- $name := .Values.global.imageName | toString -}}
|
||||
{{- $tag := .Chart.AppVersion | toString -}}
|
||||
{{- printf "%s%s%s:%s" $registryName $repositoryName $name $tag -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/* Return the proper s3 image */}}
|
||||
{{- define "s3.image" -}}
|
||||
{{- if .Values.s3.imageOverride -}}
|
||||
{{- $imageOverride := .Values.s3.imageOverride -}}
|
||||
{{- printf "%s" $imageOverride -}}
|
||||
{{- else -}}
|
||||
{{- $registryName := default .Values.image.registry .Values.global.localRegistry | toString -}}
|
||||
{{- $repositoryName := .Values.image.repository | toString -}}
|
||||
{{- $name := .Values.global.imageName | toString -}}
|
||||
{{- $tag := .Chart.AppVersion | toString -}}
|
||||
{{- printf "%s%s%s:%s" $registryName $repositoryName $name $tag -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/* Return the proper volume image */}}
|
||||
{{- define "volume.image" -}}
|
||||
{{- if .Values.volume.imageOverride -}}
|
||||
{{- $imageOverride := .Values.volume.imageOverride -}}
|
||||
{{- printf "%s" $imageOverride -}}
|
||||
{{- else -}}
|
||||
{{- $registryName := default .Values.image.registry .Values.global.localRegistry | toString -}}
|
||||
{{- $repositoryName := .Values.image.repository | toString -}}
|
||||
{{- $name := .Values.global.imageName | toString -}}
|
||||
{{- $tag := .Chart.AppVersion | toString -}}
|
||||
{{- printf "%s%s%s:%s" $registryName $repositoryName $name $tag -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/* Return the proper cronjob image */}}
|
||||
{{- define "cronjob.image" -}}
|
||||
{{- if .Values.cronjob.imageOverride -}}
|
||||
{{- $imageOverride := .Values.cronjob.imageOverride -}}
|
||||
{{- printf "%s" $imageOverride -}}
|
||||
{{- else -}}
|
||||
{{- $registryName := default .Values.image.registry .Values.global.localRegistry | toString -}}
|
||||
{{- $repositoryName := .Values.image.repository | toString -}}
|
||||
{{- $name := .Values.global.imageName | toString -}}
|
||||
{{- $tag := .Chart.AppVersion | toString -}}
|
||||
{{- printf "%s%s%s:%s" $registryName $repositoryName $name $tag -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
|
||||
{{/* check if any PVC exists */}}
|
||||
{{- define "volume.pvc_exists" -}}
|
||||
{{- if or (or (eq .Values.volume.data.type "persistentVolumeClaim") (and (eq .Values.volume.idx.type "persistentVolumeClaim") .Values.volume.dir_idx )) (eq .Values.volume.logs.type "persistentVolumeClaim") -}}
|
||||
{{- printf "true" -}}
|
||||
{{- else -}}
|
||||
{{- printf "false" -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/* check if any HostPath exists */}}
|
||||
{{- define "volume.hostpath_exists" -}}
|
||||
{{- if or (or (eq .Values.volume.data.type "hostPath") (and (eq .Values.volume.idx.type "hostPath") .Values.volume.dir_idx )) (eq .Values.volume.logs.type "hostPath") -}}
|
||||
{{- printf "true" -}}
|
||||
{{- else -}}
|
||||
{{- if or .Values.global.enableSecurity .Values.volume.extraVolumes -}}
|
||||
{{- printf "true" -}}
|
||||
{{- else -}}
|
||||
{{- printf "false" -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
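To see how these image and PVC helpers expand without deploying anything, the chart can be rendered locally:
```
helm template ./k8s/seaweedfs | grep 'image:' | sort -u
```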
14
k8s/seaweedfs/templates/ca-cert.yaml
Normal file
@ -0,0 +1,14 @@
|
||||
{{- if .Values.global.enableSecurity }}
|
||||
apiVersion: certmanager.k8s.io/v1alpha1
|
||||
kind: Certificate
|
||||
metadata:
|
||||
name: {{ template "seaweedfs.name" . }}-ca-cert
|
||||
namespace: {{ .Release.Namespace }}
|
||||
spec:
|
||||
secretName: {{ template "seaweedfs.name" . }}-ca-cert
|
||||
commonName: "{{ template "seaweedfs.name" . }}-root-ca"
|
||||
isCA: true
|
||||
issuerRef:
|
||||
name: {{ template "seaweedfs.name" . }}-clusterissuer
|
||||
kind: ClusterIssuer
|
||||
{{- end }}
|
8
k8s/seaweedfs/templates/cert-clusterissuer.yaml
Normal file
@ -0,0 +1,8 @@
|
||||
{{- if .Values.global.enableSecurity }}
|
||||
apiVersion: certmanager.k8s.io/v1alpha1
|
||||
kind: ClusterIssuer
|
||||
metadata:
|
||||
name: {{ template "seaweedfs.name" . }}-clusterissuer
|
||||
spec:
|
||||
selfSigned: {}
|
||||
{{- end }}
|
33
k8s/seaweedfs/templates/client-cert.yaml
Normal file
@ -0,0 +1,33 @@
|
||||
{{- if .Values.global.enableSecurity }}
|
||||
apiVersion: certmanager.k8s.io/v1alpha1
|
||||
kind: Certificate
|
||||
metadata:
|
||||
name: {{ template "seaweedfs.name" . }}-client-cert
|
||||
namespace: {{ .Release.Namespace }}
|
||||
spec:
|
||||
secretName: {{ template "seaweedfs.name" . }}-client-cert
|
||||
issuerRef:
|
||||
name: {{ template "seaweedfs.name" . }}-clusterissuer
|
||||
kind: ClusterIssuer
|
||||
commonName: {{ .Values.certificates.commonName }}
|
||||
organization:
|
||||
- "SeaweedFS CA"
|
||||
dnsNames:
|
||||
- '*.{{ .Release.Namespace }}'
|
||||
- '*.{{ .Release.Namespace }}.svc'
|
||||
- '*.{{ .Release.Namespace }}.svc.cluster.local'
|
||||
- '*.{{ template "seaweedfs.name" . }}-master'
|
||||
- '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}'
|
||||
- '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}.svc'
|
||||
- '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}.svc.cluster.local'
|
||||
{{- if .Values.certificates.ipAddresses }}
|
||||
ipAddresses:
|
||||
{{- range .Values.certificates.ipAddresses }}
|
||||
- {{ . }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
keyAlgorithm: {{ .Values.certificates.keyAlgorithm }}
|
||||
keySize: {{ .Values.certificates.keySize }}
|
||||
duration: {{ .Values.certificates.duration }}
|
||||
renewBefore: {{ .Values.certificates.renewBefore }}
|
||||
{{- end }}
|
58
k8s/seaweedfs/templates/cronjob.yaml
Normal file
@ -0,0 +1,58 @@
|
||||
{{- if .Values.cronjob }}
|
||||
{{- if .Values.cronjob.enabled }}
|
||||
apiVersion: batch/v1beta1
|
||||
kind: CronJob
|
||||
metadata:
|
||||
name: {{ include "seaweedfs.fullname" . }}-cronjob
|
||||
spec:
|
||||
schedule: "{{ .Values.cronjob.schedule }}"
|
||||
startingDeadlineSeconds: 200
|
||||
concurrencyPolicy: Forbid
|
||||
failedJobsHistoryLimit: 2
|
||||
successfulJobsHistoryLimit: 2
|
||||
jobTemplate:
|
||||
spec:
|
||||
backoffLimit: 2
|
||||
template:
|
||||
spec:
|
||||
{{- if .Values.cronjob.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{ tpl .Values.cronjob.nodeSelector . | indent 12 | trim }}
|
||||
{{- end }}
|
||||
{{- if .Values.cronjob.tolerations }}
|
||||
tolerations:
|
||||
{{ tpl .Values.cronjob.tolerations . | nindent 12 | trim }}
|
||||
{{- end }}
|
||||
restartPolicy: OnFailure
|
||||
containers:
|
||||
- name: shell
|
||||
image: {{ template "cronjob.image" . }}
|
||||
imagePullPolicy: {{ default "IfNotPresent" .Values.global.imagePullPolicy }}
|
||||
resources:
|
||||
{{- toYaml .Values.cronjob.resources| nindent 16 }}
|
||||
command:
|
||||
- sh
|
||||
- -c
|
||||
- |
|
||||
set -ex
|
||||
echo -e "lock\n\
|
||||
volume.balance -force \
|
||||
{{ if .Values.volume.dataCenter }} -dataCenter {{ .Values.volume.dataCenter }}{{ end }}\
|
||||
{{ if .Values.cronjob.collection }} -collection {{ .Values.cronjob.collection }}{{ end }}\n\
|
||||
{{- if .Values.cronjob.enableFixReplication }}
|
||||
volume.fix.replication -collectionPattern={{ .Values.cronjob.collectionPattern }} \n\
|
||||
{{- end }}
|
||||
unlock\n" | \
|
||||
/usr/bin/weed shell \
|
||||
{{- if .Values.cronjob.master }}
|
||||
-master {{ .Values.cronjob.master }} \
|
||||
{{- else }}
|
||||
-master {{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}.svc:{{ .Values.master.port }} \
|
||||
{{- end }}
|
||||
{{- if .Values.cronjob.filer }}
|
||||
-filer {{ .Values.cronjob.filer }}
|
||||
{{- else }}
|
||||
-filer {{ template "seaweedfs.name" . }}-filer.{{ .Release.Namespace }}.svc:{{ .Values.filer.port }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
33
k8s/seaweedfs/templates/filer-cert.yaml
Normal file
@ -0,0 +1,33 @@
|
||||
{{- if .Values.global.enableSecurity }}
|
||||
apiVersion: certmanager.k8s.io/v1alpha1
|
||||
kind: Certificate
|
||||
metadata:
|
||||
name: {{ template "seaweedfs.name" . }}-filer-cert
|
||||
namespace: {{ .Release.Namespace }}
|
||||
spec:
|
||||
secretName: {{ template "seaweedfs.name" . }}-filer-cert
|
||||
issuerRef:
|
||||
name: {{ template "seaweedfs.name" . }}-clusterissuer
|
||||
kind: ClusterIssuer
|
||||
commonName: {{ .Values.certificates.commonName }}
|
||||
organization:
|
||||
- "SeaweedFS CA"
|
||||
dnsNames:
|
||||
- '*.{{ .Release.Namespace }}'
|
||||
- '*.{{ .Release.Namespace }}.svc'
|
||||
- '*.{{ .Release.Namespace }}.svc.cluster.local'
|
||||
- '*.{{ template "seaweedfs.name" . }}-master'
|
||||
- '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}'
|
||||
- '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}.svc'
|
||||
- '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}.svc.cluster.local'
|
||||
{{- if .Values.certificates.ipAddresses }}
|
||||
ipAddresses:
|
||||
{{- range .Values.certificates.ipAddresses }}
|
||||
- {{ . }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
keyAlgorithm: {{ .Values.certificates.keyAlgorithm }}
|
||||
keySize: {{ .Values.certificates.keySize }}
|
||||
duration: {{ .Values.certificates.duration }}
|
||||
renewBefore: {{ .Values.certificates.renewBefore }}
|
||||
{{- end }}
|
31
k8s/seaweedfs/templates/filer-service-client.yaml
Normal file
@ -0,0 +1,31 @@
apiVersion: v1
kind: Service
metadata:
  name: {{ template "seaweedfs.name" . }}-filer-client
  namespace: {{ .Release.Namespace }}
  labels:
    app: {{ template "seaweedfs.name" . }}
    component: filer
{{- if .Values.filer.metricsPort }}
    monitoring: "true"
{{- end }}
spec:
  clusterIP: None
  ports:
  - name: "swfs-filer"
    port: {{ .Values.filer.port }}
    targetPort: {{ .Values.filer.port }}
    protocol: TCP
  - name: "swfs-filer-grpc"
    port: {{ .Values.filer.grpcPort }}
    targetPort: {{ .Values.filer.grpcPort }}
    protocol: TCP
{{- if .Values.filer.metricsPort }}
  - name: "swfs-filer-metrics"
    port: {{ .Values.filer.metricsPort }}
    targetPort: {{ .Values.filer.metricsPort }}
    protocol: TCP
{{- end }}
  selector:
    app: {{ template "seaweedfs.name" . }}
    component: filer
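Since this client Service is headless (clusterIP: None), a quick way to reach the filer from a workstation is a port-forward; a sketch assuming the rendered chart name is seaweedfs and filer.port is 8888:

kubectl -n <namespace> port-forward svc/seaweedfs-filer-client 8888:8888
curl http://localhost:8888/   # the same root path the filer probes use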
25
k8s/seaweedfs/templates/filer-service.yaml
Normal file
@ -0,0 +1,25 @@
apiVersion: v1
kind: Service
metadata:
  annotations:
    service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
  name: {{ template "seaweedfs.name" . }}-filer
  namespace: {{ .Release.Namespace }}
  labels:
    app: {{ template "seaweedfs.name" . }}
    component: filer
spec:
  clusterIP: None
  publishNotReadyAddresses: true
  ports:
  - name: "swfs-filer"
    port: {{ .Values.filer.port }}
    targetPort: {{ .Values.filer.port }}
    protocol: TCP
  - name: "swfs-filer-grpc"
    port: {{ .Values.filer.grpcPort }}
    targetPort: {{ .Values.filer.grpcPort }}
    protocol: TCP
  selector:
    app: {{ template "seaweedfs.name" . }}
    component: filer
18
k8s/seaweedfs/templates/filer-servicemonitor.yaml
Normal file
@ -0,0 +1,18 @@
{{- if .Values.filer.metricsPort }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: {{ template "seaweedfs.name" . }}-filer
  namespace: {{ .Release.Namespace }}
  labels:
    app: {{ template "seaweedfs.name" . }}
    component: filer
spec:
  endpoints:
  - interval: 30s
    port: swfs-filer-metrics
    scrapeTimeout: 5s
  selector:
    matchLabels:
      app: {{ template "seaweedfs.name" . }}
      component: filer
{{- end }}
267
k8s/seaweedfs/templates/filer-statefulset.yaml
Normal file
@ -0,0 +1,267 @@
{{- if .Values.filer.enabled }}
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: {{ template "seaweedfs.name" . }}-filer
  namespace: {{ .Release.Namespace }}
  labels:
    app: {{ template "seaweedfs.name" . }}
    chart: {{ template "seaweedfs.chart" . }}
    heritage: {{ .Release.Service }}
    release: {{ .Release.Name }}
spec:
  serviceName: {{ template "seaweedfs.name" . }}-filer
  podManagementPolicy: Parallel
  replicas: {{ .Values.filer.replicas }}
  {{- if (gt (int .Values.filer.updatePartition) 0) }}
  updateStrategy:
    type: RollingUpdate
    rollingUpdate:
      partition: {{ .Values.filer.updatePartition }}
  {{- end }}
  selector:
    matchLabels:
      app: {{ template "seaweedfs.name" . }}
      chart: {{ template "seaweedfs.chart" . }}
      release: {{ .Release.Name }}
      component: filer
  template:
    metadata:
      labels:
        app: {{ template "seaweedfs.name" . }}
        chart: {{ template "seaweedfs.chart" . }}
        release: {{ .Release.Name }}
        component: filer
    spec:
      restartPolicy: {{ default .Values.global.restartPolicy .Values.filer.restartPolicy }}
      {{- if .Values.filer.affinity }}
      affinity:
        {{ tpl .Values.filer.affinity . | nindent 8 | trim }}
      {{- end }}
      {{- if .Values.filer.tolerations }}
      tolerations:
        {{ tpl .Values.filer.tolerations . | nindent 8 | trim }}
      {{- end }}
      {{- if .Values.global.imagePullSecrets }}
      imagePullSecrets:
        - name: {{ .Values.global.imagePullSecrets }}
      {{- end }}
      serviceAccountName: seaweefds-rw-sa # hack to allow deleting master pods after migration
      terminationGracePeriodSeconds: 60
      {{- if .Values.filer.priorityClassName }}
      priorityClassName: {{ .Values.filer.priorityClassName | quote }}
      {{- end }}
      enableServiceLinks: false
      containers:
        - name: seaweedfs
          image: {{ template "filer.image" . }}
          imagePullPolicy: {{ default "IfNotPresent" .Values.global.imagePullPolicy }}
          env:
            - name: POD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: WEED_MYSQL_USERNAME
              valueFrom:
                secretKeyRef:
                  name: secret-seaweedfs-db
                  key: user
            - name: WEED_MYSQL_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: secret-seaweedfs-db
                  key: password
            - name: SEAWEEDFS_FULLNAME
              value: "{{ template "seaweedfs.name" . }}"
            {{- if .Values.filer.extraEnvironmentVars }}
            {{- range $key, $value := .Values.filer.extraEnvironmentVars }}
            - name: {{ $key }}
              value: {{ $value | quote }}
            {{- end }}
            {{- end }}
            {{- if .Values.global.extraEnvironmentVars }}
            {{- range $key, $value := .Values.global.extraEnvironmentVars }}
            - name: {{ $key }}
              value: {{ $value | quote }}
            {{- end }}
            {{- end }}
          command:
            - "/bin/sh"
            - "-ec"
            - |
              exec /usr/bin/weed -logdir=/logs \
              {{- if .Values.filer.loggingOverrideLevel }}
              -v={{ .Values.filer.loggingOverrideLevel }} \
              {{- else }}
              -v={{ .Values.global.loggingLevel }} \
              {{- end }}
              filer \
              -port={{ .Values.filer.port }} \
              {{- if .Values.filer.metricsPort }}
              -metricsPort {{ .Values.filer.metricsPort }} \
              {{- end }}
              {{- if .Values.filer.redirectOnRead }}
              -redirectOnRead \
              {{- end }}
              {{- if .Values.filer.disableHttp }}
              -disableHttp \
              {{- end }}
              {{- if .Values.filer.disableDirListing }}
              -disableDirListing \
              {{- end }}
              -dirListLimit={{ .Values.filer.dirListLimit }} \
              {{- if .Values.global.enableReplication }}
              -defaultReplicaPlacement={{ .Values.global.replicationPlacment }} \
              {{- else }}
              -defaultReplicaPlacement={{ .Values.filer.defaultReplicaPlacement }} \
              {{- end }}
              {{- if .Values.filer.maxMB }}
              -maxMB={{ .Values.filer.maxMB }} \
              {{- end }}
              {{- if .Values.filer.encryptVolumeData }}
              -encryptVolumeData \
              {{- end }}
              -ip=${POD_IP} \
              {{- if .Values.filer.enable_peers }}
              {{- if gt (.Values.filer.replicas | int) 1 }}
              -peers=$(echo -n "{{ range $index := until (.Values.filer.replicas | int) }}${SEAWEEDFS_FULLNAME}-filer-{{ $index }}.${SEAWEEDFS_FULLNAME}-filer:{{ $.Values.filer.port }}{{ if lt $index (sub ($.Values.filer.replicas | int) 1) }},{{ end }}{{ end }}" | sed "s/$HOSTNAME.${SEAWEEDFS_FULLNAME}-filer:{{ $.Values.filer.port }}//" | sed 's/,$//; s/^,//; s/,,/,/;') \
              {{- end }}
              {{- end }}
              {{- if .Values.filer.s3.enabled }}
              -s3 \
              -s3.port={{ .Values.filer.s3.port }} \
              {{- if .Values.filer.s3.domainName }}
              -s3.domainName={{ .Values.filer.s3.domainName }} \
              {{- end }}
              {{- if .Values.global.enableSecurity }}
              -s3.cert.file=/usr/local/share/ca-certificates/client/tls.crt \
              -s3.key.file=/usr/local/share/ca-certificates/client/tls.key \
              {{- end }}
              {{- if .Values.filer.s3.allowEmptyFolder }}
              -s3.allowEmptyFolder={{ .Values.filer.s3.allowEmptyFolder }} \
              {{- end }}
              {{- if .Values.filer.s3.enableAuth }}
              -s3.config=/etc/sw/seaweedfs_s3_config \
              {{- end }}
              {{- end }}
              -master={{ range $index := until (.Values.master.replicas | int) }}${SEAWEEDFS_FULLNAME}-master-{{ $index }}.${SEAWEEDFS_FULLNAME}-master:{{ $.Values.master.port }}{{ if lt $index (sub ($.Values.master.replicas | int) 1) }},{{ end }}{{ end }}
          {{- if or (.Values.global.enableSecurity) (.Values.filer.extraVolumeMounts) }}
          volumeMounts:
            - name: seaweedfs-filer-log-volume
              mountPath: "/logs/"
            - mountPath: /etc/sw
              name: config-users
              readOnly: true
            {{- if .Values.global.enableSecurity }}
            - name: security-config
              readOnly: true
              mountPath: /etc/seaweedfs/security.toml
              subPath: security.toml
            - name: ca-cert
              readOnly: true
              mountPath: /usr/local/share/ca-certificates/ca/
            - name: master-cert
              readOnly: true
              mountPath: /usr/local/share/ca-certificates/master/
            - name: volume-cert
              readOnly: true
              mountPath: /usr/local/share/ca-certificates/volume/
            - name: filer-cert
              readOnly: true
              mountPath: /usr/local/share/ca-certificates/filer/
            - name: client-cert
              readOnly: true
              mountPath: /usr/local/share/ca-certificates/client/
            {{- end }}
            {{ tpl .Values.filer.extraVolumeMounts . | nindent 12 | trim }}
          {{- end }}
          ports:
            - containerPort: {{ .Values.filer.port }}
              name: swfs-filer
            - containerPort: {{ .Values.filer.grpcPort }}
              #name: swfs-filer-grpc
          readinessProbe:
            httpGet:
              path: /
              port: {{ .Values.filer.port }}
              scheme: HTTP
            initialDelaySeconds: 10
            periodSeconds: 15
            successThreshold: 1
            failureThreshold: 100
            timeoutSeconds: 10
          livenessProbe:
            httpGet:
              path: /
              port: {{ .Values.filer.port }}
              scheme: HTTP
            initialDelaySeconds: 20
            periodSeconds: 30
            successThreshold: 1
            failureThreshold: 5
            timeoutSeconds: 10
          {{- if .Values.filer.resources }}
          resources:
            {{ tpl .Values.filer.resources . | nindent 12 | trim }}
          {{- end }}
      volumes:
        - name: seaweedfs-filer-log-volume
          hostPath:
            path: /storage/logs/seaweedfs/filer
            type: DirectoryOrCreate
        - name: db-schema-config-volume
          configMap:
            name: seaweedfs-db-init-config
        - name: config-users
          secret:
            defaultMode: 420
            secretName: seaweedfs-s3-secret
        {{- if .Values.global.enableSecurity }}
        - name: security-config
          configMap:
            name: {{ template "seaweedfs.name" . }}-security-config
        - name: ca-cert
          secret:
            secretName: {{ template "seaweedfs.name" . }}-ca-cert
        - name: master-cert
          secret:
            secretName: {{ template "seaweedfs.name" . }}-master-cert
        - name: volume-cert
          secret:
            secretName: {{ template "seaweedfs.name" . }}-volume-cert
        - name: filer-cert
          secret:
            secretName: {{ template "seaweedfs.name" . }}-filer-cert
        - name: client-cert
          secret:
            secretName: {{ template "seaweedfs.name" . }}-client-cert
        {{- end }}
        {{ tpl .Values.filer.extraVolumes . | indent 8 | trim }}
      {{- if .Values.filer.nodeSelector }}
      nodeSelector:
        {{ tpl .Values.filer.nodeSelector . | indent 8 | trim }}
      {{- end }}
{{/* volumeClaimTemplates:*/}}
{{/* - metadata:*/}}
{{/*     name: data-{{ .Release.Namespace }}*/}}
{{/*   spec:*/}}
{{/*     accessModes:*/}}
{{/*     - ReadWriteOnce*/}}
{{/*     resources:*/}}
{{/*       requests:*/}}
{{/*         storage: {{ .Values.filer.storage }}*/}}
{{/*   {{- if .Values.filer.storageClass }}*/}}
{{/*   storageClassName: {{ .Values.filer.storageClass }}*/}}
{{/*   {{- end }}*/}}
{{- end }}
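As a worked example of the -peers expression above: assuming the chart renders SEAWEEDFS_FULLNAME=seaweedfs with filer.replicas=3 and filer.port=8888, the template first emits the full peer list for all three pods, and the sed pipeline then strips the pod's own entry and cleans up the leftover comma. On pod seaweedfs-filer-1 the flag therefore becomes:

-peers=seaweedfs-filer-0.seaweedfs-filer:8888,seaweedfs-filer-2.seaweedfs-filer:8888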
59
k8s/seaweedfs/templates/ingress.yaml
Normal file
@ -0,0 +1,59 @@
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: ingress-{{ template "seaweedfs.name" . }}-filer
  annotations:
    kubernetes.io/ingress.class: "nginx"
    nginx.ingress.kubernetes.io/auth-type: "basic"
    nginx.ingress.kubernetes.io/auth-secret: "default/ingress-basic-auth-secret"
    nginx.ingress.kubernetes.io/auth-realm: 'Authentication Required - SW-Filer'
    nginx.ingress.kubernetes.io/service-upstream: "true"
    nginx.ingress.kubernetes.io/rewrite-target: /$1
    nginx.ingress.kubernetes.io/use-regex: "true"
    nginx.ingress.kubernetes.io/enable-rewrite-log: "true"
    nginx.ingress.kubernetes.io/ssl-redirect: "false"
    nginx.ingress.kubernetes.io/force-ssl-redirect: "false"
    nginx.ingress.kubernetes.io/configuration-snippet: |
      sub_filter '<head>' '<head> <base href="/sw-filer/">'; #add base url
      sub_filter '="/' '="./'; #make absolute paths relative
      sub_filter '=/' '=./';
      sub_filter '/seaweedfsstatic' './seaweedfsstatic';
      sub_filter_once off;
spec:
  rules:
  - http:
      paths:
      - path: /sw-filer/?(.*)
        backend:
          serviceName: {{ template "seaweedfs.name" . }}-filer
          servicePort: {{ .Values.filer.port }}
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: ingress-{{ template "seaweedfs.name" . }}-master
  annotations:
    kubernetes.io/ingress.class: "nginx"
    nginx.ingress.kubernetes.io/auth-type: "basic"
    nginx.ingress.kubernetes.io/auth-secret: "default/ingress-basic-auth-secret"
    nginx.ingress.kubernetes.io/auth-realm: 'Authentication Required - SW-Master'
    nginx.ingress.kubernetes.io/service-upstream: "true"
    nginx.ingress.kubernetes.io/rewrite-target: /$1
    nginx.ingress.kubernetes.io/use-regex: "true"
    nginx.ingress.kubernetes.io/enable-rewrite-log: "true"
    nginx.ingress.kubernetes.io/ssl-redirect: "false"
    nginx.ingress.kubernetes.io/force-ssl-redirect: "false"
    nginx.ingress.kubernetes.io/configuration-snippet: |
      sub_filter '<head>' '<head> <base href="/sw-master/">'; #add base url
      sub_filter '="/' '="./'; #make absolute paths relative
      sub_filter '=/' '=./';
      sub_filter '/seaweedfsstatic' './seaweedfsstatic';
      sub_filter_once off;
spec:
  rules:
  - http:
      paths:
      - path: /sw-master/?(.*)
        backend:
          serviceName: {{ template "seaweedfs.name" . }}-master
          servicePort: {{ .Values.master.port }}
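With these rules in place, the filer and master UIs are reachable under the /sw-filer/ and /sw-master/ prefixes behind basic auth. A hedged curl sketch; the host, user, and password depend on your ingress controller and on the ingress-basic-auth-secret, which is not created by this diff:

curl -u <user>:<password> http://<ingress-host>/sw-filer/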
33
k8s/seaweedfs/templates/master-cert.yaml
Normal file
@ -0,0 +1,33 @@
{{- if .Values.global.enableSecurity }}
apiVersion: certmanager.k8s.io/v1alpha1
kind: Certificate
metadata:
  name: {{ template "seaweedfs.name" . }}-master-cert
  namespace: {{ .Release.Namespace }}
spec:
  secretName: {{ template "seaweedfs.name" . }}-master-cert
  issuerRef:
    name: {{ template "seaweedfs.name" . }}-clusterissuer
    kind: ClusterIssuer
  commonName: {{ .Values.certificates.commonName }}
  organization:
  - "SeaweedFS CA"
  dnsNames:
  - '*.{{ .Release.Namespace }}'
  - '*.{{ .Release.Namespace }}.svc'
  - '*.{{ .Release.Namespace }}.svc.cluster.local'
  - '*.{{ template "seaweedfs.name" . }}-master'
  - '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}'
  - '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}.svc'
  - '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}.svc.cluster.local'
  {{- if .Values.certificates.ipAddresses }}
  ipAddresses:
  {{- range .Values.certificates.ipAddresses }}
  - {{ . }}
  {{- end }}
  {{- end }}
  keyAlgorithm: {{ .Values.certificates.keyAlgorithm }}
  keySize: {{ .Values.certificates.keySize }}
  duration: {{ .Values.certificates.duration }}
  renewBefore: {{ .Values.certificates.renewBefore }}
{{- end }}
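All of the Certificate templates in this commit read the same certificates block from values.yaml. A sketch of the shape they expect; the concrete values here are assumptions for illustration, not confirmed chart defaults:

certificates:
  commonName: "SeaweedFS CA"
  ipAddresses: []
  keyAlgorithm: ecdsa
  keySize: 256
  duration: 2160h    # 90 days
  renewBefore: 360h  # 15 days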
25
k8s/seaweedfs/templates/master-service.yaml
Normal file
@ -0,0 +1,25 @@
apiVersion: v1
kind: Service
metadata:
  name: {{ template "seaweedfs.name" . }}-master
  namespace: {{ .Release.Namespace }}
  labels:
    app: {{ template "seaweedfs.name" . }}
    component: master
  annotations:
    service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
spec:
  clusterIP: None
  publishNotReadyAddresses: true
  ports:
  - name: "swfs-master"
    port: {{ .Values.master.port }}
    targetPort: {{ .Values.master.port }}
    protocol: TCP
  - name: "swfs-master-grpc"
    port: {{ .Values.master.grpcPort }}
    targetPort: {{ .Values.master.grpcPort }}
    protocol: TCP
  selector:
    app: {{ template "seaweedfs.name" . }}
    component: master
227
k8s/seaweedfs/templates/master-statefulset.yaml
Normal file
@ -0,0 +1,227 @@
{{- if .Values.master.enabled }}
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: {{ template "seaweedfs.name" . }}-master
  namespace: {{ .Release.Namespace }}
  labels:
    app: {{ template "seaweedfs.name" . }}
    chart: {{ template "seaweedfs.chart" . }}
    heritage: {{ .Release.Service }}
    release: {{ .Release.Name }}
spec:
  serviceName: {{ template "seaweedfs.name" . }}-master
  podManagementPolicy: Parallel
  replicas: {{ .Values.master.replicas }}
  {{- if (gt (int .Values.master.updatePartition) 0) }}
  updateStrategy:
    type: RollingUpdate
    rollingUpdate:
      partition: {{ .Values.master.updatePartition }}
  {{- end }}
  selector:
    matchLabels:
      app: {{ template "seaweedfs.name" . }}
      chart: {{ template "seaweedfs.chart" . }}
      release: {{ .Release.Name }}
      component: master
  template:
    metadata:
      labels:
        app: {{ template "seaweedfs.name" . }}
        chart: {{ template "seaweedfs.chart" . }}
        release: {{ .Release.Name }}
        component: master
    spec:
      restartPolicy: {{ default .Values.global.restartPolicy .Values.master.restartPolicy }}
      {{- if .Values.master.affinity }}
      affinity:
        {{ tpl .Values.master.affinity . | nindent 8 | trim }}
      {{- end }}
      {{- if .Values.master.tolerations }}
      tolerations:
        {{ tpl .Values.master.tolerations . | nindent 8 | trim }}
      {{- end }}
      {{- if .Values.global.imagePullSecrets }}
      imagePullSecrets:
        - name: {{ .Values.global.imagePullSecrets }}
      {{- end }}
      terminationGracePeriodSeconds: 60
      {{- if .Values.master.priorityClassName }}
      priorityClassName: {{ .Values.master.priorityClassName | quote }}
      {{- end }}
      enableServiceLinks: false
      containers:
        - name: seaweedfs
          image: {{ template "master.image" . }}
          imagePullPolicy: {{ default "IfNotPresent" .Values.global.imagePullPolicy }}
          env:
            - name: POD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: SEAWEEDFS_FULLNAME
              value: "{{ template "seaweedfs.name" . }}"
            {{- if .Values.master.extraEnvironmentVars }}
            {{- range $key, $value := .Values.master.extraEnvironmentVars }}
            - name: {{ $key }}
              value: {{ $value | quote }}
            {{- end }}
            {{- end }}
            {{- if .Values.global.extraEnvironmentVars }}
            {{- range $key, $value := .Values.global.extraEnvironmentVars }}
            - name: {{ $key }}
              value: {{ $value | quote }}
            {{- end }}
            {{- end }}
          command:
            - "/bin/sh"
            - "-ec"
            - |
              exec /usr/bin/weed -logdir=/logs \
              {{- if .Values.master.loggingOverrideLevel }}
              -v={{ .Values.master.loggingOverrideLevel }} \
              {{- else }}
              -v={{ .Values.global.loggingLevel }} \
              {{- end }}
              master \
              -port={{ .Values.master.port }} \
              -mdir=/data \
              -ip.bind={{ .Values.master.ipBind }} \
              {{- if .Values.global.enableReplication }}
              -defaultReplication={{ .Values.global.replicationPlacment }} \
              {{- else }}
              -defaultReplication={{ .Values.master.defaultReplication }} \
              {{- end }}
              {{- if .Values.master.volumePreallocate }}
              -volumePreallocate \
              {{- end }}
              {{- if .Values.global.monitoring.enabled }}
              -metrics.address="{{ .Values.global.monitoring.gatewayHost }}:{{ .Values.global.monitoring.gatewayPort }}" \
              {{- end }}
              -volumeSizeLimitMB={{ .Values.master.volumeSizeLimitMB }} \
              {{- if .Values.master.disableHttp }}
              -disableHttp \
              {{- end }}
              {{- if .Values.master.pulseSeconds }}
              -pulseSeconds={{ .Values.master.pulseSeconds }} \
              {{- end }}
              {{- if .Values.master.garbageThreshold }}
              -garbageThreshold={{ .Values.master.garbageThreshold }} \
              {{- end }}
              {{- if .Values.master.metricsIntervalSec }}
              -metrics.intervalSeconds={{ .Values.master.metricsIntervalSec }} \
              {{- end }}
              -ip=${POD_NAME}.${SEAWEEDFS_FULLNAME}-master \
              -peers={{ range $index := until (.Values.master.replicas | int) }}${SEAWEEDFS_FULLNAME}-master-{{ $index }}.${SEAWEEDFS_FULLNAME}-master:{{ $.Values.master.port }}{{ if lt $index (sub ($.Values.master.replicas | int) 1) }},{{ end }}{{ end }}
          volumeMounts:
            - name: data-{{ .Release.Namespace }}
              mountPath: /data
            - name: seaweedfs-master-log-volume
              mountPath: "/logs/"
            {{- if .Values.global.enableSecurity }}
            - name: security-config
              readOnly: true
              mountPath: /etc/seaweedfs/security.toml
              subPath: security.toml
            - name: ca-cert
              readOnly: true
              mountPath: /usr/local/share/ca-certificates/ca/
            - name: master-cert
              readOnly: true
              mountPath: /usr/local/share/ca-certificates/master/
            - name: volume-cert
              readOnly: true
              mountPath: /usr/local/share/ca-certificates/volume/
            - name: filer-cert
              readOnly: true
              mountPath: /usr/local/share/ca-certificates/filer/
            - name: client-cert
              readOnly: true
              mountPath: /usr/local/share/ca-certificates/client/
            {{- end }}
            {{ tpl .Values.master.extraVolumeMounts . | nindent 12 | trim }}
          ports:
            - containerPort: {{ .Values.master.port }}
              name: swfs-master
            - containerPort: {{ .Values.master.grpcPort }}
              #name: swfs-master-grpc
          readinessProbe:
            httpGet:
              path: /cluster/status
              port: {{ .Values.master.port }}
              scheme: HTTP
            initialDelaySeconds: 10
            periodSeconds: 45
            successThreshold: 2
            failureThreshold: 100
            timeoutSeconds: 10
          livenessProbe:
            httpGet:
              path: /cluster/status
              port: {{ .Values.master.port }}
              scheme: HTTP
            initialDelaySeconds: 20
            periodSeconds: 30
            successThreshold: 1
            failureThreshold: 4
            timeoutSeconds: 10
          {{- if .Values.master.resources }}
          resources:
            {{ tpl .Values.master.resources . | nindent 12 | trim }}
          {{- end }}
      volumes:
        - name: seaweedfs-master-log-volume
          hostPath:
            path: /storage/logs/seaweedfs/master
            type: DirectoryOrCreate
        - name: data-{{ .Release.Namespace }}
          hostPath:
            path: /ssd/seaweed-master/
            type: DirectoryOrCreate
        {{- if .Values.global.enableSecurity }}
        - name: security-config
          configMap:
            name: {{ template "seaweedfs.name" . }}-security-config
        - name: ca-cert
          secret:
            secretName: {{ template "seaweedfs.name" . }}-ca-cert
        - name: master-cert
          secret:
            secretName: {{ template "seaweedfs.name" . }}-master-cert
        - name: volume-cert
          secret:
            secretName: {{ template "seaweedfs.name" . }}-volume-cert
        - name: filer-cert
          secret:
            secretName: {{ template "seaweedfs.name" . }}-filer-cert
        - name: client-cert
          secret:
            secretName: {{ template "seaweedfs.name" . }}-client-cert
        {{- end }}
        {{ tpl .Values.master.extraVolumes . | indent 8 | trim }}
      {{- if .Values.master.nodeSelector }}
      nodeSelector:
        {{ tpl .Values.master.nodeSelector . | indent 8 | trim }}
      {{- end }}
{{/* volumeClaimTemplates:*/}}
{{/* - metadata:*/}}
{{/*     name: data-{{ .Release.Namespace }}*/}}
{{/*   spec:*/}}
{{/*     accessModes:*/}}
{{/*     - ReadWriteOnce*/}}
{{/*     resources:*/}}
{{/*       requests:*/}}
{{/*         storage: {{ .Values.master.storage }}*/}}
{{/*   {{- if .Values.master.storageClass }}*/}}
{{/*   storageClassName: {{ .Values.master.storageClass }}*/}}
{{/*   {{- end }}*/}}
{{- end }}
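For the -peers flag above, the until/sub expression expands to a comma-separated list with no trailing comma. With SEAWEEDFS_FULLNAME=seaweedfs, master.replicas=3, and master.port=9333 it renders as:

-peers=seaweedfs-master-0.seaweedfs-master:9333,seaweedfs-master-1.seaweedfs-master:9333,seaweedfs-master-2.seaweedfs-master:9333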
188
k8s/seaweedfs/templates/s3-deployment.yaml
Normal file
@ -0,0 +1,188 @@
{{- if .Values.s3.enabled }}
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ template "seaweedfs.name" . }}-s3
  namespace: {{ .Release.Namespace }}
  labels:
    app: {{ template "seaweedfs.name" . }}
    chart: {{ template "seaweedfs.chart" . }}
    heritage: {{ .Release.Service }}
    release: {{ .Release.Name }}
spec:
  replicas: {{ .Values.s3.replicas }}
  selector:
    matchLabels:
      app: {{ template "seaweedfs.name" . }}
      chart: {{ template "seaweedfs.chart" . }}
      release: {{ .Release.Name }}
      component: s3
  template:
    metadata:
      labels:
        app: {{ template "seaweedfs.name" . }}
        chart: {{ template "seaweedfs.chart" . }}
        release: {{ .Release.Name }}
        component: s3
    spec:
      restartPolicy: {{ default .Values.global.restartPolicy .Values.s3.restartPolicy }}
      {{- if .Values.s3.tolerations }}
      tolerations:
        {{ tpl .Values.s3.tolerations . | nindent 8 | trim }}
      {{- end }}
      {{- if .Values.global.imagePullSecrets }}
      imagePullSecrets:
        - name: {{ .Values.global.imagePullSecrets }}
      {{- end }}
      terminationGracePeriodSeconds: 10
      {{- if .Values.s3.priorityClassName }}
      priorityClassName: {{ .Values.s3.priorityClassName | quote }}
      {{- end }}
      enableServiceLinks: false
      containers:
        - name: seaweedfs
          image: {{ template "s3.image" . }}
          imagePullPolicy: {{ default "IfNotPresent" .Values.global.imagePullPolicy }}
          env:
            - name: POD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: SEAWEEDFS_FULLNAME
              value: "{{ template "seaweedfs.name" . }}"
            {{- if .Values.global.extraEnvironmentVars }}
            {{- range $key, $value := .Values.global.extraEnvironmentVars }}
            - name: {{ $key }}
              value: {{ $value | quote }}
            {{- end }}
            {{- end }}
          command:
            - "/bin/sh"
            - "-ec"
            - |
              exec /usr/bin/weed -logdir=/logs \
              {{- if .Values.s3.loggingOverrideLevel }}
              -v={{ .Values.s3.loggingOverrideLevel }} \
              {{- else }}
              -v={{ .Values.global.loggingLevel }} \
              {{- end }}
              s3 \
              -port={{ .Values.s3.port }} \
              {{- if .Values.s3.metricsPort }}
              -metricsPort {{ .Values.s3.metricsPort }} \
              {{- end }}
              {{- if .Values.global.enableSecurity }}
              -cert.file=/usr/local/share/ca-certificates/client/tls.crt \
              -key.file=/usr/local/share/ca-certificates/client/tls.key \
              {{- end }}
              {{- if .Values.s3.domainName }}
              -domainName={{ .Values.s3.domainName }} \
              {{- end }}
              {{- if .Values.s3.allowEmptyFolder }}
              -allowEmptyFolder={{ .Values.s3.allowEmptyFolder }} \
              {{- end }}
              {{- if .Values.s3.enableAuth }}
              -config=/etc/sw/seaweedfs_s3_config \
              {{- end }}
              -filer={{ template "seaweedfs.name" . }}-filer-client:{{ .Values.filer.port }}
          volumeMounts:
            - name: logs
              mountPath: "/logs/"
            - mountPath: /etc/sw
              name: config-users
              readOnly: true
            {{- if .Values.global.enableSecurity }}
            - name: security-config
              readOnly: true
              mountPath: /etc/seaweedfs/security.toml
              subPath: security.toml
            - name: ca-cert
              readOnly: true
              mountPath: /usr/local/share/ca-certificates/ca/
            - name: master-cert
              readOnly: true
              mountPath: /usr/local/share/ca-certificates/master/
            - name: volume-cert
              readOnly: true
              mountPath: /usr/local/share/ca-certificates/volume/
            - name: filer-cert
              readOnly: true
              mountPath: /usr/local/share/ca-certificates/filer/
            - name: client-cert
              readOnly: true
              mountPath: /usr/local/share/ca-certificates/client/
            {{- end }}
            {{ tpl .Values.s3.extraVolumeMounts . | nindent 12 | trim }}
          ports:
            - containerPort: {{ .Values.s3.port }}
              name: swfs-s3
          readinessProbe:
            httpGet:
              path: /
              port: {{ .Values.s3.port }}
              scheme: HTTP
            initialDelaySeconds: 15
            periodSeconds: 15
            successThreshold: 1
            failureThreshold: 100
            timeoutSeconds: 10
          livenessProbe:
            httpGet:
              path: /
              port: {{ .Values.s3.port }}
              scheme: HTTP
            initialDelaySeconds: 20
            periodSeconds: 60
            successThreshold: 1
            failureThreshold: 20
            timeoutSeconds: 10
          {{- if .Values.s3.resources }}
          resources:
            {{ tpl .Values.s3.resources . | nindent 12 | trim }}
          {{- end }}
      volumes:
        - name: config-users
          secret:
            defaultMode: 420
            secretName: seaweedfs-s3-secret
        {{- if eq .Values.s3.logs.type "hostPath" }}
        - name: logs
          hostPath:
            path: /storage/logs/seaweedfs/s3
            type: DirectoryOrCreate
        {{- end }}
        {{- if .Values.global.enableSecurity }}
        - name: security-config
          configMap:
            name: {{ template "seaweedfs.name" . }}-security-config
        - name: ca-cert
          secret:
            secretName: {{ template "seaweedfs.name" . }}-ca-cert
        - name: master-cert
          secret:
            secretName: {{ template "seaweedfs.name" . }}-master-cert
        - name: volume-cert
          secret:
            secretName: {{ template "seaweedfs.name" . }}-volume-cert
        - name: filer-cert
          secret:
            secretName: {{ template "seaweedfs.name" . }}-filer-cert
        - name: client-cert
          secret:
            secretName: {{ template "seaweedfs.name" . }}-client-cert
        {{- end }}
        {{ tpl .Values.s3.extraVolumes . | indent 8 | trim }}
      {{- if .Values.s3.nodeSelector }}
      nodeSelector:
        {{ tpl .Values.s3.nodeSelector . | indent 8 | trim }}
      {{- end }}
{{- end }}
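A minimal values.yaml sketch for running this standalone S3 gateway; key names follow the templates above, while the port number is an assumption based on SeaweedFS's usual S3 default and is not confirmed by this diff:

s3:
  enabled: true
  replicas: 1
  port: 8333          # assumed default S3 port
  metricsPort: 9327
  enableAuth: false
  allowEmptyFolder: true
  domainName: ""
  logs:
    type: "hostPath"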
23
k8s/seaweedfs/templates/s3-service.yaml
Normal file
@ -0,0 +1,23 @@
apiVersion: v1
kind: Service
metadata:
  name: {{ template "seaweedfs.name" . }}-s3
  namespace: {{ .Release.Namespace }}
  labels:
    app: {{ template "seaweedfs.name" . }}
    component: s3
spec:
  ports:
  - name: "swfs-s3"
    port: {{ if .Values.s3.enabled }}{{ .Values.s3.port }}{{ else }}{{ .Values.filer.s3.port }}{{ end }}
    targetPort: {{ if .Values.s3.enabled }}{{ .Values.s3.port }}{{ else }}{{ .Values.filer.s3.port }}{{ end }}
    protocol: TCP
{{- if and .Values.s3.enabled .Values.s3.metricsPort }}
  - name: "swfs-s3-metrics"
    port: {{ .Values.s3.metricsPort }}
    targetPort: {{ .Values.s3.metricsPort }}
    protocol: TCP
{{- end }}
  selector:
    app: {{ template "seaweedfs.name" . }}
    component: {{ if .Values.s3.enabled }}s3{{ else }}filer{{ end }}
18
k8s/seaweedfs/templates/s3-servicemonitor.yaml
Normal file
@ -0,0 +1,18 @@
{{- if .Values.s3.metricsPort }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: {{ template "seaweedfs.name" . }}-s3
  namespace: {{ .Release.Namespace }}
  labels:
    app: {{ template "seaweedfs.name" . }}
    component: s3
spec:
  endpoints:
  - interval: 30s
    port: swfs-s3-metrics
    scrapeTimeout: 5s
  selector:
    matchLabels:
      app: {{ template "seaweedfs.name" . }}
      component: s3
{{- end }}
20
k8s/seaweedfs/templates/seaweedfs-grafana-dashboard.yaml
Normal file
@ -0,0 +1,20 @@
{{- if .Values.global.monitoring.enabled }}
{{- $files := .Files.Glob "dashboards/*.json" }}
{{- if $files }}
apiVersion: v1
kind: ConfigMapList
items:
{{- range $path, $fileContents := $files }}
{{- $dashboardName := regexReplaceAll "(^.*/)(.*)\\.json$" $path "${2}" }}
- apiVersion: v1
  kind: ConfigMap
  metadata:
    name: {{ printf "%s" $dashboardName | lower | replace "_" "-" }}
    namespace: {{ $.Release.Namespace }}
    labels:
      grafana_dashboard: "1"
  data:
    {{ $dashboardName }}.json: {{ $.Files.Get $path | toJson }}
{{- end }}
{{- end }}
{{- end }}
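As a worked example of the naming pipeline above: a hypothetical file dashboards/seaweedfs_grafana_dashboard.json yields $dashboardName seaweedfs_grafana_dashboard, and printf | lower | replace "_" "-" turns that into the ConfigMap name seaweedfs-grafana-dashboard. The grafana_dashboard: "1" label is the convention that Grafana dashboard-sidecar setups (for example kube-prometheus-stack's sidecar) watch for when auto-loading dashboards.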
21
k8s/seaweedfs/templates/seaweedfs-s3-secret.yaml
Normal file
@ -0,0 +1,21 @@
{{- if not (or .Values.filer.s3.skipAuthSecretCreation .Values.s3.skipAuthSecretCreation) }}
{{- $access_key_admin := randAlphaNum 16 -}}
{{- $secret_key_admin := randAlphaNum 32 -}}
{{- $access_key_read := randAlphaNum 16 -}}
{{- $secret_key_read := randAlphaNum 32 -}}
apiVersion: v1
kind: Secret
type: Opaque
metadata:
  name: seaweedfs-s3-secret
  namespace: {{ .Release.Namespace }}
  annotations:
    "helm.sh/resource-policy": keep
    "helm.sh/hook": "pre-install"
stringData:
  admin_access_key_id: {{ $access_key_admin }}
  admin_secret_access_key: {{ $secret_key_admin }}
  read_access_key_id: {{ $access_key_read }}
  read_secret_access_key: {{ $secret_key_read }}
  seaweedfs_s3_config: '{"identities":[{"name":"anvAdmin","credentials":[{"accessKey":"{{ $access_key_admin }}","secretKey":"{{ $secret_key_admin }}"}],"actions":["Admin","Read","Write"]},{"name":"anvReadOnly","credentials":[{"accessKey":"{{ $access_key_read }}","secretKey":"{{ $secret_key_read }}"}],"actions":["Read"]}]}'
{{- end }}
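Because the keys are generated once at install time (the pre-install hook) and the Secret is kept on uninstall (the resource-policy annotation), the way to learn the credentials afterwards is to read them back from the cluster; a sketch:

kubectl -n <namespace> get secret seaweedfs-s3-secret \
  -o jsonpath='{.data.admin_access_key_id}' | base64 -d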
14
k8s/seaweedfs/templates/secret-seaweedfs-db.yaml
Normal file
@ -0,0 +1,14 @@
apiVersion: v1
kind: Secret
type: Opaque
metadata:
  name: secret-seaweedfs-db
  namespace: {{ .Release.Namespace }}
  annotations:
    "helm.sh/resource-policy": keep
    "helm.sh/hook": "pre-install"
stringData:
  user: "YourSWUser"
  password: "HardCodedPassword"
  # better to generate this randomly and create the matching user in the DB, e.g.:
  # password: {{ randAlphaNum 10 | sha256sum | b64enc | trunc 32 }}
52
k8s/seaweedfs/templates/security-configmap.yaml
Normal file
@ -0,0 +1,52 @@
{{- if .Values.global.enableSecurity }}
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ template "seaweedfs.name" . }}-security-config
  namespace: {{ .Release.Namespace }}
  labels:
    app: {{ template "seaweedfs.name" . }}
    chart: {{ template "seaweedfs.chart" . }}
    heritage: {{ .Release.Service }}
    release: {{ .Release.Name }}
data:
  security.toml: |-
    # this file is read by master, volume server, and filer

    # the jwt signing key is read by master and volume server
    # a jwt expires in 10 seconds
    [jwt.signing]
    key = "{{ randAlphaNum 10 | b64enc }}"

    # all grpc tls authentications are mutual
    # the values for the following ca, cert, and key are paths to the PEM files.
    [grpc]
    ca = "/usr/local/share/ca-certificates/ca/tls.crt"

    [grpc.volume]
    cert = "/usr/local/share/ca-certificates/volume/tls.crt"
    key = "/usr/local/share/ca-certificates/volume/tls.key"

    [grpc.master]
    cert = "/usr/local/share/ca-certificates/master/tls.crt"
    key = "/usr/local/share/ca-certificates/master/tls.key"

    [grpc.filer]
    cert = "/usr/local/share/ca-certificates/filer/tls.crt"
    key = "/usr/local/share/ca-certificates/filer/tls.key"

    # used by anything that needs a grpc client,
    # i.e., "weed backup|benchmark|filer.copy|filer.replicate|mount|s3|upload"
    [grpc.client]
    cert = "/usr/local/share/ca-certificates/client/tls.crt"
    key = "/usr/local/share/ca-certificates/client/tls.key"

    # volume server https options
    # Note: work in progress!
    # this does not work with other clients, e.g., "weed filer|mount" etc, yet.
    [https.client]
    enabled = false
    [https.volume]
    cert = ""
    key = ""
{{- end }}
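To inspect the rendered TOML without installing anything, Helm can render just this file; a sketch using Helm 3 syntax with the chart path as checked out in this repository:

helm template seaweedfs k8s/seaweedfs \
  --set global.enableSecurity=true \
  --show-only templates/security-configmap.yaml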
29
k8s/seaweedfs/templates/service-account.yaml
Normal file
@ -0,0 +1,29 @@
# hack to allow deleting master pods after migration
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: seaweefds-rw-cr
rules:
  - apiGroups: [""]
    resources: ["pods"]
    verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: seaweefds-rw-sa
  namespace: {{ .Release.Namespace }}
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: system:serviceaccount:seaweefds-rw-sa:default
subjects:
- kind: ServiceAccount
  name: seaweefds-rw-sa
  namespace: {{ .Release.Namespace }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: seaweefds-rw-cr
33
k8s/seaweedfs/templates/volume-cert.yaml
Normal file
@ -0,0 +1,33 @@
{{- if .Values.global.enableSecurity }}
apiVersion: certmanager.k8s.io/v1alpha1
kind: Certificate
metadata:
  name: {{ template "seaweedfs.name" . }}-volume-cert
  namespace: {{ .Release.Namespace }}
spec:
  secretName: {{ template "seaweedfs.name" . }}-volume-cert
  issuerRef:
    name: {{ template "seaweedfs.name" . }}-clusterissuer
    kind: ClusterIssuer
  commonName: {{ .Values.certificates.commonName }}
  organization:
  - "SeaweedFS CA"
  dnsNames:
  - '*.{{ .Release.Namespace }}'
  - '*.{{ .Release.Namespace }}.svc'
  - '*.{{ .Release.Namespace }}.svc.cluster.local'
  - '*.{{ template "seaweedfs.name" . }}-master'
  - '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}'
  - '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}.svc'
  - '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}.svc.cluster.local'
  {{- if .Values.certificates.ipAddresses }}
  ipAddresses:
  {{- range .Values.certificates.ipAddresses }}
  - {{ . }}
  {{- end }}
  {{- end }}
  keyAlgorithm: {{ .Values.certificates.keyAlgorithm }}
  keySize: {{ .Values.certificates.keySize }}
  duration: {{ .Values.certificates.duration }}
  renewBefore: {{ .Values.certificates.renewBefore }}
{{- end }}
28
k8s/seaweedfs/templates/volume-service.yaml
Normal file
@ -0,0 +1,28 @@
apiVersion: v1
kind: Service
metadata:
  name: {{ template "seaweedfs.name" . }}-volume
  namespace: {{ .Release.Namespace }}
  labels:
    app: {{ template "seaweedfs.name" . }}
    component: volume
spec:
  clusterIP: None
  ports:
  - name: "swfs-volume"
    port: {{ .Values.volume.port }}
    targetPort: {{ .Values.volume.port }}
    protocol: TCP
  - name: "swfs-volume-18080"
    port: {{ .Values.volume.grpcPort }}
    targetPort: {{ .Values.volume.grpcPort }}
    protocol: TCP
{{- if .Values.volume.metricsPort }}
  - name: "swfs-volume-metrics"
    port: {{ .Values.volume.metricsPort }}
    targetPort: {{ .Values.volume.metricsPort }}
    protocol: TCP
{{- end }}
  selector:
    app: {{ template "seaweedfs.name" . }}
    component: volume
18
k8s/seaweedfs/templates/volume-servicemonitor.yaml
Normal file
@ -0,0 +1,18 @@
{{- if .Values.volume.metricsPort }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: {{ template "seaweedfs.name" . }}-volume
  namespace: {{ .Release.Namespace }}
  labels:
    app: {{ template "seaweedfs.name" . }}
    component: volume
spec:
  endpoints:
  - interval: 30s
    port: swfs-volume-metrics
    scrapeTimeout: 5s
  selector:
    matchLabels:
      app: {{ template "seaweedfs.name" . }}
      component: volume
{{- end }}
276
k8s/seaweedfs/templates/volume-statefulset.yaml
Normal file
@ -0,0 +1,276 @@
{{- if .Values.volume.enabled }}
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: {{ template "seaweedfs.name" . }}-volume
  namespace: {{ .Release.Namespace }}
  labels:
    app: {{ template "seaweedfs.name" . }}
    chart: {{ template "seaweedfs.chart" . }}
    heritage: {{ .Release.Service }}
    release: {{ .Release.Name }}
spec:
  serviceName: {{ template "seaweedfs.name" . }}-volume
  replicas: {{ .Values.volume.replicas }}
  podManagementPolicy: Parallel
  selector:
    matchLabels:
      app: {{ template "seaweedfs.name" . }}
      chart: {{ template "seaweedfs.chart" . }}
      release: {{ .Release.Name }}
      component: volume
  template:
    metadata:
      labels:
        app: {{ template "seaweedfs.name" . }}
        chart: {{ template "seaweedfs.chart" . }}
        release: {{ .Release.Name }}
        component: volume
    spec:
      {{- if .Values.volume.affinity }}
      affinity:
        {{ tpl .Values.volume.affinity . | nindent 8 | trim }}
      {{- end }}
      restartPolicy: {{ default .Values.global.restartPolicy .Values.volume.restartPolicy }}
      {{- if .Values.volume.tolerations }}
      tolerations:
        {{ tpl .Values.volume.tolerations . | nindent 8 | trim }}
      {{- end }}
      {{- if .Values.global.imagePullSecrets }}
      imagePullSecrets:
        - name: {{ .Values.global.imagePullSecrets }}
      {{- end }}
      terminationGracePeriodSeconds: 10
      {{- if .Values.volume.priorityClassName }}
      priorityClassName: {{ .Values.volume.priorityClassName | quote }}
      {{- end }}
      enableServiceLinks: false
      {{- if .Values.volume.dir_idx }}
      initContainers:
        - name: seaweedfs-vol-move-idx
          image: {{ template "volume.image" . }}
          imagePullPolicy: {{ default "IfNotPresent" .Values.global.imagePullPolicy }}
          command: [ '/bin/sh', '-c' ]
          args: [ 'if ls {{ .Values.volume.dir }}/*.idx >/dev/null 2>&1; then mv {{ .Values.volume.dir }}/*.idx {{ .Values.volume.dir_idx }}/; fi;' ]
          volumeMounts:
            - name: idx
              mountPath: {{ .Values.volume.dir_idx }}
            - name: data
              mountPath: {{ .Values.volume.dir }}
      {{- end }}
      containers:
        - name: seaweedfs
          image: {{ template "volume.image" . }}
          imagePullPolicy: {{ default "IfNotPresent" .Values.global.imagePullPolicy }}
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: HOST_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.hostIP
            - name: SEAWEEDFS_FULLNAME
              value: "{{ template "seaweedfs.name" . }}"
            {{- if .Values.global.extraEnvironmentVars }}
            {{- range $key, $value := .Values.global.extraEnvironmentVars }}
            - name: {{ $key }}
              value: {{ $value | quote }}
            {{- end }}
            {{- end }}
          command:
            - "/bin/sh"
            - "-ec"
            - |
              exec /usr/bin/weed -logdir=/logs \
              {{- if .Values.volume.loggingOverrideLevel }}
              -v={{ .Values.volume.loggingOverrideLevel }} \
              {{- else }}
              -v={{ .Values.global.loggingLevel }} \
              {{- end }}
              volume \
              -port={{ .Values.volume.port }} \
              {{- if .Values.volume.metricsPort }}
              -metricsPort {{ .Values.volume.metricsPort }} \
              {{- end }}
              -dir={{ .Values.volume.dir }} \
              {{- if .Values.volume.dir_idx }}
              -dir.idx={{ .Values.volume.dir_idx }} \
              {{- end }}
              -max={{ .Values.volume.maxVolumes }} \
              {{- if .Values.volume.rack }}
              -rack={{ .Values.volume.rack }} \
              {{- end }}
              {{- if .Values.volume.dataCenter }}
              -dataCenter={{ .Values.volume.dataCenter }} \
              {{- end }}
              -ip.bind={{ .Values.volume.ipBind }} \
              -read.redirect={{ .Values.volume.readRedirect }} \
              {{- if .Values.volume.whiteList }}
              -whiteList={{ .Values.volume.whiteList }} \
              {{- end }}
              {{- if .Values.volume.imagesFixOrientation }}
              -images.fix.orientation \
              {{- end }}
              {{- if .Values.volume.pulseSeconds }}
              -pulseSeconds={{ .Values.volume.pulseSeconds }} \
              {{- end }}
              {{- if .Values.volume.index }}
              -index={{ .Values.volume.index }} \
              {{- end }}
              {{- if .Values.volume.fileSizeLimitMB }}
              -fileSizeLimitMB={{ .Values.volume.fileSizeLimitMB }} \
              {{- end }}
              -minFreeSpacePercent={{ .Values.volume.minFreeSpacePercent }} \
              -ip=${POD_NAME}.${SEAWEEDFS_FULLNAME}-volume \
              -compactionMBps={{ .Values.volume.compactionMBps }} \
              -mserver={{ range $index := until (.Values.master.replicas | int) }}${SEAWEEDFS_FULLNAME}-master-{{ $index }}.${SEAWEEDFS_FULLNAME}-master:{{ $.Values.master.port }}{{ if lt $index (sub ($.Values.master.replicas | int) 1) }},{{ end }}{{ end }}
          volumeMounts:
            - name: data
              mountPath: "{{ .Values.volume.dir }}/"
            {{- if .Values.volume.dir_idx }}
            - name: idx
              mountPath: "{{ .Values.volume.dir_idx }}/"
            {{- end }}
            - name: logs
              mountPath: "/logs/"
            {{- if .Values.global.enableSecurity }}
            - name: security-config
              readOnly: true
              mountPath: /etc/seaweedfs/security.toml
              subPath: security.toml
            - name: ca-cert
              readOnly: true
              mountPath: /usr/local/share/ca-certificates/ca/
            - name: master-cert
              readOnly: true
              mountPath: /usr/local/share/ca-certificates/master/
            - name: volume-cert
              readOnly: true
              mountPath: /usr/local/share/ca-certificates/volume/
            - name: filer-cert
              readOnly: true
              mountPath: /usr/local/share/ca-certificates/filer/
            - name: client-cert
              readOnly: true
              mountPath: /usr/local/share/ca-certificates/client/
            {{- end }}
            {{ tpl .Values.volume.extraVolumeMounts . | nindent 12 | trim }}
          ports:
            - containerPort: {{ .Values.volume.port }}
              name: swfs-vol
            - containerPort: {{ .Values.volume.grpcPort }}
              #name: swfs-vol-grpc
          readinessProbe:
            httpGet:
              path: /status
              port: {{ .Values.volume.port }}
              scheme: HTTP
            initialDelaySeconds: 15
            periodSeconds: 90
            successThreshold: 1
            failureThreshold: 100
            timeoutSeconds: 30
          livenessProbe:
            httpGet:
              path: /status
              port: {{ .Values.volume.port }}
              scheme: HTTP
            initialDelaySeconds: 20
            periodSeconds: 90
            successThreshold: 1
            failureThreshold: 4
            timeoutSeconds: 30
          {{- if .Values.volume.resources }}
          resources:
            {{ tpl .Values.volume.resources . | nindent 12 | trim }}
          {{- end }}
      {{- $hostpath_exists := include "volume.hostpath_exists" . -}}
      {{- if $hostpath_exists }}
      volumes:
        {{- if eq .Values.volume.data.type "hostPath" }}
        - name: data
          hostPath:
            path: /storage/object_store/
            type: DirectoryOrCreate
        {{- end }}
        {{- if and (eq .Values.volume.idx.type "hostPath") .Values.volume.dir_idx }}
        - name: idx
          hostPath:
            path: /ssd/seaweedfs-volume-idx/
            type: DirectoryOrCreate
        {{- end }}
        {{- if eq .Values.volume.logs.type "hostPath" }}
        - name: logs
          hostPath:
            path: /storage/logs/seaweedfs/volume
            type: DirectoryOrCreate
        {{- end }}
        {{- if .Values.global.enableSecurity }}
        - name: security-config
          configMap:
            name: {{ template "seaweedfs.name" . }}-security-config
        - name: ca-cert
          secret:
            secretName: {{ template "seaweedfs.name" . }}-ca-cert
        - name: master-cert
          secret:
            secretName: {{ template "seaweedfs.name" . }}-master-cert
        - name: volume-cert
          secret:
            secretName: {{ template "seaweedfs.name" . }}-volume-cert
        - name: filer-cert
          secret:
            secretName: {{ template "seaweedfs.name" . }}-filer-cert
        - name: client-cert
          secret:
            secretName: {{ template "seaweedfs.name" . }}-client-cert
        {{- end }}
        {{- if .Values.volume.extraVolumes }}
        {{ tpl .Values.volume.extraVolumes . | indent 8 | trim }}
        {{- end }}
      {{- end }}
      {{- if .Values.volume.nodeSelector }}
      nodeSelector:
        {{ tpl .Values.volume.nodeSelector . | indent 8 | trim }}
      {{- end }}
  {{- $pvc_exists := include "volume.pvc_exists" . -}}
  {{- if $pvc_exists }}
  volumeClaimTemplates:
    {{- if eq .Values.volume.data.type "persistentVolumeClaim"}}
    - metadata:
        name: data
      spec:
        accessModes: [ "ReadWriteOnce" ]
        storageClassName: {{ .Values.volume.data.storageClass }}
        resources:
          requests:
            storage: {{ .Values.volume.data.size }}
    {{- end }}
    {{- if and (eq .Values.volume.idx.type "persistentVolumeClaim") .Values.volume.dir_idx }}
    - metadata:
        name: idx
      spec:
        accessModes: [ "ReadWriteOnce" ]
        storageClassName: {{ .Values.volume.idx.storageClass }}
        resources:
          requests:
            storage: {{ .Values.volume.idx.size }}
    {{- end }}
    {{- if eq .Values.volume.logs.type "persistentVolumeClaim" }}
    - metadata:
        name: logs
      spec:
        accessModes: [ "ReadWriteOnce" ]
        storageClassName: {{ .Values.volume.logs.storageClass }}
        resources:
          requests:
            storage: {{ .Values.volume.logs.size }}
    {{- end }}
  {{- end }}
{{- end }}
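The probes above hit the volume server's /status endpoint, which is also handy for a manual health check through the headless service; a sketch assuming the rendered chart name is seaweedfs and volume.port is 8080:

kubectl -n <namespace> port-forward svc/seaweedfs-volume 8080:8080
curl http://localhost:8080/status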
418
k8s/seaweedfs/values.yaml
Normal file
418
k8s/seaweedfs/values.yaml
Normal file
@ -0,0 +1,418 @@
# Available parameters and their default values for the SeaweedFS chart.

global:
  registry: ""
  repository: ""
  imageName: chrislusf/seaweedfs
  # imageTag: "2.41" - started using {.Chart.appVersion}
  imagePullPolicy: IfNotPresent
  imagePullSecrets: imagepullsecret
  restartPolicy: Always
  loggingLevel: 1
  enableSecurity: false
  monitoring:
    enabled: false
    gatewayHost: null
    gatewayPort: null
  # if enabled, uses global.replicationPlacment and overrides the master & filer defaultReplicaPlacement config
  enableReplication: false
  # replication type is XYZ:
  # X number of replicas in other data centers
  # Y number of replicas in other racks in the same data center
  # Z number of replicas in other servers in the same rack
  # e.g. "001" keeps one extra replica on another server in the same rack
  replicationPlacment: "001"
  extraEnvironmentVars:
    WEED_CLUSTER_DEFAULT: "sw"
    WEED_CLUSTER_SW_MASTER: "seaweedfs-master:9333"
    WEED_CLUSTER_SW_FILER: "seaweedfs-filer-client:8888"

image:
  registry: ""
  repository: ""

master:
  enabled: true
  repository: null
  imageName: null
  imageTag: null
  imageOverride: null
  restartPolicy: null
  replicas: 1
  port: 9333
  grpcPort: 19333
  ipBind: "0.0.0.0"
  volumePreallocate: false
  # the master stops directing writes to oversized volumes
  volumeSizeLimitMB: 30000
  loggingOverrideLevel: null
  # number of seconds between heartbeats, default 5
  pulseSeconds: null
  # threshold to vacuum and reclaim space, default 0.3 (30%)
  garbageThreshold: null
  # Prometheus push interval in seconds, default 15
  metricsIntervalSec: 15
  # replication type is XYZ:
  # X number of replicas in other data centers
  # Y number of replicas in other racks in the same data center
  # Z number of replicas in other servers in the same rack
  defaultReplication: "000"

  # Disable http requests; only gRPC operations are allowed
  disableHttp: false

  extraVolumes: ""
  extraVolumeMounts: ""

  # storage and storageClass are the settings for configuring stateful
  # storage for the master pods. storage should be set to the disk size of
  # the attached volume. storageClass is the class of storage, which defaults
  # to null (the Kube cluster will pick the default).
  storage: 25Gi
  storageClass: null

  # Resource requests, limits, etc. for the master cluster placement. This
  # should map directly to the value of the resources field for a PodSpec,
  # formatted as a multi-line string. By default no direct resource request
  # is made.
  resources: null

  # updatePartition is used to control a careful rolling update of SeaweedFS
  # masters.
  updatePartition: 0

  # Affinity Settings
  # Commenting out or setting the affinity variable as empty allows
  # deployment to single-node clusters such as Minikube
  affinity: |
    podAntiAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        - labelSelector:
            matchLabels:
              app: {{ template "seaweedfs.name" . }}
              release: "{{ .Release.Name }}"
              component: master
          topologyKey: kubernetes.io/hostname

  # Toleration Settings for master pods
  # This should be a multi-line string matching the Toleration array
  # in a PodSpec.
  tolerations: ""

  # nodeSelector labels for master pod assignment, formatted as a multi-line string.
  # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
  # Example:
  # nodeSelector: |
  #   beta.kubernetes.io/arch: amd64
  nodeSelector: |
    sw-backend: "true"

  # used to assign priority to master pods
  # ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
  priorityClassName: ""

  extraEnvironmentVars:
    WEED_MASTER_VOLUME_GROWTH_COPY_1: 7
    WEED_MASTER_VOLUME_GROWTH_COPY_2: 6
    WEED_MASTER_VOLUME_GROWTH_COPY_3: 3
    WEED_MASTER_VOLUME_GROWTH_COPY_OTHER: 1

volume:
  enabled: true
  repository: null
  imageName: null
  imageTag: null
  imageOverride: null
  restartPolicy: null
  port: 8080
  grpcPort: 18080
  metricsPort: 9327
  ipBind: "0.0.0.0"
  replicas: 1
  loggingOverrideLevel: null
  # number of seconds between heartbeats, must be smaller than or equal to the master's setting
  pulseSeconds: null
  # choose [memory|leveldb|leveldbMedium|leveldbLarge] mode to balance memory and performance, default memory
  index: null
  # limit file size to avoid out-of-memory errors, default 256mb
  fileSizeLimitMB: null
  # minimum free disk space (in percent); when free disk space drops below this value, all volumes are marked read-only
  minFreeSpacePercent: 7

  # can use ANY storage class, example with local-path-provisioner
  # data:
  #   type: "persistentVolumeClaim"
  #   size: "24Ti"
  #   storageClass: "local-path-provisioner"
  data:
    type: "hostPath"
    size: ""
    storageClass: ""
  idx:
    type: "hostPath"
    size: ""
    storageClass: ""

  logs:
    type: "hostPath"
    size: ""
    storageClass: ""

  # limit background compaction or copying speed in megabytes per second
  compactionMBps: "50"

  # Directories to store data files. dir[,dir]... (default "/tmp")
  dir: "/data"
  # Directories to store index files. dir[,dir]... (default "/tmp")
  dir_idx: null

  # Maximum numbers of volumes, count[,count]...
  # If set to zero on non-windows OS, the limit will be auto configured. (default "7")
  maxVolumes: "0"

  # Volume server's rack name
  rack: null

  # Volume server's data center name
  dataCenter: null

  # Redirect moved or non-local volumes. (default true)
  readRedirect: true

  # Comma separated IP addresses having write permission. No limit if empty.
  whiteList: null

  # Adjust jpg orientation when uploading.
  imagesFixOrientation: false

  extraVolumes: ""
  extraVolumeMounts: ""

  # Affinity Settings
  # Commenting out or setting the affinity variable as empty allows
  # deployment to single-node clusters such as Minikube
  affinity: |
    podAntiAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        - labelSelector:
            matchLabels:
              app: {{ template "seaweedfs.name" . }}
              release: "{{ .Release.Name }}"
              component: volume
          topologyKey: kubernetes.io/hostname

  # Resource requests, limits, etc. for the server cluster placement. This
  # should map directly to the value of the resources field for a PodSpec,
  # formatted as a multi-line string. By default no direct resource request
  # is made.
  resources: null

  # Toleration Settings for server pods
  # This should be a multi-line string matching the Toleration array
  # in a PodSpec.
  tolerations: ""

  # nodeSelector labels for server pod assignment, formatted as a multi-line string.
  # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
  # Example:
  # nodeSelector: |
  #   beta.kubernetes.io/arch: amd64
  nodeSelector: |
    sw-volume: "true"

  # used to assign priority to server pods
  # ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
  priorityClassName: ""

filer:
  enabled: true
  repository: null
  imageName: null
  imageTag: null
  imageOverride: null
  restartPolicy: null
  replicas: 1
  port: 8888
  grpcPort: 18888
  metricsPort: 9327
  loggingOverrideLevel: null
  # replication type is XYZ:
  # X number of replicas in other data centers
  # Y number of replicas in other racks in the same data center
  # Z number of replicas in other servers in the same rack
  defaultReplicaPlacement: "000"
  # turn off directory listing
  disableDirListing: false
  # split files larger than the limit, default 32
  maxMB: null
  # encrypt data on volume servers
  encryptVolumeData: false
  # enable peer metadata sync, for leveldb (a local store for the filer, but synced across peers)
  enable_peers: false

  # Whether to proxy or redirect to the volume server during file GET requests
  redirectOnRead: false

  # Limit sub dir listing size (default 100000)
  dirListLimit: 100000

  # Disable http requests; only gRPC operations are allowed
  disableHttp: false

  # storage and storageClass are the settings for configuring stateful
  # storage for the filer pods. storage should be set to the disk size of
  # the attached volume. storageClass is the class of storage, which defaults
  # to null (the Kube cluster will pick the default).
  storage: 25Gi
  storageClass: null

  extraVolumes: ""
  extraVolumeMounts: ""

  # Affinity Settings
  # Commenting out or setting the affinity variable as empty allows
  # deployment to single-node clusters such as Minikube
  affinity: |
    podAntiAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        - labelSelector:
            matchLabels:
              app: {{ template "seaweedfs.name" . }}
              release: "{{ .Release.Name }}"
              component: filer
          topologyKey: kubernetes.io/hostname

  # updatePartition is used to control a careful rolling update of SeaweedFS
  # filers.
  updatePartition: 0

  # Resource requests, limits, etc. for the server cluster placement. This
  # should map directly to the value of the resources field for a PodSpec,
  # formatted as a multi-line string. By default no direct resource request
  # is made.
  resources: null

  # Toleration Settings for server pods
  # This should be a multi-line string matching the Toleration array
  # in a PodSpec.
  tolerations: ""

  # nodeSelector labels for server pod assignment, formatted as a multi-line string.
  # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
  # Example:
  # nodeSelector: |
  #   beta.kubernetes.io/arch: amd64
  nodeSelector: |
    sw-backend: "true"

  # used to assign priority to server pods
  # ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
  priorityClassName: ""

  # extraEnvironmentVars is a list of extra environment variables to set in the stateful set.
  extraEnvironmentVars:
    WEED_MYSQL_ENABLED: "true"
    WEED_MYSQL_HOSTNAME: "mysql-db-host"
    WEED_MYSQL_PORT: "3306"
    WEED_MYSQL_DATABASE: "sw_database"
    WEED_MYSQL_CONNECTION_MAX_IDLE: "5"
    WEED_MYSQL_CONNECTION_MAX_OPEN: "75"
    # "refresh" the connection every 10 minutes, eliminating mysql closing "old" connections
    WEED_MYSQL_CONNECTION_MAX_LIFETIME_SECONDS: "600"
    # enable usage of memsql as a filer backend
    WEED_MYSQL_INTERPOLATEPARAMS: "true"
    WEED_LEVELDB2_ENABLED: "false"
    # with http DELETE, by default the filer would check whether a folder is empty.
    # recursive_delete will delete all sub folders and files, similar to "rm -Rf"
    WEED_FILER_OPTIONS_RECURSIVE_DELETE: "false"
    # directories under this folder will automatically become separate buckets
    WEED_FILER_BUCKETS_FOLDER: "/buckets"

  s3:
    enabled: true
    port: 8333
    # allow empty folders
    allowEmptyFolder: false
    # Suffix of the host name, {bucket}.{domainName}
    domainName: ""
    # enable user & permission checks for s3 (need to inject to all services)
    enableAuth: false
    skipAuthSecretCreation: false

s3:
  enabled: false
  repository: null
  imageName: null
  imageTag: null
  restartPolicy: null
  replicas: 1
  port: 8333
  metricsPort: 9327
  loggingOverrideLevel: null
  # allow empty folders
  allowEmptyFolder: true
  # enable user & permission checks for s3 (need to inject to all services)
  enableAuth: false
  skipAuthSecretCreation: false

  # Suffix of the host name, {bucket}.{domainName}
  domainName: ""

  extraVolumes: ""
  extraVolumeMounts: ""

  # Resource requests, limits, etc. for the server cluster placement. This
  # should map directly to the value of the resources field for a PodSpec,
  # formatted as a multi-line string. By default no direct resource request
  # is made.
  resources: null

  # Toleration Settings for server pods
  # This should be a multi-line string matching the Toleration array
  # in a PodSpec.
  tolerations: ""

  # nodeSelector labels for server pod assignment, formatted as a multi-line string.
  # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
  # Example:
  # nodeSelector: |
  #   beta.kubernetes.io/arch: amd64
  nodeSelector: |
    sw-backend: "true"

  # used to assign priority to server pods
  # ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
  priorityClassName: ""

  logs:
    type: "hostPath"
    size: ""
    storageClass: ""

cronjob:
  enabled: true
  master: "seaweedfs-master:9333"
  filer: "seaweedfs-filer-client:8888"
  tolerations: ""
  nodeSelector: |
    sw-backend: "true"
  replication:
    enable: true
    collectionPattern: ""
  schedule: "*/7 * * * *"
  resources: null
  # balance all volumes among volume servers
  # ALL|EACH_COLLECTION|<collection_name>
  collection: ""


certificates:
  commonName: "SeaweedFS CA"
  ipAddresses: []
  keyAlgorithm: rsa
  keySize: 2048
  duration: 2160h # 90d
  renewBefore: 360h # 15d
BIN
note/SeaweedFS_Architecture.png
Normal file
Binary file not shown. (After: 90 KiB)

BIN
note/SeaweedFS_Cluster_Backup.png
Normal file
Binary file not shown. (After: 86 KiB)

BIN
note/SeaweedFS_XDR.png
Normal file
Binary file not shown. (After: 62 KiB)

BIN
note/shuguang.png
Normal file
Binary file not shown. (After: 12 KiB)
@ -1,10 +1,11 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <groupId>com.github.chrislusf</groupId>
    <artifactId>seaweedfs-client</artifactId>
    <version>1.2.4</version>
    <version>1.6.4</version>

    <parent>
        <groupId>org.sonatype.oss</groupId>
@ -16,7 +17,7 @@
        <protobuf.version>3.9.1</protobuf.version>
        <!-- follow https://github.com/grpc/grpc-java -->
        <grpc.version>1.23.0</grpc.version>
        <guava.version>28.0-jre</guava.version>
        <guava.version>30.0-jre</guava.version>
    </properties>

    <dependencies>
@ -64,9 +65,14 @@
        <dependency>
            <groupId>junit</groupId>
            <artifactId>junit</artifactId>
            <version>4.12</version>
            <version>4.13.1</version>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>javax.annotation</groupId>
            <artifactId>javax.annotation-api</artifactId>
            <version>1.3.2</version>
        </dependency>
    </dependencies>

    <distributionManagement>
@ -88,8 +94,8 @@
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-compiler-plugin</artifactId>
                <configuration>
                    <source>7</source>
                    <target>7</target>
                    <source>8</source>
                    <target>8</target>
                </configuration>
            </plugin>
            <plugin>
@ -97,9 +103,11 @@
                <artifactId>protobuf-maven-plugin</artifactId>
                <version>0.6.1</version>
                <configuration>
                    <protocArtifact>com.google.protobuf:protoc:${protobuf.version}:exe:${os.detected.classifier}</protocArtifact>
                    <protocArtifact>com.google.protobuf:protoc:${protobuf.version}:exe:${os.detected.classifier}
                    </protocArtifact>
                    <pluginId>grpc-java</pluginId>
                    <pluginArtifact>io.grpc:protoc-gen-grpc-java:${grpc.version}:exe:${os.detected.classifier}</pluginArtifact>
                    <pluginArtifact>io.grpc:protoc-gen-grpc-java:${grpc.version}:exe:${os.detected.classifier}
                    </pluginArtifact>
                </configuration>
                <executions>
                    <execution>

170
other/java/client/pom.xml.deploy
Normal file
@ -0,0 +1,170 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <groupId>com.github.chrislusf</groupId>
    <artifactId>seaweedfs-client</artifactId>
    <version>1.6.4</version>

    <parent>
        <groupId>org.sonatype.oss</groupId>
        <artifactId>oss-parent</artifactId>
        <version>9</version>
    </parent>

    <properties>
        <protobuf.version>3.9.1</protobuf.version>
        <!-- follow https://github.com/grpc/grpc-java -->
        <grpc.version>1.23.0</grpc.version>
        <guava.version>28.0-jre</guava.version>
    </properties>

    <dependencies>
        <dependency>
            <groupId>com.moandjiezana.toml</groupId>
            <artifactId>toml4j</artifactId>
            <version>0.7.2</version>
        </dependency>
        <!-- https://mvnrepository.com/artifact/com.google.protobuf/protobuf-java -->
        <dependency>
            <groupId>com.google.protobuf</groupId>
            <artifactId>protobuf-java</artifactId>
            <version>${protobuf.version}</version>
        </dependency>
        <dependency>
            <groupId>com.google.guava</groupId>
            <artifactId>guava</artifactId>
            <version>${guava.version}</version>
        </dependency>
        <dependency>
            <groupId>io.grpc</groupId>
            <artifactId>grpc-netty-shaded</artifactId>
            <version>${grpc.version}</version>
        </dependency>
        <dependency>
            <groupId>io.grpc</groupId>
            <artifactId>grpc-protobuf</artifactId>
            <version>${grpc.version}</version>
        </dependency>
        <dependency>
            <groupId>io.grpc</groupId>
            <artifactId>grpc-stub</artifactId>
            <version>${grpc.version}</version>
        </dependency>
        <dependency>
            <groupId>org.slf4j</groupId>
            <artifactId>slf4j-api</artifactId>
            <version>1.7.25</version>
        </dependency>
        <dependency>
            <groupId>org.apache.httpcomponents</groupId>
            <artifactId>httpmime</artifactId>
            <version>4.5.6</version>
        </dependency>
        <dependency>
            <groupId>junit</groupId>
            <artifactId>junit</artifactId>
            <version>4.12</version>
            <scope>test</scope>
        </dependency>
    </dependencies>

    <distributionManagement>
        <snapshotRepository>
            <id>ossrh</id>
            <url>https://oss.sonatype.org/content/repositories/snapshots</url>
        </snapshotRepository>
    </distributionManagement>
    <build>
        <extensions>
            <extension>
                <groupId>kr.motd.maven</groupId>
                <artifactId>os-maven-plugin</artifactId>
                <version>1.6.2</version>
            </extension>
        </extensions>
        <plugins>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-compiler-plugin</artifactId>
                <configuration>
                    <source>8</source>
                    <target>8</target>
                </configuration>
            </plugin>
            <plugin>
                <groupId>org.xolstice.maven.plugins</groupId>
                <artifactId>protobuf-maven-plugin</artifactId>
                <version>0.6.1</version>
                <configuration>
                    <protocArtifact>com.google.protobuf:protoc:${protobuf.version}:exe:${os.detected.classifier}
                    </protocArtifact>
                    <pluginId>grpc-java</pluginId>
                    <pluginArtifact>io.grpc:protoc-gen-grpc-java:${grpc.version}:exe:${os.detected.classifier}
                    </pluginArtifact>
                </configuration>
                <executions>
                    <execution>
                        <goals>
                            <goal>compile</goal>
                            <goal>compile-custom</goal>
                        </goals>
                    </execution>
                </executions>
            </plugin>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-gpg-plugin</artifactId>
                <version>1.5</version>
                <executions>
                    <execution>
                        <id>sign-artifacts</id>
                        <phase>verify</phase>
                        <goals>
                            <goal>sign</goal>
                        </goals>
                    </execution>
                </executions>
            </plugin>
            <plugin>
                <groupId>org.sonatype.plugins</groupId>
                <artifactId>nexus-staging-maven-plugin</artifactId>
                <version>1.6.7</version>
                <extensions>true</extensions>
                <configuration>
                    <serverId>ossrh</serverId>
                    <nexusUrl>https://oss.sonatype.org/</nexusUrl>
                    <autoReleaseAfterClose>true</autoReleaseAfterClose>
                </configuration>
            </plugin>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-source-plugin</artifactId>
                <version>2.2.1</version>
                <executions>
                    <execution>
                        <id>attach-sources</id>
                        <goals>
                            <goal>jar-no-fork</goal>
                        </goals>
                    </execution>
                </executions>
            </plugin>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-javadoc-plugin</artifactId>
                <version>2.9.1</version>
                <executions>
                    <execution>
                        <id>attach-javadocs</id>
                        <goals>
                            <goal>jar</goal>
                        </goals>
                    </execution>
                </executions>
            </plugin>
        </plugins>
    </build>

</project>
144
other/java/client/pom_debug.xml
Normal file
@ -0,0 +1,144 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <groupId>com.github.chrislusf</groupId>
    <artifactId>seaweedfs-client</artifactId>
    <version>1.6.4</version>

    <parent>
        <groupId>org.sonatype.oss</groupId>
        <artifactId>oss-parent</artifactId>
        <version>9</version>
    </parent>

    <properties>
        <protobuf.version>3.9.1</protobuf.version>
        <!-- follow https://github.com/grpc/grpc-java -->
        <grpc.version>1.23.0</grpc.version>
        <guava.version>28.0-jre</guava.version>
    </properties>

    <dependencies>
        <dependency>
            <groupId>com.moandjiezana.toml</groupId>
            <artifactId>toml4j</artifactId>
            <version>0.7.2</version>
        </dependency>
        <!-- https://mvnrepository.com/artifact/com.google.protobuf/protobuf-java -->
        <dependency>
            <groupId>com.google.protobuf</groupId>
            <artifactId>protobuf-java</artifactId>
            <version>${protobuf.version}</version>
        </dependency>
        <dependency>
            <groupId>com.google.guava</groupId>
            <artifactId>guava</artifactId>
            <version>${guava.version}</version>
        </dependency>
        <dependency>
            <groupId>io.grpc</groupId>
            <artifactId>grpc-netty-shaded</artifactId>
            <version>${grpc.version}</version>
        </dependency>
        <dependency>
            <groupId>io.grpc</groupId>
            <artifactId>grpc-protobuf</artifactId>
            <version>${grpc.version}</version>
        </dependency>
        <dependency>
            <groupId>io.grpc</groupId>
            <artifactId>grpc-stub</artifactId>
            <version>${grpc.version}</version>
        </dependency>
        <dependency>
            <groupId>org.slf4j</groupId>
            <artifactId>slf4j-api</artifactId>
            <version>1.7.25</version>
        </dependency>
        <dependency>
            <groupId>org.apache.httpcomponents</groupId>
            <artifactId>httpmime</artifactId>
            <version>4.5.6</version>
        </dependency>
        <dependency>
            <groupId>junit</groupId>
            <artifactId>junit</artifactId>
            <version>4.13.1</version>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>javax.annotation</groupId>
            <artifactId>javax.annotation-api</artifactId>
            <version>1.3.2</version>
        </dependency>
    </dependencies>

    <build>
        <extensions>
            <extension>
                <groupId>kr.motd.maven</groupId>
                <artifactId>os-maven-plugin</artifactId>
                <version>1.6.2</version>
            </extension>
        </extensions>
        <plugins>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-compiler-plugin</artifactId>
                <configuration>
                    <source>8</source>
                    <target>8</target>
                </configuration>
            </plugin>
            <plugin>
                <groupId>org.xolstice.maven.plugins</groupId>
                <artifactId>protobuf-maven-plugin</artifactId>
                <version>0.6.1</version>
                <configuration>
                    <protocArtifact>com.google.protobuf:protoc:${protobuf.version}:exe:${os.detected.classifier}
                    </protocArtifact>
                    <pluginId>grpc-java</pluginId>
                    <pluginArtifact>io.grpc:protoc-gen-grpc-java:${grpc.version}:exe:${os.detected.classifier}
                    </pluginArtifact>
                </configuration>
                <executions>
                    <execution>
                        <goals>
                            <goal>compile</goal>
                            <goal>compile-custom</goal>
                        </goals>
                    </execution>
                </executions>
            </plugin>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-source-plugin</artifactId>
                <version>2.2.1</version>
                <executions>
                    <execution>
                        <id>attach-sources</id>
                        <goals>
                            <goal>jar-no-fork</goal>
                        </goals>
                    </execution>
                </executions>
            </plugin>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-javadoc-plugin</artifactId>
                <version>2.9.1</version>
                <executions>
                    <execution>
                        <id>attach-javadocs</id>
                        <goals>
                            <goal>jar</goal>
                        </goals>
                    </execution>
                </executions>
            </plugin>
        </plugins>
    </build>

</project>
@ -0,0 +1,42 @@
package seaweedfs.client;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.nio.Buffer;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;

public class ByteBufferPool {

    private static final int MIN_BUFFER_SIZE = 8 * 1024 * 1024;
    private static final Logger LOG = LoggerFactory.getLogger(ByteBufferPool.class);

    private static final List<ByteBuffer> bufferList = new ArrayList<>();

    public static synchronized ByteBuffer request(int bufferSize) {
        if (bufferSize < MIN_BUFFER_SIZE) {
            bufferSize = MIN_BUFFER_SIZE;
        }
        LOG.debug("requested new buffer {}", bufferSize);
        if (bufferList.isEmpty()) {
            return ByteBuffer.allocate(bufferSize);
        }
        // reuse the most recently released buffer if it is large enough
        ByteBuffer buffer = bufferList.remove(bufferList.size() - 1);
        if (buffer.capacity() >= bufferSize) {
            return buffer;
        }

        // the pooled buffer is too small: put it back and allocate a bigger one
        LOG.info("add new buffer from {} to {}", buffer.capacity(), bufferSize);
        bufferList.add(0, buffer);
        return ByteBuffer.allocate(bufferSize);

    }

    public static synchronized void release(ByteBuffer obj) {
        // the cast to Buffer keeps this compatible with both JDK 8 and JDK 9+
        ((Buffer) obj).clear();
        bufferList.add(0, obj);
    }

}
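The pool keeps released buffers in a shared list and never hands out less than 8 MiB, so callers are expected to borrow and return buffers rather than allocate per write. A minimal usage sketch (the example class name is illustrative, not part of the client):

import java.nio.Buffer;
import java.nio.ByteBuffer;

public class ByteBufferPoolExample {
    public static void main(String[] args) {
        byte[] data = "hello seaweedfs".getBytes();
        // request() rounds small sizes up to the 8 MiB minimum
        ByteBuffer buffer = ByteBufferPool.request(data.length);
        try {
            buffer.put(data);
            ((Buffer) buffer).flip(); // same JDK 8/9-safe cast the pool itself uses
            // ... hand the buffer to an uploader here ...
        } finally {
            ByteBufferPool.release(buffer); // cleared and kept for reuse
        }
    }
}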
@ -0,0 +1,36 @@
package seaweedfs.client;

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;

import java.util.concurrent.TimeUnit;

public class ChunkCache {

    private Cache<String, byte[]> cache = null;

    public ChunkCache(int maxEntries) {
        if (maxEntries == 0) {
            return;
        }
        this.cache = CacheBuilder.newBuilder()
                .maximumSize(maxEntries)
                .expireAfterAccess(1, TimeUnit.HOURS)
                .build();
    }

    public byte[] getChunk(String fileId) {
        if (this.cache == null) {
            return null;
        }
        return this.cache.getIfPresent(fileId);
    }

    public void setChunk(String fileId, byte[] data) {
        if (this.cache == null) {
            return;
        }
        this.cache.put(fileId, data);
    }

}
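A usage sketch: passing a maxEntries of 0 disables the cache entirely, and cached chunks expire one hour after last access (the file id below is a made-up example):

public class ChunkCacheExample {
    public static void main(String[] args) {
        ChunkCache cache = new ChunkCache(1000); // keep up to 1000 chunks
        byte[] chunk = cache.getChunk("3,1637037d6a2f"); // null on a miss
        if (chunk == null) {
            chunk = new byte[]{1, 2, 3}; // stand-in for a freshly fetched chunk
            cache.setChunk("3,1637037d6a2f", chunk);
        }
    }
}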
@ -0,0 +1,140 @@
package seaweedfs.client;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

public class FileChunkManifest {

    private static final Logger LOG = LoggerFactory.getLogger(FileChunkManifest.class);

    private static final int mergeFactor = 1000;

    public static boolean hasChunkManifest(List<FilerProto.FileChunk> chunks) {
        for (FilerProto.FileChunk chunk : chunks) {
            if (chunk.getIsChunkManifest()) {
                return true;
            }
        }
        return false;
    }

    public static List<FilerProto.FileChunk> resolveChunkManifest(
            final FilerClient filerClient, List<FilerProto.FileChunk> chunks) throws IOException {

        List<FilerProto.FileChunk> dataChunks = new ArrayList<>();

        for (FilerProto.FileChunk chunk : chunks) {
            if (!chunk.getIsChunkManifest()) {
                dataChunks.add(chunk);
                continue;
            }

            // IsChunkManifest
            LOG.debug("fetching chunk manifest:{}", chunk);
            byte[] data = fetchChunk(filerClient, chunk);
            FilerProto.FileChunkManifest m = FilerProto.FileChunkManifest.newBuilder().mergeFrom(data).build();
            List<FilerProto.FileChunk> resolvedChunks = new ArrayList<>();
            for (FilerProto.FileChunk t : m.getChunksList()) {
                // avoid deprecated chunk.getFileId()
                resolvedChunks.add(t.toBuilder().setFileId(FilerClient.toFileId(t.getFid())).build());
            }
            dataChunks.addAll(resolveChunkManifest(filerClient, resolvedChunks));
        }

        return dataChunks;
    }

    private static byte[] fetchChunk(final FilerClient filerClient, FilerProto.FileChunk chunk) throws IOException {

        String vid = "" + chunk.getFid().getVolumeId();
        FilerProto.Locations locations = filerClient.vidLocations.get(vid);
        if (locations == null) {
            FilerProto.LookupVolumeRequest.Builder lookupRequest = FilerProto.LookupVolumeRequest.newBuilder();
            lookupRequest.addVolumeIds(vid);
            FilerProto.LookupVolumeResponse lookupResponse = filerClient
                    .getBlockingStub().lookupVolume(lookupRequest.build());
            locations = lookupResponse.getLocationsMapMap().get(vid);
            filerClient.vidLocations.put(vid, locations);
            LOG.debug("fetchChunk vid:{} locations:{}", vid, locations);
        }

        SeaweedRead.ChunkView chunkView = new SeaweedRead.ChunkView(
                FilerClient.toFileId(chunk.getFid()), // avoid deprecated chunk.getFileId()
                0,
                -1,
                0,
                true,
                chunk.getCipherKey().toByteArray(),
                chunk.getIsCompressed());

        byte[] chunkData = SeaweedRead.chunkCache.getChunk(chunkView.fileId);
        if (chunkData == null) {
            LOG.debug("doFetchFullChunkData:{}", chunkView);
            chunkData = SeaweedRead.doFetchFullChunkData(filerClient, chunkView, locations);
        }
        if (chunk.getIsChunkManifest()) {
            LOG.debug("chunk {} size {}", chunkView.fileId, chunkData.length);
            SeaweedRead.chunkCache.setChunk(chunkView.fileId, chunkData);
        }

        return chunkData;

    }

    public static List<FilerProto.FileChunk> maybeManifestize(
            final FilerClient filerClient, List<FilerProto.FileChunk> inputChunks, String parentDirectory) throws IOException {
        // the return variable
        List<FilerProto.FileChunk> chunks = new ArrayList<>();

        List<FilerProto.FileChunk> dataChunks = new ArrayList<>();
        for (FilerProto.FileChunk chunk : inputChunks) {
            if (!chunk.getIsChunkManifest()) {
                dataChunks.add(chunk);
            } else {
                chunks.add(chunk);
            }
        }

        int remaining = dataChunks.size();
        for (int i = 0; i + mergeFactor < dataChunks.size(); i += mergeFactor) {
            FilerProto.FileChunk chunk = mergeIntoManifest(filerClient, dataChunks.subList(i, i + mergeFactor), parentDirectory);
            chunks.add(chunk);
            remaining -= mergeFactor;
        }

        // remaining
        for (int i = dataChunks.size() - remaining; i < dataChunks.size(); i++) {
            chunks.add(dataChunks.get(i));
        }
        return chunks;
    }

    private static FilerProto.FileChunk mergeIntoManifest(final FilerClient filerClient, List<FilerProto.FileChunk> dataChunks, String parentDirectory) throws IOException {
        // create and serialize the manifest
        dataChunks = FilerClient.beforeEntrySerialization(dataChunks);
        FilerProto.FileChunkManifest.Builder m = FilerProto.FileChunkManifest.newBuilder().addAllChunks(dataChunks);
        byte[] data = m.build().toByteArray();

        long minOffset = Long.MAX_VALUE;
        long maxOffset = -1;
        for (FilerProto.FileChunk chunk : dataChunks) {
            minOffset = Math.min(minOffset, chunk.getOffset());
            maxOffset = Math.max(maxOffset, chunk.getSize() + chunk.getOffset());
        }

        FilerProto.FileChunk.Builder manifestChunk = SeaweedWrite.writeChunk(
                filerClient.getReplication(),
                filerClient,
                minOffset,
                data, 0, data.length, parentDirectory);
        manifestChunk.setIsChunkManifest(true);
        manifestChunk.setSize(maxOffset - minOffset);
        return manifestChunk.build();

    }

}
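With mergeFactor = 1000, maybeManifestize folds each full group of 1,000 data chunks into one manifest chunk and passes the remainder through, so 2,500 data chunks become 2 manifest chunks plus 500 plain chunks; resolveChunkManifest reverses this recursively. A hedged read-side sketch, assuming a reachable filer on the conventional gRPC port and an existing entry (the path and file name are placeholders):

import java.io.IOException;
import java.util.List;

public class ManifestExample {
    public static void main(String[] args) throws IOException {
        FilerClient filerClient = new FilerClient("localhost", 18888); // filer gRPC port = http port + 10000
        FilerProto.Entry entry = filerClient.lookupEntry("/buckets/b1", "large.bin");
        // expand any manifest chunks back into plain data chunks before reading
        List<FilerProto.FileChunk> dataChunks =
                FileChunkManifest.resolveChunkManifest(filerClient, entry.getChunksList());
        System.out.println("data chunks: " + dataChunks.size());
    }
}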
@ -1,27 +1,82 @@
package seaweedfs.client;

import com.google.common.base.Strings;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.nio.file.Path;
import java.nio.file.Paths;
import java.io.File;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;

public class FilerClient {
public class FilerClient extends FilerGrpcClient {

    private static final Logger LOG = LoggerFactory.getLogger(FilerClient.class);

    private FilerGrpcClient filerGrpcClient;

    public FilerClient(String host, int grpcPort) {
        filerGrpcClient = new FilerGrpcClient(host, grpcPort);
        super(host, grpcPort);
    }

    public FilerClient(FilerGrpcClient filerGrpcClient) {
        this.filerGrpcClient = filerGrpcClient;
    public static String toFileId(FilerProto.FileId fid) {
        if (fid == null) {
            return null;
        }
        return String.format("%d,%x%08x", fid.getVolumeId(), fid.getFileKey(), fid.getCookie());
    }

    public static FilerProto.FileId toFileIdObject(String fileIdStr) {
        if (fileIdStr == null || fileIdStr.length() == 0) {
            return null;
        }
        int commaIndex = fileIdStr.lastIndexOf(',');
        String volumeIdStr = fileIdStr.substring(0, commaIndex);
        String fileKeyStr = fileIdStr.substring(commaIndex + 1, fileIdStr.length() - 8);
        String cookieStr = fileIdStr.substring(fileIdStr.length() - 8);

        return FilerProto.FileId.newBuilder()
                .setVolumeId(Integer.parseInt(volumeIdStr))
                .setFileKey(Long.parseLong(fileKeyStr, 16))
                .setCookie((int) Long.parseLong(cookieStr, 16))
                .build();
    }

    public static List<FilerProto.FileChunk> beforeEntrySerialization(List<FilerProto.FileChunk> chunks) {
        List<FilerProto.FileChunk> cleanedChunks = new ArrayList<>();
        for (FilerProto.FileChunk chunk : chunks) {
            FilerProto.FileChunk.Builder chunkBuilder = chunk.toBuilder();
            chunkBuilder.clearFileId();
            chunkBuilder.clearSourceFileId();
            chunkBuilder.setFid(toFileIdObject(chunk.getFileId()));
            FilerProto.FileId sourceFid = toFileIdObject(chunk.getSourceFileId());
            if (sourceFid != null) {
                chunkBuilder.setSourceFid(sourceFid);
            }
            cleanedChunks.add(chunkBuilder.build());
        }
        return cleanedChunks;
    }

    public static FilerProto.Entry afterEntryDeserialization(FilerProto.Entry entry) {
        if (entry.getChunksList().size() <= 0) {
            return entry;
        }
        String fileId = entry.getChunks(0).getFileId();
        if (fileId != null && fileId.length() != 0) {
            return entry;
        }
        FilerProto.Entry.Builder entryBuilder = entry.toBuilder();
        entryBuilder.clearChunks();
        for (FilerProto.FileChunk chunk : entry.getChunksList()) {
            FilerProto.FileChunk.Builder chunkBuilder = chunk.toBuilder();
            chunkBuilder.setFileId(toFileId(chunk.getFid()));
            String sourceFileId = toFileId(chunk.getSourceFid());
            if (sourceFileId != null) {
                chunkBuilder.setSourceFileId(sourceFileId);
            }
            entryBuilder.addChunks(chunkBuilder);
        }
        return entryBuilder.build();
    }

    public boolean mkdirs(String path, int mode) {
@ -38,9 +93,9 @@ public class FilerClient {
        if ("/".equals(path)) {
            return true;
        }
        Path pathObject = Paths.get(path);
        String parent = pathObject.getParent().toString();
        String name = pathObject.getFileName().toString();
        File pathFile = new File(path);
        String parent = pathFile.getParent().replace('\\', '/');
        String name = pathFile.getName();

        mkdirs(parent, mode, uid, gid, userName, groupNames);

@ -59,13 +114,13 @@ public class FilerClient {

    public boolean mv(String oldPath, String newPath) {

        Path oldPathObject = Paths.get(oldPath);
        String oldParent = oldPathObject.getParent().toString();
        String oldName = oldPathObject.getFileName().toString();
        File oldPathFile = new File(oldPath);
        String oldParent = oldPathFile.getParent().replace('\\', '/');
        String oldName = oldPathFile.getName();

        Path newPathObject = Paths.get(newPath);
        String newParent = newPathObject.getParent().toString();
        String newName = newPathObject.getFileName().toString();
        File newPathFile = new File(newPath);
        String newParent = newPathFile.getParent().replace('\\', '/');
        String newName = newPathFile.getName();

        return atomicRenameEntry(oldParent, oldName, newParent, newName);

@ -73,9 +128,9 @@ public class FilerClient {

    public boolean rm(String path, boolean isRecursive, boolean ignoreRecusiveError) {

        Path pathObject = Paths.get(path);
        String parent = pathObject.getParent().toString();
        String name = pathObject.getFileName().toString();
        File pathFile = new File(path);
        String parent = pathFile.getParent().replace('\\', '/');
        String name = pathFile.getName();

        return deleteEntry(
                parent,
@ -92,9 +147,9 @@ public class FilerClient {

    public boolean touch(String path, int mode, int uid, int gid, String userName, String[] groupNames) {

        Path pathObject = Paths.get(path);
        String parent = pathObject.getParent().toString();
        String name = pathObject.getFileName().toString();
        File pathFile = new File(path);
        String parent = pathFile.getParent().replace('\\', '/');
        String name = pathFile.getName();

        FilerProto.Entry entry = lookupEntry(parent, name);
        if (entry == null) {
@ -156,7 +211,7 @@ public class FilerClient {
        List<FilerProto.Entry> results = new ArrayList<FilerProto.Entry>();
        String lastFileName = "";
        for (int limit = Integer.MAX_VALUE; limit > 0; ) {
            List<FilerProto.Entry> t = listEntries(path, "", lastFileName, 1024);
            List<FilerProto.Entry> t = listEntries(path, "", lastFileName, 1024, false);
            if (t == null) {
                break;
            }
@ -173,31 +228,35 @@ public class FilerClient {
        return results;
    }

    public List<FilerProto.Entry> listEntries(String path, String entryPrefix, String lastEntryName, int limit) {
        Iterator<FilerProto.ListEntriesResponse> iter = filerGrpcClient.getBlockingStub().listEntries(FilerProto.ListEntriesRequest.newBuilder()
    public List<FilerProto.Entry> listEntries(String path, String entryPrefix, String lastEntryName, int limit, boolean includeLastEntry) {
        Iterator<FilerProto.ListEntriesResponse> iter = this.getBlockingStub().listEntries(FilerProto.ListEntriesRequest.newBuilder()
                .setDirectory(path)
                .setPrefix(entryPrefix)
                .setStartFromFileName(lastEntryName)
                .setInclusiveStartFrom(includeLastEntry)
                .setLimit(limit)
                .build());
        List<FilerProto.Entry> entries = new ArrayList<>();
        while (iter.hasNext()){
        while (iter.hasNext()) {
            FilerProto.ListEntriesResponse resp = iter.next();
            entries.add(fixEntryAfterReading(resp.getEntry()));
            entries.add(afterEntryDeserialization(resp.getEntry()));
        }
        return entries;
    }

    public FilerProto.Entry lookupEntry(String directory, String entryName) {
        try {
            FilerProto.Entry entry = filerGrpcClient.getBlockingStub().lookupDirectoryEntry(
            FilerProto.Entry entry = this.getBlockingStub().lookupDirectoryEntry(
                    FilerProto.LookupDirectoryEntryRequest.newBuilder()
                            .setDirectory(directory)
                            .setName(entryName)
                            .build()).getEntry();
            return fixEntryAfterReading(entry);
            if (entry == null) {
                return null;
            }
            return afterEntryDeserialization(entry);
        } catch (Exception e) {
            if (e.getMessage().indexOf("filer: no entry is found in filer store")>0){
            if (e.getMessage().indexOf("filer: no entry is found in filer store") > 0) {
                return null;
            }
            LOG.warn("lookupEntry {}/{}: {}", directory, entryName, e);
@ -205,28 +264,32 @@ public class FilerClient {
        }
    }


    public boolean createEntry(String parent, FilerProto.Entry entry) {
        try {
            filerGrpcClient.getBlockingStub().createEntry(FilerProto.CreateEntryRequest.newBuilder()
                    .setDirectory(parent)
                    .setEntry(entry)
                    .build());
            FilerProto.CreateEntryResponse createEntryResponse =
                    this.getBlockingStub().createEntry(FilerProto.CreateEntryRequest.newBuilder()
                            .setDirectory(parent)
                            .setEntry(entry)
                            .build());
            if (Strings.isNullOrEmpty(createEntryResponse.getError())) {
                return true;
            }
            LOG.warn("createEntry {}/{} error: {}", parent, entry.getName(), createEntryResponse.getError());
            return false;
        } catch (Exception e) {
            LOG.warn("createEntry {}/{}: {}", parent, entry.getName(), e);
            return false;
        }
        return true;
    }

    public boolean updateEntry(String parent, FilerProto.Entry entry) {
        try {
            filerGrpcClient.getBlockingStub().updateEntry(FilerProto.UpdateEntryRequest.newBuilder()
            this.getBlockingStub().updateEntry(FilerProto.UpdateEntryRequest.newBuilder()
                    .setDirectory(parent)
                    .setEntry(entry)
                    .build());
        } catch (Exception e) {
            LOG.warn("createEntry {}/{}: {}", parent, entry.getName(), e);
            LOG.warn("updateEntry {}/{}: {}", parent, entry.getName(), e);
            return false;
        }
        return true;
@ -234,7 +297,7 @@ public class FilerClient {

    public boolean deleteEntry(String parent, String entryName, boolean isDeleteFileChunk, boolean isRecursive, boolean ignoreRecusiveError) {
        try {
            filerGrpcClient.getBlockingStub().deleteEntry(FilerProto.DeleteEntryRequest.newBuilder()
            this.getBlockingStub().deleteEntry(FilerProto.DeleteEntryRequest.newBuilder()
                    .setDirectory(parent)
                    .setName(entryName)
                    .setIsDeleteData(isDeleteFileChunk)
@ -250,7 +313,7 @@ public class FilerClient {

    public boolean atomicRenameEntry(String oldParent, String oldName, String newParent, String newName) {
        try {
            filerGrpcClient.getBlockingStub().atomicRenameEntry(FilerProto.AtomicRenameEntryRequest.newBuilder()
            this.getBlockingStub().atomicRenameEntry(FilerProto.AtomicRenameEntryRequest.newBuilder()
                    .setOldDirectory(oldParent)
                    .setOldName(oldName)
                    .setNewDirectory(newParent)
@ -263,24 +326,13 @@ public class FilerClient {
        return true;
    }

    private FilerProto.Entry fixEntryAfterReading(FilerProto.Entry entry) {
        if (entry.getChunksList().size() <= 0) {
            return entry;
        }
        String fileId = entry.getChunks(0).getFileId();
        if (fileId != null && fileId.length() != 0) {
            return entry;
        }
        FilerProto.Entry.Builder entryBuilder = entry.toBuilder();
        entryBuilder.clearChunks();
        for (FilerProto.FileChunk chunk : entry.getChunksList()) {
            FilerProto.FileChunk.Builder chunkBuilder = chunk.toBuilder();
            FilerProto.FileId fid = chunk.getFid();
            fileId = String.format("%d,%d%x", fid.getVolumeId(), fid.getFileKey(), fid.getCookie());
            chunkBuilder.setFileId(fileId);
            entryBuilder.addChunks(chunkBuilder);
        }
        return entryBuilder.build();
    public Iterator<FilerProto.SubscribeMetadataResponse> watch(String prefix, String clientName, long sinceNs) {
        return this.getBlockingStub().subscribeMetadata(FilerProto.SubscribeMetadataRequest.newBuilder()
                .setPathPrefix(prefix)
                .setClientName(clientName)
                .setSinceNs(sinceNs)
                .build()
        );
    }

}

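The file id string is the volume id, a comma, the file key in hex, and a fixed 8-hex-digit cookie, which is exactly what the %d,%x%08x format above produces and what toFileIdObject parses back. A round-trip sketch with made-up values:

public class FileIdExample {
    public static void main(String[] args) {
        FilerProto.FileId fid = FilerProto.FileId.newBuilder()
                .setVolumeId(3)
                .setFileKey(0x1637L)
                .setCookie(0x037d6a2f)
                .build();
        String fileId = FilerClient.toFileId(fid); // "3,1637037d6a2f"
        FilerProto.FileId parsed = FilerClient.toFileIdObject(fileId);
        // parsed now equals fid: volume 3, key 0x1637, cookie 0x037d6a2f
        System.out.println(fileId + " -> volume " + parsed.getVolumeId());
    }
}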
@ -9,17 +9,13 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import javax.net.ssl.SSLException;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;

public class FilerGrpcClient {

    private static final Logger logger = LoggerFactory.getLogger(FilerGrpcClient.class);

    private final ManagedChannel channel;
    private final SeaweedFilerGrpc.SeaweedFilerBlockingStub blockingStub;
    private final SeaweedFilerGrpc.SeaweedFilerStub asyncStub;
    private final SeaweedFilerGrpc.SeaweedFilerFutureStub futureStub;

    static SslContext sslContext;

    static {
@ -30,6 +26,20 @@ public class FilerGrpcClient {
        }
    }

    public final int VOLUME_SERVER_ACCESS_DIRECT = 0;
    public final int VOLUME_SERVER_ACCESS_PUBLIC_URL = 1;
    public final int VOLUME_SERVER_ACCESS_FILER_PROXY = 2;
    public final Map<String, FilerProto.Locations> vidLocations = new HashMap<>();
    private final ManagedChannel channel;
    private final SeaweedFilerGrpc.SeaweedFilerBlockingStub blockingStub;
    private final SeaweedFilerGrpc.SeaweedFilerStub asyncStub;
    private final SeaweedFilerGrpc.SeaweedFilerFutureStub futureStub;
    private boolean cipher = false;
    private String collection = "";
    private String replication = "";
    private int volumeServerAccess = VOLUME_SERVER_ACCESS_DIRECT;
    private String filerAddress;

    public FilerGrpcClient(String host, int grpcPort) {
        this(host, grpcPort, sslContext);
    }
@ -37,20 +47,43 @@ public class FilerGrpcClient {
    public FilerGrpcClient(String host, int grpcPort, SslContext sslContext) {

        this(sslContext == null ?
                ManagedChannelBuilder.forAddress(host, grpcPort).usePlaintext() :
                ManagedChannelBuilder.forAddress(host, grpcPort).usePlaintext()
                        .maxInboundMessageSize(1024 * 1024 * 1024) :
                NettyChannelBuilder.forAddress(host, grpcPort)
                        .maxInboundMessageSize(1024 * 1024 * 1024)
                        .negotiationType(NegotiationType.TLS)
                        .sslContext(sslContext));

        filerAddress = String.format("%s:%d", host, grpcPort - 10000);

        FilerProto.GetFilerConfigurationResponse filerConfigurationResponse =
                this.getBlockingStub().getFilerConfiguration(
                        FilerProto.GetFilerConfigurationRequest.newBuilder().build());
        cipher = filerConfigurationResponse.getCipher();
        collection = filerConfigurationResponse.getCollection();
        replication = filerConfigurationResponse.getReplication();

    }

    public FilerGrpcClient(ManagedChannelBuilder<?> channelBuilder) {
    private FilerGrpcClient(ManagedChannelBuilder<?> channelBuilder) {
        channel = channelBuilder.build();
        blockingStub = SeaweedFilerGrpc.newBlockingStub(channel);
        asyncStub = SeaweedFilerGrpc.newStub(channel);
        futureStub = SeaweedFilerGrpc.newFutureStub(channel);
    }

    public boolean isCipher() {
        return cipher;
    }

    public String getCollection() {
        return collection;
    }

    public String getReplication() {
        return replication;
    }

    public void shutdown() throws InterruptedException {
        channel.shutdown().awaitTermination(5, TimeUnit.SECONDS);
    }
@ -67,4 +100,39 @@ public class FilerGrpcClient {
        return futureStub;
    }

    public void setAccessVolumeServerDirectly() {
        this.volumeServerAccess = VOLUME_SERVER_ACCESS_DIRECT;
    }

    public boolean isAccessVolumeServerDirectly() {
        return this.volumeServerAccess == VOLUME_SERVER_ACCESS_DIRECT;
    }

    public void setAccessVolumeServerByPublicUrl() {
        this.volumeServerAccess = VOLUME_SERVER_ACCESS_PUBLIC_URL;
    }

    public boolean isAccessVolumeServerByPublicUrl() {
        return this.volumeServerAccess == VOLUME_SERVER_ACCESS_PUBLIC_URL;
    }

    public void setAccessVolumeServerByFilerProxy() {
        this.volumeServerAccess = VOLUME_SERVER_ACCESS_FILER_PROXY;
    }

    public boolean isAccessVolumeServerByFilerProxy() {
        return this.volumeServerAccess == VOLUME_SERVER_ACCESS_FILER_PROXY;
    }

    public String getChunkUrl(String chunkId, String url, String publicUrl) {
        switch (this.volumeServerAccess) {
            case VOLUME_SERVER_ACCESS_PUBLIC_URL:
                return String.format("http://%s/%s", publicUrl, chunkId);
            case VOLUME_SERVER_ACCESS_FILER_PROXY:
                return String.format("http://%s/?proxyChunkId=%s", this.filerAddress, chunkId);
            default:
                return String.format("http://%s/%s", url, chunkId);
        }
    }

}

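The three access modes only change how getChunkUrl forms a chunk URL; the gRPC connection itself is untouched. A sketch, assuming a reachable filer (the constructor fetches the filer configuration over gRPC) and illustrative host names:

public class ChunkUrlExample {
    public static void main(String[] args) {
        FilerClient client = new FilerClient("localhost", 18888); // FilerClient extends FilerGrpcClient

        // default: direct volume server access
        String direct = client.getChunkUrl("3,1637037d6a2f", "10.0.0.5:8080", "vol1.example.com");
        // -> http://10.0.0.5:8080/3,1637037d6a2f

        client.setAccessVolumeServerByPublicUrl();
        String viaPublicUrl = client.getChunkUrl("3,1637037d6a2f", "10.0.0.5:8080", "vol1.example.com");
        // -> http://vol1.example.com/3,1637037d6a2f

        client.setAccessVolumeServerByFilerProxy();
        String viaFiler = client.getChunkUrl("3,1637037d6a2f", "10.0.0.5:8080", "vol1.example.com");
        // -> http://localhost:8888/?proxyChunkId=3,1637037d6a2f (filerAddress is the gRPC port minus 10000)
    }
}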
41
other/java/client/src/main/java/seaweedfs/client/Gzip.java
Normal file
@ -0,0 +1,41 @@
package seaweedfs.client;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.zip.GZIPInputStream;
import java.util.zip.GZIPOutputStream;

public class Gzip {
    public static byte[] compress(byte[] data) throws IOException {
        ByteArrayOutputStream bos = new ByteArrayOutputStream(data.length);
        GZIPOutputStream gzip = new GZIPOutputStream(bos);
        gzip.write(data);
        gzip.close();
        byte[] compressed = bos.toByteArray();
        bos.close();
        return compressed;
    }

    public static byte[] decompress(byte[] compressed) {
        try {
            ByteArrayInputStream bis = new ByteArrayInputStream(compressed);
            GZIPInputStream gis = new GZIPInputStream(bis);
            return readAll(gis);
        } catch (Exception e) {
            return compressed;
        }
    }

    private static byte[] readAll(InputStream input) throws IOException {
        try (ByteArrayOutputStream output = new ByteArrayOutputStream()) {
            byte[] buffer = new byte[4096];
            int n;
            while (-1 != (n = input.read(buffer))) {
                output.write(buffer, 0, n);
            }
            return output.toByteArray();
        }
    }
}
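A round-trip sketch; note that decompress deliberately returns its input unchanged when the bytes are not valid gzip, so callers can pass possibly-compressed data through it safely:

import java.io.IOException;
import java.nio.charset.StandardCharsets;

public class GzipExample {
    public static void main(String[] args) throws IOException {
        byte[] original = "hello seaweedfs".getBytes(StandardCharsets.UTF_8);
        byte[] compressed = Gzip.compress(original);
        byte[] restored = Gzip.decompress(compressed);
        System.out.println(new String(restored, StandardCharsets.UTF_8)); // hello seaweedfs

        // non-gzip input falls through untouched instead of throwing
        byte[] passthrough = Gzip.decompress(original);
        System.out.println(passthrough == original); // true
    }
}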
@ -0,0 +1,55 @@
package seaweedfs.client;

import javax.crypto.Cipher;
import javax.crypto.spec.GCMParameterSpec;
import javax.crypto.spec.SecretKeySpec;
import java.security.SecureRandom;

public class SeaweedCipher {
    // AES-GCM parameters
    public static final int AES_KEY_SIZE = 256; // in bits
    public static final int GCM_NONCE_LENGTH = 12; // in bytes
    public static final int GCM_TAG_LENGTH = 16; // in bytes

    private static SecureRandom random = new SecureRandom();

    public static byte[] genCipherKey() throws Exception {
        byte[] key = new byte[AES_KEY_SIZE / 8];
        random.nextBytes(key);
        return key;
    }

    public static byte[] encrypt(byte[] clearTextbytes, byte[] cipherKey) throws Exception {
        return encrypt(clearTextbytes, 0, clearTextbytes.length, cipherKey);
    }

    public static byte[] encrypt(byte[] clearTextbytes, int offset, int length, byte[] cipherKey) throws Exception {

        final byte[] nonce = new byte[GCM_NONCE_LENGTH];
        random.nextBytes(nonce);
        GCMParameterSpec spec = new GCMParameterSpec(GCM_TAG_LENGTH * 8, nonce);
        SecretKeySpec keySpec = new SecretKeySpec(cipherKey, "AES");

        Cipher AES_cipherInstance = Cipher.getInstance("AES/GCM/NoPadding");
        AES_cipherInstance.init(Cipher.ENCRYPT_MODE, keySpec, spec);

        byte[] encryptedText = AES_cipherInstance.doFinal(clearTextbytes, offset, length);

        // prepend the nonce so decrypt() can recover it from the message itself
        byte[] iv = AES_cipherInstance.getIV();
        byte[] message = new byte[GCM_NONCE_LENGTH + clearTextbytes.length + GCM_TAG_LENGTH];
        System.arraycopy(iv, 0, message, 0, GCM_NONCE_LENGTH);
        System.arraycopy(encryptedText, 0, message, GCM_NONCE_LENGTH, encryptedText.length);

        return message;
    }

    public static byte[] decrypt(byte[] encryptedText, byte[] cipherKey) throws Exception {
        final Cipher AES_cipherInstance = Cipher.getInstance("AES/GCM/NoPadding");
        GCMParameterSpec params = new GCMParameterSpec(GCM_TAG_LENGTH * 8, encryptedText, 0, GCM_NONCE_LENGTH);
        SecretKeySpec keySpec = new SecretKeySpec(cipherKey, "AES");
        AES_cipherInstance.init(Cipher.DECRYPT_MODE, keySpec, params);
        byte[] decryptedText = AES_cipherInstance.doFinal(encryptedText, GCM_NONCE_LENGTH, encryptedText.length - GCM_NONCE_LENGTH);
        return decryptedText;
    }

}
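An AES-256-GCM round-trip sketch with a freshly generated key; the 12-byte nonce travels as the first bytes of the sealed message, which is why decrypt needs no separate IV argument:

public class CipherExample {
    public static void main(String[] args) throws Exception {
        byte[] key = SeaweedCipher.genCipherKey(); // 256-bit random key
        byte[] sealed = SeaweedCipher.encrypt("secret".getBytes(), key);
        byte[] opened = SeaweedCipher.decrypt(sealed, key);
        System.out.println(new String(opened)); // secret
    }
}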
@ -0,0 +1,208 @@

package seaweedfs.client;

// based on org.apache.hadoop.fs.azurebfs.services.AbfsInputStream

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;
import java.util.List;

public class SeaweedInputStream extends InputStream {

    private static final Logger LOG = LoggerFactory.getLogger(SeaweedInputStream.class);
    private static final IOException EXCEPTION_STREAM_IS_CLOSED = new IOException("Stream is closed!");

    private final FilerClient filerClient;
    private final String path;
    private final FilerProto.Entry entry;
    private final List<SeaweedRead.VisibleInterval> visibleIntervalList;
    private final long contentLength;

    private long position = 0; // cursor of the file

    private boolean closed = false;

    public SeaweedInputStream(
            final FilerClient filerClient,
            final String fullpath) throws IOException {
        this.path = fullpath;
        this.filerClient = filerClient;
        this.entry = filerClient.lookupEntry(
                SeaweedOutputStream.getParentDirectory(fullpath),
                SeaweedOutputStream.getFileName(fullpath));
        this.contentLength = SeaweedRead.fileSize(entry);

        this.visibleIntervalList = SeaweedRead.nonOverlappingVisibleIntervals(filerClient, entry.getChunksList());

        LOG.debug("new path:{} entry:{} visibleIntervalList:{}", path, entry, visibleIntervalList);

    }

    public SeaweedInputStream(
            final FilerClient filerClient,
            final String path,
            final FilerProto.Entry entry) throws IOException {
        this.filerClient = filerClient;
        this.path = path;
        this.entry = entry;
        this.contentLength = SeaweedRead.fileSize(entry);

        this.visibleIntervalList = SeaweedRead.nonOverlappingVisibleIntervals(filerClient, entry.getChunksList());

        LOG.debug("new path:{} entry:{} visibleIntervalList:{}", path, entry, visibleIntervalList);

    }

    public String getPath() {
        return path;
    }

    @Override
    public int read() throws IOException {
        byte[] b = new byte[1];
        int numberOfBytesRead = read(b, 0, 1);
        if (numberOfBytesRead < 0) {
            return -1;
        } else {
            return (b[0] & 0xFF);
        }
    }

    @Override
    public int read(final byte[] b, final int off, final int len) throws IOException {

        if (b == null) {
            throw new IllegalArgumentException("null byte array passed in to read() method");
        }
        if (off >= b.length) {
            throw new IllegalArgumentException("offset greater than length of array");
        }
        if (len < 0) {
            throw new IllegalArgumentException("requested read length is less than zero");
        }
        if (len > (b.length - off)) {
            throw new IllegalArgumentException("requested read length is more than will fit after requested offset in buffer");
        }

        ByteBuffer buf = ByteBuffer.wrap(b, off, len);
        return read(buf);

    }

    // implement ByteBufferReadable
    public synchronized int read(ByteBuffer buf) throws IOException {

        if (position < 0) {
            throw new IllegalArgumentException("attempting to read from negative offset");
        }
        if (position >= contentLength) {
            return -1; // Hadoop prefers -1 to EOFException
        }

        long bytesRead = 0;
        int len = buf.remaining();
        int start = (int) this.position;
        if (start + len <= entry.getContent().size()) {
            entry.getContent().substring(start, start + len).copyTo(buf);
        } else {
            bytesRead = SeaweedRead.read(this.filerClient, this.visibleIntervalList, this.position, buf, SeaweedRead.fileSize(entry));
        }

        if (bytesRead > Integer.MAX_VALUE) {
            throw new IOException("Unexpected Content-Length");
        }

        if (bytesRead > 0) {
            this.position += bytesRead;
        }

        return (int) bytesRead;
    }

    public synchronized void seek(long n) throws IOException {
        if (closed) {
            throw EXCEPTION_STREAM_IS_CLOSED;
        }
        if (n < 0) {
            throw new EOFException("Cannot seek to a negative offset");
        }
        if (n > contentLength) {
            throw new EOFException("Attempted to seek or read past the end of the file");
        }
        this.position = n;
    }

    @Override
    public synchronized long skip(long n) throws IOException {
        if (closed) {
            throw EXCEPTION_STREAM_IS_CLOSED;
        }
        if (this.position == contentLength) {
            if (n > 0) {
                throw new EOFException("Attempted to seek or read past the end of the file");
            }
        }
        long newPos = this.position + n;
        if (newPos < 0) {
            newPos = 0;
            n = newPos - this.position;
        }
        if (newPos > contentLength) {
            newPos = contentLength;
            n = newPos - this.position;
        }
        seek(newPos);
        return n;
    }

    /**
     * Return the size of the remaining available bytes
     * if the size is less than or equal to {@link Integer#MAX_VALUE},
     * otherwise, return {@link Integer#MAX_VALUE}.
     * <p>
     * This is to match the behavior of DFSInputStream.available(),
     * which some clients may rely on (HBase write-ahead log reading in
     * particular).
     */
    @Override
    public synchronized int available() throws IOException {
        if (closed) {
            throw EXCEPTION_STREAM_IS_CLOSED;
        }
        final long remaining = this.contentLength - this.position;
        return remaining <= Integer.MAX_VALUE
                ? (int) remaining : Integer.MAX_VALUE;
    }

    /**
     * Returns the length of the file that this stream refers to. Note that the length returned is the length
     * as of the time the stream was opened. Specifically, if there have been subsequent appends to the file,
     * they won't be reflected in the returned length.
     *
     * @return length of the file.
     * @throws IOException if the stream is closed
     */
    public long length() throws IOException {
        if (closed) {
            throw EXCEPTION_STREAM_IS_CLOSED;
        }
        return contentLength;
    }

    public synchronized long getPos() throws IOException {
        if (closed) {
            throw EXCEPTION_STREAM_IS_CLOSED;
        }
        return position;
    }

    @Override
    public synchronized void close() throws IOException {
        closed = true;
    }

}
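A short usage sketch for the stream above; the filer host, port, and path are placeholders, and the ExampleReadFile class later in this diff does the same thing end to end:

    FilerClient filerClient = new FilerClient("localhost", 18888); // placeholder host/port
    SeaweedInputStream in = new SeaweedInputStream(filerClient, "/test.zip");
    byte[] buf = new byte[8192];
    int n;
    while ((n = in.read(buf, 0, buf.length)) != -1) {
        // consume n bytes; read() returns -1 at end of file, per InputStream semantics
    }
    in.close();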
@ -1,48 +1,50 @@

package seaweed.hdfs;
package seaweedfs.client;

// adapted from org.apache.hadoop.fs.azurebfs.services.AbfsOutputStream

import com.google.common.base.Preconditions;
import org.apache.hadoop.fs.FSExceptionMessages;
import org.apache.hadoop.fs.Path;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import seaweedfs.client.FilerGrpcClient;
import seaweedfs.client.FilerProto;
import seaweedfs.client.SeaweedWrite;

import java.io.IOException;
import java.io.InterruptedIOException;
import java.io.OutputStream;
import java.nio.Buffer;
import java.nio.ByteBuffer;
import java.util.concurrent.*;

import static seaweed.hdfs.SeaweedFileSystemStore.getParentDirectory;

public class SeaweedOutputStream extends OutputStream {

    private static final Logger LOG = LoggerFactory.getLogger(SeaweedOutputStream.class);

    private final FilerGrpcClient filerGrpcClient;
    private final Path path;
    protected final boolean supportFlush = true;
    private final FilerClient filerClient;
    private final String path;
    private final int bufferSize;
    private final int maxConcurrentRequestCount;
    private final ThreadPoolExecutor threadExecutor;
    private final ExecutorCompletionService<Void> completionService;
    private final ConcurrentLinkedDeque<WriteOperation> writeOperations;
    private final boolean shouldSaveMetadata = false;
    private FilerProto.Entry.Builder entry;
    private long position;
    private boolean closed;
    private boolean supportFlush = true;
    private volatile IOException lastError;
    private long lastFlushOffset;
    private long lastTotalAppendOffset = 0;
    private byte[] buffer;
    private int bufferIndex;
    private ConcurrentLinkedDeque<WriteOperation> writeOperations;
    private ByteBuffer buffer;
    private long outputIndex;
    private String replication = "000";

    public SeaweedOutputStream(FilerGrpcClient filerGrpcClient, final Path path, FilerProto.Entry.Builder entry,
    public SeaweedOutputStream(FilerClient filerClient, final String fullpath) {
        this(filerClient, fullpath, "000");
    }

    public SeaweedOutputStream(FilerClient filerClient, final String fullpath, final String replication) {
        this(filerClient, fullpath, null, 0, 8 * 1024 * 1024, "000");
    }

    public SeaweedOutputStream(FilerClient filerClient, final String path, FilerProto.Entry.Builder entry,
                               final long position, final int bufferSize, final String replication) {
        this.filerGrpcClient = filerGrpcClient;
        this.filerClient = filerClient;
        this.replication = replication;
        this.path = path;
        this.position = position;
@ -50,30 +52,65 @@ public class SeaweedOutputStream extends OutputStream {
        this.lastError = null;
        this.lastFlushOffset = 0;
        this.bufferSize = bufferSize;
        this.buffer = new byte[bufferSize];
        this.bufferIndex = 0;
        this.buffer = ByteBufferPool.request(bufferSize);
        this.outputIndex = 0;
        this.writeOperations = new ConcurrentLinkedDeque<>();

        this.maxConcurrentRequestCount = 4 * Runtime.getRuntime().availableProcessors();
        this.maxConcurrentRequestCount = Runtime.getRuntime().availableProcessors();

        this.threadExecutor
                = new ThreadPoolExecutor(maxConcurrentRequestCount,
                maxConcurrentRequestCount,
                10L,
                TimeUnit.SECONDS,
                new LinkedBlockingQueue<Runnable>());
                = new ThreadPoolExecutor(maxConcurrentRequestCount,
                maxConcurrentRequestCount,
                120L,
                TimeUnit.SECONDS,
                new LinkedBlockingQueue<Runnable>());
        this.completionService = new ExecutorCompletionService<>(this.threadExecutor);

        this.entry = entry;
        if (this.entry == null) {
            long now = System.currentTimeMillis() / 1000L;

            this.entry = FilerProto.Entry.newBuilder()
                    .setName(getFileName(path))
                    .setIsDirectory(false)
                    .setAttributes(FilerProto.FuseAttributes.newBuilder()
                            .setFileMode(0755)
                            .setReplication(replication)
                            .setCrtime(now)
                            .setMtime(now)
                            .clearGroupName()
                    );
        }

    }

    public static String getParentDirectory(String path) {
        int protoIndex = path.indexOf("://");
        if (protoIndex >= 0) {
            int pathStart = path.indexOf("/", protoIndex + 3);
            path = path.substring(pathStart);
        }
        if (path.equals("/")) {
            return path;
        }
        int lastSlashIndex = path.lastIndexOf("/");
        if (lastSlashIndex == 0) {
            return "/";
        }
        return path.substring(0, lastSlashIndex);
    }

    public static String getFileName(String path) {
        if (path.indexOf("/") < 0) {
            return path;
        }
        int lastSlashIndex = path.lastIndexOf("/");
        return path.substring(lastSlashIndex + 1);
    }

    private synchronized void flushWrittenBytesToServiceInternal(final long offset) throws IOException {

        LOG.debug("SeaweedWrite.writeMeta path: {} entry:{}", path, entry);

        try {
            SeaweedWrite.writeMeta(filerGrpcClient, getParentDirectory(path), entry);
            SeaweedWrite.writeMeta(filerClient, getParentDirectory(path), entry);
        } catch (Exception ex) {
            throw new IOException(ex);
        }
@ -87,34 +124,40 @@ public class SeaweedOutputStream extends OutputStream {

    @Override
    public synchronized void write(final byte[] data, final int off, final int length)
            throws IOException {
            throws IOException {
        maybeThrowLastError();

        Preconditions.checkArgument(data != null, "null data");
        if (data == null) {
            return;
        }

        if (off < 0 || length < 0 || length > data.length - off) {
            throw new IndexOutOfBoundsException();
        }

        // System.out.println(path + " write [" + (outputIndex + off) + "," + ((outputIndex + off) + length) + ")");

        int currentOffset = off;
        int writableBytes = bufferSize - bufferIndex;
        int writableBytes = bufferSize - buffer.position();
        int numberOfBytesToWrite = length;

        while (numberOfBytesToWrite > 0) {
            if (writableBytes <= numberOfBytesToWrite) {
                System.arraycopy(data, currentOffset, buffer, bufferIndex, writableBytes);
                bufferIndex += writableBytes;
                writeCurrentBufferToService();
                currentOffset += writableBytes;
                numberOfBytesToWrite = numberOfBytesToWrite - writableBytes;
            } else {
                System.arraycopy(data, currentOffset, buffer, bufferIndex, numberOfBytesToWrite);
                bufferIndex += numberOfBytesToWrite;
                numberOfBytesToWrite = 0;

            if (numberOfBytesToWrite < writableBytes) {
                buffer.put(data, currentOffset, numberOfBytesToWrite);
                outputIndex += numberOfBytesToWrite;
                break;
            }

            writableBytes = bufferSize - bufferIndex;
            // System.out.println(path + " [" + (outputIndex + currentOffset) + "," + ((outputIndex + currentOffset) + writableBytes) + ") " + buffer.capacity());
            buffer.put(data, currentOffset, writableBytes);
            outputIndex += writableBytes;
            currentOffset += writableBytes;
            writeCurrentBufferToService();
            numberOfBytesToWrite = numberOfBytesToWrite - writableBytes;
            writableBytes = bufferSize - buffer.position();
        }

    }

    /**
@ -149,47 +192,53 @@ public class SeaweedOutputStream extends OutputStream {
            flushInternal();
            threadExecutor.shutdown();
        } finally {
            lastError = new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
            lastError = new IOException("Stream is closed!");
            ByteBufferPool.release(buffer);
            buffer = null;
            bufferIndex = 0;
            outputIndex = 0;
            closed = true;
            writeOperations.clear();
            if (!threadExecutor.isShutdown()) {
                threadExecutor.shutdownNow();
            }
        }

    }

    private synchronized void writeCurrentBufferToService() throws IOException {
        if (bufferIndex == 0) {
        if (buffer.position() == 0) {
            return;
        }

        final byte[] bytes = buffer;
        final int bytesLength = bufferIndex;
        position += submitWriteBufferToService(buffer, position);

        buffer = new byte[bufferSize];
        bufferIndex = 0;
        final long offset = position;
        position += bytesLength;
        buffer = ByteBufferPool.request(bufferSize);

        if (threadExecutor.getQueue().size() >= maxConcurrentRequestCount * 2) {
    }

    private synchronized int submitWriteBufferToService(final ByteBuffer bufferToWrite, final long writePosition) throws IOException {

        ((Buffer) bufferToWrite).flip();
        int bytesLength = bufferToWrite.limit() - bufferToWrite.position();

        if (threadExecutor.getQueue().size() >= maxConcurrentRequestCount) {
            waitForTaskToComplete();
        }

        final Future<Void> job = completionService.submit(new Callable<Void>() {
            @Override
            public Void call() throws Exception {
                // originally: client.append(path, offset, bytes, 0, bytesLength);
                SeaweedWrite.writeData(entry, replication, filerGrpcClient, offset, bytes, 0, bytesLength);
                return null;
            }
        final Future<Void> job = completionService.submit(() -> {
            // System.out.println(path + " is going to save [" + (writePosition) + "," + ((writePosition) + bytesLength) + ")");
            SeaweedWrite.writeData(entry, replication, filerClient, writePosition, bufferToWrite.array(), bufferToWrite.position(), bufferToWrite.limit(), path);
            // System.out.println(path + " saved [" + (writePosition) + "," + ((writePosition) + bytesLength) + ")");
            ByteBufferPool.release(bufferToWrite);
            return null;
        });

        writeOperations.add(new WriteOperation(job, offset, bytesLength));
        writeOperations.add(new WriteOperation(job, writePosition, bytesLength));

        // Try to shrink the queue
        shrinkWriteOperationQueue();

        return bytesLength;

    }

    private void waitForTaskToComplete() throws IOException {
@ -231,13 +280,13 @@ public class SeaweedOutputStream extends OutputStream {
        }
    }

    private synchronized void flushInternal() throws IOException {
    protected synchronized void flushInternal() throws IOException {
        maybeThrowLastError();
        writeCurrentBufferToService();
        flushWrittenBytesToService();
    }

    private synchronized void flushInternalAsync() throws IOException {
    protected synchronized void flushInternalAsync() throws IOException {
        maybeThrowLastError();
        writeCurrentBufferToService();
        flushWrittenBytesToServiceAsync();
@ -270,10 +319,6 @@ public class SeaweedOutputStream extends OutputStream {
        private final long length;

        WriteOperation(final Future<Void> task, final long startOffset, final long length) {
            Preconditions.checkNotNull(task, "task");
            Preconditions.checkArgument(startOffset >= 0, "startOffset");
            Preconditions.checkArgument(length >= 0, "length");

            this.task = task;
            this.startOffset = startOffset;
            this.length = length;
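A usage sketch for the rewritten output stream, under the same placeholder assumptions: write() only fills the pooled buffer, full buffers are uploaded by submitWriteBufferToService() on the executor, and close() flushes the last partial buffer and commits the filer entry:

    FilerClient filerClient = new FilerClient("localhost", 18888); // placeholder host/port
    SeaweedOutputStream out = new SeaweedOutputStream(filerClient, "/test/hello.txt");
    byte[] data = "hello world".getBytes();
    out.write(data, 0, data.length); // buffered; a small write uploads nothing yet
    out.close();                     // uploads the remainder and writes the metadata entry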
@ -1,88 +1,200 @@

package seaweedfs.client;

import org.apache.http.Header;
import org.apache.http.HeaderElement;
import org.apache.http.HttpEntity;
import org.apache.http.HttpHeaders;
import org.apache.http.HttpResponse;
import org.apache.http.client.HttpClient;
import org.apache.http.client.entity.GzipDecompressingEntity;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.DefaultHttpClient;
import org.apache.http.util.EntityUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.Closeable;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.util.*;

public class SeaweedRead {

    // private static final Logger LOG = LoggerFactory.getLogger(SeaweedRead.class);
    private static final Logger LOG = LoggerFactory.getLogger(SeaweedRead.class);

    static ChunkCache chunkCache = new ChunkCache(4);
    static VolumeIdCache volumeIdCache = new VolumeIdCache(4 * 1024);

    // returns bytesRead
    public static long read(FilerGrpcClient filerGrpcClient, List<VisibleInterval> visibleIntervals,
                            final long position, final byte[] buffer, final int bufferOffset,
                            final int bufferLength) throws IOException {
    public static long read(FilerClient filerClient, List<VisibleInterval> visibleIntervals,
                            final long position, final ByteBuffer buf, final long fileSize) throws IOException {

        List<ChunkView> chunkViews = viewFromVisibles(visibleIntervals, position, bufferLength);
        List<ChunkView> chunkViews = viewFromVisibles(visibleIntervals, position, buf.remaining());

        Map<String, FilerProto.Locations> knownLocations = new HashMap<>();

        FilerProto.LookupVolumeRequest.Builder lookupRequest = FilerProto.LookupVolumeRequest.newBuilder();
        for (ChunkView chunkView : chunkViews) {
            String vid = parseVolumeId(chunkView.fileId);
            lookupRequest.addVolumeIds(vid);
            FilerProto.Locations locations = volumeIdCache.getLocations(vid);
            if (locations == null) {
                lookupRequest.addVolumeIds(vid);
            } else {
                knownLocations.put(vid, locations);
            }
        }

        FilerProto.LookupVolumeResponse lookupResponse = filerGrpcClient
                .getBlockingStub().lookupVolume(lookupRequest.build());

        Map<String, FilerProto.Locations> vid2Locations = lookupResponse.getLocationsMapMap();
        if (lookupRequest.getVolumeIdsCount() > 0) {
            FilerProto.LookupVolumeResponse lookupResponse = filerClient
                    .getBlockingStub().lookupVolume(lookupRequest.build());
            Map<String, FilerProto.Locations> vid2Locations = lookupResponse.getLocationsMapMap();
            for (Map.Entry<String, FilerProto.Locations> entry : vid2Locations.entrySet()) {
                volumeIdCache.setLocations(entry.getKey(), entry.getValue());
                knownLocations.put(entry.getKey(), entry.getValue());
            }
        }

        //TODO parallel this
        long readCount = 0;
        int startOffset = bufferOffset;
        long startOffset = position;
        for (ChunkView chunkView : chunkViews) {
            FilerProto.Locations locations = vid2Locations.get(parseVolumeId(chunkView.fileId));
            if (locations.getLocationsCount() == 0) {

            if (startOffset < chunkView.logicOffset) {
                long gap = chunkView.logicOffset - startOffset;
                LOG.debug("zero [{},{})", startOffset, startOffset + gap);
                buf.position(buf.position() + (int) gap);
                readCount += gap;
                startOffset += gap;
            }

            FilerProto.Locations locations = knownLocations.get(parseVolumeId(chunkView.fileId));
            if (locations == null || locations.getLocationsCount() == 0) {
                LOG.error("failed to locate {}", chunkView.fileId);
                // log here!
                return 0;
            }

            int len = readChunkView(position, buffer, startOffset, chunkView, locations);
            int len = readChunkView(filerClient, startOffset, buf, chunkView, locations);

            LOG.debug("read [{},{}) {} size {}", startOffset, startOffset + len, chunkView.fileId, chunkView.size);

            readCount += len;
            startOffset += len;

        }

        long limit = Math.min(buf.limit(), fileSize);

        if (startOffset < limit) {
            long gap = limit - startOffset;
            LOG.debug("zero2 [{},{})", startOffset, startOffset + gap);
            buf.position(buf.position() + (int) gap);
            readCount += gap;
            startOffset += gap;
        }

        return readCount;
    }

    private static int readChunkView(long position, byte[] buffer, int startOffset, ChunkView chunkView, FilerProto.Locations locations) throws IOException {
        HttpClient client = new DefaultHttpClient();
        HttpGet request = new HttpGet(
                String.format("http://%s/%s", locations.getLocations(0).getUrl(), chunkView.fileId));
    private static int readChunkView(FilerClient filerClient, long startOffset, ByteBuffer buf, ChunkView chunkView, FilerProto.Locations locations) throws IOException {

        if (!chunkView.isFullChunk) {
            request.setHeader(HttpHeaders.ACCEPT_ENCODING, "");
            request.setHeader(HttpHeaders.RANGE,
                    String.format("bytes=%d-%d", chunkView.offset, chunkView.offset + chunkView.size));
        byte[] chunkData = chunkCache.getChunk(chunkView.fileId);

        if (chunkData == null) {
            chunkData = doFetchFullChunkData(filerClient, chunkView, locations);
            chunkCache.setChunk(chunkView.fileId, chunkData);
        }

        try {
            HttpResponse response = client.execute(request);
            HttpEntity entity = response.getEntity();
        int len = (int) chunkView.size;
        LOG.debug("readChunkView fid:{} chunkData.length:{} chunkView.offset:{} chunkView[{};{}) startOffset:{}",
                chunkView.fileId, chunkData.length, chunkView.offset, chunkView.logicOffset, chunkView.logicOffset + chunkView.size, startOffset);
        buf.put(chunkData, (int) (startOffset - chunkView.logicOffset + chunkView.offset), len);

            int len = (int) (chunkView.logicOffset - position + chunkView.size);
            OutputStream outputStream = new ByteBufferOutputStream(ByteBuffer.wrap(buffer, startOffset, len));
            entity.writeTo(outputStream);
            // LOG.debug("* read chunkView:{} startOffset:{} length:{}", chunkView, startOffset, len);
            return len;
        }

            return len;
    public static byte[] doFetchFullChunkData(FilerClient filerClient, ChunkView chunkView, FilerProto.Locations locations) throws IOException {

        } finally {
            if (client instanceof Closeable) {
                Closeable t = (Closeable) client;
                t.close();
        byte[] data = null;
        IOException lastException = null;
        for (long waitTime = 1000L; waitTime < 10 * 1000; waitTime += waitTime / 2) {
            for (FilerProto.Location location : locations.getLocationsList()) {
                String url = filerClient.getChunkUrl(chunkView.fileId, location.getUrl(), location.getPublicUrl());
                try {
                    data = doFetchOneFullChunkData(chunkView, url);
                    lastException = null;
                    break;
                } catch (IOException ioe) {
                    LOG.debug("doFetchFullChunkData {} :{}", url, ioe);
                    lastException = ioe;
                }
            }
            if (data != null) {
                break;
            }
            try {
                Thread.sleep(waitTime);
            } catch (InterruptedException e) {
            }
        }

        if (lastException != null) {
            throw lastException;
        }

        LOG.debug("doFetchFullChunkData fid:{} chunkData.length:{}", chunkView.fileId, data.length);

        return data;

    }

    private static byte[] doFetchOneFullChunkData(ChunkView chunkView, String url) throws IOException {

        HttpGet request = new HttpGet(url);

        request.setHeader(HttpHeaders.ACCEPT_ENCODING, "gzip");

        byte[] data = null;

        CloseableHttpResponse response = SeaweedUtil.getClosableHttpClient().execute(request);

        try {
            HttpEntity entity = response.getEntity();

            Header contentEncodingHeader = entity.getContentEncoding();

            if (contentEncodingHeader != null) {
                HeaderElement[] encodings = contentEncodingHeader.getElements();
                for (int i = 0; i < encodings.length; i++) {
                    if (encodings[i].getName().equalsIgnoreCase("gzip")) {
                        entity = new GzipDecompressingEntity(entity);
                        break;
                    }
                }
            }

            data = EntityUtils.toByteArray(entity);

            EntityUtils.consume(entity);

        } finally {
            response.close();
            request.releaseConnection();
        }

        if (chunkView.cipherKey != null && chunkView.cipherKey.length != 0) {
            try {
                data = SeaweedCipher.decrypt(data, chunkView.cipherKey);
            } catch (Exception e) {
                throw new IOException("fail to decrypt", e);
            }
        }

        if (chunkView.isCompressed) {
            data = Gzip.decompress(data);
        }

        LOG.debug("doFetchOneFullChunkData url:{} chunkData.length:{}", url, data.length);

        return data;

    }

    protected static List<ChunkView> viewFromVisibles(List<VisibleInterval> visibleIntervals, long offset, long size) {
@ -90,27 +202,40 @@ public class SeaweedRead {

        long stop = offset + size;
        for (VisibleInterval chunk : visibleIntervals) {
            if (chunk.start <= offset && offset < chunk.stop && offset < stop) {
            long chunkStart = Math.max(offset, chunk.start);
            long chunkStop = Math.min(stop, chunk.stop);
            if (chunkStart < chunkStop) {
                boolean isFullChunk = chunk.isFullChunk && chunk.start == offset && chunk.stop <= stop;
                views.add(new ChunkView(
                        chunk.fileId,
                        offset - chunk.start,
                        Math.min(chunk.stop, stop) - offset,
                        offset,
                        isFullChunk
                        chunk.fileId,
                        chunkStart - chunk.start + chunk.chunkOffset,
                        chunkStop - chunkStart,
                        chunkStart,
                        isFullChunk,
                        chunk.cipherKey,
                        chunk.isCompressed
                ));
                offset = Math.min(chunk.stop, stop);
            }
        }
        return views;
    }

    public static List<VisibleInterval> nonOverlappingVisibleIntervals(List<FilerProto.FileChunk> chunkList) {
    public static List<VisibleInterval> nonOverlappingVisibleIntervals(
            final FilerClient filerClient, List<FilerProto.FileChunk> chunkList) throws IOException {

        chunkList = FileChunkManifest.resolveChunkManifest(filerClient, chunkList);

        FilerProto.FileChunk[] chunks = chunkList.toArray(new FilerProto.FileChunk[0]);
        Arrays.sort(chunks, new Comparator<FilerProto.FileChunk>() {
            @Override
            public int compare(FilerProto.FileChunk a, FilerProto.FileChunk b) {
                return (int) (a.getMtime() - b.getMtime());
                // if just a.getMtime() - b.getMtime(), it will overflow!
                if (a.getMtime() < b.getMtime()) {
                    return -1;
                } else if (a.getMtime() > b.getMtime()) {
                    return 1;
                }
                return 0;
            }
        });

@ -127,11 +252,14 @@ public class SeaweedRead {
                                     List<VisibleInterval> newVisibles,
                                     FilerProto.FileChunk chunk) {
        VisibleInterval newV = new VisibleInterval(
                chunk.getOffset(),
                chunk.getOffset() + chunk.getSize(),
                chunk.getFileId(),
                chunk.getMtime(),
                true
                chunk.getOffset(),
                chunk.getOffset() + chunk.getSize(),
                chunk.getFileId(),
                chunk.getMtime(),
                0,
                true,
                chunk.getCipherKey().toByteArray(),
                chunk.getIsCompressed()
        );

        // easy cases to speed up
@ -147,21 +275,27 @@ public class SeaweedRead {
        for (VisibleInterval v : visibles) {
            if (v.start < chunk.getOffset() && chunk.getOffset() < v.stop) {
                newVisibles.add(new VisibleInterval(
                        v.start,
                        chunk.getOffset(),
                        v.fileId,
                        v.modifiedTime,
                        false
                        v.start,
                        chunk.getOffset(),
                        v.fileId,
                        v.modifiedTime,
                        v.chunkOffset,
                        false,
                        v.cipherKey,
                        v.isCompressed
                ));
            }
            long chunkStop = chunk.getOffset() + chunk.getSize();
            if (v.start < chunkStop && chunkStop < v.stop) {
                newVisibles.add(new VisibleInterval(
                        chunkStop,
                        v.stop,
                        v.fileId,
                        v.modifiedTime,
                        false
                        chunkStop,
                        v.stop,
                        v.fileId,
                        v.modifiedTime,
                        v.chunkOffset + (chunkStop - v.start),
                        false,
                        v.cipherKey,
                        v.isCompressed
                ));
            }
            if (chunkStop <= v.start || v.stop <= chunk.getOffset()) {
@ -191,6 +325,10 @@ public class SeaweedRead {
        return fileId;
    }

    public static long fileSize(FilerProto.Entry entry) {
        return Math.max(totalSize(entry.getChunksList()), entry.getAttributes().getFileSize());
    }

    public static long totalSize(List<FilerProto.FileChunk> chunksList) {
        long size = 0;
        for (FilerProto.FileChunk chunk : chunksList) {
@ -207,25 +345,33 @@ public class SeaweedRead {
        public final long stop;
        public final long modifiedTime;
        public final String fileId;
        public final long chunkOffset;
        public final boolean isFullChunk;
        public final byte[] cipherKey;
        public final boolean isCompressed;

        public VisibleInterval(long start, long stop, String fileId, long modifiedTime, boolean isFullChunk) {
        public VisibleInterval(long start, long stop, String fileId, long modifiedTime, long chunkOffset, boolean isFullChunk, byte[] cipherKey, boolean isCompressed) {
            this.start = start;
            this.stop = stop;
            this.modifiedTime = modifiedTime;
            this.fileId = fileId;
            this.chunkOffset = chunkOffset;
            this.isFullChunk = isFullChunk;
            this.cipherKey = cipherKey;
            this.isCompressed = isCompressed;
        }

        @Override
        public String toString() {
            return "VisibleInterval{" +
                    "start=" + start +
                    ", stop=" + stop +
                    ", modifiedTime=" + modifiedTime +
                    ", fileId='" + fileId + '\'' +
                    ", isFullChunk=" + isFullChunk +
                    '}';
                    "start=" + start +
                    ", stop=" + stop +
                    ", modifiedTime=" + modifiedTime +
                    ", fileId='" + fileId + '\'' +
                    ", isFullChunk=" + isFullChunk +
                    ", cipherKey=" + Arrays.toString(cipherKey) +
                    ", isCompressed=" + isCompressed +
                    '}';
        }
    }

@ -235,24 +381,30 @@ public class SeaweedRead {
        public final long size;
        public final long logicOffset;
        public final boolean isFullChunk;
        public final byte[] cipherKey;
        public final boolean isCompressed;

        public ChunkView(String fileId, long offset, long size, long logicOffset, boolean isFullChunk) {
        public ChunkView(String fileId, long offset, long size, long logicOffset, boolean isFullChunk, byte[] cipherKey, boolean isCompressed) {
            this.fileId = fileId;
            this.offset = offset;
            this.size = size;
            this.logicOffset = logicOffset;
            this.isFullChunk = isFullChunk;
            this.cipherKey = cipherKey;
            this.isCompressed = isCompressed;
        }

        @Override
        public String toString() {
            return "ChunkView{" +
                    "fileId='" + fileId + '\'' +
                    ", offset=" + offset +
                    ", size=" + size +
                    ", logicOffset=" + logicOffset +
                    ", isFullChunk=" + isFullChunk +
                    '}';
                    "fileId='" + fileId + '\'' +
                    ", offset=" + offset +
                    ", size=" + size +
                    ", logicOffset=" + logicOffset +
                    ", isFullChunk=" + isFullChunk +
                    ", cipherKey=" + Arrays.toString(cipherKey) +
                    ", isCompressed=" + isCompressed +
                    '}';
        }
    }

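The rewritten mtime comparator above avoids a subtle trap in the old one-liner: casting a long difference to int keeps only the low 32 bits, so two timestamps far enough apart can compare as equal or even with the wrong sign. A self-contained illustration:

    long a = 0L;
    long b = 4_294_967_296L;        // exactly 2^32 apart
    int wrong = (int) (a - b);      // truncates to 0, so a and b compare as "equal"
    int right = Long.compare(a, b); // -1, the intended ordering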
@ -0,0 +1,30 @@

package seaweedfs.client;

import org.apache.http.impl.DefaultConnectionReuseStrategy;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.DefaultConnectionKeepAliveStrategy;
import org.apache.http.impl.client.HttpClientBuilder;
import org.apache.http.impl.conn.PoolingHttpClientConnectionManager;

public class SeaweedUtil {

    static PoolingHttpClientConnectionManager cm = new PoolingHttpClientConnectionManager();
    static CloseableHttpClient httpClient;

    static {
        // Increase max total connection to 200
        cm.setMaxTotal(200);
        // Increase default max connection per route to 20
        cm.setDefaultMaxPerRoute(20);

        httpClient = HttpClientBuilder.create()
                .setConnectionManager(cm)
                .setConnectionReuseStrategy(DefaultConnectionReuseStrategy.INSTANCE)
                .setKeepAliveStrategy(DefaultConnectionKeepAliveStrategy.INSTANCE)
                .build();
    }

    public static CloseableHttpClient getClosableHttpClient() {
        return httpClient;
    }
}
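The shared pooled client lets every chunk fetch and upload reuse keep-alive connections instead of opening a fresh one per request, which is what the per-call DefaultHttpClient instances it replaces were doing. A sketch of the consumption pattern used elsewhere in this diff (the chunk URL is a placeholder):

    HttpGet request = new HttpGet("http://127.0.0.1:8080/3,01637037d6"); // placeholder chunk URL
    CloseableHttpResponse response = SeaweedUtil.getClosableHttpClient().execute(request);
    try {
        byte[] body = EntityUtils.toByteArray(response.getEntity());
    } finally {
        response.close();            // returns the connection to the pool
        request.releaseConnection();
    }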
@ -1,68 +1,114 @@

package seaweedfs.client;

import org.apache.http.HttpResponse;
import org.apache.http.client.HttpClient;
import com.google.protobuf.ByteString;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.entity.mime.HttpMultipartMode;
import org.apache.http.entity.mime.MultipartEntityBuilder;
import org.apache.http.impl.client.DefaultHttpClient;
import org.apache.http.util.EntityUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.ByteArrayInputStream;
import java.io.Closeable;
import java.io.IOException;
import java.io.InputStream;
import java.security.SecureRandom;
import java.util.List;

public class SeaweedWrite {

    private static final Logger LOG = LoggerFactory.getLogger(SeaweedWrite.class);

    private static final SecureRandom random = new SecureRandom();

    public static void writeData(FilerProto.Entry.Builder entry,
                                 final String replication,
                                 final FilerGrpcClient filerGrpcClient,
                                 final FilerClient filerClient,
                                 final long offset,
                                 final byte[] bytes,
                                 final long bytesOffset, final long bytesLength) throws IOException {
        FilerProto.AssignVolumeResponse response = filerGrpcClient.getBlockingStub().assignVolume(
                                 final long bytesOffset, final long bytesLength,
                                 final String path) throws IOException {
        FilerProto.FileChunk.Builder chunkBuilder = writeChunk(
                replication, filerClient, offset, bytes, bytesOffset, bytesLength, path);
        synchronized (entry) {
            entry.addChunks(chunkBuilder);
        }
    }

    public static FilerProto.FileChunk.Builder writeChunk(final String replication,
                                                          final FilerClient filerClient,
                                                          final long offset,
                                                          final byte[] bytes,
                                                          final long bytesOffset,
                                                          final long bytesLength,
                                                          final String path) throws IOException {
        FilerProto.AssignVolumeResponse response = filerClient.getBlockingStub().assignVolume(
                FilerProto.AssignVolumeRequest.newBuilder()
                        .setCollection("")
                        .setReplication(replication)
                        .setCollection(filerClient.getCollection())
                        .setReplication(replication == null ? filerClient.getReplication() : replication)
                        .setDataCenter("")
                        .setReplication("")
                        .setTtlSec(0)
                        .setPath(path)
                        .build());
        String fileId = response.getFileId();
        String url = response.getUrl();
        String auth = response.getAuth();
        String targetUrl = String.format("http://%s/%s", url, fileId);

        String etag = multipartUpload(targetUrl, auth, bytes, bytesOffset, bytesLength);
        String targetUrl = filerClient.getChunkUrl(fileId, response.getUrl(), response.getPublicUrl());

        entry.addChunks(FilerProto.FileChunk.newBuilder()
        ByteString cipherKeyString = com.google.protobuf.ByteString.EMPTY;
        byte[] cipherKey = null;
        if (filerClient.isCipher()) {
            cipherKey = genCipherKey();
            cipherKeyString = ByteString.copyFrom(cipherKey);
        }

        String etag = multipartUpload(targetUrl, auth, bytes, bytesOffset, bytesLength, cipherKey);

        LOG.debug("write file chunk {} size {}", targetUrl, bytesLength);

        return FilerProto.FileChunk.newBuilder()
                .setFileId(fileId)
                .setOffset(offset)
                .setSize(bytesLength)
                .setMtime(System.currentTimeMillis() / 10000L)
                .setETag(etag)
        );

                .setCipherKey(cipherKeyString);
    }

    public static void writeMeta(final FilerGrpcClient filerGrpcClient,
                                 final String parentDirectory, final FilerProto.Entry.Builder entry) {
        filerGrpcClient.getBlockingStub().createEntry(
                FilerProto.CreateEntryRequest.newBuilder()
                        .setDirectory(parentDirectory)
                        .setEntry(entry)
                        .build()
        );
    public static void writeMeta(final FilerClient filerClient,
                                 final String parentDirectory,
                                 final FilerProto.Entry.Builder entry) throws IOException {

        synchronized (entry) {
            List<FilerProto.FileChunk> chunks = FileChunkManifest.maybeManifestize(filerClient, entry.getChunksList(), parentDirectory);
            entry.clearChunks();
            entry.addAllChunks(chunks);
            filerClient.getBlockingStub().createEntry(
                    FilerProto.CreateEntryRequest.newBuilder()
                            .setDirectory(parentDirectory)
                            .setEntry(entry)
                            .build()
            );
        }
    }

    private static String multipartUpload(String targetUrl,
                                          String auth,
                                          final byte[] bytes,
                                          final long bytesOffset, final long bytesLength) throws IOException {
                                          final long bytesOffset, final long bytesLength,
                                          byte[] cipherKey) throws IOException {

        HttpClient client = new DefaultHttpClient();

        InputStream inputStream = new ByteArrayInputStream(bytes, (int) bytesOffset, (int) bytesLength);
        InputStream inputStream = null;
        if (cipherKey == null || cipherKey.length == 0) {
            inputStream = new ByteArrayInputStream(bytes, (int) bytesOffset, (int) bytesLength);
        } else {
            try {
                byte[] encryptedBytes = SeaweedCipher.encrypt(bytes, (int) bytesOffset, (int) bytesLength, cipherKey);
                inputStream = new ByteArrayInputStream(encryptedBytes, 0, encryptedBytes.length);
            } catch (Exception e) {
                throw new IOException("fail to encrypt data", e);
            }
        }

        HttpPost post = new HttpPost(targetUrl);
        if (auth != null && auth.length() != 0) {
@ -74,8 +120,9 @@ public class SeaweedWrite {
                .addBinaryBody("upload", inputStream)
                .build());

        CloseableHttpResponse response = SeaweedUtil.getClosableHttpClient().execute(post);

        try {
            HttpResponse response = client.execute(post);

            String etag = response.getLastHeader("ETag").getValue();

@ -83,13 +130,19 @@ public class SeaweedWrite {
                etag = etag.substring(1, etag.length() - 1);
            }

            EntityUtils.consume(response.getEntity());

            return etag;
        } finally {
            if (client instanceof Closeable) {
                Closeable t = (Closeable) client;
                t.close();
            }
            response.close();
            post.releaseConnection();
        }

    }

    private static byte[] genCipherKey() {
        byte[] b = new byte[32];
        random.nextBytes(b);
        return b;
    }
}

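Taken together, the write path above is: writeChunk() asks the filer to assign a volume, uploads one (optionally AES-GCM-encrypted) chunk via multipart POST, and returns the chunk metadata; writeMeta() then commits the accumulated chunk list, manifestized when large. A sketch, assuming a filerClient already in scope; the entry name and paths are placeholders:

    // Mirrors what SeaweedOutputStream does internally.
    FilerProto.Entry.Builder entry = FilerProto.Entry.newBuilder()
            .setName("hello.txt")
            .setIsDirectory(false);
    byte[] bytes = "hello".getBytes();
    SeaweedWrite.writeData(entry, "000", filerClient, 0, bytes, 0, bytes.length, "/test/hello.txt");
    SeaweedWrite.writeMeta(filerClient, "/test", entry); // creates the entry on the filer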
@ -0,0 +1,36 @@

package seaweedfs.client;

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;

import java.util.concurrent.TimeUnit;

public class VolumeIdCache {

    private Cache<String, FilerProto.Locations> cache = null;

    public VolumeIdCache(int maxEntries) {
        if (maxEntries == 0) {
            return;
        }
        this.cache = CacheBuilder.newBuilder()
                .maximumSize(maxEntries)
                .expireAfterAccess(5, TimeUnit.MINUTES)
                .build();
    }

    public FilerProto.Locations getLocations(String volumeId) {
        if (this.cache == null) {
            return null;
        }
        return this.cache.getIfPresent(volumeId);
    }

    public void setLocations(String volumeId, FilerProto.Locations locations) {
        if (this.cache == null) {
            return;
        }
        this.cache.put(volumeId, locations);
    }

}
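The cache is used cache-aside, as in SeaweedRead.read() above: consult it first, fall back to a LookupVolume RPC on a miss, then remember the answer (entries expire five minutes after last access). A compressed sketch with a placeholder volume id:

    VolumeIdCache cache = new VolumeIdCache(4 * 1024);
    FilerProto.Locations locations = cache.getLocations("3"); // volume id parsed from a file id
    if (locations == null) {
        // miss: look the volume up via the filer, then populate:
        // cache.setLocations("3", lookedUpLocations);
    }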
@ -2,6 +2,7 @@ syntax = "proto3";

package filer_pb;

option go_package = "github.com/chrislusf/seaweedfs/weed/pb/filer_pb";
option java_package = "seaweedfs.client";
option java_outer_classname = "FilerProto";

@ -21,6 +22,9 @@ service SeaweedFiler {
  rpc UpdateEntry (UpdateEntryRequest) returns (UpdateEntryResponse) {
  }

  rpc AppendToEntry (AppendToEntryRequest) returns (AppendToEntryResponse) {
  }

  rpc DeleteEntry (DeleteEntryRequest) returns (DeleteEntryResponse) {
  }

@ -33,6 +37,9 @@ service SeaweedFiler {
  rpc LookupVolume (LookupVolumeRequest) returns (LookupVolumeResponse) {
  }

  rpc CollectionList (CollectionListRequest) returns (CollectionListResponse) {
  }

  rpc DeleteCollection (DeleteCollectionRequest) returns (DeleteCollectionResponse) {
  }

@ -42,6 +49,24 @@ service SeaweedFiler {
  rpc GetFilerConfiguration (GetFilerConfigurationRequest) returns (GetFilerConfigurationResponse) {
  }

  rpc SubscribeMetadata (SubscribeMetadataRequest) returns (stream SubscribeMetadataResponse) {
  }

  rpc SubscribeLocalMetadata (SubscribeMetadataRequest) returns (stream SubscribeMetadataResponse) {
  }

  rpc KeepConnected (stream KeepConnectedRequest) returns (stream KeepConnectedResponse) {
  }

  rpc LocateBroker (LocateBrokerRequest) returns (LocateBrokerResponse) {
  }

  rpc KvGet (KvGetRequest) returns (KvGetResponse) {
  }

  rpc KvPut (KvPutRequest) returns (KvPutResponse) {
  }

}

//////////////////////////////////////////////////
@ -73,6 +98,9 @@ message Entry {
  repeated FileChunk chunks = 3;
  FuseAttributes attributes = 4;
  map<string, bytes> extended = 5;
  bytes hard_link_id = 7;
  int32 hard_link_counter = 8; // only exists in hard link meta data
  bytes content = 9; // if not empty, the file content
}

message FullEntry {
@ -85,6 +113,8 @@ message EventNotification {
  Entry new_entry = 2;
  bool delete_chunks = 3;
  string new_parent_path = 4;
  bool is_from_other_cluster = 5;
  repeated int32 signatures = 6;
}

message FileChunk {
@ -96,6 +126,13 @@ message FileChunk {
  string source_file_id = 6; // to be deprecated
  FileId fid = 7;
  FileId source_fid = 8;
  bytes cipher_key = 9;
  bool is_compressed = 10;
  bool is_chunk_manifest = 11; // content is a list of FileChunks
}

message FileChunkManifest {
  repeated FileChunk chunks = 1;
}

message FileId {
@ -118,23 +155,39 @@ message FuseAttributes {
  string user_name = 11; // for hdfs
  repeated string group_name = 12; // for hdfs
  string symlink_target = 13;
  bytes md5 = 14;
  string disk_type = 15;
}

message CreateEntryRequest {
  string directory = 1;
  Entry entry = 2;
  bool o_excl = 3;
  bool is_from_other_cluster = 4;
  repeated int32 signatures = 5;
}

message CreateEntryResponse {
  string error = 1;
}

message UpdateEntryRequest {
  string directory = 1;
  Entry entry = 2;
  bool is_from_other_cluster = 3;
  repeated int32 signatures = 4;
}
message UpdateEntryResponse {
}

message AppendToEntryRequest {
  string directory = 1;
  string entry_name = 2;
  repeated FileChunk chunks = 3;
}
message AppendToEntryResponse {
}

message DeleteEntryRequest {
  string directory = 1;
  string name = 2;
@ -142,9 +195,12 @@ message DeleteEntryRequest {
  bool is_delete_data = 4;
  bool is_recursive = 5;
  bool ignore_recursive_error = 6;
  bool is_from_other_cluster = 7;
  repeated int32 signatures = 8;
}

message DeleteEntryResponse {
  string error = 1;
}

message AtomicRenameEntryRequest {
@ -163,6 +219,9 @@ message AssignVolumeRequest {
  string replication = 3;
  int32 ttl_sec = 4;
  string data_center = 5;
  string path = 6;
  string rack = 7;
  string disk_type = 8;
}

message AssignVolumeResponse {
@ -171,6 +230,9 @@ message AssignVolumeResponse {
  string public_url = 3;
  int32 count = 4;
  string auth = 5;
  string collection = 6;
  string replication = 7;
  string error = 8;
}

message LookupVolumeRequest {
@ -189,6 +251,16 @@ message LookupVolumeResponse {
  map<string, Locations> locations_map = 1;
}

message Collection {
  string name = 1;
}
message CollectionListRequest {
  bool include_normal_volumes = 1;
  bool include_ec_volumes = 2;
}
message CollectionListResponse {
  repeated Collection collections = 1;
}
message DeleteCollectionRequest {
  string collection = 1;
}
@ -200,11 +272,9 @@ message StatisticsRequest {
  string replication = 1;
  string collection = 2;
  string ttl = 3;
  string disk_type = 4;
}
message StatisticsResponse {
  string replication = 1;
  string collection = 2;
  string ttl = 3;
  uint64 total_size = 4;
  uint64 used_size = 5;
  uint64 file_count = 6;
@ -217,4 +287,80 @@ message GetFilerConfigurationResponse {
  string replication = 2;
  string collection = 3;
  uint32 max_mb = 4;
  string dir_buckets = 5;
  bool cipher = 7;
  int32 signature = 8;
  string metrics_address = 9;
  int32 metrics_interval_sec = 10;
}

message SubscribeMetadataRequest {
  string client_name = 1;
  string path_prefix = 2;
  int64 since_ns = 3;
  int32 signature = 4;
}
message SubscribeMetadataResponse {
  string directory = 1;
  EventNotification event_notification = 2;
  int64 ts_ns = 3;
}

message LogEntry {
  int64 ts_ns = 1;
  int32 partition_key_hash = 2;
  bytes data = 3;
}

message KeepConnectedRequest {
  string name = 1;
  uint32 grpc_port = 2;
  repeated string resources = 3;
}
message KeepConnectedResponse {
}

message LocateBrokerRequest {
  string resource = 1;
}
message LocateBrokerResponse {
  bool found = 1;
  // if found, send the exact address
  // if not found, send the full list of existing brokers
  message Resource {
    string grpc_addresses = 1;
    int32 resource_count = 2;
  }
  repeated Resource resources = 2;
}

// Key-Value operations
message KvGetRequest {
  bytes key = 1;
}
message KvGetResponse {
  bytes value = 1;
  string error = 2;
}
message KvPutRequest {
  bytes key = 1;
  bytes value = 2;
}
message KvPutResponse {
  string error = 1;
}

// path-based configurations
message FilerConf {
  int32 version = 1;
  message PathConf {
    string location_prefix = 1;
    string collection = 2;
    string replication = 3;
    string ttl = 4;
    string disk_type = 5;
    bool fsync = 6;
    uint32 volume_growth_count = 7;
  }
  repeated PathConf locations = 2;
}
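The new KvGet/KvPut RPCs expose the filer's key-value store to clients. A hedged sketch of calling them through the generated Java stub; the method and builder names below follow the standard protobuf/gRPC Java mapping for this service rather than code shown in this diff, and filerClient.getBlockingStub() is the accessor used throughout it:

    FilerProto.KvPutResponse putResponse = filerClient.getBlockingStub().kvPut(
            FilerProto.KvPutRequest.newBuilder()
                    .setKey(com.google.protobuf.ByteString.copyFromUtf8("greeting"))
                    .setValue(com.google.protobuf.ByteString.copyFromUtf8("hello"))
                    .build());
    FilerProto.KvGetResponse getResponse = filerClient.getBlockingStub().kvGet(
            FilerProto.KvGetRequest.newBuilder()
                    .setKey(com.google.protobuf.ByteString.copyFromUtf8("greeting"))
                    .build());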
@ -0,0 +1,42 @@

package seaweedfs.client;

import org.junit.Test;

import java.util.Base64;

import static seaweedfs.client.SeaweedCipher.decrypt;
import static seaweedfs.client.SeaweedCipher.encrypt;

public class SeaweedCipherTest {

    @Test
    public void testSameAsGoImplementation() throws Exception {
        byte[] secretKey = "256-bit key for AES 256 GCM encr".getBytes();

        String plainText = "Now we need to generate a 256-bit key for AES 256 GCM";

        System.out.println("Original Text  : " + plainText);

        byte[] cipherText = encrypt(plainText.getBytes(), secretKey);
        System.out.println("Encrypted Text : " + Base64.getEncoder().encodeToString(cipherText));

        byte[] decryptedText = decrypt(cipherText, secretKey);
        System.out.println("Decrypted Text : " + new String(decryptedText));
    }

    @Test
    public void testEncryptDecrypt() throws Exception {
        byte[] secretKey = SeaweedCipher.genCipherKey();

        String plainText = "Now we need to generate a 256-bit key for AES 256 GCM";

        System.out.println("Original Text  : " + plainText);

        byte[] cipherText = encrypt(plainText.getBytes(), secretKey);
        System.out.println("Encrypted Text : " + Base64.getEncoder().encodeToString(cipherText));

        byte[] decryptedText = decrypt(cipherText, secretKey);
        System.out.println("Decrypted Text : " + new String(decryptedText));
    }

}
@ -3,13 +3,14 @@ package seaweedfs.client;

import org.junit.Assert;
import org.junit.Test;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

public class SeaweedReadTest {

    @Test
    public void testNonOverlappingVisibleIntervals() {
    public void testNonOverlappingVisibleIntervals() throws IOException {
        List<FilerProto.FileChunk> chunks = new ArrayList<>();
        chunks.add(FilerProto.FileChunk.newBuilder()
                .setFileId("aaa")
@ -24,7 +25,7 @@ public class SeaweedReadTest {
                .setMtime(2000)
                .build());

        List<SeaweedRead.VisibleInterval> visibleIntervals = SeaweedRead.nonOverlappingVisibleIntervals(chunks);
        List<SeaweedRead.VisibleInterval> visibleIntervals = SeaweedRead.nonOverlappingVisibleIntervals(null, chunks);
        for (SeaweedRead.VisibleInterval visibleInterval : visibleIntervals) {
            System.out.println("visible:" + visibleInterval);
        }
32
other/java/examples/pom.xml
Normal file
@ -0,0 +1,32 @@

<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <groupId>org.example</groupId>
    <artifactId>unzip</artifactId>
    <version>1.0-SNAPSHOT</version>
    <dependencies>
        <dependency>
            <groupId>com.github.chrislusf</groupId>
            <artifactId>seaweedfs-client</artifactId>
            <version>1.6.4</version>
            <scope>compile</scope>
        </dependency>
        <dependency>
            <groupId>com.github.chrislusf</groupId>
            <artifactId>seaweedfs-hadoop2-client</artifactId>
            <version>1.6.4</version>
            <scope>compile</scope>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-common</artifactId>
            <version>2.9.2</version>
            <scope>compile</scope>
        </dependency>
    </dependencies>

</project>
@ -0,0 +1,48 @@

package com.seaweedfs.examples;

import seaweedfs.client.FilerClient;
import seaweedfs.client.SeaweedInputStream;

import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.zip.ZipEntry;
import java.util.zip.ZipInputStream;

public class ExampleReadFile {

    public static void main(String[] args) throws IOException {

        FilerClient filerClient = new FilerClient("localhost", 18888);

        long startTime = System.currentTimeMillis();
        parseZip("/Users/chris/tmp/test.zip");

        long startTime2 = System.currentTimeMillis();

        long localProcessTime = startTime2 - startTime;

        SeaweedInputStream seaweedInputStream = new SeaweedInputStream(
                filerClient, "/test.zip");
        parseZip(seaweedInputStream);

        long swProcessTime = System.currentTimeMillis() - startTime2;

        System.out.println("Local time: " + localProcessTime);
        System.out.println("SeaweedFS time: " + swProcessTime);

    }

    public static void parseZip(String filename) throws IOException {
        FileInputStream fileInputStream = new FileInputStream(filename);
        parseZip(fileInputStream);
    }

    public static void parseZip(InputStream is) throws IOException {
        ZipInputStream zin = new ZipInputStream(is);
        ZipEntry ze;
        while ((ze = zin.getNextEntry()) != null) {
            System.out.println(ze.getName());
        }
    }
}
@ -0,0 +1,46 @@

package com.seaweedfs.examples;

import seaweedfs.client.FilerClient;
import seaweedfs.client.FilerProto;

import java.io.IOException;
import java.util.Date;
import java.util.Iterator;

public class ExampleWatchFileChanges {

    public static void main(String[] args) throws IOException {

        FilerClient filerClient = new FilerClient("localhost", 18888);

        long sinceNs = (System.currentTimeMillis() - 3600 * 1000) * 1000000L;

        Iterator<FilerProto.SubscribeMetadataResponse> watch = filerClient.watch(
                "/buckets",
                "exampleClientName",
                sinceNs
        );

        System.out.println("Connected to filer, subscribing from " + new Date());

        while (watch.hasNext()) {
            FilerProto.SubscribeMetadataResponse event = watch.next();
            FilerProto.EventNotification notification = event.getEventNotification();
            if (!event.getDirectory().equals(notification.getNewParentPath())) {
                // move an entry to a new directory, possibly with a new name
                if (notification.hasOldEntry() && notification.hasNewEntry()) {
                    System.out.println("moved " + event.getDirectory() + "/" + notification.getOldEntry().getName() + " to " + notification.getNewParentPath() + "/" + notification.getNewEntry().getName());
                } else {
                    System.out.println("this should not happen.");
                }
            } else if (notification.hasNewEntry() && !notification.hasOldEntry()) {
                System.out.println("created entry " + event.getDirectory() + "/" + notification.getNewEntry().getName());
            } else if (!notification.hasNewEntry() && notification.hasOldEntry()) {
                System.out.println("deleted entry " + event.getDirectory() + "/" + notification.getOldEntry().getName());
            } else if (notification.hasNewEntry() && notification.hasOldEntry()) {
                System.out.println("updated entry " + event.getDirectory() + "/" + notification.getNewEntry().getName());
            }
        }

    }
}
@ -0,0 +1,47 @@

package com.seaweedfs.examples;

import seaweedfs.client.FilerClient;
import seaweedfs.client.SeaweedInputStream;
import seaweedfs.client.SeaweedOutputStream;

import java.io.IOException;
import java.io.InputStream;
import java.util.zip.ZipEntry;
import java.util.zip.ZipInputStream;

public class ExampleWriteFile {

    public static void main(String[] args) throws IOException {

        FilerClient filerClient = new FilerClient("localhost", 18888);

        SeaweedInputStream seaweedInputStream = new SeaweedInputStream(filerClient, "/test.zip");
        unZipFiles(filerClient, seaweedInputStream);

    }

    public static void unZipFiles(FilerClient filerClient, InputStream is) throws IOException {
        ZipInputStream zin = new ZipInputStream(is);
        ZipEntry ze;
        while ((ze = zin.getNextEntry()) != null) {

            String filename = ze.getName();
            if (filename.indexOf("/") >= 0) {
                filename = filename.substring(filename.lastIndexOf("/") + 1);
            }
            if (filename.length() == 0) {
                continue;
            }

            SeaweedOutputStream seaweedOutputStream = new SeaweedOutputStream(filerClient, "/test/" + filename);
            byte[] bytesIn = new byte[16 * 1024];
            int read = 0;
            while ((read = zin.read(bytesIn)) != -1) {
                seaweedOutputStream.write(bytesIn, 0, read);
            }
            seaweedOutputStream.close();

            System.out.println(ze.getName());
        }
    }
}
Some files were not shown because too many files have changed in this diff.