Merge branch 'master' into bptree
commit 2226c3c8b6
.github/workflows/binaries_dev.yml (vendored, new file, 62 lines)
@@ -0,0 +1,62 @@
name: "go: build dev binaries"

on:
  push:
    branches: [ master ]

jobs:

  build-latest-docker-image:
    runs-on: [ubuntu-latest]

    steps:
      -
        name: Checkout
        uses: actions/checkout@v2
      -
        name: Docker meta
        id: docker_meta
        uses: docker/metadata-action@v3
        with:
          images: |
            chrislusf/seaweedfs
            ghcr.io/chrislusf/seaweedfs
          tags: |
            type=raw,value=latest
          labels: |
            org.opencontainers.image.title=seaweedfs
            org.opencontainers.image.vendor=Chris Lu
      -
        name: Set up QEMU
        uses: docker/setup-qemu-action@v1
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
        with:
          buildkitd-flags: "--debug"
      -
        name: Login to Docker Hub
        if: github.event_name != 'pull_request'
        uses: docker/login-action@v1
        with:
          username: ${{ secrets.DOCKER_USERNAME }}
          password: ${{ secrets.DOCKER_PASSWORD }}
      -
        name: Login to GHCR
        if: github.event_name != 'pull_request'
        uses: docker/login-action@v1
        with:
          registry: ghcr.io
          username: ${{ secrets.GHCR_USERNAME }}
          password: ${{ secrets.GHCR_TOKEN }}
      -
        name: Build
        uses: docker/build-push-action@v2
        with:
          context: ./docker
          push: ${{ github.event_name != 'pull_request' }}
          file: ./docker/Dockerfile
          platforms: linux/amd64, linux/arm, linux/arm64
          tags: ${{ steps.docker_meta.outputs.tags }}
          labels: ${{ steps.docker_meta.outputs.labels }}
@@ -1,36 +1,41 @@
 # This is a basic workflow to help you get started with Actions

-name: Build Versioned Releases
+name: "go: build versioned binaries"

 on:
   release:
     types: [created]
   push:
     tags:
       - '*'

   # Allows you to run this workflow manually from the Actions tab
   workflow_dispatch:

 # A workflow run is made up of one or more jobs that can run sequentially or in parallel
 jobs:
-  build:

+  build-release-binaries:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        goos: [linux, windows, darwin, freebsd ]
-        goarch: [amd64, arm]
+        goos: [linux, windows, darwin, freebsd]
+        goarch: [amd64, arm, arm64]
         exclude:
           - goarch: arm
             goos: darwin
           - goarch: 386
             goos: darwin
           - goarch: arm
             goos: windows
           - goarch: arm64
             goos: windows

     # Steps represent a sequence of tasks that will be executed as part of the job
     steps:
       # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
       - uses: actions/checkout@v2
-      - name: Go Release Binaries
-        uses: wangyoucao577/go-release-action@v1.19
+      - name: Go Release Binaries Normal Volume Size
+        uses: wangyoucao577/go-release-action@v1.20
         with:
           goversion: 1.17
           github_token: ${{ secrets.GITHUB_TOKEN }}
           goos: ${{ matrix.goos }}
           goarch: ${{ matrix.goarch }}
@@ -43,9 +48,8 @@ jobs:
           binary_name: weed
           asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}"
       - name: Go Release Large Disk Binaries
-        uses: wangyoucao577/go-release-action@v1.19
+        uses: wangyoucao577/go-release-action@v1.20
         with:
           goversion: 1.17
           github_token: ${{ secrets.GITHUB_TOKEN }}
           goos: ${{ matrix.goos }}
           goarch: ${{ matrix.goarch }}
.github/workflows/cleanup.yml (vendored, deleted, 22 lines)
@@ -1,22 +0,0 @@
name: Cleanup

on:
  push:
    branches: [ master ]

jobs:

  build:
    name: Build
    runs-on: ubuntu-latest

    steps:

      - name: Delete old release assets
        uses: mknejp/delete-release-assets@v1
        with:
          token: ${{ github.token }}
          tag: dev
          fail-if-no-assets: false
          assets: |
            weed-*
.github/workflows/container_dev.yml (vendored, new file, 63 lines)
@@ -0,0 +1,63 @@
name: "docker: build dev containers"

on:
  push:
    branches: [ master ]
  workflow_dispatch: []

jobs:

  build-dev-containers:
    runs-on: [ubuntu-latest]

    steps:
      -
        name: Checkout
        uses: actions/checkout@v2
      -
        name: Docker meta
        id: docker_meta
        uses: docker/metadata-action@v3
        with:
          images: |
            chrislusf/seaweedfs
            ghcr.io/chrislusf/seaweedfs
          tags: |
            type=raw,value=dev
          labels: |
            org.opencontainers.image.title=seaweedfs
            org.opencontainers.image.description=SeaweedFS is a distributed storage system for blobs, objects, files, and data lake, to store and serve billions of files fast!
            org.opencontainers.image.vendor=Chris Lu
      -
        name: Set up QEMU
        uses: docker/setup-qemu-action@v1
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
        with:
          buildkitd-flags: "--debug"
      -
        name: Login to Docker Hub
        if: github.event_name != 'pull_request'
        uses: docker/login-action@v1
        with:
          username: ${{ secrets.DOCKER_USERNAME }}
          password: ${{ secrets.DOCKER_PASSWORD }}
      -
        name: Login to GHCR
        if: github.event_name != 'pull_request'
        uses: docker/login-action@v1
        with:
          registry: ghcr.io
          username: ${{ secrets.GHCR_USERNAME }}
          password: ${{ secrets.GHCR_TOKEN }}
      -
        name: Build
        uses: docker/build-push-action@v2
        with:
          context: ./docker
          push: ${{ github.event_name != 'pull_request' }}
          file: ./docker/Dockerfile.go_build
          platforms: linux/amd64, linux/arm, linux/arm64, linux/386
          tags: ${{ steps.docker_meta.outputs.tags }}
          labels: ${{ steps.docker_meta.outputs.labels }}
.github/workflows/container_latest.yml (vendored, 65 lines changed)
@@ -1,66 +1,15 @@
-name: Build Latest Containers
+name: "docker: build latest container"

 on:
   push:
-    branches:
-      - master
+    branches: [ master ]
   workflow_dispatch: []

 jobs:
-  build-latest:
-    runs-on: [ubuntu-latest]
-    steps:
-      -
-        name: Checkout
-        uses: actions/checkout@v2
-      -
-        name: Docker meta
-        id: docker_meta
-        uses: crazy-max/ghaction-docker-meta@v2
-        with:
-          images: |
-            chrislusf/seaweedfs
-            ghcr.io/chrislusf/seaweedfs
-          tags: |
-            type=raw,value=latest
-          labels: |
-            org.opencontainers.image.title=seaweedfs
-            org.opencontainers.image.vendor=Chris Lu
-      -
-        name: Set up QEMU
-        uses: docker/setup-qemu-action@v1
-      -
-        name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v1
-        with:
-          buildkitd-flags: "--debug"
-      -
-        name: Login to Docker Hub
-        if: github.event_name != 'pull_request'
-        uses: docker/login-action@v1
-        with:
-          username: ${{ secrets.DOCKER_USERNAME }}
-          password: ${{ secrets.DOCKER_PASSWORD }}
-      -
-        name: Login to GHCR
-        if: github.event_name != 'pull_request'
-        uses: docker/login-action@v1
-        with:
-          registry: ghcr.io
-          username: ${{ secrets.GHCR_USERNAME }}
-          password: ${{ secrets.GHCR_TOKEN }}
-      -
-        name: Build
-        uses: docker/build-push-action@v2
-        with:
-          context: ./docker
-          push: ${{ github.event_name != 'pull_request' }}
-          file: ./docker/Dockerfile
-          platforms: linux/amd64
-          tags: ${{ steps.docker_meta.outputs.tags }}
-          labels: ${{ steps.docker_meta.outputs.labels }}
-
-  build-dev:
+  build-latest-container:
     runs-on: [ubuntu-latest]

     steps:
       -
         name: Checkout
@@ -68,7 +17,7 @@ jobs:
       -
         name: Docker meta
         id: docker_meta
-        uses: crazy-max/ghaction-docker-meta@v2
+        uses: docker/metadata-action@v3
         with:
           images: |
             chrislusf/seaweedfs
@@ -109,6 +58,6 @@ jobs:
           context: ./docker
           push: ${{ github.event_name != 'pull_request' }}
           file: ./docker/Dockerfile.go_build
-          platforms: linux/amd64
+          platforms: linux/amd64, linux/arm, linux/arm64, linux/386
           tags: ${{ steps.docker_meta.outputs.tags }}
           labels: ${{ steps.docker_meta.outputs.labels }}
.github/workflows/container_release.yml (vendored, 17 lines changed)
@@ -1,4 +1,5 @@
-name: Build Release Containers
+name: "docker: build release containers"

 on:
   push:
     tags:
@@ -6,8 +7,9 @@ on:
   workflow_dispatch: []

 jobs:
-  build-default:
+
+  build-default-release-container:
     runs-on: [ubuntu-latest]

     steps:
       -
         name: Checkout
@@ -15,7 +17,7 @@ jobs:
       -
         name: Docker meta
         id: docker_meta
-        uses: crazy-max/ghaction-docker-meta@v2
+        uses: docker/metadata-action@v3
         with:
           images: |
             chrislusf/seaweedfs
@@ -58,11 +60,12 @@ jobs:
           context: ./docker
           push: ${{ github.event_name != 'pull_request' }}
           file: ./docker/Dockerfile.go_build
-          platforms: linux/amd64
+          platforms: linux/amd64, linux/arm, linux/arm64, linux/386
           tags: ${{ steps.docker_meta.outputs.tags }}
           labels: ${{ steps.docker_meta.outputs.labels }}
-  build-large:
+
+  build-large-release-container:
     runs-on: [ubuntu-latest]

     steps:
       -
         name: Checkout
@@ -70,7 +73,7 @@ jobs:
       -
         name: Docker meta
         id: docker_meta
-        uses: crazy-max/ghaction-docker-meta@v2
+        uses: docker/metadata-action@v3
         with:
           images: |
             chrislusf/seaweedfs
@@ -113,6 +116,6 @@ jobs:
           context: ./docker
           push: ${{ github.event_name != 'pull_request' }}
           file: ./docker/Dockerfile.go_build_large
-          platforms: linux/amd64
+          platforms: linux/amd64, linux/arm, linux/arm64, linux/386
           tags: ${{ steps.docker_meta.outputs.tags }}
           labels: ${{ steps.docker_meta.outputs.labels }}
.github/workflows/go.yml (vendored, 6 lines changed)
@@ -1,4 +1,4 @@
-name: Go
+name: "go: build binary"

 on:
   push:
@@ -6,6 +6,10 @@ on:
   pull_request:
     branches: [ master ]

+concurrency:
+  group: ${{ github.head_ref }}/go
+  cancel-in-progress: true
+
 jobs:

   build:
.github/workflows/release.yml (vendored, deleted, 66 lines)
@@ -1,66 +0,0 @@
name: Build Dev Binaries

on:
  push:
    branches: [ master ]

jobs:

  build:
    name: Build
    runs-on: ubuntu-latest
    strategy:
      matrix:
        goos: [linux, windows, darwin, freebsd ]
        goarch: [amd64, arm]
        exclude:
          - goarch: arm
            goos: darwin
          - goarch: arm
            goos: windows

    steps:

      - name: Check out code into the Go module directory
        uses: actions/checkout@v2

      - name: Wait for the deletion
        uses: jakejarvis/wait-action@master
        with:
          time: '30s'

      - name: Set BUILD_TIME env
        run: echo BUILD_TIME=$(date -u +%Y%m%d-%H%M) >> ${GITHUB_ENV}

      - name: Go Release Binaries
        uses: wangyoucao577/go-release-action@v1.17
        with:
          goversion: 1.17
          github_token: ${{ secrets.GITHUB_TOKEN }}
          goos: ${{ matrix.goos }}
          goarch: ${{ matrix.goarch }}
          release_tag: dev
          overwrite: true
          pre_command: export CGO_ENABLED=0
          build_flags: -tags 5BytesOffset # optional, default is
          ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}}
          # Where to run `go build .`
          project_path: weed
          binary_name: weed-large-disk
          asset_name: "weed-large-disk-${{ env.BUILD_TIME }}-${{ matrix.goos }}-${{ matrix.goarch }}"

      - name: Go Release Binaries
        uses: wangyoucao577/go-release-action@v1.17
        with:
          goversion: 1.17
          github_token: ${{ secrets.GITHUB_TOKEN }}
          goos: ${{ matrix.goos }}
          goarch: ${{ matrix.goarch }}
          release_tag: dev
          overwrite: true
          pre_command: export CGO_ENABLED=0
          ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}}
          # Where to run `go build .`
          project_path: weed
          binary_name: weed
          asset_name: "weed-${{ env.BUILD_TIME }}-${{ matrix.goos }}-${{ matrix.goarch }}"
@@ -1,46 +0,0 @@
sudo: false
language: go
go:
  - 1.17.x

before_install:
  - export PATH=/home/travis/gopath/bin:$PATH

install:
  - export CGO_ENABLED="0"
  - go env

script:
  - env GO111MODULE=on go test ./weed/...

before_deploy:
  - make release
deploy:
  provider: releases
  skip_cleanup: true
  api_key:
    secure: ERL986+ncQ8lwAJUYDrQ8s2/FxF/cyNIwJIFCqspnWxQgGNNyokET9HapmlPSxjpFRF0q6L2WCg9OY3mSVRq4oI6hg1igOQ12KlLyN71XSJ3c8w0Ay5ho48TQ9l3f3Iu97mntBCe9l0R9pnT8wj1VI8YJxloXwUMG2yeTjA9aBI=
  file:
    - build/linux_arm.tar.gz
    - build/linux_arm64.tar.gz
    - build/linux_386.tar.gz
    - build/linux_amd64.tar.gz
    - build/linux_amd64_large_disk.tar.gz
    - build/darwin_amd64.tar.gz
    - build/darwin_amd64_large_disk.tar.gz
    - build/windows_386.zip
    - build/windows_amd64.zip
    - build/windows_amd64_large_disk.zip
    - build/freebsd_arm.tar.gz
    - build/freebsd_amd64.tar.gz
    - build/freebsd_386.tar.gz
    - build/netbsd_arm.tar.gz
    - build/netbsd_amd64.tar.gz
    - build/netbsd_386.tar.gz
    - build/openbsd_arm.tar.gz
    - build/openbsd_amd64.tar.gz
    - build/openbsd_386.tar.gz
  on:
    tags: true
    repo: chrislusf/seaweedfs
    go: 1.17.x
Makefile (deleted, 147 lines)
@@ -1,147 +0,0 @@
BINARY = weed/weed
package = github.com/chrislusf/seaweedfs/weed

GO_FLAGS = #-v
SOURCE_DIR = ./weed/

appname := weed

sources := $(wildcard *.go)

COMMIT ?= $(shell git rev-parse --short HEAD)
LDFLAGS ?= -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${COMMIT}

build = CGO_ENABLED=0 GOOS=$(1) GOARCH=$(2) go build -ldflags "-extldflags -static $(LDFLAGS)" -o build/$(appname)$(3) $(SOURCE_DIR)
tar = cd build && tar -cvzf $(1)_$(2).tar.gz $(appname)$(3) && rm $(appname)$(3)
zip = cd build && zip $(1)_$(2).zip $(appname)$(3) && rm $(appname)$(3)

build_large = CGO_ENABLED=0 GOOS=$(1) GOARCH=$(2) go build -tags 5BytesOffset -ldflags "-extldflags -static $(LDFLAGS)" -o build/$(appname)$(3) $(SOURCE_DIR)
tar_large = cd build && tar -cvzf $(1)_$(2)_large_disk.tar.gz $(appname)$(3) && rm $(appname)$(3)
zip_large = cd build && zip $(1)_$(2)_large_disk.zip $(appname)$(3) && rm $(appname)$(3)

all: build

.PHONY : clean deps build linux release windows_build darwin_build linux_build bsd_build clean

clean:
	go clean -i $(GO_FLAGS) $(SOURCE_DIR)
	rm -f $(BINARY)
	rm -rf build/

deps:
	go get $(GO_FLAGS) -d $(SOURCE_DIR)
	rm -rf /home/travis/gopath/src/github.com/coreos/etcd/vendor/golang.org/x/net/trace
	rm -rf /home/travis/gopath/src/go.etcd.io/etcd/vendor/golang.org/x/net/trace

build: deps
	go build $(GO_FLAGS) -ldflags "$(LDFLAGS)" -o $(BINARY) $(SOURCE_DIR)

gccgo_build: deps
	go build $(GO_FLAGS) -ldflags "$(LDFLAGS)" -compiler=gccgo -tags gccgo,noasm -o $(BINARY) $(SOURCE_DIR)

install: deps
	go install $(GO_FLAGS) -ldflags "$(LDFLAGS)" $(SOURCE_DIR)

linux: deps
	mkdir -p linux
	GOOS=linux GOARCH=amd64 go build $(GO_FLAGS) -ldflags "$(LDFLAGS)" -o linux/$(BINARY) $(SOURCE_DIR)

release: deps windows_build darwin_build linux_build bsd_build 5_byte_linux_build 5_byte_arm64_build 5_byte_darwin_build 5_byte_windows_build

##### LINUX BUILDS #####
5_byte_linux_build:
	$(call build_large,linux,amd64,)
	$(call tar_large,linux,amd64)

5_byte_darwin_build:
	$(call build_large,darwin,amd64,)
	$(call tar_large,darwin,amd64)

5_byte_windows_build:
	$(call build_large,windows,amd64,.exe)
	$(call zip_large,windows,amd64,.exe)

5_byte_arm_build: $(sources)
	$(call build_large,linux,arm,)
	$(call tar_large,linux,arm)

5_byte_arm64_build: $(sources)
	$(call build_large,linux,arm64,)
	$(call tar_large,linux,arm64)

linux_build: build/linux_arm.tar.gz build/linux_arm64.tar.gz build/linux_386.tar.gz build/linux_amd64.tar.gz

build/linux_386.tar.gz: $(sources)
	$(call build,linux,386,)
	$(call tar,linux,386)

build/linux_amd64.tar.gz: $(sources)
	$(call build,linux,amd64,)
	$(call tar,linux,amd64)

build/linux_arm.tar.gz: $(sources)
	$(call build,linux,arm,)
	$(call tar,linux,arm)

build/linux_arm64.tar.gz: $(sources)
	$(call build,linux,arm64,)
	$(call tar,linux,arm64)

##### DARWIN (MAC) BUILDS #####
darwin_build: build/darwin_amd64.tar.gz

build/darwin_amd64.tar.gz: $(sources)
	$(call build,darwin,amd64,)
	$(call tar,darwin,amd64)

##### WINDOWS BUILDS #####
windows_build: build/windows_386.zip build/windows_amd64.zip

build/windows_386.zip: $(sources)
	$(call build,windows,386,.exe)
	$(call zip,windows,386,.exe)

build/windows_amd64.zip: $(sources)
	$(call build,windows,amd64,.exe)
	$(call zip,windows,amd64,.exe)

##### BSD BUILDS #####
bsd_build: build/freebsd_arm.tar.gz build/freebsd_386.tar.gz build/freebsd_amd64.tar.gz \
	build/netbsd_arm.tar.gz build/netbsd_386.tar.gz build/netbsd_amd64.tar.gz \
	build/openbsd_arm.tar.gz build/openbsd_386.tar.gz build/openbsd_amd64.tar.gz

build/freebsd_386.tar.gz: $(sources)
	$(call build,freebsd,386,)
	$(call tar,freebsd,386)

build/freebsd_amd64.tar.gz: $(sources)
	$(call build,freebsd,amd64,)
	$(call tar,freebsd,amd64)

build/freebsd_arm.tar.gz: $(sources)
	$(call build,freebsd,arm,)
	$(call tar,freebsd,arm)

build/netbsd_386.tar.gz: $(sources)
	$(call build,netbsd,386,)
	$(call tar,netbsd,386)

build/netbsd_amd64.tar.gz: $(sources)
	$(call build,netbsd,amd64,)
	$(call tar,netbsd,amd64)

build/netbsd_arm.tar.gz: $(sources)
	$(call build,netbsd,arm,)
	$(call tar,netbsd,arm)

build/openbsd_386.tar.gz: $(sources)
	$(call build,openbsd,386,)
	$(call tar,openbsd,386)

build/openbsd_amd64.tar.gz: $(sources)
	$(call build,openbsd,amd64,)
	$(call tar,openbsd,amd64)

build/openbsd_arm.tar.gz: $(sources)
	$(call build,openbsd,arm,)
	$(call tar,openbsd,arm)
@@ -146,7 +146,8 @@ Faster and Cheaper than direct cloud storage!
 * [WebDAV] accesses as a mapped drive on Mac and Windows, or from mobile devices.
 * [AES256-GCM Encrypted Storage][FilerDataEncryption] safely stores the encrypted data.
 * [Super Large Files][SuperLargeFiles] stores large or super large files in tens of TB.
-* [Cloud Data Accelerator][RemoteStorage] transparently read and write existing cloud data at local speed with content cache, metadata cache, and asynchronous write back.
+* [Cloud Drive][CloudDrive] mounts cloud storage to local cluster, cached for fast read and write with asynchronous write back.
+* [Gateway to Remote Object Store][GatewayToRemoteObjectStore] mirrors bucket operations to remote object storage, in addition to [Cloud Drive][CloudDrive]

 ## Kubernetes ##
 * [Kubernetes CSI Driver][SeaweedFsCsiDriver] A Container Storage Interface (CSI) Driver. [![Docker Pulls](https://img.shields.io/docker/pulls/chrislusf/seaweedfs-csi-driver.svg?maxAge=4800)](https://hub.docker.com/r/chrislusf/seaweedfs-csi-driver/)
@@ -169,7 +170,8 @@ Faster and Cheaper than direct cloud storage!
 [ActiveActiveAsyncReplication]: https://github.com/chrislusf/seaweedfs/wiki/Filer-Active-Active-cross-cluster-continuous-synchronization
 [FilerStoreReplication]: https://github.com/chrislusf/seaweedfs/wiki/Filer-Store-Replication
 [KeyLargeValueStore]: https://github.com/chrislusf/seaweedfs/wiki/Filer-as-a-Key-Large-Value-Store
-[RemoteStorage]: https://github.com/chrislusf/seaweedfs/wiki/Cloud-Cache-Architecture
+[CloudDrive]: https://github.com/chrislusf/seaweedfs/wiki/Cloud-Drive-Architecture
+[GatewayToRemoteObjectStore]: https://github.com/chrislusf/seaweedfs/wiki/Gateway-to-Remote-Object-Storage


 [Back to TOC](#table-of-contents)
@@ -8,7 +8,9 @@ RUN \
     elif [ $(uname -m) == "x86_64" ] && [ $(getconf LONG_BIT) == "32" ]; then echo "386"; \
     elif [ $(uname -m) == "aarch64" ]; then echo "arm64"; \
     elif [ $(uname -m) == "armv7l" ]; then echo "arm"; \
-    elif [ $(uname -m) == "armv6l" ]; then echo "arm"; fi;) && \
+    elif [ $(uname -m) == "armv6l" ]; then echo "arm"; \
+    elif [ $(uname -m) == "s390x" ]; then echo "s390x"; \
+    elif [ $(uname -m) == "ppc64le" ]; then echo "ppc64le"; fi;) && \
     echo "Building for $ARCH" 1>&2 && \
     SUPERCRONIC_SHA1SUM=$(echo $ARCH | sed 's/386/e0126b0102b9f388ecd55714358e3ad60d0cebdb/g' | sed 's/amd64/5ddf8ea26b56d4a7ff6faecdd8966610d5cb9d85/g' | sed 's/arm64/e2714c43e7781bf1579c85aa61259245f56dbba1/g' | sed 's/arm/47481c3341bc3a1ae91a728e0cc63c8e6d3791ad/g') && \
     SUPERCRONIC_URL=https://github.com/aptible/supercronic/releases/download/v0.1.9/supercronic-linux-$ARCH && \
go.mod (109 lines changed)
@@ -3,24 +3,25 @@ module github.com/chrislusf/seaweedfs
 go 1.17

 require (
-	cloud.google.com/go v0.58.0 // indirect
+	cloud.google.com/go v0.94.1 // indirect
 	cloud.google.com/go/pubsub v1.3.1
-	cloud.google.com/go/storage v1.9.0
-	github.com/Azure/azure-pipeline-go v0.2.2 // indirect
-	github.com/Azure/azure-storage-blob-go v0.9.0
+	cloud.google.com/go/storage v1.16.1
+	github.com/Azure/azure-pipeline-go v0.2.3
+	github.com/Azure/azure-storage-blob-go v0.14.0
 	github.com/BurntSushi/toml v0.3.1 // indirect
 	github.com/DataDog/zstd v1.3.6-0.20190409195224-796139022798 // indirect
 	github.com/OneOfOne/xxhash v1.2.2
 	github.com/Shopify/sarama v1.23.1
-	github.com/aws/aws-sdk-go v1.34.30
+	github.com/aws/aws-sdk-go v1.35.3
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/buraksezer/consistent v0.0.0-20191006190839-693edf70fd72
 	github.com/bwmarrin/snowflake v0.3.0
 	github.com/cespare/xxhash v1.1.0
 	github.com/cespare/xxhash/v2 v2.1.1 // indirect
 	github.com/chrislusf/raft v1.0.7
 	github.com/colinmarc/hdfs/v2 v2.2.0
 	github.com/coreos/go-semver v0.3.0 // indirect
-	github.com/coreos/go-systemd/v22 v22.0.0 // indirect
+	github.com/coreos/go-systemd/v22 v22.3.2 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
 	github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
 	github.com/disintegration/imaging v1.6.2
@@ -42,31 +43,29 @@ require (
 	github.com/go-stack/stack v1.8.0 // indirect
 	github.com/go-zookeeper/zk v1.0.2 // indirect
 	github.com/gocql/gocql v0.0.0-20210707082121-9a3953d1826d
 	github.com/gogo/protobuf v1.2.2-0.20190730201129-28a6bbf47e48 // indirect
 	github.com/golang-jwt/jwt v3.2.1+incompatible
-	github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e
-	github.com/golang/protobuf v1.4.3
+	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da
+	github.com/golang/protobuf v1.5.2
 	github.com/golang/snappy v0.0.4 // indirect
 	github.com/google/btree v1.0.0
-	github.com/google/go-cmp v0.5.5 // indirect
-	github.com/google/uuid v1.1.1
+	github.com/google/go-cmp v0.5.6 // indirect
+	github.com/google/uuid v1.3.0
 	github.com/google/wire v0.4.0 // indirect
 	github.com/googleapis/gax-go v2.0.2+incompatible // indirect
-	github.com/googleapis/gax-go/v2 v2.0.5 // indirect
+	github.com/googleapis/gax-go/v2 v2.1.0 // indirect
 	github.com/gorilla/mux v1.7.4
 	github.com/gorilla/websocket v1.4.1 // indirect
-	github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4
-	github.com/grpc-ecosystem/grpc-gateway v1.11.0 // indirect
+	github.com/grpc-ecosystem/go-grpc-middleware v1.1.0
 	github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect
 	github.com/hashicorp/errwrap v1.0.0 // indirect
 	github.com/hashicorp/go-multierror v1.0.0 // indirect
-	github.com/hashicorp/go-uuid v1.0.1 // indirect
+	github.com/hashicorp/go-uuid v1.0.2 // indirect
 	github.com/hashicorp/hcl v1.0.0 // indirect
 	github.com/jcmturner/gofork v1.0.0 // indirect
 	github.com/jcmturner/gokrb5/v8 v8.4.1
 	github.com/jinzhu/copier v0.2.8
 	github.com/jmespath/go-jmespath v0.4.0 // indirect
 	github.com/json-iterator/go v1.1.11
 	github.com/jstemmer/go-junit-report v0.9.1 // indirect
 	github.com/karlseguin/ccache/v2 v2.0.7
 	github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
 	github.com/klauspost/compress v1.10.9 // indirect
@@ -79,7 +78,6 @@ require (
 	github.com/mailru/easyjson v0.7.1 // indirect
 	github.com/mattn/go-ieproxy v0.0.1 // indirect
 	github.com/mattn/go-isatty v0.0.12 // indirect
 	github.com/mattn/go-runewidth v0.0.4 // indirect
 	github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
 	github.com/mitchellh/mapstructure v1.1.2 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
@@ -105,66 +103,55 @@ require (
 	github.com/seaweedfs/fuse v1.2.0
 	github.com/seaweedfs/goexif v1.0.2
 	github.com/sirupsen/logrus v1.6.0 // indirect
 	github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e
 	github.com/spaolacci/murmur3 v1.1.0 // indirect
 	github.com/spf13/afero v1.6.0 // indirect
 	github.com/spf13/cast v1.3.0 // indirect
 	github.com/spf13/jwalterweatherman v1.1.0 // indirect
 	github.com/spf13/pflag v1.0.3 // indirect
 	github.com/spf13/viper v1.4.0
 	github.com/streadway/amqp v0.0.0-20200108173154-1c71cc93ed71
-	github.com/stretchr/testify v1.6.1
-	github.com/syndtr/goleveldb v1.0.0
+	github.com/stretchr/testify v1.7.0
+	github.com/syndtr/goleveldb v1.0.1-0.20190318030020-c3a204f8e965
 	github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c
 	github.com/tidwall/gjson v1.8.1
 	github.com/tidwall/match v1.0.3
 	github.com/tidwall/pretty v1.1.0 // indirect
 	github.com/tikv/client-go/v2 v2.0.0-alpha.0.20210824090536-16d902a3c7e5
 	github.com/tsuna/gohbase v0.0.0-20201125011725-348991136365
 	github.com/tylertreat/BoomFilters v0.0.0-20210315201527-1a82519a3e43
 	github.com/valyala/bytebufferpool v1.0.0
 	github.com/viant/assertly v0.5.4 // indirect
 	github.com/viant/ptrie v0.3.0
 	github.com/viant/toolbox v0.33.2 // indirect
 	github.com/willf/bitset v1.1.10 // indirect
 	github.com/willf/bloom v2.0.3+incompatible
 	github.com/xdg-go/pbkdf2 v1.0.0 // indirect
 	github.com/xdg-go/scram v1.0.2 // indirect
 	github.com/xdg-go/stringprep v1.0.2 // indirect
 	github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d // indirect
-	go.etcd.io/etcd v3.3.15+incompatible
+	go.etcd.io/etcd v3.3.25+incompatible
 	go.mongodb.org/mongo-driver v1.7.0
-	go.opencensus.io v0.22.4 // indirect
+	go.opencensus.io v0.23.0 // indirect
 	go.opentelemetry.io/otel v0.15.0 // indirect
 	go.uber.org/atomic v1.6.0 // indirect
 	go.uber.org/multierr v1.5.0 // indirect
 	go.uber.org/zap v1.14.1 // indirect
 	gocloud.dev v0.20.0
 	gocloud.dev/pubsub/natspubsub v0.20.0
 	gocloud.dev/pubsub/rabbitpubsub v0.20.0
-	golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 // indirect
+	golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 // indirect
 	golang.org/x/image v0.0.0-20200119044424-58c23975cae1
 	golang.org/x/lint v0.0.0-20200302205851-738671d3881b // indirect
 	golang.org/x/mod v0.3.0 // indirect
 	golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d
-	golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d // indirect
-	golang.org/x/sync v0.0.0-20201207232520-09787c993a3a // indirect
-	golang.org/x/sys v0.0.0-20210817142637-7d9622a276b7
+	golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f // indirect
+	golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf
 	golang.org/x/text v0.3.6 // indirect
-	golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78
+	golang.org/x/tools v0.1.5
 	golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
-	google.golang.org/api v0.26.0
-	google.golang.org/appengine v1.6.6 // indirect
-	google.golang.org/genproto v0.0.0-20200608115520-7c474a2e3482 // indirect
-	google.golang.org/grpc v1.29.1
-	google.golang.org/protobuf v1.26.0-rc.1
+	google.golang.org/api v0.56.0
+	google.golang.org/appengine v1.6.7 // indirect
+	google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83 // indirect
+	google.golang.org/grpc v1.40.0
+	google.golang.org/protobuf v1.27.1
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/jcmturner/aescts.v1 v1.0.1 // indirect
 	gopkg.in/jcmturner/dnsutils.v1 v1.0.1 // indirect
 	gopkg.in/jcmturner/goidentity.v3 v3.0.0 // indirect
 	gopkg.in/jcmturner/gokrb5.v7 v7.3.0 // indirect
 	gopkg.in/jcmturner/rpc.v1 v1.1.0 // indirect
 	gopkg.in/yaml.v2 v2.3.0 // indirect
 	gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c // indirect
 	honnef.co/go/tools v0.0.1-2020.1.4 // indirect
 	modernc.org/b v1.0.0 // indirect
 	modernc.org/cc/v3 v3.33.5 // indirect
 	modernc.org/ccgo/v3 v3.9.4 // indirect
@@ -177,7 +164,39 @@ require (
 	modernc.org/token v1.0.0 // indirect
 )

+require (
+	github.com/coreos/etcd v3.3.10+incompatible // indirect
+	github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 // indirect
+	github.com/d4l3k/messagediff v1.2.1 // indirect
+	github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 // indirect
+	github.com/gogo/protobuf v1.3.2 // indirect
+	github.com/jcmturner/aescts/v2 v2.0.0 // indirect
+	github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect
+	github.com/jcmturner/goidentity/v6 v6.0.1 // indirect
+	github.com/jcmturner/rpc/v2 v2.0.2 // indirect
+	github.com/mattn/go-runewidth v0.0.7 // indirect
+	github.com/opentracing/opentracing-go v1.2.0 // indirect
+	github.com/pingcap/errors v0.11.5-0.20201126102027-b0a155152ca3 // indirect
+	github.com/pingcap/failpoint v0.0.0-20210316064728-7acb0f0a3dfd // indirect
+	github.com/pingcap/kvproto v0.0.0-20210806074406-317f69fb54b4 // indirect
+	github.com/pingcap/log v0.0.0-20210317133921-96f4fcab92a4 // indirect
+	github.com/pingcap/parser v0.0.0-20210525032559-c37778aff307 // indirect
+	github.com/spf13/pflag v1.0.5 // indirect
+	github.com/tikv/pd v1.1.0-beta.0.20210323121136-78679e5e209d // indirect
+	github.com/twmb/murmur3 v1.1.3 // indirect
+	go.etcd.io/etcd/api/v3 v3.5.0 // indirect
+	go.etcd.io/etcd/client/pkg/v3 v3.5.0 // indirect
+	go.etcd.io/etcd/client/v3 v3.5.0 // indirect
+	go.uber.org/atomic v1.7.0 // indirect
+	go.uber.org/multierr v1.7.0 // indirect
+	go.uber.org/zap v1.17.0 // indirect
+	golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 // indirect
+	golang.org/x/mod v0.4.2 // indirect
+	golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect
+	gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
+	gopkg.in/yaml.v2 v2.4.0 // indirect
+	gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
+)

 // replace github.com/seaweedfs/fuse => /Users/chris/go/src/github.com/seaweedfs/fuse
 // replace github.com/chrislusf/raft => /Users/chris/go/src/github.com/chrislusf/raft

 replace go.etcd.io/etcd => go.etcd.io/etcd v0.5.0-alpha.5.0.20200425165423-262c93980547
@@ -1,5 +1,5 @@
 apiVersion: v1
 description: SeaweedFS
 name: seaweedfs
-appVersion: "2.63"
-version: "2.63"
+appVersion: "2.68"
+version: "2.68"
@@ -5,7 +5,7 @@

     <groupId>com.github.chrislusf</groupId>
     <artifactId>seaweedfs-client</artifactId>
-    <version>1.6.7</version>
+    <version>1.6.8</version>

     <parent>
         <groupId>org.sonatype.oss</groupId>
@@ -135,7 +135,7 @@
             <plugin>
                 <groupId>org.sonatype.plugins</groupId>
                 <artifactId>nexus-staging-maven-plugin</artifactId>
-                <version>1.6.7</version>
+                <version>1.6.8</version>
                 <extensions>true</extensions>
                 <configuration>
                     <serverId>ossrh</serverId>
@@ -5,7 +5,7 @@

     <groupId>com.github.chrislusf</groupId>
     <artifactId>seaweedfs-client</artifactId>
-    <version>1.6.7</version>
+    <version>1.6.8</version>

     <parent>
         <groupId>org.sonatype.oss</groupId>
@@ -130,7 +130,7 @@
             <plugin>
                 <groupId>org.sonatype.plugins</groupId>
                 <artifactId>nexus-staging-maven-plugin</artifactId>
-                <version>1.6.7</version>
+                <version>1.6.8</version>
                 <extensions>true</extensions>
                 <configuration>
                     <serverId>ossrh</serverId>
@@ -5,7 +5,7 @@

     <groupId>com.github.chrislusf</groupId>
     <artifactId>seaweedfs-client</artifactId>
-    <version>1.6.7</version>
+    <version>1.6.8</version>

     <parent>
         <groupId>org.sonatype.oss</groupId>
@@ -4,7 +4,6 @@ import com.google.common.base.Strings;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

-import java.io.File;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Iterator;
@@ -15,7 +14,11 @@ public class FilerClient extends FilerGrpcClient {
     private static final Logger LOG = LoggerFactory.getLogger(FilerClient.class);

     public FilerClient(String host, int grpcPort) {
-        super(host, grpcPort);
+        super(host, grpcPort-10000, grpcPort);
     }

+    public FilerClient(String host, int port, int grpcPort) {
+        super(host, port, grpcPort);
+    }
+
     public static String toFileId(FilerProto.FileId fid) {
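The two-argument constructor keeps working but now derives the filer's HTTP port as grpcPort - 10000, while the new three-argument constructor takes both ports explicitly. A minimal usage sketch, assuming the client's seaweedfs.client package and a filer on example ports 8888 (HTTP) / 18888 (gRPC):

import seaweedfs.client.FilerClient;

public class FilerClientPortsExample {
    public static void main(String[] args) {
        // Explicit form added in this change: HTTP port and gRPC port are passed separately.
        FilerClient explicit = new FilerClient("localhost", 8888, 18888);

        // Compatibility form: the HTTP port is derived as grpcPort - 10000,
        // so this call is equivalent to the one above.
        FilerClient derived = new FilerClient("localhost", 18888);

        // exists() and mkdirs() are existing FilerClient calls; the paths are example values.
        System.out.println(derived.exists("/"));
        System.out.println(explicit.mkdirs("/example-dir", 0755));
    }
}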
@@ -59,24 +62,39 @@ public class FilerClient extends FilerGrpcClient {

     public static FilerProto.Entry afterEntryDeserialization(FilerProto.Entry entry) {
         if (entry.getChunksList().size() <= 0) {
-            return entry;
-        }
-        String fileId = entry.getChunks(0).getFileId();
-        if (fileId != null && fileId.length() != 0) {
-            return entry;
-        }
-        FilerProto.Entry.Builder entryBuilder = entry.toBuilder();
-        entryBuilder.clearChunks();
-        for (FilerProto.FileChunk chunk : entry.getChunksList()) {
-            FilerProto.FileChunk.Builder chunkBuilder = chunk.toBuilder();
-            chunkBuilder.setFileId(toFileId(chunk.getFid()));
-            String sourceFileId = toFileId(chunk.getSourceFid());
-            if (sourceFileId != null) {
-                chunkBuilder.setSourceFileId(sourceFileId);
-            }
-            entryBuilder.addChunks(chunkBuilder);
-        }
-        return entryBuilder.build();
+            if (entry.getContent().isEmpty()) {
+                return entry;
+            } else {
+                if (entry.getAttributes().getFileSize() <= 0) {
+                    FilerProto.Entry.Builder entryBuilder = entry.toBuilder();
+                    FilerProto.FuseAttributes.Builder attrBuilder = entry.getAttributes().toBuilder();
+                    attrBuilder.setFileSize(entry.getContent().size());
+                    entryBuilder.setAttributes(attrBuilder);
+                    return entryBuilder.build();
+                }
+            }
+            return entry;
+        } else {
+            FilerProto.Entry.Builder entryBuilder = entry.toBuilder();
+            entryBuilder.clearChunks();
+            long fileSize = 0;
+            for (FilerProto.FileChunk chunk : entry.getChunksList()) {
+                fileSize = Math.max(fileSize, chunk.getOffset()+chunk.getSize());
+                FilerProto.FileChunk.Builder chunkBuilder = chunk.toBuilder();
+                chunkBuilder.setFileId(toFileId(chunk.getFid()));
+                String sourceFileId = toFileId(chunk.getSourceFid());
+                if (sourceFileId != null) {
+                    chunkBuilder.setSourceFileId(sourceFileId);
+                }
+                entryBuilder.addChunks(chunkBuilder);
+            }
+            if (entry.getAttributes().getFileSize() <= 0) {
+                FilerProto.FuseAttributes.Builder attrBuilder = entry.getAttributes().toBuilder();
+                attrBuilder.setFileSize(fileSize);
+                entryBuilder.setAttributes(attrBuilder);
+            }
+            return entryBuilder.build();
+        }
     }

     public boolean mkdirs(String path, int mode) {
@@ -93,9 +111,9 @@ public class FilerClient extends FilerGrpcClient {
         if ("/".equals(path)) {
             return true;
         }
-        File pathFile = new File(path);
-        String parent = pathFile.getParent().replace('\\','/');
-        String name = pathFile.getName();
+        String[] dirAndName = SeaweedUtil.toDirAndName(path);
+        String parent = dirAndName[0];
+        String name = dirAndName[1];

         mkdirs(parent, mode, uid, gid, userName, groupNames);

@@ -114,35 +132,32 @@ public class FilerClient extends FilerGrpcClient {

     public boolean mv(String oldPath, String newPath) {

-        File oldPathFile = new File(oldPath);
-        String oldParent = oldPathFile.getParent().replace('\\','/');
-        String oldName = oldPathFile.getName();
+        String[] oldDirAndName = SeaweedUtil.toDirAndName(oldPath);
+        String oldParent = oldDirAndName[0];
+        String oldName = oldDirAndName[1];

-        File newPathFile = new File(newPath);
-        String newParent = newPathFile.getParent().replace('\\','/');
-        String newName = newPathFile.getName();
+        String[] newDirAndName = SeaweedUtil.toDirAndName(newPath);
+        String newParent = newDirAndName[0];
+        String newName = newDirAndName[1];

         return atomicRenameEntry(oldParent, oldName, newParent, newName);

     }

     public boolean exists(String path){
-        File pathFile = new File(path);
-        String parent = pathFile.getParent();
-        String entryName = pathFile.getName();
-        if(parent == null) {
-            parent = path;
-            entryName ="";
-        }
-        return lookupEntry(parent, entryName) != null;
+        String[] dirAndName = SeaweedUtil.toDirAndName(path);
+        String parent = dirAndName[0];
+        String entryName = dirAndName[1];

+        return lookupEntry(parent, entryName) != null;
     }

     public boolean rm(String path, boolean isRecursive, boolean ignoreRecusiveError) {

-        File pathFile = new File(path);
-        String parent = pathFile.getParent().replace('\\','/');
-        String name = pathFile.getName();
+        String[] dirAndName = SeaweedUtil.toDirAndName(path);
+        String parent = dirAndName[0];
+        String name = dirAndName[1];

         return deleteEntry(
             parent,
@@ -153,17 +168,19 @@ public class FilerClient extends FilerGrpcClient {
     }

     public boolean touch(String path, int mode) {

         String currentUser = System.getProperty("user.name");

         long now = System.currentTimeMillis() / 1000L;
         return touch(path, now, mode, 0, 0, currentUser, new String[]{});

     }

     public boolean touch(String path, long modifiedTimeSecond, int mode, int uid, int gid, String userName, String[] groupNames) {

-        File pathFile = new File(path);
-        String parent = pathFile.getParent().replace('\\','/');
-        String name = pathFile.getName();
+        String[] dirAndName = SeaweedUtil.toDirAndName(path);
+        String parent = dirAndName[0];
+        String name = dirAndName[1];

         FilerProto.Entry entry = lookupEntry(parent, name);
         if (entry == null) {
@@ -40,11 +40,11 @@
     private int volumeServerAccess = VOLUME_SERVER_ACCESS_DIRECT;
     private String filerAddress;

-    public FilerGrpcClient(String host, int grpcPort) {
-        this(host, grpcPort, sslContext);
+    public FilerGrpcClient(String host, int port, int grpcPort) {
+        this(host, port, grpcPort, sslContext);
     }

-    public FilerGrpcClient(String host, int grpcPort, SslContext sslContext) {
+    public FilerGrpcClient(String host, int port, int grpcPort, SslContext sslContext) {

         this(sslContext == null ?
                 ManagedChannelBuilder.forAddress(host, grpcPort).usePlaintext()
@@ -54,7 +54,7 @@
                         .negotiationType(NegotiationType.TLS)
                         .sslContext(sslContext));

-        filerAddress = String.format("%s:%d", host, grpcPort - 10000);
+        filerAddress = SeaweedUtil.joinHostPort(host, port);

         FilerProto.GetFilerConfigurationResponse filerConfigurationResponse =
                 this.getBlockingStub().getFilerConfiguration(
@@ -64,10 +64,11 @@
             startOffset += gap;
         }

-        FilerProto.Locations locations = knownLocations.get(parseVolumeId(chunkView.fileId));
+        String volumeId = parseVolumeId(chunkView.fileId);
+        FilerProto.Locations locations = knownLocations.get(volumeId);
         if (locations == null || locations.getLocationsCount() == 0) {
             LOG.error("failed to locate {}", chunkView.fileId);
             // log here!
+            volumeIdCache.clearLocations(volumeId);
             return 0;
         }
@@ -27,4 +27,30 @@ public class SeaweedUtil {
     public static CloseableHttpClient getClosableHttpClient() {
         return httpClient;
     }
+
+    public static String[] toDirAndName(String fullpath) {
+        if (fullpath == null) {
+            return new String[]{"/", ""};
+        }
+        if (fullpath.endsWith("/")) {
+            fullpath = fullpath.substring(0, fullpath.length() - 1);
+        }
+        if (fullpath.length() == 0) {
+            return new String[]{"/", ""};
+        }
+        int sep = fullpath.lastIndexOf("/");
+        String parent = sep == 0 ? "/" : fullpath.substring(0, sep);
+        String name = fullpath.substring(sep + 1);
+        return new String[]{parent, name};
+    }
+
+    public static String joinHostPort(String host, int port) {
+        if (host.startsWith("[") && host.endsWith("]")) {
+            return host + ":" + port;
+        }
+        if (host.indexOf(':') >= 0) {
+            return "[" + host + "]:" + port;
+        }
+        return host + ":" + port;
+    }
 }
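The new SeaweedUtil helpers replace the java.io.File based path splitting used elsewhere in FilerClient and add IPv6-safe host:port formatting. A small illustration of the expected outputs; the paths and hosts are made-up examples, and the class is assumed to sit alongside seaweedfs.client.SeaweedUtil:

public class SeaweedUtilExample {
    public static void main(String[] args) {
        String[] p1 = SeaweedUtil.toDirAndName("/buckets/photos/cat.jpg");
        System.out.println(p1[0] + " | " + p1[1]);   // /buckets/photos | cat.jpg

        String[] p2 = SeaweedUtil.toDirAndName("/topdir/");
        System.out.println(p2[0] + " | " + p2[1]);   // / | topdir  (trailing slash stripped)

        String[] p3 = SeaweedUtil.toDirAndName(null);
        System.out.println(p3[0] + " | " + p3[1]);   // / |        (null maps to the root)

        System.out.println(SeaweedUtil.joinHostPort("localhost", 8888));   // localhost:8888
        System.out.println(SeaweedUtil.joinHostPort("::1", 18888));        // [::1]:18888
        System.out.println(SeaweedUtil.joinHostPort("[fe80::1]", 18888));  // [fe80::1]:18888
    }
}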
@@ -59,7 +59,7 @@ public class SeaweedWrite {
         String fileId = response.getFileId();
         String auth = response.getAuth();

-        String targetUrl = filerClient.getChunkUrl(fileId, response.getUrl(), response.getPublicUrl());
+        String targetUrl = filerClient.getChunkUrl(fileId, response.getLocation().getUrl(), response.getLocation().getPublicUrl());

         ByteString cipherKeyString = com.google.protobuf.ByteString.EMPTY;
         byte[] cipherKey = null;
@@ -26,6 +26,13 @@ public class VolumeIdCache {
         return this.cache.getIfPresent(volumeId);
     }

+    public void clearLocations(String volumeId) {
+        if (this.cache == null) {
+            return;
+        }
+        this.cache.invalidate(volumeId);
+    }
+
     public void setLocations(String volumeId, FilerProto.Locations locations) {
         if (this.cache == null) {
             return;
@@ -238,13 +238,12 @@ message AssignVolumeRequest {

 message AssignVolumeResponse {
     string file_id = 1;
-    string url = 2;
-    string public_url = 3;
     int32 count = 4;
     string auth = 5;
     string collection = 6;
     string replication = 7;
     string error = 8;
+    Location location = 9;
 }

 message LookupVolumeRequest {
@@ -258,6 +257,7 @@ message Locations {
 message Location {
     string url = 1;
     string public_url = 2;
+    uint32 grpc_port = 3;
 }
 message LookupVolumeResponse {
     map<string, Locations> locations_map = 1;
@@ -305,6 +305,7 @@ message GetFilerConfigurationResponse {
     string metrics_address = 9;
     int32 metrics_interval_sec = 10;
     string version = 11;
+    string cluster_id = 12;
 }

 message SubscribeMetadataRequest {
@@ -312,6 +313,7 @@ message SubscribeMetadataRequest {
     string path_prefix = 2;
     int64 since_ns = 3;
     int32 signature = 4;
+    repeated string path_prefixes = 6;
 }
 message SubscribeMetadataResponse {
     string directory = 1;
@@ -336,6 +338,7 @@ message KeepConnectedResponse {
 message LocateBrokerRequest {
     string resource = 1;
 }

 message LocateBrokerResponse {
     bool found = 1;
     // if found, send the exact address
@@ -386,23 +389,6 @@ message FilerConf {
 /////////////////////////
 // Remote Storage related
 /////////////////////////
-message RemoteConf {
-    string type = 1;
-    string name = 2;
-    string s3_access_key = 4;
-    string s3_secret_key = 5;
-    string s3_region = 6;
-    string s3_endpoint = 7;
-}
-
-message RemoteStorageMapping {
-    map<string,RemoteStorageLocation> mappings = 1;
-}
-message RemoteStorageLocation {
-    string name = 1;
-    string bucket = 2;
-    string path = 3;
-}
 message DownloadToLocalRequest {
     string directory = 1;
     string name = 2;
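With AssignVolumeResponse now nesting the volume server address in a Location message (which also gained grpc_port), callers read the URL through the location field, as the SeaweedWrite change above does. A hedged sketch of a helper that builds an upload URL from the generated classes; the http:// prefix and the preference for publicUrl are illustrative assumptions, not behavior defined by this commit:

public class AssignLocationExample {
    // FilerProto is the Java class generated from filer.proto; this helper itself is hypothetical.
    static String chunkTargetUrl(FilerProto.AssignVolumeResponse response) {
        String fileId = response.getFileId();
        // url/public_url moved from top-level fields into the nested Location message.
        String url = response.getLocation().getUrl();
        String publicUrl = response.getLocation().getPublicUrl();
        return "http://" + (publicUrl.isEmpty() ? url : publicUrl) + "/" + fileId;
    }
}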
@@ -11,13 +11,13 @@
     <dependency>
         <groupId>com.github.chrislusf</groupId>
         <artifactId>seaweedfs-client</artifactId>
-        <version>1.6.7</version>
+        <version>1.6.8</version>
         <scope>compile</scope>
     </dependency>
     <dependency>
         <groupId>com.github.chrislusf</groupId>
         <artifactId>seaweedfs-hadoop2-client</artifactId>
-        <version>1.6.7</version>
+        <version>1.6.8</version>
         <scope>compile</scope>
     </dependency>
     <dependency>
@@ -86,7 +86,7 @@
             <plugin>
                 <groupId>org.sonatype.plugins</groupId>
                 <artifactId>nexus-staging-maven-plugin</artifactId>
-                <version>1.6.7</version>
+                <version>1.6.8</version>
                 <extensions>true</extensions>
                 <configuration>
                     <serverId>ossrh</serverId>
@@ -301,7 +301,7 @@
         </snapshotRepository>
     </distributionManagement>
     <properties>
-        <seaweedfs.client.version>1.6.7</seaweedfs.client.version>
+        <seaweedfs.client.version>1.6.8</seaweedfs.client.version>
         <hadoop.version>2.9.2</hadoop.version>
     </properties>
 </project>
@@ -5,7 +5,7 @@
     <modelVersion>4.0.0</modelVersion>

     <properties>
-        <seaweedfs.client.version>1.6.7</seaweedfs.client.version>
+        <seaweedfs.client.version>1.6.8</seaweedfs.client.version>
         <hadoop.version>2.9.2</hadoop.version>
     </properties>

@@ -105,7 +105,7 @@
             <plugin>
                 <groupId>org.sonatype.plugins</groupId>
                 <artifactId>nexus-staging-maven-plugin</artifactId>
-                <version>1.6.7</version>
+                <version>1.6.8</version>
                 <extensions>true</extensions>
                 <configuration>
                     <serverId>ossrh</serverId>
@@ -23,6 +23,7 @@ public class SeaweedFileSystem extends FileSystem {

     public static final String FS_SEAWEED_FILER_HOST = "fs.seaweed.filer.host";
     public static final String FS_SEAWEED_FILER_PORT = "fs.seaweed.filer.port";
+    public static final String FS_SEAWEED_FILER_PORT_GRPC = "fs.seaweed.filer.port.grpc";
     public static final int FS_SEAWEED_DEFAULT_PORT = 8888;
     public static final String FS_SEAWEED_BUFFER_SIZE = "fs.seaweed.buffer.size";
     public static final String FS_SEAWEED_REPLICATION = "fs.seaweed.replication";
@@ -50,9 +51,6 @@
         // get host information from uri (overrides info in conf)
         String host = uri.getHost();
         host = (host == null) ? conf.get(FS_SEAWEED_FILER_HOST, "localhost") : host;
-        if (host == null) {
-            throw new IOException("Invalid host specified");
-        }
         conf.set(FS_SEAWEED_FILER_HOST, host);

         // get port information from uri, (overrides info in conf)
@@ -60,10 +58,12 @@
         port = (port == -1) ? FS_SEAWEED_DEFAULT_PORT : port;
         conf.setInt(FS_SEAWEED_FILER_PORT, port);

+        int grpcPort = conf.getInt(FS_SEAWEED_FILER_PORT_GRPC, port+10000);
+
         setConf(conf);
         this.uri = uri;

-        seaweedFileSystemStore = new SeaweedFileSystemStore(host, port, conf);
+        seaweedFileSystemStore = new SeaweedFileSystemStore(host, port, grpcPort, conf);

     }
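The Hadoop adapters now look up an explicit gRPC port and fall back to the HTTP filer port plus 10000 when fs.seaweed.filer.port.grpc is not set. A configuration sketch mirroring that lookup; the host and port values are examples only:

import org.apache.hadoop.conf.Configuration;

public class SeaweedHadoopConfExample {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set("fs.seaweed.filer.host", "localhost");
        conf.setInt("fs.seaweed.filer.port", 8888);
        // Only needed when the filer's gRPC port is not HTTP port + 10000.
        conf.setInt("fs.seaweed.filer.port.grpc", 18888);

        int port = conf.getInt("fs.seaweed.filer.port", 8888);
        int grpcPort = conf.getInt("fs.seaweed.filer.port.grpc", port + 10000);
        System.out.println(port + " -> " + grpcPort);
    }
}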
@@ -27,9 +27,8 @@ public class SeaweedFileSystemStore {
     private FilerClient filerClient;
     private Configuration conf;

-    public SeaweedFileSystemStore(String host, int port, Configuration conf) {
-        int grpcPort = 10000 + port;
-        filerClient = new FilerClient(host, grpcPort);
+    public SeaweedFileSystemStore(String host, int port, int grpcPort, Configuration conf) {
+        filerClient = new FilerClient(host, port, grpcPort);
         this.conf = conf;
         String volumeServerAccessMode = this.conf.get(FS_SEAWEED_VOLUME_SERVER_ACCESS, "direct");
         if (volumeServerAccessMode.equals("publicUrl")) {
@@ -86,7 +86,7 @@
             <plugin>
                 <groupId>org.sonatype.plugins</groupId>
                 <artifactId>nexus-staging-maven-plugin</artifactId>
-                <version>1.6.7</version>
+                <version>1.6.8</version>
                 <extensions>true</extensions>
                 <configuration>
                     <serverId>ossrh</serverId>
@@ -309,7 +309,7 @@
         </snapshotRepository>
     </distributionManagement>
     <properties>
-        <seaweedfs.client.version>1.6.7</seaweedfs.client.version>
+        <seaweedfs.client.version>1.6.8</seaweedfs.client.version>
         <hadoop.version>3.1.1</hadoop.version>
     </properties>
 </project>
@@ -5,7 +5,7 @@
     <modelVersion>4.0.0</modelVersion>

     <properties>
-        <seaweedfs.client.version>1.6.7</seaweedfs.client.version>
+        <seaweedfs.client.version>1.6.8</seaweedfs.client.version>
         <hadoop.version>3.1.1</hadoop.version>
     </properties>

@@ -105,7 +105,7 @@
             <plugin>
                 <groupId>org.sonatype.plugins</groupId>
                 <artifactId>nexus-staging-maven-plugin</artifactId>
-                <version>1.6.7</version>
+                <version>1.6.8</version>
                 <extensions>true</extensions>
                 <configuration>
                     <serverId>ossrh</serverId>
@@ -23,6 +23,7 @@ public class SeaweedFileSystem extends FileSystem {

     public static final String FS_SEAWEED_FILER_HOST = "fs.seaweed.filer.host";
     public static final String FS_SEAWEED_FILER_PORT = "fs.seaweed.filer.port";
+    public static final String FS_SEAWEED_FILER_PORT_GRPC = "fs.seaweed.filer.port.grpc";
     public static final int FS_SEAWEED_DEFAULT_PORT = 8888;
     public static final String FS_SEAWEED_BUFFER_SIZE = "fs.seaweed.buffer.size";
     public static final String FS_SEAWEED_REPLICATION = "fs.seaweed.replication";
@@ -50,9 +51,6 @@
         // get host information from uri (overrides info in conf)
         String host = uri.getHost();
         host = (host == null) ? conf.get(FS_SEAWEED_FILER_HOST, "localhost") : host;
-        if (host == null) {
-            throw new IOException("Invalid host specified");
-        }
         conf.set(FS_SEAWEED_FILER_HOST, host);

         // get port information from uri, (overrides info in conf)
@@ -60,10 +58,12 @@
         port = (port == -1) ? FS_SEAWEED_DEFAULT_PORT : port;
         conf.setInt(FS_SEAWEED_FILER_PORT, port);

+        int grpcPort = conf.getInt(FS_SEAWEED_FILER_PORT_GRPC, port+10000);
+
         setConf(conf);
         this.uri = uri;

-        seaweedFileSystemStore = new SeaweedFileSystemStore(host, port, conf);
+        seaweedFileSystemStore = new SeaweedFileSystemStore(host, port, grpcPort, conf);

     }
@@ -27,9 +27,8 @@ public class SeaweedFileSystemStore {
     private FilerClient filerClient;
     private Configuration conf;

-    public SeaweedFileSystemStore(String host, int port, Configuration conf) {
-        int grpcPort = 10000 + port;
-        filerClient = new FilerClient(host, grpcPort);
+    public SeaweedFileSystemStore(String host, int port, int grpcPort, Configuration conf) {
+        filerClient = new FilerClient(host, port, grpcPort);
         this.conf = conf;
         String volumeServerAccessMode = this.conf.get(FS_SEAWEED_VOLUME_SERVER_ACCESS, "direct");
         if (volumeServerAccessMode.equals("publicUrl")) {
File diff suppressed because it is too large.
@ -6,13 +6,9 @@ import (
"errors"
"flag"
"fmt"
"io"
"math"
"os"
"strings"

"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/storage/idx"
@ -20,6 +16,9 @@ import (
"github.com/chrislusf/seaweedfs/weed/storage/types"
"github.com/chrislusf/seaweedfs/weed/util"
"google.golang.org/grpc"
"io"
"math"
"os"
)

var (
@ -45,13 +44,13 @@ func main() {
grpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client")

vid := uint32(*volumeId)
servers := strings.Split(*serversStr, ",")
servers := pb.ServerAddresses(*serversStr).ToAddresses()
if len(servers) < 2 {
glog.Fatalf("You must specify more than 1 server\n")
}
var referenceServer string
var referenceServer pb.ServerAddress
var maxOffset int64
allFiles := map[string]map[types.NeedleId]needleState{}
allFiles := map[pb.ServerAddress]map[types.NeedleId]needleState{}
for _, addr := range servers {
files, offset, err := getVolumeFiles(vid, addr)
if err != nil {
@ -121,7 +120,7 @@ type needleState struct {
size types.Size
}

func getVolumeFiles(v uint32, addr string) (map[types.NeedleId]needleState, int64, error) {
func getVolumeFiles(v uint32, addr pb.ServerAddress) (map[types.NeedleId]needleState, int64, error) {
var idxFile *bytes.Reader
err := operation.WithVolumeServerClient(addr, grpcDialOption, func(vs volume_server_pb.VolumeServerClient) error {
ctx, cancel := context.WithCancel(context.Background())
@ -179,7 +178,7 @@ func getVolumeFiles(v uint32, addr string) (map[types.NeedleId]needleState, int6
return files, maxOffset, nil
}

func getNeedleFileId(v uint32, nid types.NeedleId, addr string) (string, error) {
func getNeedleFileId(v uint32, nid types.NeedleId, addr pb.ServerAddress) (string, error) {
var id string
err := operation.WithVolumeServerClient(addr, grpcDialOption, func(vs volume_server_pb.VolumeServerClient) error {
resp, err := vs.VolumeNeedleStatus(context.Background(), &volume_server_pb.VolumeNeedleStatusRequest{
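For reference, the change above swaps plain "host:port" strings for the typed pb.ServerAddress, and comma-separated lists for pb.ServerAddresses. A minimal sketch of that usage, inferred only from the call sites in this diff (the addresses are placeholders):

    package main

    import (
        "fmt"

        "github.com/chrislusf/seaweedfs/weed/pb"
    )

    func main() {
        // A comma-separated flag value becomes a slice of typed addresses.
        servers := pb.ServerAddresses("vol1:8080,vol2:8080").ToAddresses()

        // Individual strings convert directly to pb.ServerAddress, which the
        // operation helpers above now accept in place of plain strings.
        var reference pb.ServerAddress = servers[0]

        fmt.Println(len(servers), reference)
    }
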
@ -12,10 +12,11 @@ import (
)

var (
dir = flag.String("dir", "/tmp", "directory to create files")
n = flag.Int("n", 100, "the number of metadata")
tailFiler = flag.String("filer", "localhost:8888", "the filer address")
isWrite = flag.Bool("write", false, "only write")
dir = flag.String("dir", "/tmp", "directory to create files")
n = flag.Int("n", 100, "the number of metadata")
tailFiler = flag.String("filer", "localhost:8888", "the filer address")
isWrite = flag.Bool("write", false, "only write")
writeInterval = flag.Duration("writeInterval", 0, "write interval, e.g., 1s")
)

func main() {
@ -50,10 +51,11 @@ func main() {
}

func startGenerateMetadata() {
pb.WithFilerClient(*tailFiler, grpc.WithInsecure(), func(client filer_pb.SeaweedFilerClient) error {
pb.WithFilerClient(pb.ServerAddress(*tailFiler), grpc.WithInsecure(), func(client filer_pb.SeaweedFilerClient) error {

for i := 0; i < *n; i++ {
name := fmt.Sprintf("file%d", i)
glog.V(0).Infof("write %s/%s", *dir, name)
if err := filer_pb.CreateEntry(client, &filer_pb.CreateEntryRequest{
Directory: *dir,
Entry: &filer_pb.Entry{
@ -63,6 +65,9 @@ func startGenerateMetadata() {
fmt.Printf("create entry %s: %v\n", name, err)
return err
}
if *writeInterval > 0 {
time.Sleep(*writeInterval)
}
}

return nil
@ -72,8 +77,7 @@ func startGenerateMetadata() {

func startSubscribeMetadata(eachEntryFunc func(event *filer_pb.SubscribeMetadataResponse) error) {

tailErr := pb.FollowMetadata(*tailFiler, grpc.WithInsecure(), "tail",
*dir, 0, 0, eachEntryFunc, false)
tailErr := pb.FollowMetadata(pb.ServerAddress(*tailFiler), grpc.WithInsecure(), "tail", *dir, nil, 0, 0, eachEntryFunc, false)

if tailErr != nil {
fmt.Printf("tail %s: %v\n", *tailFiler, tailErr)
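For reference, pb.FollowMetadata gains an extra slice of additional path prefixes, passed as nil at the call sites above and as []string{filer.DirectoryEtcRemote} by the new remote gateway later in this commit. A minimal sketch of the new call shape, with the argument order copied from this diff and the filer address as a placeholder:

    package main

    import (
        "fmt"

        "github.com/chrislusf/seaweedfs/weed/pb"
        "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
        "google.golang.org/grpc"
    )

    func main() {
        eachEntryFunc := func(resp *filer_pb.SubscribeMetadataResponse) error {
            fmt.Println(resp.Directory)
            return nil
        }
        // Argument order copied from the call sites in this diff: filer address,
        // dial option, client name, path prefix, extra path prefixes (the new
        // parameter, nil when unused), two numeric arguments that every call site
        // here passes as 0, the event callback, and a fatal-error flag.
        err := pb.FollowMetadata(pb.ServerAddress("localhost:8888"), grpc.WithInsecure(), "tail",
            "/buckets", nil, 0, 0, eachEntryFunc, false)
        if err != nil {
            fmt.Printf("tail: %v\n", err)
        }
    }
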
@ -3,6 +3,7 @@ package main
import (
"flag"
"fmt"
"github.com/chrislusf/seaweedfs/weed/pb"
"log"
"math/rand"
"time"
@ -32,7 +33,7 @@ func main() {
go func() {
for {
println("vacuum threshold", *garbageThreshold)
_, _, err := util.Get(fmt.Sprintf("http://%s/vol/vacuum?garbageThreshold=%f", *master, *garbageThreshold))
_, _, err := util.Get(fmt.Sprintf("http://%s/vol/vacuum?garbageThreshold=%f", pb.ServerAddress(*master).ToHttpAddress(), *garbageThreshold))
if err != nil {
log.Fatalf("vacuum: %v", err)
}
@ -52,7 +53,7 @@ func main() {
}

func genFile(grpcDialOption grpc.DialOption, i int) (*operation.AssignResult, string) {
assignResult, err := operation.Assign(func() string { return *master }, grpcDialOption, &operation.VolumeAssignRequest{
assignResult, err := operation.Assign(func() pb.ServerAddress { return pb.ServerAddress(*master) }, grpcDialOption, &operation.VolumeAssignRequest{
Count: 1,
Replication: *replication,
})
@ -65,7 +66,16 @@ func genFile(grpcDialOption grpc.DialOption, i int) (*operation.AssignResult, st

targetUrl := fmt.Sprintf("http://%s/%s", assignResult.Url, assignResult.Fid)

_, err = operation.UploadData(targetUrl, fmt.Sprintf("test%d", i), false, data, false, "bench/test", nil, assignResult.Auth)
uploadOption := &operation.UploadOption{
UploadUrl: targetUrl,
Filename: fmt.Sprintf("test%d", i),
Cipher: false,
IsInputCompressed: true,
MimeType: "bench/test",
PairMap: nil,
Jwt: assignResult.Auth,
}
_, err = operation.UploadData(data, uploadOption)
if err != nil {
log.Fatalf("upload: %v", err)
}
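For reference, operation.UploadData and operation.Upload now take a single *operation.UploadOption instead of a long positional argument list. A minimal sketch using only the fields visible in this diff; the upload URL and values are placeholders:

    package main

    import (
        "log"

        "github.com/chrislusf/seaweedfs/weed/operation"
    )

    func main() {
        data := []byte("hello")
        // Field set copied from the call sites in this diff; any behaviour beyond
        // what those call sites show is an assumption.
        uploadOption := &operation.UploadOption{
            UploadUrl:         "http://127.0.0.1:8080/3,0123456789", // volume server URL + fid (placeholder)
            Filename:          "test0",
            Cipher:            false,
            IsInputCompressed: false,
            MimeType:          "text/plain",
            PairMap:           nil,
            Jwt:               "", // the assign result's Auth token when security is enabled
        }
        if _, err := operation.UploadData(data, uploadOption); err != nil {
            log.Fatalf("upload: %v", err)
        }
    }
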
@ -2,6 +2,7 @@ package main

import (
"flag"
"github.com/chrislusf/seaweedfs/weed/pb"
"log"
"time"

@ -37,7 +38,7 @@ func main() {
sinceTimeNs = time.Now().Add(-*rewindDuration).UnixNano()
}

err := operation.TailVolume(func()string{return *master}, grpcDialOption, vid, uint64(sinceTimeNs), *timeoutSeconds, func(n *needle.Needle) (err error) {
err := operation.TailVolume(func()pb.ServerAddress{return pb.ServerAddress(*master)}, grpcDialOption, vid, uint64(sinceTimeNs), *timeoutSeconds, func(n *needle.Needle) (err error) {
if n.Size == 0 {
println("-", n.String())
return nil
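For reference, master-facing helpers such as operation.Assign, operation.TailVolume and operation.LookupVolumeId now expect a func() pb.ServerAddress provider instead of func() string. A minimal sketch of adapting a -master flag, based on the call sites in this diff (the master address is a placeholder):

    package main

    import (
        "flag"
        "fmt"

        "github.com/chrislusf/seaweedfs/weed/operation"
        "github.com/chrislusf/seaweedfs/weed/pb"
        "google.golang.org/grpc"
    )

    var master = flag.String("master", "localhost:9333", "master server")

    func main() {
        flag.Parse()

        // Wrapping the flag in a small provider is the recurring pattern in this commit.
        masterFn := func() pb.ServerAddress { return pb.ServerAddress(*master) }

        assignResult, err := operation.Assign(masterFn, grpc.WithInsecure(), &operation.VolumeAssignRequest{
            Count: 1,
        })
        if err != nil {
            fmt.Printf("assign: %v\n", err)
            return
        }
        fmt.Printf("assigned %s on %s\n", assignResult.Fid, assignResult.Url)
    }
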
@ -38,10 +38,14 @@ debug_filer_copy:
go build -gcflags="all=-N -l"
dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec weed -- -v=4 filer.backup -filer=localhost:8888 -filerProxy -timeAgo=10h

debug_filer_remote_sync:
debug_filer_remote_sync_dir:
go build -gcflags="all=-N -l"
dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec weed -- -v=4 filer.remote.sync -filer="localhost:8888" -dir=/buckets/b2 -timeAgo=1h

debug_filer_remote_sync_buckets:
go build -gcflags="all=-N -l"
dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec weed -- -v=4 filer.remote.sync -filer="localhost:8888" -createBucketAt=cloud1 -timeAgo=1h

debug_master_follower:
go build -gcflags="all=-N -l"
dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec weed -- -v=4 master.follower
@ -2,6 +2,7 @@ package command

import (
"fmt"
"github.com/chrislusf/seaweedfs/weed/pb"

"github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
@ -72,12 +73,12 @@ func runBackup(cmd *Command, args []string) bool {
vid := needle.VolumeId(*s.volumeId)

// find volume location, replication, ttl info
lookup, err := operation.LookupVolumeId(func() string { return *s.master }, grpcDialOption, vid.String())
lookup, err := operation.LookupVolumeId(func() pb.ServerAddress { return pb.ServerAddress(*s.master) }, grpcDialOption, vid.String())
if err != nil {
fmt.Printf("Error looking up volume %d: %v\n", vid, err)
return true
}
volumeServer := lookup.Locations[0].Url
volumeServer := lookup.Locations[0].ServerAddress()

stats, err := operation.GetVolumeSyncStatus(volumeServer, grpcDialOption, uint32(vid))
if err != nil {
@ -3,6 +3,7 @@ package command
import (
"bufio"
"fmt"
"github.com/chrislusf/seaweedfs/weed/pb"
"io"
"math"
"math/rand"
@ -10,7 +11,6 @@ import (
"runtime"
"runtime/pprof"
"sort"
"strings"
"sync"
"time"

@ -129,7 +129,7 @@ func runBenchmark(cmd *Command, args []string) bool {
defer pprof.StopCPUProfile()
}

b.masterClient = wdclient.NewMasterClient(b.grpcDialOption, "client", "", 0, "", strings.Split(*b.masters, ","))
b.masterClient = wdclient.NewMasterClient(b.grpcDialOption, "client", "", "", pb.ServerAddresses(*b.masters).ToAddresses())
go b.masterClient.KeepConnectedToMaster()
b.masterClient.WaitUntilConnected()
@ -21,6 +21,7 @@ var Commands = []*Command{
cmdFilerCopy,
cmdFilerMetaBackup,
cmdFilerMetaTail,
cmdFilerRemoteGateway,
cmdFilerRemoteSynchronize,
cmdFilerReplicate,
cmdFilerSynchronize,
@ -2,6 +2,7 @@ package command

import (
"fmt"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/security"
"google.golang.org/grpc"
"io"
@ -49,7 +50,7 @@ func runDownload(cmd *Command, args []string) bool {
grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")

for _, fid := range args {
if e := downloadToFile(func() string { return *d.server }, grpcDialOption, fid, util.ResolvePath(*d.dir)); e != nil {
if e := downloadToFile(func() pb.ServerAddress { return pb.ServerAddress(*d.server) }, grpcDialOption, fid, util.ResolvePath(*d.dir)); e != nil {
fmt.Println("Download Error: ", fid, e)
}
}
@ -4,7 +4,6 @@ import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
@ -30,10 +29,12 @@ var (
|
||||
)
|
||||
|
||||
type FilerOptions struct {
|
||||
masters *string
|
||||
masters []pb.ServerAddress
|
||||
mastersString *string
|
||||
ip *string
|
||||
bindIp *string
|
||||
port *int
|
||||
portGrpc *int
|
||||
publicPort *int
|
||||
collection *string
|
||||
defaultReplicaPlacement *string
|
||||
@ -56,11 +57,12 @@ type FilerOptions struct {
|
||||
|
||||
func init() {
|
||||
cmdFiler.Run = runFiler // break init cycle
|
||||
f.masters = cmdFiler.Flag.String("master", "localhost:9333", "comma-separated master servers")
|
||||
f.mastersString = cmdFiler.Flag.String("master", "localhost:9333", "comma-separated master servers")
|
||||
f.collection = cmdFiler.Flag.String("collection", "", "all data will be stored in this default collection")
|
||||
f.ip = cmdFiler.Flag.String("ip", util.DetectedHostAddress(), "filer server http listen ip address")
|
||||
f.bindIp = cmdFiler.Flag.String("ip.bind", "", "ip address to bind to")
|
||||
f.port = cmdFiler.Flag.Int("port", 8888, "filer server http listen port")
|
||||
f.portGrpc = cmdFiler.Flag.Int("port.grpc", 18888, "filer server grpc listen port")
|
||||
f.publicPort = cmdFiler.Flag.Int("port.readonly", 0, "readonly port opened to public")
|
||||
f.defaultReplicaPlacement = cmdFiler.Flag.String("defaultReplicaPlacement", "", "default replication type. If not specified, use master setting.")
|
||||
f.disableDirListing = cmdFiler.Flag.Bool("disableDirListing", false, "turn off directory listing")
|
||||
@ -134,7 +136,7 @@ func runFiler(cmd *Command, args []string) bool {
|
||||
|
||||
go stats_collect.StartMetricsServer(*f.metricsHttpPort)
|
||||
|
||||
filerAddress := fmt.Sprintf("%s:%d", *f.ip, *f.port)
|
||||
filerAddress := util.JoinHostPort(*f.ip, *f.port)
|
||||
startDelay := time.Duration(2)
|
||||
if *filerStartS3 {
|
||||
filerS3Options.filer = &filerAddress
|
||||
@ -156,13 +158,15 @@ func runFiler(cmd *Command, args []string) bool {
|
||||
|
||||
if *filerStartIam {
|
||||
filerIamOptions.filer = &filerAddress
|
||||
filerIamOptions.masters = f.masters
|
||||
filerIamOptions.masters = f.mastersString
|
||||
go func() {
|
||||
time.Sleep(startDelay * time.Second)
|
||||
filerIamOptions.startIamServer()
|
||||
}()
|
||||
}
|
||||
|
||||
f.masters = pb.ServerAddresses(*f.mastersString).ToAddresses()
|
||||
|
||||
f.startFiler()
|
||||
|
||||
return true
|
||||
@ -184,8 +188,10 @@ func (fo *FilerOptions) startFiler() {
|
||||
peers = strings.Split(*fo.peers, ",")
|
||||
}
|
||||
|
||||
filerAddress := pb.NewServerAddress(*fo.ip, *fo.port, *fo.portGrpc)
|
||||
|
||||
fs, nfs_err := weed_server.NewFilerServer(defaultMux, publicVolumeMux, &weed_server.FilerOption{
|
||||
Masters: strings.Split(*fo.masters, ","),
|
||||
Masters: fo.masters,
|
||||
Collection: *fo.collection,
|
||||
DefaultReplication: *fo.defaultReplicaPlacement,
|
||||
DisableDirListing: *fo.disableDirListing,
|
||||
@ -195,11 +201,10 @@ func (fo *FilerOptions) startFiler() {
|
||||
Rack: *fo.rack,
|
||||
DefaultLevelDbDir: defaultLevelDbDirectory,
|
||||
DisableHttp: *fo.disableHttp,
|
||||
Host: *fo.ip,
|
||||
Port: uint32(*fo.port),
|
||||
Host: filerAddress,
|
||||
Cipher: *fo.cipher,
|
||||
SaveToFilerLimit: int64(*fo.saveToFilerLimit),
|
||||
Filers: peers,
|
||||
Filers: pb.FromAddressStrings(peers),
|
||||
ConcurrentUploadLimit: int64(*fo.concurrentUploadLimitMB) * 1024 * 1024,
|
||||
})
|
||||
if nfs_err != nil {
|
||||
@ -207,7 +212,7 @@ func (fo *FilerOptions) startFiler() {
|
||||
}
|
||||
|
||||
if *fo.publicPort != 0 {
|
||||
publicListeningAddress := *fo.bindIp + ":" + strconv.Itoa(*fo.publicPort)
|
||||
publicListeningAddress := util.JoinHostPort(*fo.bindIp, *fo.publicPort)
|
||||
glog.V(0).Infoln("Start Seaweed filer server", util.Version(), "public at", publicListeningAddress)
|
||||
publicListener, e := util.NewListener(publicListeningAddress, 0)
|
||||
if e != nil {
|
||||
@ -222,7 +227,7 @@ func (fo *FilerOptions) startFiler() {
|
||||
|
||||
glog.V(0).Infof("Start Seaweed Filer %s at %s:%d", util.Version(), *fo.ip, *fo.port)
|
||||
filerListener, e := util.NewListener(
|
||||
*fo.bindIp+":"+strconv.Itoa(*fo.port),
|
||||
util.JoinHostPort(*fo.bindIp, *fo.port),
|
||||
time.Duration(10)*time.Second,
|
||||
)
|
||||
if e != nil {
|
||||
@ -230,8 +235,8 @@ func (fo *FilerOptions) startFiler() {
|
||||
}
|
||||
|
||||
// starting grpc server
|
||||
grpcPort := *fo.port + 10000
|
||||
grpcL, err := util.NewListener(*fo.bindIp+":"+strconv.Itoa(grpcPort), 0)
|
||||
grpcPort := *fo.portGrpc
|
||||
grpcL, err := util.NewListener(util.JoinHostPort(*fo.bindIp, grpcPort), 0)
|
||||
if err != nil {
|
||||
glog.Fatalf("failed to listen on grpc port %d: %v", grpcPort, err)
|
||||
}
|
||||
|
@ -78,7 +78,7 @@ func doFilerBackup(grpcDialOption grpc.DialOption, backupOption *FilerBackupOpti
|
||||
return fmt.Errorf("no data sink configured in replication.toml")
|
||||
}
|
||||
|
||||
sourceFiler := *backupOption.filer
|
||||
sourceFiler := pb.ServerAddress(*backupOption.filer)
|
||||
sourcePath := *backupOption.path
|
||||
timeAgo := *backupOption.timeAgo
|
||||
targetPath := dataSink.GetSinkToDirectory()
|
||||
@ -102,7 +102,7 @@ func doFilerBackup(grpcDialOption grpc.DialOption, backupOption *FilerBackupOpti
|
||||
|
||||
// create filer sink
|
||||
filerSource := &source.FilerSource{}
|
||||
filerSource.DoInitialize(sourceFiler, pb.ServerToGrpcAddress(sourceFiler), sourcePath, *backupOption.proxyByFiler)
|
||||
filerSource.DoInitialize(sourceFiler.ToHttpAddress(), sourceFiler.ToGrpcAddress(), sourcePath, *backupOption.proxyByFiler)
|
||||
dataSink.SetSourceFiler(filerSource)
|
||||
|
||||
processEventFn := genProcessFunction(sourcePath, targetPath, dataSink, debug)
|
||||
@ -113,6 +113,6 @@ func doFilerBackup(grpcDialOption grpc.DialOption, backupOption *FilerBackupOpti
|
||||
})
|
||||
|
||||
return pb.FollowMetadata(sourceFiler, grpcDialOption, "backup_"+dataSink.GetName(),
|
||||
sourcePath, startFrom.UnixNano(), 0, processEventFnWithOffset, false)
|
||||
sourcePath, nil, startFrom.UnixNano(), 0, processEventFnWithOffset, false)
|
||||
|
||||
}
|
||||
|
@ -23,7 +23,7 @@ var (
|
||||
|
||||
type FilerCatOptions struct {
|
||||
grpcDialOption grpc.DialOption
|
||||
filerAddress string
|
||||
filerAddress pb.ServerAddress
|
||||
filerClient filer_pb.SeaweedFilerClient
|
||||
output *string
|
||||
}
|
||||
@ -78,7 +78,7 @@ func runFilerCat(cmd *Command, args []string) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
filerCat.filerAddress = filerUrl.Host
|
||||
filerCat.filerAddress = pb.ServerAddress(filerUrl.Host)
|
||||
filerCat.grpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client")
|
||||
|
||||
dir, name := util.FullPath(urlPath).DirAndName()
|
||||
@ -108,6 +108,11 @@ func runFilerCat(cmd *Command, args []string) bool {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(respLookupEntry.Entry.Content) > 0 {
|
||||
_, err = writer.Write(respLookupEntry.Entry.Content)
|
||||
return err
|
||||
}
|
||||
|
||||
filerCat.filerClient = client
|
||||
|
||||
return filer.StreamContent(&filerCat, writer, respLookupEntry.Entry.Chunks, 0, math.MaxInt64)
|
||||
|
@ -7,7 +7,6 @@ import (
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
@ -92,35 +91,21 @@ func runCopy(cmd *Command, args []string) bool {
|
||||
filerDestination := args[len(args)-1]
|
||||
fileOrDirs := args[0 : len(args)-1]
|
||||
|
||||
filerUrl, err := url.Parse(filerDestination)
|
||||
filerAddress, urlPath, err := pb.ParseUrl(filerDestination)
|
||||
if err != nil {
|
||||
fmt.Printf("The last argument should be a URL on filer: %v\n", err)
|
||||
return false
|
||||
}
|
||||
urlPath := filerUrl.Path
|
||||
if !strings.HasSuffix(urlPath, "/") {
|
||||
fmt.Printf("The last argument should be a folder and end with \"/\"\n")
|
||||
return false
|
||||
}
|
||||
|
||||
if filerUrl.Port() == "" {
|
||||
fmt.Printf("The filer port should be specified.\n")
|
||||
return false
|
||||
}
|
||||
|
||||
filerPort, parseErr := strconv.ParseUint(filerUrl.Port(), 10, 64)
|
||||
if parseErr != nil {
|
||||
fmt.Printf("The filer port parse error: %v\n", parseErr)
|
||||
return false
|
||||
}
|
||||
|
||||
filerGrpcPort := filerPort + 10000
|
||||
filerGrpcAddress := fmt.Sprintf("%s:%d", filerUrl.Hostname(), filerGrpcPort)
|
||||
copy.grpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client")
|
||||
|
||||
masters, collection, replication, dirBuckets, maxMB, cipher, err := readFilerConfiguration(copy.grpcDialOption, filerGrpcAddress)
|
||||
masters, collection, replication, dirBuckets, maxMB, cipher, err := readFilerConfiguration(copy.grpcDialOption, filerAddress)
|
||||
if err != nil {
|
||||
fmt.Printf("read from filer %s: %v\n", filerGrpcAddress, err)
|
||||
fmt.Printf("read from filer %s: %v\n", filerAddress, err)
|
||||
return false
|
||||
}
|
||||
if strings.HasPrefix(urlPath, dirBuckets+"/") {
|
||||
@ -174,9 +159,8 @@ func runCopy(cmd *Command, args []string) bool {
|
||||
go func() {
|
||||
defer waitGroup.Done()
|
||||
worker := FileCopyWorker{
|
||||
options: ©,
|
||||
filerHost: filerUrl.Host,
|
||||
filerGrpcAddress: filerGrpcAddress,
|
||||
options: ©,
|
||||
filerAddress: filerAddress,
|
||||
}
|
||||
if err := worker.copyFiles(fileCopyTaskChan); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "copy file error: %v\n", err)
|
||||
@ -189,7 +173,7 @@ func runCopy(cmd *Command, args []string) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func readFilerConfiguration(grpcDialOption grpc.DialOption, filerGrpcAddress string) (masters []string, collection, replication string, dirBuckets string, maxMB uint32, cipher bool, err error) {
|
||||
func readFilerConfiguration(grpcDialOption grpc.DialOption, filerGrpcAddress pb.ServerAddress) (masters []string, collection, replication string, dirBuckets string, maxMB uint32, cipher bool, err error) {
|
||||
err = pb.WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
|
||||
resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
|
||||
if err != nil {
|
||||
@ -241,9 +225,8 @@ func genFileCopyTask(fileOrDir string, destPath string, fileCopyTaskChan chan Fi
|
||||
}
|
||||
|
||||
type FileCopyWorker struct {
|
||||
options *CopyOptions
|
||||
filerHost string
|
||||
filerGrpcAddress string
|
||||
options *CopyOptions
|
||||
filerAddress pb.ServerAddress
|
||||
}
|
||||
|
||||
func (worker *FileCopyWorker) copyFiles(fileCopyTaskChan chan FileCopyTask) error {
|
||||
@ -321,7 +304,7 @@ func (worker *FileCopyWorker) checkExistingFileFirst(task FileCopyTask, f *os.Fi
|
||||
return
|
||||
}
|
||||
|
||||
err = pb.WithGrpcFilerClient(worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
|
||||
err = pb.WithGrpcFilerClient(worker.filerAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
|
||||
|
||||
request := &filer_pb.LookupDirectoryEntryRequest{
|
||||
Directory: task.destinationUrlPath,
|
||||
@ -363,7 +346,7 @@ func (worker *FileCopyWorker) uploadFileAsOne(task FileCopyTask, f *os.File) err
|
||||
|
||||
// assign a volume
|
||||
err = util.Retry("assignVolume", func() error {
|
||||
return pb.WithGrpcFilerClient(worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
|
||||
return pb.WithGrpcFilerClient(worker.filerAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
|
||||
|
||||
request := &filer_pb.AssignVolumeRequest{
|
||||
Count: 1,
|
||||
@ -381,7 +364,7 @@ func (worker *FileCopyWorker) uploadFileAsOne(task FileCopyTask, f *os.File) err
|
||||
if assignResult.Error != "" {
|
||||
return fmt.Errorf("assign volume failure %v: %v", request, assignResult.Error)
|
||||
}
|
||||
if assignResult.Url == "" {
|
||||
if assignResult.Location.Url == "" {
|
||||
return fmt.Errorf("assign volume failure %v: %v", request, assignResult)
|
||||
}
|
||||
return nil
|
||||
@ -391,9 +374,17 @@ func (worker *FileCopyWorker) uploadFileAsOne(task FileCopyTask, f *os.File) err
|
||||
return fmt.Errorf("Failed to assign from %v: %v\n", worker.options.masters, err)
|
||||
}
|
||||
|
||||
targetUrl := "http://" + assignResult.Url + "/" + assignResult.FileId
|
||||
|
||||
uploadResult, err := operation.UploadData(targetUrl, fileName, worker.options.cipher, data, false, mimeType, nil, security.EncodedJwt(assignResult.Auth))
|
||||
targetUrl := "http://" + assignResult.Location.Url + "/" + assignResult.FileId
|
||||
uploadOption := &operation.UploadOption{
|
||||
UploadUrl: targetUrl,
|
||||
Filename: fileName,
|
||||
Cipher: worker.options.cipher,
|
||||
IsInputCompressed: false,
|
||||
MimeType: mimeType,
|
||||
PairMap: nil,
|
||||
Jwt: security.EncodedJwt(assignResult.Auth),
|
||||
}
|
||||
uploadResult, err := operation.UploadData(data, uploadOption)
|
||||
if err != nil {
|
||||
return fmt.Errorf("upload data %v to %s: %v\n", fileName, targetUrl, err)
|
||||
}
|
||||
@ -406,10 +397,10 @@ func (worker *FileCopyWorker) uploadFileAsOne(task FileCopyTask, f *os.File) err
|
||||
|
||||
chunks = append(chunks, uploadResult.ToPbFileChunk(assignResult.FileId, 0))
|
||||
|
||||
fmt.Printf("copied %s => http://%s%s%s\n", f.Name(), worker.filerHost, task.destinationUrlPath, fileName)
|
||||
fmt.Printf("copied %s => http://%s%s%s\n", f.Name(), worker.filerAddress.ToHttpAddress(), task.destinationUrlPath, fileName)
|
||||
}
|
||||
|
||||
if err := pb.WithGrpcFilerClient(worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
|
||||
if err := pb.WithGrpcFilerClient(worker.filerAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
|
||||
request := &filer_pb.CreateEntryRequest{
|
||||
Directory: task.destinationUrlPath,
|
||||
Entry: &filer_pb.Entry{
|
||||
@ -435,7 +426,7 @@ func (worker *FileCopyWorker) uploadFileAsOne(task FileCopyTask, f *os.File) err
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
return fmt.Errorf("upload data %v to http://%s%s%s: %v\n", fileName, worker.filerHost, task.destinationUrlPath, fileName, err)
|
||||
return fmt.Errorf("upload data %v to http://%s%s%s: %v\n", fileName, worker.filerAddress.ToHttpAddress(), task.destinationUrlPath, fileName, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
@ -466,7 +457,7 @@ func (worker *FileCopyWorker) uploadFileInChunks(task FileCopyTask, f *os.File,
|
||||
var assignResult *filer_pb.AssignVolumeResponse
|
||||
var assignError error
|
||||
err := util.Retry("assignVolume", func() error {
|
||||
return pb.WithGrpcFilerClient(worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
|
||||
return pb.WithGrpcFilerClient(worker.filerAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
|
||||
request := &filer_pb.AssignVolumeRequest{
|
||||
Count: 1,
|
||||
Replication: *worker.options.replication,
|
||||
@ -490,7 +481,7 @@ func (worker *FileCopyWorker) uploadFileInChunks(task FileCopyTask, f *os.File,
|
||||
fmt.Printf("Failed to assign from %v: %v\n", worker.options.masters, err)
|
||||
}
|
||||
|
||||
targetUrl := "http://" + assignResult.Url + "/" + assignResult.FileId
|
||||
targetUrl := "http://" + assignResult.Location.Url + "/" + assignResult.FileId
|
||||
if collection == "" {
|
||||
collection = assignResult.Collection
|
||||
}
|
||||
@ -498,7 +489,16 @@ func (worker *FileCopyWorker) uploadFileInChunks(task FileCopyTask, f *os.File,
|
||||
replication = assignResult.Replication
|
||||
}
|
||||
|
||||
uploadResult, err, _ := operation.Upload(targetUrl, fileName+"-"+strconv.FormatInt(i+1, 10), worker.options.cipher, io.NewSectionReader(f, i*chunkSize, chunkSize), false, "", nil, security.EncodedJwt(assignResult.Auth))
|
||||
uploadOption := &operation.UploadOption{
|
||||
UploadUrl: targetUrl,
|
||||
Filename: fileName + "-" + strconv.FormatInt(i+1, 10),
|
||||
Cipher: worker.options.cipher,
|
||||
IsInputCompressed: false,
|
||||
MimeType: "",
|
||||
PairMap: nil,
|
||||
Jwt: security.EncodedJwt(assignResult.Auth),
|
||||
}
|
||||
uploadResult, err, _ := operation.Upload(io.NewSectionReader(f, i*chunkSize, chunkSize), uploadOption)
|
||||
if err != nil {
|
||||
uploadError = fmt.Errorf("upload data %v to %s: %v\n", fileName, targetUrl, err)
|
||||
return
|
||||
@ -525,8 +525,8 @@ func (worker *FileCopyWorker) uploadFileInChunks(task FileCopyTask, f *os.File,
|
||||
for _, chunk := range chunks {
|
||||
fileIds = append(fileIds, chunk.FileId)
|
||||
}
|
||||
operation.DeleteFiles(func() string {
|
||||
return copy.masters[0]
|
||||
operation.DeleteFiles(func() pb.ServerAddress {
|
||||
return pb.ServerAddress(copy.masters[0])
|
||||
}, false, worker.options.grpcDialOption, fileIds)
|
||||
return uploadError
|
||||
}
|
||||
@ -536,7 +536,7 @@ func (worker *FileCopyWorker) uploadFileInChunks(task FileCopyTask, f *os.File,
|
||||
return fmt.Errorf("create manifest: %v", manifestErr)
|
||||
}
|
||||
|
||||
if err := pb.WithGrpcFilerClient(worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
|
||||
if err := pb.WithGrpcFilerClient(worker.filerAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
|
||||
request := &filer_pb.CreateEntryRequest{
|
||||
Directory: task.destinationUrlPath,
|
||||
Entry: &filer_pb.Entry{
|
||||
@ -562,10 +562,10 @@ func (worker *FileCopyWorker) uploadFileInChunks(task FileCopyTask, f *os.File,
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
return fmt.Errorf("upload data %v to http://%s%s%s: %v\n", fileName, worker.filerHost, task.destinationUrlPath, fileName, err)
|
||||
return fmt.Errorf("upload data %v to http://%s%s%s: %v\n", fileName, worker.filerAddress.ToHttpAddress(), task.destinationUrlPath, fileName, err)
|
||||
}
|
||||
|
||||
fmt.Printf("copied %s => http://%s%s%s\n", f.Name(), worker.filerHost, task.destinationUrlPath, fileName)
|
||||
fmt.Printf("copied %s => http://%s%s%s\n", f.Name(), worker.filerAddress.ToHttpAddress(), task.destinationUrlPath, fileName)
|
||||
|
||||
return nil
|
||||
}
|
||||
@ -594,7 +594,7 @@ func (worker *FileCopyWorker) saveDataAsChunk(reader io.Reader, name string, off
|
||||
var fileId, host string
|
||||
var auth security.EncodedJwt
|
||||
|
||||
if flushErr := pb.WithGrpcFilerClient(worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
|
||||
if flushErr := pb.WithGrpcFilerClient(worker.filerAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
@ -616,7 +616,7 @@ func (worker *FileCopyWorker) saveDataAsChunk(reader io.Reader, name string, off
|
||||
return fmt.Errorf("assign volume failure %v: %v", request, resp.Error)
|
||||
}
|
||||
|
||||
fileId, host, auth = resp.FileId, resp.Url, security.EncodedJwt(resp.Auth)
|
||||
fileId, host, auth = resp.FileId, resp.Location.Url, security.EncodedJwt(resp.Auth)
|
||||
collection, replication = resp.Collection, resp.Replication
|
||||
|
||||
return nil
|
||||
@ -630,8 +630,16 @@ func (worker *FileCopyWorker) saveDataAsChunk(reader io.Reader, name string, off
|
||||
return nil, collection, replication, fmt.Errorf("filerGrpcAddress assign volume: %v", flushErr)
|
||||
}
|
||||
|
||||
fileUrl := fmt.Sprintf("http://%s/%s", host, fileId)
|
||||
uploadResult, flushErr, _ := operation.Upload(fileUrl, name, worker.options.cipher, reader, false, "", nil, auth)
|
||||
uploadOption := &operation.UploadOption{
|
||||
UploadUrl: fmt.Sprintf("http://%s/%s", host, fileId),
|
||||
Filename: name,
|
||||
Cipher: worker.options.cipher,
|
||||
IsInputCompressed: false,
|
||||
MimeType: "",
|
||||
PairMap: nil,
|
||||
Jwt: auth,
|
||||
}
|
||||
uploadResult, flushErr, _ := operation.Upload(reader, uploadOption)
|
||||
if flushErr != nil {
|
||||
return nil, collection, replication, fmt.Errorf("upload data: %v", flushErr)
|
||||
}
|
||||
|
@ -195,8 +195,8 @@ func (metaBackup *FilerMetaBackupOptions) streamMetadataBackup() error {
|
||||
return metaBackup.setOffset(lastTime)
|
||||
})
|
||||
|
||||
return pb.FollowMetadata(*metaBackup.filerAddress, metaBackup.grpcDialOption, "meta_backup",
|
||||
*metaBackup.filerDirectory, startTime.UnixNano(), 0, processEventFnWithOffset, false)
|
||||
return pb.FollowMetadata(pb.ServerAddress(*metaBackup.filerAddress), metaBackup.grpcDialOption, "meta_backup",
|
||||
*metaBackup.filerDirectory, nil, startTime.UnixNano(), 0, processEventFnWithOffset, false)
|
||||
|
||||
}
|
||||
|
||||
@ -224,7 +224,7 @@ var _ = filer_pb.FilerClient(&FilerMetaBackupOptions{})
|
||||
|
||||
func (metaBackup *FilerMetaBackupOptions) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error {
|
||||
|
||||
return pb.WithFilerClient(*metaBackup.filerAddress, metaBackup.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
|
||||
return pb.WithFilerClient(pb.ServerAddress(*metaBackup.filerAddress), metaBackup.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
|
||||
return fn(client)
|
||||
})
|
||||
|
||||
|
@ -103,8 +103,8 @@ func runFilerMetaTail(cmd *Command, args []string) bool {
|
||||
}
|
||||
}
|
||||
|
||||
tailErr := pb.FollowMetadata(*tailFiler, grpcDialOption, "tail",
|
||||
*tailTarget, time.Now().Add(-*tailStart).UnixNano(), 0,
|
||||
tailErr := pb.FollowMetadata(pb.ServerAddress(*tailFiler), grpcDialOption, "tail",
|
||||
*tailTarget, nil, time.Now().Add(-*tailStart).UnixNano(), 0,
|
||||
func(resp *filer_pb.SubscribeMetadataResponse) error {
|
||||
if !shouldPrint(resp) {
|
||||
return nil
|
||||
|
117 weed/command/filer_remote_gateway.go Normal file
@ -0,0 +1,117 @@
|
||||
package command
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"github.com/chrislusf/seaweedfs/weed/glog"
|
||||
"github.com/chrislusf/seaweedfs/weed/pb"
|
||||
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
|
||||
"github.com/chrislusf/seaweedfs/weed/pb/remote_pb"
|
||||
"github.com/chrislusf/seaweedfs/weed/replication/source"
|
||||
"github.com/chrislusf/seaweedfs/weed/security"
|
||||
"github.com/chrislusf/seaweedfs/weed/util"
|
||||
"google.golang.org/grpc"
|
||||
"os"
|
||||
"time"
|
||||
)
|
||||
|
||||
type RemoteGatewayOptions struct {
|
||||
filerAddress *string
|
||||
grpcDialOption grpc.DialOption
|
||||
readChunkFromFiler *bool
|
||||
timeAgo *time.Duration
|
||||
createBucketAt *string
|
||||
createBucketRandomSuffix *bool
|
||||
include *string
|
||||
exclude *string
|
||||
|
||||
mappings *remote_pb.RemoteStorageMapping
|
||||
remoteConfs map[string]*remote_pb.RemoteConf
|
||||
bucketsDir string
|
||||
}
|
||||
|
||||
var _ = filer_pb.FilerClient(&RemoteGatewayOptions{})
|
||||
|
||||
func (option *RemoteGatewayOptions) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error {
|
||||
return pb.WithFilerClient(pb.ServerAddress(*option.filerAddress), option.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
|
||||
return fn(client)
|
||||
})
|
||||
}
|
||||
func (option *RemoteGatewayOptions) AdjustedUrl(location *filer_pb.Location) string {
|
||||
return location.Url
|
||||
}
|
||||
|
||||
var (
|
||||
remoteGatewayOptions RemoteGatewayOptions
|
||||
)
|
||||
|
||||
func init() {
|
||||
cmdFilerRemoteGateway.Run = runFilerRemoteGateway // break init cycle
|
||||
remoteGatewayOptions.filerAddress = cmdFilerRemoteGateway.Flag.String("filer", "localhost:8888", "filer of the SeaweedFS cluster")
|
||||
remoteGatewayOptions.createBucketAt = cmdFilerRemoteGateway.Flag.String("createBucketAt", "", "one remote storage name to create new buckets in")
|
||||
remoteGatewayOptions.createBucketRandomSuffix = cmdFilerRemoteGateway.Flag.Bool("createBucketWithRandomSuffix", true, "add randomized suffix to bucket name to avoid conflicts")
|
||||
remoteGatewayOptions.readChunkFromFiler = cmdFilerRemoteGateway.Flag.Bool("filerProxy", false, "read file chunks from filer instead of volume servers")
|
||||
remoteGatewayOptions.timeAgo = cmdFilerRemoteGateway.Flag.Duration("timeAgo", 0, "start time before now. \"300ms\", \"1.5h\" or \"2h45m\". Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\", \"m\", \"h\"")
|
||||
remoteGatewayOptions.include = cmdFilerRemoteGateway.Flag.String("include", "", "pattens of new bucket names, e.g., s3*")
|
||||
remoteGatewayOptions.exclude = cmdFilerRemoteGateway.Flag.String("exclude", "", "pattens of new bucket names, e.g., local*")
|
||||
}
|
||||
|
||||
var cmdFilerRemoteGateway = &Command{
|
||||
UsageLine: "filer.remote.gateway",
|
||||
Short: "resumable continuously write back bucket creation, deletion, and other local updates to remote object store",
|
||||
Long: `resumable continuously write back bucket creation, deletion, and other local updates to remote object store
|
||||
|
||||
filer.remote.gateway listens on filer local buckets update events.
|
||||
If any bucket is created, deleted, or updated, it will mirror the changes to remote object store.
|
||||
|
||||
weed filer.remote.sync -createBucketAt=cloud1
|
||||
|
||||
`,
|
||||
}
|
||||
|
||||
func runFilerRemoteGateway(cmd *Command, args []string) bool {
|
||||
|
||||
util.LoadConfiguration("security", false)
|
||||
grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
|
||||
remoteGatewayOptions.grpcDialOption = grpcDialOption
|
||||
|
||||
filerAddress := pb.ServerAddress(*remoteGatewayOptions.filerAddress)
|
||||
|
||||
filerSource := &source.FilerSource{}
|
||||
filerSource.DoInitialize(
|
||||
filerAddress.ToHttpAddress(),
|
||||
filerAddress.ToGrpcAddress(),
|
||||
"/", // does not matter
|
||||
*remoteGatewayOptions.readChunkFromFiler,
|
||||
)
|
||||
|
||||
remoteGatewayOptions.bucketsDir = "/buckets"
|
||||
// check buckets again
|
||||
remoteGatewayOptions.WithFilerClient(func(filerClient filer_pb.SeaweedFilerClient) error {
|
||||
resp, err := filerClient.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
remoteGatewayOptions.bucketsDir = resp.DirBuckets
|
||||
return nil
|
||||
})
|
||||
|
||||
// read filer remote storage mount mappings
|
||||
if detectErr := remoteGatewayOptions.collectRemoteStorageConf(); detectErr != nil {
|
||||
fmt.Fprintf(os.Stderr, "read mount info: %v\n", detectErr)
|
||||
return true
|
||||
}
|
||||
|
||||
// synchronize /buckets folder
|
||||
fmt.Printf("synchronize buckets in %s ...\n", remoteGatewayOptions.bucketsDir)
|
||||
util.RetryForever("filer.remote.sync buckets", func() error {
|
||||
return remoteGatewayOptions.followBucketUpdatesAndUploadToRemote(filerSource)
|
||||
}, func(err error) bool {
|
||||
if err != nil {
|
||||
glog.Errorf("synchronize %s: %v", remoteGatewayOptions.bucketsDir, err)
|
||||
}
|
||||
return true
|
||||
})
|
||||
return true
|
||||
|
||||
}
|
398 weed/command/filer_remote_gateway_buckets.go Normal file
@ -0,0 +1,398 @@
|
||||
package command
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/chrislusf/seaweedfs/weed/filer"
|
||||
"github.com/chrislusf/seaweedfs/weed/glog"
|
||||
"github.com/chrislusf/seaweedfs/weed/pb"
|
||||
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
|
||||
"github.com/chrislusf/seaweedfs/weed/pb/remote_pb"
|
||||
"github.com/chrislusf/seaweedfs/weed/remote_storage"
|
||||
"github.com/chrislusf/seaweedfs/weed/replication/source"
|
||||
"github.com/chrislusf/seaweedfs/weed/util"
|
||||
"github.com/golang/protobuf/proto"
|
||||
"math"
|
||||
"math/rand"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
func (option *RemoteGatewayOptions) followBucketUpdatesAndUploadToRemote(filerSource *source.FilerSource) error {
|
||||
|
||||
// read filer remote storage mount mappings
|
||||
if detectErr := option.collectRemoteStorageConf(); detectErr != nil {
|
||||
return fmt.Errorf("read mount info: %v", detectErr)
|
||||
}
|
||||
|
||||
eachEntryFunc, err := option.makeBucketedEventProcessor(filerSource)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
processEventFnWithOffset := pb.AddOffsetFunc(eachEntryFunc, 3*time.Second, func(counter int64, lastTsNs int64) error {
|
||||
lastTime := time.Unix(0, lastTsNs)
|
||||
glog.V(0).Infof("remote sync %s progressed to %v %0.2f/sec", *option.filerAddress, lastTime, float64(counter)/float64(3))
|
||||
return remote_storage.SetSyncOffset(option.grpcDialOption, pb.ServerAddress(*option.filerAddress), option.bucketsDir, lastTsNs)
|
||||
})
|
||||
|
||||
lastOffsetTs := collectLastSyncOffset(option, option.grpcDialOption, pb.ServerAddress(*option.filerAddress), option.bucketsDir, *option.timeAgo)
|
||||
|
||||
return pb.FollowMetadata(pb.ServerAddress(*option.filerAddress), option.grpcDialOption, "filer.remote.sync",
|
||||
option.bucketsDir, []string{filer.DirectoryEtcRemote}, lastOffsetTs.UnixNano(), 0, processEventFnWithOffset, false)
|
||||
}
|
||||
|
||||
func (option *RemoteGatewayOptions) makeBucketedEventProcessor(filerSource *source.FilerSource) (pb.ProcessMetadataFunc, error) {
|
||||
|
||||
handleCreateBucket := func(entry *filer_pb.Entry) error {
|
||||
if !entry.IsDirectory {
|
||||
return nil
|
||||
}
|
||||
if entry.RemoteEntry != nil {
|
||||
// this directory is imported from "remote.mount.buckets" or "remote.mount"
|
||||
return nil
|
||||
}
|
||||
if option.mappings.PrimaryBucketStorageName != "" && *option.createBucketAt == "" {
|
||||
*option.createBucketAt = option.mappings.PrimaryBucketStorageName
|
||||
glog.V(0).Infof("%s is set as the primary remote storage", *option.createBucketAt)
|
||||
}
|
||||
if len(option.mappings.Mappings) == 1 && *option.createBucketAt == "" {
|
||||
for k := range option.mappings.Mappings {
|
||||
*option.createBucketAt = k
|
||||
glog.V(0).Infof("%s is set as the only remote storage", *option.createBucketAt)
|
||||
}
|
||||
}
|
||||
if *option.createBucketAt == "" {
|
||||
return nil
|
||||
}
|
||||
remoteConf, found := option.remoteConfs[*option.createBucketAt]
|
||||
if !found {
|
||||
return fmt.Errorf("un-configured remote storage %s", *option.createBucketAt)
|
||||
}
|
||||
|
||||
client, err := remote_storage.GetRemoteStorage(remoteConf)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
bucketName := strings.ToLower(entry.Name)
|
||||
if *option.include != "" {
|
||||
if ok, _ := filepath.Match(*option.include, entry.Name); !ok {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
if *option.exclude != "" {
|
||||
if ok, _ := filepath.Match(*option.exclude, entry.Name); ok {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
if *option.createBucketRandomSuffix {
|
||||
// https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html
|
||||
if len(bucketName)+5 > 63 {
|
||||
bucketName = bucketName[:58]
|
||||
}
|
||||
bucketName = fmt.Sprintf("%s-%4d", bucketName, rand.Uint32()%10000)
|
||||
}
|
||||
|
||||
glog.V(0).Infof("create bucket %s", bucketName)
|
||||
if err := client.CreateBucket(bucketName); err != nil {
|
||||
return fmt.Errorf("create bucket %s in %s: %v", bucketName, remoteConf.Name, err)
|
||||
}
|
||||
|
||||
bucketPath := util.FullPath(option.bucketsDir).Child(entry.Name)
|
||||
remoteLocation := &remote_pb.RemoteStorageLocation{
|
||||
Name: *option.createBucketAt,
|
||||
Bucket: bucketName,
|
||||
Path: "/",
|
||||
}
|
||||
|
||||
// need to add new mapping here before getting upates from metadata tailing
|
||||
option.mappings.Mappings[string(bucketPath)] = remoteLocation
|
||||
|
||||
return filer.InsertMountMapping(option, string(bucketPath), remoteLocation)
|
||||
|
||||
}
|
||||
handleDeleteBucket := func(entry *filer_pb.Entry) error {
|
||||
if !entry.IsDirectory {
|
||||
return nil
|
||||
}
|
||||
|
||||
client, remoteStorageMountLocation, err := option.findRemoteStorageClient(entry.Name)
|
||||
if err != nil {
|
||||
return fmt.Errorf("findRemoteStorageClient %s: %v", entry.Name, err)
|
||||
}
|
||||
|
||||
glog.V(0).Infof("delete remote bucket %s", remoteStorageMountLocation.Bucket)
|
||||
if err := client.DeleteBucket(remoteStorageMountLocation.Bucket); err != nil {
|
||||
return fmt.Errorf("delete remote bucket %s: %v", remoteStorageMountLocation.Bucket, err)
|
||||
}
|
||||
|
||||
bucketPath := util.FullPath(option.bucketsDir).Child(entry.Name)
|
||||
|
||||
return filer.DeleteMountMapping(option, string(bucketPath))
|
||||
}
|
||||
|
||||
handleEtcRemoteChanges := func(resp *filer_pb.SubscribeMetadataResponse) error {
|
||||
message := resp.EventNotification
|
||||
if message.NewEntry != nil {
|
||||
// update
|
||||
if message.NewEntry.Name == filer.REMOTE_STORAGE_MOUNT_FILE {
|
||||
newMappings, readErr := filer.UnmarshalRemoteStorageMappings(message.NewEntry.Content)
|
||||
if readErr != nil {
|
||||
return fmt.Errorf("unmarshal mappings: %v", readErr)
|
||||
}
|
||||
option.mappings = newMappings
|
||||
}
|
||||
if strings.HasSuffix(message.NewEntry.Name, filer.REMOTE_STORAGE_CONF_SUFFIX) {
|
||||
conf := &remote_pb.RemoteConf{}
|
||||
if err := proto.Unmarshal(message.NewEntry.Content, conf); err != nil {
|
||||
return fmt.Errorf("unmarshal %s/%s: %v", filer.DirectoryEtcRemote, message.NewEntry.Name, err)
|
||||
}
|
||||
option.remoteConfs[conf.Name] = conf
|
||||
}
|
||||
} else if message.OldEntry != nil {
|
||||
// deletion
|
||||
if strings.HasSuffix(message.OldEntry.Name, filer.REMOTE_STORAGE_CONF_SUFFIX) {
|
||||
conf := &remote_pb.RemoteConf{}
|
||||
if err := proto.Unmarshal(message.OldEntry.Content, conf); err != nil {
|
||||
return fmt.Errorf("unmarshal %s/%s: %v", filer.DirectoryEtcRemote, message.OldEntry.Name, err)
|
||||
}
|
||||
delete(option.remoteConfs, conf.Name)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
eachEntryFunc := func(resp *filer_pb.SubscribeMetadataResponse) error {
|
||||
message := resp.EventNotification
|
||||
if strings.HasPrefix(resp.Directory, filer.DirectoryEtcRemote) {
|
||||
return handleEtcRemoteChanges(resp)
|
||||
}
|
||||
|
||||
if message.OldEntry == nil && message.NewEntry == nil {
|
||||
return nil
|
||||
}
|
||||
if message.OldEntry == nil && message.NewEntry != nil {
|
||||
if message.NewParentPath == option.bucketsDir {
|
||||
return handleCreateBucket(message.NewEntry)
|
||||
}
|
||||
if !filer.HasData(message.NewEntry) {
|
||||
return nil
|
||||
}
|
||||
bucket, remoteStorageMountLocation, remoteStorage, ok := option.detectBucketInfo(message.NewParentPath)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
client, err := remote_storage.GetRemoteStorage(remoteStorage)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
glog.V(2).Infof("create: %+v", resp)
|
||||
if !shouldSendToRemote(message.NewEntry) {
|
||||
glog.V(2).Infof("skipping creating: %+v", resp)
|
||||
return nil
|
||||
}
|
||||
dest := toRemoteStorageLocation(bucket, util.NewFullPath(message.NewParentPath, message.NewEntry.Name), remoteStorageMountLocation)
|
||||
if message.NewEntry.IsDirectory {
|
||||
glog.V(0).Infof("mkdir %s", remote_storage.FormatLocation(dest))
|
||||
return client.WriteDirectory(dest, message.NewEntry)
|
||||
}
|
||||
glog.V(0).Infof("create %s", remote_storage.FormatLocation(dest))
|
||||
reader := filer.NewFileReader(filerSource, message.NewEntry)
|
||||
remoteEntry, writeErr := client.WriteFile(dest, message.NewEntry, reader)
|
||||
if writeErr != nil {
|
||||
return writeErr
|
||||
}
|
||||
return updateLocalEntry(&remoteSyncOptions, message.NewParentPath, message.NewEntry, remoteEntry)
|
||||
}
|
||||
if message.OldEntry != nil && message.NewEntry == nil {
|
||||
if resp.Directory == option.bucketsDir {
|
||||
return handleDeleteBucket(message.OldEntry)
|
||||
}
|
||||
bucket, remoteStorageMountLocation, remoteStorage, ok := option.detectBucketInfo(resp.Directory)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
client, err := remote_storage.GetRemoteStorage(remoteStorage)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
glog.V(2).Infof("delete: %+v", resp)
|
||||
dest := toRemoteStorageLocation(bucket, util.NewFullPath(resp.Directory, message.OldEntry.Name), remoteStorageMountLocation)
|
||||
if message.OldEntry.IsDirectory {
|
||||
glog.V(0).Infof("rmdir %s", remote_storage.FormatLocation(dest))
|
||||
return client.RemoveDirectory(dest)
|
||||
}
|
||||
glog.V(0).Infof("delete %s", remote_storage.FormatLocation(dest))
|
||||
return client.DeleteFile(dest)
|
||||
}
|
||||
if message.OldEntry != nil && message.NewEntry != nil {
|
||||
if resp.Directory == option.bucketsDir {
|
||||
if message.NewParentPath == option.bucketsDir {
|
||||
if message.OldEntry.Name == message.NewEntry.Name {
|
||||
return nil
|
||||
}
|
||||
if err := handleCreateBucket(message.NewEntry); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := handleDeleteBucket(message.OldEntry); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
oldBucket, oldRemoteStorageMountLocation, oldRemoteStorage, oldOk := option.detectBucketInfo(resp.Directory)
|
||||
newBucket, newRemoteStorageMountLocation, newRemoteStorage, newOk := option.detectBucketInfo(message.NewParentPath)
|
||||
if oldOk && newOk {
|
||||
if !shouldSendToRemote(message.NewEntry) {
|
||||
glog.V(2).Infof("skipping updating: %+v", resp)
|
||||
return nil
|
||||
}
|
||||
client, err := remote_storage.GetRemoteStorage(oldRemoteStorage)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if resp.Directory == message.NewParentPath && message.OldEntry.Name == message.NewEntry.Name {
|
||||
// update the same entry
|
||||
if message.NewEntry.IsDirectory {
|
||||
// update directory property
|
||||
return nil
|
||||
}
|
||||
if filer.IsSameData(message.OldEntry, message.NewEntry) {
|
||||
glog.V(2).Infof("update meta: %+v", resp)
|
||||
oldDest := toRemoteStorageLocation(oldBucket, util.NewFullPath(resp.Directory, message.OldEntry.Name), oldRemoteStorageMountLocation)
|
||||
return client.UpdateFileMetadata(oldDest, message.OldEntry, message.NewEntry)
|
||||
} else {
|
||||
newDest := toRemoteStorageLocation(newBucket, util.NewFullPath(message.NewParentPath, message.NewEntry.Name), newRemoteStorageMountLocation)
|
||||
reader := filer.NewFileReader(filerSource, message.NewEntry)
|
||||
glog.V(0).Infof("create %s", remote_storage.FormatLocation(newDest))
|
||||
remoteEntry, writeErr := client.WriteFile(newDest, message.NewEntry, reader)
|
||||
if writeErr != nil {
|
||||
return writeErr
|
||||
}
|
||||
return updateLocalEntry(&remoteSyncOptions, message.NewParentPath, message.NewEntry, remoteEntry)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// the following is entry rename
|
||||
if oldOk {
|
||||
client, err := remote_storage.GetRemoteStorage(oldRemoteStorage)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
oldDest := toRemoteStorageLocation(oldBucket, util.NewFullPath(resp.Directory, message.OldEntry.Name), oldRemoteStorageMountLocation)
|
||||
if message.OldEntry.IsDirectory {
|
||||
return client.RemoveDirectory(oldDest)
|
||||
}
|
||||
glog.V(0).Infof("delete %s", remote_storage.FormatLocation(oldDest))
|
||||
if err := client.DeleteFile(oldDest); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if newOk {
|
||||
if !shouldSendToRemote(message.NewEntry) {
|
||||
glog.V(2).Infof("skipping updating: %+v", resp)
|
||||
return nil
|
||||
}
|
||||
client, err := remote_storage.GetRemoteStorage(newRemoteStorage)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
newDest := toRemoteStorageLocation(newBucket, util.NewFullPath(message.NewParentPath, message.NewEntry.Name), newRemoteStorageMountLocation)
|
||||
if message.NewEntry.IsDirectory {
|
||||
return client.WriteDirectory(newDest, message.NewEntry)
|
||||
}
|
||||
reader := filer.NewFileReader(filerSource, message.NewEntry)
|
||||
glog.V(0).Infof("create %s", remote_storage.FormatLocation(newDest))
|
||||
remoteEntry, writeErr := client.WriteFile(newDest, message.NewEntry, reader)
|
||||
if writeErr != nil {
|
||||
return writeErr
|
||||
}
|
||||
return updateLocalEntry(&remoteSyncOptions, message.NewParentPath, message.NewEntry, remoteEntry)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
return eachEntryFunc, nil
|
||||
}
|
||||
|
||||
func (option *RemoteGatewayOptions) findRemoteStorageClient(bucketName string) (client remote_storage.RemoteStorageClient, remoteStorageMountLocation *remote_pb.RemoteStorageLocation, err error) {
|
||||
bucket := util.FullPath(option.bucketsDir).Child(bucketName)
|
||||
|
||||
var isMounted bool
|
||||
remoteStorageMountLocation, isMounted = option.mappings.Mappings[string(bucket)]
|
||||
if !isMounted {
|
||||
return nil, remoteStorageMountLocation, fmt.Errorf("%s is not mounted", bucket)
|
||||
}
|
||||
remoteConf, hasClient := option.remoteConfs[remoteStorageMountLocation.Name]
|
||||
if !hasClient {
|
||||
return nil, remoteStorageMountLocation, fmt.Errorf("%s mounted to un-configured %+v", bucket, remoteStorageMountLocation)
|
||||
}
|
||||
|
||||
client, err = remote_storage.GetRemoteStorage(remoteConf)
|
||||
if err != nil {
|
||||
return nil, remoteStorageMountLocation, err
|
||||
}
|
||||
return client, remoteStorageMountLocation, nil
|
||||
}
|
||||
|
||||
func (option *RemoteGatewayOptions) detectBucketInfo(actualDir string) (bucket util.FullPath, remoteStorageMountLocation *remote_pb.RemoteStorageLocation, remoteConf *remote_pb.RemoteConf, ok bool) {
|
||||
bucket, ok = extractBucketPath(option.bucketsDir, actualDir)
|
||||
if !ok {
|
||||
return "", nil, nil, false
|
||||
}
|
||||
var isMounted bool
|
||||
remoteStorageMountLocation, isMounted = option.mappings.Mappings[string(bucket)]
|
||||
if !isMounted {
|
||||
glog.Warningf("%s is not mounted", bucket)
|
||||
return "", nil, nil, false
|
||||
}
|
||||
var hasClient bool
|
||||
remoteConf, hasClient = option.remoteConfs[remoteStorageMountLocation.Name]
|
||||
if !hasClient {
|
||||
glog.Warningf("%s mounted to un-configured %+v", bucket, remoteStorageMountLocation)
|
||||
return "", nil, nil, false
|
||||
}
|
||||
return bucket, remoteStorageMountLocation, remoteConf, true
|
||||
}
|
||||
|
||||
func extractBucketPath(bucketsDir, dir string) (util.FullPath, bool) {
|
||||
if !strings.HasPrefix(dir, bucketsDir+"/") {
|
||||
return "", false
|
||||
}
|
||||
parts := strings.SplitN(dir[len(bucketsDir)+1:], "/", 2)
|
||||
return util.FullPath(bucketsDir).Child(parts[0]), true
|
||||
}
|
||||
|
||||
func (option *RemoteGatewayOptions) collectRemoteStorageConf() (err error) {
|
||||
|
||||
if mappings, err := filer.ReadMountMappings(option.grpcDialOption, pb.ServerAddress(*option.filerAddress)); err != nil {
|
||||
return err
|
||||
} else {
|
||||
option.mappings = mappings
|
||||
}
|
||||
|
||||
option.remoteConfs = make(map[string]*remote_pb.RemoteConf)
|
||||
var lastConfName string
|
||||
err = filer_pb.List(option, filer.DirectoryEtcRemote, "", func(entry *filer_pb.Entry, isLast bool) error {
|
||||
if !strings.HasSuffix(entry.Name, filer.REMOTE_STORAGE_CONF_SUFFIX) {
|
||||
return nil
|
||||
}
|
||||
conf := &remote_pb.RemoteConf{}
|
||||
if err := proto.Unmarshal(entry.Content, conf); err != nil {
|
||||
return fmt.Errorf("unmarshal %s/%s: %v", filer.DirectoryEtcRemote, entry.Name, err)
|
||||
}
|
||||
option.remoteConfs[conf.Name] = conf
|
||||
lastConfName = conf.Name
|
||||
return nil
|
||||
}, "", false, math.MaxUint32)
|
||||
|
||||
if option.mappings.PrimaryBucketStorageName == "" && len(option.remoteConfs) == 1 {
|
||||
glog.V(0).Infof("%s is set to the default remote storage", lastConfName)
|
||||
option.mappings.PrimaryBucketStorageName = lastConfName
|
||||
}
|
||||
|
||||
return
|
||||
}
|
@ -1,13 +1,10 @@
|
||||
package command
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"github.com/chrislusf/seaweedfs/weed/filer"
|
||||
"github.com/chrislusf/seaweedfs/weed/glog"
|
||||
"github.com/chrislusf/seaweedfs/weed/pb"
|
||||
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
|
||||
"github.com/chrislusf/seaweedfs/weed/remote_storage"
|
||||
"github.com/chrislusf/seaweedfs/weed/replication/source"
|
||||
"github.com/chrislusf/seaweedfs/weed/security"
|
||||
"github.com/chrislusf/seaweedfs/weed/util"
|
||||
@ -16,22 +13,18 @@ import (
|
||||
)
|
||||
|
||||
type RemoteSyncOptions struct {
|
||||
filerAddress *string
|
||||
grpcDialOption grpc.DialOption
|
||||
readChunkFromFiler *bool
|
||||
debug *bool
|
||||
timeAgo *time.Duration
|
||||
dir *string
|
||||
}
|
||||
filerAddress *string
|
||||
grpcDialOption grpc.DialOption
|
||||
readChunkFromFiler *bool
|
||||
timeAgo *time.Duration
|
||||
dir *string
|
||||
|
||||
const (
|
||||
RemoteSyncKeyPrefix = "remote.sync."
|
||||
)
|
||||
}
|
||||
|
||||
var _ = filer_pb.FilerClient(&RemoteSyncOptions{})
|
||||
|
||||
func (option *RemoteSyncOptions) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error {
|
||||
return pb.WithFilerClient(*option.filerAddress, option.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
|
||||
return pb.WithFilerClient(pb.ServerAddress(*option.filerAddress), option.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
|
||||
return fn(client)
|
||||
})
|
||||
}
|
||||
@ -46,20 +39,22 @@ var (
|
||||
func init() {
|
||||
cmdFilerRemoteSynchronize.Run = runFilerRemoteSynchronize // break init cycle
|
||||
remoteSyncOptions.filerAddress = cmdFilerRemoteSynchronize.Flag.String("filer", "localhost:8888", "filer of the SeaweedFS cluster")
|
||||
remoteSyncOptions.dir = cmdFilerRemoteSynchronize.Flag.String("dir", "/", "a mounted directory on filer")
|
||||
remoteSyncOptions.dir = cmdFilerRemoteSynchronize.Flag.String("dir", "", "a mounted directory on filer")
|
||||
remoteSyncOptions.readChunkFromFiler = cmdFilerRemoteSynchronize.Flag.Bool("filerProxy", false, "read file chunks from filer instead of volume servers")
|
||||
remoteSyncOptions.debug = cmdFilerRemoteSynchronize.Flag.Bool("debug", false, "debug mode to print out filer updated remote files")
|
||||
remoteSyncOptions.timeAgo = cmdFilerRemoteSynchronize.Flag.Duration("timeAgo", 0, "start time before now. \"300ms\", \"1.5h\" or \"2h45m\". Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\", \"m\", \"h\"")
|
||||
}
|
||||
|
||||
var cmdFilerRemoteSynchronize = &Command{
|
||||
UsageLine: "filer.remote.sync -filer=<filerHost>:<filerPort> -dir=/mount/s3_on_cloud",
|
||||
Short: "resumable continuously write back updates to remote storage if the directory is mounted to the remote storage",
|
||||
Long: `resumable continuously write back updates to remote storage if the directory is mounted to the remote storage
|
||||
UsageLine: "filer.remote.sync",
|
||||
Short: "resumable continuously write back updates to remote storage",
|
||||
Long: `resumable continuously write back updates to remote storage
|
||||
|
||||
filer.remote.sync listens on filer update events.
|
||||
If any mounted remote file is updated, it will fetch the updated content,
|
||||
and write to the remote storage.
|
||||
|
||||
weed filer.remote.sync -dir=/mount/s3_on_cloud
|
||||
|
||||
`,
|
||||
}
|
||||
|
||||
@ -70,169 +65,29 @@ func runFilerRemoteSynchronize(cmd *Command, args []string) bool {
|
||||
remoteSyncOptions.grpcDialOption = grpcDialOption
|
||||
|
||||
dir := *remoteSyncOptions.dir
|
||||
filerAddress := *remoteSyncOptions.filerAddress
|
||||
|
||||
// read filer remote storage mount mappings
|
||||
_, _, remoteStorageMountLocation, storageConf, detectErr := filer.DetectMountInfo(grpcDialOption, filerAddress, dir)
|
||||
if detectErr != nil {
|
||||
fmt.Printf("read mount info: %v", detectErr)
|
||||
return false
|
||||
}
|
||||
filerAddress := pb.ServerAddress(*remoteSyncOptions.filerAddress)
|
||||
|
||||
filerSource := &source.FilerSource{}
|
||||
filerSource.DoInitialize(
|
||||
filerAddress,
|
||||
pb.ServerToGrpcAddress(filerAddress),
|
||||
filerAddress.ToHttpAddress(),
|
||||
filerAddress.ToGrpcAddress(),
|
||||
"/", // does not matter
|
||||
*remoteSyncOptions.readChunkFromFiler,
|
||||
)
|
||||
|
||||
fmt.Printf("synchronize %s to remote storage...\n", dir)
|
||||
util.RetryForever("filer.remote.sync "+dir, func() error {
|
||||
return followUpdatesAndUploadToRemote(&remoteSyncOptions, filerSource, dir, storageConf, remoteStorageMountLocation)
|
||||
}, func(err error) bool {
|
||||
if err != nil {
|
||||
glog.Errorf("synchronize %s: %v", dir, err)
|
||||
}
|
||||
if dir != "" {
|
||||
fmt.Printf("synchronize %s to remote storage...\n", dir)
|
||||
util.RetryForever("filer.remote.sync "+dir, func() error {
|
||||
return followUpdatesAndUploadToRemote(&remoteSyncOptions, filerSource, dir)
|
||||
}, func(err error) bool {
|
||||
if err != nil {
|
||||
glog.Errorf("synchronize %s: %v", dir, err)
|
||||
}
|
||||
return true
|
||||
})
|
||||
return true
|
||||
})
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func followUpdatesAndUploadToRemote(option *RemoteSyncOptions, filerSource *source.FilerSource, mountedDir string, remoteStorage *filer_pb.RemoteConf, remoteStorageMountLocation *filer_pb.RemoteStorageLocation) error {
|
||||
|
||||
dirHash := util.HashStringToLong(mountedDir)
|
||||
|
||||
// 1. specified by timeAgo
|
||||
// 2. last offset timestamp for this directory
|
||||
// 3. directory creation time
|
||||
var lastOffsetTs time.Time
|
||||
if *option.timeAgo == 0 {
|
||||
mountedDirEntry, err := filer_pb.GetEntry(option, util.FullPath(mountedDir))
|
||||
if err != nil {
|
||||
return fmt.Errorf("lookup %s: %v", mountedDir, err)
|
||||
}
|
||||
|
||||
lastOffsetTsNs, err := getOffset(option.grpcDialOption, *option.filerAddress, RemoteSyncKeyPrefix, int32(dirHash))
|
||||
if err == nil && mountedDirEntry.Attributes.Crtime < lastOffsetTsNs/1000000 {
|
||||
lastOffsetTs = time.Unix(0, lastOffsetTsNs)
|
||||
glog.V(0).Infof("resume from %v", lastOffsetTs)
|
||||
} else {
|
||||
lastOffsetTs = time.Unix(mountedDirEntry.Attributes.Crtime, 0)
|
||||
}
|
||||
} else {
|
||||
lastOffsetTs = time.Now().Add(-*option.timeAgo)
|
||||
}
|
||||
|
||||
client, err := remote_storage.GetRemoteStorage(remoteStorage)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
eachEntryFunc := func(resp *filer_pb.SubscribeMetadataResponse) error {
|
||||
message := resp.EventNotification
|
||||
if message.OldEntry == nil && message.NewEntry == nil {
|
||||
return nil
|
||||
}
|
||||
if message.OldEntry == nil && message.NewEntry != nil {
|
||||
if !filer.HasData(message.NewEntry) {
|
||||
return nil
|
||||
}
|
||||
glog.V(2).Infof("create: %+v", resp)
|
||||
if !shouldSendToRemote(message.NewEntry) {
|
||||
glog.V(2).Infof("skipping creating: %+v", resp)
|
||||
return nil
|
||||
}
|
||||
dest := toRemoteStorageLocation(util.FullPath(mountedDir), util.NewFullPath(message.NewParentPath, message.NewEntry.Name), remoteStorageMountLocation)
|
||||
if message.NewEntry.IsDirectory {
|
||||
glog.V(0).Infof("mkdir %s", remote_storage.FormatLocation(dest))
|
||||
return client.WriteDirectory(dest, message.NewEntry)
|
||||
}
|
||||
glog.V(0).Infof("create %s", remote_storage.FormatLocation(dest))
|
||||
reader := filer.NewFileReader(filerSource, message.NewEntry)
|
||||
remoteEntry, writeErr := client.WriteFile(dest, message.NewEntry, reader)
|
||||
if writeErr != nil {
|
||||
return writeErr
|
||||
}
|
||||
return updateLocalEntry(&remoteSyncOptions, message.NewParentPath, message.NewEntry, remoteEntry)
|
||||
}
|
||||
if message.OldEntry != nil && message.NewEntry == nil {
|
||||
glog.V(2).Infof("delete: %+v", resp)
|
||||
dest := toRemoteStorageLocation(util.FullPath(mountedDir), util.NewFullPath(resp.Directory, message.OldEntry.Name), remoteStorageMountLocation)
|
||||
glog.V(0).Infof("delete %s", remote_storage.FormatLocation(dest))
|
||||
return client.DeleteFile(dest)
|
||||
}
|
||||
if message.OldEntry != nil && message.NewEntry != nil {
|
||||
oldDest := toRemoteStorageLocation(util.FullPath(mountedDir), util.NewFullPath(resp.Directory, message.OldEntry.Name), remoteStorageMountLocation)
|
||||
dest := toRemoteStorageLocation(util.FullPath(mountedDir), util.NewFullPath(message.NewParentPath, message.NewEntry.Name), remoteStorageMountLocation)
|
||||
if !shouldSendToRemote(message.NewEntry) {
|
||||
glog.V(2).Infof("skipping updating: %+v", resp)
|
||||
return nil
|
||||
}
|
||||
if message.NewEntry.IsDirectory {
|
||||
return client.WriteDirectory(dest, message.NewEntry)
|
||||
}
|
||||
if resp.Directory == message.NewParentPath && message.OldEntry.Name == message.NewEntry.Name {
|
||||
if filer.IsSameData(message.OldEntry, message.NewEntry) {
|
||||
glog.V(2).Infof("update meta: %+v", resp)
|
||||
return client.UpdateFileMetadata(dest, message.OldEntry, message.NewEntry)
|
||||
}
|
||||
}
|
||||
glog.V(2).Infof("update: %+v", resp)
|
||||
glog.V(0).Infof("delete %s", remote_storage.FormatLocation(oldDest))
|
||||
if err := client.DeleteFile(oldDest); err != nil {
|
||||
return err
|
||||
}
|
||||
reader := filer.NewFileReader(filerSource, message.NewEntry)
|
||||
glog.V(0).Infof("create %s", remote_storage.FormatLocation(dest))
|
||||
remoteEntry, writeErr := client.WriteFile(dest, message.NewEntry, reader)
|
||||
if writeErr != nil {
|
||||
return writeErr
|
||||
}
|
||||
return updateLocalEntry(&remoteSyncOptions, message.NewParentPath, message.NewEntry, remoteEntry)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
processEventFnWithOffset := pb.AddOffsetFunc(eachEntryFunc, 3*time.Second, func(counter int64, lastTsNs int64) error {
|
||||
lastTime := time.Unix(0, lastTsNs)
|
||||
glog.V(0).Infof("remote sync %s progressed to %v %0.2f/sec", *option.filerAddress, lastTime, float64(counter)/float64(3))
|
||||
return setOffset(option.grpcDialOption, *option.filerAddress, RemoteSyncKeyPrefix, int32(dirHash), lastTsNs)
|
||||
})
|
||||
|
||||
return pb.FollowMetadata(*option.filerAddress, option.grpcDialOption,
|
||||
"filer.remote.sync", mountedDir, lastOffsetTs.UnixNano(), 0, processEventFnWithOffset, false)
|
||||
}
|
||||
|
||||
func toRemoteStorageLocation(mountDir, sourcePath util.FullPath, remoteMountLocation *filer_pb.RemoteStorageLocation) *filer_pb.RemoteStorageLocation {
|
||||
source := string(sourcePath[len(mountDir):])
|
||||
dest := util.FullPath(remoteMountLocation.Path).Child(source)
|
||||
return &filer_pb.RemoteStorageLocation{
|
||||
Name: remoteMountLocation.Name,
|
||||
Bucket: remoteMountLocation.Bucket,
|
||||
Path: string(dest),
|
||||
}
|
||||
}
|
||||
|
||||
func shouldSendToRemote(entry *filer_pb.Entry) bool {
|
||||
if entry.RemoteEntry == nil {
|
||||
return true
|
||||
}
|
||||
if entry.RemoteEntry.LastLocalSyncTsNs/1e9 < entry.Attributes.Mtime {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func updateLocalEntry(filerClient filer_pb.FilerClient, dir string, entry *filer_pb.Entry, remoteEntry *filer_pb.RemoteEntry) error {
|
||||
entry.RemoteEntry = remoteEntry
|
||||
return filerClient.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
|
||||
_, err := client.UpdateEntry(context.Background(), &filer_pb.UpdateEntryRequest{
|
||||
Directory: dir,
|
||||
Entry: entry,
|
||||
})
|
||||
return err
|
||||
})
|
||||
|
||||
}
|
||||
|
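A recurring change in this commit is the switch from raw "host:port" strings (and the pb.ServerToGrpcAddress / pb.ParseServerToGrpcAddress helpers) to the pb.ServerAddress type with ToHttpAddress() and ToGrpcAddress() methods. The sketch below is only an illustration of the semantics the diff appears to rely on, not the actual weed/pb implementation: the gRPC port is assumed to default to the HTTP port plus 10000 (matching the "*port + 10000" code being removed and the new -port.grpc flag defaults of 19333, 18888 and 18080), and the "host:port.grpcPort" string form, NewServerAddress, and split are illustrative names.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// ServerAddress is assumed to be "host:port" or "host:port.grpcPort".
type ServerAddress string

// NewServerAddress keeps the address compact when the gRPC port is the
// conventional httpPort+10000 offset; otherwise it is spelled out.
func NewServerAddress(host string, port, grpcPort int) ServerAddress {
	if grpcPort == 0 || grpcPort == port+10000 {
		return ServerAddress(fmt.Sprintf("%s:%d", host, port))
	}
	return ServerAddress(fmt.Sprintf("%s:%d.%d", host, port, grpcPort))
}

func (sa ServerAddress) split() (host string, port, grpcPort int) {
	s := string(sa)
	colon := strings.LastIndex(s, ":")
	host, portPart := s[:colon], s[colon+1:]
	if dot := strings.Index(portPart, "."); dot >= 0 {
		port, _ = strconv.Atoi(portPart[:dot])
		grpcPort, _ = strconv.Atoi(portPart[dot+1:])
	} else {
		port, _ = strconv.Atoi(portPart)
		grpcPort = port + 10000 // assumed default offset
	}
	return
}

func (sa ServerAddress) ToHttpAddress() string {
	host, port, _ := sa.split()
	return fmt.Sprintf("%s:%d", host, port)
}

func (sa ServerAddress) ToGrpcAddress() string {
	host, _, grpcPort := sa.split()
	return fmt.Sprintf("%s:%d", host, grpcPort)
}

func main() {
	a := NewServerAddress("localhost", 8888, 18888)
	fmt.Println(a, a.ToHttpAddress(), a.ToGrpcAddress())
	// localhost:8888 localhost:8888 localhost:18888
}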
222
weed/command/filer_remote_sync_dir.go
Normal file
@ -0,0 +1,222 @@
|
||||
package command
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"github.com/chrislusf/seaweedfs/weed/filer"
|
||||
"github.com/chrislusf/seaweedfs/weed/glog"
|
||||
"github.com/chrislusf/seaweedfs/weed/pb"
|
||||
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
|
||||
"github.com/chrislusf/seaweedfs/weed/pb/remote_pb"
|
||||
"github.com/chrislusf/seaweedfs/weed/remote_storage"
|
||||
"github.com/chrislusf/seaweedfs/weed/replication/source"
|
||||
"github.com/chrislusf/seaweedfs/weed/util"
|
||||
"github.com/golang/protobuf/proto"
|
||||
"google.golang.org/grpc"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
func followUpdatesAndUploadToRemote(option *RemoteSyncOptions, filerSource *source.FilerSource, mountedDir string) error {
|
||||
|
||||
// read filer remote storage mount mappings
|
||||
_, _, remoteStorageMountLocation, remoteStorage, detectErr := filer.DetectMountInfo(option.grpcDialOption, pb.ServerAddress(*option.filerAddress), mountedDir)
|
||||
if detectErr != nil {
|
||||
return fmt.Errorf("read mount info: %v", detectErr)
|
||||
}
|
||||
|
||||
eachEntryFunc, err := makeEventProcessor(remoteStorage, mountedDir, remoteStorageMountLocation, filerSource)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
processEventFnWithOffset := pb.AddOffsetFunc(eachEntryFunc, 3*time.Second, func(counter int64, lastTsNs int64) error {
|
||||
lastTime := time.Unix(0, lastTsNs)
|
||||
glog.V(0).Infof("remote sync %s progressed to %v %0.2f/sec", *option.filerAddress, lastTime, float64(counter)/float64(3))
|
||||
return remote_storage.SetSyncOffset(option.grpcDialOption, pb.ServerAddress(*option.filerAddress), mountedDir, lastTsNs)
|
||||
})
|
||||
|
||||
lastOffsetTs := collectLastSyncOffset(option, option.grpcDialOption, pb.ServerAddress(*option.filerAddress), mountedDir, *option.timeAgo)
|
||||
|
||||
return pb.FollowMetadata(pb.ServerAddress(*option.filerAddress), option.grpcDialOption, "filer.remote.sync",
|
||||
mountedDir, []string{filer.DirectoryEtcRemote}, lastOffsetTs.UnixNano(), 0, processEventFnWithOffset, false)
|
||||
}
|
||||
|
||||
func makeEventProcessor(remoteStorage *remote_pb.RemoteConf, mountedDir string, remoteStorageMountLocation *remote_pb.RemoteStorageLocation, filerSource *source.FilerSource) (pb.ProcessMetadataFunc, error) {
|
||||
client, err := remote_storage.GetRemoteStorage(remoteStorage)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
handleEtcRemoteChanges := func(resp *filer_pb.SubscribeMetadataResponse) error {
|
||||
message := resp.EventNotification
|
||||
if message.NewEntry == nil {
|
||||
return nil
|
||||
}
|
||||
if message.NewEntry.Name == filer.REMOTE_STORAGE_MOUNT_FILE {
|
||||
mappings, readErr := filer.UnmarshalRemoteStorageMappings(message.NewEntry.Content)
|
||||
if readErr != nil {
|
||||
return fmt.Errorf("unmarshal mappings: %v", readErr)
|
||||
}
|
||||
if remoteLoc, found := mappings.Mappings[mountedDir]; found {
|
||||
if remoteStorageMountLocation.Bucket != remoteLoc.Bucket || remoteStorageMountLocation.Path != remoteLoc.Path {
|
||||
glog.Fatalf("Unexpected mount changes %+v => %+v", remoteStorageMountLocation, remoteLoc)
|
||||
}
|
||||
} else {
|
||||
glog.V(0).Infof("unmounted %s exiting ...", mountedDir)
|
||||
os.Exit(0)
|
||||
}
|
||||
}
|
||||
if message.NewEntry.Name == remoteStorage.Name+filer.REMOTE_STORAGE_CONF_SUFFIX {
|
||||
conf := &remote_pb.RemoteConf{}
|
||||
if err := proto.Unmarshal(message.NewEntry.Content, conf); err != nil {
|
||||
return fmt.Errorf("unmarshal %s/%s: %v", filer.DirectoryEtcRemote, message.NewEntry.Name, err)
|
||||
}
|
||||
remoteStorage = conf
|
||||
if newClient, err := remote_storage.GetRemoteStorage(remoteStorage); err == nil {
|
||||
client = newClient
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
eachEntryFunc := func(resp *filer_pb.SubscribeMetadataResponse) error {
|
||||
message := resp.EventNotification
|
||||
if strings.HasPrefix(resp.Directory, filer.DirectoryEtcRemote) {
|
||||
return handleEtcRemoteChanges(resp)
|
||||
}
|
||||
|
||||
if message.OldEntry == nil && message.NewEntry == nil {
|
||||
return nil
|
||||
}
|
||||
if message.OldEntry == nil && message.NewEntry != nil {
|
||||
if !filer.HasData(message.NewEntry) {
|
||||
return nil
|
||||
}
|
||||
glog.V(2).Infof("create: %+v", resp)
|
||||
if !shouldSendToRemote(message.NewEntry) {
|
||||
glog.V(2).Infof("skipping creating: %+v", resp)
|
||||
return nil
|
||||
}
|
||||
dest := toRemoteStorageLocation(util.FullPath(mountedDir), util.NewFullPath(message.NewParentPath, message.NewEntry.Name), remoteStorageMountLocation)
|
||||
if message.NewEntry.IsDirectory {
|
||||
glog.V(0).Infof("mkdir %s", remote_storage.FormatLocation(dest))
|
||||
return client.WriteDirectory(dest, message.NewEntry)
|
||||
}
|
||||
glog.V(0).Infof("create %s", remote_storage.FormatLocation(dest))
|
||||
reader := filer.NewFileReader(filerSource, message.NewEntry)
|
||||
remoteEntry, writeErr := client.WriteFile(dest, message.NewEntry, reader)
|
||||
if writeErr != nil {
|
||||
return writeErr
|
||||
}
|
||||
return updateLocalEntry(&remoteSyncOptions, message.NewParentPath, message.NewEntry, remoteEntry)
|
||||
}
|
||||
if message.OldEntry != nil && message.NewEntry == nil {
|
||||
glog.V(2).Infof("delete: %+v", resp)
|
||||
dest := toRemoteStorageLocation(util.FullPath(mountedDir), util.NewFullPath(resp.Directory, message.OldEntry.Name), remoteStorageMountLocation)
|
||||
if message.OldEntry.IsDirectory {
|
||||
glog.V(0).Infof("rmdir %s", remote_storage.FormatLocation(dest))
|
||||
return client.RemoveDirectory(dest)
|
||||
}
|
||||
glog.V(0).Infof("delete %s", remote_storage.FormatLocation(dest))
|
||||
return client.DeleteFile(dest)
|
||||
}
|
||||
if message.OldEntry != nil && message.NewEntry != nil {
|
||||
oldDest := toRemoteStorageLocation(util.FullPath(mountedDir), util.NewFullPath(resp.Directory, message.OldEntry.Name), remoteStorageMountLocation)
|
||||
dest := toRemoteStorageLocation(util.FullPath(mountedDir), util.NewFullPath(message.NewParentPath, message.NewEntry.Name), remoteStorageMountLocation)
|
||||
if !shouldSendToRemote(message.NewEntry) {
|
||||
glog.V(2).Infof("skipping updating: %+v", resp)
|
||||
return nil
|
||||
}
|
||||
if message.NewEntry.IsDirectory {
|
||||
return client.WriteDirectory(dest, message.NewEntry)
|
||||
}
|
||||
if resp.Directory == message.NewParentPath && message.OldEntry.Name == message.NewEntry.Name {
|
||||
if filer.IsSameData(message.OldEntry, message.NewEntry) {
|
||||
glog.V(2).Infof("update meta: %+v", resp)
|
||||
return client.UpdateFileMetadata(dest, message.OldEntry, message.NewEntry)
|
||||
}
|
||||
}
|
||||
glog.V(2).Infof("update: %+v", resp)
|
||||
glog.V(0).Infof("delete %s", remote_storage.FormatLocation(oldDest))
|
||||
if err := client.DeleteFile(oldDest); err != nil {
|
||||
return err
|
||||
}
|
||||
reader := filer.NewFileReader(filerSource, message.NewEntry)
|
||||
glog.V(0).Infof("create %s", remote_storage.FormatLocation(dest))
|
||||
remoteEntry, writeErr := client.WriteFile(dest, message.NewEntry, reader)
|
||||
if writeErr != nil {
|
||||
return writeErr
|
||||
}
|
||||
return updateLocalEntry(&remoteSyncOptions, message.NewParentPath, message.NewEntry, remoteEntry)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
return eachEntryFunc, nil
|
||||
}
|
||||
|
||||
func collectLastSyncOffset(filerClient filer_pb.FilerClient, grpcDialOption grpc.DialOption, filerAddress pb.ServerAddress, mountedDir string, timeAgo time.Duration) time.Time {
|
||||
// 1. specified by timeAgo
|
||||
// 2. last offset timestamp for this directory
|
||||
// 3. directory creation time
|
||||
var lastOffsetTs time.Time
|
||||
if timeAgo == 0 {
|
||||
mountedDirEntry, err := filer_pb.GetEntry(filerClient, util.FullPath(mountedDir))
|
||||
if err != nil {
|
||||
glog.V(0).Infof("get mounted directory %s: %v", mountedDir, err)
|
||||
return time.Now()
|
||||
}
|
||||
|
||||
lastOffsetTsNs, err := remote_storage.GetSyncOffset(grpcDialOption, filerAddress, mountedDir)
|
||||
if mountedDirEntry != nil {
|
||||
if err == nil && mountedDirEntry.Attributes.Crtime < lastOffsetTsNs/1000000 {
|
||||
lastOffsetTs = time.Unix(0, lastOffsetTsNs)
|
||||
glog.V(0).Infof("resume from %v", lastOffsetTs)
|
||||
} else {
|
||||
lastOffsetTs = time.Unix(mountedDirEntry.Attributes.Crtime, 0)
|
||||
}
|
||||
} else {
|
||||
lastOffsetTs = time.Now()
|
||||
}
|
||||
} else {
|
||||
lastOffsetTs = time.Now().Add(-timeAgo)
|
||||
}
|
||||
return lastOffsetTs
|
||||
}
|
||||
|
||||
func toRemoteStorageLocation(mountDir, sourcePath util.FullPath, remoteMountLocation *remote_pb.RemoteStorageLocation) *remote_pb.RemoteStorageLocation {
|
||||
source := string(sourcePath[len(mountDir):])
|
||||
dest := util.FullPath(remoteMountLocation.Path).Child(source)
|
||||
return &remote_pb.RemoteStorageLocation{
|
||||
Name: remoteMountLocation.Name,
|
||||
Bucket: remoteMountLocation.Bucket,
|
||||
Path: string(dest),
|
||||
}
|
||||
}
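As a concrete illustration of the mapping above, with made-up values (a filer directory /mount/s3_on_cloud mounted at remote path /backup), toRemoteStorageLocation strips the mount-dir prefix and joins the remainder under the remote mount path; the snippet below reproduces only that arithmetic, not the filer_pb types.

package main

import (
	"fmt"
	"path"
)

func main() {
	// Hypothetical example values, not taken from the commit itself.
	mountDir := "/mount/s3_on_cloud"
	remoteMountPath := "/backup"
	sourcePath := "/mount/s3_on_cloud/photos/2021/a.jpg"

	// Same arithmetic as toRemoteStorageLocation: drop the mount-dir
	// prefix, then place the remainder under the remote mount path.
	suffix := sourcePath[len(mountDir):] // "/photos/2021/a.jpg"
	dest := path.Join(remoteMountPath, suffix)
	fmt.Println(dest) // /backup/photos/2021/a.jpg
}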
|
||||
|
||||
func shouldSendToRemote(entry *filer_pb.Entry) bool {
|
||||
if entry.RemoteEntry == nil {
|
||||
return true
|
||||
}
|
||||
if entry.RemoteEntry.RemoteMtime < entry.Attributes.Mtime {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
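The RemoteMtime guard above is what keeps the sync loop from echoing its own uploads: updateLocalEntry just below stamps the entry's RemoteEntry after each write, so the metadata event generated by that stamp presumably fails this check, and only a later local modification (Mtime advancing past RemoteMtime) triggers another upload. The worked example below uses minimal stand-in structs and assumes both timestamps are in seconds, which matches the old LastLocalSyncTsNs/1e9 comparison being replaced.

package main

import "fmt"

// Minimal stand-ins for the protobuf fields referenced above; the real
// types live in filer_pb and carry many more fields.
type RemoteEntry struct{ RemoteMtime int64 } // seconds (assumed)
type Attributes struct{ Mtime int64 }        // seconds
type Entry struct {
	RemoteEntry *RemoteEntry
	Attributes  *Attributes
}

func shouldSendToRemote(entry *Entry) bool {
	if entry.RemoteEntry == nil {
		return true // never uploaded yet
	}
	// upload only if the local copy changed after the last remote write
	return entry.RemoteEntry.RemoteMtime < entry.Attributes.Mtime
}

func main() {
	justSynced := &Entry{RemoteEntry: &RemoteEntry{RemoteMtime: 1700000100}, Attributes: &Attributes{Mtime: 1700000000}}
	editedLater := &Entry{RemoteEntry: &RemoteEntry{RemoteMtime: 1700000100}, Attributes: &Attributes{Mtime: 1700000200}}
	fmt.Println(shouldSendToRemote(justSynced), shouldSendToRemote(editedLater)) // false true
}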
|
||||
|
||||
func updateLocalEntry(filerClient filer_pb.FilerClient, dir string, entry *filer_pb.Entry, remoteEntry *filer_pb.RemoteEntry) error {
|
||||
remoteEntry.LastLocalSyncTsNs = time.Now().UnixNano()
|
||||
entry.RemoteEntry = remoteEntry
|
||||
return filerClient.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
|
||||
_, err := client.UpdateEntry(context.Background(), &filer_pb.UpdateEntryRequest{
|
||||
Directory: dir,
|
||||
Entry: entry,
|
||||
})
|
||||
return err
|
||||
})
|
||||
}
|
@ -93,9 +93,11 @@ func runFilerSynchronize(cmd *Command, args []string) bool {
|
||||
|
||||
grace.SetupProfiling(*syncCpuProfile, *syncMemProfile)
|
||||
|
||||
filerA := pb.ServerAddress(*syncOptions.filerA)
|
||||
filerB := pb.ServerAddress(*syncOptions.filerB)
|
||||
go func() {
|
||||
for {
|
||||
err := doSubscribeFilerMetaChanges(grpcDialOption, *syncOptions.filerA, *syncOptions.aPath, *syncOptions.aProxyByFiler, *syncOptions.filerB,
|
||||
err := doSubscribeFilerMetaChanges(grpcDialOption, filerA, *syncOptions.aPath, *syncOptions.aProxyByFiler, filerB,
|
||||
*syncOptions.bPath, *syncOptions.bReplication, *syncOptions.bCollection, *syncOptions.bTtlSec, *syncOptions.bProxyByFiler, *syncOptions.bDiskType, *syncOptions.bDebug)
|
||||
if err != nil {
|
||||
glog.Errorf("sync from %s to %s: %v", *syncOptions.filerA, *syncOptions.filerB, err)
|
||||
@ -107,7 +109,7 @@ func runFilerSynchronize(cmd *Command, args []string) bool {
|
||||
if !*syncOptions.isActivePassive {
|
||||
go func() {
|
||||
for {
|
||||
err := doSubscribeFilerMetaChanges(grpcDialOption, *syncOptions.filerB, *syncOptions.bPath, *syncOptions.bProxyByFiler, *syncOptions.filerA,
|
||||
err := doSubscribeFilerMetaChanges(grpcDialOption, filerB, *syncOptions.bPath, *syncOptions.bProxyByFiler, filerA,
|
||||
*syncOptions.aPath, *syncOptions.aReplication, *syncOptions.aCollection, *syncOptions.aTtlSec, *syncOptions.aProxyByFiler, *syncOptions.aDiskType, *syncOptions.aDebug)
|
||||
if err != nil {
|
||||
glog.Errorf("sync from %s to %s: %v", *syncOptions.filerB, *syncOptions.filerA, err)
|
||||
@ -122,7 +124,7 @@ func runFilerSynchronize(cmd *Command, args []string) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func doSubscribeFilerMetaChanges(grpcDialOption grpc.DialOption, sourceFiler, sourcePath string, sourceReadChunkFromFiler bool, targetFiler, targetPath string,
|
||||
func doSubscribeFilerMetaChanges(grpcDialOption grpc.DialOption, sourceFiler pb.ServerAddress, sourcePath string, sourceReadChunkFromFiler bool, targetFiler pb.ServerAddress, targetPath string,
|
||||
replicationStr, collection string, ttlSec int, sinkWriteChunkByFiler bool, diskType string, debug bool) error {
|
||||
|
||||
// read source filer signature
|
||||
@ -147,9 +149,9 @@ func doSubscribeFilerMetaChanges(grpcDialOption grpc.DialOption, sourceFiler, so
|
||||
|
||||
// create filer sink
|
||||
filerSource := &source.FilerSource{}
|
||||
filerSource.DoInitialize(sourceFiler, pb.ServerToGrpcAddress(sourceFiler), sourcePath, sourceReadChunkFromFiler)
|
||||
filerSource.DoInitialize(sourceFiler.ToHttpAddress(), sourceFiler.ToGrpcAddress(), sourcePath, sourceReadChunkFromFiler)
|
||||
filerSink := &filersink.FilerSink{}
|
||||
filerSink.DoInitialize(targetFiler, pb.ServerToGrpcAddress(targetFiler), targetPath, replicationStr, collection, ttlSec, diskType, grpcDialOption, sinkWriteChunkByFiler)
|
||||
filerSink.DoInitialize(targetFiler.ToHttpAddress(), targetFiler.ToGrpcAddress(), targetPath, replicationStr, collection, ttlSec, diskType, grpcDialOption, sinkWriteChunkByFiler)
|
||||
filerSink.SetSourceFiler(filerSource)
|
||||
|
||||
persistEventFn := genProcessFunction(sourcePath, targetPath, filerSink, debug)
|
||||
@ -170,8 +172,8 @@ func doSubscribeFilerMetaChanges(grpcDialOption grpc.DialOption, sourceFiler, so
|
||||
return setOffset(grpcDialOption, targetFiler, SyncKeyPrefix, sourceFilerSignature, lastTsNs)
|
||||
})
|
||||
|
||||
return pb.FollowMetadata(sourceFiler, grpcDialOption, "syncTo_"+targetFiler,
|
||||
sourcePath, sourceFilerOffsetTsNs, targetFilerSignature, processEventFnWithOffset, false)
|
||||
return pb.FollowMetadata(sourceFiler, grpcDialOption, "syncTo_"+string(targetFiler),
|
||||
sourcePath, nil, sourceFilerOffsetTsNs, targetFilerSignature, processEventFnWithOffset, false)
|
||||
|
||||
}
|
||||
|
||||
@ -179,7 +181,7 @@ const (
|
||||
SyncKeyPrefix = "sync."
|
||||
)
|
||||
|
||||
func getOffset(grpcDialOption grpc.DialOption, filer string, signaturePrefix string, signature int32) (lastOffsetTsNs int64, readErr error) {
|
||||
func getOffset(grpcDialOption grpc.DialOption, filer pb.ServerAddress, signaturePrefix string, signature int32) (lastOffsetTsNs int64, readErr error) {
|
||||
|
||||
readErr = pb.WithFilerClient(filer, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
|
||||
syncKey := []byte(signaturePrefix + "____")
|
||||
@ -206,7 +208,7 @@ func getOffset(grpcDialOption grpc.DialOption, filer string, signaturePrefix str
|
||||
|
||||
}
|
||||
|
||||
func setOffset(grpcDialOption grpc.DialOption, filer string, signaturePrefix string, signature int32, offsetTsNs int64) error {
|
||||
func setOffset(grpcDialOption grpc.DialOption, filer pb.ServerAddress, signaturePrefix string, signature int32, offsetTsNs int64) error {
|
||||
return pb.WithFilerClient(filer, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
|
||||
|
||||
syncKey := []byte(signaturePrefix + "____")
|
||||
|
@ -1,3 +1,4 @@
|
||||
//go:build !windows
|
||||
// +build !windows
|
||||
|
||||
package command
|
||||
|
@ -43,38 +43,35 @@ func runIam(cmd *Command, args []string) bool {
|
||||
}
|
||||
|
||||
func (iamopt *IamOptions) startIamServer() bool {
|
||||
filerGrpcAddress, err := pb.ParseServerToGrpcAddress(*iamopt.filer)
|
||||
if err != nil {
|
||||
glog.Fatal(err)
|
||||
return false
|
||||
}
|
||||
filerAddress := pb.ServerAddress(*iamopt.filer)
|
||||
|
||||
util.LoadConfiguration("security", false)
|
||||
grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
|
||||
for {
|
||||
err = pb.WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
|
||||
err := pb.WithGrpcFilerClient(filerAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
|
||||
resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err)
|
||||
return fmt.Errorf("get filer %s configuration: %v", filerAddress, err)
|
||||
}
|
||||
glog.V(0).Infof("IAM read filer configuration: %s", resp)
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
glog.V(0).Infof("wait to connect to filer %s grpc address %s", *iamopt.filer, filerGrpcAddress)
|
||||
glog.V(0).Infof("wait to connect to filer %s grpc address %s", *iamopt.filer, filerAddress.ToGrpcAddress())
|
||||
time.Sleep(time.Second)
|
||||
} else {
|
||||
glog.V(0).Infof("connected to filer %s grpc address %s", *iamopt.filer, filerGrpcAddress)
|
||||
glog.V(0).Infof("connected to filer %s grpc address %s", *iamopt.filer, filerAddress.ToGrpcAddress())
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
masters := pb.ServerAddresses(*iamopt.masters).ToAddresses()
|
||||
router := mux.NewRouter().SkipClean(true)
|
||||
_, iamApiServer_err := iamapi.NewIamApiServer(router, &iamapi.IamServerOption{
|
||||
Filer: *iamopt.filer,
|
||||
Port: *iamopt.port,
|
||||
FilerGrpcAddress: filerGrpcAddress,
|
||||
GrpcDialOption: grpcDialOption,
|
||||
Masters: masters,
|
||||
Filer: filerAddress,
|
||||
Port: *iamopt.port,
|
||||
GrpcDialOption: grpcDialOption,
|
||||
})
|
||||
glog.V(0).Info("NewIamApiServer created")
|
||||
if iamApiServer_err != nil {
|
||||
|
@ -3,6 +3,9 @@ package command
|
||||
import (
|
||||
_ "net/http/pprof"
|
||||
|
||||
_ "github.com/chrislusf/seaweedfs/weed/remote_storage/azure"
|
||||
_ "github.com/chrislusf/seaweedfs/weed/remote_storage/gcs"
|
||||
_ "github.com/chrislusf/seaweedfs/weed/remote_storage/hdfs"
|
||||
_ "github.com/chrislusf/seaweedfs/weed/remote_storage/s3"
|
||||
|
||||
_ "github.com/chrislusf/seaweedfs/weed/replication/sink/azuresink"
|
||||
@ -27,4 +30,5 @@ import (
|
||||
_ "github.com/chrislusf/seaweedfs/weed/filer/redis"
|
||||
_ "github.com/chrislusf/seaweedfs/weed/filer/redis2"
|
||||
_ "github.com/chrislusf/seaweedfs/weed/filer/sqlite"
|
||||
_ "github.com/chrislusf/seaweedfs/weed/filer/tikv"
|
||||
)
|
||||
|
@ -7,7 +7,6 @@ import (
|
||||
"net/http"
|
||||
"os"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
@ -28,6 +27,7 @@ var (
|
||||
|
||||
type MasterOptions struct {
|
||||
port *int
|
||||
portGrpc *int
|
||||
ip *string
|
||||
ipBind *string
|
||||
metaFolder *string
|
||||
@ -47,6 +47,7 @@ type MasterOptions struct {
|
||||
func init() {
|
||||
cmdMaster.Run = runMaster // break init cycle
|
||||
m.port = cmdMaster.Flag.Int("port", 9333, "http listen port")
|
||||
m.portGrpc = cmdMaster.Flag.Int("port.grpc", 19333, "grpc listen port")
|
||||
m.ip = cmdMaster.Flag.String("ip", util.DetectedHostAddress(), "master <ip>|<server> address, also used as identifier")
|
||||
m.ipBind = cmdMaster.Flag.String("ip.bind", "", "ip address to bind to")
|
||||
m.metaFolder = cmdMaster.Flag.String("mdir", os.TempDir(), "data directory to store meta data")
|
||||
@ -112,11 +113,11 @@ func startMaster(masterOption MasterOptions, masterWhiteList []string) {
|
||||
|
||||
backend.LoadConfiguration(util.GetViper())
|
||||
|
||||
myMasterAddress, peers := checkPeers(*masterOption.ip, *masterOption.port, *masterOption.peers)
|
||||
myMasterAddress, peers := checkPeers(*masterOption.ip, *masterOption.port, *masterOption.portGrpc, *masterOption.peers)
|
||||
|
||||
r := mux.NewRouter()
|
||||
ms := weed_server.NewMasterServer(r, masterOption.toMasterOption(masterWhiteList), peers)
|
||||
listeningAddress := *masterOption.ipBind + ":" + strconv.Itoa(*masterOption.port)
|
||||
listeningAddress := util.JoinHostPort(*masterOption.ipBind, *masterOption.port)
|
||||
glog.V(0).Infof("Start Seaweed Master %s at %s", util.Version(), listeningAddress)
|
||||
masterListener, e := util.NewListener(listeningAddress, 0)
|
||||
if e != nil {
|
||||
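Several call sites in this commit replace manual ip + ":" + strconv.Itoa(port) concatenation with util.JoinHostPort. A plausible motivation, which is an assumption and not stated in the diff, is correct handling of IPv6 bind addresses, which need brackets; the standard-library call below shows the behavior such a helper would provide.

package main

import (
	"fmt"
	"net"
	"strconv"
)

func main() {
	// net.JoinHostPort adds the brackets an IPv6 literal needs,
	// which naive string concatenation does not.
	fmt.Println(net.JoinHostPort("::1", strconv.Itoa(9333)))     // [::1]:9333
	fmt.Println(net.JoinHostPort("0.0.0.0", strconv.Itoa(9333))) // 0.0.0.0:9333
	fmt.Println("::1" + ":" + strconv.Itoa(9333))                // ::1:9333 (ambiguous)
}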
@ -131,8 +132,8 @@ func startMaster(masterOption MasterOptions, masterWhiteList []string) {
|
||||
ms.SetRaftServer(raftServer)
|
||||
r.HandleFunc("/cluster/status", raftServer.StatusHandler).Methods("GET")
|
||||
// starting grpc server
|
||||
grpcPort := *masterOption.port + 10000
|
||||
grpcL, err := util.NewListener(*masterOption.ipBind+":"+strconv.Itoa(grpcPort), 0)
|
||||
grpcPort := *masterOption.portGrpc
|
||||
grpcL, err := util.NewListener(util.JoinHostPort(*masterOption.ipBind, grpcPort), 0)
|
||||
if err != nil {
|
||||
glog.Fatalf("master failed to listen on grpc port %d: %v", grpcPort, err)
|
||||
}
|
||||
@ -161,16 +162,14 @@ func startMaster(masterOption MasterOptions, masterWhiteList []string) {
|
||||
select {}
|
||||
}
|
||||
|
||||
func checkPeers(masterIp string, masterPort int, peers string) (masterAddress string, cleanedPeers []string) {
|
||||
func checkPeers(masterIp string, masterPort int, masterGrpcPort int, peers string) (masterAddress pb.ServerAddress, cleanedPeers []pb.ServerAddress) {
|
||||
glog.V(0).Infof("current: %s:%d peers:%s", masterIp, masterPort, peers)
|
||||
masterAddress = masterIp + ":" + strconv.Itoa(masterPort)
|
||||
if peers != "" {
|
||||
cleanedPeers = strings.Split(peers, ",")
|
||||
}
|
||||
masterAddress = pb.NewServerAddress(masterIp, masterPort, masterGrpcPort)
|
||||
cleanedPeers = pb.ServerAddresses(peers).ToAddresses()
|
||||
|
||||
hasSelf := false
|
||||
for _, peer := range cleanedPeers {
|
||||
if peer == masterAddress {
|
||||
if peer.ToHttpAddress() == masterAddress.ToHttpAddress() {
|
||||
hasSelf = true
|
||||
break
|
||||
}
|
||||
@ -180,13 +179,15 @@ func checkPeers(masterIp string, masterPort int, peers string) (masterAddress st
|
||||
cleanedPeers = append(cleanedPeers, masterAddress)
|
||||
}
|
||||
if len(cleanedPeers)%2 == 0 {
|
||||
glog.Fatalf("Only odd number of masters are supported!")
|
||||
glog.Fatalf("Only odd number of masters are supported: %+v", cleanedPeers)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func isTheFirstOne(self string, peers []string) bool {
|
||||
sort.Strings(peers)
|
||||
func isTheFirstOne(self pb.ServerAddress, peers []pb.ServerAddress) bool {
|
||||
sort.Slice(peers, func(i, j int) bool {
|
||||
return strings.Compare(string(peers[i]), string(peers[j])) < 0
|
||||
})
|
||||
if len(peers) <= 0 {
|
||||
return true
|
||||
}
|
||||
@ -194,9 +195,9 @@ func isTheFirstOne(self string, peers []string) bool {
|
||||
}
|
||||
|
||||
func (m *MasterOptions) toMasterOption(whiteList []string) *weed_server.MasterOption {
|
||||
masterAddress := pb.NewServerAddress(*m.ip, *m.port, *m.portGrpc)
|
||||
return &weed_server.MasterOption{
|
||||
Host: *m.ip,
|
||||
Port: *m.port,
|
||||
Master: masterAddress,
|
||||
MetaFolder: *m.metaFolder,
|
||||
VolumeSizeLimitMB: uint32(*m.volumeSizeLimitMB),
|
||||
VolumePreallocate: *m.volumePreallocate,
|
||||
|
@ -13,8 +13,6 @@ import (
|
||||
"github.com/gorilla/mux"
|
||||
"google.golang.org/grpc/reflection"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
@ -25,6 +23,7 @@ var (
|
||||
func init() {
|
||||
cmdMasterFollower.Run = runMasterFollower // break init cycle
|
||||
mf.port = cmdMasterFollower.Flag.Int("port", 9334, "http listen port")
|
||||
mf.portGrpc = cmdMasterFollower.Flag.Int("port.grpc", 19334, "grpc listen port")
|
||||
mf.ipBind = cmdMasterFollower.Flag.String("ip.bind", "", "ip address to bind to")
|
||||
mf.peers = cmdMasterFollower.Flag.String("masters", "localhost:9333", "all master nodes in comma separated ip:port list, example: 127.0.0.1:9093,127.0.0.1:9094,127.0.0.1:9095")
|
||||
|
||||
@ -79,19 +78,15 @@ func runMasterFollower(cmd *Command, args []string) bool {
|
||||
func startMasterFollower(masterOptions MasterOptions) {
|
||||
|
||||
// collect settings from main masters
|
||||
masters := strings.Split(*mf.peers, ",")
|
||||
masterGrpcAddresses, err := pb.ParseServersToGrpcAddresses(masters)
|
||||
if err != nil {
|
||||
glog.V(0).Infof("ParseFilerGrpcAddress: %v", err)
|
||||
return
|
||||
}
|
||||
masters := pb.ServerAddresses(*mf.peers).ToAddresses()
|
||||
|
||||
var err error
|
||||
grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.master")
|
||||
for i := 0; i < 10; i++ {
|
||||
err = pb.WithOneOfGrpcMasterClients(masterGrpcAddresses, grpcDialOption, func(client master_pb.SeaweedClient) error {
|
||||
err = pb.WithOneOfGrpcMasterClients(masters, grpcDialOption, func(client master_pb.SeaweedClient) error {
|
||||
resp, err := client.GetMasterConfiguration(context.Background(), &master_pb.GetMasterConfigurationRequest{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("get master grpc address %v configuration: %v", masterGrpcAddresses, err)
|
||||
return fmt.Errorf("get master grpc address %v configuration: %v", masters, err)
|
||||
}
|
||||
masterOptions.defaultReplication = &resp.DefaultReplication
|
||||
masterOptions.volumeSizeLimitMB = aws.Uint(uint(resp.VolumeSizeLimitMB))
|
||||
@ -99,13 +94,13 @@ func startMasterFollower(masterOptions MasterOptions) {
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
glog.V(0).Infof("failed to talk to filer %v: %v", masterGrpcAddresses, err)
|
||||
glog.V(0).Infof("failed to talk to filer %v: %v", masters, err)
|
||||
glog.V(0).Infof("wait for %d seconds ...", i+1)
|
||||
time.Sleep(time.Duration(i+1) * time.Second)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
glog.Errorf("failed to talk to filer %v: %v", masterGrpcAddresses, err)
|
||||
glog.Errorf("failed to talk to filer %v: %v", masters, err)
|
||||
return
|
||||
}
|
||||
|
||||
@ -114,7 +109,7 @@ func startMasterFollower(masterOptions MasterOptions) {
|
||||
|
||||
r := mux.NewRouter()
|
||||
ms := weed_server.NewMasterServer(r, option, masters)
|
||||
listeningAddress := *masterOptions.ipBind + ":" + strconv.Itoa(*masterOptions.port)
|
||||
listeningAddress := util.JoinHostPort(*masterOptions.ipBind, *masterOptions.port)
|
||||
glog.V(0).Infof("Start Seaweed Master %s at %s", util.Version(), listeningAddress)
|
||||
masterListener, e := util.NewListener(listeningAddress, 0)
|
||||
if e != nil {
|
||||
@ -122,8 +117,8 @@ func startMasterFollower(masterOptions MasterOptions) {
|
||||
}
|
||||
|
||||
// starting grpc server
|
||||
grpcPort := *masterOptions.port + 10000
|
||||
grpcL, err := util.NewListener(*masterOptions.ipBind+":"+strconv.Itoa(grpcPort), 0)
|
||||
grpcPort := *masterOptions.portGrpc
|
||||
grpcL, err := util.NewListener(util.JoinHostPort(*masterOptions.ipBind, grpcPort), 0)
|
||||
if err != nil {
|
||||
glog.Fatalf("master failed to listen on grpc port %d: %v", grpcPort, err)
|
||||
}
|
||||
|
@ -1,6 +1,5 @@
|
||||
// +build !linux
|
||||
// +build !darwin
|
||||
// +build !freebsd
|
||||
//go:build !linux && !darwin && !freebsd
|
||||
// +build !linux,!darwin,!freebsd
|
||||
|
||||
package command
|
||||
|
||||
|
@ -1,3 +1,4 @@
|
||||
//go:build linux || darwin || freebsd
|
||||
// +build linux darwin freebsd
|
||||
|
||||
package command
|
||||
@ -69,35 +70,30 @@ func getParentInode(mountDir string) (uint64, error) {
|
||||
|
||||
func RunMount(option *MountOptions, umask os.FileMode) bool {
|
||||
|
||||
filers := strings.Split(*option.filer, ",")
|
||||
// parse filer grpc address
|
||||
filerGrpcAddresses, err := pb.ParseServersToGrpcAddresses(filers)
|
||||
if err != nil {
|
||||
glog.V(0).Infof("ParseFilerGrpcAddress: %v", err)
|
||||
return true
|
||||
}
|
||||
filerAddresses := pb.ServerAddresses(*option.filer).ToAddresses()
|
||||
|
||||
util.LoadConfiguration("security", false)
|
||||
// try to connect to filer, filerBucketsPath may be useful later
|
||||
grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
|
||||
var cipher bool
|
||||
var err error
|
||||
for i := 0; i < 10; i++ {
|
||||
err = pb.WithOneOfGrpcFilerClients(filerGrpcAddresses, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
|
||||
err = pb.WithOneOfGrpcFilerClients(filerAddresses, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
|
||||
resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("get filer grpc address %v configuration: %v", filerGrpcAddresses, err)
|
||||
return fmt.Errorf("get filer grpc address %v configuration: %v", filerAddresses, err)
|
||||
}
|
||||
cipher = resp.Cipher
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
glog.V(0).Infof("failed to talk to filer %v: %v", filerGrpcAddresses, err)
|
||||
glog.V(0).Infof("failed to talk to filer %v: %v", filerAddresses, err)
|
||||
glog.V(0).Infof("wait for %d seconds ...", i+1)
|
||||
time.Sleep(time.Duration(i+1) * time.Second)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
glog.Errorf("failed to talk to filer %v: %v", filerGrpcAddresses, err)
|
||||
glog.Errorf("failed to talk to filer %v: %v", filerAddresses, err)
|
||||
return true
|
||||
}
|
||||
|
||||
@ -205,8 +201,7 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
|
||||
|
||||
seaweedFileSystem := filesys.NewSeaweedFileSystem(&filesys.Option{
|
||||
MountDirectory: dir,
|
||||
FilerAddresses: filers,
|
||||
FilerGrpcAddresses: filerGrpcAddresses,
|
||||
FilerAddresses: filerAddresses,
|
||||
GrpcDialOption: grpcDialOption,
|
||||
FilerMountRootPath: mountRoot,
|
||||
Collection: *option.collection,
|
||||
|
@ -3,7 +3,6 @@ package command
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc/reflection"
|
||||
@ -63,35 +62,31 @@ func (msgBrokerOpt *MessageBrokerOptions) startQueueServer() bool {
|
||||
|
||||
grace.SetupProfiling(*messageBrokerStandaloneOptions.cpuprofile, *messageBrokerStandaloneOptions.memprofile)
|
||||
|
||||
filerGrpcAddress, err := pb.ParseServerToGrpcAddress(*msgBrokerOpt.filer)
|
||||
if err != nil {
|
||||
glog.Fatal(err)
|
||||
return false
|
||||
}
|
||||
filerAddress := pb.ServerAddress(*msgBrokerOpt.filer)
|
||||
|
||||
grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.msg_broker")
|
||||
cipher := false
|
||||
|
||||
for {
|
||||
err = pb.WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
|
||||
err := pb.WithGrpcFilerClient(filerAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
|
||||
resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err)
|
||||
return fmt.Errorf("get filer %s configuration: %v", filerAddress, err)
|
||||
}
|
||||
cipher = resp.Cipher
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
glog.V(0).Infof("wait to connect to filer %s grpc address %s", *msgBrokerOpt.filer, filerGrpcAddress)
|
||||
glog.V(0).Infof("wait to connect to filer %s grpc address %s", *msgBrokerOpt.filer, filerAddress.ToGrpcAddress())
|
||||
time.Sleep(time.Second)
|
||||
} else {
|
||||
glog.V(0).Infof("connected to filer %s grpc address %s", *msgBrokerOpt.filer, filerGrpcAddress)
|
||||
glog.V(0).Infof("connected to filer %s grpc address %s", *msgBrokerOpt.filer, filerAddress.ToGrpcAddress())
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
qs, err := broker.NewMessageBroker(&broker.MessageBrokerOption{
|
||||
Filers: []string{*msgBrokerOpt.filer},
|
||||
Filers: []pb.ServerAddress{filerAddress},
|
||||
DefaultReplication: "",
|
||||
MaxMB: 0,
|
||||
Ip: *msgBrokerOpt.ip,
|
||||
@ -100,7 +95,7 @@ func (msgBrokerOpt *MessageBrokerOptions) startQueueServer() bool {
|
||||
}, grpcDialOption)
|
||||
|
||||
// start grpc listener
|
||||
grpcL, err := util.NewListener(":"+strconv.Itoa(*msgBrokerOpt.port), 0)
|
||||
grpcL, err := util.NewListener(util.JoinHostPort("", *msgBrokerOpt.port), 0)
|
||||
if err != nil {
|
||||
glog.Fatalf("failed to listen on grpc port %d: %v", *msgBrokerOpt.port, err)
|
||||
}
|
||||
|
@ -137,11 +137,7 @@ func runS3(cmd *Command, args []string) bool {
|
||||
|
||||
func (s3opt *S3Options) startS3Server() bool {
|
||||
|
||||
filerGrpcAddress, err := pb.ParseServerToGrpcAddress(*s3opt.filer)
|
||||
if err != nil {
|
||||
glog.Fatal(err)
|
||||
return false
|
||||
}
|
||||
filerAddress := pb.ServerAddress(*s3opt.filer)
|
||||
|
||||
filerBucketsPath := "/buckets"
|
||||
|
||||
@ -152,10 +148,10 @@ func (s3opt *S3Options) startS3Server() bool {
|
||||
var metricsIntervalSec int
|
||||
|
||||
for {
|
||||
err = pb.WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
|
||||
err := pb.WithGrpcFilerClient(filerAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
|
||||
resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err)
|
||||
return fmt.Errorf("get filer %s configuration: %v", filerAddress, err)
|
||||
}
|
||||
filerBucketsPath = resp.DirBuckets
|
||||
metricsAddress, metricsIntervalSec = resp.MetricsAddress, int(resp.MetricsIntervalSec)
|
||||
@ -163,10 +159,10 @@ func (s3opt *S3Options) startS3Server() bool {
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
glog.V(0).Infof("wait to connect to filer %s grpc address %s", *s3opt.filer, filerGrpcAddress)
|
||||
glog.V(0).Infof("wait to connect to filer %s grpc address %s", *s3opt.filer, filerAddress.ToGrpcAddress())
|
||||
time.Sleep(time.Second)
|
||||
} else {
|
||||
glog.V(0).Infof("connected to filer %s grpc address %s", *s3opt.filer, filerGrpcAddress)
|
||||
glog.V(0).Infof("connected to filer %s grpc address %s", *s3opt.filer, filerAddress.ToGrpcAddress())
|
||||
break
|
||||
}
|
||||
}
|
||||
@ -176,9 +172,8 @@ func (s3opt *S3Options) startS3Server() bool {
|
||||
router := mux.NewRouter().SkipClean(true)
|
||||
|
||||
_, s3ApiServer_err := s3api.NewS3ApiServer(router, &s3api.S3ApiServerOption{
|
||||
Filer: *s3opt.filer,
|
||||
Filer: filerAddress,
|
||||
Port: *s3opt.port,
|
||||
FilerGrpcAddress: filerGrpcAddress,
|
||||
Config: *s3opt.config,
|
||||
DomainName: *s3opt.domainName,
|
||||
BucketsPath: filerBucketsPath,
|
||||
|
@ -44,7 +44,7 @@ dbFile = "./filer.db" # sqlite db file
|
||||
# CREATE TABLE IF NOT EXISTS filemeta (
|
||||
# dirhash BIGINT COMMENT 'first 64 bits of MD5 hash value of directory field',
|
||||
# name VARCHAR(1000) BINARY COMMENT 'directory or file name',
|
||||
# directory TEXT COMMENT 'full path to parent directory',
|
||||
# directory TEXT BINARY COMMENT 'full path to parent directory',
|
||||
# meta LONGBLOB,
|
||||
# PRIMARY KEY (dirhash, name)
|
||||
# ) DEFAULT CHARSET=utf8;
|
||||
@ -69,7 +69,7 @@ createTable = """
|
||||
CREATE TABLE IF NOT EXISTS ` + "`%s`" + ` (
|
||||
dirhash BIGINT,
|
||||
name VARCHAR(1000) BINARY,
|
||||
directory TEXT,
|
||||
directory TEXT BINARY,
|
||||
meta LONGBLOB,
|
||||
PRIMARY KEY (dirhash, name)
|
||||
) DEFAULT CHARSET=utf8;
|
||||
@ -230,3 +230,11 @@ location = "/tmp/"
|
||||
address = "localhost:6379"
|
||||
password = ""
|
||||
database = 1
|
||||
|
||||
[tikv]
|
||||
enabled = false
|
||||
# If you have multiple pd addresses, separate them with ',':
|
||||
# pdaddrs = "pdhost1:2379, pdhost2:2379, pdhost3:2379"
|
||||
pdaddrs = "localhost:2379"
|
||||
# Concurrency for TiKV delete range
|
||||
deleterange_concurrency = 1
|
||||
|
@ -2,6 +2,7 @@ package command
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/chrislusf/seaweedfs/weed/pb"
|
||||
"github.com/chrislusf/seaweedfs/weed/util/grace"
|
||||
"net/http"
|
||||
"os"
|
||||
@ -85,6 +86,7 @@ func init() {
|
||||
serverOptions.debugPort = cmdServer.Flag.Int("debug.port", 6060, "http port for debugging")
|
||||
|
||||
masterOptions.port = cmdServer.Flag.Int("master.port", 9333, "master server http listen port")
|
||||
masterOptions.portGrpc = cmdServer.Flag.Int("master.port.grpc", 19333, "master server grpc listen port")
|
||||
masterOptions.metaFolder = cmdServer.Flag.String("master.dir", "", "data directory to store meta data, default to same as -dir specified")
|
||||
masterOptions.peers = cmdServer.Flag.String("master.peers", "", "all master nodes in comma separated ip:masterPort list")
|
||||
masterOptions.volumeSizeLimitMB = cmdServer.Flag.Uint("master.volumeSizeLimitMB", 30*1000, "Master stops directing writes to oversized volumes.")
|
||||
@ -97,6 +99,7 @@ func init() {
|
||||
|
||||
filerOptions.collection = cmdServer.Flag.String("filer.collection", "", "all data will be stored in this collection")
|
||||
filerOptions.port = cmdServer.Flag.Int("filer.port", 8888, "filer server http listen port")
|
||||
filerOptions.portGrpc = cmdServer.Flag.Int("filer.port.grpc", 18888, "filer server grpc listen port")
|
||||
filerOptions.publicPort = cmdServer.Flag.Int("filer.port.public", 0, "filer server public http listen port")
|
||||
filerOptions.defaultReplicaPlacement = cmdServer.Flag.String("filer.defaultReplicaPlacement", "", "default replication type. If not specified, use master setting.")
|
||||
filerOptions.disableDirListing = cmdServer.Flag.Bool("filer.disableDirListing", false, "turn off directory listing")
|
||||
@ -108,6 +111,7 @@ func init() {
|
||||
filerOptions.concurrentUploadLimitMB = cmdServer.Flag.Int("filer.concurrentUploadLimitMB", 64, "limit total concurrent upload size")
|
||||
|
||||
serverOptions.v.port = cmdServer.Flag.Int("volume.port", 8080, "volume server http listen port")
|
||||
serverOptions.v.portGrpc = cmdServer.Flag.Int("volume.port.grpc", 18080, "volume server grpc listen port")
|
||||
serverOptions.v.publicPort = cmdServer.Flag.Int("volume.port.public", 0, "volume server public port")
|
||||
serverOptions.v.indexType = cmdServer.Flag.String("volume.index", "memory", "Choose [memory|leveldb|leveldbMedium|leveldbLarge] mode for memory~performance balance.")
|
||||
serverOptions.v.diskType = cmdServer.Flag.String("volume.disk", "", "[hdd|ssd|<tag>] hard drive or solid state drive or any tag")
|
||||
@ -164,21 +168,16 @@ func runServer(cmd *Command, args []string) bool {
|
||||
*isStartingFiler = true
|
||||
}
|
||||
|
||||
if *isStartingMasterServer {
|
||||
_, peerList := checkPeers(*serverIp, *masterOptions.port, *masterOptions.peers)
|
||||
peers := strings.Join(peerList, ",")
|
||||
masterOptions.peers = &peers
|
||||
}
|
||||
|
||||
// ip address
|
||||
masterOptions.ip = serverIp
|
||||
masterOptions.ipBind = serverBindIp
|
||||
filerOptions.masters = masterOptions.peers
|
||||
_, masters := checkPeers(*masterOptions.ip, *masterOptions.port, *masterOptions.portGrpc, *masterOptions.peers)
|
||||
filerOptions.masters = masters
|
||||
filerOptions.ip = serverIp
|
||||
filerOptions.bindIp = serverBindIp
|
||||
serverOptions.v.ip = serverIp
|
||||
serverOptions.v.bindIp = serverBindIp
|
||||
serverOptions.v.masters = masterOptions.peers
|
||||
serverOptions.v.masters = masters
|
||||
serverOptions.v.idleConnectionTimeout = serverTimeout
|
||||
serverOptions.v.dataCenter = serverDataCenter
|
||||
serverOptions.v.rack = serverRack
|
||||
@ -194,7 +193,7 @@ func runServer(cmd *Command, args []string) bool {
|
||||
filerOptions.disableHttp = serverDisableHttp
|
||||
masterOptions.disableHttp = serverDisableHttp
|
||||
|
||||
filerAddress := fmt.Sprintf("%s:%d", *serverIp, *filerOptions.port)
|
||||
filerAddress := string(pb.NewServerAddress(*serverIp, *filerOptions.port, *filerOptions.portGrpc))
|
||||
s3Options.filer = &filerAddress
|
||||
webdavOptions.filer = &filerAddress
|
||||
msgBrokerOptions.filer = &filerAddress
|
||||
|
@ -2,6 +2,7 @@ package command
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/chrislusf/seaweedfs/weed/pb"
|
||||
|
||||
"github.com/chrislusf/seaweedfs/weed/security"
|
||||
"github.com/chrislusf/seaweedfs/weed/shell"
|
||||
@ -53,13 +54,7 @@ func runShell(command *Command, args []string) bool {
|
||||
|
||||
fmt.Printf("master: %s filer: %s\n", *shellOptions.Masters, *shellInitialFiler)
|
||||
|
||||
var err error
|
||||
shellOptions.FilerHost, shellOptions.FilerPort, err = util.ParseHostPort(*shellInitialFiler)
|
||||
shellOptions.FilerAddress = *shellInitialFiler
|
||||
if err != nil {
|
||||
fmt.Printf("failed to parse filer %s: %v\n", *shellInitialFiler, err)
|
||||
return false
|
||||
}
|
||||
shellOptions.FilerAddress = pb.ServerAddress(*shellInitialFiler)
|
||||
shellOptions.Directory = "/"
|
||||
|
||||
shell.RunShell(shellOptions)
|
||||
|
@ -71,7 +71,7 @@ func runUpload(cmd *Command, args []string) bool {
|
||||
util.LoadConfiguration("security", false)
|
||||
grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
|
||||
|
||||
defaultReplication, err := readMasterConfiguration(grpcDialOption, *upload.master)
|
||||
defaultReplication, err := readMasterConfiguration(grpcDialOption, pb.ServerAddress(*upload.master))
|
||||
if err != nil {
|
||||
fmt.Printf("upload: %v", err)
|
||||
return false
|
||||
@ -96,7 +96,7 @@ func runUpload(cmd *Command, args []string) bool {
|
||||
if e != nil {
|
||||
return e
|
||||
}
|
||||
results, e := operation.SubmitFiles(func() string { return *upload.master }, grpcDialOption, parts, *upload.replication, *upload.collection, *upload.dataCenter, *upload.ttl, *upload.diskType, *upload.maxMB, *upload.usePublicUrl)
|
||||
results, e := operation.SubmitFiles(func() pb.ServerAddress { return pb.ServerAddress(*upload.master) }, grpcDialOption, parts, *upload.replication, *upload.collection, *upload.dataCenter, *upload.ttl, *upload.diskType, *upload.maxMB, *upload.usePublicUrl)
|
||||
bytes, _ := json.Marshal(results)
|
||||
fmt.Println(string(bytes))
|
||||
if e != nil {
|
||||
@ -118,7 +118,7 @@ func runUpload(cmd *Command, args []string) bool {
|
||||
fmt.Println(e.Error())
|
||||
return false
|
||||
}
|
||||
results, err := operation.SubmitFiles(func() string { return *upload.master }, grpcDialOption, parts, *upload.replication, *upload.collection, *upload.dataCenter, *upload.ttl, *upload.diskType, *upload.maxMB, *upload.usePublicUrl)
|
||||
results, err := operation.SubmitFiles(func() pb.ServerAddress { return pb.ServerAddress(*upload.master) }, grpcDialOption, parts, *upload.replication, *upload.collection, *upload.dataCenter, *upload.ttl, *upload.diskType, *upload.maxMB, *upload.usePublicUrl)
|
||||
if err != nil {
|
||||
fmt.Println(err.Error())
|
||||
return false
|
||||
@ -129,7 +129,7 @@ func runUpload(cmd *Command, args []string) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func readMasterConfiguration(grpcDialOption grpc.DialOption, masterAddress string) (replication string, err error) {
|
||||
func readMasterConfiguration(grpcDialOption grpc.DialOption, masterAddress pb.ServerAddress) (replication string, err error) {
|
||||
err = pb.WithMasterClient(masterAddress, grpcDialOption, func(client master_pb.SeaweedClient) error {
|
||||
resp, err := client.GetMasterConfiguration(context.Background(), &master_pb.GetMasterConfigurationRequest{})
|
||||
if err != nil {
|
||||
|
@ -36,6 +36,7 @@ var (
|
||||
|
||||
type VolumeServerOptions struct {
|
||||
port *int
|
||||
portGrpc *int
|
||||
publicPort *int
|
||||
folders []string
|
||||
folderMaxLimits []int
|
||||
@ -43,7 +44,8 @@ type VolumeServerOptions struct {
|
||||
ip *string
|
||||
publicUrl *string
|
||||
bindIp *string
|
||||
masters *string
|
||||
mastersString *string
|
||||
masters []pb.ServerAddress
|
||||
idleConnectionTimeout *int
|
||||
dataCenter *string
|
||||
rack *string
|
||||
@ -68,11 +70,12 @@ type VolumeServerOptions struct {
|
||||
func init() {
|
||||
cmdVolume.Run = runVolume // break init cycle
|
||||
v.port = cmdVolume.Flag.Int("port", 8080, "http listen port")
|
||||
v.portGrpc = cmdVolume.Flag.Int("port.grpc", 18080, "grpc listen port")
|
||||
v.publicPort = cmdVolume.Flag.Int("port.public", 0, "port opened to public")
|
||||
v.ip = cmdVolume.Flag.String("ip", util.DetectedHostAddress(), "ip or server name, also used as identifier")
|
||||
v.publicUrl = cmdVolume.Flag.String("publicUrl", "", "Publicly accessible address")
|
||||
v.bindIp = cmdVolume.Flag.String("ip.bind", "", "ip address to bind to")
|
||||
v.masters = cmdVolume.Flag.String("mserver", "localhost:9333", "comma-separated master servers")
|
||||
v.mastersString = cmdVolume.Flag.String("mserver", "localhost:9333", "comma-separated master servers")
|
||||
v.preStopSeconds = cmdVolume.Flag.Int("preStopSeconds", 10, "number of seconds between stop send heartbeats and stop volume server")
|
||||
// v.pulseSeconds = cmdVolume.Flag.Int("pulseSeconds", 5, "number of seconds between heartbeats, must be smaller than or equal to the master's setting")
|
||||
v.idleConnectionTimeout = cmdVolume.Flag.Int("idleTimeout", 30, "connection idle seconds")
|
||||
@ -123,6 +126,7 @@ func runVolume(cmd *Command, args []string) bool {
|
||||
go stats_collect.StartMetricsServer(*v.metricsHttpPort)
|
||||
|
||||
minFreeSpaces := util.MustParseMinFreeSpace(*minFreeSpace, *minFreeSpacePercent)
|
||||
v.masters = pb.ServerAddresses(*v.mastersString).ToAddresses()
|
||||
v.startVolumeServer(*volumeFolders, *maxVolumeCounts, *volumeWhiteListOption, minFreeSpaces)
|
||||
|
||||
return true
|
||||
@ -194,7 +198,7 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
|
||||
*v.publicPort = *v.port
|
||||
}
|
||||
if *v.publicUrl == "" {
|
||||
*v.publicUrl = *v.ip + ":" + strconv.Itoa(*v.publicPort)
|
||||
*v.publicUrl = util.JoinHostPort(*v.ip, *v.publicPort)
|
||||
}
|
||||
|
||||
volumeMux := http.NewServeMux()
|
||||
@ -221,14 +225,12 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
|
||||
volumeNeedleMapKind = storage.NeedleMapLevelDbLarge
|
||||
}
|
||||
|
||||
masters := *v.masters
|
||||
|
||||
volumeServer := weed_server.NewVolumeServer(volumeMux, publicVolumeMux,
|
||||
*v.ip, *v.port, *v.publicUrl,
|
||||
*v.ip, *v.port, *v.portGrpc, *v.publicUrl,
|
||||
v.folders, v.folderMaxLimits, minFreeSpaces, diskTypes,
|
||||
*v.idxFolder,
|
||||
volumeNeedleMapKind,
|
||||
strings.Split(masters, ","), 5, *v.dataCenter, *v.rack,
|
||||
v.masters, 5, *v.dataCenter, *v.rack,
|
||||
v.whiteList,
|
||||
*v.fixJpgOrientation, *v.readMode,
|
||||
*v.compactionMBPerSecond,
|
||||
@ -307,8 +309,8 @@ func (v VolumeServerOptions) isSeparatedPublicPort() bool {
|
||||
}
|
||||
|
||||
func (v VolumeServerOptions) startGrpcService(vs volume_server_pb.VolumeServerServer) *grpc.Server {
|
||||
grpcPort := *v.port + 10000
|
||||
grpcL, err := util.NewListener(*v.bindIp+":"+strconv.Itoa(grpcPort), 0)
|
||||
grpcPort := *v.portGrpc
|
||||
grpcL, err := util.NewListener(util.JoinHostPort(*v.bindIp, grpcPort), 0)
|
||||
if err != nil {
|
||||
glog.Fatalf("failed to listen on grpc port %d: %v", grpcPort, err)
|
||||
}
|
||||
@ -324,7 +326,7 @@ func (v VolumeServerOptions) startGrpcService(vs volume_server_pb.VolumeServerSe
|
||||
}
|
||||
|
||||
func (v VolumeServerOptions) startPublicHttpService(handler http.Handler) httpdown.Server {
|
||||
publicListeningAddress := *v.bindIp + ":" + strconv.Itoa(*v.publicPort)
|
||||
publicListeningAddress := util.JoinHostPort(*v.bindIp, *v.publicPort)
|
||||
glog.V(0).Infoln("Start Seaweed volume server", util.Version(), "public at", publicListeningAddress)
|
||||
publicListener, e := util.NewListener(publicListeningAddress, time.Duration(*v.idleConnectionTimeout)*time.Second)
|
||||
if e != nil {
|
||||
@ -351,7 +353,7 @@ func (v VolumeServerOptions) startClusterHttpService(handler http.Handler) httpd
|
||||
keyFile = viper.GetString("https.volume.key")
|
||||
}
|
||||
|
||||
listeningAddress := *v.bindIp + ":" + strconv.Itoa(*v.port)
|
||||
listeningAddress := util.JoinHostPort(*v.bindIp, *v.port)
|
||||
glog.V(0).Infof("Start Seaweed volume server %s at %s", util.Version(), listeningAddress)
|
||||
listener, e := util.NewListener(listeningAddress, time.Duration(*v.idleConnectionTimeout)*time.Second)
|
||||
if e != nil {
|
||||
@ -373,7 +375,7 @@ func (v VolumeServerOptions) startClusterHttpService(handler http.Handler) httpd
|
||||
}
|
||||
|
||||
func (v VolumeServerOptions) startTcpService(volumeServer *weed_server.VolumeServer) {
|
||||
listeningAddress := *v.bindIp + ":" + strconv.Itoa(*v.port+20000)
|
||||
listeningAddress := util.JoinHostPort(*v.bindIp, *v.port+20000)
|
||||
glog.V(0).Infoln("Start Seaweed volume server", util.Version(), "tcp at", listeningAddress)
|
||||
listener, e := util.NewListener(listeningAddress, 0)
|
||||
if e != nil {
|
||||
|
@ -78,46 +78,41 @@ func (wo *WebDavOption) startWebDav() bool {
|
||||
}
|
||||
|
||||
// parse filer grpc address
|
||||
filerGrpcAddress, err := pb.ParseServerToGrpcAddress(*wo.filer)
|
||||
if err != nil {
|
||||
glog.Fatal(err)
|
||||
return false
|
||||
}
|
||||
filerAddress := pb.ServerAddress(*wo.filer)
|
||||
|
||||
grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
|
||||
|
||||
var cipher bool
|
||||
// connect to filer
|
||||
for {
|
||||
err = pb.WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
|
||||
err := pb.WithGrpcFilerClient(filerAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
|
||||
resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err)
|
||||
return fmt.Errorf("get filer %s configuration: %v", filerAddress, err)
|
||||
}
|
||||
cipher = resp.Cipher
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
glog.V(0).Infof("wait to connect to filer %s grpc address %s", *wo.filer, filerGrpcAddress)
|
||||
glog.V(0).Infof("wait to connect to filer %s grpc address %s", *wo.filer, filerAddress.ToGrpcAddress())
|
||||
time.Sleep(time.Second)
|
||||
} else {
|
||||
glog.V(0).Infof("connected to filer %s grpc address %s", *wo.filer, filerGrpcAddress)
|
||||
glog.V(0).Infof("connected to filer %s grpc address %s", *wo.filer, filerAddress.ToGrpcAddress())
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
ws, webdavServer_err := weed_server.NewWebDavServer(&weed_server.WebDavOption{
|
||||
Filer: *wo.filer,
|
||||
FilerGrpcAddress: filerGrpcAddress,
|
||||
GrpcDialOption: grpcDialOption,
|
||||
Collection: *wo.collection,
|
||||
Replication: *wo.replication,
|
||||
DiskType: *wo.disk,
|
||||
Uid: uid,
|
||||
Gid: gid,
|
||||
Cipher: cipher,
|
||||
CacheDir: util.ResolvePath(*wo.cacheDir),
|
||||
CacheSizeMB: *wo.cacheSizeMB,
|
||||
Filer: filerAddress,
|
||||
GrpcDialOption: grpcDialOption,
|
||||
Collection: *wo.collection,
|
||||
Replication: *wo.replication,
|
||||
DiskType: *wo.disk,
|
||||
Uid: uid,
|
||||
Gid: gid,
|
||||
Cipher: cipher,
|
||||
CacheDir: util.ResolvePath(*wo.cacheDir),
|
||||
CacheSizeMB: *wo.cacheSizeMB,
|
||||
})
|
||||
if webdavServer_err != nil {
|
||||
glog.Fatalf("WebDav Server startup error: %v", webdavServer_err)
|
||||
|