Merge branch 'seaweedfs:master' into feature/suport_dameng_as_filer_store

commit b4cfe0ec8b
Vegetable540, 2024-11-08 17:15:42 +08:00, committed by GitHub
GPG Key ID: B5690EEEBB952194 (no known key found for this signature in database)
220 changed files with 5358 additions and 3905 deletions


@ -44,7 +44,7 @@ jobs:
run: echo BUILD_TIME=$(date -u +%Y%m%d-%H%M) >> ${GITHUB_ENV}
- name: Go Release Binaries Large Disk
uses: wangyoucao577/go-release-action@6ac7dba1f9e61850053324549cb6bc88e4b473d2 # v1.22
uses: wangyoucao577/go-release-action@2aa2977ad6a4534f9179e22bd0ff146a1e1d3466 # v1.22
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }}
@ -60,7 +60,7 @@ jobs:
asset_name: "weed-large-disk-${{ env.BUILD_TIME }}-${{ matrix.goos }}-${{ matrix.goarch }}"
- name: Go Release Binaries Normal Volume Size
uses: wangyoucao577/go-release-action@6ac7dba1f9e61850053324549cb6bc88e4b473d2 # v1.22
uses: wangyoucao577/go-release-action@2aa2977ad6a4534f9179e22bd0ff146a1e1d3466 # v1.22
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }}
@ -93,7 +93,7 @@ jobs:
run: echo BUILD_TIME=$(date -u +%Y%m%d-%H%M) >> ${GITHUB_ENV}
- name: Go Release Binaries Large Disk
uses: wangyoucao577/go-release-action@6ac7dba1f9e61850053324549cb6bc88e4b473d2 # v1.22
uses: wangyoucao577/go-release-action@2aa2977ad6a4534f9179e22bd0ff146a1e1d3466 # v1.22
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }}
@ -109,7 +109,7 @@ jobs:
asset_name: "weed-large-disk-${{ env.BUILD_TIME }}-${{ matrix.goos }}-${{ matrix.goarch }}"
- name: Go Release Binaries Normal Volume Size
uses: wangyoucao577/go-release-action@6ac7dba1f9e61850053324549cb6bc88e4b473d2 # v1.22
uses: wangyoucao577/go-release-action@2aa2977ad6a4534f9179e22bd0ff146a1e1d3466 # v1.22
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }}


@ -30,7 +30,7 @@ jobs:
# Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2
- name: Go Release Binaries Normal Volume Size
uses: wangyoucao577/go-release-action@6ac7dba1f9e61850053324549cb6bc88e4b473d2 # v1.22
uses: wangyoucao577/go-release-action@2aa2977ad6a4534f9179e22bd0ff146a1e1d3466 # v1.22
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }}
@ -44,7 +44,7 @@ jobs:
binary_name: weed
asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}"
- name: Go Release Large Disk Binaries
uses: wangyoucao577/go-release-action@6ac7dba1f9e61850053324549cb6bc88e4b473d2 # v1.22
uses: wangyoucao577/go-release-action@2aa2977ad6a4534f9179e22bd0ff146a1e1d3466 # v1.22
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }}


@ -30,7 +30,7 @@ jobs:
# Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2
- name: Go Release Binaries Normal Volume Size
uses: wangyoucao577/go-release-action@6ac7dba1f9e61850053324549cb6bc88e4b473d2 # v1.22
uses: wangyoucao577/go-release-action@2aa2977ad6a4534f9179e22bd0ff146a1e1d3466 # v1.22
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }}
@ -44,7 +44,7 @@ jobs:
binary_name: weed
asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}"
- name: Go Release Large Disk Binaries
uses: wangyoucao577/go-release-action@6ac7dba1f9e61850053324549cb6bc88e4b473d2 # v1.22
uses: wangyoucao577/go-release-action@2aa2977ad6a4534f9179e22bd0ff146a1e1d3466 # v1.22
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }}


@ -30,7 +30,7 @@ jobs:
# Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2
- name: Go Release Binaries Normal Volume Size
uses: wangyoucao577/go-release-action@6ac7dba1f9e61850053324549cb6bc88e4b473d2 # v1.22
uses: wangyoucao577/go-release-action@2aa2977ad6a4534f9179e22bd0ff146a1e1d3466 # v1.22
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }}
@ -44,7 +44,7 @@ jobs:
binary_name: weed
asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}"
- name: Go Release Large Disk Binaries
uses: wangyoucao577/go-release-action@6ac7dba1f9e61850053324549cb6bc88e4b473d2 # v1.22
uses: wangyoucao577/go-release-action@2aa2977ad6a4534f9179e22bd0ff146a1e1d3466 # v1.22
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }}


@ -30,7 +30,7 @@ jobs:
# Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2
- name: Go Release Binaries Normal Volume Size
uses: wangyoucao577/go-release-action@6ac7dba1f9e61850053324549cb6bc88e4b473d2 # v1.22
uses: wangyoucao577/go-release-action@2aa2977ad6a4534f9179e22bd0ff146a1e1d3466 # v1.22
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }}
@ -44,7 +44,7 @@ jobs:
binary_name: weed
asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}"
- name: Go Release Large Disk Binaries
uses: wangyoucao577/go-release-action@6ac7dba1f9e61850053324549cb6bc88e4b473d2 # v1.22
uses: wangyoucao577/go-release-action@2aa2977ad6a4534f9179e22bd0ff146a1e1d3466 # v1.22
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }}


@ -30,13 +30,13 @@ jobs:
# Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2
- name: Go Release Binaries Normal Volume Size
uses: wangyoucao577/go-release-action@6ac7dba1f9e61850053324549cb6bc88e4b473d2 # v1.22
uses: wangyoucao577/go-release-action@2aa2977ad6a4534f9179e22bd0ff146a1e1d3466 # v1.22
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }}
goarch: ${{ matrix.goarch }}
overwrite: true
build_flags: -tags elastic,gocdk,sqlite,ydb,tikv,rclone
build_flags: -tags elastic,gocdk,rclone,sqlite,tikv,ydb
pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
# build_flags: -tags 5BytesOffset # optional, default is
ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util.COMMIT=${{github.sha}}
@ -45,14 +45,14 @@ jobs:
binary_name: weed
asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}_full"
- name: Go Release Large Disk Binaries
uses: wangyoucao577/go-release-action@6ac7dba1f9e61850053324549cb6bc88e4b473d2 # v1.22
uses: wangyoucao577/go-release-action@2aa2977ad6a4534f9179e22bd0ff146a1e1d3466 # v1.22
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }}
goarch: ${{ matrix.goarch }}
overwrite: true
pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
build_flags: -tags 5BytesOffset,elastic,gocdk,sqlite,ydb,tikv,rclone
build_flags: -tags 5BytesOffset,elastic,gocdk,rclone,sqlite,tikv,ydb
ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util.COMMIT=${{github.sha}}
# Where to run `go build .`
project_path: weed
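
The tag lists above were only reordered alphabetically (Go build tags are order-insensitive), so the produced binary is unchanged. For reference, the full-featured build this workflow performs corresponds roughly to the following local invocation (a sketch assuming a checkout at the repository root and a recent Go toolchain; the output name weed-full and the git rev-parse substitution for COMMIT are illustrative, not part of the workflow):

    export CGO_ENABLED=0 GODEBUG=http2client=0
    go build -tags elastic,gocdk,rclone,sqlite,tikv,ydb \
        -ldflags "-s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util.COMMIT=$(git rev-parse HEAD)" \
        -o weed-full ./weed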


@ -30,7 +30,7 @@ jobs:
# Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2
- name: Go Release Binaries Normal Volume Size
uses: wangyoucao577/go-release-action@6ac7dba1f9e61850053324549cb6bc88e4b473d2 # v1.22
uses: wangyoucao577/go-release-action@2aa2977ad6a4534f9179e22bd0ff146a1e1d3466 # v1.22
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }}
@ -44,7 +44,7 @@ jobs:
binary_name: weed
asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}"
- name: Go Release Large Disk Binaries
uses: wangyoucao577/go-release-action@6ac7dba1f9e61850053324549cb6bc88e4b473d2 # v1.22
uses: wangyoucao577/go-release-action@2aa2977ad6a4534f9179e22bd0ff146a1e1d3466 # v1.22
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }}


@ -36,7 +36,7 @@ jobs:
uses: docker/setup-qemu-action@49b3bc8e6bdd4a60e6116a5414239cba5943d3cf # v1
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v1
uses: docker/setup-buildx-action@c47758b77c9736f4b2ef4073d4d51994fabfe349 # v1
with:
buildkitd-flags: "--debug"
-
@ -56,7 +56,7 @@ jobs:
password: ${{ secrets.GHCR_TOKEN }}
-
name: Build
uses: docker/build-push-action@5cd11c3a4ced054e52742c5fd54dca954e0edd85 # v2
uses: docker/build-push-action@4f58ea79222b3b9dc2c8bbdd6debcef730109a75 # v2
with:
context: ./docker
push: ${{ github.event_name != 'pull_request' }}


@ -37,7 +37,7 @@ jobs:
uses: docker/setup-qemu-action@49b3bc8e6bdd4a60e6116a5414239cba5943d3cf # v1
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v1
uses: docker/setup-buildx-action@c47758b77c9736f4b2ef4073d4d51994fabfe349 # v1
with:
buildkitd-flags: "--debug"
-
@ -57,7 +57,7 @@ jobs:
password: ${{ secrets.GHCR_TOKEN }}
-
name: Build
uses: docker/build-push-action@5cd11c3a4ced054e52742c5fd54dca954e0edd85 # v2
uses: docker/build-push-action@4f58ea79222b3b9dc2c8bbdd6debcef730109a75 # v2
with:
context: ./docker
push: ${{ github.event_name != 'pull_request' }}


@ -37,7 +37,7 @@ jobs:
uses: docker/setup-qemu-action@49b3bc8e6bdd4a60e6116a5414239cba5943d3cf # v1
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v1
uses: docker/setup-buildx-action@c47758b77c9736f4b2ef4073d4d51994fabfe349 # v1
-
name: Login to Docker Hub
if: github.event_name != 'pull_request'
@ -47,7 +47,7 @@ jobs:
password: ${{ secrets.DOCKER_PASSWORD }}
-
name: Build
uses: docker/build-push-action@5cd11c3a4ced054e52742c5fd54dca954e0edd85 # v2
uses: docker/build-push-action@4f58ea79222b3b9dc2c8bbdd6debcef730109a75 # v2
with:
context: ./docker
push: ${{ github.event_name != 'pull_request' }}


@ -38,7 +38,7 @@ jobs:
uses: docker/setup-qemu-action@49b3bc8e6bdd4a60e6116a5414239cba5943d3cf # v1
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v1
uses: docker/setup-buildx-action@c47758b77c9736f4b2ef4073d4d51994fabfe349 # v1
-
name: Login to Docker Hub
if: github.event_name != 'pull_request'
@ -48,7 +48,7 @@ jobs:
password: ${{ secrets.DOCKER_PASSWORD }}
-
name: Build
uses: docker/build-push-action@5cd11c3a4ced054e52742c5fd54dca954e0edd85 # v2
uses: docker/build-push-action@4f58ea79222b3b9dc2c8bbdd6debcef730109a75 # v2
with:
context: ./docker
push: ${{ github.event_name != 'pull_request' }}


@ -38,7 +38,7 @@ jobs:
uses: docker/setup-qemu-action@49b3bc8e6bdd4a60e6116a5414239cba5943d3cf # v1
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v1
uses: docker/setup-buildx-action@c47758b77c9736f4b2ef4073d4d51994fabfe349 # v1
-
name: Login to Docker Hub
if: github.event_name != 'pull_request'
@ -48,7 +48,7 @@ jobs:
password: ${{ secrets.DOCKER_PASSWORD }}
-
name: Build
uses: docker/build-push-action@5cd11c3a4ced054e52742c5fd54dca954e0edd85 # v2
uses: docker/build-push-action@4f58ea79222b3b9dc2c8bbdd6debcef730109a75 # v2
with:
context: ./docker
push: ${{ github.event_name != 'pull_request' }}


@ -37,7 +37,7 @@ jobs:
uses: docker/setup-qemu-action@49b3bc8e6bdd4a60e6116a5414239cba5943d3cf # v1
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v1
uses: docker/setup-buildx-action@c47758b77c9736f4b2ef4073d4d51994fabfe349 # v1
-
name: Login to Docker Hub
if: github.event_name != 'pull_request'
@ -47,12 +47,12 @@ jobs:
password: ${{ secrets.DOCKER_PASSWORD }}
-
name: Build
uses: docker/build-push-action@5cd11c3a4ced054e52742c5fd54dca954e0edd85 # v2
uses: docker/build-push-action@4f58ea79222b3b9dc2c8bbdd6debcef730109a75 # v2
with:
context: ./docker
push: ${{ github.event_name != 'pull_request' }}
file: ./docker/Dockerfile.go_build
build-args: TAGS=elastic,gocdk,sqlite,ydb,tikv,rclone
build-args: TAGS=elastic,gocdk,rclone,sqlite,tikv,ydb
platforms: linux/amd64
tags: ${{ steps.docker_meta.outputs.tags }}
labels: ${{ steps.docker_meta.outputs.labels }}


@ -37,7 +37,7 @@ jobs:
uses: docker/setup-qemu-action@49b3bc8e6bdd4a60e6116a5414239cba5943d3cf # v1
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v1
uses: docker/setup-buildx-action@c47758b77c9736f4b2ef4073d4d51994fabfe349 # v1
-
name: Login to Docker Hub
if: github.event_name != 'pull_request'
@ -47,12 +47,12 @@ jobs:
password: ${{ secrets.DOCKER_PASSWORD }}
-
name: Build
uses: docker/build-push-action@5cd11c3a4ced054e52742c5fd54dca954e0edd85 # v2
uses: docker/build-push-action@4f58ea79222b3b9dc2c8bbdd6debcef730109a75 # v2
with:
context: ./docker
push: ${{ github.event_name != 'pull_request' }}
file: ./docker/Dockerfile.go_build
build-args: TAGS=5BytesOffset,elastic,gocdk,sqlite,ydb,tikv,rclone
build-args: TAGS=5BytesOffset,elastic,gocdk,rclone,sqlite,tikv,ydb
platforms: linux/amd64
tags: ${{ steps.docker_meta.outputs.tags }}
labels: ${{ steps.docker_meta.outputs.labels }}
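
Locally, the same image build (single platform, without pushing) can be approximated with plain docker build, mirroring the context, Dockerfile and build-args used by the pinned build-push-action step above; the image tag below is only an example:

    docker build \
        --build-arg TAGS=elastic,gocdk,rclone,sqlite,tikv,ydb \
        -f docker/Dockerfile.go_build \
        -t seaweedfs:local-full \
        ./docker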


@ -1,3 +1,5 @@
.PHONY: test
BINARY = weed
SOURCE_DIR = .

go.mod

@ -3,9 +3,9 @@ module github.com/seaweedfs/seaweedfs
go 1.22.0
require (
cloud.google.com/go v0.115.1 // indirect
cloud.google.com/go/pubsub v1.43.0
cloud.google.com/go/storage v1.43.0
cloud.google.com/go v0.116.0 // indirect
cloud.google.com/go/pubsub v1.45.1
cloud.google.com/go/storage v1.45.0
github.com/Azure/azure-pipeline-go v0.2.3
github.com/Azure/azure-storage-blob-go v0.15.0
github.com/Shopify/sarama v1.38.1
@ -32,7 +32,7 @@ require (
github.com/go-redsync/redsync/v4 v4.13.0
github.com/go-sql-driver/mysql v1.8.1
github.com/go-zookeeper/zk v1.0.3 // indirect
github.com/gocql/gocql v1.6.0
github.com/gocql/gocql v1.7.0
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.4
github.com/golang/snappy v0.0.4 // indirect
@ -56,7 +56,7 @@ require (
github.com/klauspost/reedsolomon v1.12.4
github.com/kurin/blazer v0.5.3
github.com/lib/pq v1.10.9
github.com/linxGnu/grocksdb v1.9.3
github.com/linxGnu/grocksdb v1.9.5
github.com/magiconair/properties v1.8.7 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-ieproxy v0.0.11 // indirect
@ -70,7 +70,7 @@ require (
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/posener/complete v1.2.3
github.com/pquerna/cachecontrol v0.2.0
github.com/prometheus/client_golang v1.20.3
github.com/prometheus/client_golang v1.20.5
github.com/prometheus/client_model v0.6.1 // indirect
github.com/prometheus/common v0.55.0 // indirect
github.com/prometheus/procfs v0.15.1
@ -85,7 +85,7 @@ require (
github.com/stretchr/testify v1.9.0
github.com/stvp/tempredis v0.0.0-20181119212430-b82af8480203
github.com/syndtr/goleveldb v1.0.1-0.20190318030020-c3a204f8e965
github.com/tidwall/gjson v1.17.3
github.com/tidwall/gjson v1.18.0
github.com/tidwall/match v1.1.1
github.com/tidwall/pretty v1.2.0 // indirect
github.com/tsuna/gohbase v0.0.0-20201125011725-348991136365
@ -99,58 +99,58 @@ require (
go.etcd.io/etcd/client/v3 v3.5.16
go.mongodb.org/mongo-driver v1.16.0
go.opencensus.io v0.24.0 // indirect
gocloud.dev v0.39.0
gocloud.dev/pubsub/natspubsub v0.39.0
gocloud.dev/pubsub/rabbitpubsub v0.39.0
golang.org/x/crypto v0.27.0 // indirect
gocloud.dev v0.40.0
gocloud.dev/pubsub/natspubsub v0.40.0
gocloud.dev/pubsub/rabbitpubsub v0.40.0
golang.org/x/crypto v0.28.0 // indirect
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56
golang.org/x/image v0.18.0
golang.org/x/net v0.29.0
golang.org/x/image v0.21.0
golang.org/x/net v0.30.0
golang.org/x/oauth2 v0.23.0 // indirect
golang.org/x/sys v0.25.0
golang.org/x/text v0.18.0 // indirect
golang.org/x/tools v0.25.0
golang.org/x/sys v0.26.0
golang.org/x/text v0.19.0 // indirect
golang.org/x/tools v0.26.0
golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9 // indirect
google.golang.org/api v0.198.0
google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 // indirect
google.golang.org/grpc v1.66.2
google.golang.org/protobuf v1.34.2
google.golang.org/api v0.203.0
google.golang.org/genproto v0.0.0-20241015192408-796eee8c2d53 // indirect
google.golang.org/grpc v1.67.1
google.golang.org/protobuf v1.35.1
gopkg.in/inf.v0 v0.9.1 // indirect
modernc.org/b v1.0.0 // indirect
modernc.org/mathutil v1.6.0
modernc.org/memory v1.8.0 // indirect
modernc.org/sqlite v1.33.0
modernc.org/sqlite v1.33.1
modernc.org/strutil v1.2.0
modernc.org/token v1.1.0 // indirect
)
require (
github.com/Jille/raft-grpc-transport v1.6.1
github.com/arangodb/go-driver v1.6.2
github.com/arangodb/go-driver v1.6.4
github.com/armon/go-metrics v0.4.1
github.com/aws/aws-sdk-go-v2 v1.31.0
github.com/aws/aws-sdk-go-v2/config v1.27.33
github.com/aws/aws-sdk-go-v2/credentials v1.17.32
github.com/aws/aws-sdk-go-v2/service/s3 v1.61.2
github.com/aws/aws-sdk-go-v2 v1.32.3
github.com/aws/aws-sdk-go-v2/config v1.28.0
github.com/aws/aws-sdk-go-v2/credentials v1.17.41
github.com/aws/aws-sdk-go-v2/service/s3 v1.66.2
github.com/cognusion/imaging v1.0.1
github.com/fluent/fluent-logger-golang v1.9.0
github.com/getsentry/sentry-go v0.29.0
github.com/getsentry/sentry-go v0.29.1
github.com/golang-jwt/jwt/v5 v5.2.1
github.com/google/flatbuffers/go v0.0.0-20230108230133-3b8644d32c50
github.com/hanwen/go-fuse/v2 v2.5.1
github.com/hanwen/go-fuse/v2 v2.6.1
github.com/hashicorp/raft v1.7.1
github.com/hashicorp/raft-boltdb/v2 v2.3.0
github.com/orcaman/concurrent-map/v2 v2.0.1
github.com/parquet-go/parquet-go v0.23.0
github.com/parquet-go/parquet-go v0.23.1-0.20241011155651-6446d1d0d2fe
github.com/rabbitmq/amqp091-go v1.10.0
github.com/rclone/rclone v1.68.0
github.com/rclone/rclone v1.68.1
github.com/rdleal/intervalst v1.4.0
github.com/redis/go-redis/v9 v9.6.1
github.com/schollz/progressbar/v3 v3.14.4
github.com/redis/go-redis/v9 v9.7.0
github.com/schollz/progressbar/v3 v3.16.0
github.com/shirou/gopsutil/v3 v3.24.5
github.com/tikv/client-go/v2 v2.0.7
github.com/ydb-platform/ydb-go-sdk-auth-environ v0.5.0
github.com/ydb-platform/ydb-go-sdk/v3 v3.77.1
github.com/ydb-platform/ydb-go-sdk/v3 v3.89.2
go.etcd.io/etcd/client/pkg/v3 v3.5.16
go.uber.org/atomic v1.11.0
golang.org/x/sync v0.8.0
@ -158,10 +158,12 @@ require (
)
require (
cloud.google.com/go/auth v0.9.4 // indirect
cel.dev/expr v0.16.1 // indirect
cloud.google.com/go/auth v0.9.9 // indirect
cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect
cloud.google.com/go/compute/metadata v0.5.1 // indirect
cloud.google.com/go/iam v1.2.0 // indirect
cloud.google.com/go/compute/metadata v0.5.2 // indirect
cloud.google.com/go/iam v1.2.1 // indirect
cloud.google.com/go/monitoring v1.21.1 // indirect
filippo.io/edwards25519 v1.1.0 // indirect
gitee.com/chunanyong/dm v1.8.16 // indirect
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0 // indirect
@ -172,6 +174,9 @@ require (
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
github.com/Files-com/files-sdk-go/v3 v3.2.34 // indirect
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.1 // indirect
github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1 // indirect
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1 // indirect
github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd // indirect
github.com/Microsoft/go-winio v0.6.1 // indirect
github.com/ProtonMail/bcrypt v0.0.0-20211005172633-e235017c1baf // indirect
@ -186,31 +191,33 @@ require (
github.com/andybalholm/cascadia v1.3.2 // indirect
github.com/appscode/go-querystring v0.0.0-20170504095604-0126cfb3f1dc // indirect
github.com/arangodb/go-velocypack v0.0.0-20200318135517-5af53c29c67e // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.4 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.13 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.6 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.17 // indirect
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.10 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.17 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.17 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.22 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.22 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.17 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.19 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.19 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.17 // indirect
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.22 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.0 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.3 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.3 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.3 // indirect
github.com/aws/aws-sdk-go-v2/service/sns v1.31.3 // indirect
github.com/aws/aws-sdk-go-v2/service/sqs v1.34.3 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.22.7 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.7 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.30.7 // indirect
github.com/aws/smithy-go v1.21.0 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.24.2 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.2 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.32.2 // indirect
github.com/aws/smithy-go v1.22.0 // indirect
github.com/boltdb/bolt v1.3.1 // indirect
github.com/bradenaw/juniper v0.15.2 // indirect
github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8 // indirect
github.com/buengese/sgzip v0.1.1 // indirect
github.com/calebcase/tmpfile v1.0.3 // indirect
github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect
github.com/chilts/sid v0.0.0-20190607042430-660e94789ec9 // indirect
github.com/cloudflare/circl v1.3.7 // indirect
github.com/cloudsoda/go-smb2 v0.0.0-20231124195312-f3ec8ae2c891 // indirect
github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 // indirect
github.com/colinmarc/hdfs/v2 v2.4.0 // indirect
github.com/cronokirby/saferith v0.33.0 // indirect
github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 // indirect
@ -221,6 +228,8 @@ require (
github.com/emersion/go-message v0.18.0 // indirect
github.com/emersion/go-textwrapper v0.0.0-20200911093747-65d896831594 // indirect
github.com/emersion/go-vcard v0.0.0-20230815062825-8fda7d206ec9 // indirect
github.com/envoyproxy/go-control-plane v0.13.0 // indirect
github.com/envoyproxy/protoc-gen-validate v1.1.0 // indirect
github.com/fatih/color v1.16.0 // indirect
github.com/fclairamb/go-log v0.5.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
@ -265,7 +274,7 @@ require (
github.com/lpar/date v1.0.0 // indirect
github.com/lufia/plan9stats v0.0.0-20231016141302-07b5767bb0ed // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-runewidth v0.0.15 // indirect
github.com/mattn/go-runewidth v0.0.16 // indirect
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/montanaflynn/stats v0.7.1 // indirect
@ -291,6 +300,7 @@ require (
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
github.com/pkg/sftp v1.13.6 // indirect
github.com/pkg/xattr v0.4.9 // indirect
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect
github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b // indirect
github.com/putdotio/go-putio/putio v0.0.0-20200123120452-16d982cac2b8 // indirect
github.com/relvacode/iso8601 v1.3.0 // indirect
@ -319,7 +329,7 @@ require (
github.com/unknwon/goconfig v1.0.0 // indirect
github.com/xanzy/ssh-agent v0.3.3 // indirect
github.com/yandex-cloud/go-genproto v0.0.0-20211115083454-9ca41db5ed9e // indirect
github.com/ydb-platform/ydb-go-genproto v0.0.0-20240528144234-5d5a685e41f7 // indirect
github.com/ydb-platform/ydb-go-genproto v0.0.0-20241022174402-dd276c7f197b // indirect
github.com/ydb-platform/ydb-go-yc v0.12.1 // indirect
github.com/ydb-platform/ydb-go-yc-metadata v0.6.1 // indirect
github.com/yunify/qingstor-sdk-go/v3 v3.2.0 // indirect
@ -328,24 +338,29 @@ require (
github.com/zeebo/errs v1.3.0 // indirect
go.etcd.io/bbolt v1.3.10 // indirect
go.etcd.io/etcd/api/v3 v3.5.16 // indirect
go.opentelemetry.io/contrib/detectors/gcp v1.29.0 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 // indirect
go.opentelemetry.io/otel v1.29.0 // indirect
go.opentelemetry.io/otel/metric v1.29.0 // indirect
go.opentelemetry.io/otel/sdk v1.29.0 // indirect
go.opentelemetry.io/otel/sdk/metric v1.29.0 // indirect
go.opentelemetry.io/otel/trace v1.29.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.0 // indirect
golang.org/x/mod v0.21.0 // indirect
golang.org/x/term v0.24.0 // indirect
golang.org/x/time v0.6.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect
golang.org/x/term v0.25.0 // indirect
golang.org/x/time v0.7.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20241007155032-5fefd90f89a9 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 // indirect
google.golang.org/grpc/stats/opentelemetry v0.0.0-20240907200651-3ffb98b2c93a // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
gopkg.in/validator.v2 v2.0.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
modernc.org/gc/v3 v3.0.0-20240107210532-573471604cb6 // indirect
modernc.org/libc v1.55.3 // indirect
moul.io/http2curl/v2 v2.3.0 // indirect
storj.io/common v0.0.0-20240812101423-26b53789c348 // indirect
storj.io/drpc v0.0.35-0.20240709171858-0075ac871661 // indirect
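
A sketch of how a few of the go.mod bumps above can be reproduced with the standard Go toolchain (versions copied from the new lines, run from the module root; not an exhaustive list):

    go get cloud.google.com/go/storage@v1.45.0
    go get github.com/prometheus/client_golang@v1.20.5
    go get google.golang.org/grpc@v1.67.1
    go mod tidy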

go.sum

@ -1,3 +1,5 @@
cel.dev/expr v0.16.1 h1:NR0+oFYzR1CqLFhTAqg3ql59G9VfN8fKq1TCHJ6gq1g=
cel.dev/expr v0.16.1/go.mod h1:AsGA5zb3WruAEQeQng1RZdGEXmBj0jvMWh6l5SnNuC8=
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
@ -36,8 +38,8 @@ cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRY
cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM=
cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I=
cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY=
cloud.google.com/go v0.115.1 h1:Jo0SM9cQnSkYfp44+v+NQXHpcHqlnRJk2qxh6yvxxxQ=
cloud.google.com/go v0.115.1/go.mod h1:DuujITeaufu3gL68/lOFIirVNJwQeyf5UXyi+Wbgknc=
cloud.google.com/go v0.116.0 h1:B3fRrSDkLRt5qSHWe40ERJvhvnQwdZiHu0bJOpldweE=
cloud.google.com/go v0.116.0/go.mod h1:cEPSRWPzZEswwdr9BxE6ChEn01dWlTaF05LiC2Xs70U=
cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4=
cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw=
cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E=
@ -84,8 +86,8 @@ cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVo
cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo=
cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0=
cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E=
cloud.google.com/go/auth v0.9.4 h1:DxF7imbEbiFu9+zdKC6cKBko1e8XeJnipNqIbWZ+kDI=
cloud.google.com/go/auth v0.9.4/go.mod h1:SHia8n6//Ya940F1rLimhJCjjx7KE17t0ctFEci3HkA=
cloud.google.com/go/auth v0.9.9 h1:BmtbpNQozo8ZwW2t7QJjnrQtdganSdmqeIBxHxNkEZQ=
cloud.google.com/go/auth v0.9.9/go.mod h1:xxA5AqpDrvS+Gkmo9RqrGGRh6WSNKKOXhY3zNOr38tI=
cloud.google.com/go/auth/oauth2adapt v0.2.4 h1:0GWE/FUsXhf6C+jAkWgYm7X9tK8cuEIfy19DBn6B6bY=
cloud.google.com/go/auth/oauth2adapt v0.2.4/go.mod h1:jC/jOpwFP6JBxhB3P5Rr0a9HLMC/Pe3eaL4NmdvqPtc=
cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0=
@ -156,8 +158,8 @@ cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZ
cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM=
cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
cloud.google.com/go/compute/metadata v0.5.1 h1:NM6oZeZNlYjiwYje+sYFjEpP0Q0zCan1bmQW/KmIrGs=
cloud.google.com/go/compute/metadata v0.5.1/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k=
cloud.google.com/go/compute/metadata v0.5.2 h1:UxK4uu/Tn+I3p2dYWTfiX4wva7aYlKixAHn3fyqngqo=
cloud.google.com/go/compute/metadata v0.5.2/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k=
cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY=
cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck=
cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w=
@ -273,8 +275,8 @@ cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQE
cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE=
cloud.google.com/go/iam v0.11.0/go.mod h1:9PiLDanza5D+oWFZiH1uG+RnRCfEGKoyl6yo4cgWZGY=
cloud.google.com/go/iam v0.12.0/go.mod h1:knyHGviacl11zrtZUoDuYpDgLjvr28sLQaG0YB2GYAY=
cloud.google.com/go/iam v1.2.0 h1:kZKMKVNk/IsSSc/udOb83K0hL/Yh/Gcqpz+oAkoIFN8=
cloud.google.com/go/iam v1.2.0/go.mod h1:zITGuWgsLZxd8OwAlX+eMFgZDXzBm7icj1PVTYG766Q=
cloud.google.com/go/iam v1.2.1 h1:QFct02HRb7H12J/3utj0qf5tobFh9V4vR6h9eX5EBRU=
cloud.google.com/go/iam v1.2.1/go.mod h1:3VUIJDPpwT6p/amXRC5GY8fCCh70lxPygguVtI0Z4/g=
cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc=
cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A=
cloud.google.com/go/iap v1.6.0/go.mod h1:NSuvI9C/j7UdjGjIde7t7HBz+QTwBcapPE07+sSRcLk=
@ -288,8 +290,8 @@ cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxs
cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg=
cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0=
cloud.google.com/go/kms v1.9.0/go.mod h1:qb1tPTgfF9RQP8e1wq4cLFErVuTJv7UsSC915J8dh3w=
cloud.google.com/go/kms v1.19.0 h1:x0OVJDl6UH1BSX4THKlMfdcFWoE4ruh90ZHuilZekrU=
cloud.google.com/go/kms v1.19.0/go.mod h1:e4imokuPJUc17Trz2s6lEXFDt8bgDmvpVynH39bdrHM=
cloud.google.com/go/kms v1.20.0 h1:uKUvjGqbBlI96xGE669hcVnEMw1Px/Mvfa62dhM5UrY=
cloud.google.com/go/kms v1.20.0/go.mod h1:/dMbFF1tLLFnQV44AoI2GlotbjowyUfgVwezxW291fM=
cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic=
cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI=
cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE=
@ -300,11 +302,13 @@ cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6
cloud.google.com/go/lifesciences v0.8.0/go.mod h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA84ClvoezaA/bo=
cloud.google.com/go/logging v1.6.1/go.mod h1:5ZO0mHHbvm8gEmeEUHrmDlTDSu5imF6MUP9OfilNXBw=
cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M=
cloud.google.com/go/logging v1.11.0 h1:v3ktVzXMV7CwHq1MBF65wcqLMA7i+z3YxbUsoK7mOKs=
cloud.google.com/go/logging v1.11.0/go.mod h1:5LDiJC/RxTt+fHc1LAt20R9TKiUTReDg6RuuFOZ67+A=
cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE=
cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc=
cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo=
cloud.google.com/go/longrunning v0.6.0 h1:mM1ZmaNsQsnb+5n1DNPeL0KwQd9jQRqSqSDEkBZr+aI=
cloud.google.com/go/longrunning v0.6.0/go.mod h1:uHzSZqW89h7/pasCWNYdUpwGz3PcVWhrWupreVPYLts=
cloud.google.com/go/longrunning v0.6.1 h1:lOLTFxYpr8hcRtcwWir5ITh1PAKUD/sG2lKrTSYjyMc=
cloud.google.com/go/longrunning v0.6.1/go.mod h1:nHISoOZpBcmlwbJmiVk5oDRz0qG/ZxPynEGs1iZ79s0=
cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE=
cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM=
cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA=
@ -326,6 +330,8 @@ cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJP
cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk=
cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4=
cloud.google.com/go/monitoring v1.12.0/go.mod h1:yx8Jj2fZNEkL/GYZyTLS4ZtZEZN8WtDEiEqG4kLK50w=
cloud.google.com/go/monitoring v1.21.1 h1:zWtbIoBMnU5LP9A/fz8LmWMGHpk4skdfeiaa66QdFGc=
cloud.google.com/go/monitoring v1.21.1/go.mod h1:Rj++LKrlht9uBi8+Eb530dIrzG/cU/lB8mt+lbeFK1c=
cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA=
cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o=
cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM=
@ -377,8 +383,8 @@ cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjp
cloud.google.com/go/pubsub v1.26.0/go.mod h1:QgBH3U/jdJy/ftjPhTkyXNj543Tin1pRYcdcPRnFIRI=
cloud.google.com/go/pubsub v1.27.1/go.mod h1:hQN39ymbV9geqBnfQq6Xf63yNhUAhv9CZhzp5O6qsW0=
cloud.google.com/go/pubsub v1.28.0/go.mod h1:vuXFpwaVoIPQMGXqRyUQigu/AX1S3IWugR9xznmcXX8=
cloud.google.com/go/pubsub v1.43.0 h1:s3Qx+F96J7Kwey/uVHdK3QxFLIlOvvw4SfMYw2jFjb4=
cloud.google.com/go/pubsub v1.43.0/go.mod h1:LNLfqItblovg7mHWgU5g84Vhza4J8kTxx0YqIeTzcXY=
cloud.google.com/go/pubsub v1.45.1 h1:ZC/UzYcrmK12THWn1P72z+Pnp2vu/zCZRXyhAfP1hJY=
cloud.google.com/go/pubsub v1.45.1/go.mod h1:3bn7fTmzZFwaUjllitv1WlsNMkqBgGUb3UdMhI54eCc=
cloud.google.com/go/pubsublite v1.5.0/go.mod h1:xapqNQ1CuLfGi23Yda/9l4bBCKz/wC3KIJ5gKcxveZg=
cloud.google.com/go/pubsublite v1.6.0/go.mod h1:1eFCS0U11xlOuMFV/0iBqw3zP12kddMeCbj/F3FSj9k=
cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4=
@ -469,8 +475,8 @@ cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq
cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc=
cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s=
cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y=
cloud.google.com/go/storage v1.43.0 h1:CcxnSohZwizt4LCzQHWvBf1/kvtHUn7gk9QERXPyXFs=
cloud.google.com/go/storage v1.43.0/go.mod h1:ajvxEa7WmZS1PxvKRq4bq0tFT3vMd502JwstCcYv0Q0=
cloud.google.com/go/storage v1.45.0 h1:5av0QcIVj77t+44mV4gffFC/LscFRUhto6UBMB5SimM=
cloud.google.com/go/storage v1.45.0/go.mod h1:wpPblkIuMP5jCB/E48Pz9zIo2S/zD8g+ITmxKkPCITE=
cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w=
cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I=
cloud.google.com/go/storagetransfer v1.7.0/go.mod h1:8Giuj1QNb1kfLAiWM1bN6dHzfdlDAVC9rv9abHot2W4=
@ -488,6 +494,8 @@ cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV6
cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28=
cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y=
cloud.google.com/go/trace v1.8.0/go.mod h1:zH7vcsbAhklH8hWFig58HvxcxyQbaIqMarMg9hn5ECA=
cloud.google.com/go/trace v1.11.1 h1:UNqdP+HYYtnm6lb91aNA5JQ0X14GnxkABGlfz2PzPew=
cloud.google.com/go/trace v1.11.1/go.mod h1:IQKNQuBzH72EGaXEodKlNJrWykGZxet2zgjtS60OtjA=
cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs=
cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg=
cloud.google.com/go/translate v1.6.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos=
@ -570,6 +578,14 @@ github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3
github.com/DataDog/zstd v1.5.2/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw=
github.com/Files-com/files-sdk-go/v3 v3.2.34 h1:j6gSzu6BF1wWH1z4itRe7eKhQSCrx/I78SDNiBBUtvI=
github.com/Files-com/files-sdk-go/v3 v3.2.34/go.mod h1:Y/bCHoPJNPKz2hw1ADXjQXJP378HODwK+g/5SR2gqfU=
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.1 h1:pB2F2JKCj1Znmp2rwxxt1J0Fg0wezTMgWYk5Mpbi1kg=
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.1/go.mod h1:itPGVDKf9cC/ov4MdvJ2QZ0khw4bfoo9jzwTJlaxy2k=
github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1 h1:UQ0AhxogsIRZDkElkblfnwjc3IaltCm2HUMvezQaL7s=
github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1/go.mod h1:jyqM3eLpJ3IbIFDTKVz2rF9T/xWGW0rIriGwnz8l9Tk=
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.48.1 h1:oTX4vsorBZo/Zdum6OKPA4o7544hm6smoRv1QjpTwGo=
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.48.1/go.mod h1:0wEl7vrAD8mehJyohS9HZy+WyEOaQO2mJx86Cvh93kM=
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1 h1:8nn+rsCvTq9axyEh382S0PFLBeaFwNsT43IrPWzctRU=
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1/go.mod h1:viRWSEhtMZqz1rhwmOVKkWl6SwmVowfL9O2YR5gI2PE=
github.com/Jille/raft-grpc-transport v1.6.1 h1:gN3sjapb+fVbiebS7AfQQgbV2ecTOI7ur7NPPC7Mhoc=
github.com/Jille/raft-grpc-transport v1.6.1/go.mod h1:HbOjEdu/yzCJ/mjTF6wEOJNbAUpHfU2UOA2hVD4CNFg=
github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk=
@ -626,56 +642,56 @@ github.com/apache/arrow/go/v10 v10.0.1/go.mod h1:YvhnlEePVnBS4+0z3fhPfUy7W1Ikj0I
github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU=
github.com/appscode/go-querystring v0.0.0-20170504095604-0126cfb3f1dc h1:LoL75er+LKDHDUfU5tRvFwxH0LjPpZN8OoG8Ll+liGU=
github.com/appscode/go-querystring v0.0.0-20170504095604-0126cfb3f1dc/go.mod h1:w648aMHEgFYS6xb0KVMMtZ2uMeemhiKCuD2vj6gY52A=
github.com/arangodb/go-driver v1.6.2 h1:3o4inejwR7VMmsKvQJ6hepx4au9sUT6C/RDrXykuD1g=
github.com/arangodb/go-driver v1.6.2/go.mod h1:2BCE6y3DNSLqIXnDvf4CR6WdzZZloYudEy+sasimLiQ=
github.com/arangodb/go-driver v1.6.4 h1:tuizDP620e3OFaoFkEh07n4bHSTvCOvm5qVGQ/+zMrs=
github.com/arangodb/go-driver v1.6.4/go.mod h1:oLZCzkEGh82I2XBcMkrDtqsMXFg6QkIRgbqLGalh6jc=
github.com/arangodb/go-velocypack v0.0.0-20200318135517-5af53c29c67e h1:Xg+hGrY2LcQBbxd0ZFdbGSyRKTYMZCfBbw/pMJFOk1g=
github.com/arangodb/go-velocypack v0.0.0-20200318135517-5af53c29c67e/go.mod h1:mq7Shfa/CaixoDxiyAAc5jZ6CVBAyPaNQCGS7mkj4Ho=
github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=
github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU=
github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
github.com/aws/aws-sdk-go-v2 v1.31.0 h1:3V05LbxTSItI5kUqNwhJrrrY1BAXxXt0sN0l72QmG5U=
github.com/aws/aws-sdk-go-v2 v1.31.0/go.mod h1:ztolYtaEUtdpf9Wftr31CJfLVjOnD/CVRkKOOYgF8hA=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.4 h1:70PVAiL15/aBMh5LThwgXdSQorVr91L127ttckI9QQU=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.4/go.mod h1:/MQxMqci8tlqDH+pjmoLu1i0tbWCUP1hhyMRuFxpQCw=
github.com/aws/aws-sdk-go-v2/config v1.27.33 h1:Nof9o/MsmH4oa0s2q9a0k7tMz5x/Yj5k06lDODWz3BU=
github.com/aws/aws-sdk-go-v2/config v1.27.33/go.mod h1:kEqdYzRb8dd8Sy2pOdEbExTTF5v7ozEXX0McgPE7xks=
github.com/aws/aws-sdk-go-v2/credentials v1.17.32 h1:7Cxhp/BnT2RcGy4VisJ9miUPecY+lyE9I8JvcZofn9I=
github.com/aws/aws-sdk-go-v2/credentials v1.17.32/go.mod h1:P5/QMF3/DCHbXGEGkdbilXHsyTBX5D3HSwcrSc9p20I=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.13 h1:pfQ2sqNpMVK6xz2RbqLEL0GH87JOwSxPV2rzm8Zsb74=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.13/go.mod h1:NG7RXPUlqfsCLLFfi0+IpKN4sCB9D9fw/qTaSB+xRoU=
github.com/aws/aws-sdk-go-v2 v1.32.3 h1:T0dRlFBKcdaUPGNtkBSwHZxrtis8CQU17UpNBZYd0wk=
github.com/aws/aws-sdk-go-v2 v1.32.3/go.mod h1:2SK5n0a2karNTv5tbP1SjsX0uhttou00v/HpXKM1ZUo=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.6 h1:pT3hpW0cOHRJx8Y0DfJUEQuqPild8jRGmSFmBgvydr0=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.6/go.mod h1:j/I2++U0xX+cr44QjHay4Cvxj6FUbnxrgmqN3H1jTZA=
github.com/aws/aws-sdk-go-v2/config v1.28.0 h1:FosVYWcqEtWNxHn8gB/Vs6jOlNwSoyOCA/g/sxyySOQ=
github.com/aws/aws-sdk-go-v2/config v1.28.0/go.mod h1:pYhbtvg1siOOg8h5an77rXle9tVG8T+BWLWAo7cOukc=
github.com/aws/aws-sdk-go-v2/credentials v1.17.41 h1:7gXo+Axmp+R4Z+AK8YFQO0ZV3L0gizGINCOWxSLY9W8=
github.com/aws/aws-sdk-go-v2/credentials v1.17.41/go.mod h1:u4Eb8d3394YLubphT4jLEwN1rLNq2wFOlT6OuxFwPzU=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.17 h1:TMH3f/SCAWdNtXXVPPu5D6wrr4G5hI1rAxbcocKfC7Q=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.17/go.mod h1:1ZRXLdTpzdJb9fwTMXiLipENRxkGMTn1sfKexGllQCw=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.10 h1:zeN9UtUlA6FTx0vFSayxSX32HDw73Yb6Hh2izDSFxXY=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.10/go.mod h1:3HKuexPDcwLWPaqpW2UR/9n8N/u/3CKcGAzSs8p8u8g=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.17 h1:pI7Bzt0BJtYA0N/JEC6B8fJ4RBrEMi1LBrkMdFYNSnQ=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.17/go.mod h1:Dh5zzJYMtxfIjYW+/evjQ8uj2OyR/ve2KROHGHlSFqE=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.17 h1:Mqr/V5gvrhA2gvgnF42Zh5iMiQNcOYthFYwCyrnuWlc=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.17/go.mod h1:aLJpZlCmjE+V+KtN1q1uyZkfnUWpQGpbsn89XPKyzfU=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.22 h1:Jw50LwEkVjuVzE1NzkhNKkBf9cRN7MtE1F/b2cOKTUM=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.22/go.mod h1:Y/SmAyPcOTmpeVaWSzSKiILfXTVJwrGmYZhcRbhWuEY=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.22 h1:981MHwBaRZM7+9QSR6XamDzF/o7ouUGxFzr+nVSIhrs=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.22/go.mod h1:1RA1+aBEfn+CAB/Mh0MB6LsdCYCnjZm7tKXtnk499ZQ=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 h1:VaRN3TlFdd6KxX1x3ILT5ynH6HvKgqdiXoTxAF4HQcQ=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.17 h1:Roo69qTpfu8OlJ2Tb7pAYVuF0CpuUMB0IYWwYP/4DZM=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.17/go.mod h1:NcWPxQzGM1USQggaTVwz6VpqMZPX1CvDJLDh6jnOCa4=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4 h1:KypMCbLPPHEmf9DgMGw51jMj77VfGPAN2Kv4cfhlfgI=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4/go.mod h1:Vz1JQXliGcQktFTN/LN6uGppAIRoLBR2bMvIMP0gOjc=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.19 h1:FLMkfEiRjhgeDTCjjLoc3URo/TBkgeQbocA78lfkzSI=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.19/go.mod h1:Vx+GucNSsdhaxs3aZIKfSUjKVGsxN25nX2SRcdhuw08=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.19 h1:rfprUlsdzgl7ZL2KlXiUAoJnI/VxfHCvDFr2QDFj6u4=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.19/go.mod h1:SCWkEdRq8/7EK60NcvvQ6NXKuTcchAD4ROAsC37VEZE=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.17 h1:u+EfGmksnJc/x5tq3A+OD7LrMbSSR/5TrKLvkdy/fhY=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.17/go.mod h1:VaMx6302JHax2vHJWgRo+5n9zvbacs3bLU/23DNQrTY=
github.com/aws/aws-sdk-go-v2/service/s3 v1.61.2 h1:Kp6PWAlXwP1UvIflkIP6MFZYBNDCa4mFCGtxrpICVOg=
github.com/aws/aws-sdk-go-v2/service/s3 v1.61.2/go.mod h1:5FmD/Dqq57gP+XwaUnd5WFPipAuzrf0HmupX27Gvjvc=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.22 h1:yV+hCAHZZYJQcwAaszoBNwLbPItHvApxT0kVIw6jRgs=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.22/go.mod h1:kbR1TL8llqB1eGnVbybcA4/wgScxdylOdyAd51yxPdw=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.0 h1:TToQNkvGguu209puTojY/ozlqy2d/SFNcoLIqTFi42g=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.0/go.mod h1:0jp+ltwkf+SwG2fm/PKo8t4y8pJSgOCO4D8Lz3k0aHQ=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.3 h1:kT6BcZsmMtNkP/iYMcRG+mIEA/IbeiUimXtGmqF39y0=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.3/go.mod h1:Z8uGua2k4PPaGOYn66pK02rhMrot3Xk3tpBuUFPomZU=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.3 h1:qcxX0JYlgWH3hpPUnd6U0ikcl6LLA9sLkXE2w1fpMvY=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.3/go.mod h1:cLSNEmI45soc+Ef8K/L+8sEA3A3pYFEYf5B5UI+6bH4=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.3 h1:ZC7Y/XgKUxwqcdhO5LE8P6oGP1eh6xlQReWNKfhvJno=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.3/go.mod h1:WqfO7M9l9yUAw0HcHaikwRd/H6gzYdz7vjejCA5e2oY=
github.com/aws/aws-sdk-go-v2/service/s3 v1.66.2 h1:p9TNFL8bFUMd+38YIpTAXpoxyz0MxC7FlbFEH4P4E1U=
github.com/aws/aws-sdk-go-v2/service/s3 v1.66.2/go.mod h1:fNjyo0Coen9QTwQLWeV6WO2Nytwiu+cCcWaTdKCAqqE=
github.com/aws/aws-sdk-go-v2/service/sns v1.31.3 h1:eSTEdxkfle2G98FE+Xl3db/XAXXVTJPNQo9K/Ar8oAI=
github.com/aws/aws-sdk-go-v2/service/sns v1.31.3/go.mod h1:1dn0delSO3J69THuty5iwP0US2Glt0mx2qBBlI13pvw=
github.com/aws/aws-sdk-go-v2/service/sqs v1.34.3 h1:Vjqy5BZCOIsn4Pj8xzyqgGmsSqzz7y/WXbN3RgOoVrc=
github.com/aws/aws-sdk-go-v2/service/sqs v1.34.3/go.mod h1:L0enV3GCRd5iG9B64W35C4/hwsCB00Ib+DKVGTadKHI=
github.com/aws/aws-sdk-go-v2/service/sso v1.22.7 h1:pIaGg+08llrP7Q5aiz9ICWbY8cqhTkyy+0SHvfzQpTc=
github.com/aws/aws-sdk-go-v2/service/sso v1.22.7/go.mod h1:eEygMHnTKH/3kNp9Jr1n3PdejuSNcgwLe1dWgQtO0VQ=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.7 h1:/Cfdu0XV3mONYKaOt1Gr0k1KvQzkzPyiKUdlWJqy+J4=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.7/go.mod h1:bCbAxKDqNvkHxRaIMnyVPXPo+OaPRwvmgzMxbz1VKSA=
github.com/aws/aws-sdk-go-v2/service/sts v1.30.7 h1:NKTa1eqZYw8tiHSRGpP0VtTdub/8KNk8sDkNPFaOKDE=
github.com/aws/aws-sdk-go-v2/service/sts v1.30.7/go.mod h1:NXi1dIAGteSaRLqYgarlhP/Ij0cFT+qmCwiJqWh/U5o=
github.com/aws/smithy-go v1.21.0 h1:H7L8dtDRk0P1Qm6y0ji7MCYMQObJ5R9CRpyPhRUkLYA=
github.com/aws/smithy-go v1.21.0/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg=
github.com/aws/aws-sdk-go-v2/service/sso v1.24.2 h1:bSYXVyUzoTHoKalBmwaZxs97HU9DWWI3ehHSAMa7xOk=
github.com/aws/aws-sdk-go-v2/service/sso v1.24.2/go.mod h1:skMqY7JElusiOUjMJMOv1jJsP7YUg7DrhgqZZWuzu1U=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.2 h1:AhmO1fHINP9vFYUE0LHzCWg/LfUWUF+zFPEcY9QXb7o=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.2/go.mod h1:o8aQygT2+MVP0NaV6kbdE1YnnIM8RRVQzoeUH45GOdI=
github.com/aws/aws-sdk-go-v2/service/sts v1.32.2 h1:CiS7i0+FUe+/YY1GvIBLLrR/XNGZ4CtM1Ll0XavNuVo=
github.com/aws/aws-sdk-go-v2/service/sts v1.32.2/go.mod h1:HtaiBI8CjYoNVde8arShXb94UbQQi9L4EMr6D+xGBwo=
github.com/aws/smithy-go v1.22.0 h1:uunKnWlcoL3zO7q+gG2Pk53joueEOsnNB28QdMsmiMM=
github.com/aws/smithy-go v1.22.0/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg=
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
@ -713,12 +729,15 @@ github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK3
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g=
github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chengxilo/virtualterm v1.0.4 h1:Z6IpERbRVlfB8WkOmtbHiDbBANU7cimRIof7mk9/PwM=
github.com/chengxilo/virtualterm v1.0.4/go.mod h1:DyxxBZz/x1iqJjFxTFcr6/x+jSpqN0iwWCOK1q10rlY=
github.com/chilts/sid v0.0.0-20190607042430-660e94789ec9 h1:z0uK8UQqjMVYzvk4tiiu3obv2B44+XBsvgEJREQfnO8=
github.com/chilts/sid v0.0.0-20190607042430-660e94789ec9/go.mod h1:Jl2neWsQaDanWORdqZ4emBl50J4/aRBBS4FyyG9/PFo=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
@ -750,6 +769,8 @@ github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWH
github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 h1:QVw89YDxXxEe+l8gU8ETbOasdwEV+avkR75ZzsVV9WI=
github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
github.com/cognusion/imaging v1.0.1 h1:jJa1+jYHvr2zS5zZxoluYthH5KbVz4LEvD3xy/W2L90=
github.com/cognusion/imaging v1.0.1/go.mod h1:ucYm08RsFoQvYXEV5XMsRBppxrWzD1AGxm6iod5/rvM=
github.com/colinmarc/hdfs/v2 v2.4.0 h1:v6R8oBx/Wu9fHpdPoJJjpGSUxo8NhHIwrwsfhFvU9W0=
@ -811,10 +832,14 @@ github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.
github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34=
github.com/envoyproxy/go-control-plane v0.11.0/go.mod h1:VnHyVMpzcLvCFt9yUz1UnCwHLhwx1WguiVDV7pTG/tI=
github.com/envoyproxy/go-control-plane v0.13.0 h1:HzkeUz1Knt+3bK+8LG1bxOO/jzWZmdxpwC51i202les=
github.com/envoyproxy/go-control-plane v0.13.0/go.mod h1:GRaKG3dwvFoTg4nj7aXdZnvMg4d7nvT/wl9WgVXn3Q8=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo=
github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w=
github.com/envoyproxy/protoc-gen-validate v0.10.0/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss=
github.com/envoyproxy/protoc-gen-validate v1.1.0 h1:tntQDh69XqOCOZsDz0lVJQez/2L6Uu2PdjCQwWCJ3bM=
github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4=
github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a h1:yDWHCSQ40h88yih2JAcL6Ls/kVkSE8GFACTGVnMPruw=
github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a/go.mod h1:7Ga40egUymuWXxAe151lTNnCv97MddSOVsjpPPkityA=
github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c h1:8ISkoahWXwZR41ois5lSJBSVw4D0OV19Ht/JSTzvSv0=
@ -853,8 +878,8 @@ github.com/gabriel-vasile/mimetype v1.4.4 h1:QjV6pZ7/XZ7ryI2KuyeEDE8wnh7fHP9YnQy
github.com/gabriel-vasile/mimetype v1.4.4/go.mod h1:JwLei5XPtWdGiMFB5Pjle1oEeoSeEuJfJE+TtfvdB/s=
github.com/geoffgarside/ber v1.1.0 h1:qTmFG4jJbwiSzSXoNJeHcOprVzZ8Ulde2Rrrifu5U9w=
github.com/geoffgarside/ber v1.1.0/go.mod h1:jVPKeCbj6MvQZhwLYsGwaGI52oUorHoHKNecGT85ZCc=
github.com/getsentry/sentry-go v0.29.0 h1:YtWluuCFg9OfcqnaujpY918N/AhCCwarIDWOYSBAjCA=
github.com/getsentry/sentry-go v0.29.0/go.mod h1:jhPesDAL0Q0W2+2YEuVOvdWmVtdsr1+jtBrlDEVWwLY=
github.com/getsentry/sentry-go v0.29.1 h1:DyZuChN8Hz3ARxGVV8ePaNXh1dQ7d76AiB117xcREwA=
github.com/getsentry/sentry-go v0.29.1/go.mod h1:x3AtIzN01d6SiWkderzaH28Tm0lgkafpJ5Bm3li39O0=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
@ -922,8 +947,8 @@ github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL
github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA=
github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
github.com/gocql/gocql v1.6.0 h1:IdFdOTbnpbd0pDhl4REKQDM+Q0SzKXQ1Yh+YZZ8T/qU=
github.com/gocql/gocql v1.6.0/go.mod h1:3gM2c4D3AnkISwBxGnMMsS8Oy4y2lhbPRsH4xnJrHG8=
github.com/gocql/gocql v1.7.0 h1:O+7U7/1gSN7QTEAaMEsJc1Oq2QHXvCWoF3DFK9HDHus=
github.com/gocql/gocql v1.7.0/go.mod h1:vnlvXyFZeLBF0Wy+RS8hrOdbn0UWsWtdg07XJnFxZ+4=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw=
github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
@ -1084,8 +1109,8 @@ github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4Zs
github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w=
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8=
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4=
github.com/hanwen/go-fuse/v2 v2.5.1 h1:OQBE8zVemSocRxA4OaFJbjJ5hlpCmIWbGr7r0M4uoQQ=
github.com/hanwen/go-fuse/v2 v2.5.1/go.mod h1:xKwi1cF7nXAOBCXujD5ie0ZKsxc8GGSA1rlMJc+8IJs=
github.com/hanwen/go-fuse/v2 v2.6.1 h1:F3RUMbAuRhVTi3fvgf8HjMPvOm9xEv5wjuy/AXJtEwI=
github.com/hanwen/go-fuse/v2 v2.6.1/go.mod h1:ugNaD/iv5JYyS1Rcvi57Wz7/vrLQJo10mmketmoef48=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@ -1178,7 +1203,6 @@ github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+
github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
github.com/jzelinskie/whirlpool v0.0.0-20201016144138-0675e54bb004 h1:G+9t9cEtnC9jFiTxyptEKuNIAbiN5ZCQzX2a74lj3xg=
github.com/jzelinskie/whirlpool v0.0.0-20201016144138-0675e54bb004/go.mod h1:KmHnJWQrgEvbuy0vcvj00gtMqbvNn1L+3YUZLK/B92c=
github.com/k0kubun/go-ansi v0.0.0-20180517002512-3bf9e2903213/go.mod h1:vNUNkEQ1e29fT/6vq2aBdFsgNPmy8qMdSay1npru+Sw=
github.com/karlseguin/ccache/v2 v2.0.8 h1:lT38cE//uyf6KcFok0rlgXtGFBWxkI6h/qg4tbFyDnA=
github.com/karlseguin/ccache/v2 v2.0.8/go.mod h1:2BDThcfQMf/c0jnZowt16eW405XIqZPavt+HoYEtcxQ=
github.com/karlseguin/expect v1.0.2-0.20190806010014-778a5f0c6003 h1:vJ0Snvo+SLMY72r5J4sEfkuE7AFbixEP2qRbEcum/wA=
@ -1215,15 +1239,14 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kurin/blazer v0.5.3 h1:SAgYv0TKU0kN/ETfO5ExjNAPyMt2FocO2s/UlCHfjAk=
github.com/kurin/blazer v0.5.3/go.mod h1:4FCXMUWo9DllR2Do4TtBd377ezyAJ51vB5uTBjt0pGU=
github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/linxGnu/grocksdb v1.9.3 h1:s1cbPcOd0cU2SKXRG1nEqCOWYAELQjdqg3RVI2MH9ik=
github.com/linxGnu/grocksdb v1.9.3/go.mod h1:QYiYypR2d4v63Wj1adOOfzglnoII0gLj3PNh4fZkcFA=
github.com/linxGnu/grocksdb v1.9.5 h1:Pqx1DTR5bdJSZic8CJwc9UNZR60qoAOK09feMg4SoaI=
github.com/linxGnu/grocksdb v1.9.5/go.mod h1:QYiYypR2d4v63Wj1adOOfzglnoII0gLj3PNh4fZkcFA=
github.com/lpar/date v1.0.0 h1:bq/zVqFTUmsxvd/CylidY4Udqpr9BOFrParoP6p0x/I=
github.com/lpar/date v1.0.0/go.mod h1:KjYe0dDyMQTgpqcUz4LEIeM5VZwhggjVx/V2dtc8NSo=
github.com/lufia/plan9stats v0.0.0-20231016141302-07b5767bb0ed h1:036IscGBfJsFIgJQzlui7nK1Ncm0tp2ktmPj8xO4N/0=
@ -1249,8 +1272,8 @@ github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWE
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U=
github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY=
@ -1263,7 +1286,6 @@ github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI=
github.com/moby/sys/mountinfo v0.7.2 h1:1shs6aH5s4o5H2zQLn796ADW1wMrIwHsyJ2v9KouLrg=
github.com/moby/sys/mountinfo v0.7.2/go.mod h1:1YOa8w8Ih7uW0wALDUgT1dTTSBrZ+HiBLGws92L2RU4=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@ -1318,6 +1340,8 @@ github.com/panjf2000/ants/v2 v2.9.1 h1:Q5vh5xohbsZXGcD6hhszzGqB7jSSc2/CRr3QKIga8
github.com/panjf2000/ants/v2 v2.9.1/go.mod h1:7ZxyxsqE4vvW0M7LSD8aI3cKwgFhBHbxnlN8mDqHa1I=
github.com/parquet-go/parquet-go v0.23.0 h1:dyEU5oiHCtbASyItMCD2tXtT2nPmoPbKpqf0+nnGrmk=
github.com/parquet-go/parquet-go v0.23.0/go.mod h1:MnwbUcFHU6uBYMymKAlPPAw9yh3kE1wWl6Gl1uLdkNk=
github.com/parquet-go/parquet-go v0.23.1-0.20241011155651-6446d1d0d2fe h1:oUJ5TPnrEK/z+/PeoLL+jCgfngAZIDMyhZASetRcYYg=
github.com/parquet-go/parquet-go v0.23.1-0.20241011155651-6446d1d0d2fe/go.mod h1:OqBBRGBl7+llplCvDMql8dEKaDqjaFA/VAPw+OJiNiw=
github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc=
@ -1362,6 +1386,8 @@ github.com/pkg/sftp v1.13.6 h1:JFZT4XbOU7l77xGSpOdW+pwIMqP044IyjXX6FGyEKFo=
github.com/pkg/sftp v1.13.6/go.mod h1:tz1ryNURKu77RL+GuCzmoJYxQczL3wLNNpPWagdg4Qk=
github.com/pkg/xattr v0.4.9 h1:5883YPCtkSd8LFbs13nXplj9g9tlrwoJRjgpgMu1/fE=
github.com/pkg/xattr v0.4.9/go.mod h1:di8WF84zAKk8jzR1UBTEWh9AUlIZZ7M/JNt8e9B6ktU=
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo=
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
@ -1375,8 +1401,8 @@ github.com/pquerna/ffjson v0.0.0-20190930134022-aa0246cd15f7/go.mod h1:YARuvh7BU
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
github.com/prometheus/client_golang v1.20.3 h1:oPksm4K8B+Vt35tUhw6GbSNSgVlVSBH0qELP/7u83l4=
github.com/prometheus/client_golang v1.20.3/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@ -1401,14 +1427,14 @@ github.com/quic-go/quic-go v0.40.1 h1:X3AGzUNFs0jVuO3esAGnTfvdgvL4fq655WaOi1snv1
github.com/quic-go/quic-go v0.40.1/go.mod h1:PeN7kuVJ4xZbxSv/4OX6S1USOX8MJvydwpTx31vx60c=
github.com/rabbitmq/amqp091-go v1.10.0 h1:STpn5XsHlHGcecLmMFCtg7mqq0RnD+zFr4uzukfVhBw=
github.com/rabbitmq/amqp091-go v1.10.0/go.mod h1:Hy4jKW5kQART1u+JkDTF9YYOQUHXqMuhrgxOEeS7G4o=
github.com/rclone/rclone v1.68.0 h1:tsPAAlf7Mu2WPs+hA12v34ojCcgwcOlHas9b8Ju6HYw=
github.com/rclone/rclone v1.68.0/go.mod h1:T8XKOt/2Fb9INROUtFH9eF9q9o9rI1W2qTrW2bw2cYU=
github.com/rclone/rclone v1.68.1 h1:vlEOAuPv4gGxWECM0NIaCwBNUt3ZQY7mCsyBtZjY+68=
github.com/rclone/rclone v1.68.1/go.mod h1:T8XKOt/2Fb9INROUtFH9eF9q9o9rI1W2qTrW2bw2cYU=
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM=
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/rdleal/intervalst v1.4.0 h1:DWrcNJ9kxPOYnXxExrQkta1xsKFRN0rTOVXkBDJzVYg=
github.com/rdleal/intervalst v1.4.0/go.mod h1:xO89Z6BC+LQDH+IPQQw/OESt5UADgFD41tYMUINGpxQ=
github.com/redis/go-redis/v9 v9.6.1 h1:HHDteefn6ZkTtY5fGUE8tj8uy85AHk6zP7CpzIAM0y4=
github.com/redis/go-redis/v9 v9.6.1/go.mod h1:0C0c6ycQsdpVNQpxb1njEQIqkx5UcsM8FJCQLgE9+RA=
github.com/redis/go-redis/v9 v9.7.0 h1:HhLSs+B6O021gwzl+locl0zEDnyNkxMtf/Z3NNBMa9E=
github.com/redis/go-redis/v9 v9.7.0/go.mod h1:f6zhXITC7JUJIlPEiBOTXxJgPLdZcA93GewI7inzyWw=
github.com/redis/rueidis v1.0.19 h1:s65oWtotzlIFN8eMPhyYwxlwLR1lUdhza2KtWprKYSo=
github.com/redis/rueidis v1.0.19/go.mod h1:8B+r5wdnjwK3lTFml5VtxjzGOQAC+5UmujoD12pDrEo=
github.com/rekby/fixenv v0.3.2/go.mod h1:/b5LRc06BYJtslRtHKxsPWFT/ySpHV+rWvzTg+XWk4c=
@ -1440,8 +1466,8 @@ github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6g
github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
github.com/samber/lo v1.47.0 h1:z7RynLwP5nbyRscyvcD043DWYoOcYRv3mV8lBeqOCLc=
github.com/samber/lo v1.47.0/go.mod h1:RmDH9Ct32Qy3gduHQuKJ3gW1fMHAnE/fAzQuf6He5cU=
github.com/schollz/progressbar/v3 v3.14.4 h1:W9ZrDSJk7eqmQhd3uxFNNcTr0QL+xuGNI9dEMrw0r74=
github.com/schollz/progressbar/v3 v3.14.4/go.mod h1:aT3UQ7yGm+2ZjeXPqsjTenwL3ddUiuZ0kfQ/2tHlyNI=
github.com/schollz/progressbar/v3 v3.16.0 h1:+MbBim/cE9DqDb8UXRfLJ6RZdyDkXG1BDy/sWc5s0Mc=
github.com/schollz/progressbar/v3 v3.16.0/go.mod h1:lLiKjKJ9/yzc9Q8jk+sVLfxWxgXKsktvUf6TO+4Y2nw=
github.com/seaweedfs/goexif v1.0.3 h1:ve/OjI7dxPW8X9YQsv3JuVMaxEyF9Rvfd04ouL+Bz30=
github.com/seaweedfs/goexif v1.0.3/go.mod h1:Oni780Z236sXpIQzk1XoJlTwqrJ02smEin9zQeff7Fk=
github.com/seaweedfs/raft v1.1.3 h1:5B6hgneQ7IuU4Ceom/f6QUt8pEeqjcsRo+IxlyPZCws=
@ -1523,8 +1549,8 @@ github.com/t3rm1n4l/go-mega v0.0.0-20240219080617-d494b6a8ace7/go.mod h1:suDIky6
github.com/tailscale/depaware v0.0.0-20210622194025-720c4b409502/go.mod h1:p9lPsd+cx33L3H9nNoecRRxPssFKUwwI50I3pZ0yT+8=
github.com/tiancaiamao/gp v0.0.0-20221230034425-4025bc8a4d4a h1:J/YdBZ46WKpXsxsW93SG+q0F8KI+yFrcIDT4c/RNoc4=
github.com/tiancaiamao/gp v0.0.0-20221230034425-4025bc8a4d4a/go.mod h1:h4xBhSNtOeEosLJ4P7JyKXX7Cabg7AVkWCK5gV2vOrM=
github.com/tidwall/gjson v1.17.3 h1:bwWLZU7icoKRG+C+0PNwIKC6FCJO/Q3p2pZvuP0jN94=
github.com/tidwall/gjson v1.17.3/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY=
github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA=
github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs=
@ -1574,14 +1600,14 @@ github.com/yandex-cloud/go-genproto v0.0.0-20211115083454-9ca41db5ed9e h1:9LPdmD
github.com/yandex-cloud/go-genproto v0.0.0-20211115083454-9ca41db5ed9e/go.mod h1:HEUYX/p8966tMUHHT+TsS0hF/Ca/NYwqprC5WXSDMfE=
github.com/ydb-platform/ydb-go-genproto v0.0.0-20221215182650-986f9d10542f/go.mod h1:Er+FePu1dNUieD+XTMDduGpQuCPssK5Q4BjF+IIXJ3I=
github.com/ydb-platform/ydb-go-genproto v0.0.0-20230528143953-42c825ace222/go.mod h1:Er+FePu1dNUieD+XTMDduGpQuCPssK5Q4BjF+IIXJ3I=
github.com/ydb-platform/ydb-go-genproto v0.0.0-20240528144234-5d5a685e41f7 h1:nL8XwD6fSst7xFUirkaWJmE7kM0CdWRYgu6+YQer1d4=
github.com/ydb-platform/ydb-go-genproto v0.0.0-20240528144234-5d5a685e41f7/go.mod h1:Er+FePu1dNUieD+XTMDduGpQuCPssK5Q4BjF+IIXJ3I=
github.com/ydb-platform/ydb-go-genproto v0.0.0-20241022174402-dd276c7f197b h1:8yiv/W+1xTdifJh1Stkck0gFJjys9kg0/r86Buljuss=
github.com/ydb-platform/ydb-go-genproto v0.0.0-20241022174402-dd276c7f197b/go.mod h1:Er+FePu1dNUieD+XTMDduGpQuCPssK5Q4BjF+IIXJ3I=
github.com/ydb-platform/ydb-go-sdk-auth-environ v0.5.0 h1:/NyPd9KnCJgzrEXCArqk1ThqCH2Dh31uUwl88o/VkuM=
github.com/ydb-platform/ydb-go-sdk-auth-environ v0.5.0/go.mod h1:9YzkhlIymWaJGX6KMU3vh5sOf3UKbCXkG/ZdjaI3zNM=
github.com/ydb-platform/ydb-go-sdk/v3 v3.44.0/go.mod h1:oSLwnuilwIpaF5bJJMAofnGgzPJusoI3zWMNb8I+GnM=
github.com/ydb-platform/ydb-go-sdk/v3 v3.47.3/go.mod h1:bWnOIcUHd7+Sl7DN+yhyY1H/I61z53GczvwJgXMgvj0=
github.com/ydb-platform/ydb-go-sdk/v3 v3.77.1 h1:1ICGD18cquyG7/bKQ1ZFdQYSvDvmF7m62b5bIYC8Dkw=
github.com/ydb-platform/ydb-go-sdk/v3 v3.77.1/go.mod h1:IHwuXyolaAmGK2Dp7+dlhsnXphG1pwCoaP/OITT3+tU=
github.com/ydb-platform/ydb-go-sdk/v3 v3.89.2 h1:yVubsh4bTmSk7bc1Gss8K4OUs59eWBfJhxmH/uDtv6g=
github.com/ydb-platform/ydb-go-sdk/v3 v3.89.2/go.mod h1:ah+n4KaEcANFM+CBKnJ5VrmR0IvWfYFvkofW/56076c=
github.com/ydb-platform/ydb-go-yc v0.12.1 h1:qw3Fa+T81+Kpu5Io2vYHJOwcrYrVjgJlT6t/0dOXJrA=
github.com/ydb-platform/ydb-go-yc v0.12.1/go.mod h1:t/ZA4ECdgPWjAb4jyDe8AzQZB5dhpGbi3iCahFaNwBY=
github.com/ydb-platform/ydb-go-yc-metadata v0.6.1 h1:9E5q8Nsy2RiJMZDNVy0A3KUrIMBPakJ2VgloeWbcI84=
@ -1631,6 +1657,8 @@ go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/contrib/detectors/gcp v1.29.0 h1:TiaiXB4DpGD3sdzNlYQxruQngn5Apwzi1X0DRhuGvDQ=
go.opentelemetry.io/contrib/detectors/gcp v1.29.0/go.mod h1:GW2aWZNwR2ZxDLdv8OyC2G8zkRoQBuURgV7RPQgcPoU=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 h1:r6I7RJCN86bpD/FQwedZ0vSixDpwuWREjW9oRMsmqDc=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0/go.mod h1:B9yO6b04uB80CzjedvewuqDhxJxi11s7/GtiGa8bAjI=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk=
@ -1641,6 +1669,8 @@ go.opentelemetry.io/otel/metric v1.29.0 h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2
go.opentelemetry.io/otel/metric v1.29.0/go.mod h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8=
go.opentelemetry.io/otel/sdk v1.29.0 h1:vkqKjk7gwhS8VaWb0POZKmIEDimRCMsopNYnriHyryo=
go.opentelemetry.io/otel/sdk v1.29.0/go.mod h1:pM8Dx5WKnvxLCb+8lG1PRNIDxu9g9b9g59Qr7hfAAok=
go.opentelemetry.io/otel/sdk/metric v1.29.0 h1:K2CfmJohnRgvZ9UAj2/FhIf/okdWcNdBwe1m8xFXiSY=
go.opentelemetry.io/otel/sdk/metric v1.29.0/go.mod h1:6zZLdCl2fkauYoZIOn/soQIDSWFmNSRcICarHfuhNJQ=
go.opentelemetry.io/otel/trace v1.29.0 h1:J/8ZNK4XgR7a21DZUAsbF8pZ5Jcw1VhACmnYt39JTi4=
go.opentelemetry.io/otel/trace v1.29.0/go.mod h1:eHl3w0sp3paPkYstJOmAimxhiFXPg+MMTlEh3nsQgWQ=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
@ -1667,12 +1697,12 @@ go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
gocloud.dev v0.39.0 h1:EYABYGhAalPUaMrbSKOr5lejxoxvXj99nE8XFtsDgds=
gocloud.dev v0.39.0/go.mod h1:drz+VyYNBvrMTW0KZiBAYEdl8lbNZx+OQ7oQvdrFmSQ=
gocloud.dev/pubsub/natspubsub v0.39.0 h1:T5fOoUtlnttE/NrggpJ0mutN8ui6qd6+ys2Dmq9h1L8=
gocloud.dev/pubsub/natspubsub v0.39.0/go.mod h1:UsxWgxP2/Qv2U5pcN0Y3EHbbjuqUxuqeP7H8ALEnCrk=
gocloud.dev/pubsub/rabbitpubsub v0.39.0 h1:ArMHrrehu5IvvemBVDX235BsARgiEv+KhB26Fr7kOm0=
gocloud.dev/pubsub/rabbitpubsub v0.39.0/go.mod h1:t9Dl2UcBJO9MtrcXlhvR77ebOZ4sLJSfY26uxFoIX7s=
gocloud.dev v0.40.0 h1:f8LgP+4WDqOG/RXoUcyLpeIAGOcAbZrZbDQCUee10ng=
gocloud.dev v0.40.0/go.mod h1:drz+VyYNBvrMTW0KZiBAYEdl8lbNZx+OQ7oQvdrFmSQ=
gocloud.dev/pubsub/natspubsub v0.40.0 h1:KUDqVV1i724df37v2F6EN+bg02WvekUNYFqLKhgBm94=
gocloud.dev/pubsub/natspubsub v0.40.0/go.mod h1:yjUqZCV4qaLjoUlxapY4kja49uMr0L5mxug+3SEZDTk=
gocloud.dev/pubsub/rabbitpubsub v0.40.0 h1:Gz2otW0qiYAA+A3gIgrrXXIyAiv7euoRmXB8LYCVKSs=
gocloud.dev/pubsub/rabbitpubsub v0.40.0/go.mod h1:Z8fouI+E7u3sxuOX/Tq1FpRWl36SF5+iE361SJTjuGQ=
golang.org/x/arch v0.8.0 h1:3wRIsP3pM4yUptoR96otTUOXI367OS0+c9eeRi9doIc=
golang.org/x/arch v0.8.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
@ -1697,8 +1727,8 @@ golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A=
golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70=
golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw=
golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@ -1729,8 +1759,8 @@ golang.org/x/image v0.0.0-20210607152325-775e3b0c77b9/go.mod h1:023OzeP/+EPmXeap
golang.org/x/image v0.0.0-20210628002857-a66eb6448b8d/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM=
golang.org/x/image v0.0.0-20211028202545-6944b10bf410/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM=
golang.org/x/image v0.0.0-20220302094943-723b81ca9867/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM=
golang.org/x/image v0.18.0 h1:jGzIakQa/ZXI1I0Fxvaa9W7yP25TqT6cHIHn+6CqvSQ=
golang.org/x/image v0.18.0/go.mod h1:4yyo5vMFQjVjUcVk4jEQcU9MGy/rulF5WvUILseCM2E=
golang.org/x/image v0.21.0 h1:c5qV36ajHpdj4Qi0GnE0jUc/yuo33OLFaa0d+crTD5s=
golang.org/x/image v0.21.0/go.mod h1:vUbsLavqK/W303ZroQQVKQ+Af3Yl6Uz1Ppu5J/cLz78=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@ -1836,8 +1866,8 @@ golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
golang.org/x/net v0.16.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo=
golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0=
golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4=
golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@ -1985,9 +2015,8 @@ golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34=
golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo=
golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@ -2001,9 +2030,8 @@ golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=
golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM=
golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8=
golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24=
golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@ -2022,16 +2050,16 @@ golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224=
golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM=
golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U=
golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ=
golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@ -2102,8 +2130,8 @@ golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s=
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg=
golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps=
golang.org/x/tools v0.25.0 h1:oFU9pkj/iJgs+0DT+VMHrx+oBKs/LJMV+Uvg78sl+fE=
golang.org/x/tools v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg=
golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ=
golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@ -2177,8 +2205,8 @@ google.golang.org/api v0.106.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/
google.golang.org/api v0.107.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY=
google.golang.org/api v0.108.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY=
google.golang.org/api v0.110.0/go.mod h1:7FC4Vvx1Mooxh8C5HWjzZHcavuS2f6pmJpZx60ca7iI=
google.golang.org/api v0.198.0 h1:OOH5fZatk57iN0A7tjJQzt6aPfYQ1JiWkt1yGseazks=
google.golang.org/api v0.198.0/go.mod h1:/Lblzl3/Xqqk9hw/yS97TImKTUwnf1bv89v7+OagJzc=
google.golang.org/api v0.203.0 h1:SrEeuwU3S11Wlscsn+LA1kb/Y5xT8uggJSkIhD08NAU=
google.golang.org/api v0.203.0/go.mod h1:BuOVyCSYEPwJb3npWvDnNmFI92f3GeRnHNkETneT3SI=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@ -2312,12 +2340,12 @@ google.golang.org/genproto v0.0.0-20230209215440-0dfe4f8abfcc/go.mod h1:RGgjbofJ
google.golang.org/genproto v0.0.0-20230216225411-c8e22ba71e44/go.mod h1:8B0gmkoRebU8ukX6HP+4wrVQUY1+6PkQ44BSyIlflHA=
google.golang.org/genproto v0.0.0-20230222225845-10f96fb3dbec/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw=
google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s=
google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 h1:BulPr26Jqjnd4eYDVe+YvyR7Yc2vJGkO5/0UxD0/jZU=
google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:hL97c3SYopEHblzpxRL4lSs523++l8DYxGM1FQiYmb4=
google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 h1:hjSy6tcFQZ171igDaN5QHOw2n6vx40juYbC/x67CEhc=
google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:qpvKtACPCQhAdu3PyQgV4l3LMXZEtft7y8QcarRsp9I=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
google.golang.org/genproto v0.0.0-20241015192408-796eee8c2d53 h1:Df6WuGvthPzc+JiQ/G+m+sNX24kc0aTBqoDN/0yyykE=
google.golang.org/genproto v0.0.0-20241015192408-796eee8c2d53/go.mod h1:fheguH3Am2dGp1LfXkrvwqC/KlFq8F0nLq3LryOMrrE=
google.golang.org/genproto/googleapis/api v0.0.0-20241007155032-5fefd90f89a9 h1:T6rh4haD3GVYsgEfWExoCZA2o2FmbNyKpTuAxbEFPTg=
google.golang.org/genproto/googleapis/api v0.0.0-20241007155032-5fefd90f89a9/go.mod h1:wp2WsuBYj6j8wUdo3ToZsdxxixbvQNAHqVJrTgi5E5M=
google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 h1:X58yt85/IXCx0Y3ZwN6sEIKZzQtDEYaBWrDvErdXrRE=
google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
@ -2358,13 +2386,15 @@ google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsA
google.golang.org/grpc v1.52.0/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY=
google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw=
google.golang.org/grpc v1.55.0/go.mod h1:iYEXKGkEBhg1PjZQvoYEVPTDkHo1/bjTnfwTeGONTY8=
google.golang.org/grpc v1.66.2 h1:3QdXkuq3Bkh7w+ywLdLvM56cmGvQHUMZpiCzt6Rqaoo=
google.golang.org/grpc v1.66.2/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y=
google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E=
google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA=
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
google.golang.org/grpc/examples v0.0.0-20201112215255-90f1b3ee835b h1:NuxyvVZoDfHZwYW9LD4GJiF5/nhiSyP4/InTrvw9Ibk=
google.golang.org/grpc/examples v0.0.0-20201112215255-90f1b3ee835b/go.mod h1:IBqQ7wSUJ2Ep09a8rMWFsg4fmI2r38zwsq8a0GgxXpM=
google.golang.org/grpc/security/advancedtls v1.0.0 h1:/KQ7VP/1bs53/aopk9QhuPyFAp9Dm9Ejix3lzYkCrDA=
google.golang.org/grpc/security/advancedtls v1.0.0/go.mod h1:o+s4go+e1PJ2AjuQMY5hU82W7lDlefjJA6FqEHRVHWk=
google.golang.org/grpc/stats/opentelemetry v0.0.0-20240907200651-3ffb98b2c93a h1:UIpYSuWdWHSzjwcAFRLjKcPXFZVVLXGEM23W+NWqipw=
google.golang.org/grpc/stats/opentelemetry v0.0.0-20240907200651-3ffb98b2c93a/go.mod h1:9i1T9n4ZinTUZGgzENMi8MDDgbGC5mqTS75JAv6xN3A=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@ -2381,8 +2411,8 @@ google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA=
google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@ -2432,15 +2462,21 @@ modernc.org/b v1.0.0/go.mod h1:uZWcZfRj1BpYzfN9JTerzlNUnnPsV9O2ZA8JsRcubNg=
modernc.org/cc/v3 v3.36.0/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI=
modernc.org/cc/v3 v3.36.2/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI=
modernc.org/cc/v3 v3.36.3/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI=
modernc.org/cc/v4 v4.21.4 h1:3Be/Rdo1fpr8GrQ7IVw9OHtplU4gWbb+wNgeoBMmGLQ=
modernc.org/cc/v4 v4.21.4/go.mod h1:HM7VJTZbUCR3rV8EYBi9wxnJ0ZBRiGE5OeGXNA0IsLQ=
modernc.org/ccgo/v3 v3.0.0-20220428102840-41399a37e894/go.mod h1:eI31LL8EwEBKPpNpA4bU1/i+sKOwOrQy8D87zWUcRZc=
modernc.org/ccgo/v3 v3.0.0-20220430103911-bc99d88307be/go.mod h1:bwdAnOoaIt8Ax9YdWGjxWsdkPcZyRPHqrOvJxaKAKGw=
modernc.org/ccgo/v3 v3.16.4/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ=
modernc.org/ccgo/v3 v3.16.6/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ=
modernc.org/ccgo/v3 v3.16.8/go.mod h1:zNjwkizS+fIFDrDjIAgBSCLkWbJuHF+ar3QRn+Z9aws=
modernc.org/ccgo/v3 v3.16.9/go.mod h1:zNMzC9A9xeNUepy6KuZBbugn3c0Mc9TeiJO4lgvkJDo=
modernc.org/ccgo/v4 v4.19.2 h1:lwQZgvboKD0jBwdaeVCTouxhxAyN6iawF3STraAal8Y=
modernc.org/ccgo/v4 v4.19.2/go.mod h1:ysS3mxiMV38XGRTTcgo0DQTeTmAO4oCmJl1nX9VFI3s=
modernc.org/ccorpus v1.11.6/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ=
modernc.org/fileutil v1.3.0 h1:gQ5SIzK3H9kdfai/5x41oQiKValumqNTDXMvKo62HvE=
modernc.org/fileutil v1.3.0/go.mod h1:XatxS8fZi3pS8/hKG2GH/ArUogfxjpEKs3Ku3aK4JyQ=
modernc.org/gc/v2 v2.4.1 h1:9cNzOqPyMJBvrUipmynX0ZohMhcxPtMccYgGOJdOiBw=
modernc.org/gc/v2 v2.4.1/go.mod h1:wzN5dK1AzVGoH6XOzc3YZ+ey/jPgYHLuVckd62P0GYU=
modernc.org/gc/v3 v3.0.0-20240107210532-573471604cb6 h1:5D53IMaUuA5InSeMu9eJtlQXS2NxAhyWQvkKEgXZhHI=
modernc.org/gc/v3 v3.0.0-20240107210532-573471604cb6/go.mod h1:Qz0X07sNOR1jWYCrJMEnbW/X55x206Q7Vt4mz6/wHp4=
modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM=
@ -2451,6 +2487,8 @@ modernc.org/libc v1.16.17/go.mod h1:hYIV5VZczAmGZAnG15Vdngn5HSF5cSkbvfz2B7GRuVU=
modernc.org/libc v1.16.19/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA=
modernc.org/libc v1.17.0/go.mod h1:XsgLldpP4aWlPlsjqKRdHPqCxCjISdHfM/yeWC5GyW0=
modernc.org/libc v1.17.1/go.mod h1:FZ23b+8LjxZs7XtFMbSzL/EhPxNbfZbErxEHc7cbD9s=
modernc.org/libc v1.55.3 h1:AzcW1mhlPNrRtjS5sS+eW2ISCgSOLLNyFzRh/V3Qj/U=
modernc.org/libc v1.55.3/go.mod h1:qFXepLhz+JjFThQ4kzwzOjA/y/artDeg+pcYnY+Q83w=
modernc.org/mathutil v1.1.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
@ -2463,10 +2501,13 @@ modernc.org/memory v1.2.1/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU=
modernc.org/memory v1.8.0 h1:IqGTL6eFMaDZZhEWwcREgeMXYwmW83LYW8cROZYkg+E=
modernc.org/memory v1.8.0/go.mod h1:XPZ936zp5OMKGWPqbD3JShgd/ZoQ7899TUuQqxY+peU=
modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0=
modernc.org/opt v0.1.3 h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4=
modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0=
modernc.org/sortutil v1.2.0 h1:jQiD3PfS2REGJNzNCMMaLSp/wdMNieTbKX920Cqdgqc=
modernc.org/sortutil v1.2.0/go.mod h1:TKU2s7kJMf1AE84OoiGppNHJwvB753OYfNl2WRb++Ss=
modernc.org/sqlite v1.18.1/go.mod h1:6ho+Gow7oX5V+OiOQ6Tr4xeqbx13UZ6t+Fw9IRUG4d4=
modernc.org/sqlite v1.33.0 h1:WWkA/T2G17okiLGgKAj4/RMIvgyMT19yQ038160IeYk=
modernc.org/sqlite v1.33.0/go.mod h1:9uQ9hF/pCZoYZK73D/ud5Z7cIRIILSZI8NdIemVMTX8=
modernc.org/sqlite v1.33.1 h1:trb6Z3YYoeM9eDL1O8do81kP+0ejv+YzgyFo+Gwy0nM=
modernc.org/sqlite v1.33.1/go.mod h1:pXV2xHxhzXZsgT/RtTFAPY6JJDEvOTcTdwADQCCWD4k=
modernc.org/strutil v1.1.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs=
modernc.org/strutil v1.1.1/go.mod h1:DE+MQQ/hjKBZS2zNInV5hhcipt5rLPWkmpbGeW5mmdw=
modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw=

View File

@ -1,6 +1,6 @@
apiVersion: v1
description: SeaweedFS
name: seaweedfs
appVersion: "3.73"
appVersion: "3.79"
# Dev note: Trigger a helm chart release by `git tag -a helm-<version>`
version: 4.0.1
version: 4.0.379

View File

@ -10,6 +10,10 @@ metadata:
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: filer
{{- if .Values.filer.annotations }}
annotations:
{{- toYaml .Values.filer.annotations | nindent 4 }}
{{- end }}
spec:
secretName: {{ template "seaweedfs.name" . }}-filer-cert
issuerRef:

View File

@ -13,6 +13,10 @@ metadata:
{{- if .Values.filer.metricsPort }}
monitoring: "true"
{{- end }}
{{- if .Values.filer.annotations }}
annotations:
{{- toYaml .Values.filer.annotations | nindent 4 }}
{{- end }}
spec:
clusterIP: None
ports:

View File

@ -12,6 +12,10 @@ metadata:
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: filer
{{- if .Values.filer.annotations }}
annotations:
{{- toYaml .Values.filer.annotations | nindent 4 }}
{{- end }}
spec:
clusterIP: None
publishNotReadyAddresses: true

View File

@ -15,6 +15,10 @@ metadata:
{{- with .Values.global.monitoring.additionalLabels }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- if .Values.filer.annotations }}
annotations:
{{- toYaml .Values.filer.annotations | nindent 4 }}
{{- end }}
spec:
endpoints:
- interval: 30s

View File

@ -10,6 +10,10 @@ metadata:
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: filer
{{- if .Values.filer.annotations }}
annotations:
{{- toYaml .Values.filer.annotations | nindent 4 }}
{{- end }}
spec:
serviceName: {{ template "seaweedfs.name" . }}-filer
podManagementPolicy: {{ .Values.filer.podManagementPolicy }}
@ -57,6 +61,10 @@ spec:
affinity:
{{ tpl .Values.filer.affinity . | nindent 8 | trim }}
{{- end }}
{{- with .Values.filer.topologySpreadConstraints }}
topologySpreadConstraints:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- if .Values.filer.tolerations }}
tolerations:
{{ tpl .Values.filer.tolerations . | nindent 8 | trim }}
@ -182,6 +190,12 @@ spec:
{{- if .Values.filer.filerGroup}}
-filerGroup={{ .Values.filer.filerGroup}} \
{{- end }}
{{- if .Values.filer.rack }}
-rack={{ .Values.filer.rack }} \
{{- end }}
{{- if .Values.filer.dataCenter }}
-dataCenter={{ .Values.filer.dataCenter }} \
{{- end }}
{{- if .Values.filer.s3.enabled }}
-s3 \
-s3.port={{ .Values.filer.s3.port }} \
@ -220,6 +234,12 @@ spec:
- name: data-filer
mountPath: /data
{{- end }}
{{- if .Values.filer.notificationConfig }}
- name: notification-config
readOnly: true
mountPath: /etc/seaweedfs/notification.toml
subPath: notification.toml
{{- end }}
{{- if .Values.global.enableSecurity }}
- name: security-config
readOnly: true
@ -335,6 +355,11 @@ spec:
secretName: seaweedfs-s3-secret
{{- end }}
{{- end }}
{{- if .Values.filer.notificationConfig }}
- name: notification-config
configMap:
name: {{ template "seaweedfs.name" . }}-notification-config
{{- end }}
{{- if .Values.global.enableSecurity }}
- name: security-config
configMap:

View File

@ -10,6 +10,10 @@ metadata:
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: master
{{- if .Values.master.annotations }}
annotations:
{{- toYaml .Values.master.annotations | nindent 4 }}
{{- end }}
spec:
secretName: {{ template "seaweedfs.name" . }}-master-cert
issuerRef:

View File

@ -9,6 +9,10 @@ metadata:
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- if .Values.master.annotations }}
annotations:
{{- toYaml .Values.master.annotations | nindent 4 }}
{{- end }}
data:
master.toml: |-
{{ .Values.master.config | nindent 4 }}

View File

@ -11,6 +11,9 @@ metadata:
app.kubernetes.io/managed-by: {{ .Release.Service }}
annotations:
service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
{{- if .Values.master.annotations }}
{{- toYaml .Values.master.annotations | nindent 4 }}
{{- end }}
spec:
clusterIP: None
publishNotReadyAddresses: true

View File

@ -15,6 +15,10 @@ metadata:
{{- with .Values.global.monitoring.additionalLabels }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- if .Values.master.annotations }}
annotations:
{{- toYaml .Values.master.annotations | nindent 4 }}
{{- end }}
spec:
endpoints:
- interval: 30s

View File

@ -9,6 +9,10 @@ metadata:
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- if .Values.master.annotations }}
annotations:
{{- toYaml .Values.master.annotations | nindent 4 }}
{{- end }}
spec:
serviceName: {{ template "seaweedfs.name" . }}-master
podManagementPolicy: {{ .Values.master.podManagementPolicy }}
@ -50,6 +54,10 @@ spec:
affinity:
{{ tpl .Values.master.affinity . | nindent 8 | trim }}
{{- end }}
{{- with .Values.master.topologySpreadConstraints }}
topologySpreadConstraints:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- if .Values.master.tolerations }}
tolerations:
{{ tpl .Values.master.tolerations . | nindent 8 | trim }}

View File

@ -0,0 +1,19 @@
{{- if and .Values.filer.enabled .Values.filer.notificationConfig }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "seaweedfs.name" . }}-notification-config
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- if .Values.filer.annotations }}
annotations:
{{- toYaml .Values.filer.annotations | nindent 4 }}
{{- end }}
data:
notification.toml: |-
{{ .Values.filer.notificationConfig | nindent 4 }}
{{- end }}

View File

@ -9,12 +9,15 @@ metadata:
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- if .Values.s3.annotations }}
annotations:
{{- toYaml .Values.s3.annotations | nindent 4 }}
{{- end }}
spec:
replicas: {{ .Values.s3.replicas }}
selector:
matchLabels:
app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: s3
template:

View File

@ -9,6 +9,10 @@ metadata:
app.kubernetes.io/component: s3
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- if .Values.s3.annotations }}
annotations:
{{- toYaml .Values.s3.annotations | nindent 4 }}
{{- end }}
spec:
internalTrafficPolicy: {{ .Values.s3.internalTrafficPolicy | default "Cluster" }}
ports:

View File

@ -15,6 +15,10 @@ metadata:
{{- with .Values.global.monitoring.additionalLabels }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- if .Values.s3.annotations }}
annotations:
{{- toYaml .Values.s3.annotations | nindent 4 }}
{{- end }}
spec:
endpoints:
- interval: 30s

View File

@ -10,6 +10,10 @@ metadata:
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: volume
{{- if .Values.volume.annotations }}
annotations:
{{- toYaml .Values.volume.annotations | nindent 4 }}
{{- end }}
spec:
secretName: {{ template "seaweedfs.name" . }}-volume-cert
issuerRef:

View File

@ -9,6 +9,10 @@ metadata:
app.kubernetes.io/component: volume
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- if .Values.volume.annotations }}
annotations:
{{- toYaml .Values.volume.annotations | nindent 4 }}
{{- end }}
spec:
clusterIP: None
internalTrafficPolicy: {{ .Values.volume.internalTrafficPolicy | default "Cluster" }}

View File

@ -15,6 +15,10 @@ metadata:
{{- with .Values.global.monitoring.additionalLabels }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- if .Values.volume.annotations }}
annotations:
{{- toYaml .Values.volume.annotations | nindent 4 }}
{{- end }}
spec:
endpoints:
- interval: 30s

View File

@ -9,6 +9,10 @@ metadata:
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- if .Values.volume.annotations }}
annotations:
{{- toYaml .Values.volume.annotations | nindent 4 }}
{{- end }}
spec:
serviceName: {{ template "seaweedfs.name" . }}-volume
replicas: {{ .Values.volume.replicas }}
@ -43,6 +47,10 @@ spec:
affinity:
{{ tpl .Values.volume.affinity . | nindent 8 | trim }}
{{- end }}
{{- with .Values.volume.topologySpreadConstraints }}
topologySpreadConstraints:
{{- toYaml . | nindent 8 }}
{{- end }}
restartPolicy: {{ default .Values.global.restartPolicy .Values.volume.restartPolicy }}
{{- if .Values.volume.tolerations }}
tolerations:
@ -146,7 +154,9 @@ spec:
{{- if .Values.volume.idx }}
-dir.idx=/idx \
{{- end }}
-max {{range $index, $dir := .Values.volume.dataDirs }}{{if ne $index 0}},{{end}}{{$dir.maxVolumes}}{{end}} \
-max {{range $index, $dir := .Values.volume.dataDirs }}{{if ne $index 0}},{{end}}
{{- if eq ($dir.maxVolumes | toString) "0" }}0{{ else if not $dir.maxVolumes }}7{{ else }}{{$dir.maxVolumes}}{{ end }}
{{- end }} \
{{- if .Values.volume.rack }}
-rack={{ .Values.volume.rack }} \
{{- end }}
@ -176,9 +186,11 @@ spec:
-mserver={{ if .Values.global.masterServer }}{{.Values.global.masterServer}}{{ else }}{{ range $index := until (.Values.master.replicas | int) }}${SEAWEEDFS_FULLNAME}-master-{{ $index }}.${SEAWEEDFS_FULLNAME}-master.{{ $.Release.Namespace }}:{{ $.Values.master.port }}{{ if lt $index (sub ($.Values.master.replicas | int) 1) }},{{ end }}{{ end }}{{ end }}
volumeMounts:
{{- range $dir := .Values.volume.dataDirs }}
{{- if not ( eq $dir.type "custom" ) }}
- name: {{ $dir.name }}
mountPath: "/{{ $dir.name }}/"
{{- end }}
{{- end }}
{{- if .Values.volume.logs }}
- name: logs
mountPath: "/logs/"

View File

@ -140,6 +140,9 @@ master:
# Annotations to be added to the master pods
podAnnotations: {}
# Annotations to be added to the master resources
annotations: {}
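For reference, a release's values file could populate this key (the same annotations field is also added for filer, volume, and s3 below), which the new template blocks render onto the corresponding master resources; the annotation names here are illustrative only, not defined by the chart:

master:
  annotations:
    example.com/owner: "storage-team"      # illustrative annotation key
    example.com/change-ticket: "CHG-1234"  # illustrative annotation key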
## Set podManagementPolicy
podManagementPolicy: Parallel
@ -166,6 +169,11 @@ master:
app.kubernetes.io/component: master
topologyKey: kubernetes.io/hostname
# Topology Spread Constraints Settings
# This should map directly to the value of the topologySpreadConstraints
# for a PodSpec. By default no constraints are set.
topologySpreadConstraints: {}
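Since this maps one-to-one onto a PodSpec's topologySpreadConstraints (the same key is added for the volume and filer sections below), a minimal sketch of a values entry, assuming the chart's component label is used for the selector:

master:
  topologySpreadConstraints:
    - maxSkew: 1
      topologyKey: kubernetes.io/hostname
      whenUnsatisfiable: ScheduleAnyway
      labelSelector:
        matchLabels:
          app.kubernetes.io/component: master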
# Toleration Settings for master pods
# This should be a multi-line string matching the Toleration array
# in a PodSpec.
@ -301,6 +309,12 @@ volume:
# - name: data
# type: "emptyDir"
# maxVolumes: 0 # If set to zero on non-windows OS, the limit will be auto configured. (default "7")
#
# If these don't meet your needs, you can use "custom" here along with extraVolumes and extraVolumeMounts.
# Particularly useful when using more than one volume server replica.
# - name: data
# type: "custom"
# maxVolumes: 0 # If set to zero on non-windows OS, the limit will be auto configured. (default "7")
dataDirs:
- name: data1
@ -381,6 +395,15 @@ volume:
sidecars: []
initContainers: ""
# Example for use when using more than 1 volume server replica
# extraVolumeMounts: |
# - name: drive
# mountPath: /drive
# subPathExpr: $(POD_NAME)
# extraVolumes: |
# - name: drive
# hostPath:
# path: /var/mnt/
extraVolumes: ""
extraVolumeMounts: ""
@ -390,6 +413,9 @@ volume:
# Annotations to be added to the volume pods
podAnnotations: {}
# Annotations to be added to the volume resources
annotations: {}
## Set podManagementPolicy
podManagementPolicy: Parallel
@ -406,6 +432,11 @@ volume:
app.kubernetes.io/component: volume
topologyKey: kubernetes.io/hostname
# Topology Spread Constraints Settings
# This should map directly to the value of the topologySpreadConstraints
# for a PodSpec. By default no constraints are set.
topologySpreadConstraints: {}
# Resource requests, limits, etc. for the server cluster placement. This
# should map directly to the value of the resources field for a PodSpec,
# formatted as a multi-line string. By default no direct resource request
@ -490,6 +521,10 @@ filer:
metricsPort: 9327
loggingOverrideLevel: null
filerGroup: ""
# prefer to read and write to volumes in this data center (not set by default)
dataCenter: null
# prefer to write to volumes in this rack (not set by default)
rack: null
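These feed the new -dataCenter and -rack flags added to the filer statefulset template above; a sketch with placeholder topology names:

filer:
  dataCenter: dc1   # placeholder data center name
  rack: rack1       # placeholder rack name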
# replication type is XYZ:
# X number of replica in other data centers
# Y number of replica in other racks in the same data center
@ -511,6 +546,19 @@ filer:
# Disable http request, only gRpc operations are allowed
disableHttp: false
# Add a custom notification.toml to configure filer notifications
# Example:
# notificationConfig: |-
# [notification.kafka]
# enabled = false
# hosts = [
# "localhost:9092"
# ]
# topic = "seaweedfs_filer"
# offsetFile = "./last.offset"
# offsetSaveIntervalSeconds = 10
notificationConfig: ""
# DEPRECATE: enablePVC, storage, storageClass
# Consider replacing with filer.data section below instead.
@ -584,6 +632,9 @@ filer:
# Annotations to be added to the filer pods
podAnnotations: {}
# Annotations to be added to the filer resources
annotations: {}
## Set podManagementPolicy
podManagementPolicy: Parallel
@ -600,6 +651,11 @@ filer:
app.kubernetes.io/component: filer
topologyKey: kubernetes.io/hostname
# Topology Spread Constraints Settings
# This should map directly to the value of the topologySpreadConstraints
# for a PodSpec. By default no constraints are set.
topologySpreadConstraints: {}
# updatePartition is used to control a careful rolling update of SeaweedFS
# masters.
updatePartition: 0
@ -673,7 +729,7 @@ filer:
sub_filter '/seaweedfsstatic' './seaweedfsstatic';
sub_filter_once off;
# extraEnvVars is a list of extra enviroment variables to set with the stateful set.
# extraEnvVars is a list of extra environment variables to set with the stateful set.
extraEnvironmentVars:
WEED_MYSQL_ENABLED: "false"
WEED_MYSQL_HOSTNAME: "mysql-db-host"
@ -798,6 +854,9 @@ s3:
# Annotations to be added to the s3 pods
podAnnotations: {}
# Annotations to be added to the s3 resources
annotations: {}
# Resource requests, limits, etc. for the server cluster placement. This
# should map directly to the value of the resources field for a PodSpec,
# formatted as a multi-line string. By default no direct resource request
@ -895,7 +954,7 @@ s3:
# For more information, visit: https://container-object-storage-interface.github.io/docs/deployment-guide
cosi:
enabled: false
image: "ghcr.io/seaweedfs/seaweedfs-cosi-driver:v0.1.1"
image: "ghcr.io/seaweedfs/seaweedfs-cosi-driver:v0.1.2"
driverName: "seaweedfs.objectstorage.k8s.io"
bucketClassName: "seaweedfs"
endpoint: ""

View File

@ -105,7 +105,7 @@
"steppedLine": false,
"targets": [
{
"expr": "histogram_quantile(0.90, sum(rate(SeaweedFS_filer_request_seconds_bucket[1m])) by (le))",
"expr": "histogram_quantile(0.90, sum(rate(SeaweedFS_filerStore_request_seconds_bucket[1m])) by (le))",
"format": "time_series",
"hide": false,
"intervalFactor": 2,
@ -114,7 +114,7 @@
"step": 60
},
{
"expr": "histogram_quantile(0.90, sum(rate(SeaweedFS_filer_request_seconds_bucket[1m])) by (le, type))",
"expr": "histogram_quantile(0.90, sum(rate(SeaweedFS_filerStore_request_seconds_bucket[1m])) by (le, type))",
"format": "time_series",
"hide": false,
"intervalFactor": 2,
@ -200,7 +200,7 @@
"steppedLine": false,
"targets": [
{
"expr": "histogram_quantile(0.95, sum(rate(SeaweedFS_filer_request_seconds_bucket[1m])) by (le))",
"expr": "histogram_quantile(0.95, sum(rate(SeaweedFS_filerStore_request_seconds_bucket[1m])) by (le))",
"format": "time_series",
"hide": false,
"intervalFactor": 2,
@ -209,7 +209,7 @@
"step": 60
},
{
"expr": "histogram_quantile(0.95, sum(rate(SeaweedFS_filer_request_seconds_bucket[1m])) by (le, type))",
"expr": "histogram_quantile(0.95, sum(rate(SeaweedFS_filerStore_request_seconds_bucket[1m])) by (le, type))",
"format": "time_series",
"hide": false,
"intervalFactor": 2,
@ -301,7 +301,7 @@
"steppedLine": false,
"targets": [
{
"expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_filer_request_seconds_bucket[1m])) by (le))",
"expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_filerStore_request_seconds_bucket[1m])) by (le))",
"format": "time_series",
"hide": false,
"intervalFactor": 2,
@ -310,7 +310,7 @@
"step": 60
},
{
"expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_filer_request_seconds_bucket[1m])) by (le, type))",
"expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_filerStore_request_seconds_bucket[1m])) by (le, type))",
"format": "time_series",
"hide": false,
"intervalFactor": 2,
@ -415,7 +415,7 @@
"steppedLine": false,
"targets": [
{
"expr": "rate(SeaweedFS_filer_request_total[1m])",
"expr": "rate(SeaweedFS_filerStore_request_total[1m])",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{type}}",

View File

@ -21,8 +21,8 @@ import (
"github.com/seaweedfs/seaweedfs/weed/operation"
"github.com/seaweedfs/seaweedfs/weed/security"
"github.com/seaweedfs/seaweedfs/weed/util"
"github.com/seaweedfs/seaweedfs/weed/wdclient"
util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
"github.com/seaweedfs/seaweedfs/weed/wdclient"
)
type BenchmarkOptions struct {
@ -242,7 +242,7 @@ func writeFiles(idChan chan int, fileIdLineChan chan string, s *stat) {
DiskType: *b.diskType,
}
if assignResult, err := operation.Assign(b.masterClient.GetMaster, b.grpcDialOption, ar); err == nil {
fp.Server, fp.Fid, fp.Collection = assignResult.Url, assignResult.Fid, *b.collection
fp.Server, fp.Fid, fp.Pref.Collection = assignResult.Url, assignResult.Fid, *b.collection
if !isSecure && assignResult.Auth != "" {
isSecure = true
}

View File

@ -22,7 +22,6 @@ import (
"github.com/seaweedfs/seaweedfs/weed/storage/needle"
"github.com/seaweedfs/seaweedfs/weed/util"
"github.com/seaweedfs/seaweedfs/weed/util/grace"
"github.com/seaweedfs/seaweedfs/weed/wdclient"
)
var (
@ -37,7 +36,6 @@ type CopyOptions struct {
ttl *string
diskType *string
maxMB *int
masterClient *wdclient.MasterClient
concurrentFiles *int
concurrentChunks *int
grpcDialOption grpc.DialOption

View File

@ -248,6 +248,7 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
Cipher: cipher,
UidGidMapper: uidGidMapper,
DisableXAttr: *option.disableXAttr,
IsMacOs: runtime.GOOS == "darwin",
})
// create mount root

View File

@ -1,5 +1,5 @@
//go:build elastic && ydb && gocdk && tikv
// +build elastic,ydb,gocdk,tikv
//go:build elastic && gocdk && rclone && sqlite && tikv && ydb
// +build elastic,gocdk,rclone,sqlite,tikv,ydb
package command

View File

@ -97,7 +97,14 @@ func runUpload(cmd *Command, args []string) bool {
if e != nil {
return e
}
results, e := operation.SubmitFiles(func(_ context.Context) pb.ServerAddress { return pb.ServerAddress(*upload.master) }, grpcDialOption, parts, *upload.replication, *upload.collection, *upload.dataCenter, *upload.ttl, *upload.diskType, *upload.maxMB, *upload.usePublicUrl)
results, e := operation.SubmitFiles(func(_ context.Context) pb.ServerAddress { return pb.ServerAddress(*upload.master) }, grpcDialOption, parts, operation.StoragePreference{
Replication: *upload.replication,
Collection: *upload.collection,
DataCenter: *upload.dataCenter,
Ttl: *upload.ttl,
DiskType: *upload.diskType,
MaxMB: *upload.maxMB,
}, *upload.usePublicUrl)
bytes, _ := json.Marshal(results)
fmt.Println(string(bytes))
if e != nil {
@ -119,7 +126,14 @@ func runUpload(cmd *Command, args []string) bool {
fmt.Println(e.Error())
return false
}
results, err := operation.SubmitFiles(func(_ context.Context) pb.ServerAddress { return pb.ServerAddress(*upload.master) }, grpcDialOption, parts, *upload.replication, *upload.collection, *upload.dataCenter, *upload.ttl, *upload.diskType, *upload.maxMB, *upload.usePublicUrl)
results, err := operation.SubmitFiles(func(_ context.Context) pb.ServerAddress { return pb.ServerAddress(*upload.master) }, grpcDialOption, parts, operation.StoragePreference{
Replication: *upload.replication,
Collection: *upload.collection,
DataCenter: *upload.dataCenter,
Ttl: *upload.ttl,
DiskType: *upload.diskType,
MaxMB: *upload.maxMB,
}, *upload.usePublicUrl)
if err != nil {
fmt.Println(err.Error())
return false

View File

@ -1,8 +1,9 @@
package filer
import (
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"sync"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
)
const SectionSize = 2 * 1024 * 1024 * 32 // 64MiB
@ -62,11 +63,6 @@ func removeGarbageChunks(section *FileChunkSection, garbageFileIds map[string]st
}
func (section *FileChunkSection) setupForRead(group *ChunkGroup, fileSize int64) {
if section.isPrepared {
section.reader.fileSize = fileSize
return
}
section.lock.Lock()
defer section.lock.Unlock()

View File

@ -4,15 +4,16 @@ import (
"container/heap"
"context"
"fmt"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/util/log_buffer"
"github.com/seaweedfs/seaweedfs/weed/wdclient"
"google.golang.org/protobuf/proto"
"io"
"math"
"strings"
"time"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/util/log_buffer"
"github.com/seaweedfs/seaweedfs/weed/wdclient"
"google.golang.org/protobuf/proto"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/util"
)
@ -39,6 +40,19 @@ func (f *Filer) collectPersistedLogBuffer(startPosition log_buffer.MessagePositi
}
func (f *Filer) HasPersistedLogFiles(startPosition log_buffer.MessagePosition) (bool, error) {
startDate := fmt.Sprintf("%04d-%02d-%02d", startPosition.Year(), startPosition.Month(), startPosition.Day())
dayEntries, _, listDayErr := f.ListDirectoryEntries(context.Background(), SystemLogDir, startDate, true, 1, "", "", "")
if listDayErr != nil {
return false, fmt.Errorf("fail to list log by day: %v", listDayErr)
}
if len(dayEntries) == 0 {
return false, nil
}
return true, nil
}
// ----------
type LogEntryItem struct {
Entry *filer_pb.LogEntry
@ -103,7 +117,7 @@ func (o *OrderedLogVisitor) GetNext() (logEntry *filer_pb.LogEntry, err error) {
if nextErr != nil {
if nextErr == io.EOF {
// do nothing since the filer has no more log entries
}else {
} else {
return nil, fmt.Errorf("failed to get next log entry: %v", nextErr)
}
} else {
@ -230,7 +244,7 @@ func (c *LogFileEntryCollector) collectMore(v *OrderedLogVisitor) (err error) {
if nextErr != nil {
if nextErr == io.EOF {
// do nothing since the filer has no more log entries
}else {
} else {
return fmt.Errorf("failed to get next log entry for %v: %v", entryName, err)
}
} else {

View File

@ -164,6 +164,9 @@ func (fsw *FilerStoreWrapper) FindEntry(ctx context.Context, fp util.FullPath) (
entry, err = actualStore.FindEntry(ctx, fp)
// glog.V(4).Infof("FindEntry %s: %v", fp, err)
if err != nil {
if fsw.CanDropWholeBucket() && strings.Contains(err.Error(), "Table") && strings.Contains(err.Error(), "doesn't exist") {
err = filer_pb.ErrNotFound
}
return nil, err
}

View File

@ -97,6 +97,10 @@ func NewChunkReaderAtFromClient(readerCache *ReaderCache, chunkViews *IntervalLi
}
}
func (c *ChunkReadAt) Size() int64 {
return c.fileSize
}
func (c *ChunkReadAt) Close() error {
c.readerCache.destroy()
return nil
@ -169,15 +173,14 @@ func (c *ChunkReadAt) doReadAt(p []byte, offset int64) (n int, ts int64, err err
// zero the remaining bytes if a gap exists at the end of the last chunk (or a fully sparse file)
if err == nil && remaining > 0 {
var delta int64
if c.fileSize > startOffset {
if c.fileSize >= startOffset {
delta = min(remaining, c.fileSize-startOffset)
startOffset -= offset
} else {
delta = remaining
startOffset = max(startOffset-offset, startOffset-remaining-offset)
}
glog.V(4).Infof("zero2 [%d,%d) of file size %d bytes", startOffset, startOffset+delta, c.fileSize)
n += zero(p, startOffset, delta)
if delta > 0 {
glog.V(4).Infof("zero2 [%d,%d) of file size %d bytes", startOffset, startOffset+delta, c.fileSize)
n += zero(p, startOffset, delta)
}
}
if err == nil && offset+int64(len(p)) >= c.fileSize {
@ -216,6 +219,9 @@ func (c *ChunkReadAt) readChunkSliceAt(buffer []byte, chunkView *ChunkView, next
}
func zero(buffer []byte, start, length int64) int {
if length <= 0 {
return 0
}
end := min(start+length, int64(len(buffer)))
start = max(start, 0)
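The hunk above only shows the new guard and clamping at the top of zero(); the following standalone sketch fills in a plausible body so the bounds arithmetic is concrete. It assumes the Go 1.21+ min/max builtins already used in this diff and is an illustration, not the upstream implementation.

package main

import "fmt"

// zeroFill mirrors the clamped zero-fill sketched above: it zeroes
// buffer[start, start+length) after clamping to the buffer bounds and
// returns how many bytes were zeroed. min/max are the Go 1.21+ builtins.
func zeroFill(buffer []byte, start, length int64) int {
    if length <= 0 {
        return 0
    }
    end := min(start+length, int64(len(buffer)))
    start = max(start, 0)
    if start >= end {
        return 0
    }
    for i := start; i < end; i++ {
        buffer[i] = 0
    }
    return int(end - start)
}

func main() {
    buf := []byte{1, 2, 3, 4, 5}
    fmt.Println(zeroFill(buf, 3, 10), buf) // 2 [1 2 3 0 0]: length clamped at the buffer end
    fmt.Println(zeroFill(buf, 1, 0), buf)  // 0 [1 2 3 0 0]: non-positive length is a no-op
}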

View File

@ -31,7 +31,7 @@ func (m *mockChunkCache) ReadChunkAt(data []byte, fileId string, offset uint64)
func (m *mockChunkCache) SetChunk(fileId string, data []byte) {
}
func (m *mockChunkCache) GetMaxFilePartSizeInCache() (uint64) {
func (m *mockChunkCache) GetMaxFilePartSizeInCache() uint64 {
return 0
}
@ -81,7 +81,7 @@ func TestReaderAt(t *testing.T) {
}
testReadAt(t, readerAt, 0, 10, 10, io.EOF, nil, nil)
testReadAt(t, readerAt, 0, 12, 12, io.EOF, nil, nil)
testReadAt(t, readerAt, 0, 12, 10, io.EOF, nil, nil)
testReadAt(t, readerAt, 2, 8, 8, io.EOF, nil, nil)
testReadAt(t, readerAt, 3, 6, 6, nil, nil, nil)
@ -131,8 +131,8 @@ func TestReaderAt0(t *testing.T) {
testReadAt(t, readerAt, 3, 16, 7, io.EOF, nil, nil)
testReadAt(t, readerAt, 3, 5, 5, nil, nil, nil)
testReadAt(t, readerAt, 11, 5, 5, io.EOF, nil, nil)
testReadAt(t, readerAt, 10, 5, 5, io.EOF, nil, nil)
testReadAt(t, readerAt, 11, 5, 0, io.EOF, nil, nil)
testReadAt(t, readerAt, 10, 5, 0, io.EOF, nil, nil)
}

View File

@ -1,6 +1,7 @@
package filer
const (
TopicsDir = "/topics"
SystemLogDir = TopicsDir + "/.system/log"
TopicsDir = "/topics"
SystemLogDir = TopicsDir + "/.system/log"
TopicConfFile = "topic.conf"
)

View File

@ -1,16 +1,12 @@
package filer_client
import (
"bytes"
"fmt"
"github.com/seaweedfs/seaweedfs/weed/filer"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/mq/topic"
"github.com/seaweedfs/seaweedfs/weed/pb"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/pb/mq_pb"
"google.golang.org/grpc"
jsonpb "google.golang.org/protobuf/encoding/protojson"
)
type FilerClientAccessor struct {
@ -22,41 +18,23 @@ func (fca *FilerClientAccessor) WithFilerClient(streamingMode bool, fn func(file
return pb.WithFilerClient(streamingMode, 0, fca.GetFiler(), fca.GetGrpcDialOption(), fn)
}
func (fca *FilerClientAccessor) SaveTopicConfToFiler(t *mq_pb.Topic, conf *mq_pb.ConfigureTopicResponse) error {
func (fca *FilerClientAccessor) SaveTopicConfToFiler(t topic.Topic, conf *mq_pb.ConfigureTopicResponse) error {
glog.V(0).Infof("save conf for topic %v to filer", t)
// save the topic configuration on filer
topicDir := fmt.Sprintf("%s/%s/%s", filer.TopicsDir, t.Namespace, t.Name)
if err := fca.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
var buf bytes.Buffer
filer.ProtoToText(&buf, conf)
return filer.SaveInsideFiler(client, topicDir, "topic.conf", buf.Bytes())
}); err != nil {
return fmt.Errorf("save topic to %s: %v", topicDir, err)
}
return nil
return fca.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
return t.WriteConfFile(client, conf)
})
}
func (fca *FilerClientAccessor) ReadTopicConfFromFiler(t topic.Topic) (conf *mq_pb.ConfigureTopicResponse, err error) {
glog.V(0).Infof("load conf for topic %v from filer", t)
glog.V(1).Infof("load conf for topic %v from filer", t)
topicDir := fmt.Sprintf("%s/%s/%s", filer.TopicsDir, t.Namespace, t.Name)
if err = fca.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
data, err := filer.ReadInsideFiler(client, topicDir, "topic.conf")
if err == filer_pb.ErrNotFound {
return err
}
if err != nil {
return fmt.Errorf("read topic.conf of %v: %v", t, err)
}
// parse into filer conf object
conf = &mq_pb.ConfigureTopicResponse{}
if err = jsonpb.Unmarshal(data, conf); err != nil {
return fmt.Errorf("unmarshal topic %v conf: %v", t, err)
}
return nil
conf, err = t.ReadConfFile(client)
return err
}); err != nil {
return nil, err
}

View File

@ -47,6 +47,8 @@ func (fh *FileHandle) readFromChunks(buff []byte, offset int64) (int64, int64, e
if fileSize == 0 {
glog.V(1).Infof("empty fh %v", fileFullPath)
return 0, 0, io.EOF
} else if offset == fileSize {
return 0, 0, io.EOF
} else if offset >= fileSize {
glog.V(1).Infof("invalid read, fileSize %d, offset %d for %s", fileSize, offset, fileFullPath)
return 0, 0, io.EOF

View File

@ -1,20 +1,17 @@
package mount
import (
"errors"
"fmt"
"path/filepath"
"sync/atomic"
"time"
"github.com/seaweedfs/seaweedfs/weed/filer"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/pb"
"github.com/seaweedfs/seaweedfs/weed/mount/meta_cache"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/util"
"path/filepath"
)
func (wfs *WFS) subscribeFilerConfEvents() (func(), error) {
now := time.Now()
func (wfs *WFS) subscribeFilerConfEvents() (*meta_cache.MetadataFollower, error) {
confDir := filer.DirectoryEtcSeaweedFS
confName := filer.FilerConfName
confFullName := filepath.Join(filer.DirectoryEtcSeaweedFS, filer.FilerConfName)
@ -38,7 +35,11 @@ func (wfs *WFS) subscribeFilerConfEvents() (func(), error) {
return nil
})
if err != nil {
return nil, err
if errors.Is(err, filer_pb.ErrNotFound) {
glog.V(0).Infof("fuse filer conf %s not found", confFullName)
} else {
return nil, err
}
}
processEventFn := func(resp *filer_pb.SubscribeMetadataResponse) error {
@ -66,41 +67,9 @@ func (wfs *WFS) subscribeFilerConfEvents() (func(), error) {
return nil
}
metadataFollowOption := &pb.MetadataFollowOption{
ClientName: "fuse",
ClientId: wfs.signature,
ClientEpoch: 1,
SelfSignature: 0,
PathPrefix: confFullName,
AdditionalPathPrefixes: nil,
StartTsNs: now.UnixNano(),
StopTsNs: 0,
EventErrorType: pb.FatalOnError,
}
return func() {
// sync new conf changes
util.RetryUntil("followFilerConfChanges", func() error {
metadataFollowOption.ClientEpoch++
i := atomic.LoadInt32(&wfs.option.filerIndex)
n := len(wfs.option.FilerAddresses)
err = pb.FollowMetadata(wfs.option.FilerAddresses[i], wfs.option.GrpcDialOption, metadataFollowOption, processEventFn)
if err == nil {
atomic.StoreInt32(&wfs.option.filerIndex, i)
return nil
}
i++
if i >= int32(n) {
i = 0
}
return err
}, func(err error) bool {
glog.V(0).Infof("fuse follow filer conf changes: %v", err)
return true
})
return &meta_cache.MetadataFollower{
PathPrefixToWatch: confFullName,
ProcessEventFn: processEventFn,
}, nil
}

View File

@ -10,7 +10,44 @@ import (
"strings"
)
func SubscribeMetaEvents(mc *MetaCache, selfSignature int32, client filer_pb.FilerClient, dir string, lastTsNs int64) error {
type MetadataFollower struct {
PathPrefixToWatch string
ProcessEventFn func(resp *filer_pb.SubscribeMetadataResponse) error
}
func mergeProcessors(mainProcessor func(resp *filer_pb.SubscribeMetadataResponse) error, followers ...*MetadataFollower) func(resp *filer_pb.SubscribeMetadataResponse) error {
return func(resp *filer_pb.SubscribeMetadataResponse) error {
// build the full path
entry := resp.EventNotification.NewEntry
if entry == nil {
entry = resp.EventNotification.OldEntry
}
if entry != nil {
dir := resp.Directory
if resp.EventNotification.NewParentPath != "" {
dir = resp.EventNotification.NewParentPath
}
fp := util.NewFullPath(dir, entry.Name)
for _, follower := range followers {
if strings.HasPrefix(string(fp), follower.PathPrefixToWatch) {
if err := follower.ProcessEventFn(resp); err != nil {
return err
}
}
}
}
return mainProcessor(resp)
}
}
func SubscribeMetaEvents(mc *MetaCache, selfSignature int32, client filer_pb.FilerClient, dir string, lastTsNs int64, followers ...*MetadataFollower) error {
var prefixes []string
for _, follower := range followers {
prefixes = append(prefixes, follower.PathPrefixToWatch)
}
processEventFn := func(resp *filer_pb.SubscribeMetadataResponse) error {
message := resp.EventNotification
@ -69,7 +106,7 @@ func SubscribeMetaEvents(mc *MetaCache, selfSignature int32, client filer_pb.Fil
ClientEpoch: 1,
SelfSignature: selfSignature,
PathPrefix: prefix,
AdditionalPathPrefixes: nil,
AdditionalPathPrefixes: prefixes,
DirectoriesToWatch: nil,
StartTsNs: lastTsNs,
StopTsNs: 0,
@ -77,7 +114,7 @@ func SubscribeMetaEvents(mc *MetaCache, selfSignature int32, client filer_pb.Fil
}
util.RetryUntil("followMetaUpdates", func() error {
metadataFollowOption.ClientEpoch++
return pb.WithFilerClientFollowMetadata(client, metadataFollowOption, processEventFn)
return pb.WithFilerClientFollowMetadata(client, metadataFollowOption, mergeProcessors(processEventFn, followers...))
}, func(err error) bool {
glog.Errorf("follow metadata updates: %v", err)
return true
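A hedged sketch of how a caller wires an extra watcher through the new MetadataFollower type and the extended SubscribeMetaEvents signature above; the function name, parameters, and watched path are placeholders, not code from this change.

package sketch

import (
    "github.com/seaweedfs/seaweedfs/weed/mount/meta_cache"
    "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
)

// watchFilerConf is a placeholder caller: it hands one extra watcher to the
// extended SubscribeMetaEvents shown above.
func watchFilerConf(mc *meta_cache.MetaCache, client filer_pb.FilerClient, mountRoot string, startTsNs int64) {
    follower := &meta_cache.MetadataFollower{
        // only events whose full path starts with this prefix reach ProcessEventFn
        PathPrefixToWatch: "/etc/seaweedfs/filer.conf", // assumed conf path, for illustration
        ProcessEventFn: func(resp *filer_pb.SubscribeMetadataResponse) error {
            // react to the conf change; a returned error aborts the retry loop
            return nil
        },
    }
    // the follower's prefix becomes an AdditionalPathPrefixes entry, and
    // mergeProcessors routes matching events to it before the cache update
    go meta_cache.SubscribeMetaEvents(mc, 0, client, mountRoot, startTsNs, follower)
}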

View File

@ -51,7 +51,7 @@ func (pw *PageWriter) FlushData() error {
}
func (pw *PageWriter) ReadDirtyDataAt(data []byte, offset int64, tsNs int64) (maxStop int64) {
glog.V(4).Infof("ReadDirtyDataAt %v [%d, %d)", pw.fh.fh, offset, offset+int64(len(data)))
glog.V(4).Infof("ReadDirtyDataAt %v [%d, %d)", pw.fh.inode, offset, offset+int64(len(data)))
chunkIndex := offset / pw.chunkSize
for i := chunkIndex; len(data) > 0; i++ {

View File

@ -46,6 +46,7 @@ type Option struct {
Umask os.FileMode
Quota int64
DisableXAttr bool
IsMacOs bool
MountUid uint32
MountGid uint32
@ -141,15 +142,13 @@ func NewSeaweedFileSystem(option *Option) *WFS {
}
func (wfs *WFS) StartBackgroundTasks() error {
fn, err := wfs.subscribeFilerConfEvents()
follower, err := wfs.subscribeFilerConfEvents()
if err != nil {
return err
}
go fn()
startTime := time.Now()
go meta_cache.SubscribeMetaEvents(wfs.metaCache, wfs.signature, wfs, wfs.option.FilerMountRootPath, startTime.UnixNano())
go meta_cache.SubscribeMetaEvents(wfs.metaCache, wfs.signature, wfs, wfs.option.FilerMountRootPath, startTime.UnixNano(), follower)
go wfs.loopCheckQuota()
return nil

View File

@ -11,6 +11,7 @@ import (
)
func (wfs *WFS) GetAttr(cancel <-chan struct{}, input *fuse.GetAttrIn, out *fuse.AttrOut) (code fuse.Status) {
glog.V(4).Infof("GetAttr %v", input.NodeId)
if input.NodeId == 1 {
wfs.setRootAttr(out)
return fuse.OK

View File

@ -2,6 +2,7 @@ package mount
import (
"github.com/hanwen/go-fuse/v2/fuse"
"github.com/seaweedfs/seaweedfs/weed/glog"
)
/**
@ -66,6 +67,14 @@ func (wfs *WFS) Open(cancel <-chan struct{}, in *fuse.OpenIn, out *fuse.OpenOut)
if status == fuse.OK {
out.Fh = uint64(fileHandle.fh)
out.OpenFlags = in.Flags
if wfs.option.IsMacOs {
// remove the direct_io flag, as it is not well-supported on macOS
// https://code.google.com/archive/p/macfuse/wikis/OPTIONS.wiki recommends avoiding the direct_io flag
if in.Flags&fuse.FOPEN_DIRECT_IO != 0 {
glog.V(4).Infof("macfuse direct_io mode %v => false\n", in.Flags&fuse.FOPEN_DIRECT_IO != 0)
out.OpenFlags &^= fuse.FOPEN_DIRECT_IO
}
}
// TODO https://github.com/libfuse/libfuse/blob/master/include/fuse_common.h#L64
}
return status

View File

@ -3,15 +3,16 @@ package mount
import (
"context"
"fmt"
"io"
"strings"
"syscall"
"github.com/hanwen/go-fuse/v2/fs"
"github.com/hanwen/go-fuse/v2/fuse"
"github.com/seaweedfs/seaweedfs/weed/filer"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/util"
"io"
"strings"
"syscall"
)
/** Rename a file
@ -160,6 +161,13 @@ func (wfs *WFS) Rename(cancel <-chan struct{}, in *fuse.RenameIn, oldName string
}
newPath := newDir.Child(newName)
if wfs.FilerConf != nil {
rule := wfs.FilerConf.MatchStorageRule(string(oldPath))
if rule.Worm {
return fuse.EPERM
}
}
glog.V(4).Infof("dir Rename %s => %s", oldPath, newPath)
// update remote filer

View File

@ -4,6 +4,7 @@ import (
"context"
"fmt"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/mq/logstore"
"github.com/seaweedfs/seaweedfs/weed/mq/pub_balancer"
"github.com/seaweedfs/seaweedfs/weed/mq/topic"
"github.com/seaweedfs/seaweedfs/weed/pb"
@ -26,7 +27,7 @@ func (b *MessageQueueBroker) AssignTopicPartitions(c context.Context, request *m
} else {
var localPartition *topic.LocalPartition
if localPartition = b.localTopicManager.GetLocalPartition(t, partition); localPartition == nil {
localPartition = topic.NewLocalPartition(partition, b.genLogFlushFunc(t, assignment.Partition), b.genLogOnDiskReadFunc(t, assignment.Partition))
localPartition = topic.NewLocalPartition(partition, b.genLogFlushFunc(t, partition), logstore.GenMergedReadFunc(b, t, partition))
b.localTopicManager.AddLocalPartition(t, localPartition)
}
}

View File

@ -67,7 +67,7 @@ func (b *MessageQueueBroker) ConfigureTopic(ctx context.Context, request *mq_pb.
resp.RecordType = request.RecordType
// save the topic configuration on filer
if err := b.fca.SaveTopicConfToFiler(request.Topic, resp); err != nil {
if err := b.fca.SaveTopicConfToFiler(t, resp); err != nil {
return nil, fmt.Errorf("configure topic: %v", err)
}

View File

@ -2,7 +2,6 @@ package broker
import (
"fmt"
"github.com/seaweedfs/seaweedfs/weed/filer"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/mq/topic"
"github.com/seaweedfs/seaweedfs/weed/pb/mq_pb"
@ -93,9 +92,7 @@ func (b *MessageQueueBroker) PublishFollowMe(stream mq_pb.SeaweedMessaging_Publi
time.Sleep(113 * time.Millisecond)
}
topicDir := fmt.Sprintf("%s/%s/%s", filer.TopicsDir, t.Namespace, t.Name)
partitionGeneration := time.Unix(0, p.UnixTimeNs).UTC().Format(topic.TIME_FORMAT)
partitionDir := fmt.Sprintf("%s/%s/%04d-%04d", topicDir, partitionGeneration, p.RangeStart, p.RangeStop)
partitionDir := topic.PartitionDir(t, p)
// flush the remaining messages
inMemoryBuffers.CloseInput()

View File

@ -9,7 +9,6 @@ import (
"github.com/seaweedfs/seaweedfs/weed/pb/mq_pb"
"github.com/seaweedfs/seaweedfs/weed/util"
"io"
"time"
)
func (b *MessageQueueBroker) SubscribeFollowMe(stream mq_pb.SeaweedMessaging_SubscribeFollowMeServer) (err error) {
@ -65,9 +64,7 @@ func (b *MessageQueueBroker) SubscribeFollowMe(stream mq_pb.SeaweedMessaging_Sub
func (b *MessageQueueBroker) readConsumerGroupOffset(initMessage *mq_pb.SubscribeMessageRequest_InitMessage) (offset int64, err error) {
t, p := topic.FromPbTopic(initMessage.Topic), topic.FromPbPartition(initMessage.PartitionOffset.Partition)
topicDir := fmt.Sprintf("%s/%s/%s", filer.TopicsDir, t.Namespace, t.Name)
partitionGeneration := time.Unix(0, p.UnixTimeNs).UTC().Format(topic.TIME_FORMAT)
partitionDir := fmt.Sprintf("%s/%s/%04d-%04d", topicDir, partitionGeneration, p.RangeStart, p.RangeStop)
partitionDir := topic.PartitionDir(t, p)
offsetFileName := fmt.Sprintf("%s.offset", initMessage.ConsumerGroup)
err = b.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
@ -86,9 +83,7 @@ func (b *MessageQueueBroker) readConsumerGroupOffset(initMessage *mq_pb.Subscrib
func (b *MessageQueueBroker) saveConsumerGroupOffset(t topic.Topic, p topic.Partition, consumerGroup string, offset int64) error {
topicDir := fmt.Sprintf("%s/%s/%s", filer.TopicsDir, t.Namespace, t.Name)
partitionGeneration := time.Unix(0, p.UnixTimeNs).UTC().Format(topic.TIME_FORMAT)
partitionDir := fmt.Sprintf("%s/%s/%04d-%04d", topicDir, partitionGeneration, p.RangeStart, p.RangeStop)
partitionDir := topic.PartitionDir(t, p)
offsetFileName := fmt.Sprintf("%s.offset", consumerGroup)
offsetBytes := make([]byte, 8)

View File

@ -3,6 +3,7 @@ package broker
import (
"fmt"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/mq/logstore"
"github.com/seaweedfs/seaweedfs/weed/mq/pub_balancer"
"github.com/seaweedfs/seaweedfs/weed/mq/topic"
"github.com/seaweedfs/seaweedfs/weed/pb/mq_pb"
@ -40,7 +41,7 @@ func (b *MessageQueueBroker) genLocalPartitionFromFiler(t topic.Topic, partition
self := b.option.BrokerAddress()
for _, assignment := range conf.BrokerPartitionAssignments {
if assignment.LeaderBroker == string(self) && partition.Equals(topic.FromPbPartition(assignment.Partition)) {
localPartition = topic.NewLocalPartition(partition, b.genLogFlushFunc(t, assignment.Partition), b.genLogOnDiskReadFunc(t, assignment.Partition))
localPartition = topic.NewLocalPartition(partition, b.genLogFlushFunc(t, partition), logstore.GenMergedReadFunc(b, t, partition))
b.localTopicManager.AddLocalPartition(t, localPartition)
isGenerated = true
break
@ -55,7 +56,7 @@ func (b *MessageQueueBroker) ensureTopicActiveAssignments(t topic.Topic, conf *m
hasChanges := pub_balancer.EnsureAssignmentsToActiveBrokers(b.PubBalancer.Brokers, 1, conf.BrokerPartitionAssignments)
if hasChanges {
glog.V(0).Infof("topic %v partition updated assignments: %v", t, conf.BrokerPartitionAssignments)
if err = b.fca.SaveTopicConfToFiler(t.ToPbTopic(), conf); err != nil {
if err = b.fca.SaveTopicConfToFiler(t, conf); err != nil {
return err
}
}

View File

@ -2,24 +2,15 @@ package broker
import (
"fmt"
"github.com/seaweedfs/seaweedfs/weed/filer"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/mq/topic"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/pb/mq_pb"
"github.com/seaweedfs/seaweedfs/weed/util"
"github.com/seaweedfs/seaweedfs/weed/util/log_buffer"
"google.golang.org/protobuf/proto"
"math"
"sync/atomic"
"time"
util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
)
func (b *MessageQueueBroker) genLogFlushFunc(t topic.Topic, partition *mq_pb.Partition) log_buffer.LogFlushFuncType {
topicDir := fmt.Sprintf("%s/%s/%s", filer.TopicsDir, t.Namespace, t.Name)
partitionGeneration := time.Unix(0, partition.UnixTimeNs).UTC().Format(topic.TIME_FORMAT)
partitionDir := fmt.Sprintf("%s/%s/%04d-%04d", topicDir, partitionGeneration, partition.RangeStart, partition.RangeStop)
func (b *MessageQueueBroker) genLogFlushFunc(t topic.Topic, p topic.Partition) log_buffer.LogFlushFuncType {
partitionDir := topic.PartitionDir(t, p)
return func(logBuffer *log_buffer.LogBuffer, startTime, stopTime time.Time, buf []byte) {
if len(buf) == 0 {
@ -45,7 +36,6 @@ func (b *MessageQueueBroker) genLogFlushFunc(t topic.Topic, partition *mq_pb.Par
b.accessLock.Lock()
defer b.accessLock.Unlock()
p := topic.FromPbPartition(partition)
if localPartition := b.localTopicManager.GetLocalPartition(t, p); localPartition != nil {
localPartition.NotifyLogFlushed(logBuffer.LastFlushTsNs)
}
@ -53,126 +43,3 @@ func (b *MessageQueueBroker) genLogFlushFunc(t topic.Topic, partition *mq_pb.Par
glog.V(0).Infof("flushing at %d to %s size %d", logBuffer.LastFlushTsNs, targetFile, len(buf))
}
}
func (b *MessageQueueBroker) genLogOnDiskReadFunc(t topic.Topic, partition *mq_pb.Partition) log_buffer.LogReadFromDiskFuncType {
topicDir := fmt.Sprintf("%s/%s/%s", filer.TopicsDir, t.Namespace, t.Name)
partitionGeneration := time.Unix(0, partition.UnixTimeNs).UTC().Format(topic.TIME_FORMAT)
partitionDir := fmt.Sprintf("%s/%s/%04d-%04d", topicDir, partitionGeneration, partition.RangeStart, partition.RangeStop)
lookupFileIdFn := func(fileId string) (targetUrls []string, err error) {
return b.MasterClient.LookupFileId(fileId)
}
eachChunkFn := func(buf []byte, eachLogEntryFn log_buffer.EachLogEntryFuncType, starTsNs, stopTsNs int64) (processedTsNs int64, err error) {
for pos := 0; pos+4 < len(buf); {
size := util.BytesToUint32(buf[pos : pos+4])
if pos+4+int(size) > len(buf) {
err = fmt.Errorf("LogOnDiskReadFunc: read [%d,%d) from [0,%d)", pos, pos+int(size)+4, len(buf))
return
}
entryData := buf[pos+4 : pos+4+int(size)]
logEntry := &filer_pb.LogEntry{}
if err = proto.Unmarshal(entryData, logEntry); err != nil {
pos += 4 + int(size)
err = fmt.Errorf("unexpected unmarshal mq_pb.Message: %v", err)
return
}
if logEntry.TsNs < starTsNs {
pos += 4 + int(size)
continue
}
if stopTsNs != 0 && logEntry.TsNs > stopTsNs {
println("stopTsNs", stopTsNs, "logEntry.TsNs", logEntry.TsNs)
return
}
if _, err = eachLogEntryFn(logEntry); err != nil {
err = fmt.Errorf("process log entry %v: %v", logEntry, err)
return
}
processedTsNs = logEntry.TsNs
pos += 4 + int(size)
}
return
}
eachFileFn := func(entry *filer_pb.Entry, eachLogEntryFn log_buffer.EachLogEntryFuncType, starTsNs, stopTsNs int64) (processedTsNs int64, err error) {
if len(entry.Content) > 0 {
// skip .offset files
return
}
var urlStrings []string
for _, chunk := range entry.Chunks {
if chunk.Size == 0 {
continue
}
if chunk.IsChunkManifest {
glog.Warningf("this should not happen. unexpected chunk manifest in %s/%s", partitionDir, entry.Name)
return
}
urlStrings, err = lookupFileIdFn(chunk.FileId)
if err != nil {
err = fmt.Errorf("lookup %s: %v", chunk.FileId, err)
return
}
if len(urlStrings) == 0 {
err = fmt.Errorf("no url found for %s", chunk.FileId)
return
}
// try one of the urlString until util.Get(urlString) succeeds
var processed bool
for _, urlString := range urlStrings {
// TODO optimization opportunity: reuse the buffer
var data []byte
if data, _, err = util_http.Get(urlString); err == nil {
processed = true
if processedTsNs, err = eachChunkFn(data, eachLogEntryFn, starTsNs, stopTsNs); err != nil {
return
}
break
}
}
if !processed {
err = fmt.Errorf("no data processed for %s %s", entry.Name, chunk.FileId)
return
}
}
return
}
return func(startPosition log_buffer.MessagePosition, stopTsNs int64, eachLogEntryFn log_buffer.EachLogEntryFuncType) (lastReadPosition log_buffer.MessagePosition, isDone bool, err error) {
startFileName := startPosition.UTC().Format(topic.TIME_FORMAT)
startTsNs := startPosition.Time.UnixNano()
stopTime := time.Unix(0, stopTsNs)
var processedTsNs int64
err = b.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
return filer_pb.SeaweedList(client, partitionDir, "", func(entry *filer_pb.Entry, isLast bool) error {
if entry.IsDirectory {
return nil
}
if stopTsNs != 0 && entry.Name > stopTime.UTC().Format(topic.TIME_FORMAT) {
isDone = true
return nil
}
if entry.Name < startPosition.UTC().Format(topic.TIME_FORMAT) {
return nil
}
if processedTsNs, err = eachFileFn(entry, eachLogEntryFn, startTsNs, stopTsNs); err != nil {
return err
}
return nil
}, startFileName, true, math.MaxInt32)
})
lastReadPosition = log_buffer.NewMessagePosition(processedTsNs, -2)
return
}
}

View File

@ -7,11 +7,12 @@ import (
"github.com/seaweedfs/seaweedfs/weed/mq/schema"
"github.com/seaweedfs/seaweedfs/weed/mq/topic"
"github.com/seaweedfs/seaweedfs/weed/pb/schema_pb"
util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
"log"
"strings"
"sync"
"sync/atomic"
"time"
util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
)
var (
@ -25,11 +26,17 @@ var (
namespace = flag.String("ns", "test", "namespace")
t = flag.String("t", "test", "t")
seedBrokers = flag.String("brokers", "localhost:17777", "seed brokers")
counter int32
)
func doPublish(publisher *pub_client.TopicPublisher, id int) {
startTime := time.Now()
for i := 0; i < *messageCount / *concurrency; i++ {
for {
i := atomic.AddInt32(&counter, 1)
if i > int32(*messageCount) {
break
}
// Simulate publishing a message
myRecord := genMyRecord(int32(i))
if err := publisher.PublishRecord(myRecord.Key, myRecord.ToRecordValue()); err != nil {
@ -38,7 +45,7 @@ func doPublish(publisher *pub_client.TopicPublisher, id int) {
}
if *messageDelay > 0 {
time.Sleep(*messageDelay)
fmt.Printf("sent %+v\n", myRecord)
fmt.Printf("sent %+v\n", string(myRecord.Key))
}
}
if err := publisher.FinishPublish(); err != nil {

View File

@ -8,12 +8,12 @@ import (
"github.com/seaweedfs/seaweedfs/weed/mq/topic"
"github.com/seaweedfs/seaweedfs/weed/pb/schema_pb"
"github.com/seaweedfs/seaweedfs/weed/util"
util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
"google.golang.org/protobuf/proto"
"strings"
"time"
util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
)
var (
@ -22,6 +22,7 @@ var (
seedBrokers = flag.String("brokers", "localhost:17777", "seed brokers")
maxPartitionCount = flag.Int("maxPartitionCount", 3, "max partition count")
perPartitionConcurrency = flag.Int("perPartitionConcurrency", 1, "per partition concurrency")
timeAgo = flag.Duration("timeAgo", 1*time.Hour, "start time before now. \"300ms\", \"1.5h\" or \"2h45m\". Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\", \"m\", \"h\"")
clientId = flag.Uint("client_id", uint(util.RandomInt32()), "client id")
)
@ -65,7 +66,7 @@ func main() {
contentConfig := &sub_client.ContentConfiguration{
Topic: topic.NewTopic(*namespace, *t),
Filter: "",
StartTime: time.Unix(1, 1),
StartTime: time.Now().Add(-*timeAgo),
}
brokers := strings.Split(*seedBrokers, ",")
@ -75,9 +76,13 @@ func main() {
subscriber.SetEachMessageFunc(func(key, value []byte) error {
counter++
record := &schema_pb.RecordValue{}
proto.Unmarshal(value, record)
fmt.Printf("record: %v\n", record)
time.Sleep(1300 * time.Millisecond)
err := proto.Unmarshal(value, record)
if err != nil {
fmt.Printf("unmarshal record value: %v\n", err)
} else {
fmt.Printf("%s %d: %v\n", string(key), len(value), record)
}
//time.Sleep(1300 * time.Millisecond)
return nil
})

View File

@ -8,6 +8,7 @@ import (
"github.com/seaweedfs/seaweedfs/weed/pb/mq_pb"
"github.com/seaweedfs/seaweedfs/weed/util"
"io"
"reflect"
)
func (sub *TopicSubscriber) onEachPartition(assigned *mq_pb.BrokerPartitionAssignment, stopCh chan struct{}) error {
@ -24,6 +25,11 @@ func (sub *TopicSubscriber) onEachPartition(assigned *mq_pb.BrokerPartitionAssig
perPartitionConcurrency = 1
}
var stopTsNs int64
if !sub.ContentConfig.StopTime.IsZero() {
stopTsNs = sub.ContentConfig.StopTime.UnixNano()
}
if err = subscribeClient.Send(&mq_pb.SubscribeMessageRequest{
Message: &mq_pb.SubscribeMessageRequest_Init{
Init: &mq_pb.SubscribeMessageRequest_InitMessage{
@ -32,6 +38,8 @@ func (sub *TopicSubscriber) onEachPartition(assigned *mq_pb.BrokerPartitionAssig
Topic: sub.ContentConfig.Topic.ToPbTopic(),
PartitionOffset: &mq_pb.PartitionOffset{
Partition: assigned.Partition,
StartTsNs: sub.ContentConfig.StartTime.UnixNano(),
StopTsNs: stopTsNs,
StartType: mq_pb.PartitionOffsetStartType_EARLIEST_IN_MEMORY,
},
Filter: sub.ContentConfig.Filter,
@ -101,6 +109,10 @@ func (sub *TopicSubscriber) onEachPartition(assigned *mq_pb.BrokerPartitionAssig
glog.V(2).Infof("subscriber %s received control from producer:%s isClose:%v", sub.SubscriberConfig.ConsumerGroup, m.Data.Ctrl.PublisherName, m.Data.Ctrl.IsClose)
continue
}
if len(m.Data.Key) == 0 {
fmt.Printf("empty key %+v, type %v\n", m, reflect.TypeOf(m))
continue
}
executors.Execute(func() {
processErr := sub.OnEachMessageFunc(m.Data.Key, m.Data.Value)
if processErr == nil {

View File

@ -21,6 +21,7 @@ type ContentConfiguration struct {
Topic topic.Topic
Filter string
StartTime time.Time
StopTime time.Time
}
type OnEachMessageFunc func(key, value []byte) (err error)
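A hedged sketch of a bounded subscription using the new StopTime field; the topic name, time window, and sub_client import path are assumptions based on the surrounding examples. Leaving StopTime at its zero value keeps the unbounded behavior, since onEachPartition only sets StopTsNs for a non-zero stop time.

package main

import (
    "fmt"
    "time"

    "github.com/seaweedfs/seaweedfs/weed/mq/client/sub_client"
    "github.com/seaweedfs/seaweedfs/weed/mq/topic"
)

func main() {
    // read one hour of history and stop at "now"
    contentConfig := &sub_client.ContentConfiguration{
        Topic:     topic.NewTopic("test", "test"),
        Filter:    "",
        StartTime: time.Now().Add(-time.Hour),
        StopTime:  time.Now(),
    }
    fmt.Printf("subscribe [%v, %v)\n", contentConfig.StartTime, contentConfig.StopTime)
}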

View File

@ -0,0 +1,454 @@
package logstore
import (
"encoding/binary"
"fmt"
"github.com/parquet-go/parquet-go"
"github.com/parquet-go/parquet-go/compress/zstd"
"github.com/seaweedfs/seaweedfs/weed/filer"
"github.com/seaweedfs/seaweedfs/weed/mq/schema"
"github.com/seaweedfs/seaweedfs/weed/mq/topic"
"github.com/seaweedfs/seaweedfs/weed/operation"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/pb/schema_pb"
"github.com/seaweedfs/seaweedfs/weed/util"
util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
"github.com/seaweedfs/seaweedfs/weed/util/log_buffer"
"google.golang.org/protobuf/proto"
"io"
"os"
"strings"
"time"
)
const (
SW_COLUMN_NAME_TS = "_ts_ns"
SW_COLUMN_NAME_KEY = "_key"
)
func CompactTopicPartitions(filerClient filer_pb.FilerClient, t topic.Topic, timeAgo time.Duration, recordType *schema_pb.RecordType, preference *operation.StoragePreference) error {
// list the topic partition versions
topicVersions, err := collectTopicVersions(filerClient, t, timeAgo)
if err != nil {
return fmt.Errorf("list topic files: %v", err)
}
// compact the partitions
for _, topicVersion := range topicVersions {
partitions, err := collectTopicVersionsPartitions(filerClient, t, topicVersion)
if err != nil {
return fmt.Errorf("list partitions %s/%s/%s: %v", t.Namespace, t.Name, topicVersion, err)
}
for _, partition := range partitions {
err := compactTopicPartition(filerClient, t, timeAgo, recordType, partition, preference)
if err != nil {
return fmt.Errorf("compact partition %s/%s/%s/%s: %v", t.Namespace, t.Name, topicVersion, partition, err)
}
}
}
return nil
}
func collectTopicVersions(filerClient filer_pb.FilerClient, t topic.Topic, timeAgo time.Duration) (partitionVersions []time.Time, err error) {
err = filer_pb.ReadDirAllEntries(filerClient, util.FullPath(t.Dir()), "", func(entry *filer_pb.Entry, isLast bool) error {
t, err := topic.ParseTopicVersion(entry.Name)
if err != nil {
// skip non-partition directories
return nil
}
if t.Unix() < time.Now().Unix()-int64(timeAgo/time.Second) {
partitionVersions = append(partitionVersions, t)
}
return nil
})
return
}
func collectTopicVersionsPartitions(filerClient filer_pb.FilerClient, t topic.Topic, topicVersion time.Time) (partitions []topic.Partition, err error) {
version := topicVersion.Format(topic.PartitionGenerationFormat)
err = filer_pb.ReadDirAllEntries(filerClient, util.FullPath(t.Dir()).Child(version), "", func(entry *filer_pb.Entry, isLast bool) error {
if !entry.IsDirectory {
return nil
}
start, stop := topic.ParsePartitionBoundary(entry.Name)
if start != stop {
partitions = append(partitions, topic.Partition{
RangeStart: start,
RangeStop: stop,
RingSize: topic.PartitionCount,
UnixTimeNs: topicVersion.UnixNano(),
})
}
return nil
})
return
}
func compactTopicPartition(filerClient filer_pb.FilerClient, t topic.Topic, timeAgo time.Duration, recordType *schema_pb.RecordType, partition topic.Partition, preference *operation.StoragePreference) error {
partitionDir := topic.PartitionDir(t, partition)
// compact the partition directory
return compactTopicPartitionDir(filerClient, t.Name, partitionDir, timeAgo, recordType, preference)
}
func compactTopicPartitionDir(filerClient filer_pb.FilerClient, topicName, partitionDir string, timeAgo time.Duration, recordType *schema_pb.RecordType, preference *operation.StoragePreference) error {
// read all existing parquet files
minTsNs, maxTsNs, err := readAllParquetFiles(filerClient, partitionDir)
if err != nil {
return err
}
// read all log files
logFiles, err := readAllLogFiles(filerClient, partitionDir, timeAgo, minTsNs, maxTsNs)
if err != nil {
return err
}
if len(logFiles) == 0 {
return nil
}
// divide log files into groups of 128MB
logFileGroups := groupFilesBySize(logFiles, 128*1024*1024)
// write to parquet file
parquetLevels, err := schema.ToParquetLevels(recordType)
if err != nil {
return fmt.Errorf("ToParquetLevels failed %+v: %v", recordType, err)
}
// create a parquet schema
parquetSchema, err := schema.ToParquetSchema(topicName, recordType)
if err != nil {
return fmt.Errorf("ToParquetSchema failed: %v", err)
}
// TODO parallelize the writing
for _, logFileGroup := range logFileGroups {
if err = writeLogFilesToParquet(filerClient, partitionDir, recordType, logFileGroup, parquetSchema, parquetLevels, preference); err != nil {
return err
}
}
return nil
}
func groupFilesBySize(logFiles []*filer_pb.Entry, maxGroupSize int64) (logFileGroups [][]*filer_pb.Entry) {
var logFileGroup []*filer_pb.Entry
var groupSize int64
for _, logFile := range logFiles {
if groupSize+int64(logFile.Attributes.FileSize) > maxGroupSize {
logFileGroups = append(logFileGroups, logFileGroup)
logFileGroup = nil
groupSize = 0
}
logFileGroup = append(logFileGroup, logFile)
groupSize += int64(logFile.Attributes.FileSize)
}
if len(logFileGroup) > 0 {
logFileGroups = append(logFileGroups, logFileGroup)
}
return
}
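The grouping above is easiest to see on plain numbers; a self-contained re-statement of the same greedy logic over raw sizes (the names and the 128 MiB cap in main are illustrative).

package main

import "fmt"

// groupSizes restates the greedy grouping of groupFilesBySize over plain
// sizes: files join the current group until adding the next one would exceed
// maxGroupSize, at which point a new group starts.
func groupSizes(sizes []int64, maxGroupSize int64) (groups [][]int64) {
    var group []int64
    var groupSize int64
    for _, size := range sizes {
        if groupSize+size > maxGroupSize {
            groups = append(groups, group)
            group = nil
            groupSize = 0
        }
        group = append(group, size)
        groupSize += size
    }
    if len(group) > 0 {
        groups = append(groups, group)
    }
    return
}

func main() {
    // with a 128 MiB cap, five 60 MiB log files split into groups of 2 + 2 + 1
    sizes := []int64{60 << 20, 60 << 20, 60 << 20, 60 << 20, 60 << 20}
    for i, group := range groupSizes(sizes, 128<<20) {
        fmt.Printf("group %d: %d files\n", i, len(group))
    }
}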
func readAllLogFiles(filerClient filer_pb.FilerClient, partitionDir string, timeAgo time.Duration, minTsNs, maxTsNs int64) (logFiles []*filer_pb.Entry, err error) {
err = filer_pb.ReadDirAllEntries(filerClient, util.FullPath(partitionDir), "", func(entry *filer_pb.Entry, isLast bool) error {
if strings.HasSuffix(entry.Name, ".parquet") {
return nil
}
if entry.Attributes.Crtime > time.Now().Unix()-int64(timeAgo/time.Second) {
return nil
}
logTime, err := time.Parse(topic.TIME_FORMAT, entry.Name)
if err != nil {
// glog.Warningf("parse log time %s: %v", entry.Name, err)
return nil
}
if maxTsNs > 0 && logTime.UnixNano() <= maxTsNs {
return nil
}
logFiles = append(logFiles, entry)
return nil
})
return
}
func readAllParquetFiles(filerClient filer_pb.FilerClient, partitionDir string) (minTsNs, maxTsNs int64, err error) {
err = filer_pb.ReadDirAllEntries(filerClient, util.FullPath(partitionDir), "", func(entry *filer_pb.Entry, isLast bool) error {
if !strings.HasSuffix(entry.Name, ".parquet") {
return nil
}
if len(entry.Extended) == 0 {
return nil
}
// read min ts
minTsBytes := entry.Extended["min"]
if len(minTsBytes) != 8 {
return nil
}
minTs := int64(binary.BigEndian.Uint64(minTsBytes))
if minTsNs == 0 || minTs < minTsNs {
minTsNs = minTs
}
// read max ts
maxTsBytes := entry.Extended["max"]
if len(maxTsBytes) != 8 {
return nil
}
maxTs := int64(binary.BigEndian.Uint64(maxTsBytes))
if maxTsNs == 0 || maxTs > maxTsNs {
maxTsNs = maxTs
}
return nil
})
return
}
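The parquet entries carry their covered time range as 8-byte big-endian values under the extended-attribute keys "min" and "max" (see readAllParquetFiles above and saveParquetFileToPartitionDir below); a standalone sketch of that round trip with made-up timestamps.

package main

import (
    "encoding/binary"
    "fmt"
    "time"
)

// encodeTs/decodeTs show the round trip used for the "min"/"max" extended
// attributes: an int64 timestamp in nanoseconds stored as 8 big-endian bytes.
func encodeTs(tsNs int64) []byte {
    b := make([]byte, 8)
    binary.BigEndian.PutUint64(b, uint64(tsNs))
    return b
}

func decodeTs(b []byte) (int64, bool) {
    if len(b) != 8 { // entries with a malformed attribute are skipped, as above
        return 0, false
    }
    return int64(binary.BigEndian.Uint64(b)), true
}

func main() {
    extended := map[string][]byte{
        "min": encodeTs(time.Date(2024, 11, 8, 0, 0, 0, 0, time.UTC).UnixNano()),
        "max": encodeTs(time.Date(2024, 11, 8, 1, 0, 0, 0, time.UTC).UnixNano()),
    }
    if minTs, ok := decodeTs(extended["min"]); ok {
        fmt.Println("min:", time.Unix(0, minTs).UTC())
    }
    if maxTs, ok := decodeTs(extended["max"]); ok {
        fmt.Println("max:", time.Unix(0, maxTs).UTC())
    }
}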
func writeLogFilesToParquet(filerClient filer_pb.FilerClient, partitionDir string, recordType *schema_pb.RecordType, logFileGroups []*filer_pb.Entry, parquetSchema *parquet.Schema, parquetLevels *schema.ParquetLevels, preference *operation.StoragePreference) (err error) {
tempFile, err := os.CreateTemp(".", "t*.parquet")
if err != nil {
return fmt.Errorf("create temp file: %v", err)
}
defer func() {
tempFile.Close()
os.Remove(tempFile.Name())
}()
writer := parquet.NewWriter(tempFile, parquetSchema, parquet.Compression(&zstd.Codec{Level: zstd.DefaultLevel}))
rowBuilder := parquet.NewRowBuilder(parquetSchema)
var startTsNs, stopTsNs int64
for _, logFile := range logFileGroups {
fmt.Printf("compact %s/%s ", partitionDir, logFile.Name)
var rows []parquet.Row
if err := iterateLogEntries(filerClient, logFile, func(entry *filer_pb.LogEntry) error {
if startTsNs == 0 {
startTsNs = entry.TsNs
}
stopTsNs = entry.TsNs
if len(entry.Key) == 0 {
return nil
}
// write to parquet file
rowBuilder.Reset()
record := &schema_pb.RecordValue{}
if err := proto.Unmarshal(entry.Data, record); err != nil {
return fmt.Errorf("unmarshal record value: %v", err)
}
record.Fields[SW_COLUMN_NAME_TS] = &schema_pb.Value{
Kind: &schema_pb.Value_Int64Value{
Int64Value: entry.TsNs,
},
}
record.Fields[SW_COLUMN_NAME_KEY] = &schema_pb.Value{
Kind: &schema_pb.Value_BytesValue{
BytesValue: entry.Key,
},
}
if err := schema.AddRecordValue(rowBuilder, recordType, parquetLevels, record); err != nil {
return fmt.Errorf("add record value: %v", err)
}
rows = append(rows, rowBuilder.Row())
return nil
}); err != nil {
return fmt.Errorf("iterate log entry %v/%v: %v", partitionDir, logFile.Name, err)
}
fmt.Printf("processed %d rows\n", len(rows))
if _, err := writer.WriteRows(rows); err != nil {
return fmt.Errorf("write rows: %v", err)
}
}
if err := writer.Close(); err != nil {
return fmt.Errorf("close writer: %v", err)
}
// write the parquet file to partitionDir
parquetFileName := fmt.Sprintf("%s.parquet", time.Unix(0, startTsNs).UTC().Format("2006-01-02-15-04-05"))
if err := saveParquetFileToPartitionDir(filerClient, tempFile, partitionDir, parquetFileName, preference, startTsNs, stopTsNs); err != nil {
return fmt.Errorf("save parquet file %s: %v", parquetFileName, err)
}
return nil
}
func saveParquetFileToPartitionDir(filerClient filer_pb.FilerClient, sourceFile *os.File, partitionDir, parquetFileName string, preference *operation.StoragePreference, startTsNs, stopTsNs int64) error {
uploader, err := operation.NewUploader()
if err != nil {
return fmt.Errorf("new uploader: %v", err)
}
// get file size
fileInfo, err := sourceFile.Stat()
if err != nil {
return fmt.Errorf("stat source file: %v", err)
}
// upload file in chunks
chunkSize := int64(4 * 1024 * 1024)
chunkCount := (fileInfo.Size() + chunkSize - 1) / chunkSize
entry := &filer_pb.Entry{
Name: parquetFileName,
Attributes: &filer_pb.FuseAttributes{
Crtime: time.Now().Unix(),
Mtime: time.Now().Unix(),
FileMode: uint32(os.FileMode(0644)),
FileSize: uint64(fileInfo.Size()),
Mime: "application/vnd.apache.parquet",
},
}
entry.Extended = make(map[string][]byte)
minTsBytes := make([]byte, 8)
binary.BigEndian.PutUint64(minTsBytes, uint64(startTsNs))
entry.Extended["min"] = minTsBytes
maxTsBytes := make([]byte, 8)
binary.BigEndian.PutUint64(maxTsBytes, uint64(stopTsNs))
entry.Extended["max"] = maxTsBytes
for i := int64(0); i < chunkCount; i++ {
fileId, uploadResult, err, _ := uploader.UploadWithRetry(
filerClient,
&filer_pb.AssignVolumeRequest{
Count: 1,
Replication: preference.Replication,
Collection: preference.Collection,
TtlSec: 0, // TODO set ttl
DiskType: preference.DiskType,
Path: partitionDir + "/" + parquetFileName,
},
&operation.UploadOption{
Filename: parquetFileName,
Cipher: false,
IsInputCompressed: false,
MimeType: "application/vnd.apache.parquet",
PairMap: nil,
},
func(host, fileId string) string {
return fmt.Sprintf("http://%s/%s", host, fileId)
},
io.NewSectionReader(sourceFile, i*chunkSize, chunkSize),
)
if err != nil {
return fmt.Errorf("upload chunk %d: %v", i, err)
}
if uploadResult.Error != "" {
return fmt.Errorf("upload result: %v", uploadResult.Error)
}
entry.Chunks = append(entry.Chunks, uploadResult.ToPbFileChunk(fileId, i*chunkSize, time.Now().UnixNano()))
}
// write the entry to partitionDir
if err := filerClient.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
return filer_pb.CreateEntry(client, &filer_pb.CreateEntryRequest{
Directory: partitionDir,
Entry: entry,
})
}); err != nil {
return fmt.Errorf("create entry: %v", err)
}
fmt.Printf("saved to %s/%s\n", partitionDir, parquetFileName)
return nil
}
func iterateLogEntries(filerClient filer_pb.FilerClient, logFile *filer_pb.Entry, eachLogEntryFn func(entry *filer_pb.LogEntry) error) error {
lookupFn := filer.LookupFn(filerClient)
_, err := eachFile(logFile, lookupFn, func(logEntry *filer_pb.LogEntry) (isDone bool, err error) {
if err := eachLogEntryFn(logEntry); err != nil {
return true, err
}
return false, nil
})
return err
}
func eachFile(entry *filer_pb.Entry, lookupFileIdFn func(fileId string) (targetUrls []string, err error), eachLogEntryFn log_buffer.EachLogEntryFuncType) (processedTsNs int64, err error) {
if len(entry.Content) > 0 {
// skip .offset files
return
}
var urlStrings []string
for _, chunk := range entry.Chunks {
if chunk.Size == 0 {
continue
}
if chunk.IsChunkManifest {
fmt.Printf("this should not happen. unexpected chunk manifest in %s", entry.Name)
return
}
urlStrings, err = lookupFileIdFn(chunk.FileId)
if err != nil {
err = fmt.Errorf("lookup %s: %v", chunk.FileId, err)
return
}
if len(urlStrings) == 0 {
err = fmt.Errorf("no url found for %s", chunk.FileId)
return
}
// try each urlString until util_http.Get(urlString) succeeds
var processed bool
for _, urlString := range urlStrings {
var data []byte
if data, _, err = util_http.Get(urlString); err == nil {
processed = true
if processedTsNs, err = eachChunk(data, eachLogEntryFn); err != nil {
return
}
break
}
}
if !processed {
err = fmt.Errorf("no data processed for %s %s", entry.Name, chunk.FileId)
return
}
}
return
}
func eachChunk(buf []byte, eachLogEntryFn log_buffer.EachLogEntryFuncType) (processedTsNs int64, err error) {
for pos := 0; pos+4 < len(buf); {
size := util.BytesToUint32(buf[pos : pos+4])
if pos+4+int(size) > len(buf) {
err = fmt.Errorf("reach each log chunk: read [%d,%d) from [0,%d)", pos, pos+int(size)+4, len(buf))
return
}
entryData := buf[pos+4 : pos+4+int(size)]
logEntry := &filer_pb.LogEntry{}
if err = proto.Unmarshal(entryData, logEntry); err != nil {
pos += 4 + int(size)
err = fmt.Errorf("unexpected unmarshal mq_pb.Message: %v", err)
return
}
if _, err = eachLogEntryFn(logEntry); err != nil {
err = fmt.Errorf("process log entry %v: %v", logEntry, err)
return
}
processedTsNs = logEntry.TsNs
pos += 4 + int(size)
}
return
}
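eachChunk above decodes a buffer of [4-byte size][proto filer_pb.LogEntry] records. As a hedged counterpart, a sketch of the write-side framing; the big-endian size prefix is an assumption here, chosen to match util.BytesToUint32 on the read path.

package main

import (
    "encoding/binary"
    "fmt"

    "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
    "google.golang.org/protobuf/proto"
)

// appendLogEntry frames one entry as [4-byte size][proto-encoded LogEntry],
// the layout eachChunk walks above.
func appendLogEntry(buf []byte, entry *filer_pb.LogEntry) ([]byte, error) {
    data, err := proto.Marshal(entry)
    if err != nil {
        return buf, fmt.Errorf("marshal log entry: %v", err)
    }
    sizeBuf := make([]byte, 4)
    binary.BigEndian.PutUint32(sizeBuf, uint32(len(data)))
    return append(append(buf, sizeBuf...), data...), nil
}

func main() {
    var buf []byte
    buf, _ = appendLogEntry(buf, &filer_pb.LogEntry{TsNs: 1, Key: []byte("k1"), Data: []byte("v1")})
    buf, _ = appendLogEntry(buf, &filer_pb.LogEntry{TsNs: 2, Key: []byte("k2"), Data: []byte("v2")})
    fmt.Println("framed bytes:", len(buf)) // a buffer eachChunk above can iterate
}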

View File

@ -0,0 +1,41 @@
package logstore
import (
"github.com/seaweedfs/seaweedfs/weed/mq/topic"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/util/log_buffer"
)
func GenMergedReadFunc(filerClient filer_pb.FilerClient, t topic.Topic, p topic.Partition) log_buffer.LogReadFromDiskFuncType {
fromParquetFn := GenParquetReadFunc(filerClient, t, p)
readLogDirectFn := GenLogOnDiskReadFunc(filerClient, t, p)
return mergeReadFuncs(fromParquetFn, readLogDirectFn)
}
func mergeReadFuncs(fromParquetFn, readLogDirectFn log_buffer.LogReadFromDiskFuncType) log_buffer.LogReadFromDiskFuncType {
var exhaustedParquet bool
var lastProcessedPosition log_buffer.MessagePosition
return func(startPosition log_buffer.MessagePosition, stopTsNs int64, eachLogEntryFn log_buffer.EachLogEntryFuncType) (lastReadPosition log_buffer.MessagePosition, isDone bool, err error) {
if !exhaustedParquet {
// glog.V(4).Infof("reading from parquet startPosition: %v\n", startPosition.UTC())
lastReadPosition, isDone, err = fromParquetFn(startPosition, stopTsNs, eachLogEntryFn)
// glog.V(4).Infof("read from parquet: %v %v %v %v\n", startPosition, lastReadPosition, isDone, err)
if isDone {
isDone = false
}
if err != nil {
return
}
lastProcessedPosition = lastReadPosition
}
exhaustedParquet = true
if startPosition.Before(lastProcessedPosition.Time) {
startPosition = lastProcessedPosition
}
// glog.V(4).Infof("reading from direct log startPosition: %v\n", startPosition.UTC())
lastReadPosition, isDone, err = readLogDirectFn(startPosition, stopTsNs, eachLogEntryFn)
return
}
}
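A hedged usage sketch of the merged reader: parquet files are drained first, then the remaining per-timestamp log files. The function and variable names are placeholders, and the callback is assumed to return (isDone, err), consistent with how eachLogEntryFn is invoked in this diff.

package sketch

import (
    "fmt"
    "time"

    "github.com/seaweedfs/seaweedfs/weed/mq/logstore"
    "github.com/seaweedfs/seaweedfs/weed/mq/topic"
    "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
    "github.com/seaweedfs/seaweedfs/weed/util/log_buffer"
)

// readLastHour is a placeholder caller: it drives the merged reader once and
// prints each entry. stopTsNs == 0 means "no upper bound", as in the read
// funcs above.
func readLastHour(filerClient filer_pb.FilerClient, t topic.Topic, p topic.Partition) error {
    readFn := logstore.GenMergedReadFunc(filerClient, t, p)
    start := log_buffer.NewMessagePosition(time.Now().Add(-time.Hour).UnixNano(), -2)
    lastPos, isDone, err := readFn(start, 0, func(entry *filer_pb.LogEntry) (bool, error) {
        fmt.Printf("%s @ %v\n", entry.Key, time.Unix(0, entry.TsNs).UTC())
        return false, nil
    })
    fmt.Println("read up to", lastPos.Time, "done:", isDone)
    return err
}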

View File

@ -0,0 +1,144 @@
package logstore
import (
"fmt"
"github.com/seaweedfs/seaweedfs/weed/filer"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/mq/topic"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/util"
util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
"github.com/seaweedfs/seaweedfs/weed/util/log_buffer"
"google.golang.org/protobuf/proto"
"math"
"strings"
"time"
)
func GenLogOnDiskReadFunc(filerClient filer_pb.FilerClient, t topic.Topic, p topic.Partition) log_buffer.LogReadFromDiskFuncType {
partitionDir := topic.PartitionDir(t, p)
lookupFileIdFn := filer.LookupFn(filerClient)
eachChunkFn := func(buf []byte, eachLogEntryFn log_buffer.EachLogEntryFuncType, starTsNs, stopTsNs int64) (processedTsNs int64, err error) {
for pos := 0; pos+4 < len(buf); {
size := util.BytesToUint32(buf[pos : pos+4])
if pos+4+int(size) > len(buf) {
err = fmt.Errorf("GenLogOnDiskReadFunc: read [%d,%d) from [0,%d)", pos, pos+int(size)+4, len(buf))
return
}
entryData := buf[pos+4 : pos+4+int(size)]
logEntry := &filer_pb.LogEntry{}
if err = proto.Unmarshal(entryData, logEntry); err != nil {
pos += 4 + int(size)
err = fmt.Errorf("unexpected unmarshal mq_pb.Message: %v", err)
return
}
if logEntry.TsNs < starTsNs {
pos += 4 + int(size)
continue
}
if stopTsNs != 0 && logEntry.TsNs > stopTsNs {
println("stopTsNs", stopTsNs, "logEntry.TsNs", logEntry.TsNs)
return
}
// fmt.Printf(" read logEntry: %v, ts %v\n", string(logEntry.Key), time.Unix(0, logEntry.TsNs).UTC())
if _, err = eachLogEntryFn(logEntry); err != nil {
err = fmt.Errorf("process log entry %v: %v", logEntry, err)
return
}
processedTsNs = logEntry.TsNs
pos += 4 + int(size)
}
return
}
eachFileFn := func(entry *filer_pb.Entry, eachLogEntryFn log_buffer.EachLogEntryFuncType, starTsNs, stopTsNs int64) (processedTsNs int64, err error) {
if len(entry.Content) > 0 {
// skip .offset files
return
}
var urlStrings []string
for _, chunk := range entry.Chunks {
if chunk.Size == 0 {
continue
}
if chunk.IsChunkManifest {
glog.Warningf("this should not happen. unexpected chunk manifest in %s/%s", partitionDir, entry.Name)
return
}
urlStrings, err = lookupFileIdFn(chunk.FileId)
if err != nil {
err = fmt.Errorf("lookup %s: %v", chunk.FileId, err)
return
}
if len(urlStrings) == 0 {
err = fmt.Errorf("no url found for %s", chunk.FileId)
return
}
// try each urlString until util_http.Get(urlString) succeeds
var processed bool
for _, urlString := range urlStrings {
// TODO optimization opportunity: reuse the buffer
var data []byte
// fmt.Printf("reading %s/%s %s\n", partitionDir, entry.Name, urlString)
if data, _, err = util_http.Get(urlString); err == nil {
processed = true
if processedTsNs, err = eachChunkFn(data, eachLogEntryFn, starTsNs, stopTsNs); err != nil {
return
}
break
}
}
if !processed {
err = fmt.Errorf("no data processed for %s %s", entry.Name, chunk.FileId)
return
}
}
return
}
return func(startPosition log_buffer.MessagePosition, stopTsNs int64, eachLogEntryFn log_buffer.EachLogEntryFuncType) (lastReadPosition log_buffer.MessagePosition, isDone bool, err error) {
startFileName := startPosition.UTC().Format(topic.TIME_FORMAT)
startTsNs := startPosition.Time.UnixNano()
stopTime := time.Unix(0, stopTsNs)
var processedTsNs int64
err = filerClient.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
return filer_pb.SeaweedList(client, partitionDir, "", func(entry *filer_pb.Entry, isLast bool) error {
if entry.IsDirectory {
return nil
}
if strings.HasSuffix(entry.Name, ".parquet") {
return nil
}
// FIXME: this is a hack to skip the .offset files
if strings.HasSuffix(entry.Name, ".offset") {
return nil
}
if stopTsNs != 0 && entry.Name > stopTime.UTC().Format(topic.TIME_FORMAT) {
isDone = true
return nil
}
if entry.Name < startPosition.UTC().Format(topic.TIME_FORMAT) {
return nil
}
if processedTsNs, err = eachFileFn(entry, eachLogEntryFn, startTsNs, stopTsNs); err != nil {
return err
}
return nil
}, startFileName, true, math.MaxInt32)
})
lastReadPosition = log_buffer.NewMessagePosition(processedTsNs, -2)
return
}
}

View File

@ -0,0 +1,162 @@
package logstore
import (
"encoding/binary"
"fmt"
"github.com/parquet-go/parquet-go"
"github.com/seaweedfs/seaweedfs/weed/filer"
"github.com/seaweedfs/seaweedfs/weed/mq/schema"
"github.com/seaweedfs/seaweedfs/weed/mq/topic"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/pb/mq_pb"
"github.com/seaweedfs/seaweedfs/weed/util/chunk_cache"
"github.com/seaweedfs/seaweedfs/weed/util/log_buffer"
"google.golang.org/protobuf/proto"
"io"
"math"
"strings"
)
var (
chunkCache = chunk_cache.NewChunkCacheInMemory(256) // 256 entries, 8MB max per entry
)
func GenParquetReadFunc(filerClient filer_pb.FilerClient, t topic.Topic, p topic.Partition) log_buffer.LogReadFromDiskFuncType {
partitionDir := topic.PartitionDir(t, p)
lookupFileIdFn := filer.LookupFn(filerClient)
// read topic conf from filer
var topicConf *mq_pb.ConfigureTopicResponse
var err error
if err := filerClient.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
topicConf, err = t.ReadConfFile(client)
return err
}); err != nil {
return nil
}
recordType := topicConf.GetRecordType()
recordType = schema.NewRecordTypeBuilder(recordType).
WithField(SW_COLUMN_NAME_TS, schema.TypeInt64).
WithField(SW_COLUMN_NAME_KEY, schema.TypeBytes).
RecordTypeEnd()
parquetSchema, err := schema.ToParquetSchema(t.Name, recordType)
if err != nil {
return nil
}
parquetLevels, err := schema.ToParquetLevels(recordType)
if err != nil {
return nil
}
// eachFileFn reads a parquet file and calls eachLogEntryFn for each log entry
eachFileFn := func(entry *filer_pb.Entry, eachLogEntryFn log_buffer.EachLogEntryFuncType, starTsNs, stopTsNs int64) (processedTsNs int64, err error) {
// create readerAt for the parquet file
fileSize := filer.FileSize(entry)
visibleIntervals, _ := filer.NonOverlappingVisibleIntervals(lookupFileIdFn, entry.Chunks, 0, int64(fileSize))
chunkViews := filer.ViewFromVisibleIntervals(visibleIntervals, 0, int64(fileSize))
readerCache := filer.NewReaderCache(32, chunkCache, lookupFileIdFn)
readerAt := filer.NewChunkReaderAtFromClient(readerCache, chunkViews, int64(fileSize))
// create parquet reader
parquetReader := parquet.NewReader(readerAt, parquetSchema)
rows := make([]parquet.Row, 128)
for {
rowCount, readErr := parquetReader.ReadRows(rows)
for i := 0; i < rowCount; i++ {
row := rows[i]
// convert parquet row to schema_pb.RecordValue
recordValue, err := schema.ToRecordValue(recordType, parquetLevels, row)
if err != nil {
return processedTsNs, fmt.Errorf("ToRecordValue failed: %v", err)
}
processedTsNs = recordValue.Fields[SW_COLUMN_NAME_TS].GetInt64Value()
if processedTsNs < starTsNs {
continue
}
if stopTsNs != 0 && processedTsNs >= stopTsNs {
return processedTsNs, nil
}
data, marshalErr := proto.Marshal(recordValue)
if marshalErr != nil {
return processedTsNs, fmt.Errorf("marshal record value: %v", marshalErr)
}
logEntry := &filer_pb.LogEntry{
Key: recordValue.Fields[SW_COLUMN_NAME_KEY].GetBytesValue(),
TsNs: processedTsNs,
Data: data,
}
// fmt.Printf(" parquet entry %s ts %v\n", string(logEntry.Key), time.Unix(0, logEntry.TsNs).UTC())
if _, err = eachLogEntryFn(logEntry); err != nil {
return processedTsNs, fmt.Errorf("process log entry %v: %v", logEntry, err)
}
}
if readErr != nil {
if readErr == io.EOF {
return processedTsNs, nil
}
return processedTsNs, readErr
}
}
return
}
return func(startPosition log_buffer.MessagePosition, stopTsNs int64, eachLogEntryFn log_buffer.EachLogEntryFuncType) (lastReadPosition log_buffer.MessagePosition, isDone bool, err error) {
startFileName := startPosition.UTC().Format(topic.TIME_FORMAT)
startTsNs := startPosition.Time.UnixNano()
var processedTsNs int64
err = filerClient.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
return filer_pb.SeaweedList(client, partitionDir, "", func(entry *filer_pb.Entry, isLast bool) error {
if entry.IsDirectory {
return nil
}
if !strings.HasSuffix(entry.Name, ".parquet") {
return nil
}
if len(entry.Extended) == 0 {
return nil
}
// read minTs from the parquet file
minTsBytes := entry.Extended["min"]
if len(minTsBytes) != 8 {
return nil
}
minTsNs := int64(binary.BigEndian.Uint64(minTsBytes))
// read max ts
maxTsBytes := entry.Extended["max"]
if len(maxTsBytes) != 8 {
return nil
}
maxTsNs := int64(binary.BigEndian.Uint64(maxTsBytes))
if stopTsNs != 0 && stopTsNs <= minTsNs {
isDone = true
return nil
}
if maxTsNs < startTsNs {
return nil
}
if processedTsNs, err = eachFileFn(entry, eachLogEntryFn, startTsNs, stopTsNs); err != nil {
return err
}
return nil
}, startFileName, true, math.MaxInt32)
})
lastReadPosition = log_buffer.NewMessagePosition(processedTsNs, -2)
return
}
}

View File

@ -53,7 +53,7 @@ func (bs *BrokerStats) UpdateStats(stats *mq_pb.BrokerStats) {
}
publisherCount += topicPartitionStats.PublisherCount
subscriberCount += topicPartitionStats.SubscriberCount
key := tps.TopicPartition.String()
key := tps.TopicPartition.TopicPartitionId()
bs.TopicPartitionStats.Set(key, tps)
delete(currentTopicPartitions, key)
}
@ -79,7 +79,7 @@ func (bs *BrokerStats) RegisterAssignment(t *mq_pb.Topic, partition *mq_pb.Parti
PublisherCount: 0,
SubscriberCount: 0,
}
key := tps.TopicPartition.String()
key := tps.TopicPartition.TopicPartitionId()
if isAdd {
bs.TopicPartitionStats.SetIfAbsent(key, tps)
} else {

View File

@ -24,3 +24,30 @@ func (s *Schema) GetField(name string) (*schema_pb.Field, bool) {
field, ok := s.fieldMap[name]
return field, ok
}
func TypeToString(t *schema_pb.Type) string {
switch t.Kind.(type) {
case *schema_pb.Type_ScalarType:
switch t.GetScalarType() {
case schema_pb.ScalarType_BOOL:
return "bool"
case schema_pb.ScalarType_INT32:
return "int32"
case schema_pb.ScalarType_INT64:
return "int64"
case schema_pb.ScalarType_FLOAT:
return "float"
case schema_pb.ScalarType_DOUBLE:
return "double"
case schema_pb.ScalarType_BYTES:
return "bytes"
case schema_pb.ScalarType_STRING:
return "string"
}
case *schema_pb.Type_ListType:
return "list"
case *schema_pb.Type_RecordType:
return "record"
}
return "unknown"
}

View File

@ -19,10 +19,12 @@ type RecordTypeBuilder struct {
recordType *schema_pb.RecordType
}
// RecordTypeBegin creates a new RecordTypeBuilder, it should be followed by a series of WithField methods and RecordTypeEnd
func RecordTypeBegin() *RecordTypeBuilder {
return &RecordTypeBuilder{recordType: &schema_pb.RecordType{}}
}
// RecordTypeEnd finishes the building of a RecordType
func (rtb *RecordTypeBuilder) RecordTypeEnd() *schema_pb.RecordType {
// be consistent with parquet.node.go `func (g Group) Fields() []Field`
sort.Slice(rtb.recordType.Fields, func(i, j int) bool {
@ -31,6 +33,11 @@ func (rtb *RecordTypeBuilder) RecordTypeEnd() *schema_pb.RecordType {
return rtb.recordType
}
// NewRecordTypeBuilder creates a new RecordTypeBuilder from an existing RecordType, it should be followed by a series of WithField methods and RecordTypeEnd
func NewRecordTypeBuilder(recordType *schema_pb.RecordType) (rtb *RecordTypeBuilder) {
return &RecordTypeBuilder{recordType: recordType}
}
func (rtb *RecordTypeBuilder) WithField(name string, scalarType *schema_pb.Type) *RecordTypeBuilder {
rtb.recordType.Fields = append(rtb.recordType.Fields, &schema_pb.Field{
Name: name,

View File

@ -11,13 +11,6 @@ type PartitionConsumerMapping struct {
prevMappings []*PartitionSlotToConsumerInstanceList
}
func NewPartitionConsumerMapping(ringSize int32) *PartitionConsumerMapping {
newVersion := time.Now().UnixNano()
return &PartitionConsumerMapping{
currentMapping: NewPartitionSlotToConsumerInstanceList(ringSize, newVersion),
}
}
// Balance goal:
// 1. max processing power utilization
// 2. allow one consumer instance to be down unexpectedly
@ -27,8 +20,7 @@ func (pcm *PartitionConsumerMapping) BalanceToConsumerInstances(partitionSlotToB
if len(partitionSlotToBrokerList.PartitionSlots) == 0 || len(consumerInstances) == 0 {
return
}
newVersion := time.Now().UnixNano()
newMapping := NewPartitionSlotToConsumerInstanceList(partitionSlotToBrokerList.RingSize, newVersion)
newMapping := NewPartitionSlotToConsumerInstanceList(partitionSlotToBrokerList.RingSize, time.Now())
var prevMapping *PartitionSlotToConsumerInstanceList
if len(pcm.prevMappings) > 0 {
prevMapping = pcm.prevMappings[len(pcm.prevMappings)-1]

View File

@ -1,6 +1,6 @@
package sub_coordinator
import "github.com/seaweedfs/seaweedfs/weed/mq/topic"
import "time"
type PartitionSlotToConsumerInstance struct {
RangeStart int32
@ -17,17 +17,9 @@ type PartitionSlotToConsumerInstanceList struct {
Version int64
}
func NewPartitionSlotToConsumerInstanceList(ringSize int32, version int64) *PartitionSlotToConsumerInstanceList {
func NewPartitionSlotToConsumerInstanceList(ringSize int32, version time.Time) *PartitionSlotToConsumerInstanceList {
return &PartitionSlotToConsumerInstanceList{
RingSize: ringSize,
Version: version,
Version: version.UnixNano(),
}
}
func ToPartitions(ringSize int32, slots []*PartitionSlotToConsumerInstance) []*topic.Partition {
partitions := make([]*topic.Partition, 0, len(slots))
for _, slot := range slots {
partitions = append(partitions, topic.NewPartition(slot.RangeStart, slot.RangeStop, ringSize, slot.UnixTimeNs))
}
return partitions
}

View File

@ -88,7 +88,7 @@ func (manager *LocalTopicManager) CollectStats(duration time.Duration) *mq_pb.Br
Topic: Topic{Namespace: localTopic.Namespace, Name: localTopic.Name},
Partition: localPartition.Partition,
}
stats.Stats[topicPartition.String()] = &mq_pb.TopicPartitionStats{
stats.Stats[topicPartition.TopicPartitionId()] = &mq_pb.TopicPartitionStats{
Topic: &mq_pb.Topic{
Namespace: string(localTopic.Namespace),
Name: localTopic.Name,

View File

@ -34,6 +34,7 @@ type LocalPartition struct {
}
var TIME_FORMAT = "2006-01-02-15-04-05"
var PartitionGenerationFormat = "v2006-01-02-15-04-05"
func NewLocalPartition(partition Partition, logFlushFn log_buffer.LogFlushFuncType, readFromDiskFn log_buffer.LogReadFromDiskFuncType) *LocalPartition {
lp := &LocalPartition{

View File

@ -1,6 +1,10 @@
package topic
import "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb"
import (
"fmt"
"github.com/seaweedfs/seaweedfs/weed/pb/mq_pb"
"time"
)
const PartitionCount = 4096
@ -81,3 +85,24 @@ func (partition Partition) Overlaps(partition2 Partition) bool {
}
return true
}
func (partition Partition) String() string {
return fmt.Sprintf("%04d-%04d", partition.RangeStart, partition.RangeStop)
}
func ParseTopicVersion(name string) (t time.Time, err error) {
return time.Parse(PartitionGenerationFormat, name)
}
func ParsePartitionBoundary(name string) (start, stop int32) {
_, err := fmt.Sscanf(name, "%04d-%04d", &start, &stop)
if err != nil {
return 0, 0
}
return start, stop
}
func PartitionDir(t Topic, p Partition) string {
partitionGeneration := time.Unix(0, p.UnixTimeNs).UTC().Format(PartitionGenerationFormat)
return fmt.Sprintf("%s/%s/%04d-%04d", t.Dir(), partitionGeneration, p.RangeStart, p.RangeStop)
}
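A small illustration of the on-filer layout PartitionDir produces: the topic directory, a "v"-prefixed generation timestamp, then the partition range. The namespace, topic name, and timestamp are made up for the example.

package main

import (
    "fmt"
    "time"

    "github.com/seaweedfs/seaweedfs/weed/mq/topic"
)

func main() {
    t := topic.Topic{Namespace: "test", Name: "events"}
    p := topic.Partition{
        RangeStart: 0,
        RangeStop:  1024,
        RingSize:   topic.PartitionCount,
        UnixTimeNs: time.Date(2024, 11, 8, 17, 15, 42, 0, time.UTC).UnixNano(),
    }
    // prints /topics/test/events/v2024-11-08-17-15-42/0000-1024
    fmt.Println(topic.PartitionDir(t, p))
}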

View File

@ -1,8 +1,13 @@
package topic
import (
"bytes"
"errors"
"fmt"
"github.com/seaweedfs/seaweedfs/weed/filer"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/pb/mq_pb"
jsonpb "google.golang.org/protobuf/encoding/protojson"
)
type Topic struct {
@ -23,13 +28,42 @@ func FromPbTopic(topic *mq_pb.Topic) Topic {
}
}
func (tp Topic) ToPbTopic() *mq_pb.Topic {
func (t Topic) ToPbTopic() *mq_pb.Topic {
return &mq_pb.Topic{
Namespace: tp.Namespace,
Name: tp.Name,
Namespace: t.Namespace,
Name: t.Name,
}
}
func (tp Topic) String() string {
return fmt.Sprintf("%s.%s", tp.Namespace, tp.Name)
func (t Topic) String() string {
return fmt.Sprintf("%s.%s", t.Namespace, t.Name)
}
func (t Topic) Dir() string {
return fmt.Sprintf("%s/%s/%s", filer.TopicsDir, t.Namespace, t.Name)
}
func (t Topic) ReadConfFile(client filer_pb.SeaweedFilerClient) (*mq_pb.ConfigureTopicResponse, error) {
data, err := filer.ReadInsideFiler(client, t.Dir(), filer.TopicConfFile)
if errors.Is(err, filer_pb.ErrNotFound) {
return nil, err
}
if err != nil {
return nil, fmt.Errorf("read topic.conf of %v: %v", t, err)
}
// parse into filer conf object
conf := &mq_pb.ConfigureTopicResponse{}
if err = jsonpb.Unmarshal(data, conf); err != nil {
return nil, fmt.Errorf("unmarshal topic %v conf: %v", t, err)
}
return conf, nil
}
func (t Topic) WriteConfFile(client filer_pb.SeaweedFilerClient, conf *mq_pb.ConfigureTopicResponse) error {
var buf bytes.Buffer
filer.ProtoToText(&buf, conf)
if err := filer.SaveInsideFiler(client, t.Dir(), filer.TopicConfFile, buf.Bytes()); err != nil {
return fmt.Errorf("save topic %v conf: %v", t, err)
}
return nil
}
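A short, hedged sketch of the intended write-then-read flow for the new conf helpers, given an already-connected filer gRPC client; the namespace and topic name are placeholders.

package example

import (
	"github.com/seaweedfs/seaweedfs/weed/mq/topic"
	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
	"github.com/seaweedfs/seaweedfs/weed/pb/mq_pb"
)

// roundTripTopicConf persists a topic configuration into the topic's directory
// via WriteConfFile, then reads it back and unmarshals it into a
// ConfigureTopicResponse via ReadConfFile.
func roundTripTopicConf(client filer_pb.SeaweedFilerClient) (*mq_pb.ConfigureTopicResponse, error) {
	t := topic.Topic{Namespace: "app", Name: "events"}
	conf := &mq_pb.ConfigureTopicResponse{} // fill in partition assignments as needed
	if err := t.WriteConfFile(client, conf); err != nil {
		return nil, err
	}
	return t.ReadConfFile(client)
}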

View File

@ -7,6 +7,6 @@ type TopicPartition struct {
Partition
}
func (tp *TopicPartition) String() string {
func (tp *TopicPartition) TopicPartitionId() string {
return fmt.Sprintf("%v.%v-%04d-%04d", tp.Namespace, tp.Topic, tp.RangeStart, tp.RangeStop)
}

View File

@ -19,19 +19,15 @@ import (
)
type FilePart struct {
Reader io.Reader
FileName string
FileSize int64
MimeType string
ModTime int64 //in seconds
Replication string
Collection string
DataCenter string
Ttl string
DiskType string
Server string //this comes from assign result
Fid string //this comes from assign result, but customizable
Fsync bool
Reader io.Reader
FileName string
FileSize int64
MimeType string
ModTime int64 //in seconds
Pref StoragePreference
Server string //this comes from assign result
Fid string //this comes from assign result, but customizable
Fsync bool
}
type SubmitResult struct {
@ -42,20 +38,29 @@ type SubmitResult struct {
Error string `json:"error,omitempty"`
}
type StoragePreference struct {
Replication string
Collection string
DataCenter string
Ttl string
DiskType string
MaxMB int
}
type GetMasterFn func(ctx context.Context) pb.ServerAddress
func SubmitFiles(masterFn GetMasterFn, grpcDialOption grpc.DialOption, files []FilePart, replication string, collection string, dataCenter string, ttl string, diskType string, maxMB int, usePublicUrl bool) ([]SubmitResult, error) {
func SubmitFiles(masterFn GetMasterFn, grpcDialOption grpc.DialOption, files []*FilePart, pref StoragePreference, usePublicUrl bool) ([]SubmitResult, error) {
results := make([]SubmitResult, len(files))
for index, file := range files {
results[index].FileName = file.FileName
}
ar := &VolumeAssignRequest{
Count: uint64(len(files)),
Replication: replication,
Collection: collection,
DataCenter: dataCenter,
Ttl: ttl,
DiskType: diskType,
Replication: pref.Replication,
Collection: pref.Collection,
DataCenter: pref.DataCenter,
Ttl: pref.Ttl,
DiskType: pref.DiskType,
}
ret, err := Assign(masterFn, grpcDialOption, ar)
if err != nil {
@ -73,12 +78,8 @@ func SubmitFiles(masterFn GetMasterFn, grpcDialOption grpc.DialOption, files []F
if usePublicUrl {
file.Server = ret.PublicUrl
}
file.Replication = replication
file.Collection = collection
file.DataCenter = dataCenter
file.Ttl = ttl
file.DiskType = diskType
results[index].Size, err = file.Upload(maxMB, masterFn, usePublicUrl, ret.Auth, grpcDialOption)
file.Pref = pref
results[index].Size, err = file.Upload(pref.MaxMB, masterFn, usePublicUrl, ret.Auth, grpcDialOption)
if err != nil {
results[index].Error = err.Error()
}
@ -88,8 +89,8 @@ func SubmitFiles(masterFn GetMasterFn, grpcDialOption grpc.DialOption, files []F
return results, nil
}
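A hedged sketch of the refactored call site: the individual replication/collection/ttl/disk-type parameters of SubmitFiles are now grouped into a StoragePreference, and NewFileParts returns []*FilePart. The operation import path and the concrete preference values below are assumptions for illustration.

package example

import (
	"github.com/seaweedfs/seaweedfs/weed/operation"
	"google.golang.org/grpc"
)

// submitWithPreference uploads local files through the master returned by
// masterFn, passing all storage hints as one StoragePreference value.
func submitWithPreference(masterFn operation.GetMasterFn, dialOpt grpc.DialOption, paths []string) ([]operation.SubmitResult, error) {
	parts, err := operation.NewFileParts(paths)
	if err != nil {
		return nil, err
	}
	pref := operation.StoragePreference{
		Replication: "001",
		Collection:  "logs",
		Ttl:         "7d",
		MaxMB:       4,
	}
	return operation.SubmitFiles(masterFn, dialOpt, parts, pref, false)
}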
func NewFileParts(fullPathFilenames []string) (ret []FilePart, err error) {
ret = make([]FilePart, len(fullPathFilenames))
func NewFileParts(fullPathFilenames []string) (ret []*FilePart, err error) {
ret = make([]*FilePart, len(fullPathFilenames))
for index, file := range fullPathFilenames {
if ret[index], err = newFilePart(file); err != nil {
return
@ -97,7 +98,8 @@ func NewFileParts(fullPathFilenames []string) (ret []FilePart, err error) {
}
return
}
func newFilePart(fullPathFilename string) (ret FilePart, err error) {
func newFilePart(fullPathFilename string) (ret *FilePart, err error) {
ret = &FilePart{}
fh, openErr := os.Open(fullPathFilename)
if openErr != nil {
glog.V(0).Info("Failed to open file: ", fullPathFilename)
@ -121,7 +123,7 @@ func newFilePart(fullPathFilename string) (ret FilePart, err error) {
return ret, nil
}
func (fi FilePart) Upload(maxMB int, masterFn GetMasterFn, usePublicUrl bool, jwt security.EncodedJwt, grpcDialOption grpc.DialOption) (retSize uint32, err error) {
func (fi *FilePart) Upload(maxMB int, masterFn GetMasterFn, usePublicUrl bool, jwt security.EncodedJwt, grpcDialOption grpc.DialOption) (retSize uint32, err error) {
fileUrl := "http://" + fi.Server + "/" + fi.Fid
if fi.ModTime != 0 {
fileUrl += "?ts=" + strconv.Itoa(int(fi.ModTime))
@ -145,13 +147,13 @@ func (fi FilePart) Upload(maxMB int, masterFn GetMasterFn, usePublicUrl bool, jw
var ret *AssignResult
var id string
if fi.DataCenter != "" {
if fi.Pref.DataCenter != "" {
ar := &VolumeAssignRequest{
Count: uint64(chunks),
Replication: fi.Replication,
Collection: fi.Collection,
Ttl: fi.Ttl,
DiskType: fi.DiskType,
Replication: fi.Pref.Replication,
Collection: fi.Pref.Collection,
Ttl: fi.Pref.Ttl,
DiskType: fi.Pref.DiskType,
}
ret, err = Assign(masterFn, grpcDialOption, ar)
if err != nil {
@ -159,13 +161,13 @@ func (fi FilePart) Upload(maxMB int, masterFn GetMasterFn, usePublicUrl bool, jw
}
}
for i := int64(0); i < chunks; i++ {
if fi.DataCenter == "" {
if fi.Pref.DataCenter == "" {
ar := &VolumeAssignRequest{
Count: 1,
Replication: fi.Replication,
Collection: fi.Collection,
Ttl: fi.Ttl,
DiskType: fi.DiskType,
Replication: fi.Pref.Replication,
Collection: fi.Pref.Collection,
Ttl: fi.Pref.Ttl,
DiskType: fi.Pref.DiskType,
}
ret, err = Assign(masterFn, grpcDialOption, ar)
if err != nil {

View File

@ -1,7 +1,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.32.0
// protoc v4.25.3
// protoc-gen-go v1.34.2
// protoc v5.28.1
// source: filer.proto
package filer_pb
@ -5216,7 +5216,7 @@ func file_filer_proto_rawDescGZIP() []byte {
}
var file_filer_proto_msgTypes = make([]protoimpl.MessageInfo, 70)
var file_filer_proto_goTypes = []interface{}{
var file_filer_proto_goTypes = []any{
(*LookupDirectoryEntryRequest)(nil), // 0: filer_pb.LookupDirectoryEntryRequest
(*LookupDirectoryEntryResponse)(nil), // 1: filer_pb.LookupDirectoryEntryResponse
(*ListEntriesRequest)(nil), // 2: filer_pb.ListEntriesRequest
@ -5379,7 +5379,7 @@ func file_filer_proto_init() {
return
}
if !protoimpl.UnsafeEnabled {
file_filer_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
file_filer_proto_msgTypes[0].Exporter = func(v any, i int) any {
switch v := v.(*LookupDirectoryEntryRequest); i {
case 0:
return &v.state
@ -5391,7 +5391,7 @@ func file_filer_proto_init() {
return nil
}
}
file_filer_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
file_filer_proto_msgTypes[1].Exporter = func(v any, i int) any {
switch v := v.(*LookupDirectoryEntryResponse); i {
case 0:
return &v.state
@ -5403,7 +5403,7 @@ func file_filer_proto_init() {
return nil
}
}
file_filer_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
file_filer_proto_msgTypes[2].Exporter = func(v any, i int) any {
switch v := v.(*ListEntriesRequest); i {
case 0:
return &v.state
@ -5415,7 +5415,7 @@ func file_filer_proto_init() {
return nil
}
}
file_filer_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
file_filer_proto_msgTypes[3].Exporter = func(v any, i int) any {
switch v := v.(*ListEntriesResponse); i {
case 0:
return &v.state
@ -5427,7 +5427,7 @@ func file_filer_proto_init() {
return nil
}
}
file_filer_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
file_filer_proto_msgTypes[4].Exporter = func(v any, i int) any {
switch v := v.(*RemoteEntry); i {
case 0:
return &v.state
@ -5439,7 +5439,7 @@ func file_filer_proto_init() {
return nil
}
}
file_filer_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
file_filer_proto_msgTypes[5].Exporter = func(v any, i int) any {
switch v := v.(*Entry); i {
case 0:
return &v.state
@ -5451,7 +5451,7 @@ func file_filer_proto_init() {
return nil
}
}
file_filer_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
file_filer_proto_msgTypes[6].Exporter = func(v any, i int) any {
switch v := v.(*FullEntry); i {
case 0:
return &v.state
@ -5463,7 +5463,7 @@ func file_filer_proto_init() {
return nil
}
}
file_filer_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
file_filer_proto_msgTypes[7].Exporter = func(v any, i int) any {
switch v := v.(*EventNotification); i {
case 0:
return &v.state
@ -5475,7 +5475,7 @@ func file_filer_proto_init() {
return nil
}
}
file_filer_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
file_filer_proto_msgTypes[8].Exporter = func(v any, i int) any {
switch v := v.(*FileChunk); i {
case 0:
return &v.state
@ -5487,7 +5487,7 @@ func file_filer_proto_init() {
return nil
}
}
file_filer_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
file_filer_proto_msgTypes[9].Exporter = func(v any, i int) any {
switch v := v.(*FileChunkManifest); i {
case 0:
return &v.state
@ -5499,7 +5499,7 @@ func file_filer_proto_init() {
return nil
}
}
file_filer_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
file_filer_proto_msgTypes[10].Exporter = func(v any, i int) any {
switch v := v.(*FileId); i {
case 0:
return &v.state
@ -5511,7 +5511,7 @@ func file_filer_proto_init() {
return nil
}
}
file_filer_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
file_filer_proto_msgTypes[11].Exporter = func(v any, i int) any {
switch v := v.(*FuseAttributes); i {
case 0:
return &v.state
@ -5523,7 +5523,7 @@ func file_filer_proto_init() {
return nil
}
}
file_filer_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
file_filer_proto_msgTypes[12].Exporter = func(v any, i int) any {
switch v := v.(*CreateEntryRequest); i {
case 0:
return &v.state
@ -5535,7 +5535,7 @@ func file_filer_proto_init() {
return nil
}
}
file_filer_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
file_filer_proto_msgTypes[13].Exporter = func(v any, i int) any {
switch v := v.(*CreateEntryResponse); i {
case 0:
return &v.state
@ -5547,7 +5547,7 @@ func file_filer_proto_init() {
return nil
}
}
file_filer_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
file_filer_proto_msgTypes[14].Exporter = func(v any, i int) any {
switch v := v.(*UpdateEntryRequest); i {
case 0:
return &v.state
@ -5559,7 +5559,7 @@ func file_filer_proto_init() {
return nil
}
}
file_filer_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
file_filer_proto_msgTypes[15].Exporter = func(v any, i int) any {
switch v := v.(*UpdateEntryResponse); i {
case 0:
return &v.state
@ -5571,7 +5571,7 @@ func file_filer_proto_init() {
return nil
}
}
file_filer_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} {
file_filer_proto_msgTypes[16].Exporter = func(v any, i int) any {
switch v := v.(*AppendToEntryRequest); i {
case 0:
return &v.state
@ -5583,7 +5583,7 @@ func file_filer_proto_init() {
return nil
}
}
file_filer_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} {
file_filer_proto_msgTypes[17].Exporter = func(v any, i int) any {
switch v := v.(*AppendToEntryResponse); i {
case 0:
return &v.state
@ -5595,7 +5595,7 @@ func file_filer_proto_init() {
return nil
}
}
file_filer_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
file_filer_proto_msgTypes[18].Exporter = func(v any, i int) any {
switch v := v.(*DeleteEntryRequest); i {
case 0:
return &v.state
@ -5607,7 +5607,7 @@ func file_filer_proto_init() {
return nil
}
}
file_filer_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} {
file_filer_proto_msgTypes[19].Exporter = func(v any, i int) any {
switch v := v.(*DeleteEntryResponse); i {
case 0:
return &v.state
@ -5619,7 +5619,7 @@ func file_filer_proto_init() {
return nil
}
}
file_filer_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} {
file_filer_proto_msgTypes[20].Exporter = func(v any, i int) any {
switch v := v.(*AtomicRenameEntryRequest); i {
case 0:
return &v.state
@ -5631,7 +5631,7 @@ func file_filer_proto_init() {
return nil
}
}
file_filer_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} {
file_filer_proto_msgTypes[21].Exporter = func(v any, i int) any {
switch v := v.(*AtomicRenameEntryResponse); i {
case 0:
return &v.state
@ -5643,7 +5643,7 @@ func file_filer_proto_init() {
return nil
}
}
file_filer_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} {
file_filer_proto_msgTypes[22].Exporter = func(v any, i int) any {
switch v := v.(*StreamRenameEntryRequest); i {
case 0:
return &v.state
@ -5655,7 +5655,7 @@ func file_filer_proto_init() {
return nil
}
}
file_filer_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} {
file_filer_proto_msgTypes[23].Exporter = func(v any, i int) any {
switch v := v.(*StreamRenameEntryResponse); i {
case 0:
return &v.state
@ -5667,7 +5667,7 @@ func file_filer_proto_init() {
return nil
}
}
file_filer_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} {
file_filer_proto_msgTypes[24].Exporter = func(v any, i int) any {
switch v := v.(*AssignVolumeRequest); i {
case 0:
return &v.state
@ -5679,7 +5679,7 @@ func file_filer_proto_init() {
return nil
}
}
file_filer_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} {
file_filer_proto_msgTypes[25].Exporter = func(v any, i int) any {
switch v := v.(*AssignVolumeResponse); i {
case 0:
return &v.state
@ -5691,7 +5691,7 @@ func file_filer_proto_init() {
return nil
}
}
file_filer_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} {
file_filer_proto_msgTypes[26].Exporter = func(v any, i int) any {
switch v := v.(*LookupVolumeRequest); i {
case 0:
return &v.state
@ -5703,7 +5703,7 @@ func file_filer_proto_init() {
return nil
}
}
file_filer_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} {
file_filer_proto_msgTypes[27].Exporter = func(v any, i int) any {
switch v := v.(*Locations); i {
case 0:
return &v.state
@ -5715,7 +5715,7 @@ func file_filer_proto_init() {
return nil
}
}
file_filer_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} {
file_filer_proto_msgTypes[28].Exporter = func(v any, i int) any {
switch v := v.(*Location); i {
case 0:
return &v.state
@ -5727,7 +5727,7 @@ func file_filer_proto_init() {
return nil
}
}
file_filer_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} {
file_filer_proto_msgTypes[29].Exporter = func(v any, i int) any {
switch v := v.(*LookupVolumeResponse); i {
case 0:
return &v.state
@ -5739,7 +5739,7 @@ func file_filer_proto_init() {
return nil
}
}
file_filer_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} {
file_filer_proto_msgTypes[30].Exporter = func(v any, i int) any {
switch v := v.(*Collection); i {
case 0:
return &v.state
@ -5751,7 +5751,7 @@ func file_filer_proto_init() {
return nil
}
}
file_filer_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} {
file_filer_proto_msgTypes[31].Exporter = func(v any, i int) any {
switch v := v.(*CollectionListRequest); i {
case 0:
return &v.state
@ -5763,7 +5763,7 @@ func file_filer_proto_init() {
return nil
}
}
file_filer_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} {
file_filer_proto_msgTypes[32].Exporter = func(v any, i int) any {
switch v := v.(*CollectionListResponse); i {
case 0:
return &v.state
@ -5775,7 +5775,7 @@ func file_filer_proto_init() {
return nil
}
}
file_filer_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} {
file_filer_proto_msgTypes[33].Exporter = func(v any, i int) any {
switch v := v.(*DeleteCollectionRequest); i {
case 0:
return &v.state
@ -5787,7 +5787,7 @@ func file_filer_proto_init() {
return nil
}
}
file_filer_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} {
file_filer_proto_msgTypes[34].Exporter = func(v any, i int) any {
switch v := v.(*DeleteCollectionResponse); i {
case 0:
return &v.state
@ -5799,7 +5799,7 @@ func file_filer_proto_init() {
return nil
}
}
file_filer_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} {
file_filer_proto_msgTypes[35].Exporter = func(v any, i int) any {
switch v := v.(*StatisticsRequest); i {
case 0:
return &v.state
@ -5811,7 +5811,7 @@ func file_filer_proto_init() {
return nil
}
}
file_filer_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} {
file_filer_proto_msgTypes[36].Exporter = func(v any, i int) any {
switch v := v.(*StatisticsResponse); i {
case 0:
return &v.state
@ -5823,7 +5823,7 @@ func file_filer_proto_init() {
return nil
}
}
file_filer_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} {
file_filer_proto_msgTypes[37].Exporter = func(v any, i int) any {
switch v := v.(*PingRequest); i {
case 0:
return &v.state
@ -5835,7 +5835,7 @@ func file_filer_proto_init() {
return nil
}
}
file_filer_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} {
file_filer_proto_msgTypes[38].Exporter = func(v any, i int) any {
switch v := v.(*PingResponse); i {
case 0:
return &v.state
@ -5847,7 +5847,7 @@ func file_filer_proto_init() {
return nil
}
}
file_filer_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} {
file_filer_proto_msgTypes[39].Exporter = func(v any, i int) any {
switch v := v.(*GetFilerConfigurationRequest); i {
case 0:
return &v.state
@ -5859,7 +5859,7 @@ func file_filer_proto_init() {
return nil
}
}
file_filer_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} {
file_filer_proto_msgTypes[40].Exporter = func(v any, i int) any {
switch v := v.(*GetFilerConfigurationResponse); i {
case 0:
return &v.state
@ -5871,7 +5871,7 @@ func file_filer_proto_init() {
return nil
}
}
file_filer_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} {
file_filer_proto_msgTypes[41].Exporter = func(v any, i int) any {
switch v := v.(*SubscribeMetadataRequest); i {
case 0:
return &v.state
@ -5883,7 +5883,7 @@ func file_filer_proto_init() {
return nil
}
}
file_filer_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} {
file_filer_proto_msgTypes[42].Exporter = func(v any, i int) any {
switch v := v.(*SubscribeMetadataResponse); i {
case 0:
return &v.state
@ -5895,7 +5895,7 @@ func file_filer_proto_init() {
return nil
}
}
file_filer_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} {
file_filer_proto_msgTypes[43].Exporter = func(v any, i int) any {
switch v := v.(*TraverseBfsMetadataRequest); i {
case 0:
return &v.state
@ -5907,7 +5907,7 @@ func file_filer_proto_init() {
return nil
}
}
file_filer_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} {
file_filer_proto_msgTypes[44].Exporter = func(v any, i int) any {
switch v := v.(*TraverseBfsMetadataResponse); i {
case 0:
return &v.state
@ -5919,7 +5919,7 @@ func file_filer_proto_init() {
return nil
}
}
file_filer_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} {
file_filer_proto_msgTypes[45].Exporter = func(v any, i int) any {
switch v := v.(*LogEntry); i {
case 0:
return &v.state
@ -5931,7 +5931,7 @@ func file_filer_proto_init() {
return nil
}
}
file_filer_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} {
file_filer_proto_msgTypes[46].Exporter = func(v any, i int) any {
switch v := v.(*KeepConnectedRequest); i {
case 0:
return &v.state
@ -5943,7 +5943,7 @@ func file_filer_proto_init() {
return nil
}
}
file_filer_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} {
file_filer_proto_msgTypes[47].Exporter = func(v any, i int) any {
switch v := v.(*KeepConnectedResponse); i {
case 0:
return &v.state
@ -5955,7 +5955,7 @@ func file_filer_proto_init() {
return nil
}
}
file_filer_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} {
file_filer_proto_msgTypes[48].Exporter = func(v any, i int) any {
switch v := v.(*LocateBrokerRequest); i {
case 0:
return &v.state
@ -5967,7 +5967,7 @@ func file_filer_proto_init() {
return nil
}
}
file_filer_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} {
file_filer_proto_msgTypes[49].Exporter = func(v any, i int) any {
switch v := v.(*LocateBrokerResponse); i {
case 0:
return &v.state
@ -5979,7 +5979,7 @@ func file_filer_proto_init() {
return nil
}
}
file_filer_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} {
file_filer_proto_msgTypes[50].Exporter = func(v any, i int) any {
switch v := v.(*KvGetRequest); i {
case 0:
return &v.state
@ -5991,7 +5991,7 @@ func file_filer_proto_init() {
return nil
}
}
file_filer_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} {
file_filer_proto_msgTypes[51].Exporter = func(v any, i int) any {
switch v := v.(*KvGetResponse); i {
case 0:
return &v.state
@ -6003,7 +6003,7 @@ func file_filer_proto_init() {
return nil
}
}
file_filer_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} {
file_filer_proto_msgTypes[52].Exporter = func(v any, i int) any {
switch v := v.(*KvPutRequest); i {
case 0:
return &v.state
@ -6015,7 +6015,7 @@ func file_filer_proto_init() {
return nil
}
}
file_filer_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} {
file_filer_proto_msgTypes[53].Exporter = func(v any, i int) any {
switch v := v.(*KvPutResponse); i {
case 0:
return &v.state
@ -6027,7 +6027,7 @@ func file_filer_proto_init() {
return nil
}
}
file_filer_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} {
file_filer_proto_msgTypes[54].Exporter = func(v any, i int) any {
switch v := v.(*FilerConf); i {
case 0:
return &v.state
@ -6039,7 +6039,7 @@ func file_filer_proto_init() {
return nil
}
}
file_filer_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} {
file_filer_proto_msgTypes[55].Exporter = func(v any, i int) any {
switch v := v.(*CacheRemoteObjectToLocalClusterRequest); i {
case 0:
return &v.state
@ -6051,7 +6051,7 @@ func file_filer_proto_init() {
return nil
}
}
file_filer_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} {
file_filer_proto_msgTypes[56].Exporter = func(v any, i int) any {
switch v := v.(*CacheRemoteObjectToLocalClusterResponse); i {
case 0:
return &v.state
@ -6063,7 +6063,7 @@ func file_filer_proto_init() {
return nil
}
}
file_filer_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} {
file_filer_proto_msgTypes[57].Exporter = func(v any, i int) any {
switch v := v.(*LockRequest); i {
case 0:
return &v.state
@ -6075,7 +6075,7 @@ func file_filer_proto_init() {
return nil
}
}
file_filer_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} {
file_filer_proto_msgTypes[58].Exporter = func(v any, i int) any {
switch v := v.(*LockResponse); i {
case 0:
return &v.state
@ -6087,7 +6087,7 @@ func file_filer_proto_init() {
return nil
}
}
file_filer_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} {
file_filer_proto_msgTypes[59].Exporter = func(v any, i int) any {
switch v := v.(*UnlockRequest); i {
case 0:
return &v.state
@ -6099,7 +6099,7 @@ func file_filer_proto_init() {
return nil
}
}
file_filer_proto_msgTypes[60].Exporter = func(v interface{}, i int) interface{} {
file_filer_proto_msgTypes[60].Exporter = func(v any, i int) any {
switch v := v.(*UnlockResponse); i {
case 0:
return &v.state
@ -6111,7 +6111,7 @@ func file_filer_proto_init() {
return nil
}
}
file_filer_proto_msgTypes[61].Exporter = func(v interface{}, i int) interface{} {
file_filer_proto_msgTypes[61].Exporter = func(v any, i int) any {
switch v := v.(*FindLockOwnerRequest); i {
case 0:
return &v.state
@ -6123,7 +6123,7 @@ func file_filer_proto_init() {
return nil
}
}
file_filer_proto_msgTypes[62].Exporter = func(v interface{}, i int) interface{} {
file_filer_proto_msgTypes[62].Exporter = func(v any, i int) any {
switch v := v.(*FindLockOwnerResponse); i {
case 0:
return &v.state
@ -6135,7 +6135,7 @@ func file_filer_proto_init() {
return nil
}
}
file_filer_proto_msgTypes[63].Exporter = func(v interface{}, i int) interface{} {
file_filer_proto_msgTypes[63].Exporter = func(v any, i int) any {
switch v := v.(*Lock); i {
case 0:
return &v.state
@ -6147,7 +6147,7 @@ func file_filer_proto_init() {
return nil
}
}
file_filer_proto_msgTypes[64].Exporter = func(v interface{}, i int) interface{} {
file_filer_proto_msgTypes[64].Exporter = func(v any, i int) any {
switch v := v.(*TransferLocksRequest); i {
case 0:
return &v.state
@ -6159,7 +6159,7 @@ func file_filer_proto_init() {
return nil
}
}
file_filer_proto_msgTypes[65].Exporter = func(v interface{}, i int) interface{} {
file_filer_proto_msgTypes[65].Exporter = func(v any, i int) any {
switch v := v.(*TransferLocksResponse); i {
case 0:
return &v.state
@ -6171,7 +6171,7 @@ func file_filer_proto_init() {
return nil
}
}
file_filer_proto_msgTypes[68].Exporter = func(v interface{}, i int) interface{} {
file_filer_proto_msgTypes[68].Exporter = func(v any, i int) any {
switch v := v.(*LocateBrokerResponse_Resource); i {
case 0:
return &v.state
@ -6183,7 +6183,7 @@ func file_filer_proto_init() {
return nil
}
}
file_filer_proto_msgTypes[69].Exporter = func(v interface{}, i int) interface{} {
file_filer_proto_msgTypes[69].Exporter = func(v any, i int) any {
switch v := v.(*FilerConf_PathConf); i {
case 0:
return &v.state

View File

@ -1,7 +1,7 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.3.0
// - protoc v4.25.3
// - protoc-gen-go-grpc v1.5.1
// - protoc v5.28.1
// source: filer.proto
package filer_pb
@ -15,8 +15,8 @@ import (
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.32.0 or later.
const _ = grpc.SupportPackageIsVersion7
// Requires gRPC-Go v1.64.0 or later.
const _ = grpc.SupportPackageIsVersion9
const (
SeaweedFiler_LookupDirectoryEntry_FullMethodName = "/filer_pb.SeaweedFiler/LookupDirectoryEntry"
@ -51,13 +51,13 @@ const (
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type SeaweedFilerClient interface {
LookupDirectoryEntry(ctx context.Context, in *LookupDirectoryEntryRequest, opts ...grpc.CallOption) (*LookupDirectoryEntryResponse, error)
ListEntries(ctx context.Context, in *ListEntriesRequest, opts ...grpc.CallOption) (SeaweedFiler_ListEntriesClient, error)
ListEntries(ctx context.Context, in *ListEntriesRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[ListEntriesResponse], error)
CreateEntry(ctx context.Context, in *CreateEntryRequest, opts ...grpc.CallOption) (*CreateEntryResponse, error)
UpdateEntry(ctx context.Context, in *UpdateEntryRequest, opts ...grpc.CallOption) (*UpdateEntryResponse, error)
AppendToEntry(ctx context.Context, in *AppendToEntryRequest, opts ...grpc.CallOption) (*AppendToEntryResponse, error)
DeleteEntry(ctx context.Context, in *DeleteEntryRequest, opts ...grpc.CallOption) (*DeleteEntryResponse, error)
AtomicRenameEntry(ctx context.Context, in *AtomicRenameEntryRequest, opts ...grpc.CallOption) (*AtomicRenameEntryResponse, error)
StreamRenameEntry(ctx context.Context, in *StreamRenameEntryRequest, opts ...grpc.CallOption) (SeaweedFiler_StreamRenameEntryClient, error)
StreamRenameEntry(ctx context.Context, in *StreamRenameEntryRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[StreamRenameEntryResponse], error)
AssignVolume(ctx context.Context, in *AssignVolumeRequest, opts ...grpc.CallOption) (*AssignVolumeResponse, error)
LookupVolume(ctx context.Context, in *LookupVolumeRequest, opts ...grpc.CallOption) (*LookupVolumeResponse, error)
CollectionList(ctx context.Context, in *CollectionListRequest, opts ...grpc.CallOption) (*CollectionListResponse, error)
@ -65,9 +65,9 @@ type SeaweedFilerClient interface {
Statistics(ctx context.Context, in *StatisticsRequest, opts ...grpc.CallOption) (*StatisticsResponse, error)
Ping(ctx context.Context, in *PingRequest, opts ...grpc.CallOption) (*PingResponse, error)
GetFilerConfiguration(ctx context.Context, in *GetFilerConfigurationRequest, opts ...grpc.CallOption) (*GetFilerConfigurationResponse, error)
TraverseBfsMetadata(ctx context.Context, in *TraverseBfsMetadataRequest, opts ...grpc.CallOption) (SeaweedFiler_TraverseBfsMetadataClient, error)
SubscribeMetadata(ctx context.Context, in *SubscribeMetadataRequest, opts ...grpc.CallOption) (SeaweedFiler_SubscribeMetadataClient, error)
SubscribeLocalMetadata(ctx context.Context, in *SubscribeMetadataRequest, opts ...grpc.CallOption) (SeaweedFiler_SubscribeLocalMetadataClient, error)
TraverseBfsMetadata(ctx context.Context, in *TraverseBfsMetadataRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[TraverseBfsMetadataResponse], error)
SubscribeMetadata(ctx context.Context, in *SubscribeMetadataRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[SubscribeMetadataResponse], error)
SubscribeLocalMetadata(ctx context.Context, in *SubscribeMetadataRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[SubscribeMetadataResponse], error)
KvGet(ctx context.Context, in *KvGetRequest, opts ...grpc.CallOption) (*KvGetResponse, error)
KvPut(ctx context.Context, in *KvPutRequest, opts ...grpc.CallOption) (*KvPutResponse, error)
CacheRemoteObjectToLocalCluster(ctx context.Context, in *CacheRemoteObjectToLocalClusterRequest, opts ...grpc.CallOption) (*CacheRemoteObjectToLocalClusterResponse, error)
@ -87,20 +87,22 @@ func NewSeaweedFilerClient(cc grpc.ClientConnInterface) SeaweedFilerClient {
}
func (c *seaweedFilerClient) LookupDirectoryEntry(ctx context.Context, in *LookupDirectoryEntryRequest, opts ...grpc.CallOption) (*LookupDirectoryEntryResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(LookupDirectoryEntryResponse)
err := c.cc.Invoke(ctx, SeaweedFiler_LookupDirectoryEntry_FullMethodName, in, out, opts...)
err := c.cc.Invoke(ctx, SeaweedFiler_LookupDirectoryEntry_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *seaweedFilerClient) ListEntries(ctx context.Context, in *ListEntriesRequest, opts ...grpc.CallOption) (SeaweedFiler_ListEntriesClient, error) {
stream, err := c.cc.NewStream(ctx, &SeaweedFiler_ServiceDesc.Streams[0], SeaweedFiler_ListEntries_FullMethodName, opts...)
func (c *seaweedFilerClient) ListEntries(ctx context.Context, in *ListEntriesRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[ListEntriesResponse], error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
stream, err := c.cc.NewStream(ctx, &SeaweedFiler_ServiceDesc.Streams[0], SeaweedFiler_ListEntries_FullMethodName, cOpts...)
if err != nil {
return nil, err
}
x := &seaweedFilerListEntriesClient{stream}
x := &grpc.GenericClientStream[ListEntriesRequest, ListEntriesResponse]{ClientStream: stream}
if err := x.ClientStream.SendMsg(in); err != nil {
return nil, err
}
@ -110,26 +112,13 @@ func (c *seaweedFilerClient) ListEntries(ctx context.Context, in *ListEntriesReq
return x, nil
}
type SeaweedFiler_ListEntriesClient interface {
Recv() (*ListEntriesResponse, error)
grpc.ClientStream
}
type seaweedFilerListEntriesClient struct {
grpc.ClientStream
}
func (x *seaweedFilerListEntriesClient) Recv() (*ListEntriesResponse, error) {
m := new(ListEntriesResponse)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type SeaweedFiler_ListEntriesClient = grpc.ServerStreamingClient[ListEntriesResponse]
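Under the regenerated code, the old named stream types survive as aliases of the generic grpc stream types, so callers written against them keep compiling; a hedged sketch:

package example

import (
	"io"

	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
)

// drainEntries consumes a ListEntries stream through the backwards-compatible
// alias; Recv behaves exactly as before and returns io.EOF at end of stream.
func drainEntries(stream filer_pb.SeaweedFiler_ListEntriesClient) error {
	for {
		resp, err := stream.Recv()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		_ = resp // handle the ListEntriesResponse here
	}
}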
func (c *seaweedFilerClient) CreateEntry(ctx context.Context, in *CreateEntryRequest, opts ...grpc.CallOption) (*CreateEntryResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(CreateEntryResponse)
err := c.cc.Invoke(ctx, SeaweedFiler_CreateEntry_FullMethodName, in, out, opts...)
err := c.cc.Invoke(ctx, SeaweedFiler_CreateEntry_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@ -137,8 +126,9 @@ func (c *seaweedFilerClient) CreateEntry(ctx context.Context, in *CreateEntryReq
}
func (c *seaweedFilerClient) UpdateEntry(ctx context.Context, in *UpdateEntryRequest, opts ...grpc.CallOption) (*UpdateEntryResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(UpdateEntryResponse)
err := c.cc.Invoke(ctx, SeaweedFiler_UpdateEntry_FullMethodName, in, out, opts...)
err := c.cc.Invoke(ctx, SeaweedFiler_UpdateEntry_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@ -146,8 +136,9 @@ func (c *seaweedFilerClient) UpdateEntry(ctx context.Context, in *UpdateEntryReq
}
func (c *seaweedFilerClient) AppendToEntry(ctx context.Context, in *AppendToEntryRequest, opts ...grpc.CallOption) (*AppendToEntryResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(AppendToEntryResponse)
err := c.cc.Invoke(ctx, SeaweedFiler_AppendToEntry_FullMethodName, in, out, opts...)
err := c.cc.Invoke(ctx, SeaweedFiler_AppendToEntry_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@ -155,8 +146,9 @@ func (c *seaweedFilerClient) AppendToEntry(ctx context.Context, in *AppendToEntr
}
func (c *seaweedFilerClient) DeleteEntry(ctx context.Context, in *DeleteEntryRequest, opts ...grpc.CallOption) (*DeleteEntryResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(DeleteEntryResponse)
err := c.cc.Invoke(ctx, SeaweedFiler_DeleteEntry_FullMethodName, in, out, opts...)
err := c.cc.Invoke(ctx, SeaweedFiler_DeleteEntry_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@ -164,20 +156,22 @@ func (c *seaweedFilerClient) DeleteEntry(ctx context.Context, in *DeleteEntryReq
}
func (c *seaweedFilerClient) AtomicRenameEntry(ctx context.Context, in *AtomicRenameEntryRequest, opts ...grpc.CallOption) (*AtomicRenameEntryResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(AtomicRenameEntryResponse)
err := c.cc.Invoke(ctx, SeaweedFiler_AtomicRenameEntry_FullMethodName, in, out, opts...)
err := c.cc.Invoke(ctx, SeaweedFiler_AtomicRenameEntry_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *seaweedFilerClient) StreamRenameEntry(ctx context.Context, in *StreamRenameEntryRequest, opts ...grpc.CallOption) (SeaweedFiler_StreamRenameEntryClient, error) {
stream, err := c.cc.NewStream(ctx, &SeaweedFiler_ServiceDesc.Streams[1], SeaweedFiler_StreamRenameEntry_FullMethodName, opts...)
func (c *seaweedFilerClient) StreamRenameEntry(ctx context.Context, in *StreamRenameEntryRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[StreamRenameEntryResponse], error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
stream, err := c.cc.NewStream(ctx, &SeaweedFiler_ServiceDesc.Streams[1], SeaweedFiler_StreamRenameEntry_FullMethodName, cOpts...)
if err != nil {
return nil, err
}
x := &seaweedFilerStreamRenameEntryClient{stream}
x := &grpc.GenericClientStream[StreamRenameEntryRequest, StreamRenameEntryResponse]{ClientStream: stream}
if err := x.ClientStream.SendMsg(in); err != nil {
return nil, err
}
@ -187,26 +181,13 @@ func (c *seaweedFilerClient) StreamRenameEntry(ctx context.Context, in *StreamRe
return x, nil
}
type SeaweedFiler_StreamRenameEntryClient interface {
Recv() (*StreamRenameEntryResponse, error)
grpc.ClientStream
}
type seaweedFilerStreamRenameEntryClient struct {
grpc.ClientStream
}
func (x *seaweedFilerStreamRenameEntryClient) Recv() (*StreamRenameEntryResponse, error) {
m := new(StreamRenameEntryResponse)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type SeaweedFiler_StreamRenameEntryClient = grpc.ServerStreamingClient[StreamRenameEntryResponse]
func (c *seaweedFilerClient) AssignVolume(ctx context.Context, in *AssignVolumeRequest, opts ...grpc.CallOption) (*AssignVolumeResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(AssignVolumeResponse)
err := c.cc.Invoke(ctx, SeaweedFiler_AssignVolume_FullMethodName, in, out, opts...)
err := c.cc.Invoke(ctx, SeaweedFiler_AssignVolume_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@ -214,8 +195,9 @@ func (c *seaweedFilerClient) AssignVolume(ctx context.Context, in *AssignVolumeR
}
func (c *seaweedFilerClient) LookupVolume(ctx context.Context, in *LookupVolumeRequest, opts ...grpc.CallOption) (*LookupVolumeResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(LookupVolumeResponse)
err := c.cc.Invoke(ctx, SeaweedFiler_LookupVolume_FullMethodName, in, out, opts...)
err := c.cc.Invoke(ctx, SeaweedFiler_LookupVolume_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@ -223,8 +205,9 @@ func (c *seaweedFilerClient) LookupVolume(ctx context.Context, in *LookupVolumeR
}
func (c *seaweedFilerClient) CollectionList(ctx context.Context, in *CollectionListRequest, opts ...grpc.CallOption) (*CollectionListResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(CollectionListResponse)
err := c.cc.Invoke(ctx, SeaweedFiler_CollectionList_FullMethodName, in, out, opts...)
err := c.cc.Invoke(ctx, SeaweedFiler_CollectionList_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@ -232,8 +215,9 @@ func (c *seaweedFilerClient) CollectionList(ctx context.Context, in *CollectionL
}
func (c *seaweedFilerClient) DeleteCollection(ctx context.Context, in *DeleteCollectionRequest, opts ...grpc.CallOption) (*DeleteCollectionResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(DeleteCollectionResponse)
err := c.cc.Invoke(ctx, SeaweedFiler_DeleteCollection_FullMethodName, in, out, opts...)
err := c.cc.Invoke(ctx, SeaweedFiler_DeleteCollection_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@ -241,8 +225,9 @@ func (c *seaweedFilerClient) DeleteCollection(ctx context.Context, in *DeleteCol
}
func (c *seaweedFilerClient) Statistics(ctx context.Context, in *StatisticsRequest, opts ...grpc.CallOption) (*StatisticsResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(StatisticsResponse)
err := c.cc.Invoke(ctx, SeaweedFiler_Statistics_FullMethodName, in, out, opts...)
err := c.cc.Invoke(ctx, SeaweedFiler_Statistics_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@ -250,8 +235,9 @@ func (c *seaweedFilerClient) Statistics(ctx context.Context, in *StatisticsReque
}
func (c *seaweedFilerClient) Ping(ctx context.Context, in *PingRequest, opts ...grpc.CallOption) (*PingResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(PingResponse)
err := c.cc.Invoke(ctx, SeaweedFiler_Ping_FullMethodName, in, out, opts...)
err := c.cc.Invoke(ctx, SeaweedFiler_Ping_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@ -259,20 +245,22 @@ func (c *seaweedFilerClient) Ping(ctx context.Context, in *PingRequest, opts ...
}
func (c *seaweedFilerClient) GetFilerConfiguration(ctx context.Context, in *GetFilerConfigurationRequest, opts ...grpc.CallOption) (*GetFilerConfigurationResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(GetFilerConfigurationResponse)
err := c.cc.Invoke(ctx, SeaweedFiler_GetFilerConfiguration_FullMethodName, in, out, opts...)
err := c.cc.Invoke(ctx, SeaweedFiler_GetFilerConfiguration_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *seaweedFilerClient) TraverseBfsMetadata(ctx context.Context, in *TraverseBfsMetadataRequest, opts ...grpc.CallOption) (SeaweedFiler_TraverseBfsMetadataClient, error) {
stream, err := c.cc.NewStream(ctx, &SeaweedFiler_ServiceDesc.Streams[2], SeaweedFiler_TraverseBfsMetadata_FullMethodName, opts...)
func (c *seaweedFilerClient) TraverseBfsMetadata(ctx context.Context, in *TraverseBfsMetadataRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[TraverseBfsMetadataResponse], error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
stream, err := c.cc.NewStream(ctx, &SeaweedFiler_ServiceDesc.Streams[2], SeaweedFiler_TraverseBfsMetadata_FullMethodName, cOpts...)
if err != nil {
return nil, err
}
x := &seaweedFilerTraverseBfsMetadataClient{stream}
x := &grpc.GenericClientStream[TraverseBfsMetadataRequest, TraverseBfsMetadataResponse]{ClientStream: stream}
if err := x.ClientStream.SendMsg(in); err != nil {
return nil, err
}
@ -282,29 +270,16 @@ func (c *seaweedFilerClient) TraverseBfsMetadata(ctx context.Context, in *Traver
return x, nil
}
type SeaweedFiler_TraverseBfsMetadataClient interface {
Recv() (*TraverseBfsMetadataResponse, error)
grpc.ClientStream
}
// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type SeaweedFiler_TraverseBfsMetadataClient = grpc.ServerStreamingClient[TraverseBfsMetadataResponse]
type seaweedFilerTraverseBfsMetadataClient struct {
grpc.ClientStream
}
func (x *seaweedFilerTraverseBfsMetadataClient) Recv() (*TraverseBfsMetadataResponse, error) {
m := new(TraverseBfsMetadataResponse)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
func (c *seaweedFilerClient) SubscribeMetadata(ctx context.Context, in *SubscribeMetadataRequest, opts ...grpc.CallOption) (SeaweedFiler_SubscribeMetadataClient, error) {
stream, err := c.cc.NewStream(ctx, &SeaweedFiler_ServiceDesc.Streams[3], SeaweedFiler_SubscribeMetadata_FullMethodName, opts...)
func (c *seaweedFilerClient) SubscribeMetadata(ctx context.Context, in *SubscribeMetadataRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[SubscribeMetadataResponse], error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
stream, err := c.cc.NewStream(ctx, &SeaweedFiler_ServiceDesc.Streams[3], SeaweedFiler_SubscribeMetadata_FullMethodName, cOpts...)
if err != nil {
return nil, err
}
x := &seaweedFilerSubscribeMetadataClient{stream}
x := &grpc.GenericClientStream[SubscribeMetadataRequest, SubscribeMetadataResponse]{ClientStream: stream}
if err := x.ClientStream.SendMsg(in); err != nil {
return nil, err
}
@ -314,29 +289,16 @@ func (c *seaweedFilerClient) SubscribeMetadata(ctx context.Context, in *Subscrib
return x, nil
}
type SeaweedFiler_SubscribeMetadataClient interface {
Recv() (*SubscribeMetadataResponse, error)
grpc.ClientStream
}
// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type SeaweedFiler_SubscribeMetadataClient = grpc.ServerStreamingClient[SubscribeMetadataResponse]
type seaweedFilerSubscribeMetadataClient struct {
grpc.ClientStream
}
func (x *seaweedFilerSubscribeMetadataClient) Recv() (*SubscribeMetadataResponse, error) {
m := new(SubscribeMetadataResponse)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
func (c *seaweedFilerClient) SubscribeLocalMetadata(ctx context.Context, in *SubscribeMetadataRequest, opts ...grpc.CallOption) (SeaweedFiler_SubscribeLocalMetadataClient, error) {
stream, err := c.cc.NewStream(ctx, &SeaweedFiler_ServiceDesc.Streams[4], SeaweedFiler_SubscribeLocalMetadata_FullMethodName, opts...)
func (c *seaweedFilerClient) SubscribeLocalMetadata(ctx context.Context, in *SubscribeMetadataRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[SubscribeMetadataResponse], error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
stream, err := c.cc.NewStream(ctx, &SeaweedFiler_ServiceDesc.Streams[4], SeaweedFiler_SubscribeLocalMetadata_FullMethodName, cOpts...)
if err != nil {
return nil, err
}
x := &seaweedFilerSubscribeLocalMetadataClient{stream}
x := &grpc.GenericClientStream[SubscribeMetadataRequest, SubscribeMetadataResponse]{ClientStream: stream}
if err := x.ClientStream.SendMsg(in); err != nil {
return nil, err
}
@ -346,26 +308,13 @@ func (c *seaweedFilerClient) SubscribeLocalMetadata(ctx context.Context, in *Sub
return x, nil
}
type SeaweedFiler_SubscribeLocalMetadataClient interface {
Recv() (*SubscribeMetadataResponse, error)
grpc.ClientStream
}
type seaweedFilerSubscribeLocalMetadataClient struct {
grpc.ClientStream
}
func (x *seaweedFilerSubscribeLocalMetadataClient) Recv() (*SubscribeMetadataResponse, error) {
m := new(SubscribeMetadataResponse)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type SeaweedFiler_SubscribeLocalMetadataClient = grpc.ServerStreamingClient[SubscribeMetadataResponse]
func (c *seaweedFilerClient) KvGet(ctx context.Context, in *KvGetRequest, opts ...grpc.CallOption) (*KvGetResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(KvGetResponse)
err := c.cc.Invoke(ctx, SeaweedFiler_KvGet_FullMethodName, in, out, opts...)
err := c.cc.Invoke(ctx, SeaweedFiler_KvGet_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@ -373,8 +322,9 @@ func (c *seaweedFilerClient) KvGet(ctx context.Context, in *KvGetRequest, opts .
}
func (c *seaweedFilerClient) KvPut(ctx context.Context, in *KvPutRequest, opts ...grpc.CallOption) (*KvPutResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(KvPutResponse)
err := c.cc.Invoke(ctx, SeaweedFiler_KvPut_FullMethodName, in, out, opts...)
err := c.cc.Invoke(ctx, SeaweedFiler_KvPut_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@ -382,8 +332,9 @@ func (c *seaweedFilerClient) KvPut(ctx context.Context, in *KvPutRequest, opts .
}
func (c *seaweedFilerClient) CacheRemoteObjectToLocalCluster(ctx context.Context, in *CacheRemoteObjectToLocalClusterRequest, opts ...grpc.CallOption) (*CacheRemoteObjectToLocalClusterResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(CacheRemoteObjectToLocalClusterResponse)
err := c.cc.Invoke(ctx, SeaweedFiler_CacheRemoteObjectToLocalCluster_FullMethodName, in, out, opts...)
err := c.cc.Invoke(ctx, SeaweedFiler_CacheRemoteObjectToLocalCluster_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@ -391,8 +342,9 @@ func (c *seaweedFilerClient) CacheRemoteObjectToLocalCluster(ctx context.Context
}
func (c *seaweedFilerClient) DistributedLock(ctx context.Context, in *LockRequest, opts ...grpc.CallOption) (*LockResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(LockResponse)
err := c.cc.Invoke(ctx, SeaweedFiler_DistributedLock_FullMethodName, in, out, opts...)
err := c.cc.Invoke(ctx, SeaweedFiler_DistributedLock_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@ -400,8 +352,9 @@ func (c *seaweedFilerClient) DistributedLock(ctx context.Context, in *LockReques
}
func (c *seaweedFilerClient) DistributedUnlock(ctx context.Context, in *UnlockRequest, opts ...grpc.CallOption) (*UnlockResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(UnlockResponse)
err := c.cc.Invoke(ctx, SeaweedFiler_DistributedUnlock_FullMethodName, in, out, opts...)
err := c.cc.Invoke(ctx, SeaweedFiler_DistributedUnlock_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@ -409,8 +362,9 @@ func (c *seaweedFilerClient) DistributedUnlock(ctx context.Context, in *UnlockRe
}
func (c *seaweedFilerClient) FindLockOwner(ctx context.Context, in *FindLockOwnerRequest, opts ...grpc.CallOption) (*FindLockOwnerResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(FindLockOwnerResponse)
err := c.cc.Invoke(ctx, SeaweedFiler_FindLockOwner_FullMethodName, in, out, opts...)
err := c.cc.Invoke(ctx, SeaweedFiler_FindLockOwner_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@ -418,8 +372,9 @@ func (c *seaweedFilerClient) FindLockOwner(ctx context.Context, in *FindLockOwne
}
func (c *seaweedFilerClient) TransferLocks(ctx context.Context, in *TransferLocksRequest, opts ...grpc.CallOption) (*TransferLocksResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(TransferLocksResponse)
err := c.cc.Invoke(ctx, SeaweedFiler_TransferLocks_FullMethodName, in, out, opts...)
err := c.cc.Invoke(ctx, SeaweedFiler_TransferLocks_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@ -428,16 +383,16 @@ func (c *seaweedFilerClient) TransferLocks(ctx context.Context, in *TransferLock
// SeaweedFilerServer is the server API for SeaweedFiler service.
// All implementations must embed UnimplementedSeaweedFilerServer
// for forward compatibility
// for forward compatibility.
type SeaweedFilerServer interface {
LookupDirectoryEntry(context.Context, *LookupDirectoryEntryRequest) (*LookupDirectoryEntryResponse, error)
ListEntries(*ListEntriesRequest, SeaweedFiler_ListEntriesServer) error
ListEntries(*ListEntriesRequest, grpc.ServerStreamingServer[ListEntriesResponse]) error
CreateEntry(context.Context, *CreateEntryRequest) (*CreateEntryResponse, error)
UpdateEntry(context.Context, *UpdateEntryRequest) (*UpdateEntryResponse, error)
AppendToEntry(context.Context, *AppendToEntryRequest) (*AppendToEntryResponse, error)
DeleteEntry(context.Context, *DeleteEntryRequest) (*DeleteEntryResponse, error)
AtomicRenameEntry(context.Context, *AtomicRenameEntryRequest) (*AtomicRenameEntryResponse, error)
StreamRenameEntry(*StreamRenameEntryRequest, SeaweedFiler_StreamRenameEntryServer) error
StreamRenameEntry(*StreamRenameEntryRequest, grpc.ServerStreamingServer[StreamRenameEntryResponse]) error
AssignVolume(context.Context, *AssignVolumeRequest) (*AssignVolumeResponse, error)
LookupVolume(context.Context, *LookupVolumeRequest) (*LookupVolumeResponse, error)
CollectionList(context.Context, *CollectionListRequest) (*CollectionListResponse, error)
@ -445,9 +400,9 @@ type SeaweedFilerServer interface {
Statistics(context.Context, *StatisticsRequest) (*StatisticsResponse, error)
Ping(context.Context, *PingRequest) (*PingResponse, error)
GetFilerConfiguration(context.Context, *GetFilerConfigurationRequest) (*GetFilerConfigurationResponse, error)
TraverseBfsMetadata(*TraverseBfsMetadataRequest, SeaweedFiler_TraverseBfsMetadataServer) error
SubscribeMetadata(*SubscribeMetadataRequest, SeaweedFiler_SubscribeMetadataServer) error
SubscribeLocalMetadata(*SubscribeMetadataRequest, SeaweedFiler_SubscribeLocalMetadataServer) error
TraverseBfsMetadata(*TraverseBfsMetadataRequest, grpc.ServerStreamingServer[TraverseBfsMetadataResponse]) error
SubscribeMetadata(*SubscribeMetadataRequest, grpc.ServerStreamingServer[SubscribeMetadataResponse]) error
SubscribeLocalMetadata(*SubscribeMetadataRequest, grpc.ServerStreamingServer[SubscribeMetadataResponse]) error
KvGet(context.Context, *KvGetRequest) (*KvGetResponse, error)
KvPut(context.Context, *KvPutRequest) (*KvPutResponse, error)
CacheRemoteObjectToLocalCluster(context.Context, *CacheRemoteObjectToLocalClusterRequest) (*CacheRemoteObjectToLocalClusterResponse, error)
@ -459,14 +414,17 @@ type SeaweedFilerServer interface {
mustEmbedUnimplementedSeaweedFilerServer()
}
// UnimplementedSeaweedFilerServer must be embedded to have forward compatible implementations.
type UnimplementedSeaweedFilerServer struct {
}
// UnimplementedSeaweedFilerServer must be embedded to have
// forward compatible implementations.
//
// NOTE: this should be embedded by value instead of pointer to avoid a nil
// pointer dereference when methods are called.
type UnimplementedSeaweedFilerServer struct{}
func (UnimplementedSeaweedFilerServer) LookupDirectoryEntry(context.Context, *LookupDirectoryEntryRequest) (*LookupDirectoryEntryResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method LookupDirectoryEntry not implemented")
}
func (UnimplementedSeaweedFilerServer) ListEntries(*ListEntriesRequest, SeaweedFiler_ListEntriesServer) error {
func (UnimplementedSeaweedFilerServer) ListEntries(*ListEntriesRequest, grpc.ServerStreamingServer[ListEntriesResponse]) error {
return status.Errorf(codes.Unimplemented, "method ListEntries not implemented")
}
func (UnimplementedSeaweedFilerServer) CreateEntry(context.Context, *CreateEntryRequest) (*CreateEntryResponse, error) {
@ -484,7 +442,7 @@ func (UnimplementedSeaweedFilerServer) DeleteEntry(context.Context, *DeleteEntry
func (UnimplementedSeaweedFilerServer) AtomicRenameEntry(context.Context, *AtomicRenameEntryRequest) (*AtomicRenameEntryResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method AtomicRenameEntry not implemented")
}
func (UnimplementedSeaweedFilerServer) StreamRenameEntry(*StreamRenameEntryRequest, SeaweedFiler_StreamRenameEntryServer) error {
func (UnimplementedSeaweedFilerServer) StreamRenameEntry(*StreamRenameEntryRequest, grpc.ServerStreamingServer[StreamRenameEntryResponse]) error {
return status.Errorf(codes.Unimplemented, "method StreamRenameEntry not implemented")
}
func (UnimplementedSeaweedFilerServer) AssignVolume(context.Context, *AssignVolumeRequest) (*AssignVolumeResponse, error) {
@ -508,13 +466,13 @@ func (UnimplementedSeaweedFilerServer) Ping(context.Context, *PingRequest) (*Pin
func (UnimplementedSeaweedFilerServer) GetFilerConfiguration(context.Context, *GetFilerConfigurationRequest) (*GetFilerConfigurationResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetFilerConfiguration not implemented")
}
func (UnimplementedSeaweedFilerServer) TraverseBfsMetadata(*TraverseBfsMetadataRequest, SeaweedFiler_TraverseBfsMetadataServer) error {
func (UnimplementedSeaweedFilerServer) TraverseBfsMetadata(*TraverseBfsMetadataRequest, grpc.ServerStreamingServer[TraverseBfsMetadataResponse]) error {
return status.Errorf(codes.Unimplemented, "method TraverseBfsMetadata not implemented")
}
func (UnimplementedSeaweedFilerServer) SubscribeMetadata(*SubscribeMetadataRequest, SeaweedFiler_SubscribeMetadataServer) error {
func (UnimplementedSeaweedFilerServer) SubscribeMetadata(*SubscribeMetadataRequest, grpc.ServerStreamingServer[SubscribeMetadataResponse]) error {
return status.Errorf(codes.Unimplemented, "method SubscribeMetadata not implemented")
}
func (UnimplementedSeaweedFilerServer) SubscribeLocalMetadata(*SubscribeMetadataRequest, SeaweedFiler_SubscribeLocalMetadataServer) error {
func (UnimplementedSeaweedFilerServer) SubscribeLocalMetadata(*SubscribeMetadataRequest, grpc.ServerStreamingServer[SubscribeMetadataResponse]) error {
return status.Errorf(codes.Unimplemented, "method SubscribeLocalMetadata not implemented")
}
func (UnimplementedSeaweedFilerServer) KvGet(context.Context, *KvGetRequest) (*KvGetResponse, error) {
@ -539,6 +497,7 @@ func (UnimplementedSeaweedFilerServer) TransferLocks(context.Context, *TransferL
return nil, status.Errorf(codes.Unimplemented, "method TransferLocks not implemented")
}
func (UnimplementedSeaweedFilerServer) mustEmbedUnimplementedSeaweedFilerServer() {}
func (UnimplementedSeaweedFilerServer) testEmbeddedByValue() {}
// UnsafeSeaweedFilerServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to SeaweedFilerServer will
@ -548,6 +507,13 @@ type UnsafeSeaweedFilerServer interface {
}
func RegisterSeaweedFilerServer(s grpc.ServiceRegistrar, srv SeaweedFilerServer) {
// If the following call panics, it indicates UnimplementedSeaweedFilerServer was
// embedded by pointer and is nil. This will cause panics if an
// unimplemented method is ever invoked, so we test this at initialization
// time to prevent it from happening at runtime later due to I/O.
if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
t.testEmbeddedByValue()
}
s.RegisterService(&SeaweedFiler_ServiceDesc, srv)
}
@ -574,21 +540,11 @@ func _SeaweedFiler_ListEntries_Handler(srv interface{}, stream grpc.ServerStream
if err := stream.RecvMsg(m); err != nil {
return err
}
return srv.(SeaweedFilerServer).ListEntries(m, &seaweedFilerListEntriesServer{stream})
return srv.(SeaweedFilerServer).ListEntries(m, &grpc.GenericServerStream[ListEntriesRequest, ListEntriesResponse]{ServerStream: stream})
}
type SeaweedFiler_ListEntriesServer interface {
Send(*ListEntriesResponse) error
grpc.ServerStream
}
type seaweedFilerListEntriesServer struct {
grpc.ServerStream
}
func (x *seaweedFilerListEntriesServer) Send(m *ListEntriesResponse) error {
return x.ServerStream.SendMsg(m)
}
// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type SeaweedFiler_ListEntriesServer = grpc.ServerStreamingServer[ListEntriesResponse]
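Extending the hypothetical filerServer from the sketch above (package clause and imports elided), a hedged example of why this alias matters in practice: handler code that spelled out the old named stream type keeps compiling unchanged, because the name now resolves to the generic type.

// SeaweedFiler_ListEntriesServer is now an alias for
// grpc.ServerStreamingServer[ListEntriesResponse], so this pre-existing
// signature style still compiles and behaves as before.
func (s *filerServer) ListEntries(req *filer_pb.ListEntriesRequest, stream filer_pb.SeaweedFiler_ListEntriesServer) error {
	// Send is the generic stream's Send(*ListEntriesResponse) error.
	return stream.Send(&filer_pb.ListEntriesResponse{})
}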
func _SeaweedFiler_CreateEntry_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(CreateEntryRequest)
@ -685,21 +641,11 @@ func _SeaweedFiler_StreamRenameEntry_Handler(srv interface{}, stream grpc.Server
if err := stream.RecvMsg(m); err != nil {
return err
}
return srv.(SeaweedFilerServer).StreamRenameEntry(m, &seaweedFilerStreamRenameEntryServer{stream})
return srv.(SeaweedFilerServer).StreamRenameEntry(m, &grpc.GenericServerStream[StreamRenameEntryRequest, StreamRenameEntryResponse]{ServerStream: stream})
}
type SeaweedFiler_StreamRenameEntryServer interface {
Send(*StreamRenameEntryResponse) error
grpc.ServerStream
}
type seaweedFilerStreamRenameEntryServer struct {
grpc.ServerStream
}
func (x *seaweedFilerStreamRenameEntryServer) Send(m *StreamRenameEntryResponse) error {
return x.ServerStream.SendMsg(m)
}
// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type SeaweedFiler_StreamRenameEntryServer = grpc.ServerStreamingServer[StreamRenameEntryResponse]
func _SeaweedFiler_AssignVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(AssignVolumeRequest)
@ -832,63 +778,33 @@ func _SeaweedFiler_TraverseBfsMetadata_Handler(srv interface{}, stream grpc.Serv
if err := stream.RecvMsg(m); err != nil {
return err
}
return srv.(SeaweedFilerServer).TraverseBfsMetadata(m, &seaweedFilerTraverseBfsMetadataServer{stream})
return srv.(SeaweedFilerServer).TraverseBfsMetadata(m, &grpc.GenericServerStream[TraverseBfsMetadataRequest, TraverseBfsMetadataResponse]{ServerStream: stream})
}
type SeaweedFiler_TraverseBfsMetadataServer interface {
Send(*TraverseBfsMetadataResponse) error
grpc.ServerStream
}
type seaweedFilerTraverseBfsMetadataServer struct {
grpc.ServerStream
}
func (x *seaweedFilerTraverseBfsMetadataServer) Send(m *TraverseBfsMetadataResponse) error {
return x.ServerStream.SendMsg(m)
}
// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type SeaweedFiler_TraverseBfsMetadataServer = grpc.ServerStreamingServer[TraverseBfsMetadataResponse]
func _SeaweedFiler_SubscribeMetadata_Handler(srv interface{}, stream grpc.ServerStream) error {
m := new(SubscribeMetadataRequest)
if err := stream.RecvMsg(m); err != nil {
return err
}
return srv.(SeaweedFilerServer).SubscribeMetadata(m, &seaweedFilerSubscribeMetadataServer{stream})
return srv.(SeaweedFilerServer).SubscribeMetadata(m, &grpc.GenericServerStream[SubscribeMetadataRequest, SubscribeMetadataResponse]{ServerStream: stream})
}
type SeaweedFiler_SubscribeMetadataServer interface {
Send(*SubscribeMetadataResponse) error
grpc.ServerStream
}
type seaweedFilerSubscribeMetadataServer struct {
grpc.ServerStream
}
func (x *seaweedFilerSubscribeMetadataServer) Send(m *SubscribeMetadataResponse) error {
return x.ServerStream.SendMsg(m)
}
// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type SeaweedFiler_SubscribeMetadataServer = grpc.ServerStreamingServer[SubscribeMetadataResponse]
func _SeaweedFiler_SubscribeLocalMetadata_Handler(srv interface{}, stream grpc.ServerStream) error {
m := new(SubscribeMetadataRequest)
if err := stream.RecvMsg(m); err != nil {
return err
}
return srv.(SeaweedFilerServer).SubscribeLocalMetadata(m, &seaweedFilerSubscribeLocalMetadataServer{stream})
return srv.(SeaweedFilerServer).SubscribeLocalMetadata(m, &grpc.GenericServerStream[SubscribeMetadataRequest, SubscribeMetadataResponse]{ServerStream: stream})
}
type SeaweedFiler_SubscribeLocalMetadataServer interface {
Send(*SubscribeMetadataResponse) error
grpc.ServerStream
}
type seaweedFilerSubscribeLocalMetadataServer struct {
grpc.ServerStream
}
func (x *seaweedFilerSubscribeLocalMetadataServer) Send(m *SubscribeMetadataResponse) error {
return x.ServerStream.SendMsg(m)
}
// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type SeaweedFiler_SubscribeLocalMetadataServer = grpc.ServerStreamingServer[SubscribeMetadataResponse]
func _SeaweedFiler_KvGet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(KvGetRequest)

View File

@ -1,7 +1,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.32.0
// protoc v4.25.3
// protoc-gen-go v1.34.2
// protoc v5.28.1
// source: iam.proto
package iam_pb
@ -319,7 +319,7 @@ func file_iam_proto_rawDescGZIP() []byte {
}
var file_iam_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
var file_iam_proto_goTypes = []interface{}{
var file_iam_proto_goTypes = []any{
(*S3ApiConfiguration)(nil), // 0: iam_pb.S3ApiConfiguration
(*Identity)(nil), // 1: iam_pb.Identity
(*Credential)(nil), // 2: iam_pb.Credential
@ -343,7 +343,7 @@ func file_iam_proto_init() {
return
}
if !protoimpl.UnsafeEnabled {
file_iam_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
file_iam_proto_msgTypes[0].Exporter = func(v any, i int) any {
switch v := v.(*S3ApiConfiguration); i {
case 0:
return &v.state
@ -355,7 +355,7 @@ func file_iam_proto_init() {
return nil
}
}
file_iam_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
file_iam_proto_msgTypes[1].Exporter = func(v any, i int) any {
switch v := v.(*Identity); i {
case 0:
return &v.state
@ -367,7 +367,7 @@ func file_iam_proto_init() {
return nil
}
}
file_iam_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
file_iam_proto_msgTypes[2].Exporter = func(v any, i int) any {
switch v := v.(*Credential); i {
case 0:
return &v.state
@ -379,7 +379,7 @@ func file_iam_proto_init() {
return nil
}
}
file_iam_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
file_iam_proto_msgTypes[3].Exporter = func(v any, i int) any {
switch v := v.(*Account); i {
case 0:
return &v.state

View File

@ -1,7 +1,7 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.3.0
// - protoc v4.25.3
// - protoc-gen-go-grpc v1.5.1
// - protoc v5.28.1
// source: iam.proto
package iam_pb
@ -12,10 +12,8 @@ import (
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.32.0 or later.
const _ = grpc.SupportPackageIsVersion7
const ()
// Requires gRPC-Go v1.64.0 or later.
const _ = grpc.SupportPackageIsVersion9
// SeaweedIdentityAccessManagementClient is the client API for SeaweedIdentityAccessManagement service.
//
@ -33,17 +31,21 @@ func NewSeaweedIdentityAccessManagementClient(cc grpc.ClientConnInterface) Seawe
// SeaweedIdentityAccessManagementServer is the server API for SeaweedIdentityAccessManagement service.
// All implementations must embed UnimplementedSeaweedIdentityAccessManagementServer
// for forward compatibility
// for forward compatibility.
type SeaweedIdentityAccessManagementServer interface {
mustEmbedUnimplementedSeaweedIdentityAccessManagementServer()
}
// UnimplementedSeaweedIdentityAccessManagementServer must be embedded to have forward compatible implementations.
type UnimplementedSeaweedIdentityAccessManagementServer struct {
}
// UnimplementedSeaweedIdentityAccessManagementServer must be embedded to have
// forward compatible implementations.
//
// NOTE: this should be embedded by value instead of pointer to avoid a nil
// pointer dereference when methods are called.
type UnimplementedSeaweedIdentityAccessManagementServer struct{}
func (UnimplementedSeaweedIdentityAccessManagementServer) mustEmbedUnimplementedSeaweedIdentityAccessManagementServer() {
}
func (UnimplementedSeaweedIdentityAccessManagementServer) testEmbeddedByValue() {}
// UnsafeSeaweedIdentityAccessManagementServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to SeaweedIdentityAccessManagementServer will
@ -53,6 +55,13 @@ type UnsafeSeaweedIdentityAccessManagementServer interface {
}
func RegisterSeaweedIdentityAccessManagementServer(s grpc.ServiceRegistrar, srv SeaweedIdentityAccessManagementServer) {
// If the following call panics, it indicates UnimplementedSeaweedIdentityAccessManagementServer was
// embedded by pointer and is nil. This will cause panics if an
// unimplemented method is ever invoked, so we test this at initialization
// time to prevent it from happening at runtime later due to I/O.
if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
t.testEmbeddedByValue()
}
s.RegisterService(&SeaweedIdentityAccessManagement_ServiceDesc, srv)
}

View File

@ -125,7 +125,7 @@ message VolumeEcShardInformationMessage {
string collection = 2;
uint32 ec_index_bits = 3;
string disk_type = 4;
uint64 destroy_time = 5; // used to record the destruction time of ec volume
uint64 expire_at_sec = 5; // used to record the destruction time of ec volume
}
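A brief note on this rename, with a hedged Go sketch (the getter names follow protoc-gen-go's usual CamelCase mapping and are assumptions, as is the generated master_pb package): since the tag number 5 and the uint64 type are unchanged, the rename is wire compatible, but regenerated code exposes the new identifier, so Go call sites have to move to the new getter.

// Old call sites would have read the field as msg.GetDestroyTime(); after
// regeneration the same wire field (tag 5) is read through the new name.
func ecVolumeExpiry(msg *master_pb.VolumeEcShardInformationMessage) uint64 {
	return msg.GetExpireAtSec() // destruction time of the ec volume, in seconds
}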
message StorageBackend {

File diff suppressed because it is too large

View File

@ -1,7 +1,7 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.3.0
// - protoc v4.25.3
// - protoc-gen-go-grpc v1.5.1
// - protoc v5.28.1
// source: master.proto
package master_pb
@ -15,8 +15,8 @@ import (
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.32.0 or later.
const _ = grpc.SupportPackageIsVersion7
// Requires gRPC-Go v1.64.0 or later.
const _ = grpc.SupportPackageIsVersion9
const (
Seaweed_SendHeartbeat_FullMethodName = "/master_pb.Seaweed/SendHeartbeat"
@ -48,11 +48,11 @@ const (
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type SeaweedClient interface {
SendHeartbeat(ctx context.Context, opts ...grpc.CallOption) (Seaweed_SendHeartbeatClient, error)
KeepConnected(ctx context.Context, opts ...grpc.CallOption) (Seaweed_KeepConnectedClient, error)
SendHeartbeat(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[Heartbeat, HeartbeatResponse], error)
KeepConnected(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[KeepConnectedRequest, KeepConnectedResponse], error)
LookupVolume(ctx context.Context, in *LookupVolumeRequest, opts ...grpc.CallOption) (*LookupVolumeResponse, error)
Assign(ctx context.Context, in *AssignRequest, opts ...grpc.CallOption) (*AssignResponse, error)
StreamAssign(ctx context.Context, opts ...grpc.CallOption) (Seaweed_StreamAssignClient, error)
StreamAssign(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[AssignRequest, AssignResponse], error)
Statistics(ctx context.Context, in *StatisticsRequest, opts ...grpc.CallOption) (*StatisticsResponse, error)
CollectionList(ctx context.Context, in *CollectionListRequest, opts ...grpc.CallOption) (*CollectionListResponse, error)
CollectionDelete(ctx context.Context, in *CollectionDeleteRequest, opts ...grpc.CallOption) (*CollectionDeleteResponse, error)
@ -81,71 +81,36 @@ func NewSeaweedClient(cc grpc.ClientConnInterface) SeaweedClient {
return &seaweedClient{cc}
}
func (c *seaweedClient) SendHeartbeat(ctx context.Context, opts ...grpc.CallOption) (Seaweed_SendHeartbeatClient, error) {
stream, err := c.cc.NewStream(ctx, &Seaweed_ServiceDesc.Streams[0], Seaweed_SendHeartbeat_FullMethodName, opts...)
func (c *seaweedClient) SendHeartbeat(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[Heartbeat, HeartbeatResponse], error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
stream, err := c.cc.NewStream(ctx, &Seaweed_ServiceDesc.Streams[0], Seaweed_SendHeartbeat_FullMethodName, cOpts...)
if err != nil {
return nil, err
}
x := &seaweedSendHeartbeatClient{stream}
x := &grpc.GenericClientStream[Heartbeat, HeartbeatResponse]{ClientStream: stream}
return x, nil
}
type Seaweed_SendHeartbeatClient interface {
Send(*Heartbeat) error
Recv() (*HeartbeatResponse, error)
grpc.ClientStream
}
// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type Seaweed_SendHeartbeatClient = grpc.BidiStreamingClient[Heartbeat, HeartbeatResponse]
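For illustration, a hedged sketch of client-side usage under the same assumptions as earlier (generated master_pb package, imports elided): the stream returned by SendHeartbeat is now the generic grpc.BidiStreamingClient, but Send and Recv read exactly as before, and the alias keeps older variable declarations valid.

func sendOneHeartbeat(ctx context.Context, client master_pb.SeaweedClient) (*master_pb.HeartbeatResponse, error) {
	// Thanks to the alias, this declaration is interchangeable with
	// grpc.BidiStreamingClient[master_pb.Heartbeat, master_pb.HeartbeatResponse].
	var stream master_pb.Seaweed_SendHeartbeatClient
	stream, err := client.SendHeartbeat(ctx)
	if err != nil {
		return nil, err
	}
	if err := stream.Send(&master_pb.Heartbeat{}); err != nil {
		return nil, err
	}
	return stream.Recv()
}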
type seaweedSendHeartbeatClient struct {
grpc.ClientStream
}
func (x *seaweedSendHeartbeatClient) Send(m *Heartbeat) error {
return x.ClientStream.SendMsg(m)
}
func (x *seaweedSendHeartbeatClient) Recv() (*HeartbeatResponse, error) {
m := new(HeartbeatResponse)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
func (c *seaweedClient) KeepConnected(ctx context.Context, opts ...grpc.CallOption) (Seaweed_KeepConnectedClient, error) {
stream, err := c.cc.NewStream(ctx, &Seaweed_ServiceDesc.Streams[1], Seaweed_KeepConnected_FullMethodName, opts...)
func (c *seaweedClient) KeepConnected(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[KeepConnectedRequest, KeepConnectedResponse], error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
stream, err := c.cc.NewStream(ctx, &Seaweed_ServiceDesc.Streams[1], Seaweed_KeepConnected_FullMethodName, cOpts...)
if err != nil {
return nil, err
}
x := &seaweedKeepConnectedClient{stream}
x := &grpc.GenericClientStream[KeepConnectedRequest, KeepConnectedResponse]{ClientStream: stream}
return x, nil
}
type Seaweed_KeepConnectedClient interface {
Send(*KeepConnectedRequest) error
Recv() (*KeepConnectedResponse, error)
grpc.ClientStream
}
type seaweedKeepConnectedClient struct {
grpc.ClientStream
}
func (x *seaweedKeepConnectedClient) Send(m *KeepConnectedRequest) error {
return x.ClientStream.SendMsg(m)
}
func (x *seaweedKeepConnectedClient) Recv() (*KeepConnectedResponse, error) {
m := new(KeepConnectedResponse)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type Seaweed_KeepConnectedClient = grpc.BidiStreamingClient[KeepConnectedRequest, KeepConnectedResponse]
func (c *seaweedClient) LookupVolume(ctx context.Context, in *LookupVolumeRequest, opts ...grpc.CallOption) (*LookupVolumeResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(LookupVolumeResponse)
err := c.cc.Invoke(ctx, Seaweed_LookupVolume_FullMethodName, in, out, opts...)
err := c.cc.Invoke(ctx, Seaweed_LookupVolume_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@ -153,48 +118,32 @@ func (c *seaweedClient) LookupVolume(ctx context.Context, in *LookupVolumeReques
}
func (c *seaweedClient) Assign(ctx context.Context, in *AssignRequest, opts ...grpc.CallOption) (*AssignResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(AssignResponse)
err := c.cc.Invoke(ctx, Seaweed_Assign_FullMethodName, in, out, opts...)
err := c.cc.Invoke(ctx, Seaweed_Assign_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
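A short aside on the cOpts change, with a hedged example (the AssignRequest field and package names are assumptions, as above): the regenerated unary stubs now prepend grpc.StaticMethod() to the per-call options before Invoke, a hint to interceptors and stats handlers that the method name is known at compile time; caller-supplied options are still appended after it, so existing call sites need no changes.

func assignOne(ctx context.Context, client master_pb.SeaweedClient) (*master_pb.AssignResponse, error) {
	// Options passed here land after the generated grpc.StaticMethod() option.
	return client.Assign(ctx, &master_pb.AssignRequest{Count: 1}, grpc.WaitForReady(true))
}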
func (c *seaweedClient) StreamAssign(ctx context.Context, opts ...grpc.CallOption) (Seaweed_StreamAssignClient, error) {
stream, err := c.cc.NewStream(ctx, &Seaweed_ServiceDesc.Streams[2], Seaweed_StreamAssign_FullMethodName, opts...)
func (c *seaweedClient) StreamAssign(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[AssignRequest, AssignResponse], error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
stream, err := c.cc.NewStream(ctx, &Seaweed_ServiceDesc.Streams[2], Seaweed_StreamAssign_FullMethodName, cOpts...)
if err != nil {
return nil, err
}
x := &seaweedStreamAssignClient{stream}
x := &grpc.GenericClientStream[AssignRequest, AssignResponse]{ClientStream: stream}
return x, nil
}
type Seaweed_StreamAssignClient interface {
Send(*AssignRequest) error
Recv() (*AssignResponse, error)
grpc.ClientStream
}
type seaweedStreamAssignClient struct {
grpc.ClientStream
}
func (x *seaweedStreamAssignClient) Send(m *AssignRequest) error {
return x.ClientStream.SendMsg(m)
}
func (x *seaweedStreamAssignClient) Recv() (*AssignResponse, error) {
m := new(AssignResponse)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type Seaweed_StreamAssignClient = grpc.BidiStreamingClient[AssignRequest, AssignResponse]
func (c *seaweedClient) Statistics(ctx context.Context, in *StatisticsRequest, opts ...grpc.CallOption) (*StatisticsResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(StatisticsResponse)
err := c.cc.Invoke(ctx, Seaweed_Statistics_FullMethodName, in, out, opts...)
err := c.cc.Invoke(ctx, Seaweed_Statistics_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@ -202,8 +151,9 @@ func (c *seaweedClient) Statistics(ctx context.Context, in *StatisticsRequest, o
}
func (c *seaweedClient) CollectionList(ctx context.Context, in *CollectionListRequest, opts ...grpc.CallOption) (*CollectionListResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(CollectionListResponse)
err := c.cc.Invoke(ctx, Seaweed_CollectionList_FullMethodName, in, out, opts...)
err := c.cc.Invoke(ctx, Seaweed_CollectionList_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@ -211,8 +161,9 @@ func (c *seaweedClient) CollectionList(ctx context.Context, in *CollectionListRe
}
func (c *seaweedClient) CollectionDelete(ctx context.Context, in *CollectionDeleteRequest, opts ...grpc.CallOption) (*CollectionDeleteResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(CollectionDeleteResponse)
err := c.cc.Invoke(ctx, Seaweed_CollectionDelete_FullMethodName, in, out, opts...)
err := c.cc.Invoke(ctx, Seaweed_CollectionDelete_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@ -220,8 +171,9 @@ func (c *seaweedClient) CollectionDelete(ctx context.Context, in *CollectionDele
}
func (c *seaweedClient) VolumeList(ctx context.Context, in *VolumeListRequest, opts ...grpc.CallOption) (*VolumeListResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(VolumeListResponse)
err := c.cc.Invoke(ctx, Seaweed_VolumeList_FullMethodName, in, out, opts...)
err := c.cc.Invoke(ctx, Seaweed_VolumeList_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@ -229,8 +181,9 @@ func (c *seaweedClient) VolumeList(ctx context.Context, in *VolumeListRequest, o
}
func (c *seaweedClient) LookupEcVolume(ctx context.Context, in *LookupEcVolumeRequest, opts ...grpc.CallOption) (*LookupEcVolumeResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(LookupEcVolumeResponse)
err := c.cc.Invoke(ctx, Seaweed_LookupEcVolume_FullMethodName, in, out, opts...)
err := c.cc.Invoke(ctx, Seaweed_LookupEcVolume_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@ -238,8 +191,9 @@ func (c *seaweedClient) LookupEcVolume(ctx context.Context, in *LookupEcVolumeRe
}
func (c *seaweedClient) VacuumVolume(ctx context.Context, in *VacuumVolumeRequest, opts ...grpc.CallOption) (*VacuumVolumeResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(VacuumVolumeResponse)
err := c.cc.Invoke(ctx, Seaweed_VacuumVolume_FullMethodName, in, out, opts...)
err := c.cc.Invoke(ctx, Seaweed_VacuumVolume_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@ -247,8 +201,9 @@ func (c *seaweedClient) VacuumVolume(ctx context.Context, in *VacuumVolumeReques
}
func (c *seaweedClient) DisableVacuum(ctx context.Context, in *DisableVacuumRequest, opts ...grpc.CallOption) (*DisableVacuumResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(DisableVacuumResponse)
err := c.cc.Invoke(ctx, Seaweed_DisableVacuum_FullMethodName, in, out, opts...)
err := c.cc.Invoke(ctx, Seaweed_DisableVacuum_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@ -256,8 +211,9 @@ func (c *seaweedClient) DisableVacuum(ctx context.Context, in *DisableVacuumRequ
}
func (c *seaweedClient) EnableVacuum(ctx context.Context, in *EnableVacuumRequest, opts ...grpc.CallOption) (*EnableVacuumResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(EnableVacuumResponse)
err := c.cc.Invoke(ctx, Seaweed_EnableVacuum_FullMethodName, in, out, opts...)
err := c.cc.Invoke(ctx, Seaweed_EnableVacuum_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@ -265,8 +221,9 @@ func (c *seaweedClient) EnableVacuum(ctx context.Context, in *EnableVacuumReques
}
func (c *seaweedClient) VolumeMarkReadonly(ctx context.Context, in *VolumeMarkReadonlyRequest, opts ...grpc.CallOption) (*VolumeMarkReadonlyResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(VolumeMarkReadonlyResponse)
err := c.cc.Invoke(ctx, Seaweed_VolumeMarkReadonly_FullMethodName, in, out, opts...)
err := c.cc.Invoke(ctx, Seaweed_VolumeMarkReadonly_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@ -274,8 +231,9 @@ func (c *seaweedClient) VolumeMarkReadonly(ctx context.Context, in *VolumeMarkRe
}
func (c *seaweedClient) GetMasterConfiguration(ctx context.Context, in *GetMasterConfigurationRequest, opts ...grpc.CallOption) (*GetMasterConfigurationResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(GetMasterConfigurationResponse)
err := c.cc.Invoke(ctx, Seaweed_GetMasterConfiguration_FullMethodName, in, out, opts...)
err := c.cc.Invoke(ctx, Seaweed_GetMasterConfiguration_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@ -283,8 +241,9 @@ func (c *seaweedClient) GetMasterConfiguration(ctx context.Context, in *GetMaste
}
func (c *seaweedClient) ListClusterNodes(ctx context.Context, in *ListClusterNodesRequest, opts ...grpc.CallOption) (*ListClusterNodesResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(ListClusterNodesResponse)
err := c.cc.Invoke(ctx, Seaweed_ListClusterNodes_FullMethodName, in, out, opts...)
err := c.cc.Invoke(ctx, Seaweed_ListClusterNodes_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@ -292,8 +251,9 @@ func (c *seaweedClient) ListClusterNodes(ctx context.Context, in *ListClusterNod
}
func (c *seaweedClient) LeaseAdminToken(ctx context.Context, in *LeaseAdminTokenRequest, opts ...grpc.CallOption) (*LeaseAdminTokenResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(LeaseAdminTokenResponse)
err := c.cc.Invoke(ctx, Seaweed_LeaseAdminToken_FullMethodName, in, out, opts...)
err := c.cc.Invoke(ctx, Seaweed_LeaseAdminToken_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@ -301,8 +261,9 @@ func (c *seaweedClient) LeaseAdminToken(ctx context.Context, in *LeaseAdminToken
}
func (c *seaweedClient) ReleaseAdminToken(ctx context.Context, in *ReleaseAdminTokenRequest, opts ...grpc.CallOption) (*ReleaseAdminTokenResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(ReleaseAdminTokenResponse)
err := c.cc.Invoke(ctx, Seaweed_ReleaseAdminToken_FullMethodName, in, out, opts...)
err := c.cc.Invoke(ctx, Seaweed_ReleaseAdminToken_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@ -310,8 +271,9 @@ func (c *seaweedClient) ReleaseAdminToken(ctx context.Context, in *ReleaseAdminT
}
func (c *seaweedClient) Ping(ctx context.Context, in *PingRequest, opts ...grpc.CallOption) (*PingResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(PingResponse)
err := c.cc.Invoke(ctx, Seaweed_Ping_FullMethodName, in, out, opts...)
err := c.cc.Invoke(ctx, Seaweed_Ping_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@ -319,8 +281,9 @@ func (c *seaweedClient) Ping(ctx context.Context, in *PingRequest, opts ...grpc.
}
func (c *seaweedClient) RaftListClusterServers(ctx context.Context, in *RaftListClusterServersRequest, opts ...grpc.CallOption) (*RaftListClusterServersResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(RaftListClusterServersResponse)
err := c.cc.Invoke(ctx, Seaweed_RaftListClusterServers_FullMethodName, in, out, opts...)
err := c.cc.Invoke(ctx, Seaweed_RaftListClusterServers_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@ -328,8 +291,9 @@ func (c *seaweedClient) RaftListClusterServers(ctx context.Context, in *RaftList
}
func (c *seaweedClient) RaftAddServer(ctx context.Context, in *RaftAddServerRequest, opts ...grpc.CallOption) (*RaftAddServerResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(RaftAddServerResponse)
err := c.cc.Invoke(ctx, Seaweed_RaftAddServer_FullMethodName, in, out, opts...)
err := c.cc.Invoke(ctx, Seaweed_RaftAddServer_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@ -337,8 +301,9 @@ func (c *seaweedClient) RaftAddServer(ctx context.Context, in *RaftAddServerRequ
}
func (c *seaweedClient) RaftRemoveServer(ctx context.Context, in *RaftRemoveServerRequest, opts ...grpc.CallOption) (*RaftRemoveServerResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(RaftRemoveServerResponse)
err := c.cc.Invoke(ctx, Seaweed_RaftRemoveServer_FullMethodName, in, out, opts...)
err := c.cc.Invoke(ctx, Seaweed_RaftRemoveServer_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@ -346,8 +311,9 @@ func (c *seaweedClient) RaftRemoveServer(ctx context.Context, in *RaftRemoveServ
}
func (c *seaweedClient) VolumeGrow(ctx context.Context, in *VolumeGrowRequest, opts ...grpc.CallOption) (*VolumeGrowResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(VolumeGrowResponse)
err := c.cc.Invoke(ctx, Seaweed_VolumeGrow_FullMethodName, in, out, opts...)
err := c.cc.Invoke(ctx, Seaweed_VolumeGrow_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@ -356,13 +322,13 @@ func (c *seaweedClient) VolumeGrow(ctx context.Context, in *VolumeGrowRequest, o
// SeaweedServer is the server API for Seaweed service.
// All implementations must embed UnimplementedSeaweedServer
// for forward compatibility
// for forward compatibility.
type SeaweedServer interface {
SendHeartbeat(Seaweed_SendHeartbeatServer) error
KeepConnected(Seaweed_KeepConnectedServer) error
SendHeartbeat(grpc.BidiStreamingServer[Heartbeat, HeartbeatResponse]) error
KeepConnected(grpc.BidiStreamingServer[KeepConnectedRequest, KeepConnectedResponse]) error
LookupVolume(context.Context, *LookupVolumeRequest) (*LookupVolumeResponse, error)
Assign(context.Context, *AssignRequest) (*AssignResponse, error)
StreamAssign(Seaweed_StreamAssignServer) error
StreamAssign(grpc.BidiStreamingServer[AssignRequest, AssignResponse]) error
Statistics(context.Context, *StatisticsRequest) (*StatisticsResponse, error)
CollectionList(context.Context, *CollectionListRequest) (*CollectionListResponse, error)
CollectionDelete(context.Context, *CollectionDeleteRequest) (*CollectionDeleteResponse, error)
@ -384,14 +350,17 @@ type SeaweedServer interface {
mustEmbedUnimplementedSeaweedServer()
}
// UnimplementedSeaweedServer must be embedded to have forward compatible implementations.
type UnimplementedSeaweedServer struct {
}
// UnimplementedSeaweedServer must be embedded to have
// forward compatible implementations.
//
// NOTE: this should be embedded by value instead of pointer to avoid a nil
// pointer dereference when methods are called.
type UnimplementedSeaweedServer struct{}
func (UnimplementedSeaweedServer) SendHeartbeat(Seaweed_SendHeartbeatServer) error {
func (UnimplementedSeaweedServer) SendHeartbeat(grpc.BidiStreamingServer[Heartbeat, HeartbeatResponse]) error {
return status.Errorf(codes.Unimplemented, "method SendHeartbeat not implemented")
}
func (UnimplementedSeaweedServer) KeepConnected(Seaweed_KeepConnectedServer) error {
func (UnimplementedSeaweedServer) KeepConnected(grpc.BidiStreamingServer[KeepConnectedRequest, KeepConnectedResponse]) error {
return status.Errorf(codes.Unimplemented, "method KeepConnected not implemented")
}
func (UnimplementedSeaweedServer) LookupVolume(context.Context, *LookupVolumeRequest) (*LookupVolumeResponse, error) {
@ -400,7 +369,7 @@ func (UnimplementedSeaweedServer) LookupVolume(context.Context, *LookupVolumeReq
func (UnimplementedSeaweedServer) Assign(context.Context, *AssignRequest) (*AssignResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Assign not implemented")
}
func (UnimplementedSeaweedServer) StreamAssign(Seaweed_StreamAssignServer) error {
func (UnimplementedSeaweedServer) StreamAssign(grpc.BidiStreamingServer[AssignRequest, AssignResponse]) error {
return status.Errorf(codes.Unimplemented, "method StreamAssign not implemented")
}
func (UnimplementedSeaweedServer) Statistics(context.Context, *StatisticsRequest) (*StatisticsResponse, error) {
@ -458,6 +427,7 @@ func (UnimplementedSeaweedServer) VolumeGrow(context.Context, *VolumeGrowRequest
return nil, status.Errorf(codes.Unimplemented, "method VolumeGrow not implemented")
}
func (UnimplementedSeaweedServer) mustEmbedUnimplementedSeaweedServer() {}
func (UnimplementedSeaweedServer) testEmbeddedByValue() {}
// UnsafeSeaweedServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to SeaweedServer will
@ -467,60 +437,29 @@ type UnsafeSeaweedServer interface {
}
func RegisterSeaweedServer(s grpc.ServiceRegistrar, srv SeaweedServer) {
// If the following call panics, it indicates UnimplementedSeaweedServer was
// embedded by pointer and is nil. This will cause panics if an
// unimplemented method is ever invoked, so we test this at initialization
// time to prevent it from happening at runtime later due to I/O.
if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
t.testEmbeddedByValue()
}
s.RegisterService(&Seaweed_ServiceDesc, srv)
}
func _Seaweed_SendHeartbeat_Handler(srv interface{}, stream grpc.ServerStream) error {
return srv.(SeaweedServer).SendHeartbeat(&seaweedSendHeartbeatServer{stream})
return srv.(SeaweedServer).SendHeartbeat(&grpc.GenericServerStream[Heartbeat, HeartbeatResponse]{ServerStream: stream})
}
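For completeness, a hedged sketch of the server side under the same assumptions (generated master_pb package; io, grpc, and master_pb imports elided): the handler wraps the raw grpc.ServerStream in grpc.GenericServerStream, and the service method receives it through the grpc.BidiStreamingServer interface, or equivalently through the Seaweed_SendHeartbeatServer alias retained just below.

// Hypothetical master implementation; the embedded Unimplemented struct
// supplies every method this sketch does not override.
type masterServer struct {
	master_pb.UnimplementedSeaweedServer
}

// The parameter may be written as the generic interface or as the alias
// Seaweed_SendHeartbeatServer; both name the same type now.
func (s *masterServer) SendHeartbeat(stream grpc.BidiStreamingServer[master_pb.Heartbeat, master_pb.HeartbeatResponse]) error {
	for {
		hb, err := stream.Recv()
		if err == io.EOF {
			return nil // client closed its send side
		}
		if err != nil {
			return err
		}
		_ = hb // inspect the heartbeat as needed
		if err := stream.Send(&master_pb.HeartbeatResponse{}); err != nil {
			return err
		}
	}
}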
type Seaweed_SendHeartbeatServer interface {
Send(*HeartbeatResponse) error
Recv() (*Heartbeat, error)
grpc.ServerStream
}
type seaweedSendHeartbeatServer struct {
grpc.ServerStream
}
func (x *seaweedSendHeartbeatServer) Send(m *HeartbeatResponse) error {
return x.ServerStream.SendMsg(m)
}
func (x *seaweedSendHeartbeatServer) Recv() (*Heartbeat, error) {
m := new(Heartbeat)
if err := x.ServerStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type Seaweed_SendHeartbeatServer = grpc.BidiStreamingServer[Heartbeat, HeartbeatResponse]
func _Seaweed_KeepConnected_Handler(srv interface{}, stream grpc.ServerStream) error {
return srv.(SeaweedServer).KeepConnected(&seaweedKeepConnectedServer{stream})
return srv.(SeaweedServer).KeepConnected(&grpc.GenericServerStream[KeepConnectedRequest, KeepConnectedResponse]{ServerStream: stream})
}
type Seaweed_KeepConnectedServer interface {
Send(*KeepConnectedResponse) error
Recv() (*KeepConnectedRequest, error)
grpc.ServerStream
}
type seaweedKeepConnectedServer struct {
grpc.ServerStream
}
func (x *seaweedKeepConnectedServer) Send(m *KeepConnectedResponse) error {
return x.ServerStream.SendMsg(m)
}
func (x *seaweedKeepConnectedServer) Recv() (*KeepConnectedRequest, error) {
m := new(KeepConnectedRequest)
if err := x.ServerStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type Seaweed_KeepConnectedServer = grpc.BidiStreamingServer[KeepConnectedRequest, KeepConnectedResponse]
func _Seaweed_LookupVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(LookupVolumeRequest)
@ -559,30 +498,11 @@ func _Seaweed_Assign_Handler(srv interface{}, ctx context.Context, dec func(inte
}
func _Seaweed_StreamAssign_Handler(srv interface{}, stream grpc.ServerStream) error {
return srv.(SeaweedServer).StreamAssign(&seaweedStreamAssignServer{stream})
return srv.(SeaweedServer).StreamAssign(&grpc.GenericServerStream[AssignRequest, AssignResponse]{ServerStream: stream})
}
type Seaweed_StreamAssignServer interface {
Send(*AssignResponse) error
Recv() (*AssignRequest, error)
grpc.ServerStream
}
type seaweedStreamAssignServer struct {
grpc.ServerStream
}
func (x *seaweedStreamAssignServer) Send(m *AssignResponse) error {
return x.ServerStream.SendMsg(m)
}
func (x *seaweedStreamAssignServer) Recv() (*AssignRequest, error) {
m := new(AssignRequest)
if err := x.ServerStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type Seaweed_StreamAssignServer = grpc.BidiStreamingServer[AssignRequest, AssignResponse]
func _Seaweed_Statistics_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(StatisticsRequest)

View File

@ -1,7 +1,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.32.0
// protoc v4.25.3
// protoc-gen-go v1.34.2
// protoc v5.28.1
// source: mount.proto
package mount_pb
@ -142,7 +142,7 @@ func file_mount_proto_rawDescGZIP() []byte {
}
var file_mount_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
var file_mount_proto_goTypes = []interface{}{
var file_mount_proto_goTypes = []any{
(*ConfigureRequest)(nil), // 0: messaging_pb.ConfigureRequest
(*ConfigureResponse)(nil), // 1: messaging_pb.ConfigureResponse
}
@ -162,7 +162,7 @@ func file_mount_proto_init() {
return
}
if !protoimpl.UnsafeEnabled {
file_mount_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
file_mount_proto_msgTypes[0].Exporter = func(v any, i int) any {
switch v := v.(*ConfigureRequest); i {
case 0:
return &v.state
@ -174,7 +174,7 @@ func file_mount_proto_init() {
return nil
}
}
file_mount_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
file_mount_proto_msgTypes[1].Exporter = func(v any, i int) any {
switch v := v.(*ConfigureResponse); i {
case 0:
return &v.state

View File

@ -1,7 +1,7 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.3.0
// - protoc v4.25.3
// - protoc-gen-go-grpc v1.5.1
// - protoc v5.28.1
// source: mount.proto
package mount_pb
@ -15,8 +15,8 @@ import (
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.32.0 or later.
const _ = grpc.SupportPackageIsVersion7
// Requires gRPC-Go v1.64.0 or later.
const _ = grpc.SupportPackageIsVersion9
const (
SeaweedMount_Configure_FullMethodName = "/messaging_pb.SeaweedMount/Configure"
@ -38,8 +38,9 @@ func NewSeaweedMountClient(cc grpc.ClientConnInterface) SeaweedMountClient {
}
func (c *seaweedMountClient) Configure(ctx context.Context, in *ConfigureRequest, opts ...grpc.CallOption) (*ConfigureResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(ConfigureResponse)
err := c.cc.Invoke(ctx, SeaweedMount_Configure_FullMethodName, in, out, opts...)
err := c.cc.Invoke(ctx, SeaweedMount_Configure_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@ -48,20 +49,24 @@ func (c *seaweedMountClient) Configure(ctx context.Context, in *ConfigureRequest
// SeaweedMountServer is the server API for SeaweedMount service.
// All implementations must embed UnimplementedSeaweedMountServer
// for forward compatibility
// for forward compatibility.
type SeaweedMountServer interface {
Configure(context.Context, *ConfigureRequest) (*ConfigureResponse, error)
mustEmbedUnimplementedSeaweedMountServer()
}
// UnimplementedSeaweedMountServer must be embedded to have forward compatible implementations.
type UnimplementedSeaweedMountServer struct {
}
// UnimplementedSeaweedMountServer must be embedded to have
// forward compatible implementations.
//
// NOTE: this should be embedded by value instead of pointer to avoid a nil
// pointer dereference when methods are called.
type UnimplementedSeaweedMountServer struct{}
func (UnimplementedSeaweedMountServer) Configure(context.Context, *ConfigureRequest) (*ConfigureResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Configure not implemented")
}
func (UnimplementedSeaweedMountServer) mustEmbedUnimplementedSeaweedMountServer() {}
func (UnimplementedSeaweedMountServer) testEmbeddedByValue() {}
// UnsafeSeaweedMountServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to SeaweedMountServer will
@ -71,6 +76,13 @@ type UnsafeSeaweedMountServer interface {
}
func RegisterSeaweedMountServer(s grpc.ServiceRegistrar, srv SeaweedMountServer) {
// If the following call panics, it indicates UnimplementedSeaweedMountServer was
// embedded by pointer and is nil. This will cause panics if an
// unimplemented method is ever invoked, so we test this at initialization
// time to prevent it from happening at runtime later due to I/O.
if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
t.testEmbeddedByValue()
}
s.RegisterService(&SeaweedMount_ServiceDesc, srv)
}

View File

@ -1,7 +1,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.32.0
// protoc v4.25.3
// protoc-gen-go v1.34.2
// protoc v5.28.1
// source: mq.proto
package mq_pb
@ -3770,7 +3770,7 @@ func file_mq_proto_rawDescGZIP() []byte {
var file_mq_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
var file_mq_proto_msgTypes = make([]protoimpl.MessageInfo, 54)
var file_mq_proto_goTypes = []interface{}{
var file_mq_proto_goTypes = []any{
(PartitionOffsetStartType)(0), // 0: messaging_pb.PartitionOffsetStartType
(*FindBrokerLeaderRequest)(nil), // 1: messaging_pb.FindBrokerLeaderRequest
(*FindBrokerLeaderResponse)(nil), // 2: messaging_pb.FindBrokerLeaderResponse
@ -3925,7 +3925,7 @@ func file_mq_proto_init() {
return
}
if !protoimpl.UnsafeEnabled {
file_mq_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
file_mq_proto_msgTypes[0].Exporter = func(v any, i int) any {
switch v := v.(*FindBrokerLeaderRequest); i {
case 0:
return &v.state
@ -3937,7 +3937,7 @@ func file_mq_proto_init() {
return nil
}
}
file_mq_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
file_mq_proto_msgTypes[1].Exporter = func(v any, i int) any {
switch v := v.(*FindBrokerLeaderResponse); i {
case 0:
return &v.state
@ -3949,7 +3949,7 @@ func file_mq_proto_init() {
return nil
}
}
file_mq_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
file_mq_proto_msgTypes[2].Exporter = func(v any, i int) any {
switch v := v.(*Topic); i {
case 0:
return &v.state
@ -3961,7 +3961,7 @@ func file_mq_proto_init() {
return nil
}
}
file_mq_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
file_mq_proto_msgTypes[3].Exporter = func(v any, i int) any {
switch v := v.(*Partition); i {
case 0:
return &v.state
@ -3973,7 +3973,7 @@ func file_mq_proto_init() {
return nil
}
}
file_mq_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
file_mq_proto_msgTypes[4].Exporter = func(v any, i int) any {
switch v := v.(*Offset); i {
case 0:
return &v.state
@ -3985,7 +3985,7 @@ func file_mq_proto_init() {
return nil
}
}
file_mq_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
file_mq_proto_msgTypes[5].Exporter = func(v any, i int) any {
switch v := v.(*PartitionOffset); i {
case 0:
return &v.state
@ -3997,7 +3997,7 @@ func file_mq_proto_init() {
return nil
}
}
file_mq_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
file_mq_proto_msgTypes[6].Exporter = func(v any, i int) any {
switch v := v.(*BrokerStats); i {
case 0:
return &v.state
@ -4009,7 +4009,7 @@ func file_mq_proto_init() {
return nil
}
}
file_mq_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
file_mq_proto_msgTypes[7].Exporter = func(v any, i int) any {
switch v := v.(*TopicPartitionStats); i {
case 0:
return &v.state
@ -4021,7 +4021,7 @@ func file_mq_proto_init() {
return nil
}
}
file_mq_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
file_mq_proto_msgTypes[8].Exporter = func(v any, i int) any {
switch v := v.(*PublisherToPubBalancerRequest); i {
case 0:
return &v.state
@ -4033,7 +4033,7 @@ func file_mq_proto_init() {
return nil
}
}
file_mq_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
file_mq_proto_msgTypes[9].Exporter = func(v any, i int) any {
switch v := v.(*PublisherToPubBalancerResponse); i {
case 0:
return &v.state
@ -4045,7 +4045,7 @@ func file_mq_proto_init() {
return nil
}
}
file_mq_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
file_mq_proto_msgTypes[10].Exporter = func(v any, i int) any {
switch v := v.(*BalanceTopicsRequest); i {
case 0:
return &v.state
@ -4057,7 +4057,7 @@ func file_mq_proto_init() {
return nil
}
}
file_mq_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
file_mq_proto_msgTypes[11].Exporter = func(v any, i int) any {
switch v := v.(*BalanceTopicsResponse); i {
case 0:
return &v.state
@ -4069,7 +4069,7 @@ func file_mq_proto_init() {
return nil
}
}
file_mq_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
file_mq_proto_msgTypes[12].Exporter = func(v any, i int) any {
switch v := v.(*ConfigureTopicRequest); i {
case 0:
return &v.state
@ -4081,7 +4081,7 @@ func file_mq_proto_init() {
return nil
}
}
file_mq_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
file_mq_proto_msgTypes[13].Exporter = func(v any, i int) any {
switch v := v.(*ConfigureTopicResponse); i {
case 0:
return &v.state
@ -4093,7 +4093,7 @@ func file_mq_proto_init() {
return nil
}
}
file_mq_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
file_mq_proto_msgTypes[14].Exporter = func(v any, i int) any {
switch v := v.(*ListTopicsRequest); i {
case 0:
return &v.state
@ -4105,7 +4105,7 @@ func file_mq_proto_init() {
return nil
}
}
file_mq_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
file_mq_proto_msgTypes[15].Exporter = func(v any, i int) any {
switch v := v.(*ListTopicsResponse); i {
case 0:
return &v.state
@ -4117,7 +4117,7 @@ func file_mq_proto_init() {
return nil
}
}
file_mq_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} {
file_mq_proto_msgTypes[16].Exporter = func(v any, i int) any {
switch v := v.(*LookupTopicBrokersRequest); i {
case 0:
return &v.state
@ -4129,7 +4129,7 @@ func file_mq_proto_init() {
return nil
}
}
file_mq_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} {
file_mq_proto_msgTypes[17].Exporter = func(v any, i int) any {
switch v := v.(*LookupTopicBrokersResponse); i {
case 0:
return &v.state
@ -4141,7 +4141,7 @@ func file_mq_proto_init() {
return nil
}
}
file_mq_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
file_mq_proto_msgTypes[18].Exporter = func(v any, i int) any {
switch v := v.(*BrokerPartitionAssignment); i {
case 0:
return &v.state
@ -4153,7 +4153,7 @@ func file_mq_proto_init() {
return nil
}
}
file_mq_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} {
file_mq_proto_msgTypes[19].Exporter = func(v any, i int) any {
switch v := v.(*AssignTopicPartitionsRequest); i {
case 0:
return &v.state
@ -4165,7 +4165,7 @@ func file_mq_proto_init() {
return nil
}
}
file_mq_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} {
file_mq_proto_msgTypes[20].Exporter = func(v any, i int) any {
switch v := v.(*AssignTopicPartitionsResponse); i {
case 0:
return &v.state
@ -4177,7 +4177,7 @@ func file_mq_proto_init() {
return nil
}
}
file_mq_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} {
file_mq_proto_msgTypes[21].Exporter = func(v any, i int) any {
switch v := v.(*SubscriberToSubCoordinatorRequest); i {
case 0:
return &v.state
@ -4189,7 +4189,7 @@ func file_mq_proto_init() {
return nil
}
}
file_mq_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} {
file_mq_proto_msgTypes[22].Exporter = func(v any, i int) any {
switch v := v.(*SubscriberToSubCoordinatorResponse); i {
case 0:
return &v.state
@ -4201,7 +4201,7 @@ func file_mq_proto_init() {
return nil
}
}
file_mq_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} {
file_mq_proto_msgTypes[23].Exporter = func(v any, i int) any {
switch v := v.(*ControlMessage); i {
case 0:
return &v.state
@ -4213,7 +4213,7 @@ func file_mq_proto_init() {
return nil
}
}
file_mq_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} {
file_mq_proto_msgTypes[24].Exporter = func(v any, i int) any {
switch v := v.(*DataMessage); i {
case 0:
return &v.state
@ -4225,7 +4225,7 @@ func file_mq_proto_init() {
return nil
}
}
file_mq_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} {
file_mq_proto_msgTypes[25].Exporter = func(v any, i int) any {
switch v := v.(*PublishMessageRequest); i {
case 0:
return &v.state
@ -4237,7 +4237,7 @@ func file_mq_proto_init() {
return nil
}
}
file_mq_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} {
file_mq_proto_msgTypes[26].Exporter = func(v any, i int) any {
switch v := v.(*PublishMessageResponse); i {
case 0:
return &v.state
@ -4249,7 +4249,7 @@ func file_mq_proto_init() {
return nil
}
}
file_mq_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} {
file_mq_proto_msgTypes[27].Exporter = func(v any, i int) any {
switch v := v.(*PublishFollowMeRequest); i {
case 0:
return &v.state
@ -4261,7 +4261,7 @@ func file_mq_proto_init() {
return nil
}
}
file_mq_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} {
file_mq_proto_msgTypes[28].Exporter = func(v any, i int) any {
switch v := v.(*PublishFollowMeResponse); i {
case 0:
return &v.state
@ -4273,7 +4273,7 @@ func file_mq_proto_init() {
return nil
}
}
file_mq_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} {
file_mq_proto_msgTypes[29].Exporter = func(v any, i int) any {
switch v := v.(*SubscribeMessageRequest); i {
case 0:
return &v.state
@ -4285,7 +4285,7 @@ func file_mq_proto_init() {
return nil
}
}
file_mq_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} {
file_mq_proto_msgTypes[30].Exporter = func(v any, i int) any {
switch v := v.(*SubscribeMessageResponse); i {
case 0:
return &v.state
@ -4297,7 +4297,7 @@ func file_mq_proto_init() {
return nil
}
}
file_mq_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} {
file_mq_proto_msgTypes[31].Exporter = func(v any, i int) any {
switch v := v.(*SubscribeFollowMeRequest); i {
case 0:
return &v.state
@ -4309,7 +4309,7 @@ func file_mq_proto_init() {
return nil
}
}
file_mq_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} {
file_mq_proto_msgTypes[32].Exporter = func(v any, i int) any {
switch v := v.(*SubscribeFollowMeResponse); i {
case 0:
return &v.state
@ -4321,7 +4321,7 @@ func file_mq_proto_init() {
return nil
}
}
file_mq_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} {
file_mq_proto_msgTypes[33].Exporter = func(v any, i int) any {
switch v := v.(*ClosePublishersRequest); i {
case 0:
return &v.state
@ -4333,7 +4333,7 @@ func file_mq_proto_init() {
return nil
}
}
file_mq_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} {
file_mq_proto_msgTypes[34].Exporter = func(v any, i int) any {
switch v := v.(*ClosePublishersResponse); i {
case 0:
return &v.state
@ -4345,7 +4345,7 @@ func file_mq_proto_init() {
return nil
}
}
file_mq_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} {
file_mq_proto_msgTypes[35].Exporter = func(v any, i int) any {
switch v := v.(*CloseSubscribersRequest); i {
case 0:
return &v.state
@ -4357,7 +4357,7 @@ func file_mq_proto_init() {
return nil
}
}
file_mq_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} {
file_mq_proto_msgTypes[36].Exporter = func(v any, i int) any {
switch v := v.(*CloseSubscribersResponse); i {
case 0:
return &v.state
@ -4369,7 +4369,7 @@ func file_mq_proto_init() {
return nil
}
}
file_mq_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} {
file_mq_proto_msgTypes[38].Exporter = func(v any, i int) any {
switch v := v.(*PublisherToPubBalancerRequest_InitMessage); i {
case 0:
return &v.state
@ -4381,7 +4381,7 @@ func file_mq_proto_init() {
return nil
}
}
file_mq_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} {
file_mq_proto_msgTypes[39].Exporter = func(v any, i int) any {
switch v := v.(*SubscriberToSubCoordinatorRequest_InitMessage); i {
case 0:
return &v.state
@ -4393,7 +4393,7 @@ func file_mq_proto_init() {
return nil
}
}
file_mq_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} {
file_mq_proto_msgTypes[40].Exporter = func(v any, i int) any {
switch v := v.(*SubscriberToSubCoordinatorRequest_AckUnAssignmentMessage); i {
case 0:
return &v.state
@ -4405,7 +4405,7 @@ func file_mq_proto_init() {
return nil
}
}
file_mq_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} {
file_mq_proto_msgTypes[41].Exporter = func(v any, i int) any {
switch v := v.(*SubscriberToSubCoordinatorRequest_AckAssignmentMessage); i {
case 0:
return &v.state
@ -4417,7 +4417,7 @@ func file_mq_proto_init() {
return nil
}
}
file_mq_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} {
file_mq_proto_msgTypes[42].Exporter = func(v any, i int) any {
switch v := v.(*SubscriberToSubCoordinatorResponse_Assignment); i {
case 0:
return &v.state
@ -4429,7 +4429,7 @@ func file_mq_proto_init() {
return nil
}
}
file_mq_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} {
file_mq_proto_msgTypes[43].Exporter = func(v any, i int) any {
switch v := v.(*SubscriberToSubCoordinatorResponse_UnAssignment); i {
case 0:
return &v.state
@ -4441,7 +4441,7 @@ func file_mq_proto_init() {
return nil
}
}
file_mq_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} {
file_mq_proto_msgTypes[44].Exporter = func(v any, i int) any {
switch v := v.(*PublishMessageRequest_InitMessage); i {
case 0:
return &v.state
@ -4453,7 +4453,7 @@ func file_mq_proto_init() {
return nil
}
}
file_mq_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} {
file_mq_proto_msgTypes[45].Exporter = func(v any, i int) any {
switch v := v.(*PublishFollowMeRequest_InitMessage); i {
case 0:
return &v.state
@ -4465,7 +4465,7 @@ func file_mq_proto_init() {
return nil
}
}
file_mq_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} {
file_mq_proto_msgTypes[46].Exporter = func(v any, i int) any {
switch v := v.(*PublishFollowMeRequest_FlushMessage); i {
case 0:
return &v.state
@ -4477,7 +4477,7 @@ func file_mq_proto_init() {
return nil
}
}
file_mq_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} {
file_mq_proto_msgTypes[47].Exporter = func(v any, i int) any {
switch v := v.(*PublishFollowMeRequest_CloseMessage); i {
case 0:
return &v.state
@ -4489,7 +4489,7 @@ func file_mq_proto_init() {
return nil
}
}
file_mq_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} {
file_mq_proto_msgTypes[48].Exporter = func(v any, i int) any {
switch v := v.(*SubscribeMessageRequest_InitMessage); i {
case 0:
return &v.state
@ -4501,7 +4501,7 @@ func file_mq_proto_init() {
return nil
}
}
file_mq_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} {
file_mq_proto_msgTypes[49].Exporter = func(v any, i int) any {
switch v := v.(*SubscribeMessageRequest_AckMessage); i {
case 0:
return &v.state
@ -4513,7 +4513,7 @@ func file_mq_proto_init() {
return nil
}
}
file_mq_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} {
file_mq_proto_msgTypes[50].Exporter = func(v any, i int) any {
switch v := v.(*SubscribeMessageResponse_SubscribeCtrlMessage); i {
case 0:
return &v.state
@ -4525,7 +4525,7 @@ func file_mq_proto_init() {
return nil
}
}
file_mq_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} {
file_mq_proto_msgTypes[51].Exporter = func(v any, i int) any {
switch v := v.(*SubscribeFollowMeRequest_InitMessage); i {
case 0:
return &v.state
@ -4537,7 +4537,7 @@ func file_mq_proto_init() {
return nil
}
}
file_mq_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} {
file_mq_proto_msgTypes[52].Exporter = func(v any, i int) any {
switch v := v.(*SubscribeFollowMeRequest_AckMessage); i {
case 0:
return &v.state
@ -4549,7 +4549,7 @@ func file_mq_proto_init() {
return nil
}
}
file_mq_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} {
file_mq_proto_msgTypes[53].Exporter = func(v any, i int) any {
switch v := v.(*SubscribeFollowMeRequest_CloseMessage); i {
case 0:
return &v.state
@ -4562,38 +4562,38 @@ func file_mq_proto_init() {
}
}
}
file_mq_proto_msgTypes[8].OneofWrappers = []interface{}{
file_mq_proto_msgTypes[8].OneofWrappers = []any{
(*PublisherToPubBalancerRequest_Init)(nil),
(*PublisherToPubBalancerRequest_Stats)(nil),
}
file_mq_proto_msgTypes[21].OneofWrappers = []interface{}{
file_mq_proto_msgTypes[21].OneofWrappers = []any{
(*SubscriberToSubCoordinatorRequest_Init)(nil),
(*SubscriberToSubCoordinatorRequest_AckAssignment)(nil),
(*SubscriberToSubCoordinatorRequest_AckUnAssignment)(nil),
}
file_mq_proto_msgTypes[22].OneofWrappers = []interface{}{
file_mq_proto_msgTypes[22].OneofWrappers = []any{
(*SubscriberToSubCoordinatorResponse_Assignment_)(nil),
(*SubscriberToSubCoordinatorResponse_UnAssignment_)(nil),
}
file_mq_proto_msgTypes[25].OneofWrappers = []interface{}{
file_mq_proto_msgTypes[25].OneofWrappers = []any{
(*PublishMessageRequest_Init)(nil),
(*PublishMessageRequest_Data)(nil),
}
file_mq_proto_msgTypes[27].OneofWrappers = []interface{}{
file_mq_proto_msgTypes[27].OneofWrappers = []any{
(*PublishFollowMeRequest_Init)(nil),
(*PublishFollowMeRequest_Data)(nil),
(*PublishFollowMeRequest_Flush)(nil),
(*PublishFollowMeRequest_Close)(nil),
}
file_mq_proto_msgTypes[29].OneofWrappers = []interface{}{
file_mq_proto_msgTypes[29].OneofWrappers = []any{
(*SubscribeMessageRequest_Init)(nil),
(*SubscribeMessageRequest_Ack)(nil),
}
file_mq_proto_msgTypes[30].OneofWrappers = []interface{}{
file_mq_proto_msgTypes[30].OneofWrappers = []any{
(*SubscribeMessageResponse_Ctrl)(nil),
(*SubscribeMessageResponse_Data)(nil),
}
file_mq_proto_msgTypes[31].OneofWrappers = []interface{}{
file_mq_proto_msgTypes[31].OneofWrappers = []any{
(*SubscribeFollowMeRequest_Init)(nil),
(*SubscribeFollowMeRequest_Ack)(nil),
(*SubscribeFollowMeRequest_Close)(nil),

View File

@ -1,7 +1,7 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.3.0
// - protoc v4.25.3
// - protoc-gen-go-grpc v1.5.1
// - protoc v5.28.1
// source: mq.proto
package mq_pb
@ -15,8 +15,8 @@ import (
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.32.0 or later.
const _ = grpc.SupportPackageIsVersion7
// Requires gRPC-Go v1.64.0 or later.
const _ = grpc.SupportPackageIsVersion9
const (
SeaweedMessaging_FindBrokerLeader_FullMethodName = "/messaging_pb.SeaweedMessaging/FindBrokerLeader"
@ -42,7 +42,7 @@ type SeaweedMessagingClient interface {
// control plane
FindBrokerLeader(ctx context.Context, in *FindBrokerLeaderRequest, opts ...grpc.CallOption) (*FindBrokerLeaderResponse, error)
// control plane for balancer
PublisherToPubBalancer(ctx context.Context, opts ...grpc.CallOption) (SeaweedMessaging_PublisherToPubBalancerClient, error)
PublisherToPubBalancer(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[PublisherToPubBalancerRequest, PublisherToPubBalancerResponse], error)
BalanceTopics(ctx context.Context, in *BalanceTopicsRequest, opts ...grpc.CallOption) (*BalanceTopicsResponse, error)
// control plane for topic partitions
ListTopics(ctx context.Context, in *ListTopicsRequest, opts ...grpc.CallOption) (*ListTopicsResponse, error)
@ -53,13 +53,13 @@ type SeaweedMessagingClient interface {
ClosePublishers(ctx context.Context, in *ClosePublishersRequest, opts ...grpc.CallOption) (*ClosePublishersResponse, error)
CloseSubscribers(ctx context.Context, in *CloseSubscribersRequest, opts ...grpc.CallOption) (*CloseSubscribersResponse, error)
// subscriber connects to broker balancer, which coordinates with the subscribers
SubscriberToSubCoordinator(ctx context.Context, opts ...grpc.CallOption) (SeaweedMessaging_SubscriberToSubCoordinatorClient, error)
SubscriberToSubCoordinator(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[SubscriberToSubCoordinatorRequest, SubscriberToSubCoordinatorResponse], error)
// data plane for each topic partition
PublishMessage(ctx context.Context, opts ...grpc.CallOption) (SeaweedMessaging_PublishMessageClient, error)
SubscribeMessage(ctx context.Context, opts ...grpc.CallOption) (SeaweedMessaging_SubscribeMessageClient, error)
PublishMessage(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[PublishMessageRequest, PublishMessageResponse], error)
SubscribeMessage(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[SubscribeMessageRequest, SubscribeMessageResponse], error)
// The lead broker asks a follower broker to follow itself
PublishFollowMe(ctx context.Context, opts ...grpc.CallOption) (SeaweedMessaging_PublishFollowMeClient, error)
SubscribeFollowMe(ctx context.Context, opts ...grpc.CallOption) (SeaweedMessaging_SubscribeFollowMeClient, error)
PublishFollowMe(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[PublishFollowMeRequest, PublishFollowMeResponse], error)
SubscribeFollowMe(ctx context.Context, opts ...grpc.CallOption) (grpc.ClientStreamingClient[SubscribeFollowMeRequest, SubscribeFollowMeResponse], error)
}
type seaweedMessagingClient struct {
@ -71,48 +71,32 @@ func NewSeaweedMessagingClient(cc grpc.ClientConnInterface) SeaweedMessagingClie
}
func (c *seaweedMessagingClient) FindBrokerLeader(ctx context.Context, in *FindBrokerLeaderRequest, opts ...grpc.CallOption) (*FindBrokerLeaderResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(FindBrokerLeaderResponse)
err := c.cc.Invoke(ctx, SeaweedMessaging_FindBrokerLeader_FullMethodName, in, out, opts...)
err := c.cc.Invoke(ctx, SeaweedMessaging_FindBrokerLeader_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *seaweedMessagingClient) PublisherToPubBalancer(ctx context.Context, opts ...grpc.CallOption) (SeaweedMessaging_PublisherToPubBalancerClient, error) {
stream, err := c.cc.NewStream(ctx, &SeaweedMessaging_ServiceDesc.Streams[0], SeaweedMessaging_PublisherToPubBalancer_FullMethodName, opts...)
func (c *seaweedMessagingClient) PublisherToPubBalancer(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[PublisherToPubBalancerRequest, PublisherToPubBalancerResponse], error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
stream, err := c.cc.NewStream(ctx, &SeaweedMessaging_ServiceDesc.Streams[0], SeaweedMessaging_PublisherToPubBalancer_FullMethodName, cOpts...)
if err != nil {
return nil, err
}
x := &seaweedMessagingPublisherToPubBalancerClient{stream}
x := &grpc.GenericClientStream[PublisherToPubBalancerRequest, PublisherToPubBalancerResponse]{ClientStream: stream}
return x, nil
}
type SeaweedMessaging_PublisherToPubBalancerClient interface {
Send(*PublisherToPubBalancerRequest) error
Recv() (*PublisherToPubBalancerResponse, error)
grpc.ClientStream
}
type seaweedMessagingPublisherToPubBalancerClient struct {
grpc.ClientStream
}
func (x *seaweedMessagingPublisherToPubBalancerClient) Send(m *PublisherToPubBalancerRequest) error {
return x.ClientStream.SendMsg(m)
}
func (x *seaweedMessagingPublisherToPubBalancerClient) Recv() (*PublisherToPubBalancerResponse, error) {
m := new(PublisherToPubBalancerResponse)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type SeaweedMessaging_PublisherToPubBalancerClient = grpc.BidiStreamingClient[PublisherToPubBalancerRequest, PublisherToPubBalancerResponse]
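A minimal sketch of what this backwards-compatibility alias buys callers, assuming the generated package lives at github.com/seaweedfs/seaweedfs/weed/pb/mq_pb and using a placeholder balancer address: client code that still names the old stream type keeps compiling, because the alias resolves to the generic grpc.BidiStreamingClient.

package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	"github.com/seaweedfs/seaweedfs/weed/pb/mq_pb"
)

func main() {
	// "localhost:17777" is a placeholder broker balancer address.
	conn, err := grpc.NewClient("localhost:17777", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := mq_pb.NewSeaweedMessagingClient(conn)

	// The legacy name still works: it is now an alias for
	// grpc.BidiStreamingClient[PublisherToPubBalancerRequest, PublisherToPubBalancerResponse].
	var stream mq_pb.SeaweedMessaging_PublisherToPubBalancerClient
	stream, err = client.PublisherToPubBalancer(context.Background())
	if err != nil {
		log.Fatal(err)
	}

	// Send and Recv keep the same request/response shapes as before the regeneration.
	if err := stream.Send(&mq_pb.PublisherToPubBalancerRequest{}); err != nil {
		log.Fatal(err)
	}
	if _, err := stream.Recv(); err != nil {
		log.Fatal(err)
	}
}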
func (c *seaweedMessagingClient) BalanceTopics(ctx context.Context, in *BalanceTopicsRequest, opts ...grpc.CallOption) (*BalanceTopicsResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(BalanceTopicsResponse)
err := c.cc.Invoke(ctx, SeaweedMessaging_BalanceTopics_FullMethodName, in, out, opts...)
err := c.cc.Invoke(ctx, SeaweedMessaging_BalanceTopics_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@ -120,8 +104,9 @@ func (c *seaweedMessagingClient) BalanceTopics(ctx context.Context, in *BalanceT
}
func (c *seaweedMessagingClient) ListTopics(ctx context.Context, in *ListTopicsRequest, opts ...grpc.CallOption) (*ListTopicsResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(ListTopicsResponse)
err := c.cc.Invoke(ctx, SeaweedMessaging_ListTopics_FullMethodName, in, out, opts...)
err := c.cc.Invoke(ctx, SeaweedMessaging_ListTopics_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@ -129,8 +114,9 @@ func (c *seaweedMessagingClient) ListTopics(ctx context.Context, in *ListTopicsR
}
func (c *seaweedMessagingClient) ConfigureTopic(ctx context.Context, in *ConfigureTopicRequest, opts ...grpc.CallOption) (*ConfigureTopicResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(ConfigureTopicResponse)
err := c.cc.Invoke(ctx, SeaweedMessaging_ConfigureTopic_FullMethodName, in, out, opts...)
err := c.cc.Invoke(ctx, SeaweedMessaging_ConfigureTopic_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@ -138,8 +124,9 @@ func (c *seaweedMessagingClient) ConfigureTopic(ctx context.Context, in *Configu
}
func (c *seaweedMessagingClient) LookupTopicBrokers(ctx context.Context, in *LookupTopicBrokersRequest, opts ...grpc.CallOption) (*LookupTopicBrokersResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(LookupTopicBrokersResponse)
err := c.cc.Invoke(ctx, SeaweedMessaging_LookupTopicBrokers_FullMethodName, in, out, opts...)
err := c.cc.Invoke(ctx, SeaweedMessaging_LookupTopicBrokers_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@ -147,8 +134,9 @@ func (c *seaweedMessagingClient) LookupTopicBrokers(ctx context.Context, in *Loo
}
func (c *seaweedMessagingClient) AssignTopicPartitions(ctx context.Context, in *AssignTopicPartitionsRequest, opts ...grpc.CallOption) (*AssignTopicPartitionsResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(AssignTopicPartitionsResponse)
err := c.cc.Invoke(ctx, SeaweedMessaging_AssignTopicPartitions_FullMethodName, in, out, opts...)
err := c.cc.Invoke(ctx, SeaweedMessaging_AssignTopicPartitions_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@ -156,8 +144,9 @@ func (c *seaweedMessagingClient) AssignTopicPartitions(ctx context.Context, in *
}
func (c *seaweedMessagingClient) ClosePublishers(ctx context.Context, in *ClosePublishersRequest, opts ...grpc.CallOption) (*ClosePublishersResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(ClosePublishersResponse)
err := c.cc.Invoke(ctx, SeaweedMessaging_ClosePublishers_FullMethodName, in, out, opts...)
err := c.cc.Invoke(ctx, SeaweedMessaging_ClosePublishers_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@ -165,180 +154,88 @@ func (c *seaweedMessagingClient) ClosePublishers(ctx context.Context, in *CloseP
}
func (c *seaweedMessagingClient) CloseSubscribers(ctx context.Context, in *CloseSubscribersRequest, opts ...grpc.CallOption) (*CloseSubscribersResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(CloseSubscribersResponse)
err := c.cc.Invoke(ctx, SeaweedMessaging_CloseSubscribers_FullMethodName, in, out, opts...)
err := c.cc.Invoke(ctx, SeaweedMessaging_CloseSubscribers_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *seaweedMessagingClient) SubscriberToSubCoordinator(ctx context.Context, opts ...grpc.CallOption) (SeaweedMessaging_SubscriberToSubCoordinatorClient, error) {
stream, err := c.cc.NewStream(ctx, &SeaweedMessaging_ServiceDesc.Streams[1], SeaweedMessaging_SubscriberToSubCoordinator_FullMethodName, opts...)
func (c *seaweedMessagingClient) SubscriberToSubCoordinator(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[SubscriberToSubCoordinatorRequest, SubscriberToSubCoordinatorResponse], error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
stream, err := c.cc.NewStream(ctx, &SeaweedMessaging_ServiceDesc.Streams[1], SeaweedMessaging_SubscriberToSubCoordinator_FullMethodName, cOpts...)
if err != nil {
return nil, err
}
x := &seaweedMessagingSubscriberToSubCoordinatorClient{stream}
x := &grpc.GenericClientStream[SubscriberToSubCoordinatorRequest, SubscriberToSubCoordinatorResponse]{ClientStream: stream}
return x, nil
}
type SeaweedMessaging_SubscriberToSubCoordinatorClient interface {
Send(*SubscriberToSubCoordinatorRequest) error
Recv() (*SubscriberToSubCoordinatorResponse, error)
grpc.ClientStream
}
// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type SeaweedMessaging_SubscriberToSubCoordinatorClient = grpc.BidiStreamingClient[SubscriberToSubCoordinatorRequest, SubscriberToSubCoordinatorResponse]
type seaweedMessagingSubscriberToSubCoordinatorClient struct {
grpc.ClientStream
}
func (x *seaweedMessagingSubscriberToSubCoordinatorClient) Send(m *SubscriberToSubCoordinatorRequest) error {
return x.ClientStream.SendMsg(m)
}
func (x *seaweedMessagingSubscriberToSubCoordinatorClient) Recv() (*SubscriberToSubCoordinatorResponse, error) {
m := new(SubscriberToSubCoordinatorResponse)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
func (c *seaweedMessagingClient) PublishMessage(ctx context.Context, opts ...grpc.CallOption) (SeaweedMessaging_PublishMessageClient, error) {
stream, err := c.cc.NewStream(ctx, &SeaweedMessaging_ServiceDesc.Streams[2], SeaweedMessaging_PublishMessage_FullMethodName, opts...)
func (c *seaweedMessagingClient) PublishMessage(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[PublishMessageRequest, PublishMessageResponse], error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
stream, err := c.cc.NewStream(ctx, &SeaweedMessaging_ServiceDesc.Streams[2], SeaweedMessaging_PublishMessage_FullMethodName, cOpts...)
if err != nil {
return nil, err
}
x := &seaweedMessagingPublishMessageClient{stream}
x := &grpc.GenericClientStream[PublishMessageRequest, PublishMessageResponse]{ClientStream: stream}
return x, nil
}
type SeaweedMessaging_PublishMessageClient interface {
Send(*PublishMessageRequest) error
Recv() (*PublishMessageResponse, error)
grpc.ClientStream
}
// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type SeaweedMessaging_PublishMessageClient = grpc.BidiStreamingClient[PublishMessageRequest, PublishMessageResponse]
type seaweedMessagingPublishMessageClient struct {
grpc.ClientStream
}
func (x *seaweedMessagingPublishMessageClient) Send(m *PublishMessageRequest) error {
return x.ClientStream.SendMsg(m)
}
func (x *seaweedMessagingPublishMessageClient) Recv() (*PublishMessageResponse, error) {
m := new(PublishMessageResponse)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
func (c *seaweedMessagingClient) SubscribeMessage(ctx context.Context, opts ...grpc.CallOption) (SeaweedMessaging_SubscribeMessageClient, error) {
stream, err := c.cc.NewStream(ctx, &SeaweedMessaging_ServiceDesc.Streams[3], SeaweedMessaging_SubscribeMessage_FullMethodName, opts...)
func (c *seaweedMessagingClient) SubscribeMessage(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[SubscribeMessageRequest, SubscribeMessageResponse], error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
stream, err := c.cc.NewStream(ctx, &SeaweedMessaging_ServiceDesc.Streams[3], SeaweedMessaging_SubscribeMessage_FullMethodName, cOpts...)
if err != nil {
return nil, err
}
x := &seaweedMessagingSubscribeMessageClient{stream}
x := &grpc.GenericClientStream[SubscribeMessageRequest, SubscribeMessageResponse]{ClientStream: stream}
return x, nil
}
type SeaweedMessaging_SubscribeMessageClient interface {
Send(*SubscribeMessageRequest) error
Recv() (*SubscribeMessageResponse, error)
grpc.ClientStream
}
// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type SeaweedMessaging_SubscribeMessageClient = grpc.BidiStreamingClient[SubscribeMessageRequest, SubscribeMessageResponse]
type seaweedMessagingSubscribeMessageClient struct {
grpc.ClientStream
}
func (x *seaweedMessagingSubscribeMessageClient) Send(m *SubscribeMessageRequest) error {
return x.ClientStream.SendMsg(m)
}
func (x *seaweedMessagingSubscribeMessageClient) Recv() (*SubscribeMessageResponse, error) {
m := new(SubscribeMessageResponse)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
func (c *seaweedMessagingClient) PublishFollowMe(ctx context.Context, opts ...grpc.CallOption) (SeaweedMessaging_PublishFollowMeClient, error) {
stream, err := c.cc.NewStream(ctx, &SeaweedMessaging_ServiceDesc.Streams[4], SeaweedMessaging_PublishFollowMe_FullMethodName, opts...)
func (c *seaweedMessagingClient) PublishFollowMe(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[PublishFollowMeRequest, PublishFollowMeResponse], error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
stream, err := c.cc.NewStream(ctx, &SeaweedMessaging_ServiceDesc.Streams[4], SeaweedMessaging_PublishFollowMe_FullMethodName, cOpts...)
if err != nil {
return nil, err
}
x := &seaweedMessagingPublishFollowMeClient{stream}
x := &grpc.GenericClientStream[PublishFollowMeRequest, PublishFollowMeResponse]{ClientStream: stream}
return x, nil
}
type SeaweedMessaging_PublishFollowMeClient interface {
Send(*PublishFollowMeRequest) error
Recv() (*PublishFollowMeResponse, error)
grpc.ClientStream
}
// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type SeaweedMessaging_PublishFollowMeClient = grpc.BidiStreamingClient[PublishFollowMeRequest, PublishFollowMeResponse]
type seaweedMessagingPublishFollowMeClient struct {
grpc.ClientStream
}
func (x *seaweedMessagingPublishFollowMeClient) Send(m *PublishFollowMeRequest) error {
return x.ClientStream.SendMsg(m)
}
func (x *seaweedMessagingPublishFollowMeClient) Recv() (*PublishFollowMeResponse, error) {
m := new(PublishFollowMeResponse)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
func (c *seaweedMessagingClient) SubscribeFollowMe(ctx context.Context, opts ...grpc.CallOption) (SeaweedMessaging_SubscribeFollowMeClient, error) {
stream, err := c.cc.NewStream(ctx, &SeaweedMessaging_ServiceDesc.Streams[5], SeaweedMessaging_SubscribeFollowMe_FullMethodName, opts...)
func (c *seaweedMessagingClient) SubscribeFollowMe(ctx context.Context, opts ...grpc.CallOption) (grpc.ClientStreamingClient[SubscribeFollowMeRequest, SubscribeFollowMeResponse], error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
stream, err := c.cc.NewStream(ctx, &SeaweedMessaging_ServiceDesc.Streams[5], SeaweedMessaging_SubscribeFollowMe_FullMethodName, cOpts...)
if err != nil {
return nil, err
}
x := &seaweedMessagingSubscribeFollowMeClient{stream}
x := &grpc.GenericClientStream[SubscribeFollowMeRequest, SubscribeFollowMeResponse]{ClientStream: stream}
return x, nil
}
type SeaweedMessaging_SubscribeFollowMeClient interface {
Send(*SubscribeFollowMeRequest) error
CloseAndRecv() (*SubscribeFollowMeResponse, error)
grpc.ClientStream
}
type seaweedMessagingSubscribeFollowMeClient struct {
grpc.ClientStream
}
func (x *seaweedMessagingSubscribeFollowMeClient) Send(m *SubscribeFollowMeRequest) error {
return x.ClientStream.SendMsg(m)
}
func (x *seaweedMessagingSubscribeFollowMeClient) CloseAndRecv() (*SubscribeFollowMeResponse, error) {
if err := x.ClientStream.CloseSend(); err != nil {
return nil, err
}
m := new(SubscribeFollowMeResponse)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type SeaweedMessaging_SubscribeFollowMeClient = grpc.ClientStreamingClient[SubscribeFollowMeRequest, SubscribeFollowMeResponse]
// SeaweedMessagingServer is the server API for SeaweedMessaging service.
// All implementations must embed UnimplementedSeaweedMessagingServer
// for forward compatibility
// for forward compatibility.
type SeaweedMessagingServer interface {
// control plane
FindBrokerLeader(context.Context, *FindBrokerLeaderRequest) (*FindBrokerLeaderResponse, error)
// control plane for balancer
PublisherToPubBalancer(SeaweedMessaging_PublisherToPubBalancerServer) error
PublisherToPubBalancer(grpc.BidiStreamingServer[PublisherToPubBalancerRequest, PublisherToPubBalancerResponse]) error
BalanceTopics(context.Context, *BalanceTopicsRequest) (*BalanceTopicsResponse, error)
// control plane for topic partitions
ListTopics(context.Context, *ListTopicsRequest) (*ListTopicsResponse, error)
@ -349,24 +246,27 @@ type SeaweedMessagingServer interface {
ClosePublishers(context.Context, *ClosePublishersRequest) (*ClosePublishersResponse, error)
CloseSubscribers(context.Context, *CloseSubscribersRequest) (*CloseSubscribersResponse, error)
// subscriber connects to broker balancer, which coordinates with the subscribers
SubscriberToSubCoordinator(SeaweedMessaging_SubscriberToSubCoordinatorServer) error
SubscriberToSubCoordinator(grpc.BidiStreamingServer[SubscriberToSubCoordinatorRequest, SubscriberToSubCoordinatorResponse]) error
// data plane for each topic partition
PublishMessage(SeaweedMessaging_PublishMessageServer) error
SubscribeMessage(SeaweedMessaging_SubscribeMessageServer) error
PublishMessage(grpc.BidiStreamingServer[PublishMessageRequest, PublishMessageResponse]) error
SubscribeMessage(grpc.BidiStreamingServer[SubscribeMessageRequest, SubscribeMessageResponse]) error
// The lead broker asks a follower broker to follow itself
PublishFollowMe(SeaweedMessaging_PublishFollowMeServer) error
SubscribeFollowMe(SeaweedMessaging_SubscribeFollowMeServer) error
PublishFollowMe(grpc.BidiStreamingServer[PublishFollowMeRequest, PublishFollowMeResponse]) error
SubscribeFollowMe(grpc.ClientStreamingServer[SubscribeFollowMeRequest, SubscribeFollowMeResponse]) error
mustEmbedUnimplementedSeaweedMessagingServer()
}
// UnimplementedSeaweedMessagingServer must be embedded to have forward compatible implementations.
type UnimplementedSeaweedMessagingServer struct {
}
// UnimplementedSeaweedMessagingServer must be embedded to have
// forward compatible implementations.
//
// NOTE: this should be embedded by value instead of pointer to avoid a nil
// pointer dereference when methods are called.
type UnimplementedSeaweedMessagingServer struct{}
func (UnimplementedSeaweedMessagingServer) FindBrokerLeader(context.Context, *FindBrokerLeaderRequest) (*FindBrokerLeaderResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method FindBrokerLeader not implemented")
}
func (UnimplementedSeaweedMessagingServer) PublisherToPubBalancer(SeaweedMessaging_PublisherToPubBalancerServer) error {
func (UnimplementedSeaweedMessagingServer) PublisherToPubBalancer(grpc.BidiStreamingServer[PublisherToPubBalancerRequest, PublisherToPubBalancerResponse]) error {
return status.Errorf(codes.Unimplemented, "method PublisherToPubBalancer not implemented")
}
func (UnimplementedSeaweedMessagingServer) BalanceTopics(context.Context, *BalanceTopicsRequest) (*BalanceTopicsResponse, error) {
@ -390,22 +290,23 @@ func (UnimplementedSeaweedMessagingServer) ClosePublishers(context.Context, *Clo
func (UnimplementedSeaweedMessagingServer) CloseSubscribers(context.Context, *CloseSubscribersRequest) (*CloseSubscribersResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method CloseSubscribers not implemented")
}
func (UnimplementedSeaweedMessagingServer) SubscriberToSubCoordinator(SeaweedMessaging_SubscriberToSubCoordinatorServer) error {
func (UnimplementedSeaweedMessagingServer) SubscriberToSubCoordinator(grpc.BidiStreamingServer[SubscriberToSubCoordinatorRequest, SubscriberToSubCoordinatorResponse]) error {
return status.Errorf(codes.Unimplemented, "method SubscriberToSubCoordinator not implemented")
}
func (UnimplementedSeaweedMessagingServer) PublishMessage(SeaweedMessaging_PublishMessageServer) error {
func (UnimplementedSeaweedMessagingServer) PublishMessage(grpc.BidiStreamingServer[PublishMessageRequest, PublishMessageResponse]) error {
return status.Errorf(codes.Unimplemented, "method PublishMessage not implemented")
}
func (UnimplementedSeaweedMessagingServer) SubscribeMessage(SeaweedMessaging_SubscribeMessageServer) error {
func (UnimplementedSeaweedMessagingServer) SubscribeMessage(grpc.BidiStreamingServer[SubscribeMessageRequest, SubscribeMessageResponse]) error {
return status.Errorf(codes.Unimplemented, "method SubscribeMessage not implemented")
}
func (UnimplementedSeaweedMessagingServer) PublishFollowMe(SeaweedMessaging_PublishFollowMeServer) error {
func (UnimplementedSeaweedMessagingServer) PublishFollowMe(grpc.BidiStreamingServer[PublishFollowMeRequest, PublishFollowMeResponse]) error {
return status.Errorf(codes.Unimplemented, "method PublishFollowMe not implemented")
}
func (UnimplementedSeaweedMessagingServer) SubscribeFollowMe(SeaweedMessaging_SubscribeFollowMeServer) error {
func (UnimplementedSeaweedMessagingServer) SubscribeFollowMe(grpc.ClientStreamingServer[SubscribeFollowMeRequest, SubscribeFollowMeResponse]) error {
return status.Errorf(codes.Unimplemented, "method SubscribeFollowMe not implemented")
}
func (UnimplementedSeaweedMessagingServer) mustEmbedUnimplementedSeaweedMessagingServer() {}
func (UnimplementedSeaweedMessagingServer) testEmbeddedByValue() {}
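A hypothetical server-side sketch of the embed-by-value requirement noted above, assuming the same mq_pb import path: only the methods the server actually supports are overridden, and value embedding (not a pointer) is what keeps the generated forward-compatibility machinery safe.

package broker

import (
	"context"

	"github.com/seaweedfs/seaweedfs/weed/pb/mq_pb"
)

type messagingServer struct {
	// Embedded by value, not as *mq_pb.UnimplementedSeaweedMessagingServer,
	// so unimplemented methods return codes.Unimplemented instead of
	// dereferencing a nil embedded pointer.
	mq_pb.UnimplementedSeaweedMessagingServer
}

// Override only the methods this server actually implements.
func (s *messagingServer) FindBrokerLeader(ctx context.Context, req *mq_pb.FindBrokerLeaderRequest) (*mq_pb.FindBrokerLeaderResponse, error) {
	return &mq_pb.FindBrokerLeaderResponse{}, nil
}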
// UnsafeSeaweedMessagingServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to SeaweedMessagingServer will
@ -415,6 +316,13 @@ type UnsafeSeaweedMessagingServer interface {
}
func RegisterSeaweedMessagingServer(s grpc.ServiceRegistrar, srv SeaweedMessagingServer) {
// If the following call panics, it indicates UnimplementedSeaweedMessagingServer was
// embedded by pointer and is nil. This will cause panics if an
// unimplemented method is ever invoked, so we test this at initialization
// time to prevent it from happening at runtime later due to I/O.
if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
t.testEmbeddedByValue()
}
s.RegisterService(&SeaweedMessaging_ServiceDesc, srv)
}
@ -437,30 +345,11 @@ func _SeaweedMessaging_FindBrokerLeader_Handler(srv interface{}, ctx context.Con
}
func _SeaweedMessaging_PublisherToPubBalancer_Handler(srv interface{}, stream grpc.ServerStream) error {
return srv.(SeaweedMessagingServer).PublisherToPubBalancer(&seaweedMessagingPublisherToPubBalancerServer{stream})
return srv.(SeaweedMessagingServer).PublisherToPubBalancer(&grpc.GenericServerStream[PublisherToPubBalancerRequest, PublisherToPubBalancerResponse]{ServerStream: stream})
}
type SeaweedMessaging_PublisherToPubBalancerServer interface {
Send(*PublisherToPubBalancerResponse) error
Recv() (*PublisherToPubBalancerRequest, error)
grpc.ServerStream
}
type seaweedMessagingPublisherToPubBalancerServer struct {
grpc.ServerStream
}
func (x *seaweedMessagingPublisherToPubBalancerServer) Send(m *PublisherToPubBalancerResponse) error {
return x.ServerStream.SendMsg(m)
}
func (x *seaweedMessagingPublisherToPubBalancerServer) Recv() (*PublisherToPubBalancerRequest, error) {
m := new(PublisherToPubBalancerRequest)
if err := x.ServerStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type SeaweedMessaging_PublisherToPubBalancerServer = grpc.BidiStreamingServer[PublisherToPubBalancerRequest, PublisherToPubBalancerResponse]
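Continuing the hypothetical messagingServer from the earlier sketch, a bidirectional handler can still be declared with this legacy server stream name, which now resolves to the generic grpc.BidiStreamingServer; the "errors" and "io" imports are assumed in addition to mq_pb.

func (s *messagingServer) PublisherToPubBalancer(stream mq_pb.SeaweedMessaging_PublisherToPubBalancerServer) error {
	for {
		req, err := stream.Recv()
		if errors.Is(err, io.EOF) {
			return nil // publisher closed its side of the stream
		}
		if err != nil {
			return err
		}
		_ = req // init/stats handling would go here
		if err := stream.Send(&mq_pb.PublisherToPubBalancerResponse{}); err != nil {
			return err
		}
	}
}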
func _SeaweedMessaging_BalanceTopics_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(BalanceTopicsRequest)
@ -589,134 +478,39 @@ func _SeaweedMessaging_CloseSubscribers_Handler(srv interface{}, ctx context.Con
}
func _SeaweedMessaging_SubscriberToSubCoordinator_Handler(srv interface{}, stream grpc.ServerStream) error {
return srv.(SeaweedMessagingServer).SubscriberToSubCoordinator(&seaweedMessagingSubscriberToSubCoordinatorServer{stream})
return srv.(SeaweedMessagingServer).SubscriberToSubCoordinator(&grpc.GenericServerStream[SubscriberToSubCoordinatorRequest, SubscriberToSubCoordinatorResponse]{ServerStream: stream})
}
type SeaweedMessaging_SubscriberToSubCoordinatorServer interface {
Send(*SubscriberToSubCoordinatorResponse) error
Recv() (*SubscriberToSubCoordinatorRequest, error)
grpc.ServerStream
}
type seaweedMessagingSubscriberToSubCoordinatorServer struct {
grpc.ServerStream
}
func (x *seaweedMessagingSubscriberToSubCoordinatorServer) Send(m *SubscriberToSubCoordinatorResponse) error {
return x.ServerStream.SendMsg(m)
}
func (x *seaweedMessagingSubscriberToSubCoordinatorServer) Recv() (*SubscriberToSubCoordinatorRequest, error) {
m := new(SubscriberToSubCoordinatorRequest)
if err := x.ServerStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type SeaweedMessaging_SubscriberToSubCoordinatorServer = grpc.BidiStreamingServer[SubscriberToSubCoordinatorRequest, SubscriberToSubCoordinatorResponse]
func _SeaweedMessaging_PublishMessage_Handler(srv interface{}, stream grpc.ServerStream) error {
return srv.(SeaweedMessagingServer).PublishMessage(&seaweedMessagingPublishMessageServer{stream})
return srv.(SeaweedMessagingServer).PublishMessage(&grpc.GenericServerStream[PublishMessageRequest, PublishMessageResponse]{ServerStream: stream})
}
type SeaweedMessaging_PublishMessageServer interface {
Send(*PublishMessageResponse) error
Recv() (*PublishMessageRequest, error)
grpc.ServerStream
}
type seaweedMessagingPublishMessageServer struct {
grpc.ServerStream
}
func (x *seaweedMessagingPublishMessageServer) Send(m *PublishMessageResponse) error {
return x.ServerStream.SendMsg(m)
}
func (x *seaweedMessagingPublishMessageServer) Recv() (*PublishMessageRequest, error) {
m := new(PublishMessageRequest)
if err := x.ServerStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type SeaweedMessaging_PublishMessageServer = grpc.BidiStreamingServer[PublishMessageRequest, PublishMessageResponse]
func _SeaweedMessaging_SubscribeMessage_Handler(srv interface{}, stream grpc.ServerStream) error {
return srv.(SeaweedMessagingServer).SubscribeMessage(&seaweedMessagingSubscribeMessageServer{stream})
return srv.(SeaweedMessagingServer).SubscribeMessage(&grpc.GenericServerStream[SubscribeMessageRequest, SubscribeMessageResponse]{ServerStream: stream})
}
type SeaweedMessaging_SubscribeMessageServer interface {
Send(*SubscribeMessageResponse) error
Recv() (*SubscribeMessageRequest, error)
grpc.ServerStream
}
type seaweedMessagingSubscribeMessageServer struct {
grpc.ServerStream
}
func (x *seaweedMessagingSubscribeMessageServer) Send(m *SubscribeMessageResponse) error {
return x.ServerStream.SendMsg(m)
}
func (x *seaweedMessagingSubscribeMessageServer) Recv() (*SubscribeMessageRequest, error) {
m := new(SubscribeMessageRequest)
if err := x.ServerStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type SeaweedMessaging_SubscribeMessageServer = grpc.BidiStreamingServer[SubscribeMessageRequest, SubscribeMessageResponse]
func _SeaweedMessaging_PublishFollowMe_Handler(srv interface{}, stream grpc.ServerStream) error {
return srv.(SeaweedMessagingServer).PublishFollowMe(&seaweedMessagingPublishFollowMeServer{stream})
return srv.(SeaweedMessagingServer).PublishFollowMe(&grpc.GenericServerStream[PublishFollowMeRequest, PublishFollowMeResponse]{ServerStream: stream})
}
type SeaweedMessaging_PublishFollowMeServer interface {
Send(*PublishFollowMeResponse) error
Recv() (*PublishFollowMeRequest, error)
grpc.ServerStream
}
type seaweedMessagingPublishFollowMeServer struct {
grpc.ServerStream
}
func (x *seaweedMessagingPublishFollowMeServer) Send(m *PublishFollowMeResponse) error {
return x.ServerStream.SendMsg(m)
}
func (x *seaweedMessagingPublishFollowMeServer) Recv() (*PublishFollowMeRequest, error) {
m := new(PublishFollowMeRequest)
if err := x.ServerStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type SeaweedMessaging_PublishFollowMeServer = grpc.BidiStreamingServer[PublishFollowMeRequest, PublishFollowMeResponse]
func _SeaweedMessaging_SubscribeFollowMe_Handler(srv interface{}, stream grpc.ServerStream) error {
return srv.(SeaweedMessagingServer).SubscribeFollowMe(&seaweedMessagingSubscribeFollowMeServer{stream})
return srv.(SeaweedMessagingServer).SubscribeFollowMe(&grpc.GenericServerStream[SubscribeFollowMeRequest, SubscribeFollowMeResponse]{ServerStream: stream})
}
type SeaweedMessaging_SubscribeFollowMeServer interface {
SendAndClose(*SubscribeFollowMeResponse) error
Recv() (*SubscribeFollowMeRequest, error)
grpc.ServerStream
}
type seaweedMessagingSubscribeFollowMeServer struct {
grpc.ServerStream
}
func (x *seaweedMessagingSubscribeFollowMeServer) SendAndClose(m *SubscribeFollowMeResponse) error {
return x.ServerStream.SendMsg(m)
}
func (x *seaweedMessagingSubscribeFollowMeServer) Recv() (*SubscribeFollowMeRequest, error) {
m := new(SubscribeFollowMeRequest)
if err := x.ServerStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type SeaweedMessaging_SubscribeFollowMeServer = grpc.ClientStreamingServer[SubscribeFollowMeRequest, SubscribeFollowMeResponse]
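SubscribeFollowMe is client-streaming, so the generic type behind this alias exposes Recv plus SendAndClose rather than Send. A sketch on the same hypothetical messagingServer, again assuming "errors" and "io" are imported:

func (s *messagingServer) SubscribeFollowMe(stream mq_pb.SeaweedMessaging_SubscribeFollowMeServer) error {
	for {
		req, err := stream.Recv()
		if errors.Is(err, io.EOF) {
			// All follower messages received; reply once and close the stream.
			return stream.SendAndClose(&mq_pb.SubscribeFollowMeResponse{})
		}
		if err != nil {
			return err
		}
		_ = req // apply the follower ack/close message here
	}
}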
// SeaweedMessaging_ServiceDesc is the grpc.ServiceDesc for SeaweedMessaging service.
// It's only intended for direct use with grpc.RegisterService,

View File

@ -1,7 +1,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.32.0
// protoc v4.25.3
// protoc-gen-go v1.34.2
// protoc v5.28.1
// source: remote.proto
package remote_pb
@ -685,7 +685,7 @@ func file_remote_proto_rawDescGZIP() []byte {
}
var file_remote_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
var file_remote_proto_goTypes = []interface{}{
var file_remote_proto_goTypes = []any{
(*RemoteConf)(nil), // 0: remote_pb.RemoteConf
(*RemoteStorageMapping)(nil), // 1: remote_pb.RemoteStorageMapping
(*RemoteStorageLocation)(nil), // 2: remote_pb.RemoteStorageLocation
@ -707,7 +707,7 @@ func file_remote_proto_init() {
return
}
if !protoimpl.UnsafeEnabled {
file_remote_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
file_remote_proto_msgTypes[0].Exporter = func(v any, i int) any {
switch v := v.(*RemoteConf); i {
case 0:
return &v.state
@ -719,7 +719,7 @@ func file_remote_proto_init() {
return nil
}
}
file_remote_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
file_remote_proto_msgTypes[1].Exporter = func(v any, i int) any {
switch v := v.(*RemoteStorageMapping); i {
case 0:
return &v.state
@ -731,7 +731,7 @@ func file_remote_proto_init() {
return nil
}
}
file_remote_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
file_remote_proto_msgTypes[2].Exporter = func(v any, i int) any {
switch v := v.(*RemoteStorageLocation); i {
case 0:
return &v.state

View File

@ -1,7 +1,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.32.0
// protoc v4.25.3
// protoc-gen-go v1.34.2
// protoc v5.28.1
// source: s3.proto
package s3_pb
@ -282,7 +282,7 @@ func file_s3_proto_rawDescGZIP() []byte {
}
var file_s3_proto_msgTypes = make([]protoimpl.MessageInfo, 6)
var file_s3_proto_goTypes = []interface{}{
var file_s3_proto_goTypes = []any{
(*S3ConfigureRequest)(nil), // 0: messaging_pb.S3ConfigureRequest
(*S3ConfigureResponse)(nil), // 1: messaging_pb.S3ConfigureResponse
(*S3CircuitBreakerConfig)(nil), // 2: messaging_pb.S3CircuitBreakerConfig
@ -310,7 +310,7 @@ func file_s3_proto_init() {
return
}
if !protoimpl.UnsafeEnabled {
file_s3_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
file_s3_proto_msgTypes[0].Exporter = func(v any, i int) any {
switch v := v.(*S3ConfigureRequest); i {
case 0:
return &v.state
@ -322,7 +322,7 @@ func file_s3_proto_init() {
return nil
}
}
file_s3_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
file_s3_proto_msgTypes[1].Exporter = func(v any, i int) any {
switch v := v.(*S3ConfigureResponse); i {
case 0:
return &v.state
@ -334,7 +334,7 @@ func file_s3_proto_init() {
return nil
}
}
file_s3_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
file_s3_proto_msgTypes[2].Exporter = func(v any, i int) any {
switch v := v.(*S3CircuitBreakerConfig); i {
case 0:
return &v.state
@ -346,7 +346,7 @@ func file_s3_proto_init() {
return nil
}
}
file_s3_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
file_s3_proto_msgTypes[3].Exporter = func(v any, i int) any {
switch v := v.(*S3CircuitBreakerOptions); i {
case 0:
return &v.state

View File

@ -1,7 +1,7 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.3.0
// - protoc v4.25.3
// - protoc-gen-go-grpc v1.5.1
// - protoc v5.28.1
// source: s3.proto
package s3_pb
@ -15,8 +15,8 @@ import (
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.32.0 or later.
const _ = grpc.SupportPackageIsVersion7
// Requires gRPC-Go v1.64.0 or later.
const _ = grpc.SupportPackageIsVersion9
const (
SeaweedS3_Configure_FullMethodName = "/messaging_pb.SeaweedS3/Configure"
@ -38,8 +38,9 @@ func NewSeaweedS3Client(cc grpc.ClientConnInterface) SeaweedS3Client {
}
func (c *seaweedS3Client) Configure(ctx context.Context, in *S3ConfigureRequest, opts ...grpc.CallOption) (*S3ConfigureResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(S3ConfigureResponse)
err := c.cc.Invoke(ctx, SeaweedS3_Configure_FullMethodName, in, out, opts...)
err := c.cc.Invoke(ctx, SeaweedS3_Configure_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@ -48,20 +49,24 @@ func (c *seaweedS3Client) Configure(ctx context.Context, in *S3ConfigureRequest,
// SeaweedS3Server is the server API for SeaweedS3 service.
// All implementations must embed UnimplementedSeaweedS3Server
// for forward compatibility
// for forward compatibility.
type SeaweedS3Server interface {
Configure(context.Context, *S3ConfigureRequest) (*S3ConfigureResponse, error)
mustEmbedUnimplementedSeaweedS3Server()
}
// UnimplementedSeaweedS3Server must be embedded to have forward compatible implementations.
type UnimplementedSeaweedS3Server struct {
}
// UnimplementedSeaweedS3Server must be embedded to have
// forward compatible implementations.
//
// NOTE: this should be embedded by value instead of pointer to avoid a nil
// pointer dereference when methods are called.
type UnimplementedSeaweedS3Server struct{}
func (UnimplementedSeaweedS3Server) Configure(context.Context, *S3ConfigureRequest) (*S3ConfigureResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Configure not implemented")
}
func (UnimplementedSeaweedS3Server) mustEmbedUnimplementedSeaweedS3Server() {}
func (UnimplementedSeaweedS3Server) testEmbeddedByValue() {}
// UnsafeSeaweedS3Server may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to SeaweedS3Server will
@ -71,6 +76,13 @@ type UnsafeSeaweedS3Server interface {
}
func RegisterSeaweedS3Server(s grpc.ServiceRegistrar, srv SeaweedS3Server) {
// If the following call panics, it indicates UnimplementedSeaweedS3Server was
// embedded by pointer and is nil. This will cause panics if an
// unimplemented method is ever invoked, so we test this at initialization
// time to prevent it from happening at runtime later due to I/O.
if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
t.testEmbeddedByValue()
}
s.RegisterService(&SeaweedS3_ServiceDesc, srv)
}

View File

@ -1,7 +1,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.32.0
// protoc v4.25.3
// protoc-gen-go v1.34.2
// protoc v5.28.1
// source: schema.proto
package schema_pb
@ -722,7 +722,7 @@ func file_schema_proto_rawDescGZIP() []byte {
var file_schema_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
var file_schema_proto_msgTypes = make([]protoimpl.MessageInfo, 8)
var file_schema_proto_goTypes = []interface{}{
var file_schema_proto_goTypes = []any{
(ScalarType)(0), // 0: schema_pb.ScalarType
(*RecordType)(nil), // 1: schema_pb.RecordType
(*Field)(nil), // 2: schema_pb.Field
@ -758,7 +758,7 @@ func file_schema_proto_init() {
return
}
if !protoimpl.UnsafeEnabled {
file_schema_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
file_schema_proto_msgTypes[0].Exporter = func(v any, i int) any {
switch v := v.(*RecordType); i {
case 0:
return &v.state
@ -770,7 +770,7 @@ func file_schema_proto_init() {
return nil
}
}
file_schema_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
file_schema_proto_msgTypes[1].Exporter = func(v any, i int) any {
switch v := v.(*Field); i {
case 0:
return &v.state
@ -782,7 +782,7 @@ func file_schema_proto_init() {
return nil
}
}
file_schema_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
file_schema_proto_msgTypes[2].Exporter = func(v any, i int) any {
switch v := v.(*Type); i {
case 0:
return &v.state
@ -794,7 +794,7 @@ func file_schema_proto_init() {
return nil
}
}
file_schema_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
file_schema_proto_msgTypes[3].Exporter = func(v any, i int) any {
switch v := v.(*ListType); i {
case 0:
return &v.state
@ -806,7 +806,7 @@ func file_schema_proto_init() {
return nil
}
}
file_schema_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
file_schema_proto_msgTypes[4].Exporter = func(v any, i int) any {
switch v := v.(*RecordValue); i {
case 0:
return &v.state
@ -818,7 +818,7 @@ func file_schema_proto_init() {
return nil
}
}
file_schema_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
file_schema_proto_msgTypes[5].Exporter = func(v any, i int) any {
switch v := v.(*Value); i {
case 0:
return &v.state
@ -830,7 +830,7 @@ func file_schema_proto_init() {
return nil
}
}
file_schema_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
file_schema_proto_msgTypes[6].Exporter = func(v any, i int) any {
switch v := v.(*ListValue); i {
case 0:
return &v.state
@ -843,12 +843,12 @@ func file_schema_proto_init() {
}
}
}
file_schema_proto_msgTypes[2].OneofWrappers = []interface{}{
file_schema_proto_msgTypes[2].OneofWrappers = []any{
(*Type_ScalarType)(nil),
(*Type_RecordType)(nil),
(*Type_ListType)(nil),
}
file_schema_proto_msgTypes[5].OneofWrappers = []interface{}{
file_schema_proto_msgTypes[5].OneofWrappers = []any{
(*Value_BoolValue)(nil),
(*Value_Int32Value)(nil),
(*Value_Int64Value)(nil),

View File

@ -475,12 +475,21 @@ message RemoteFile {
string extension = 7;
}
message VolumeInfo {
repeated RemoteFile files = 1;
uint32 version = 2;
string replication = 3;
uint32 bytes_offset = 4;
int64 dat_file_size = 5; // store the original dat file size
uint64 expire_at_sec = 6; // expiration time of ec volume
bool read_only = 7;
}
message OldVersionVolumeInfo {
repeated RemoteFile files = 1;
uint32 version = 2;
string replication = 3;
uint32 BytesOffset = 4;
int64 dat_file_size = 5; // used for EC encoded volumes to store the original file size
uint64 DestroyTime = 6; // used to record the destruction time of ec volume
int64 dat_file_size = 5; // store the original dat file size
uint64 DestroyTime = 6; // expiration time of ec volume
bool read_only = 7;
}
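A hedged sketch of how this rename plays out in generated Go, assuming the messages above compile into github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb: an OldVersionVolumeInfo (e.g. one read from an existing volume info file) maps field-for-field onto the new VolumeInfo, with BytesOffset becoming bytes_offset and DestroyTime becoming expire_at_sec.

package main

import (
	"fmt"

	"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
)

// upgradeVolumeInfo copies the old-format message into the renamed fields.
func upgradeVolumeInfo(old *volume_server_pb.OldVersionVolumeInfo) *volume_server_pb.VolumeInfo {
	return &volume_server_pb.VolumeInfo{
		Files:       old.Files,
		Version:     old.Version,
		Replication: old.Replication,
		BytesOffset: old.BytesOffset,
		DatFileSize: old.DatFileSize,
		ExpireAtSec: old.DestroyTime,
		ReadOnly:    old.ReadOnly,
	}
}

func main() {
	vi := upgradeVolumeInfo(&volume_server_pb.OldVersionVolumeInfo{Version: 3})
	fmt.Println(vi.GetVersion(), vi.GetExpireAtSec())
}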

File diff suppressed because it is too large

Some files were not shown because too many files have changed in this diff