Compare commits
209 Commits
| SHA1 |
|---|
| 934e65a4e6 |
| 5be467a478 |
| 5e1c112468 |
| 94f69a0eb3 |
| e4254bbf69 |
| e71e6856d0 |
| a47d955e3f |
| 69a24afc04 |
| adfec9a19d |
| dae4f6e481 |
| 676810cc13 |
| 0a7f6e9345 |
| 1af88c4052 |
| c633eb1fea |
| 7badf0e549 |
| 91ad34d709 |
| 6824eb61a8 |
| 1a96ad4389 |
| 25ac72ae6c |
| 85ddf32225 |
| ae4c9918d9 |
| 977d4a5ee4 |
| c81010e7ab |
| c1ae968188 |
| 9f0e50ddf7 |
| fe9ab5c517 |
| d02ba97453 |
| b1abd6bd99 |
| fd892eef6d |
| 1bfcaa4a17 |
| a369b05ee4 |
| e0493f979a |
| 016546a3d5 |
| 10f97f90f2 |
| 3de4391349 |
| 2512d35d8d |
| 749bc0d95a |
| 2045363cd1 |
| 18760d2a7a |
| ad3d65382f |
| 4abb3d15f2 |
| 3368b7cf44 |
| ae670b0d27 |
| 5afd0bf161 |
| 6b93b6012a |
| cca838b671 |
| a34a92c0b9 |
| 68e60cbfdf |
| 366cfc6baa |
| adf971f669 |
| fa3f8a21c8 |
| fafe08ed90 |
| 360183dc96 |
| cb1b1a0afe |
| 393317b6f8 |
| 1e6878998c |
| 55c17b9d8e |
| 4d41652c12 |
| 8b70e3d8a8 |
| 8fb9c910f0 |
| c06997789b |
| 403959218d |
| b2233cf4de |
| 1c0c69a5ab |
| 88909e3bd0 |
| 59b025d3da |
| 48cd11a361 |
| 18e8805798 |
| d1ac03bd8c |
| 31da780ed3 |
| 84dc68c09c |
| ac32e8e089 |
| 6c865e37f1 |
| fb80bc10ae |
| 8685e9f2d1 |
| 9019aceef8 |
| 2e3dda89ad |
| 331f6072bf |
| 6acfbcbc64 |
| f21ebcda28 |
| e366b348cd |
| 064158b060 |
| 1d1fd6e686 |
| 73f8de23a6 |
| 7f4325e814 |
| ad4b84410d |
| 0b7906aaac |
| 6fc6d151b1 |
| 03831e2d06 |
| 257b625749 |
| 1c01af4e69 |
| bbd0f3b33c |
| 9439822763 |
| 63e51d2050 |
| 84830bc4ad |
| ce0a5d2820 |
| 3ad157d841 |
| a20e35c5cc |
| 029921299c |
| f8d6969a4f |
| 1e8bce029f |
| b29fb7e2ba |
| dbb69786d3 |
| c70e9c0ba8 |
| dd8fdd8c8c |
| dfd1b1b92d |
| c4c30e394d |
| 28673aeb01 |
| 04ae010378 |
| cefbcb0460 |
| 01407c3c25 |
| 66fdb208c7 |
| 247896b8b7 |
| 1e6e741f55 |
| b31daabf52 |
| 1ccb4ef922 |
| 54f0659c7b |
| 5c0b8536f0 |
| 462330ead6 |
| 178cf836b1 |
| f45c0d8560 |
| bb146e2c09 |
| f1d2df3e73 |
| ef39987cc7 |
| ee0c4c62d8 |
| e2de7e852c |
| 0529ce74b7 |
| 421693130c |
| 4a17c81b91 |
| ba068ea3f8 |
| 085974fe1d |
| 18598a10e6 |
| 16c50d1d2e |
| 929a66314c |
| 2e7a6ae715 |
| 896aef070c |
| 3598d8b572 |
| 3183cf0e2e |
| a59ee6ed63 |
| e4c1a82eb2 |
| aa54e4698d |
| 43e40ce8d3 |
| 0bd1b13b94 |
| 1c16aae550 |
| 49f47ea87f |
| 8947adc312 |
| 9341863bdb |
| 998e831c5c |
| b2ca113fb5 |
| b211e82ed2 |
| e2779169a0 |
| ec2f9c84d5 |
| 78eb8dcc53 |
| cafa0f5942 |
| 325482a97c |
| 9cee1285b9 |
| a14a74d678 |
| f652186adf |
| afb8731ead |
| ce2d54cc20 |
| d802e15b4f |
| d6ece0b826 |
| cb007762be |
| 6a90714bbe |
| 622ba82ebb |
| 6ca010e9db |
| ad9ce43127 |
| 167d333fcd |
| c5390dec1d |
| e2cbd5fb63 |
| 8d083f7a2d |
| 37442babfb |
| 962a2a894b |
| 0c61c9f7fe |
| 267b140fab |
| 1b194535e6 |
| 58a6c765fe |
| 2604052a9f |
| 7f81890bae |
| 2ff073c735 |
| 6fd11ccab5 |
| 6c49fba592 |
| 922fa0798e |
| 976df182c0 |
| 0e28a650e6 |
| f17768e830 |
| 2c142d3a0c |
| 4e469f8b02 |
| 3f268b70f8 |
| ad7bf7f974 |
| 778451f09f |
| 8e9a15933b |
| da1d7c3183 |
| a178ef4714 |
| 7ca2e193b9 |
| 39a6fabb9f |
| 0249b4e4f5 |
| 67eeb49101 |
| f7213ed35c |
| a532a0198e |
| 16f79e5814 |
| 39aefc2c02 |
| 0b08669bca |
| 8f5761ee13 |
| d2eb4fa5ba |
| ca489c5e73 |
| f0ae48af4c |
| 9eae39e2fa |
| 42ab293ffb |
.github/CONTRIBUTING.md (vendored, new file, 17 lines)
@@ -0,0 +1,17 @@
## Contribution Policy

Initially, Litestream was closed to outside contributions. The goal was to
reduce burnout by limiting the maintenance overhead of reviewing and validating
third-party code. However, this policy is overly broad and has prevented small,
easily testable patches from being contributed.

Litestream is now open to code contributions for bug fixes only. Features carry
a long-term maintenance burden so they will not be accepted at this time.
Please [submit an issue][new-issue] if you have a feature you'd like to
request.

If you find mistakes in the documentation, please submit a fix to the
[documentation repository][docs].

[new-issue]: https://github.com/benbjohnson/litestream/issues/new
[docs]: https://github.com/benbjohnson/litestream.io
.github/workflows/commit.yml (vendored, new file, 229 lines)
@@ -0,0 +1,229 @@
on:
  push:
  pull_request:
    types:
      - opened
      - synchronize
      - reopened

env:
  GO_VERSION: "1.21"

name: Commit
jobs:
  lint:
    name: Lint
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      - uses: actions/setup-go@v4
        with:
          go-version: ${{ env.GO_VERSION }}

      - run: |
          go install golang.org/x/tools/cmd/goimports@latest
          go install honnef.co/go/tools/cmd/staticcheck@latest
          export PATH="$HOME/go/bin:$PATH"

      - uses: pre-commit/action@v3.0.0

  build-windows:
    name: Build Windows
    runs-on: ubuntu-latest
    steps:
      - run: sudo apt-get install -y mingw-w64

      - uses: actions/checkout@v4

      - uses: actions/setup-go@v4
        with:
          go-version: ${{ env.GO_VERSION }}

      - run: |
          go build ./cmd/litestream/
          file ./litestream.exe
        env:
          CGO_ENABLED: "1"
          GOOS: windows
          GOARCH: amd64
          CC: x86_64-w64-mingw32-gcc

  build:
    name: Build & Unit Test
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      - uses: actions/setup-go@v4
        with:
          go-version: ${{ env.GO_VERSION }}

      - run: go env

      - run: go install ./cmd/litestream

      - run: go test -v ./...

  # long-running-test:
  #   name: Run Long Running Unit Test
  #   runs-on: ubuntu-latest
  #   steps:
  #     - uses: actions/checkout@v2
  #     - uses: actions/setup-go@v2
  #       with:
  #         go-version: '1.20'
  #     - uses: actions/cache@v2
  #       with:
  #         path: ~/go/pkg/mod
  #         key: ${{ inputs.os }}-go-${{ hashFiles('**/go.sum') }}
  #         restore-keys: ${{ inputs.os }}-go-
  #
  #     - run: go install ./cmd/litestream
  #     - run: go test -v -run=TestCmd_Replicate_LongRunning ./integration -long-running-duration 1m

  s3-mock-test:
    name: Run S3 Mock Tests
    runs-on: ubuntu-latest
    needs: build
    steps:
      - uses: actions/checkout@v4

      - uses: actions/setup-python@v5
        with:
          python-version: '3.12'
          # cache: 'pip'
      - run: pip install moto[s3,server]

      - uses: actions/setup-go@v4
        with:
          go-version: ${{ env.GO_VERSION }}

      - run: go env

      - run: go install ./cmd/litestream

      - run: ./etc/s3_mock.py go test -v ./replica_client_test.go -integration s3

  s3-integration-test:
    name: Run S3 Integration Tests
    runs-on: ubuntu-latest
    needs: build
    if: github.ref == 'refs/heads/main'
    concurrency:
      group: integration-test-s3
    steps:
      - uses: actions/checkout@v4

      - uses: actions/setup-go@v4
        with:
          go-version: ${{ env.GO_VERSION }}

      - run: go env

      - run: go install ./cmd/litestream

      - run: go test -v ./replica_client_test.go -integration s3
        env:
          LITESTREAM_S3_ACCESS_KEY_ID: ${{ secrets.LITESTREAM_S3_ACCESS_KEY_ID }}
          LITESTREAM_S3_SECRET_ACCESS_KEY: ${{ secrets.LITESTREAM_S3_SECRET_ACCESS_KEY }}
          LITESTREAM_S3_REGION: us-east-1
          LITESTREAM_S3_BUCKET: integration.litestream.io

  gcp-integration-test:
    name: Run GCP Integration Tests
    runs-on: ubuntu-latest
    needs: build
    if: github.ref == 'refs/heads/main'
    concurrency:
      group: integration-test-gcp
    steps:
      - name: Extract GCP credentials
        run: 'echo "$GOOGLE_APPLICATION_CREDENTIALS" > /opt/gcp.json'
        shell: bash
        env:
          GOOGLE_APPLICATION_CREDENTIALS: ${{secrets.GOOGLE_APPLICATION_CREDENTIALS}}

      - uses: actions/checkout@v4

      - uses: actions/setup-go@v4
        with:
          go-version: ${{ env.GO_VERSION }}

      - run: go env

      - run: go install ./cmd/litestream

      - run: go test -v ./replica_client_test.go -integration gcs
        env:
          GOOGLE_APPLICATION_CREDENTIALS: /opt/gcp.json
          LITESTREAM_GCS_BUCKET: integration.litestream.io

  abs-integration-test:
    name: Run Azure Blob Store Integration Tests
    runs-on: ubuntu-latest
    needs: build
    if: github.ref == 'refs/heads/main'
    concurrency:
      group: integration-test-abs
    steps:
      - uses: actions/checkout@v4

      - uses: actions/setup-go@v4
        with:
          go-version: ${{ env.GO_VERSION }}

      - run: go env

      - run: go install ./cmd/litestream

      - run: go test -v ./replica_client_test.go -integration abs
        env:
          LITESTREAM_ABS_ACCOUNT_NAME: ${{ secrets.LITESTREAM_ABS_ACCOUNT_NAME }}
          LITESTREAM_ABS_ACCOUNT_KEY: ${{ secrets.LITESTREAM_ABS_ACCOUNT_KEY }}
          LITESTREAM_ABS_BUCKET: integration

  sftp-integration-test:
    name: Run SFTP Integration Tests
    runs-on: ubuntu-latest
    needs: build
    steps:
      - name: Prepare OpenSSH server
        run: |-
          sudo mkdir -p /test/etc/ssh /test/home /run/sshd /test/data/
          sudo ssh-keygen -t ed25519 -f /test/etc/ssh/id_ed25519_host -N ""
          sudo ssh-keygen -t ed25519 -f /test/etc/ssh/id_ed25519 -N ""
          sudo chmod 0600 /test/etc/ssh/id_ed25519_host /test/etc/ssh/id_ed25519
          sudo chmod 0644 /test/etc/ssh/id_ed25519_host.pub /test/etc/ssh/id_ed25519.pub
          sudo cp /test/etc/ssh/id_ed25519 /test/id_ed25519
          sudo chown $USER /test/id_ed25519
          sudo tee /test/etc/ssh/sshd_config <<EOF
          Port 2222
          HostKey /test/etc/ssh/id_ed25519_host
          AuthorizedKeysFile /test/etc/ssh/id_ed25519.pub
          AuthenticationMethods publickey
          Subsystem sftp internal-sftp
          UsePAM no
          LogLevel DEBUG
          EOF
          sudo /usr/sbin/sshd -e -f /test/etc/ssh/sshd_config -E /test/debug.log

      - name: Test OpenSSH server works with pubkey auth
        run: ssh -v -i /test/id_ed25519 -o StrictHostKeyChecking=accept-new -p 2222 root@localhost whoami || (sudo cat /test/debug.log && exit 1)

      - uses: actions/checkout@v4

      - uses: actions/setup-go@v4
        with:
          go-version: ${{ env.GO_VERSION }}

      - run: go env

      - run: go install ./cmd/litestream

      - run: go test -v ./replica_client_test.go -integration sftp
        env:
          LITESTREAM_SFTP_HOST: "localhost:2222"
          LITESTREAM_SFTP_USER: "root"
          LITESTREAM_SFTP_KEY_PATH: /test/id_ed25519
          LITESTREAM_SFTP_PATH: /test/data
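
The build and S3 mock jobs above can be reproduced outside CI with the same commands the workflow runs. A minimal sketch, assuming Go and Python are installed and the commands are run from the repository root:

    go install ./cmd/litestream                                             # build step from the Build & Unit Test job
    go test -v ./...                                                        # unit tests
    pip install moto[s3,server]                                             # moto mock S3 server used by the s3-mock-test job
    ./etc/s3_mock.py go test -v ./replica_client_test.go -integration s3    # replica client tests against the mock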
.github/workflows/release.docker.yml (vendored, new file, 51 lines)
@@ -0,0 +1,51 @@
on:
  release:
    types:
      - published
  # pull_request:
  #   types:
  #     - opened
  #     - synchronize
  #     - reopened
  #   branches-ignore:
  #     - "dependabot/**"

name: Release (Docker)
jobs:
  docker:
    runs-on: ubuntu-latest
    env:
      PLATFORMS: "linux/amd64,linux/arm64,linux/arm/v7"
      VERSION: "${{ github.event_name == 'release' && github.event.release.name || github.sha }}"

    steps:
      - uses: actions/checkout@v2
      - uses: docker/setup-qemu-action@v1
      - uses: docker/setup-buildx-action@v1

      - uses: docker/login-action@v1
        with:
          username: benbjohnson
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - id: meta
        uses: docker/metadata-action@v3
        with:
          images: litestream/litestream
          tags: |
            type=ref,event=branch
            type=ref,event=pr
            type=sha
            type=sha,format=long
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}.{{minor}}

      - uses: docker/build-push-action@v2
        with:
          context: .
          push: true
          platforms: ${{ env.PLATFORMS }}
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          build-args: |
            LITESTREAM_VERSION=${{ env.VERSION }}

@@ -3,19 +3,46 @@ on:
     types:
       - created

-name: release
+name: release (linux)
 jobs:
-  linux:
+  build:
     runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        include:
+          - arch: amd64
+            cc: gcc
+          - arch: arm64
+            cc: aarch64-linux-gnu-gcc
+          - arch: arm
+            arm: 6
+            cc: arm-linux-gnueabi-gcc
+          - arch: arm
+            arm: 7
+            cc: arm-linux-gnueabihf-gcc
+
+    env:
+      GOOS: linux
+      GOARCH: ${{ matrix.arch }}
+      GOARM: ${{ matrix.arm }}
+      CC: ${{ matrix.cc }}
+
     steps:
       - uses: actions/checkout@v2
       - uses: actions/setup-go@v2
+        with:
+          go-version: '1.21'
+
       - id: release
         uses: bruceadams/get-release@v1.2.2
         env:
           GITHUB_TOKEN: ${{ github.token }}
+
+      - name: Install cross-compilers
+        run: |
+          sudo apt-get update
+          sudo apt-get install -y gcc-aarch64-linux-gnu gcc-arm-linux-gnueabihf gcc-arm-linux-gnueabi

       - name: Install nfpm
         run: |
           wget https://github.com/goreleaser/nfpm/releases/download/v2.2.3/nfpm_2.2.3_Linux_x86_64.tar.gz
@@ -23,23 +50,24 @@ jobs:

       - name: Build litestream
         run: |
+          rm -rf dist
           mkdir -p dist
           cp etc/litestream.yml etc/litestream.service dist
           cat etc/nfpm.yml | LITESTREAM_VERSION=${{ steps.release.outputs.tag_name }} envsubst > dist/nfpm.yml
-          go build -ldflags "-X 'main.Version=${{ steps.release.outputs.tag_name }}'" -o dist/litestream ./cmd/litestream
-
-          cd dist
-          tar -czvf litestream-${{ steps.release.outputs.tag_name }}-linux-amd64.tar.gz litestream
-          ../nfpm pkg --config nfpm.yml --packager deb --target litestream-${{ steps.release.outputs.tag_name }}-linux-amd64.deb
-
-      - name: Upload release binary
+          CGO_ENABLED=1 go build -ldflags "-s -w -extldflags "-static" -X 'main.Version=${{ steps.release.outputs.tag_name }}'" -tags osusergo,netgo,sqlite_omit_load_extension -o dist/litestream ./cmd/litestream
+
+          cd dist
+          tar -czvf litestream-${{ steps.release.outputs.tag_name }}-${{ env.GOOS }}-${{ env.GOARCH }}${{ env.GOARM }}.tar.gz litestream
+          ../nfpm pkg --config nfpm.yml --packager deb --target litestream-${{ steps.release.outputs.tag_name }}-${{ env.GOOS }}-${{ env.GOARCH }}${{ env.GOARM }}.deb
+
+      - name: Upload release tarball
         uses: actions/upload-release-asset@v1.0.2
         env:
           GITHUB_TOKEN: ${{ github.token }}
         with:
           upload_url: ${{ steps.release.outputs.upload_url }}
-          asset_path: ./dist/litestream-${{ steps.release.outputs.tag_name }}-linux-amd64.tar.gz
-          asset_name: litestream-${{ steps.release.outputs.tag_name }}-linux-amd64.tar.gz
+          asset_path: ./dist/litestream-${{ steps.release.outputs.tag_name }}-${{ env.GOOS }}-${{ env.GOARCH }}${{ env.GOARM }}.tar.gz
+          asset_name: litestream-${{ steps.release.outputs.tag_name }}-${{ env.GOOS }}-${{ env.GOARCH }}${{ env.GOARM }}.tar.gz
           asset_content_type: application/gzip

       - name: Upload debian package
@@ -48,6 +76,6 @@ jobs:
           GITHUB_TOKEN: ${{ github.token }}
         with:
           upload_url: ${{ steps.release.outputs.upload_url }}
-          asset_path: ./dist/litestream-${{ steps.release.outputs.tag_name }}-linux-amd64.deb
-          asset_name: litestream-${{ steps.release.outputs.tag_name }}-linux-amd64.deb
+          asset_path: ./dist/litestream-${{ steps.release.outputs.tag_name }}-${{ env.GOOS }}-${{ env.GOARCH }}${{ env.GOARM }}.deb
+          asset_name: litestream-${{ steps.release.outputs.tag_name }}-${{ env.GOOS }}-${{ env.GOARCH }}${{ env.GOARM }}.deb
           asset_content_type: application/octet-stream
.github/workflows/test.yml (vendored, deleted, 21 lines)
@@ -1,21 +0,0 @@
on: [push, pull_request]
name: test
jobs:
  test:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/setup-go@v2
        with:
          go-version: '1.15'

      - uses: actions/checkout@v2

      - uses: actions/cache@v2
        with:
          path: ~/go/pkg/mod
          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
          restore-keys: |
            ${{ runner.os }}-go-

      - name: Run unit tests
        run: go test -v ./...
.pre-commit-config.yaml (new file, 20 lines)
@@ -0,0 +1,20 @@
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.1.0
    hooks:
      - id: trailing-whitespace
        exclude_types: [markdown]
      - id: end-of-file-fixer
      - id: check-yaml
      - id: check-added-large-files

  - repo: https://github.com/tekwizely/pre-commit-golang
    rev: v1.0.0-beta.5
    hooks:
      - id: go-imports-repo
        args:
          - "-local"
          - "github.com/benbjohnson/litestrem"
          - "-w"
      - id: go-vet-repo-mod
      - id: go-staticcheck-repo-mod
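
These are the hooks that the lint job in commit.yml executes through pre-commit/action; they can also be run locally. A minimal sketch, assuming pre-commit is installed with pip and run from the repository root:

    pip install pre-commit        # install the pre-commit runner
    pre-commit run --all-files    # run every hook from .pre-commit-config.yaml across the repository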
Dockerfile (new file, 16 lines)
@@ -0,0 +1,16 @@
FROM golang:1.21.3 as builder

WORKDIR /src/litestream
COPY . .

ARG LITESTREAM_VERSION=latest

RUN --mount=type=cache,target=/root/.cache/go-build \
    --mount=type=cache,target=/go/pkg \
    go build -ldflags "-s -w -X 'main.Version=${LITESTREAM_VERSION}' -extldflags '-static'" -tags osusergo,netgo,sqlite_omit_load_extension -o /usr/local/bin/litestream ./cmd/litestream


FROM alpine:3.17.2
COPY --from=builder /usr/local/bin/litestream /usr/local/bin/litestream
ENTRYPOINT ["/usr/local/bin/litestream"]
CMD []
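
A minimal sketch of building this image locally, assuming Docker with BuildKit enabled (the cache mounts require it) and the repository root as the build context; the tag and version value are illustrative and mirror the LITESTREAM_VERSION build-arg that the Docker release workflow passes:

    docker build --build-arg LITESTREAM_VERSION=dev -t litestream-dev .    # illustrative tag; compiles the static binary and copies it into the alpine image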
LICENSE (876 changed lines)
@@ -1,674 +1,202 @@
-GNU GENERAL PUBLIC LICENSE, Version 3, 29 June 2007 (the standard 674-line GPLv3 text, removed in full)
+Apache License, Version 2.0, January 2004, http://www.apache.org/licenses/ (the standard 202-line Apache 2.0 text, including its appendix on applying the license, added in full)
|
|
||||||
|
|
||||||
You may not impose any further restrictions on the exercise of the
|
|
||||||
rights granted or affirmed under this License. For example, you may
|
|
||||||
not impose a license fee, royalty, or other charge for exercise of
|
|
||||||
rights granted under this License, and you may not initiate litigation
|
|
||||||
(including a cross-claim or counterclaim in a lawsuit) alleging that
|
|
||||||
any patent claim is infringed by making, using, selling, offering for
|
|
||||||
sale, or importing the Program or any portion of it.
|
|
||||||
|
|
||||||
11. Patents.
|
|
||||||
|
|
||||||
A "contributor" is a copyright holder who authorizes use under this
|
|
||||||
License of the Program or a work on which the Program is based. The
|
|
||||||
work thus licensed is called the contributor's "contributor version".
|
|
||||||
|
|
||||||
A contributor's "essential patent claims" are all patent claims
|
|
||||||
owned or controlled by the contributor, whether already acquired or
|
|
||||||
hereafter acquired, that would be infringed by some manner, permitted
|
|
||||||
by this License, of making, using, or selling its contributor version,
|
|
||||||
but do not include claims that would be infringed only as a
|
|
||||||
consequence of further modification of the contributor version. For
|
|
||||||
purposes of this definition, "control" includes the right to grant
|
|
||||||
patent sublicenses in a manner consistent with the requirements of
|
|
||||||
this License.
|
|
||||||
|
|
||||||
Each contributor grants you a non-exclusive, worldwide, royalty-free
|
|
||||||
patent license under the contributor's essential patent claims, to
|
|
||||||
make, use, sell, offer for sale, import and otherwise run, modify and
|
|
||||||
propagate the contents of its contributor version.
|
|
||||||
|
|
||||||
In the following three paragraphs, a "patent license" is any express
|
|
||||||
agreement or commitment, however denominated, not to enforce a patent
|
|
||||||
(such as an express permission to practice a patent or covenant not to
|
|
||||||
sue for patent infringement). To "grant" such a patent license to a
|
|
||||||
party means to make such an agreement or commitment not to enforce a
|
|
||||||
patent against the party.
|
|
||||||
|
|
||||||
If you convey a covered work, knowingly relying on a patent license,
|
|
||||||
and the Corresponding Source of the work is not available for anyone
|
|
||||||
to copy, free of charge and under the terms of this License, through a
|
|
||||||
publicly available network server or other readily accessible means,
|
|
||||||
then you must either (1) cause the Corresponding Source to be so
|
|
||||||
available, or (2) arrange to deprive yourself of the benefit of the
|
|
||||||
patent license for this particular work, or (3) arrange, in a manner
|
|
||||||
consistent with the requirements of this License, to extend the patent
|
|
||||||
license to downstream recipients. "Knowingly relying" means you have
|
|
||||||
actual knowledge that, but for the patent license, your conveying the
|
|
||||||
covered work in a country, or your recipient's use of the covered work
|
|
||||||
in a country, would infringe one or more identifiable patents in that
|
|
||||||
country that you have reason to believe are valid.
|
|
||||||
|
|
||||||
If, pursuant to or in connection with a single transaction or
|
|
||||||
arrangement, you convey, or propagate by procuring conveyance of, a
|
|
||||||
covered work, and grant a patent license to some of the parties
|
|
||||||
receiving the covered work authorizing them to use, propagate, modify
|
|
||||||
or convey a specific copy of the covered work, then the patent license
|
|
||||||
you grant is automatically extended to all recipients of the covered
|
|
||||||
work and works based on it.
|
|
||||||
|
|
||||||
A patent license is "discriminatory" if it does not include within
|
|
||||||
the scope of its coverage, prohibits the exercise of, or is
|
|
||||||
conditioned on the non-exercise of one or more of the rights that are
|
|
||||||
specifically granted under this License. You may not convey a covered
|
|
||||||
work if you are a party to an arrangement with a third party that is
|
|
||||||
in the business of distributing software, under which you make payment
|
|
||||||
to the third party based on the extent of your activity of conveying
|
|
||||||
the work, and under which the third party grants, to any of the
|
|
||||||
parties who would receive the covered work from you, a discriminatory
|
|
||||||
patent license (a) in connection with copies of the covered work
|
|
||||||
conveyed by you (or copies made from those copies), or (b) primarily
|
|
||||||
for and in connection with specific products or compilations that
|
|
||||||
contain the covered work, unless you entered into that arrangement,
|
|
||||||
or that patent license was granted, prior to 28 March 2007.
|
|
||||||
|
|
||||||
Nothing in this License shall be construed as excluding or limiting
|
|
||||||
any implied license or other defenses to infringement that may
|
|
||||||
otherwise be available to you under applicable patent law.
|
|
||||||
|
|
||||||
12. No Surrender of Others' Freedom.
|
|
||||||
|
|
||||||
If conditions are imposed on you (whether by court order, agreement or
|
|
||||||
otherwise) that contradict the conditions of this License, they do not
|
|
||||||
excuse you from the conditions of this License. If you cannot convey a
|
|
||||||
covered work so as to satisfy simultaneously your obligations under this
|
|
||||||
License and any other pertinent obligations, then as a consequence you may
|
|
||||||
not convey it at all. For example, if you agree to terms that obligate you
|
|
||||||
to collect a royalty for further conveying from those to whom you convey
|
|
||||||
the Program, the only way you could satisfy both those terms and this
|
|
||||||
License would be to refrain entirely from conveying the Program.
|
|
||||||
|
|
||||||
13. Use with the GNU Affero General Public License.
|
|
||||||
|
|
||||||
Notwithstanding any other provision of this License, you have
|
|
||||||
permission to link or combine any covered work with a work licensed
|
|
||||||
under version 3 of the GNU Affero General Public License into a single
|
|
||||||
combined work, and to convey the resulting work. The terms of this
|
|
||||||
License will continue to apply to the part which is the covered work,
|
|
||||||
but the special requirements of the GNU Affero General Public License,
|
|
||||||
section 13, concerning interaction through a network will apply to the
|
|
||||||
combination as such.
|
|
||||||
|
|
||||||
14. Revised Versions of this License.
|
|
||||||
|
|
||||||
The Free Software Foundation may publish revised and/or new versions of
|
|
||||||
the GNU General Public License from time to time. Such new versions will
|
|
||||||
be similar in spirit to the present version, but may differ in detail to
|
|
||||||
address new problems or concerns.
|
|
||||||
|
|
||||||
Each version is given a distinguishing version number. If the
|
|
||||||
Program specifies that a certain numbered version of the GNU General
|
|
||||||
Public License "or any later version" applies to it, you have the
|
|
||||||
option of following the terms and conditions either of that numbered
|
|
||||||
version or of any later version published by the Free Software
|
|
||||||
Foundation. If the Program does not specify a version number of the
|
|
||||||
GNU General Public License, you may choose any version ever published
|
|
||||||
by the Free Software Foundation.
|
|
||||||
|
|
||||||
If the Program specifies that a proxy can decide which future
|
|
||||||
versions of the GNU General Public License can be used, that proxy's
|
|
||||||
public statement of acceptance of a version permanently authorizes you
|
|
||||||
to choose that version for the Program.
|
|
||||||
|
|
||||||
Later license versions may give you additional or different
|
|
||||||
permissions. However, no additional obligations are imposed on any
|
|
||||||
author or copyright holder as a result of your choosing to follow a
|
|
||||||
later version.
|
|
||||||
|
|
||||||
15. Disclaimer of Warranty.
|
|
||||||
|
|
||||||
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
|
|
||||||
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
|
|
||||||
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
|
|
||||||
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
|
|
||||||
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
|
||||||
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
|
|
||||||
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
|
|
||||||
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
|
|
||||||
|
|
||||||
16. Limitation of Liability.
|
|
||||||
|
|
||||||
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
|
|
||||||
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
|
|
||||||
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
|
|
||||||
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
|
|
||||||
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
|
|
||||||
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
|
|
||||||
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
|
|
||||||
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
|
|
||||||
SUCH DAMAGES.
|
|
||||||
|
|
||||||
17. Interpretation of Sections 15 and 16.
|
|
||||||
|
|
||||||
If the disclaimer of warranty and limitation of liability provided
|
|
||||||
above cannot be given local legal effect according to their terms,
|
|
||||||
reviewing courts shall apply local law that most closely approximates
|
|
||||||
an absolute waiver of all civil liability in connection with the
|
|
||||||
Program, unless a warranty or assumption of liability accompanies a
|
|
||||||
copy of the Program in return for a fee.
|
|
||||||
|
|
||||||
END OF TERMS AND CONDITIONS
|
|
||||||
|
|
||||||
How to Apply These Terms to Your New Programs
|
|
||||||
|
|
||||||
If you develop a new program, and you want it to be of the greatest
|
|
||||||
possible use to the public, the best way to achieve this is to make it
|
|
||||||
free software which everyone can redistribute and change under these terms.
|
|
||||||
|
|
||||||
To do so, attach the following notices to the program. It is safest
|
|
||||||
to attach them to the start of each source file to most effectively
|
|
||||||
state the exclusion of warranty; and each file should have at least
|
|
||||||
the "copyright" line and a pointer to where the full notice is found.
|
|
||||||
|
|
||||||
<one line to give the program's name and a brief idea of what it does.>
|
|
||||||
Copyright (C) <year> <name of author>
|
|
||||||
|
|
||||||
This program is free software: you can redistribute it and/or modify
|
|
||||||
it under the terms of the GNU General Public License as published by
|
|
||||||
the Free Software Foundation, either version 3 of the License, or
|
|
||||||
(at your option) any later version.
|
|
||||||
|
|
||||||
This program is distributed in the hope that it will be useful,
|
|
||||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
GNU General Public License for more details.
|
|
||||||
|
|
||||||
You should have received a copy of the GNU General Public License
|
|
||||||
along with this program. If not, see <https://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
Also add information on how to contact you by electronic and paper mail.
|
|
||||||
|
|
||||||
If the program does terminal interaction, make it output a short
|
|
||||||
notice like this when it starts in an interactive mode:
|
|
||||||
|
|
||||||
<program> Copyright (C) <year> <name of author>
|
|
||||||
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
|
|
||||||
This is free software, and you are welcome to redistribute it
|
|
||||||
under certain conditions; type `show c' for details.
|
|
||||||
|
|
||||||
The hypothetical commands `show w' and `show c' should show the appropriate
|
|
||||||
parts of the General Public License. Of course, your program's commands
|
|
||||||
might be different; for a GUI interface, you would use an "about box".
|
|
||||||
|
|
||||||
You should also get your employer (if you work as a programmer) or school,
|
|
||||||
if any, to sign a "copyright disclaimer" for the program, if necessary.
|
|
||||||
For more information on this, and how to apply and follow the GNU GPL, see
|
|
||||||
<https://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
The GNU General Public License does not permit incorporating your program
|
|
||||||
into proprietary programs. If your program is a subroutine library, you
|
|
||||||
may consider it more useful to permit linking proprietary applications with
|
|
||||||
the library. If this is what you want to do, use the GNU Lesser General
|
|
||||||
Public License instead of this License. But first, please read
|
|
||||||
<https://www.gnu.org/licenses/why-not-lgpl.html>.
|
|
||||||
|
|||||||
30
Makefile
30
Makefile
@@ -1,19 +1,37 @@
|
|||||||
default:
|
default:
|
||||||
|
|
||||||
dist:
|
docker:
|
||||||
|
docker build -t litestream .
|
||||||
|
|
||||||
|
dist-linux:
|
||||||
mkdir -p dist
|
mkdir -p dist
|
||||||
cp etc/litestream.yml dist/litestream.yml
|
cp etc/litestream.yml dist/litestream.yml
|
||||||
docker run --rm -v "${PWD}":/usr/src/litestream -w /usr/src/litestream -e GOOS=linux -e GOARCH=amd64 golang:1.15 go build -v -o dist/litestream ./cmd/litestream
|
docker run --rm -v "${PWD}":/usr/src/litestream -w /usr/src/litestream -e GOOS=linux -e GOARCH=amd64 golang:1.16 go build -v -ldflags "-s -w" -o dist/litestream ./cmd/litestream
|
||||||
tar -cz -f dist/litestream-linux-amd64.tar.gz -C dist litestream
|
tar -cz -f dist/litestream-linux-amd64.tar.gz -C dist litestream
|
||||||
|
|
||||||
deb: dist
|
dist-linux-arm:
|
||||||
|
docker run --rm -v "${PWD}":/usr/src/litestream -w /usr/src/litestream -e CGO_ENABLED=1 -e CC=arm-linux-gnueabihf-gcc -e GOOS=linux -e GOARCH=arm golang-xc:1.16 go build -v -o dist/litestream-linux-arm ./cmd/litestream
|
||||||
|
|
||||||
|
dist-linux-arm64:
|
||||||
|
docker run --rm -v "${PWD}":/usr/src/litestream -w /usr/src/litestream -e CGO_ENABLED=1 -e CC=aarch64-linux-gnu-gcc -e GOOS=linux -e GOARCH=arm64 golang-xc:1.16 go build -v -o dist/litestream-linux-arm64 ./cmd/litestream
|
||||||
|
|
||||||
|
dist-macos:
|
||||||
ifndef LITESTREAM_VERSION
|
ifndef LITESTREAM_VERSION
|
||||||
$(error LITESTREAM_VERSION is undefined)
|
$(error LITESTREAM_VERSION is undefined)
|
||||||
endif
|
endif
|
||||||
cat etc/nfpm.yml | envsubst > dist/nfpm.yml
|
mkdir -p dist
|
||||||
nfpm pkg --config dist/nfpm.yml --packager deb --target dist/litestream.deb
|
|
||||||
|
GOOS=darwin GOARCH=amd64 CC="gcc -target amd64-apple-macos11" CGO_ENABLED=1 go build -v -ldflags "-s -w -X 'main.Version=${LITESTREAM_VERSION}'" -o dist/litestream ./cmd/litestream
|
||||||
|
gon etc/gon.hcl
|
||||||
|
mv dist/litestream.zip dist/litestream-${LITESTREAM_VERSION}-darwin-amd64.zip
|
||||||
|
openssl dgst -sha256 dist/litestream-${LITESTREAM_VERSION}-darwin-amd64.zip
|
||||||
|
|
||||||
|
GOOS=darwin GOARCH=arm64 CC="gcc -target arm64-apple-macos11" CGO_ENABLED=1 go build -v -ldflags "-s -w -X 'main.Version=${LITESTREAM_VERSION}'" -o dist/litestream ./cmd/litestream
|
||||||
|
gon etc/gon.hcl
|
||||||
|
mv dist/litestream.zip dist/litestream-${LITESTREAM_VERSION}-darwin-arm64.zip
|
||||||
|
openssl dgst -sha256 dist/litestream-${LITESTREAM_VERSION}-darwin-arm64.zip
|
||||||
|
|
||||||
clean:
|
clean:
|
||||||
rm -rf dist
|
rm -rf dist
|
||||||
|
|
||||||
.PHONY: deb dist clean
|
.PHONY: default dist-linux dist-macos clean
|
||||||
|
|||||||
218
README.md
218
README.md
@@ -1,200 +1,60 @@
|
|||||||
Litestream 
|
Litestream
|
||||||
|

|
||||||
|

|
||||||
|

|
||||||
|
[](https://hub.docker.com/r/litestream/litestream/)
|
||||||
|

|
||||||
==========
|
==========
|
||||||
|
|
||||||
Litestream is a standalone streaming replication tool for SQLite. It runs as a
|
Litestream is a standalone disaster recovery tool for SQLite. It runs as a
|
||||||
background process and safely replicates changes incrementally to another file
|
background process and safely replicates changes incrementally to another file
|
||||||
or S3. Litestream only communicates with SQLite through the SQLite API so it
|
or S3. Litestream only communicates with SQLite through the SQLite API so it
|
||||||
will not corrupt your database.
|
will not corrupt your database.
|
||||||
|
|
||||||
|
If you need support or have ideas for improving Litestream, please join the
|
||||||
|
[Litestream Slack][slack] or visit the [GitHub Discussions](https://github.com/benbjohnson/litestream/discussions).
|
||||||
|
Please visit the [Litestream web site](https://litestream.io) for installation
|
||||||
|
instructions and documentation.
|
||||||
|
|
||||||
If you find this project interesting, please consider starring the project on
|
If you find this project interesting, please consider starring the project on
|
||||||
GitHub.
|
GitHub.
|
||||||
|
|
||||||
|
[slack]: https://join.slack.com/t/litestream/shared_invite/zt-n0j4s3ci-lx1JziR3bV6L2NMF723H3Q
|
||||||
## Installation
|
|
||||||
|
|
||||||
### Homebrew
|
|
||||||
|
|
||||||
TODO
|
|
||||||
|
|
||||||
|
|
||||||
### Linux (Debian)
|
## Acknowledgements
|
||||||
|
|
||||||
You can download the `.deb` file from the [Releases page][releases] page and
|
While the Litestream project does not accept external code patches, many
|
||||||
then run the following:
|
of the most valuable contributions are in the forms of testing, feedback, and
|
||||||
|
documentation. These help harden software and streamline usage for other users.
|
||||||
|
|
||||||
```sh
|
I want to give special thanks to individuals who invest much of their time and
|
||||||
$ sudo dpkg -i litestream-v0.3.0-linux-amd64.deb
|
energy into the project to help make it better:
|
||||||
```
|
|
||||||
|
|
||||||
Once installed, you'll need to enable & start the service:
|
- Thanks to [Cory LaNou](https://twitter.com/corylanou) for giving early feedback and testing when Litestream was still pre-release.
|
||||||
|
- Thanks to [Michael Lynch](https://github.com/mtlynch) for digging into issues and contributing to the documentation.
|
||||||
|
- Thanks to [Kurt Mackey](https://twitter.com/mrkurt) for feedback and testing.
|
||||||
|
- Thanks to [Sam Weston](https://twitter.com/cablespaghetti) for figuring out how to run Litestream on Kubernetes and writing up the docs for it.
|
||||||
|
- Thanks to [Rafael](https://github.com/netstx) & [Jungle Boogie](https://github.com/jungle-boogie) for helping to get OpenBSD release builds working.
|
||||||
|
- Thanks to [Simon Gottschlag](https://github.com/simongottschlag), [Marin](https://github.com/supermarin),[Victor Björklund](https://github.com/victorbjorklund), [Jonathan Beri](https://twitter.com/beriberikix) [Yuri](https://github.com/yurivish), [Nathan Probst](https://github.com/nprbst), [Yann Coleu](https://github.com/yanc0), and [Nicholas Grilly](https://twitter.com/ngrilly) for frequent feedback, testing, & support.
|
||||||
|
|
||||||
```sh
|
Huge thanks to fly.io for their support and for contributing credits for testing and development!
|
||||||
$ sudo systemctl enable litestream
|
|
||||||
$ sudo systemctl start litestream
|
|
||||||
```
|
|
||||||
|
|
||||||
|
|
||||||
### Release binaries
|
## Contribution Policy
|
||||||
|
|
||||||
You can also download the release binary for your system from the
|
Initially, Litestream was closed to outside contributions. The goal was to
|
||||||
[Releases page][releases] and run it as a standalone application.
|
reduce burnout by limiting the maintenance overhead of reviewing and validating
|
||||||
|
third-party code. However, this policy is overly broad and has prevented small,
|
||||||
|
easily testable patches from being contributed.
|
||||||
|
|
||||||
|
Litestream is now open to code contributions for bug fixes only. Features carry
|
||||||
|
a long-term maintenance burden so they will not be accepted at this time.
|
||||||
|
Please [submit an issue][new-issue] if you have a feature you'd like to
|
||||||
|
request.
|
||||||
|
|
||||||
## Configuration
|
If you find mistakes in the documentation, please submit a fix to the
|
||||||
|
[documentation repository][docs].
|
||||||
|
|
||||||
Once installed locally, you'll need to create a config file. By default, the
|
[new-issue]: https://github.com/benbjohnson/litestream/issues/new
|
||||||
config file lives at `/etc/litestream.yml` but you can pass in a different
|
[docs]: https://github.com/benbjohnson/litestream.io
|
||||||
path to any `litestream` command using the `-config PATH` flag.
|
|
||||||
|
|
||||||
The configuration specifies one or more `dbs` and a list of one or more replica
|
|
||||||
locations for each db. Below are some common configurations:
|
|
||||||
|
|
||||||
### Replicate to S3
|
|
||||||
|
|
||||||
This will replicate the database at `/path/to/db` to the `"/db"` path inside
|
|
||||||
the S3 bucket named `"mybkt"`.
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
access-key-id: AKIAxxxxxxxxxxxxxxxx
|
|
||||||
secret-access-key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx/xxxxxxxxx
|
|
||||||
|
|
||||||
dbs:
|
|
||||||
- path: /path/to/db
|
|
||||||
replicas:
|
|
||||||
- path: s3://mybkt/db
|
|
||||||
```
|
|
||||||
|
|
||||||
### Replicate to another file path
|
|
||||||
|
|
||||||
This will replicate the database at `/path/to/db` to a directory named
|
|
||||||
`/path/to/replica`.
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
dbs:
|
|
||||||
- path: /path/to/db
|
|
||||||
replicas:
|
|
||||||
- path: /path/to/replica
|
|
||||||
```
|
|
||||||
|
|
||||||
|
|
||||||
### Other configuration options
|
|
||||||
|
|
||||||
These are some additional configuration options available on replicas:
|
|
||||||
|
|
||||||
- `type`—Specify the type of replica (`"file"` or `"s3"`). Derived from `"path"`.
|
|
||||||
- `name`—Specify an optional name for the replica if you are using multiple replicas.
|
|
||||||
- `path`—File path or URL to the replica location.
|
|
||||||
- `retention`—Length of time to keep replicated WAL files. Defaults to `24h`.
|
|
||||||
- `retention-check-interval`—Time between retention enforcement checks. Defaults to `1h`.
|
|
||||||
- `validation-interval`—Interval between periodic checks to ensure restored backup matches current database. Disabled by default.
|
|
||||||
|
|
||||||
These replica options are only available for S3 replicas:
|
|
||||||
|
|
||||||
- `bucket`—S3 bucket name. Derived from `"path"`.
|
|
||||||
- `region`—S3 bucket region. Looked up on startup if unspecified.
|
|
||||||
- `sync-interval`—Replication sync frequency.
|
|
||||||
|
|
||||||
|
|
||||||
## Usage
|
|
||||||
|
|
||||||
### Replication
|
|
||||||
|
|
||||||
Once your configuration is saved, you'll need to begin replication. If you
|
|
||||||
installed the `.deb` file then run:
|
|
||||||
|
|
||||||
```sh
|
|
||||||
$ sudo systemctl restart litestream
|
|
||||||
```
|
|
||||||
|
|
||||||
To run litestream on its own, run:
|
|
||||||
|
|
||||||
```sh
|
|
||||||
# Replicate using the /etc/litestream.yml configuration.
|
|
||||||
$ litestream replicate
|
|
||||||
|
|
||||||
# Replicate using a different configuration path.
|
|
||||||
$ litestream replicate -config /path/to/litestream.yml
|
|
||||||
```
|
|
||||||
|
|
||||||
The `litestream` command will initialize and then wait indefinitely for changes.
|
|
||||||
You should see your destination replica path is now populated with a
|
|
||||||
`generations` directory. Inside there should be a 16-character hex generation
|
|
||||||
directory and inside there should be snapshots & WAL files. As you make changes
|
|
||||||
to your source database, changes will be copied over to your replica incrementally.
|
|
||||||
|
|
||||||
|
|
||||||
### Restoring a backup
|
|
||||||
|
|
||||||
Litestream can restore a previous snapshot and replay all replicated WAL files.
|
|
||||||
By default, it will restore up to the latest WAL file but you can also perform
|
|
||||||
point-in-time restores.
|
|
||||||
|
|
||||||
A database can only be restored to a path that does not exist so you don't need
|
|
||||||
to worry about accidentally overwriting your current database.
|
|
||||||
|
|
||||||
```sh
|
|
||||||
# Restore database to original path.
|
|
||||||
$ litestream restore /path/to/db
|
|
||||||
|
|
||||||
# Restore database to a new location.
|
|
||||||
$ litestream restore -o /tmp/mynewdb /path/to/db
|
|
||||||
|
|
||||||
# Restore database to a specific point-in-time.
|
|
||||||
$ litestream restore -timestamp 2020-01-01T00:00:00Z /path/to/db
|
|
||||||
```
|
|
||||||
|
|
||||||
Point-in-time restores only have the resolution of the timestamp of the WAL file
|
|
||||||
itself. By default, litestream will start a new WAL file every minute so
|
|
||||||
point-in-time restores are only accurate to the minute.
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
## How it works
|
|
||||||
|
|
||||||
SQLite provides a WAL (write-ahead log) journaling mode which writes pages to
|
|
||||||
a `-wal` file before eventually being copied over to the original database file.
|
|
||||||
This copying process is known as checkpointing. The WAL file works as a circular
|
|
||||||
buffer so when the WAL reaches a certain size then it restarts from the beginning.
|
|
||||||
|
|
||||||
Litestream works by taking over the checkpointing process and controlling when
|
|
||||||
it is restarted to ensure that it copies every new page. Checkpointing is only
|
|
||||||
allowed when there are no read transactions so Litestream maintains a
|
|
||||||
long-running read transaction against each database until it is ready to
|
|
||||||
checkpoint.
|
|
||||||
|
|
||||||
The SQLite WAL file is copied to a separate location called the shadow WAL which
|
|
||||||
ensures that it will not be overwritten by SQLite. This shadow WAL acts as a
|
|
||||||
temporary buffer so that replicas can replicate to their destination (e.g.
|
|
||||||
another file path or to S3). The shadow WAL files are removed once they have
|
|
||||||
been fully replicated. You can find the shadow directory as a hidden directory
|
|
||||||
next to your database file. If you database file is named `/var/lib/my.db` then
|
|
||||||
the shadow directory will be `/var/lib/.my.db-litestream`.
|
|
||||||
|
|
||||||
Litestream groups a snapshot and all subsequent WAL changes into "generations".
|
|
||||||
A generation is started on initial replication of a database and a new
|
|
||||||
generation will be started if litestream detects that the WAL replication is
|
|
||||||
no longer contiguous. This can occur if the `litestream` process is stopped and
|
|
||||||
another process is allowed to checkpoint the WAL.
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
## Open-source, not open-contribution
|
|
||||||
|
|
||||||
[Similar to SQLite](https://www.sqlite.org/copyright.html), litestream is open
|
|
||||||
source but closed to contributions. This keeps the code base free of proprietary
|
|
||||||
or licensed code but it also helps me continue to maintain and build litestream.
|
|
||||||
|
|
||||||
As the author of [BoltDB](https://github.com/boltdb/bolt), I found that
|
|
||||||
accepting and maintaining third party patches contributed to my burn out and
|
|
||||||
I eventually archived the project. Writing databases & low-level replication
|
|
||||||
tools involves nuance and simple one line changes can have profound and
|
|
||||||
unexpected changes in correctness and performance. Small contributions
|
|
||||||
typically required hours of my time to properly test and validate them.
|
|
||||||
|
|
||||||
I am grateful for community involvement, bug reports, & feature requests. I do
|
|
||||||
not wish to come off as anything but welcoming, however, I've
|
|
||||||
made the decision to keep this project closed to contributions for my own
|
|
||||||
mental health and long term viability of the project.
|
|
||||||
|
|
||||||
|
|
||||||
[releases]: https://github.com/benbjohnson/litestream/releases
|
|
||||||
|
|||||||
565
abs/replica_client.go
Normal file
565
abs/replica_client.go
Normal file
@@ -0,0 +1,565 @@
|
|||||||
|
package abs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net/url"
|
||||||
|
"os"
|
||||||
|
"path"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/Azure/azure-storage-blob-go/azblob"
|
||||||
|
"github.com/benbjohnson/litestream"
|
||||||
|
"github.com/benbjohnson/litestream/internal"
|
||||||
|
"golang.org/x/sync/errgroup"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ReplicaClientType is the client type for this package.
|
||||||
|
const ReplicaClientType = "abs"
|
||||||
|
|
||||||
|
var _ litestream.ReplicaClient = (*ReplicaClient)(nil)
|
||||||
|
|
||||||
|
// ReplicaClient is a client for writing snapshots & WAL segments to disk.
|
||||||
|
type ReplicaClient struct {
|
||||||
|
mu sync.Mutex
|
||||||
|
containerURL *azblob.ContainerURL
|
||||||
|
|
||||||
|
// Azure credentials
|
||||||
|
AccountName string
|
||||||
|
AccountKey string
|
||||||
|
Endpoint string
|
||||||
|
|
||||||
|
// Azure Blob Storage container information
|
||||||
|
Bucket string
|
||||||
|
Path string
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewReplicaClient returns a new instance of ReplicaClient.
|
||||||
|
func NewReplicaClient() *ReplicaClient {
|
||||||
|
return &ReplicaClient{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Type returns "abs" as the client type.
|
||||||
|
func (c *ReplicaClient) Type() string {
|
||||||
|
return ReplicaClientType
|
||||||
|
}
|
||||||
|
|
||||||
|
// Init initializes the connection to Azure. No-op if already initialized.
|
||||||
|
func (c *ReplicaClient) Init(ctx context.Context) (err error) {
|
||||||
|
c.mu.Lock()
|
||||||
|
defer c.mu.Unlock()
|
||||||
|
|
||||||
|
if c.containerURL != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read account key from environment, if available.
|
||||||
|
accountKey := c.AccountKey
|
||||||
|
if accountKey == "" {
|
||||||
|
accountKey = os.Getenv("LITESTREAM_AZURE_ACCOUNT_KEY")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Authenticate to ACS.
|
||||||
|
credential, err := azblob.NewSharedKeyCredential(c.AccountName, accountKey)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Construct & parse endpoint unless already set.
|
||||||
|
endpoint := c.Endpoint
|
||||||
|
if endpoint == "" {
|
||||||
|
endpoint = fmt.Sprintf("https://%s.blob.core.windows.net", c.AccountName)
|
||||||
|
}
|
||||||
|
endpointURL, err := url.Parse(endpoint)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("cannot parse azure endpoint: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Build pipeline and reference to container.
|
||||||
|
pipeline := azblob.NewPipeline(credential, azblob.PipelineOptions{
|
||||||
|
Retry: azblob.RetryOptions{
|
||||||
|
TryTimeout: 24 * time.Hour,
|
||||||
|
},
|
||||||
|
})
|
||||||
|
containerURL := azblob.NewServiceURL(*endpointURL, pipeline).NewContainerURL(c.Bucket)
|
||||||
|
c.containerURL = &containerURL
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Generations returns a list of available generation names.
|
||||||
|
func (c *ReplicaClient) Generations(ctx context.Context) ([]string, error) {
|
||||||
|
if err := c.Init(ctx); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var generations []string
|
||||||
|
var marker azblob.Marker
|
||||||
|
for marker.NotDone() {
|
||||||
|
internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "LIST").Inc()
|
||||||
|
|
||||||
|
resp, err := c.containerURL.ListBlobsHierarchySegment(ctx, marker, "/", azblob.ListBlobsSegmentOptions{
|
||||||
|
Prefix: litestream.GenerationsPath(c.Path) + "/",
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
marker = resp.NextMarker
|
||||||
|
|
||||||
|
for _, prefix := range resp.Segment.BlobPrefixes {
|
||||||
|
name := path.Base(strings.TrimSuffix(prefix.Name, "/"))
|
||||||
|
if !litestream.IsGenerationName(name) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
generations = append(generations, name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return generations, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteGeneration deletes all snapshots & WAL segments within a generation.
|
||||||
|
func (c *ReplicaClient) DeleteGeneration(ctx context.Context, generation string) error {
|
||||||
|
if err := c.Init(ctx); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
dir, err := litestream.GenerationPath(c.Path, generation)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("cannot determine generation path: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
var marker azblob.Marker
|
||||||
|
for marker.NotDone() {
|
||||||
|
internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "LIST").Inc()
|
||||||
|
|
||||||
|
resp, err := c.containerURL.ListBlobsFlatSegment(ctx, marker, azblob.ListBlobsSegmentOptions{Prefix: dir + "/"})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
marker = resp.NextMarker
|
||||||
|
|
||||||
|
for _, item := range resp.Segment.BlobItems {
|
||||||
|
internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "DELETE").Inc()
|
||||||
|
|
||||||
|
blobURL := c.containerURL.NewBlobURL(item.Name)
|
||||||
|
if _, err := blobURL.Delete(ctx, azblob.DeleteSnapshotsOptionNone, azblob.BlobAccessConditions{}); isNotExists(err) {
|
||||||
|
continue
|
||||||
|
} else if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// log.Printf("%s(%s): retainer: deleting generation: %s", r.db.Path(), r.Name(), generation)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Snapshots returns an iterator over all available snapshots for a generation.
|
||||||
|
func (c *ReplicaClient) Snapshots(ctx context.Context, generation string) (litestream.SnapshotIterator, error) {
|
||||||
|
if err := c.Init(ctx); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return newSnapshotIterator(ctx, generation, c), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// WriteSnapshot writes LZ4 compressed data from rd to the object storage.
|
||||||
|
func (c *ReplicaClient) WriteSnapshot(ctx context.Context, generation string, index int, rd io.Reader) (info litestream.SnapshotInfo, err error) {
|
||||||
|
if err := c.Init(ctx); err != nil {
|
||||||
|
return info, err
|
||||||
|
}
|
||||||
|
|
||||||
|
key, err := litestream.SnapshotPath(c.Path, generation, index)
|
||||||
|
if err != nil {
|
||||||
|
return info, fmt.Errorf("cannot determine snapshot path: %w", err)
|
||||||
|
}
|
||||||
|
startTime := time.Now()
|
||||||
|
|
||||||
|
rc := internal.NewReadCounter(rd)
|
||||||
|
|
||||||
|
blobURL := c.containerURL.NewBlockBlobURL(key)
|
||||||
|
if _, err := azblob.UploadStreamToBlockBlob(ctx, rc, blobURL, azblob.UploadStreamToBlockBlobOptions{
|
||||||
|
BlobHTTPHeaders: azblob.BlobHTTPHeaders{ContentType: "application/octet-stream"},
|
||||||
|
BlobAccessTier: azblob.DefaultAccessTier,
|
||||||
|
}); err != nil {
|
||||||
|
return info, err
|
||||||
|
}
|
||||||
|
|
||||||
|
internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "PUT").Inc()
|
||||||
|
internal.OperationBytesCounterVec.WithLabelValues(ReplicaClientType, "PUT").Add(float64(rc.N()))
|
||||||
|
|
||||||
|
// log.Printf("%s(%s): snapshot: creating %s/%08x t=%s", r.db.Path(), r.Name(), generation, index, time.Since(startTime).Truncate(time.Millisecond))
|
||||||
|
|
||||||
|
return litestream.SnapshotInfo{
|
||||||
|
Generation: generation,
|
||||||
|
Index: index,
|
||||||
|
Size: rc.N(),
|
||||||
|
CreatedAt: startTime.UTC(),
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// SnapshotReader returns a reader for snapshot data at the given generation/index.
|
||||||
|
func (c *ReplicaClient) SnapshotReader(ctx context.Context, generation string, index int) (io.ReadCloser, error) {
|
||||||
|
if err := c.Init(ctx); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
key, err := litestream.SnapshotPath(c.Path, generation, index)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("cannot determine snapshot path: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
blobURL := c.containerURL.NewBlobURL(key)
|
||||||
|
resp, err := blobURL.Download(ctx, 0, 0, azblob.BlobAccessConditions{}, false, azblob.ClientProvidedKeyOptions{})
|
||||||
|
if isNotExists(err) {
|
||||||
|
return nil, os.ErrNotExist
|
||||||
|
} else if err != nil {
|
||||||
|
return nil, fmt.Errorf("cannot start new reader for %q: %w", key, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "GET").Inc()
|
||||||
|
internal.OperationBytesCounterVec.WithLabelValues(ReplicaClientType, "GET").Add(float64(resp.ContentLength()))
|
||||||
|
|
||||||
|
return resp.Body(azblob.RetryReaderOptions{}), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteSnapshot deletes a snapshot with the given generation & index.
|
||||||
|
func (c *ReplicaClient) DeleteSnapshot(ctx context.Context, generation string, index int) error {
|
||||||
|
if err := c.Init(ctx); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
key, err := litestream.SnapshotPath(c.Path, generation, index)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("cannot determine snapshot path: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "DELETE").Inc()
|
||||||
|
|
||||||
|
blobURL := c.containerURL.NewBlobURL(key)
|
||||||
|
if _, err := blobURL.Delete(ctx, azblob.DeleteSnapshotsOptionNone, azblob.BlobAccessConditions{}); isNotExists(err) {
|
||||||
|
return nil
|
||||||
|
} else if err != nil {
|
||||||
|
return fmt.Errorf("cannot delete snapshot %q: %w", key, err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// WALSegments returns an iterator over all available WAL files for a generation.
|
||||||
|
func (c *ReplicaClient) WALSegments(ctx context.Context, generation string) (litestream.WALSegmentIterator, error) {
|
||||||
|
if err := c.Init(ctx); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return newWALSegmentIterator(ctx, generation, c), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// WriteWALSegment writes LZ4 compressed data from rd into a file on disk.
|
||||||
|
func (c *ReplicaClient) WriteWALSegment(ctx context.Context, pos litestream.Pos, rd io.Reader) (info litestream.WALSegmentInfo, err error) {
|
||||||
|
if err := c.Init(ctx); err != nil {
|
||||||
|
return info, err
|
||||||
|
}
|
||||||
|
|
||||||
|
key, err := litestream.WALSegmentPath(c.Path, pos.Generation, pos.Index, pos.Offset)
|
||||||
|
if err != nil {
|
||||||
|
return info, fmt.Errorf("cannot determine wal segment path: %w", err)
|
||||||
|
}
|
||||||
|
startTime := time.Now()
|
||||||
|
|
||||||
|
rc := internal.NewReadCounter(rd)
|
||||||
|
|
||||||
|
blobURL := c.containerURL.NewBlockBlobURL(key)
|
||||||
|
if _, err := azblob.UploadStreamToBlockBlob(ctx, rc, blobURL, azblob.UploadStreamToBlockBlobOptions{
|
||||||
|
BlobHTTPHeaders: azblob.BlobHTTPHeaders{ContentType: "application/octet-stream"},
|
||||||
|
BlobAccessTier: azblob.DefaultAccessTier,
|
||||||
|
}); err != nil {
|
||||||
|
return info, err
|
||||||
|
}
|
||||||
|
|
||||||
|
internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "PUT").Inc()
|
||||||
|
internal.OperationBytesCounterVec.WithLabelValues(ReplicaClientType, "PUT").Add(float64(rc.N()))
|
||||||
|
|
||||||
|
return litestream.WALSegmentInfo{
|
||||||
|
Generation: pos.Generation,
|
||||||
|
Index: pos.Index,
|
||||||
|
Offset: pos.Offset,
|
||||||
|
Size: rc.N(),
|
||||||
|
CreatedAt: startTime.UTC(),
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// WALSegmentReader returns a reader for a section of WAL data at the given index.
|
||||||
|
// Returns os.ErrNotExist if no matching index/offset is found.
|
||||||
|
func (c *ReplicaClient) WALSegmentReader(ctx context.Context, pos litestream.Pos) (io.ReadCloser, error) {
|
||||||
|
if err := c.Init(ctx); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
key, err := litestream.WALSegmentPath(c.Path, pos.Generation, pos.Index, pos.Offset)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("cannot determine wal segment path: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
blobURL := c.containerURL.NewBlobURL(key)
|
||||||
|
resp, err := blobURL.Download(ctx, 0, 0, azblob.BlobAccessConditions{}, false, azblob.ClientProvidedKeyOptions{})
|
||||||
|
if isNotExists(err) {
|
||||||
|
return nil, os.ErrNotExist
|
||||||
|
} else if err != nil {
|
||||||
|
return nil, fmt.Errorf("cannot start new reader for %q: %w", key, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "GET").Inc()
|
||||||
|
internal.OperationBytesCounterVec.WithLabelValues(ReplicaClientType, "GET").Add(float64(resp.ContentLength()))
|
||||||
|
|
||||||
|
return resp.Body(azblob.RetryReaderOptions{}), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteWALSegments deletes WAL segments with at the given positions.
|
||||||
|
func (c *ReplicaClient) DeleteWALSegments(ctx context.Context, a []litestream.Pos) error {
|
||||||
|
if err := c.Init(ctx); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, pos := range a {
|
||||||
|
key, err := litestream.WALSegmentPath(c.Path, pos.Generation, pos.Index, pos.Offset)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("cannot determine wal segment path: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "DELETE").Inc()
|
||||||
|
|
||||||
|
blobURL := c.containerURL.NewBlobURL(key)
|
||||||
|
if _, err := blobURL.Delete(ctx, azblob.DeleteSnapshotsOptionNone, azblob.BlobAccessConditions{}); isNotExists(err) {
|
||||||
|
continue
|
||||||
|
} else if err != nil {
|
||||||
|
return fmt.Errorf("cannot delete wal segment %q: %w", key, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type snapshotIterator struct {
|
||||||
|
client *ReplicaClient
|
||||||
|
generation string
|
||||||
|
|
||||||
|
ch chan litestream.SnapshotInfo
|
||||||
|
g errgroup.Group
|
||||||
|
ctx context.Context
|
||||||
|
cancel func()
|
||||||
|
|
||||||
|
info litestream.SnapshotInfo
|
||||||
|
err error
|
||||||
|
}
|
||||||
|
|
||||||
|
func newSnapshotIterator(ctx context.Context, generation string, client *ReplicaClient) *snapshotIterator {
|
||||||
|
itr := &snapshotIterator{
|
||||||
|
client: client,
|
||||||
|
generation: generation,
|
||||||
|
ch: make(chan litestream.SnapshotInfo),
|
||||||
|
}
|
||||||
|
|
||||||
|
itr.ctx, itr.cancel = context.WithCancel(ctx)
|
||||||
|
itr.g.Go(itr.fetch)
|
||||||
|
|
||||||
|
return itr
|
||||||
|
}
|
||||||
|
|
||||||
|
// fetch runs in a separate goroutine to fetch pages of objects and stream them to a channel.
|
||||||
|
func (itr *snapshotIterator) fetch() error {
|
||||||
|
defer close(itr.ch)
|
||||||
|
|
||||||
|
dir, err := litestream.SnapshotsPath(itr.client.Path, itr.generation)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("cannot determine snapshots path: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
var marker azblob.Marker
|
||||||
|
for marker.NotDone() {
|
||||||
|
internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "LIST").Inc()
|
||||||
|
|
||||||
|
resp, err := itr.client.containerURL.ListBlobsFlatSegment(itr.ctx, marker, azblob.ListBlobsSegmentOptions{Prefix: dir + "/"})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
marker = resp.NextMarker
|
||||||
|
|
||||||
|
for _, item := range resp.Segment.BlobItems {
|
||||||
|
key := path.Base(item.Name)
|
||||||
|
index, err := litestream.ParseSnapshotPath(key)
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
info := litestream.SnapshotInfo{
|
||||||
|
Generation: itr.generation,
|
||||||
|
Index: index,
|
||||||
|
Size: *item.Properties.ContentLength,
|
||||||
|
CreatedAt: item.Properties.CreationTime.UTC(),
|
||||||
|
}
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-itr.ctx.Done():
|
||||||
|
case itr.ch <- info:
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (itr *snapshotIterator) Close() (err error) {
|
||||||
|
err = itr.err
|
||||||
|
|
||||||
|
// Cancel context and wait for error group to finish.
|
||||||
|
itr.cancel()
|
||||||
|
if e := itr.g.Wait(); e != nil && err == nil {
|
||||||
|
err = e
|
||||||
|
}
|
||||||
|
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (itr *snapshotIterator) Next() bool {
|
||||||
|
// Exit if an error has already occurred.
|
||||||
|
if itr.err != nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return false if context was canceled or if there are no more snapshots.
|
||||||
|
// Otherwise fetch the next snapshot and store it on the iterator.
|
||||||
|
select {
|
||||||
|
case <-itr.ctx.Done():
|
||||||
|
return false
|
||||||
|
case info, ok := <-itr.ch:
|
||||||
|
if !ok {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
itr.info = info
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (itr *snapshotIterator) Err() error { return itr.err }
|
||||||
|
|
||||||
|
func (itr *snapshotIterator) Snapshot() litestream.SnapshotInfo {
|
||||||
|
return itr.info
|
||||||
|
}
|
||||||
|
|
||||||
|
type walSegmentIterator struct {
|
||||||
|
client *ReplicaClient
|
||||||
|
generation string
|
||||||
|
|
||||||
|
ch chan litestream.WALSegmentInfo
|
||||||
|
g errgroup.Group
|
||||||
|
ctx context.Context
|
||||||
|
cancel func()
|
||||||
|
|
||||||
|
info litestream.WALSegmentInfo
|
||||||
|
err error
|
||||||
|
}
|
||||||
|
|
||||||
|
func newWALSegmentIterator(ctx context.Context, generation string, client *ReplicaClient) *walSegmentIterator {
|
||||||
|
itr := &walSegmentIterator{
|
||||||
|
client: client,
|
||||||
|
generation: generation,
|
||||||
|
ch: make(chan litestream.WALSegmentInfo),
|
||||||
|
}
|
||||||
|
|
||||||
|
itr.ctx, itr.cancel = context.WithCancel(ctx)
|
||||||
|
itr.g.Go(itr.fetch)
|
||||||
|
|
||||||
|
return itr
|
||||||
|
}
|
||||||
|
|
||||||
|
// fetch runs in a separate goroutine to fetch pages of objects and stream them to a channel.
|
||||||
|
func (itr *walSegmentIterator) fetch() error {
|
||||||
|
defer close(itr.ch)
|
||||||
|
|
||||||
|
dir, err := litestream.WALPath(itr.client.Path, itr.generation)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("cannot determine wal path: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
var marker azblob.Marker
|
||||||
|
for marker.NotDone() {
|
||||||
|
internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "LIST").Inc()
|
||||||
|
|
||||||
|
resp, err := itr.client.containerURL.ListBlobsFlatSegment(itr.ctx, marker, azblob.ListBlobsSegmentOptions{Prefix: dir + "/"})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
marker = resp.NextMarker
|
||||||
|
|
||||||
|
for _, item := range resp.Segment.BlobItems {
|
||||||
|
key := path.Base(item.Name)
|
||||||
|
index, offset, err := litestream.ParseWALSegmentPath(key)
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
info := litestream.WALSegmentInfo{
|
||||||
|
Generation: itr.generation,
|
||||||
|
Index: index,
|
||||||
|
Offset: offset,
|
||||||
|
Size: *item.Properties.ContentLength,
|
||||||
|
CreatedAt: item.Properties.CreationTime.UTC(),
|
||||||
|
}
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-itr.ctx.Done():
|
||||||
|
case itr.ch <- info:
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (itr *walSegmentIterator) Close() (err error) {
|
||||||
|
err = itr.err
|
||||||
|
|
||||||
|
// Cancel context and wait for error group to finish.
|
||||||
|
itr.cancel()
|
||||||
|
if e := itr.g.Wait(); e != nil && err == nil {
|
||||||
|
err = e
|
||||||
|
}
|
||||||
|
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (itr *walSegmentIterator) Next() bool {
|
||||||
|
// Exit if an error has already occurred.
|
||||||
|
if itr.err != nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return false if context was canceled or if there are no more segments.
|
||||||
|
// Otherwise fetch the next segment and store it on the iterator.
|
||||||
|
select {
|
||||||
|
case <-itr.ctx.Done():
|
||||||
|
return false
|
||||||
|
case info, ok := <-itr.ch:
|
||||||
|
if !ok {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
itr.info = info
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (itr *walSegmentIterator) Err() error { return itr.err }
|
||||||
|
|
||||||
|
func (itr *walSegmentIterator) WALSegment() litestream.WALSegmentInfo {
|
||||||
|
return itr.info
|
||||||
|
}
|
||||||
|
|
||||||
|
func isNotExists(err error) bool {
|
||||||
|
switch err := err.(type) {
|
||||||
|
case azblob.StorageError:
|
||||||
|
return err.ServiceCode() == azblob.ServiceCodeBlobNotFound
|
||||||
|
default:
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -2,7 +2,6 @@ package main
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"errors"
|
|
||||||
"flag"
|
"flag"
|
||||||
"fmt"
|
"fmt"
|
||||||
"os"
|
"os"
|
||||||
@@ -15,30 +14,31 @@ type DatabasesCommand struct{}
|
|||||||
|
|
||||||
// Run executes the command.
|
// Run executes the command.
|
||||||
func (c *DatabasesCommand) Run(ctx context.Context, args []string) (err error) {
|
func (c *DatabasesCommand) Run(ctx context.Context, args []string) (err error) {
|
||||||
var configPath string
|
|
||||||
fs := flag.NewFlagSet("litestream-databases", flag.ContinueOnError)
|
fs := flag.NewFlagSet("litestream-databases", flag.ContinueOnError)
|
||||||
registerConfigFlag(fs, &configPath)
|
configPath, noExpandEnv := registerConfigFlag(fs)
|
||||||
fs.Usage = c.Usage
|
fs.Usage = c.Usage
|
||||||
if err := fs.Parse(args); err != nil {
|
if err := fs.Parse(args); err != nil {
|
||||||
return err
|
return err
|
||||||
} else if fs.NArg() != 0 {
|
} else if fs.NArg() != 0 {
|
||||||
return fmt.Errorf("too many argument")
|
return fmt.Errorf("too many arguments")
|
||||||
}
|
}
|
||||||
|
|
||||||
// Load configuration.
|
// Load configuration.
|
||||||
if configPath == "" {
|
if *configPath == "" {
|
||||||
return errors.New("-config required")
|
*configPath = DefaultConfigPath()
|
||||||
}
|
}
|
||||||
config, err := ReadConfigFile(configPath)
|
config, err := ReadConfigFile(*configPath, !*noExpandEnv)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// List all databases.
|
// List all databases.
|
||||||
w := tabwriter.NewWriter(os.Stdout, 0, 8, 1, '\t', 0)
|
w := tabwriter.NewWriter(os.Stdout, 0, 8, 2, ' ', 0)
|
||||||
|
defer w.Flush()
|
||||||
|
|
||||||
fmt.Fprintln(w, "path\treplicas")
|
fmt.Fprintln(w, "path\treplicas")
|
||||||
for _, dbConfig := range config.DBs {
|
for _, dbConfig := range config.DBs {
|
||||||
db, err := newDBFromConfig(&config, dbConfig)
|
db, err := NewDBFromConfig(dbConfig)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -53,7 +53,6 @@ func (c *DatabasesCommand) Run(ctx context.Context, args []string) (err error) {
|
|||||||
strings.Join(replicaNames, ","),
|
strings.Join(replicaNames, ","),
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
w.Flush()
|
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -73,6 +72,9 @@ Arguments:
|
|||||||
Specifies the configuration file.
|
Specifies the configuration file.
|
||||||
Defaults to %s
|
Defaults to %s
|
||||||
|
|
||||||
|
-no-expand-env
|
||||||
|
Disables environment variable expansion in configuration file.
|
||||||
|
|
||||||
`[1:],
|
`[1:],
|
||||||
DefaultConfigPath(),
|
DefaultConfigPath(),
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -2,14 +2,14 @@ package main
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"errors"
|
|
||||||
"flag"
|
"flag"
|
||||||
"fmt"
|
"fmt"
|
||||||
"log"
|
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"sort"
|
||||||
"text/tabwriter"
|
"text/tabwriter"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/benbjohnson/litestream"
|
||||||
)
|
)
|
||||||
|
|
||||||
// GenerationsCommand represents a command to list all generations for a database.
|
// GenerationsCommand represents a command to list all generations for a database.
|
||||||
@@ -17,83 +17,99 @@ type GenerationsCommand struct{}
|
|||||||
|
|
||||||
// Run executes the command.
|
// Run executes the command.
|
||||||
func (c *GenerationsCommand) Run(ctx context.Context, args []string) (err error) {
|
func (c *GenerationsCommand) Run(ctx context.Context, args []string) (err error) {
|
||||||
var configPath string
|
|
||||||
fs := flag.NewFlagSet("litestream-generations", flag.ContinueOnError)
|
fs := flag.NewFlagSet("litestream-generations", flag.ContinueOnError)
|
||||||
registerConfigFlag(fs, &configPath)
|
configPath, noExpandEnv := registerConfigFlag(fs)
|
||||||
replicaName := fs.String("replica", "", "replica name")
|
replicaName := fs.String("replica", "", "replica name")
|
||||||
fs.Usage = c.Usage
|
fs.Usage = c.Usage
|
||||||
if err := fs.Parse(args); err != nil {
|
if err := fs.Parse(args); err != nil {
|
||||||
return err
|
return err
|
||||||
} else if fs.NArg() == 0 || fs.Arg(0) == "" {
|
} else if fs.NArg() == 0 || fs.Arg(0) == "" {
|
||||||
return fmt.Errorf("database path required")
|
return fmt.Errorf("database path or replica URL required")
|
||||||
} else if fs.NArg() > 1 {
|
} else if fs.NArg() > 1 {
|
||||||
return fmt.Errorf("too many arguments")
|
return fmt.Errorf("too many arguments")
|
||||||
}
|
}
|
||||||
|
|
||||||
// Load configuration.
|
var db *litestream.DB
|
||||||
if configPath == "" {
|
var r *litestream.Replica
|
||||||
return errors.New("-config required")
|
dbUpdatedAt := time.Now()
|
||||||
}
|
if isURL(fs.Arg(0)) {
|
||||||
config, err := ReadConfigFile(configPath)
|
if *configPath != "" {
|
||||||
if err != nil {
|
return fmt.Errorf("cannot specify a replica URL and the -config flag")
|
||||||
return err
|
}
|
||||||
|
if r, err = NewReplicaFromConfig(&ReplicaConfig{URL: fs.Arg(0)}, nil); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if *configPath == "" {
|
||||||
|
*configPath = DefaultConfigPath()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Load configuration.
|
||||||
|
config, err := ReadConfigFile(*configPath, !*noExpandEnv)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Lookup database from configuration file by path.
|
||||||
|
if path, err := expand(fs.Arg(0)); err != nil {
|
||||||
|
return err
|
||||||
|
} else if dbc := config.DBConfig(path); dbc == nil {
|
||||||
|
return fmt.Errorf("database not found in config: %s", path)
|
||||||
|
} else if db, err = NewDBFromConfig(dbc); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Filter by replica, if specified.
|
||||||
|
if *replicaName != "" {
|
||||||
|
if r = db.Replica(*replicaName); r == nil {
|
||||||
|
return fmt.Errorf("replica %q not found for database %q", *replicaName, db.Path())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Determine last time database or WAL was updated.
|
||||||
|
if dbUpdatedAt, err = db.UpdatedAt(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Determine absolute path for database.
|
var replicas []*litestream.Replica
|
||||||
dbPath, err := filepath.Abs(fs.Arg(0))
|
if r != nil {
|
||||||
if err != nil {
|
replicas = []*litestream.Replica{r}
|
||||||
return err
|
} else {
|
||||||
}
|
replicas = db.Replicas
|
||||||
|
|
||||||
// Instantiate DB from from configuration.
|
|
||||||
dbConfig := config.DBConfig(dbPath)
|
|
||||||
if dbConfig == nil {
|
|
||||||
return fmt.Errorf("database not found in config: %s", dbPath)
|
|
||||||
}
|
|
||||||
db, err := newDBFromConfig(&config, dbConfig)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Determine last time database or WAL was updated.
|
|
||||||
updatedAt, err := db.UpdatedAt()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// List each generation.
|
// List each generation.
|
||||||
w := tabwriter.NewWriter(os.Stdout, 0, 8, 1, '\t', 0)
|
w := tabwriter.NewWriter(os.Stdout, 0, 8, 2, ' ', 0)
|
||||||
|
defer w.Flush()
|
||||||
|
|
||||||
fmt.Fprintln(w, "name\tgeneration\tlag\tstart\tend")
|
fmt.Fprintln(w, "name\tgeneration\tlag\tstart\tend")
|
||||||
for _, r := range db.Replicas {
|
for _, r := range replicas {
|
||||||
if *replicaName != "" && r.Name() != *replicaName {
|
generations, err := r.Client.Generations(ctx)
|
||||||
|
if err != nil {
|
||||||
|
r.Logger().Error("cannot list generations", "error", err)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
generations, err := r.Generations(ctx)
|
sort.Strings(generations)
|
||||||
if err != nil {
|
|
||||||
log.Printf("%s: cannot list generations: %s", r.Name(), err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Iterate over each generation for the replica.
|
// Iterate over each generation for the replica.
|
||||||
for _, generation := range generations {
|
for _, generation := range generations {
|
||||||
stats, err := r.GenerationStats(ctx, generation)
|
createdAt, updatedAt, err := r.GenerationTimeBounds(ctx, generation)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Printf("%s: cannot find generation stats: %s", r.Name(), err)
|
r.Logger().Error("cannot determine generation time bounds", "error", err)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n",
|
fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n",
|
||||||
r.Name(),
|
r.Name(),
|
||||||
generation,
|
generation,
|
||||||
truncateDuration(updatedAt.Sub(stats.UpdatedAt)).String(),
|
truncateDuration(dbUpdatedAt.Sub(updatedAt)).String(),
|
||||||
stats.CreatedAt.Format(time.RFC3339),
|
createdAt.Format(time.RFC3339),
|
||||||
stats.UpdatedAt.Format(time.RFC3339),
|
updatedAt.Format(time.RFC3339),
|
||||||
)
|
)
|
||||||
w.Flush()
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
w.Flush()
|
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -101,17 +117,24 @@ func (c *GenerationsCommand) Run(ctx context.Context, args []string) (err error)
|
|||||||
// Usage prints the help message to STDOUT.
|
// Usage prints the help message to STDOUT.
|
||||||
func (c *GenerationsCommand) Usage() {
|
func (c *GenerationsCommand) Usage() {
|
||||||
fmt.Printf(`
|
fmt.Printf(`
|
||||||
The generations command lists all generations for a database. It also lists
|
The generations command lists all generations for a database or replica. It also
|
||||||
stats about their lag behind the primary database and the time range they cover.
|
lists stats about their lag behind the primary database and the time range they
|
||||||
|
cover.
|
||||||
|
|
||||||
Usage:
|
Usage:
|
||||||
|
|
||||||
litestream generations [arguments] DB
|
litestream generations [arguments] DB_PATH
|
||||||
|
|
||||||
|
litestream generations [arguments] REPLICA_URL
|
||||||
|
|
||||||
Arguments:
|
Arguments:
|
||||||
|
|
||||||
-config PATH
|
-config PATH
|
||||||
Specifies the configuration file. Defaults to %s
|
Specifies the configuration file.
|
||||||
|
Defaults to %s
|
||||||
|
|
||||||
|
-no-expand-env
|
||||||
|
Disables environment variable expansion in configuration file.
|
||||||
|
|
||||||
-replica NAME
|
-replica NAME
|
||||||
Optional, filters by replica.
|
Optional, filters by replica.
|
||||||
|
|||||||
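The generations command above now accepts either a database path or a bare replica URL. The following is a hedged sketch of that dispatch, reusing only names visible in this diff (`isURL`, `NewReplicaFromConfig`, `ReplicaConfig`); the error text matches the command's own.

package main

import (
	"fmt"

	"github.com/benbjohnson/litestream"
)

// replicaFromArg is illustrative only: a replica URL is self-describing, so it
// cannot be combined with -config; a plain path is resolved via the config file.
func replicaFromArg(arg, configPath string) (*litestream.Replica, error) {
	if !isURL(arg) {
		return nil, nil // caller looks up arg as a database path in the config
	}
	if configPath != "" {
		return nil, fmt.Errorf("cannot specify a replica URL and the -config flag")
	}
	return NewReplicaFromConfig(&ReplicaConfig{URL: arg}, nil)
}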
@@ -2,20 +2,27 @@ package main

 import (
 	"context"
+	"errors"
 	"flag"
 	"fmt"
-	"io/ioutil"
-	"log"
+	"log/slog"
 	"net/url"
 	"os"
 	"os/user"
 	"path"
 	"path/filepath"
+	"regexp"
+	"strconv"
 	"strings"
 	"time"

+	"filippo.io/age"
 	"github.com/benbjohnson/litestream"
+	"github.com/benbjohnson/litestream/abs"
+	"github.com/benbjohnson/litestream/file"
+	"github.com/benbjohnson/litestream/gcs"
 	"github.com/benbjohnson/litestream/s3"
+	"github.com/benbjohnson/litestream/sftp"
 	_ "github.com/mattn/go-sqlite3"
 	"gopkg.in/yaml.v2"
 )
@@ -25,14 +32,15 @@ var (
|
|||||||
Version = "(development build)"
|
Version = "(development build)"
|
||||||
)
|
)
|
||||||
|
|
||||||
func main() {
|
// errStop is a terminal error indicating that the program should quit.
|
||||||
log.SetFlags(0)
|
var errStop = errors.New("stop")
|
||||||
|
|
||||||
|
func main() {
|
||||||
m := NewMain()
|
m := NewMain()
|
||||||
if err := m.Run(context.Background(), os.Args[1:]); err == flag.ErrHelp {
|
if err := m.Run(context.Background(), os.Args[1:]); err == flag.ErrHelp || err == errStop {
|
||||||
os.Exit(1)
|
os.Exit(1)
|
||||||
} else if err != nil {
|
} else if err != nil {
|
||||||
fmt.Fprintln(os.Stderr, err)
|
slog.Error("failed to run", "error", err)
|
||||||
os.Exit(1)
|
os.Exit(1)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -47,6 +55,17 @@ func NewMain() *Main {
|
|||||||
|
|
||||||
// Run executes the program.
|
// Run executes the program.
|
||||||
func (m *Main) Run(ctx context.Context, args []string) (err error) {
|
func (m *Main) Run(ctx context.Context, args []string) (err error) {
|
||||||
|
// Execute replication command if running as a Windows service.
|
||||||
|
if isService, err := isWindowsService(); err != nil {
|
||||||
|
return err
|
||||||
|
} else if isService {
|
||||||
|
return runWindowsService(ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Copy "LITESTEAM" environment credentials.
|
||||||
|
applyLitestreamEnv()
|
||||||
|
|
||||||
|
// Extract command name.
|
||||||
var cmd string
|
var cmd string
|
||||||
if len(args) > 0 {
|
if len(args) > 0 {
|
||||||
cmd, args = args[0], args[1:]
|
cmd, args = args[0], args[1:]
|
||||||
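The `applyLitestreamEnv` call above maps `LITESTREAM_ACCESS_KEY_ID` and `LITESTREAM_SECRET_ACCESS_KEY` onto their `AWS_*` counterparts, but only when the AWS variables are not already set (the function body appears later in this diff). A small sketch of that precedence rule; the values are placeholders.

package main

import (
	"fmt"
	"os"
)

// exampleLitestreamEnv demonstrates that an existing AWS_* variable wins and
// the LITESTREAM_* value is only used as a fallback.
func exampleLitestreamEnv() {
	os.Setenv("LITESTREAM_ACCESS_KEY_ID", "from-litestream")
	os.Setenv("AWS_ACCESS_KEY_ID", "from-aws")

	applyLitestreamEnv()

	fmt.Println(os.Getenv("AWS_ACCESS_KEY_ID")) // prints "from-aws"
}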
@@ -58,7 +77,45 @@ func (m *Main) Run(ctx context.Context, args []string) (err error) {
|
|||||||
case "generations":
|
case "generations":
|
||||||
return (&GenerationsCommand{}).Run(ctx, args)
|
return (&GenerationsCommand{}).Run(ctx, args)
|
||||||
case "replicate":
|
case "replicate":
|
||||||
return (&ReplicateCommand{}).Run(ctx, args)
|
c := NewReplicateCommand()
|
||||||
|
if err := c.ParseFlags(ctx, args); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Setup signal handler.
|
||||||
|
signalCh := signalChan()
|
||||||
|
|
||||||
|
if err := c.Run(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Wait for signal to stop program.
|
||||||
|
select {
|
||||||
|
case err = <-c.execCh:
|
||||||
|
slog.Info("subprocess exited, litestream shutting down")
|
||||||
|
case sig := <-signalCh:
|
||||||
|
slog.Info("signal received, litestream shutting down")
|
||||||
|
|
||||||
|
if c.cmd != nil {
|
||||||
|
slog.Info("sending signal to exec process")
|
||||||
|
if err := c.cmd.Process.Signal(sig); err != nil {
|
||||||
|
return fmt.Errorf("cannot signal exec process: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
slog.Info("waiting for exec process to close")
|
||||||
|
if err := <-c.execCh; err != nil && !strings.HasPrefix(err.Error(), "signal:") {
|
||||||
|
return fmt.Errorf("cannot wait for exec process: %w", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Gracefully close.
|
||||||
|
if e := c.Close(); e != nil && err == nil {
|
||||||
|
err = e
|
||||||
|
}
|
||||||
|
slog.Info("litestream shut down")
|
||||||
|
return err
|
||||||
|
|
||||||
case "restore":
|
case "restore":
|
||||||
return (&RestoreCommand{}).Run(ctx, args)
|
return (&RestoreCommand{}).Run(ctx, args)
|
||||||
case "snapshots":
|
case "snapshots":
|
||||||
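When the replicate command forwards a signal to its -exec subprocess, the child's Wait error reads like "signal: interrupt", which is why the shutdown path above ignores errors with that prefix. A standalone sketch of that behavior on a Unix system; "sleep" is only an example child process.

package main

import (
	"os"
	"os/exec"
	"strings"
)

// exampleWaitAfterSignal shows why a "signal:" error after a forwarded signal
// is treated as a clean exit rather than a failure.
func exampleWaitAfterSignal() error {
	cmd := exec.Command("sleep", "60")
	if err := cmd.Start(); err != nil {
		return err
	}
	_ = cmd.Process.Signal(os.Interrupt)
	if err := cmd.Wait(); err != nil && !strings.HasPrefix(err.Error(), "signal:") {
		return err // a real failure, not just "signal: interrupt"
	}
	return nil
}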
@@ -87,21 +144,16 @@ Usage:
|
|||||||
|
|
||||||
The commands are:
|
The commands are:
|
||||||
|
|
||||||
|
databases list databases specified in config file
|
||||||
generations list available generations for a database
|
generations list available generations for a database
|
||||||
replicate runs a server to replicate databases
|
replicate runs a server to replicate databases
|
||||||
restore recovers database backup from a replica
|
restore recovers database backup from a replica
|
||||||
snapshots list available snapshots for a database
|
snapshots list available snapshots for a database
|
||||||
validate checks replica to ensure a consistent state with primary
|
version prints the binary version
|
||||||
version prints the version
|
|
||||||
wal list available WAL files for a database
|
wal list available WAL files for a database
|
||||||
`[1:])
|
`[1:])
|
||||||
}
|
}
|
||||||
|
|
||||||
// Default configuration settings.
|
|
||||||
const (
|
|
||||||
DefaultAddr = ":9090"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Config represents a configuration file for the litestream daemon.
|
// Config represents a configuration file for the litestream daemon.
|
||||||
type Config struct {
|
type Config struct {
|
||||||
// Bind address for serving metrics.
|
// Bind address for serving metrics.
|
||||||
@@ -110,28 +162,42 @@ type Config struct {
|
|||||||
// List of databases to manage.
|
// List of databases to manage.
|
||||||
DBs []*DBConfig `yaml:"dbs"`
|
DBs []*DBConfig `yaml:"dbs"`
|
||||||
|
|
||||||
|
// Subcommand to execute during replication.
|
||||||
|
// Litestream will shut down when the subcommand exits.
|
||||||
|
Exec string `yaml:"exec"`
|
||||||
|
|
||||||
// Global S3 settings
|
// Global S3 settings
|
||||||
AccessKeyID string `yaml:"access-key-id"`
|
AccessKeyID string `yaml:"access-key-id"`
|
||||||
SecretAccessKey string `yaml:"secret-access-key"`
|
SecretAccessKey string `yaml:"secret-access-key"`
|
||||||
Region string `yaml:"region"`
|
|
||||||
Bucket string `yaml:"bucket"`
|
// Logging
|
||||||
|
Logging LoggingConfig `yaml:"logging"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// Normalize expands paths and parses URL-specified replicas.
|
// LoggingConfig configures logging.
|
||||||
func (c *Config) Normalize() error {
|
type LoggingConfig struct {
|
||||||
for i := range c.DBs {
|
Level string `yaml:"level"`
|
||||||
if err := c.DBs[i].Normalize(); err != nil {
|
Type string `yaml:"type"`
|
||||||
return err
|
Stderr bool `yaml:"stderr"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// propagateGlobalSettings copies global S3 settings to replica configs.
|
||||||
|
func (c *Config) propagateGlobalSettings() {
|
||||||
|
for _, dbc := range c.DBs {
|
||||||
|
for _, rc := range dbc.Replicas {
|
||||||
|
if rc.AccessKeyID == "" {
|
||||||
|
rc.AccessKeyID = c.AccessKeyID
|
||||||
|
}
|
||||||
|
if rc.SecretAccessKey == "" {
|
||||||
|
rc.SecretAccessKey = c.SecretAccessKey
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// DefaultConfig returns a new instance of Config with defaults set.
|
// DefaultConfig returns a new instance of Config with defaults set.
|
||||||
func DefaultConfig() Config {
|
func DefaultConfig() Config {
|
||||||
return Config{
|
return Config{}
|
||||||
Addr: DefaultAddr,
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// DBConfig returns database configuration by path.
|
// DBConfig returns database configuration by path.
|
||||||
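The Config struct above gains an `exec` subcommand, a `logging` section, and global S3 credentials that `propagateGlobalSettings` copies into any replica that leaves them blank. Below is a hedged example configuration exercising those fields; the bucket name, exec command, and paths are invented for illustration, while the YAML keys come from the struct tags in this hunk.

package main

import (
	"os"
	"path/filepath"
)

// exampleWriteConfig writes an illustrative litestream.yml using the new fields.
func exampleWriteConfig(dir string) (string, error) {
	filename := filepath.Join(dir, "litestream.yml")
	data := `access-key-id: EXAMPLEKEY
secret-access-key: EXAMPLESECRET

exec: myapp -addr :8080

logging:
  level: debug
  type: json
  stderr: true

dbs:
  - path: /var/lib/myapp/app.db
    replicas:
      - url: s3://example-bucket/app
`
	return filename, os.WriteFile(filename, []byte(data), 0666)
}

Reading this file with ReadConfigFile(filename, true) copies the global keys into the replica entry and, per the ReadConfigFile changes below, installs a JSON slog handler writing to stderr at debug level.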
@@ -145,129 +211,118 @@ func (c *Config) DBConfig(path string) *DBConfig {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// ReadConfigFile unmarshals config from filename. Expands path if needed.
|
// ReadConfigFile unmarshals config from filename. Expands path if needed.
|
||||||
func ReadConfigFile(filename string) (Config, error) {
|
// If expandEnv is true then environment variables are expanded in the config.
|
||||||
|
func ReadConfigFile(filename string, expandEnv bool) (_ Config, err error) {
|
||||||
config := DefaultConfig()
|
config := DefaultConfig()
|
||||||
|
|
||||||
// Expand filename, if necessary.
|
// Expand filename, if necessary.
|
||||||
if prefix := "~" + string(os.PathSeparator); strings.HasPrefix(filename, prefix) {
|
filename, err = expand(filename)
|
||||||
u, err := user.Current()
|
if err != nil {
|
||||||
if err != nil {
|
return config, err
|
||||||
return config, err
|
|
||||||
} else if u.HomeDir == "" {
|
|
||||||
return config, fmt.Errorf("home directory unset")
|
|
||||||
}
|
|
||||||
filename = filepath.Join(u.HomeDir, strings.TrimPrefix(filename, prefix))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Read & deserialize configuration.
|
// Read configuration.
|
||||||
if buf, err := ioutil.ReadFile(filename); os.IsNotExist(err) {
|
buf, err := os.ReadFile(filename)
|
||||||
|
if os.IsNotExist(err) {
|
||||||
return config, fmt.Errorf("config file not found: %s", filename)
|
return config, fmt.Errorf("config file not found: %s", filename)
|
||||||
} else if err != nil {
|
} else if err != nil {
|
||||||
return config, err
|
return config, err
|
||||||
} else if err := yaml.Unmarshal(buf, &config); err != nil {
|
}
|
||||||
|
|
||||||
|
// Expand environment variables, if enabled.
|
||||||
|
if expandEnv {
|
||||||
|
buf = []byte(os.ExpandEnv(string(buf)))
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := yaml.Unmarshal(buf, &config); err != nil {
|
||||||
return config, err
|
return config, err
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := config.Normalize(); err != nil {
|
// Normalize paths.
|
||||||
return config, err
|
for _, dbConfig := range config.DBs {
|
||||||
|
if dbConfig.Path, err = expand(dbConfig.Path); err != nil {
|
||||||
|
return config, err
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Propagate settings from global config to replica configs.
|
||||||
|
config.propagateGlobalSettings()
|
||||||
|
|
||||||
|
// Configure logging.
|
||||||
|
logOutput := os.Stdout
|
||||||
|
if config.Logging.Stderr {
|
||||||
|
logOutput = os.Stderr
|
||||||
|
}
|
||||||
|
|
||||||
|
logOptions := slog.HandlerOptions{
|
||||||
|
Level: slog.LevelInfo,
|
||||||
|
}
|
||||||
|
|
||||||
|
switch strings.ToUpper(config.Logging.Level) {
|
||||||
|
case "DEBUG":
|
||||||
|
logOptions.Level = slog.LevelDebug
|
||||||
|
case "WARN", "WARNING":
|
||||||
|
logOptions.Level = slog.LevelWarn
|
||||||
|
case "ERROR":
|
||||||
|
logOptions.Level = slog.LevelError
|
||||||
|
}
|
||||||
|
|
||||||
|
var logHandler slog.Handler
|
||||||
|
switch config.Logging.Type {
|
||||||
|
case "json":
|
||||||
|
logHandler = slog.NewJSONHandler(logOutput, &logOptions)
|
||||||
|
case "text", "":
|
||||||
|
logHandler = slog.NewTextHandler(logOutput, &logOptions)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set global default logger.
|
||||||
|
slog.SetDefault(slog.New(logHandler))
|
||||||
|
|
||||||
return config, nil
|
return config, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// DBConfig represents the configuration for a single database.
|
// DBConfig represents the configuration for a single database.
|
||||||
type DBConfig struct {
|
type DBConfig struct {
|
||||||
Path string `yaml:"path"`
|
Path string `yaml:"path"`
|
||||||
|
MetaPath *string `yaml:"meta-path"`
|
||||||
|
MonitorInterval *time.Duration `yaml:"monitor-interval"`
|
||||||
|
CheckpointInterval *time.Duration `yaml:"checkpoint-interval"`
|
||||||
|
MinCheckpointPageN *int `yaml:"min-checkpoint-page-count"`
|
||||||
|
MaxCheckpointPageN *int `yaml:"max-checkpoint-page-count"`
|
||||||
|
|
||||||
Replicas []*ReplicaConfig `yaml:"replicas"`
|
Replicas []*ReplicaConfig `yaml:"replicas"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// Normalize expands paths and parses URL-specified replicas.
|
// NewDBFromConfig instantiates a DB based on a configuration.
|
||||||
func (c *DBConfig) Normalize() error {
|
func NewDBFromConfig(dbc *DBConfig) (*litestream.DB, error) {
|
||||||
for i := range c.Replicas {
|
path, err := expand(dbc.Path)
|
||||||
if err := c.Replicas[i].Normalize(); err != nil {
|
if err != nil {
|
||||||
return err
|
return nil, err
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ReplicaConfig represents the configuration for a single replica in a database.
|
|
||||||
type ReplicaConfig struct {
|
|
||||||
Type string `yaml:"type"` // "file", "s3"
|
|
||||||
Name string `yaml:"name"` // name of replica, optional.
|
|
||||||
Path string `yaml:"path"`
|
|
||||||
Retention time.Duration `yaml:"retention"`
|
|
||||||
RetentionCheckInterval time.Duration `yaml:"retention-check-interval"`
|
|
||||||
SyncInterval time.Duration `yaml:"sync-interval"` // s3 only
|
|
||||||
ValidationInterval time.Duration `yaml:"validation-interval"`
|
|
||||||
|
|
||||||
// S3 settings
|
|
||||||
AccessKeyID string `yaml:"access-key-id"`
|
|
||||||
SecretAccessKey string `yaml:"secret-access-key"`
|
|
||||||
Region string `yaml:"region"`
|
|
||||||
Bucket string `yaml:"bucket"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Normalize expands paths and parses URL-specified replicas.
|
|
||||||
func (c *ReplicaConfig) Normalize() error {
|
|
||||||
// Expand path filename, if necessary.
|
|
||||||
if prefix := "~" + string(os.PathSeparator); strings.HasPrefix(c.Path, prefix) {
|
|
||||||
u, err := user.Current()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
} else if u.HomeDir == "" {
|
|
||||||
return fmt.Errorf("cannot expand replica path, no home directory available")
|
|
||||||
}
|
|
||||||
c.Path = filepath.Join(u.HomeDir, strings.TrimPrefix(c.Path, prefix))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Attempt to parse as URL. Ignore if it is not a URL or if there is no scheme.
|
|
||||||
u, err := url.Parse(c.Path)
|
|
||||||
if err != nil || u.Scheme == "" {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
switch u.Scheme {
|
|
||||||
case "file":
|
|
||||||
u.Scheme = ""
|
|
||||||
c.Type = u.Scheme
|
|
||||||
c.Path = path.Clean(u.String())
|
|
||||||
return nil
|
|
||||||
|
|
||||||
case "s3":
|
|
||||||
c.Type = u.Scheme
|
|
||||||
c.Path = strings.TrimPrefix(path.Clean(u.Path), "/")
|
|
||||||
c.Bucket = u.Host
|
|
||||||
if u := u.User; u != nil {
|
|
||||||
c.AccessKeyID = u.Username()
|
|
||||||
c.SecretAccessKey, _ = u.Password()
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
|
|
||||||
default:
|
|
||||||
return fmt.Errorf("unrecognized replica type in path scheme: %s", c.Path)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// DefaultConfigPath returns the default config path.
|
|
||||||
func DefaultConfigPath() string {
|
|
||||||
if v := os.Getenv("LITESTREAM_CONFIG"); v != "" {
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
return "/etc/litestream.yml"
|
|
||||||
}
|
|
||||||
|
|
||||||
func registerConfigFlag(fs *flag.FlagSet, p *string) {
|
|
||||||
fs.StringVar(p, "config", DefaultConfigPath(), "config path")
|
|
||||||
}
|
|
||||||
|
|
||||||
// newDBFromConfig instantiates a DB based on a configuration.
|
|
||||||
func newDBFromConfig(c *Config, dbc *DBConfig) (*litestream.DB, error) {
|
|
||||||
// Initialize database with given path.
|
// Initialize database with given path.
|
||||||
db := litestream.NewDB(dbc.Path)
|
db := litestream.NewDB(path)
|
||||||
|
|
||||||
|
// Override default database settings if specified in configuration.
|
||||||
|
if dbc.MetaPath != nil {
|
||||||
|
db.SetMetaPath(*dbc.MetaPath)
|
||||||
|
}
|
||||||
|
if dbc.MonitorInterval != nil {
|
||||||
|
db.MonitorInterval = *dbc.MonitorInterval
|
||||||
|
}
|
||||||
|
if dbc.CheckpointInterval != nil {
|
||||||
|
db.CheckpointInterval = *dbc.CheckpointInterval
|
||||||
|
}
|
||||||
|
if dbc.MinCheckpointPageN != nil {
|
||||||
|
db.MinCheckpointPageN = *dbc.MinCheckpointPageN
|
||||||
|
}
|
||||||
|
if dbc.MaxCheckpointPageN != nil {
|
||||||
|
db.MaxCheckpointPageN = *dbc.MaxCheckpointPageN
|
||||||
|
}
|
||||||
|
|
||||||
// Instantiate and attach replicas.
|
// Instantiate and attach replicas.
|
||||||
for _, rc := range dbc.Replicas {
|
for _, rc := range dbc.Replicas {
|
||||||
r, err := newReplicaFromConfig(db, c, dbc, rc)
|
r, err := NewReplicaFromConfig(rc, db)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
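NewDBFromConfig above turns a DBConfig into a litestream.DB, applying any per-database overrides and attaching one replica per entry. A minimal sketch of calling it directly; the path, interval, and bucket are examples only.

package main

import (
	"time"

	"github.com/benbjohnson/litestream"
)

// exampleNewDB builds a DB from an in-memory DBConfig rather than a YAML file.
func exampleNewDB() (*litestream.DB, error) {
	interval := 10 * time.Second
	dbc := &DBConfig{
		Path:            "~/data/app.db", // "~" is resolved by expand()
		MonitorInterval: &interval,
		Replicas: []*ReplicaConfig{
			{URL: "s3://example-bucket/app"},
		},
	}
	return NewDBFromConfig(dbc) // returned DB has one S3 replica attached
}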
@@ -277,85 +332,445 @@ func newDBFromConfig(c *Config, dbc *DBConfig) (*litestream.DB, error) {
|
|||||||
return db, nil
|
return db, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// newReplicaFromConfig instantiates a replica for a DB based on a config.
|
// ReplicaConfig represents the configuration for a single replica in a database.
|
||||||
func newReplicaFromConfig(db *litestream.DB, c *Config, dbc *DBConfig, rc *ReplicaConfig) (litestream.Replica, error) {
|
type ReplicaConfig struct {
|
||||||
switch rc.Type {
|
Type string `yaml:"type"` // "file", "s3"
|
||||||
case "", "file":
|
Name string `yaml:"name"` // name of replica, optional.
|
||||||
return newFileReplicaFromConfig(db, c, dbc, rc)
|
Path string `yaml:"path"`
|
||||||
case "s3":
|
URL string `yaml:"url"`
|
||||||
return newS3ReplicaFromConfig(db, c, dbc, rc)
|
Retention *time.Duration `yaml:"retention"`
|
||||||
default:
|
RetentionCheckInterval *time.Duration `yaml:"retention-check-interval"`
|
||||||
return nil, fmt.Errorf("unknown replica type in config: %q", rc.Type)
|
SyncInterval *time.Duration `yaml:"sync-interval"`
|
||||||
}
|
SnapshotInterval *time.Duration `yaml:"snapshot-interval"`
|
||||||
|
ValidationInterval *time.Duration `yaml:"validation-interval"`
|
||||||
|
|
||||||
|
// S3 settings
|
||||||
|
AccessKeyID string `yaml:"access-key-id"`
|
||||||
|
SecretAccessKey string `yaml:"secret-access-key"`
|
||||||
|
Region string `yaml:"region"`
|
||||||
|
Bucket string `yaml:"bucket"`
|
||||||
|
Endpoint string `yaml:"endpoint"`
|
||||||
|
ForcePathStyle *bool `yaml:"force-path-style"`
|
||||||
|
SkipVerify bool `yaml:"skip-verify"`
|
||||||
|
|
||||||
|
// ABS settings
|
||||||
|
AccountName string `yaml:"account-name"`
|
||||||
|
AccountKey string `yaml:"account-key"`
|
||||||
|
|
||||||
|
// SFTP settings
|
||||||
|
Host string `yaml:"host"`
|
||||||
|
User string `yaml:"user"`
|
||||||
|
Password string `yaml:"password"`
|
||||||
|
KeyPath string `yaml:"key-path"`
|
||||||
|
|
||||||
|
// Encryption identities and recipients
|
||||||
|
Age struct {
|
||||||
|
Identities []string `yaml:"identities"`
|
||||||
|
Recipients []string `yaml:"recipients"`
|
||||||
|
} `yaml:"age"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// newFileReplicaFromConfig returns a new instance of FileReplica build from config.
|
// NewReplicaFromConfig instantiates a replica for a DB based on a config.
|
||||||
func newFileReplicaFromConfig(db *litestream.DB, c *Config, dbc *DBConfig, rc *ReplicaConfig) (*litestream.FileReplica, error) {
|
func NewReplicaFromConfig(c *ReplicaConfig, db *litestream.DB) (_ *litestream.Replica, err error) {
|
||||||
if rc.Path == "" {
|
// Ensure user did not specify URL in path.
|
||||||
return nil, fmt.Errorf("%s: file replica path required", db.Path())
|
if isURL(c.Path) {
|
||||||
}
|
return nil, fmt.Errorf("replica path cannot be a url, please use the 'url' field instead: %s", c.Path)
|
||||||
|
|
||||||
r := litestream.NewFileReplica(db, rc.Name, rc.Path)
|
|
||||||
if v := rc.Retention; v > 0 {
|
|
||||||
r.Retention = v
|
|
||||||
}
|
|
||||||
if v := rc.RetentionCheckInterval; v > 0 {
|
|
||||||
r.RetentionCheckInterval = v
|
|
||||||
}
|
|
||||||
if v := rc.ValidationInterval; v > 0 {
|
|
||||||
r.ValidationInterval = v
|
|
||||||
}
|
|
||||||
return r, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// newS3ReplicaFromConfig returns a new instance of S3Replica build from config.
|
|
||||||
func newS3ReplicaFromConfig(db *litestream.DB, c *Config, dbc *DBConfig, rc *ReplicaConfig) (*s3.Replica, error) {
|
|
||||||
// Use global or replica-specific S3 settings.
|
|
||||||
accessKeyID := c.AccessKeyID
|
|
||||||
if v := rc.AccessKeyID; v != "" {
|
|
||||||
accessKeyID = v
|
|
||||||
}
|
|
||||||
secretAccessKey := c.SecretAccessKey
|
|
||||||
if v := rc.SecretAccessKey; v != "" {
|
|
||||||
secretAccessKey = v
|
|
||||||
}
|
|
||||||
bucket := c.Bucket
|
|
||||||
if v := rc.Bucket; v != "" {
|
|
||||||
bucket = v
|
|
||||||
}
|
|
||||||
region := c.Region
|
|
||||||
if v := rc.Region; v != "" {
|
|
||||||
region = v
|
|
||||||
}
|
|
||||||
|
|
||||||
// Ensure required settings are set.
|
|
||||||
if accessKeyID == "" {
|
|
||||||
return nil, fmt.Errorf("%s: s3 access key id required", db.Path())
|
|
||||||
} else if secretAccessKey == "" {
|
|
||||||
return nil, fmt.Errorf("%s: s3 secret access key required", db.Path())
|
|
||||||
} else if bucket == "" {
|
|
||||||
return nil, fmt.Errorf("%s: s3 bucket required", db.Path())
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Build replica.
|
// Build replica.
|
||||||
r := s3.NewReplica(db, rc.Name)
|
r := litestream.NewReplica(db, c.Name)
|
||||||
r.AccessKeyID = accessKeyID
|
if v := c.Retention; v != nil {
|
||||||
r.SecretAccessKey = secretAccessKey
|
r.Retention = *v
|
||||||
r.Region = region
|
}
|
||||||
r.Bucket = bucket
|
if v := c.RetentionCheckInterval; v != nil {
|
||||||
r.Path = rc.Path
|
r.RetentionCheckInterval = *v
|
||||||
|
}
|
||||||
|
if v := c.SyncInterval; v != nil {
|
||||||
|
r.SyncInterval = *v
|
||||||
|
}
|
||||||
|
if v := c.SnapshotInterval; v != nil {
|
||||||
|
r.SnapshotInterval = *v
|
||||||
|
}
|
||||||
|
if v := c.ValidationInterval; v != nil {
|
||||||
|
r.ValidationInterval = *v
|
||||||
|
}
|
||||||
|
for _, str := range c.Age.Identities {
|
||||||
|
identities, err := age.ParseIdentities(strings.NewReader(str))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
if v := rc.Retention; v > 0 {
|
r.AgeIdentities = append(r.AgeIdentities, identities...)
|
||||||
r.Retention = v
|
|
||||||
}
|
}
|
||||||
if v := rc.RetentionCheckInterval; v > 0 {
|
for _, str := range c.Age.Recipients {
|
||||||
r.RetentionCheckInterval = v
|
recipients, err := age.ParseRecipients(strings.NewReader(str))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
r.AgeRecipients = append(r.AgeRecipients, recipients...)
|
||||||
}
|
}
|
||||||
if v := rc.SyncInterval; v > 0 {
|
|
||||||
r.SyncInterval = v
|
// Build and set client on replica.
|
||||||
}
|
switch c.ReplicaType() {
|
||||||
if v := rc.ValidationInterval; v > 0 {
|
case "file":
|
||||||
r.ValidationInterval = v
|
if r.Client, err = newFileReplicaClientFromConfig(c, r); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
case "s3":
|
||||||
|
if r.Client, err = newS3ReplicaClientFromConfig(c, r); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
case "gcs":
|
||||||
|
if r.Client, err = newGCSReplicaClientFromConfig(c, r); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
case "abs":
|
||||||
|
if r.Client, err = newABSReplicaClientFromConfig(c, r); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
case "sftp":
|
||||||
|
if r.Client, err = newSFTPReplicaClientFromConfig(c, r); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
return nil, fmt.Errorf("unknown replica type in config: %q", c.Type)
|
||||||
}
|
}
|
||||||
|
|
||||||
return r, nil
|
return r, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// newFileReplicaClientFromConfig returns a new instance of file.ReplicaClient built from config.
|
||||||
|
func newFileReplicaClientFromConfig(c *ReplicaConfig, r *litestream.Replica) (_ *file.ReplicaClient, err error) {
|
||||||
|
// Ensure URL & path are not both specified.
|
||||||
|
if c.URL != "" && c.Path != "" {
|
||||||
|
return nil, fmt.Errorf("cannot specify url & path for file replica")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse path from URL, if specified.
|
||||||
|
path := c.Path
|
||||||
|
if c.URL != "" {
|
||||||
|
if _, _, path, err = ParseReplicaURL(c.URL); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ensure path is set explicitly or derived from URL field.
|
||||||
|
if path == "" {
|
||||||
|
return nil, fmt.Errorf("file replica path required")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Expand home prefix and return absolute path.
|
||||||
|
if path, err = expand(path); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Instantiate replica and apply time fields, if set.
|
||||||
|
client := file.NewReplicaClient(path)
|
||||||
|
client.Replica = r
|
||||||
|
return client, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// newS3ReplicaClientFromConfig returns a new instance of s3.ReplicaClient built from config.
|
||||||
|
func newS3ReplicaClientFromConfig(c *ReplicaConfig, r *litestream.Replica) (_ *s3.ReplicaClient, err error) {
|
||||||
|
// Ensure URL & constituent parts are not both specified.
|
||||||
|
if c.URL != "" && c.Path != "" {
|
||||||
|
return nil, fmt.Errorf("cannot specify url & path for s3 replica")
|
||||||
|
} else if c.URL != "" && c.Bucket != "" {
|
||||||
|
return nil, fmt.Errorf("cannot specify url & bucket for s3 replica")
|
||||||
|
}
|
||||||
|
|
||||||
|
bucket, path := c.Bucket, c.Path
|
||||||
|
region, endpoint, skipVerify := c.Region, c.Endpoint, c.SkipVerify
|
||||||
|
|
||||||
|
// Use path style if an endpoint is explicitly set. This works because the
|
||||||
|
// only service that does not use path style is AWS, which does not use an endpoint.
|
||||||
|
forcePathStyle := (endpoint != "")
|
||||||
|
if v := c.ForcePathStyle; v != nil {
|
||||||
|
forcePathStyle = *v
|
||||||
|
}
|
||||||
|
|
||||||
|
// Apply settings from URL, if specified.
|
||||||
|
if c.URL != "" {
|
||||||
|
_, host, upath, err := ParseReplicaURL(c.URL)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
ubucket, uregion, uendpoint, uforcePathStyle := s3.ParseHost(host)
|
||||||
|
|
||||||
|
// Only apply URL parts to fields that have not been overridden.
|
||||||
|
if path == "" {
|
||||||
|
path = upath
|
||||||
|
}
|
||||||
|
if bucket == "" {
|
||||||
|
bucket = ubucket
|
||||||
|
}
|
||||||
|
if region == "" {
|
||||||
|
region = uregion
|
||||||
|
}
|
||||||
|
if endpoint == "" {
|
||||||
|
endpoint = uendpoint
|
||||||
|
}
|
||||||
|
if !forcePathStyle {
|
||||||
|
forcePathStyle = uforcePathStyle
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ensure required settings are set.
|
||||||
|
if bucket == "" {
|
||||||
|
return nil, fmt.Errorf("bucket required for s3 replica")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Build replica.
|
||||||
|
client := s3.NewReplicaClient()
|
||||||
|
client.AccessKeyID = c.AccessKeyID
|
||||||
|
client.SecretAccessKey = c.SecretAccessKey
|
||||||
|
client.Bucket = bucket
|
||||||
|
client.Path = path
|
||||||
|
client.Region = region
|
||||||
|
client.Endpoint = endpoint
|
||||||
|
client.ForcePathStyle = forcePathStyle
|
||||||
|
client.SkipVerify = skipVerify
|
||||||
|
return client, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// newGCSReplicaClientFromConfig returns a new instance of gcs.ReplicaClient built from config.
|
||||||
|
func newGCSReplicaClientFromConfig(c *ReplicaConfig, r *litestream.Replica) (_ *gcs.ReplicaClient, err error) {
|
||||||
|
// Ensure URL & constituent parts are not both specified.
|
||||||
|
if c.URL != "" && c.Path != "" {
|
||||||
|
return nil, fmt.Errorf("cannot specify url & path for gcs replica")
|
||||||
|
} else if c.URL != "" && c.Bucket != "" {
|
||||||
|
return nil, fmt.Errorf("cannot specify url & bucket for gcs replica")
|
||||||
|
}
|
||||||
|
|
||||||
|
bucket, path := c.Bucket, c.Path
|
||||||
|
|
||||||
|
// Apply settings from URL, if specified.
|
||||||
|
if c.URL != "" {
|
||||||
|
_, uhost, upath, err := ParseReplicaURL(c.URL)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Only apply URL parts to fields that have not been overridden.
|
||||||
|
if path == "" {
|
||||||
|
path = upath
|
||||||
|
}
|
||||||
|
if bucket == "" {
|
||||||
|
bucket = uhost
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ensure required settings are set.
|
||||||
|
if bucket == "" {
|
||||||
|
return nil, fmt.Errorf("bucket required for gcs replica")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Build replica.
|
||||||
|
client := gcs.NewReplicaClient()
|
||||||
|
client.Bucket = bucket
|
||||||
|
client.Path = path
|
||||||
|
return client, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// newABSReplicaClientFromConfig returns a new instance of abs.ReplicaClient built from config.
|
||||||
|
func newABSReplicaClientFromConfig(c *ReplicaConfig, r *litestream.Replica) (_ *abs.ReplicaClient, err error) {
|
||||||
|
// Ensure URL & constituent parts are not both specified.
|
||||||
|
if c.URL != "" && c.Path != "" {
|
||||||
|
return nil, fmt.Errorf("cannot specify url & path for abs replica")
|
||||||
|
} else if c.URL != "" && c.Bucket != "" {
|
||||||
|
return nil, fmt.Errorf("cannot specify url & bucket for abs replica")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Build replica.
|
||||||
|
client := abs.NewReplicaClient()
|
||||||
|
client.AccountName = c.AccountName
|
||||||
|
client.AccountKey = c.AccountKey
|
||||||
|
client.Bucket = c.Bucket
|
||||||
|
client.Path = c.Path
|
||||||
|
client.Endpoint = c.Endpoint
|
||||||
|
|
||||||
|
// Apply settings from URL, if specified.
|
||||||
|
if c.URL != "" {
|
||||||
|
u, err := url.Parse(c.URL)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if client.AccountName == "" && u.User != nil {
|
||||||
|
client.AccountName = u.User.Username()
|
||||||
|
}
|
||||||
|
if client.Bucket == "" {
|
||||||
|
client.Bucket = u.Host
|
||||||
|
}
|
||||||
|
if client.Path == "" {
|
||||||
|
client.Path = strings.TrimPrefix(path.Clean(u.Path), "/")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ensure required settings are set.
|
||||||
|
if client.Bucket == "" {
|
||||||
|
return nil, fmt.Errorf("bucket required for abs replica")
|
||||||
|
}
|
||||||
|
|
||||||
|
return client, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// newSFTPReplicaClientFromConfig returns a new instance of sftp.ReplicaClient built from config.
|
||||||
|
func newSFTPReplicaClientFromConfig(c *ReplicaConfig, r *litestream.Replica) (_ *sftp.ReplicaClient, err error) {
|
||||||
|
// Ensure URL & constituent parts are not both specified.
|
||||||
|
if c.URL != "" && c.Path != "" {
|
||||||
|
return nil, fmt.Errorf("cannot specify url & path for sftp replica")
|
||||||
|
} else if c.URL != "" && c.Host != "" {
|
||||||
|
return nil, fmt.Errorf("cannot specify url & host for sftp replica")
|
||||||
|
}
|
||||||
|
|
||||||
|
host, user, password, path := c.Host, c.User, c.Password, c.Path
|
||||||
|
|
||||||
|
// Apply settings from URL, if specified.
|
||||||
|
if c.URL != "" {
|
||||||
|
u, err := url.Parse(c.URL)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Only apply URL parts to fields that have not been overridden.
|
||||||
|
if host == "" {
|
||||||
|
host = u.Host
|
||||||
|
}
|
||||||
|
if user == "" && u.User != nil {
|
||||||
|
user = u.User.Username()
|
||||||
|
}
|
||||||
|
if password == "" && u.User != nil {
|
||||||
|
password, _ = u.User.Password()
|
||||||
|
}
|
||||||
|
if path == "" {
|
||||||
|
path = u.Path
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ensure required settings are set.
|
||||||
|
if host == "" {
|
||||||
|
return nil, fmt.Errorf("host required for sftp replica")
|
||||||
|
} else if user == "" {
|
||||||
|
return nil, fmt.Errorf("user required for sftp replica")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Build replica.
|
||||||
|
client := sftp.NewReplicaClient()
|
||||||
|
client.Host = host
|
||||||
|
client.User = user
|
||||||
|
client.Password = password
|
||||||
|
client.Path = path
|
||||||
|
client.KeyPath = c.KeyPath
|
||||||
|
return client, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// applyLitestreamEnv copies "LITESTREAM" prefixed environment variables to
|
||||||
|
// their AWS counterparts as the "AWS" prefix can be confusing when using a
|
||||||
|
// non-AWS S3-compatible service.
|
||||||
|
func applyLitestreamEnv() {
|
||||||
|
if v, ok := os.LookupEnv("LITESTREAM_ACCESS_KEY_ID"); ok {
|
||||||
|
if _, ok := os.LookupEnv("AWS_ACCESS_KEY_ID"); !ok {
|
||||||
|
os.Setenv("AWS_ACCESS_KEY_ID", v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if v, ok := os.LookupEnv("LITESTREAM_SECRET_ACCESS_KEY"); ok {
|
||||||
|
if _, ok := os.LookupEnv("AWS_SECRET_ACCESS_KEY"); !ok {
|
||||||
|
os.Setenv("AWS_SECRET_ACCESS_KEY", v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ParseReplicaURL parses a replica URL.
|
||||||
|
func ParseReplicaURL(s string) (scheme, host, urlpath string, err error) {
|
||||||
|
u, err := url.Parse(s)
|
||||||
|
if err != nil {
|
||||||
|
return "", "", "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
switch u.Scheme {
|
||||||
|
case "file":
|
||||||
|
scheme, u.Scheme = u.Scheme, ""
|
||||||
|
return scheme, "", path.Clean(u.String()), nil
|
||||||
|
|
||||||
|
case "":
|
||||||
|
return u.Scheme, u.Host, u.Path, fmt.Errorf("replica url scheme required: %s", s)
|
||||||
|
|
||||||
|
default:
|
||||||
|
return u.Scheme, u.Host, strings.TrimPrefix(path.Clean(u.Path), "/"), nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// isURL returns true if s begins with a URL scheme (e.g. "s3://").
|
||||||
|
func isURL(s string) bool {
|
||||||
|
return regexp.MustCompile(`^\w+:\/\/`).MatchString(s)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReplicaType returns the type based on the type field or extracted from the URL.
|
||||||
|
func (c *ReplicaConfig) ReplicaType() string {
|
||||||
|
scheme, _, _, _ := ParseReplicaURL(c.URL)
|
||||||
|
if scheme != "" {
|
||||||
|
return scheme
|
||||||
|
} else if c.Type != "" {
|
||||||
|
return c.Type
|
||||||
|
}
|
||||||
|
return "file"
|
||||||
|
}
|
||||||
|
|
||||||
|
// DefaultConfigPath returns the default config path.
|
||||||
|
func DefaultConfigPath() string {
|
||||||
|
if v := os.Getenv("LITESTREAM_CONFIG"); v != "" {
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
return defaultConfigPath
|
||||||
|
}
|
||||||
|
|
||||||
|
func registerConfigFlag(fs *flag.FlagSet) (configPath *string, noExpandEnv *bool) {
|
||||||
|
return fs.String("config", "", "config path"),
|
||||||
|
fs.Bool("no-expand-env", false, "do not expand env vars in config")
|
||||||
|
}
|
||||||
|
|
||||||
|
// expand returns an absolute path for s.
|
||||||
|
func expand(s string) (string, error) {
|
||||||
|
// Just expand to absolute path if there is no home directory prefix.
|
||||||
|
prefix := "~" + string(os.PathSeparator)
|
||||||
|
if s != "~" && !strings.HasPrefix(s, prefix) {
|
||||||
|
return filepath.Abs(s)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Look up home directory.
|
||||||
|
u, err := user.Current()
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
} else if u.HomeDir == "" {
|
||||||
|
return "", fmt.Errorf("cannot expand path %s, no home directory available", s)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return path with tilde replaced by the home directory.
|
||||||
|
if s == "~" {
|
||||||
|
return u.HomeDir, nil
|
||||||
|
}
|
||||||
|
return filepath.Join(u.HomeDir, strings.TrimPrefix(s, prefix)), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// indexVar allows the flag package to parse index flags as 4-byte hexadecimal values.
|
||||||
|
type indexVar int
|
||||||
|
|
||||||
|
// Ensure type implements interface.
|
||||||
|
var _ flag.Value = (*indexVar)(nil)
|
||||||
|
|
||||||
|
// String returns an 8-character hexadecimal value.
|
||||||
|
func (v *indexVar) String() string {
|
||||||
|
return fmt.Sprintf("%08x", int(*v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set parses s into an integer from a hexadecimal value.
|
||||||
|
func (v *indexVar) Set(s string) error {
|
||||||
|
i, err := strconv.ParseInt(s, 16, 32)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("invalid hexadecimal format")
|
||||||
|
}
|
||||||
|
*v = indexVar(i)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|||||||
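ParseReplicaURL and ReplicaType above determine how a bare `url:` value is split into scheme, host, and path. A few worked examples of that behavior, hedged as a reading of the switch above; the URLs are placeholders.

package main

import "fmt"

// exampleParseReplicaURL walks through the three branches of ParseReplicaURL.
func exampleParseReplicaURL() {
	scheme, host, p, _ := ParseReplicaURL("s3://example-bucket/backups/app")
	fmt.Println(scheme, host, p) // "s3" "example-bucket" "backups/app"

	scheme, host, p, _ = ParseReplicaURL("file:///var/backups/app")
	fmt.Println(scheme, host, p) // "file" "" "/var/backups/app"

	_, _, _, err := ParseReplicaURL("var/backups/app") // no scheme
	fmt.Println(err) // "replica url scheme required: var/backups/app"
}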
cmd/litestream/main_notwindows.go (new file, 26 lines)
@@ -0,0 +1,26 @@
+//go:build !windows
+
+package main
+
+import (
+	"context"
+	"os"
+	"os/signal"
+	"syscall"
+)
+
+const defaultConfigPath = "/etc/litestream.yml"
+
+func isWindowsService() (bool, error) {
+	return false, nil
+}
+
+func runWindowsService(ctx context.Context) error {
+	panic("cannot run windows service as unix process")
+}
+
+func signalChan() <-chan os.Signal {
+	ch := make(chan os.Signal, 2)
+	signal.Notify(ch, syscall.SIGINT, syscall.SIGTERM)
+	return ch
+}
cmd/litestream/main_test.go (new file, 176 lines)
@@ -0,0 +1,176 @@
|
|||||||
|
package main_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
main "github.com/benbjohnson/litestream/cmd/litestream"
|
||||||
|
"github.com/benbjohnson/litestream/file"
|
||||||
|
"github.com/benbjohnson/litestream/gcs"
|
||||||
|
"github.com/benbjohnson/litestream/s3"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestReadConfigFile(t *testing.T) {
|
||||||
|
// Ensure global AWS settings are propagated down to replica configurations.
|
||||||
|
t.Run("PropagateGlobalSettings", func(t *testing.T) {
|
||||||
|
filename := filepath.Join(t.TempDir(), "litestream.yml")
|
||||||
|
if err := os.WriteFile(filename, []byte(`
|
||||||
|
access-key-id: XXX
|
||||||
|
secret-access-key: YYY
|
||||||
|
|
||||||
|
dbs:
|
||||||
|
- path: /path/to/db
|
||||||
|
replicas:
|
||||||
|
- url: s3://foo/bar
|
||||||
|
`[1:]), 0666); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
config, err := main.ReadConfigFile(filename, true)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
} else if got, want := config.AccessKeyID, `XXX`; got != want {
|
||||||
|
t.Fatalf("AccessKeyID=%v, want %v", got, want)
|
||||||
|
} else if got, want := config.SecretAccessKey, `YYY`; got != want {
|
||||||
|
t.Fatalf("SecretAccessKey=%v, want %v", got, want)
|
||||||
|
} else if got, want := config.DBs[0].Replicas[0].AccessKeyID, `XXX`; got != want {
|
||||||
|
t.Fatalf("Replica.AccessKeyID=%v, want %v", got, want)
|
||||||
|
} else if got, want := config.DBs[0].Replicas[0].SecretAccessKey, `YYY`; got != want {
|
||||||
|
t.Fatalf("Replica.SecretAccessKey=%v, want %v", got, want)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
// Ensure environment variables are expanded.
|
||||||
|
t.Run("ExpandEnv", func(t *testing.T) {
|
||||||
|
os.Setenv("LITESTREAM_TEST_0129380", "/path/to/db")
|
||||||
|
os.Setenv("LITESTREAM_TEST_1872363", "s3://foo/bar")
|
||||||
|
|
||||||
|
filename := filepath.Join(t.TempDir(), "litestream.yml")
|
||||||
|
if err := os.WriteFile(filename, []byte(`
|
||||||
|
dbs:
|
||||||
|
- path: $LITESTREAM_TEST_0129380
|
||||||
|
replicas:
|
||||||
|
- url: ${LITESTREAM_TEST_1872363}
|
||||||
|
- url: ${LITESTREAM_TEST_NO_SUCH_ENV}
|
||||||
|
`[1:]), 0666); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
config, err := main.ReadConfigFile(filename, true)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
} else if got, want := config.DBs[0].Path, `/path/to/db`; got != want {
|
||||||
|
t.Fatalf("DB.Path=%v, want %v", got, want)
|
||||||
|
} else if got, want := config.DBs[0].Replicas[0].URL, `s3://foo/bar`; got != want {
|
||||||
|
t.Fatalf("Replica[0].URL=%v, want %v", got, want)
|
||||||
|
} else if got, want := config.DBs[0].Replicas[1].URL, ``; got != want {
|
||||||
|
t.Fatalf("Replica[1].URL=%v, want %v", got, want)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
// Ensure environment variables are not expanded.
|
||||||
|
t.Run("NoExpandEnv", func(t *testing.T) {
|
||||||
|
os.Setenv("LITESTREAM_TEST_9847533", "s3://foo/bar")
|
||||||
|
|
||||||
|
filename := filepath.Join(t.TempDir(), "litestream.yml")
|
||||||
|
if err := os.WriteFile(filename, []byte(`
|
||||||
|
dbs:
|
||||||
|
- path: /path/to/db
|
||||||
|
replicas:
|
||||||
|
- url: ${LITESTREAM_TEST_9847533}
|
||||||
|
`[1:]), 0666); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
config, err := main.ReadConfigFile(filename, false)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
} else if got, want := config.DBs[0].Replicas[0].URL, `${LITESTREAM_TEST_9847533}`; got != want {
|
||||||
|
t.Fatalf("Replica.URL=%v, want %v", got, want)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestNewFileReplicaFromConfig(t *testing.T) {
|
||||||
|
r, err := main.NewReplicaFromConfig(&main.ReplicaConfig{Path: "/foo"}, nil)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
} else if client, ok := r.Client.(*file.ReplicaClient); !ok {
|
||||||
|
t.Fatal("unexpected replica type")
|
||||||
|
} else if got, want := client.Path(), "/foo"; got != want {
|
||||||
|
t.Fatalf("Path=%s, want %s", got, want)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestNewS3ReplicaFromConfig(t *testing.T) {
|
||||||
|
t.Run("URL", func(t *testing.T) {
|
||||||
|
r, err := main.NewReplicaFromConfig(&main.ReplicaConfig{URL: "s3://foo/bar"}, nil)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
} else if client, ok := r.Client.(*s3.ReplicaClient); !ok {
|
||||||
|
t.Fatal("unexpected replica type")
|
||||||
|
} else if got, want := client.Bucket, "foo"; got != want {
|
||||||
|
t.Fatalf("Bucket=%s, want %s", got, want)
|
||||||
|
} else if got, want := client.Path, "bar"; got != want {
|
||||||
|
t.Fatalf("Path=%s, want %s", got, want)
|
||||||
|
} else if got, want := client.Region, ""; got != want {
|
||||||
|
t.Fatalf("Region=%s, want %s", got, want)
|
||||||
|
} else if got, want := client.Endpoint, ""; got != want {
|
||||||
|
t.Fatalf("Endpoint=%s, want %s", got, want)
|
||||||
|
} else if got, want := client.ForcePathStyle, false; got != want {
|
||||||
|
t.Fatalf("ForcePathStyle=%v, want %v", got, want)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("MinIO", func(t *testing.T) {
|
||||||
|
r, err := main.NewReplicaFromConfig(&main.ReplicaConfig{URL: "s3://foo.localhost:9000/bar"}, nil)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
} else if client, ok := r.Client.(*s3.ReplicaClient); !ok {
|
||||||
|
t.Fatal("unexpected replica type")
|
||||||
|
} else if got, want := client.Bucket, "foo"; got != want {
|
||||||
|
t.Fatalf("Bucket=%s, want %s", got, want)
|
||||||
|
} else if got, want := client.Path, "bar"; got != want {
|
||||||
|
t.Fatalf("Path=%s, want %s", got, want)
|
||||||
|
} else if got, want := client.Region, "us-east-1"; got != want {
|
||||||
|
t.Fatalf("Region=%s, want %s", got, want)
|
||||||
|
} else if got, want := client.Endpoint, "http://localhost:9000"; got != want {
|
||||||
|
t.Fatalf("Endpoint=%s, want %s", got, want)
|
||||||
|
} else if got, want := client.ForcePathStyle, true; got != want {
|
||||||
|
t.Fatalf("ForcePathStyle=%v, want %v", got, want)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("Backblaze", func(t *testing.T) {
|
||||||
|
r, err := main.NewReplicaFromConfig(&main.ReplicaConfig{URL: "s3://foo.s3.us-west-000.backblazeb2.com/bar"}, nil)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
} else if client, ok := r.Client.(*s3.ReplicaClient); !ok {
|
||||||
|
t.Fatal("unexpected replica type")
|
||||||
|
} else if got, want := client.Bucket, "foo"; got != want {
|
||||||
|
t.Fatalf("Bucket=%s, want %s", got, want)
|
||||||
|
} else if got, want := client.Path, "bar"; got != want {
|
||||||
|
t.Fatalf("Path=%s, want %s", got, want)
|
||||||
|
} else if got, want := client.Region, "us-west-000"; got != want {
|
||||||
|
t.Fatalf("Region=%s, want %s", got, want)
|
||||||
|
} else if got, want := client.Endpoint, "https://s3.us-west-000.backblazeb2.com"; got != want {
|
||||||
|
t.Fatalf("Endpoint=%s, want %s", got, want)
|
||||||
|
} else if got, want := client.ForcePathStyle, true; got != want {
|
||||||
|
t.Fatalf("ForcePathStyle=%v, want %v", got, want)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestNewGCSReplicaFromConfig(t *testing.T) {
|
||||||
|
r, err := main.NewReplicaFromConfig(&main.ReplicaConfig{URL: "gcs://foo/bar"}, nil)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
} else if client, ok := r.Client.(*gcs.ReplicaClient); !ok {
|
||||||
|
t.Fatal("unexpected replica type")
|
||||||
|
} else if got, want := client.Bucket, "foo"; got != want {
|
||||||
|
t.Fatalf("Bucket=%s, want %s", got, want)
|
||||||
|
} else if got, want := client.Path, "bar"; got != want {
|
||||||
|
t.Fatalf("Path=%s, want %s", got, want)
|
||||||
|
}
|
||||||
|
}
|
||||||
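The S3 tests above pin down how a replica URL's host maps onto client settings: a plain s3://bucket/path URL means AWS with no explicit endpoint, while MinIO-style and Backblaze-style hosts yield an endpoint, a region, and forced path-style addressing. The sketch below reproduces those expectations; the bucket names come from the tests and the loop itself is illustrative.

package main

import (
	"fmt"

	"github.com/benbjohnson/litestream/s3"
)

// exampleS3Endpoints prints the endpoint inference covered by TestNewS3ReplicaFromConfig.
func exampleS3Endpoints() error {
	for _, u := range []string{
		"s3://foo/bar",                                // AWS: empty endpoint, no path style
		"s3://foo.localhost:9000/bar",                 // MinIO: http://localhost:9000, path style
		"s3://foo.s3.us-west-000.backblazeb2.com/bar", // Backblaze: https endpoint, path style
	} {
		r, err := NewReplicaFromConfig(&ReplicaConfig{URL: u}, nil)
		if err != nil {
			return err
		}
		c := r.Client.(*s3.ReplicaClient)
		fmt.Println(u, "->", c.Bucket, c.Region, c.Endpoint, c.ForcePathStyle)
	}
	return nil
}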
cmd/litestream/main_windows.go (new file, 112 lines)
@@ -0,0 +1,112 @@
|
|||||||
|
//go:build windows
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"io"
|
||||||
|
"log/slog"
|
||||||
|
"os"
|
||||||
|
"os/signal"
|
||||||
|
|
||||||
|
"golang.org/x/sys/windows"
|
||||||
|
"golang.org/x/sys/windows/svc"
|
||||||
|
"golang.org/x/sys/windows/svc/eventlog"
|
||||||
|
)
|
||||||
|
|
||||||
|
const defaultConfigPath = `C:\Litestream\litestream.yml`
|
||||||
|
|
||||||
|
// serviceName is the Windows Service name.
|
||||||
|
const serviceName = "Litestream"
|
||||||
|
|
||||||
|
// isWindowsService returns true if currently executing within a Windows service.
|
||||||
|
func isWindowsService() (bool, error) {
|
||||||
|
return svc.IsWindowsService()
|
||||||
|
}
|
||||||
|
|
||||||
|
func runWindowsService(ctx context.Context) error {
|
||||||
|
// Attempt to install new log service. This will fail if already installed.
|
||||||
|
// We don't log the error because we don't have anywhere to log until we open the log.
|
||||||
|
_ = eventlog.InstallAsEventCreate(serviceName, eventlog.Error|eventlog.Warning|eventlog.Info)
|
||||||
|
|
||||||
|
elog, err := eventlog.Open(serviceName)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer elog.Close()
|
||||||
|
|
||||||
|
// Set eventlog as log writer while running.
|
||||||
|
slog.SetDefault(slog.New(slog.NewTextHandler((*eventlogWriter)(elog), nil)))
|
||||||
|
defer slog.SetDefault(slog.New(slog.NewTextHandler(os.Stderr, nil)))
|
||||||
|
|
||||||
|
slog.Info("Litestream service starting")
|
||||||
|
|
||||||
|
if err := svc.Run(serviceName, &windowsService{ctx: ctx}); err != nil {
|
||||||
|
return errStop
|
||||||
|
}
|
||||||
|
|
||||||
|
slog.Info("Litestream service stopped")
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// windowsService is an interface adapter for svc.Handler.
|
||||||
|
type windowsService struct {
|
||||||
|
ctx context.Context
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *windowsService) Execute(args []string, r <-chan svc.ChangeRequest, statusCh chan<- svc.Status) (svcSpecificEC bool, exitCode uint32) {
|
||||||
|
var err error
|
||||||
|
|
||||||
|
// Notify Windows that the service is starting up.
|
||||||
|
statusCh <- svc.Status{State: svc.StartPending}
|
||||||
|
|
||||||
|
// Instantiate replication command and load configuration.
|
||||||
|
c := NewReplicateCommand()
|
||||||
|
if c.Config, err = ReadConfigFile(DefaultConfigPath(), true); err != nil {
|
||||||
|
slog.Error("cannot load configuration", "error", err)
|
||||||
|
return true, 1
|
||||||
|
}
|
||||||
|
|
||||||
|
// Execute replication command.
|
||||||
|
if err := c.Run(); err != nil {
|
||||||
|
slog.Error("cannot replicate", "error", err)
|
||||||
|
statusCh <- svc.Status{State: svc.StopPending}
|
||||||
|
return true, 2
|
||||||
|
}
|
||||||
|
|
||||||
|
// Notify Windows that the service is now running.
|
||||||
|
statusCh <- svc.Status{State: svc.Running, Accepts: svc.AcceptStop}
|
||||||
|
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case req := <-r:
|
||||||
|
switch req.Cmd {
|
||||||
|
case svc.Stop:
|
||||||
|
c.Close()
|
||||||
|
statusCh <- svc.Status{State: svc.StopPending}
|
||||||
|
return false, windows.NO_ERROR
|
||||||
|
case svc.Interrogate:
|
||||||
|
statusCh <- req.CurrentStatus
|
||||||
|
default:
|
||||||
|
slog.Error("Litestream service received unexpected change request", "cmd", req.Cmd)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ensure implementation implements io.Writer interface.
|
||||||
|
var _ io.Writer = (*eventlogWriter)(nil)
|
||||||
|
|
||||||
|
// eventlogWriter is an adapter for using eventlog.Log as an io.Writer.
|
||||||
|
type eventlogWriter eventlog.Log
|
||||||
|
|
||||||
|
func (w *eventlogWriter) Write(p []byte) (n int, err error) {
|
||||||
|
elog := (*eventlog.Log)(w)
|
||||||
|
return 0, elog.Info(1, string(p))
|
||||||
|
}
|
||||||
|
|
||||||
|
func signalChan() <-chan os.Signal {
|
||||||
|
ch := make(chan os.Signal, 1)
|
||||||
|
signal.Notify(ch, os.Interrupt)
|
||||||
|
return ch
|
||||||
|
}
|
||||||
@@ -2,69 +2,98 @@ package main
 
 import (
 	"context"
-	"errors"
 	"flag"
 	"fmt"
-	"log"
+	"log/slog"
 	"net"
 	"net/http"
 	_ "net/http/pprof"
 	"os"
-	"os/signal"
+	"os/exec"
 
 	"github.com/benbjohnson/litestream"
+	"github.com/benbjohnson/litestream/abs"
+	"github.com/benbjohnson/litestream/file"
+	"github.com/benbjohnson/litestream/gcs"
 	"github.com/benbjohnson/litestream/s3"
+	"github.com/benbjohnson/litestream/sftp"
+	"github.com/mattn/go-shellwords"
 	"github.com/prometheus/client_golang/prometheus/promhttp"
 )
 
 // ReplicateCommand represents a command that continuously replicates SQLite databases.
 type ReplicateCommand struct {
-	ConfigPath string
-	Config     Config
+	cmd    *exec.Cmd  // subcommand
+	execCh chan error // subcommand error channel
+
+	Config Config
 
 	// List of managed databases specified in the config.
 	DBs []*litestream.DB
 }
 
-// Run loads all databases specified in the configuration.
-func (c *ReplicateCommand) Run(ctx context.Context, args []string) (err error) {
+func NewReplicateCommand() *ReplicateCommand {
+	return &ReplicateCommand{
+		execCh: make(chan error),
+	}
+}
+
+// ParseFlags parses the CLI flags and loads the configuration file.
+func (c *ReplicateCommand) ParseFlags(ctx context.Context, args []string) (err error) {
 	fs := flag.NewFlagSet("litestream-replicate", flag.ContinueOnError)
-	verbose := fs.Bool("v", false, "verbose logging")
-	registerConfigFlag(fs, &c.ConfigPath)
+	execFlag := fs.String("exec", "", "execute subcommand")
+	configPath, noExpandEnv := registerConfigFlag(fs)
 	fs.Usage = c.Usage
 	if err := fs.Parse(args); err != nil {
 		return err
 	}
 
-	// Load configuration.
-	if c.ConfigPath == "" {
-		return errors.New("-config required")
-	}
-	config, err := ReadConfigFile(c.ConfigPath)
-	if err != nil {
-		return err
+	// Load configuration or use CLI args to build db/replica.
+	if fs.NArg() == 1 {
+		return fmt.Errorf("must specify at least one replica URL for %s", fs.Arg(0))
+	} else if fs.NArg() > 1 {
+		if *configPath != "" {
+			return fmt.Errorf("cannot specify a replica URL and the -config flag")
+		}
+
+		dbConfig := &DBConfig{Path: fs.Arg(0)}
+		for _, u := range fs.Args()[1:] {
+			syncInterval := litestream.DefaultSyncInterval
+			dbConfig.Replicas = append(dbConfig.Replicas, &ReplicaConfig{
+				URL:          u,
+				SyncInterval: &syncInterval,
+			})
+		}
+		c.Config.DBs = []*DBConfig{dbConfig}
+	} else {
+		if *configPath == "" {
+			*configPath = DefaultConfigPath()
+		}
+		if c.Config, err = ReadConfigFile(*configPath, !*noExpandEnv); err != nil {
+			return err
+		}
 	}
 
-	// Enable trace logging.
-	if *verbose {
-		litestream.Tracef = log.Printf
+	// Override config exec command, if specified.
+	if *execFlag != "" {
+		c.Config.Exec = *execFlag
 	}
 
-	// Setup signal handler.
-	ctx, cancel := context.WithCancel(ctx)
-	ch := make(chan os.Signal, 1)
-	signal.Notify(ch, os.Interrupt)
-	go func() { <-ch; cancel() }()
-
+	return nil
+}
+
+// Run loads all databases specified in the configuration.
+func (c *ReplicateCommand) Run() (err error) {
 	// Display version information.
-	fmt.Printf("litestream %s\n", Version)
+	slog.Info("litestream", "version", Version)
 
-	if len(config.DBs) == 0 {
-		fmt.Println("no databases specified in configuration")
+	// Setup databases.
+	if len(c.Config.DBs) == 0 {
+		slog.Error("no databases specified in configuration")
 	}
 
-	for _, dbConfig := range config.DBs {
-		db, err := newDBFromConfig(&config, dbConfig)
+	for _, dbConfig := range c.Config.DBs {
+		db, err := NewDBFromConfig(dbConfig)
 		if err != nil {
 			return err
 		}
@@ -78,39 +107,59 @@ func (c *ReplicateCommand) Run(ctx context.Context, args []string) (err error) {
 
 	// Notify user that initialization is done.
 	for _, db := range c.DBs {
-		fmt.Printf("initialized db: %s\n", db.Path())
+		slog.Info("initialized db", "path", db.Path())
 		for _, r := range db.Replicas {
-			switch r := r.(type) {
-			case *litestream.FileReplica:
-				fmt.Printf("replicating to: name=%q type=%q path=%q\n", r.Name(), r.Type(), r.Path())
-			case *s3.Replica:
-				fmt.Printf("replicating to: name=%q type=%q bucket=%q path=%q region=%q\n", r.Name(), r.Type(), r.Bucket, r.Path, r.Region)
+			slog := slog.With("name", r.Name(), "type", r.Client.Type(), "sync-interval", r.SyncInterval)
+			switch client := r.Client.(type) {
+			case *file.ReplicaClient:
+				slog.Info("replicating to", "path", client.Path())
+			case *s3.ReplicaClient:
+				slog.Info("replicating to", "bucket", client.Bucket, "path", client.Path, "region", client.Region, "endpoint", client.Endpoint)
+			case *gcs.ReplicaClient:
+				slog.Info("replicating to", "bucket", client.Bucket, "path", client.Path)
+			case *abs.ReplicaClient:
+				slog.Info("replicating to", "bucket", client.Bucket, "path", client.Path, "endpoint", client.Endpoint)
+			case *sftp.ReplicaClient:
+				slog.Info("replicating to", "host", client.Host, "user", client.User, "path", client.Path)
 			default:
-				fmt.Printf("replicating to: name=%q type=%q\n", r.Name(), r.Type())
+				slog.Info("replicating to")
 			}
 		}
 	}
 
 	// Serve metrics over HTTP if enabled.
-	if config.Addr != "" {
-		_, port, _ := net.SplitHostPort(config.Addr)
-		fmt.Printf("serving metrics on http://localhost:%s/metrics\n", port)
+	if c.Config.Addr != "" {
+		hostport := c.Config.Addr
+		if host, port, _ := net.SplitHostPort(c.Config.Addr); port == "" {
+			return fmt.Errorf("must specify port for bind address: %q", c.Config.Addr)
+		} else if host == "" {
+			hostport = net.JoinHostPort("localhost", port)
+		}
+
+		slog.Info("serving metrics on", "url", fmt.Sprintf("http://%s/metrics", hostport))
 		go func() {
 			http.Handle("/metrics", promhttp.Handler())
-			if err := http.ListenAndServe(config.Addr, nil); err != nil {
-				log.Printf("cannot start metrics server: %s", err)
+			if err := http.ListenAndServe(c.Config.Addr, nil); err != nil {
+				slog.Error("cannot start metrics server", "error", err)
 			}
 		}()
 	}
 
-	// Wait for signal to stop program.
-	<-ctx.Done()
-	signal.Reset()
-
-	// Gracefully close
-	if err := c.Close(); err != nil {
-		fmt.Fprintln(os.Stderr, err)
-		os.Exit(1)
+	// Parse exec commands args & start subprocess.
+	if c.Config.Exec != "" {
+		execArgs, err := shellwords.Parse(c.Config.Exec)
+		if err != nil {
+			return fmt.Errorf("cannot parse exec command: %w", err)
+		}
+
+		c.cmd = exec.Command(execArgs[0], execArgs[1:]...)
+		c.cmd.Env = os.Environ()
+		c.cmd.Stdout = os.Stdout
+		c.cmd.Stderr = os.Stderr
+		if err := c.cmd.Start(); err != nil {
+			return fmt.Errorf("cannot start exec command: %w", err)
+		}
+		go func() { c.execCh <- c.cmd.Wait() }()
 	}
 
 	return nil
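Run now only starts the -exec subcommand and publishes its exit status on execCh; the waiting itself moves to the caller, which this section does not show. A minimal sketch of how a caller might wait on either an interrupt or the subprocess (an assumption about the wiring, not Litestream's actual main loop):

package main

import (
	"errors"
	"fmt"
	"os"
	"os/exec"
	"os/signal"
)

// supervise waits for either an interrupt signal or the wrapped subprocess to
// exit, mirroring how a caller might consume execCh. The channel wiring here
// is illustrative only.
func supervise(cmd *exec.Cmd, execCh <-chan error) error {
	sig := make(chan os.Signal, 1)
	signal.Notify(sig, os.Interrupt)

	select {
	case <-sig:
		// Forward the interrupt to the child and wait for it to exit.
		_ = cmd.Process.Signal(os.Interrupt)
		return <-execCh
	case err := <-execCh:
		// Child exited on its own; propagate a non-zero exit as an error.
		var exitErr *exec.ExitError
		if errors.As(err, &exitErr) {
			return fmt.Errorf("subcommand exited with code %d", exitErr.ExitCode())
		}
		return err
	}
}

func main() {
	cmd := exec.Command("sleep", "5") // stand-in for the user's -exec command
	cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
	if err := cmd.Start(); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	execCh := make(chan error, 1)
	go func() { execCh <- cmd.Wait() }()

	if err := supervise(cmd, execCh); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}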
@@ -119,8 +168,8 @@ func (c *ReplicateCommand) Run(ctx context.Context, args []string) (err error) {
 // Close closes all open databases.
 func (c *ReplicateCommand) Close() (err error) {
 	for _, db := range c.DBs {
-		if e := db.SoftClose(); e != nil {
-			fmt.Printf("error closing db: path=%s err=%s\n", db.Path(), e)
+		if e := db.Close(context.Background()); e != nil {
+			db.Logger.Error("error closing db", "error", e)
 			if err == nil {
 				err = e
 			}
@@ -132,20 +181,29 @@ func (c *ReplicateCommand) Close() (err error) {
 // Usage prints the help screen to STDOUT.
 func (c *ReplicateCommand) Usage() {
 	fmt.Printf(`
-The replicate command starts a server to monitor & replicate databases
-specified in your configuration file.
+The replicate command starts a server to monitor & replicate databases.
+You can specify your database & replicas in a configuration file or you can
+replicate a single database file by specifying its path and its replicas in the
+command line arguments.
 
 Usage:
 
 	litestream replicate [arguments]
 
+	litestream replicate [arguments] DB_PATH REPLICA_URL [REPLICA_URL...]
+
 Arguments:
 
 	-config PATH
-	    Specifies the configuration file. Defaults to %s
+	    Specifies the configuration file.
+	    Defaults to %s
 
-	-v
-	    Enable verbose logging output.
+	-exec CMD
+	    Executes a subcommand. Litestream will exit when the child
+	    process exits. Useful for simple process management.
+
+	-no-expand-env
+	    Disables environment variable expansion in configuration file.
 
 `[1:], DefaultConfigPath())
 }
@@ -5,9 +5,9 @@ import (
 	"errors"
 	"flag"
 	"fmt"
-	"log"
+	"log/slog"
 	"os"
-	"path/filepath"
+	"strconv"
 	"time"
 
 	"github.com/benbjohnson/litestream"
@@ -18,35 +18,27 @@ type RestoreCommand struct{}
 
 // Run executes the command.
 func (c *RestoreCommand) Run(ctx context.Context, args []string) (err error) {
-	var configPath string
 	opt := litestream.NewRestoreOptions()
 
 	fs := flag.NewFlagSet("litestream-restore", flag.ContinueOnError)
-	registerConfigFlag(fs, &configPath)
+	configPath, noExpandEnv := registerConfigFlag(fs)
 	fs.StringVar(&opt.OutputPath, "o", "", "output path")
 	fs.StringVar(&opt.ReplicaName, "replica", "", "replica name")
 	fs.StringVar(&opt.Generation, "generation", "", "generation name")
-	fs.IntVar(&opt.Index, "index", opt.Index, "wal index")
-	fs.BoolVar(&opt.DryRun, "dry-run", false, "dry run")
+	fs.Var((*indexVar)(&opt.Index), "index", "wal index")
+	fs.IntVar(&opt.Parallelism, "parallelism", opt.Parallelism, "parallelism")
+	ifDBNotExists := fs.Bool("if-db-not-exists", false, "")
+	ifReplicaExists := fs.Bool("if-replica-exists", false, "")
 	timestampStr := fs.String("timestamp", "", "timestamp")
-	verbose := fs.Bool("v", false, "verbose output")
 	fs.Usage = c.Usage
 	if err := fs.Parse(args); err != nil {
 		return err
 	} else if fs.NArg() == 0 || fs.Arg(0) == "" {
-		return fmt.Errorf("database path required")
+		return fmt.Errorf("database path or replica URL required")
 	} else if fs.NArg() > 1 {
 		return fmt.Errorf("too many arguments")
 	}
 
-	// Load configuration.
-	if configPath == "" {
-		return errors.New("-config required")
-	}
-	config, err := ReadConfigFile(configPath)
-	if err != nil {
-		return err
-	}
-
 	// Parse timestamp, if specified.
 	if *timestampStr != "" {
 		if opt.Timestamp, err = time.Parse(time.RFC3339, *timestampStr); err != nil {
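The -index flag is now registered through an indexVar type and, per the usage text further down, accepts a hex-encoded WAL index. The type itself is not part of this hunk, so the following is only a hypothetical sketch of how such a flag.Value adapter might parse hex input:

package main

import (
	"flag"
	"fmt"
	"strconv"
)

// indexVar is a hypothetical flag.Value that parses a hex-encoded WAL index.
// The real type lives elsewhere in the CLI package and may differ in detail;
// this sketch only shows the usual shape of such an adapter.
type indexVar int

func (v *indexVar) String() string { return fmt.Sprintf("%x", int(*v)) }

func (v *indexVar) Set(s string) error {
	i, err := strconv.ParseInt(s, 16, 64)
	if err != nil {
		return fmt.Errorf("invalid hex index: %q", s)
	}
	*v = indexVar(i)
	return nil
}

func main() {
	var index int
	fs := flag.NewFlagSet("example", flag.ExitOnError)
	fs.Var((*indexVar)(&index), "index", "wal index (hex)")
	_ = fs.Parse([]string{"-index", "0000000a"})
	fmt.Println(index) // 10
}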
@@ -54,33 +46,105 @@ func (c *RestoreCommand) Run(ctx context.Context, args []string) (err error) {
 		}
 	}
 
-	// Verbose output is automatically enabled if dry run is specified.
-	if opt.DryRun {
-		*verbose = true
+	// Determine replica & generation to restore from.
+	var r *litestream.Replica
+	if isURL(fs.Arg(0)) {
+		if *configPath != "" {
+			return fmt.Errorf("cannot specify a replica URL and the -config flag")
+		}
+		if r, err = c.loadFromURL(ctx, fs.Arg(0), *ifDBNotExists, &opt); err == errSkipDBExists {
+			slog.Info("database already exists, skipping")
+			return nil
+		} else if err != nil {
+			return err
+		}
+	} else {
+		if *configPath == "" {
+			*configPath = DefaultConfigPath()
+		}
+		if r, err = c.loadFromConfig(ctx, fs.Arg(0), *configPath, !*noExpandEnv, *ifDBNotExists, &opt); err == errSkipDBExists {
+			slog.Info("database already exists, skipping")
+			return nil
+		} else if err != nil {
+			return err
+		}
 	}
 
-	// Instantiate logger if verbose output is enabled.
-	if *verbose {
-		opt.Logger = log.New(os.Stderr, "", log.LstdFlags)
+	// Return an error if no matching targets found.
+	// If optional flag set, return success. Useful for automated recovery.
+	if opt.Generation == "" {
+		if *ifReplicaExists {
+			slog.Info("no matching backups found")
+			return nil
+		}
+		return fmt.Errorf("no matching backups found")
 	}
 
-	// Determine absolute path for database.
-	dbPath, err := filepath.Abs(fs.Arg(0))
+	return r.Restore(ctx, opt)
+}
+
+// loadFromURL creates a replica & updates the restore options from a replica URL.
+func (c *RestoreCommand) loadFromURL(ctx context.Context, replicaURL string, ifDBNotExists bool, opt *litestream.RestoreOptions) (*litestream.Replica, error) {
+	if opt.OutputPath == "" {
+		return nil, fmt.Errorf("output path required")
+	}
+
+	// Exit successfully if the output file already exists.
+	if _, err := os.Stat(opt.OutputPath); !os.IsNotExist(err) && ifDBNotExists {
+		return nil, errSkipDBExists
+	}
+
+	syncInterval := litestream.DefaultSyncInterval
+	r, err := NewReplicaFromConfig(&ReplicaConfig{
+		URL:          replicaURL,
+		SyncInterval: &syncInterval,
+	}, nil)
 	if err != nil {
-		return err
+		return nil, err
+	}
+	opt.Generation, _, err = r.CalcRestoreTarget(ctx, *opt)
+	return r, err
+}
+
+// loadFromConfig returns a replica & updates the restore options from a DB reference.
+func (c *RestoreCommand) loadFromConfig(ctx context.Context, dbPath, configPath string, expandEnv, ifDBNotExists bool, opt *litestream.RestoreOptions) (*litestream.Replica, error) {
+	// Load configuration.
+	config, err := ReadConfigFile(configPath, expandEnv)
+	if err != nil {
+		return nil, err
 	}
 
-	// Instantiate DB.
+	// Lookup database from configuration file by path.
+	if dbPath, err = expand(dbPath); err != nil {
+		return nil, err
+	}
 	dbConfig := config.DBConfig(dbPath)
 	if dbConfig == nil {
-		return fmt.Errorf("database not found in config: %s", dbPath)
+		return nil, fmt.Errorf("database not found in config: %s", dbPath)
 	}
-	db, err := newDBFromConfig(&config, dbConfig)
+	db, err := NewDBFromConfig(dbConfig)
 	if err != nil {
-		return err
+		return nil, err
 	}
 
-	return db.Restore(ctx, opt)
+	// Restore into original database path if not specified.
+	if opt.OutputPath == "" {
+		opt.OutputPath = dbPath
+	}
+
+	// Exit successfully if the output file already exists.
+	if _, err := os.Stat(opt.OutputPath); !os.IsNotExist(err) && ifDBNotExists {
+		return nil, errSkipDBExists
+	}
+
+	// Determine the appropriate replica & generation to restore from,
+	r, generation, err := db.CalcRestoreTarget(ctx, *opt)
+	if err != nil {
+		return nil, err
+	}
+	opt.Generation = generation
+
+	return r, nil
 }
 
 // Usage prints the help screen to STDOUT.
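Both branches above hinge on isURL, which decides whether the positional argument is a replica URL or a local database path; its definition is not part of this diff. A rough, assumed sketch of what such a helper could look like (not the project's actual implementation):

package main

import (
	"fmt"
	"net/url"
)

// isURL reports whether the argument looks like a replica URL rather than a
// local database path. This is a guess at the helper's behavior: anything with
// a URL scheme and host (s3://, gcs://, sftp://, ...) is treated as a URL.
func isURL(s string) bool {
	u, err := url.Parse(s)
	return err == nil && u.Scheme != "" && u.Host != ""
}

func main() {
	fmt.Println(isURL("s3://mybkt/db"))  // true
	fmt.Println(isURL("/path/to/db.db")) // false
}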
@@ -90,7 +154,9 @@ The restore command recovers a database from a previous snapshot and WAL.
 
 Usage:
 
-	litestream restore [arguments] DB
+	litestream restore [arguments] DB_PATH
+
+	litestream restore [arguments] REPLICA_URL
 
 Arguments:
 
@@ -98,6 +164,9 @@ Arguments:
 	    Specifies the configuration file.
 	    Defaults to %s
 
+	-no-expand-env
+	    Disables environment variable expansion in configuration file.
+
 	-replica NAME
 	    Restore from a specific replica.
 	    Defaults to replica with latest data.
@@ -107,7 +176,7 @@ Arguments:
 	    Defaults to generation with latest data.
 
 	-index NUM
-	    Restore up to a specific WAL index (inclusive).
+	    Restore up to a specific hex-encoded WAL index (inclusive).
 	    Defaults to use the highest available index.
 
 	-timestamp TIMESTAMP
@@ -118,12 +187,15 @@ Arguments:
 	    Output path of the restored database.
 	    Defaults to original DB path.
 
-	-dry-run
-	    Prints all log output as if it were running but does
-	    not perform actual restore.
+	-if-db-not-exists
+	    Returns exit code of 0 if the database already exists.
 
-	-v
-	    Verbose output.
+	-if-replica-exists
+	    Returns exit code of 0 if no backups found.
+
+	-parallelism NUM
+	    Determines the number of WAL files downloaded in parallel.
+	    Defaults to `+strconv.Itoa(litestream.DefaultRestoreParallelism)+`.
 
 Examples:
@@ -147,3 +219,5 @@ Examples:
 	DefaultConfigPath(),
 )
 }
+
+var errSkipDBExists = errors.New("database already exists, skipping")
@@ -2,11 +2,10 @@ package main
 
 import (
 	"context"
-	"errors"
 	"flag"
 	"fmt"
+	"log/slog"
 	"os"
-	"path/filepath"
 	"text/tabwriter"
 	"time"
 
@@ -18,9 +17,8 @@ type SnapshotsCommand struct{}
 
 // Run executes the command.
 func (c *SnapshotsCommand) Run(ctx context.Context, args []string) (err error) {
-	var configPath string
 	fs := flag.NewFlagSet("litestream-snapshots", flag.ContinueOnError)
-	registerConfigFlag(fs, &configPath)
+	configPath, noExpandEnv := registerConfigFlag(fs)
 	replicaName := fs.String("replica", "", "replica name")
 	fs.Usage = c.Usage
 	if err := fs.Parse(args); err != nil {
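registerConfigFlag changes shape across these commands: instead of filling a caller-supplied string, it now returns pointers for both the config path and the new -no-expand-env flag. The helper's body is not shown in this section; a hedged sketch matching the new call sites might look like this:

package main

import (
	"flag"
	"fmt"
)

// registerConfigFlag is sketched here only to match the new call sites
// (configPath, noExpandEnv := registerConfigFlag(fs)); the real helper lives
// in the CLI package and may differ in defaults and descriptions.
func registerConfigFlag(fs *flag.FlagSet) (configPath *string, noExpandEnv *bool) {
	configPath = fs.String("config", "", "config path")
	noExpandEnv = fs.Bool("no-expand-env", false, "do not expand env vars in config")
	return configPath, noExpandEnv
}

func main() {
	fs := flag.NewFlagSet("example", flag.ExitOnError)
	configPath, noExpandEnv := registerConfigFlag(fs)
	_ = fs.Parse([]string{"-config", "/etc/litestream.yml", "-no-expand-env"})
	fmt.Println(*configPath, *noExpandEnv) // /etc/litestream.yml true
}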
@@ -31,58 +29,72 @@ func (c *SnapshotsCommand) Run(ctx context.Context, args []string) (err error) {
 		return fmt.Errorf("too many arguments")
 	}
 
-	// Load configuration.
-	if configPath == "" {
-		return errors.New("-config required")
-	}
-	config, err := ReadConfigFile(configPath)
-	if err != nil {
-		return err
-	}
-
-	// Determine absolute path for database.
-	dbPath, err := filepath.Abs(fs.Arg(0))
-	if err != nil {
-		return err
-	}
-
-	// Instantiate DB.
-	dbConfig := config.DBConfig(dbPath)
-	if dbConfig == nil {
-		return fmt.Errorf("database not found in config: %s", dbPath)
-	}
-	db, err := newDBFromConfig(&config, dbConfig)
-	if err != nil {
-		return err
-	}
-
-	// Find snapshots by db or replica.
-	var infos []*litestream.SnapshotInfo
-	if *replicaName != "" {
-		if r := db.Replica(*replicaName); r == nil {
-			return fmt.Errorf("replica %q not found for database %q", *replicaName, dbPath)
-		} else if infos, err = r.Snapshots(ctx); err != nil {
+	var db *litestream.DB
+	var r *litestream.Replica
+	if isURL(fs.Arg(0)) {
+		if *configPath != "" {
+			return fmt.Errorf("cannot specify a replica URL and the -config flag")
+		}
+		if r, err = NewReplicaFromConfig(&ReplicaConfig{URL: fs.Arg(0)}, nil); err != nil {
 			return err
 		}
 	} else {
-		if infos, err = db.Snapshots(ctx); err != nil {
+		if *configPath == "" {
+			*configPath = DefaultConfigPath()
+		}
+
+		// Load configuration.
+		config, err := ReadConfigFile(*configPath, !*noExpandEnv)
+		if err != nil {
 			return err
 		}
+
+		// Lookup database from configuration file by path.
+		if path, err := expand(fs.Arg(0)); err != nil {
+			return err
+		} else if dbc := config.DBConfig(path); dbc == nil {
+			return fmt.Errorf("database not found in config: %s", path)
+		} else if db, err = NewDBFromConfig(dbc); err != nil {
+			return err
+		}
+
+		// Filter by replica, if specified.
+		if *replicaName != "" {
+			if r = db.Replica(*replicaName); r == nil {
+				return fmt.Errorf("replica %q not found for database %q", *replicaName, db.Path())
+			}
+		}
 	}
 
-	// List all snapshots.
-	w := tabwriter.NewWriter(os.Stdout, 0, 8, 1, '\t', 0)
-	fmt.Fprintln(w, "replica\tgeneration\tindex\tsize\tcreated")
-	for _, info := range infos {
-		fmt.Fprintf(w, "%s\t%s\t%d\t%d\t%s\n",
-			info.Replica,
-			info.Generation,
-			info.Index,
-			info.Size,
-			info.CreatedAt.Format(time.RFC3339),
-		)
+	// Find snapshots by db or replica.
+	var replicas []*litestream.Replica
+	if r != nil {
+		replicas = []*litestream.Replica{r}
+	} else {
+		replicas = db.Replicas
 	}
-	w.Flush()
+
+	// List all snapshots.
+	w := tabwriter.NewWriter(os.Stdout, 0, 8, 2, ' ', 0)
+	defer w.Flush()
+
+	fmt.Fprintln(w, "replica\tgeneration\tindex\tsize\tcreated")
+	for _, r := range replicas {
+		infos, err := r.Snapshots(ctx)
+		if err != nil {
+			slog.Error("cannot determine snapshots", "error", err)
+			continue
+		}
+		for _, info := range infos {
+			fmt.Fprintf(w, "%s\t%s\t%d\t%d\t%s\n",
+				r.Name(),
+				info.Generation,
+				info.Index,
+				info.Size,
+				info.CreatedAt.Format(time.RFC3339),
+			)
+		}
+	}
 
 	return nil
 }
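The listing above switches the tabwriter from tab padding to two-space padding and defers the final Flush instead of calling it inline. A small standalone example of the same setup, with placeholder data:

package main

import (
	"fmt"
	"os"
	"text/tabwriter"
)

func main() {
	// Same tabwriter settings as the snapshots listing above:
	// min width 0, tab width 8, padding 2, pad with spaces, no flags.
	w := tabwriter.NewWriter(os.Stdout, 0, 8, 2, ' ', 0)
	defer w.Flush()

	fmt.Fprintln(w, "replica\tgeneration\tindex\tsize\tcreated")
	// Placeholder row; real values come from the replica's snapshot info.
	fmt.Fprintf(w, "%s\t%s\t%d\t%d\t%s\n", "s3", "xxxxxxxxxxxxxxxx", 1, 4096, "2021-02-23T03:01:29Z")
}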
@@ -90,11 +102,13 @@ func (c *SnapshotsCommand) Run(ctx context.Context, args []string) (err error) {
 // Usage prints the help screen to STDOUT.
 func (c *SnapshotsCommand) Usage() {
 	fmt.Printf(`
-The snapshots command lists all snapshots available for a database.
+The snapshots command lists all snapshots available for a database or replica.
 
 Usage:
 
-	litestream snapshots [arguments] DB
+	litestream snapshots [arguments] DB_PATH
+
+	litestream snapshots [arguments] REPLICA_URL
 
 Arguments:
 
@@ -102,10 +116,12 @@ Arguments:
 	    Specifies the configuration file.
 	    Defaults to %s
 
+	-no-expand-env
+	    Disables environment variable expansion in configuration file.
+
 	-replica NAME
 	    Optional, filter by a specific replica.
 
 Examples:
 
 	# List all snapshots for a database.
@@ -114,6 +130,9 @@ Examples:
 	# List all snapshots on S3.
 	$ litestream snapshots -replica s3 /path/to/db
 
+	# List all snapshots by replica URL.
+	$ litestream snapshots s3://mybkt/db
+
 `[1:],
 	DefaultConfigPath(),
 )
@@ -2,11 +2,9 @@ package main
 
 import (
 	"context"
-	"errors"
 	"flag"
 	"fmt"
 	"os"
-	"path/filepath"
 	"text/tabwriter"
 	"time"
 
@@ -18,9 +16,8 @@ type WALCommand struct{}
 
 // Run executes the command.
 func (c *WALCommand) Run(ctx context.Context, args []string) (err error) {
-	var configPath string
 	fs := flag.NewFlagSet("litestream-wal", flag.ContinueOnError)
-	registerConfigFlag(fs, &configPath)
+	configPath, noExpandEnv := registerConfigFlag(fs)
 	replicaName := fs.String("replica", "", "replica name")
 	generation := fs.String("generation", "", "generation name")
 	fs.Usage = c.Usage
@@ -32,63 +29,94 @@ func (c *WALCommand) Run(ctx context.Context, args []string) (err error) {
 		return fmt.Errorf("too many arguments")
 	}
 
-	// Load configuration.
-	if configPath == "" {
-		return errors.New("-config required")
-	}
-	config, err := ReadConfigFile(configPath)
-	if err != nil {
-		return err
-	}
-
-	// Determine absolute path for database.
-	dbPath, err := filepath.Abs(fs.Arg(0))
-	if err != nil {
-		return err
-	}
-
-	// Instantiate DB.
-	dbConfig := config.DBConfig(dbPath)
-	if dbConfig == nil {
-		return fmt.Errorf("database not found in config: %s", dbPath)
-	}
-	db, err := newDBFromConfig(&config, dbConfig)
-	if err != nil {
-		return err
-	}
-
-	// Find snapshots by db or replica.
-	var infos []*litestream.WALInfo
-	if *replicaName != "" {
-		if r := db.Replica(*replicaName); r == nil {
-			return fmt.Errorf("replica %q not found for database %q", *replicaName, dbPath)
-		} else if infos, err = r.WALs(ctx); err != nil {
+	var db *litestream.DB
+	var r *litestream.Replica
+	if isURL(fs.Arg(0)) {
+		if *configPath != "" {
+			return fmt.Errorf("cannot specify a replica URL and the -config flag")
+		}
+		if r, err = NewReplicaFromConfig(&ReplicaConfig{URL: fs.Arg(0)}, nil); err != nil {
 			return err
 		}
 	} else {
-		if infos, err = db.WALs(ctx); err != nil {
+		if *configPath == "" {
+			*configPath = DefaultConfigPath()
+		}
+
+		// Load configuration.
+		config, err := ReadConfigFile(*configPath, !*noExpandEnv)
+		if err != nil {
 			return err
 		}
+
+		// Lookup database from configuration file by path.
+		if path, err := expand(fs.Arg(0)); err != nil {
+			return err
+		} else if dbc := config.DBConfig(path); dbc == nil {
+			return fmt.Errorf("database not found in config: %s", path)
+		} else if db, err = NewDBFromConfig(dbc); err != nil {
+			return err
+		}
+
+		// Filter by replica, if specified.
+		if *replicaName != "" {
+			if r = db.Replica(*replicaName); r == nil {
+				return fmt.Errorf("replica %q not found for database %q", *replicaName, db.Path())
+			}
+		}
+	}
+
+	// Find WAL files by db or replica.
+	var replicas []*litestream.Replica
+	if r != nil {
+		replicas = []*litestream.Replica{r}
+	} else {
+		replicas = db.Replicas
 	}
 
 	// List all WAL files.
-	w := tabwriter.NewWriter(os.Stdout, 0, 8, 1, '\t', 0)
+	w := tabwriter.NewWriter(os.Stdout, 0, 8, 2, ' ', 0)
+	defer w.Flush()
+
 	fmt.Fprintln(w, "replica\tgeneration\tindex\toffset\tsize\tcreated")
-	for _, info := range infos {
-		if *generation != "" && info.Generation != *generation {
-			continue
+	for _, r := range replicas {
+		var generations []string
+		if *generation != "" {
+			generations = []string{*generation}
+		} else {
+			if generations, err = r.Client.Generations(ctx); err != nil {
+				r.Logger().Error("cannot determine generations", "error", err)
+				continue
+			}
 		}
 
-		fmt.Fprintf(w, "%s\t%s\t%d\t%d\t%d\t%s\n",
-			info.Replica,
-			info.Generation,
-			info.Index,
-			info.Offset,
-			info.Size,
-			info.CreatedAt.Format(time.RFC3339),
-		)
+		for _, generation := range generations {
+			if err := func() error {
+				itr, err := r.Client.WALSegments(ctx, generation)
+				if err != nil {
+					return err
+				}
+				defer itr.Close()
+
+				for itr.Next() {
+					info := itr.WALSegment()
+
+					fmt.Fprintf(w, "%s\t%s\t%x\t%d\t%d\t%s\n",
+						r.Name(),
+						info.Generation,
+						info.Index,
+						info.Offset,
+						info.Size,
+						info.CreatedAt.Format(time.RFC3339),
+					)
+				}
+				return itr.Close()
+			}(); err != nil {
+				r.Logger().Error("cannot fetch wal segments", "error", err)
+				continue
+			}
+		}
 	}
-	w.Flush()
 
 	return nil
 }
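The new listing wraps each generation's iteration in an immediately-invoked function so that defer itr.Close() runs once per generation instead of piling up until the command returns, while the explicit final Close surfaces any close error. A minimal illustration of that pattern with a hypothetical in-memory iterator:

package main

import "fmt"

// fileIterator is a stand-in for the replica client's WAL segment iterator.
type fileIterator struct {
	names []string
	pos   int
}

func (itr *fileIterator) Next() bool   { itr.pos++; return itr.pos <= len(itr.names) }
func (itr *fileIterator) Name() string { return itr.names[itr.pos-1] }
func (itr *fileIterator) Close() error { fmt.Println("iterator closed"); return nil }

func main() {
	generations := []string{"gen0", "gen1"}
	for _, generation := range generations {
		// Wrapping the body in a func scopes the defer to this iteration, so
		// each iterator is closed before the next generation is opened; the
		// explicit Close at the end returns any close error to the caller.
		if err := func() error {
			itr := &fileIterator{names: []string{generation + "/000001", generation + "/000002"}}
			defer itr.Close()

			for itr.Next() {
				fmt.Println(itr.Name())
			}
			return itr.Close()
		}(); err != nil {
			fmt.Println("error:", err)
			continue
		}
	}
}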
@@ -96,11 +124,13 @@ func (c *WALCommand) Run(ctx context.Context, args []string) (err error) {
 // Usage prints the help screen to STDOUT.
 func (c *WALCommand) Usage() {
 	fmt.Printf(`
-The wal command lists all wal files available for a database.
+The wal command lists all wal segments available for a database.
 
 Usage:
 
-	litestream wal [arguments] DB
+	litestream wal [arguments] DB_PATH
+
+	litestream wal [arguments] REPLICA_URL
 
 Arguments:
 
@@ -108,20 +138,25 @@ Arguments:
 	    Specifies the configuration file.
 	    Defaults to %s
 
+	-no-expand-env
+	    Disables environment variable expansion in configuration file.
+
 	-replica NAME
 	    Optional, filter by a specific replica.
 
 	-generation NAME
 	    Optional, filter by a specific generation.
 
 Examples:
 
-	# List all WAL files for a database.
+	# List all WAL segments for a database.
 	$ litestream wal /path/to/db
 
-	# List all WAL files on S3 for a specific generation.
-	$ litestream snapshots -replica s3 -generation xxxxxxxx /path/to/db
+	# List all WAL segments on S3 for a specific generation.
+	$ litestream wal -replica s3 -generation xxxxxxxx /path/to/db
+
+	# List all WAL segments for replica URL.
+	$ litestream wal s3://mybkt/db
 
 `[1:],
 	DefaultConfigPath(),
db_test.go (86 changed lines)
@@ -1,10 +1,11 @@
 package litestream_test
 
 import (
+	"context"
 	"database/sql"
-	"io/ioutil"
 	"os"
 	"path/filepath"
+	"strings"
 	"testing"
 	"time"
 
@@ -98,9 +99,12 @@ func TestDB_UpdatedAt(t *testing.T) {
 		t.Fatal(err)
 	}
 
+	sleepTime := 100 * time.Millisecond
 	if os.Getenv("CI") != "" {
-		time.Sleep(1 * time.Second)
+		sleepTime = 1 * time.Second
 	}
+	time.Sleep(sleepTime)
 
 	if _, err := sqldb.Exec(`CREATE TABLE t (id INT);`); err != nil {
 		t.Fatal(err)
 	}
@@ -118,7 +122,7 @@ func TestDB_CRC64(t *testing.T) {
 	t.Run("ErrNotExist", func(t *testing.T) {
 		db := MustOpenDB(t)
 		defer MustCloseDB(t, db)
-		if _, _, err := db.CRC64(); !os.IsNotExist(err) {
+		if _, _, err := db.CRC64(context.Background()); !os.IsNotExist(err) {
 			t.Fatalf("unexpected error: %#v", err)
 		}
 	})
@@ -127,11 +131,11 @@ func TestDB_CRC64(t *testing.T) {
 		db, sqldb := MustOpenDBs(t)
 		defer MustCloseDBs(t, db, sqldb)
 
-		if err := db.Sync(); err != nil {
+		if err := db.Sync(context.Background()); err != nil {
 			t.Fatal(err)
 		}
 
-		chksum0, _, err := db.CRC64()
+		chksum0, _, err := db.CRC64(context.Background())
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -139,18 +143,18 @@ func TestDB_CRC64(t *testing.T) {
 		// Issue change that is applied to the WAL. Checksum should not change.
 		if _, err := sqldb.Exec(`CREATE TABLE t (id INT);`); err != nil {
 			t.Fatal(err)
-		} else if chksum1, _, err := db.CRC64(); err != nil {
+		} else if chksum1, _, err := db.CRC64(context.Background()); err != nil {
 			t.Fatal(err)
 		} else if chksum0 == chksum1 {
 			t.Fatal("expected different checksum event after WAL change")
 		}
 
 		// Checkpoint change into database. Checksum should change.
-		if _, err := sqldb.Exec(`PRAGMA wal_checkpoint(TRUNCATE);`); err != nil {
+		if err := db.Checkpoint(context.Background(), litestream.CheckpointModeTruncate); err != nil {
 			t.Fatal(err)
 		}
 
-		if chksum2, _, err := db.CRC64(); err != nil {
+		if chksum2, _, err := db.CRC64(context.Background()); err != nil {
 			t.Fatal(err)
 		} else if chksum0 == chksum2 {
 			t.Fatal("expected different checksums after checkpoint")
@@ -164,7 +168,7 @@ func TestDB_Sync(t *testing.T) {
 	t.Run("NoDB", func(t *testing.T) {
 		db := MustOpenDB(t)
 		defer MustCloseDB(t, db)
-		if err := db.Sync(); err != nil {
+		if err := db.Sync(context.Background()); err != nil {
 			t.Fatal(err)
 		}
 	})
@@ -174,7 +178,7 @@ func TestDB_Sync(t *testing.T) {
 		db, sqldb := MustOpenDBs(t)
 		defer MustCloseDBs(t, db, sqldb)
 
-		if err := db.Sync(); err != nil {
+		if err := db.Sync(context.Background()); err != nil {
 			t.Fatal(err)
 		}
 
@@ -212,7 +216,7 @@ func TestDB_Sync(t *testing.T) {
 		}
 
 		// Perform initial sync & grab initial position.
-		if err := db.Sync(); err != nil {
+		if err := db.Sync(context.Background()); err != nil {
 			t.Fatal(err)
 		}
 
@@ -227,7 +231,7 @@ func TestDB_Sync(t *testing.T) {
 		}
 
 		// Sync to ensure position moves forward one page.
-		if err := db.Sync(); err != nil {
+		if err := db.Sync(context.Background()); err != nil {
 			t.Fatal(err)
 		} else if pos1, err := db.Pos(); err != nil {
 			t.Fatal(err)
@@ -246,7 +250,7 @@ func TestDB_Sync(t *testing.T) {
 		defer MustCloseDBs(t, db, sqldb)
 
 		// Issue initial sync and truncate WAL.
-		if err := db.Sync(); err != nil {
+		if err := db.Sync(context.Background()); err != nil {
 			t.Fatal(err)
 		}
 
@@ -257,16 +261,16 @@ func TestDB_Sync(t *testing.T) {
 		}
 
 		// Checkpoint & fully close which should close WAL file.
-		if err := db.Checkpoint(litestream.CheckpointModeTruncate); err != nil {
+		if err := db.Checkpoint(context.Background(), litestream.CheckpointModeTruncate); err != nil {
 			t.Fatal(err)
-		} else if err := db.Close(); err != nil {
+		} else if err := db.Close(context.Background()); err != nil {
 			t.Fatal(err)
 		} else if err := sqldb.Close(); err != nil {
 			t.Fatal(err)
 		}
 
-		// Verify WAL does not exist.
-		if _, err := os.Stat(db.WALPath()); !os.IsNotExist(err) {
+		// Remove WAL file.
+		if err := os.Remove(db.WALPath()); err != nil {
 			t.Fatal(err)
 		}
 
@@ -275,7 +279,7 @@ func TestDB_Sync(t *testing.T) {
 		defer MustCloseDB(t, db)
 
 		// Re-sync and ensure new generation has been created.
-		if err := db.Sync(); err != nil {
+		if err := db.Sync(context.Background()); err != nil {
 			t.Fatal(err)
 		}
 
@@ -298,7 +302,7 @@ func TestDB_Sync(t *testing.T) {
 		}
 
 		// Issue initial sync and truncate WAL.
-		if err := db.Sync(); err != nil {
+		if err := db.Sync(context.Background()); err != nil {
 			t.Fatal(err)
 		}
 
@@ -309,7 +313,7 @@ func TestDB_Sync(t *testing.T) {
 		}
 
 		// Fully close which should close WAL file.
-		if err := db.Close(); err != nil {
+		if err := db.Close(context.Background()); err != nil {
 			t.Fatal(err)
 		} else if err := sqldb.Close(); err != nil {
 			t.Fatal(err)
@@ -334,7 +338,7 @@ func TestDB_Sync(t *testing.T) {
 		defer MustCloseDB(t, db)
 
 		// Re-sync and ensure new generation has been created.
-		if err := db.Sync(); err != nil {
+		if err := db.Sync(context.Background()); err != nil {
 			t.Fatal(err)
 		}
 
@@ -354,7 +358,7 @@ func TestDB_Sync(t *testing.T) {
 		// Execute a query to force a write to the WAL and then sync.
 		if _, err := sqldb.Exec(`CREATE TABLE foo (bar TEXT);`); err != nil {
 			t.Fatal(err)
-		} else if err := db.Sync(); err != nil {
+		} else if err := db.Sync(context.Background()); err != nil {
 			t.Fatal(err)
 		}
 
@@ -362,23 +366,23 @@ func TestDB_Sync(t *testing.T) {
 		pos0, err := db.Pos()
 		if err != nil {
 			t.Fatal(err)
-		} else if err := db.Close(); err != nil {
+		} else if err := db.Close(context.Background()); err != nil {
 			t.Fatal(err)
 		}
 
 		// Read existing file, update header checksum, and write back only header
 		// to simulate a header with a mismatched checksum.
 		shadowWALPath := db.ShadowWALPath(pos0.Generation, pos0.Index)
-		if buf, err := ioutil.ReadFile(shadowWALPath); err != nil {
+		if buf, err := os.ReadFile(shadowWALPath); err != nil {
 			t.Fatal(err)
-		} else if err := ioutil.WriteFile(shadowWALPath, append(buf[:litestream.WALHeaderSize-8], 0, 0, 0, 0, 0, 0, 0, 0), 0600); err != nil {
+		} else if err := os.WriteFile(shadowWALPath, append(buf[:litestream.WALHeaderSize-8], 0, 0, 0, 0, 0, 0, 0, 0), 0600); err != nil {
 			t.Fatal(err)
 		}
 
 		// Reopen managed database & ensure sync will still work.
 		db = MustOpenDBAt(t, db.Path())
 		defer MustCloseDB(t, db)
-		if err := db.Sync(); err != nil {
+		if err := db.Sync(context.Background()); err != nil {
 			t.Fatal(err)
 		}
 
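Several of the test changes above replace io/ioutil helpers with their os equivalents, which have been the preferred spellings since ioutil was deprecated in Go 1.16. The signatures are identical, so only the package name changes at call sites, as in this minimal sketch:

package main

import (
	"fmt"
	"os"
)

func main() {
	path := "example.txt"

	// os.WriteFile / os.ReadFile replace ioutil.WriteFile / ioutil.ReadFile
	// with the same signatures and behavior.
	if err := os.WriteFile(path, []byte("hello"), 0600); err != nil {
		panic(err)
	}
	buf, err := os.ReadFile(path)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(buf))

	_ = os.Remove(path)
}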
@@ -398,7 +402,7 @@ func TestDB_Sync(t *testing.T) {
 		// Execute a query to force a write to the WAL and then sync.
 		if _, err := sqldb.Exec(`CREATE TABLE foo (bar TEXT);`); err != nil {
 			t.Fatal(err)
-		} else if err := db.Sync(); err != nil {
+		} else if err := db.Sync(context.Background()); err != nil {
 			t.Fatal(err)
 		}
 
@@ -408,7 +412,7 @@ func TestDB_Sync(t *testing.T) {
 		}
 
 		// Close & truncate shadow WAL to simulate a partial header write.
-		if err := db.Close(); err != nil {
+		if err := db.Close(context.Background()); err != nil {
 			t.Fatal(err)
 		} else if err := os.Truncate(db.ShadowWALPath(pos0.Generation, pos0.Index), litestream.WALHeaderSize-1); err != nil {
 			t.Fatal(err)
@@ -417,7 +421,7 @@ func TestDB_Sync(t *testing.T) {
 		// Reopen managed database & ensure sync will still work.
 		db = MustOpenDBAt(t, db.Path())
 		defer MustCloseDB(t, db)
-		if err := db.Sync(); err != nil {
+		if err := db.Sync(context.Background()); err != nil {
 			t.Fatal(err)
 		}
 
@@ -437,7 +441,7 @@ func TestDB_Sync(t *testing.T) {
 		// Execute a query to force a write to the WAL and then sync.
 		if _, err := sqldb.Exec(`CREATE TABLE foo (bar TEXT);`); err != nil {
 			t.Fatal(err)
-		} else if err := db.Sync(); err != nil {
+		} else if err := db.Sync(context.Background()); err != nil {
 			t.Fatal(err)
 		}
 
@@ -453,7 +457,7 @@ func TestDB_Sync(t *testing.T) {
 		}
 
 		// Close & truncate shadow WAL to simulate a partial frame write.
-		if err := db.Close(); err != nil {
+		if err := db.Close(context.Background()); err != nil {
 			t.Fatal(err)
 		} else if err := os.Truncate(db.ShadowWALPath(pos0.Generation, pos0.Index), fi.Size()-1); err != nil {
 			t.Fatal(err)
@@ -462,7 +466,7 @@ func TestDB_Sync(t *testing.T) {
 		// Reopen managed database & ensure sync will still work.
 		db = MustOpenDBAt(t, db.Path())
 		defer MustCloseDB(t, db)
-		if err := db.Sync(); err != nil {
+		if err := db.Sync(context.Background()); err != nil {
 			t.Fatal(err)
 		}
 
@@ -489,7 +493,7 @@ func TestDB_Sync(t *testing.T) {
 		// Execute a query to force a write to the WAL and then sync.
 		if _, err := sqldb.Exec(`CREATE TABLE foo (bar TEXT);`); err != nil {
 			t.Fatal(err)
-		} else if err := db.Sync(); err != nil {
+		} else if err := db.Sync(context.Background()); err != nil {
 			t.Fatal(err)
 		}
 
@@ -499,7 +503,7 @@ func TestDB_Sync(t *testing.T) {
 		}
 
 		// Close & delete shadow WAL to simulate dir created but not WAL.
-		if err := db.Close(); err != nil {
+		if err := db.Close(context.Background()); err != nil {
 			t.Fatal(err)
 		} else if err := os.Remove(db.ShadowWALPath(pos0.Generation, pos0.Index)); err != nil {
 			t.Fatal(err)
@@ -508,7 +512,7 @@ func TestDB_Sync(t *testing.T) {
 		// Reopen managed database & ensure sync will still work.
 		db = MustOpenDBAt(t, db.Path())
 		defer MustCloseDB(t, db)
-		if err := db.Sync(); err != nil {
+		if err := db.Sync(context.Background()); err != nil {
 			t.Fatal(err)
 		}
 
@@ -532,7 +536,7 @@ func TestDB_Sync(t *testing.T) {
 		// Execute a query to force a write to the WAL and then sync.
 		if _, err := sqldb.Exec(`CREATE TABLE foo (bar TEXT);`); err != nil {
 			t.Fatal(err)
-		} else if err := db.Sync(); err != nil {
+		} else if err := db.Sync(context.Background()); err != nil {
 			t.Fatal(err)
 		}
 
@@ -544,7 +548,7 @@ func TestDB_Sync(t *testing.T) {
 		}
 
 		// Sync to shadow WAL.
-		if err := db.Sync(); err != nil {
+		if err := db.Sync(context.Background()); err != nil {
 			t.Fatal(err)
 		}
 
@@ -564,7 +568,7 @@ func TestDB_Sync(t *testing.T) {
 		// Execute a query to force a write to the WAL and then sync.
 		if _, err := sqldb.Exec(`CREATE TABLE foo (bar TEXT);`); err != nil {
 			t.Fatal(err)
-		} else if err := db.Sync(); err != nil {
+		} else if err := db.Sync(context.Background()); err != nil {
 			t.Fatal(err)
 		}
 
@@ -574,7 +578,7 @@ func TestDB_Sync(t *testing.T) {
 		// Write to WAL & sync.
 		if _, err := sqldb.Exec(`INSERT INTO foo (bar) VALUES ('baz');`); err != nil {
 			t.Fatal(err)
-		} else if err := db.Sync(); err != nil {
+		} else if err := db.Sync(context.Background()); err != nil {
 			t.Fatal(err)
 		}
 
@@ -589,12 +593,14 @@ func TestDB_Sync(t *testing.T) {
 
 // MustOpenDBs returns a new instance of a DB & associated SQL DB.
 func MustOpenDBs(tb testing.TB) (*litestream.DB, *sql.DB) {
+	tb.Helper()
 	db := MustOpenDB(tb)
 	return db, MustOpenSQLDB(tb, db.Path())
 }
 
 // MustCloseDBs closes db & sqldb and removes the parent directory.
 func MustCloseDBs(tb testing.TB, db *litestream.DB, sqldb *sql.DB) {
+	tb.Helper()
 	MustCloseDB(tb, db)
 	MustCloseSQLDB(tb, sqldb)
 }
@@ -619,7 +625,7 @@ func MustOpenDBAt(tb testing.TB, path string) *litestream.DB {
 // MustCloseDB closes db and removes its parent directory.
 func MustCloseDB(tb testing.TB, db *litestream.DB) {
 	tb.Helper()
-	if err := db.Close(); err != nil {
+	if err := db.Close(context.Background()); err != nil && !strings.Contains(err.Error(), `database is closed`) {
 		tb.Fatal(err)
 	} else if err := os.RemoveAll(filepath.Dir(db.Path())); err != nil {
 		tb.Fatal(err)
etc/build.ps1 (new file, 17 lines)
@@ -0,0 +1,17 @@
+[CmdletBinding()]
+Param (
+    [Parameter(Mandatory = $true)]
+    [String] $Version
+)
+$ErrorActionPreference = "Stop"
+
+# Update working directory.
+Push-Location $PSScriptRoot
+Trap {
+    Pop-Location
+}
+
+Invoke-Expression "candle.exe -nologo -arch x64 -ext WixUtilExtension -out litestream.wixobj -dVersion=`"$Version`" litestream.wxs"
+Invoke-Expression "light.exe -nologo -spdb -ext WixUtilExtension -out `"litestream-${Version}.msi`" litestream.wixobj"
+
+Pop-Location
etc/gon.hcl (new file, 15 lines)
@@ -0,0 +1,15 @@
+source = ["./dist/litestream"]
+bundle_id = "com.middlemost.litestream"
+
+apple_id {
+  username = "benbjohnson@yahoo.com"
+  password = "@env:AC_PASSWORD"
+}
+
+sign {
+  application_identity = "Developer ID Application: Middlemost Systems, LLC"
+}
+
+zip {
+  output_path = "dist/litestream.zip"
+}
etc/litestream.wxs (new file, 89 lines)
@@ -0,0 +1,89 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Wix
+  xmlns="http://schemas.microsoft.com/wix/2006/wi"
+  xmlns:util="http://schemas.microsoft.com/wix/UtilExtension"
+>
+  <?if $(sys.BUILDARCH)=x64 ?>
+    <?define PlatformProgramFiles = "ProgramFiles64Folder" ?>
+  <?else ?>
+    <?define PlatformProgramFiles = "ProgramFilesFolder" ?>
+  <?endif ?>
+
+  <Product
+    Id="*"
+    UpgradeCode="5371367e-58b3-4e52-be0d-46945eb71ce6"
+    Name="Litestream"
+    Version="$(var.Version)"
+    Manufacturer="Litestream"
+    Language="1033"
+    Codepage="1252"
+  >
+    <Package
+      Id="*"
+      Manufacturer="Litestream"
+      InstallScope="perMachine"
+      InstallerVersion="500"
+      Description="Litestream $(var.Version) installer"
+      Compressed="yes"
+    />
+
+    <Media Id="1" Cabinet="litestream.cab" EmbedCab="yes"/>
+
+    <MajorUpgrade
+      Schedule="afterInstallInitialize"
+      DowngradeErrorMessage="A later version of [ProductName] is already installed. Setup will now exit."
+    />
+
+    <Directory Id="TARGETDIR" Name="SourceDir">
+      <Directory Id="$(var.PlatformProgramFiles)">
+        <Directory Id="APPLICATIONROOTDIRECTORY" Name="Litestream"/>
+      </Directory>
+    </Directory>
+
+    <ComponentGroup Id="Files">
+      <Component Directory="APPLICATIONROOTDIRECTORY">
+        <File
+          Id="litestream.exe"
+          Name="litestream.exe"
+          Source="litestream.exe"
+          KeyPath="yes"
+        />
+
+        <ServiceInstall
+          Id="InstallService"
+          Name="Litestream"
+          DisplayName="Litestream"
+          Description="Replicates SQLite databases"
+          ErrorControl="normal"
+          Start="auto"
+          Type="ownProcess"
+        >
+          <util:ServiceConfig
+            FirstFailureActionType="restart"
+            SecondFailureActionType="restart"
+            ThirdFailureActionType="restart"
+            RestartServiceDelayInSeconds="60"
+          />
+          <ServiceDependency Id="wmiApSrv" />
+        </ServiceInstall>
+
+        <ServiceControl
+          Id="ServiceStateControl"
+          Name="Litestream"
+          Remove="uninstall"
+          Start="install"
+          Stop="both"
+        />
+        <util:EventSource
+          Log="Application"
+          Name="Litestream"
+          EventMessageFile="%SystemRoot%\System32\EventCreate.exe"
+        />
+      </Component>
+    </ComponentGroup>
+
+    <Feature Id="DefaultFeature" Level="1">
+      <ComponentGroupRef Id="Files" />
+    </Feature>
+  </Product>
+</Wix>
@@ -6,5 +6,4 @@
 # - path: /path/to/primary/db # Database to replicate from
 # replicas:
 # - path: /path/to/replica # File-based replication
-# - path: s3://my.bucket.com/db # S3-based replication
+# - url: s3://my.bucket.com/db # S3-based replication
@@ -1,13 +1,13 @@
 name: litestream
-arch: amd64
-platform: linux
+arch: "${GOARCH}"
+platform: "${GOOS}"
 version: "${LITESTREAM_VERSION}"
 section: "default"
 priority: "extra"
 maintainer: "Ben Johnson <benbjohnson@yahoo.com>"
 description: Litestream is a tool for real-time replication of SQLite databases.
 homepage: "https://github.com/benbjohnson/litestream"
-license: "GPLv3"
+license: "Apache 2"
 contents:
 - src: ./litestream
   dst: /usr/bin/litestream
35  etc/s3_mock.py  Executable file
@@ -0,0 +1,35 @@
#!/usr/bin/env python3
import sys
import os
import time
from moto.server import ThreadedMotoServer
import boto3
import subprocess

cmd = sys.argv[1:]
if len(cmd) == 0:
    print(f"usage: {sys.argv[0]} <command> [arguments]", file=sys.stderr)
    sys.exit(1)

env = os.environ.copy() | {
    "LITESTREAM_S3_ACCESS_KEY_ID": "lite",
    "LITESTREAM_S3_SECRET_ACCESS_KEY": "stream",
    "LITESTREAM_S3_BUCKET": f"test{int(time.time())}",
    "LITESTREAM_S3_ENDPOINT": "http://127.0.0.1:5000",
    "LITESTREAM_S3_FORCE_PATH_STYLE": "true",
}

server = ThreadedMotoServer()
server.start()

s3 = boto3.client(
    "s3",
    aws_access_key_id=env["LITESTREAM_S3_ACCESS_KEY_ID"],
    aws_secret_access_key=env["LITESTREAM_S3_SECRET_ACCESS_KEY"],
    endpoint_url=env["LITESTREAM_S3_ENDPOINT"]
).create_bucket(Bucket=env["LITESTREAM_S3_BUCKET"])

proc = subprocess.run(cmd, env=env)

server.stop()
sys.exit(proc.returncode)
375  file/replica_client.go  Normal file
@@ -0,0 +1,375 @@
package file

import (
	"context"
	"fmt"
	"io"
	"os"
	"path/filepath"

	"github.com/benbjohnson/litestream"
	"github.com/benbjohnson/litestream/internal"
)

// ReplicaClientType is the client type for this package.
const ReplicaClientType = "file"

var _ litestream.ReplicaClient = (*ReplicaClient)(nil)

// ReplicaClient is a client for writing snapshots & WAL segments to disk.
type ReplicaClient struct {
	path string // destination path

	Replica *litestream.Replica
}

// NewReplicaClient returns a new instance of ReplicaClient.
func NewReplicaClient(path string) *ReplicaClient {
	return &ReplicaClient{
		path: path,
	}
}

// db returns the database, if available.
func (c *ReplicaClient) db() *litestream.DB {
	if c.Replica == nil {
		return nil
	}
	return c.Replica.DB()
}

// Type returns "file" as the client type.
func (c *ReplicaClient) Type() string {
	return ReplicaClientType
}

// Path returns the destination path to replicate the database to.
func (c *ReplicaClient) Path() string {
	return c.path
}

// GenerationsDir returns the path to a generation root directory.
func (c *ReplicaClient) GenerationsDir() (string, error) {
	if c.path == "" {
		return "", fmt.Errorf("file replica path required")
	}
	return filepath.Join(c.path, "generations"), nil
}

// GenerationDir returns the path to a generation's root directory.
func (c *ReplicaClient) GenerationDir(generation string) (string, error) {
	dir, err := c.GenerationsDir()
	if err != nil {
		return "", err
	} else if generation == "" {
		return "", fmt.Errorf("generation required")
	}
	return filepath.Join(dir, generation), nil
}

// SnapshotsDir returns the path to a generation's snapshot directory.
func (c *ReplicaClient) SnapshotsDir(generation string) (string, error) {
	dir, err := c.GenerationDir(generation)
	if err != nil {
		return "", err
	}
	return filepath.Join(dir, "snapshots"), nil
}

// SnapshotPath returns the path to an uncompressed snapshot file.
func (c *ReplicaClient) SnapshotPath(generation string, index int) (string, error) {
	dir, err := c.SnapshotsDir(generation)
	if err != nil {
		return "", err
	}
	return filepath.Join(dir, litestream.FormatSnapshotPath(index)), nil
}

// WALDir returns the path to a generation's WAL directory.
func (c *ReplicaClient) WALDir(generation string) (string, error) {
	dir, err := c.GenerationDir(generation)
	if err != nil {
		return "", err
	}
	return filepath.Join(dir, "wal"), nil
}

// WALSegmentPath returns the path to a WAL segment file.
func (c *ReplicaClient) WALSegmentPath(generation string, index int, offset int64) (string, error) {
	dir, err := c.WALDir(generation)
	if err != nil {
		return "", err
	}
	return filepath.Join(dir, litestream.FormatWALSegmentPath(index, offset)), nil
}

// Generations returns a list of available generation names.
func (c *ReplicaClient) Generations(ctx context.Context) ([]string, error) {
	root, err := c.GenerationsDir()
	if err != nil {
		return nil, fmt.Errorf("cannot determine generations path: %w", err)
	}

	fis, err := os.ReadDir(root)
	if os.IsNotExist(err) {
		return nil, nil
	} else if err != nil {
		return nil, err
	}

	var generations []string
	for _, fi := range fis {
		if !litestream.IsGenerationName(fi.Name()) {
			continue
		} else if !fi.IsDir() {
			continue
		}
		generations = append(generations, fi.Name())
	}
	return generations, nil
}

// DeleteGeneration deletes all snapshots & WAL segments within a generation.
func (c *ReplicaClient) DeleteGeneration(ctx context.Context, generation string) error {
	dir, err := c.GenerationDir(generation)
	if err != nil {
		return fmt.Errorf("cannot determine generation path: %w", err)
	}

	if err := os.RemoveAll(dir); err != nil && !os.IsNotExist(err) {
		return err
	}
	return nil
}

// Snapshots returns an iterator over all available snapshots for a generation.
func (c *ReplicaClient) Snapshots(ctx context.Context, generation string) (litestream.SnapshotIterator, error) {
	dir, err := c.SnapshotsDir(generation)
	if err != nil {
		return nil, fmt.Errorf("cannot determine snapshots path: %w", err)
	}

	f, err := os.Open(dir)
	if os.IsNotExist(err) {
		return litestream.NewSnapshotInfoSliceIterator(nil), nil
	} else if err != nil {
		return nil, err
	}
	defer f.Close()

	fis, err := f.Readdir(-1)
	if err != nil {
		return nil, err
	}

	// Iterate over every file and convert to metadata.
	infos := make([]litestream.SnapshotInfo, 0, len(fis))
	for _, fi := range fis {
		// Parse index from filename.
		index, err := litestream.ParseSnapshotPath(fi.Name())
		if err != nil {
			continue
		}

		infos = append(infos, litestream.SnapshotInfo{
			Generation: generation,
			Index:      index,
			Size:       fi.Size(),
			CreatedAt:  fi.ModTime().UTC(),
		})
	}

	return litestream.NewSnapshotInfoSliceIterator(infos), nil
}

// WriteSnapshot writes LZ4 compressed data from rd into a file on disk.
func (c *ReplicaClient) WriteSnapshot(ctx context.Context, generation string, index int, rd io.Reader) (info litestream.SnapshotInfo, err error) {
	filename, err := c.SnapshotPath(generation, index)
	if err != nil {
		return info, fmt.Errorf("cannot determine snapshot path: %w", err)
	}

	var fileInfo, dirInfo os.FileInfo
	if db := c.db(); db != nil {
		fileInfo, dirInfo = db.FileInfo(), db.DirInfo()
	}

	// Ensure parent directory exists.
	if err := internal.MkdirAll(filepath.Dir(filename), dirInfo); err != nil {
		return info, err
	}

	// Write snapshot to temporary file next to destination path.
	f, err := internal.CreateFile(filename+".tmp", fileInfo)
	if err != nil {
		return info, err
	}
	defer f.Close()

	if _, err := io.Copy(f, rd); err != nil {
		return info, err
	} else if err := f.Sync(); err != nil {
		return info, err
	} else if err := f.Close(); err != nil {
		return info, err
	}

	// Build metadata.
	fi, err := os.Stat(filename + ".tmp")
	if err != nil {
		return info, err
	}
	info = litestream.SnapshotInfo{
		Generation: generation,
		Index:      index,
		Size:       fi.Size(),
		CreatedAt:  fi.ModTime().UTC(),
	}

	// Move snapshot to final path when it has been fully written & synced to disk.
	if err := os.Rename(filename+".tmp", filename); err != nil {
		return info, err
	}

	return info, nil
}

// SnapshotReader returns a reader for snapshot data at the given generation/index.
// Returns os.ErrNotExist if no matching index is found.
func (c *ReplicaClient) SnapshotReader(ctx context.Context, generation string, index int) (io.ReadCloser, error) {
	filename, err := c.SnapshotPath(generation, index)
	if err != nil {
		return nil, fmt.Errorf("cannot determine snapshot path: %w", err)
	}
	return os.Open(filename)
}

// DeleteSnapshot deletes a snapshot with the given generation & index.
func (c *ReplicaClient) DeleteSnapshot(ctx context.Context, generation string, index int) error {
	filename, err := c.SnapshotPath(generation, index)
	if err != nil {
		return fmt.Errorf("cannot determine snapshot path: %w", err)
	}
	if err := os.Remove(filename); err != nil && !os.IsNotExist(err) {
		return err
	}
	return nil
}

// WALSegments returns an iterator over all available WAL files for a generation.
func (c *ReplicaClient) WALSegments(ctx context.Context, generation string) (litestream.WALSegmentIterator, error) {
	dir, err := c.WALDir(generation)
	if err != nil {
		return nil, fmt.Errorf("cannot determine wal path: %w", err)
	}

	f, err := os.Open(dir)
	if os.IsNotExist(err) {
		return litestream.NewWALSegmentInfoSliceIterator(nil), nil
	} else if err != nil {
		return nil, err
	}
	defer f.Close()

	fis, err := f.Readdir(-1)
	if err != nil {
		return nil, err
	}

	// Iterate over every file and convert to metadata.
	infos := make([]litestream.WALSegmentInfo, 0, len(fis))
	for _, fi := range fis {
		// Parse index from filename.
		index, offset, err := litestream.ParseWALSegmentPath(fi.Name())
		if err != nil {
			continue
		}

		infos = append(infos, litestream.WALSegmentInfo{
			Generation: generation,
			Index:      index,
			Offset:     offset,
			Size:       fi.Size(),
			CreatedAt:  fi.ModTime().UTC(),
		})
	}

	return litestream.NewWALSegmentInfoSliceIterator(infos), nil
}

// WriteWALSegment writes LZ4 compressed data from rd into a file on disk.
func (c *ReplicaClient) WriteWALSegment(ctx context.Context, pos litestream.Pos, rd io.Reader) (info litestream.WALSegmentInfo, err error) {
	filename, err := c.WALSegmentPath(pos.Generation, pos.Index, pos.Offset)
	if err != nil {
		return info, fmt.Errorf("cannot determine wal segment path: %w", err)
	}

	var fileInfo, dirInfo os.FileInfo
	if db := c.db(); db != nil {
		fileInfo, dirInfo = db.FileInfo(), db.DirInfo()
	}

	// Ensure parent directory exists.
	if err := internal.MkdirAll(filepath.Dir(filename), dirInfo); err != nil {
		return info, err
	}

	// Write WAL segment to temporary file next to destination path.
	f, err := internal.CreateFile(filename+".tmp", fileInfo)
	if err != nil {
		return info, err
	}
	defer f.Close()

	if _, err := io.Copy(f, rd); err != nil {
		return info, err
	} else if err := f.Sync(); err != nil {
		return info, err
	} else if err := f.Close(); err != nil {
		return info, err
	}

	// Build metadata.
	fi, err := os.Stat(filename + ".tmp")
	if err != nil {
		return info, err
	}
	info = litestream.WALSegmentInfo{
		Generation: pos.Generation,
		Index:      pos.Index,
		Offset:     pos.Offset,
		Size:       fi.Size(),
		CreatedAt:  fi.ModTime().UTC(),
	}

	// Move WAL segment to final path when it has been written & synced to disk.
	if err := os.Rename(filename+".tmp", filename); err != nil {
		return info, err
	}

	return info, nil
}

// WALSegmentReader returns a reader for a section of WAL data at the given position.
// Returns os.ErrNotExist if no matching index/offset is found.
func (c *ReplicaClient) WALSegmentReader(ctx context.Context, pos litestream.Pos) (io.ReadCloser, error) {
	filename, err := c.WALSegmentPath(pos.Generation, pos.Index, pos.Offset)
	if err != nil {
		return nil, fmt.Errorf("cannot determine wal segment path: %w", err)
	}
	return os.Open(filename)
}

// DeleteWALSegments deletes WAL segments at the given positions.
func (c *ReplicaClient) DeleteWALSegments(ctx context.Context, a []litestream.Pos) error {
	for _, pos := range a {
		filename, err := c.WALSegmentPath(pos.Generation, pos.Index, pos.Offset)
		if err != nil {
			return fmt.Errorf("cannot determine wal segment path: %w", err)
		}
		if err := os.Remove(filename); err != nil && !os.IsNotExist(err) {
			return err
		}
	}
	return nil
}
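A minimal usage sketch for the client above, assuming a *litestream.DB that has already been opened elsewhere. The wiring mirrors the (commented-out) replica sync test later in this changeset; the helper name and replica name are illustrative:

// attachFileReplica replicates db into dst using the file-based client.
func attachFileReplica(db *litestream.DB, dst string) *litestream.Replica {
	client := file.NewReplicaClient(dst)
	r := litestream.NewReplica(db, "file", client)
	client.Replica = r // lets the client reuse the DB's file & directory mode when creating paths
	db.Replicas = append(db.Replicas, r)
	return r
}

Calling r.Sync(ctx) after db.Sync(ctx) then lands new snapshots and WAL segments under <dst>/generations/<generation>/snapshots and .../wal respectively.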
223  file/replica_client_test.go  Normal file
@@ -0,0 +1,223 @@
package file_test

import (
	"testing"

	"github.com/benbjohnson/litestream/file"
)

func TestReplicaClient_Path(t *testing.T) {
	c := file.NewReplicaClient("/foo/bar")
	if got, want := c.Path(), "/foo/bar"; got != want {
		t.Fatalf("Path()=%v, want %v", got, want)
	}
}

func TestReplicaClient_Type(t *testing.T) {
	if got, want := file.NewReplicaClient("").Type(), "file"; got != want {
		t.Fatalf("Type()=%v, want %v", got, want)
	}
}

func TestReplicaClient_GenerationsDir(t *testing.T) {
	t.Run("OK", func(t *testing.T) {
		if got, err := file.NewReplicaClient("/foo").GenerationsDir(); err != nil {
			t.Fatal(err)
		} else if want := "/foo/generations"; got != want {
			t.Fatalf("GenerationsDir()=%v, want %v", got, want)
		}
	})
	t.Run("ErrNoPath", func(t *testing.T) {
		if _, err := file.NewReplicaClient("").GenerationsDir(); err == nil || err.Error() != `file replica path required` {
			t.Fatalf("unexpected error: %v", err)
		}
	})
}

func TestReplicaClient_GenerationDir(t *testing.T) {
	t.Run("OK", func(t *testing.T) {
		if got, err := file.NewReplicaClient("/foo").GenerationDir("0123456701234567"); err != nil {
			t.Fatal(err)
		} else if want := "/foo/generations/0123456701234567"; got != want {
			t.Fatalf("GenerationDir()=%v, want %v", got, want)
		}
	})
	t.Run("ErrNoPath", func(t *testing.T) {
		if _, err := file.NewReplicaClient("").GenerationDir("0123456701234567"); err == nil || err.Error() != `file replica path required` {
			t.Fatalf("unexpected error: %v", err)
		}
	})
	t.Run("ErrNoGeneration", func(t *testing.T) {
		if _, err := file.NewReplicaClient("/foo").GenerationDir(""); err == nil || err.Error() != `generation required` {
			t.Fatalf("unexpected error: %v", err)
		}
	})
}

func TestReplicaClient_SnapshotsDir(t *testing.T) {
	t.Run("OK", func(t *testing.T) {
		if got, err := file.NewReplicaClient("/foo").SnapshotsDir("0123456701234567"); err != nil {
			t.Fatal(err)
		} else if want := "/foo/generations/0123456701234567/snapshots"; got != want {
			t.Fatalf("SnapshotsDir()=%v, want %v", got, want)
		}
	})
	t.Run("ErrNoPath", func(t *testing.T) {
		if _, err := file.NewReplicaClient("").SnapshotsDir("0123456701234567"); err == nil || err.Error() != `file replica path required` {
			t.Fatalf("unexpected error: %v", err)
		}
	})
	t.Run("ErrNoGeneration", func(t *testing.T) {
		if _, err := file.NewReplicaClient("/foo").SnapshotsDir(""); err == nil || err.Error() != `generation required` {
			t.Fatalf("unexpected error: %v", err)
		}
	})
}

func TestReplicaClient_SnapshotPath(t *testing.T) {
	t.Run("OK", func(t *testing.T) {
		if got, err := file.NewReplicaClient("/foo").SnapshotPath("0123456701234567", 1000); err != nil {
			t.Fatal(err)
		} else if want := "/foo/generations/0123456701234567/snapshots/000003e8.snapshot.lz4"; got != want {
			t.Fatalf("SnapshotPath()=%v, want %v", got, want)
		}
	})
	t.Run("ErrNoPath", func(t *testing.T) {
		if _, err := file.NewReplicaClient("").SnapshotPath("0123456701234567", 1000); err == nil || err.Error() != `file replica path required` {
			t.Fatalf("unexpected error: %v", err)
		}
	})
	t.Run("ErrNoGeneration", func(t *testing.T) {
		if _, err := file.NewReplicaClient("/foo").SnapshotPath("", 1000); err == nil || err.Error() != `generation required` {
			t.Fatalf("unexpected error: %v", err)
		}
	})
}

func TestReplicaClient_WALDir(t *testing.T) {
	t.Run("OK", func(t *testing.T) {
		if got, err := file.NewReplicaClient("/foo").WALDir("0123456701234567"); err != nil {
			t.Fatal(err)
		} else if want := "/foo/generations/0123456701234567/wal"; got != want {
			t.Fatalf("WALDir()=%v, want %v", got, want)
		}
	})
	t.Run("ErrNoPath", func(t *testing.T) {
		if _, err := file.NewReplicaClient("").WALDir("0123456701234567"); err == nil || err.Error() != `file replica path required` {
			t.Fatalf("unexpected error: %v", err)
		}
	})
	t.Run("ErrNoGeneration", func(t *testing.T) {
		if _, err := file.NewReplicaClient("/foo").WALDir(""); err == nil || err.Error() != `generation required` {
			t.Fatalf("unexpected error: %v", err)
		}
	})
}

func TestReplicaClient_WALSegmentPath(t *testing.T) {
	t.Run("OK", func(t *testing.T) {
		if got, err := file.NewReplicaClient("/foo").WALSegmentPath("0123456701234567", 1000, 1001); err != nil {
			t.Fatal(err)
		} else if want := "/foo/generations/0123456701234567/wal/000003e8_000003e9.wal.lz4"; got != want {
			t.Fatalf("WALPath()=%v, want %v", got, want)
		}
	})
	t.Run("ErrNoPath", func(t *testing.T) {
		if _, err := file.NewReplicaClient("").WALSegmentPath("0123456701234567", 1000, 0); err == nil || err.Error() != `file replica path required` {
			t.Fatalf("unexpected error: %v", err)
		}
	})
	t.Run("ErrNoGeneration", func(t *testing.T) {
		if _, err := file.NewReplicaClient("/foo").WALSegmentPath("", 1000, 0); err == nil || err.Error() != `generation required` {
			t.Fatalf("unexpected error: %v", err)
		}
	})
}

/*
func TestReplica_Sync(t *testing.T) {
	// Ensure replica can successfully sync after DB has sync'd.
	t.Run("InitialSync", func(t *testing.T) {
		db, sqldb := MustOpenDBs(t)
		defer MustCloseDBs(t, db, sqldb)

		r := litestream.NewReplica(db, "", file.NewReplicaClient(t.TempDir()))
		r.MonitorEnabled = false
		db.Replicas = []*litestream.Replica{r}

		// Sync database & then sync replica.
		if err := db.Sync(context.Background()); err != nil {
			t.Fatal(err)
		} else if err := r.Sync(context.Background()); err != nil {
			t.Fatal(err)
		}

		// Ensure positions match.
		if want, err := db.Pos(); err != nil {
			t.Fatal(err)
		} else if got, err := r.Pos(context.Background()); err != nil {
			t.Fatal(err)
		} else if got != want {
			t.Fatalf("Pos()=%v, want %v", got, want)
		}
	})

	// Ensure replica can successfully sync multiple times.
	t.Run("MultiSync", func(t *testing.T) {
		db, sqldb := MustOpenDBs(t)
		defer MustCloseDBs(t, db, sqldb)

		r := litestream.NewReplica(db, "", file.NewReplicaClient(t.TempDir()))
		r.MonitorEnabled = false
		db.Replicas = []*litestream.Replica{r}

		if _, err := sqldb.Exec(`CREATE TABLE foo (bar TEXT);`); err != nil {
			t.Fatal(err)
		}

		// Write to the database multiple times and sync after each write.
		for i, n := 0, db.MinCheckpointPageN*2; i < n; i++ {
			if _, err := sqldb.Exec(`INSERT INTO foo (bar) VALUES ('baz')`); err != nil {
				t.Fatal(err)
			}

			// Sync periodically.
			if i%100 == 0 || i == n-1 {
				if err := db.Sync(context.Background()); err != nil {
					t.Fatal(err)
				} else if err := r.Sync(context.Background()); err != nil {
					t.Fatal(err)
				}
			}
		}

		// Ensure positions match.
		pos, err := db.Pos()
		if err != nil {
			t.Fatal(err)
		} else if got, want := pos.Index, 2; got != want {
			t.Fatalf("Index=%v, want %v", got, want)
		}

		if want, err := r.Pos(context.Background()); err != nil {
			t.Fatal(err)
		} else if got := pos; got != want {
			t.Fatalf("Pos()=%v, want %v", got, want)
		}
	})

	// Ensure replica returns an error if there is no generation available from the DB.
	t.Run("ErrNoGeneration", func(t *testing.T) {
		db, sqldb := MustOpenDBs(t)
		defer MustCloseDBs(t, db, sqldb)

		r := litestream.NewReplica(db, "", file.NewReplicaClient(t.TempDir()))
		r.MonitorEnabled = false
		db.Replicas = []*litestream.Replica{r}

		if err := r.Sync(context.Background()); err == nil || err.Error() != `no generation, waiting for data` {
			t.Fatal(err)
		}
	})
}
*/
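The on-disk layout these tests pin down can also be read directly off the path helpers; a small sketch, with a purely illustrative destination path:

c := file.NewReplicaClient("/var/backups/db")

p, _ := c.SnapshotPath("0123456701234567", 1000)
// p == "/var/backups/db/generations/0123456701234567/snapshots/000003e8.snapshot.lz4"

w, _ := c.WALSegmentPath("0123456701234567", 1000, 1001)
// w == "/var/backups/db/generations/0123456701234567/wal/000003e8_000003e9.wal.lz4"

Snapshot indexes and WAL index/offset pairs are encoded as zero-padded hexadecimal, so lexical ordering of filenames matches replication order.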
428  gcs/replica_client.go  Normal file
@@ -0,0 +1,428 @@
package gcs

import (
	"context"
	"fmt"
	"io"
	"os"
	"path"
	"strings"
	"sync"
	"time"

	"cloud.google.com/go/storage"
	"github.com/benbjohnson/litestream"
	"github.com/benbjohnson/litestream/internal"
	"google.golang.org/api/iterator"
)

// ReplicaClientType is the client type for this package.
const ReplicaClientType = "gcs"

var _ litestream.ReplicaClient = (*ReplicaClient)(nil)

// ReplicaClient is a client for writing snapshots & WAL segments to Google Cloud Storage.
type ReplicaClient struct {
	mu     sync.Mutex
	client *storage.Client       // gcs client
	bkt    *storage.BucketHandle // gcs bucket handle

	// GCS bucket information
	Bucket string
	Path   string
}

// NewReplicaClient returns a new instance of ReplicaClient.
func NewReplicaClient() *ReplicaClient {
	return &ReplicaClient{}
}

// Type returns "gcs" as the client type.
func (c *ReplicaClient) Type() string {
	return ReplicaClientType
}

// Init initializes the connection to GCS. No-op if already initialized.
func (c *ReplicaClient) Init(ctx context.Context) (err error) {
	c.mu.Lock()
	defer c.mu.Unlock()

	if c.client != nil {
		return nil
	}

	if c.client, err = storage.NewClient(ctx); err != nil {
		return err
	}
	c.bkt = c.client.Bucket(c.Bucket)

	return nil
}

// Generations returns a list of available generation names.
func (c *ReplicaClient) Generations(ctx context.Context) ([]string, error) {
	if err := c.Init(ctx); err != nil {
		return nil, err
	}

	// Construct query to only pull generation directory names.
	query := &storage.Query{
		Delimiter: "/",
		Prefix:    litestream.GenerationsPath(c.Path) + "/",
	}

	// Loop over results and only build list of generation-formatted names.
	it := c.bkt.Objects(ctx, query)
	var generations []string
	for {
		attrs, err := it.Next()
		if err == iterator.Done {
			break
		} else if err != nil {
			return nil, err
		}

		name := path.Base(strings.TrimSuffix(attrs.Prefix, "/"))
		if !litestream.IsGenerationName(name) {
			continue
		}
		generations = append(generations, name)
	}

	return generations, nil
}

// DeleteGeneration deletes all snapshots & WAL segments within a generation.
func (c *ReplicaClient) DeleteGeneration(ctx context.Context, generation string) error {
	if err := c.Init(ctx); err != nil {
		return err
	}

	dir, err := litestream.GenerationPath(c.Path, generation)
	if err != nil {
		return fmt.Errorf("cannot determine generation path: %w", err)
	}

	// Iterate over every object in generation and delete it.
	internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "LIST").Inc()
	for it := c.bkt.Objects(ctx, &storage.Query{Prefix: dir + "/"}); ; {
		attrs, err := it.Next()
		if err == iterator.Done {
			break
		} else if err != nil {
			return err
		}

		if err := c.bkt.Object(attrs.Name).Delete(ctx); isNotExists(err) {
			continue
		} else if err != nil {
			return fmt.Errorf("cannot delete object %q: %w", attrs.Name, err)
		}
		internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "DELETE").Inc()
	}

	// log.Printf("%s(%s): retainer: deleting generation: %s", r.db.Path(), r.Name(), generation)

	return nil
}

// Snapshots returns an iterator over all available snapshots for a generation.
func (c *ReplicaClient) Snapshots(ctx context.Context, generation string) (litestream.SnapshotIterator, error) {
	if err := c.Init(ctx); err != nil {
		return nil, err
	}
	dir, err := litestream.SnapshotsPath(c.Path, generation)
	if err != nil {
		return nil, fmt.Errorf("cannot determine snapshots path: %w", err)
	}
	return newSnapshotIterator(generation, c.bkt.Objects(ctx, &storage.Query{Prefix: dir + "/"})), nil
}

// WriteSnapshot writes LZ4 compressed data from rd to the object storage.
func (c *ReplicaClient) WriteSnapshot(ctx context.Context, generation string, index int, rd io.Reader) (info litestream.SnapshotInfo, err error) {
	if err := c.Init(ctx); err != nil {
		return info, err
	}

	key, err := litestream.SnapshotPath(c.Path, generation, index)
	if err != nil {
		return info, fmt.Errorf("cannot determine snapshot path: %w", err)
	}
	startTime := time.Now()

	w := c.bkt.Object(key).NewWriter(ctx)
	defer w.Close()

	n, err := io.Copy(w, rd)
	if err != nil {
		return info, err
	} else if err := w.Close(); err != nil {
		return info, err
	}

	internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "PUT").Inc()
	internal.OperationBytesCounterVec.WithLabelValues(ReplicaClientType, "PUT").Add(float64(n))

	// log.Printf("%s(%s): snapshot: creating %s/%08x t=%s", r.db.Path(), r.Name(), generation, index, time.Since(startTime).Truncate(time.Millisecond))

	return litestream.SnapshotInfo{
		Generation: generation,
		Index:      index,
		Size:       n,
		CreatedAt:  startTime.UTC(),
	}, nil
}

// SnapshotReader returns a reader for snapshot data at the given generation/index.
func (c *ReplicaClient) SnapshotReader(ctx context.Context, generation string, index int) (io.ReadCloser, error) {
	if err := c.Init(ctx); err != nil {
		return nil, err
	}

	key, err := litestream.SnapshotPath(c.Path, generation, index)
	if err != nil {
		return nil, fmt.Errorf("cannot determine snapshot path: %w", err)
	}

	r, err := c.bkt.Object(key).NewReader(ctx)
	if isNotExists(err) {
		return nil, os.ErrNotExist
	} else if err != nil {
		return nil, fmt.Errorf("cannot start new reader for %q: %w", key, err)
	}

	internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "GET").Inc()
	internal.OperationBytesCounterVec.WithLabelValues(ReplicaClientType, "GET").Add(float64(r.Attrs.Size))

	return r, nil
}

// DeleteSnapshot deletes a snapshot with the given generation & index.
func (c *ReplicaClient) DeleteSnapshot(ctx context.Context, generation string, index int) error {
	if err := c.Init(ctx); err != nil {
		return err
	}

	key, err := litestream.SnapshotPath(c.Path, generation, index)
	if err != nil {
		return fmt.Errorf("cannot determine snapshot path: %w", err)
	}

	if err := c.bkt.Object(key).Delete(ctx); err != nil && !isNotExists(err) {
		return fmt.Errorf("cannot delete snapshot %q: %w", key, err)
	}

	internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "DELETE").Inc()
	return nil
}

// WALSegments returns an iterator over all available WAL files for a generation.
func (c *ReplicaClient) WALSegments(ctx context.Context, generation string) (litestream.WALSegmentIterator, error) {
	if err := c.Init(ctx); err != nil {
		return nil, err
	}
	dir, err := litestream.WALPath(c.Path, generation)
	if err != nil {
		return nil, fmt.Errorf("cannot determine wal path: %w", err)
	}
	return newWALSegmentIterator(generation, c.bkt.Objects(ctx, &storage.Query{Prefix: dir + "/"})), nil
}

// WriteWALSegment writes LZ4 compressed data from rd to the object storage.
func (c *ReplicaClient) WriteWALSegment(ctx context.Context, pos litestream.Pos, rd io.Reader) (info litestream.WALSegmentInfo, err error) {
	if err := c.Init(ctx); err != nil {
		return info, err
	}

	key, err := litestream.WALSegmentPath(c.Path, pos.Generation, pos.Index, pos.Offset)
	if err != nil {
		return info, fmt.Errorf("cannot determine wal segment path: %w", err)
	}
	startTime := time.Now()

	w := c.bkt.Object(key).NewWriter(ctx)
	defer w.Close()

	n, err := io.Copy(w, rd)
	if err != nil {
		return info, err
	} else if err := w.Close(); err != nil {
		return info, err
	}

	internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "PUT").Inc()
	internal.OperationBytesCounterVec.WithLabelValues(ReplicaClientType, "PUT").Add(float64(n))

	return litestream.WALSegmentInfo{
		Generation: pos.Generation,
		Index:      pos.Index,
		Offset:     pos.Offset,
		Size:       n,
		CreatedAt:  startTime.UTC(),
	}, nil
}

// WALSegmentReader returns a reader for a section of WAL data at the given index.
// Returns os.ErrNotExist if no matching index/offset is found.
func (c *ReplicaClient) WALSegmentReader(ctx context.Context, pos litestream.Pos) (io.ReadCloser, error) {
	if err := c.Init(ctx); err != nil {
		return nil, err
	}

	key, err := litestream.WALSegmentPath(c.Path, pos.Generation, pos.Index, pos.Offset)
	if err != nil {
		return nil, fmt.Errorf("cannot determine wal segment path: %w", err)
	}

	r, err := c.bkt.Object(key).NewReader(ctx)
	if isNotExists(err) {
		return nil, os.ErrNotExist
	} else if err != nil {
		return nil, err
	}

	internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "GET").Inc()
	internal.OperationBytesCounterVec.WithLabelValues(ReplicaClientType, "GET").Add(float64(r.Attrs.Size))

	return r, nil
}

// DeleteWALSegments deletes WAL segments at the given positions.
func (c *ReplicaClient) DeleteWALSegments(ctx context.Context, a []litestream.Pos) error {
	if err := c.Init(ctx); err != nil {
		return err
	}

	for _, pos := range a {
		key, err := litestream.WALSegmentPath(c.Path, pos.Generation, pos.Index, pos.Offset)
		if err != nil {
			return fmt.Errorf("cannot determine wal segment path: %w", err)
		}

		if err := c.bkt.Object(key).Delete(ctx); err != nil && !isNotExists(err) {
			return fmt.Errorf("cannot delete wal segment %q: %w", key, err)
		}
		internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "DELETE").Inc()
	}

	return nil
}

type snapshotIterator struct {
	generation string

	it   *storage.ObjectIterator
	info litestream.SnapshotInfo
	err  error
}

func newSnapshotIterator(generation string, it *storage.ObjectIterator) *snapshotIterator {
	return &snapshotIterator{
		generation: generation,
		it:         it,
	}
}

func (itr *snapshotIterator) Close() (err error) {
	return itr.err
}

func (itr *snapshotIterator) Next() bool {
	// Exit if an error has already occurred.
	if itr.err != nil {
		return false
	}

	for {
		// Fetch next object.
		attrs, err := itr.it.Next()
		if err == iterator.Done {
			return false
		} else if err != nil {
			itr.err = err
			return false
		}

		// Parse index, otherwise skip to the next object.
		index, err := litestream.ParseSnapshotPath(path.Base(attrs.Name))
		if err != nil {
			continue
		}

		// Store current snapshot and return.
		itr.info = litestream.SnapshotInfo{
			Generation: itr.generation,
			Index:      index,
			Size:       attrs.Size,
			CreatedAt:  attrs.Created.UTC(),
		}
		return true
	}
}

func (itr *snapshotIterator) Err() error { return itr.err }

func (itr *snapshotIterator) Snapshot() litestream.SnapshotInfo { return itr.info }

type walSegmentIterator struct {
	generation string

	it   *storage.ObjectIterator
	info litestream.WALSegmentInfo
	err  error
}

func newWALSegmentIterator(generation string, it *storage.ObjectIterator) *walSegmentIterator {
	return &walSegmentIterator{
		generation: generation,
		it:         it,
	}
}

func (itr *walSegmentIterator) Close() (err error) {
	return itr.err
}

func (itr *walSegmentIterator) Next() bool {
	// Exit if an error has already occurred.
	if itr.err != nil {
		return false
	}

	for {
		// Fetch next object.
		attrs, err := itr.it.Next()
		if err == iterator.Done {
			return false
		} else if err != nil {
			itr.err = err
			return false
		}

		// Parse index & offset, otherwise skip to the next object.
		index, offset, err := litestream.ParseWALSegmentPath(path.Base(attrs.Name))
		if err != nil {
			continue
		}

		// Store current WAL segment and return.
		itr.info = litestream.WALSegmentInfo{
			Generation: itr.generation,
			Index:      index,
			Offset:     offset,
			Size:       attrs.Size,
			CreatedAt:  attrs.Created.UTC(),
		}
		return true
	}
}

func (itr *walSegmentIterator) Err() error { return itr.err }

func (itr *walSegmentIterator) WALSegment() litestream.WALSegmentInfo {
	return itr.info
}

func isNotExists(err error) bool {
	return err == storage.ErrObjectNotExist
}
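A minimal wiring sketch for the GCS client, assuming an already-open *litestream.DB and default Google application credentials for storage.NewClient; the helper name, replica name, and bucket/prefix values are hypothetical:

func attachGCSReplica(db *litestream.DB, bucket, prefix string) *litestream.Replica {
	client := gcs.NewReplicaClient()
	client.Bucket = bucket // e.g. "my-backups"
	client.Path = prefix   // e.g. "db"

	r := litestream.NewReplica(db, "gcs", client)
	db.Replicas = append(db.Replicas, r)
	return r
}

Init is invoked lazily by every client method, so no explicit connection step is needed before the first Generations or WriteSnapshot call.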
65  go.mod
@@ -1,12 +1,65 @@
 module github.com/benbjohnson/litestream
 
-go 1.15
+go 1.21
 
 require (
-	github.com/aws/aws-sdk-go v1.27.0
-	github.com/davecgh/go-spew v1.1.1
-	github.com/mattn/go-sqlite3 v1.14.5
-	github.com/pierrec/lz4/v4 v4.1.3
-	github.com/prometheus/client_golang v1.9.0
+	cloud.google.com/go/storage v1.36.0
+	filippo.io/age v1.1.1
+	github.com/Azure/azure-storage-blob-go v0.15.0
+	github.com/aws/aws-sdk-go v1.49.5
+	github.com/mattn/go-shellwords v1.0.12
+	github.com/mattn/go-sqlite3 v1.14.19
+	github.com/pierrec/lz4/v4 v4.1.19
+	github.com/pkg/sftp v1.13.6
+	github.com/prometheus/client_golang v1.17.0
+	golang.org/x/crypto v0.17.0
+	golang.org/x/sync v0.5.0
+	golang.org/x/sys v0.15.0
+	google.golang.org/api v0.154.0
 	gopkg.in/yaml.v2 v2.4.0
 )
+
+require (
+	cloud.google.com/go v0.111.0 // indirect
+	cloud.google.com/go/compute v1.23.3 // indirect
+	cloud.google.com/go/compute/metadata v0.2.3 // indirect
+	cloud.google.com/go/iam v1.1.5 // indirect
+	github.com/Azure/azure-pipeline-go v0.2.3 // indirect
+	github.com/beorn7/perks v1.0.1 // indirect
+	github.com/cespare/xxhash/v2 v2.2.0 // indirect
+	github.com/felixge/httpsnoop v1.0.4 // indirect
+	github.com/go-logr/logr v1.3.0 // indirect
+	github.com/go-logr/stdr v1.2.2 // indirect
+	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
+	github.com/golang/protobuf v1.5.3 // indirect
+	github.com/google/go-cmp v0.6.0 // indirect
+	github.com/google/s2a-go v0.1.7 // indirect
+	github.com/google/uuid v1.5.0 // indirect
+	github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
+	github.com/googleapis/gax-go/v2 v2.12.0 // indirect
+	github.com/jmespath/go-jmespath v0.4.0 // indirect
+	github.com/kr/fs v0.1.0 // indirect
+	github.com/mattn/go-ieproxy v0.0.11 // indirect
+	github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
+	github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect
+	github.com/prometheus/client_model v0.5.0 // indirect
+	github.com/prometheus/common v0.45.0 // indirect
+	github.com/prometheus/procfs v0.12.0 // indirect
+	go.opencensus.io v0.24.0 // indirect
+	go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 // indirect
+	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 // indirect
+	go.opentelemetry.io/otel v1.21.0 // indirect
+	go.opentelemetry.io/otel/metric v1.21.0 // indirect
+	go.opentelemetry.io/otel/trace v1.21.0 // indirect
+	golang.org/x/net v0.19.0 // indirect
+	golang.org/x/oauth2 v0.15.0 // indirect
+	golang.org/x/text v0.14.0 // indirect
+	golang.org/x/time v0.5.0 // indirect
+	golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect
+	google.golang.org/appengine v1.6.8 // indirect
+	google.golang.org/genproto v0.0.0-20231212172506-995d672761c0 // indirect
+	google.golang.org/genproto/googleapis/api v0.0.0-20231212172506-995d672761c0 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20231212172506-995d672761c0 // indirect
+	google.golang.org/grpc v1.60.1 // indirect
+	google.golang.org/protobuf v1.31.0 // indirect
+)
610  go.sum
@@ -1,410 +1,372 @@
 cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
 cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.110.7 h1:rJyC7nWRg2jWGZ4wSJ5nY65GTdYJkg0cd/uXb+ACI6o=
+cloud.google.com/go v0.110.7/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI=
+cloud.google.com/go v0.111.0 h1:YHLKNupSD1KqjDbQ3+LVdQ81h/UJbJyZG203cEfnQgM=
+cloud.google.com/go v0.111.0/go.mod h1:0mibmpKP1TyOOFYQY5izo0LnT+ecvOQ0Sg3OdmMiNRU=
+cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY=
+cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM=
+cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk=
+cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI=
+cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
+cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
+cloud.google.com/go/iam v1.1.1 h1:lW7fzj15aVIXYHREOqjRBV9PsH0Z6u8Y46a1YGvQP4Y=
+cloud.google.com/go/iam v1.1.1/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU=
+cloud.google.com/go/iam v1.1.5 h1:1jTsCu4bcsNsE4iiqNT5SHwrDRCfRmIaaaVFhRveTJI=
+cloud.google.com/go/iam v1.1.5/go.mod h1:rB6P/Ic3mykPbFio+vo7403drjlgvoWfYpJhMXEbzv8=
+cloud.google.com/go/storage v1.31.0 h1:+S3LjjEN2zZ+L5hOwj4+1OkGCsLVe0NzpXKQ1pSdTCI=
+cloud.google.com/go/storage v1.31.0/go.mod h1:81ams1PrhW16L4kF7qg+4mTq7SRs5HsbDTM0bWvrwJ0=
+cloud.google.com/go/storage v1.36.0 h1:P0mOkAcaJxhCTvAkMhxMfrTKiNcub4YmmPBtlhAyTr8=
+cloud.google.com/go/storage v1.36.0/go.mod h1:M6M/3V/D3KpzMTJyPOR/HU6n2Si5QdaXYEsng2xgOs8=
+filippo.io/age v1.1.1 h1:pIpO7l151hCnQ4BdyBujnGP2YlUo0uj6sAVNHGBvXHg=
+filippo.io/age v1.1.1/go.mod h1:l03SrzDUrBkdBx8+IILdnn2KZysqQdbEBUQ4p3sqEQE=
+github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVtCdfBE21U=
+github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k=
+github.com/Azure/azure-storage-blob-go v0.15.0 h1:rXtgp8tN1p29GvpGgfJetavIG0V7OgcSXPpwp3tx6qk=
+github.com/Azure/azure-storage-blob-go v0.15.0/go.mod h1:vbjsVbX0dlxnRc4FFMPsS9BsJWPcne7GB7onqlPvz58=
+github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
+github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
+github.com/Azure/go-autorest/autorest/adal v0.9.13 h1:Mp5hbtOePIzM8pJVRa3YLrWWmZtoxRXqUEzCfJt3+/Q=
+github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
+github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=
+github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
+github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
+github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg=
+github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
+github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
+github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
+github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
-github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
+github.com/aws/aws-sdk-go v1.44.318 h1:Yl66rpbQHFUbxe9JBKLcvOvRivhVgP6+zH0b9KzARX8=
-github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
+github.com/aws/aws-sdk-go v1.44.318/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
-github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
+github.com/aws/aws-sdk-go v1.49.5 h1:y2yfBlwjPDi3/sBVKeznYEdDy6wIhjA2L5NCBMLUIYA=
-github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c=
+github.com/aws/aws-sdk-go v1.49.5/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
-github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
-github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
-github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
-github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
-github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
-github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
-github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A=
-github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=
-github.com/aws/aws-sdk-go v1.27.0 h1:0xphMHGMLBrPMfxR2AmVjZKcMEESEgWF8Kru94BNByk=
-github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
-github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
-github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
-github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
 github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
 github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
-github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
-github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=
-github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
 github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
+github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
+github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
+github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
+github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
-github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
-github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
+github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
-github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
-github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
-github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
-github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
-github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g=
|
|
||||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||||
|
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
|
||||||
|
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
|
||||||
|
github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
|
||||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||||
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
|
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
|
||||||
github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4=
|
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
|
||||||
github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20=
|
github.com/form3tech-oss/jwt-go v3.2.2+incompatible h1:TcekIExNqud5crz4xD2pavyTgWiPvpYe4Xau31I0PRk=
|
||||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
|
||||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||||
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
|
||||||
github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o=
|
github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
||||||
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
||||||
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
|
|
||||||
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
|
|
||||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
|
||||||
github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
|
|
||||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
|
||||||
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
|
||||||
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
|
|
||||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||||
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||||
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
|
||||||
|
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||||
|
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
|
||||||
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
||||||
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
||||||
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
|
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
|
||||||
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
|
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
|
||||||
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
||||||
|
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
|
||||||
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||||
github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM=
|
|
||||||
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||||
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||||
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
|
||||||
|
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
|
||||||
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
|
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||||
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
|
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
||||||
github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
|
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||||
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
|
github.com/google/martian/v3 v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw=
|
||||||
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
|
github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
|
||||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
|
github.com/google/s2a-go v0.1.4 h1:1kZ/sQM3srePvKs3tXAvQzo66XfcReoqFpIpIccE7Oc=
|
||||||
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
|
github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A=
|
||||||
github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=
|
github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o=
|
||||||
github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
|
github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw=
|
||||||
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||||
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
|
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||||
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
|
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
|
||||||
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
|
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||||
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
|
github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU=
|
||||||
github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
|
github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||||
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
|
github.com/googleapis/enterprise-certificate-proxy v0.2.5 h1:UR4rDjcgpgEnqpIEvkiqTYKBCKLNmlge2eVjoZfySzM=
|
||||||
github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
|
github.com/googleapis/enterprise-certificate-proxy v0.2.5/go.mod h1:RxW0N9901Cko1VOCW3SXCpWP+mlIEkk2tP7jnHy9a3w=
|
||||||
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs=
|
||||||
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0=
|
||||||
github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
|
github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas=
|
||||||
github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
|
github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU=
|
||||||
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
|
||||||
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
|
||||||
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
|
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
|
||||||
github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
|
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
|
||||||
github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
|
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
|
||||||
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
|
github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8=
|
||||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
|
||||||
github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
|
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||||
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||||
github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
|
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||||
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=
|
|
||||||
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
|
|
||||||
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
|
|
||||||
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
|
|
||||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
|
||||||
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
|
||||||
github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
|
||||||
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
|
||||||
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
|
||||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
|
||||||
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
|
|
||||||
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
|
|
||||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
|
||||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
|
||||||
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
|
||||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
|
||||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
|
||||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||||
|
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
||||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||||
github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
|
github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E=
|
||||||
github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
|
github.com/mattn/go-ieproxy v0.0.11 h1:MQ/5BuGSgDAHZOJe6YY80IF2UVCfGkwfo6AeD7HtHYo=
|
||||||
github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
|
github.com/mattn/go-ieproxy v0.0.11/go.mod h1:/NsJd+kxZBmjMc5hrJCKMbP57B84rvq9BiDRbtO9AS0=
|
||||||
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
|
github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk=
|
||||||
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
|
||||||
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
github.com/mattn/go-sqlite3 v1.14.17 h1:mCRHCLDUBXgpKAqIKsaAaAsrAlbkeomtRFKXh2L6YIM=
|
||||||
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
|
github.com/mattn/go-sqlite3 v1.14.17/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
|
||||||
github.com/mattn/go-sqlite3 v1.14.5 h1:1IdxlwTNazvbKJQSxoJ5/9ECbEeaTTyeU7sEAZ5KKTQ=
|
github.com/mattn/go-sqlite3 v1.14.19 h1:fhGleo2h1p8tVChob4I9HpmVFIAkKGpiukdrgQbWfGI=
|
||||||
github.com/mattn/go-sqlite3 v1.14.5/go.mod h1:WVKg1VTActs4Qso6iwGbiFih2UIHo0ENGwNd0Lj+XmI=
|
github.com/mattn/go-sqlite3 v1.14.19/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
|
||||||
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
|
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
|
||||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
|
||||||
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
|
github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg=
|
||||||
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
|
github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k=
|
||||||
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
github.com/pierrec/lz4/v4 v4.1.18 h1:xaKrnTkyoqfh1YItXl56+6KJNVYWlEEPuAQW9xsplYQ=
|
||||||
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
|
github.com/pierrec/lz4/v4 v4.1.18/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
|
||||||
github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
|
github.com/pierrec/lz4/v4 v4.1.19 h1:tYLzDnjDXh9qIxSTKHwXwOYmm9d887Y7Y1ZkyXYHAN4=
|
||||||
github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
|
github.com/pierrec/lz4/v4 v4.1.19/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
|
||||||
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||||
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
|
||||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
|
||||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
|
||||||
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
|
||||||
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
|
||||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
|
||||||
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
|
||||||
github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg=
|
|
||||||
github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU=
|
|
||||||
github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k=
|
|
||||||
github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w=
|
|
||||||
github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
|
|
||||||
github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
|
|
||||||
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
|
|
||||||
github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs=
|
|
||||||
github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
|
|
||||||
github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
|
|
||||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
|
||||||
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
|
||||||
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
|
||||||
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
|
|
||||||
github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis=
|
|
||||||
github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74=
|
|
||||||
github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
|
|
||||||
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
|
|
||||||
github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA=
|
|
||||||
github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
|
|
||||||
github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
|
|
||||||
github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
|
|
||||||
github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM=
|
|
||||||
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
|
|
||||||
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
|
|
||||||
github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac=
|
|
||||||
github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
|
|
||||||
github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I=
|
|
||||||
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
|
|
||||||
github.com/pierrec/lz4 v2.6.0+incompatible h1:Ix9yFKn1nSPBLFl/yZknTp8TU5G4Ps0JDmguYK6iH1A=
|
|
||||||
github.com/pierrec/lz4/v4 v4.1.3 h1:/dvQpkb0o1pVlSgKNQqfkavlnXaIK+hJ0LXsKRUN9D4=
|
|
||||||
github.com/pierrec/lz4/v4 v4.1.3/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
|
|
||||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
|
||||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
|
||||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||||
github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
|
github.com/pkg/sftp v1.13.5 h1:a3RLUqkyjYRtBTZJZ1VRrKbN3zhuPLlUc3sphVz81go=
|
||||||
|
github.com/pkg/sftp v1.13.5/go.mod h1:wHDZ0IZX6JcBYRK1TH9bcVq8G7TLpVHYIGJRFnmPfxg=
|
||||||
|
github.com/pkg/sftp v1.13.6 h1:JFZT4XbOU7l77xGSpOdW+pwIMqP044IyjXX6FGyEKFo=
|
||||||
|
github.com/pkg/sftp v1.13.6/go.mod h1:tz1ryNURKu77RL+GuCzmoJYxQczL3wLNNpPWagdg4Qk=
|
||||||
|
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||||
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
|
github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8=
|
||||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc=
|
||||||
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
|
github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q=
|
||||||
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
|
github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY=
|
||||||
github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og=
|
|
||||||
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
|
|
||||||
github.com/prometheus/client_golang v1.9.0 h1:Rrch9mh17XcxvEu9D9DEpb4isxjGBtcevQjKvxPRQIU=
|
|
||||||
github.com/prometheus/client_golang v1.9.0/go.mod h1:FqZLKOZnGdFAhOK4nqGHa7D66IdsO+O441Eve7ptJDU=
|
|
||||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
|
||||||
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
|
||||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
|
||||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||||
github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY=
|
||||||
github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
|
github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
|
||||||
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
|
||||||
github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
|
||||||
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY=
|
||||||
github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=
|
github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY=
|
||||||
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
|
github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM=
|
||||||
github.com/prometheus/common v0.15.0 h1:4fgOnadei3EZvgRwxJ7RMpG1k1pOZth5Pc13tyspaKM=
|
github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY=
|
||||||
github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s=
|
github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI=
|
||||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY=
|
||||||
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
|
||||||
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
|
||||||
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
|
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
|
||||||
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
|
||||||
github.com/prometheus/procfs v0.2.0 h1:wH4vA7pcjKuZzjF7lM8awk4fnuJO6idemZXoKnULUx4=
|
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
|
||||||
github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
|
||||||
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
|
|
||||||
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
|
|
||||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
|
||||||
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
|
||||||
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
|
|
||||||
github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
|
|
||||||
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
|
|
||||||
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
|
|
||||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
|
||||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
|
||||||
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
|
|
||||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
|
||||||
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
|
|
||||||
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
|
|
||||||
github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY=
|
|
||||||
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
|
|
||||||
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
|
||||||
github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
|
|
||||||
github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
|
|
||||||
github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI=
|
|
||||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
||||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||||
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
|
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||||
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
|
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
|
||||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||||
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||||
go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
|
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
|
||||||
go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
|
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
|
||||||
go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
|
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 h1:SpGay3w+nEwMpfVnbqOLH5gY52/foP8RE8UzTZ1pdSE=
|
||||||
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1/go.mod h1:4UoMYEZOC0yN/sPGH76KPkkU7zgiEWYWL9vwmbnTJPE=
|
||||||
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 h1:aFJWCqJMNjENlcleuuOkGAPH82y0yULBScfXcIEdS24=
|
||||||
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
|
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1/go.mod h1:sEGXWArGqc3tVa+ekntsN65DmVbVeW+7lTKTjZF3/Fo=
|
||||||
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc=
|
||||||
go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
|
go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo=
|
||||||
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
|
go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4=
|
||||||
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM=
|
||||||
go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
|
go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc=
|
||||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ=
|
||||||
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
|
||||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||||
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
|
||||||
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
|
||||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
|
||||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||||
|
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||||
|
golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||||
|
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||||
|
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||||
|
golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||||
|
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
|
||||||
|
golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk=
|
||||||
|
golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
|
||||||
|
golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k=
|
||||||
|
golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
|
||||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||||
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
|
||||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||||
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||||
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
|
|
||||||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
|
||||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
|
||||||
golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
|
||||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
|
||||||
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
|
||||||
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
|
||||||
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
|
||||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||||
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
||||||
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
|
||||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||||
|
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||||
|
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||||
|
golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||||
|
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||||
|
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||||
|
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
|
||||||
|
golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14=
|
||||||
|
golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
|
||||||
|
golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c=
|
||||||
|
golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U=
|
||||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||||
|
golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU=
|
||||||
|
golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk=
|
||||||
|
golang.org/x/oauth2 v0.15.0 h1:s8pnnxNVzjWyrvYdFUQq5llS1PX2zhPXmccZv99h7uQ=
|
||||||
|
golang.org/x/oauth2 v0.15.0/go.mod h1:q48ptWNTY5XWf+JNten23lcvHpLJ0ZSxF5ttTHKVCAM=
|
||||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
|
||||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
|
||||||
|
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
|
||||||
|
golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE=
|
||||||
|
golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
|
||||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
|
||||||
golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
|
||||||
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
|
||||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
|
||||||
golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
|
||||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
go.sum: checksum entries changed for golang.org/x/sys, golang.org/x/term, golang.org/x/text, golang.org/x/time, golang.org/x/tools, golang.org/x/xerrors, google.golang.org/api, google.golang.org/appengine, google.golang.org/genproto (including googleapis/api and googleapis/rpc), google.golang.org/grpc, and google.golang.org/protobuf, along with surrounding gopkg.in (check.v1, yaml.v2, yaml.v3, and others), honnef.co/go/tools, sigs.k8s.io/yaml, and sourcegraph.com/sourcegraph/appdash entries.

internal/internal.go
|||||||
@@ -2,6 +2,11 @@ package internal
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"io"
|
"io"
|
||||||
|
"os"
|
||||||
|
"syscall"
|
||||||
|
|
||||||
|
"github.com/prometheus/client_golang/prometheus"
|
||||||
|
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||||
)
|
)
|
||||||
|
|
||||||
// ReadCloser wraps a reader to also attach a separate closer.
|
// ReadCloser wraps a reader to also attach a separate closer.
|
||||||
@@ -30,3 +35,107 @@ func (r *ReadCloser) Close() error {
|
|||||||
}
|
}
|
||||||
return r.c.Close()
|
return r.c.Close()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ReadCounter wraps an io.Reader and counts the total number of bytes read.
|
||||||
|
type ReadCounter struct {
|
||||||
|
r io.Reader
|
||||||
|
n int64
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewReadCounter returns a new instance of ReadCounter that wraps r.
|
||||||
|
func NewReadCounter(r io.Reader) *ReadCounter {
|
||||||
|
return &ReadCounter{r: r}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read reads from the underlying reader into p and adds the bytes read to the counter.
|
||||||
|
func (r *ReadCounter) Read(p []byte) (int, error) {
|
||||||
|
n, err := r.r.Read(p)
|
||||||
|
r.n += int64(n)
|
||||||
|
return n, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// N returns the total number of bytes read.
|
||||||
|
func (r *ReadCounter) N() int64 { return r.n }
|
||||||
|
|
||||||
|
// CreateFile creates the file and matches the mode & uid/gid of fi.
|
||||||
|
func CreateFile(filename string, fi os.FileInfo) (*os.File, error) {
|
||||||
|
mode := os.FileMode(0600)
|
||||||
|
if fi != nil {
|
||||||
|
mode = fi.Mode()
|
||||||
|
}
|
||||||
|
|
||||||
|
f, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
uid, gid := Fileinfo(fi)
|
||||||
|
_ = f.Chown(uid, gid)
|
||||||
|
return f, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// MkdirAll is a copy of os.MkdirAll() except that it attempts to set the
|
||||||
|
// mode/uid/gid to match fi for each created directory.
|
||||||
|
func MkdirAll(path string, fi os.FileInfo) error {
|
||||||
|
uid, gid := Fileinfo(fi)
|
||||||
|
|
||||||
|
// Fast path: if we can tell whether path is a directory or file, stop with success or error.
|
||||||
|
dir, err := os.Stat(path)
|
||||||
|
if err == nil {
|
||||||
|
if dir.IsDir() {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return &os.PathError{Op: "mkdir", Path: path, Err: syscall.ENOTDIR}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Slow path: make sure parent exists and then call Mkdir for path.
|
||||||
|
i := len(path)
|
||||||
|
for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator.
|
||||||
|
i--
|
||||||
|
}
|
||||||
|
|
||||||
|
j := i
|
||||||
|
for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element.
|
||||||
|
j--
|
||||||
|
}
|
||||||
|
|
||||||
|
if j > 1 {
|
||||||
|
// Create parent.
|
||||||
|
err = MkdirAll(fixRootDirectory(path[:j-1]), fi)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parent now exists; invoke Mkdir and use its result.
|
||||||
|
mode := os.FileMode(0700)
|
||||||
|
if fi != nil {
|
||||||
|
mode = fi.Mode()
|
||||||
|
}
|
||||||
|
err = os.Mkdir(path, mode)
|
||||||
|
if err != nil {
|
||||||
|
// Handle arguments like "foo/." by
|
||||||
|
// double-checking that directory doesn't exist.
|
||||||
|
dir, err1 := os.Lstat(path)
|
||||||
|
if err1 == nil && dir.IsDir() {
|
||||||
|
_ = os.Chown(path, uid, gid)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
_ = os.Chown(path, uid, gid)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Shared replica metrics.
|
||||||
|
var (
|
||||||
|
OperationTotalCounterVec = promauto.NewCounterVec(prometheus.CounterOpts{
|
||||||
|
Name: "litestream_replica_operation_total",
|
||||||
|
Help: "The number of replica operations performed",
|
||||||
|
}, []string{"replica_type", "operation"})
|
||||||
|
|
||||||
|
OperationBytesCounterVec = promauto.NewCounterVec(prometheus.CounterOpts{
|
||||||
|
Name: "litestream_replica_operation_bytes",
|
||||||
|
Help: "The number of bytes used by replica operations",
|
||||||
|
}, []string{"replica_type", "operation"})
|
||||||
|
)
|
||||||
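As a quick illustration (not part of the change itself), here is a minimal sketch of how the new ReadCounter pairs with the shared replica operation counters. It assumes the code lives inside the litestream module, since the internal/ package cannot be imported from other modules, and assumes the import path github.com/benbjohnson/litestream/internal; the "s3"/"snapshot" label values are purely illustrative.

```go
package main

import (
	"fmt"
	"io"
	"log"
	"strings"

	"github.com/benbjohnson/litestream/internal" // assumed import path; internal/ is module-private
)

func main() {
	// Wrap any reader to track how many bytes pass through it.
	rc := internal.NewReadCounter(strings.NewReader("hello, litestream"))
	if _, err := io.Copy(io.Discard, rc); err != nil {
		log.Fatal(err)
	}
	fmt.Println("bytes read:", rc.N()) // 17

	// Record the transfer against the shared counters, e.g. for an "s3"
	// replica performing a "snapshot" operation (labels are illustrative).
	internal.OperationBytesCounterVec.WithLabelValues("s3", "snapshot").Add(float64(rc.N()))
	internal.OperationTotalCounterVec.WithLabelValues("s3", "snapshot").Inc()
}
```

Counting on the reader side like this lets any replica client report operation bytes without re-implementing its own accounting.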
|
|||||||
22  internal/internal_unix.go  Normal file
@@ -0,0 +1,22 @@
|
|||||||
|
//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
|
||||||
|
// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
|
||||||
|
|
||||||
|
package internal
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"syscall"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Fileinfo returns syscall fields from a FileInfo object.
|
||||||
|
func Fileinfo(fi os.FileInfo) (uid, gid int) {
|
||||||
|
if fi == nil {
|
||||||
|
return -1, -1
|
||||||
|
}
|
||||||
|
stat := fi.Sys().(*syscall.Stat_t)
|
||||||
|
return int(stat.Uid), int(stat.Gid)
|
||||||
|
}
|
||||||
|
|
||||||
|
func fixRootDirectory(p string) string {
|
||||||
|
return p
|
||||||
|
}
|
||||||
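A sketch of how CreateFile and MkdirAll might be used together to mirror a source database's mode and ownership onto replica metadata. The paths and file names are illustrative and, as above, this only compiles from inside the litestream module.

```go
package main

import (
	"log"
	"os"
	"path/filepath"

	"github.com/benbjohnson/litestream/internal" // assumed import path; internal/ is module-private
)

func main() {
	// Stat the source database so its mode and uid/gid can be copied.
	fi, err := os.Stat("/var/lib/app/db.sqlite") // illustrative path
	if err != nil {
		log.Fatal(err)
	}

	// Create the metadata directory tree with matching mode/ownership.
	dir := "/var/lib/app/db.sqlite-litestream/generations" // illustrative path
	if err := internal.MkdirAll(dir, fi); err != nil {
		log.Fatal(err)
	}

	// Create a file inside it, again matching the source database.
	f, err := internal.CreateFile(filepath.Join(dir, "generation"), fi) // illustrative file name
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
}
```

On Windows, Fileinfo returns -1/-1 and the ignored Chown calls leave ownership unchanged, so only the file mode is applied there.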
23  internal/internal_windows.go  Normal file
@@ -0,0 +1,23 @@
|
|||||||
|
//go:build windows
|
||||||
|
// +build windows
|
||||||
|
|
||||||
|
package internal
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Fileinfo returns syscall fields from a FileInfo object.
|
||||||
|
func Fileinfo(fi os.FileInfo) (uid, gid int) {
|
||||||
|
return -1, -1
|
||||||
|
}
|
||||||
|
|
||||||
|
// fixRootDirectory is copied from the standard library for use with MkdirAll().
|
||||||
|
func fixRootDirectory(p string) string {
|
||||||
|
if len(p) == len(`\\?\c:`) {
|
||||||
|
if os.IsPathSeparator(p[0]) && os.IsPathSeparator(p[1]) && p[2] == '?' && os.IsPathSeparator(p[3]) && p[5] == ':' {
|
||||||
|
return p + `\`
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return p
|
||||||
|
}
|
||||||
@@ -1,44 +0,0 @@
|
|||||||
package internal
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/prometheus/client_golang/prometheus"
|
|
||||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Shared replica metrics.
|
|
||||||
var (
|
|
||||||
ReplicaSnapshotTotalGaugeVec = promauto.NewGaugeVec(prometheus.GaugeOpts{
|
|
||||||
Namespace: "litestream",
|
|
||||||
Subsystem: "replica",
|
|
||||||
Name: "snapshot_total",
|
|
||||||
Help: "The current number of snapshots",
|
|
||||||
}, []string{"db", "name"})
|
|
||||||
|
|
||||||
ReplicaWALBytesCounterVec = promauto.NewCounterVec(prometheus.CounterOpts{
|
|
||||||
Namespace: "litestream",
|
|
||||||
Subsystem: "replica",
|
|
||||||
Name: "wal_bytes",
|
|
||||||
Help: "The number wal bytes written",
|
|
||||||
}, []string{"db", "name"})
|
|
||||||
|
|
||||||
ReplicaWALIndexGaugeVec = promauto.NewGaugeVec(prometheus.GaugeOpts{
|
|
||||||
Namespace: "litestream",
|
|
||||||
Subsystem: "replica",
|
|
||||||
Name: "wal_index",
|
|
||||||
Help: "The current WAL index",
|
|
||||||
}, []string{"db", "name"})
|
|
||||||
|
|
||||||
ReplicaWALOffsetGaugeVec = promauto.NewGaugeVec(prometheus.GaugeOpts{
|
|
||||||
Namespace: "litestream",
|
|
||||||
Subsystem: "replica",
|
|
||||||
Name: "wal_offset",
|
|
||||||
Help: "The current WAL offset",
|
|
||||||
}, []string{"db", "name"})
|
|
||||||
|
|
||||||
ReplicaValidationTotalCounterVec = promauto.NewCounterVec(prometheus.CounterOpts{
|
|
||||||
Namespace: "litestream",
|
|
||||||
Subsystem: "replica",
|
|
||||||
Name: "validation_total",
|
|
||||||
Help: "The number of validations performed",
|
|
||||||
}, []string{"db", "name", "status"})
|
|
||||||
)
|
|
||||||
408  litestream.go
@@ -7,21 +7,24 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"os"
|
"os"
|
||||||
|
"path"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"regexp"
|
"regexp"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"syscall"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/mattn/go-sqlite3"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Naming constants.
|
// Naming constants.
|
||||||
const (
|
const (
|
||||||
MetaDirSuffix = "-litestream"
|
MetaDirSuffix = "-litestream"
|
||||||
|
|
||||||
WALDirName = "wal"
|
WALDirName = "wal"
|
||||||
WALExt = ".wal"
|
WALExt = ".wal"
|
||||||
SnapshotExt = ".snapshot"
|
WALSegmentExt = ".wal.lz4"
|
||||||
|
SnapshotExt = ".snapshot.lz4"
|
||||||
|
|
||||||
GenerationNameLen = 16
|
GenerationNameLen = 16
|
||||||
)
|
)
|
||||||
@@ -36,23 +39,184 @@ const (
|
|||||||
|
|
||||||
// Litestream errors.
|
// Litestream errors.
|
||||||
var (
|
var (
|
||||||
|
ErrNoGeneration = errors.New("no generation available")
|
||||||
ErrNoSnapshots = errors.New("no snapshots available")
|
ErrNoSnapshots = errors.New("no snapshots available")
|
||||||
ErrChecksumMismatch = errors.New("invalid replica, checksum mismatch")
|
ErrChecksumMismatch = errors.New("invalid replica, checksum mismatch")
|
||||||
)
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// LogWriter is the destination writer for all logging.
|
||||||
|
LogWriter = os.Stdout
|
||||||
|
|
||||||
|
// LogFlags are the flags passed to log.New().
|
||||||
|
LogFlags = 0
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
sql.Register("litestream-sqlite3", &sqlite3.SQLiteDriver{
|
||||||
|
ConnectHook: func(conn *sqlite3.SQLiteConn) error {
|
||||||
|
if err := conn.SetFileControlInt("main", sqlite3.SQLITE_FCNTL_PERSIST_WAL, 1); err != nil {
|
||||||
|
return fmt.Errorf("cannot set file control: %w", err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
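A minimal sketch of opening a database through the driver registered in init(); it assumes a CGO-enabled build of mattn/go-sqlite3 and a throwaway database path.

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/benbjohnson/litestream" // blank import runs init(), registering "litestream-sqlite3"
)

func main() {
	// The ConnectHook sets SQLITE_FCNTL_PERSIST_WAL so the -wal file is not
	// removed when the last connection closes.
	db, err := sql.Open("litestream-sqlite3", "/tmp/example.db") // illustrative path
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if err := db.Ping(); err != nil {
		log.Fatal(err)
	}
}
```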
|
|
||||||
|
// SnapshotIterator represents an iterator over a collection of snapshot metadata.
|
||||||
|
type SnapshotIterator interface {
|
||||||
|
io.Closer
|
||||||
|
|
||||||
|
// Prepares the next snapshot for reading with the Snapshot() method.
// Returns true if another snapshot is available. Returns false if no more
// snapshots are available or if an error occurred.
|
||||||
|
Next() bool
|
||||||
|
|
||||||
|
// Returns an error that occurred during iteration.
|
||||||
|
Err() error
|
||||||
|
|
||||||
|
// Returns metadata for the currently positioned snapshot.
|
||||||
|
Snapshot() SnapshotInfo
|
||||||
|
}
|
||||||
|
|
||||||
|
// SliceSnapshotIterator returns all snapshots from an iterator as a slice.
|
||||||
|
func SliceSnapshotIterator(itr SnapshotIterator) ([]SnapshotInfo, error) {
|
||||||
|
var a []SnapshotInfo
|
||||||
|
for itr.Next() {
|
||||||
|
a = append(a, itr.Snapshot())
|
||||||
|
}
|
||||||
|
return a, itr.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
var _ SnapshotIterator = (*SnapshotInfoSliceIterator)(nil)
|
||||||
|
|
||||||
|
// SnapshotInfoSliceIterator represents an iterator for iterating over a slice of snapshots.
|
||||||
|
type SnapshotInfoSliceIterator struct {
|
||||||
|
init bool
|
||||||
|
a []SnapshotInfo
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewSnapshotInfoSliceIterator returns a new instance of SnapshotInfoSliceIterator.
|
||||||
|
func NewSnapshotInfoSliceIterator(a []SnapshotInfo) *SnapshotInfoSliceIterator {
|
||||||
|
return &SnapshotInfoSliceIterator{a: a}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close always returns nil.
|
||||||
|
func (itr *SnapshotInfoSliceIterator) Close() error { return nil }
|
||||||
|
|
||||||
|
// Next moves to the next snapshot. Returns true if another snapshot is available.
|
||||||
|
func (itr *SnapshotInfoSliceIterator) Next() bool {
|
||||||
|
if !itr.init {
|
||||||
|
itr.init = true
|
||||||
|
return len(itr.a) > 0
|
||||||
|
}
|
||||||
|
itr.a = itr.a[1:]
|
||||||
|
return len(itr.a) > 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// Err always returns nil.
|
||||||
|
func (itr *SnapshotInfoSliceIterator) Err() error { return nil }
|
||||||
|
|
||||||
|
// Snapshot returns the metadata from the currently positioned snapshot.
|
||||||
|
func (itr *SnapshotInfoSliceIterator) Snapshot() SnapshotInfo {
|
||||||
|
if len(itr.a) == 0 {
|
||||||
|
return SnapshotInfo{}
|
||||||
|
}
|
||||||
|
return itr.a[0]
|
||||||
|
}
|
||||||
|
|
||||||
|
// WALSegmentIterator represents an iterator over a collection of WAL segments.
|
||||||
|
type WALSegmentIterator interface {
|
||||||
|
io.Closer
|
||||||
|
|
||||||
|
// Prepares the next WAL segment for reading with the WALSegment() method.
// Returns true if another segment is available. Returns false if no more
// WAL segments are available or if an error occurred.
|
||||||
|
Next() bool
|
||||||
|
|
||||||
|
// Returns an error that occurred during iteration.
|
||||||
|
Err() error
|
||||||
|
|
||||||
|
// Returns metadata for the currently positioned WAL segment file.
|
||||||
|
WALSegment() WALSegmentInfo
|
||||||
|
}
|
||||||
|
|
||||||
|
// SliceWALSegmentIterator returns all WAL segment files from an iterator as a slice.
|
||||||
|
func SliceWALSegmentIterator(itr WALSegmentIterator) ([]WALSegmentInfo, error) {
|
||||||
|
var a []WALSegmentInfo
|
||||||
|
for itr.Next() {
|
||||||
|
a = append(a, itr.WALSegment())
|
||||||
|
}
|
||||||
|
return a, itr.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
var _ WALSegmentIterator = (*WALSegmentInfoSliceIterator)(nil)
|
||||||
|
|
||||||
|
// WALSegmentInfoSliceIterator represents an iterator for iterating over a slice of wal segments.
|
||||||
|
type WALSegmentInfoSliceIterator struct {
|
||||||
|
init bool
|
||||||
|
a []WALSegmentInfo
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewWALSegmentInfoSliceIterator returns a new instance of WALSegmentInfoSliceIterator.
|
||||||
|
func NewWALSegmentInfoSliceIterator(a []WALSegmentInfo) *WALSegmentInfoSliceIterator {
|
||||||
|
return &WALSegmentInfoSliceIterator{a: a}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close always returns nil.
|
||||||
|
func (itr *WALSegmentInfoSliceIterator) Close() error { return nil }
|
||||||
|
|
||||||
|
// Next moves to the next wal segment. Returns true if another segment is available.
|
||||||
|
func (itr *WALSegmentInfoSliceIterator) Next() bool {
|
||||||
|
if !itr.init {
|
||||||
|
itr.init = true
|
||||||
|
return len(itr.a) > 0
|
||||||
|
}
|
||||||
|
itr.a = itr.a[1:]
|
||||||
|
return len(itr.a) > 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// Err always returns nil.
|
||||||
|
func (itr *WALSegmentInfoSliceIterator) Err() error { return nil }
|
||||||
|
|
||||||
|
// WALSegment returns the metadata from the currently positioned wal segment.
|
||||||
|
func (itr *WALSegmentInfoSliceIterator) WALSegment() WALSegmentInfo {
|
||||||
|
if len(itr.a) == 0 {
|
||||||
|
return WALSegmentInfo{}
|
||||||
|
}
|
||||||
|
return itr.a[0]
|
||||||
|
}
|
||||||
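A small sketch of the slice-backed iterator round-trip, using hypothetical metadata values:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/benbjohnson/litestream"
)

func main() {
	// Wrap in-memory metadata in an iterator, then collect it back into a slice.
	infos := []litestream.SnapshotInfo{
		{Generation: "29cf4bced74e92ab", Index: 0, CreatedAt: time.Now()},
		{Generation: "29cf4bced74e92ab", Index: 1, CreatedAt: time.Now()},
	}
	itr := litestream.NewSnapshotInfoSliceIterator(infos)

	collected, err := litestream.SliceSnapshotIterator(itr)
	if err != nil {
		log.Fatal(err)
	}
	for _, info := range collected {
		fmt.Println(info.Pos()) // e.g. 29cf4bced74e92ab/00000000:0
	}
}
```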
|
|
||||||
// SnapshotInfo represents file information about a snapshot.
|
// SnapshotInfo represents file information about a snapshot.
|
||||||
type SnapshotInfo struct {
|
type SnapshotInfo struct {
|
||||||
Name string
|
|
||||||
Replica string
|
|
||||||
Generation string
|
Generation string
|
||||||
Index int
|
Index int
|
||||||
Size int64
|
Size int64
|
||||||
CreatedAt time.Time
|
CreatedAt time.Time
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Pos returns the WAL position when the snapshot was made.
|
||||||
|
func (info *SnapshotInfo) Pos() Pos {
|
||||||
|
return Pos{Generation: info.Generation, Index: info.Index}
|
||||||
|
}
|
||||||
|
|
||||||
|
// SnapshotInfoSlice represents a slice of snapshot metadata.
|
||||||
|
type SnapshotInfoSlice []SnapshotInfo
|
||||||
|
|
||||||
|
func (a SnapshotInfoSlice) Len() int { return len(a) }
|
||||||
|
|
||||||
|
func (a SnapshotInfoSlice) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
||||||
|
|
||||||
|
func (a SnapshotInfoSlice) Less(i, j int) bool {
|
||||||
|
if a[i].Generation != a[j].Generation {
|
||||||
|
return a[i].Generation < a[j].Generation
|
||||||
|
}
|
||||||
|
return a[i].Index < a[j].Index
|
||||||
|
}
|
||||||
|
|
||||||
// FilterSnapshotsAfter returns all snapshots that were created on or after t.
|
// FilterSnapshotsAfter returns all snapshots that were created on or after t.
|
||||||
func FilterSnapshotsAfter(a []*SnapshotInfo, t time.Time) []*SnapshotInfo {
|
func FilterSnapshotsAfter(a []SnapshotInfo, t time.Time) []SnapshotInfo {
|
||||||
other := make([]*SnapshotInfo, 0, len(a))
|
other := make([]SnapshotInfo, 0, len(a))
|
||||||
for _, snapshot := range a {
|
for _, snapshot := range a {
|
||||||
if !snapshot.CreatedAt.Before(t) {
|
if !snapshot.CreatedAt.Before(t) {
|
||||||
other = append(other, snapshot)
|
other = append(other, snapshot)
|
||||||
@@ -62,9 +226,11 @@ func FilterSnapshotsAfter(a []*SnapshotInfo, t time.Time) []*SnapshotInfo {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// FindMinSnapshotByGeneration finds the snapshot with the lowest index in a generation.
|
// FindMinSnapshotByGeneration finds the snapshot with the lowest index in a generation.
|
||||||
func FindMinSnapshotByGeneration(a []*SnapshotInfo, generation string) *SnapshotInfo {
|
func FindMinSnapshotByGeneration(a []SnapshotInfo, generation string) *SnapshotInfo {
|
||||||
var min *SnapshotInfo
|
var min *SnapshotInfo
|
||||||
for _, snapshot := range a {
|
for i := range a {
|
||||||
|
snapshot := &a[i]
|
||||||
|
|
||||||
if snapshot.Generation != generation {
|
if snapshot.Generation != generation {
|
||||||
continue
|
continue
|
||||||
} else if min == nil || snapshot.Index < min.Index {
|
} else if min == nil || snapshot.Index < min.Index {
|
||||||
@@ -76,8 +242,27 @@ func FindMinSnapshotByGeneration(a []*SnapshotInfo, generation string) *Snapshot
|
|||||||
|
|
||||||
// WALInfo represents file information about a WAL file.
|
// WALInfo represents file information about a WAL file.
|
||||||
type WALInfo struct {
|
type WALInfo struct {
|
||||||
Name string
|
Generation string
|
||||||
Replica string
|
Index int
|
||||||
|
CreatedAt time.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
// WALInfoSlice represents a slice of WAL metadata.
|
||||||
|
type WALInfoSlice []WALInfo
|
||||||
|
|
||||||
|
func (a WALInfoSlice) Len() int { return len(a) }
|
||||||
|
|
||||||
|
func (a WALInfoSlice) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
||||||
|
|
||||||
|
func (a WALInfoSlice) Less(i, j int) bool {
|
||||||
|
if a[i].Generation != a[j].Generation {
|
||||||
|
return a[i].Generation < a[j].Generation
|
||||||
|
}
|
||||||
|
return a[i].Index < a[j].Index
|
||||||
|
}
|
||||||
|
|
||||||
|
// WALSegmentInfo represents file information about a WAL segment file.
|
||||||
|
type WALSegmentInfo struct {
|
||||||
Generation string
|
Generation string
|
||||||
Index int
|
Index int
|
||||||
Offset int64
|
Offset int64
|
||||||
@@ -85,6 +270,27 @@ type WALInfo struct {
|
|||||||
CreatedAt time.Time
|
CreatedAt time.Time
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Pos returns the WAL position when the segment was made.
|
||||||
|
func (info *WALSegmentInfo) Pos() Pos {
|
||||||
|
return Pos{Generation: info.Generation, Index: info.Index, Offset: info.Offset}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WALSegmentInfoSlice represents a slice of WAL segment metadata.
|
||||||
|
type WALSegmentInfoSlice []WALSegmentInfo
|
||||||
|
|
||||||
|
func (a WALSegmentInfoSlice) Len() int { return len(a) }
|
||||||
|
|
||||||
|
func (a WALSegmentInfoSlice) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
||||||
|
|
||||||
|
func (a WALSegmentInfoSlice) Less(i, j int) bool {
|
||||||
|
if a[i].Generation != a[j].Generation {
|
||||||
|
return a[i].Generation < a[j].Generation
|
||||||
|
} else if a[i].Index != a[j].Index {
|
||||||
|
return a[i].Index < a[j].Index
|
||||||
|
}
|
||||||
|
return a[i].Offset < a[j].Offset
|
||||||
|
}
|
||||||
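A sketch showing the sort order the new WALSegmentInfoSlice type provides (the generation names and offsets are hypothetical):

```go
package main

import (
	"fmt"
	"sort"

	"github.com/benbjohnson/litestream"
)

func main() {
	// The *Slice types implement sort.Interface, ordering segments by
	// generation, then index, then offset.
	segments := []litestream.WALSegmentInfo{
		{Generation: "5dfeb4aa03232553", Index: 1, Offset: 0},
		{Generation: "29cf4bced74e92ab", Index: 2, Offset: 4096},
		{Generation: "29cf4bced74e92ab", Index: 2, Offset: 0},
	}
	sort.Sort(litestream.WALSegmentInfoSlice(segments))

	for _, info := range segments {
		fmt.Println(info.Pos())
	}
	// Output:
	// 29cf4bced74e92ab/00000002:0
	// 29cf4bced74e92ab/00000002:4096
	// 5dfeb4aa03232553/00000001:0
}
```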
|
|
||||||
// Pos is a position in the WAL for a generation.
|
// Pos is a position in the WAL for a generation.
|
||||||
type Pos struct {
|
type Pos struct {
|
||||||
Generation string // generation name
|
Generation string // generation name
|
||||||
@@ -95,9 +301,9 @@ type Pos struct {
|
|||||||
// String returns a string representation.
|
// String returns a string representation.
|
||||||
func (p Pos) String() string {
|
func (p Pos) String() string {
|
||||||
if p.IsZero() {
|
if p.IsZero() {
|
||||||
return "<>"
|
return ""
|
||||||
}
|
}
|
||||||
return fmt.Sprintf("<%s,%08x,%d>", p.Generation, p.Index, p.Offset)
|
return fmt.Sprintf("%s/%08x:%d", p.Generation, p.Index, p.Offset)
|
||||||
}
|
}
|
||||||
|
|
||||||
// IsZero returns true if p is the zero value.
|
// IsZero returns true if p is the zero value.
|
||||||
@@ -105,6 +311,11 @@ func (p Pos) IsZero() bool {
|
|||||||
return p == (Pos{})
|
return p == (Pos{})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Truncate returns p with the offset truncated to zero.
|
||||||
|
func (p Pos) Truncate() Pos {
|
||||||
|
return Pos{Generation: p.Generation, Index: p.Index}
|
||||||
|
}
|
||||||
|
|
||||||
// Checksum computes a running SQLite checksum over a byte slice.
|
// Checksum computes a running SQLite checksum over a byte slice.
|
||||||
func Checksum(bo binary.ByteOrder, s0, s1 uint32, b []byte) (uint32, uint32) {
|
func Checksum(bo binary.ByteOrder, s0, s1 uint32, b []byte) (uint32, uint32) {
|
||||||
assert(len(b)%8 == 0, "misaligned checksum byte slice")
|
assert(len(b)%8 == 0, "misaligned checksum byte slice")
|
||||||
@@ -151,8 +362,9 @@ func readWALHeader(filename string) ([]byte, error) {
|
|||||||
return buf[:n], err
|
return buf[:n], err
|
||||||
}
|
}
|
||||||
|
|
||||||
// readFileAt reads a slice from a file.
|
// readWALFileAt reads a slice from a file. Do not use this with database files
|
||||||
func readFileAt(filename string, offset, n int64) ([]byte, error) {
|
// as it causes problems with non-OFD locks.
|
||||||
|
func readWALFileAt(filename string, offset, n int64) ([]byte, error) {
|
||||||
f, err := os.Open(filename)
|
f, err := os.Open(filename)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -195,6 +407,56 @@ func IsGenerationName(s string) bool {
|
|||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// GenerationsPath returns the path to the root directory that contains all generations.
|
||||||
|
func GenerationsPath(root string) string {
|
||||||
|
return path.Join(root, "generations")
|
||||||
|
}
|
||||||
|
|
||||||
|
// GenerationPath returns the path to a generation's root directory.
|
||||||
|
func GenerationPath(root, generation string) (string, error) {
|
||||||
|
dir := GenerationsPath(root)
|
||||||
|
if generation == "" {
|
||||||
|
return "", fmt.Errorf("generation required")
|
||||||
|
}
|
||||||
|
return path.Join(dir, generation), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// SnapshotsPath returns the path to a generation's snapshot directory.
|
||||||
|
func SnapshotsPath(root, generation string) (string, error) {
|
||||||
|
dir, err := GenerationPath(root, generation)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
return path.Join(dir, "snapshots"), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// SnapshotPath returns the path to an uncompressed snapshot file.
|
||||||
|
func SnapshotPath(root, generation string, index int) (string, error) {
|
||||||
|
dir, err := SnapshotsPath(root, generation)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
return path.Join(dir, FormatSnapshotPath(index)), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// WALPath returns the path to a generation's WAL directory.
|
||||||
|
func WALPath(root, generation string) (string, error) {
|
||||||
|
dir, err := GenerationPath(root, generation)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
return path.Join(dir, "wal"), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// WALSegmentPath returns the path to a WAL segment file.
|
||||||
|
func WALSegmentPath(root, generation string, index int, offset int64) (string, error) {
|
||||||
|
dir, err := WALPath(root, generation)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
return path.Join(dir, FormatWALSegmentPath(index, offset)), nil
|
||||||
|
}
|
||||||
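A sketch of composing the new path helpers; the root prefix and generation name below are illustrative:

```go
package main

import (
	"fmt"
	"log"

	"github.com/benbjohnson/litestream"
)

func main() {
	const root, generation = "/var/lib/litestream/db", "0123456701234567" // illustrative values

	p, err := litestream.SnapshotPath(root, generation, 5)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(p) // /var/lib/litestream/db/generations/0123456701234567/snapshots/00000005.snapshot.lz4

	p, err = litestream.WALSegmentPath(root, generation, 5, 4096)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(p) // /var/lib/litestream/db/generations/0123456701234567/wal/00000005_00001000.wal.lz4
}
```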
|
|
||||||
// IsSnapshotPath returns true if s is a path to a snapshot file.
|
// IsSnapshotPath returns true if s is a path to a snapshot file.
|
||||||
func IsSnapshotPath(s string) bool {
|
func IsSnapshotPath(s string) bool {
|
||||||
return snapshotPathRegex.MatchString(s)
|
return snapshotPathRegex.MatchString(s)
|
||||||
@@ -202,38 +464,43 @@ func IsSnapshotPath(s string) bool {
|
|||||||
|
|
||||||
// ParseSnapshotPath returns the index for the snapshot.
|
// ParseSnapshotPath returns the index for the snapshot.
|
||||||
// Returns an error if the path is not a valid snapshot path.
|
// Returns an error if the path is not a valid snapshot path.
|
||||||
func ParseSnapshotPath(s string) (index int, ext string, err error) {
|
func ParseSnapshotPath(s string) (index int, err error) {
|
||||||
s = filepath.Base(s)
|
s = filepath.Base(s)
|
||||||
|
|
||||||
a := snapshotPathRegex.FindStringSubmatch(s)
|
a := snapshotPathRegex.FindStringSubmatch(s)
|
||||||
if a == nil {
|
if a == nil {
|
||||||
return 0, "", fmt.Errorf("invalid snapshot path: %s", s)
|
return 0, fmt.Errorf("invalid snapshot path: %s", s)
|
||||||
}
|
}
|
||||||
|
|
||||||
i64, _ := strconv.ParseUint(a[1], 16, 64)
|
i64, _ := strconv.ParseUint(a[1], 16, 64)
|
||||||
return int(i64), a[2], nil
|
return int(i64), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
var snapshotPathRegex = regexp.MustCompile(`^([0-9a-f]{8})(.snapshot(?:.lz4)?)$`)
|
// FormatSnapshotPath formats a snapshot filename with a given index.
|
||||||
|
func FormatSnapshotPath(index int) string {
|
||||||
|
assert(index >= 0, "snapshot index must be non-negative")
|
||||||
|
return fmt.Sprintf("%08x%s", index, SnapshotExt)
|
||||||
|
}
|
||||||
|
|
||||||
|
var snapshotPathRegex = regexp.MustCompile(`^([0-9a-f]{8})\.snapshot\.lz4$`)
|
||||||
|
|
||||||
// IsWALPath returns true if s is a path to a WAL file.
|
// IsWALPath returns true if s is a path to a WAL file.
|
||||||
func IsWALPath(s string) bool {
|
func IsWALPath(s string) bool {
|
||||||
return walPathRegex.MatchString(s)
|
return walPathRegex.MatchString(s)
|
||||||
}
|
}
|
||||||
|
|
||||||
// ParseWALPath returns the index & offset for the WAL file.
|
// ParseWALPath returns the index for the WAL file.
|
||||||
// Returns an error if the path is not a valid snapshot path.
|
// Returns an error if the path is not a valid WAL path.
|
||||||
func ParseWALPath(s string) (index int, offset int64, ext string, err error) {
|
func ParseWALPath(s string) (index int, err error) {
|
||||||
s = filepath.Base(s)
|
s = filepath.Base(s)
|
||||||
|
|
||||||
a := walPathRegex.FindStringSubmatch(s)
|
a := walPathRegex.FindStringSubmatch(s)
|
||||||
if a == nil {
|
if a == nil {
|
||||||
return 0, 0, "", fmt.Errorf("invalid wal path: %s", s)
|
return 0, fmt.Errorf("invalid wal path: %s", s)
|
||||||
}
|
}
|
||||||
|
|
||||||
i64, _ := strconv.ParseUint(a[1], 16, 64)
|
i64, _ := strconv.ParseUint(a[1], 16, 64)
|
||||||
off64, _ := strconv.ParseUint(a[2], 16, 64)
|
return int(i64), nil
|
||||||
return int(i64), int64(off64), a[3], nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// FormatWALPath formats a WAL filename with a given index.
|
// FormatWALPath formats a WAL filename with a given index.
|
||||||
@@ -242,80 +509,37 @@ func FormatWALPath(index int) string {
|
|||||||
return fmt.Sprintf("%08x%s", index, WALExt)
|
return fmt.Sprintf("%08x%s", index, WALExt)
|
||||||
}
|
}
|
||||||
|
|
||||||
// FormatWALPathWithOffset formats a WAL filename with a given index & offset.
|
var walPathRegex = regexp.MustCompile(`^([0-9a-f]{8})\.wal$`)
|
||||||
func FormatWALPathWithOffset(index int, offset int64) string {
|
|
||||||
assert(index >= 0, "wal index must be non-negative")
|
// ParseWALSegmentPath returns the index & offset for the WAL segment file.
|
||||||
assert(offset >= 0, "wal offset must be non-negative")
|
// Returns an error if the path is not a valid wal segment path.
|
||||||
return fmt.Sprintf("%08x_%08x%s", index, offset, WALExt)
|
func ParseWALSegmentPath(s string) (index int, offset int64, err error) {
|
||||||
|
s = filepath.Base(s)
|
||||||
|
|
||||||
|
a := walSegmentPathRegex.FindStringSubmatch(s)
|
||||||
|
if a == nil {
|
||||||
|
return 0, 0, fmt.Errorf("invalid wal segment path: %s", s)
|
||||||
|
}
|
||||||
|
|
||||||
|
i64, _ := strconv.ParseUint(a[1], 16, 64)
|
||||||
|
off64, _ := strconv.ParseUint(a[2], 16, 64)
|
||||||
|
return int(i64), int64(off64), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
var walPathRegex = regexp.MustCompile(`^([0-9a-f]{8})(?:_([0-9a-f]{8}))?(.wal(?:.lz4)?)$`)
|
// FormatWALSegmentPath formats a WAL segment filename with a given index & offset.
|
||||||
|
func FormatWALSegmentPath(index int, offset int64) string {
|
||||||
|
assert(index >= 0, "wal index must be non-negative")
|
||||||
|
assert(offset >= 0, "wal offset must be non-negative")
|
||||||
|
return fmt.Sprintf("%08x_%08x%s", index, offset, WALSegmentExt)
|
||||||
|
}
|
||||||
|
|
||||||
|
var walSegmentPathRegex = regexp.MustCompile(`^([0-9a-f]{8})(?:_([0-9a-f]{8}))\.wal\.lz4$`)
|
||||||
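A sketch of the filename round-trip through FormatWALSegmentPath and ParseWALSegmentPath:

```go
package main

import (
	"fmt"
	"log"

	"github.com/benbjohnson/litestream"
)

func main() {
	// Format a segment filename from an index and byte offset, then parse it back.
	name := litestream.FormatWALSegmentPath(5, 4096)
	fmt.Println(name) // 00000005_00001000.wal.lz4

	index, offset, err := litestream.ParseWALSegmentPath(name)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(index, offset) // 5 4096
}
```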
|
|
||||||
// isHexChar returns true if ch is a lowercase hex character.
|
// isHexChar returns true if ch is a lowercase hex character.
|
||||||
func isHexChar(ch rune) bool {
|
func isHexChar(ch rune) bool {
|
||||||
return (ch >= '0' && ch <= '9') || (ch >= 'a' && ch <= 'f')
|
return (ch >= '0' && ch <= '9') || (ch >= 'a' && ch <= 'f')
|
||||||
}
|
}
|
||||||
|
|
||||||
// createFile creates the file and attempts to set the UID/GID.
|
|
||||||
func createFile(filename string, uid, gid int) (*os.File, error) {
|
|
||||||
f, err := os.Create(filename)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
_ = f.Chown(uid, gid)
|
|
||||||
return f, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// mkdirAll is a copy of os.MkdirAll() except that it attempts to set the
|
|
||||||
// uid/gid for each created directory.
|
|
||||||
func mkdirAll(path string, perm os.FileMode, uid, gid int) error {
|
|
||||||
// Fast path: if we can tell whether path is a directory or file, stop with success or error.
|
|
||||||
dir, err := os.Stat(path)
|
|
||||||
if err == nil {
|
|
||||||
if dir.IsDir() {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return &os.PathError{Op: "mkdir", Path: path, Err: syscall.ENOTDIR}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Slow path: make sure parent exists and then call Mkdir for path.
|
|
||||||
i := len(path)
|
|
||||||
for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator.
|
|
||||||
i--
|
|
||||||
}
|
|
||||||
|
|
||||||
j := i
|
|
||||||
for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element.
|
|
||||||
j--
|
|
||||||
}
|
|
||||||
|
|
||||||
if j > 1 {
|
|
||||||
// Create parent.
|
|
||||||
err = mkdirAll(fixRootDirectory(path[:j-1]), perm, uid, gid)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parent now exists; invoke Mkdir and use its result.
|
|
||||||
err = os.Mkdir(path, perm)
|
|
||||||
if err != nil {
|
|
||||||
// Handle arguments like "foo/." by
|
|
||||||
// double-checking that directory doesn't exist.
|
|
||||||
dir, err1 := os.Lstat(path)
|
|
||||||
if err1 == nil && dir.IsDir() {
|
|
||||||
_ = os.Chown(path, uid, gid)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
_ = os.Chown(path, uid, gid)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Tracef is used for low-level tracing.
|
|
||||||
var Tracef = func(format string, a ...interface{}) {}
|
|
||||||
|
|
||||||
func assert(condition bool, message string) {
|
func assert(condition bool, message string) {
|
||||||
if !condition {
|
if !condition {
|
||||||
panic("assertion failed: " + message)
|
panic("assertion failed: " + message)
|
||||||
|
|||||||
@@ -40,6 +40,104 @@ func TestChecksum(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestGenerationsPath(t *testing.T) {
|
||||||
|
t.Run("OK", func(t *testing.T) {
|
||||||
|
if got, want := litestream.GenerationsPath("foo"), "foo/generations"; got != want {
|
||||||
|
t.Fatalf("GenerationsPath()=%v, want %v", got, want)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
t.Run("NoPath", func(t *testing.T) {
|
||||||
|
if got, want := litestream.GenerationsPath(""), "generations"; got != want {
|
||||||
|
t.Fatalf("GenerationsPath()=%v, want %v", got, want)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGenerationPath(t *testing.T) {
|
||||||
|
t.Run("OK", func(t *testing.T) {
|
||||||
|
if got, err := litestream.GenerationPath("foo", "0123456701234567"); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
} else if want := "foo/generations/0123456701234567"; got != want {
|
||||||
|
t.Fatalf("GenerationPath()=%v, want %v", got, want)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
t.Run("ErrNoGeneration", func(t *testing.T) {
|
||||||
|
if _, err := litestream.GenerationPath("foo", ""); err == nil || err.Error() != `generation required` {
|
||||||
|
t.Fatalf("expected error: %v", err)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSnapshotsPath(t *testing.T) {
|
||||||
|
t.Run("OK", func(t *testing.T) {
|
||||||
|
if got, err := litestream.SnapshotsPath("foo", "0123456701234567"); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
} else if want := "foo/generations/0123456701234567/snapshots"; got != want {
|
||||||
|
t.Fatalf("SnapshotsPath()=%v, want %v", got, want)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
t.Run("ErrNoGeneration", func(t *testing.T) {
|
||||||
|
if _, err := litestream.SnapshotsPath("foo", ""); err == nil || err.Error() != `generation required` {
|
||||||
|
t.Fatalf("unexpected error: %v", err)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSnapshotPath(t *testing.T) {
|
||||||
|
t.Run("OK", func(t *testing.T) {
|
||||||
|
if got, err := litestream.SnapshotPath("foo", "0123456701234567", 1000); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
} else if want := "foo/generations/0123456701234567/snapshots/000003e8.snapshot.lz4"; got != want {
|
||||||
|
t.Fatalf("SnapshotPath()=%v, want %v", got, want)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
t.Run("ErrNoGeneration", func(t *testing.T) {
|
||||||
|
if _, err := litestream.SnapshotPath("foo", "", 1000); err == nil || err.Error() != `generation required` {
|
||||||
|
t.Fatalf("unexpected error: %v", err)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestWALPath(t *testing.T) {
|
||||||
|
t.Run("OK", func(t *testing.T) {
|
||||||
|
if got, err := litestream.WALPath("foo", "0123456701234567"); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
} else if want := "foo/generations/0123456701234567/wal"; got != want {
|
||||||
|
t.Fatalf("WALPath()=%v, want %v", got, want)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
t.Run("ErrNoGeneration", func(t *testing.T) {
|
||||||
|
if _, err := litestream.WALPath("foo", ""); err == nil || err.Error() != `generation required` {
|
||||||
|
t.Fatalf("unexpected error: %v", err)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestWALSegmentPath(t *testing.T) {
|
||||||
|
t.Run("OK", func(t *testing.T) {
|
||||||
|
if got, err := litestream.WALSegmentPath("foo", "0123456701234567", 1000, 1001); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
} else if want := "foo/generations/0123456701234567/wal/000003e8_000003e9.wal.lz4"; got != want {
|
||||||
|
t.Fatalf("WALPath()=%v, want %v", got, want)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
t.Run("ErrNoGeneration", func(t *testing.T) {
|
||||||
|
if _, err := litestream.WALSegmentPath("foo", "", 1000, 0); err == nil || err.Error() != `generation required` {
|
||||||
|
t.Fatalf("unexpected error: %v", err)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestFindMinSnapshotByGeneration(t *testing.T) {
|
||||||
|
infos := []litestream.SnapshotInfo{
|
||||||
|
{Generation: "29cf4bced74e92ab", Index: 0},
|
||||||
|
{Generation: "5dfeb4aa03232553", Index: 24},
|
||||||
|
}
|
||||||
|
if got, want := litestream.FindMinSnapshotByGeneration(infos, "29cf4bced74e92ab"), &infos[0]; got != want {
|
||||||
|
t.Fatalf("info=%#v, want %#v", got, want)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func MustDecodeHexString(s string) []byte {
|
func MustDecodeHexString(s string) []byte {
|
||||||
b, err := hex.DecodeString(s)
|
b, err := hex.DecodeString(s)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|||||||
@@ -1,18 +0,0 @@
|
|||||||
// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
|
|
||||||
|
|
||||||
package litestream
|
|
||||||
|
|
||||||
import (
|
|
||||||
"os"
|
|
||||||
"syscall"
|
|
||||||
)
|
|
||||||
|
|
||||||
// fileinfo returns syscall fields from a FileInfo object.
|
|
||||||
func fileinfo(fi os.FileInfo) (uid, gid int) {
|
|
||||||
stat := fi.Sys().(*syscall.Stat_t)
|
|
||||||
return int(stat.Uid), int(stat.Gid)
|
|
||||||
}
|
|
||||||
|
|
||||||
func fixRootDirectory(p string) string {
|
|
||||||
return p
|
|
||||||
}
|
|
||||||
@@ -1,23 +0,0 @@
|
|||||||
// +build windows
|
|
||||||
|
|
||||||
package litestream
|
|
||||||
|
|
||||||
import (
|
|
||||||
"os"
|
|
||||||
"syscall"
|
|
||||||
)
|
|
||||||
|
|
||||||
// fileinfo returns syscall fields from a FileInfo object.
|
|
||||||
func fileinfo(fi os.FileInfo) (uid, gid int) {
|
|
||||||
return -1, -1
|
|
||||||
}
|
|
||||||
|
|
||||||
// fixRootDirectory is copied from the standard library for use with mkdirAll()
|
|
||||||
func fixRootDirectory(p string) string {
|
|
||||||
if len(p) == len(`\\?\c:`) {
|
|
||||||
if IsPathSeparator(p[0]) && IsPathSeparator(p[1]) && p[2] == '?' && IsPathSeparator(p[3]) && p[5] == ':' {
|
|
||||||
return p + `\`
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return p
|
|
||||||
}
|
|
||||||
65  mock/replica_client.go  Normal file
@@ -0,0 +1,65 @@
package mock

import (
	"context"
	"io"

	"github.com/benbjohnson/litestream"
)

var _ litestream.ReplicaClient = (*ReplicaClient)(nil)

type ReplicaClient struct {
	GenerationsFunc       func(ctx context.Context) ([]string, error)
	DeleteGenerationFunc  func(ctx context.Context, generation string) error
	SnapshotsFunc         func(ctx context.Context, generation string) (litestream.SnapshotIterator, error)
	WriteSnapshotFunc     func(ctx context.Context, generation string, index int, r io.Reader) (litestream.SnapshotInfo, error)
	DeleteSnapshotFunc    func(ctx context.Context, generation string, index int) error
	SnapshotReaderFunc    func(ctx context.Context, generation string, index int) (io.ReadCloser, error)
	WALSegmentsFunc       func(ctx context.Context, generation string) (litestream.WALSegmentIterator, error)
	WriteWALSegmentFunc   func(ctx context.Context, pos litestream.Pos, r io.Reader) (litestream.WALSegmentInfo, error)
	DeleteWALSegmentsFunc func(ctx context.Context, a []litestream.Pos) error
	WALSegmentReaderFunc  func(ctx context.Context, pos litestream.Pos) (io.ReadCloser, error)
}

func (c *ReplicaClient) Type() string { return "mock" }

func (c *ReplicaClient) Generations(ctx context.Context) ([]string, error) {
	return c.GenerationsFunc(ctx)
}

func (c *ReplicaClient) DeleteGeneration(ctx context.Context, generation string) error {
	return c.DeleteGenerationFunc(ctx, generation)
}

func (c *ReplicaClient) Snapshots(ctx context.Context, generation string) (litestream.SnapshotIterator, error) {
	return c.SnapshotsFunc(ctx, generation)
}

func (c *ReplicaClient) WriteSnapshot(ctx context.Context, generation string, index int, r io.Reader) (litestream.SnapshotInfo, error) {
	return c.WriteSnapshotFunc(ctx, generation, index, r)
}

func (c *ReplicaClient) DeleteSnapshot(ctx context.Context, generation string, index int) error {
	return c.DeleteSnapshotFunc(ctx, generation, index)
}

func (c *ReplicaClient) SnapshotReader(ctx context.Context, generation string, index int) (io.ReadCloser, error) {
	return c.SnapshotReaderFunc(ctx, generation, index)
}

func (c *ReplicaClient) WALSegments(ctx context.Context, generation string) (litestream.WALSegmentIterator, error) {
	return c.WALSegmentsFunc(ctx, generation)
}

func (c *ReplicaClient) WriteWALSegment(ctx context.Context, pos litestream.Pos, r io.Reader) (litestream.WALSegmentInfo, error) {
	return c.WriteWALSegmentFunc(ctx, pos, r)
}

func (c *ReplicaClient) DeleteWALSegments(ctx context.Context, a []litestream.Pos) error {
	return c.DeleteWALSegmentsFunc(ctx, a)
}

func (c *ReplicaClient) WALSegmentReader(ctx context.Context, pos litestream.Pos) (io.ReadCloser, error) {
	return c.WALSegmentReaderFunc(ctx, pos)
}
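The stubbed function fields above make this mock easy to drive from unit tests. A minimal sketch of that usage (the test name and stub values are illustrative assumptions, not part of this changeset):

package mock_test

import (
	"context"
	"testing"

	"github.com/benbjohnson/litestream/mock"
)

// TestReplicaClient_Generations_Stub is a hypothetical test name; it stubs a
// single method and asserts the mock forwards the call to the stub.
func TestReplicaClient_Generations_Stub(t *testing.T) {
	c := &mock.ReplicaClient{
		GenerationsFunc: func(ctx context.Context) ([]string, error) {
			return []string{"5efbd8d042012dca"}, nil
		},
	}

	got, err := c.Generations(context.Background())
	if err != nil {
		t.Fatal(err)
	} else if len(got) != 1 || got[0] != "5efbd8d042012dca" {
		t.Fatalf("unexpected generations: %v", got)
	}
}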
1942 replica.go (diff suppressed because it is too large)
48 replica_client.go Normal file
@@ -0,0 +1,48 @@
package litestream

import (
	"context"
	"io"
)

// ReplicaClient represents a client for connecting to a replica's storage.
type ReplicaClient interface {
	// Returns the type of client.
	Type() string

	// Returns a list of available generations. Order is undefined.
	Generations(ctx context.Context) ([]string, error)

	// Deletes all snapshots & WAL segments within a generation.
	DeleteGeneration(ctx context.Context, generation string) error

	// Returns an iterator of all snapshots within a generation on the replica. Order is undefined.
	Snapshots(ctx context.Context, generation string) (SnapshotIterator, error)

	// Writes LZ4 compressed snapshot data to the replica at a given index
	// within a generation. Returns metadata for the snapshot.
	WriteSnapshot(ctx context.Context, generation string, index int, r io.Reader) (SnapshotInfo, error)

	// Deletes a snapshot with the given generation & index.
	DeleteSnapshot(ctx context.Context, generation string, index int) error

	// Returns a reader that contains LZ4 compressed snapshot data for a
	// given index within a generation. Returns os.ErrNotExist if
	// the snapshot does not exist.
	SnapshotReader(ctx context.Context, generation string, index int) (io.ReadCloser, error)

	// Returns an iterator of all WAL segments within a generation on the replica. Order is undefined.
	WALSegments(ctx context.Context, generation string) (WALSegmentIterator, error)

	// Writes an LZ4 compressed WAL segment at a given position.
	// Returns metadata for the written segment.
	WriteWALSegment(ctx context.Context, pos Pos, r io.Reader) (WALSegmentInfo, error)

	// Deletes one or more WAL segments at the given positions.
	DeleteWALSegments(ctx context.Context, a []Pos) error

	// Returns a reader that contains an LZ4 compressed WAL segment at a given
	// index/offset within a generation. Returns os.ErrNotExist if the
	// WAL segment does not exist.
	WALSegmentReader(ctx context.Context, pos Pos) (io.ReadCloser, error)
}
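Since every storage backend in this changeset implements the interface above, callers can be written against it generically. A minimal sketch under that assumption, using only methods and helpers that appear elsewhere in this diff (the replica path below is a placeholder):

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/benbjohnson/litestream"
	"github.com/benbjohnson/litestream/file"
)

// listWALSegments walks every generation known to a replica client and prints
// its WAL segments. Illustrative helper only; not part of this changeset.
func listWALSegments(ctx context.Context, c litestream.ReplicaClient) error {
	generations, err := c.Generations(ctx)
	if err != nil {
		return err
	}
	for _, generation := range generations {
		itr, err := c.WALSegments(ctx, generation)
		if err != nil {
			return err
		}
		infos, err := litestream.SliceWALSegmentIterator(itr)
		if cerr := itr.Close(); err == nil {
			err = cerr
		}
		if err != nil {
			return err
		}
		for _, info := range infos {
			fmt.Printf("%s/%08x:%08x %d bytes\n", info.Generation, info.Index, info.Offset, info.Size)
		}
	}
	return nil
}

func main() {
	c := file.NewReplicaClient("/path/to/replica") // placeholder path
	if err := listWALSegments(context.Background(), c); err != nil {
		log.Fatal(err)
	}
}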
572 replica_client_test.go Normal file
@@ -0,0 +1,572 @@
package litestream_test

import (
	"context"
	"flag"
	"fmt"
	"io"
	"math/rand"
	"os"
	"path"
	"reflect"
	"sort"
	"strings"
	"testing"

	"github.com/benbjohnson/litestream"
	"github.com/benbjohnson/litestream/abs"
	"github.com/benbjohnson/litestream/file"
	"github.com/benbjohnson/litestream/gcs"
	"github.com/benbjohnson/litestream/s3"
	"github.com/benbjohnson/litestream/sftp"
)

var (
	// Enables integration tests.
	integration = flag.String("integration", "file", "")
)

// S3 settings
var (
	// Replica client settings
	s3AccessKeyID     = flag.String("s3-access-key-id", os.Getenv("LITESTREAM_S3_ACCESS_KEY_ID"), "")
	s3SecretAccessKey = flag.String("s3-secret-access-key", os.Getenv("LITESTREAM_S3_SECRET_ACCESS_KEY"), "")
	s3Region          = flag.String("s3-region", os.Getenv("LITESTREAM_S3_REGION"), "")
	s3Bucket          = flag.String("s3-bucket", os.Getenv("LITESTREAM_S3_BUCKET"), "")
	s3Path            = flag.String("s3-path", os.Getenv("LITESTREAM_S3_PATH"), "")
	s3Endpoint        = flag.String("s3-endpoint", os.Getenv("LITESTREAM_S3_ENDPOINT"), "")
	s3ForcePathStyle  = flag.Bool("s3-force-path-style", os.Getenv("LITESTREAM_S3_FORCE_PATH_STYLE") == "true", "")
	s3SkipVerify      = flag.Bool("s3-skip-verify", os.Getenv("LITESTREAM_S3_SKIP_VERIFY") == "true", "")
)

// Google cloud storage settings
var (
	gcsBucket = flag.String("gcs-bucket", os.Getenv("LITESTREAM_GCS_BUCKET"), "")
	gcsPath   = flag.String("gcs-path", os.Getenv("LITESTREAM_GCS_PATH"), "")
)

// Azure blob storage settings
var (
	absAccountName = flag.String("abs-account-name", os.Getenv("LITESTREAM_ABS_ACCOUNT_NAME"), "")
	absAccountKey  = flag.String("abs-account-key", os.Getenv("LITESTREAM_ABS_ACCOUNT_KEY"), "")
	absBucket      = flag.String("abs-bucket", os.Getenv("LITESTREAM_ABS_BUCKET"), "")
	absPath        = flag.String("abs-path", os.Getenv("LITESTREAM_ABS_PATH"), "")
)

// SFTP settings
var (
	sftpHost     = flag.String("sftp-host", os.Getenv("LITESTREAM_SFTP_HOST"), "")
	sftpUser     = flag.String("sftp-user", os.Getenv("LITESTREAM_SFTP_USER"), "")
	sftpPassword = flag.String("sftp-password", os.Getenv("LITESTREAM_SFTP_PASSWORD"), "")
	sftpKeyPath  = flag.String("sftp-key-path", os.Getenv("LITESTREAM_SFTP_KEY_PATH"), "")
	sftpPath     = flag.String("sftp-path", os.Getenv("LITESTREAM_SFTP_PATH"), "")
)
func TestReplicaClient_Generations(t *testing.T) {
|
||||||
|
RunWithReplicaClient(t, "OK", func(t *testing.T, c litestream.ReplicaClient) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
// Write snapshots.
|
||||||
|
if _, err := c.WriteSnapshot(context.Background(), "5efbd8d042012dca", 0, strings.NewReader(`foo`)); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
} else if _, err := c.WriteSnapshot(context.Background(), "b16ddcf5c697540f", 0, strings.NewReader(`bar`)); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
} else if _, err := c.WriteSnapshot(context.Background(), "155fe292f8333c72", 0, strings.NewReader(`baz`)); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fetch and sort generations.
|
||||||
|
got, err := c.Generations(context.Background())
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
sort.Strings(got)
|
||||||
|
|
||||||
|
if want := []string{"155fe292f8333c72", "5efbd8d042012dca", "b16ddcf5c697540f"}; !reflect.DeepEqual(got, want) {
|
||||||
|
t.Fatalf("Generations()=%v, want %v", got, want)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
RunWithReplicaClient(t, "NoGenerationsDir", func(t *testing.T, c litestream.ReplicaClient) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
if generations, err := c.Generations(context.Background()); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
} else if got, want := len(generations), 0; got != want {
|
||||||
|
t.Fatalf("len(Generations())=%v, want %v", got, want)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestReplicaClient_Snapshots(t *testing.T) {
|
||||||
|
RunWithReplicaClient(t, "OK", func(t *testing.T, c litestream.ReplicaClient) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
// Write snapshots.
|
||||||
|
if _, err := c.WriteSnapshot(context.Background(), "5efbd8d042012dca", 1, strings.NewReader(``)); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
} else if _, err := c.WriteSnapshot(context.Background(), "b16ddcf5c697540f", 5, strings.NewReader(`x`)); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
} else if _, err := c.WriteSnapshot(context.Background(), "b16ddcf5c697540f", 10, strings.NewReader(`xyz`)); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fetch all snapshots by generation.
|
||||||
|
itr, err := c.Snapshots(context.Background(), "b16ddcf5c697540f")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
defer itr.Close()
|
||||||
|
|
||||||
|
// Read all snapshots into a slice so they can be sorted.
|
||||||
|
a, err := litestream.SliceSnapshotIterator(itr)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
} else if got, want := len(a), 2; got != want {
|
||||||
|
t.Fatalf("len=%v, want %v", got, want)
|
||||||
|
}
|
||||||
|
sort.Sort(litestream.SnapshotInfoSlice(a))
|
||||||
|
|
||||||
|
// Verify first snapshot metadata.
|
||||||
|
if got, want := a[0].Generation, "b16ddcf5c697540f"; got != want {
|
||||||
|
t.Fatalf("Generation=%v, want %v", got, want)
|
||||||
|
} else if got, want := a[0].Index, 5; got != want {
|
||||||
|
t.Fatalf("Index=%v, want %v", got, want)
|
||||||
|
} else if got, want := a[0].Size, int64(1); got != want {
|
||||||
|
t.Fatalf("Size=%v, want %v", got, want)
|
||||||
|
} else if a[0].CreatedAt.IsZero() {
|
||||||
|
t.Fatalf("expected CreatedAt")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify second snapshot metadata.
|
||||||
|
if got, want := a[1].Generation, "b16ddcf5c697540f"; got != want {
|
||||||
|
t.Fatalf("Generation=%v, want %v", got, want)
|
||||||
|
} else if got, want := a[1].Index, 0xA; got != want {
|
||||||
|
t.Fatalf("Index=%v, want %v", got, want)
|
||||||
|
} else if got, want := a[1].Size, int64(3); got != want {
|
||||||
|
t.Fatalf("Size=%v, want %v", got, want)
|
||||||
|
} else if a[1].CreatedAt.IsZero() {
|
||||||
|
t.Fatalf("expected CreatedAt")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ensure close is clean.
|
||||||
|
if err := itr.Close(); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
RunWithReplicaClient(t, "NoGenerationDir", func(t *testing.T, c litestream.ReplicaClient) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
itr, err := c.Snapshots(context.Background(), "5efbd8d042012dca")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
defer itr.Close()
|
||||||
|
|
||||||
|
if itr.Next() {
|
||||||
|
t.Fatal("expected no snapshots")
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
RunWithReplicaClient(t, "ErrNoGeneration", func(t *testing.T, c litestream.ReplicaClient) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
itr, err := c.Snapshots(context.Background(), "")
|
||||||
|
if err == nil {
|
||||||
|
err = itr.Close()
|
||||||
|
}
|
||||||
|
if err == nil || err.Error() != `cannot determine snapshots path: generation required` {
|
||||||
|
t.Fatalf("unexpected error: %v", err)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestReplicaClient_WriteSnapshot(t *testing.T) {
|
||||||
|
RunWithReplicaClient(t, "OK", func(t *testing.T, c litestream.ReplicaClient) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
if _, err := c.WriteSnapshot(context.Background(), "b16ddcf5c697540f", 1000, strings.NewReader(`foobar`)); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if r, err := c.SnapshotReader(context.Background(), "b16ddcf5c697540f", 1000); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
} else if buf, err := io.ReadAll(r); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
} else if err := r.Close(); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
} else if got, want := string(buf), `foobar`; got != want {
|
||||||
|
t.Fatalf("data=%q, want %q", got, want)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
RunWithReplicaClient(t, "ErrNoGeneration", func(t *testing.T, c litestream.ReplicaClient) {
|
||||||
|
t.Parallel()
|
||||||
|
if _, err := c.WriteSnapshot(context.Background(), "", 0, nil); err == nil || err.Error() != `cannot determine snapshot path: generation required` {
|
||||||
|
t.Fatalf("unexpected error: %v", err)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestReplicaClient_SnapshotReader(t *testing.T) {
|
||||||
|
RunWithReplicaClient(t, "OK", func(t *testing.T, c litestream.ReplicaClient) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
if _, err := c.WriteSnapshot(context.Background(), "5efbd8d042012dca", 10, strings.NewReader(`foo`)); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
r, err := c.SnapshotReader(context.Background(), "5efbd8d042012dca", 10)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
defer r.Close()
|
||||||
|
|
||||||
|
if buf, err := io.ReadAll(r); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
} else if got, want := string(buf), "foo"; got != want {
|
||||||
|
t.Fatalf("ReadAll=%v, want %v", got, want)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
RunWithReplicaClient(t, "ErrNotFound", func(t *testing.T, c litestream.ReplicaClient) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
if _, err := c.SnapshotReader(context.Background(), "5efbd8d042012dca", 1); !os.IsNotExist(err) {
|
||||||
|
t.Fatalf("expected not exist, got %#v", err)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
RunWithReplicaClient(t, "ErrNoGeneration", func(t *testing.T, c litestream.ReplicaClient) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
if _, err := c.SnapshotReader(context.Background(), "", 1); err == nil || err.Error() != `cannot determine snapshot path: generation required` {
|
||||||
|
t.Fatalf("unexpected error: %v", err)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestReplicaClient_WALs(t *testing.T) {
|
||||||
|
RunWithReplicaClient(t, "OK", func(t *testing.T, c litestream.ReplicaClient) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "5efbd8d042012dca", Index: 1, Offset: 0}, strings.NewReader(``)); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "b16ddcf5c697540f", Index: 2, Offset: 0}, strings.NewReader(`12345`)); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
} else if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "b16ddcf5c697540f", Index: 2, Offset: 5}, strings.NewReader(`67`)); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
} else if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "b16ddcf5c697540f", Index: 3, Offset: 0}, strings.NewReader(`xyz`)); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
itr, err := c.WALSegments(context.Background(), "b16ddcf5c697540f")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
defer itr.Close()
|
||||||
|
|
||||||
|
// Read all WAL segment files into a slice so they can be sorted.
|
||||||
|
a, err := litestream.SliceWALSegmentIterator(itr)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
} else if got, want := len(a), 3; got != want {
|
||||||
|
t.Fatalf("len=%v, want %v", got, want)
|
||||||
|
}
|
||||||
|
sort.Sort(litestream.WALSegmentInfoSlice(a))
|
||||||
|
|
||||||
|
// Verify first WAL segment metadata.
|
||||||
|
if got, want := a[0].Generation, "b16ddcf5c697540f"; got != want {
|
||||||
|
t.Fatalf("Generation=%v, want %v", got, want)
|
||||||
|
} else if got, want := a[0].Index, 2; got != want {
|
||||||
|
t.Fatalf("Index=%v, want %v", got, want)
|
||||||
|
} else if got, want := a[0].Offset, int64(0); got != want {
|
||||||
|
t.Fatalf("Offset=%v, want %v", got, want)
|
||||||
|
} else if got, want := a[0].Size, int64(5); got != want {
|
||||||
|
t.Fatalf("Size=%v, want %v", got, want)
|
||||||
|
} else if a[0].CreatedAt.IsZero() {
|
||||||
|
t.Fatalf("expected CreatedAt")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify second WAL segment metadata.
|
||||||
|
if got, want := a[1].Generation, "b16ddcf5c697540f"; got != want {
|
||||||
|
t.Fatalf("Generation=%v, want %v", got, want)
|
||||||
|
} else if got, want := a[1].Index, 2; got != want {
|
||||||
|
t.Fatalf("Index=%v, want %v", got, want)
|
||||||
|
} else if got, want := a[1].Offset, int64(5); got != want {
|
||||||
|
t.Fatalf("Offset=%v, want %v", got, want)
|
||||||
|
} else if got, want := a[1].Size, int64(2); got != want {
|
||||||
|
t.Fatalf("Size=%v, want %v", got, want)
|
||||||
|
} else if a[1].CreatedAt.IsZero() {
|
||||||
|
t.Fatalf("expected CreatedAt")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify third WAL segment metadata.
|
||||||
|
if got, want := a[2].Generation, "b16ddcf5c697540f"; got != want {
|
||||||
|
t.Fatalf("Generation=%v, want %v", got, want)
|
||||||
|
} else if got, want := a[2].Index, 3; got != want {
|
||||||
|
t.Fatalf("Index=%v, want %v", got, want)
|
||||||
|
} else if got, want := a[2].Offset, int64(0); got != want {
|
||||||
|
t.Fatalf("Offset=%v, want %v", got, want)
|
||||||
|
} else if got, want := a[2].Size, int64(3); got != want {
|
||||||
|
t.Fatalf("Size=%v, want %v", got, want)
|
||||||
|
} else if a[2].CreatedAt.IsZero() {
|
||||||
|
t.Fatalf("expected CreatedAt")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ensure close is clean.
|
||||||
|
if err := itr.Close(); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
RunWithReplicaClient(t, "NoGenerationDir", func(t *testing.T, c litestream.ReplicaClient) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
itr, err := c.WALSegments(context.Background(), "5efbd8d042012dca")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
defer itr.Close()
|
||||||
|
|
||||||
|
if itr.Next() {
|
||||||
|
t.Fatal("expected no wal files")
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
RunWithReplicaClient(t, "NoWALs", func(t *testing.T, c litestream.ReplicaClient) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
if _, err := c.WriteSnapshot(context.Background(), "5efbd8d042012dca", 0, strings.NewReader(`foo`)); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
itr, err := c.WALSegments(context.Background(), "5efbd8d042012dca")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
defer itr.Close()
|
||||||
|
|
||||||
|
if itr.Next() {
|
||||||
|
t.Fatal("expected no wal files")
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
RunWithReplicaClient(t, "ErrNoGeneration", func(t *testing.T, c litestream.ReplicaClient) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
itr, err := c.WALSegments(context.Background(), "")
|
||||||
|
if err == nil {
|
||||||
|
err = itr.Close()
|
||||||
|
}
|
||||||
|
if err == nil || err.Error() != `cannot determine wal path: generation required` {
|
||||||
|
t.Fatalf("unexpected error: %v", err)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestReplicaClient_WriteWALSegment(t *testing.T) {
|
||||||
|
RunWithReplicaClient(t, "OK", func(t *testing.T, c litestream.ReplicaClient) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "b16ddcf5c697540f", Index: 1000, Offset: 2000}, strings.NewReader(`foobar`)); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if r, err := c.WALSegmentReader(context.Background(), litestream.Pos{Generation: "b16ddcf5c697540f", Index: 1000, Offset: 2000}); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
} else if buf, err := io.ReadAll(r); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
} else if err := r.Close(); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
} else if got, want := string(buf), `foobar`; got != want {
|
||||||
|
t.Fatalf("data=%q, want %q", got, want)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
RunWithReplicaClient(t, "ErrNoGeneration", func(t *testing.T, c litestream.ReplicaClient) {
|
||||||
|
t.Parallel()
|
||||||
|
if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "", Index: 0, Offset: 0}, nil); err == nil || err.Error() != `cannot determine wal segment path: generation required` {
|
||||||
|
t.Fatalf("unexpected error: %v", err)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestReplicaClient_WALReader(t *testing.T) {
|
||||||
|
|
||||||
|
RunWithReplicaClient(t, "OK", func(t *testing.T, c litestream.ReplicaClient) {
|
||||||
|
t.Parallel()
|
||||||
|
if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "5efbd8d042012dca", Index: 10, Offset: 5}, strings.NewReader(`foobar`)); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
r, err := c.WALSegmentReader(context.Background(), litestream.Pos{Generation: "5efbd8d042012dca", Index: 10, Offset: 5})
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
defer r.Close()
|
||||||
|
|
||||||
|
if buf, err := io.ReadAll(r); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
} else if got, want := string(buf), "foobar"; got != want {
|
||||||
|
t.Fatalf("ReadAll=%v, want %v", got, want)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
RunWithReplicaClient(t, "ErrNotFound", func(t *testing.T, c litestream.ReplicaClient) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
if _, err := c.WALSegmentReader(context.Background(), litestream.Pos{Generation: "5efbd8d042012dca", Index: 1, Offset: 0}); !os.IsNotExist(err) {
|
||||||
|
t.Fatalf("expected not exist, got %#v", err)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestReplicaClient_DeleteWALSegments(t *testing.T) {
|
||||||
|
RunWithReplicaClient(t, "OK", func(t *testing.T, c litestream.ReplicaClient) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "b16ddcf5c697540f", Index: 1, Offset: 2}, strings.NewReader(`foo`)); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
} else if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "5efbd8d042012dca", Index: 3, Offset: 4}, strings.NewReader(`bar`)); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := c.DeleteWALSegments(context.Background(), []litestream.Pos{
|
||||||
|
{Generation: "b16ddcf5c697540f", Index: 1, Offset: 2},
|
||||||
|
{Generation: "5efbd8d042012dca", Index: 3, Offset: 4},
|
||||||
|
}); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, err := c.WALSegmentReader(context.Background(), litestream.Pos{Generation: "b16ddcf5c697540f", Index: 1, Offset: 2}); !os.IsNotExist(err) {
|
||||||
|
t.Fatalf("expected not exist, got %#v", err)
|
||||||
|
} else if _, err := c.WALSegmentReader(context.Background(), litestream.Pos{Generation: "5efbd8d042012dca", Index: 3, Offset: 4}); !os.IsNotExist(err) {
|
||||||
|
t.Fatalf("expected not exist, got %#v", err)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
RunWithReplicaClient(t, "ErrNoGeneration", func(t *testing.T, c litestream.ReplicaClient) {
|
||||||
|
t.Parallel()
|
||||||
|
if err := c.DeleteWALSegments(context.Background(), []litestream.Pos{{}}); err == nil || err.Error() != `cannot determine wal segment path: generation required` {
|
||||||
|
t.Fatalf("unexpected error: %v", err)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// RunWithReplicaClient executes fn against each replica client type specified by the -integration flag.
|
||||||
|
func RunWithReplicaClient(t *testing.T, name string, fn func(*testing.T, litestream.ReplicaClient)) {
|
||||||
|
t.Run(name, func(t *testing.T) {
|
||||||
|
for _, typ := range strings.Split(*integration, ",") {
|
||||||
|
t.Run(typ, func(t *testing.T) {
|
||||||
|
c := NewReplicaClient(t, typ)
|
||||||
|
defer MustDeleteAll(t, c)
|
||||||
|
|
||||||
|
fn(t, c)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewReplicaClient returns a new client for integration testing by type name.
|
||||||
|
func NewReplicaClient(tb testing.TB, typ string) litestream.ReplicaClient {
|
||||||
|
tb.Helper()
|
||||||
|
|
||||||
|
switch typ {
|
||||||
|
case file.ReplicaClientType:
|
||||||
|
return NewFileReplicaClient(tb)
|
||||||
|
case s3.ReplicaClientType:
|
||||||
|
return NewS3ReplicaClient(tb)
|
||||||
|
case gcs.ReplicaClientType:
|
||||||
|
return NewGCSReplicaClient(tb)
|
||||||
|
case abs.ReplicaClientType:
|
||||||
|
return NewABSReplicaClient(tb)
|
||||||
|
case sftp.ReplicaClientType:
|
||||||
|
return NewSFTPReplicaClient(tb)
|
||||||
|
default:
|
||||||
|
tb.Fatalf("invalid replica client type: %q", typ)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewFileReplicaClient returns a new client for integration testing.
|
||||||
|
func NewFileReplicaClient(tb testing.TB) *file.ReplicaClient {
|
||||||
|
tb.Helper()
|
||||||
|
return file.NewReplicaClient(tb.TempDir())
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewS3ReplicaClient returns a new client for integration testing.
|
||||||
|
func NewS3ReplicaClient(tb testing.TB) *s3.ReplicaClient {
|
||||||
|
tb.Helper()
|
||||||
|
|
||||||
|
c := s3.NewReplicaClient()
|
||||||
|
c.AccessKeyID = *s3AccessKeyID
|
||||||
|
c.SecretAccessKey = *s3SecretAccessKey
|
||||||
|
c.Region = *s3Region
|
||||||
|
c.Bucket = *s3Bucket
|
||||||
|
c.Path = path.Join(*s3Path, fmt.Sprintf("%016x", rand.Uint64()))
|
||||||
|
c.Endpoint = *s3Endpoint
|
||||||
|
c.ForcePathStyle = *s3ForcePathStyle
|
||||||
|
c.SkipVerify = *s3SkipVerify
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewGCSReplicaClient returns a new client for integration testing.
|
||||||
|
func NewGCSReplicaClient(tb testing.TB) *gcs.ReplicaClient {
|
||||||
|
tb.Helper()
|
||||||
|
|
||||||
|
c := gcs.NewReplicaClient()
|
||||||
|
c.Bucket = *gcsBucket
|
||||||
|
c.Path = path.Join(*gcsPath, fmt.Sprintf("%016x", rand.Uint64()))
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewABSReplicaClient returns a new client for integration testing.
|
||||||
|
func NewABSReplicaClient(tb testing.TB) *abs.ReplicaClient {
|
||||||
|
tb.Helper()
|
||||||
|
|
||||||
|
c := abs.NewReplicaClient()
|
||||||
|
c.AccountName = *absAccountName
|
||||||
|
c.AccountKey = *absAccountKey
|
||||||
|
c.Bucket = *absBucket
|
||||||
|
c.Path = path.Join(*absPath, fmt.Sprintf("%016x", rand.Uint64()))
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewSFTPReplicaClient returns a new client for integration testing.
|
||||||
|
func NewSFTPReplicaClient(tb testing.TB) *sftp.ReplicaClient {
|
||||||
|
tb.Helper()
|
||||||
|
|
||||||
|
c := sftp.NewReplicaClient()
|
||||||
|
c.Host = *sftpHost
|
||||||
|
c.User = *sftpUser
|
||||||
|
c.Password = *sftpPassword
|
||||||
|
c.KeyPath = *sftpKeyPath
|
||||||
|
c.Path = path.Join(*sftpPath, fmt.Sprintf("%016x", rand.Uint64()))
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// MustDeleteAll deletes all objects under the client's path.
|
||||||
|
func MustDeleteAll(tb testing.TB, c litestream.ReplicaClient) {
|
||||||
|
tb.Helper()
|
||||||
|
|
||||||
|
generations, err := c.Generations(context.Background())
|
||||||
|
if err != nil {
|
||||||
|
tb.Fatalf("cannot list generations for deletion: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, generation := range generations {
|
||||||
|
if err := c.DeleteGeneration(context.Background(), generation); err != nil {
|
||||||
|
tb.Fatalf("cannot delete generation: %s", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
switch c := c.(type) {
|
||||||
|
case *sftp.ReplicaClient:
|
||||||
|
if err := c.Cleanup(context.Background()); err != nil {
|
||||||
|
tb.Fatalf("cannot cleanup sftp: %s", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
241 replica_test.go
@@ -1,90 +1,185 @@
|
|||||||
package litestream_test
|
package litestream_test
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"bytes"
|
||||||
"context"
|
"context"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/benbjohnson/litestream"
|
"github.com/benbjohnson/litestream"
|
||||||
|
"github.com/benbjohnson/litestream/file"
|
||||||
|
"github.com/benbjohnson/litestream/mock"
|
||||||
|
"github.com/pierrec/lz4/v4"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestFileReplica_Sync(t *testing.T) {
|
func nextIndex(pos litestream.Pos) litestream.Pos {
|
||||||
// Ensure replica can successfully sync after DB has sync'd.
|
return litestream.Pos{
|
||||||
t.Run("InitialSync", func(t *testing.T) {
|
Generation: pos.Generation,
|
||||||
db, sqldb := MustOpenDBs(t)
|
Index: pos.Index + 1,
|
||||||
defer MustCloseDBs(t, db, sqldb)
|
}
|
||||||
r := NewTestFileReplica(t, db)
|
}
|
||||||
|
|
||||||
// Sync database & then sync replica.
|
func TestReplica_Name(t *testing.T) {
|
||||||
if err := db.Sync(); err != nil {
|
t.Run("WithName", func(t *testing.T) {
|
||||||
t.Fatal(err)
|
if got, want := litestream.NewReplica(nil, "NAME").Name(), "NAME"; got != want {
|
||||||
} else if err := r.Sync(context.Background()); err != nil {
|
t.Fatalf("Name()=%v, want %v", got, want)
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Ensure positions match.
|
|
||||||
if pos, err := db.Pos(); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
} else if got, want := r.LastPos(), pos; got != want {
|
|
||||||
t.Fatalf("LastPos()=%v, want %v", got, want)
|
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
t.Run("WithoutName", func(t *testing.T) {
|
||||||
// Ensure replica can successfully sync multiple times.
|
r := litestream.NewReplica(nil, "")
|
||||||
t.Run("MultiSync", func(t *testing.T) {
|
r.Client = &mock.ReplicaClient{}
|
||||||
db, sqldb := MustOpenDBs(t)
|
if got, want := r.Name(), "mock"; got != want {
|
||||||
defer MustCloseDBs(t, db, sqldb)
|
t.Fatalf("Name()=%v, want %v", got, want)
|
||||||
r := NewTestFileReplica(t, db)
|
|
||||||
|
|
||||||
if _, err := sqldb.Exec(`CREATE TABLE foo (bar TEXT);`); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Write to the database multiple times and sync after each write.
|
|
||||||
for i, n := 0, db.MinCheckpointPageN*2; i < n; i++ {
|
|
||||||
if _, err := sqldb.Exec(`INSERT INTO foo (bar) VALUES ('baz')`); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sync periodically.
|
|
||||||
if i%100 == 0 || i == n-1 {
|
|
||||||
if err := db.Sync(); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
} else if err := r.Sync(context.Background()); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Ensure positions match.
|
|
||||||
if pos, err := db.Pos(); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
} else if got, want := pos.Index, 2; got != want {
|
|
||||||
t.Fatalf("Index=%v, want %v", got, want)
|
|
||||||
} else if calcPos, err := r.CalcPos(context.Background(), pos.Generation); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
} else if got, want := calcPos, pos; got != want {
|
|
||||||
t.Fatalf("CalcPos()=%v, want %v", got, want)
|
|
||||||
} else if got, want := r.LastPos(), pos; got != want {
|
|
||||||
t.Fatalf("LastPos()=%v, want %v", got, want)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
// Ensure replica returns an error if there is no generation available from the DB.
|
|
||||||
t.Run("ErrNoGeneration", func(t *testing.T) {
|
|
||||||
db, sqldb := MustOpenDBs(t)
|
|
||||||
defer MustCloseDBs(t, db, sqldb)
|
|
||||||
r := NewTestFileReplica(t, db)
|
|
||||||
|
|
||||||
if err := r.Sync(context.Background()); err == nil || err.Error() != `no generation, waiting for data` {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewTestFileReplica returns a new replica using a temp directory & with monitoring disabled.
|
func TestReplica_Sync(t *testing.T) {
|
||||||
func NewTestFileReplica(tb testing.TB, db *litestream.DB) *litestream.FileReplica {
|
db, sqldb := MustOpenDBs(t)
|
||||||
r := litestream.NewFileReplica(db, "", tb.TempDir())
|
defer MustCloseDBs(t, db, sqldb)
|
||||||
r.MonitorEnabled = false
|
|
||||||
db.Replicas = []litestream.Replica{r}
|
// Issue initial database sync to set up the generation.
|
||||||
return r
|
if err := db.Sync(context.Background()); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fetch current database position.
|
||||||
|
dpos, err := db.Pos()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
c := file.NewReplicaClient(t.TempDir())
|
||||||
|
r := litestream.NewReplica(db, "")
|
||||||
|
c.Replica, r.Client = r, c
|
||||||
|
|
||||||
|
if err := r.Sync(context.Background()); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify client generation matches database.
|
||||||
|
generations, err := c.Generations(context.Background())
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
} else if got, want := len(generations), 1; got != want {
|
||||||
|
t.Fatalf("len(generations)=%v, want %v", got, want)
|
||||||
|
} else if got, want := generations[0], dpos.Generation; got != want {
|
||||||
|
t.Fatalf("generations[0]=%v, want %v", got, want)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify we synced checkpoint page to WAL.
|
||||||
|
if r, err := c.WALSegmentReader(context.Background(), nextIndex(dpos)); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
} else if b, err := io.ReadAll(lz4.NewReader(r)); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
} else if err := r.Close(); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
} else if len(b) == db.PageSize() {
|
||||||
|
t.Fatalf("wal mismatch: len(%d), len(%d)", len(b), db.PageSize())
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reset WAL so the next write will only write out the segment we are checking.
|
||||||
|
if err := db.Checkpoint(context.Background(), litestream.CheckpointModeTruncate); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Execute a query to write something into the truncated WAL.
|
||||||
|
if _, err := sqldb.Exec(`CREATE TABLE foo (bar TEXT);`); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sync database to catch up the shadow WAL.
|
||||||
|
if err := db.Sync(context.Background()); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Save position after sync, it should be after our write.
|
||||||
|
dpos, err = db.Pos()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sync WAL segment out to replica.
|
||||||
|
if err := r.Sync(context.Background()); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify WAL matches replica WAL.
|
||||||
|
if b0, err := os.ReadFile(db.Path() + "-wal"); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
} else if r, err := c.WALSegmentReader(context.Background(), dpos.Truncate()); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
} else if b1, err := io.ReadAll(lz4.NewReader(r)); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
} else if err := r.Close(); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
} else if !bytes.Equal(b0, b1) {
|
||||||
|
t.Fatalf("wal mismatch: len(%d), len(%d)", len(b0), len(b1))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestReplica_Snapshot(t *testing.T) {
|
||||||
|
db, sqldb := MustOpenDBs(t)
|
||||||
|
defer MustCloseDBs(t, db, sqldb)
|
||||||
|
|
||||||
|
c := file.NewReplicaClient(t.TempDir())
|
||||||
|
r := litestream.NewReplica(db, "")
|
||||||
|
r.Client = c
|
||||||
|
|
||||||
|
// Execute a query to force a write to the WAL.
|
||||||
|
if _, err := sqldb.Exec(`CREATE TABLE foo (bar TEXT);`); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
} else if err := db.Sync(context.Background()); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
} else if err := r.Sync(context.Background()); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fetch current database position & snapshot.
|
||||||
|
pos0, err := db.Pos()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
} else if info, err := r.Snapshot(context.Background()); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
} else if got, want := info.Pos(), nextIndex(pos0); got != want {
|
||||||
|
t.Fatalf("pos=%s, want %s", got, want)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sync database and then replica.
|
||||||
|
if err := db.Sync(context.Background()); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
} else if err := r.Sync(context.Background()); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Execute a query to force a write to the WAL & truncate to start new index.
|
||||||
|
if _, err := sqldb.Exec(`INSERT INTO foo (bar) VALUES ('baz');`); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
} else if err := db.Checkpoint(context.Background(), litestream.CheckpointModeTruncate); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fetch current database position & snapshot.
|
||||||
|
pos1, err := db.Pos()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
} else if info, err := r.Snapshot(context.Background()); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
} else if got, want := info.Pos(), nextIndex(pos1); got != want {
|
||||||
|
t.Fatalf("pos=%v, want %v", got, want)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify three snapshots exist.
|
||||||
|
if infos, err := r.Snapshots(context.Background()); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
} else if got, want := len(infos), 3; got != want {
|
||||||
|
t.Fatalf("len=%v, want %v", got, want)
|
||||||
|
} else if got, want := infos[0].Pos(), pos0.Truncate(); got != want {
|
||||||
|
t.Fatalf("info[0]=%s, want %s", got, want)
|
||||||
|
} else if got, want := infos[1].Pos(), nextIndex(pos0); got != want {
|
||||||
|
t.Fatalf("info[1]=%s, want %s", got, want)
|
||||||
|
} else if got, want := infos[2].Pos(), nextIndex(pos1); got != want {
|
||||||
|
t.Fatalf("info[2]=%s, want %s", got, want)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
771 s3/replica_client.go Normal file
@@ -0,0 +1,771 @@
package s3

import (
	"context"
	"crypto/tls"
	"fmt"
	"io"
	"net"
	"net/http"
	"os"
	"path"
	"regexp"
	"sync"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/aws/aws-sdk-go/service/s3/s3manager"
	"github.com/benbjohnson/litestream"
	"github.com/benbjohnson/litestream/internal"
	"golang.org/x/sync/errgroup"
)

// ReplicaClientType is the client type for this package.
const ReplicaClientType = "s3"

// MaxKeys is the number of keys S3 can operate on per batch.
const MaxKeys = 1000

// DefaultRegion is the region used if one is not specified.
const DefaultRegion = "us-east-1"

var _ litestream.ReplicaClient = (*ReplicaClient)(nil)

// ReplicaClient is a client for writing snapshots & WAL segments to an S3-compatible bucket.
type ReplicaClient struct {
	mu       sync.Mutex
	s3       *s3.S3 // s3 service
	uploader *s3manager.Uploader

	// AWS authentication keys.
	AccessKeyID     string
	SecretAccessKey string

	// S3 bucket information
	Region         string
	Bucket         string
	Path           string
	Endpoint       string
	ForcePathStyle bool
	SkipVerify     bool
}

// NewReplicaClient returns a new instance of ReplicaClient.
func NewReplicaClient() *ReplicaClient {
	return &ReplicaClient{}
}

// Type returns "s3" as the client type.
func (c *ReplicaClient) Type() string {
	return ReplicaClientType
}
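For reference, a minimal sketch of configuring this client against an S3-compatible endpoint; the endpoint, bucket, and path values below are placeholders and not taken from this diff:

package main

import (
	"context"
	"log"
	"os"

	"github.com/benbjohnson/litestream/s3"
)

func main() {
	c := s3.NewReplicaClient()
	c.AccessKeyID = os.Getenv("LITESTREAM_S3_ACCESS_KEY_ID")
	c.SecretAccessKey = os.Getenv("LITESTREAM_S3_SECRET_ACCESS_KEY")
	c.Bucket = "example-bucket"          // placeholder
	c.Path = "db"                        // placeholder
	c.Endpoint = "http://localhost:9000" // e.g. a MinIO-style endpoint; skips region lookup
	c.ForcePathStyle = true              // path-style addressing for non-AWS stores

	// Init is also called lazily by the client's methods; calling it
	// explicitly here just surfaces configuration errors early.
	if err := c.Init(context.Background()); err != nil {
		log.Fatal(err)
	}

	generations, err := c.Generations(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("generations: %v", generations)
}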
// Init initializes the connection to S3. No-op if already initialized.
|
||||||
|
func (c *ReplicaClient) Init(ctx context.Context) (err error) {
|
||||||
|
c.mu.Lock()
|
||||||
|
defer c.mu.Unlock()
|
||||||
|
|
||||||
|
if c.s3 != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Look up region if not specified and no endpoint is used.
|
||||||
|
// Endpoints are typically used for non-S3 object stores and do not
|
||||||
|
// necessarily require a region.
|
||||||
|
region := c.Region
|
||||||
|
if region == "" {
|
||||||
|
if c.Endpoint == "" {
|
||||||
|
if region, err = c.findBucketRegion(ctx, c.Bucket); err != nil {
|
||||||
|
return fmt.Errorf("cannot lookup bucket region: %w", err)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
region = DefaultRegion // default for non-S3 object stores
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create new AWS session.
|
||||||
|
config := c.config()
|
||||||
|
if region != "" {
|
||||||
|
config.Region = aws.String(region)
|
||||||
|
}
|
||||||
|
|
||||||
|
sess, err := session.NewSession(config)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("cannot create aws session: %w", err)
|
||||||
|
}
|
||||||
|
c.s3 = s3.New(sess)
|
||||||
|
c.uploader = s3manager.NewUploader(sess)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// config returns the AWS configuration. Uses the default credential chain
|
||||||
|
// unless a key/secret are explicitly set.
|
||||||
|
func (c *ReplicaClient) config() *aws.Config {
|
||||||
|
config := &aws.Config{}
|
||||||
|
|
||||||
|
if c.AccessKeyID != "" || c.SecretAccessKey != "" {
|
||||||
|
config.Credentials = credentials.NewStaticCredentials(c.AccessKeyID, c.SecretAccessKey, "")
|
||||||
|
}
|
||||||
|
if c.Endpoint != "" {
|
||||||
|
config.Endpoint = aws.String(c.Endpoint)
|
||||||
|
}
|
||||||
|
if c.ForcePathStyle {
|
||||||
|
config.S3ForcePathStyle = aws.Bool(c.ForcePathStyle)
|
||||||
|
}
|
||||||
|
if c.SkipVerify {
|
||||||
|
config.HTTPClient = &http.Client{Transport: &http.Transport{
|
||||||
|
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
|
||||||
|
}}
|
||||||
|
}
|
||||||
|
|
||||||
|
return config
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *ReplicaClient) findBucketRegion(ctx context.Context, bucket string) (string, error) {
|
||||||
|
// Connect to US standard region to fetch info.
|
||||||
|
config := c.config()
|
||||||
|
config.Region = aws.String(DefaultRegion)
|
||||||
|
sess, err := session.NewSession(config)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fetch bucket location, if possible. Must be bucket owner.
|
||||||
|
// This call can return a nil location which means it's in us-east-1.
|
||||||
|
if out, err := s3.New(sess).HeadBucketWithContext(ctx, &s3.HeadBucketInput{
|
||||||
|
Bucket: aws.String(bucket),
|
||||||
|
}); err != nil {
|
||||||
|
return "", err
|
||||||
|
} else if out.BucketRegion != nil {
|
||||||
|
return *out.BucketRegion, nil
|
||||||
|
}
|
||||||
|
return DefaultRegion, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Generations returns a list of available generation names.
|
||||||
|
func (c *ReplicaClient) Generations(ctx context.Context) ([]string, error) {
|
||||||
|
if err := c.Init(ctx); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var generations []string
|
||||||
|
if err := c.s3.ListObjectsPagesWithContext(ctx, &s3.ListObjectsInput{
|
||||||
|
Bucket: aws.String(c.Bucket),
|
||||||
|
Prefix: aws.String(litestream.GenerationsPath(c.Path) + "/"),
|
||||||
|
Delimiter: aws.String("/"),
|
||||||
|
}, func(page *s3.ListObjectsOutput, lastPage bool) bool {
|
||||||
|
internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "LIST").Inc()
|
||||||
|
|
||||||
|
for _, prefix := range page.CommonPrefixes {
|
||||||
|
name := path.Base(aws.StringValue(prefix.Prefix))
|
||||||
|
if !litestream.IsGenerationName(name) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
generations = append(generations, name)
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return generations, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteGeneration deletes all snapshots & WAL segments within a generation.
|
||||||
|
func (c *ReplicaClient) DeleteGeneration(ctx context.Context, generation string) error {
|
||||||
|
if err := c.Init(ctx); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
dir, err := litestream.GenerationPath(c.Path, generation)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("cannot determine generation path: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Collect all files for the generation.
|
||||||
|
var objIDs []*s3.ObjectIdentifier
|
||||||
|
if err := c.s3.ListObjectsPagesWithContext(ctx, &s3.ListObjectsInput{
|
||||||
|
Bucket: aws.String(c.Bucket),
|
||||||
|
Prefix: aws.String(dir),
|
||||||
|
}, func(page *s3.ListObjectsOutput, lastPage bool) bool {
|
||||||
|
internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "LIST").Inc()
|
||||||
|
|
||||||
|
for _, obj := range page.Contents {
|
||||||
|
objIDs = append(objIDs, &s3.ObjectIdentifier{Key: obj.Key})
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete all files in batches.
|
||||||
|
for len(objIDs) > 0 {
|
||||||
|
n := MaxKeys
|
||||||
|
if len(objIDs) < n {
|
||||||
|
n = len(objIDs)
|
||||||
|
}
|
||||||
|
|
||||||
|
out, err := c.s3.DeleteObjectsWithContext(ctx, &s3.DeleteObjectsInput{
|
||||||
|
Bucket: aws.String(c.Bucket),
|
||||||
|
Delete: &s3.Delete{Objects: objIDs[:n], Quiet: aws.Bool(true)},
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := deleteOutputError(out); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "DELETE").Inc()
|
||||||
|
|
||||||
|
objIDs = objIDs[n:]
|
||||||
|
}
|
||||||
|
|
||||||
|
// log.Printf("%s(%s): retainer: deleting generation: %s", r.db.Path(), r.Name(), generation)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Snapshots returns an iterator over all available snapshots for a generation.
|
||||||
|
func (c *ReplicaClient) Snapshots(ctx context.Context, generation string) (litestream.SnapshotIterator, error) {
|
||||||
|
if err := c.Init(ctx); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return newSnapshotIterator(ctx, c, generation), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// WriteSnapshot writes LZ4 compressed snapshot data from rd to the remote bucket.
|
||||||
|
func (c *ReplicaClient) WriteSnapshot(ctx context.Context, generation string, index int, rd io.Reader) (info litestream.SnapshotInfo, err error) {
|
||||||
|
if err := c.Init(ctx); err != nil {
|
||||||
|
return info, err
|
||||||
|
}
|
||||||
|
|
||||||
|
key, err := litestream.SnapshotPath(c.Path, generation, index)
|
||||||
|
if err != nil {
|
||||||
|
return info, fmt.Errorf("cannot determine snapshot path: %w", err)
|
||||||
|
}
|
||||||
|
startTime := time.Now()
|
||||||
|
|
||||||
|
rc := internal.NewReadCounter(rd)
|
||||||
|
if _, err := c.uploader.UploadWithContext(ctx, &s3manager.UploadInput{
|
||||||
|
Bucket: aws.String(c.Bucket),
|
||||||
|
Key: aws.String(key),
|
||||||
|
Body: rc,
|
||||||
|
}); err != nil {
|
||||||
|
return info, err
|
||||||
|
}
|
||||||
|
|
||||||
|
internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "PUT").Inc()
|
||||||
|
internal.OperationBytesCounterVec.WithLabelValues(ReplicaClientType, "PUT").Add(float64(rc.N()))
|
||||||
|
|
||||||
|
// log.Printf("%s(%s): snapshot: creating %s/%08x t=%s", r.db.Path(), r.Name(), generation, index, time.Since(startTime).Truncate(time.Millisecond))
|
||||||
|
|
||||||
|
return litestream.SnapshotInfo{
|
||||||
|
Generation: generation,
|
||||||
|
Index: index,
|
||||||
|
Size: rc.N(),
|
||||||
|
CreatedAt: startTime.UTC(),
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// SnapshotReader returns a reader for snapshot data at the given generation/index.
|
||||||
|
func (c *ReplicaClient) SnapshotReader(ctx context.Context, generation string, index int) (io.ReadCloser, error) {
|
||||||
|
if err := c.Init(ctx); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
key, err := litestream.SnapshotPath(c.Path, generation, index)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("cannot determine snapshot path: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
out, err := c.s3.GetObjectWithContext(ctx, &s3.GetObjectInput{
|
||||||
|
Bucket: aws.String(c.Bucket),
|
||||||
|
Key: aws.String(key),
|
||||||
|
})
|
||||||
|
if isNotExists(err) {
|
||||||
|
return nil, os.ErrNotExist
|
||||||
|
} else if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "GET").Inc()
|
||||||
|
internal.OperationBytesCounterVec.WithLabelValues(ReplicaClientType, "GET").Add(float64(aws.Int64Value(out.ContentLength)))
|
||||||
|
|
||||||
|
return out.Body, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteSnapshot deletes a snapshot with the given generation & index.
|
||||||
|
func (c *ReplicaClient) DeleteSnapshot(ctx context.Context, generation string, index int) error {
|
||||||
|
if err := c.Init(ctx); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
key, err := litestream.SnapshotPath(c.Path, generation, index)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("cannot determine snapshot path: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
out, err := c.s3.DeleteObjectsWithContext(ctx, &s3.DeleteObjectsInput{
|
||||||
|
Bucket: aws.String(c.Bucket),
|
||||||
|
Delete: &s3.Delete{Objects: []*s3.ObjectIdentifier{{Key: &key}}, Quiet: aws.Bool(true)},
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := deleteOutputError(out); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "DELETE").Inc()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// WALSegments returns an iterator over all available WAL files for a generation.
|
||||||
|
func (c *ReplicaClient) WALSegments(ctx context.Context, generation string) (litestream.WALSegmentIterator, error) {
|
||||||
|
if err := c.Init(ctx); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return newWALSegmentIterator(ctx, c, generation), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// WriteWALSegment writes LZ4 compressed WAL segment data from rd to the remote bucket.
|
||||||
|
func (c *ReplicaClient) WriteWALSegment(ctx context.Context, pos litestream.Pos, rd io.Reader) (info litestream.WALSegmentInfo, err error) {
|
||||||
|
if err := c.Init(ctx); err != nil {
|
||||||
|
return info, err
|
||||||
|
}
|
||||||
|
|
||||||
|
key, err := litestream.WALSegmentPath(c.Path, pos.Generation, pos.Index, pos.Offset)
|
||||||
|
if err != nil {
|
||||||
|
return info, fmt.Errorf("cannot determine wal segment path: %w", err)
|
||||||
|
}
|
||||||
|
startTime := time.Now()
|
||||||
|
|
||||||
|
rc := internal.NewReadCounter(rd)
|
||||||
|
if _, err := c.uploader.UploadWithContext(ctx, &s3manager.UploadInput{
|
||||||
|
Bucket: aws.String(c.Bucket),
|
||||||
|
Key: aws.String(key),
|
||||||
|
Body: rc,
|
||||||
|
}); err != nil {
|
||||||
|
return info, err
|
||||||
|
}
|
||||||
|
|
||||||
|
internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "PUT").Inc()
|
||||||
|
internal.OperationBytesCounterVec.WithLabelValues(ReplicaClientType, "PUT").Add(float64(rc.N()))
|
||||||
|
|
||||||
|
return litestream.WALSegmentInfo{
|
||||||
|
Generation: pos.Generation,
|
||||||
|
Index: pos.Index,
|
||||||
|
Offset: pos.Offset,
|
||||||
|
Size: rc.N(),
|
||||||
|
CreatedAt: startTime.UTC(),
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// WALSegmentReader returns a reader for a section of WAL data at the given index.
|
||||||
|
// Returns os.ErrNotExist if no matching index/offset is found.
|
||||||
|
func (c *ReplicaClient) WALSegmentReader(ctx context.Context, pos litestream.Pos) (io.ReadCloser, error) {
|
||||||
|
if err := c.Init(ctx); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
key, err := litestream.WALSegmentPath(c.Path, pos.Generation, pos.Index, pos.Offset)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("cannot determine wal segment path: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
out, err := c.s3.GetObjectWithContext(ctx, &s3.GetObjectInput{
|
||||||
|
Bucket: aws.String(c.Bucket),
|
||||||
|
Key: aws.String(key),
|
||||||
|
})
|
||||||
|
if isNotExists(err) {
|
||||||
|
return nil, os.ErrNotExist
|
||||||
|
} else if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "GET").Inc()
|
||||||
|
internal.OperationBytesCounterVec.WithLabelValues(ReplicaClientType, "GET").Add(float64(aws.Int64Value(out.ContentLength)))
|
||||||
|
|
||||||
|
return out.Body, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteWALSegments deletes WAL segments at the given positions.
|
||||||
|
func (c *ReplicaClient) DeleteWALSegments(ctx context.Context, a []litestream.Pos) error {
|
||||||
|
if err := c.Init(ctx); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
objIDs := make([]*s3.ObjectIdentifier, MaxKeys)
|
||||||
|
for len(a) > 0 {
|
||||||
|
n := MaxKeys
|
||||||
|
if len(a) < n {
|
||||||
|
n = len(a)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Generate a batch of object IDs for deleting the WAL segments.
|
||||||
|
for i, pos := range a[:n] {
|
||||||
|
key, err := litestream.WALSegmentPath(c.Path, pos.Generation, pos.Index, pos.Offset)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("cannot determine wal segment path: %w", err)
|
||||||
|
}
|
||||||
|
objIDs[i] = &s3.ObjectIdentifier{Key: &key}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete S3 objects in bulk.
|
||||||
|
out, err := c.s3.DeleteObjectsWithContext(ctx, &s3.DeleteObjectsInput{
|
||||||
|
Bucket: aws.String(c.Bucket),
|
||||||
|
Delete: &s3.Delete{Objects: objIDs[:n], Quiet: aws.Bool(true)},
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := deleteOutputError(out); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "DELETE").Inc()
|
||||||
|
|
||||||
|
a = a[n:]
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}

// DeleteAll deletes everything on the remote path. Mainly used for testing.
func (c *ReplicaClient) DeleteAll(ctx context.Context) error {
	if err := c.Init(ctx); err != nil {
		return err
	}

	prefix := c.Path
	if prefix != "" {
		prefix += "/"
	}

	// Collect all files under the replica path.
	var objIDs []*s3.ObjectIdentifier
	if err := c.s3.ListObjectsPagesWithContext(ctx, &s3.ListObjectsInput{
		Bucket: aws.String(c.Bucket),
		Prefix: aws.String(prefix),
	}, func(page *s3.ListObjectsOutput, lastPage bool) bool {
		internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "LIST").Inc()

		for _, obj := range page.Contents {
			objIDs = append(objIDs, &s3.ObjectIdentifier{Key: obj.Key})
		}
		return true
	}); err != nil {
		return err
	}

	// Delete all files in batches.
	for len(objIDs) > 0 {
		n := MaxKeys
		if len(objIDs) < n {
			n = len(objIDs)
		}

		out, err := c.s3.DeleteObjectsWithContext(ctx, &s3.DeleteObjectsInput{
			Bucket: aws.String(c.Bucket),
			Delete: &s3.Delete{Objects: objIDs[:n], Quiet: aws.Bool(true)},
		})
		if err != nil {
			return err
		}
		if err := deleteOutputError(out); err != nil {
			return err
		}
		internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "DELETE").Inc()

		objIDs = objIDs[n:]
	}

	return nil
}

type snapshotIterator struct {
	client     *ReplicaClient
	generation string

	ch     chan litestream.SnapshotInfo
	g      errgroup.Group
	ctx    context.Context
	cancel func()

	info litestream.SnapshotInfo
	err  error
}

func newSnapshotIterator(ctx context.Context, client *ReplicaClient, generation string) *snapshotIterator {
	itr := &snapshotIterator{
		client:     client,
		generation: generation,
		ch:         make(chan litestream.SnapshotInfo),
	}

	itr.ctx, itr.cancel = context.WithCancel(ctx)
	itr.g.Go(itr.fetch)

	return itr
}

// fetch runs in a separate goroutine to fetch pages of objects and stream them to a channel.
func (itr *snapshotIterator) fetch() error {
	defer close(itr.ch)

	dir, err := litestream.SnapshotsPath(itr.client.Path, itr.generation)
	if err != nil {
		return fmt.Errorf("cannot determine snapshots path: %w", err)
	}

	return itr.client.s3.ListObjectsPagesWithContext(itr.ctx, &s3.ListObjectsInput{
		Bucket:    aws.String(itr.client.Bucket),
		Prefix:    aws.String(dir + "/"),
		Delimiter: aws.String("/"),
	}, func(page *s3.ListObjectsOutput, lastPage bool) bool {
		internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "LIST").Inc()

		for _, obj := range page.Contents {
			key := path.Base(aws.StringValue(obj.Key))
			index, err := litestream.ParseSnapshotPath(key)
			if err != nil {
				continue
			}

			info := litestream.SnapshotInfo{
				Generation: itr.generation,
				Index:      index,
				Size:       aws.Int64Value(obj.Size),
				CreatedAt:  obj.LastModified.UTC(),
			}

			select {
			case <-itr.ctx.Done():
			case itr.ch <- info:
			}
		}
		return true
	})
}

func (itr *snapshotIterator) Close() (err error) {
	err = itr.err

	// Cancel context and wait for error group to finish.
	itr.cancel()
	if e := itr.g.Wait(); e != nil && err == nil {
		err = e
	}

	return err
}

func (itr *snapshotIterator) Next() bool {
	// Exit if an error has already occurred.
	if itr.err != nil {
		return false
	}

	// Return false if context was canceled or if there are no more snapshots.
	// Otherwise fetch the next snapshot and store it on the iterator.
	select {
	case <-itr.ctx.Done():
		return false
	case info, ok := <-itr.ch:
		if !ok {
			return false
		}
		itr.info = info
		return true
	}
}

func (itr *snapshotIterator) Err() error { return itr.err }

func (itr *snapshotIterator) Snapshot() litestream.SnapshotInfo {
	return itr.info
}
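
// A typical consumer drives the iterator with the Next/Snapshot/Close protocol
// implemented above; Close surfaces any error from the background fetch
// goroutine. A minimal sketch (collectSnapshots is a hypothetical helper, not
// part of this package):
//
//	func collectSnapshots(itr litestream.SnapshotIterator) ([]litestream.SnapshotInfo, error) {
//		var infos []litestream.SnapshotInfo
//		for itr.Next() {
//			infos = append(infos, itr.Snapshot())
//		}
//		if err := itr.Close(); err != nil {
//			return nil, err
//		}
//		return infos, nil
//	}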

type walSegmentIterator struct {
	client     *ReplicaClient
	generation string

	ch     chan litestream.WALSegmentInfo
	g      errgroup.Group
	ctx    context.Context
	cancel func()

	info litestream.WALSegmentInfo
	err  error
}

func newWALSegmentIterator(ctx context.Context, client *ReplicaClient, generation string) *walSegmentIterator {
	itr := &walSegmentIterator{
		client:     client,
		generation: generation,
		ch:         make(chan litestream.WALSegmentInfo),
	}

	itr.ctx, itr.cancel = context.WithCancel(ctx)
	itr.g.Go(itr.fetch)

	return itr
}

// fetch runs in a separate goroutine to fetch pages of objects and stream them to a channel.
func (itr *walSegmentIterator) fetch() error {
	defer close(itr.ch)

	dir, err := litestream.WALPath(itr.client.Path, itr.generation)
	if err != nil {
		return fmt.Errorf("cannot determine wal path: %w", err)
	}

	return itr.client.s3.ListObjectsPagesWithContext(itr.ctx, &s3.ListObjectsInput{
		Bucket:    aws.String(itr.client.Bucket),
		Prefix:    aws.String(dir + "/"),
		Delimiter: aws.String("/"),
	}, func(page *s3.ListObjectsOutput, lastPage bool) bool {
		internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "LIST").Inc()

		for _, obj := range page.Contents {
			key := path.Base(aws.StringValue(obj.Key))
			index, offset, err := litestream.ParseWALSegmentPath(key)
			if err != nil {
				continue
			}

			info := litestream.WALSegmentInfo{
				Generation: itr.generation,
				Index:      index,
				Offset:     offset,
				Size:       aws.Int64Value(obj.Size),
				CreatedAt:  obj.LastModified.UTC(),
			}

			select {
			case <-itr.ctx.Done():
				return false
			case itr.ch <- info:
			}
		}
		return true
	})
}

func (itr *walSegmentIterator) Close() (err error) {
	err = itr.err

	// Cancel context and wait for error group to finish.
	itr.cancel()
	if e := itr.g.Wait(); e != nil && err == nil {
		err = e
	}

	return err
}

func (itr *walSegmentIterator) Next() bool {
	// Exit if an error has already occurred.
	if itr.err != nil {
		return false
	}

	// Return false if context was canceled or if there are no more segments.
	// Otherwise fetch the next segment and store it on the iterator.
	select {
	case <-itr.ctx.Done():
		return false
	case info, ok := <-itr.ch:
		if !ok {
			return false
		}
		itr.info = info
		return true
	}
}

func (itr *walSegmentIterator) Err() error { return itr.err }

func (itr *walSegmentIterator) WALSegment() litestream.WALSegmentInfo {
	return itr.info
}

// ParseHost extracts data from a hostname depending on the service provider.
func ParseHost(s string) (bucket, region, endpoint string, forcePathStyle bool) {
	// Extract port if one is specified.
	host, port, err := net.SplitHostPort(s)
	if err != nil {
		host = s
	}

	// Default to path-based URLs, except for AWS S3 itself.
	forcePathStyle = true

	// Extract fields from provider-specific host formats.
	scheme := "https"
	if a := localhostRegex.FindStringSubmatch(host); a != nil {
		bucket, region = a[1], "us-east-1"
		scheme, endpoint = "http", "localhost"
	} else if a := backblazeRegex.FindStringSubmatch(host); a != nil {
		bucket, region = a[1], a[2]
		endpoint = fmt.Sprintf("s3.%s.backblazeb2.com", region)
	} else if a := filebaseRegex.FindStringSubmatch(host); a != nil {
		bucket, endpoint = a[1], "s3.filebase.com"
	} else if a := digitalOceanRegex.FindStringSubmatch(host); a != nil {
		bucket, region = a[1], a[2]
		endpoint = fmt.Sprintf("%s.digitaloceanspaces.com", region)
	} else if a := scalewayRegex.FindStringSubmatch(host); a != nil {
		bucket, region = a[1], a[2]
		endpoint = fmt.Sprintf("s3.%s.scw.cloud", region)
	} else if a := linodeRegex.FindStringSubmatch(host); a != nil {
		bucket, region = a[1], a[2]
		endpoint = fmt.Sprintf("%s.linodeobjects.com", region)
	} else {
		bucket = host
		forcePathStyle = false
	}

	// Add port back to endpoint, if available.
	if endpoint != "" && port != "" {
		endpoint = net.JoinHostPort(endpoint, port)
	}

	// Prepend scheme to endpoint.
	if endpoint != "" {
		endpoint = scheme + "://" + endpoint
	}

	return bucket, region, endpoint, forcePathStyle
}

var (
	localhostRegex    = regexp.MustCompile(`^(?:(.+)\.)?localhost$`)
	backblazeRegex    = regexp.MustCompile(`^(?:(.+)\.)?s3.([^.]+)\.backblazeb2.com$`)
	filebaseRegex     = regexp.MustCompile(`^(?:(.+)\.)?s3.filebase.com$`)
	digitalOceanRegex = regexp.MustCompile(`^(?:(.+)\.)?([^.]+)\.digitaloceanspaces.com$`)
	scalewayRegex     = regexp.MustCompile(`^(?:(.+)\.)?s3.([^.]+)\.scw\.cloud$`)
	linodeRegex       = regexp.MustCompile(`^(?:(.+)\.)?([^.]+)\.linodeobjects.com$`)
)
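
// For illustration, a few inputs and the values ParseHost should derive from
// them given the regexes above (illustrative examples, not an exhaustive or
// tested list):
//
//	ParseHost("mybkt.localhost:9000")                 // bucket "mybkt", region "us-east-1", endpoint "http://localhost:9000", path-style
//	ParseHost("mybkt.s3.us-west-000.backblazeb2.com") // bucket "mybkt", region "us-west-000", endpoint "https://s3.us-west-000.backblazeb2.com", path-style
//	ParseHost("mybkt.nyc3.digitaloceanspaces.com")    // bucket "mybkt", region "nyc3", endpoint "https://nyc3.digitaloceanspaces.com", path-style
//	ParseHost("mybkt")                                // assumed AWS S3: bucket "mybkt", empty endpoint, virtual-host style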

func isNotExists(err error) bool {
	switch err := err.(type) {
	case awserr.Error:
		return err.Code() == `NoSuchKey`
	default:
		return false
	}
}

func deleteOutputError(out *s3.DeleteObjectsOutput) error {
	switch len(out.Errors) {
	case 0:
		return nil
	case 1:
		return fmt.Errorf("deleting object %s: %s - %s", aws.StringValue(out.Errors[0].Key), aws.StringValue(out.Errors[0].Code), aws.StringValue(out.Errors[0].Message))
	default:
		return fmt.Errorf("%d errors occurred deleting objects, %s: %s - %s (and %d others)",
			len(out.Errors), aws.StringValue(out.Errors[0].Key), aws.StringValue(out.Errors[0].Code), aws.StringValue(out.Errors[0].Message), len(out.Errors)-1)
	}
}
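
// deleteOutputError exists because DeleteObjects can succeed at the request
// level while still failing for individual keys; those failures come back in
// out.Errors rather than as a returned error. A minimal sketch of the case it
// guards against (the key shown is a made-up example):
//
//	out := &s3.DeleteObjectsOutput{Errors: []*s3.Error{{
//		Key:     aws.String("db/generations/0000000000000000/wal/00000000_00000000.wal.lz4"),
//		Code:    aws.String("AccessDenied"),
//		Message: aws.String("Access Denied"),
//	}}}
//	err := deleteOutputError(out) // "deleting object db/...: AccessDenied - Access Denied"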

sftp/replica_client.go (new file, 488 lines)
@@ -0,0 +1,488 @@
package sftp

import (
	"context"
	"errors"
	"fmt"
	"io"
	"net"
	"os"
	"path"
	"sync"
	"time"

	"github.com/benbjohnson/litestream"
	"github.com/benbjohnson/litestream/internal"
	"github.com/pkg/sftp"
	"golang.org/x/crypto/ssh"
)

// ReplicaClientType is the client type for this package.
const ReplicaClientType = "sftp"

// Default settings for replica client.
const (
	DefaultDialTimeout = 30 * time.Second
)

var _ litestream.ReplicaClient = (*ReplicaClient)(nil)

// ReplicaClient is a client for writing snapshots & WAL segments to a remote
// server over SFTP.
type ReplicaClient struct {
	mu         sync.Mutex
	sshClient  *ssh.Client
	sftpClient *sftp.Client

	// SFTP connection info
	Host        string
	User        string
	Password    string
	Path        string
	KeyPath     string
	DialTimeout time.Duration
}

// NewReplicaClient returns a new instance of ReplicaClient.
func NewReplicaClient() *ReplicaClient {
	return &ReplicaClient{
		DialTimeout: DefaultDialTimeout,
	}
}
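
// A minimal configuration sketch for this client; the host, user, and paths
// below are placeholder values, and either Password or KeyPath is enough on
// its own:
//
//	client := NewReplicaClient()
//	client.Host = "backup.example.com" // Init appends port 22 when none is given
//	client.User = "litestream"
//	client.KeyPath = "/etc/litestream/id_ed25519"
//	client.Path = "/var/backups/db"
//	// client.Password = "..." // password auth is also supported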

// Type returns "sftp" as the client type.
func (c *ReplicaClient) Type() string {
	return ReplicaClientType
}

// Init initializes the connection to the SFTP server. No-op if already initialized.
func (c *ReplicaClient) Init(ctx context.Context) (_ *sftp.Client, err error) {
	c.mu.Lock()
	defer c.mu.Unlock()

	if c.sftpClient != nil {
		return c.sftpClient, nil
	}

	if c.User == "" {
		return nil, fmt.Errorf("sftp user required")
	}

	// Build SSH configuration & auth methods
	config := &ssh.ClientConfig{
		User:            c.User,
		HostKeyCallback: ssh.InsecureIgnoreHostKey(),
		BannerCallback:  ssh.BannerDisplayStderr(),
	}
	if c.Password != "" {
		config.Auth = append(config.Auth, ssh.Password(c.Password))
	}

	if c.KeyPath != "" {
		buf, err := os.ReadFile(c.KeyPath)
		if err != nil {
			return nil, fmt.Errorf("cannot read sftp key path: %w", err)
		}

		signer, err := ssh.ParsePrivateKey(buf)
		if err != nil {
			return nil, fmt.Errorf("cannot parse sftp key path: %w", err)
		}
		config.Auth = append(config.Auth, ssh.PublicKeys(signer))
	}

	// Append standard port, if necessary.
	host := c.Host
	if _, _, err := net.SplitHostPort(c.Host); err != nil {
		host = net.JoinHostPort(c.Host, "22")
	}

	// Connect via SSH.
	if c.sshClient, err = ssh.Dial("tcp", host, config); err != nil {
		return nil, err
	}

	// Wrap connection with an SFTP client.
	if c.sftpClient, err = sftp.NewClient(c.sshClient); err != nil {
		c.sshClient.Close()
		c.sshClient = nil
		return nil, err
	}

	return c.sftpClient, nil
}

// Generations returns a list of available generation names.
func (c *ReplicaClient) Generations(ctx context.Context) (_ []string, err error) {
	defer func() { c.resetOnConnError(err) }()

	sftpClient, err := c.Init(ctx)
	if err != nil {
		return nil, err
	}

	fis, err := sftpClient.ReadDir(litestream.GenerationsPath(c.Path))
	if os.IsNotExist(err) {
		return nil, nil
	} else if err != nil {
		return nil, err
	}

	var generations []string
	for _, fi := range fis {
		if !fi.IsDir() {
			continue
		}

		name := path.Base(fi.Name())
		if !litestream.IsGenerationName(name) {
			continue
		}
		generations = append(generations, name)
	}

	return generations, nil
}

// DeleteGeneration deletes all snapshots & WAL segments within a generation.
func (c *ReplicaClient) DeleteGeneration(ctx context.Context, generation string) (err error) {
	defer func() { c.resetOnConnError(err) }()

	sftpClient, err := c.Init(ctx)
	if err != nil {
		return err
	}

	dir, err := litestream.GenerationPath(c.Path, generation)
	if err != nil {
		return fmt.Errorf("cannot determine generation path: %w", err)
	}

	var dirs []string
	walker := sftpClient.Walk(dir)
	for walker.Step() {
		if err := walker.Err(); err != nil {
			return fmt.Errorf("cannot walk path %q: %w", walker.Path(), err)
		}
		if walker.Stat().IsDir() {
			dirs = append(dirs, walker.Path())
			continue
		}

		if err := sftpClient.Remove(walker.Path()); err != nil && !os.IsNotExist(err) {
			return fmt.Errorf("cannot delete file %q: %w", walker.Path(), err)
		}

		internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "DELETE").Inc()
	}

	// Remove directories in reverse order after they have been emptied.
	for i := len(dirs) - 1; i >= 0; i-- {
		filename := dirs[i]
		if err := sftpClient.RemoveDirectory(filename); err != nil && !os.IsNotExist(err) {
			return fmt.Errorf("cannot delete directory %q: %w", filename, err)
		}
	}

	// log.Printf("%s(%s): retainer: deleting generation: %s", r.db.Path(), r.Name(), generation)

	return nil
}

// Snapshots returns an iterator over all available snapshots for a generation.
func (c *ReplicaClient) Snapshots(ctx context.Context, generation string) (_ litestream.SnapshotIterator, err error) {
	defer func() { c.resetOnConnError(err) }()

	sftpClient, err := c.Init(ctx)
	if err != nil {
		return nil, err
	}

	dir, err := litestream.SnapshotsPath(c.Path, generation)
	if err != nil {
		return nil, fmt.Errorf("cannot determine snapshots path: %w", err)
	}

	fis, err := sftpClient.ReadDir(dir)
	if os.IsNotExist(err) {
		return litestream.NewSnapshotInfoSliceIterator(nil), nil
	} else if err != nil {
		return nil, err
	}

	// Iterate over every file and convert to metadata.
	infos := make([]litestream.SnapshotInfo, 0, len(fis))
	for _, fi := range fis {
		// Parse index from filename.
		index, err := litestream.ParseSnapshotPath(path.Base(fi.Name()))
		if err != nil {
			continue
		}

		infos = append(infos, litestream.SnapshotInfo{
			Generation: generation,
			Index:      index,
			Size:       fi.Size(),
			CreatedAt:  fi.ModTime().UTC(),
		})
	}

	return litestream.NewSnapshotInfoSliceIterator(infos), nil
}

// WriteSnapshot writes LZ4 compressed data from rd to a file on the remote server.
func (c *ReplicaClient) WriteSnapshot(ctx context.Context, generation string, index int, rd io.Reader) (info litestream.SnapshotInfo, err error) {
	defer func() { c.resetOnConnError(err) }()

	sftpClient, err := c.Init(ctx)
	if err != nil {
		return info, err
	}

	filename, err := litestream.SnapshotPath(c.Path, generation, index)
	if err != nil {
		return info, fmt.Errorf("cannot determine snapshot path: %w", err)
	}
	startTime := time.Now()

	if err := sftpClient.MkdirAll(path.Dir(filename)); err != nil {
		return info, fmt.Errorf("cannot make parent snapshot directory %q: %w", path.Dir(filename), err)
	}

	f, err := sftpClient.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC)
	if err != nil {
		return info, fmt.Errorf("cannot open snapshot file for writing: %w", err)
	}
	defer f.Close()

	n, err := io.Copy(f, rd)
	if err != nil {
		return info, err
	} else if err := f.Close(); err != nil {
		return info, err
	}

	internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "PUT").Inc()
	internal.OperationBytesCounterVec.WithLabelValues(ReplicaClientType, "PUT").Add(float64(n))

	// log.Printf("%s(%s): snapshot: creating %s/%08x t=%s", r.db.Path(), r.Name(), generation, index, time.Since(startTime).Truncate(time.Millisecond))

	return litestream.SnapshotInfo{
		Generation: generation,
		Index:      index,
		Size:       n,
		CreatedAt:  startTime.UTC(),
	}, nil
}

// SnapshotReader returns a reader for snapshot data at the given generation/index.
func (c *ReplicaClient) SnapshotReader(ctx context.Context, generation string, index int) (_ io.ReadCloser, err error) {
	defer func() { c.resetOnConnError(err) }()

	sftpClient, err := c.Init(ctx)
	if err != nil {
		return nil, err
	}

	filename, err := litestream.SnapshotPath(c.Path, generation, index)
	if err != nil {
		return nil, fmt.Errorf("cannot determine snapshot path: %w", err)
	}

	f, err := sftpClient.Open(filename)
	if err != nil {
		return nil, err
	}

	internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "GET").Inc()

	return f, nil
}

// DeleteSnapshot deletes a snapshot with the given generation & index.
func (c *ReplicaClient) DeleteSnapshot(ctx context.Context, generation string, index int) (err error) {
	defer func() { c.resetOnConnError(err) }()

	sftpClient, err := c.Init(ctx)
	if err != nil {
		return err
	}

	filename, err := litestream.SnapshotPath(c.Path, generation, index)
	if err != nil {
		return fmt.Errorf("cannot determine snapshot path: %w", err)
	}

	if err := sftpClient.Remove(filename); err != nil && !os.IsNotExist(err) {
		return fmt.Errorf("cannot delete snapshot %q: %w", filename, err)
	}

	internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "DELETE").Inc()
	return nil
}

// WALSegments returns an iterator over all available WAL files for a generation.
func (c *ReplicaClient) WALSegments(ctx context.Context, generation string) (_ litestream.WALSegmentIterator, err error) {
	defer func() { c.resetOnConnError(err) }()

	sftpClient, err := c.Init(ctx)
	if err != nil {
		return nil, err
	}

	dir, err := litestream.WALPath(c.Path, generation)
	if err != nil {
		return nil, fmt.Errorf("cannot determine wal path: %w", err)
	}

	fis, err := sftpClient.ReadDir(dir)
	if os.IsNotExist(err) {
		return litestream.NewWALSegmentInfoSliceIterator(nil), nil
	} else if err != nil {
		return nil, err
	}

	// Iterate over every file and convert to metadata.
	infos := make([]litestream.WALSegmentInfo, 0, len(fis))
	for _, fi := range fis {
		index, offset, err := litestream.ParseWALSegmentPath(path.Base(fi.Name()))
		if err != nil {
			continue
		}

		infos = append(infos, litestream.WALSegmentInfo{
			Generation: generation,
			Index:      index,
			Offset:     offset,
			Size:       fi.Size(),
			CreatedAt:  fi.ModTime().UTC(),
		})
	}

	return litestream.NewWALSegmentInfoSliceIterator(infos), nil
}

// WriteWALSegment writes LZ4 compressed data from rd into a file on the remote server.
func (c *ReplicaClient) WriteWALSegment(ctx context.Context, pos litestream.Pos, rd io.Reader) (info litestream.WALSegmentInfo, err error) {
	defer func() { c.resetOnConnError(err) }()

	sftpClient, err := c.Init(ctx)
	if err != nil {
		return info, err
	}

	filename, err := litestream.WALSegmentPath(c.Path, pos.Generation, pos.Index, pos.Offset)
	if err != nil {
		return info, fmt.Errorf("cannot determine wal segment path: %w", err)
	}
	startTime := time.Now()

	if err := sftpClient.MkdirAll(path.Dir(filename)); err != nil {
		return info, fmt.Errorf("cannot make parent wal segment directory %q: %w", path.Dir(filename), err)
	}

	f, err := sftpClient.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC)
	if err != nil {
		return info, fmt.Errorf("cannot open wal segment file for writing: %w", err)
	}
	defer f.Close()

	n, err := io.Copy(f, rd)
	if err != nil {
		return info, err
	} else if err := f.Close(); err != nil {
		return info, err
	}

	internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "PUT").Inc()
	internal.OperationBytesCounterVec.WithLabelValues(ReplicaClientType, "PUT").Add(float64(n))

	return litestream.WALSegmentInfo{
		Generation: pos.Generation,
		Index:      pos.Index,
		Offset:     pos.Offset,
		Size:       n,
		CreatedAt:  startTime.UTC(),
	}, nil
}
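
// A minimal usage sketch for WriteWALSegment above and the WALSegmentReader
// defined below; the position values and the lz4Data reader (an io.Reader of
// LZ4-compressed bytes) are placeholders, not real data from this package:
//
//	pos := litestream.Pos{Generation: "0000000000000000", Index: 0, Offset: 0}
//	if _, err := client.WriteWALSegment(ctx, pos, lz4Data); err != nil {
//		return err
//	}
//	rc, err := client.WALSegmentReader(ctx, pos) // read the same segment back
//	if err != nil {
//		return err
//	}
//	defer rc.Close()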

// WALSegmentReader returns a reader for a section of WAL data at the given index.
// Returns os.ErrNotExist if no matching index/offset is found.
func (c *ReplicaClient) WALSegmentReader(ctx context.Context, pos litestream.Pos) (_ io.ReadCloser, err error) {
	defer func() { c.resetOnConnError(err) }()

	sftpClient, err := c.Init(ctx)
	if err != nil {
		return nil, err
	}

	filename, err := litestream.WALSegmentPath(c.Path, pos.Generation, pos.Index, pos.Offset)
	if err != nil {
		return nil, fmt.Errorf("cannot determine wal segment path: %w", err)
	}

	f, err := sftpClient.Open(filename)
	if err != nil {
		return nil, err
	}

	internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "GET").Inc()

	return f, nil
}

// DeleteWALSegments deletes WAL segments at the given positions.
func (c *ReplicaClient) DeleteWALSegments(ctx context.Context, a []litestream.Pos) (err error) {
	defer func() { c.resetOnConnError(err) }()

	sftpClient, err := c.Init(ctx)
	if err != nil {
		return err
	}

	for _, pos := range a {
		filename, err := litestream.WALSegmentPath(c.Path, pos.Generation, pos.Index, pos.Offset)
		if err != nil {
			return fmt.Errorf("cannot determine wal segment path: %w", err)
		}

		if err := sftpClient.Remove(filename); err != nil && !os.IsNotExist(err) {
			return fmt.Errorf("cannot delete wal segment %q: %w", filename, err)
		}
		internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "DELETE").Inc()
	}

	return nil
}

// Cleanup deletes the generations directory and the replica path once they are empty.
func (c *ReplicaClient) Cleanup(ctx context.Context) (err error) {
	defer func() { c.resetOnConnError(err) }()

	sftpClient, err := c.Init(ctx)
	if err != nil {
		return err
	}

	if err := sftpClient.RemoveDirectory(litestream.GenerationsPath(c.Path)); err != nil && !os.IsNotExist(err) {
		return fmt.Errorf("cannot delete generations path: %w", err)
	} else if err := sftpClient.RemoveDirectory(c.Path); err != nil && !os.IsNotExist(err) {
		return fmt.Errorf("cannot delete path: %w", err)
	}
	return nil
}

// resetOnConnError closes & clears the client if a connection error occurs.
func (c *ReplicaClient) resetOnConnError(err error) {
	if !errors.Is(err, sftp.ErrSSHFxConnectionLost) {
		return
	}

	if c.sftpClient != nil {
		c.sftpClient.Close()
		c.sftpClient = nil
	}
	if c.sshClient != nil {
		c.sshClient.Close()
		c.sshClient = nil
	}
}
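
// The storage methods above pair a named error return with
// "defer func() { c.resetOnConnError(err) }()" so that a lost SSH connection
// tears down the cached clients and the next call re-dials via Init. A minimal
// sketch of the same pattern (doWork is a hypothetical method name):
//
//	func (c *ReplicaClient) doWork(ctx context.Context) (err error) {
//		defer func() { c.resetOnConnError(err) }() // sees the final value of err
//
//		sftpClient, err := c.Init(ctx)
//		if err != nil {
//			return err
//		}
//		_ = sftpClient
//		return nil
//	}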