Compare commits

..

2 Commits

Author SHA1 Message Date
Ben Johnson
99fe882376 Refactor shadow WAL to use segments 2021-07-22 16:03:29 -06:00
Ben Johnson
fc897b481f Group replica wal segments by index
This commit changes the replica path format to group segments within
a single index in the same directory. This is to eventually add the
ability to seek to a record on file-based systems without having
to iterate over the records. The DB shadow WAL will also be changed
to this same format to support live replicas.
2021-06-14 15:24:05 -06:00
45 changed files with 2608 additions and 2499 deletions

View File

@@ -1,17 +1,17 @@
## Contribution Policy ## Open-source, not open-contribution
Initially, Litestream was closed to outside contributions. The goal was to [Similar to SQLite](https://www.sqlite.org/copyright.html), Litestream is open
reduce burnout by limiting the maintenance overhead of reviewing and validating source but closed to contributions. This keeps the code base free of proprietary
third-party code. However, this policy is overly broad and has prevented small, or licensed code but it also helps me continue to maintain and build Litestream.
easily testable patches from being contributed.
Litestream is now open to code contributions for bug fixes only. Features carry As the author of [BoltDB](https://github.com/boltdb/bolt), I found that
a long-term maintenance burden so they will not be accepted at this time. accepting and maintaining third party patches contributed to my burn out and
Please [submit an issue][new-issue] if you have a feature you'd like to I eventually archived the project. Writing databases & low-level replication
request. tools involves nuance and simple one line changes can have profound and
unexpected changes in correctness and performance. Small contributions
typically required hours of my time to properly test and validate them.
If you find mistakes in the documentation, please submit a fix to the I am grateful for community involvement, bug reports, & feature requests. I do
[documentation repository][docs]. not wish to come off as anything but welcoming, however, I've
made the decision to keep this project closed to contributions for my own
[new-issue]: https://github.com/benbjohnson/litestream/issues/new mental health and long term viability of the project.
[docs]: https://github.com/benbjohnson/litestream.io

7
.github/pull_request_template.md vendored Normal file
View File

@@ -0,0 +1,7 @@
Litestream is not accepting code contributions at this time. You can find a summary of why on the project's GitHub README:
https://github.com/benbjohnson/litestream#open-source-not-open-contribution
Web site & Documentation changes, however, are welcome. You can find that repository here:
https://github.com/benbjohnson/litestream.io

View File

@@ -1,229 +0,0 @@
on:
push:
pull_request:
types:
- opened
- synchronize
- reopened
env:
GO_VERSION: "1.21"
name: Commit
jobs:
lint:
name: Lint
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v4
with:
go-version: ${{ env.GO_VERSION }}
- run: |
go install golang.org/x/tools/cmd/goimports@latest
go install honnef.co/go/tools/cmd/staticcheck@latest
export PATH="$HOME/go/bin:$PATH"
- uses: pre-commit/action@v3.0.0
build-windows:
name: Build Windows
runs-on: ubuntu-latest
steps:
- run: sudo apt-get install -y mingw-w64
- uses: actions/checkout@v4
- uses: actions/setup-go@v4
with:
go-version: ${{ env.GO_VERSION }}
- run: |
go build ./cmd/litestream/
file ./litestream.exe
env:
CGO_ENABLED: "1"
GOOS: windows
GOARCH: amd64
CC: x86_64-w64-mingw32-gcc
build:
name: Build & Unit Test
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v4
with:
go-version: ${{ env.GO_VERSION }}
- run: go env
- run: go install ./cmd/litestream
- run: go test -v ./...
# long-running-test:
# name: Run Long Running Unit Test
# runs-on: ubuntu-latest
# steps:
# - uses: actions/checkout@v2
# - uses: actions/setup-go@v2
# with:
# go-version: '1.20'
# - uses: actions/cache@v2
# with:
# path: ~/go/pkg/mod
# key: ${{ inputs.os }}-go-${{ hashFiles('**/go.sum') }}
# restore-keys: ${{ inputs.os }}-go-
#
# - run: go install ./cmd/litestream
# - run: go test -v -run=TestCmd_Replicate_LongRunning ./integration -long-running-duration 1m
s3-mock-test:
name: Run S3 Mock Tests
runs-on: ubuntu-latest
needs: build
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v5
with:
python-version: '3.12'
# cache: 'pip'
- run: pip install moto[s3,server]
- uses: actions/setup-go@v4
with:
go-version: ${{ env.GO_VERSION }}
- run: go env
- run: go install ./cmd/litestream
- run: ./etc/s3_mock.py go test -v ./replica_client_test.go -integration s3
s3-integration-test:
name: Run S3 Integration Tests
runs-on: ubuntu-latest
needs: build
if: github.ref == 'refs/heads/main'
concurrency:
group: integration-test-s3
steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v4
with:
go-version: ${{ env.GO_VERSION }}
- run: go env
- run: go install ./cmd/litestream
- run: go test -v ./replica_client_test.go -integration s3
env:
LITESTREAM_S3_ACCESS_KEY_ID: ${{ secrets.LITESTREAM_S3_ACCESS_KEY_ID }}
LITESTREAM_S3_SECRET_ACCESS_KEY: ${{ secrets.LITESTREAM_S3_SECRET_ACCESS_KEY }}
LITESTREAM_S3_REGION: us-east-1
LITESTREAM_S3_BUCKET: integration.litestream.io
gcp-integration-test:
name: Run GCP Integration Tests
runs-on: ubuntu-latest
needs: build
if: github.ref == 'refs/heads/main'
concurrency:
group: integration-test-gcp
steps:
- name: Extract GCP credentials
run: 'echo "$GOOGLE_APPLICATION_CREDENTIALS" > /opt/gcp.json'
shell: bash
env:
GOOGLE_APPLICATION_CREDENTIALS: ${{secrets.GOOGLE_APPLICATION_CREDENTIALS}}
- uses: actions/checkout@v4
- uses: actions/setup-go@v4
with:
go-version: ${{ env.GO_VERSION }}
- run: go env
- run: go install ./cmd/litestream
- run: go test -v ./replica_client_test.go -integration gcs
env:
GOOGLE_APPLICATION_CREDENTIALS: /opt/gcp.json
LITESTREAM_GCS_BUCKET: integration.litestream.io
abs-integration-test:
name: Run Azure Blob Store Integration Tests
runs-on: ubuntu-latest
needs: build
if: github.ref == 'refs/heads/main'
concurrency:
group: integration-test-abs
steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v4
with:
go-version: ${{ env.GO_VERSION }}
- run: go env
- run: go install ./cmd/litestream
- run: go test -v ./replica_client_test.go -integration abs
env:
LITESTREAM_ABS_ACCOUNT_NAME: ${{ secrets.LITESTREAM_ABS_ACCOUNT_NAME }}
LITESTREAM_ABS_ACCOUNT_KEY: ${{ secrets.LITESTREAM_ABS_ACCOUNT_KEY }}
LITESTREAM_ABS_BUCKET: integration
sftp-integration-test:
name: Run SFTP Integration Tests
runs-on: ubuntu-latest
needs: build
steps:
- name: Prepare OpenSSH server
run: |-
sudo mkdir -p /test/etc/ssh /test/home /run/sshd /test/data/
sudo ssh-keygen -t ed25519 -f /test/etc/ssh/id_ed25519_host -N ""
sudo ssh-keygen -t ed25519 -f /test/etc/ssh/id_ed25519 -N ""
sudo chmod 0600 /test/etc/ssh/id_ed25519_host /test/etc/ssh/id_ed25519
sudo chmod 0644 /test/etc/ssh/id_ed25519_host.pub /test/etc/ssh/id_ed25519.pub
sudo cp /test/etc/ssh/id_ed25519 /test/id_ed25519
sudo chown $USER /test/id_ed25519
sudo tee /test/etc/ssh/sshd_config <<EOF
Port 2222
HostKey /test/etc/ssh/id_ed25519_host
AuthorizedKeysFile /test/etc/ssh/id_ed25519.pub
AuthenticationMethods publickey
Subsystem sftp internal-sftp
UsePAM no
LogLevel DEBUG
EOF
sudo /usr/sbin/sshd -e -f /test/etc/ssh/sshd_config -E /test/debug.log
- name: Test OpenSSH server works with pubkey auth
run: ssh -v -i /test/id_ed25519 -o StrictHostKeyChecking=accept-new -p 2222 root@localhost whoami || (sudo cat /test/debug.log && exit 1)
- uses: actions/checkout@v4
- uses: actions/setup-go@v4
with:
go-version: ${{ env.GO_VERSION }}
- run: go env
- run: go install ./cmd/litestream
- run: go test -v ./replica_client_test.go -integration sftp
env:
LITESTREAM_SFTP_HOST: "localhost:2222"
LITESTREAM_SFTP_USER: "root"
LITESTREAM_SFTP_KEY_PATH: /test/id_ed25519
LITESTREAM_SFTP_PATH: /test/data

View File

@@ -1,51 +0,0 @@
on:
release:
types:
- published
# pull_request:
# types:
# - opened
# - synchronize
# - reopened
# branches-ignore:
# - "dependabot/**"
name: Release (Docker)
jobs:
docker:
runs-on: ubuntu-latest
env:
PLATFORMS: "linux/amd64,linux/arm64,linux/arm/v7"
VERSION: "${{ github.event_name == 'release' && github.event.release.name || github.sha }}"
steps:
- uses: actions/checkout@v2
- uses: docker/setup-qemu-action@v1
- uses: docker/setup-buildx-action@v1
- uses: docker/login-action@v1
with:
username: benbjohnson
password: ${{ secrets.DOCKERHUB_TOKEN }}
- id: meta
uses: docker/metadata-action@v3
with:
images: litestream/litestream
tags: |
type=ref,event=branch
type=ref,event=pr
type=sha
type=sha,format=long
type=semver,pattern={{version}}
type=semver,pattern={{major}}.{{minor}}
- uses: docker/build-push-action@v2
with:
context: .
push: true
platforms: ${{ env.PLATFORMS }}
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
build-args: |
LITESTREAM_VERSION=${{ env.VERSION }}

View File

@@ -6,7 +6,7 @@ on:
name: release (linux) name: release (linux)
jobs: jobs:
build: build:
runs-on: ubuntu-latest runs-on: ubuntu-18.04
strategy: strategy:
matrix: matrix:
include: include:
@@ -31,7 +31,7 @@ jobs:
- uses: actions/checkout@v2 - uses: actions/checkout@v2
- uses: actions/setup-go@v2 - uses: actions/setup-go@v2
with: with:
go-version: '1.21' go-version: '1.16'
- id: release - id: release
uses: bruceadams/get-release@v1.2.2 uses: bruceadams/get-release@v1.2.2
@@ -40,7 +40,7 @@ jobs:
- name: Install cross-compilers - name: Install cross-compilers
run: | run: |
sudo apt-get update sudo apt-get update
sudo apt-get install -y gcc-aarch64-linux-gnu gcc-arm-linux-gnueabihf gcc-arm-linux-gnueabi sudo apt-get install -y gcc-aarch64-linux-gnu gcc-arm-linux-gnueabihf gcc-arm-linux-gnueabi
- name: Install nfpm - name: Install nfpm
@@ -54,11 +54,11 @@ jobs:
mkdir -p dist mkdir -p dist
cp etc/litestream.yml etc/litestream.service dist cp etc/litestream.yml etc/litestream.service dist
cat etc/nfpm.yml | LITESTREAM_VERSION=${{ steps.release.outputs.tag_name }} envsubst > dist/nfpm.yml cat etc/nfpm.yml | LITESTREAM_VERSION=${{ steps.release.outputs.tag_name }} envsubst > dist/nfpm.yml
CGO_ENABLED=1 go build -ldflags "-s -w -extldflags "-static" -X 'main.Version=${{ steps.release.outputs.tag_name }}'" -tags osusergo,netgo,sqlite_omit_load_extension -o dist/litestream ./cmd/litestream CGO_ENABLED=1 go build -ldflags "-s -w -X 'main.Version=${{ steps.release.outputs.tag_name }}'" -o dist/litestream ./cmd/litestream
cd dist cd dist
tar -czvf litestream-${{ steps.release.outputs.tag_name }}-${{ env.GOOS }}-${{ env.GOARCH }}${{ env.GOARM }}.tar.gz litestream tar -czvf litestream-${{ steps.release.outputs.tag_name }}-${{ env.GOOS }}-${{ env.GOARCH }}${{ env.GOARM }}.tar.gz litestream
../nfpm pkg --config nfpm.yml --packager deb --target litestream-${{ steps.release.outputs.tag_name }}-${{ env.GOOS }}-${{ env.GOARCH }}${{ env.GOARM }}.deb ../nfpm pkg --config nfpm.yml --packager deb --target litestream-${{ steps.release.outputs.tag_name }}-${{ env.GOOS }}-${{ env.GOARCH }}${{ env.GOARM }}.deb
- name: Upload release tarball - name: Upload release tarball
uses: actions/upload-release-asset@v1.0.2 uses: actions/upload-release-asset@v1.0.2

View File

@@ -0,0 +1,62 @@
on:
release:
types:
- created
name: release (linux/static)
jobs:
build:
runs-on: ubuntu-18.04
strategy:
matrix:
include:
- arch: amd64
cc: gcc
- arch: arm64
cc: aarch64-linux-gnu-gcc
- arch: arm
arm: 6
cc: arm-linux-gnueabi-gcc
- arch: arm
arm: 7
cc: arm-linux-gnueabihf-gcc
env:
GOOS: linux
GOARCH: ${{ matrix.arch }}
GOARM: ${{ matrix.arm }}
CC: ${{ matrix.cc }}
steps:
- uses: actions/checkout@v2
- uses: actions/setup-go@v2
with:
go-version: '1.16'
- id: release
uses: bruceadams/get-release@v1.2.2
env:
GITHUB_TOKEN: ${{ github.token }}
- name: Install cross-compilers
run: |
sudo apt-get update
sudo apt-get install -y gcc-aarch64-linux-gnu gcc-arm-linux-gnueabihf gcc-arm-linux-gnueabi
- name: Build litestream
run: |
rm -rf dist
mkdir -p dist
CGO_ENABLED=1 go build -ldflags "-s -w -extldflags "-static" -X 'main.Version=${{ steps.release.outputs.tag_name }}'" -tags osusergo,netgo,sqlite_omit_load_extension -o dist/litestream ./cmd/litestream
cd dist
tar -czvf litestream-${{ steps.release.outputs.tag_name }}-${{ env.GOOS }}-${{ env.GOARCH }}${{ env.GOARM }}-static.tar.gz litestream
- name: Upload release tarball
uses: actions/upload-release-asset@v1.0.2
env:
GITHUB_TOKEN: ${{ github.token }}
with:
upload_url: ${{ steps.release.outputs.upload_url }}
asset_path: ./dist/litestream-${{ steps.release.outputs.tag_name }}-${{ env.GOOS }}-${{ env.GOARCH }}${{ env.GOARM }}-static.tar.gz
asset_name: litestream-${{ steps.release.outputs.tag_name }}-${{ env.GOOS }}-${{ env.GOARCH }}${{ env.GOARM }}-static.tar.gz
asset_content_type: application/gzip

62
.github/workflows/test.yml vendored Normal file
View File

@@ -0,0 +1,62 @@
on: push
name: test
jobs:
test:
runs-on: ubuntu-18.04
steps:
- uses: actions/setup-go@v2
with:
go-version: '1.16'
- uses: actions/checkout@v2
- uses: actions/cache@v2
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
- name: Extract GCP credentials
run: 'echo "$GOOGLE_APPLICATION_CREDENTIALS" > /opt/gcp.json'
shell: bash
env:
GOOGLE_APPLICATION_CREDENTIALS: ${{secrets.GOOGLE_APPLICATION_CREDENTIALS}}
- name: Extract SSH key
run: 'echo "$LITESTREAM_SFTP_KEY" > /opt/id_ed25519'
shell: bash
env:
LITESTREAM_SFTP_KEY: ${{secrets.LITESTREAM_SFTP_KEY}}
- name: Run unit tests
run: go test -v ./...
- name: Run aws s3 tests
run: go test -v -run=TestReplicaClient . -integration s3
env:
LITESTREAM_S3_ACCESS_KEY_ID: ${{ secrets.LITESTREAM_S3_ACCESS_KEY_ID }}
LITESTREAM_S3_SECRET_ACCESS_KEY: ${{ secrets.LITESTREAM_S3_SECRET_ACCESS_KEY }}
LITESTREAM_S3_REGION: ${{ secrets.LITESTREAM_S3_REGION }}
LITESTREAM_S3_BUCKET: ${{ secrets.LITESTREAM_S3_BUCKET }}
- name: Run google cloud storage (gcs) tests
run: go test -v -run=TestReplicaClient . -integration gcs
env:
GOOGLE_APPLICATION_CREDENTIALS: /opt/gcp.json
LITESTREAM_GCS_BUCKET: ${{ secrets.LITESTREAM_GCS_BUCKET }}
- name: Run azure blob storage (abs) tests
run: go test -v -run=TestReplicaClient . -integration abs
env:
LITESTREAM_ABS_ACCOUNT_NAME: ${{ secrets.LITESTREAM_ABS_ACCOUNT_NAME }}
LITESTREAM_ABS_ACCOUNT_KEY: ${{ secrets.LITESTREAM_ABS_ACCOUNT_KEY }}
LITESTREAM_ABS_BUCKET: ${{ secrets.LITESTREAM_ABS_BUCKET }}
- name: Run sftp tests
run: go test -v -run=TestReplicaClient . -integration sftp
env:
LITESTREAM_SFTP_HOST: ${{ secrets.LITESTREAM_SFTP_HOST }}
LITESTREAM_SFTP_USER: ${{ secrets.LITESTREAM_SFTP_USER }}
LITESTREAM_SFTP_KEY_PATH: /opt/id_ed25519
LITESTREAM_SFTP_PATH: ${{ secrets.LITESTREAM_SFTP_PATH }}

View File

@@ -1,20 +0,0 @@
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.1.0
hooks:
- id: trailing-whitespace
exclude_types: [markdown]
- id: end-of-file-fixer
- id: check-yaml
- id: check-added-large-files
- repo: https://github.com/tekwizely/pre-commit-golang
rev: v1.0.0-beta.5
hooks:
- id: go-imports-repo
args:
- "-local"
- "github.com/benbjohnson/litestrem"
- "-w"
- id: go-vet-repo-mod
- id: go-staticcheck-repo-mod

View File

@@ -1,16 +1,12 @@
FROM golang:1.21.3 as builder FROM golang:1.16 as builder
WORKDIR /src/litestream WORKDIR /src/litestream
COPY . . COPY . .
ARG LITESTREAM_VERSION=latest ARG LITESTREAM_VERSION=latest
RUN --mount=type=cache,target=/root/.cache/go-build \ RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg \ --mount=type=cache,target=/go/pkg \
go build -ldflags "-s -w -X 'main.Version=${LITESTREAM_VERSION}' -extldflags '-static'" -tags osusergo,netgo,sqlite_omit_load_extension -o /usr/local/bin/litestream ./cmd/litestream go build -ldflags "-s -w -X 'main.Version=${LITESTREAM_VERSION}' -extldflags '-static'" -tags osusergo,netgo,sqlite_omit_load_extension -o /usr/local/bin/litestream ./cmd/litestream
FROM alpine
FROM alpine:3.17.2
COPY --from=builder /usr/local/bin/litestream /usr/local/bin/litestream COPY --from=builder /usr/local/bin/litestream /usr/local/bin/litestream
ENTRYPOINT ["/usr/local/bin/litestream"] ENTRYPOINT ["/usr/local/bin/litestream"]
CMD [] CMD []

View File

@@ -199,4 +199,4 @@
distributed under the License is distributed on an "AS IS" BASIS, distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. limitations under the License.

View File

@@ -20,17 +20,11 @@ ifndef LITESTREAM_VERSION
$(error LITESTREAM_VERSION is undefined) $(error LITESTREAM_VERSION is undefined)
endif endif
mkdir -p dist mkdir -p dist
go build -v -ldflags "-s -w -X 'main.Version=${LITESTREAM_VERSION}'" -o dist/litestream ./cmd/litestream
GOOS=darwin GOARCH=amd64 CC="gcc -target amd64-apple-macos11" CGO_ENABLED=1 go build -v -ldflags "-s -w -X 'main.Version=${LITESTREAM_VERSION}'" -o dist/litestream ./cmd/litestream
gon etc/gon.hcl gon etc/gon.hcl
mv dist/litestream.zip dist/litestream-${LITESTREAM_VERSION}-darwin-amd64.zip mv dist/litestream.zip dist/litestream-${LITESTREAM_VERSION}-darwin-amd64.zip
openssl dgst -sha256 dist/litestream-${LITESTREAM_VERSION}-darwin-amd64.zip openssl dgst -sha256 dist/litestream-${LITESTREAM_VERSION}-darwin-amd64.zip
GOOS=darwin GOARCH=arm64 CC="gcc -target arm64-apple-macos11" CGO_ENABLED=1 go build -v -ldflags "-s -w -X 'main.Version=${LITESTREAM_VERSION}'" -o dist/litestream ./cmd/litestream
gon etc/gon.hcl
mv dist/litestream.zip dist/litestream-${LITESTREAM_VERSION}-darwin-arm64.zip
openssl dgst -sha256 dist/litestream-${LITESTREAM_VERSION}-darwin-arm64.zip
clean: clean:
rm -rf dist rm -rf dist

View File

@@ -6,7 +6,7 @@ Litestream
![test](https://github.com/benbjohnson/litestream/workflows/test/badge.svg) ![test](https://github.com/benbjohnson/litestream/workflows/test/badge.svg)
========== ==========
Litestream is a standalone disaster recovery tool for SQLite. It runs as a Litestream is a standalone streaming replication tool for SQLite. It runs as a
background process and safely replicates changes incrementally to another file background process and safely replicates changes incrementally to another file
or S3. Litestream only communicates with SQLite through the SQLite API so it or S3. Litestream only communicates with SQLite through the SQLite API so it
will not corrupt your database. will not corrupt your database.
@@ -33,28 +33,35 @@ energy into the project to help make it better:
- Thanks to [Cory LaNou](https://twitter.com/corylanou) for giving early feedback and testing when Litestream was still pre-release. - Thanks to [Cory LaNou](https://twitter.com/corylanou) for giving early feedback and testing when Litestream was still pre-release.
- Thanks to [Michael Lynch](https://github.com/mtlynch) for digging into issues and contributing to the documentation. - Thanks to [Michael Lynch](https://github.com/mtlynch) for digging into issues and contributing to the documentation.
- Thanks to [Kurt Mackey](https://twitter.com/mrkurt) for feedback and testing. - Thanks to [Kurt Mackey](https://twitter.com/mrkurt) for feedback and testing. Also, thanks to fly.io for providing testing resources.
- Thanks to [Sam Weston](https://twitter.com/cablespaghetti) for figuring out how to run Litestream on Kubernetes and writing up the docs for it. - Thanks to [Sam Weston](https://twitter.com/cablespaghetti) for figuring out how to run Litestream on Kubernetes and writing up the docs for it.
- Thanks to [Rafael](https://github.com/netstx) & [Jungle Boogie](https://github.com/jungle-boogie) for helping to get OpenBSD release builds working. - Thanks to [Rafael](https://github.com/netstx) & [Jungle Boogie](https://github.com/jungle-boogie) for helping to get OpenBSD release builds working.
- Thanks to [Simon Gottschlag](https://github.com/simongottschlag), [Marin](https://github.com/supermarin),[Victor Björklund](https://github.com/victorbjorklund), [Jonathan Beri](https://twitter.com/beriberikix) [Yuri](https://github.com/yurivish), [Nathan Probst](https://github.com/nprbst), [Yann Coleu](https://github.com/yanc0), and [Nicholas Grilly](https://twitter.com/ngrilly) for frequent feedback, testing, & support. - Thanks to [Simon Gottschlag](https://github.com/simongottschlag), [Marin](https://github.com/supermarin),[Victor Björklund](https://github.com/victorbjorklund), [Jonathan Beri](https://twitter.com/beriberikix) [Yuri](https://github.com/yurivish), [Nathan Probst](https://github.com/nprbst), [Yann Coleuu](https://github.com/yanc0), and [Nicholas Grilly](https://twitter.com/ngrilly) for frequent feedback, testing, & support.
Huge thanks to fly.io for their support and for contributing credits for testing and development!
## Contribution Policy
Initially, Litestream was closed to outside contributions. The goal was to
reduce burnout by limiting the maintenance overhead of reviewing and validating
third-party code. However, this policy is overly broad and has prevented small,
easily testable patches from being contributed.
Litestream is now open to code contributions for bug fixes only. Features carry ## Open-source, not open-contribution
a long-term maintenance burden so they will not be accepted at this time.
Please [submit an issue][new-issue] if you have a feature you'd like to
request.
If you find mistakes in the documentation, please submit a fix to the [Similar to SQLite](https://www.sqlite.org/copyright.html), Litestream is open
[documentation repository][docs]. source but closed to code contributions. This keeps the code base free of
proprietary or licensed code but it also helps me continue to maintain and build
Litestream.
[new-issue]: https://github.com/benbjohnson/litestream/issues/new As the author of [BoltDB](https://github.com/boltdb/bolt), I found that
accepting and maintaining third party patches contributed to my burn out and
I eventually archived the project. Writing databases & low-level replication
tools involves nuance and simple one line changes can have profound and
unexpected changes in correctness and performance. Small contributions
typically required hours of my time to properly test and validate them.
I am grateful for community involvement, bug reports, & feature requests. I do
not wish to come off as anything but welcoming, however, I've
made the decision to keep this project closed to contributions for my own
mental health and long term viability of the project.
The [documentation repository][docs] is MIT licensed and pull requests are welcome there.
[releases]: https://github.com/benbjohnson/litestream/releases
[docs]: https://github.com/benbjohnson/litestream.io [docs]: https://github.com/benbjohnson/litestream.io

View File

@@ -102,7 +102,7 @@ func (c *ReplicaClient) Generations(ctx context.Context) ([]string, error) {
internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "LIST").Inc() internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "LIST").Inc()
resp, err := c.containerURL.ListBlobsHierarchySegment(ctx, marker, "/", azblob.ListBlobsSegmentOptions{ resp, err := c.containerURL.ListBlobsHierarchySegment(ctx, marker, "/", azblob.ListBlobsSegmentOptions{
Prefix: litestream.GenerationsPath(c.Path) + "/", Prefix: path.Join(c.Path, "generations") + "/",
}) })
if err != nil { if err != nil {
return nil, err return nil, err
@@ -125,18 +125,17 @@ func (c *ReplicaClient) Generations(ctx context.Context) ([]string, error) {
func (c *ReplicaClient) DeleteGeneration(ctx context.Context, generation string) error { func (c *ReplicaClient) DeleteGeneration(ctx context.Context, generation string) error {
if err := c.Init(ctx); err != nil { if err := c.Init(ctx); err != nil {
return err return err
} else if generation == "" {
return fmt.Errorf("generation required")
} }
dir, err := litestream.GenerationPath(c.Path, generation) prefix := path.Join(c.Path, "generations", generation) + "/"
if err != nil {
return fmt.Errorf("cannot determine generation path: %w", err)
}
var marker azblob.Marker var marker azblob.Marker
for marker.NotDone() { for marker.NotDone() {
internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "LIST").Inc() internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "LIST").Inc()
resp, err := c.containerURL.ListBlobsFlatSegment(ctx, marker, azblob.ListBlobsSegmentOptions{Prefix: dir + "/"}) resp, err := c.containerURL.ListBlobsFlatSegment(ctx, marker, azblob.ListBlobsSegmentOptions{Prefix: prefix})
if err != nil { if err != nil {
return err return err
} }
@@ -171,12 +170,11 @@ func (c *ReplicaClient) Snapshots(ctx context.Context, generation string) (lites
func (c *ReplicaClient) WriteSnapshot(ctx context.Context, generation string, index int, rd io.Reader) (info litestream.SnapshotInfo, err error) { func (c *ReplicaClient) WriteSnapshot(ctx context.Context, generation string, index int, rd io.Reader) (info litestream.SnapshotInfo, err error) {
if err := c.Init(ctx); err != nil { if err := c.Init(ctx); err != nil {
return info, err return info, err
} else if generation == "" {
return info, fmt.Errorf("generation required")
} }
key, err := litestream.SnapshotPath(c.Path, generation, index) key := path.Join(c.Path, "generations", generation, "snapshots", litestream.FormatIndex(index)+".snapshot.lz4")
if err != nil {
return info, fmt.Errorf("cannot determine snapshot path: %w", err)
}
startTime := time.Now() startTime := time.Now()
rc := internal.NewReadCounter(rd) rc := internal.NewReadCounter(rd)
@@ -206,12 +204,11 @@ func (c *ReplicaClient) WriteSnapshot(ctx context.Context, generation string, in
func (c *ReplicaClient) SnapshotReader(ctx context.Context, generation string, index int) (io.ReadCloser, error) { func (c *ReplicaClient) SnapshotReader(ctx context.Context, generation string, index int) (io.ReadCloser, error) {
if err := c.Init(ctx); err != nil { if err := c.Init(ctx); err != nil {
return nil, err return nil, err
} else if generation == "" {
return nil, fmt.Errorf("generation required")
} }
key, err := litestream.SnapshotPath(c.Path, generation, index) key := path.Join(c.Path, "generations", generation, "snapshots", litestream.FormatIndex(index)+".snapshot.lz4")
if err != nil {
return nil, fmt.Errorf("cannot determine snapshot path: %w", err)
}
blobURL := c.containerURL.NewBlobURL(key) blobURL := c.containerURL.NewBlobURL(key)
resp, err := blobURL.Download(ctx, 0, 0, azblob.BlobAccessConditions{}, false, azblob.ClientProvidedKeyOptions{}) resp, err := blobURL.Download(ctx, 0, 0, azblob.BlobAccessConditions{}, false, azblob.ClientProvidedKeyOptions{})
@@ -231,12 +228,11 @@ func (c *ReplicaClient) SnapshotReader(ctx context.Context, generation string, i
func (c *ReplicaClient) DeleteSnapshot(ctx context.Context, generation string, index int) error { func (c *ReplicaClient) DeleteSnapshot(ctx context.Context, generation string, index int) error {
if err := c.Init(ctx); err != nil { if err := c.Init(ctx); err != nil {
return err return err
} else if generation == "" {
return fmt.Errorf("generation required")
} }
key, err := litestream.SnapshotPath(c.Path, generation, index) key := path.Join(c.Path, "generations", generation, "snapshots", litestream.FormatIndex(index)+".snapshot.lz4")
if err != nil {
return fmt.Errorf("cannot determine snapshot path: %w", err)
}
internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "DELETE").Inc() internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "DELETE").Inc()
@@ -261,12 +257,11 @@ func (c *ReplicaClient) WALSegments(ctx context.Context, generation string) (lit
func (c *ReplicaClient) WriteWALSegment(ctx context.Context, pos litestream.Pos, rd io.Reader) (info litestream.WALSegmentInfo, err error) { func (c *ReplicaClient) WriteWALSegment(ctx context.Context, pos litestream.Pos, rd io.Reader) (info litestream.WALSegmentInfo, err error) {
if err := c.Init(ctx); err != nil { if err := c.Init(ctx); err != nil {
return info, err return info, err
} else if pos.Generation == "" {
return info, fmt.Errorf("generation required")
} }
key, err := litestream.WALSegmentPath(c.Path, pos.Generation, pos.Index, pos.Offset) key := path.Join(c.Path, "generations", pos.Generation, "wal", litestream.FormatIndex(pos.Index), litestream.FormatOffset(pos.Offset)+".wal.lz4")
if err != nil {
return info, fmt.Errorf("cannot determine wal segment path: %w", err)
}
startTime := time.Now() startTime := time.Now()
rc := internal.NewReadCounter(rd) rc := internal.NewReadCounter(rd)
@@ -296,12 +291,11 @@ func (c *ReplicaClient) WriteWALSegment(ctx context.Context, pos litestream.Pos,
func (c *ReplicaClient) WALSegmentReader(ctx context.Context, pos litestream.Pos) (io.ReadCloser, error) { func (c *ReplicaClient) WALSegmentReader(ctx context.Context, pos litestream.Pos) (io.ReadCloser, error) {
if err := c.Init(ctx); err != nil { if err := c.Init(ctx); err != nil {
return nil, err return nil, err
} else if pos.Generation == "" {
return nil, fmt.Errorf("generation required")
} }
key, err := litestream.WALSegmentPath(c.Path, pos.Generation, pos.Index, pos.Offset) key := path.Join(c.Path, "generations", pos.Generation, "wal", litestream.FormatIndex(pos.Index), litestream.FormatOffset(pos.Offset)+".wal.lz4")
if err != nil {
return nil, fmt.Errorf("cannot determine wal segment path: %w", err)
}
blobURL := c.containerURL.NewBlobURL(key) blobURL := c.containerURL.NewBlobURL(key)
resp, err := blobURL.Download(ctx, 0, 0, azblob.BlobAccessConditions{}, false, azblob.ClientProvidedKeyOptions{}) resp, err := blobURL.Download(ctx, 0, 0, azblob.BlobAccessConditions{}, false, azblob.ClientProvidedKeyOptions{})
@@ -324,11 +318,12 @@ func (c *ReplicaClient) DeleteWALSegments(ctx context.Context, a []litestream.Po
} }
for _, pos := range a { for _, pos := range a {
key, err := litestream.WALSegmentPath(c.Path, pos.Generation, pos.Index, pos.Offset) if pos.Generation == "" {
if err != nil { return fmt.Errorf("generation required")
return fmt.Errorf("cannot determine wal segment path: %w", err)
} }
key := path.Join(c.Path, "generations", pos.Generation, "wal", litestream.FormatIndex(pos.Index), litestream.FormatOffset(pos.Offset)+".wal.lz4")
internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "DELETE").Inc() internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "DELETE").Inc()
blobURL := c.containerURL.NewBlobURL(key) blobURL := c.containerURL.NewBlobURL(key)
@@ -372,24 +367,24 @@ func newSnapshotIterator(ctx context.Context, generation string, client *Replica
func (itr *snapshotIterator) fetch() error { func (itr *snapshotIterator) fetch() error {
defer close(itr.ch) defer close(itr.ch)
dir, err := litestream.SnapshotsPath(itr.client.Path, itr.generation) if itr.generation == "" {
if err != nil { return fmt.Errorf("generation required")
return fmt.Errorf("cannot determine snapshots path: %w", err)
} }
prefix := path.Join(itr.client.Path, "generations", itr.generation) + "/"
var marker azblob.Marker var marker azblob.Marker
for marker.NotDone() { for marker.NotDone() {
internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "LIST").Inc() internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "LIST").Inc()
resp, err := itr.client.containerURL.ListBlobsFlatSegment(itr.ctx, marker, azblob.ListBlobsSegmentOptions{Prefix: dir + "/"}) resp, err := itr.client.containerURL.ListBlobsFlatSegment(itr.ctx, marker, azblob.ListBlobsSegmentOptions{Prefix: prefix})
if err != nil { if err != nil {
return err return err
} }
marker = resp.NextMarker marker = resp.NextMarker
for _, item := range resp.Segment.BlobItems { for _, item := range resp.Segment.BlobItems {
key := path.Base(item.Name) index, err := internal.ParseSnapshotPath(path.Base(item.Name))
index, err := litestream.ParseSnapshotPath(key)
if err != nil { if err != nil {
continue continue
} }
@@ -478,24 +473,24 @@ func newWALSegmentIterator(ctx context.Context, generation string, client *Repli
func (itr *walSegmentIterator) fetch() error { func (itr *walSegmentIterator) fetch() error {
defer close(itr.ch) defer close(itr.ch)
dir, err := litestream.WALPath(itr.client.Path, itr.generation) if itr.generation == "" {
if err != nil { return fmt.Errorf("generation required")
return fmt.Errorf("cannot determine wal path: %w", err)
} }
prefix := path.Join(itr.client.Path, "generations", itr.generation, "wal")
var marker azblob.Marker var marker azblob.Marker
for marker.NotDone() { for marker.NotDone() {
internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "LIST").Inc() internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "LIST").Inc()
resp, err := itr.client.containerURL.ListBlobsFlatSegment(itr.ctx, marker, azblob.ListBlobsSegmentOptions{Prefix: dir + "/"}) resp, err := itr.client.containerURL.ListBlobsFlatSegment(itr.ctx, marker, azblob.ListBlobsSegmentOptions{Prefix: prefix})
if err != nil { if err != nil {
return err return err
} }
marker = resp.NextMarker marker = resp.NextMarker
for _, item := range resp.Segment.BlobItems { for _, item := range resp.Segment.BlobItems {
key := path.Base(item.Name) key := strings.TrimPrefix(item.Name, prefix+"/")
index, offset, err := litestream.ParseWALSegmentPath(key) index, offset, err := internal.ParseWALSegmentPath(key)
if err != nil { if err != nil {
continue continue
} }

View File

@@ -4,8 +4,8 @@ import (
"context" "context"
"flag" "flag"
"fmt" "fmt"
"log"
"os" "os"
"sort"
"text/tabwriter" "text/tabwriter"
"time" "time"
@@ -87,17 +87,15 @@ func (c *GenerationsCommand) Run(ctx context.Context, args []string) (err error)
for _, r := range replicas { for _, r := range replicas {
generations, err := r.Client.Generations(ctx) generations, err := r.Client.Generations(ctx)
if err != nil { if err != nil {
r.Logger().Error("cannot list generations", "error", err) log.Printf("%s: cannot list generations: %s", r.Name(), err)
continue continue
} }
sort.Strings(generations)
// Iterate over each generation for the replica. // Iterate over each generation for the replica.
for _, generation := range generations { for _, generation := range generations {
createdAt, updatedAt, err := r.GenerationTimeBounds(ctx, generation) createdAt, updatedAt, err := r.GenerationTimeBounds(ctx, generation)
if err != nil { if err != nil {
r.Logger().Error("cannot determine generation time bounds", "error", err) log.Printf("%s: cannot determine generation time bounds: %s", r.Name(), err)
continue continue
} }
@@ -124,7 +122,7 @@ cover.
Usage: Usage:
litestream generations [arguments] DB_PATH litestream generations [arguments] DB_PATH
litestream generations [arguments] REPLICA_URL litestream generations [arguments] REPLICA_URL
Arguments: Arguments:

View File

@@ -5,9 +5,11 @@ import (
"errors" "errors"
"flag" "flag"
"fmt" "fmt"
"log/slog" "io/ioutil"
"log"
"net/url" "net/url"
"os" "os"
"os/signal"
"os/user" "os/user"
"path" "path"
"path/filepath" "path/filepath"
@@ -16,7 +18,6 @@ import (
"strings" "strings"
"time" "time"
"filippo.io/age"
"github.com/benbjohnson/litestream" "github.com/benbjohnson/litestream"
"github.com/benbjohnson/litestream/abs" "github.com/benbjohnson/litestream/abs"
"github.com/benbjohnson/litestream/file" "github.com/benbjohnson/litestream/file"
@@ -36,11 +37,13 @@ var (
var errStop = errors.New("stop") var errStop = errors.New("stop")
func main() { func main() {
log.SetFlags(0)
m := NewMain() m := NewMain()
if err := m.Run(context.Background(), os.Args[1:]); err == flag.ErrHelp || err == errStop { if err := m.Run(context.Background(), os.Args[1:]); err == flag.ErrHelp || err == errStop {
os.Exit(1) os.Exit(1)
} else if err != nil { } else if err != nil {
slog.Error("failed to run", "error", err) log.Println(err)
os.Exit(1) os.Exit(1)
} }
} }
@@ -83,26 +86,32 @@ func (m *Main) Run(ctx context.Context, args []string) (err error) {
} }
// Setup signal handler. // Setup signal handler.
signalCh := signalChan() ctx, cancel := context.WithCancel(ctx)
signalCh := make(chan os.Signal, 1)
signal.Notify(signalCh, notifySignals...)
if err := c.Run(); err != nil { if err := c.Run(ctx); err != nil {
return err return err
} }
// Wait for signal to stop program. // Wait for signal to stop program.
select { select {
case <-ctx.Done():
fmt.Println("context done, litestream shutting down")
case err = <-c.execCh: case err = <-c.execCh:
slog.Info("subprocess exited, litestream shutting down") cancel()
fmt.Println("subprocess exited, litestream shutting down")
case sig := <-signalCh: case sig := <-signalCh:
slog.Info("signal received, litestream shutting down") cancel()
fmt.Println("signal received, litestream shutting down")
if c.cmd != nil { if c.cmd != nil {
slog.Info("sending signal to exec process") fmt.Println("sending signal to exec process")
if err := c.cmd.Process.Signal(sig); err != nil { if err := c.cmd.Process.Signal(sig); err != nil {
return fmt.Errorf("cannot signal exec process: %w", err) return fmt.Errorf("cannot signal exec process: %w", err)
} }
slog.Info("waiting for exec process to close") fmt.Println("waiting for exec process to close")
if err := <-c.execCh; err != nil && !strings.HasPrefix(err.Error(), "signal:") { if err := <-c.execCh; err != nil && !strings.HasPrefix(err.Error(), "signal:") {
return fmt.Errorf("cannot wait for exec process: %w", err) return fmt.Errorf("cannot wait for exec process: %w", err)
} }
@@ -113,7 +122,7 @@ func (m *Main) Run(ctx context.Context, args []string) (err error) {
if e := c.Close(); e != nil && err == nil { if e := c.Close(); e != nil && err == nil {
err = e err = e
} }
slog.Info("litestream shut down") fmt.Println("litestream shut down")
return err return err
case "restore": case "restore":
@@ -169,16 +178,6 @@ type Config struct {
// Global S3 settings // Global S3 settings
AccessKeyID string `yaml:"access-key-id"` AccessKeyID string `yaml:"access-key-id"`
SecretAccessKey string `yaml:"secret-access-key"` SecretAccessKey string `yaml:"secret-access-key"`
// Logging
Logging LoggingConfig `yaml:"logging"`
}
// LoggingConfig configures logging.
type LoggingConfig struct {
Level string `yaml:"level"`
Type string `yaml:"type"`
Stderr bool `yaml:"stderr"`
} }
// propagateGlobalSettings copies global S3 settings to replica configs. // propagateGlobalSettings copies global S3 settings to replica configs.
@@ -222,7 +221,7 @@ func ReadConfigFile(filename string, expandEnv bool) (_ Config, err error) {
} }
// Read configuration. // Read configuration.
buf, err := os.ReadFile(filename) buf, err := ioutil.ReadFile(filename)
if os.IsNotExist(err) { if os.IsNotExist(err) {
return config, fmt.Errorf("config file not found: %s", filename) return config, fmt.Errorf("config file not found: %s", filename)
} else if err != nil { } else if err != nil {
@@ -248,43 +247,12 @@ func ReadConfigFile(filename string, expandEnv bool) (_ Config, err error) {
// Propagate settings from global config to replica configs. // Propagate settings from global config to replica configs.
config.propagateGlobalSettings() config.propagateGlobalSettings()
// Configure logging.
logOutput := os.Stdout
if config.Logging.Stderr {
logOutput = os.Stderr
}
logOptions := slog.HandlerOptions{
Level: slog.LevelInfo,
}
switch strings.ToUpper(config.Logging.Level) {
case "DEBUG":
logOptions.Level = slog.LevelDebug
case "WARN", "WARNING":
logOptions.Level = slog.LevelWarn
case "ERROR":
logOptions.Level = slog.LevelError
}
var logHandler slog.Handler
switch config.Logging.Type {
case "json":
logHandler = slog.NewJSONHandler(logOutput, &logOptions)
case "text", "":
logHandler = slog.NewTextHandler(logOutput, &logOptions)
}
// Set global default logger.
slog.SetDefault(slog.New(logHandler))
return config, nil return config, nil
} }
// DBConfig represents the configuration for a single database. // DBConfig represents the configuration for a single database.
type DBConfig struct { type DBConfig struct {
Path string `yaml:"path"` Path string `yaml:"path"`
MetaPath *string `yaml:"meta-path"`
MonitorInterval *time.Duration `yaml:"monitor-interval"` MonitorInterval *time.Duration `yaml:"monitor-interval"`
CheckpointInterval *time.Duration `yaml:"checkpoint-interval"` CheckpointInterval *time.Duration `yaml:"checkpoint-interval"`
MinCheckpointPageN *int `yaml:"min-checkpoint-page-count"` MinCheckpointPageN *int `yaml:"min-checkpoint-page-count"`
@@ -304,9 +272,6 @@ func NewDBFromConfig(dbc *DBConfig) (*litestream.DB, error) {
db := litestream.NewDB(path) db := litestream.NewDB(path)
// Override default database settings if specified in configuration. // Override default database settings if specified in configuration.
if dbc.MetaPath != nil {
db.SetMetaPath(*dbc.MetaPath)
}
if dbc.MonitorInterval != nil { if dbc.MonitorInterval != nil {
db.MonitorInterval = *dbc.MonitorInterval db.MonitorInterval = *dbc.MonitorInterval
} }
@@ -362,12 +327,6 @@ type ReplicaConfig struct {
User string `yaml:"user"` User string `yaml:"user"`
Password string `yaml:"password"` Password string `yaml:"password"`
KeyPath string `yaml:"key-path"` KeyPath string `yaml:"key-path"`
// Encryption identities and recipients
Age struct {
Identities []string `yaml:"identities"`
Recipients []string `yaml:"recipients"`
} `yaml:"age"`
} }
// NewReplicaFromConfig instantiates a replica for a DB based on a config. // NewReplicaFromConfig instantiates a replica for a DB based on a config.
@@ -394,22 +353,6 @@ func NewReplicaFromConfig(c *ReplicaConfig, db *litestream.DB) (_ *litestream.Re
if v := c.ValidationInterval; v != nil { if v := c.ValidationInterval; v != nil {
r.ValidationInterval = *v r.ValidationInterval = *v
} }
for _, str := range c.Age.Identities {
identities, err := age.ParseIdentities(strings.NewReader(str))
if err != nil {
return nil, err
}
r.AgeIdentities = append(r.AgeIdentities, identities...)
}
for _, str := range c.Age.Recipients {
recipients, err := age.ParseRecipients(strings.NewReader(str))
if err != nil {
return nil, err
}
r.AgeRecipients = append(r.AgeRecipients, recipients...)
}
// Build and set client on replica. // Build and set client on replica.
switch c.ReplicaType() { switch c.ReplicaType() {

View File

@@ -1,11 +1,10 @@
//go:build !windows // +build !windows
package main package main
import ( import (
"context" "context"
"os" "os"
"os/signal"
"syscall" "syscall"
) )
@@ -19,8 +18,4 @@ func runWindowsService(ctx context.Context) error {
panic("cannot run windows service as unix process") panic("cannot run windows service as unix process")
} }
func signalChan() <-chan os.Signal { var notifySignals = []os.Signal{syscall.SIGINT, syscall.SIGTERM}
ch := make(chan os.Signal, 2)
signal.Notify(ch, syscall.SIGINT, syscall.SIGTERM)
return ch
}

View File

@@ -1,21 +1,28 @@
package main_test package main_test
import ( import (
"io/ioutil"
"log"
"os" "os"
"path/filepath" "path/filepath"
"testing" "testing"
"github.com/benbjohnson/litestream"
main "github.com/benbjohnson/litestream/cmd/litestream" main "github.com/benbjohnson/litestream/cmd/litestream"
"github.com/benbjohnson/litestream/file" "github.com/benbjohnson/litestream/file"
"github.com/benbjohnson/litestream/gcs" "github.com/benbjohnson/litestream/gcs"
"github.com/benbjohnson/litestream/s3" "github.com/benbjohnson/litestream/s3"
) )
func init() {
litestream.LogFlags = log.Lmsgprefix | log.Ldate | log.Ltime | log.Lmicroseconds | log.LUTC | log.Lshortfile
}
func TestReadConfigFile(t *testing.T) { func TestReadConfigFile(t *testing.T) {
// Ensure global AWS settings are propagated down to replica configurations. // Ensure global AWS settings are propagated down to replica configurations.
t.Run("PropagateGlobalSettings", func(t *testing.T) { t.Run("PropagateGlobalSettings", func(t *testing.T) {
filename := filepath.Join(t.TempDir(), "litestream.yml") filename := filepath.Join(t.TempDir(), "litestream.yml")
if err := os.WriteFile(filename, []byte(` if err := ioutil.WriteFile(filename, []byte(`
access-key-id: XXX access-key-id: XXX
secret-access-key: YYY secret-access-key: YYY
@@ -47,7 +54,7 @@ dbs:
os.Setenv("LITESTREAM_TEST_1872363", "s3://foo/bar") os.Setenv("LITESTREAM_TEST_1872363", "s3://foo/bar")
filename := filepath.Join(t.TempDir(), "litestream.yml") filename := filepath.Join(t.TempDir(), "litestream.yml")
if err := os.WriteFile(filename, []byte(` if err := ioutil.WriteFile(filename, []byte(`
dbs: dbs:
- path: $LITESTREAM_TEST_0129380 - path: $LITESTREAM_TEST_0129380
replicas: replicas:
@@ -74,7 +81,7 @@ dbs:
os.Setenv("LITESTREAM_TEST_9847533", "s3://foo/bar") os.Setenv("LITESTREAM_TEST_9847533", "s3://foo/bar")
filename := filepath.Join(t.TempDir(), "litestream.yml") filename := filepath.Join(t.TempDir(), "litestream.yml")
if err := os.WriteFile(filename, []byte(` if err := ioutil.WriteFile(filename, []byte(`
dbs: dbs:
- path: /path/to/db - path: /path/to/db
replicas: replicas:

View File

@@ -1,13 +1,12 @@
//go:build windows // +build windows
package main package main
import ( import (
"context" "context"
"io" "io"
"log/slog" "log"
"os" "os"
"os/signal"
"golang.org/x/sys/windows" "golang.org/x/sys/windows"
"golang.org/x/sys/windows/svc" "golang.org/x/sys/windows/svc"
@@ -36,16 +35,16 @@ func runWindowsService(ctx context.Context) error {
defer elog.Close() defer elog.Close()
// Set eventlog as log writer while running. // Set eventlog as log writer while running.
slog.SetDefault(slog.New(slog.NewTextHandler((*eventlogWriter)(elog), nil))) log.SetOutput((*eventlogWriter)(elog))
defer slog.SetDefault(slog.New(slog.NewTextHandler(os.Stderr, nil))) defer log.SetOutput(os.Stderr)
slog.Info("Litestream service starting") log.Print("Litestream service starting")
if err := svc.Run(serviceName, &windowsService{ctx: ctx}); err != nil { if err := svc.Run(serviceName, &windowsService{ctx: ctx}); err != nil {
return errStop return errStop
} }
slog.Info("Litestream service stopped") log.Print("Litestream service stopped")
return nil return nil
} }
@@ -63,13 +62,13 @@ func (s *windowsService) Execute(args []string, r <-chan svc.ChangeRequest, stat
// Instantiate replication command and load configuration. // Instantiate replication command and load configuration.
c := NewReplicateCommand() c := NewReplicateCommand()
if c.Config, err = ReadConfigFile(DefaultConfigPath(), true); err != nil { if c.Config, err = ReadConfigFile(DefaultConfigPath(), true); err != nil {
slog.Error("cannot load configuration", "error", err) log.Printf("cannot load configuration: %s", err)
return true, 1 return true, 1
} }
// Execute replication command. // Execute replication command.
if err := c.Run(); err != nil { if err := c.Run(s.ctx); err != nil {
slog.Error("cannot replicate", "error", err) log.Printf("cannot replicate: %s", err)
statusCh <- svc.Status{State: svc.StopPending} statusCh <- svc.Status{State: svc.StopPending}
return true, 2 return true, 2
} }
@@ -88,7 +87,7 @@ func (s *windowsService) Execute(args []string, r <-chan svc.ChangeRequest, stat
case svc.Interrogate: case svc.Interrogate:
statusCh <- req.CurrentStatus statusCh <- req.CurrentStatus
default: default:
slog.Error("Litestream service received unexpected change request", "cmd", req.Cmd) log.Printf("Litestream service received unexpected change request cmd: %d", req.Cmd)
} }
} }
} }
@@ -105,8 +104,4 @@ func (w *eventlogWriter) Write(p []byte) (n int, err error) {
return 0, elog.Info(1, string(p)) return 0, elog.Info(1, string(p))
} }
func signalChan() <-chan os.Signal { var notifySignals = []os.Signal{os.Interrupt}
ch := make(chan os.Signal, 1)
signal.Notify(ch, os.Interrupt)
return ch
}

View File

@@ -4,7 +4,7 @@ import (
"context" "context"
"flag" "flag"
"fmt" "fmt"
"log/slog" "log"
"net" "net"
"net/http" "net/http"
_ "net/http/pprof" _ "net/http/pprof"
@@ -83,13 +83,13 @@ func (c *ReplicateCommand) ParseFlags(ctx context.Context, args []string) (err e
} }
// Run loads all databases specified in the configuration. // Run loads all databases specified in the configuration.
func (c *ReplicateCommand) Run() (err error) { func (c *ReplicateCommand) Run(ctx context.Context) (err error) {
// Display version information. // Display version information.
slog.Info("litestream", "version", Version) log.Printf("litestream %s", Version)
// Setup databases. // Setup databases.
if len(c.Config.DBs) == 0 { if len(c.Config.DBs) == 0 {
slog.Error("no databases specified in configuration") log.Println("no databases specified in configuration")
} }
for _, dbConfig := range c.Config.DBs { for _, dbConfig := range c.Config.DBs {
@@ -107,22 +107,21 @@ func (c *ReplicateCommand) Run() (err error) {
// Notify user that initialization is done. // Notify user that initialization is done.
for _, db := range c.DBs { for _, db := range c.DBs {
slog.Info("initialized db", "path", db.Path()) log.Printf("initialized db: %s", db.Path())
for _, r := range db.Replicas { for _, r := range db.Replicas {
slog := slog.With("name", r.Name(), "type", r.Client.Type(), "sync-interval", r.SyncInterval)
switch client := r.Client.(type) { switch client := r.Client.(type) {
case *file.ReplicaClient: case *file.ReplicaClient:
slog.Info("replicating to", "path", client.Path()) log.Printf("replicating to: name=%q type=%q path=%q", r.Name(), client.Type(), client.Path())
case *s3.ReplicaClient: case *s3.ReplicaClient:
slog.Info("replicating to", "bucket", client.Bucket, "path", client.Path, "region", client.Region, "endpoint", client.Endpoint) log.Printf("replicating to: name=%q type=%q bucket=%q path=%q region=%q endpoint=%q sync-interval=%s", r.Name(), client.Type(), client.Bucket, client.Path, client.Region, client.Endpoint, r.SyncInterval)
case *gcs.ReplicaClient: case *gcs.ReplicaClient:
slog.Info("replicating to", "bucket", client.Bucket, "path", client.Path) log.Printf("replicating to: name=%q type=%q bucket=%q path=%q sync-interval=%s", r.Name(), client.Type(), client.Bucket, client.Path, r.SyncInterval)
case *abs.ReplicaClient: case *abs.ReplicaClient:
slog.Info("replicating to", "bucket", client.Bucket, "path", client.Path, "endpoint", client.Endpoint) log.Printf("replicating to: name=%q type=%q bucket=%q path=%q endpoint=%q sync-interval=%s", r.Name(), client.Type(), client.Bucket, client.Path, client.Endpoint, r.SyncInterval)
case *sftp.ReplicaClient: case *sftp.ReplicaClient:
slog.Info("replicating to", "host", client.Host, "user", client.User, "path", client.Path) log.Printf("replicating to: name=%q type=%q host=%q user=%q path=%q sync-interval=%s", r.Name(), client.Type(), client.Host, client.User, client.Path, r.SyncInterval)
default: default:
slog.Info("replicating to") log.Printf("replicating to: name=%q type=%q", r.Name(), client.Type())
} }
} }
} }
@@ -136,11 +135,11 @@ func (c *ReplicateCommand) Run() (err error) {
hostport = net.JoinHostPort("localhost", port) hostport = net.JoinHostPort("localhost", port)
} }
slog.Info("serving metrics on", "url", fmt.Sprintf("http://%s/metrics", hostport)) log.Printf("serving metrics on http://%s/metrics", hostport)
go func() { go func() {
http.Handle("/metrics", promhttp.Handler()) http.Handle("/metrics", promhttp.Handler())
if err := http.ListenAndServe(c.Config.Addr, nil); err != nil { if err := http.ListenAndServe(c.Config.Addr, nil); err != nil {
slog.Error("cannot start metrics server", "error", err) log.Printf("cannot start metrics server: %s", err)
} }
}() }()
} }
@@ -152,7 +151,7 @@ func (c *ReplicateCommand) Run() (err error) {
return fmt.Errorf("cannot parse exec command: %w", err) return fmt.Errorf("cannot parse exec command: %w", err)
} }
c.cmd = exec.Command(execArgs[0], execArgs[1:]...) c.cmd = exec.CommandContext(ctx, execArgs[0], execArgs[1:]...)
c.cmd.Env = os.Environ() c.cmd.Env = os.Environ()
c.cmd.Stdout = os.Stdout c.cmd.Stdout = os.Stdout
c.cmd.Stderr = os.Stderr c.cmd.Stderr = os.Stderr
@@ -168,8 +167,8 @@ func (c *ReplicateCommand) Run() (err error) {
// Close closes all open databases. // Close closes all open databases.
func (c *ReplicateCommand) Close() (err error) { func (c *ReplicateCommand) Close() (err error) {
for _, db := range c.DBs { for _, db := range c.DBs {
if e := db.Close(context.Background()); e != nil { if e := db.SoftClose(); e != nil {
db.Logger.Error("error closing db", "error", e) log.Printf("error closing db: path=%s err=%s", db.Path(), e)
if err == nil { if err == nil {
err = e err = e
} }
@@ -181,7 +180,7 @@ func (c *ReplicateCommand) Close() (err error) {
// Usage prints the help screen to STDOUT. // Usage prints the help screen to STDOUT.
func (c *ReplicateCommand) Usage() { func (c *ReplicateCommand) Usage() {
fmt.Printf(` fmt.Printf(`
The replicate command starts a server to monitor & replicate databases. The replicate command starts a server to monitor & replicate databases.
You can specify your database & replicas in a configuration file or you can You can specify your database & replicas in a configuration file or you can
replicate a single database file by specifying its path and its replicas in the replicate a single database file by specifying its path and its replicas in the
command line arguments. command line arguments.

View File

@@ -0,0 +1,135 @@
package main_test
import (
"context"
"database/sql"
"errors"
"fmt"
"hash/crc64"
"io"
"os"
"path/filepath"
"runtime"
"testing"
"time"
main "github.com/benbjohnson/litestream/cmd/litestream"
"golang.org/x/sync/errgroup"
)
// TestReplicateCommand is a long-running system test: it continuously writes
// to a SQLite database while the `replicate` command mirrors it to a file
// replica, then restores the replica to a new path and verifies the restored
// database matches the original via a CRC-64 checksum.
func TestReplicateCommand(t *testing.T) {
	if testing.Short() {
		t.Skip("long running test, skipping")
	} else if runtime.GOOS != "linux" {
		t.Skip("must run system tests on Linux, skipping")
	}

	// Duration to spend continuously writing rows before shutting down.
	const writeTime = 10 * time.Second

	dir := t.TempDir()
	configPath := filepath.Join(dir, "litestream.yml")
	dbPath := filepath.Join(dir, "db")
	restorePath := filepath.Join(dir, "restored")
	replicaPath := filepath.Join(dir, "replica")

	if err := os.WriteFile(configPath, []byte(`
dbs:
  - path: `+dbPath+`
    replicas:
      - path: `+replicaPath+`
`), 0666); err != nil {
		t.Fatal(err)
	}

	// Generate data into SQLite database from separate goroutine.
	g, ctx := errgroup.WithContext(context.Background())
	mainctx, cancel := context.WithCancel(ctx)
	g.Go(func() error {
		// Cancel the replicate command's context once writing finishes so
		// both goroutines shut down together.
		defer cancel()

		db, err := sql.Open("sqlite3", dbPath)
		if err != nil {
			return err
		}
		defer db.Close()

		if _, err := db.ExecContext(ctx, `PRAGMA journal_mode = WAL`); err != nil {
			return fmt.Errorf("cannot enable wal: %w", err)
		} else if _, err := db.ExecContext(ctx, `PRAGMA synchronous = NORMAL`); err != nil {
			return fmt.Errorf("cannot set synchronous pragma: %w", err)
		} else if _, err := db.ExecContext(ctx, `CREATE TABLE t (id INTEGER PRIMARY KEY)`); err != nil {
			return fmt.Errorf("cannot create table: %w", err)
		}

		ticker := time.NewTicker(1 * time.Millisecond)
		defer ticker.Stop()
		timer := time.NewTimer(writeTime)
		defer timer.Stop()

		// Insert a row roughly every millisecond until the write window ends
		// or the shared context is canceled (e.g. replicate failed).
		for i := 0; ; i++ {
			select {
			case <-ctx.Done():
				return ctx.Err()
			case <-timer.C:
				return nil
			case <-ticker.C:
				if _, err := db.ExecContext(ctx, `INSERT INTO t (id) VALUES (?);`, i); err != nil {
					return fmt.Errorf("cannot insert: i=%d err=%w", i, err)
				}
			}
		}
	})

	// Replicate database unless the context is canceled.
	g.Go(func() error {
		return main.NewMain().Run(mainctx, []string{"replicate", "-config", configPath})
	})

	if err := g.Wait(); err != nil {
		t.Fatal(err)
	}

	// Checkpoint database so the main database file reflects every write,
	// then snapshot its checksum for comparison after restore.
	mustCheckpoint(t, dbPath)
	chksum0 := mustChecksum(t, dbPath)

	// Restore to another path.
	if err := main.NewMain().Run(context.Background(), []string{"restore", "-config", configPath, "-o", restorePath, dbPath}); err != nil && !errors.Is(err, context.Canceled) {
		t.Fatal(err)
	}

	// Verify contents match.
	if chksum1 := mustChecksum(t, restorePath); chksum0 != chksum1 {
		t.Fatal("restore mismatch")
	}
}
// mustCheckpoint forces a TRUNCATE checkpoint on the SQLite database at
// path so the main database file absorbs the entire WAL. Any failure to
// open or checkpoint the database aborts the test immediately.
func mustCheckpoint(tb testing.TB, path string) {
	tb.Helper()

	conn, err := sql.Open("sqlite3", path)
	if err != nil {
		tb.Fatal(err)
	}
	defer conn.Close()

	if _, err = conn.Exec(`PRAGMA wal_checkpoint(TRUNCATE)`); err != nil {
		tb.Fatal(err)
	}
}
func mustChecksum(tb testing.TB, path string) uint64 {
tb.Helper()
f, err := os.Open(path)
if err != nil {
tb.Fatal(err)
}
defer f.Close()
h := crc64.New(crc64.MakeTable(crc64.ISO))
if _, err := io.Copy(h, f); err != nil {
tb.Fatal(err)
}
return h.Sum64()
}

View File

@@ -5,7 +5,7 @@ import (
"errors" "errors"
"flag" "flag"
"fmt" "fmt"
"log/slog" "log"
"os" "os"
"strconv" "strconv"
"time" "time"
@@ -19,6 +19,7 @@ type RestoreCommand struct{}
// Run executes the command. // Run executes the command.
func (c *RestoreCommand) Run(ctx context.Context, args []string) (err error) { func (c *RestoreCommand) Run(ctx context.Context, args []string) (err error) {
opt := litestream.NewRestoreOptions() opt := litestream.NewRestoreOptions()
opt.Verbose = true
fs := flag.NewFlagSet("litestream-restore", flag.ContinueOnError) fs := flag.NewFlagSet("litestream-restore", flag.ContinueOnError)
configPath, noExpandEnv := registerConfigFlag(fs) configPath, noExpandEnv := registerConfigFlag(fs)
@@ -30,6 +31,7 @@ func (c *RestoreCommand) Run(ctx context.Context, args []string) (err error) {
ifDBNotExists := fs.Bool("if-db-not-exists", false, "") ifDBNotExists := fs.Bool("if-db-not-exists", false, "")
ifReplicaExists := fs.Bool("if-replica-exists", false, "") ifReplicaExists := fs.Bool("if-replica-exists", false, "")
timestampStr := fs.String("timestamp", "", "timestamp") timestampStr := fs.String("timestamp", "", "timestamp")
verbose := fs.Bool("v", false, "verbose output")
fs.Usage = c.Usage fs.Usage = c.Usage
if err := fs.Parse(args); err != nil { if err := fs.Parse(args); err != nil {
return err return err
@@ -46,6 +48,11 @@ func (c *RestoreCommand) Run(ctx context.Context, args []string) (err error) {
} }
} }
// Instantiate logger if verbose output is enabled.
if *verbose {
opt.Logger = log.New(os.Stderr, "", log.LstdFlags|log.Lmicroseconds)
}
// Determine replica & generation to restore from. // Determine replica & generation to restore from.
var r *litestream.Replica var r *litestream.Replica
if isURL(fs.Arg(0)) { if isURL(fs.Arg(0)) {
@@ -53,7 +60,7 @@ func (c *RestoreCommand) Run(ctx context.Context, args []string) (err error) {
return fmt.Errorf("cannot specify a replica URL and the -config flag") return fmt.Errorf("cannot specify a replica URL and the -config flag")
} }
if r, err = c.loadFromURL(ctx, fs.Arg(0), *ifDBNotExists, &opt); err == errSkipDBExists { if r, err = c.loadFromURL(ctx, fs.Arg(0), *ifDBNotExists, &opt); err == errSkipDBExists {
slog.Info("database already exists, skipping") fmt.Println("database already exists, skipping")
return nil return nil
} else if err != nil { } else if err != nil {
return err return err
@@ -63,7 +70,7 @@ func (c *RestoreCommand) Run(ctx context.Context, args []string) (err error) {
*configPath = DefaultConfigPath() *configPath = DefaultConfigPath()
} }
if r, err = c.loadFromConfig(ctx, fs.Arg(0), *configPath, !*noExpandEnv, *ifDBNotExists, &opt); err == errSkipDBExists { if r, err = c.loadFromConfig(ctx, fs.Arg(0), *configPath, !*noExpandEnv, *ifDBNotExists, &opt); err == errSkipDBExists {
slog.Info("database already exists, skipping") fmt.Println("database already exists, skipping")
return nil return nil
} else if err != nil { } else if err != nil {
return err return err
@@ -74,7 +81,7 @@ func (c *RestoreCommand) Run(ctx context.Context, args []string) (err error) {
// If optional flag set, return success. Useful for automated recovery. // If optional flag set, return success. Useful for automated recovery.
if opt.Generation == "" { if opt.Generation == "" {
if *ifReplicaExists { if *ifReplicaExists {
slog.Info("no matching backups found") fmt.Println("no matching backups found")
return nil return nil
} }
return fmt.Errorf("no matching backups found") return fmt.Errorf("no matching backups found")
@@ -197,6 +204,9 @@ Arguments:
Determines the number of WAL files downloaded in parallel. Determines the number of WAL files downloaded in parallel.
Defaults to `+strconv.Itoa(litestream.DefaultRestoreParallelism)+`. Defaults to `+strconv.Itoa(litestream.DefaultRestoreParallelism)+`.
-v
Verbose output.
Examples: Examples:

View File

@@ -4,7 +4,7 @@ import (
"context" "context"
"flag" "flag"
"fmt" "fmt"
"log/slog" "log"
"os" "os"
"text/tabwriter" "text/tabwriter"
"time" "time"
@@ -82,7 +82,7 @@ func (c *SnapshotsCommand) Run(ctx context.Context, args []string) (err error) {
for _, r := range replicas { for _, r := range replicas {
infos, err := r.Snapshots(ctx) infos, err := r.Snapshots(ctx)
if err != nil { if err != nil {
slog.Error("cannot determine snapshots", "error", err) log.Printf("cannot determine snapshots: %s", err)
continue continue
} }
for _, info := range infos { for _, info := range infos {

View File

@@ -4,6 +4,7 @@ import (
"context" "context"
"flag" "flag"
"fmt" "fmt"
"log"
"os" "os"
"text/tabwriter" "text/tabwriter"
"time" "time"
@@ -85,7 +86,7 @@ func (c *WALCommand) Run(ctx context.Context, args []string) (err error) {
generations = []string{*generation} generations = []string{*generation}
} else { } else {
if generations, err = r.Client.Generations(ctx); err != nil { if generations, err = r.Client.Generations(ctx); err != nil {
r.Logger().Error("cannot determine generations", "error", err) log.Printf("%s: cannot determine generations: %s", r.Name(), err)
continue continue
} }
} }
@@ -101,7 +102,7 @@ func (c *WALCommand) Run(ctx context.Context, args []string) (err error) {
for itr.Next() { for itr.Next() {
info := itr.WALSegment() info := itr.WALSegment()
fmt.Fprintf(w, "%s\t%s\t%x\t%d\t%d\t%s\n", fmt.Fprintf(w, "%s\t%s\t%d\t%d\t%d\t%s\n",
r.Name(), r.Name(),
info.Generation, info.Generation,
info.Index, info.Index,
@@ -112,7 +113,7 @@ func (c *WALCommand) Run(ctx context.Context, args []string) (err error) {
} }
return itr.Close() return itr.Close()
}(); err != nil { }(); err != nil {
r.Logger().Error("cannot fetch wal segments", "error", err) log.Printf("%s: cannot fetch wal segments: %s", r.Name(), err)
continue continue
} }
} }

1182
db.go

File diff suppressed because it is too large Load Diff

View File

@@ -29,13 +29,13 @@ func TestDB_WALPath(t *testing.T) {
func TestDB_MetaPath(t *testing.T) { func TestDB_MetaPath(t *testing.T) {
t.Run("Absolute", func(t *testing.T) { t.Run("Absolute", func(t *testing.T) {
db := litestream.NewDB("/tmp/db") db := litestream.NewDB("/tmp/db")
if got, want := db.MetaPath(), `/tmp/.db-litestream`; got != want { if got, want := db.MetaPath(), `/tmp/db-litestream`; got != want {
t.Fatalf("MetaPath()=%v, want %v", got, want) t.Fatalf("MetaPath()=%v, want %v", got, want)
} }
}) })
t.Run("Relative", func(t *testing.T) { t.Run("Relative", func(t *testing.T) {
db := litestream.NewDB("db") db := litestream.NewDB("db")
if got, want := db.MetaPath(), `.db-litestream`; got != want { if got, want := db.MetaPath(), `db-litestream`; got != want {
t.Fatalf("MetaPath()=%v, want %v", got, want) t.Fatalf("MetaPath()=%v, want %v", got, want)
} }
}) })
@@ -43,32 +43,25 @@ func TestDB_MetaPath(t *testing.T) {
func TestDB_GenerationNamePath(t *testing.T) { func TestDB_GenerationNamePath(t *testing.T) {
db := litestream.NewDB("/tmp/db") db := litestream.NewDB("/tmp/db")
if got, want := db.GenerationNamePath(), `/tmp/.db-litestream/generation`; got != want { if got, want := db.GenerationNamePath(), `/tmp/db-litestream/generation`; got != want {
t.Fatalf("GenerationNamePath()=%v, want %v", got, want) t.Fatalf("GenerationNamePath()=%v, want %v", got, want)
} }
} }
func TestDB_GenerationPath(t *testing.T) { func TestDB_GenerationPath(t *testing.T) {
db := litestream.NewDB("/tmp/db") db := litestream.NewDB("/tmp/db")
if got, want := db.GenerationPath("xxxx"), `/tmp/.db-litestream/generations/xxxx`; got != want { if got, want := db.GenerationPath("xxxx"), `/tmp/db-litestream/generations/xxxx`; got != want {
t.Fatalf("GenerationPath()=%v, want %v", got, want) t.Fatalf("GenerationPath()=%v, want %v", got, want)
} }
} }
func TestDB_ShadowWALDir(t *testing.T) { func TestDB_ShadowWALDir(t *testing.T) {
db := litestream.NewDB("/tmp/db") db := litestream.NewDB("/tmp/db")
if got, want := db.ShadowWALDir("xxxx"), `/tmp/.db-litestream/generations/xxxx/wal`; got != want { if got, want := db.ShadowWALDir("xxxx"), `/tmp/db-litestream/generations/xxxx/wal`; got != want {
t.Fatalf("ShadowWALDir()=%v, want %v", got, want) t.Fatalf("ShadowWALDir()=%v, want %v", got, want)
} }
} }
func TestDB_ShadowWALPath(t *testing.T) {
db := litestream.NewDB("/tmp/db")
if got, want := db.ShadowWALPath("xxxx", 1000), `/tmp/.db-litestream/generations/xxxx/wal/000003e8.wal`; got != want {
t.Fatalf("ShadowWALPath()=%v, want %v", got, want)
}
}
// Ensure we can check the last modified time of the real database and its WAL. // Ensure we can check the last modified time of the real database and its WAL.
func TestDB_UpdatedAt(t *testing.T) { func TestDB_UpdatedAt(t *testing.T) {
t.Run("ErrNotExist", func(t *testing.T) { t.Run("ErrNotExist", func(t *testing.T) {
@@ -194,9 +187,7 @@ func TestDB_Sync(t *testing.T) {
} }
// Ensure position now available. // Ensure position now available.
if pos, err := db.Pos(); err != nil { if pos := db.Pos(); pos.Generation == "" {
t.Fatal(err)
} else if pos.Generation == "" {
t.Fatal("expected generation") t.Fatal("expected generation")
} else if got, want := pos.Index, 0; got != want { } else if got, want := pos.Index, 0; got != want {
t.Fatalf("pos.Index=%v, want %v", got, want) t.Fatalf("pos.Index=%v, want %v", got, want)
@@ -220,10 +211,7 @@ func TestDB_Sync(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
pos0, err := db.Pos() pos0 := db.Pos()
if err != nil {
t.Fatal(err)
}
// Insert into table. // Insert into table.
if _, err := sqldb.Exec(`INSERT INTO foo (bar) VALUES ('baz');`); err != nil { if _, err := sqldb.Exec(`INSERT INTO foo (bar) VALUES ('baz');`); err != nil {
@@ -233,9 +221,7 @@ func TestDB_Sync(t *testing.T) {
// Sync to ensure position moves forward one page. // Sync to ensure position moves forward one page.
if err := db.Sync(context.Background()); err != nil { if err := db.Sync(context.Background()); err != nil {
t.Fatal(err) t.Fatal(err)
} else if pos1, err := db.Pos(); err != nil { } else if pos1 := db.Pos(); pos0.Generation != pos1.Generation {
t.Fatal(err)
} else if pos0.Generation != pos1.Generation {
t.Fatal("expected the same generation") t.Fatal("expected the same generation")
} else if got, want := pos1.Index, pos0.Index; got != want { } else if got, want := pos1.Index, pos0.Index; got != want {
t.Fatalf("Index=%v, want %v", got, want) t.Fatalf("Index=%v, want %v", got, want)
@@ -255,22 +241,19 @@ func TestDB_Sync(t *testing.T) {
} }
// Obtain initial position. // Obtain initial position.
pos0, err := db.Pos() pos0 := db.Pos()
if err != nil {
t.Fatal(err)
}
// Checkpoint & fully close which should close WAL file. // Checkpoint & fully close which should close WAL file.
if err := db.Checkpoint(context.Background(), litestream.CheckpointModeTruncate); err != nil { if err := db.Checkpoint(context.Background(), litestream.CheckpointModeTruncate); err != nil {
t.Fatal(err) t.Fatal(err)
} else if err := db.Close(context.Background()); err != nil { } else if err := db.Close(); err != nil {
t.Fatal(err) t.Fatal(err)
} else if err := sqldb.Close(); err != nil { } else if err := sqldb.Close(); err != nil {
t.Fatal(err) t.Fatal(err)
} }
// Remove WAL file. // Verify WAL does not exist.
if err := os.Remove(db.WALPath()); err != nil { if _, err := os.Stat(db.WALPath()); !os.IsNotExist(err) {
t.Fatal(err) t.Fatal(err)
} }
@@ -284,9 +267,7 @@ func TestDB_Sync(t *testing.T) {
} }
// Obtain initial position. // Obtain initial position.
if pos1, err := db.Pos(); err != nil { if pos1 := db.Pos(); pos0.Generation == pos1.Generation {
t.Fatal(err)
} else if pos0.Generation == pos1.Generation {
t.Fatal("expected new generation after truncation") t.Fatal("expected new generation after truncation")
} }
}) })
@@ -307,13 +288,10 @@ func TestDB_Sync(t *testing.T) {
} }
// Obtain initial position. // Obtain initial position.
pos0, err := db.Pos() pos0 := db.Pos()
if err != nil {
t.Fatal(err)
}
// Fully close which should close WAL file. // Fully close which should close WAL file.
if err := db.Close(context.Background()); err != nil { if err := db.Close(); err != nil {
t.Fatal(err) t.Fatal(err)
} else if err := sqldb.Close(); err != nil { } else if err := sqldb.Close(); err != nil {
t.Fatal(err) t.Fatal(err)
@@ -343,190 +321,98 @@ func TestDB_Sync(t *testing.T) {
} }
// Obtain initial position. // Obtain initial position.
if pos1, err := db.Pos(); err != nil { if pos1 := db.Pos(); pos0.Generation == pos1.Generation {
t.Fatal(err)
} else if pos0.Generation == pos1.Generation {
t.Fatal("expected new generation after truncation") t.Fatal("expected new generation after truncation")
} }
}) })
// Ensure DB can handle a mismatched header-only and start new generation. // TODO: Fix test to check for header mismatch
t.Run("WALHeaderMismatch", func(t *testing.T) { /*
db, sqldb := MustOpenDBs(t) // Ensure DB can handle a mismatched header-only and start new generation.
defer MustCloseDBs(t, db, sqldb) t.Run("WALHeaderMismatch", func(t *testing.T) {
db, sqldb := MustOpenDBs(t)
defer MustCloseDBs(t, db, sqldb)
// Execute a query to force a write to the WAL and then sync. // Execute a query to force a write to the WAL and then sync.
if _, err := sqldb.Exec(`CREATE TABLE foo (bar TEXT);`); err != nil { if _, err := sqldb.Exec(`CREATE TABLE foo (bar TEXT);`); err != nil {
t.Fatal(err) t.Fatal(err)
} else if err := db.Sync(context.Background()); err != nil { } else if err := db.Sync(context.Background()); err != nil {
t.Fatal(err) t.Fatal(err)
} }
// Grab initial position & close. // Grab initial position & close.
pos0, err := db.Pos() pos0 := db.Pos()
if err != nil { if err := db.Close(); err != nil {
t.Fatal(err) t.Fatal(err)
} else if err := db.Close(context.Background()); err != nil { }
t.Fatal(err)
}
// Read existing file, update header checksum, and write back only header // Read existing file, update header checksum, and write back only header
// to simulate a header with a mismatched checksum. // to simulate a header with a mismatched checksum.
shadowWALPath := db.ShadowWALPath(pos0.Generation, pos0.Index) shadowWALPath := db.ShadowWALPath(pos0.Generation, pos0.Index)
if buf, err := os.ReadFile(shadowWALPath); err != nil { if buf, err := os.ReadFile(shadowWALPath); err != nil {
t.Fatal(err) t.Fatal(err)
} else if err := os.WriteFile(shadowWALPath, append(buf[:litestream.WALHeaderSize-8], 0, 0, 0, 0, 0, 0, 0, 0), 0600); err != nil { } else if err := os.WriteFile(shadowWALPath, append(buf[:litestream.WALHeaderSize-8], 0, 0, 0, 0, 0, 0, 0, 0), 0600); err != nil {
t.Fatal(err) t.Fatal(err)
} }
// Reopen managed database & ensure sync will still work. // Reopen managed database & ensure sync will still work.
db = MustOpenDBAt(t, db.Path()) db = MustOpenDBAt(t, db.Path())
defer MustCloseDB(t, db) defer MustCloseDB(t, db)
if err := db.Sync(context.Background()); err != nil { if err := db.Sync(context.Background()); err != nil {
t.Fatal(err) t.Fatal(err)
} }
// Verify a new generation was started. // Verify a new generation was started.
if pos1, err := db.Pos(); err != nil { if pos1, err := db.Pos(); err != nil {
t.Fatal(err) t.Fatal(err)
} else if pos0.Generation == pos1.Generation { } else if pos0.Generation == pos1.Generation {
t.Fatal("expected new generation") t.Fatal("expected new generation")
} }
}) })
*/
// Ensure DB can handle partial shadow WAL header write. // TODO: Fix test for segmented shadow WAL.
t.Run("PartialShadowWALHeader", func(t *testing.T) { /*
db, sqldb := MustOpenDBs(t) // Ensure DB can handle a generation directory with a missing shadow WAL.
defer MustCloseDBs(t, db, sqldb) t.Run("NoShadowWAL", func(t *testing.T) {
db, sqldb := MustOpenDBs(t)
defer MustCloseDBs(t, db, sqldb)
// Execute a query to force a write to the WAL and then sync. // Execute a query to force a write to the WAL and then sync.
if _, err := sqldb.Exec(`CREATE TABLE foo (bar TEXT);`); err != nil { if _, err := sqldb.Exec(`CREATE TABLE foo (bar TEXT);`); err != nil {
t.Fatal(err) t.Fatal(err)
} else if err := db.Sync(context.Background()); err != nil { } else if err := db.Sync(context.Background()); err != nil {
t.Fatal(err) t.Fatal(err)
} }
pos0, err := db.Pos() pos0 := db.Pos()
if err != nil {
t.Fatal(err)
}
// Close & truncate shadow WAL to simulate a partial header write. // Close & delete shadow WAL to simulate dir created but not WAL.
if err := db.Close(context.Background()); err != nil { if err := db.Close(); err != nil {
t.Fatal(err) t.Fatal(err)
} else if err := os.Truncate(db.ShadowWALPath(pos0.Generation, pos0.Index), litestream.WALHeaderSize-1); err != nil { } else if err := os.Remove(db.ShadowWALPath(pos0.Generation, pos0.Index)); err != nil {
t.Fatal(err) t.Fatal(err)
} }
// Reopen managed database & ensure sync will still work. // Reopen managed database & ensure sync will still work.
db = MustOpenDBAt(t, db.Path()) db = MustOpenDBAt(t, db.Path())
defer MustCloseDB(t, db) defer MustCloseDB(t, db)
if err := db.Sync(context.Background()); err != nil { if err := db.Sync(context.Background()); err != nil {
t.Fatal(err) t.Fatal(err)
} }
// Verify a new generation was started. // Verify new generation created but index/offset the same.
if pos1, err := db.Pos(); err != nil { if pos1, err := db.Pos(); err != nil {
t.Fatal(err) t.Fatal(err)
} else if pos0.Generation == pos1.Generation { } else if pos0.Generation == pos1.Generation {
t.Fatal("expected new generation") t.Fatal("expected new generation")
} } else if got, want := pos1.Index, pos0.Index; got != want {
}) t.Fatalf("Index=%v want %v", got, want)
} else if got, want := pos1.Offset, pos0.Offset; got != want {
// Ensure DB can handle partial shadow WAL writes. t.Fatalf("Offset=%v want %v", got, want)
t.Run("PartialShadowWALFrame", func(t *testing.T) { }
db, sqldb := MustOpenDBs(t) })
defer MustCloseDBs(t, db, sqldb) */
// Execute a query to force a write to the WAL and then sync.
if _, err := sqldb.Exec(`CREATE TABLE foo (bar TEXT);`); err != nil {
t.Fatal(err)
} else if err := db.Sync(context.Background()); err != nil {
t.Fatal(err)
}
pos0, err := db.Pos()
if err != nil {
t.Fatal(err)
}
// Obtain current shadow WAL size.
fi, err := os.Stat(db.ShadowWALPath(pos0.Generation, pos0.Index))
if err != nil {
t.Fatal(err)
}
// Close & truncate shadow WAL to simulate a partial frame write.
if err := db.Close(context.Background()); err != nil {
t.Fatal(err)
} else if err := os.Truncate(db.ShadowWALPath(pos0.Generation, pos0.Index), fi.Size()-1); err != nil {
t.Fatal(err)
}
// Reopen managed database & ensure sync will still work.
db = MustOpenDBAt(t, db.Path())
defer MustCloseDB(t, db)
if err := db.Sync(context.Background()); err != nil {
t.Fatal(err)
}
// Verify same generation is kept.
if pos1, err := db.Pos(); err != nil {
t.Fatal(err)
} else if got, want := pos1, pos0; got != want {
t.Fatalf("Pos()=%s want %s", got, want)
}
// Ensure shadow WAL has recovered.
if fi0, err := os.Stat(db.ShadowWALPath(pos0.Generation, pos0.Index)); err != nil {
t.Fatal(err)
} else if got, want := fi0.Size(), fi.Size(); got != want {
t.Fatalf("Size()=%v, want %v", got, want)
}
})
// Ensure DB can handle a generation directory with a missing shadow WAL.
t.Run("NoShadowWAL", func(t *testing.T) {
db, sqldb := MustOpenDBs(t)
defer MustCloseDBs(t, db, sqldb)
// Execute a query to force a write to the WAL and then sync.
if _, err := sqldb.Exec(`CREATE TABLE foo (bar TEXT);`); err != nil {
t.Fatal(err)
} else if err := db.Sync(context.Background()); err != nil {
t.Fatal(err)
}
pos0, err := db.Pos()
if err != nil {
t.Fatal(err)
}
// Close & delete shadow WAL to simulate dir created but not WAL.
if err := db.Close(context.Background()); err != nil {
t.Fatal(err)
} else if err := os.Remove(db.ShadowWALPath(pos0.Generation, pos0.Index)); err != nil {
t.Fatal(err)
}
// Reopen managed database & ensure sync will still work.
db = MustOpenDBAt(t, db.Path())
defer MustCloseDB(t, db)
if err := db.Sync(context.Background()); err != nil {
t.Fatal(err)
}
// Verify new generation created but index/offset the same.
if pos1, err := db.Pos(); err != nil {
t.Fatal(err)
} else if pos0.Generation == pos1.Generation {
t.Fatal("expected new generation")
} else if got, want := pos1.Index, pos0.Index; got != want {
t.Fatalf("Index=%v want %v", got, want)
} else if got, want := pos1.Offset, pos0.Offset; got != want {
t.Fatalf("Offset=%v want %v", got, want)
}
})
// Ensure DB checkpoints after minimum number of pages. // Ensure DB checkpoints after minimum number of pages.
t.Run("MinCheckpointPageN", func(t *testing.T) { t.Run("MinCheckpointPageN", func(t *testing.T) {
@@ -553,9 +439,7 @@ func TestDB_Sync(t *testing.T) {
} }
// Ensure position is now on the second index. // Ensure position is now on the second index.
if pos, err := db.Pos(); err != nil { if got, want := db.Pos().Index, 1; got != want {
t.Fatal(err)
} else if got, want := pos.Index, 1; got != want {
t.Fatalf("Index=%v, want %v", got, want) t.Fatalf("Index=%v, want %v", got, want)
} }
}) })
@@ -583,9 +467,7 @@ func TestDB_Sync(t *testing.T) {
} }
// Ensure position is now on the second index. // Ensure position is now on the second index.
if pos, err := db.Pos(); err != nil { if got, want := db.Pos().Index, 1; got != want {
t.Fatal(err)
} else if got, want := pos.Index, 1; got != want {
t.Fatalf("Index=%v, want %v", got, want) t.Fatalf("Index=%v, want %v", got, want)
} }
}) })
@@ -625,7 +507,7 @@ func MustOpenDBAt(tb testing.TB, path string) *litestream.DB {
// MustCloseDB closes db and removes its parent directory. // MustCloseDB closes db and removes its parent directory.
func MustCloseDB(tb testing.TB, db *litestream.DB) { func MustCloseDB(tb testing.TB, db *litestream.DB) {
tb.Helper() tb.Helper()
if err := db.Close(context.Background()); err != nil && !strings.Contains(err.Error(), `database is closed`) { if err := db.Close(); err != nil && !strings.Contains(err.Error(), `database is closed`) {
tb.Fatal(err) tb.Fatal(err)
} else if err := os.RemoveAll(filepath.Dir(db.Path())); err != nil { } else if err := os.RemoveAll(filepath.Dir(db.Path())); err != nil {
tb.Fatal(err) tb.Fatal(err)

View File

@@ -26,9 +26,9 @@
Description="Litestream $(var.Version) installer" Description="Litestream $(var.Version) installer"
Compressed="yes" Compressed="yes"
/> />
<Media Id="1" Cabinet="litestream.cab" EmbedCab="yes"/> <Media Id="1" Cabinet="litestream.cab" EmbedCab="yes"/>
<MajorUpgrade <MajorUpgrade
Schedule="afterInstallInitialize" Schedule="afterInstallInitialize"
DowngradeErrorMessage="A later version of [ProductName] is already installed. Setup will now exit." DowngradeErrorMessage="A later version of [ProductName] is already installed. Setup will now exit."

View File

@@ -7,3 +7,4 @@
# replicas: # replicas:
# - path: /path/to/replica # File-based replication # - path: /path/to/replica # File-based replication
# - url: s3://my.bucket.com/db # S3-based replication # - url: s3://my.bucket.com/db # S3-based replication

View File

@@ -1,35 +0,0 @@
#!/usr/bin/env python3
"""Run a command against a local moto S3 server.

Starts an in-process moto S3 endpoint, creates a uniquely-named test
bucket, then runs the given command with LITESTREAM_S3_* environment
variables pointing at that endpoint. Exits with the command's exit code.
"""
import os
import subprocess
import sys
import time

import boto3
from moto.server import ThreadedMotoServer

cmd = sys.argv[1:]
if len(cmd) == 0:
    print(f"usage: {sys.argv[0]} <command> [arguments]", file=sys.stderr)
    sys.exit(1)

# Environment handed to the child process; also used to configure boto3.
env = os.environ.copy() | {
    "LITESTREAM_S3_ACCESS_KEY_ID": "lite",
    "LITESTREAM_S3_SECRET_ACCESS_KEY": "stream",
    "LITESTREAM_S3_BUCKET": f"test{int(time.time())}",
    "LITESTREAM_S3_ENDPOINT": "http://127.0.0.1:5000",
    "LITESTREAM_S3_FORCE_PATH_STYLE": "true",
}

server = ThreadedMotoServer()
server.start()
try:
    # BUG FIX: the secret key was previously passed as the list literal
    # ["LITESTREAM_S3_SECRET_ACCESS_KEY"] rather than the env lookup,
    # sending a bogus credential to the client.
    boto3.client(
        "s3",
        aws_access_key_id=env["LITESTREAM_S3_ACCESS_KEY_ID"],
        aws_secret_access_key=env["LITESTREAM_S3_SECRET_ACCESS_KEY"],
        endpoint_url=env["LITESTREAM_S3_ENDPOINT"],
    ).create_bucket(Bucket=env["LITESTREAM_S3_BUCKET"])

    proc = subprocess.run(cmd, env=env)
finally:
    # Always stop the moto server, even if bucket creation or the
    # subprocess raises.
    server.stop()
sys.exit(proc.returncode)

View File

@@ -4,8 +4,11 @@ import (
"context" "context"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"os" "os"
"path/filepath" "path/filepath"
"sort"
"strings"
"github.com/benbjohnson/litestream" "github.com/benbjohnson/litestream"
"github.com/benbjohnson/litestream/internal" "github.com/benbjohnson/litestream/internal"
@@ -82,7 +85,7 @@ func (c *ReplicaClient) SnapshotPath(generation string, index int) (string, erro
if err != nil { if err != nil {
return "", err return "", err
} }
return filepath.Join(dir, litestream.FormatSnapshotPath(index)), nil return filepath.Join(dir, litestream.FormatIndex(index)+".snapshot.lz4"), nil
} }
// WALDir returns the path to a generation's WAL directory // WALDir returns the path to a generation's WAL directory
@@ -100,7 +103,7 @@ func (c *ReplicaClient) WALSegmentPath(generation string, index int, offset int6
if err != nil { if err != nil {
return "", err return "", err
} }
return filepath.Join(dir, litestream.FormatWALSegmentPath(index, offset)), nil return filepath.Join(dir, litestream.FormatIndex(index), fmt.Sprintf("%08x.wal.lz4", offset)), nil
} }
// Generations returns a list of available generation names. // Generations returns a list of available generation names.
@@ -110,7 +113,7 @@ func (c *ReplicaClient) Generations(ctx context.Context) ([]string, error) {
return nil, fmt.Errorf("cannot determine generations path: %w", err) return nil, fmt.Errorf("cannot determine generations path: %w", err)
} }
fis, err := os.ReadDir(root) fis, err := ioutil.ReadDir(root)
if os.IsNotExist(err) { if os.IsNotExist(err) {
return nil, nil return nil, nil
} else if err != nil { } else if err != nil {
@@ -146,7 +149,7 @@ func (c *ReplicaClient) DeleteGeneration(ctx context.Context, generation string)
func (c *ReplicaClient) Snapshots(ctx context.Context, generation string) (litestream.SnapshotIterator, error) { func (c *ReplicaClient) Snapshots(ctx context.Context, generation string) (litestream.SnapshotIterator, error) {
dir, err := c.SnapshotsDir(generation) dir, err := c.SnapshotsDir(generation)
if err != nil { if err != nil {
return nil, fmt.Errorf("cannot determine snapshots path: %w", err) return nil, err
} }
f, err := os.Open(dir) f, err := os.Open(dir)
@@ -166,7 +169,7 @@ func (c *ReplicaClient) Snapshots(ctx context.Context, generation string) (lites
infos := make([]litestream.SnapshotInfo, 0, len(fis)) infos := make([]litestream.SnapshotInfo, 0, len(fis))
for _, fi := range fis { for _, fi := range fis {
// Parse index from filename. // Parse index from filename.
index, err := litestream.ParseSnapshotPath(fi.Name()) index, err := internal.ParseSnapshotPath(filepath.Base(fi.Name()))
if err != nil { if err != nil {
continue continue
} }
@@ -179,6 +182,8 @@ func (c *ReplicaClient) Snapshots(ctx context.Context, generation string) (lites
}) })
} }
sort.Sort(litestream.SnapshotInfoSlice(infos))
return litestream.NewSnapshotInfoSliceIterator(infos), nil return litestream.NewSnapshotInfoSliceIterator(infos), nil
} }
@@ -186,7 +191,7 @@ func (c *ReplicaClient) Snapshots(ctx context.Context, generation string) (lites
func (c *ReplicaClient) WriteSnapshot(ctx context.Context, generation string, index int, rd io.Reader) (info litestream.SnapshotInfo, err error) { func (c *ReplicaClient) WriteSnapshot(ctx context.Context, generation string, index int, rd io.Reader) (info litestream.SnapshotInfo, err error) {
filename, err := c.SnapshotPath(generation, index) filename, err := c.SnapshotPath(generation, index)
if err != nil { if err != nil {
return info, fmt.Errorf("cannot determine snapshot path: %w", err) return info, err
} }
var fileInfo, dirInfo os.FileInfo var fileInfo, dirInfo os.FileInfo
@@ -239,7 +244,7 @@ func (c *ReplicaClient) WriteSnapshot(ctx context.Context, generation string, in
func (c *ReplicaClient) SnapshotReader(ctx context.Context, generation string, index int) (io.ReadCloser, error) { func (c *ReplicaClient) SnapshotReader(ctx context.Context, generation string, index int) (io.ReadCloser, error) {
filename, err := c.SnapshotPath(generation, index) filename, err := c.SnapshotPath(generation, index)
if err != nil { if err != nil {
return nil, fmt.Errorf("cannot determine snapshot path: %w", err) return nil, err
} }
return os.Open(filename) return os.Open(filename)
} }
@@ -260,7 +265,7 @@ func (c *ReplicaClient) DeleteSnapshot(ctx context.Context, generation string, i
func (c *ReplicaClient) WALSegments(ctx context.Context, generation string) (litestream.WALSegmentIterator, error) { func (c *ReplicaClient) WALSegments(ctx context.Context, generation string) (litestream.WALSegmentIterator, error) {
dir, err := c.WALDir(generation) dir, err := c.WALDir(generation)
if err != nil { if err != nil {
return nil, fmt.Errorf("cannot determine wal path: %w", err) return nil, err
} }
f, err := os.Open(dir) f, err := os.Open(dir)
@@ -277,31 +282,25 @@ func (c *ReplicaClient) WALSegments(ctx context.Context, generation string) (lit
} }
// Iterate over every file and convert to metadata. // Iterate over every file and convert to metadata.
infos := make([]litestream.WALSegmentInfo, 0, len(fis)) indexes := make([]int, 0, len(fis))
for _, fi := range fis { for _, fi := range fis {
// Parse index from filename. index, err := litestream.ParseIndex(fi.Name())
index, offset, err := litestream.ParseWALSegmentPath(fi.Name()) if err != nil || !fi.IsDir() {
if err != nil {
continue continue
} }
indexes = append(indexes, index)
infos = append(infos, litestream.WALSegmentInfo{
Generation: generation,
Index: index,
Offset: offset,
Size: fi.Size(),
CreatedAt: fi.ModTime().UTC(),
})
} }
return litestream.NewWALSegmentInfoSliceIterator(infos), nil sort.Ints(indexes)
return newWALSegmentIterator(dir, generation, indexes), nil
} }
// WriteWALSegment writes LZ4 compressed data from rd into a file on disk. // WriteWALSegment writes LZ4 compressed data from rd into a file on disk.
func (c *ReplicaClient) WriteWALSegment(ctx context.Context, pos litestream.Pos, rd io.Reader) (info litestream.WALSegmentInfo, err error) { func (c *ReplicaClient) WriteWALSegment(ctx context.Context, pos litestream.Pos, rd io.Reader) (info litestream.WALSegmentInfo, err error) {
filename, err := c.WALSegmentPath(pos.Generation, pos.Index, pos.Offset) filename, err := c.WALSegmentPath(pos.Generation, pos.Index, pos.Offset)
if err != nil { if err != nil {
return info, fmt.Errorf("cannot determine wal segment path: %w", err) return info, err
} }
var fileInfo, dirInfo os.FileInfo var fileInfo, dirInfo os.FileInfo
@@ -355,7 +354,7 @@ func (c *ReplicaClient) WriteWALSegment(ctx context.Context, pos litestream.Pos,
func (c *ReplicaClient) WALSegmentReader(ctx context.Context, pos litestream.Pos) (io.ReadCloser, error) { func (c *ReplicaClient) WALSegmentReader(ctx context.Context, pos litestream.Pos) (io.ReadCloser, error) {
filename, err := c.WALSegmentPath(pos.Generation, pos.Index, pos.Offset) filename, err := c.WALSegmentPath(pos.Generation, pos.Index, pos.Offset)
if err != nil { if err != nil {
return nil, fmt.Errorf("cannot determine wal segment path: %w", err) return nil, err
} }
return os.Open(filename) return os.Open(filename)
} }
@@ -365,7 +364,7 @@ func (c *ReplicaClient) DeleteWALSegments(ctx context.Context, a []litestream.Po
for _, pos := range a { for _, pos := range a {
filename, err := c.WALSegmentPath(pos.Generation, pos.Index, pos.Offset) filename, err := c.WALSegmentPath(pos.Generation, pos.Index, pos.Offset)
if err != nil { if err != nil {
return fmt.Errorf("cannot determine wal segment path: %w", err) return err
} }
if err := os.Remove(filename); err != nil && !os.IsNotExist(err) { if err := os.Remove(filename); err != nil && !os.IsNotExist(err) {
return err return err
@@ -373,3 +372,100 @@ func (c *ReplicaClient) DeleteWALSegments(ctx context.Context, a []litestream.Po
} }
return nil return nil
} }
// walSegmentIterator iterates over the WAL segment files stored on disk
// for a single generation, where segments are grouped into one
// subdirectory per index under dir.
type walSegmentIterator struct {
	dir        string // path to the generation's WAL directory
	generation string // generation name attached to returned segment infos
	indexes    []int  // remaining index directories to visit, consumed front-to-back

	infos []litestream.WALSegmentInfo // cached segments for the current index; infos[0] is the current segment
	err   error                       // sticky error; once set, iteration stops
}
// newWALSegmentIterator returns an iterator over the WAL segments found
// under dir for the given generation, visiting the given indexes in the
// order provided.
func newWALSegmentIterator(dir, generation string, indexes []int) *walSegmentIterator {
	itr := walSegmentIterator{
		dir:        dir,
		generation: generation,
		indexes:    indexes,
	}
	return &itr
}
// Close closes the iterator and returns any error encountered during
// iteration.
func (itr *walSegmentIterator) Close() error {
	return itr.err
}
// Next advances the iterator to the next WAL segment. It returns true if
// a segment is available via WALSegment(), or false when iteration is
// exhausted or an error occurred (check Err or Close).
func (itr *walSegmentIterator) Next() bool {
	// Exit if an error has already occurred.
	if itr.err != nil {
		return false
	}

	for {
		// Move to the next segment in cache, if available.
		if len(itr.infos) > 1 {
			itr.infos = itr.infos[1:]
			return true
		}
		itr.infos = itr.infos[:0] // otherwise clear infos

		// If no indexes remain, stop iteration.
		if len(itr.indexes) == 0 {
			return false
		}

		// Read segments into a cache for the current index.
		index := itr.indexes[0]
		itr.indexes = itr.indexes[1:]

		f, err := os.Open(filepath.Join(itr.dir, litestream.FormatIndex(index)))
		if err != nil {
			itr.err = err
			return false
		}

		// Read the directory listing and close the handle immediately.
		// BUG FIX: the previous version used "defer f.Close()" inside
		// this loop, which leaked one open handle per index until Next
		// returned and then double-closed each file after the explicit
		// Close below.
		fis, err := f.Readdir(-1)
		closeErr := f.Close()
		if err != nil {
			itr.err = err
			return false
		} else if closeErr != nil {
			itr.err = closeErr
			return false
		}

		for _, fi := range fis {
			filename := filepath.Base(fi.Name())
			if fi.IsDir() {
				continue
			}

			// Skip files whose name does not parse as a segment offset.
			offset, err := litestream.ParseOffset(strings.TrimSuffix(filename, ".wal.lz4"))
			if err != nil {
				continue
			}

			itr.infos = append(itr.infos, litestream.WALSegmentInfo{
				Generation: itr.generation,
				Index:      index,
				Offset:     offset,
				Size:       fi.Size(),
				CreatedAt:  fi.ModTime().UTC(),
			})
		}

		// Ensure segments are sorted within index.
		sort.Sort(litestream.WALSegmentInfoSlice(itr.infos))

		if len(itr.infos) > 0 {
			return true
		}
	}
}
// Err returns the first error encountered during iteration, if any.
func (itr *walSegmentIterator) Err() error {
	return itr.err
}
// WALSegment returns the segment at the current iterator position. It
// returns the zero value if no segment is currently cached.
func (itr *walSegmentIterator) WALSegment() litestream.WALSegmentInfo {
	if len(itr.infos) > 0 {
		return itr.infos[0]
	}
	return litestream.WALSegmentInfo{}
}

View File

@@ -118,7 +118,7 @@ func TestReplicaClient_WALSegmentPath(t *testing.T) {
t.Run("OK", func(t *testing.T) { t.Run("OK", func(t *testing.T) {
if got, err := file.NewReplicaClient("/foo").WALSegmentPath("0123456701234567", 1000, 1001); err != nil { if got, err := file.NewReplicaClient("/foo").WALSegmentPath("0123456701234567", 1000, 1001); err != nil {
t.Fatal(err) t.Fatal(err)
} else if want := "/foo/generations/0123456701234567/wal/000003e8_000003e9.wal.lz4"; got != want { } else if want := "/foo/generations/0123456701234567/wal/000003e8/000003e9.wal.lz4"; got != want {
t.Fatalf("WALPath()=%v, want %v", got, want) t.Fatalf("WALPath()=%v, want %v", got, want)
} }
}) })
@@ -133,91 +133,3 @@ func TestReplicaClient_WALSegmentPath(t *testing.T) {
} }
}) })
} }
/*
func TestReplica_Sync(t *testing.T) {
// Ensure replica can successfully sync after DB has sync'd.
t.Run("InitialSync", func(t *testing.T) {
db, sqldb := MustOpenDBs(t)
defer MustCloseDBs(t, db, sqldb)
r := litestream.NewReplica(db, "", file.NewReplicaClient(t.TempDir()))
r.MonitorEnabled = false
db.Replicas = []*litestream.Replica{r}
// Sync database & then sync replica.
if err := db.Sync(context.Background()); err != nil {
t.Fatal(err)
} else if err := r.Sync(context.Background()); err != nil {
t.Fatal(err)
}
// Ensure positions match.
if want, err := db.Pos(); err != nil {
t.Fatal(err)
} else if got, err := r.Pos(context.Background()); err != nil {
t.Fatal(err)
} else if got != want {
t.Fatalf("Pos()=%v, want %v", got, want)
}
})
// Ensure replica can successfully sync multiple times.
t.Run("MultiSync", func(t *testing.T) {
db, sqldb := MustOpenDBs(t)
defer MustCloseDBs(t, db, sqldb)
r := litestream.NewReplica(db, "", file.NewReplicaClient(t.TempDir()))
r.MonitorEnabled = false
db.Replicas = []*litestream.Replica{r}
if _, err := sqldb.Exec(`CREATE TABLE foo (bar TEXT);`); err != nil {
t.Fatal(err)
}
// Write to the database multiple times and sync after each write.
for i, n := 0, db.MinCheckpointPageN*2; i < n; i++ {
if _, err := sqldb.Exec(`INSERT INTO foo (bar) VALUES ('baz')`); err != nil {
t.Fatal(err)
}
// Sync periodically.
if i%100 == 0 || i == n-1 {
if err := db.Sync(context.Background()); err != nil {
t.Fatal(err)
} else if err := r.Sync(context.Background()); err != nil {
t.Fatal(err)
}
}
}
// Ensure positions match.
pos, err := db.Pos()
if err != nil {
t.Fatal(err)
} else if got, want := pos.Index, 2; got != want {
t.Fatalf("Index=%v, want %v", got, want)
}
if want, err := r.Pos(context.Background()); err != nil {
t.Fatal(err)
} else if got := pos; got != want {
t.Fatalf("Pos()=%v, want %v", got, want)
}
})
// Ensure replica returns an error if there is no generation available from the DB.
t.Run("ErrNoGeneration", func(t *testing.T) {
db, sqldb := MustOpenDBs(t)
defer MustCloseDBs(t, db, sqldb)
r := litestream.NewReplica(db, "", file.NewReplicaClient(t.TempDir()))
r.MonitorEnabled = false
db.Replicas = []*litestream.Replica{r}
if err := r.Sync(context.Background()); err == nil || err.Error() != `no generation, waiting for data` {
t.Fatal(err)
}
})
}
*/

View File

@@ -68,7 +68,7 @@ func (c *ReplicaClient) Generations(ctx context.Context) ([]string, error) {
// Construct query to only pull generation directory names. // Construct query to only pull generation directory names.
query := &storage.Query{ query := &storage.Query{
Delimiter: "/", Delimiter: "/",
Prefix: litestream.GenerationsPath(c.Path) + "/", Prefix: path.Join(c.Path, "generations") + "/",
} }
// Loop over results and only build list of generation-formatted names. // Loop over results and only build list of generation-formatted names.
@@ -96,16 +96,15 @@ func (c *ReplicaClient) Generations(ctx context.Context) ([]string, error) {
func (c *ReplicaClient) DeleteGeneration(ctx context.Context, generation string) error { func (c *ReplicaClient) DeleteGeneration(ctx context.Context, generation string) error {
if err := c.Init(ctx); err != nil { if err := c.Init(ctx); err != nil {
return err return err
} else if generation == "" {
return fmt.Errorf("generation required")
} }
dir, err := litestream.GenerationPath(c.Path, generation) prefix := path.Join(c.Path, "generations", generation) + "/"
if err != nil {
return fmt.Errorf("cannot determine generation path: %w", err)
}
// Iterate over every object in generation and delete it. // Iterate over every object in generation and delete it.
internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "LIST").Inc() internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "LIST").Inc()
for it := c.bkt.Objects(ctx, &storage.Query{Prefix: dir + "/"}); ; { for it := c.bkt.Objects(ctx, &storage.Query{Prefix: prefix}); ; {
attrs, err := it.Next() attrs, err := it.Next()
if err == iterator.Done { if err == iterator.Done {
break break
@@ -130,24 +129,22 @@ func (c *ReplicaClient) DeleteGeneration(ctx context.Context, generation string)
func (c *ReplicaClient) Snapshots(ctx context.Context, generation string) (litestream.SnapshotIterator, error) { func (c *ReplicaClient) Snapshots(ctx context.Context, generation string) (litestream.SnapshotIterator, error) {
if err := c.Init(ctx); err != nil { if err := c.Init(ctx); err != nil {
return nil, err return nil, err
} else if generation == "" {
return nil, fmt.Errorf("generation required")
} }
dir, err := litestream.SnapshotsPath(c.Path, generation) prefix := path.Join(c.Path, "generations", generation) + "/"
if err != nil { return newSnapshotIterator(generation, c.bkt.Objects(ctx, &storage.Query{Prefix: prefix})), nil
return nil, fmt.Errorf("cannot determine snapshots path: %w", err)
}
return newSnapshotIterator(generation, c.bkt.Objects(ctx, &storage.Query{Prefix: dir + "/"})), nil
} }
// WriteSnapshot writes LZ4 compressed data from rd to the object storage. // WriteSnapshot writes LZ4 compressed data from rd to the object storage.
func (c *ReplicaClient) WriteSnapshot(ctx context.Context, generation string, index int, rd io.Reader) (info litestream.SnapshotInfo, err error) { func (c *ReplicaClient) WriteSnapshot(ctx context.Context, generation string, index int, rd io.Reader) (info litestream.SnapshotInfo, err error) {
if err := c.Init(ctx); err != nil { if err := c.Init(ctx); err != nil {
return info, err return info, err
} else if generation == "" {
return info, fmt.Errorf("generation required")
} }
key, err := litestream.SnapshotPath(c.Path, generation, index) key := path.Join(c.Path, "generations", generation, "snapshots", litestream.FormatIndex(index)+".snapshot.lz4")
if err != nil {
return info, fmt.Errorf("cannot determine snapshot path: %w", err)
}
startTime := time.Now() startTime := time.Now()
w := c.bkt.Object(key).NewWriter(ctx) w := c.bkt.Object(key).NewWriter(ctx)
@@ -177,12 +174,11 @@ func (c *ReplicaClient) WriteSnapshot(ctx context.Context, generation string, in
func (c *ReplicaClient) SnapshotReader(ctx context.Context, generation string, index int) (io.ReadCloser, error) { func (c *ReplicaClient) SnapshotReader(ctx context.Context, generation string, index int) (io.ReadCloser, error) {
if err := c.Init(ctx); err != nil { if err := c.Init(ctx); err != nil {
return nil, err return nil, err
} else if generation == "" {
return nil, fmt.Errorf("generation required")
} }
key, err := litestream.SnapshotPath(c.Path, generation, index) key := path.Join(c.Path, "generations", generation, "snapshots", litestream.FormatIndex(index)+".snapshot.lz4")
if err != nil {
return nil, fmt.Errorf("cannot determine snapshot path: %w", err)
}
r, err := c.bkt.Object(key).NewReader(ctx) r, err := c.bkt.Object(key).NewReader(ctx)
if isNotExists(err) { if isNotExists(err) {
@@ -201,12 +197,11 @@ func (c *ReplicaClient) SnapshotReader(ctx context.Context, generation string, i
func (c *ReplicaClient) DeleteSnapshot(ctx context.Context, generation string, index int) error { func (c *ReplicaClient) DeleteSnapshot(ctx context.Context, generation string, index int) error {
if err := c.Init(ctx); err != nil { if err := c.Init(ctx); err != nil {
return err return err
} else if generation == "" {
return fmt.Errorf("generation required")
} }
key, err := litestream.SnapshotPath(c.Path, generation, index) key := path.Join(c.Path, "generations", generation, "snapshots", litestream.FormatIndex(index), ".snapshot.lz4")
if err != nil {
return fmt.Errorf("cannot determine snapshot path: %w", err)
}
if err := c.bkt.Object(key).Delete(ctx); err != nil && !isNotExists(err) { if err := c.bkt.Object(key).Delete(ctx); err != nil && !isNotExists(err) {
return fmt.Errorf("cannot delete snapshot %q: %w", key, err) return fmt.Errorf("cannot delete snapshot %q: %w", key, err)
@@ -220,24 +215,22 @@ func (c *ReplicaClient) DeleteSnapshot(ctx context.Context, generation string, i
func (c *ReplicaClient) WALSegments(ctx context.Context, generation string) (litestream.WALSegmentIterator, error) { func (c *ReplicaClient) WALSegments(ctx context.Context, generation string) (litestream.WALSegmentIterator, error) {
if err := c.Init(ctx); err != nil { if err := c.Init(ctx); err != nil {
return nil, err return nil, err
} else if generation == "" {
return nil, fmt.Errorf("generation required")
} }
dir, err := litestream.WALPath(c.Path, generation) prefix := path.Join(c.Path, "generations", generation, "wal") + "/"
if err != nil { return newWALSegmentIterator(generation, prefix, c.bkt.Objects(ctx, &storage.Query{Prefix: prefix})), nil
return nil, fmt.Errorf("cannot determine wal path: %w", err)
}
return newWALSegmentIterator(generation, c.bkt.Objects(ctx, &storage.Query{Prefix: dir + "/"})), nil
} }
// WriteWALSegment writes LZ4 compressed data from rd into a file on disk. // WriteWALSegment writes LZ4 compressed data from rd into a file on disk.
func (c *ReplicaClient) WriteWALSegment(ctx context.Context, pos litestream.Pos, rd io.Reader) (info litestream.WALSegmentInfo, err error) { func (c *ReplicaClient) WriteWALSegment(ctx context.Context, pos litestream.Pos, rd io.Reader) (info litestream.WALSegmentInfo, err error) {
if err := c.Init(ctx); err != nil { if err := c.Init(ctx); err != nil {
return info, err return info, err
} else if pos.Generation == "" {
return info, fmt.Errorf("generation required")
} }
key, err := litestream.WALSegmentPath(c.Path, pos.Generation, pos.Index, pos.Offset) key := path.Join(c.Path, "generations", pos.Generation, "wal", litestream.FormatIndex(pos.Index), litestream.FormatOffset(pos.Offset)+".wal.lz4")
if err != nil {
return info, fmt.Errorf("cannot determine wal segment path: %w", err)
}
startTime := time.Now() startTime := time.Now()
w := c.bkt.Object(key).NewWriter(ctx) w := c.bkt.Object(key).NewWriter(ctx)
@@ -267,12 +260,11 @@ func (c *ReplicaClient) WriteWALSegment(ctx context.Context, pos litestream.Pos,
func (c *ReplicaClient) WALSegmentReader(ctx context.Context, pos litestream.Pos) (io.ReadCloser, error) { func (c *ReplicaClient) WALSegmentReader(ctx context.Context, pos litestream.Pos) (io.ReadCloser, error) {
if err := c.Init(ctx); err != nil { if err := c.Init(ctx); err != nil {
return nil, err return nil, err
} else if pos.Generation == "" {
return nil, fmt.Errorf("generation required")
} }
key, err := litestream.WALSegmentPath(c.Path, pos.Generation, pos.Index, pos.Offset) key := path.Join(c.Path, "generations", pos.Generation, "wal", litestream.FormatIndex(pos.Index), litestream.FormatOffset(pos.Offset)+".wal.lz4")
if err != nil {
return nil, fmt.Errorf("cannot determine wal segment path: %w", err)
}
r, err := c.bkt.Object(key).NewReader(ctx) r, err := c.bkt.Object(key).NewReader(ctx)
if isNotExists(err) { if isNotExists(err) {
@@ -294,11 +286,11 @@ func (c *ReplicaClient) DeleteWALSegments(ctx context.Context, a []litestream.Po
} }
for _, pos := range a { for _, pos := range a {
key, err := litestream.WALSegmentPath(c.Path, pos.Generation, pos.Index, pos.Offset) if pos.Generation == "" {
if err != nil { return fmt.Errorf("generation required")
return fmt.Errorf("cannot determine wal segment path: %w", err)
} }
key := path.Join(c.Path, "generations", pos.Generation, "wal", litestream.FormatIndex(pos.Index), litestream.FormatOffset(pos.Offset)+".wal.lz4")
if err := c.bkt.Object(key).Delete(ctx); err != nil && !isNotExists(err) { if err := c.bkt.Object(key).Delete(ctx); err != nil && !isNotExists(err) {
return fmt.Errorf("cannot delete wal segment %q: %w", key, err) return fmt.Errorf("cannot delete wal segment %q: %w", key, err)
} }
@@ -344,7 +336,7 @@ func (itr *snapshotIterator) Next() bool {
} }
// Parse index, otherwise skip to the next object. // Parse index, otherwise skip to the next object.
index, err := litestream.ParseSnapshotPath(path.Base(attrs.Name)) index, err := internal.ParseSnapshotPath(path.Base(attrs.Name))
if err != nil { if err != nil {
continue continue
} }
@@ -366,15 +358,17 @@ func (itr *snapshotIterator) Snapshot() litestream.SnapshotInfo { return itr.inf
type walSegmentIterator struct { type walSegmentIterator struct {
generation string generation string
prefix string
it *storage.ObjectIterator it *storage.ObjectIterator
info litestream.WALSegmentInfo info litestream.WALSegmentInfo
err error err error
} }
func newWALSegmentIterator(generation string, it *storage.ObjectIterator) *walSegmentIterator { func newWALSegmentIterator(generation, prefix string, it *storage.ObjectIterator) *walSegmentIterator {
return &walSegmentIterator{ return &walSegmentIterator{
generation: generation, generation: generation,
prefix: prefix,
it: it, it: it,
} }
} }
@@ -400,7 +394,7 @@ func (itr *walSegmentIterator) Next() bool {
} }
// Parse index & offset, otherwise skip to the next object. // Parse index & offset, otherwise skip to the next object.
index, offset, err := litestream.ParseWALSegmentPath(path.Base(attrs.Name)) index, offset, err := internal.ParseWALSegmentPath(strings.TrimPrefix(attrs.Name, itr.prefix))
if err != nil { if err != nil {
continue continue
} }

74
go.mod
View File

@@ -1,65 +1,21 @@
module github.com/benbjohnson/litestream module github.com/benbjohnson/litestream
go 1.21 go 1.16
require ( require (
cloud.google.com/go/storage v1.36.0 cloud.google.com/go/storage v1.15.0
filippo.io/age v1.1.1 github.com/Azure/azure-storage-blob-go v0.13.0 // indirect
github.com/Azure/azure-storage-blob-go v0.15.0 github.com/Azure/go-autorest/autorest v0.9.0 // indirect
github.com/aws/aws-sdk-go v1.49.5 github.com/aws/aws-sdk-go v1.27.0
github.com/mattn/go-shellwords v1.0.12 github.com/davecgh/go-spew v1.1.1
github.com/mattn/go-sqlite3 v1.14.19 github.com/mattn/go-shellwords v1.0.11 // indirect
github.com/pierrec/lz4/v4 v4.1.19 github.com/mattn/go-sqlite3 v1.14.5
github.com/pkg/sftp v1.13.6 github.com/pierrec/lz4/v4 v4.1.3
github.com/prometheus/client_golang v1.17.0 github.com/pkg/sftp v1.13.0 // indirect
golang.org/x/crypto v0.17.0 github.com/prometheus/client_golang v1.9.0
golang.org/x/sync v0.5.0 golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a // indirect
golang.org/x/sys v0.15.0 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
google.golang.org/api v0.154.0 golang.org/x/sys v0.0.0-20210412220455-f1c623a9e750
google.golang.org/api v0.45.0
gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v2 v2.4.0
) )
require (
cloud.google.com/go v0.111.0 // indirect
cloud.google.com/go/compute v1.23.3 // indirect
cloud.google.com/go/compute/metadata v0.2.3 // indirect
cloud.google.com/go/iam v1.1.5 // indirect
github.com/Azure/azure-pipeline-go v0.2.3 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/go-logr/logr v1.3.0 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/google/go-cmp v0.6.0 // indirect
github.com/google/s2a-go v0.1.7 // indirect
github.com/google/uuid v1.5.0 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
github.com/googleapis/gax-go/v2 v2.12.0 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/kr/fs v0.1.0 // indirect
github.com/mattn/go-ieproxy v0.0.11 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect
github.com/prometheus/client_model v0.5.0 // indirect
github.com/prometheus/common v0.45.0 // indirect
github.com/prometheus/procfs v0.12.0 // indirect
go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 // indirect
go.opentelemetry.io/otel v1.21.0 // indirect
go.opentelemetry.io/otel/metric v1.21.0 // indirect
go.opentelemetry.io/otel/trace v1.21.0 // indirect
golang.org/x/net v0.19.0 // indirect
golang.org/x/oauth2 v0.15.0 // indirect
golang.org/x/text v0.14.0 // indirect
golang.org/x/time v0.5.0 // indirect
golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect
google.golang.org/appengine v1.6.8 // indirect
google.golang.org/genproto v0.0.0-20231212172506-995d672761c0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20231212172506-995d672761c0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20231212172506-995d672761c0 // indirect
google.golang.org/grpc v1.60.1 // indirect
google.golang.org/protobuf v1.31.0 // indirect
)

913
go.sum

File diff suppressed because it is too large Load Diff

View File

@@ -1,8 +1,11 @@
package internal package internal
import ( import (
"fmt"
"io" "io"
"os" "os"
"regexp"
"strconv"
"syscall" "syscall"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
@@ -36,6 +39,39 @@ func (r *ReadCloser) Close() error {
return r.c.Close() return r.c.Close()
} }
// MultiReadCloser is a logical concatenation of io.ReadCloser.
// It works like io.MultiReader except all objects are closed when Close() is called.
type MultiReadCloser struct {
mr io.Reader
closers []io.Closer
}
// NewMultiReadCloser returns a new instance of MultiReadCloser.
func NewMultiReadCloser(a []io.ReadCloser) *MultiReadCloser {
readers := make([]io.Reader, len(a))
closers := make([]io.Closer, len(a))
for i, rc := range a {
readers[i] = rc
closers[i] = rc
}
return &MultiReadCloser{mr: io.MultiReader(readers...), closers: closers}
}
// Read reads from the next available reader.
func (mrc *MultiReadCloser) Read(p []byte) (n int, err error) {
return mrc.mr.Read(p)
}
// Close closes all underlying ReadClosers and returns first error encountered.
func (mrc *MultiReadCloser) Close() (err error) {
for _, c := range mrc.closers {
if e := c.Close(); e != nil && err == nil {
err = e
}
}
return err
}
// ReadCounter wraps an io.Reader and counts the total number of bytes read. // ReadCounter wraps an io.Reader and counts the total number of bytes read.
type ReadCounter struct { type ReadCounter struct {
r io.Reader r io.Reader
@@ -127,6 +163,33 @@ func MkdirAll(path string, fi os.FileInfo) error {
return nil return nil
} }
// ParseSnapshotPath parses the index from a snapshot filename. Used by path-based replicas.
func ParseSnapshotPath(s string) (index int, err error) {
a := snapshotPathRegex.FindStringSubmatch(s)
if a == nil {
return 0, fmt.Errorf("invalid snapshot path")
}
i64, _ := strconv.ParseUint(a[1], 16, 64)
return int(i64), nil
}
var snapshotPathRegex = regexp.MustCompile(`^([0-9a-f]{8})\.snapshot\.lz4$`)
// ParseWALSegmentPath parses the index/offset from a segment filename. Used by path-based replicas.
func ParseWALSegmentPath(s string) (index int, offset int64, err error) {
a := walSegmentPathRegex.FindStringSubmatch(s)
if a == nil {
return 0, 0, fmt.Errorf("invalid wal segment path")
}
i64, _ := strconv.ParseUint(a[1], 16, 64)
off64, _ := strconv.ParseUint(a[2], 16, 64)
return int(i64), int64(off64), nil
}
var walSegmentPathRegex = regexp.MustCompile(`^([0-9a-f]{8})\/([0-9a-f]{8})\.wal\.lz4$`)
// Shared replica metrics. // Shared replica metrics.
var ( var (
OperationTotalCounterVec = promauto.NewCounterVec(prometheus.CounterOpts{ OperationTotalCounterVec = promauto.NewCounterVec(prometheus.CounterOpts{

61
internal/internal_test.go Normal file
View File

@@ -0,0 +1,61 @@
package internal_test
import (
"fmt"
"reflect"
"testing"
"github.com/benbjohnson/litestream/internal"
)
func TestParseSnapshotPath(t *testing.T) {
for _, tt := range []struct {
s string
index int
err error
}{
{"00bc614e.snapshot.lz4", 12345678, nil},
{"xxxxxxxx.snapshot.lz4", 0, fmt.Errorf("invalid snapshot path")},
{"00bc614.snapshot.lz4", 0, fmt.Errorf("invalid snapshot path")},
{"00bc614e.snapshot.lz", 0, fmt.Errorf("invalid snapshot path")},
{"00bc614e.snapshot", 0, fmt.Errorf("invalid snapshot path")},
{"00bc614e", 0, fmt.Errorf("invalid snapshot path")},
{"", 0, fmt.Errorf("invalid snapshot path")},
} {
t.Run("", func(t *testing.T) {
index, err := internal.ParseSnapshotPath(tt.s)
if got, want := index, tt.index; got != want {
t.Errorf("index=%#v, want %#v", got, want)
} else if got, want := err, tt.err; !reflect.DeepEqual(got, want) {
t.Errorf("err=%#v, want %#v", got, want)
}
})
}
}
func TestParseWALSegmentPath(t *testing.T) {
for _, tt := range []struct {
s string
index int
offset int64
err error
}{
{"00bc614e/000003e8.wal.lz4", 12345678, 1000, nil},
{"00000000/00000000.wal", 0, 0, fmt.Errorf("invalid wal segment path")},
{"00000000/00000000", 0, 0, fmt.Errorf("invalid wal segment path")},
{"00000000/", 0, 0, fmt.Errorf("invalid wal segment path")},
{"00000000", 0, 0, fmt.Errorf("invalid wal segment path")},
{"", 0, 0, fmt.Errorf("invalid wal segment path")},
} {
t.Run("", func(t *testing.T) {
index, offset, err := internal.ParseWALSegmentPath(tt.s)
if got, want := index, tt.index; got != want {
t.Errorf("index=%#v, want %#v", got, want)
} else if got, want := offset, tt.offset; got != want {
t.Errorf("offset=%#v, want %#v", got, want)
} else if got, want := err, tt.err; !reflect.DeepEqual(got, want) {
t.Errorf("err=%#v, want %#v", got, want)
}
})
}
}

View File

@@ -1,4 +1,3 @@
//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris // +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
package internal package internal

View File

@@ -1,4 +1,3 @@
//go:build windows
// +build windows // +build windows
package internal package internal

View File

@@ -7,14 +7,10 @@ import (
"fmt" "fmt"
"io" "io"
"os" "os"
"path"
"path/filepath" "path/filepath"
"regexp"
"strconv" "strconv"
"strings" "strings"
"time" "time"
"github.com/mattn/go-sqlite3"
) )
// Naming constants. // Naming constants.
@@ -46,23 +42,12 @@ var (
var ( var (
// LogWriter is the destination writer for all logging. // LogWriter is the destination writer for all logging.
LogWriter = os.Stdout LogWriter = os.Stderr
// LogFlags are the flags passed to log.New(). // LogFlags are the flags passed to log.New().
LogFlags = 0 LogFlags = 0
) )
func init() {
sql.Register("litestream-sqlite3", &sqlite3.SQLiteDriver{
ConnectHook: func(conn *sqlite3.SQLiteConn) error {
if err := conn.SetFileControlInt("main", sqlite3.SQLITE_FCNTL_PERSIST_WAL, 1); err != nil {
return fmt.Errorf("cannot set file control: %w", err)
}
return nil
},
})
}
// SnapshotIterator represents an iterator over a collection of snapshot metadata. // SnapshotIterator represents an iterator over a collection of snapshot metadata.
type SnapshotIterator interface { type SnapshotIterator interface {
io.Closer io.Closer
@@ -228,13 +213,11 @@ func FilterSnapshotsAfter(a []SnapshotInfo, t time.Time) []SnapshotInfo {
// FindMinSnapshotByGeneration finds the snapshot with the lowest index in a generation. // FindMinSnapshotByGeneration finds the snapshot with the lowest index in a generation.
func FindMinSnapshotByGeneration(a []SnapshotInfo, generation string) *SnapshotInfo { func FindMinSnapshotByGeneration(a []SnapshotInfo, generation string) *SnapshotInfo {
var min *SnapshotInfo var min *SnapshotInfo
for i := range a { for _, snapshot := range a {
snapshot := &a[i]
if snapshot.Generation != generation { if snapshot.Generation != generation {
continue continue
} else if min == nil || snapshot.Index < min.Index { } else if min == nil || snapshot.Index < min.Index {
min = snapshot min = &snapshot
} }
} }
return min return min
@@ -316,6 +299,26 @@ func (p Pos) Truncate() Pos {
return Pos{Generation: p.Generation, Index: p.Index} return Pos{Generation: p.Generation, Index: p.Index}
} }
// ComparePos returns -1 if a is less than b, 1 if a is greater than b, and
// returns 0 if a and b are equal. Only index & offset are compared.
// Returns an error if generations are not equal.
func ComparePos(a, b Pos) (int, error) {
if a.Generation != b.Generation {
return 0, fmt.Errorf("generation mismatch")
}
if a.Index < b.Index {
return -1, nil
} else if a.Index > b.Index {
return 1, nil
} else if a.Offset < b.Offset {
return -1, nil
} else if a.Offset > b.Offset {
return 1, nil
}
return 0, nil
}
// Checksum computes a running SQLite checksum over a byte slice. // Checksum computes a running SQLite checksum over a byte slice.
func Checksum(bo binary.ByteOrder, s0, s1 uint32, b []byte) (uint32, uint32) { func Checksum(bo binary.ByteOrder, s0, s1 uint32, b []byte) (uint32, uint32) {
assert(len(b)%8 == 0, "misaligned checksum byte slice") assert(len(b)%8 == 0, "misaligned checksum byte slice")
@@ -407,139 +410,42 @@ func IsGenerationName(s string) bool {
return true return true
} }
// GenerationsPath returns the path to a generation root directory. // FormatIndex formats an index as an 8-character hex value.
func GenerationsPath(root string) string { func FormatIndex(index int) string {
return path.Join(root, "generations") return fmt.Sprintf("%08x", index)
} }
// GenerationPath returns the path to a generation's root directory. // ParseIndex parses a hex-formatted index into an integer.
func GenerationPath(root, generation string) (string, error) { func ParseIndex(s string) (int, error) {
dir := GenerationsPath(root) v, err := strconv.ParseUint(s, 16, 32)
if generation == "" {
return "", fmt.Errorf("generation required")
}
return path.Join(dir, generation), nil
}
// SnapshotsPath returns the path to a generation's snapshot directory.
func SnapshotsPath(root, generation string) (string, error) {
dir, err := GenerationPath(root, generation)
if err != nil { if err != nil {
return "", err return -1, fmt.Errorf("cannot parse index: %q", s)
} }
return path.Join(dir, "snapshots"), nil return int(v), nil
} }
// SnapshotPath returns the path to an uncompressed snapshot file. // FormatOffset formats an offset as an 8-character hex value.
func SnapshotPath(root, generation string, index int) (string, error) { func FormatOffset(offset int64) string {
dir, err := SnapshotsPath(root, generation) return fmt.Sprintf("%08x", offset)
}
// ParseOffset parses a hex-formatted offset into an integer.
func ParseOffset(s string) (int64, error) {
v, err := strconv.ParseInt(s, 16, 32)
if err != nil { if err != nil {
return "", err return -1, fmt.Errorf("cannot parse index: %q", s)
} }
return path.Join(dir, FormatSnapshotPath(index)), nil return v, nil
} }
// WALPath returns the path to a generation's WAL directory
func WALPath(root, generation string) (string, error) {
dir, err := GenerationPath(root, generation)
if err != nil {
return "", err
}
return path.Join(dir, "wal"), nil
}
// WALSegmentPath returns the path to a WAL segment file.
func WALSegmentPath(root, generation string, index int, offset int64) (string, error) {
dir, err := WALPath(root, generation)
if err != nil {
return "", err
}
return path.Join(dir, FormatWALSegmentPath(index, offset)), nil
}
// IsSnapshotPath returns true if s is a path to a snapshot file.
func IsSnapshotPath(s string) bool {
return snapshotPathRegex.MatchString(s)
}
// ParseSnapshotPath returns the index for the snapshot.
// Returns an error if the path is not a valid snapshot path.
func ParseSnapshotPath(s string) (index int, err error) {
s = filepath.Base(s)
a := snapshotPathRegex.FindStringSubmatch(s)
if a == nil {
return 0, fmt.Errorf("invalid snapshot path: %s", s)
}
i64, _ := strconv.ParseUint(a[1], 16, 64)
return int(i64), nil
}
// FormatSnapshotPath formats a snapshot filename with a given index.
func FormatSnapshotPath(index int) string {
assert(index >= 0, "snapshot index must be non-negative")
return fmt.Sprintf("%08x%s", index, SnapshotExt)
}
var snapshotPathRegex = regexp.MustCompile(`^([0-9a-f]{8})\.snapshot\.lz4$`)
// IsWALPath returns true if s is a path to a WAL file.
func IsWALPath(s string) bool {
return walPathRegex.MatchString(s)
}
// ParseWALPath returns the index for the WAL file.
// Returns an error if the path is not a valid WAL path.
func ParseWALPath(s string) (index int, err error) {
s = filepath.Base(s)
a := walPathRegex.FindStringSubmatch(s)
if a == nil {
return 0, fmt.Errorf("invalid wal path: %s", s)
}
i64, _ := strconv.ParseUint(a[1], 16, 64)
return int(i64), nil
}
// FormatWALPath formats a WAL filename with a given index.
func FormatWALPath(index int) string {
assert(index >= 0, "wal index must be non-negative")
return fmt.Sprintf("%08x%s", index, WALExt)
}
var walPathRegex = regexp.MustCompile(`^([0-9a-f]{8})\.wal$`)
// ParseWALSegmentPath returns the index & offset for the WAL segment file.
// Returns an error if the path is not a valid wal segment path.
func ParseWALSegmentPath(s string) (index int, offset int64, err error) {
s = filepath.Base(s)
a := walSegmentPathRegex.FindStringSubmatch(s)
if a == nil {
return 0, 0, fmt.Errorf("invalid wal segment path: %s", s)
}
i64, _ := strconv.ParseUint(a[1], 16, 64)
off64, _ := strconv.ParseUint(a[2], 16, 64)
return int(i64), int64(off64), nil
}
// FormatWALSegmentPath formats a WAL segment filename with a given index & offset.
func FormatWALSegmentPath(index int, offset int64) string {
assert(index >= 0, "wal index must be non-negative")
assert(offset >= 0, "wal offset must be non-negative")
return fmt.Sprintf("%08x_%08x%s", index, offset, WALSegmentExt)
}
var walSegmentPathRegex = regexp.MustCompile(`^([0-9a-f]{8})(?:_([0-9a-f]{8}))\.wal\.lz4$`)
// isHexChar returns true if ch is a lowercase hex character. // isHexChar returns true if ch is a lowercase hex character.
func isHexChar(ch rune) bool { func isHexChar(ch rune) bool {
return (ch >= '0' && ch <= '9') || (ch >= 'a' && ch <= 'f') return (ch >= '0' && ch <= '9') || (ch >= 'a' && ch <= 'f')
} }
// Tracef is used for low-level tracing.
var Tracef = func(format string, a ...interface{}) {}
func assert(condition bool, message string) { func assert(condition bool, message string) {
if !condition { if !condition {
panic("assertion failed: " + message) panic("assertion failed: " + message)

View File

@@ -40,104 +40,6 @@ func TestChecksum(t *testing.T) {
}) })
} }
func TestGenerationsPath(t *testing.T) {
t.Run("OK", func(t *testing.T) {
if got, want := litestream.GenerationsPath("foo"), "foo/generations"; got != want {
t.Fatalf("GenerationsPath()=%v, want %v", got, want)
}
})
t.Run("NoPath", func(t *testing.T) {
if got, want := litestream.GenerationsPath(""), "generations"; got != want {
t.Fatalf("GenerationsPath()=%v, want %v", got, want)
}
})
}
func TestGenerationPath(t *testing.T) {
t.Run("OK", func(t *testing.T) {
if got, err := litestream.GenerationPath("foo", "0123456701234567"); err != nil {
t.Fatal(err)
} else if want := "foo/generations/0123456701234567"; got != want {
t.Fatalf("GenerationPath()=%v, want %v", got, want)
}
})
t.Run("ErrNoGeneration", func(t *testing.T) {
if _, err := litestream.GenerationPath("foo", ""); err == nil || err.Error() != `generation required` {
t.Fatalf("expected error: %v", err)
}
})
}
func TestSnapshotsPath(t *testing.T) {
t.Run("OK", func(t *testing.T) {
if got, err := litestream.SnapshotsPath("foo", "0123456701234567"); err != nil {
t.Fatal(err)
} else if want := "foo/generations/0123456701234567/snapshots"; got != want {
t.Fatalf("SnapshotsPath()=%v, want %v", got, want)
}
})
t.Run("ErrNoGeneration", func(t *testing.T) {
if _, err := litestream.SnapshotsPath("foo", ""); err == nil || err.Error() != `generation required` {
t.Fatalf("unexpected error: %v", err)
}
})
}
func TestSnapshotPath(t *testing.T) {
t.Run("OK", func(t *testing.T) {
if got, err := litestream.SnapshotPath("foo", "0123456701234567", 1000); err != nil {
t.Fatal(err)
} else if want := "foo/generations/0123456701234567/snapshots/000003e8.snapshot.lz4"; got != want {
t.Fatalf("SnapshotPath()=%v, want %v", got, want)
}
})
t.Run("ErrNoGeneration", func(t *testing.T) {
if _, err := litestream.SnapshotPath("foo", "", 1000); err == nil || err.Error() != `generation required` {
t.Fatalf("unexpected error: %v", err)
}
})
}
func TestWALPath(t *testing.T) {
t.Run("OK", func(t *testing.T) {
if got, err := litestream.WALPath("foo", "0123456701234567"); err != nil {
t.Fatal(err)
} else if want := "foo/generations/0123456701234567/wal"; got != want {
t.Fatalf("WALPath()=%v, want %v", got, want)
}
})
t.Run("ErrNoGeneration", func(t *testing.T) {
if _, err := litestream.WALPath("foo", ""); err == nil || err.Error() != `generation required` {
t.Fatalf("unexpected error: %v", err)
}
})
}
func TestWALSegmentPath(t *testing.T) {
t.Run("OK", func(t *testing.T) {
if got, err := litestream.WALSegmentPath("foo", "0123456701234567", 1000, 1001); err != nil {
t.Fatal(err)
} else if want := "foo/generations/0123456701234567/wal/000003e8_000003e9.wal.lz4"; got != want {
t.Fatalf("WALPath()=%v, want %v", got, want)
}
})
t.Run("ErrNoGeneration", func(t *testing.T) {
if _, err := litestream.WALSegmentPath("foo", "", 1000, 0); err == nil || err.Error() != `generation required` {
t.Fatalf("unexpected error: %v", err)
}
})
}
func TestFindMinSnapshotByGeneration(t *testing.T) {
infos := []litestream.SnapshotInfo{
{Generation: "29cf4bced74e92ab", Index: 0},
{Generation: "5dfeb4aa03232553", Index: 24},
}
if got, want := litestream.FindMinSnapshotByGeneration(infos, "29cf4bced74e92ab"), &infos[0]; got != want {
t.Fatalf("info=%#v, want %#v", got, want)
}
}
func MustDecodeHexString(s string) []byte { func MustDecodeHexString(s string) []byte {
b, err := hex.DecodeString(s) b, err := hex.DecodeString(s)
if err != nil { if err != nil {

View File

@@ -2,11 +2,11 @@ package litestream
import ( import (
"context" "context"
"encoding/binary"
"fmt" "fmt"
"hash/crc64" "hash/crc64"
"io" "io"
"log/slog" "io/ioutil"
"log"
"math" "math"
"os" "os"
"path/filepath" "path/filepath"
@@ -14,7 +14,6 @@ import (
"sync" "sync"
"time" "time"
"filippo.io/age"
"github.com/benbjohnson/litestream/internal" "github.com/benbjohnson/litestream/internal"
"github.com/pierrec/lz4/v4" "github.com/pierrec/lz4/v4"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
@@ -68,9 +67,7 @@ type Replica struct {
// Set to false if replica is being used synchronously (such as in tests). // Set to false if replica is being used synchronously (such as in tests).
MonitorEnabled bool MonitorEnabled bool
// Encryption identities and recipients Logger *log.Logger
AgeIdentities []age.Identity
AgeRecipients []age.Recipient
} }
func NewReplica(db *DB, name string) *Replica { func NewReplica(db *DB, name string) *Replica {
@@ -85,6 +82,12 @@ func NewReplica(db *DB, name string) *Replica {
MonitorEnabled: true, MonitorEnabled: true,
} }
prefix := fmt.Sprintf("%s: ", r.Name())
if db != nil {
prefix = fmt.Sprintf("%s(%s): ", db.Path(), r.Name())
}
r.Logger = log.New(LogWriter, prefix, LogFlags)
return r return r
} }
@@ -96,15 +99,6 @@ func (r *Replica) Name() string {
return r.name return r.name
} }
// Logger returns the DB sub-logger for this replica.
func (r *Replica) Logger() *slog.Logger {
logger := slog.Default()
if r.db != nil {
logger = r.db.Logger
}
return logger.With("replica", r.Name())
}
// DB returns a reference to the database the replica is attached to, if any. // DB returns a reference to the database the replica is attached to, if any.
func (r *Replica) DB() *DB { return r.db } func (r *Replica) DB() *DB { return r.db }
@@ -162,183 +156,174 @@ func (r *Replica) Sync(ctx context.Context) (err error) {
}() }()
// Find current position of database. // Find current position of database.
dpos, err := r.db.Pos() dpos := r.db.Pos()
if err != nil { if dpos.IsZero() {
return fmt.Errorf("cannot determine current generation: %w", err)
} else if dpos.IsZero() {
return fmt.Errorf("no generation, waiting for data") return fmt.Errorf("no generation, waiting for data")
} }
generation := dpos.Generation generation := dpos.Generation
r.Logger().Debug("replica sync", "position", dpos.String()) // Create snapshot if no snapshots exist for generation.
snapshotN, err := r.snapshotN(generation)
// Create a new snapshot and update the current replica position if if err != nil {
// the generation on the database has changed. return err
if r.Pos().Generation != generation { } else if snapshotN == 0 {
// Create snapshot if no snapshots exist for generation. if info, err := r.Snapshot(ctx); err != nil {
snapshotN, err := r.snapshotN(ctx, generation)
if err != nil {
return err return err
} else if snapshotN == 0 { } else if info.Generation != generation {
if info, err := r.Snapshot(ctx); err != nil { return fmt.Errorf("generation changed during snapshot, exiting sync")
return err
} else if info.Generation != generation {
return fmt.Errorf("generation changed during snapshot, exiting sync")
}
} }
snapshotN = 1
}
replicaSnapshotTotalGaugeVec.WithLabelValues(r.db.Path(), r.Name()).Set(float64(snapshotN))
// Determine position, if necessary.
if r.Pos().Generation != generation {
pos, err := r.calcPos(ctx, generation) pos, err := r.calcPos(ctx, generation)
if err != nil { if err != nil {
return fmt.Errorf("cannot determine replica position: %s", err) return fmt.Errorf("cannot determine replica position: %s", err)
} }
r.Logger().Debug("replica sync: calc new pos", "position", pos.String())
r.mu.Lock() r.mu.Lock()
r.pos = pos r.pos = pos
r.mu.Unlock() r.mu.Unlock()
} }
// Read all WAL files since the last position. // Read all WAL files since the last position.
for { if err = r.syncWAL(ctx); err != nil {
if err = r.syncWAL(ctx); err == io.EOF { return err
break
} else if err != nil {
return err
}
} }
return nil return nil
} }
func (r *Replica) syncWAL(ctx context.Context) (err error) { func (r *Replica) syncWAL(ctx context.Context) (err error) {
rd, err := r.db.ShadowWALReader(r.Pos()) pos := r.Pos()
if err == io.EOF {
itr, err := r.db.WALSegments(ctx, pos.Generation)
if err != nil {
return err return err
} else if err != nil {
return fmt.Errorf("replica wal reader: %w", err)
} }
defer rd.Close() defer itr.Close()
// Group segments by index.
var segments [][]WALSegmentInfo
for itr.Next() {
info := itr.WALSegment()
if cmp, err := ComparePos(pos, info.Pos()); err != nil {
return fmt.Errorf("compare pos: %w", err)
} else if cmp == 1 {
continue // already processed, skip
}
// Start a new chunk if index has changed.
if len(segments) == 0 || segments[len(segments)-1][0].Index != info.Index {
segments = append(segments, []WALSegmentInfo{info})
continue
}
// Add segment to the end of the current index, if matching.
segments[len(segments)-1] = append(segments[len(segments)-1], info)
}
// Write out segments to replica by index so they can be combined.
for i := range segments {
if err := r.writeIndexSegments(ctx, segments[i]); err != nil {
return fmt.Errorf("write index segments: index=%d err=%w", segments[i][0].Index, err)
}
}
return nil
}
func (r *Replica) writeIndexSegments(ctx context.Context, segments []WALSegmentInfo) (err error) {
assert(len(segments) > 0, "segments required for replication")
// First segment position must be equal to last replica position or
// the start of the next index.
if pos := r.Pos(); pos != segments[0].Pos() {
nextIndexPos := pos.Truncate()
nextIndexPos.Index++
if nextIndexPos != segments[0].Pos() {
return fmt.Errorf("replica skipped position: replica=%s initial=%s", pos, segments[0].Pos())
}
}
pos := segments[0].Pos()
initialPos := pos
// Copy shadow WAL to client write via io.Pipe(). // Copy shadow WAL to client write via io.Pipe().
pr, pw := io.Pipe() pr, pw := io.Pipe()
defer func() { _ = pw.CloseWithError(err) }() defer func() { _ = pw.CloseWithError(err) }()
// Obtain initial position from shadow reader.
// It may have moved to the next index if previous position was at the end.
pos := rd.Pos()
initialPos := pos
startTime := time.Now()
var bytesWritten int
logger := r.Logger()
logger.Info("write wal segment", "position", initialPos.String())
// Copy through pipe into client from the starting position. // Copy through pipe into client from the starting position.
var g errgroup.Group var g errgroup.Group
g.Go(func() error { g.Go(func() error {
_, err := r.Client.WriteWALSegment(ctx, pos, pr) _, err := r.Client.WriteWALSegment(ctx, initialPos, pr)
// Always close pipe reader to signal writers.
if e := pr.CloseWithError(err); err == nil {
return e
}
return err return err
}) })
var ew io.WriteCloser = pw
// Add encryption if we have recipients.
if len(r.AgeRecipients) > 0 {
var err error
ew, err = age.Encrypt(pw, r.AgeRecipients...)
if err != nil {
return err
}
defer ew.Close()
}
// Wrap writer to LZ4 compress. // Wrap writer to LZ4 compress.
zw := lz4.NewWriter(ew) zw := lz4.NewWriter(pw)
// Track total WAL bytes written to replica client. // Write each segment out to the replica.
walBytesCounter := replicaWALBytesCounterVec.WithLabelValues(r.db.Path(), r.Name()) for _, info := range segments {
if err := func() error {
// Ensure segments are in order and no bytes are skipped.
if pos != info.Pos() {
return fmt.Errorf("non-contiguous segment: expected=%s current=%s", pos, info.Pos())
}
// Copy header if at offset zero. rc, err := r.db.WALSegmentReader(ctx, info.Pos())
var psalt uint64 // previous salt value if err != nil {
if pos := rd.Pos(); pos.Offset == 0 { return err
buf := make([]byte, WALHeaderSize) }
if _, err := io.ReadFull(rd, buf); err != nil { defer rc.Close()
return err
n, err := io.Copy(zw, lz4.NewReader(rc))
if err != nil {
return err
} else if err := rc.Close(); err != nil {
return err
}
// Track last position written.
pos = info.Pos()
pos.Offset += n
return nil
}(); err != nil {
return fmt.Errorf("wal segment: pos=%s err=%w", info.Pos(), err)
} }
psalt = binary.BigEndian.Uint64(buf[16:24])
n, err := zw.Write(buf)
if err != nil {
return err
}
walBytesCounter.Add(float64(n))
bytesWritten += n
} }
// Copy frames. // Flush LZ4 writer, close pipe, and wait for write to finish.
for {
pos := rd.Pos()
assert(pos.Offset == frameAlign(pos.Offset, r.db.pageSize), "shadow wal reader not frame aligned")
buf := make([]byte, WALFrameHeaderSize+r.db.pageSize)
if _, err := io.ReadFull(rd, buf); err == io.EOF {
break
} else if err != nil {
return err
}
// Verify salt matches the previous frame/header read.
salt := binary.BigEndian.Uint64(buf[8:16])
if psalt != 0 && psalt != salt {
return fmt.Errorf("replica salt mismatch: %s", pos.String())
}
psalt = salt
n, err := zw.Write(buf)
if err != nil {
return err
}
walBytesCounter.Add(float64(n))
bytesWritten += n
}
// Flush LZ4 writer, encryption writer and close pipe.
if err := zw.Close(); err != nil { if err := zw.Close(); err != nil {
return err return err
} else if err := ew.Close(); err != nil {
return err
} else if err := pw.Close(); err != nil { } else if err := pw.Close(); err != nil {
return err return err
} } else if err := g.Wait(); err != nil {
return err
// Wait for client to finish write.
if err := g.Wait(); err != nil {
return fmt.Errorf("client write: %w", err)
} }
// Save last replicated position. // Save last replicated position.
r.mu.Lock() r.mu.Lock()
r.pos = rd.Pos() r.pos = pos
r.mu.Unlock() r.mu.Unlock()
// Track current position replicaWALBytesCounterVec.WithLabelValues(r.db.Path(), r.Name()).Add(float64(pos.Offset - initialPos.Offset))
replicaWALIndexGaugeVec.WithLabelValues(r.db.Path(), r.Name()).Set(float64(rd.Pos().Index))
replicaWALOffsetGaugeVec.WithLabelValues(r.db.Path(), r.Name()).Set(float64(rd.Pos().Offset)) // Track total WAL bytes written to replica client.
replicaWALIndexGaugeVec.WithLabelValues(r.db.Path(), r.Name()).Set(float64(pos.Index))
replicaWALOffsetGaugeVec.WithLabelValues(r.db.Path(), r.Name()).Set(float64(pos.Offset))
r.Logger.Printf("wal segment written: %s sz=%d", initialPos, pos.Offset-initialPos.Offset)
logger.Info("wal segment written", "position", initialPos.String(), "elapsed", time.Since(startTime).String(), "sz", bytesWritten)
return nil return nil
} }
// snapshotN returns the number of snapshots for a generation. // snapshotN returns the number of snapshots for a generation.
func (r *Replica) snapshotN(ctx context.Context, generation string) (int, error) { func (r *Replica) snapshotN(generation string) (int, error) {
itr, err := r.Client.Snapshots(ctx, generation) itr, err := r.Client.Snapshots(context.Background(), generation)
if err != nil { if err != nil {
return 0, err return 0, err
} }
@@ -376,16 +361,7 @@ func (r *Replica) calcPos(ctx context.Context, generation string) (pos Pos, err
} }
defer rd.Close() defer rd.Close()
if len(r.AgeIdentities) > 0 { n, err := io.Copy(ioutil.Discard, lz4.NewReader(rd))
drd, err := age.Decrypt(rd, r.AgeIdentities...)
if err != nil {
return pos, err
}
rd = io.NopCloser(drd)
}
n, err := io.Copy(io.Discard, lz4.NewReader(rd))
if err != nil { if err != nil {
return pos, err return pos, err
} }
@@ -483,15 +459,11 @@ func (r *Replica) Snapshot(ctx context.Context) (info SnapshotInfo, err error) {
defer r.muf.Unlock() defer r.muf.Unlock()
// Issue a passive checkpoint to flush any pages to disk before snapshotting. // Issue a passive checkpoint to flush any pages to disk before snapshotting.
if err := r.db.Checkpoint(ctx, CheckpointModePassive); err != nil { if _, err := r.db.db.ExecContext(ctx, `PRAGMA wal_checkpoint(PASSIVE);`); err != nil {
return info, fmt.Errorf("pre-snapshot checkpoint: %w", err) return info, fmt.Errorf("pre-snapshot checkpoint: %w", err)
} }
// Prevent internal checkpoints during snapshot. // Acquire a read lock on the database during snapshot to prevent checkpoints.
r.db.BeginSnapshot()
defer r.db.EndSnapshot()
// Acquire a read lock on the database during snapshot to prevent external checkpoints.
tx, err := r.db.db.Begin() tx, err := r.db.db.Begin()
if err != nil { if err != nil {
return info, err return info, err
@@ -502,10 +474,8 @@ func (r *Replica) Snapshot(ctx context.Context) (info SnapshotInfo, err error) {
defer func() { _ = tx.Rollback() }() defer func() { _ = tx.Rollback() }()
// Obtain current position. // Obtain current position.
pos, err := r.db.Pos() pos := r.db.Pos()
if err != nil { if pos.IsZero() {
return info, fmt.Errorf("cannot determine db position: %w", err)
} else if pos.IsZero() {
return info, ErrNoGeneration return info, ErrNoGeneration
} }
@@ -525,23 +495,7 @@ func (r *Replica) Snapshot(ctx context.Context) (info SnapshotInfo, err error) {
// Copy the database file to the LZ4 writer in a separate goroutine. // Copy the database file to the LZ4 writer in a separate goroutine.
var g errgroup.Group var g errgroup.Group
g.Go(func() error { g.Go(func() error {
// We need to ensure the pipe is closed. zr := lz4.NewWriter(pw)
defer pw.Close()
var wc io.WriteCloser = pw
// Add encryption if we have recipients.
if len(r.AgeRecipients) > 0 {
var err error
wc, err = age.Encrypt(pw, r.AgeRecipients...)
if err != nil {
pw.CloseWithError(err)
return err
}
defer wc.Close()
}
zr := lz4.NewWriter(wc)
defer zr.Close() defer zr.Close()
if _, err := io.Copy(zr, r.f); err != nil { if _, err := io.Copy(zr, r.f); err != nil {
@@ -551,13 +505,9 @@ func (r *Replica) Snapshot(ctx context.Context) (info SnapshotInfo, err error) {
pw.CloseWithError(err) pw.CloseWithError(err)
return err return err
} }
return wc.Close() return pw.Close()
}) })
logger := r.Logger()
logger.Info("write snapshot", "position", pos.String())
startTime := time.Now()
// Delegate write to client & wait for writer goroutine to finish. // Delegate write to client & wait for writer goroutine to finish.
if info, err = r.Client.WriteSnapshot(ctx, pos.Generation, pos.Index, pr); err != nil { if info, err = r.Client.WriteSnapshot(ctx, pos.Generation, pos.Index, pr); err != nil {
return info, err return info, err
@@ -565,7 +515,8 @@ func (r *Replica) Snapshot(ctx context.Context) (info SnapshotInfo, err error) {
return info, err return info, err
} }
logger.Info("snapshot written", "position", pos.String(), "elapsed", time.Since(startTime).String(), "sz", info.Size) r.Logger.Printf("snapshot written %s/%08x", pos.Generation, pos.Index)
return info, nil return info, nil
} }
@@ -632,7 +583,7 @@ func (r *Replica) deleteSnapshotsBeforeIndex(ctx context.Context, generation str
if err := r.Client.DeleteSnapshot(ctx, info.Generation, info.Index); err != nil { if err := r.Client.DeleteSnapshot(ctx, info.Generation, info.Index); err != nil {
return fmt.Errorf("delete snapshot %s/%08x: %w", info.Generation, info.Index, err) return fmt.Errorf("delete snapshot %s/%08x: %w", info.Generation, info.Index, err)
} }
r.Logger().Info("snapshot deleted", "generation", generation, "index", index) r.Logger.Printf("snapshot deleted %s/%08x", generation, index)
} }
return itr.Close() return itr.Close()
@@ -665,7 +616,10 @@ func (r *Replica) deleteWALSegmentsBeforeIndex(ctx context.Context, generation s
return fmt.Errorf("delete wal segments: %w", err) return fmt.Errorf("delete wal segments: %w", err)
} }
r.Logger().Info("wal segmented deleted before", "generation", generation, "index", index, "n", len(a)) for _, pos := range a {
r.Logger.Printf("wal segmented deleted: %s", pos)
}
return nil return nil
} }
@@ -701,7 +655,7 @@ func (r *Replica) monitor(ctx context.Context) {
// Synchronize the shadow wal into the replication directory. // Synchronize the shadow wal into the replication directory.
if err := r.Sync(ctx); err != nil { if err := r.Sync(ctx); err != nil {
r.Logger().Error("monitor error", "error", err) r.Logger.Printf("monitor error: %s", err)
continue continue
} }
} }
@@ -729,7 +683,7 @@ func (r *Replica) retainer(ctx context.Context) {
return return
case <-ticker.C: case <-ticker.C:
if err := r.EnforceRetention(ctx); err != nil { if err := r.EnforceRetention(ctx); err != nil {
r.Logger().Error("retainer error", "error", err) r.Logger.Printf("retainer error: %s", err)
continue continue
} }
} }
@@ -742,31 +696,6 @@ func (r *Replica) snapshotter(ctx context.Context) {
return return
} }
logger := r.Logger()
if pos, err := r.db.Pos(); err != nil {
logger.Error("snapshotter cannot determine generation", "error", err)
} else if !pos.IsZero() {
if snapshot, err := r.maxSnapshot(ctx, pos.Generation); err != nil {
logger.Error("snapshotter cannot determine latest snapshot", "error", err)
} else if snapshot != nil {
nextSnapshot := r.SnapshotInterval - time.Since(snapshot.CreatedAt)
if nextSnapshot < 0 {
nextSnapshot = 0
}
logger.Info("snapshot interval adjusted", "previous", snapshot.CreatedAt.Format(time.RFC3339), "next", nextSnapshot.String())
select {
case <-ctx.Done():
return
case <-time.After(nextSnapshot):
if _, err := r.Snapshot(ctx); err != nil && err != ErrNoGeneration {
logger.Error("snapshotter error", "error", err)
}
}
}
}
ticker := time.NewTicker(r.SnapshotInterval) ticker := time.NewTicker(r.SnapshotInterval)
defer ticker.Stop() defer ticker.Stop()
@@ -776,7 +705,7 @@ func (r *Replica) snapshotter(ctx context.Context) {
return return
case <-ticker.C: case <-ticker.C:
if _, err := r.Snapshot(ctx); err != nil && err != ErrNoGeneration { if _, err := r.Snapshot(ctx); err != nil && err != ErrNoGeneration {
r.Logger().Error("snapshotter error", "error", err) r.Logger.Printf("snapshotter error: %s", err)
continue continue
} }
} }
@@ -804,7 +733,7 @@ func (r *Replica) validator(ctx context.Context) {
return return
case <-ticker.C: case <-ticker.C:
if err := r.Validate(ctx); err != nil { if err := r.Validate(ctx); err != nil {
r.Logger().Error("validation error", "error", err) r.Logger.Printf("validation error: %s", err)
continue continue
} }
} }
@@ -817,7 +746,7 @@ func (r *Replica) Validate(ctx context.Context) error {
db := r.DB() db := r.DB()
// Restore replica to a temporary directory. // Restore replica to a temporary directory.
tmpdir, err := os.MkdirTemp("", "*-litestream") tmpdir, err := ioutil.TempDir("", "*-litestream")
if err != nil { if err != nil {
return err return err
} }
@@ -841,6 +770,7 @@ func (r *Replica) Validate(ctx context.Context) error {
ReplicaName: r.Name(), ReplicaName: r.Name(),
Generation: pos.Generation, Generation: pos.Generation,
Index: pos.Index - 1, Index: pos.Index - 1,
Logger: log.New(os.Stderr, "", 0),
}); err != nil { }); err != nil {
return fmt.Errorf("cannot restore: %w", err) return fmt.Errorf("cannot restore: %w", err)
} }
@@ -865,7 +795,7 @@ func (r *Replica) Validate(ctx context.Context) error {
if mismatch { if mismatch {
status = "mismatch" status = "mismatch"
} }
r.Logger().Info("validator", "status", status, "db", fmt.Sprintf("%016x", chksum0), "replica", fmt.Sprintf("%016x", chksum1), "position", pos.String()) r.Logger.Printf("validator: status=%s db=%016x replica=%016x pos=%s", status, chksum0, chksum1, pos)
// Validate checksums match. // Validate checksums match.
if mismatch { if mismatch {
@@ -905,7 +835,7 @@ func (r *Replica) waitForReplica(ctx context.Context, pos Pos) error {
// Obtain current position of replica, check if past target position. // Obtain current position of replica, check if past target position.
curr := r.Pos() curr := r.Pos()
if curr.IsZero() { if curr.IsZero() {
r.Logger().Info("validator: no replica position available") r.Logger.Printf("validator: no replica position available")
continue continue
} }
@@ -961,7 +891,6 @@ func (r *Replica) GenerationTimeBounds(ctx context.Context, generation string) (
} }
defer sitr.Close() defer sitr.Close()
minIndex, maxIndex := -1, -1
for sitr.Next() { for sitr.Next() {
info := sitr.Snapshot() info := sitr.Snapshot()
if createdAt.IsZero() || info.CreatedAt.Before(createdAt) { if createdAt.IsZero() || info.CreatedAt.Before(createdAt) {
@@ -970,12 +899,6 @@ func (r *Replica) GenerationTimeBounds(ctx context.Context, generation string) (
if updatedAt.IsZero() || info.CreatedAt.After(updatedAt) { if updatedAt.IsZero() || info.CreatedAt.After(updatedAt) {
updatedAt = info.CreatedAt updatedAt = info.CreatedAt
} }
if minIndex == -1 || info.Index < minIndex {
minIndex = info.Index
}
if info.Index > maxIndex {
maxIndex = info.Index
}
} }
if err := sitr.Close(); err != nil { if err := sitr.Close(); err != nil {
return createdAt, updatedAt, err return createdAt, updatedAt, err
@@ -990,9 +913,6 @@ func (r *Replica) GenerationTimeBounds(ctx context.Context, generation string) (
for witr.Next() { for witr.Next() {
info := witr.WALSegment() info := witr.WALSegment()
if info.Index < minIndex || info.Index > maxIndex {
continue
}
if createdAt.IsZero() || info.CreatedAt.Before(createdAt) { if createdAt.IsZero() || info.CreatedAt.Before(createdAt) {
createdAt = info.CreatedAt createdAt = info.CreatedAt
} }
@@ -1067,6 +987,17 @@ func (r *Replica) Restore(ctx context.Context, opt RestoreOptions) (err error) {
return fmt.Errorf("cannot specify index & timestamp to restore") return fmt.Errorf("cannot specify index & timestamp to restore")
} }
// Ensure logger exists.
logger := opt.Logger
if logger == nil {
logger = log.New(ioutil.Discard, "", 0)
}
logPrefix := r.Name()
if db := r.DB(); db != nil {
logPrefix = fmt.Sprintf("%s(%s)", db.Path(), r.Name())
}
// Ensure output path does not already exist. // Ensure output path does not already exist.
if _, err := os.Stat(opt.OutputPath); err == nil { if _, err := os.Stat(opt.OutputPath); err == nil {
return fmt.Errorf("cannot restore, output path already exists: %s", opt.OutputPath) return fmt.Errorf("cannot restore, output path already exists: %s", opt.OutputPath)
@@ -1087,7 +1018,7 @@ func (r *Replica) Restore(ctx context.Context, opt RestoreOptions) (err error) {
} }
// Compute list of offsets for each WAL index. // Compute list of offsets for each WAL index.
walSegmentMap, err := r.walSegmentMap(ctx, opt.Generation, minWALIndex, opt.Index, opt.Timestamp) walSegmentMap, err := r.walSegmentMap(ctx, opt.Generation, opt.Index, opt.Timestamp)
if err != nil { if err != nil {
return fmt.Errorf("cannot find max wal index for restore: %w", err) return fmt.Errorf("cannot find max wal index for restore: %w", err)
} }
@@ -1101,7 +1032,7 @@ func (r *Replica) Restore(ctx context.Context, opt RestoreOptions) (err error) {
} }
// Ensure that we found the specific index, if one was specified. // Ensure that we found the specific index, if one was specified.
if opt.Index != math.MaxInt32 && opt.Index != maxWALIndex { if opt.Index != math.MaxInt32 && opt.Index != opt.Index {
return fmt.Errorf("unable to locate index %d in generation %q, highest index was %d", opt.Index, opt.Generation, maxWALIndex) return fmt.Errorf("unable to locate index %d in generation %q, highest index was %d", opt.Index, opt.Generation, maxWALIndex)
} }
@@ -1113,19 +1044,19 @@ func (r *Replica) Restore(ctx context.Context, opt RestoreOptions) (err error) {
tmpPath := opt.OutputPath + ".tmp" tmpPath := opt.OutputPath + ".tmp"
// Copy snapshot to output path. // Copy snapshot to output path.
r.Logger().Info("restoring snapshot", "generation", opt.Generation, "index", minWALIndex, "path", tmpPath) logger.Printf("%s: restoring snapshot %s/%08x to %s", logPrefix, opt.Generation, minWALIndex, tmpPath)
if err := r.restoreSnapshot(ctx, pos.Generation, pos.Index, tmpPath); err != nil { if err := r.restoreSnapshot(ctx, pos.Generation, pos.Index, tmpPath); err != nil {
return fmt.Errorf("cannot restore snapshot: %w", err) return fmt.Errorf("cannot restore snapshot: %w", err)
} }
// If no WAL files available, move snapshot to final path & exit early. // If no WAL files available, move snapshot to final path & exit early.
if snapshotOnly { if snapshotOnly {
r.Logger().Info("snapshot only, finalizing database") logger.Printf("%s: snapshot only, finalizing database", logPrefix)
return os.Rename(tmpPath, opt.OutputPath) return os.Rename(tmpPath, opt.OutputPath)
} }
// Begin processing WAL files. // Begin processing WAL files.
r.Logger().Info("restoring wal files", "generation", opt.Generation, "index_min", minWALIndex, "index_max", maxWALIndex) logger.Printf("%s: restoring wal files: generation=%s index=[%08x,%08x]", logPrefix, opt.Generation, minWALIndex, maxWALIndex)
// Fill input channel with all WAL indexes to be loaded in order. // Fill input channel with all WAL indexes to be loaded in order.
// Verify every index has at least one offset. // Verify every index has at least one offset.
@@ -1181,9 +1112,9 @@ func (r *Replica) Restore(ctx context.Context, opt RestoreOptions) (err error) {
return err return err
} }
r.Logger().Info("downloaded wal", logger.Printf("%s: downloaded wal %s/%08x elapsed=%s",
"generation", opt.Generation, "index", index, logPrefix, opt.Generation, index,
"elapsed", time.Since(startTime).String(), time.Since(startTime).String(),
) )
} }
} }
@@ -1210,7 +1141,10 @@ func (r *Replica) Restore(ctx context.Context, opt RestoreOptions) (err error) {
if err = applyWAL(ctx, index, tmpPath); err != nil { if err = applyWAL(ctx, index, tmpPath); err != nil {
return fmt.Errorf("cannot apply wal: %w", err) return fmt.Errorf("cannot apply wal: %w", err)
} }
r.Logger().Info("applied wal", "generation", opt.Generation, "index", index, "elapsed", time.Since(startTime).String()) logger.Printf("%s: applied wal %s/%08x elapsed=%s",
logPrefix, opt.Generation, index,
time.Since(startTime).String(),
)
} }
// Ensure all goroutines finish. All errors should have been handled during // Ensure all goroutines finish. All errors should have been handled during
@@ -1220,7 +1154,7 @@ func (r *Replica) Restore(ctx context.Context, opt RestoreOptions) (err error) {
} }
// Copy file to final location. // Copy file to final location.
r.Logger().Info("renaming database from temporary location") logger.Printf("%s: renaming database from temporary location", logPrefix)
if err := os.Rename(tmpPath, opt.OutputPath); err != nil { if err := os.Rename(tmpPath, opt.OutputPath); err != nil {
return err return err
} }
@@ -1281,7 +1215,7 @@ func (r *Replica) SnapshotIndexByIndex(ctx context.Context, generation string, i
} }
// Use snapshot if it newer. // Use snapshot if it newer.
if snapshotIndex == -1 || snapshot.Index >= snapshotIndex { if snapshotIndex == -1 || snapshotIndex >= snapshotIndex {
snapshotIndex = snapshot.Index snapshotIndex = snapshot.Index
} }
} }
@@ -1295,29 +1229,22 @@ func (r *Replica) SnapshotIndexByIndex(ctx context.Context, generation string, i
// walSegmentMap returns a map of WAL indices to their segments. // walSegmentMap returns a map of WAL indices to their segments.
// Filters by a max timestamp or a max index. // Filters by a max timestamp or a max index.
func (r *Replica) walSegmentMap(ctx context.Context, generation string, minIndex, maxIndex int, maxTimestamp time.Time) (map[int][]int64, error) { func (r *Replica) walSegmentMap(ctx context.Context, generation string, maxIndex int, maxTimestamp time.Time) (map[int][]int64, error) {
itr, err := r.Client.WALSegments(ctx, generation) itr, err := r.Client.WALSegments(ctx, generation)
if err != nil { if err != nil {
return nil, err return nil, err
} }
defer itr.Close() defer itr.Close()
a := []WALSegmentInfo{}
for itr.Next() {
a = append(a, itr.WALSegment())
}
sort.Sort(WALSegmentInfoSlice(a))
m := make(map[int][]int64) m := make(map[int][]int64)
for _, info := range a { for itr.Next() {
info := itr.WALSegment()
// Exit if we go past the max timestamp or index. // Exit if we go past the max timestamp or index.
if !maxTimestamp.IsZero() && info.CreatedAt.After(maxTimestamp) { if !maxTimestamp.IsZero() && info.CreatedAt.After(maxTimestamp) {
break // after max timestamp, skip break // after max timestamp, skip
} else if info.Index > maxIndex { } else if info.Index > maxIndex {
break // after max index, skip break // after max index, skip
} else if info.Index < minIndex {
continue // before min index, continue
} }
// Verify offsets are added in order. // Verify offsets are added in order.
@@ -1358,15 +1285,6 @@ func (r *Replica) restoreSnapshot(ctx context.Context, generation string, index
} }
defer rd.Close() defer rd.Close()
if len(r.AgeIdentities) > 0 {
drd, err := age.Decrypt(rd, r.AgeIdentities...)
if err != nil {
return err
}
rd = io.NopCloser(drd)
}
if _, err := io.Copy(f, lz4.NewReader(rd)); err != nil { if _, err := io.Copy(f, lz4.NewReader(rd)); err != nil {
return err return err
} else if err := f.Sync(); err != nil { } else if err := f.Sync(); err != nil {
@@ -1393,16 +1311,6 @@ func (r *Replica) downloadWAL(ctx context.Context, generation string, index int,
return err return err
} }
defer rd.Close() defer rd.Close()
if len(r.AgeIdentities) > 0 {
drd, err := age.Decrypt(rd, r.AgeIdentities...)
if err != nil {
return err
}
rd = io.NopCloser(drd)
}
readers = append(readers, lz4.NewReader(rd)) readers = append(readers, lz4.NewReader(rd))
} }
@@ -1424,6 +1332,13 @@ func (r *Replica) downloadWAL(ctx context.Context, generation string, index int,
// Replica metrics. // Replica metrics.
var ( var (
replicaSnapshotTotalGaugeVec = promauto.NewGaugeVec(prometheus.GaugeOpts{
Namespace: "litestream",
Subsystem: "replica",
Name: "snapshot_total",
Help: "The current number of snapshots",
}, []string{"db", "name"})
replicaWALBytesCounterVec = promauto.NewCounterVec(prometheus.CounterOpts{ replicaWALBytesCounterVec = promauto.NewCounterVec(prometheus.CounterOpts{
Namespace: "litestream", Namespace: "litestream",
Subsystem: "replica", Subsystem: "replica",

View File

@@ -10,13 +10,13 @@ type ReplicaClient interface {
// Returns the type of client. // Returns the type of client.
Type() string Type() string
// Returns a list of available generations. Order is undefined. // Returns a list of available generations.
Generations(ctx context.Context) ([]string, error) Generations(ctx context.Context) ([]string, error)
// Deletes all snapshots & WAL segments within a generation. // Deletes all snapshots & WAL segments within a generation.
DeleteGeneration(ctx context.Context, generation string) error DeleteGeneration(ctx context.Context, generation string) error
// Returns an iterator of all snapshots within a generation on the replica. Order is undefined. // Returns an iterator of all snapshots within a generation on the replica.
Snapshots(ctx context.Context, generation string) (SnapshotIterator, error) Snapshots(ctx context.Context, generation string) (SnapshotIterator, error)
// Writes LZ4 compressed snapshot data to the replica at a given index // Writes LZ4 compressed snapshot data to the replica at a given index
@@ -31,7 +31,7 @@ type ReplicaClient interface {
// the snapshot does not exist. // the snapshot does not exist.
SnapshotReader(ctx context.Context, generation string, index int) (io.ReadCloser, error) SnapshotReader(ctx context.Context, generation string, index int) (io.ReadCloser, error)
// Returns an iterator of all WAL segments within a generation on the replica. Order is undefined. // Returns an iterator of all WAL segments within a generation on the replica.
WALSegments(ctx context.Context, generation string) (WALSegmentIterator, error) WALSegments(ctx context.Context, generation string) (WALSegmentIterator, error)
// Writes an LZ4 compressed WAL segment at a given position. // Writes an LZ4 compressed WAL segment at a given position.

View File

@@ -4,7 +4,7 @@ import (
"context" "context"
"flag" "flag"
"fmt" "fmt"
"io" "io/ioutil"
"math/rand" "math/rand"
"os" "os"
"path" "path"
@@ -12,6 +12,7 @@ import (
"sort" "sort"
"strings" "strings"
"testing" "testing"
"time"
"github.com/benbjohnson/litestream" "github.com/benbjohnson/litestream"
"github.com/benbjohnson/litestream/abs" "github.com/benbjohnson/litestream/abs"
@@ -21,6 +22,10 @@ import (
"github.com/benbjohnson/litestream/sftp" "github.com/benbjohnson/litestream/sftp"
) )
func init() {
rand.Seed(time.Now().UnixNano())
}
var ( var (
// Enables integration tests. // Enables integration tests.
integration = flag.String("integration", "file", "") integration = flag.String("integration", "file", "")
@@ -75,14 +80,10 @@ func TestReplicaClient_Generations(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
// Fetch and sort generations. // Verify returned generations.
got, err := c.Generations(context.Background()) if got, err := c.Generations(context.Background()); err != nil {
if err != nil {
t.Fatal(err) t.Fatal(err)
} } else if want := []string{"155fe292f8333c72", "5efbd8d042012dca", "b16ddcf5c697540f"}; !reflect.DeepEqual(got, want) {
sort.Strings(got)
if want := []string{"155fe292f8333c72", "5efbd8d042012dca", "b16ddcf5c697540f"}; !reflect.DeepEqual(got, want) {
t.Fatalf("Generations()=%v, want %v", got, want) t.Fatalf("Generations()=%v, want %v", got, want)
} }
}) })
@@ -176,7 +177,7 @@ func TestReplicaClient_Snapshots(t *testing.T) {
if err == nil { if err == nil {
err = itr.Close() err = itr.Close()
} }
if err == nil || err.Error() != `cannot determine snapshots path: generation required` { if err == nil || err.Error() != `generation required` {
t.Fatalf("unexpected error: %v", err) t.Fatalf("unexpected error: %v", err)
} }
}) })
@@ -192,7 +193,7 @@ func TestReplicaClient_WriteSnapshot(t *testing.T) {
if r, err := c.SnapshotReader(context.Background(), "b16ddcf5c697540f", 1000); err != nil { if r, err := c.SnapshotReader(context.Background(), "b16ddcf5c697540f", 1000); err != nil {
t.Fatal(err) t.Fatal(err)
} else if buf, err := io.ReadAll(r); err != nil { } else if buf, err := ioutil.ReadAll(r); err != nil {
t.Fatal(err) t.Fatal(err)
} else if err := r.Close(); err != nil { } else if err := r.Close(); err != nil {
t.Fatal(err) t.Fatal(err)
@@ -203,7 +204,7 @@ func TestReplicaClient_WriteSnapshot(t *testing.T) {
RunWithReplicaClient(t, "ErrNoGeneration", func(t *testing.T, c litestream.ReplicaClient) { RunWithReplicaClient(t, "ErrNoGeneration", func(t *testing.T, c litestream.ReplicaClient) {
t.Parallel() t.Parallel()
if _, err := c.WriteSnapshot(context.Background(), "", 0, nil); err == nil || err.Error() != `cannot determine snapshot path: generation required` { if _, err := c.WriteSnapshot(context.Background(), "", 0, nil); err == nil || err.Error() != `generation required` {
t.Fatalf("unexpected error: %v", err) t.Fatalf("unexpected error: %v", err)
} }
}) })
@@ -223,7 +224,7 @@ func TestReplicaClient_SnapshotReader(t *testing.T) {
} }
defer r.Close() defer r.Close()
if buf, err := io.ReadAll(r); err != nil { if buf, err := ioutil.ReadAll(r); err != nil {
t.Fatal(err) t.Fatal(err)
} else if got, want := string(buf), "foo"; got != want { } else if got, want := string(buf), "foo"; got != want {
t.Fatalf("ReadAll=%v, want %v", got, want) t.Fatalf("ReadAll=%v, want %v", got, want)
@@ -241,13 +242,13 @@ func TestReplicaClient_SnapshotReader(t *testing.T) {
RunWithReplicaClient(t, "ErrNoGeneration", func(t *testing.T, c litestream.ReplicaClient) { RunWithReplicaClient(t, "ErrNoGeneration", func(t *testing.T, c litestream.ReplicaClient) {
t.Parallel() t.Parallel()
if _, err := c.SnapshotReader(context.Background(), "", 1); err == nil || err.Error() != `cannot determine snapshot path: generation required` { if _, err := c.SnapshotReader(context.Background(), "", 1); err == nil || err.Error() != `generation required` {
t.Fatalf("unexpected error: %v", err) t.Fatalf("unexpected error: %v", err)
} }
}) })
} }
func TestReplicaClient_WALs(t *testing.T) { func TestReplicaClient_WALSegments(t *testing.T) {
RunWithReplicaClient(t, "OK", func(t *testing.T, c litestream.ReplicaClient) { RunWithReplicaClient(t, "OK", func(t *testing.T, c litestream.ReplicaClient) {
t.Parallel() t.Parallel()
@@ -361,7 +362,7 @@ func TestReplicaClient_WALs(t *testing.T) {
if err == nil { if err == nil {
err = itr.Close() err = itr.Close()
} }
if err == nil || err.Error() != `cannot determine wal path: generation required` { if err == nil || err.Error() != `generation required` {
t.Fatalf("unexpected error: %v", err) t.Fatalf("unexpected error: %v", err)
} }
}) })
@@ -377,7 +378,7 @@ func TestReplicaClient_WriteWALSegment(t *testing.T) {
if r, err := c.WALSegmentReader(context.Background(), litestream.Pos{Generation: "b16ddcf5c697540f", Index: 1000, Offset: 2000}); err != nil { if r, err := c.WALSegmentReader(context.Background(), litestream.Pos{Generation: "b16ddcf5c697540f", Index: 1000, Offset: 2000}); err != nil {
t.Fatal(err) t.Fatal(err)
} else if buf, err := io.ReadAll(r); err != nil { } else if buf, err := ioutil.ReadAll(r); err != nil {
t.Fatal(err) t.Fatal(err)
} else if err := r.Close(); err != nil { } else if err := r.Close(); err != nil {
t.Fatal(err) t.Fatal(err)
@@ -388,13 +389,13 @@ func TestReplicaClient_WriteWALSegment(t *testing.T) {
RunWithReplicaClient(t, "ErrNoGeneration", func(t *testing.T, c litestream.ReplicaClient) { RunWithReplicaClient(t, "ErrNoGeneration", func(t *testing.T, c litestream.ReplicaClient) {
t.Parallel() t.Parallel()
if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "", Index: 0, Offset: 0}, nil); err == nil || err.Error() != `cannot determine wal segment path: generation required` { if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "", Index: 0, Offset: 0}, nil); err == nil || err.Error() != `generation required` {
t.Fatalf("unexpected error: %v", err) t.Fatalf("unexpected error: %v", err)
} }
}) })
} }
func TestReplicaClient_WALReader(t *testing.T) { func TestReplicaClient_WALSegmentReader(t *testing.T) {
RunWithReplicaClient(t, "OK", func(t *testing.T, c litestream.ReplicaClient) { RunWithReplicaClient(t, "OK", func(t *testing.T, c litestream.ReplicaClient) {
t.Parallel() t.Parallel()
@@ -408,7 +409,7 @@ func TestReplicaClient_WALReader(t *testing.T) {
} }
defer r.Close() defer r.Close()
if buf, err := io.ReadAll(r); err != nil { if buf, err := ioutil.ReadAll(r); err != nil {
t.Fatal(err) t.Fatal(err)
} else if got, want := string(buf), "foobar"; got != want { } else if got, want := string(buf), "foobar"; got != want {
t.Fatalf("ReadAll=%v, want %v", got, want) t.Fatalf("ReadAll=%v, want %v", got, want)
@@ -450,7 +451,7 @@ func TestReplicaClient_DeleteWALSegments(t *testing.T) {
RunWithReplicaClient(t, "ErrNoGeneration", func(t *testing.T, c litestream.ReplicaClient) { RunWithReplicaClient(t, "ErrNoGeneration", func(t *testing.T, c litestream.ReplicaClient) {
t.Parallel() t.Parallel()
if err := c.DeleteWALSegments(context.Background(), []litestream.Pos{{}}); err == nil || err.Error() != `cannot determine wal segment path: generation required` { if err := c.DeleteWALSegments(context.Background(), []litestream.Pos{{}}); err == nil || err.Error() != `generation required` {
t.Fatalf("unexpected error: %v", err) t.Fatalf("unexpected error: %v", err)
} }
}) })

View File

@@ -13,13 +13,6 @@ import (
"github.com/pierrec/lz4/v4" "github.com/pierrec/lz4/v4"
) )
func nextIndex(pos litestream.Pos) litestream.Pos {
return litestream.Pos{
Generation: pos.Generation,
Index: pos.Index + 1,
}
}
func TestReplica_Name(t *testing.T) { func TestReplica_Name(t *testing.T) {
t.Run("WithName", func(t *testing.T) { t.Run("WithName", func(t *testing.T) {
if got, want := litestream.NewReplica(nil, "NAME").Name(), "NAME"; got != want { if got, want := litestream.NewReplica(nil, "NAME").Name(), "NAME"; got != want {
@@ -39,16 +32,18 @@ func TestReplica_Sync(t *testing.T) {
db, sqldb := MustOpenDBs(t) db, sqldb := MustOpenDBs(t)
defer MustCloseDBs(t, db, sqldb) defer MustCloseDBs(t, db, sqldb)
// Execute a query to force a write to the WAL.
if _, err := sqldb.Exec(`CREATE TABLE foo (bar TEXT);`); err != nil {
t.Fatal(err)
}
// Issue initial database sync to setup generation. // Issue initial database sync to setup generation.
if err := db.Sync(context.Background()); err != nil { if err := db.Sync(context.Background()); err != nil {
t.Fatal(err) t.Fatal(err)
} }
// Fetch current database position. // Fetch current database position.
dpos, err := db.Pos() dpos := db.Pos()
if err != nil {
t.Fatal(err)
}
c := file.NewReplicaClient(t.TempDir()) c := file.NewReplicaClient(t.TempDir())
r := litestream.NewReplica(db, "") r := litestream.NewReplica(db, "")
@@ -68,51 +63,14 @@ func TestReplica_Sync(t *testing.T) {
t.Fatalf("generations[0]=%v, want %v", got, want) t.Fatalf("generations[0]=%v, want %v", got, want)
} }
// Verify we synced checkpoint page to WAL.
if r, err := c.WALSegmentReader(context.Background(), nextIndex(dpos)); err != nil {
t.Fatal(err)
} else if b, err := io.ReadAll(lz4.NewReader(r)); err != nil {
t.Fatal(err)
} else if err := r.Close(); err != nil {
t.Fatal(err)
} else if len(b) == db.PageSize() {
t.Fatalf("wal mismatch: len(%d), len(%d)", len(b), db.PageSize())
}
// Reset WAL so the next write will only write out the segment we are checking.
if err := db.Checkpoint(context.Background(), litestream.CheckpointModeTruncate); err != nil {
t.Fatal(err)
}
// Execute a query to write something into the truncated WAL.
if _, err := sqldb.Exec(`CREATE TABLE foo (bar TEXT);`); err != nil {
t.Fatal(err)
}
// Sync database to catch up the shadow WAL.
if err := db.Sync(context.Background()); err != nil {
t.Fatal(err)
}
// Save position after sync, it should be after our write.
dpos, err = db.Pos()
if err != nil {
t.Fatal(err)
}
// Sync WAL segment out to replica.
if err := r.Sync(context.Background()); err != nil {
t.Fatal(err)
}
// Verify WAL matches replica WAL. // Verify WAL matches replica WAL.
if b0, err := os.ReadFile(db.Path() + "-wal"); err != nil { if b0, err := os.ReadFile(db.Path() + "-wal"); err != nil {
t.Fatal(err) t.Fatal(err)
} else if r, err := c.WALSegmentReader(context.Background(), dpos.Truncate()); err != nil { } else if r0, err := c.WALSegmentReader(context.Background(), litestream.Pos{Generation: generations[0], Index: 0, Offset: 0}); err != nil {
t.Fatal(err) t.Fatal(err)
} else if b1, err := io.ReadAll(lz4.NewReader(r)); err != nil { } else if b1, err := io.ReadAll(lz4.NewReader(r0)); err != nil {
t.Fatal(err) t.Fatal(err)
} else if err := r.Close(); err != nil { } else if err := r0.Close(); err != nil {
t.Fatal(err) t.Fatal(err)
} else if !bytes.Equal(b0, b1) { } else if !bytes.Equal(b0, b1) {
t.Fatalf("wal mismatch: len(%d), len(%d)", len(b0), len(b1)) t.Fatalf("wal mismatch: len(%d), len(%d)", len(b0), len(b1))
@@ -137,12 +95,10 @@ func TestReplica_Snapshot(t *testing.T) {
} }
// Fetch current database position & snapshot. // Fetch current database position & snapshot.
pos0, err := db.Pos() pos0 := db.Pos()
if err != nil { if info, err := r.Snapshot(context.Background()); err != nil {
t.Fatal(err) t.Fatal(err)
} else if info, err := r.Snapshot(context.Background()); err != nil { } else if got, want := info.Pos(), pos0.Truncate(); got != want {
t.Fatal(err)
} else if got, want := info.Pos(), nextIndex(pos0); got != want {
t.Fatalf("pos=%s, want %s", got, want) t.Fatalf("pos=%s, want %s", got, want)
} }
@@ -161,25 +117,21 @@ func TestReplica_Snapshot(t *testing.T) {
} }
// Fetch current database position & snapshot. // Fetch current database position & snapshot.
pos1, err := db.Pos() pos1 := db.Pos()
if err != nil { if info, err := r.Snapshot(context.Background()); err != nil {
t.Fatal(err) t.Fatal(err)
} else if info, err := r.Snapshot(context.Background()); err != nil { } else if got, want := info.Pos(), pos1.Truncate(); got != want {
t.Fatal(err)
} else if got, want := info.Pos(), nextIndex(pos1); got != want {
t.Fatalf("pos=%v, want %v", got, want) t.Fatalf("pos=%v, want %v", got, want)
} }
// Verify three snapshots exist. // Verify two snapshots exist.
if infos, err := r.Snapshots(context.Background()); err != nil { if infos, err := r.Snapshots(context.Background()); err != nil {
t.Fatal(err) t.Fatal(err)
} else if got, want := len(infos), 3; got != want { } else if got, want := len(infos), 2; got != want {
t.Fatalf("len=%v, want %v", got, want) t.Fatalf("len=%v, want %v", got, want)
} else if got, want := infos[0].Pos(), pos0.Truncate(); got != want { } else if got, want := infos[0].Pos(), pos0.Truncate(); got != want {
t.Fatalf("info[0]=%s, want %s", got, want) t.Fatalf("info[0]=%s, want %s", got, want)
} else if got, want := infos[1].Pos(), nextIndex(pos0); got != want { } else if got, want := infos[1].Pos(), pos1.Truncate(); got != want {
t.Fatalf("info[1]=%s, want %s", got, want) t.Fatalf("info[1]=%s, want %s", got, want)
} else if got, want := infos[2].Pos(), nextIndex(pos1); got != want {
t.Fatalf("info[2]=%s, want %s", got, want)
} }
} }

View File

@@ -10,12 +10,14 @@ import (
"os" "os"
"path" "path"
"regexp" "regexp"
"strings"
"sync" "sync"
"time" "time"
"github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/defaults"
"github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3" "github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3manager" "github.com/aws/aws-sdk-go/service/s3/s3manager"
@@ -92,7 +94,6 @@ func (c *ReplicaClient) Init(ctx context.Context) (err error) {
if region != "" { if region != "" {
config.Region = aws.String(region) config.Region = aws.String(region)
} }
sess, err := session.NewSession(config) sess, err := session.NewSession(config)
if err != nil { if err != nil {
return fmt.Errorf("cannot create aws session: %w", err) return fmt.Errorf("cannot create aws session: %w", err)
@@ -105,8 +106,7 @@ func (c *ReplicaClient) Init(ctx context.Context) (err error) {
// config returns the AWS configuration. Uses the default credential chain // config returns the AWS configuration. Uses the default credential chain
// unless a key/secret are explicitly set. // unless a key/secret are explicitly set.
func (c *ReplicaClient) config() *aws.Config { func (c *ReplicaClient) config() *aws.Config {
config := &aws.Config{} config := defaults.Get().Config
if c.AccessKeyID != "" || c.SecretAccessKey != "" { if c.AccessKeyID != "" || c.SecretAccessKey != "" {
config.Credentials = credentials.NewStaticCredentials(c.AccessKeyID, c.SecretAccessKey, "") config.Credentials = credentials.NewStaticCredentials(c.AccessKeyID, c.SecretAccessKey, "")
} }
@@ -136,12 +136,12 @@ func (c *ReplicaClient) findBucketRegion(ctx context.Context, bucket string) (st
// Fetch bucket location, if possible. Must be bucket owner. // Fetch bucket location, if possible. Must be bucket owner.
// This call can return a nil location which means it's in us-east-1. // This call can return a nil location which means it's in us-east-1.
if out, err := s3.New(sess).HeadBucketWithContext(ctx, &s3.HeadBucketInput{ if out, err := s3.New(sess).GetBucketLocation(&s3.GetBucketLocationInput{
Bucket: aws.String(bucket), Bucket: aws.String(bucket),
}); err != nil { }); err != nil {
return "", err return "", err
} else if out.BucketRegion != nil { } else if out.LocationConstraint != nil {
return *out.BucketRegion, nil return *out.LocationConstraint, nil
} }
return DefaultRegion, nil return DefaultRegion, nil
} }
@@ -155,13 +155,13 @@ func (c *ReplicaClient) Generations(ctx context.Context) ([]string, error) {
var generations []string var generations []string
if err := c.s3.ListObjectsPagesWithContext(ctx, &s3.ListObjectsInput{ if err := c.s3.ListObjectsPagesWithContext(ctx, &s3.ListObjectsInput{
Bucket: aws.String(c.Bucket), Bucket: aws.String(c.Bucket),
Prefix: aws.String(litestream.GenerationsPath(c.Path) + "/"), Prefix: aws.String(path.Join(c.Path, "generations") + "/"),
Delimiter: aws.String("/"), Delimiter: aws.String("/"),
}, func(page *s3.ListObjectsOutput, lastPage bool) bool { }, func(page *s3.ListObjectsOutput, lastPage bool) bool {
internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "LIST").Inc() internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "LIST").Inc()
for _, prefix := range page.CommonPrefixes { for _, prefix := range page.CommonPrefixes {
name := path.Base(aws.StringValue(prefix.Prefix)) name := path.Base(*prefix.Prefix)
if !litestream.IsGenerationName(name) { if !litestream.IsGenerationName(name) {
continue continue
} }
@@ -179,18 +179,15 @@ func (c *ReplicaClient) Generations(ctx context.Context) ([]string, error) {
func (c *ReplicaClient) DeleteGeneration(ctx context.Context, generation string) error { func (c *ReplicaClient) DeleteGeneration(ctx context.Context, generation string) error {
if err := c.Init(ctx); err != nil { if err := c.Init(ctx); err != nil {
return err return err
} } else if generation == "" {
return fmt.Errorf("generation required")
dir, err := litestream.GenerationPath(c.Path, generation)
if err != nil {
return fmt.Errorf("cannot determine generation path: %w", err)
} }
// Collect all files for the generation. // Collect all files for the generation.
var objIDs []*s3.ObjectIdentifier var objIDs []*s3.ObjectIdentifier
if err := c.s3.ListObjectsPagesWithContext(ctx, &s3.ListObjectsInput{ if err := c.s3.ListObjectsPagesWithContext(ctx, &s3.ListObjectsInput{
Bucket: aws.String(c.Bucket), Bucket: aws.String(c.Bucket),
Prefix: aws.String(dir), Prefix: aws.String(path.Join(c.Path, "generations", generation)),
}, func(page *s3.ListObjectsOutput, lastPage bool) bool { }, func(page *s3.ListObjectsOutput, lastPage bool) bool {
internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "LIST").Inc() internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "LIST").Inc()
@@ -209,14 +206,10 @@ func (c *ReplicaClient) DeleteGeneration(ctx context.Context, generation string)
n = len(objIDs) n = len(objIDs)
} }
out, err := c.s3.DeleteObjectsWithContext(ctx, &s3.DeleteObjectsInput{ if _, err := c.s3.DeleteObjectsWithContext(ctx, &s3.DeleteObjectsInput{
Bucket: aws.String(c.Bucket), Bucket: aws.String(c.Bucket),
Delete: &s3.Delete{Objects: objIDs[:n], Quiet: aws.Bool(true)}, Delete: &s3.Delete{Objects: objIDs[:n], Quiet: aws.Bool(true)},
}) }); err != nil {
if err != nil {
return err
}
if err := deleteOutputError(out); err != nil {
return err return err
} }
internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "DELETE").Inc() internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "DELETE").Inc()
@@ -241,12 +234,11 @@ func (c *ReplicaClient) Snapshots(ctx context.Context, generation string) (lites
func (c *ReplicaClient) WriteSnapshot(ctx context.Context, generation string, index int, rd io.Reader) (info litestream.SnapshotInfo, err error) { func (c *ReplicaClient) WriteSnapshot(ctx context.Context, generation string, index int, rd io.Reader) (info litestream.SnapshotInfo, err error) {
if err := c.Init(ctx); err != nil { if err := c.Init(ctx); err != nil {
return info, err return info, err
} else if generation == "" {
return info, fmt.Errorf("generation required")
} }
key, err := litestream.SnapshotPath(c.Path, generation, index) key := path.Join(c.Path, "generations", generation, "snapshots", litestream.FormatIndex(index)+".snapshot.lz4")
if err != nil {
return info, fmt.Errorf("cannot determine snapshot path: %w", err)
}
startTime := time.Now() startTime := time.Now()
rc := internal.NewReadCounter(rd) rc := internal.NewReadCounter(rd)
@@ -275,12 +267,11 @@ func (c *ReplicaClient) WriteSnapshot(ctx context.Context, generation string, in
func (c *ReplicaClient) SnapshotReader(ctx context.Context, generation string, index int) (io.ReadCloser, error) { func (c *ReplicaClient) SnapshotReader(ctx context.Context, generation string, index int) (io.ReadCloser, error) {
if err := c.Init(ctx); err != nil { if err := c.Init(ctx); err != nil {
return nil, err return nil, err
} else if generation == "" {
return nil, fmt.Errorf("generation required")
} }
key, err := litestream.SnapshotPath(c.Path, generation, index) key := path.Join(c.Path, "generations", generation, "snapshots", litestream.FormatIndex(index)+".snapshot.lz4")
if err != nil {
return nil, fmt.Errorf("cannot determine snapshot path: %w", err)
}
out, err := c.s3.GetObjectWithContext(ctx, &s3.GetObjectInput{ out, err := c.s3.GetObjectWithContext(ctx, &s3.GetObjectInput{
Bucket: aws.String(c.Bucket), Bucket: aws.String(c.Bucket),
@@ -292,7 +283,7 @@ func (c *ReplicaClient) SnapshotReader(ctx context.Context, generation string, i
return nil, err return nil, err
} }
internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "GET").Inc() internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "GET").Inc()
internal.OperationBytesCounterVec.WithLabelValues(ReplicaClientType, "GET").Add(float64(aws.Int64Value(out.ContentLength))) internal.OperationBytesCounterVec.WithLabelValues(ReplicaClientType, "GET").Add(float64(*out.ContentLength))
return out.Body, nil return out.Body, nil
} }
@@ -301,21 +292,16 @@ func (c *ReplicaClient) SnapshotReader(ctx context.Context, generation string, i
func (c *ReplicaClient) DeleteSnapshot(ctx context.Context, generation string, index int) error { func (c *ReplicaClient) DeleteSnapshot(ctx context.Context, generation string, index int) error {
if err := c.Init(ctx); err != nil { if err := c.Init(ctx); err != nil {
return err return err
} else if generation == "" {
return fmt.Errorf("generation required")
} }
key, err := litestream.SnapshotPath(c.Path, generation, index) key := path.Join(c.Path, "generations", generation, "snapshots", litestream.FormatIndex(index)+".snapshot.lz4")
if err != nil {
return fmt.Errorf("cannot determine snapshot path: %w", err)
}
out, err := c.s3.DeleteObjectsWithContext(ctx, &s3.DeleteObjectsInput{ if _, err := c.s3.DeleteObjectsWithContext(ctx, &s3.DeleteObjectsInput{
Bucket: aws.String(c.Bucket), Bucket: aws.String(c.Bucket),
Delete: &s3.Delete{Objects: []*s3.ObjectIdentifier{{Key: &key}}, Quiet: aws.Bool(true)}, Delete: &s3.Delete{Objects: []*s3.ObjectIdentifier{{Key: &key}}, Quiet: aws.Bool(true)},
}) }); err != nil {
if err != nil {
return err
}
if err := deleteOutputError(out); err != nil {
return err return err
} }
@@ -335,12 +321,11 @@ func (c *ReplicaClient) WALSegments(ctx context.Context, generation string) (lit
func (c *ReplicaClient) WriteWALSegment(ctx context.Context, pos litestream.Pos, rd io.Reader) (info litestream.WALSegmentInfo, err error) { func (c *ReplicaClient) WriteWALSegment(ctx context.Context, pos litestream.Pos, rd io.Reader) (info litestream.WALSegmentInfo, err error) {
if err := c.Init(ctx); err != nil { if err := c.Init(ctx); err != nil {
return info, err return info, err
} else if pos.Generation == "" {
return info, fmt.Errorf("generation required")
} }
key, err := litestream.WALSegmentPath(c.Path, pos.Generation, pos.Index, pos.Offset) key := path.Join(c.Path, "generations", pos.Generation, "wal", litestream.FormatIndex(pos.Index), litestream.FormatOffset(pos.Offset)+".wal.lz4")
if err != nil {
return info, fmt.Errorf("cannot determine wal segment path: %w", err)
}
startTime := time.Now() startTime := time.Now()
rc := internal.NewReadCounter(rd) rc := internal.NewReadCounter(rd)
@@ -369,12 +354,11 @@ func (c *ReplicaClient) WriteWALSegment(ctx context.Context, pos litestream.Pos,
func (c *ReplicaClient) WALSegmentReader(ctx context.Context, pos litestream.Pos) (io.ReadCloser, error) { func (c *ReplicaClient) WALSegmentReader(ctx context.Context, pos litestream.Pos) (io.ReadCloser, error) {
if err := c.Init(ctx); err != nil { if err := c.Init(ctx); err != nil {
return nil, err return nil, err
} else if pos.Generation == "" {
return nil, fmt.Errorf("generation required")
} }
key, err := litestream.WALSegmentPath(c.Path, pos.Generation, pos.Index, pos.Offset) key := path.Join(c.Path, "generations", pos.Generation, "wal", litestream.FormatIndex(pos.Index), litestream.FormatOffset(pos.Offset)+".wal.lz4")
if err != nil {
return nil, fmt.Errorf("cannot determine wal segment path: %w", err)
}
out, err := c.s3.GetObjectWithContext(ctx, &s3.GetObjectInput{ out, err := c.s3.GetObjectWithContext(ctx, &s3.GetObjectInput{
Bucket: aws.String(c.Bucket), Bucket: aws.String(c.Bucket),
@@ -386,7 +370,7 @@ func (c *ReplicaClient) WALSegmentReader(ctx context.Context, pos litestream.Pos
return nil, err return nil, err
} }
internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "GET").Inc() internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "GET").Inc()
internal.OperationBytesCounterVec.WithLabelValues(ReplicaClientType, "GET").Add(float64(aws.Int64Value(out.ContentLength))) internal.OperationBytesCounterVec.WithLabelValues(ReplicaClientType, "GET").Add(float64(*out.ContentLength))
return out.Body, nil return out.Body, nil
} }
@@ -406,24 +390,21 @@ func (c *ReplicaClient) DeleteWALSegments(ctx context.Context, a []litestream.Po
// Generate a batch of object IDs for deleting the WAL segments. // Generate a batch of object IDs for deleting the WAL segments.
for i, pos := range a[:n] { for i, pos := range a[:n] {
key, err := litestream.WALSegmentPath(c.Path, pos.Generation, pos.Index, pos.Offset) if pos.Generation == "" {
if err != nil { return fmt.Errorf("generation required")
return fmt.Errorf("cannot determine wal segment path: %w", err)
} }
key := path.Join(c.Path, "generations", pos.Generation, "wal", litestream.FormatIndex(pos.Index), litestream.FormatOffset(pos.Offset)+".wal.lz4")
objIDs[i] = &s3.ObjectIdentifier{Key: &key} objIDs[i] = &s3.ObjectIdentifier{Key: &key}
} }
// Delete S3 objects in bulk. // Delete S3 objects in bulk.
out, err := c.s3.DeleteObjectsWithContext(ctx, &s3.DeleteObjectsInput{ if _, err := c.s3.DeleteObjectsWithContext(ctx, &s3.DeleteObjectsInput{
Bucket: aws.String(c.Bucket), Bucket: aws.String(c.Bucket),
Delete: &s3.Delete{Objects: objIDs[:n], Quiet: aws.Bool(true)}, Delete: &s3.Delete{Objects: objIDs[:n], Quiet: aws.Bool(true)},
}) }); err != nil {
if err != nil {
return err
}
if err := deleteOutputError(out); err != nil {
return err return err
} }
internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "DELETE").Inc() internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "DELETE").Inc()
a = a[n:] a = a[n:]
@@ -466,14 +447,10 @@ func (c *ReplicaClient) DeleteAll(ctx context.Context) error {
n = len(objIDs) n = len(objIDs)
} }
out, err := c.s3.DeleteObjectsWithContext(ctx, &s3.DeleteObjectsInput{ if _, err := c.s3.DeleteObjectsWithContext(ctx, &s3.DeleteObjectsInput{
Bucket: aws.String(c.Bucket), Bucket: aws.String(c.Bucket),
Delete: &s3.Delete{Objects: objIDs[:n], Quiet: aws.Bool(true)}, Delete: &s3.Delete{Objects: objIDs[:n], Quiet: aws.Bool(true)},
}) }); err != nil {
if err != nil {
return err
}
if err := deleteOutputError(out); err != nil {
return err return err
} }
internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "DELETE").Inc() internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "DELETE").Inc()
@@ -514,11 +491,12 @@ func newSnapshotIterator(ctx context.Context, client *ReplicaClient, generation
func (itr *snapshotIterator) fetch() error { func (itr *snapshotIterator) fetch() error {
defer close(itr.ch) defer close(itr.ch)
dir, err := litestream.SnapshotsPath(itr.client.Path, itr.generation) if itr.generation == "" {
if err != nil { return fmt.Errorf("generation required")
return fmt.Errorf("cannot determine snapshots path: %w", err)
} }
dir := path.Join(itr.client.Path, "generations", itr.generation, "snapshots")
return itr.client.s3.ListObjectsPagesWithContext(itr.ctx, &s3.ListObjectsInput{ return itr.client.s3.ListObjectsPagesWithContext(itr.ctx, &s3.ListObjectsInput{
Bucket: aws.String(itr.client.Bucket), Bucket: aws.String(itr.client.Bucket),
Prefix: aws.String(dir + "/"), Prefix: aws.String(dir + "/"),
@@ -527,8 +505,7 @@ func (itr *snapshotIterator) fetch() error {
internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "LIST").Inc() internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "LIST").Inc()
for _, obj := range page.Contents { for _, obj := range page.Contents {
key := path.Base(aws.StringValue(obj.Key)) index, err := internal.ParseSnapshotPath(path.Base(*obj.Key))
index, err := litestream.ParseSnapshotPath(key)
if err != nil { if err != nil {
continue continue
} }
@@ -536,7 +513,7 @@ func (itr *snapshotIterator) fetch() error {
info := litestream.SnapshotInfo{ info := litestream.SnapshotInfo{
Generation: itr.generation, Generation: itr.generation,
Index: index, Index: index,
Size: aws.Int64Value(obj.Size), Size: *obj.Size,
CreatedAt: obj.LastModified.UTC(), CreatedAt: obj.LastModified.UTC(),
} }
@@ -617,21 +594,20 @@ func newWALSegmentIterator(ctx context.Context, client *ReplicaClient, generatio
func (itr *walSegmentIterator) fetch() error { func (itr *walSegmentIterator) fetch() error {
defer close(itr.ch) defer close(itr.ch)
dir, err := litestream.WALPath(itr.client.Path, itr.generation) if itr.generation == "" {
if err != nil { return fmt.Errorf("generation required")
return fmt.Errorf("cannot determine wal path: %w", err)
} }
prefix := path.Join(itr.client.Path, "generations", itr.generation, "wal") + "/"
return itr.client.s3.ListObjectsPagesWithContext(itr.ctx, &s3.ListObjectsInput{ return itr.client.s3.ListObjectsPagesWithContext(itr.ctx, &s3.ListObjectsInput{
Bucket: aws.String(itr.client.Bucket), Bucket: aws.String(itr.client.Bucket),
Prefix: aws.String(dir + "/"), Prefix: aws.String(prefix),
Delimiter: aws.String("/"),
}, func(page *s3.ListObjectsOutput, lastPage bool) bool { }, func(page *s3.ListObjectsOutput, lastPage bool) bool {
internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "LIST").Inc() internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "LIST").Inc()
for _, obj := range page.Contents { for _, obj := range page.Contents {
key := path.Base(aws.StringValue(obj.Key)) index, offset, err := internal.ParseWALSegmentPath(strings.TrimPrefix(*obj.Key, prefix))
index, offset, err := litestream.ParseWALSegmentPath(key)
if err != nil { if err != nil {
continue continue
} }
@@ -640,7 +616,7 @@ func (itr *walSegmentIterator) fetch() error {
Generation: itr.generation, Generation: itr.generation,
Index: index, Index: index,
Offset: offset, Offset: offset,
Size: aws.Int64Value(obj.Size), Size: *obj.Size,
CreatedAt: obj.LastModified.UTC(), CreatedAt: obj.LastModified.UTC(),
} }
@@ -716,9 +692,6 @@ func ParseHost(s string) (bucket, region, endpoint string, forcePathStyle bool)
} else if a := digitalOceanRegex.FindStringSubmatch(host); a != nil { } else if a := digitalOceanRegex.FindStringSubmatch(host); a != nil {
bucket, region = a[1], a[2] bucket, region = a[1], a[2]
endpoint = fmt.Sprintf("%s.digitaloceanspaces.com", region) endpoint = fmt.Sprintf("%s.digitaloceanspaces.com", region)
} else if a := scalewayRegex.FindStringSubmatch(host); a != nil {
bucket, region = a[1], a[2]
endpoint = fmt.Sprintf("s3.%s.scw.cloud", region)
} else if a := linodeRegex.FindStringSubmatch(host); a != nil { } else if a := linodeRegex.FindStringSubmatch(host); a != nil {
bucket, region = a[1], a[2] bucket, region = a[1], a[2]
endpoint = fmt.Sprintf("%s.linodeobjects.com", region) endpoint = fmt.Sprintf("%s.linodeobjects.com", region)
@@ -745,7 +718,6 @@ var (
backblazeRegex = regexp.MustCompile(`^(?:(.+)\.)?s3.([^.]+)\.backblazeb2.com$`) backblazeRegex = regexp.MustCompile(`^(?:(.+)\.)?s3.([^.]+)\.backblazeb2.com$`)
filebaseRegex = regexp.MustCompile(`^(?:(.+)\.)?s3.filebase.com$`) filebaseRegex = regexp.MustCompile(`^(?:(.+)\.)?s3.filebase.com$`)
digitalOceanRegex = regexp.MustCompile(`^(?:(.+)\.)?([^.]+)\.digitaloceanspaces.com$`) digitalOceanRegex = regexp.MustCompile(`^(?:(.+)\.)?([^.]+)\.digitaloceanspaces.com$`)
scalewayRegex = regexp.MustCompile(`^(?:(.+)\.)?s3.([^.]+)\.scw\.cloud$`)
linodeRegex = regexp.MustCompile(`^(?:(.+)\.)?([^.]+)\.linodeobjects.com$`) linodeRegex = regexp.MustCompile(`^(?:(.+)\.)?([^.]+)\.linodeobjects.com$`)
) )
@@ -757,15 +729,3 @@ func isNotExists(err error) bool {
return false return false
} }
} }
func deleteOutputError(out *s3.DeleteObjectsOutput) error {
switch len(out.Errors) {
case 0:
return nil
case 1:
return fmt.Errorf("deleting object %s: %s - %s", aws.StringValue(out.Errors[0].Key), aws.StringValue(out.Errors[0].Code), aws.StringValue(out.Errors[0].Message))
default:
return fmt.Errorf("%d errors occurred deleting objects, %s: %s - (%s (and %d others)",
len(out.Errors), aws.StringValue(out.Errors[0].Key), aws.StringValue(out.Errors[0].Code), aws.StringValue(out.Errors[0].Message), len(out.Errors)-1)
}
}

View File

@@ -8,6 +8,8 @@ import (
"net" "net"
"os" "os"
"path" "path"
"sort"
"strings"
"sync" "sync"
"time" "time"
@@ -120,7 +122,7 @@ func (c *ReplicaClient) Generations(ctx context.Context) (_ []string, err error)
return nil, err return nil, err
} }
fis, err := sftpClient.ReadDir(litestream.GenerationsPath(c.Path)) fis, err := sftpClient.ReadDir(path.Join(c.Path, "generations"))
if os.IsNotExist(err) { if os.IsNotExist(err) {
return nil, nil return nil, nil
} else if err != nil { } else if err != nil {
@@ -140,6 +142,8 @@ func (c *ReplicaClient) Generations(ctx context.Context) (_ []string, err error)
generations = append(generations, name) generations = append(generations, name)
} }
sort.Strings(generations)
return generations, nil return generations, nil
} }
@@ -150,12 +154,11 @@ func (c *ReplicaClient) DeleteGeneration(ctx context.Context, generation string)
sftpClient, err := c.Init(ctx) sftpClient, err := c.Init(ctx)
if err != nil { if err != nil {
return err return err
} else if generation == "" {
return fmt.Errorf("generation required")
} }
dir, err := litestream.GenerationPath(c.Path, generation) dir := path.Join(c.Path, "generations", generation)
if err != nil {
return fmt.Errorf("cannot determine generation path: %w", err)
}
var dirs []string var dirs []string
walker := sftpClient.Walk(dir) walker := sftpClient.Walk(dir)
@@ -195,12 +198,11 @@ func (c *ReplicaClient) Snapshots(ctx context.Context, generation string) (_ lit
sftpClient, err := c.Init(ctx) sftpClient, err := c.Init(ctx)
if err != nil { if err != nil {
return nil, err return nil, err
} else if generation == "" {
return nil, fmt.Errorf("generation required")
} }
dir, err := litestream.SnapshotsPath(c.Path, generation) dir := path.Join(c.Path, "generations", generation, "snapshots")
if err != nil {
return nil, fmt.Errorf("cannot determine snapshots path: %w", err)
}
fis, err := sftpClient.ReadDir(dir) fis, err := sftpClient.ReadDir(dir)
if os.IsNotExist(err) { if os.IsNotExist(err) {
@@ -213,7 +215,7 @@ func (c *ReplicaClient) Snapshots(ctx context.Context, generation string) (_ lit
infos := make([]litestream.SnapshotInfo, 0, len(fis)) infos := make([]litestream.SnapshotInfo, 0, len(fis))
for _, fi := range fis { for _, fi := range fis {
// Parse index from filename. // Parse index from filename.
index, err := litestream.ParseSnapshotPath(path.Base(fi.Name())) index, err := internal.ParseSnapshotPath(path.Base(fi.Name()))
if err != nil { if err != nil {
continue continue
} }
@@ -226,6 +228,8 @@ func (c *ReplicaClient) Snapshots(ctx context.Context, generation string) (_ lit
}) })
} }
sort.Sort(litestream.SnapshotInfoSlice(infos))
return litestream.NewSnapshotInfoSliceIterator(infos), nil return litestream.NewSnapshotInfoSliceIterator(infos), nil
} }
@@ -236,12 +240,11 @@ func (c *ReplicaClient) WriteSnapshot(ctx context.Context, generation string, in
sftpClient, err := c.Init(ctx) sftpClient, err := c.Init(ctx)
if err != nil { if err != nil {
return info, err return info, err
} else if generation == "" {
return info, fmt.Errorf("generation required")
} }
filename, err := litestream.SnapshotPath(c.Path, generation, index) filename := path.Join(c.Path, "generations", generation, "snapshots", litestream.FormatIndex(index)+".snapshot.lz4")
if err != nil {
return info, fmt.Errorf("cannot determine snapshot path: %w", err)
}
startTime := time.Now() startTime := time.Now()
if err := sftpClient.MkdirAll(path.Dir(filename)); err != nil { if err := sftpClient.MkdirAll(path.Dir(filename)); err != nil {
@@ -281,12 +284,11 @@ func (c *ReplicaClient) SnapshotReader(ctx context.Context, generation string, i
sftpClient, err := c.Init(ctx) sftpClient, err := c.Init(ctx)
if err != nil { if err != nil {
return nil, err return nil, err
} else if generation == "" {
return nil, fmt.Errorf("generation required")
} }
filename, err := litestream.SnapshotPath(c.Path, generation, index) filename := path.Join(c.Path, "generations", generation, "snapshots", litestream.FormatIndex(index)+".snapshot.lz4")
if err != nil {
return nil, fmt.Errorf("cannot determine snapshot path: %w", err)
}
f, err := sftpClient.Open(filename) f, err := sftpClient.Open(filename)
if err != nil { if err != nil {
@@ -305,12 +307,11 @@ func (c *ReplicaClient) DeleteSnapshot(ctx context.Context, generation string, i
sftpClient, err := c.Init(ctx) sftpClient, err := c.Init(ctx)
if err != nil { if err != nil {
return err return err
} else if generation == "" {
return fmt.Errorf("generation required")
} }
filename, err := litestream.SnapshotPath(c.Path, generation, index) filename := path.Join(c.Path, "generations", generation, "snapshots", litestream.FormatIndex(index)+".snapshot.lz4")
if err != nil {
return fmt.Errorf("cannot determine snapshot path: %w", err)
}
if err := sftpClient.Remove(filename); err != nil && !os.IsNotExist(err) { if err := sftpClient.Remove(filename); err != nil && !os.IsNotExist(err) {
return fmt.Errorf("cannot delete snapshot %q: %w", filename, err) return fmt.Errorf("cannot delete snapshot %q: %w", filename, err)
@@ -327,12 +328,11 @@ func (c *ReplicaClient) WALSegments(ctx context.Context, generation string) (_ l
sftpClient, err := c.Init(ctx) sftpClient, err := c.Init(ctx)
if err != nil { if err != nil {
return nil, err return nil, err
} else if generation == "" {
return nil, fmt.Errorf("generation required")
} }
dir, err := litestream.WALPath(c.Path, generation) dir := path.Join(c.Path, "generations", generation, "wal")
if err != nil {
return nil, fmt.Errorf("cannot determine wal path: %w", err)
}
fis, err := sftpClient.ReadDir(dir) fis, err := sftpClient.ReadDir(dir)
if os.IsNotExist(err) { if os.IsNotExist(err) {
@@ -342,23 +342,18 @@ func (c *ReplicaClient) WALSegments(ctx context.Context, generation string) (_ l
} }
// Iterate over every file and convert to metadata. // Iterate over every file and convert to metadata.
infos := make([]litestream.WALSegmentInfo, 0, len(fis)) indexes := make([]int, 0, len(fis))
for _, fi := range fis { for _, fi := range fis {
index, offset, err := litestream.ParseWALSegmentPath(path.Base(fi.Name())) index, err := litestream.ParseIndex(fi.Name())
if err != nil { if err != nil || !fi.IsDir() {
continue continue
} }
indexes = append(indexes, index)
infos = append(infos, litestream.WALSegmentInfo{
Generation: generation,
Index: index,
Offset: offset,
Size: fi.Size(),
CreatedAt: fi.ModTime().UTC(),
})
} }
return litestream.NewWALSegmentInfoSliceIterator(infos), nil sort.Ints(indexes)
return newWALSegmentIterator(ctx, c, dir, generation, indexes), nil
} }
// WriteWALSegment writes LZ4 compressed data from rd into a file on disk. // WriteWALSegment writes LZ4 compressed data from rd into a file on disk.
@@ -368,12 +363,11 @@ func (c *ReplicaClient) WriteWALSegment(ctx context.Context, pos litestream.Pos,
sftpClient, err := c.Init(ctx) sftpClient, err := c.Init(ctx)
if err != nil { if err != nil {
return info, err return info, err
} else if pos.Generation == "" {
return info, fmt.Errorf("generation required")
} }
filename, err := litestream.WALSegmentPath(c.Path, pos.Generation, pos.Index, pos.Offset) filename := path.Join(c.Path, "generations", pos.Generation, "wal", litestream.FormatIndex(pos.Index), litestream.FormatOffset(pos.Offset)+".wal.lz4")
if err != nil {
return info, fmt.Errorf("cannot determine wal segment path: %w", err)
}
startTime := time.Now() startTime := time.Now()
if err := sftpClient.MkdirAll(path.Dir(filename)); err != nil { if err := sftpClient.MkdirAll(path.Dir(filename)); err != nil {
@@ -413,12 +407,11 @@ func (c *ReplicaClient) WALSegmentReader(ctx context.Context, pos litestream.Pos
sftpClient, err := c.Init(ctx) sftpClient, err := c.Init(ctx)
if err != nil { if err != nil {
return nil, err return nil, err
} else if pos.Generation == "" {
return nil, fmt.Errorf("generation required")
} }
filename, err := litestream.WALSegmentPath(c.Path, pos.Generation, pos.Index, pos.Offset) filename := path.Join(c.Path, "generations", pos.Generation, "wal", litestream.FormatIndex(pos.Index), litestream.FormatOffset(pos.Offset)+".wal.lz4")
if err != nil {
return nil, fmt.Errorf("cannot determine wal segment path: %w", err)
}
f, err := sftpClient.Open(filename) f, err := sftpClient.Open(filename)
if err != nil { if err != nil {
@@ -440,11 +433,12 @@ func (c *ReplicaClient) DeleteWALSegments(ctx context.Context, a []litestream.Po
} }
for _, pos := range a { for _, pos := range a {
filename, err := litestream.WALSegmentPath(c.Path, pos.Generation, pos.Index, pos.Offset) if pos.Generation == "" {
if err != nil { return fmt.Errorf("generation required")
return fmt.Errorf("cannot determine wal segment path: %w", err)
} }
filename := path.Join(c.Path, "generations", pos.Generation, "wal", litestream.FormatIndex(pos.Index), litestream.FormatOffset(pos.Offset)+".wal.lz4")
if err := sftpClient.Remove(filename); err != nil && !os.IsNotExist(err) { if err := sftpClient.Remove(filename); err != nil && !os.IsNotExist(err) {
return fmt.Errorf("cannot delete wal segment %q: %w", filename, err) return fmt.Errorf("cannot delete wal segment %q: %w", filename, err)
} }
@@ -463,7 +457,7 @@ func (c *ReplicaClient) Cleanup(ctx context.Context) (err error) {
return err return err
} }
if err := sftpClient.RemoveDirectory(litestream.GenerationsPath(c.Path)); err != nil && !os.IsNotExist(err) { if err := sftpClient.RemoveDirectory(path.Join(c.Path, "generations")); err != nil && !os.IsNotExist(err) {
return fmt.Errorf("cannot delete generations path: %w", err) return fmt.Errorf("cannot delete generations path: %w", err)
} else if err := sftpClient.RemoveDirectory(c.Path); err != nil && !os.IsNotExist(err) { } else if err := sftpClient.RemoveDirectory(c.Path); err != nil && !os.IsNotExist(err) {
return fmt.Errorf("cannot delete path: %w", err) return fmt.Errorf("cannot delete path: %w", err)
@@ -486,3 +480,101 @@ func (c *ReplicaClient) resetOnConnError(err error) {
c.sshClient = nil c.sshClient = nil
} }
} }
// walSegmentIterator iterates over the WAL segments stored for a single
// generation on an SFTP replica. Segments are grouped on disk by index
// (one directory per index), so the iterator lazily reads one index
// directory at a time and caches its segment metadata in infos.
type walSegmentIterator struct {
	ctx        context.Context // context checked via client calls; cancels remote reads
	client     *ReplicaClient  // client used to (re)initialize the SFTP connection
	dir        string          // base WAL directory for the generation
	generation string          // generation whose segments are being iterated
	indexes    []int           // remaining indexes to visit; indexes[0] is current
	infos      []litestream.WALSegmentInfo // cached segments for current index; infos[0] is current; nil until first directory read

	err error // first error encountered; sticky, returned by Err()/Close()
}
// newWALSegmentIterator returns an iterator over the WAL segments of the
// given generation. dir is the generation's WAL directory and indexes lists
// the index subdirectories to visit, in the order they should be iterated.
func newWALSegmentIterator(ctx context.Context, client *ReplicaClient, dir, generation string, indexes []int) *walSegmentIterator {
	itr := &walSegmentIterator{}
	itr.ctx = ctx
	itr.client = client
	itr.dir = dir
	itr.generation = generation
	itr.indexes = indexes
	return itr
}
// Close releases the iterator and reports the first error encountered
// during iteration, if any.
func (itr *walSegmentIterator) Close() (err error) {
	err = itr.err
	return err
}
// Next advances the iterator to the next WAL segment, reading one index
// directory from the remote server at a time and caching its segment
// metadata in itr.infos. It returns true if a segment is available via
// WALSegment(), and false once all indexes are exhausted or an error
// occurs (the error is retained and reported by Err()/Close()).
func (itr *walSegmentIterator) Next() bool {
	// NOTE(review): Init is invoked before the itr.err check below, so a
	// connection attempt is made even when a previous error already ended
	// iteration — confirm this ordering is intentional.
	sftpClient, err := itr.client.Init(itr.ctx)
	if err != nil {
		itr.err = err
		return false
	}

	// Exit if an error has already occurred.
	if itr.err != nil {
		return false
	}

	for {
		// Move to the next segment in cache, if available.
		if len(itr.infos) > 1 {
			itr.infos = itr.infos[1:]
			return true
		}
		// Truncation preserves nil-ness: slicing a nil slice yields nil, so
		// on the very first call infos remains nil after this line. Once a
		// directory has been read, infos is non-nil even when emptied.
		itr.infos = itr.infos[:0] // otherwise clear infos

		// Move to the next index unless this is the first time initializing.
		// (infos == nil is the "never read a directory yet" sentinel; it
		// prevents skipping the first index on the initial call.)
		if itr.infos != nil && len(itr.indexes) > 0 {
			itr.indexes = itr.indexes[1:]
		}

		// If no indexes remain, stop iteration.
		if len(itr.indexes) == 0 {
			return false
		}

		// Read segments into a cache for the current index. Each index has
		// its own subdirectory under the generation's WAL directory.
		index := itr.indexes[0]
		fis, err := sftpClient.ReadDir(path.Join(itr.dir, litestream.FormatIndex(index)))
		if err != nil {
			itr.err = err
			return false
		}
		for _, fi := range fis {
			filename := path.Base(fi.Name())
			if fi.IsDir() {
				continue
			}

			// Segment files are named "<offset>.wal.lz4"; skip anything
			// whose name does not parse as an offset.
			offset, err := litestream.ParseOffset(strings.TrimSuffix(filename, ".wal.lz4"))
			if err != nil {
				continue
			}

			itr.infos = append(itr.infos, litestream.WALSegmentInfo{
				Generation: itr.generation,
				Index:      index,
				Offset:     offset,
				Size:       fi.Size(),
				CreatedAt:  fi.ModTime().UTC(),
			})
		}

		// If the directory yielded at least one segment, infos[0] is the
		// current segment; otherwise loop to the next index.
		if len(itr.infos) > 0 {
			return true
		}
	}
}
// Err returns the first error encountered during iteration, if any.
func (itr *walSegmentIterator) Err() error {
	return itr.err
}
// WALSegment returns the metadata for the current WAL segment. It returns
// the zero value when no segment is currently cached.
func (itr *walSegmentIterator) WALSegment() litestream.WALSegmentInfo {
	if len(itr.infos) > 0 {
		return itr.infos[0]
	}
	return litestream.WALSegmentInfo{}
}