Compare commits

...

95 Commits

Author SHA1 Message Date
Ben Johnson
2e3dda89ad Merge pull request #174 from benbjohnson/fix-snapshot-only-restore
Fix snapshot-only restore
2021-04-24 07:51:50 -06:00
Ben Johnson
331f6072bf Fix snapshot-only restore
This commit fixes a bug introduced by parallel restore (03831e2)
where snapshot-only restores were not being handled correctly and
Litestream would hang indefinitely. Now the restore will check
explicitly for snapshot-only restores and exit the restore process
early to avoid WAL handling completely.
2021-04-24 07:48:25 -06:00
Ben Johnson
6acfbcbc64 Add Docker shield to README 2021-04-23 16:04:44 -06:00
Ben Johnson
f21ebcda28 Merge pull request #171 from benbjohnson/remove-release-binary
Remove uncompressed release binary
2021-04-22 16:50:11 -06:00
Ben Johnson
e366b348cd Remove uncompressed release binary
This commit removes the uncompressed binary from the release page.
It was originally added to simplify Docker but it turns out that
having to chmod the binary will double its size.
2021-04-22 16:46:01 -06:00
Ben Johnson
064158b060 Merge pull request #170 from benbjohnson/remove-sync-lock-2
Remove SQLite write lock during WAL sync
2021-04-22 16:40:29 -06:00
Ben Johnson
1d1fd6e686 Remove SQLite write lock during WAL sync (again)
This commit reattempts a change to remove the write lock that was
previously tried in 998e831. This change will reduce the number of
locks on the database which should help reduce error messages that
applications see when they do not have busy_timeout set.

In addition to the lock removal, a passive checkpoint is issued
immediately before the read lock is obtained to prevent additional
checkpoints by the application itself. SQLite does not support
checkpoints from an active transaction so it cannot be done afterward.
2021-04-22 16:35:04 -06:00
Ben Johnson
73f8de23a6 Merge pull request #169 from benbjohnson/credentials
Allow use of LITESTREAM prefixed environment variables
2021-04-22 16:01:23 -06:00
Ben Johnson
7f4325e814 Allow use of LITESTREAM prefixed environment variables
This commit adds optional `LITESTREAM_ACCESS_KEY_ID` and
`LITESTREAM_SECRET_ACCESS_KEY` environment variables that can
be used instead of their `AWS` counterparts. The AWS-prefixed
variables have caused some confusion with users who were not
using AWS S3.
2021-04-22 15:58:17 -06:00
Ben Johnson
ad4b84410d Merge pull request #168 from benbjohnson/relicense
Relicense to Apache 2
2021-04-21 16:34:50 -06:00
Ben Johnson
0b7906aaac APLv2 2021-04-21 16:32:05 -06:00
Ben Johnson
6fc6d151b1 Merge pull request #167 from benbjohnson/parallel-restore
Download WAL files in parallel during restore
2021-04-21 16:27:09 -06:00
Ben Johnson
03831e2d06 Download WAL files in parallel during restore
This commit changes the restore to download multiple WAL files to
the local disk in parallel while another goroutine applies those
files in order. Downloading & applying the WAL files in serial
reduces the total throughput as WAL files are typically made up of
multiple small files.
2021-04-21 16:07:29 -06:00
Ben Johnson
257b625749 Merge pull request #166 from benbjohnson/fix-restore-to-index
Fix snapshot selection during restore-by-index
2021-04-21 12:15:19 -06:00
Ben Johnson
1c01af4e69 Fix snapshot selection during restore-by-index
This commit fixes a bug where restoring to a specific index will
incorrectly choose the latest snapshot instead of choosing the
latest snapshot that occurred before the given index.
2021-04-21 12:09:05 -06:00
Ben Johnson
bbd0f3b33c Merge pull request #165 from benbjohnson/restore-hex-index
Use hex encoding for restore's index flag
2021-04-19 12:07:17 -06:00
Ben Johnson
9439822763 Use hex-encoding for restore's index flag
This commit changes the `-index` flag on the `restore` command by
parsing it as a hex number instead of a decimal number. This is
done because the index is represented in hex form everywhere else
in the application.
2021-04-19 12:05:38 -06:00
Ben Johnson
63e51d2050 Merge pull request #164 from benbjohnson/improve-restore-logging
Improve restoration logging
2021-04-18 09:35:56 -06:00
Ben Johnson
84830bc4ad Improve restoration logging
This commit splits out logging for downloading a WAL file and applying
the WAL file to the database to get more accurate timing measurements.
2021-04-18 09:33:53 -06:00
Ben Johnson
ce0a5d2820 Merge pull request #163 from benbjohnson/remove-dry-run
Remove -dry-run flag in restore
2021-04-18 09:26:01 -06:00
Ben Johnson
3ad157d841 Remove -dry-run flag in restore
This flag is being removed because it's not actually that useful
in practice and it just makes the restoration code more complicated.
2021-04-18 09:21:50 -06:00
Ben Johnson
a20e35c5cc Merge pull request #162 from benbjohnson/log-micro
Use microsecond resolution for logging
2021-04-18 09:21:27 -06:00
Ben Johnson
029921299c Use microsecond resolution for logging
This commit changes `log` from second to microsecond resolution to
improve debugging when restoring WAL files.
2021-04-18 09:18:44 -06:00
Ben Johnson
f8d6969a4f Merge pull request #161 from benbjohnson/fix-flakey-wal-updated-at-test
Ensure minimum wait time for TestDB_UpdatedAt/WAL test
2021-04-17 09:41:59 -06:00
Ben Johnson
1e8bce029f Ensure minimum wait time for TestDB_UpdatedAt/WAL test
This commit fixes an issue where the test can be flaky if run on
a system with a higher time resolution. It now waits a minimum of
at least 100ms.
2021-04-17 09:40:07 -06:00
Ben Johnson
b29fb7e2ba Merge pull request #160 from benbjohnson/netgo
Add osusergo & netgo tags for static builds
2021-04-17 09:21:09 -06:00
Ben Johnson
dbb69786d3 Add osusergo & netgo tags for static builds 2021-04-17 09:18:03 -06:00
Ben Johnson
c70e9c0ba8 Merge pull request #159 from benbjohnson/strip
Reduce binary size
2021-04-17 09:14:05 -06:00
Ben Johnson
dd8fdd8c8c Reduce binary size 2021-04-17 09:12:38 -06:00
Ben Johnson
dfd1b1b92d Merge pull request #158 from benbjohnson/sqlite-flags
Omit load extensions for static builds
2021-04-17 09:08:36 -06:00
Ben Johnson
c4c30e394d Omit load extensions for static builds 2021-04-17 09:06:54 -06:00
Ben Johnson
28673aeb01 Merge pull request #157 from benbjohnson/expand-env
Configuration file environment variable expansion
2021-04-16 09:32:11 -06:00
Ben Johnson
04ae010378 Configuration file environment variable expansion
This commit adds simple variable expansion using either `$FOO`
or `${FOO}` when evaluating the config file. This can be disabled
by any command by using the `-no-expand-env` flag.
2021-04-16 09:28:01 -06:00
Ben Johnson
cefbcb0460 Merge pull request #156 from benbjohnson/docker
Docker
2021-04-16 08:37:30 -06:00
Ben Johnson
01407c3c25 Add Docker image 2021-04-16 08:35:22 -06:00
Ben Johnson
66fdb208c7 Merge pull request #154 from benbjohnson/fix-not-database-error-message
Remove reference to "wal" in first db init command
2021-04-15 11:59:28 -06:00
Ben Johnson
247896b8b7 Remove reference to "wal" in first db init command
This commit changes the error message of the first SQL command
executed during initialization. Typically, it wraps the error with
a message of "enable wal" since it is enabling the WAL mode but
that can be confusing if the DB connection or file is invalid.

Instead, the error is returned as-is and we can determine the
source of the error since it is the only unwrapped DB-related error.
2021-04-15 11:51:22 -06:00
Ben Johnson
1e6e741f55 Merge pull request #151 from benbjohnson/arm6
Add arm6 release builds
2021-04-12 09:47:54 -06:00
Ben Johnson
b31daabf52 Add arm6 release builds 2021-04-11 10:27:45 -06:00
Ben Johnson
1ccb4ef922 Merge pull request #149 from benbjohnson/arm-cgo
Enable CGO for cross compilation
2021-04-10 12:49:00 -06:00
Ben Johnson
54f0659c7b Enable CGO for cross compilation 2021-04-10 12:35:43 -06:00
Ben Johnson
5c0b8536f0 Merge pull request #148 from benbjohnson/arm
Support ARM release builds
2021-04-10 08:48:32 -06:00
Ben Johnson
462330ead6 Support ARM release builds 2021-04-10 08:39:10 -06:00
Ben Johnson
178cf836b1 Move pull request template 2021-04-07 16:24:49 -06:00
Ben Johnson
f45c0d8560 Create pull_request_template.md 2021-04-07 16:23:11 -06:00
Ben Johnson
bb146e2c09 Merge pull request #141 from benbjohnson/skip-verify
Add skip-verify flag for using self-signed certificates
2021-03-27 08:02:55 -06:00
Ben Johnson
f1d2df3e73 Add skip-verify flag for using self-signed certificates
This commit adds a `skip-verify` flag to the replica configuration
so that it can be used with self-signed certificates. This is useful
when running a local instance of MinIO with TLS for testing.
2021-03-27 08:00:09 -06:00
Ben Johnson
ef39987cc7 Merge pull request #133 from benbjohnson/sigterm
Catch sigterm & add shutdown logging
2021-03-21 09:38:38 -06:00
Ben Johnson
ee0c4c62d8 Catch sigterm & add shutdown logging
This commit changes the signal handler for `replicate` to catch
`syscall.SIGTERM` for non-Windows installations. It also adds some
logging to indicate when a shutdown has been initiated and when it
has finished.
2021-03-21 09:36:13 -06:00
Ben Johnson
e2de7e852c Merge pull request #132 from benbjohnson/sync-on-close
Sync on close
2021-03-21 08:45:26 -06:00
Ben Johnson
0529ce74b7 Sync on close
This commit changes the `replicate` command so that it performs a
final DB sync & replica sync before it exits to ensure it has
backed up all WAL frames at the time of exit.
2021-03-21 08:43:55 -06:00
Ben Johnson
421693130c Merge pull request #131 from benbjohnson/if-replica-exists
Add `-if-replica-exists` flag to restore
2021-03-21 08:09:57 -06:00
Ben Johnson
4a17c81b91 Add -if-replica-exists flag to restore
This commit adds a flag to change the exit code when restoring
from a replica where there is no existing backup. When set,
finding no backup will return a `0` exit code. The command will
still fail if other errors occur.
2021-03-21 08:08:11 -06:00
Ben Johnson
ba068ea3f8 Upload raw release binaries 2021-03-20 09:50:48 -06:00
Ben Johnson
085974fe1d Merge pull request #130 from benbjohnson/static
Add static release builds
2021-03-20 09:42:53 -06:00
Ben Johnson
18598a10e6 Add static release builds 2021-03-20 09:41:19 -06:00
Ben Johnson
16c50d1d2e Merge pull request #120 from benbjohnson/default-force-path-style
Default to force path style if endpoint set
2021-03-11 15:29:18 -07:00
Ben Johnson
929a66314c Default to force path style if endpoint set
This commit changes the replica configuration behavior to default
the `force-path-style` field to `true` when an `endpoint` is set.
This works because the only service that does not use the path
style is AWS S3 which does not use an endpoint.
2021-03-11 15:26:41 -07:00
Ben Johnson
2e7a6ae715 Merge pull request #118 from benbjohnson/default-region 2021-03-09 15:42:04 -07:00
Ben Johnson
896aef070c Default region if endpoint specified 2021-03-09 15:38:32 -07:00
Ben Johnson
3598d8b572 Merge pull request #111 from benbjohnson/linode
Add support for Linode Object Storage replica URLs
2021-03-07 08:55:29 -07:00
Ben Johnson
3183cf0e2e Add support for Linode Object Storage replica URLs
This commit adds the ability to specify Linode Object Storage
as replica URLs in the command line and configuration file:

	s3://MYBKT.us-east-1.linodeobjects.com/MYPATH
2021-03-07 08:47:24 -07:00
Ben Johnson
a59ee6ed63 Merge pull request #110 from benbjohnson/digitalocean
Add support for DigitalOcean Spaces replica URLs
2021-03-07 08:29:23 -07:00
Ben Johnson
e4c1a82eb2 Add support for DigitalOcean Spaces replica URLs
This commit adds the ability to specify DigitalOcean Spaces as
replica URLs in the command line and configuration file:

	s3://mybkt.nyc3.digitaloceanspaces.com/mypath
2021-03-07 08:25:26 -07:00
Ben Johnson
aa54e4698d Merge pull request #109 from benbjohnson/wal-mismatch-validation-info
Add WAL validation debug information
2021-03-07 07:55:02 -07:00
Ben Johnson
43e40ce8d3 Merge pull request #108 from benbjohnson/revert-lock-removal
Revert sync lock removal
2021-03-07 07:52:49 -07:00
Ben Johnson
0bd1b13b94 Add wal validation debug information on error
This commit adds the WAL header and shadow path to "wal header mismatch"
errors to help debug issues. The mismatch seems to happen more often
than I would expect on restart. This error doesn't cause any corruption;
it simply causes a generation to restart which requires a snapshot.
2021-03-07 07:48:43 -07:00
Ben Johnson
1c16aae550 Revert sync lock removal
This commit reverts the removal of the SQLite write lock during
WAL sync (998e831c5c). The change
caused validation mismatch errors during the long-running test
although the restored database did not appear to be corrupted so
perhaps it's simply a locking issue during validation.
2021-03-07 07:30:25 -07:00
Ben Johnson
49f47ea87f Merge pull request #105 from benbjohnson/db-config-fields
Expose additional DB configuration settings
2021-03-06 08:37:02 -07:00
Ben Johnson
8947adc312 Expose additional DB configuration settings
This commit exposes the monitor interval, checkpoint interval,
minimum checkpoint page count, and maximum checkpoint page count
via the YAML configuration file.
2021-03-06 08:33:19 -07:00
Ben Johnson
9341863bdb Merge pull request #104 from benbjohnson/remove-sync-lock
Remove SQLite write lock during WAL sync
2021-03-06 08:08:23 -07:00
Ben Johnson
998e831c5c Remove SQLite write lock during WAL sync
Originally, Litestream relied on a SQLite write lock to ensure
transactions were atomically replicated. However, this was changed
so that Litestream itself now validates the transaction boundaries.
As such, the write lock on the database is no longer needed. The
read lock is sufficient to prevent WAL rollover and the WAL is
append only so it is safe to read up to a known position calculated
via fstat().

WAL validation change was made in 031a526b9a

The locking code, however, was moved in this commit to the
post-checkpoint copy to ensure the end-of-file is not overwritten
by aggressive writers.
2021-03-06 07:51:35 -07:00
Ben Johnson
b2ca113fb5 Merge pull request #103 from benbjohnson/fix-addr-log
Fix logged hostport for metrics endpoint
2021-03-06 07:30:36 -07:00
Ben Johnson
b211e82ed2 Fix logged hostport for metrics endpoint
This commit fixes a bug where the bind address is not reported
correctly in the log if a hostname is specified. Previously it
would always report the host as "localhost" even if a host was
specified (such as "0.0.0.0:9090").

This commit also adds validation to require the port to be
specified and only specifying a hostname will return an error.
2021-03-06 07:23:09 -07:00
Ben Johnson
e2779169a0 README 2021-03-02 08:11:51 -07:00
Ben Johnson
ec2f9c84d5 Merge pull request #96 from benbjohnson/acknowledgements
Acknowledgments
2021-02-28 09:19:44 -07:00
Ben Johnson
78eb8dcc53 Acknowledgments 2021-02-28 09:16:35 -07:00
Ben Johnson
cafa0f5942 Merge pull request #94 from benbjohnson/prevent-config-and-replica-url
Prevent user from specifying replica URL & config flag
2021-02-28 08:31:29 -07:00
Ben Johnson
325482a97c Prevent user from specifying replica URL & config flag
Previously, if a replica URL was specified then the `-config` flag
was silently ignored. This commit changes this behavior so that
specifying both the URL & config flag will now return an error.
2021-02-28 08:09:24 -07:00
Ben Johnson
9cee1285b9 Merge pull request #93 from benbjohnson/non-ofd-locks
Fix release of non-OFD locks
2021-02-28 07:28:00 -07:00
Ben Johnson
a14a74d678 Fix release of non-OFD locks
This commit removes short-lived `os.Open()` calls on the database
file because this can cause locks to be released when `os.File.Close()`
is later called if the operating system does not support OFD
(Open File Descriptor) locks.
2021-02-28 06:44:02 -07:00
Ben Johnson
f652186adf Merge pull request #84 from benbjohnson/snapshot-interval 2021-02-25 15:41:58 -07:00
Ben Johnson
afb8731ead Add snapshot interval
This commit adds the ability to periodically perform snapshots on
an interval that is separate from retention. For example, this lets
you retain backups for 24 hours but you can snapshot your database
every six hours to improve recovery time.
2021-02-25 15:34:13 -07:00
Ben Johnson
ce2d54cc20 Merge pull request #82 from benbjohnson/fix-db-init-failure
Fix error handling when DB.init() fails
2021-02-24 15:47:58 -07:00
Ben Johnson
d802e15b4f Fix error handling when DB.init() fails
The `DB.init()` can fail temporarily for a variety of reasons such
as the database being locked. Previously, the DB would save the
`*sql.DB` connection even if a step failed and this prevented the
database from attempting initialization again. This change makes it
so that the connection is only saved if initialization is successful.
On failure, the initialization process will be retried on next sync.
2021-02-24 15:43:28 -07:00
Ben Johnson
d6ece0b826 Merge pull request #73 from benbjohnson/fix-example-yml
Fix example litestream.yml replica configuration
2021-02-22 07:54:31 -07:00
Ben Johnson
cb007762be Fix example litestream.yml replica configuration 2021-02-22 07:52:56 -07:00
Ben Johnson
6a90714bbe Merge pull request #70 from benbjohnson/fix-global-settings
Fix global settings propagation
2021-02-22 06:40:16 -07:00
Ben Johnson
622ba82ebb Fix global settings propagation
This commit fixes an issue caused by a refactor where setting global
or local AWS credentials in a config file fails.
2021-02-22 06:37:40 -07:00
Ben Johnson
6ca010e9db Merge pull request #66 from benbjohnson/s3-compatible 2021-02-21 10:00:43 -07:00
Ben Johnson
ad9ce43127 Add support for S3-compatible object storage.
This commit adds support for non-AWS S3-compatible storage such as
MinIO, Backblaze B2, & Google Cloud Storage (GCS). Other backends
should also work but some code has been added to make URL-based
configurations work more easily.
2021-02-21 09:40:48 -07:00
Ben Johnson
167d333fcd Merge pull request #65 from benbjohnson/windows
Add Windows Service & MSI builds
2021-02-19 16:25:10 -07:00
Ben Johnson
c5390dec1d Add Windows Service & MSI builds 2021-02-19 16:21:04 -07:00
Ben Johnson
e2cbd5fb63 README 2021-02-12 08:14:55 -07:00
Ben Johnson
8d083f7a2d README 2021-02-09 07:07:23 -07:00
30 changed files with 1959 additions and 1437 deletions

7
.github/pull_request_template.md vendored Normal file
View File

@@ -0,0 +1,7 @@
Litestream is not accepting code contributions at this time. You can find a summary of why on the project's GitHub README:
https://github.com/benbjohnson/litestream#open-source-not-open-contribution
Web site & Documentation changes, however, are welcome. You can find that repository here:
https://github.com/benbjohnson/litestream.io

View File

@@ -3,10 +3,30 @@ on:
types: types:
- created - created
name: release name: release (linux)
jobs: jobs:
linux: build:
runs-on: ubuntu-latest runs-on: ubuntu-latest
strategy:
matrix:
include:
- arch: amd64
cc: gcc
- arch: arm64
cc: aarch64-linux-gnu-gcc
- arch: arm
arm: 6
cc: arm-linux-gnueabi-gcc
- arch: arm
arm: 7
cc: arm-linux-gnueabihf-gcc
env:
GOOS: linux
GOARCH: ${{ matrix.arch }}
GOARM: ${{ matrix.arm }}
CC: ${{ matrix.cc }}
steps: steps:
- uses: actions/checkout@v2 - uses: actions/checkout@v2
- uses: actions/setup-go@v2 - uses: actions/setup-go@v2
@@ -16,6 +36,11 @@ jobs:
env: env:
GITHUB_TOKEN: ${{ github.token }} GITHUB_TOKEN: ${{ github.token }}
- name: Install cross-compilers
run: |
sudo apt-get update
sudo apt-get install -y gcc-aarch64-linux-gnu gcc-arm-linux-gnueabihf gcc-arm-linux-gnueabi
- name: Install nfpm - name: Install nfpm
run: | run: |
wget https://github.com/goreleaser/nfpm/releases/download/v2.2.3/nfpm_2.2.3_Linux_x86_64.tar.gz wget https://github.com/goreleaser/nfpm/releases/download/v2.2.3/nfpm_2.2.3_Linux_x86_64.tar.gz
@@ -23,23 +48,24 @@ jobs:
- name: Build litestream - name: Build litestream
run: | run: |
rm -rf dist
mkdir -p dist mkdir -p dist
cp etc/litestream.yml etc/litestream.service dist cp etc/litestream.yml etc/litestream.service dist
cat etc/nfpm.yml | LITESTREAM_VERSION=${{ steps.release.outputs.tag_name }} envsubst > dist/nfpm.yml cat etc/nfpm.yml | LITESTREAM_VERSION=${{ steps.release.outputs.tag_name }} envsubst > dist/nfpm.yml
go build -ldflags "-X 'main.Version=${{ steps.release.outputs.tag_name }}'" -o dist/litestream ./cmd/litestream CGO_ENABLED=1 go build -ldflags "-s -w -X 'main.Version=${{ steps.release.outputs.tag_name }}'" -o dist/litestream ./cmd/litestream
cd dist cd dist
tar -czvf litestream-${{ steps.release.outputs.tag_name }}-linux-amd64.tar.gz litestream tar -czvf litestream-${{ steps.release.outputs.tag_name }}-${{ env.GOOS }}-${{ env.GOARCH }}${{ env.GOARM }}.tar.gz litestream
../nfpm pkg --config nfpm.yml --packager deb --target litestream-${{ steps.release.outputs.tag_name }}-linux-amd64.deb ../nfpm pkg --config nfpm.yml --packager deb --target litestream-${{ steps.release.outputs.tag_name }}-${{ env.GOOS }}-${{ env.GOARCH }}${{ env.GOARM }}.deb
- name: Upload release binary - name: Upload release tarball
uses: actions/upload-release-asset@v1.0.2 uses: actions/upload-release-asset@v1.0.2
env: env:
GITHUB_TOKEN: ${{ github.token }} GITHUB_TOKEN: ${{ github.token }}
with: with:
upload_url: ${{ steps.release.outputs.upload_url }} upload_url: ${{ steps.release.outputs.upload_url }}
asset_path: ./dist/litestream-${{ steps.release.outputs.tag_name }}-linux-amd64.tar.gz asset_path: ./dist/litestream-${{ steps.release.outputs.tag_name }}-${{ env.GOOS }}-${{ env.GOARCH }}${{ env.GOARM }}.tar.gz
asset_name: litestream-${{ steps.release.outputs.tag_name }}-linux-amd64.tar.gz asset_name: litestream-${{ steps.release.outputs.tag_name }}-${{ env.GOOS }}-${{ env.GOARCH }}${{ env.GOARM }}.tar.gz
asset_content_type: application/gzip asset_content_type: application/gzip
- name: Upload debian package - name: Upload debian package
@@ -48,6 +74,6 @@ jobs:
GITHUB_TOKEN: ${{ github.token }} GITHUB_TOKEN: ${{ github.token }}
with: with:
upload_url: ${{ steps.release.outputs.upload_url }} upload_url: ${{ steps.release.outputs.upload_url }}
asset_path: ./dist/litestream-${{ steps.release.outputs.tag_name }}-linux-amd64.deb asset_path: ./dist/litestream-${{ steps.release.outputs.tag_name }}-${{ env.GOOS }}-${{ env.GOARCH }}${{ env.GOARM }}.deb
asset_name: litestream-${{ steps.release.outputs.tag_name }}-linux-amd64.deb asset_name: litestream-${{ steps.release.outputs.tag_name }}-${{ env.GOOS }}-${{ env.GOARCH }}${{ env.GOARM }}.deb
asset_content_type: application/octet-stream asset_content_type: application/octet-stream

View File

@@ -0,0 +1,60 @@
on:
release:
types:
- created
name: release (linux/static)
jobs:
build:
runs-on: ubuntu-latest
strategy:
matrix:
include:
- arch: amd64
cc: gcc
- arch: arm64
cc: aarch64-linux-gnu-gcc
- arch: arm
arm: 6
cc: arm-linux-gnueabi-gcc
- arch: arm
arm: 7
cc: arm-linux-gnueabihf-gcc
env:
GOOS: linux
GOARCH: ${{ matrix.arch }}
GOARM: ${{ matrix.arm }}
CC: ${{ matrix.cc }}
steps:
- uses: actions/checkout@v2
- uses: actions/setup-go@v2
- id: release
uses: bruceadams/get-release@v1.2.2
env:
GITHUB_TOKEN: ${{ github.token }}
- name: Install cross-compilers
run: |
sudo apt-get update
sudo apt-get install -y gcc-aarch64-linux-gnu gcc-arm-linux-gnueabihf gcc-arm-linux-gnueabi
- name: Build litestream
run: |
rm -rf dist
mkdir -p dist
CGO_ENABLED=1 go build -ldflags "-s -w -extldflags "-static" -X 'main.Version=${{ steps.release.outputs.tag_name }}'" -tags osusergo,netgo,sqlite_omit_load_extension -o dist/litestream ./cmd/litestream
cd dist
tar -czvf litestream-${{ steps.release.outputs.tag_name }}-${{ env.GOOS }}-${{ env.GOARCH }}${{ env.GOARM }}-static.tar.gz litestream
- name: Upload release tarball
uses: actions/upload-release-asset@v1.0.2
env:
GITHUB_TOKEN: ${{ github.token }}
with:
upload_url: ${{ steps.release.outputs.upload_url }}
asset_path: ./dist/litestream-${{ steps.release.outputs.tag_name }}-${{ env.GOOS }}-${{ env.GOARCH }}${{ env.GOARM }}-static.tar.gz
asset_name: litestream-${{ steps.release.outputs.tag_name }}-${{ env.GOOS }}-${{ env.GOARCH }}${{ env.GOARM }}-static.tar.gz
asset_content_type: application/gzip

11
Dockerfile Normal file
View File

@@ -0,0 +1,11 @@
FROM golang:1.16 as builder
WORKDIR /src/litestream
COPY . .
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg \
go build -ldflags '-s -w -extldflags "-static"' -tags osusergo,netgo,sqlite_omit_load_extension -o /usr/local/bin/litestream ./cmd/litestream
FROM alpine
COPY --from=builder /usr/local/bin/litestream /usr/local/bin/litestream
ENTRYPOINT ["/usr/local/bin/litestream"]
CMD []

858
LICENSE
View File

@@ -1,674 +1,202 @@
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/> Apache License
Everyone is permitted to copy and distribute verbatim copies Version 2.0, January 2004
of this license document, but changing it is not allowed. http://www.apache.org/licenses/
Preamble TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
The GNU General Public License is a free, copyleft license for 1. Definitions.
software and other kinds of works.
"License" shall mean the terms and conditions for use, reproduction,
The licenses for most software and other practical works are designed and distribution as defined by Sections 1 through 9 of this document.
to take away your freedom to share and change the works. By contrast,
the GNU General Public License is intended to guarantee your freedom to "Licensor" shall mean the copyright owner or entity authorized by
share and change all versions of a program--to make sure it remains free the copyright owner that is granting the License.
software for all its users. We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to "Legal Entity" shall mean the union of the acting entity and all
any other work released this way by its authors. You can apply it to other entities that control, are controlled by, or are under common
your programs, too. control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
When we speak of free software, we are referring to freedom, not direction or management of such entity, whether by contract or
price. Our General Public Licenses are designed to make sure that you otherwise, or (ii) ownership of fifty percent (50%) or more of the
have the freedom to distribute copies of free software (and charge for outstanding shares, or (iii) beneficial ownership of such entity.
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new "You" (or "Your") shall mean an individual or Legal Entity
free programs, and that you know you can do these things. exercising permissions granted by this License.
To protect your rights, we need to prevent others from denying you "Source" form shall mean the preferred form for making modifications,
these rights or asking you to surrender the rights. Therefore, you have including but not limited to software source code, documentation
certain responsibilities if you distribute copies of the software, or if source, and configuration files.
you modify it: responsibilities to respect the freedom of others.
"Object" form shall mean any form resulting from mechanical
For example, if you distribute copies of such a program, whether transformation or translation of a Source form, including but
gratis or for a fee, you must pass on to the recipients the same not limited to compiled object code, generated documentation,
freedoms that you received. You must make sure that they, too, receive and conversions to other media types.
or can get the source code. And you must show them these terms so they
know their rights. "Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
Developers that use the GNU GPL protect your rights with two steps: copyright notice that is included in or attached to the work
(1) assert copyright on the software, and (2) offer you this License (an example is provided in the Appendix below).
giving you legal permission to copy, distribute and/or modify it.
"Derivative Works" shall mean any work, whether in Source or Object
For the developers' and authors' protection, the GPL clearly explains form, that is based on (or derived from) the Work and for which the
that there is no warranty for this free software. For both users' and editorial revisions, annotations, elaborations, or other modifications
authors' sake, the GPL requires that modified versions be marked as represent, as a whole, an original work of authorship. For the purposes
changed, so that their problems will not be attributed erroneously to of this License, Derivative Works shall not include works that remain
authors of previous versions. separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer "Contribution" shall mean any work of authorship, including
can do so. This is fundamentally incompatible with the aim of the original version of the Work and any modifications or additions
protecting users' freedom to change the software. The systematic to that Work or Derivative Works thereof, that is intentionally
pattern of such abuse occurs in the area of products for individuals to submitted to Licensor for inclusion in the Work by the copyright owner
use, which is precisely where it is most unacceptable. Therefore, we or by an individual or Legal Entity authorized to submit on behalf of
have designed this version of the GPL to prohibit the practice for those the copyright owner. For the purposes of this definition, "submitted"
products. If such problems arise substantially in other domains, we means any form of electronic, verbal, or written communication sent
stand ready to extend this provision to those domains in future versions to the Licensor or its representatives, including but not limited to
of the GPL, as needed to protect the freedom of users. communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Finally, every program is threatened constantly by software patents. Licensor for the purpose of discussing and improving the Work, but
States should not allow patents to restrict development and use of excluding communication that is conspicuously marked or otherwise
software on general-purpose computers, but in those that do, we wish to designated in writing by the copyright owner as "Not a Contribution."
avoid the special danger that patents applied to a free program could
make it effectively proprietary. To prevent this, the GPL assures that "Contributor" shall mean Licensor and any individual or Legal Entity
patents cannot be used to render the program non-free. on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
The precise terms and conditions for copying, distribution and
modification follow. 2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
TERMS AND CONDITIONS worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
0. Definitions. publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
"This License" refers to version 3 of the GNU General Public License.
3. Grant of Patent License. Subject to the terms and conditions of
"Copyright" also means copyright-like laws that apply to other kinds of this License, each Contributor hereby grants to You a perpetual,
works, such as semiconductor masks. worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
"The Program" refers to any copyrightable work licensed under this use, offer to sell, sell, import, and otherwise transfer the Work,
License. Each licensee is addressed as "you". "Licensees" and where such license applies only to those patent claims licensable
"recipients" may be individuals or organizations. by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
To "modify" a work means to copy from or adapt all or part of the work with the Work to which such Contribution(s) was submitted. If You
in a fashion requiring copyright permission, other than the making of an institute patent litigation against any entity (including a
exact copy. The resulting work is called a "modified version" of the cross-claim or counterclaim in a lawsuit) alleging that the Work
earlier work or a work "based on" the earlier work. or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
A "covered work" means either the unmodified Program or a work based granted to You under this License for that Work shall terminate
on the Program. as of the date such litigation is filed.
To "propagate" a work means to do anything with it that, without 4. Redistribution. You may reproduce and distribute copies of the
permission, would make you directly or secondarily liable for Work or Derivative Works thereof in any medium, with or without
infringement under applicable copyright law, except executing it on a modifications, and in Source or Object form, provided that You
computer or modifying a private copy. Propagation includes copying, meet the following conditions:
distribution (with or without modification), making available to the
public, and in some countries other activities as well. (a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through (b) You must cause any modified files to carry prominent notices
a computer network, with no transfer of a copy, is not conveying. stating that You changed the files; and
An interactive user interface displays "Appropriate Legal Notices" (c) You must retain, in the Source form of any Derivative Works
to the extent that it includes a convenient and prominently visible that You distribute, all copyright, patent, trademark, and
feature that (1) displays an appropriate copyright notice, and (2) attribution notices from the Source form of the Work,
tells the user that there is no warranty for the work (except to the excluding those notices that do not pertain to any part of
extent that warranties are provided), that licensees may convey the the Derivative Works; and
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a (d) If the Work includes a "NOTICE" text file as part of its
menu, a prominent item in the list meets this criterion. distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
1. Source Code. within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
The "source code" for a work means the preferred form of the work of the following places: within a NOTICE text file distributed
for making modifications to it. "Object code" means any non-source as part of the Derivative Works; within the Source form or
form of a work. documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
A "Standard Interface" means an interface that either is an official wherever such third-party notices normally appear. The contents
standard defined by a recognized standards body, or, in the case of of the NOTICE file are for informational purposes only and
interfaces specified for a particular programming language, one that do not modify the License. You may add Your own attribution
is widely used among developers working in that language. notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
The "System Libraries" of an executable work include anything, other that such additional attribution notices cannot be construed
than the work as a whole, that (a) is included in the normal form of as modifying the License.
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that You may add Your own copyright statement to Your modifications and
Major Component, or to implement a Standard Interface for which an may provide additional or different license terms and conditions
implementation is available to the public in source code form. A for use, reproduction, or distribution of Your modifications, or
"Major Component", in this context, means a major essential component for any such Derivative Works as a whole, provided Your use,
(kernel, window system, and so on) of the specific operating system reproduction, and distribution of the Work otherwise complies with
(if any) on which the executable work runs, or a compiler used to the conditions stated in this License.
produce the work, or an object code interpreter used to run it.
5. Submission of Contributions. Unless You explicitly state otherwise,
The "Corresponding Source" for a work in object code form means all any Contribution intentionally submitted for inclusion in the Work
the source code needed to generate, install, and (for an executable by You to the Licensor shall be under the terms and conditions of
work) run the object code and to modify the work, including scripts to this License, without any additional terms or conditions.
control those activities. However, it does not include the work's Notwithstanding the above, nothing herein shall supersede or modify
System Libraries, or general-purpose tools or generally available free the terms of any separate license agreement you may have executed
programs which are used unmodified in performing those activities but with Licensor regarding such Contributions.
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for 6. Trademarks. This License does not grant permission to use the trade
the work, and the source code for shared libraries and dynamically names, trademarks, service marks, or product names of the Licensor,
linked subprograms that the work is specifically designed to require, except as required for reasonable and customary use in describing the
such as by intimate data communication or control flow between those origin of the Work and reproducing the content of the NOTICE file.
subprograms and other parts of the work.
7. Disclaimer of Warranty. Unless required by applicable law or
The Corresponding Source need not include anything that users agreed to in writing, Licensor provides the Work (and each
can regenerate automatically from other parts of the Corresponding Contributor provides its Contributions) on an "AS IS" BASIS,
Source. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
The Corresponding Source for a work in source code form is that of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
same work. PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
2. Basic Permissions. risks associated with Your exercise of permissions under this License.
All rights granted under this License are granted for the term of 8. Limitation of Liability. In no event and under no legal theory,
copyright on the Program, and are irrevocable provided the stated whether in tort (including negligence), contract, or otherwise,
conditions are met. This License explicitly affirms your unlimited unless required by applicable law (such as deliberate and grossly
permission to run the unmodified Program. The output from running a negligent acts) or agreed to in writing, shall any Contributor be
covered work is covered by this License only if the output, given its liable to You for damages, including any direct, indirect, special,
content, constitutes a covered work. This License acknowledges your incidental, or consequential damages of any character arising as a
rights of fair use or other equivalent, as provided by copyright law. result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
You may make, run and propagate covered works that you do not work stoppage, computer failure or malfunction, or any and all
convey, without conditions so long as your license otherwise remains other commercial damages or losses), even if such Contributor
in force. You may convey covered works to others for the sole purpose has been advised of the possibility of such damages.
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with 9. Accepting Warranty or Additional Liability. While redistributing
the terms of this License in conveying all material for which you do the Work or Derivative Works thereof, You may choose to offer,
not control copyright. Those thus making or running the covered works and charge a fee for, acceptance of support, warranty, indemnity,
for you must do so exclusively on your behalf, under your direction or other liability obligations and/or rights consistent with this
and control, on terms that prohibit them from making any copies of License. However, in accepting such obligations, You may act only
your copyrighted material outside their relationship with you. on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
Conveying under any other circumstances is permitted solely under defend, and hold each Contributor harmless for any liability
the conditions stated below. Sublicensing is not allowed; section 10 incurred by, or claims asserted against, such Contributor by reason
makes it unnecessary. of your accepting any such warranty or additional liability.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Use with the GNU Affero General Public License.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs APPENDIX: How to apply the Apache License to your work.
If you develop a new program, and you want it to be of the greatest To apply the Apache License to your work, attach the following
possible use to the public, the best way to achieve this is to make it boilerplate notice, with the fields enclosed by brackets "[]"
free software which everyone can redistribute and change under these terms. replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
To do so, attach the following notices to the program. It is safest Copyright [yyyy] [name of copyright owner]
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.> Licensed under the Apache License, Version 2.0 (the "License");
Copyright (C) <year> <name of author> you may not use this file except in compliance with the License.
You may obtain a copy of the License at
This program is free software: you can redistribute it and/or modify http://www.apache.org/licenses/LICENSE-2.0
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful, Unless required by applicable law or agreed to in writing, software
but WITHOUT ANY WARRANTY; without even the implied warranty of distributed under the License is distributed on an "AS IS" BASIS,
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
GNU General Public License for more details. See the License for the specific language governing permissions and
limitations under the License.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If the program does terminal interaction, make it output a short
notice like this when it starts in an interactive mode:
<program> Copyright (C) <year> <name of author>
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, your program's commands
might be different; for a GUI interface, you would use an "about box".
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
<https://www.gnu.org/licenses/>.
The GNU General Public License does not permit incorporating your program
into proprietary programs. If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License. But first, please read
<https://www.gnu.org/licenses/why-not-lgpl.html>.

View File

@@ -1,17 +1,26 @@
default: default:
docker:
docker build -t litestream .
dist-linux: dist-linux:
mkdir -p dist mkdir -p dist
cp etc/litestream.yml dist/litestream.yml cp etc/litestream.yml dist/litestream.yml
docker run --rm -v "${PWD}":/usr/src/litestream -w /usr/src/litestream -e GOOS=linux -e GOARCH=amd64 golang:1.15 go build -v -o dist/litestream ./cmd/litestream docker run --rm -v "${PWD}":/usr/src/litestream -w /usr/src/litestream -e GOOS=linux -e GOARCH=amd64 golang:1.16 go build -v -ldflags "-s -w" -o dist/litestream ./cmd/litestream
tar -cz -f dist/litestream-linux-amd64.tar.gz -C dist litestream tar -cz -f dist/litestream-linux-amd64.tar.gz -C dist litestream
dist-linux-arm:
docker run --rm -v "${PWD}":/usr/src/litestream -w /usr/src/litestream -e CGO_ENABLED=1 -e CC=arm-linux-gnueabihf-gcc -e GOOS=linux -e GOARCH=arm golang-xc:1.16 go build -v -o dist/litestream-linux-arm ./cmd/litestream
dist-linux-arm64:
docker run --rm -v "${PWD}":/usr/src/litestream -w /usr/src/litestream -e CGO_ENABLED=1 -e CC=aarch64-linux-gnu-gcc -e GOOS=linux -e GOARCH=arm64 golang-xc:1.16 go build -v -o dist/litestream-linux-arm64 ./cmd/litestream
dist-macos: dist-macos:
ifndef LITESTREAM_VERSION ifndef LITESTREAM_VERSION
$(error LITESTREAM_VERSION is undefined) $(error LITESTREAM_VERSION is undefined)
endif endif
mkdir -p dist mkdir -p dist
go build -v -ldflags "-X 'main.Version=${LITESTREAM_VERSION}'" -o dist/litestream ./cmd/litestream go build -v -ldflags "-s -w -X 'main.Version=${LITESTREAM_VERSION}'" -o dist/litestream ./cmd/litestream
gon etc/gon.hcl gon etc/gon.hcl
mv dist/litestream.zip dist/litestream-${LITESTREAM_VERSION}-darwin-amd64.zip mv dist/litestream.zip dist/litestream-${LITESTREAM_VERSION}-darwin-amd64.zip
openssl dgst -sha256 dist/litestream-${LITESTREAM_VERSION}-darwin-amd64.zip openssl dgst -sha256 dist/litestream-${LITESTREAM_VERSION}-darwin-amd64.zip

276
README.md
View File

@@ -2,6 +2,7 @@ Litestream
![GitHub release (latest by date)](https://img.shields.io/github/v/release/benbjohnson/litestream) ![GitHub release (latest by date)](https://img.shields.io/github/v/release/benbjohnson/litestream)
![Status](https://img.shields.io/badge/status-beta-blue) ![Status](https://img.shields.io/badge/status-beta-blue)
![GitHub](https://img.shields.io/github/license/benbjohnson/litestream) ![GitHub](https://img.shields.io/github/license/benbjohnson/litestream)
[![Docker Pulls](https://img.shields.io/docker/pulls/litestream/litestream.svg?maxAge=604800)](https://hub.docker.com/r/litestream/litestream/)
![test](https://github.com/benbjohnson/litestream/workflows/test/badge.svg) ![test](https://github.com/benbjohnson/litestream/workflows/test/badge.svg)
========== ==========
@@ -10,275 +11,35 @@ background process and safely replicates changes incrementally to another file
or S3. Litestream only communicates with SQLite through the SQLite API so it or S3. Litestream only communicates with SQLite through the SQLite API so it
will not corrupt your database. will not corrupt your database.
If you need support or have ideas for improving Litestream, please visit the If you need support or have ideas for improving Litestream, please join the
[GitHub Discussions](https://github.com/benbjohnson/litestream/discussions) to [Litestream Slack][slack] or visit the [GitHub Discussions](https://github.com/benbjohnson/litestream/discussions).
chat. Please visit the [Litestream web site](https://litestream.io) for installation
instructions and documentation.
If you find this project interesting, please consider starring the project on If you find this project interesting, please consider starring the project on
GitHub. GitHub.
[slack]: https://join.slack.com/t/litestream/shared_invite/zt-n0j4s3ci-lx1JziR3bV6L2NMF723H3Q
## Installation
### Mac OS (Homebrew) ## Acknowledgements
To install from homebrew, run the following command: While the Litestream project does not accept external code patches, many
of the most valuable contributions are in the forms of testing, feedback, and
```sh documentation. These help harden software and streamline usage for other users.
$ brew install benbjohnson/litestream/litestream
```
### Linux (Debian)
You can download the `.deb` file from the [Releases page][releases] page and
then run the following:
```sh
$ sudo dpkg -i litestream-v0.3.0-linux-amd64.deb
```
Once installed, you'll need to enable & start the service:
```sh
$ sudo systemctl enable litestream
$ sudo systemctl start litestream
```
### Release binaries
You can also download the release binary for your system from the
[releases page][releases] and run it as a standalone application.
### Building from source
Download and install the [Go toolchain](https://golang.org/) and then run:
```sh
$ go install ./cmd/litestream
```
The `litestream` binary should be in your `$GOPATH/bin` folder.
## Quick Start
Litestream provides a configuration file that can be used for production
deployments but you can also specify a single database and replica on the
command line when trying it out.
First, you'll need to create an S3 bucket that we'll call `"mybkt"` in this
example. You'll also need to set your AWS credentials:
```sh
$ export AWS_ACCESS_KEY_ID=AKIAxxxxxxxxxxxxxxxx
$ export AWS_SECRET_ACCESS_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx/xxxxxxxxx
```
Next, you can run the `litestream replicate` command with the path to the
database you want to backup and the URL of your replica destination:
```sh
$ litestream replicate /path/to/db s3://mybkt/db
```
If you make changes to your local database, those changes will be replicated
to S3 every 10 seconds. From another terminal window, you can restore your
database from your S3 replica:
```
$ litestream restore -o /path/to/restored/db s3://mybkt/db
```
Voila! 🎉
Your database should be restored to the last replicated state that
was sent to S3. You can adjust your replication frequency and other options by
using a configuration-based approach specified below.
## Configuration
A configuration-based install gives you more replication options. By default,
the config file lives at `/etc/litestream.yml` but you can pass in a different
path to any `litestream` command using the `-config PATH` flag. You can also
set the `LITESTREAM_CONFIG` environment variable to specify a new path.
The configuration specifies one or more `dbs` and a list of one or more replica
locations for each db. Below are some common configurations:
### Replicate to S3
This will replicate the database at `/path/to/db` to the `"/db"` path inside
the S3 bucket named `"mybkt"`.
```yaml
access-key-id: AKIAxxxxxxxxxxxxxxxx
secret-access-key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx/xxxxxxxxx
dbs:
- path: /path/to/db
replicas:
- url: s3://mybkt/db
```
### Replicate to another file path
This will replicate the database at `/path/to/db` to a directory named
`/path/to/replica`.
```yaml
dbs:
- path: /path/to/db
replicas:
- path: /path/to/replica
```
### Retention period
By default, replicas will retain a snapshot & subsequent WAL changes for 24
hours. When the snapshot age exceeds the retention threshold, a new snapshot
is taken and uploaded and the previous snapshot and WAL files are removed.
You can configure this setting per-replica. Times are parsed using [Go's
duration](https://golang.org/pkg/time/#ParseDuration) so time units of hours
(`h`), minutes (`m`), and seconds (`s`) are allowed but days, weeks, months, and
years are not.
```yaml
db:
- path: /path/to/db
replicas:
- url: s3://mybkt/db
retention: 1h # 1 hour retention
```
### Monitoring replication
You can also enable a Prometheus metrics endpoint to monitor replication by
specifying a bind address with the `addr` field:
```yml
addr: ":9090"
```
This will make metrics available at: http://localhost:9090/metrics
### Other configuration options
These are some additional configuration options available on replicas:
- `type`—Specify the type of replica (`"file"` or `"s3"`). Derived from `"path"`.
- `name`—Specify an optional name for the replica if you are using multiple replicas.
- `path`—File path to the replica location.
- `url`—URL to the replica location.
- `retention-check-interval`—Time between retention enforcement checks. Defaults to `1h`.
- `validation-interval`—Interval between periodic checks to ensure restored backup matches current database. Disabled by default.
These replica options are only available for S3 replicas:
- `bucket`—S3 bucket name. Derived from `"path"`.
- `region`—S3 bucket region. Looked up on startup if unspecified.
- `sync-interval`—Replication sync frequency.
## Usage
### Replication
Once your configuration is saved, you'll need to begin replication. If you
installed the `.deb` file then run:
```sh
$ sudo systemctl restart litestream
```
To run litestream on its own, run:
```sh
# Replicate using the /etc/litestream.yml configuration.
$ litestream replicate
# Replicate using a different configuration path.
$ litestream replicate -config /path/to/litestream.yml
```
The `litestream` command will initialize and then wait indefinitely for changes.
You should see your destination replica path is now populated with a
`generations` directory. Inside there should be a 16-character hex generation
directory and inside there should be snapshots & WAL files. As you make changes
to your source database, changes will be copied over to your replica incrementally.
### Restoring a backup
Litestream can restore a previous snapshot and replay all replicated WAL files.
By default, it will restore up to the latest WAL file but you can also perform
point-in-time restores.
A database can only be restored to a path that does not exist so you don't need
to worry about accidentally overwriting your current database.
```sh
# Restore database to original path.
$ litestream restore /path/to/db
# Restore database to a new location.
$ litestream restore -o /path/to/restored/db /path/to/db
# Restore from a replica URL.
$ litestream restore -o /path/to/restored/db s3://mybkt/db
# Restore database to a specific point-in-time.
$ litestream restore -timestamp 2020-01-01T00:00:00Z /path/to/db
```
Point-in-time restores only have the resolution of the timestamp of the WAL file
itself. By default, Litestream will start a new WAL file every minute so
point-in-time restores are only accurate to the minute.
## How it works
SQLite provides a WAL (write-ahead log) journaling mode which writes pages to
a `-wal` file before eventually being copied over to the original database file.
This copying process is known as checkpointing. The WAL file works as a circular
buffer so when the WAL reaches a certain size then it restarts from the beginning.
Litestream works by taking over the checkpointing process and controlling when
it is restarted to ensure that it copies every new page. Checkpointing is only
allowed when there are no read transactions so Litestream maintains a
long-running read transaction against each database until it is ready to
checkpoint.
The SQLite WAL file is copied to a separate location called the shadow WAL which
ensures that it will not be overwritten by SQLite. This shadow WAL acts as a
temporary buffer so that replicas can replicate to their destination (e.g.
another file path or to S3). The shadow WAL files are removed once they have
been fully replicated. You can find the shadow directory as a hidden directory
next to your database file. If you database file is named `/var/lib/my.db` then
the shadow directory will be `/var/lib/.my.db-litestream`.
Litestream groups a snapshot and all subsequent WAL changes into "generations".
A generation is started on initial replication of a database and a new
generation will be started if litestream detects that the WAL replication is
no longer contiguous. This can occur if the `litestream` process is stopped and
another process is allowed to checkpoint the WAL.
I want to give special thanks to individuals who invest much of their time and
energy into the project to help make it better. Shout out to [Michael
Lynch](https://github.com/mtlynch) for digging into issues and contributing to
the documentation.
## Open-source, not open-contribution ## Open-source, not open-contribution
[Similar to SQLite](https://www.sqlite.org/copyright.html), Litestream is open [Similar to SQLite](https://www.sqlite.org/copyright.html), Litestream is open
source but closed to contributions. This keeps the code base free of proprietary source but closed to code contributions. This keeps the code base free of
or licensed code but it also helps me continue to maintain and build Litestream. proprietary or licensed code but it also helps me continue to maintain and build
Litestream.
As the author of [BoltDB](https://github.com/boltdb/bolt), I found that As the author of [BoltDB](https://github.com/boltdb/bolt), I found that
accepting and maintaining third party patches contributed to my burn out and accepting and maintaining third party patches contributed to my burn out and
@@ -292,5 +53,8 @@ not wish to come off as anything but welcoming, however, I've
made the decision to keep this project closed to contributions for my own made the decision to keep this project closed to contributions for my own
mental health and long term viability of the project. mental health and long term viability of the project.
The [documentation repository][docs] is MIT licensed and pull requests are welcome there.
[releases]: https://github.com/benbjohnson/litestream/releases [releases]: https://github.com/benbjohnson/litestream/releases
[docs]: https://github.com/benbjohnson/litestream.io

View File

@@ -2,7 +2,6 @@ package main
import ( import (
"context" "context"
"errors"
"flag" "flag"
"fmt" "fmt"
"os" "os"
@@ -15,21 +14,20 @@ type DatabasesCommand struct{}
// Run executes the command. // Run executes the command.
func (c *DatabasesCommand) Run(ctx context.Context, args []string) (err error) { func (c *DatabasesCommand) Run(ctx context.Context, args []string) (err error) {
var configPath string
fs := flag.NewFlagSet("litestream-databases", flag.ContinueOnError) fs := flag.NewFlagSet("litestream-databases", flag.ContinueOnError)
registerConfigFlag(fs, &configPath) configPath, noExpandEnv := registerConfigFlag(fs)
fs.Usage = c.Usage fs.Usage = c.Usage
if err := fs.Parse(args); err != nil { if err := fs.Parse(args); err != nil {
return err return err
} else if fs.NArg() != 0 { } else if fs.NArg() != 0 {
return fmt.Errorf("too many argument") return fmt.Errorf("too many arguments")
} }
// Load configuration. // Load configuration.
if configPath == "" { if *configPath == "" {
return errors.New("-config required") *configPath = DefaultConfigPath()
} }
config, err := ReadConfigFile(configPath) config, err := ReadConfigFile(*configPath, !*noExpandEnv)
if err != nil { if err != nil {
return err return err
} }
@@ -40,7 +38,7 @@ func (c *DatabasesCommand) Run(ctx context.Context, args []string) (err error) {
fmt.Fprintln(w, "path\treplicas") fmt.Fprintln(w, "path\treplicas")
for _, dbConfig := range config.DBs { for _, dbConfig := range config.DBs {
db, err := newDBFromConfig(&config, dbConfig) db, err := NewDBFromConfig(dbConfig)
if err != nil { if err != nil {
return err return err
} }
@@ -74,6 +72,9 @@ Arguments:
Specifies the configuration file. Specifies the configuration file.
Defaults to %s Defaults to %s
-no-expand-env
Disables environment variable expansion in configuration file.
`[1:], `[1:],
DefaultConfigPath(), DefaultConfigPath(),
) )

View File

@@ -2,7 +2,6 @@ package main
import ( import (
"context" "context"
"errors"
"flag" "flag"
"fmt" "fmt"
"log" "log"
@@ -18,9 +17,8 @@ type GenerationsCommand struct{}
// Run executes the command. // Run executes the command.
func (c *GenerationsCommand) Run(ctx context.Context, args []string) (err error) { func (c *GenerationsCommand) Run(ctx context.Context, args []string) (err error) {
var configPath string
fs := flag.NewFlagSet("litestream-generations", flag.ContinueOnError) fs := flag.NewFlagSet("litestream-generations", flag.ContinueOnError)
registerConfigFlag(fs, &configPath) configPath, noExpandEnv := registerConfigFlag(fs)
replicaName := fs.String("replica", "", "replica name") replicaName := fs.String("replica", "", "replica name")
fs.Usage = c.Usage fs.Usage = c.Usage
if err := fs.Parse(args); err != nil { if err := fs.Parse(args); err != nil {
@@ -35,12 +33,19 @@ func (c *GenerationsCommand) Run(ctx context.Context, args []string) (err error)
var r litestream.Replica var r litestream.Replica
updatedAt := time.Now() updatedAt := time.Now()
if isURL(fs.Arg(0)) { if isURL(fs.Arg(0)) {
if r, err = NewReplicaFromURL(fs.Arg(0)); err != nil { if *configPath != "" {
return fmt.Errorf("cannot specify a replica URL and the -config flag")
}
if r, err = NewReplicaFromConfig(&ReplicaConfig{URL: fs.Arg(0)}, nil); err != nil {
return err return err
} }
} else if configPath != "" { } else {
if *configPath == "" {
*configPath = DefaultConfigPath()
}
// Load configuration. // Load configuration.
config, err := ReadConfigFile(configPath) config, err := ReadConfigFile(*configPath, !*noExpandEnv)
if err != nil { if err != nil {
return err return err
} }
@@ -50,7 +55,7 @@ func (c *GenerationsCommand) Run(ctx context.Context, args []string) (err error)
return err return err
} else if dbc := config.DBConfig(path); dbc == nil { } else if dbc := config.DBConfig(path); dbc == nil {
return fmt.Errorf("database not found in config: %s", path) return fmt.Errorf("database not found in config: %s", path)
} else if db, err = newDBFromConfig(&config, dbc); err != nil { } else if db, err = NewDBFromConfig(dbc); err != nil {
return err return err
} }
@@ -65,8 +70,6 @@ func (c *GenerationsCommand) Run(ctx context.Context, args []string) (err error)
if updatedAt, err = db.UpdatedAt(); err != nil { if updatedAt, err = db.UpdatedAt(); err != nil {
return err return err
} }
} else {
return errors.New("config path or replica URL required")
} }
var replicas []litestream.Replica var replicas []litestream.Replica
@@ -128,6 +131,9 @@ Arguments:
Specifies the configuration file. Specifies the configuration file.
Defaults to %s Defaults to %s
-no-expand-env
Disables environment variable expansion in configuration file.
-replica NAME -replica NAME
Optional, filters by replica. Optional, filters by replica.

View File

@@ -2,15 +2,19 @@ package main
import ( import (
"context" "context"
"errors"
"flag" "flag"
"fmt" "fmt"
"io/ioutil" "io/ioutil"
"log" "log"
"net/url" "net/url"
"os" "os"
"os/signal"
"os/user" "os/user"
"path" "path"
"path/filepath" "path/filepath"
"regexp"
"strconv"
"strings" "strings"
"time" "time"
@@ -25,14 +29,17 @@ var (
Version = "(development build)" Version = "(development build)"
) )
// errStop is a terminal error for indicating program should quit.
var errStop = errors.New("stop")
func main() { func main() {
log.SetFlags(0) log.SetFlags(0)
m := NewMain() m := NewMain()
if err := m.Run(context.Background(), os.Args[1:]); err == flag.ErrHelp { if err := m.Run(context.Background(), os.Args[1:]); err == flag.ErrHelp || err == errStop {
os.Exit(1) os.Exit(1)
} else if err != nil { } else if err != nil {
fmt.Fprintln(os.Stderr, err) log.Println(err)
os.Exit(1) os.Exit(1)
} }
} }
@@ -47,6 +54,17 @@ func NewMain() *Main {
// Run executes the program. // Run executes the program.
func (m *Main) Run(ctx context.Context, args []string) (err error) { func (m *Main) Run(ctx context.Context, args []string) (err error) {
// Execute replication command if running as a Windows service.
if isService, err := isWindowsService(); err != nil {
return err
} else if isService {
return runWindowsService(ctx)
}
// Copy "LITESTEAM" environment credentials.
applyLitestreamEnv()
// Extract command name.
var cmd string var cmd string
if len(args) > 0 { if len(args) > 0 {
cmd, args = args[0], args[1:] cmd, args = args[0], args[1:]
@@ -58,7 +76,32 @@ func (m *Main) Run(ctx context.Context, args []string) (err error) {
case "generations": case "generations":
return (&GenerationsCommand{}).Run(ctx, args) return (&GenerationsCommand{}).Run(ctx, args)
case "replicate": case "replicate":
return (&ReplicateCommand{}).Run(ctx, args) c := NewReplicateCommand()
if err := c.ParseFlags(ctx, args); err != nil {
return err
}
// Setup signal handler.
ctx, cancel := context.WithCancel(ctx)
ch := signalChan()
go func() { <-ch; cancel() }()
if err := c.Run(ctx); err != nil {
return err
}
// Wait for signal to stop program.
<-ctx.Done()
signal.Reset()
fmt.Println("signal received, litestream shutting down")
// Gracefully close.
if err := c.Close(); err != nil {
return err
}
fmt.Println("litestream shut down")
return nil
case "restore": case "restore":
return (&RestoreCommand{}).Run(ctx, args) return (&RestoreCommand{}).Run(ctx, args)
case "snapshots": case "snapshots":
@@ -108,8 +151,20 @@ type Config struct {
// Global S3 settings // Global S3 settings
AccessKeyID string `yaml:"access-key-id"` AccessKeyID string `yaml:"access-key-id"`
SecretAccessKey string `yaml:"secret-access-key"` SecretAccessKey string `yaml:"secret-access-key"`
Region string `yaml:"region"` }
Bucket string `yaml:"bucket"`
// propagateGlobalSettings copies global S3 settings to replica configs.
func (c *Config) propagateGlobalSettings() {
for _, dbc := range c.DBs {
for _, rc := range dbc.Replicas {
if rc.AccessKeyID == "" {
rc.AccessKeyID = c.AccessKeyID
}
if rc.SecretAccessKey == "" {
rc.SecretAccessKey = c.SecretAccessKey
}
}
}
} }
// DefaultConfig returns a new instance of Config with defaults set. // DefaultConfig returns a new instance of Config with defaults set.
@@ -128,7 +183,8 @@ func (c *Config) DBConfig(path string) *DBConfig {
} }
// ReadConfigFile unmarshals config from filename. Expands path if needed. // ReadConfigFile unmarshals config from filename. Expands path if needed.
func ReadConfigFile(filename string) (_ Config, err error) { // If expandEnv is true then environment variables are expanded in the config.
func ReadConfigFile(filename string, expandEnv bool) (_ Config, err error) {
config := DefaultConfig() config := DefaultConfig()
// Expand filename, if necessary. // Expand filename, if necessary.
@@ -137,12 +193,20 @@ func ReadConfigFile(filename string) (_ Config, err error) {
return config, err return config, err
} }
// Read & deserialize configuration. // Read configuration.
if buf, err := ioutil.ReadFile(filename); os.IsNotExist(err) { buf, err := ioutil.ReadFile(filename)
if os.IsNotExist(err) {
return config, fmt.Errorf("config file not found: %s", filename) return config, fmt.Errorf("config file not found: %s", filename)
} else if err != nil { } else if err != nil {
return config, err return config, err
} else if err := yaml.Unmarshal(buf, &config); err != nil { }
// Expand environment variables, if enabled.
if expandEnv {
buf = []byte(os.ExpandEnv(string(buf)))
}
if err := yaml.Unmarshal(buf, &config); err != nil {
return config, err return config, err
} }
@@ -153,15 +217,59 @@ func ReadConfigFile(filename string) (_ Config, err error) {
} }
} }
// Propage settings from global config to replica configs.
config.propagateGlobalSettings()
return config, nil return config, nil
} }
// DBConfig represents the configuration for a single database. // DBConfig represents the configuration for a single database.
type DBConfig struct { type DBConfig struct {
Path string `yaml:"path"` Path string `yaml:"path"`
MonitorInterval *time.Duration `yaml:"monitor-interval"`
CheckpointInterval *time.Duration `yaml:"checkpoint-interval"`
MinCheckpointPageN *int `yaml:"min-checkpoint-page-count"`
MaxCheckpointPageN *int `yaml:"max-checkpoint-page-count"`
Replicas []*ReplicaConfig `yaml:"replicas"` Replicas []*ReplicaConfig `yaml:"replicas"`
} }
// NewDBFromConfig instantiates a DB based on a configuration.
func NewDBFromConfig(dbc *DBConfig) (*litestream.DB, error) {
path, err := expand(dbc.Path)
if err != nil {
return nil, err
}
// Initialize database with given path.
db := litestream.NewDB(path)
// Override default database settings if specified in configuration.
if dbc.MonitorInterval != nil {
db.MonitorInterval = *dbc.MonitorInterval
}
if dbc.CheckpointInterval != nil {
db.CheckpointInterval = *dbc.CheckpointInterval
}
if dbc.MinCheckpointPageN != nil {
db.MinCheckpointPageN = *dbc.MinCheckpointPageN
}
if dbc.MaxCheckpointPageN != nil {
db.MaxCheckpointPageN = *dbc.MaxCheckpointPageN
}
// Instantiate and attach replicas.
for _, rc := range dbc.Replicas {
r, err := NewReplicaFromConfig(rc, db)
if err != nil {
return nil, err
}
db.Replicas = append(db.Replicas, r)
}
return db, nil
}
// ReplicaConfig represents the configuration for a single replica in a database. // ReplicaConfig represents the configuration for a single replica in a database.
type ReplicaConfig struct { type ReplicaConfig struct {
Type string `yaml:"type"` // "file", "s3" Type string `yaml:"type"` // "file", "s3"
@@ -171,6 +279,7 @@ type ReplicaConfig struct {
Retention time.Duration `yaml:"retention"` Retention time.Duration `yaml:"retention"`
RetentionCheckInterval time.Duration `yaml:"retention-check-interval"` RetentionCheckInterval time.Duration `yaml:"retention-check-interval"`
SyncInterval time.Duration `yaml:"sync-interval"` // s3 only SyncInterval time.Duration `yaml:"sync-interval"` // s3 only
SnapshotInterval time.Duration `yaml:"snapshot-interval"`
ValidationInterval time.Duration `yaml:"validation-interval"` ValidationInterval time.Duration `yaml:"validation-interval"`
// S3 settings // S3 settings
@@ -178,25 +287,162 @@ type ReplicaConfig struct {
SecretAccessKey string `yaml:"secret-access-key"` SecretAccessKey string `yaml:"secret-access-key"`
Region string `yaml:"region"` Region string `yaml:"region"`
Bucket string `yaml:"bucket"` Bucket string `yaml:"bucket"`
Endpoint string `yaml:"endpoint"`
ForcePathStyle *bool `yaml:"force-path-style"`
SkipVerify bool `yaml:"skip-verify"`
} }
// NewReplicaFromURL returns a new Replica instance configured from a URL. // NewReplicaFromConfig instantiates a replica for a DB based on a config.
// The replica's database is not set. func NewReplicaFromConfig(c *ReplicaConfig, db *litestream.DB) (litestream.Replica, error) {
func NewReplicaFromURL(s string) (litestream.Replica, error) { // Ensure user did not specify URL in path.
scheme, host, path, err := ParseReplicaURL(s) if isURL(c.Path) {
if err != nil { return nil, fmt.Errorf("replica path cannot be a url, please use the 'url' field instead: %s", c.Path)
}
switch c.ReplicaType() {
case "file":
return newFileReplicaFromConfig(c, db)
case "s3":
return newS3ReplicaFromConfig(c, db)
default:
return nil, fmt.Errorf("unknown replica type in config: %q", c.Type)
}
}
// newFileReplicaFromConfig returns a new instance of FileReplica build from config.
func newFileReplicaFromConfig(c *ReplicaConfig, db *litestream.DB) (_ *litestream.FileReplica, err error) {
// Ensure URL & path are not both specified.
if c.URL != "" && c.Path != "" {
return nil, fmt.Errorf("cannot specify url & path for file replica")
}
// Parse path from URL, if specified.
path := c.Path
if c.URL != "" {
if _, _, path, err = ParseReplicaURL(c.URL); err != nil {
return nil, err
}
}
// Ensure path is set explicitly or derived from URL field.
if path == "" {
return nil, fmt.Errorf("file replica path required")
}
// Expand home prefix and return absolute path.
if path, err = expand(path); err != nil {
return nil, err return nil, err
} }
switch scheme { // Instantiate replica and apply time fields, if set.
case "file": r := litestream.NewFileReplica(db, c.Name, path)
return litestream.NewFileReplica(nil, "", path), nil if v := c.Retention; v > 0 {
case "s3": r.Retention = v
r := s3.NewReplica(nil, "") }
r.Bucket, r.Path = host, path if v := c.RetentionCheckInterval; v > 0 {
r.RetentionCheckInterval = v
}
if v := c.SnapshotInterval; v > 0 {
r.SnapshotInterval = v
}
if v := c.ValidationInterval; v > 0 {
r.ValidationInterval = v
}
return r, nil return r, nil
default: }
return nil, fmt.Errorf("invalid replica url type: %s", s)
// newS3ReplicaFromConfig returns a new instance of S3Replica build from config.
func newS3ReplicaFromConfig(c *ReplicaConfig, db *litestream.DB) (_ *s3.Replica, err error) {
// Ensure URL & constituent parts are not both specified.
if c.URL != "" && c.Path != "" {
return nil, fmt.Errorf("cannot specify url & path for s3 replica")
} else if c.URL != "" && c.Bucket != "" {
return nil, fmt.Errorf("cannot specify url & bucket for s3 replica")
}
bucket, path := c.Bucket, c.Path
region, endpoint, skipVerify := c.Region, c.Endpoint, c.SkipVerify
// Use path style if an endpoint is explicitly set. This works because the
// only service to not use path style is AWS which does not use an endpoint.
forcePathStyle := (endpoint != "")
if v := c.ForcePathStyle; v != nil {
forcePathStyle = *v
}
// Apply settings from URL, if specified.
if c.URL != "" {
_, host, upath, err := ParseReplicaURL(c.URL)
if err != nil {
return nil, err
}
ubucket, uregion, uendpoint, uforcePathStyle := s3.ParseHost(host)
// Only apply URL parts to field that have not been overridden.
if path == "" {
path = upath
}
if bucket == "" {
bucket = ubucket
}
if region == "" {
region = uregion
}
if endpoint == "" {
endpoint = uendpoint
}
if !forcePathStyle {
forcePathStyle = uforcePathStyle
}
}
// Ensure required settings are set.
if bucket == "" {
return nil, fmt.Errorf("bucket required for s3 replica")
}
// Build replica.
r := s3.NewReplica(db, c.Name)
r.AccessKeyID = c.AccessKeyID
r.SecretAccessKey = c.SecretAccessKey
r.Bucket = bucket
r.Path = path
r.Region = region
r.Endpoint = endpoint
r.ForcePathStyle = forcePathStyle
r.SkipVerify = skipVerify
if v := c.Retention; v > 0 {
r.Retention = v
}
if v := c.RetentionCheckInterval; v > 0 {
r.RetentionCheckInterval = v
}
if v := c.SyncInterval; v > 0 {
r.SyncInterval = v
}
if v := c.SnapshotInterval; v > 0 {
r.SnapshotInterval = v
}
if v := c.ValidationInterval; v > 0 {
r.ValidationInterval = v
}
return r, nil
}
// applyLitestreamEnv copies "LITESTREAM" prefixed environment variables to
// their AWS counterparts as the "AWS" prefix can be confusing when using a
// non-AWS S3-compatible service.
func applyLitestreamEnv() {
if v, ok := os.LookupEnv("LITESTREAM_ACCESS_KEY_ID"); ok {
if _, ok := os.LookupEnv("AWS_ACCESS_KEY_ID"); !ok {
os.Setenv("AWS_ACCESS_KEY_ID", v)
}
}
if v, ok := os.LookupEnv("LITESTREAM_SECRET_ACCESS_KEY"); ok {
if _, ok := os.LookupEnv("AWS_SECRET_ACCESS_KEY"); !ok {
os.Setenv("AWS_SECRET_ACCESS_KEY", v)
}
} }
} }
@@ -222,15 +468,14 @@ func ParseReplicaURL(s string) (scheme, host, urlpath string, err error) {
// isURL returns true if s can be parsed and has a scheme. // isURL returns true if s can be parsed and has a scheme.
func isURL(s string) bool { func isURL(s string) bool {
u, err := url.Parse(s) return regexp.MustCompile(`^\w+:\/\/`).MatchString(s)
return err == nil && u.Scheme != ""
} }
// ReplicaType returns the type based on the type field or extracted from the URL. // ReplicaType returns the type based on the type field or extracted from the URL.
func (c *ReplicaConfig) ReplicaType() string { func (c *ReplicaConfig) ReplicaType() string {
typ, _, _, _ := ParseReplicaURL(c.URL) scheme, _, _, _ := ParseReplicaURL(c.URL)
if typ != "" { if scheme != "" {
return typ return scheme
} else if c.Type != "" { } else if c.Type != "" {
return c.Type return c.Type
} }
@@ -242,138 +487,12 @@ func DefaultConfigPath() string {
if v := os.Getenv("LITESTREAM_CONFIG"); v != "" { if v := os.Getenv("LITESTREAM_CONFIG"); v != "" {
return v return v
} }
return "/etc/litestream.yml" return defaultConfigPath
} }
func registerConfigFlag(fs *flag.FlagSet, p *string) { func registerConfigFlag(fs *flag.FlagSet) (configPath *string, noExpandEnv *bool) {
fs.StringVar(p, "config", DefaultConfigPath(), "config path") return fs.String("config", "", "config path"),
} fs.Bool("no-expand-env", false, "do not expand env vars in config")
// newDBFromConfig instantiates a DB based on a configuration.
func newDBFromConfig(c *Config, dbc *DBConfig) (*litestream.DB, error) {
path, err := expand(dbc.Path)
if err != nil {
return nil, err
}
// Initialize database with given path.
db := litestream.NewDB(path)
// Instantiate and attach replicas.
for _, rc := range dbc.Replicas {
r, err := newReplicaFromConfig(db, c, dbc, rc)
if err != nil {
return nil, err
}
db.Replicas = append(db.Replicas, r)
}
return db, nil
}
// newReplicaFromConfig instantiates a replica for a DB based on a config.
func newReplicaFromConfig(db *litestream.DB, c *Config, dbc *DBConfig, rc *ReplicaConfig) (litestream.Replica, error) {
// Ensure user did not specify URL in path.
if isURL(rc.Path) {
return nil, fmt.Errorf("replica path cannot be a url, please use the 'url' field instead: %s", rc.Path)
}
switch rc.ReplicaType() {
case "file":
return newFileReplicaFromConfig(db, c, dbc, rc)
case "s3":
return newS3ReplicaFromConfig(db, c, dbc, rc)
default:
return nil, fmt.Errorf("unknown replica type in config: %q", rc.Type)
}
}
// newFileReplicaFromConfig returns a new instance of FileReplica build from config.
func newFileReplicaFromConfig(db *litestream.DB, c *Config, dbc *DBConfig, rc *ReplicaConfig) (_ *litestream.FileReplica, err error) {
path := rc.Path
if rc.URL != "" {
_, _, path, err = ParseReplicaURL(rc.URL)
if err != nil {
return nil, err
}
}
if path == "" {
return nil, fmt.Errorf("%s: file replica path required", db.Path())
}
if path, err = expand(path); err != nil {
return nil, err
}
r := litestream.NewFileReplica(db, rc.Name, path)
if v := rc.Retention; v > 0 {
r.Retention = v
}
if v := rc.RetentionCheckInterval; v > 0 {
r.RetentionCheckInterval = v
}
if v := rc.ValidationInterval; v > 0 {
r.ValidationInterval = v
}
return r, nil
}
// newS3ReplicaFromConfig returns a new instance of S3Replica build from config.
func newS3ReplicaFromConfig(db *litestream.DB, c *Config, dbc *DBConfig, rc *ReplicaConfig) (_ *s3.Replica, err error) {
bucket := c.Bucket
if v := rc.Bucket; v != "" {
bucket = v
}
path := rc.Path
if rc.URL != "" {
_, bucket, path, err = ParseReplicaURL(rc.URL)
if err != nil {
return nil, err
}
}
// Use global or replica-specific S3 settings.
accessKeyID := c.AccessKeyID
if v := rc.AccessKeyID; v != "" {
accessKeyID = v
}
secretAccessKey := c.SecretAccessKey
if v := rc.SecretAccessKey; v != "" {
secretAccessKey = v
}
region := c.Region
if v := rc.Region; v != "" {
region = v
}
// Ensure required settings are set.
if bucket == "" {
return nil, fmt.Errorf("%s: s3 bucket required", db.Path())
}
// Build replica.
r := s3.NewReplica(db, rc.Name)
r.AccessKeyID = accessKeyID
r.SecretAccessKey = secretAccessKey
r.Region = region
r.Bucket = bucket
r.Path = path
if v := rc.Retention; v > 0 {
r.Retention = v
}
if v := rc.RetentionCheckInterval; v > 0 {
r.RetentionCheckInterval = v
}
if v := rc.SyncInterval; v > 0 {
r.SyncInterval = v
}
if v := rc.ValidationInterval; v > 0 {
r.ValidationInterval = v
}
return r, nil
} }
// expand returns an absolute path for s. // expand returns an absolute path for s.
@@ -398,3 +517,24 @@ func expand(s string) (string, error) {
} }
return filepath.Join(u.HomeDir, strings.TrimPrefix(s, prefix)), nil return filepath.Join(u.HomeDir, strings.TrimPrefix(s, prefix)), nil
} }
// indexVar allows the flag package to parse index flags as 4-byte hexadecimal values.
type indexVar int
// Ensure type implements interface.
var _ flag.Value = (*indexVar)(nil)
// String returns an 8-character hexadecimal value.
func (v *indexVar) String() string {
return fmt.Sprintf("%08x", int(*v))
}
// Set parses s into an integer from a hexadecimal value.
func (v *indexVar) Set(s string) error {
i, err := strconv.ParseInt(s, 16, 32)
if err != nil {
return fmt.Errorf("invalid hexadecimal format")
}
*v = indexVar(i)
return nil
}

View File

@@ -0,0 +1,26 @@
// +build !windows
package main
import (
"context"
"os"
"os/signal"
"syscall"
)
const defaultConfigPath = "/etc/litestream.yml"
func isWindowsService() (bool, error) {
return false, nil
}
func runWindowsService(ctx context.Context) error {
panic("cannot run windows service as unix process")
}
func signalChan() <-chan os.Signal {
ch := make(chan os.Signal, 1)
signal.Notify(ch, syscall.SIGINT, syscall.SIGTERM)
return ch
}

182
cmd/litestream/main_test.go Normal file
View File

@@ -0,0 +1,182 @@
package main_test
import (
"io/ioutil"
"os"
"path/filepath"
"testing"
"github.com/benbjohnson/litestream"
main "github.com/benbjohnson/litestream/cmd/litestream"
"github.com/benbjohnson/litestream/s3"
)
func TestReadConfigFile(t *testing.T) {
// Ensure global AWS settings are propagated down to replica configurations.
t.Run("PropagateGlobalSettings", func(t *testing.T) {
filename := filepath.Join(t.TempDir(), "litestream.yml")
if err := ioutil.WriteFile(filename, []byte(`
access-key-id: XXX
secret-access-key: YYY
dbs:
- path: /path/to/db
replicas:
- url: s3://foo/bar
`[1:]), 0666); err != nil {
t.Fatal(err)
}
config, err := main.ReadConfigFile(filename, true)
if err != nil {
t.Fatal(err)
} else if got, want := config.AccessKeyID, `XXX`; got != want {
t.Fatalf("AccessKeyID=%v, want %v", got, want)
} else if got, want := config.SecretAccessKey, `YYY`; got != want {
t.Fatalf("SecretAccessKey=%v, want %v", got, want)
} else if got, want := config.DBs[0].Replicas[0].AccessKeyID, `XXX`; got != want {
t.Fatalf("Replica.AccessKeyID=%v, want %v", got, want)
} else if got, want := config.DBs[0].Replicas[0].SecretAccessKey, `YYY`; got != want {
t.Fatalf("Replica.SecretAccessKey=%v, want %v", got, want)
}
})
// Ensure environment variables are expanded.
t.Run("ExpandEnv", func(t *testing.T) {
os.Setenv("LITESTREAM_TEST_0129380", "/path/to/db")
os.Setenv("LITESTREAM_TEST_1872363", "s3://foo/bar")
filename := filepath.Join(t.TempDir(), "litestream.yml")
if err := ioutil.WriteFile(filename, []byte(`
dbs:
- path: $LITESTREAM_TEST_0129380
replicas:
- url: ${LITESTREAM_TEST_1872363}
- url: ${LITESTREAM_TEST_NO_SUCH_ENV}
`[1:]), 0666); err != nil {
t.Fatal(err)
}
config, err := main.ReadConfigFile(filename, true)
if err != nil {
t.Fatal(err)
} else if got, want := config.DBs[0].Path, `/path/to/db`; got != want {
t.Fatalf("DB.Path=%v, want %v", got, want)
} else if got, want := config.DBs[0].Replicas[0].URL, `s3://foo/bar`; got != want {
t.Fatalf("Replica[0].URL=%v, want %v", got, want)
} else if got, want := config.DBs[0].Replicas[1].URL, ``; got != want {
t.Fatalf("Replica[1].URL=%v, want %v", got, want)
}
})
// Ensure environment variables are not expanded.
t.Run("NoExpandEnv", func(t *testing.T) {
os.Setenv("LITESTREAM_TEST_9847533", "s3://foo/bar")
filename := filepath.Join(t.TempDir(), "litestream.yml")
if err := ioutil.WriteFile(filename, []byte(`
dbs:
- path: /path/to/db
replicas:
- url: ${LITESTREAM_TEST_9847533}
`[1:]), 0666); err != nil {
t.Fatal(err)
}
config, err := main.ReadConfigFile(filename, false)
if err != nil {
t.Fatal(err)
} else if got, want := config.DBs[0].Replicas[0].URL, `${LITESTREAM_TEST_9847533}`; got != want {
t.Fatalf("Replica.URL=%v, want %v", got, want)
}
})
}
func TestNewFileReplicaFromConfig(t *testing.T) {
r, err := main.NewReplicaFromConfig(&main.ReplicaConfig{Path: "/foo"}, nil)
if err != nil {
t.Fatal(err)
} else if r, ok := r.(*litestream.FileReplica); !ok {
t.Fatal("unexpected replica type")
} else if got, want := r.Path(), "/foo"; got != want {
t.Fatalf("Path=%s, want %s", got, want)
}
}
func TestNewS3ReplicaFromConfig(t *testing.T) {
t.Run("URL", func(t *testing.T) {
r, err := main.NewReplicaFromConfig(&main.ReplicaConfig{URL: "s3://foo/bar"}, nil)
if err != nil {
t.Fatal(err)
} else if r, ok := r.(*s3.Replica); !ok {
t.Fatal("unexpected replica type")
} else if got, want := r.Bucket, "foo"; got != want {
t.Fatalf("Bucket=%s, want %s", got, want)
} else if got, want := r.Path, "bar"; got != want {
t.Fatalf("Path=%s, want %s", got, want)
} else if got, want := r.Region, ""; got != want {
t.Fatalf("Region=%s, want %s", got, want)
} else if got, want := r.Endpoint, ""; got != want {
t.Fatalf("Endpoint=%s, want %s", got, want)
} else if got, want := r.ForcePathStyle, false; got != want {
t.Fatalf("ForcePathStyle=%v, want %v", got, want)
}
})
t.Run("MinIO", func(t *testing.T) {
r, err := main.NewReplicaFromConfig(&main.ReplicaConfig{URL: "s3://foo.localhost:9000/bar"}, nil)
if err != nil {
t.Fatal(err)
} else if r, ok := r.(*s3.Replica); !ok {
t.Fatal("unexpected replica type")
} else if got, want := r.Bucket, "foo"; got != want {
t.Fatalf("Bucket=%s, want %s", got, want)
} else if got, want := r.Path, "bar"; got != want {
t.Fatalf("Path=%s, want %s", got, want)
} else if got, want := r.Region, "us-east-1"; got != want {
t.Fatalf("Region=%s, want %s", got, want)
} else if got, want := r.Endpoint, "http://localhost:9000"; got != want {
t.Fatalf("Endpoint=%s, want %s", got, want)
} else if got, want := r.ForcePathStyle, true; got != want {
t.Fatalf("ForcePathStyle=%v, want %v", got, want)
}
})
t.Run("Backblaze", func(t *testing.T) {
r, err := main.NewReplicaFromConfig(&main.ReplicaConfig{URL: "s3://foo.s3.us-west-000.backblazeb2.com/bar"}, nil)
if err != nil {
t.Fatal(err)
} else if r, ok := r.(*s3.Replica); !ok {
t.Fatal("unexpected replica type")
} else if got, want := r.Bucket, "foo"; got != want {
t.Fatalf("Bucket=%s, want %s", got, want)
} else if got, want := r.Path, "bar"; got != want {
t.Fatalf("Path=%s, want %s", got, want)
} else if got, want := r.Region, "us-west-000"; got != want {
t.Fatalf("Region=%s, want %s", got, want)
} else if got, want := r.Endpoint, "https://s3.us-west-000.backblazeb2.com"; got != want {
t.Fatalf("Endpoint=%s, want %s", got, want)
} else if got, want := r.ForcePathStyle, true; got != want {
t.Fatalf("ForcePathStyle=%v, want %v", got, want)
}
})
t.Run("GCS", func(t *testing.T) {
r, err := main.NewReplicaFromConfig(&main.ReplicaConfig{URL: "s3://foo.storage.googleapis.com/bar"}, nil)
if err != nil {
t.Fatal(err)
} else if r, ok := r.(*s3.Replica); !ok {
t.Fatal("unexpected replica type")
} else if got, want := r.Bucket, "foo"; got != want {
t.Fatalf("Bucket=%s, want %s", got, want)
} else if got, want := r.Path, "bar"; got != want {
t.Fatalf("Path=%s, want %s", got, want)
} else if got, want := r.Region, "us-east-1"; got != want {
t.Fatalf("Region=%s, want %s", got, want)
} else if got, want := r.Endpoint, "https://storage.googleapis.com"; got != want {
t.Fatalf("Endpoint=%s, want %s", got, want)
} else if got, want := r.ForcePathStyle, true; got != want {
t.Fatalf("ForcePathStyle=%v, want %v", got, want)
}
})
}

View File

@@ -0,0 +1,112 @@
// +build windows
package main
import (
"context"
"io"
"log"
"os"
"os/signal"
"golang.org/x/sys/windows"
"golang.org/x/sys/windows/svc"
"golang.org/x/sys/windows/svc/eventlog"
)
const defaultConfigPath = `C:\Litestream\litestream.yml`
// serviceName is the Windows Service name.
const serviceName = "Litestream"
// isWindowsService returns true if currently executing within a Windows service.
func isWindowsService() (bool, error) {
return svc.IsWindowsService()
}
func runWindowsService(ctx context.Context) error {
// Attempt to install new log service. This will fail if already installed.
// We don't log the error because we don't have anywhere to log until we open the log.
_ = eventlog.InstallAsEventCreate(serviceName, eventlog.Error|eventlog.Warning|eventlog.Info)
elog, err := eventlog.Open(serviceName)
if err != nil {
return err
}
defer elog.Close()
// Set eventlog as log writer while running.
log.SetOutput((*eventlogWriter)(elog))
defer log.SetOutput(os.Stderr)
log.Print("Litestream service starting")
if err := svc.Run(serviceName, &windowsService{ctx: ctx}); err != nil {
return errStop
}
log.Print("Litestream service stopped")
return nil
}
// windowsService is an interface adapter for svc.Handler.
type windowsService struct {
ctx context.Context
}
func (s *windowsService) Execute(args []string, r <-chan svc.ChangeRequest, statusCh chan<- svc.Status) (svcSpecificEC bool, exitCode uint32) {
var err error
// Notify Windows that the service is starting up.
statusCh <- svc.Status{State: svc.StartPending}
// Instantiate replication command and load configuration.
c := NewReplicateCommand()
if c.Config, err = ReadConfigFile(DefaultConfigPath()); err != nil {
log.Printf("cannot load configuration: %s", err)
return true, 1
}
// Execute replication command.
if err := c.Run(s.ctx); err != nil {
log.Printf("cannot replicate: %s", err)
statusCh <- svc.Status{State: svc.StopPending}
return true, 2
}
// Notify Windows that the service is now running.
statusCh <- svc.Status{State: svc.Running, Accepts: svc.AcceptStop}
for {
select {
case req := <-r:
switch req.Cmd {
case svc.Stop:
c.Close()
statusCh <- svc.Status{State: svc.StopPending}
return false, windows.NO_ERROR
case svc.Interrogate:
statusCh <- req.CurrentStatus
default:
log.Printf("Litestream service received unexpected change request cmd: %d", req.Cmd)
}
}
}
}
// Ensure implementation implements io.Writer interface.
var _ io.Writer = (*eventlogWriter)(nil)
// eventlogWriter is an adapter for using eventlog.Log as an io.Writer.
type eventlogWriter eventlog.Log
func (w *eventlogWriter) Write(p []byte) (n int, err error) {
elog := (*eventlog.Log)(w)
return 0, elog.Info(1, string(p))
}
func signalChan() <-chan os.Signal {
ch := make(chan os.Signal, 1)
signal.Notify(ch, os.Interrupt)
return ch
}

View File

@@ -2,7 +2,6 @@ package main
import ( import (
"context" "context"
"errors"
"flag" "flag"
"fmt" "fmt"
"log" "log"
@@ -10,7 +9,6 @@ import (
"net/http" "net/http"
_ "net/http/pprof" _ "net/http/pprof"
"os" "os"
"os/signal"
"time" "time"
"github.com/benbjohnson/litestream" "github.com/benbjohnson/litestream"
@@ -20,28 +18,34 @@ import (
// ReplicateCommand represents a command that continuously replicates SQLite databases. // ReplicateCommand represents a command that continuously replicates SQLite databases.
type ReplicateCommand struct { type ReplicateCommand struct {
ConfigPath string
Config Config Config Config
// List of managed databases specified in the config. // List of managed databases specified in the config.
DBs []*litestream.DB DBs []*litestream.DB
} }
// Run loads all databases specified in the configuration. func NewReplicateCommand() *ReplicateCommand {
func (c *ReplicateCommand) Run(ctx context.Context, args []string) (err error) { return &ReplicateCommand{}
}
// ParseFlags parses the CLI flags and loads the configuration file.
func (c *ReplicateCommand) ParseFlags(ctx context.Context, args []string) (err error) {
fs := flag.NewFlagSet("litestream-replicate", flag.ContinueOnError) fs := flag.NewFlagSet("litestream-replicate", flag.ContinueOnError)
tracePath := fs.String("trace", "", "trace path") tracePath := fs.String("trace", "", "trace path")
registerConfigFlag(fs, &c.ConfigPath) configPath, noExpandEnv := registerConfigFlag(fs)
fs.Usage = c.Usage fs.Usage = c.Usage
if err := fs.Parse(args); err != nil { if err := fs.Parse(args); err != nil {
return err return err
} }
// Load configuration or use CLI args to build db/replica. // Load configuration or use CLI args to build db/replica.
var config Config
if fs.NArg() == 1 { if fs.NArg() == 1 {
return fmt.Errorf("must specify at least one replica URL for %s", fs.Arg(0)) return fmt.Errorf("must specify at least one replica URL for %s", fs.Arg(0))
} else if fs.NArg() > 1 { } else if fs.NArg() > 1 {
if *configPath != "" {
return fmt.Errorf("cannot specify a replica URL and the -config flag")
}
dbConfig := &DBConfig{Path: fs.Arg(0)} dbConfig := &DBConfig{Path: fs.Arg(0)}
for _, u := range fs.Args()[1:] { for _, u := range fs.Args()[1:] {
dbConfig.Replicas = append(dbConfig.Replicas, &ReplicaConfig{ dbConfig.Replicas = append(dbConfig.Replicas, &ReplicaConfig{
@@ -49,14 +53,14 @@ func (c *ReplicateCommand) Run(ctx context.Context, args []string) (err error) {
SyncInterval: 1 * time.Second, SyncInterval: 1 * time.Second,
}) })
} }
config.DBs = []*DBConfig{dbConfig} c.Config.DBs = []*DBConfig{dbConfig}
} else if c.ConfigPath != "" { } else {
config, err = ReadConfigFile(c.ConfigPath) if *configPath == "" {
if err != nil { *configPath = DefaultConfigPath()
}
if c.Config, err = ReadConfigFile(*configPath, !*noExpandEnv); err != nil {
return err return err
} }
} else {
return errors.New("-config flag or database/replica arguments required")
} }
// Enable trace logging. // Enable trace logging.
@@ -66,24 +70,23 @@ func (c *ReplicateCommand) Run(ctx context.Context, args []string) (err error) {
return err return err
} }
defer f.Close() defer f.Close()
litestream.Tracef = log.New(f, "", log.LstdFlags|log.LUTC|log.Lshortfile).Printf litestream.Tracef = log.New(f, "", log.LstdFlags|log.Lmicroseconds|log.LUTC|log.Lshortfile).Printf
} }
// Setup signal handler. return nil
ctx, cancel := context.WithCancel(ctx) }
ch := make(chan os.Signal, 1)
signal.Notify(ch, os.Interrupt)
go func() { <-ch; cancel() }()
// Run loads all databases specified in the configuration.
func (c *ReplicateCommand) Run(ctx context.Context) (err error) {
// Display version information. // Display version information.
fmt.Printf("litestream %s\n", Version) log.Printf("litestream %s", Version)
if len(config.DBs) == 0 { if len(c.Config.DBs) == 0 {
fmt.Println("no databases specified in configuration") log.Println("no databases specified in configuration")
} }
for _, dbConfig := range config.DBs { for _, dbConfig := range c.Config.DBs {
db, err := newDBFromConfig(&config, dbConfig) db, err := NewDBFromConfig(dbConfig)
if err != nil { if err != nil {
return err return err
} }
@@ -97,41 +100,37 @@ func (c *ReplicateCommand) Run(ctx context.Context, args []string) (err error) {
// Notify user that initialization is done. // Notify user that initialization is done.
for _, db := range c.DBs { for _, db := range c.DBs {
fmt.Printf("initialized db: %s\n", db.Path()) log.Printf("initialized db: %s", db.Path())
for _, r := range db.Replicas { for _, r := range db.Replicas {
switch r := r.(type) { switch r := r.(type) {
case *litestream.FileReplica: case *litestream.FileReplica:
fmt.Printf("replicating to: name=%q type=%q path=%q\n", r.Name(), r.Type(), r.Path()) log.Printf("replicating to: name=%q type=%q path=%q", r.Name(), r.Type(), r.Path())
case *s3.Replica: case *s3.Replica:
fmt.Printf("replicating to: name=%q type=%q bucket=%q path=%q region=%q\n", r.Name(), r.Type(), r.Bucket, r.Path, r.Region) log.Printf("replicating to: name=%q type=%q bucket=%q path=%q region=%q endpoint=%q sync-interval=%s", r.Name(), r.Type(), r.Bucket, r.Path, r.Region, r.Endpoint, r.SyncInterval)
default: default:
fmt.Printf("replicating to: name=%q type=%q\n", r.Name(), r.Type()) log.Printf("replicating to: name=%q type=%q", r.Name(), r.Type())
} }
} }
} }
// Serve metrics over HTTP if enabled. // Serve metrics over HTTP if enabled.
if config.Addr != "" { if c.Config.Addr != "" {
_, port, _ := net.SplitHostPort(config.Addr) hostport := c.Config.Addr
fmt.Printf("serving metrics on http://localhost:%s/metrics\n", port) if host, port, _ := net.SplitHostPort(c.Config.Addr); port == "" {
return fmt.Errorf("must specify port for bind address: %q", c.Config.Addr)
} else if host == "" {
hostport = net.JoinHostPort("localhost", port)
}
log.Printf("serving metrics on http://%s/metrics", hostport)
go func() { go func() {
http.Handle("/metrics", promhttp.Handler()) http.Handle("/metrics", promhttp.Handler())
if err := http.ListenAndServe(config.Addr, nil); err != nil { if err := http.ListenAndServe(c.Config.Addr, nil); err != nil {
log.Printf("cannot start metrics server: %s", err) log.Printf("cannot start metrics server: %s", err)
} }
}() }()
} }
// Wait for signal to stop program.
<-ctx.Done()
signal.Reset()
// Gracefully close
if err := c.Close(); err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
return nil return nil
} }
@@ -139,12 +138,13 @@ func (c *ReplicateCommand) Run(ctx context.Context, args []string) (err error) {
func (c *ReplicateCommand) Close() (err error) { func (c *ReplicateCommand) Close() (err error) {
for _, db := range c.DBs { for _, db := range c.DBs {
if e := db.SoftClose(); e != nil { if e := db.SoftClose(); e != nil {
fmt.Printf("error closing db: path=%s err=%s\n", db.Path(), e) log.Printf("error closing db: path=%s err=%s", db.Path(), e)
if err == nil { if err == nil {
err = e err = e
} }
} }
} }
// TODO(windows): Clear DBs
return err return err
} }
@@ -168,6 +168,9 @@ Arguments:
Specifies the configuration file. Specifies the configuration file.
Defaults to %s Defaults to %s
-no-expand-env
Disables environment variable expansion in configuration file.
-trace PATH -trace PATH
Write verbose trace logging to PATH. Write verbose trace logging to PATH.

View File

@@ -7,6 +7,7 @@ import (
"fmt" "fmt"
"log" "log"
"os" "os"
"strconv"
"time" "time"
"github.com/benbjohnson/litestream" "github.com/benbjohnson/litestream"
@@ -17,17 +18,17 @@ type RestoreCommand struct{}
// Run executes the command. // Run executes the command.
func (c *RestoreCommand) Run(ctx context.Context, args []string) (err error) { func (c *RestoreCommand) Run(ctx context.Context, args []string) (err error) {
var configPath string
opt := litestream.NewRestoreOptions() opt := litestream.NewRestoreOptions()
opt.Verbose = true opt.Verbose = true
fs := flag.NewFlagSet("litestream-restore", flag.ContinueOnError) fs := flag.NewFlagSet("litestream-restore", flag.ContinueOnError)
registerConfigFlag(fs, &configPath) configPath, noExpandEnv := registerConfigFlag(fs)
fs.StringVar(&opt.OutputPath, "o", "", "output path") fs.StringVar(&opt.OutputPath, "o", "", "output path")
fs.StringVar(&opt.ReplicaName, "replica", "", "replica name") fs.StringVar(&opt.ReplicaName, "replica", "", "replica name")
fs.StringVar(&opt.Generation, "generation", "", "generation name") fs.StringVar(&opt.Generation, "generation", "", "generation name")
fs.IntVar(&opt.Index, "index", opt.Index, "wal index") fs.Var((*indexVar)(&opt.Index), "index", "wal index")
fs.BoolVar(&opt.DryRun, "dry-run", false, "dry run") fs.IntVar(&opt.Parallelism, "parallelism", opt.Parallelism, "parallelism")
ifReplicaExists := fs.Bool("if-replica-exists", false, "")
timestampStr := fs.String("timestamp", "", "timestamp") timestampStr := fs.String("timestamp", "", "timestamp")
verbose := fs.Bool("v", false, "verbose output") verbose := fs.Bool("v", false, "verbose output")
fs.Usage = c.Usage fs.Usage = c.Usage
@@ -46,32 +47,36 @@ func (c *RestoreCommand) Run(ctx context.Context, args []string) (err error) {
} }
} }
// Verbose output is automatically enabled if dry run is specified.
if opt.DryRun {
*verbose = true
}
// Instantiate logger if verbose output is enabled. // Instantiate logger if verbose output is enabled.
if *verbose { if *verbose {
opt.Logger = log.New(os.Stderr, "", log.LstdFlags) opt.Logger = log.New(os.Stderr, "", log.LstdFlags|log.Lmicroseconds)
} }
// Determine replica & generation to restore from. // Determine replica & generation to restore from.
var r litestream.Replica var r litestream.Replica
if isURL(fs.Arg(0)) { if isURL(fs.Arg(0)) {
if *configPath != "" {
return fmt.Errorf("cannot specify a replica URL and the -config flag")
}
if r, err = c.loadFromURL(ctx, fs.Arg(0), &opt); err != nil { if r, err = c.loadFromURL(ctx, fs.Arg(0), &opt); err != nil {
return err return err
} }
} else if configPath != "" { } else {
if r, err = c.loadFromConfig(ctx, fs.Arg(0), configPath, &opt); err != nil { if *configPath == "" {
*configPath = DefaultConfigPath()
}
if r, err = c.loadFromConfig(ctx, fs.Arg(0), *configPath, !*noExpandEnv, &opt); err != nil {
return err return err
} }
} else {
return errors.New("config path or replica URL required")
} }
// Return an error if no matching targets found. // Return an error if no matching targets found.
// If optional flag set, return success. Useful for automated recovery.
if opt.Generation == "" { if opt.Generation == "" {
if *ifReplicaExists {
fmt.Println("no matching backups found")
return nil
}
return fmt.Errorf("no matching backups found") return fmt.Errorf("no matching backups found")
} }
@@ -80,7 +85,7 @@ func (c *RestoreCommand) Run(ctx context.Context, args []string) (err error) {
// loadFromURL creates a replica & updates the restore options from a replica URL. // loadFromURL creates a replica & updates the restore options from a replica URL.
func (c *RestoreCommand) loadFromURL(ctx context.Context, replicaURL string, opt *litestream.RestoreOptions) (litestream.Replica, error) { func (c *RestoreCommand) loadFromURL(ctx context.Context, replicaURL string, opt *litestream.RestoreOptions) (litestream.Replica, error) {
r, err := NewReplicaFromURL(replicaURL) r, err := NewReplicaFromConfig(&ReplicaConfig{URL: replicaURL}, nil)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -89,9 +94,9 @@ func (c *RestoreCommand) loadFromURL(ctx context.Context, replicaURL string, opt
} }
// loadFromConfig returns a replica & updates the restore options from a DB reference. // loadFromConfig returns a replica & updates the restore options from a DB reference.
func (c *RestoreCommand) loadFromConfig(ctx context.Context, dbPath, configPath string, opt *litestream.RestoreOptions) (litestream.Replica, error) { func (c *RestoreCommand) loadFromConfig(ctx context.Context, dbPath, configPath string, expandEnv bool, opt *litestream.RestoreOptions) (litestream.Replica, error) {
// Load configuration. // Load configuration.
config, err := ReadConfigFile(configPath) config, err := ReadConfigFile(configPath, expandEnv)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -104,7 +109,7 @@ func (c *RestoreCommand) loadFromConfig(ctx context.Context, dbPath, configPath
if dbConfig == nil { if dbConfig == nil {
return nil, fmt.Errorf("database not found in config: %s", dbPath) return nil, fmt.Errorf("database not found in config: %s", dbPath)
} }
db, err := newDBFromConfig(&config, dbConfig) db, err := NewDBFromConfig(dbConfig)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -141,6 +146,9 @@ Arguments:
Specifies the configuration file. Specifies the configuration file.
Defaults to %s Defaults to %s
-no-expand-env
Disables environment variable expansion in configuration file.
-replica NAME -replica NAME
Restore from a specific replica. Restore from a specific replica.
Defaults to replica with latest data. Defaults to replica with latest data.
@@ -150,7 +158,7 @@ Arguments:
Defaults to generation with latest data. Defaults to generation with latest data.
-index NUM -index NUM
Restore up to a specific WAL index (inclusive). Restore up to a specific hex-encoded WAL index (inclusive).
Defaults to use the highest available index. Defaults to use the highest available index.
-timestamp TIMESTAMP -timestamp TIMESTAMP
@@ -161,9 +169,12 @@ Arguments:
Output path of the restored database. Output path of the restored database.
Defaults to original DB path. Defaults to original DB path.
-dry-run -if-replica-exists
Prints all log output as if it were running but does Returns exit code of 0 if no backups found.
not perform actual restore.
-parallelism NUM
Determines the number of WAL files downloaded in parallel.
Defaults to `+strconv.Itoa(litestream.DefaultRestoreParallelism)+`.
-v -v
Verbose output. Verbose output.

View File

@@ -2,7 +2,6 @@ package main
import ( import (
"context" "context"
"errors"
"flag" "flag"
"fmt" "fmt"
"os" "os"
@@ -17,9 +16,8 @@ type SnapshotsCommand struct{}
// Run executes the command. // Run executes the command.
func (c *SnapshotsCommand) Run(ctx context.Context, args []string) (err error) { func (c *SnapshotsCommand) Run(ctx context.Context, args []string) (err error) {
var configPath string
fs := flag.NewFlagSet("litestream-snapshots", flag.ContinueOnError) fs := flag.NewFlagSet("litestream-snapshots", flag.ContinueOnError)
registerConfigFlag(fs, &configPath) configPath, noExpandEnv := registerConfigFlag(fs)
replicaName := fs.String("replica", "", "replica name") replicaName := fs.String("replica", "", "replica name")
fs.Usage = c.Usage fs.Usage = c.Usage
if err := fs.Parse(args); err != nil { if err := fs.Parse(args); err != nil {
@@ -33,12 +31,19 @@ func (c *SnapshotsCommand) Run(ctx context.Context, args []string) (err error) {
var db *litestream.DB var db *litestream.DB
var r litestream.Replica var r litestream.Replica
if isURL(fs.Arg(0)) { if isURL(fs.Arg(0)) {
if r, err = NewReplicaFromURL(fs.Arg(0)); err != nil { if *configPath != "" {
return fmt.Errorf("cannot specify a replica URL and the -config flag")
}
if r, err = NewReplicaFromConfig(&ReplicaConfig{URL: fs.Arg(0)}, nil); err != nil {
return err return err
} }
} else if configPath != "" { } else {
if *configPath == "" {
*configPath = DefaultConfigPath()
}
// Load configuration. // Load configuration.
config, err := ReadConfigFile(configPath) config, err := ReadConfigFile(*configPath, !*noExpandEnv)
if err != nil { if err != nil {
return err return err
} }
@@ -48,7 +53,7 @@ func (c *SnapshotsCommand) Run(ctx context.Context, args []string) (err error) {
return err return err
} else if dbc := config.DBConfig(path); dbc == nil { } else if dbc := config.DBConfig(path); dbc == nil {
return fmt.Errorf("database not found in config: %s", path) return fmt.Errorf("database not found in config: %s", path)
} else if db, err = newDBFromConfig(&config, dbc); err != nil { } else if db, err = NewDBFromConfig(dbc); err != nil {
return err return err
} }
@@ -58,8 +63,6 @@ func (c *SnapshotsCommand) Run(ctx context.Context, args []string) (err error) {
return fmt.Errorf("replica %q not found for database %q", *replicaName, db.Path()) return fmt.Errorf("replica %q not found for database %q", *replicaName, db.Path())
} }
} }
} else {
return errors.New("config path or replica URL required")
} }
// Find snapshots by db or replica. // Find snapshots by db or replica.
@@ -109,6 +112,9 @@ Arguments:
Specifies the configuration file. Specifies the configuration file.
Defaults to %s Defaults to %s
-no-expand-env
Disables environment variable expansion in configuration file.
-replica NAME -replica NAME
Optional, filter by a specific replica. Optional, filter by a specific replica.

View File

@@ -2,7 +2,6 @@ package main
import ( import (
"context" "context"
"errors"
"flag" "flag"
"fmt" "fmt"
"os" "os"
@@ -17,9 +16,8 @@ type WALCommand struct{}
// Run executes the command. // Run executes the command.
func (c *WALCommand) Run(ctx context.Context, args []string) (err error) { func (c *WALCommand) Run(ctx context.Context, args []string) (err error) {
var configPath string
fs := flag.NewFlagSet("litestream-wal", flag.ContinueOnError) fs := flag.NewFlagSet("litestream-wal", flag.ContinueOnError)
registerConfigFlag(fs, &configPath) configPath, noExpandEnv := registerConfigFlag(fs)
replicaName := fs.String("replica", "", "replica name") replicaName := fs.String("replica", "", "replica name")
generation := fs.String("generation", "", "generation name") generation := fs.String("generation", "", "generation name")
fs.Usage = c.Usage fs.Usage = c.Usage
@@ -34,12 +32,19 @@ func (c *WALCommand) Run(ctx context.Context, args []string) (err error) {
var db *litestream.DB var db *litestream.DB
var r litestream.Replica var r litestream.Replica
if isURL(fs.Arg(0)) { if isURL(fs.Arg(0)) {
if r, err = NewReplicaFromURL(fs.Arg(0)); err != nil { if *configPath != "" {
return fmt.Errorf("cannot specify a replica URL and the -config flag")
}
if r, err = NewReplicaFromConfig(&ReplicaConfig{URL: fs.Arg(0)}, nil); err != nil {
return err return err
} }
} else if configPath != "" { } else {
if *configPath == "" {
*configPath = DefaultConfigPath()
}
// Load configuration. // Load configuration.
config, err := ReadConfigFile(configPath) config, err := ReadConfigFile(*configPath, !*noExpandEnv)
if err != nil { if err != nil {
return err return err
} }
@@ -49,7 +54,7 @@ func (c *WALCommand) Run(ctx context.Context, args []string) (err error) {
return err return err
} else if dbc := config.DBConfig(path); dbc == nil { } else if dbc := config.DBConfig(path); dbc == nil {
return fmt.Errorf("database not found in config: %s", path) return fmt.Errorf("database not found in config: %s", path)
} else if db, err = newDBFromConfig(&config, dbc); err != nil { } else if db, err = NewDBFromConfig(dbc); err != nil {
return err return err
} }
@@ -59,8 +64,6 @@ func (c *WALCommand) Run(ctx context.Context, args []string) (err error) {
return fmt.Errorf("replica %q not found for database %q", *replicaName, db.Path()) return fmt.Errorf("replica %q not found for database %q", *replicaName, db.Path())
} }
} }
} else {
return errors.New("config path or replica URL required")
} }
// Find WAL files by db or replica. // Find WAL files by db or replica.
@@ -115,6 +118,9 @@ Arguments:
Specifies the configuration file. Specifies the configuration file.
Defaults to %s Defaults to %s
-no-expand-env
Disables environment variable expansion in configuration file.
-replica NAME -replica NAME
Optional, filter by a specific replica. Optional, filter by a specific replica.

372
db.go
View File

@@ -23,6 +23,7 @@ import (
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto" "github.com/prometheus/client_golang/prometheus/promauto"
"golang.org/x/sync/errgroup"
) )
// Default DB settings. // Default DB settings.
@@ -45,6 +46,7 @@ type DB struct {
mu sync.RWMutex mu sync.RWMutex
path string // part to database path string // part to database
db *sql.DB // target database db *sql.DB // target database
f *os.File // long-running db file descriptor
rtx *sql.Tx // long running read transaction rtx *sql.Tx // long running read transaction
pageSize int // page size, in bytes pageSize int // page size, in bytes
notify chan struct{} // closes on WAL change notify chan struct{} // closes on WAL change
@@ -259,6 +261,11 @@ func (db *DB) PageSize() int {
// Open initializes the background monitoring goroutine. // Open initializes the background monitoring goroutine.
func (db *DB) Open() (err error) { func (db *DB) Open() (err error) {
// Validate fields on database.
if db.MinCheckpointPageN <= 0 {
return fmt.Errorf("minimum checkpoint page count required")
}
// Validate that all replica names are unique. // Validate that all replica names are unique.
m := make(map[string]struct{}) m := make(map[string]struct{})
for _, r := range db.Replicas { for _, r := range db.Replicas {
@@ -285,15 +292,55 @@ func (db *DB) Open() (err error) {
// Close releases the read lock & closes the database. This method should only // Close releases the read lock & closes the database. This method should only
// be called by tests as it causes the underlying database to be checkpointed. // be called by tests as it causes the underlying database to be checkpointed.
func (db *DB) Close() (err error) { func (db *DB) Close() (err error) {
if e := db.SoftClose(); e != nil && err == nil { return db.close(false)
}
// SoftClose closes everything but the underlying db connection. This method
// is available because the binary needs to avoid closing the database on exit
// to prevent autocheckpointing.
func (db *DB) SoftClose() (err error) {
return db.close(true)
}
func (db *DB) close(soft bool) (err error) {
db.cancel()
db.wg.Wait()
// Start a new context for shutdown since we canceled the DB context.
ctx := context.Background()
// Perform a final db sync, if initialized.
if db.db != nil {
if e := db.Sync(ctx); e != nil && err == nil {
err = e err = e
} }
}
// Ensure replicas perform a final sync and stop replicating.
for _, r := range db.Replicas {
if db.db != nil { if db.db != nil {
if e := r.Sync(ctx); e != nil && err == nil {
err = e
}
}
r.Stop(!soft)
}
// Release the read lock to allow other applications to handle checkpointing.
if db.rtx != nil {
if e := db.releaseReadLock(); e != nil && err == nil {
err = e
}
}
// Only perform full close if this is not a soft close.
// This closes the underlying database connection which can clean up the WAL.
if !soft && db.db != nil {
if e := db.db.Close(); e != nil && err == nil { if e := db.db.Close(); e != nil && err == nil {
err = e err = e
} }
} }
return err return err
} }
@@ -381,11 +428,34 @@ func (db *DB) init() (err error) {
dsn := db.path dsn := db.path
dsn += fmt.Sprintf("?_busy_timeout=%d", BusyTimeout.Milliseconds()) dsn += fmt.Sprintf("?_busy_timeout=%d", BusyTimeout.Milliseconds())
// Connect to SQLite database & enable WAL. // Connect to SQLite database.
if db.db, err = sql.Open("sqlite3", dsn); err != nil { if db.db, err = sql.Open("sqlite3", dsn); err != nil {
return err return err
} else if _, err := db.db.Exec(`PRAGMA journal_mode = wal;`); err != nil { }
return fmt.Errorf("enable wal: %w", err)
// Open long-running database file descriptor. Required for non-OFD locks.
if db.f, err = os.Open(db.path); err != nil {
return fmt.Errorf("open db file descriptor: %w", err)
}
// Ensure database is closed if init fails.
// Initialization can retry on next sync.
defer func() {
if err != nil {
_ = db.releaseReadLock()
db.db.Close()
db.f.Close()
db.db, db.f = nil, nil
}
}()
// Enable WAL and ensure it is set. New mode should be returned on success:
// https://www.sqlite.org/pragma.html#pragma_journal_mode
var mode string
if err := db.db.QueryRow(`PRAGMA journal_mode = wal;`).Scan(&mode); err != nil {
return err
} else if mode != "wal" {
return fmt.Errorf("enable wal failed, mode=%q", mode)
} }
// Disable autocheckpoint for litestream's connection. // Disable autocheckpoint for litestream's connection.
@@ -425,7 +495,7 @@ func (db *DB) init() (err error) {
// If we have an existing shadow WAL, ensure the headers match. // If we have an existing shadow WAL, ensure the headers match.
if err := db.verifyHeadersMatch(); err != nil { if err := db.verifyHeadersMatch(); err != nil {
log.Printf("%s: init: cannot determine last wal position, clearing generation (%s)", db.path, err) log.Printf("%s: init: cannot determine last wal position, clearing generation; %s", db.path, err)
if err := os.Remove(db.GenerationNamePath()); err != nil && !os.IsNotExist(err) { if err := os.Remove(db.GenerationNamePath()); err != nil && !os.IsNotExist(err) {
return fmt.Errorf("remove generation name: %w", err) return fmt.Errorf("remove generation name: %w", err)
} }
@@ -475,7 +545,7 @@ func (db *DB) verifyHeadersMatch() error {
} }
if !bytes.Equal(hdr0, hdr1) { if !bytes.Equal(hdr0, hdr1) {
return fmt.Errorf("wal header mismatch") return fmt.Errorf("wal header mismatch %x <> %x on %s", hdr0, hdr1, shadowWALPath)
} }
return nil return nil
} }
@@ -560,26 +630,6 @@ func (db *DB) cleanWAL() error {
return nil return nil
} }
// SoftClose closes everything but the underlying db connection. This method
// is available because the binary needs to avoid closing the database on exit
// to prevent autocheckpointing.
func (db *DB) SoftClose() (err error) {
db.cancel()
db.wg.Wait()
// Ensure replicas all stop replicating.
for _, r := range db.Replicas {
r.Stop()
}
if db.rtx != nil {
if e := db.releaseReadLock(); e != nil && err == nil {
err = e
}
}
return err
}
// acquireReadLock begins a read transaction on the database to prevent checkpointing. // acquireReadLock begins a read transaction on the database to prevent checkpointing.
func (db *DB) acquireReadLock() error { func (db *DB) acquireReadLock() error {
if db.rtx != nil { if db.rtx != nil {
@@ -674,7 +724,7 @@ func (db *DB) createGeneration() (string, error) {
} }
// Sync copies pending data from the WAL to the shadow WAL. // Sync copies pending data from the WAL to the shadow WAL.
func (db *DB) Sync() (err error) { func (db *DB) Sync(ctx context.Context) (err error) {
db.mu.Lock() db.mu.Lock()
defer db.mu.Unlock() defer db.mu.Unlock()
@@ -701,27 +751,6 @@ func (db *DB) Sync() (err error) {
return fmt.Errorf("ensure wal exists: %w", err) return fmt.Errorf("ensure wal exists: %w", err)
} }
// Start a transaction. This will be promoted immediately after.
tx, err := db.db.Begin()
if err != nil {
return fmt.Errorf("begin: %w", err)
}
// Ensure write transaction rolls back before returning.
defer func() {
if e := rollback(tx); e != nil && err == nil {
err = e
}
}()
// Insert into the lock table to promote to a write tx. The lock table
// insert will never actually occur because our tx will be rolled back,
// however, it will ensure our tx grabs the write lock. Unfortunately,
// we can't call "BEGIN IMMEDIATE" as we are already in a transaction.
if _, err := tx.ExecContext(db.ctx, `INSERT INTO _litestream_lock (id) VALUES (1);`); err != nil {
return fmt.Errorf("_litestream_lock: %w", err)
}
// Verify our last sync matches the current state of the WAL. // Verify our last sync matches the current state of the WAL.
// This ensures that we have an existing generation & that the last sync // This ensures that we have an existing generation & that the last sync
// position of the real WAL hasn't been overwritten by another process. // position of the real WAL hasn't been overwritten by another process.
@@ -768,16 +797,11 @@ func (db *DB) Sync() (err error) {
checkpoint = true checkpoint = true
} }
// Release write lock before checkpointing & exiting.
if err := tx.Rollback(); err != nil {
return fmt.Errorf("rollback write tx: %w", err)
}
// Issue the checkpoint. // Issue the checkpoint.
if checkpoint { if checkpoint {
changed = true changed = true
if err := db.checkpointAndInit(info.generation, checkpointMode); err != nil { if err := db.checkpointAndInit(ctx, info.generation, checkpointMode); err != nil {
return fmt.Errorf("checkpoint: mode=%v err=%w", checkpointMode, err) return fmt.Errorf("checkpoint: mode=%v err=%w", checkpointMode, err)
} }
} }
@@ -843,7 +867,7 @@ func (db *DB) verify() (info syncInfo, err error) {
if err != nil { if err != nil {
return info, err return info, err
} }
info.walSize = fi.Size() info.walSize = frameAlign(fi.Size(), db.pageSize)
info.walModTime = fi.ModTime() info.walModTime = fi.ModTime()
db.walSizeGauge.Set(float64(fi.Size())) db.walSizeGauge.Set(float64(fi.Size()))
@@ -867,7 +891,6 @@ func (db *DB) verify() (info syncInfo, err error) {
} }
info.shadowWALSize = frameAlign(fi.Size(), db.pageSize) info.shadowWALSize = frameAlign(fi.Size(), db.pageSize)
// Truncate shadow WAL if there is a partial page.
// Exit if shadow WAL does not contain a full header. // Exit if shadow WAL does not contain a full header.
if info.shadowWALSize < WALHeaderSize { if info.shadowWALSize < WALHeaderSize {
info.reason = "short shadow wal" info.reason = "short shadow wal"
@@ -900,9 +923,9 @@ func (db *DB) verify() (info syncInfo, err error) {
// Verify last page synced still matches. // Verify last page synced still matches.
if info.shadowWALSize > WALHeaderSize { if info.shadowWALSize > WALHeaderSize {
offset := info.shadowWALSize - int64(db.pageSize+WALFrameHeaderSize) offset := info.shadowWALSize - int64(db.pageSize+WALFrameHeaderSize)
if buf0, err := readFileAt(db.WALPath(), offset, int64(db.pageSize+WALFrameHeaderSize)); err != nil { if buf0, err := readWALFileAt(db.WALPath(), offset, int64(db.pageSize+WALFrameHeaderSize)); err != nil {
return info, fmt.Errorf("cannot read last synced wal page: %w", err) return info, fmt.Errorf("cannot read last synced wal page: %w", err)
} else if buf1, err := readFileAt(info.shadowWALPath, offset, int64(db.pageSize+WALFrameHeaderSize)); err != nil { } else if buf1, err := readWALFileAt(info.shadowWALPath, offset, int64(db.pageSize+WALFrameHeaderSize)); err != nil {
return info, fmt.Errorf("cannot read last synced shadow wal page: %w", err) return info, fmt.Errorf("cannot read last synced shadow wal page: %w", err)
} else if !bytes.Equal(buf0, buf1) { } else if !bytes.Equal(buf0, buf1) {
info.reason = "wal overwritten by another process" info.reason = "wal overwritten by another process"
@@ -1289,7 +1312,7 @@ func (db *DB) checkpoint(mode string) (err error) {
// checkpointAndInit performs a checkpoint on the WAL file and initializes a // checkpointAndInit performs a checkpoint on the WAL file and initializes a
// new shadow WAL file. // new shadow WAL file.
func (db *DB) checkpointAndInit(generation, mode string) error { func (db *DB) checkpointAndInit(ctx context.Context, generation, mode string) error {
shadowWALPath, err := db.CurrentShadowWALPath(generation) shadowWALPath, err := db.CurrentShadowWALPath(generation)
if err != nil { if err != nil {
return err return err
@@ -1321,6 +1344,21 @@ func (db *DB) checkpointAndInit(generation, mode string) error {
return nil return nil
} }
// Start a transaction. This will be promoted immediately after.
tx, err := db.db.Begin()
if err != nil {
return fmt.Errorf("begin: %w", err)
}
defer func() { _ = rollback(tx) }()
// Insert into the lock table to promote to a write tx. The lock table
// insert will never actually occur because our tx will be rolled back,
// however, it will ensure our tx grabs the write lock. Unfortunately,
// we can't call "BEGIN IMMEDIATE" as we are already in a transaction.
if _, err := tx.ExecContext(ctx, `INSERT INTO _litestream_lock (id) VALUES (1);`); err != nil {
return fmt.Errorf("_litestream_lock: %w", err)
}
// Copy the end of the previous WAL before starting a new shadow WAL. // Copy the end of the previous WAL before starting a new shadow WAL.
if _, err := db.copyToShadowWAL(shadowWALPath); err != nil { if _, err := db.copyToShadowWAL(shadowWALPath); err != nil {
return fmt.Errorf("cannot copy to end of shadow wal: %w", err) return fmt.Errorf("cannot copy to end of shadow wal: %w", err)
@@ -1338,6 +1376,10 @@ func (db *DB) checkpointAndInit(generation, mode string) error {
return fmt.Errorf("cannot init shadow wal file: name=%s err=%w", newShadowWALPath, err) return fmt.Errorf("cannot init shadow wal file: name=%s err=%w", newShadowWALPath, err)
} }
// Release write lock before checkpointing & exiting.
if err := tx.Rollback(); err != nil {
return fmt.Errorf("rollback post-checkpoint tx: %w", err)
}
return nil return nil
} }
@@ -1355,7 +1397,7 @@ func (db *DB) monitor() {
} }
// Sync the database to the shadow WAL. // Sync the database to the shadow WAL.
if err := db.Sync(); err != nil && !errors.Is(err, context.Canceled) { if err := db.Sync(db.ctx); err != nil && !errors.Is(err, context.Canceled) {
log.Printf("%s: sync error: %s", db.path, err) log.Printf("%s: sync error: %s", db.path, err)
} }
} }
@@ -1367,13 +1409,13 @@ func (db *DB) monitor() {
// replica or generation or it will automatically choose the best one. Finally, // replica or generation or it will automatically choose the best one. Finally,
// a timestamp can be specified to restore the database to a specific // a timestamp can be specified to restore the database to a specific
// point-in-time. // point-in-time.
func RestoreReplica(ctx context.Context, r Replica, opt RestoreOptions) error { func RestoreReplica(ctx context.Context, r Replica, opt RestoreOptions) (err error) {
// Validate options. // Validate options.
if opt.OutputPath == "" { if opt.OutputPath == "" {
return fmt.Errorf("output path required") return fmt.Errorf("output path required")
} else if opt.Generation == "" && opt.Index != math.MaxInt64 { } else if opt.Generation == "" && opt.Index != math.MaxInt32 {
return fmt.Errorf("must specify generation when restoring to index") return fmt.Errorf("must specify generation when restoring to index")
} else if opt.Index != math.MaxInt64 && !opt.Timestamp.IsZero() { } else if opt.Index != math.MaxInt32 && !opt.Timestamp.IsZero() {
return fmt.Errorf("cannot specify index & timestamp to restore") return fmt.Errorf("cannot specify index & timestamp to restore")
} }
@@ -1388,19 +1430,23 @@ func RestoreReplica(ctx context.Context, r Replica, opt RestoreOptions) error {
logPrefix = fmt.Sprintf("%s(%s)", db.Path(), r.Name()) logPrefix = fmt.Sprintf("%s(%s)", db.Path(), r.Name())
} }
// Ensure output path does not already exist (unless this is a dry run). // Ensure output path does not already exist.
if !opt.DryRun {
if _, err := os.Stat(opt.OutputPath); err == nil { if _, err := os.Stat(opt.OutputPath); err == nil {
return fmt.Errorf("cannot restore, output path already exists: %s", opt.OutputPath) return fmt.Errorf("cannot restore, output path already exists: %s", opt.OutputPath)
} else if err != nil && !os.IsNotExist(err) { } else if err != nil && !os.IsNotExist(err) {
return err return err
} }
}
// Find lastest snapshot that occurs before timestamp. // Find lastest snapshot that occurs before timestamp or index.
minWALIndex, err := SnapshotIndexAt(ctx, r, opt.Generation, opt.Timestamp) var minWALIndex int
if err != nil { if opt.Index < math.MaxInt32 {
return fmt.Errorf("cannot find snapshot index for restore: %w", err) if minWALIndex, err = SnapshotIndexByIndex(ctx, r, opt.Generation, opt.Index); err != nil {
return fmt.Errorf("cannot find snapshot index: %w", err)
}
} else {
if minWALIndex, err = SnapshotIndexAt(ctx, r, opt.Generation, opt.Timestamp); err != nil {
return fmt.Errorf("cannot find snapshot index by timestamp: %w", err)
}
} }
// Find the maximum WAL index that occurs before timestamp. // Find the maximum WAL index that occurs before timestamp.
@@ -1408,7 +1454,7 @@ func RestoreReplica(ctx context.Context, r Replica, opt RestoreOptions) error {
if err != nil { if err != nil {
return fmt.Errorf("cannot find max wal index for restore: %w", err) return fmt.Errorf("cannot find max wal index for restore: %w", err)
} }
logger.Printf("%s: starting restore: generation %s, index %08x-%08x", logPrefix, opt.Generation, minWALIndex, maxWALIndex) snapshotOnly := maxWALIndex == -1
// Initialize starting position. // Initialize starting position.
pos := Pos{Generation: opt.Generation, Index: minWALIndex} pos := Pos{Generation: opt.Generation, Index: minWALIndex}
@@ -1416,51 +1462,122 @@ func RestoreReplica(ctx context.Context, r Replica, opt RestoreOptions) error {
// Copy snapshot to output path. // Copy snapshot to output path.
logger.Printf("%s: restoring snapshot %s/%08x to %s", logPrefix, opt.Generation, minWALIndex, tmpPath) logger.Printf("%s: restoring snapshot %s/%08x to %s", logPrefix, opt.Generation, minWALIndex, tmpPath)
if !opt.DryRun {
if err := restoreSnapshot(ctx, r, pos.Generation, pos.Index, tmpPath); err != nil { if err := restoreSnapshot(ctx, r, pos.Generation, pos.Index, tmpPath); err != nil {
return fmt.Errorf("cannot restore snapshot: %w", err) return fmt.Errorf("cannot restore snapshot: %w", err)
} }
// If no WAL files available, move snapshot to final path & exit early.
if snapshotOnly {
logger.Printf("%s: snapshot only, finalizing database", logPrefix)
return os.Rename(tmpPath, opt.OutputPath)
} }
// Restore each WAL file until we reach our maximum index. // Begin processing WAL files.
logger.Printf("%s: restoring wal files: generation=%s index=[%08x,%08x]", logPrefix, opt.Generation, minWALIndex, maxWALIndex)
// Fill input channel with all WAL indexes to be loaded in order.
ch := make(chan int, maxWALIndex-minWALIndex+1)
for index := minWALIndex; index <= maxWALIndex; index++ { for index := minWALIndex; index <= maxWALIndex; index++ {
if !opt.DryRun { ch <- index
if err = restoreWAL(ctx, r, opt.Generation, index, tmpPath); os.IsNotExist(err) && index == minWALIndex && index == maxWALIndex {
logger.Printf("%s: no wal available, snapshot only", logPrefix)
break // snapshot file only, ignore error
} else if err != nil {
return fmt.Errorf("cannot restore wal: %w", err)
} }
close(ch)
// Track load state for each WAL.
var mu sync.Mutex
cond := sync.NewCond(&mu)
walStates := make([]walRestoreState, maxWALIndex-minWALIndex+1)
parallelism := opt.Parallelism
if parallelism < 1 {
parallelism = 1
} }
if opt.Verbose { // Download WAL files to disk in parallel.
logger.Printf("%s: restored wal %s/%08x", logPrefix, opt.Generation, index) g, ctx := errgroup.WithContext(ctx)
for i := 0; i < parallelism; i++ {
g.Go(func() error {
for {
select {
case <-ctx.Done():
cond.Broadcast()
return err
case index, ok := <-ch:
if !ok {
cond.Broadcast()
return nil
} }
startTime := time.Now()
err := downloadWAL(ctx, r, opt.Generation, index, tmpPath)
if err != nil {
err = fmt.Errorf("cannot download wal %s/%08x: %w", opt.Generation, index, err)
}
// Mark index as ready-to-apply and notify applying code.
mu.Lock()
walStates[index-minWALIndex] = walRestoreState{ready: true, err: err}
mu.Unlock()
cond.Broadcast()
// Returning the error here will cancel the other goroutines.
if err != nil {
return err
}
logger.Printf("%s: downloaded wal %s/%08x elapsed=%s",
logPrefix, opt.Generation, index,
time.Since(startTime).String(),
)
}
}
})
}
// Apply WAL files in order as they are ready.
for index := minWALIndex; index <= maxWALIndex; index++ {
// Wait until next WAL file is ready to apply.
mu.Lock()
for !walStates[index-minWALIndex].ready {
if err := ctx.Err(); err != nil {
return err
}
cond.Wait()
}
if err := walStates[index-minWALIndex].err; err != nil {
return err
}
mu.Unlock()
// Apply WAL to database file.
startTime := time.Now()
if err = applyWAL(ctx, index, tmpPath); err != nil {
return fmt.Errorf("cannot apply wal: %w", err)
}
logger.Printf("%s: applied wal %s/%08x elapsed=%s",
logPrefix, opt.Generation, index,
time.Since(startTime).String(),
)
}
// Ensure all goroutines finish. All errors should have been handled during
// the processing of WAL files but this ensures that all processing is done.
if err := g.Wait(); err != nil {
return err
} }
// Copy file to final location. // Copy file to final location.
logger.Printf("%s: renaming database from temporary location", logPrefix) logger.Printf("%s: renaming database from temporary location", logPrefix)
if !opt.DryRun {
if err := os.Rename(tmpPath, opt.OutputPath); err != nil { if err := os.Rename(tmpPath, opt.OutputPath); err != nil {
return err return err
} }
}
return nil return nil
} }
func checksumFile(filename string) (uint64, error) { type walRestoreState struct {
f, err := os.Open(filename) ready bool
if err != nil { err error
return 0, err
}
defer f.Close()
h := crc64.New(crc64.MakeTable(crc64.ISO))
if _, err := io.Copy(h, f); err != nil {
return 0, err
}
return h.Sum64(), nil
} }
// CalcRestoreTarget returns a replica & generation to restore from based on opt criteria. // CalcRestoreTarget returns a replica & generation to restore from based on opt criteria.
@@ -1572,8 +1689,10 @@ func restoreSnapshot(ctx context.Context, r Replica, generation string, index in
return f.Close() return f.Close()
} }
// restoreWAL copies a WAL file from the replica to the local WAL and forces checkpoint. // downloadWAL copies a WAL file from the replica to a local copy next to the DB.
func restoreWAL(ctx context.Context, r Replica, generation string, index int, dbPath string) error { // The WAL is later applied by applyWAL(). This function can be run in parallel
// to download multiple WAL files simultaneously.
func downloadWAL(ctx context.Context, r Replica, generation string, index int, dbPath string) error {
// Determine the user/group & mode based on the DB, if available. // Determine the user/group & mode based on the DB, if available.
uid, gid, mode := -1, -1, os.FileMode(0600) uid, gid, mode := -1, -1, os.FileMode(0600)
if db := r.DB(); db != nil { if db := r.DB(); db != nil {
@@ -1588,7 +1707,7 @@ func restoreWAL(ctx context.Context, r Replica, generation string, index int, db
defer rd.Close() defer rd.Close()
// Open handle to destination WAL path. // Open handle to destination WAL path.
f, err := createFile(dbPath+"-wal", mode, uid, gid) f, err := createFile(fmt.Sprintf("%s-%08x-wal", dbPath, index), mode, uid, gid)
if err != nil { if err != nil {
return err return err
} }
@@ -1600,6 +1719,15 @@ func restoreWAL(ctx context.Context, r Replica, generation string, index int, db
} else if err := f.Close(); err != nil { } else if err := f.Close(); err != nil {
return err return err
} }
return nil
}
// applyWAL performs a truncating checkpoint on the given database.
func applyWAL(ctx context.Context, index int, dbPath string) error {
// Copy WAL file from it's staging path to the correct "-wal" location.
if err := os.Rename(fmt.Sprintf("%s-%08x-wal", dbPath, index), dbPath+"-wal"); err != nil {
return err
}
// Open SQLite database and force a truncating checkpoint. // Open SQLite database and force a truncating checkpoint.
d, err := sql.Open("sqlite3", dbPath) d, err := sql.Open("sqlite3", dbPath)
@@ -1614,7 +1742,6 @@ func restoreWAL(ctx context.Context, r Replica, generation string, index int, db
} else if row[0] != 0 { } else if row[0] != 0 {
return fmt.Errorf("truncation checkpoint failed during restore (%d,%d,%d)", row[0], row[1], row[2]) return fmt.Errorf("truncation checkpoint failed during restore (%d,%d,%d)", row[0], row[1], row[2])
} }
return d.Close() return d.Close()
} }
@@ -1625,7 +1752,7 @@ func restoreWAL(ctx context.Context, r Replica, generation string, index int, db
// unable to checkpoint during this time. // unable to checkpoint during this time.
// //
// If dst is set, the database file is copied to that location before checksum. // If dst is set, the database file is copied to that location before checksum.
func (db *DB) CRC64() (uint64, Pos, error) { func (db *DB) CRC64(ctx context.Context) (uint64, Pos, error) {
db.mu.Lock() db.mu.Lock()
defer db.mu.Unlock() defer db.mu.Unlock()
@@ -1643,7 +1770,7 @@ func (db *DB) CRC64() (uint64, Pos, error) {
} }
// Force a RESTART checkpoint to ensure the database is at the start of the WAL. // Force a RESTART checkpoint to ensure the database is at the start of the WAL.
if err := db.checkpointAndInit(generation, CheckpointModeRestart); err != nil { if err := db.checkpointAndInit(ctx, generation, CheckpointModeRestart); err != nil {
return 0, Pos{}, err return 0, Pos{}, err
} }
@@ -1655,13 +1782,19 @@ func (db *DB) CRC64() (uint64, Pos, error) {
} }
pos.Offset = 0 pos.Offset = 0
chksum, err := checksumFile(db.Path()) // Seek to the beginning of the db file descriptor and checksum whole file.
if err != nil { h := crc64.New(crc64.MakeTable(crc64.ISO))
if _, err := db.f.Seek(0, io.SeekStart); err != nil {
return 0, pos, err
} else if _, err := io.Copy(h, db.f); err != nil {
return 0, pos, err return 0, pos, err
} }
return chksum, pos, nil return h.Sum64(), pos, nil
} }
// DefaultRestoreParallelism is the default parallelism when downloading WAL files.
const DefaultRestoreParallelism = 8
// RestoreOptions represents options for DB.Restore(). // RestoreOptions represents options for DB.Restore().
type RestoreOptions struct { type RestoreOptions struct {
// Target path to restore into. // Target path to restore into.
@@ -1677,16 +1810,15 @@ type RestoreOptions struct {
Generation string Generation string
// Specific index to restore from. // Specific index to restore from.
// Set to math.MaxInt64 to ignore index. // Set to math.MaxInt32 to ignore index.
Index int Index int
// Point-in-time to restore database. // Point-in-time to restore database.
// If zero, database restore to most recent state available. // If zero, database restore to most recent state available.
Timestamp time.Time Timestamp time.Time
// If true, no actual restore is performed. // Specifies how many WAL files are downloaded in parallel during restore.
// Only equivalent log output for a regular restore. Parallelism int
DryRun bool
// Logging settings. // Logging settings.
Logger *log.Logger Logger *log.Logger
@@ -1696,7 +1828,8 @@ type RestoreOptions struct {
// NewRestoreOptions returns a new instance of RestoreOptions with defaults. // NewRestoreOptions returns a new instance of RestoreOptions with defaults.
func NewRestoreOptions() RestoreOptions { func NewRestoreOptions() RestoreOptions {
return RestoreOptions{ return RestoreOptions{
Index: math.MaxInt64, Index: math.MaxInt32,
Parallelism: DefaultRestoreParallelism,
} }
} }
@@ -1791,24 +1924,3 @@ func headerByteOrder(hdr []byte) (binary.ByteOrder, error) {
return nil, fmt.Errorf("invalid wal header magic: %x", magic) return nil, fmt.Errorf("invalid wal header magic: %x", magic)
} }
} }
func copyFile(dst, src string) error {
r, err := os.Open(src)
if err != nil {
return err
}
defer r.Close()
w, err := os.Create(dst)
if err != nil {
return err
}
defer w.Close()
if _, err := io.Copy(w, r); err != nil {
return err
} else if err := w.Sync(); err != nil {
return err
}
return nil
}

View File

@@ -1,10 +1,12 @@
package litestream_test package litestream_test
import ( import (
"context"
"database/sql" "database/sql"
"io/ioutil" "io/ioutil"
"os" "os"
"path/filepath" "path/filepath"
"strings"
"testing" "testing"
"time" "time"
@@ -98,9 +100,12 @@ func TestDB_UpdatedAt(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
sleepTime := 100 * time.Millisecond
if os.Getenv("CI") != "" { if os.Getenv("CI") != "" {
time.Sleep(1 * time.Second) sleepTime = 1 * time.Second
} }
time.Sleep(sleepTime)
if _, err := sqldb.Exec(`CREATE TABLE t (id INT);`); err != nil { if _, err := sqldb.Exec(`CREATE TABLE t (id INT);`); err != nil {
t.Fatal(err) t.Fatal(err)
} }
@@ -118,7 +123,7 @@ func TestDB_CRC64(t *testing.T) {
t.Run("ErrNotExist", func(t *testing.T) { t.Run("ErrNotExist", func(t *testing.T) {
db := MustOpenDB(t) db := MustOpenDB(t)
defer MustCloseDB(t, db) defer MustCloseDB(t, db)
if _, _, err := db.CRC64(); !os.IsNotExist(err) { if _, _, err := db.CRC64(context.Background()); !os.IsNotExist(err) {
t.Fatalf("unexpected error: %#v", err) t.Fatalf("unexpected error: %#v", err)
} }
}) })
@@ -127,11 +132,11 @@ func TestDB_CRC64(t *testing.T) {
db, sqldb := MustOpenDBs(t) db, sqldb := MustOpenDBs(t)
defer MustCloseDBs(t, db, sqldb) defer MustCloseDBs(t, db, sqldb)
if err := db.Sync(); err != nil { if err := db.Sync(context.Background()); err != nil {
t.Fatal(err) t.Fatal(err)
} }
chksum0, _, err := db.CRC64() chksum0, _, err := db.CRC64(context.Background())
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@@ -139,7 +144,7 @@ func TestDB_CRC64(t *testing.T) {
// Issue change that is applied to the WAL. Checksum should not change. // Issue change that is applied to the WAL. Checksum should not change.
if _, err := sqldb.Exec(`CREATE TABLE t (id INT);`); err != nil { if _, err := sqldb.Exec(`CREATE TABLE t (id INT);`); err != nil {
t.Fatal(err) t.Fatal(err)
} else if chksum1, _, err := db.CRC64(); err != nil { } else if chksum1, _, err := db.CRC64(context.Background()); err != nil {
t.Fatal(err) t.Fatal(err)
} else if chksum0 == chksum1 { } else if chksum0 == chksum1 {
t.Fatal("expected different checksum event after WAL change") t.Fatal("expected different checksum event after WAL change")
@@ -150,7 +155,7 @@ func TestDB_CRC64(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
if chksum2, _, err := db.CRC64(); err != nil { if chksum2, _, err := db.CRC64(context.Background()); err != nil {
t.Fatal(err) t.Fatal(err)
} else if chksum0 == chksum2 { } else if chksum0 == chksum2 {
t.Fatal("expected different checksums after checkpoint") t.Fatal("expected different checksums after checkpoint")
@@ -164,7 +169,7 @@ func TestDB_Sync(t *testing.T) {
t.Run("NoDB", func(t *testing.T) { t.Run("NoDB", func(t *testing.T) {
db := MustOpenDB(t) db := MustOpenDB(t)
defer MustCloseDB(t, db) defer MustCloseDB(t, db)
if err := db.Sync(); err != nil { if err := db.Sync(context.Background()); err != nil {
t.Fatal(err) t.Fatal(err)
} }
}) })
@@ -174,7 +179,7 @@ func TestDB_Sync(t *testing.T) {
db, sqldb := MustOpenDBs(t) db, sqldb := MustOpenDBs(t)
defer MustCloseDBs(t, db, sqldb) defer MustCloseDBs(t, db, sqldb)
if err := db.Sync(); err != nil { if err := db.Sync(context.Background()); err != nil {
t.Fatal(err) t.Fatal(err)
} }
@@ -212,7 +217,7 @@ func TestDB_Sync(t *testing.T) {
} }
// Perform initial sync & grab initial position. // Perform initial sync & grab initial position.
if err := db.Sync(); err != nil { if err := db.Sync(context.Background()); err != nil {
t.Fatal(err) t.Fatal(err)
} }
@@ -227,7 +232,7 @@ func TestDB_Sync(t *testing.T) {
} }
// Sync to ensure position moves forward one page. // Sync to ensure position moves forward one page.
if err := db.Sync(); err != nil { if err := db.Sync(context.Background()); err != nil {
t.Fatal(err) t.Fatal(err)
} else if pos1, err := db.Pos(); err != nil { } else if pos1, err := db.Pos(); err != nil {
t.Fatal(err) t.Fatal(err)
@@ -246,7 +251,7 @@ func TestDB_Sync(t *testing.T) {
defer MustCloseDBs(t, db, sqldb) defer MustCloseDBs(t, db, sqldb)
// Issue initial sync and truncate WAL. // Issue initial sync and truncate WAL.
if err := db.Sync(); err != nil { if err := db.Sync(context.Background()); err != nil {
t.Fatal(err) t.Fatal(err)
} }
@@ -275,7 +280,7 @@ func TestDB_Sync(t *testing.T) {
defer MustCloseDB(t, db) defer MustCloseDB(t, db)
// Re-sync and ensure new generation has been created. // Re-sync and ensure new generation has been created.
if err := db.Sync(); err != nil { if err := db.Sync(context.Background()); err != nil {
t.Fatal(err) t.Fatal(err)
} }
@@ -298,7 +303,7 @@ func TestDB_Sync(t *testing.T) {
} }
// Issue initial sync and truncate WAL. // Issue initial sync and truncate WAL.
if err := db.Sync(); err != nil { if err := db.Sync(context.Background()); err != nil {
t.Fatal(err) t.Fatal(err)
} }
@@ -334,7 +339,7 @@ func TestDB_Sync(t *testing.T) {
defer MustCloseDB(t, db) defer MustCloseDB(t, db)
// Re-sync and ensure new generation has been created. // Re-sync and ensure new generation has been created.
if err := db.Sync(); err != nil { if err := db.Sync(context.Background()); err != nil {
t.Fatal(err) t.Fatal(err)
} }
@@ -354,7 +359,7 @@ func TestDB_Sync(t *testing.T) {
// Execute a query to force a write to the WAL and then sync. // Execute a query to force a write to the WAL and then sync.
if _, err := sqldb.Exec(`CREATE TABLE foo (bar TEXT);`); err != nil { if _, err := sqldb.Exec(`CREATE TABLE foo (bar TEXT);`); err != nil {
t.Fatal(err) t.Fatal(err)
} else if err := db.Sync(); err != nil { } else if err := db.Sync(context.Background()); err != nil {
t.Fatal(err) t.Fatal(err)
} }
@@ -378,7 +383,7 @@ func TestDB_Sync(t *testing.T) {
// Reopen managed database & ensure sync will still work. // Reopen managed database & ensure sync will still work.
db = MustOpenDBAt(t, db.Path()) db = MustOpenDBAt(t, db.Path())
defer MustCloseDB(t, db) defer MustCloseDB(t, db)
if err := db.Sync(); err != nil { if err := db.Sync(context.Background()); err != nil {
t.Fatal(err) t.Fatal(err)
} }
@@ -398,7 +403,7 @@ func TestDB_Sync(t *testing.T) {
// Execute a query to force a write to the WAL and then sync. // Execute a query to force a write to the WAL and then sync.
if _, err := sqldb.Exec(`CREATE TABLE foo (bar TEXT);`); err != nil { if _, err := sqldb.Exec(`CREATE TABLE foo (bar TEXT);`); err != nil {
t.Fatal(err) t.Fatal(err)
} else if err := db.Sync(); err != nil { } else if err := db.Sync(context.Background()); err != nil {
t.Fatal(err) t.Fatal(err)
} }
@@ -417,7 +422,7 @@ func TestDB_Sync(t *testing.T) {
// Reopen managed database & ensure sync will still work. // Reopen managed database & ensure sync will still work.
db = MustOpenDBAt(t, db.Path()) db = MustOpenDBAt(t, db.Path())
defer MustCloseDB(t, db) defer MustCloseDB(t, db)
if err := db.Sync(); err != nil { if err := db.Sync(context.Background()); err != nil {
t.Fatal(err) t.Fatal(err)
} }
@@ -437,7 +442,7 @@ func TestDB_Sync(t *testing.T) {
// Execute a query to force a write to the WAL and then sync. // Execute a query to force a write to the WAL and then sync.
if _, err := sqldb.Exec(`CREATE TABLE foo (bar TEXT);`); err != nil { if _, err := sqldb.Exec(`CREATE TABLE foo (bar TEXT);`); err != nil {
t.Fatal(err) t.Fatal(err)
} else if err := db.Sync(); err != nil { } else if err := db.Sync(context.Background()); err != nil {
t.Fatal(err) t.Fatal(err)
} }
@@ -462,7 +467,7 @@ func TestDB_Sync(t *testing.T) {
// Reopen managed database & ensure sync will still work. // Reopen managed database & ensure sync will still work.
db = MustOpenDBAt(t, db.Path()) db = MustOpenDBAt(t, db.Path())
defer MustCloseDB(t, db) defer MustCloseDB(t, db)
if err := db.Sync(); err != nil { if err := db.Sync(context.Background()); err != nil {
t.Fatal(err) t.Fatal(err)
} }
@@ -489,7 +494,7 @@ func TestDB_Sync(t *testing.T) {
// Execute a query to force a write to the WAL and then sync. // Execute a query to force a write to the WAL and then sync.
if _, err := sqldb.Exec(`CREATE TABLE foo (bar TEXT);`); err != nil { if _, err := sqldb.Exec(`CREATE TABLE foo (bar TEXT);`); err != nil {
t.Fatal(err) t.Fatal(err)
} else if err := db.Sync(); err != nil { } else if err := db.Sync(context.Background()); err != nil {
t.Fatal(err) t.Fatal(err)
} }
@@ -508,7 +513,7 @@ func TestDB_Sync(t *testing.T) {
// Reopen managed database & ensure sync will still work. // Reopen managed database & ensure sync will still work.
db = MustOpenDBAt(t, db.Path()) db = MustOpenDBAt(t, db.Path())
defer MustCloseDB(t, db) defer MustCloseDB(t, db)
if err := db.Sync(); err != nil { if err := db.Sync(context.Background()); err != nil {
t.Fatal(err) t.Fatal(err)
} }
@@ -532,7 +537,7 @@ func TestDB_Sync(t *testing.T) {
// Execute a query to force a write to the WAL and then sync. // Execute a query to force a write to the WAL and then sync.
if _, err := sqldb.Exec(`CREATE TABLE foo (bar TEXT);`); err != nil { if _, err := sqldb.Exec(`CREATE TABLE foo (bar TEXT);`); err != nil {
t.Fatal(err) t.Fatal(err)
} else if err := db.Sync(); err != nil { } else if err := db.Sync(context.Background()); err != nil {
t.Fatal(err) t.Fatal(err)
} }
@@ -544,7 +549,7 @@ func TestDB_Sync(t *testing.T) {
} }
// Sync to shadow WAL. // Sync to shadow WAL.
if err := db.Sync(); err != nil { if err := db.Sync(context.Background()); err != nil {
t.Fatal(err) t.Fatal(err)
} }
@@ -564,7 +569,7 @@ func TestDB_Sync(t *testing.T) {
// Execute a query to force a write to the WAL and then sync. // Execute a query to force a write to the WAL and then sync.
if _, err := sqldb.Exec(`CREATE TABLE foo (bar TEXT);`); err != nil { if _, err := sqldb.Exec(`CREATE TABLE foo (bar TEXT);`); err != nil {
t.Fatal(err) t.Fatal(err)
} else if err := db.Sync(); err != nil { } else if err := db.Sync(context.Background()); err != nil {
t.Fatal(err) t.Fatal(err)
} }
@@ -574,7 +579,7 @@ func TestDB_Sync(t *testing.T) {
// Write to WAL & sync. // Write to WAL & sync.
if _, err := sqldb.Exec(`INSERT INTO foo (bar) VALUES ('baz');`); err != nil { if _, err := sqldb.Exec(`INSERT INTO foo (bar) VALUES ('baz');`); err != nil {
t.Fatal(err) t.Fatal(err)
} else if err := db.Sync(); err != nil { } else if err := db.Sync(context.Background()); err != nil {
t.Fatal(err) t.Fatal(err)
} }
@@ -589,12 +594,14 @@ func TestDB_Sync(t *testing.T) {
// MustOpenDBs returns a new instance of a DB & associated SQL DB. // MustOpenDBs returns a new instance of a DB & associated SQL DB.
func MustOpenDBs(tb testing.TB) (*litestream.DB, *sql.DB) { func MustOpenDBs(tb testing.TB) (*litestream.DB, *sql.DB) {
tb.Helper()
db := MustOpenDB(tb) db := MustOpenDB(tb)
return db, MustOpenSQLDB(tb, db.Path()) return db, MustOpenSQLDB(tb, db.Path())
} }
// MustCloseDBs closes db & sqldb and removes the parent directory. // MustCloseDBs closes db & sqldb and removes the parent directory.
func MustCloseDBs(tb testing.TB, db *litestream.DB, sqldb *sql.DB) { func MustCloseDBs(tb testing.TB, db *litestream.DB, sqldb *sql.DB) {
tb.Helper()
MustCloseDB(tb, db) MustCloseDB(tb, db)
MustCloseSQLDB(tb, sqldb) MustCloseSQLDB(tb, sqldb)
} }
@@ -619,7 +626,7 @@ func MustOpenDBAt(tb testing.TB, path string) *litestream.DB {
// MustCloseDB closes db and removes its parent directory. // MustCloseDB closes db and removes its parent directory.
func MustCloseDB(tb testing.TB, db *litestream.DB) { func MustCloseDB(tb testing.TB, db *litestream.DB) {
tb.Helper() tb.Helper()
if err := db.Close(); err != nil { if err := db.Close(); err != nil && !strings.Contains(err.Error(), `database is closed`) {
tb.Fatal(err) tb.Fatal(err)
} else if err := os.RemoveAll(filepath.Dir(db.Path())); err != nil { } else if err := os.RemoveAll(filepath.Dir(db.Path())); err != nil {
tb.Fatal(err) tb.Fatal(err)

17
etc/build.ps1 Normal file
View File

@@ -0,0 +1,17 @@
[CmdletBinding()]
Param (
[Parameter(Mandatory = $true)]
[String] $Version
)
$ErrorActionPreference = "Stop"
# Update working directory.
Push-Location $PSScriptRoot
Trap {
Pop-Location
}
Invoke-Expression "candle.exe -nologo -arch x64 -ext WixUtilExtension -out litestream.wixobj -dVersion=`"$Version`" litestream.wxs"
Invoke-Expression "light.exe -nologo -spdb -ext WixUtilExtension -out `"litestream-${Version}.msi`" litestream.wixobj"
Pop-Location

89
etc/litestream.wxs Normal file
View File

@@ -0,0 +1,89 @@
<?xml version="1.0" encoding="utf-8"?>
<Wix
xmlns="http://schemas.microsoft.com/wix/2006/wi"
xmlns:util="http://schemas.microsoft.com/wix/UtilExtension"
>
<?if $(sys.BUILDARCH)=x64 ?>
<?define PlatformProgramFiles = "ProgramFiles64Folder" ?>
<?else ?>
<?define PlatformProgramFiles = "ProgramFilesFolder" ?>
<?endif ?>
<Product
Id="*"
UpgradeCode="5371367e-58b3-4e52-be0d-46945eb71ce6"
Name="Litestream"
Version="$(var.Version)"
Manufacturer="Litestream"
Language="1033"
Codepage="1252"
>
<Package
Id="*"
Manufacturer="Litestream"
InstallScope="perMachine"
InstallerVersion="500"
Description="Litestream $(var.Version) installer"
Compressed="yes"
/>
<Media Id="1" Cabinet="litestream.cab" EmbedCab="yes"/>
<MajorUpgrade
Schedule="afterInstallInitialize"
DowngradeErrorMessage="A later version of [ProductName] is already installed. Setup will now exit."
/>
<Directory Id="TARGETDIR" Name="SourceDir">
<Directory Id="$(var.PlatformProgramFiles)">
<Directory Id="APPLICATIONROOTDIRECTORY" Name="Litestream"/>
</Directory>
</Directory>
<ComponentGroup Id="Files">
<Component Directory="APPLICATIONROOTDIRECTORY">
<File
Id="litestream.exe"
Name="litestream.exe"
Source="litestream.exe"
KeyPath="yes"
/>
<ServiceInstall
Id="InstallService"
Name="Litestream"
DisplayName="Litestream"
Description="Replicates SQLite databases"
ErrorControl="normal"
Start="auto"
Type="ownProcess"
>
<util:ServiceConfig
FirstFailureActionType="restart"
SecondFailureActionType="restart"
ThirdFailureActionType="restart"
RestartServiceDelayInSeconds="60"
/>
<ServiceDependency Id="wmiApSrv" />
</ServiceInstall>
<ServiceControl
Id="ServiceStateControl"
Name="Litestream"
Remove="uninstall"
Start="install"
Stop="both"
/>
<util:EventSource
Log="Application"
Name="Litestream"
EventMessageFile="%SystemRoot%\System32\EventCreate.exe"
/>
</Component>
</ComponentGroup>
<Feature Id="DefaultFeature" Level="1">
<ComponentGroupRef Id="Files" />
</Feature>
</Product>
</Wix>

View File

@@ -6,5 +6,5 @@
# - path: /path/to/primary/db # Database to replicate from # - path: /path/to/primary/db # Database to replicate from
# replicas: # replicas:
# - path: /path/to/replica # File-based replication # - path: /path/to/replica # File-based replication
# - path: s3://my.bucket.com/db # S3-based replication # - url: s3://my.bucket.com/db # S3-based replication

View File

@@ -1,13 +1,13 @@
name: litestream name: litestream
arch: amd64 arch: "${GOARCH}"
platform: linux platform: "${GOOS}"
version: "${LITESTREAM_VERSION}" version: "${LITESTREAM_VERSION}"
section: "default" section: "default"
priority: "extra" priority: "extra"
maintainer: "Ben Johnson <benbjohnson@yahoo.com>" maintainer: "Ben Johnson <benbjohnson@yahoo.com>"
description: Litestream is a tool for real-time replication of SQLite databases. description: Litestream is a tool for real-time replication of SQLite databases.
homepage: "https://github.com/benbjohnson/litestream" homepage: "https://github.com/benbjohnson/litestream"
license: "GPLv3" license: "Apache 2"
contents: contents:
- src: ./litestream - src: ./litestream
dst: /usr/bin/litestream dst: /usr/bin/litestream

2
go.mod
View File

@@ -8,5 +8,7 @@ require (
github.com/mattn/go-sqlite3 v1.14.5 github.com/mattn/go-sqlite3 v1.14.5
github.com/pierrec/lz4/v4 v4.1.3 github.com/pierrec/lz4/v4 v4.1.3
github.com/prometheus/client_golang v1.9.0 github.com/prometheus/client_golang v1.9.0
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect
golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e
gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v2 v2.4.0
) )

2
go.sum
View File

@@ -316,6 +316,8 @@ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=

View File

@@ -36,6 +36,7 @@ const (
// Litestream errors. // Litestream errors.
var ( var (
ErrNoGeneration = errors.New("no generation available")
ErrNoSnapshots = errors.New("no snapshots available") ErrNoSnapshots = errors.New("no snapshots available")
ErrChecksumMismatch = errors.New("invalid replica, checksum mismatch") ErrChecksumMismatch = errors.New("invalid replica, checksum mismatch")
) )
@@ -151,8 +152,9 @@ func readWALHeader(filename string) ([]byte, error) {
return buf[:n], err return buf[:n], err
} }
// readFileAt reads a slice from a file. // readWALFileAt reads a slice from a file. Do not use this with database files
func readFileAt(filename string, offset, n int64) ([]byte, error) { // as it causes problems with non-OFD locks.
func readWALFileAt(filename string, offset, n int64) ([]byte, error) {
f, err := os.Open(filename) f, err := os.Open(filename)
if err != nil { if err != nil {
return nil, err return nil, err

View File

@@ -4,6 +4,7 @@ import (
"context" "context"
"encoding/binary" "encoding/binary"
"fmt" "fmt"
"hash/crc64"
"io" "io"
"io/ioutil" "io/ioutil"
"log" "log"
@@ -31,10 +32,13 @@ type Replica interface {
DB() *DB DB() *DB
// Starts replicating in a background goroutine. // Starts replicating in a background goroutine.
Start(ctx context.Context) Start(ctx context.Context) error
// Stops all replication processing. Blocks until processing stopped. // Stops all replication processing. Blocks until processing stopped.
Stop() Stop(hard bool) error
// Performs a backup of outstanding WAL frames to the replica.
Sync(ctx context.Context) error
// Returns the last replication position. // Returns the last replication position.
LastPos() Pos LastPos() Pos
@@ -90,6 +94,9 @@ type FileReplica struct {
mu sync.RWMutex mu sync.RWMutex
pos Pos // last position pos Pos // last position
muf sync.Mutex
f *os.File // long-running file descriptor to avoid non-OFD lock issues
wg sync.WaitGroup wg sync.WaitGroup
cancel func() cancel func()
@@ -98,8 +105,11 @@ type FileReplica struct {
walIndexGauge prometheus.Gauge walIndexGauge prometheus.Gauge
walOffsetGauge prometheus.Gauge walOffsetGauge prometheus.Gauge
// Frequency to create new snapshots.
SnapshotInterval time.Duration
// Time to keep snapshots and related WAL files. // Time to keep snapshots and related WAL files.
// Database is snapshotted after interval and older WAL files are discarded. // Database is snapshotted after interval, if needed, and older WAL files are discarded.
Retention time.Duration Retention time.Duration
// Time between checks for retention. // Time between checks for retention.
@@ -389,29 +399,45 @@ func (r *FileReplica) WALs(ctx context.Context) ([]*WALInfo, error) {
} }
// Start starts replication for a given generation. // Start starts replication for a given generation.
func (r *FileReplica) Start(ctx context.Context) { func (r *FileReplica) Start(ctx context.Context) (err error) {
// Ignore if replica is being used sychronously. // Ignore if replica is being used sychronously.
if !r.MonitorEnabled { if !r.MonitorEnabled {
return return nil
} }
// Stop previous replication. // Stop previous replication.
r.Stop() r.Stop(false)
// Wrap context with cancelation. // Wrap context with cancelation.
ctx, r.cancel = context.WithCancel(ctx) ctx, r.cancel = context.WithCancel(ctx)
// Start goroutine to replicate data. // Start goroutine to replicate data.
r.wg.Add(3) r.wg.Add(4)
go func() { defer r.wg.Done(); r.monitor(ctx) }() go func() { defer r.wg.Done(); r.monitor(ctx) }()
go func() { defer r.wg.Done(); r.retainer(ctx) }() go func() { defer r.wg.Done(); r.retainer(ctx) }()
go func() { defer r.wg.Done(); r.snapshotter(ctx) }()
go func() { defer r.wg.Done(); r.validator(ctx) }() go func() { defer r.wg.Done(); r.validator(ctx) }()
return nil
} }
// Stop cancels any outstanding replication and blocks until finished. // Stop cancels any outstanding replication and blocks until finished.
func (r *FileReplica) Stop() { //
// Performing a hard stop will close the DB file descriptor which could release
// locks on per-process locks. Hard stops should only be performed when
// stopping the entire process.
func (r *FileReplica) Stop(hard bool) (err error) {
r.cancel() r.cancel()
r.wg.Wait() r.wg.Wait()
r.muf.Lock()
defer r.muf.Unlock()
if hard && r.f != nil {
if e := r.f.Close(); e != nil && err == nil {
err = e
}
}
return err
} }
// monitor runs in a separate goroutine and continuously replicates the DB. // monitor runs in a separate goroutine and continuously replicates the DB.
@@ -446,7 +472,18 @@ func (r *FileReplica) monitor(ctx context.Context) {
// retainer runs in a separate goroutine and handles retention. // retainer runs in a separate goroutine and handles retention.
func (r *FileReplica) retainer(ctx context.Context) { func (r *FileReplica) retainer(ctx context.Context) {
ticker := time.NewTicker(r.RetentionCheckInterval) // Disable retention enforcement if retention period is non-positive.
if r.Retention <= 0 {
return
}
// Ensure check interval is not longer than retention period.
checkInterval := r.RetentionCheckInterval
if checkInterval > r.Retention {
checkInterval = r.Retention
}
ticker := time.NewTicker(checkInterval)
defer ticker.Stop() defer ticker.Stop()
for { for {
@@ -462,6 +499,28 @@ func (r *FileReplica) retainer(ctx context.Context) {
} }
} }
// snapshotter runs in a separate goroutine and handles snapshotting.
func (r *FileReplica) snapshotter(ctx context.Context) {
if r.SnapshotInterval <= 0 {
return
}
ticker := time.NewTicker(r.SnapshotInterval)
defer ticker.Stop()
for {
select {
case <-ctx.Done():
return
case <-ticker.C:
if err := r.Snapshot(ctx); err != nil && err != ErrNoGeneration {
log.Printf("%s(%s): snapshotter error: %s", r.db.Path(), r.Name(), err)
continue
}
}
}
}
// validator runs in a separate goroutine and handles periodic validation. // validator runs in a separate goroutine and handles periodic validation.
func (r *FileReplica) validator(ctx context.Context) { func (r *FileReplica) validator(ctx context.Context) {
// Initialize counters since validation occurs infrequently. // Initialize counters since validation occurs infrequently.
@@ -531,8 +590,28 @@ func (r *FileReplica) CalcPos(ctx context.Context, generation string) (pos Pos,
return pos, nil return pos, nil
} }
// Snapshot copies the entire database to the replica path.
func (r *FileReplica) Snapshot(ctx context.Context) error {
// Find current position of database.
pos, err := r.db.Pos()
if err != nil {
return fmt.Errorf("cannot determine current db generation: %w", err)
} else if pos.IsZero() {
return ErrNoGeneration
}
return r.snapshot(ctx, pos.Generation, pos.Index)
}
// snapshot copies the entire database to the replica path. // snapshot copies the entire database to the replica path.
func (r *FileReplica) snapshot(ctx context.Context, generation string, index int) error { func (r *FileReplica) snapshot(ctx context.Context, generation string, index int) error {
r.muf.Lock()
defer r.muf.Unlock()
// Issue a passive checkpoint to flush any pages to disk before snapshotting.
if _, err := r.db.db.ExecContext(ctx, `PRAGMA wal_checkpoint(PASSIVE);`); err != nil {
return fmt.Errorf("pre-snapshot checkpoint: %w", err)
}
// Acquire a read lock on the database during snapshot to prevent checkpoints. // Acquire a read lock on the database during snapshot to prevent checkpoints.
tx, err := r.db.db.Begin() tx, err := r.db.db.Begin()
if err != nil { if err != nil {
@@ -553,11 +632,50 @@ func (r *FileReplica) snapshot(ctx context.Context, generation string, index int
if err := mkdirAll(filepath.Dir(snapshotPath), r.db.dirmode, r.db.diruid, r.db.dirgid); err != nil { if err := mkdirAll(filepath.Dir(snapshotPath), r.db.dirmode, r.db.diruid, r.db.dirgid); err != nil {
return err return err
} else if err := compressFile(r.db.Path(), snapshotPath, r.db.uid, r.db.gid); err != nil { }
// Open db file descriptor, if not already open.
if r.f == nil {
if r.f, err = os.Open(r.db.Path()); err != nil {
return err
}
}
if _, err := r.f.Seek(0, io.SeekStart); err != nil {
return err return err
} }
log.Printf("%s(%s): snapshot: creating %s/%08x t=%s", r.db.Path(), r.Name(), generation, index, time.Since(startTime)) fi, err := r.f.Stat()
if err != nil {
return err
}
w, err := createFile(snapshotPath+".tmp", fi.Mode(), r.db.uid, r.db.gid)
if err != nil {
return err
}
defer w.Close()
zr := lz4.NewWriter(w)
defer zr.Close()
// Copy & compress file contents to temporary file.
if _, err := io.Copy(zr, r.f); err != nil {
return err
} else if err := zr.Close(); err != nil {
return err
} else if err := w.Sync(); err != nil {
return err
} else if err := w.Close(); err != nil {
return err
}
// Move compressed file to final location.
if err := os.Rename(snapshotPath+".tmp", snapshotPath); err != nil {
return err
}
log.Printf("%s(%s): snapshot: creating %s/%08x t=%s", r.db.Path(), r.Name(), generation, index, time.Since(startTime).Truncate(time.Millisecond))
return nil return nil
} }
@@ -756,7 +874,7 @@ func (r *FileReplica) compress(ctx context.Context, generation string) error {
} }
dst := filename + ".lz4" dst := filename + ".lz4"
if err := compressFile(filename, dst, r.db.uid, r.db.gid); err != nil { if err := compressWALFile(filename, dst, r.db.uid, r.db.gid); err != nil {
return err return err
} else if err := os.Remove(filename); err != nil { } else if err := os.Remove(filename); err != nil {
return err return err
@@ -951,34 +1069,65 @@ func SnapshotIndexAt(ctx context.Context, r Replica, generation string, timestam
return 0, ErrNoSnapshots return 0, ErrNoSnapshots
} }
index := -1 snapshotIndex := -1
var max time.Time var max time.Time
for _, snapshot := range snapshots { for _, snapshot := range snapshots {
if !timestamp.IsZero() && snapshot.CreatedAt.After(timestamp) { if snapshot.Generation != generation {
continue // generation mismatch, skip
} else if !timestamp.IsZero() && snapshot.CreatedAt.After(timestamp) {
continue // after timestamp, skip continue // after timestamp, skip
} }
// Use snapshot if it newer. // Use snapshot if it newer.
if max.IsZero() || snapshot.CreatedAt.After(max) { if max.IsZero() || snapshot.CreatedAt.After(max) {
index, max = snapshot.Index, snapshot.CreatedAt snapshotIndex, max = snapshot.Index, snapshot.CreatedAt
} }
} }
if index == -1 { if snapshotIndex == -1 {
return 0, ErrNoSnapshots return 0, ErrNoSnapshots
} }
return index, nil return snapshotIndex, nil
} }
// WALIndexAt returns the highest index for a WAL file that occurs before maxIndex & timestamp. // SnapshotIndexbyIndex returns the highest index for a snapshot within a generation
// If timestamp is zero, returns the highest WAL index. // that occurs before a given index. If index is MaxInt32, returns the latest snapshot.
func SnapshotIndexByIndex(ctx context.Context, r Replica, generation string, index int) (int, error) {
snapshots, err := r.Snapshots(ctx)
if err != nil {
return 0, err
} else if len(snapshots) == 0 {
return 0, ErrNoSnapshots
}
snapshotIndex := -1
for _, snapshot := range snapshots {
if index < math.MaxInt32 && snapshot.Index > index {
continue // after index, skip
}
// Use snapshot if it newer.
if snapshotIndex == -1 || snapshotIndex >= snapshotIndex {
snapshotIndex = snapshot.Index
}
}
if snapshotIndex == -1 {
return 0, ErrNoSnapshots
}
return snapshotIndex, nil
}
// WALIndexAt returns the highest index for a WAL file that occurs before
// maxIndex & timestamp. If timestamp is zero, returns the highest WAL index.
// Returns -1 if no WAL found and MaxInt32 specified.
func WALIndexAt(ctx context.Context, r Replica, generation string, maxIndex int, timestamp time.Time) (int, error) { func WALIndexAt(ctx context.Context, r Replica, generation string, maxIndex int, timestamp time.Time) (int, error) {
wals, err := r.WALs(ctx) wals, err := r.WALs(ctx)
if err != nil { if err != nil {
return 0, err return 0, err
} }
var index int index := -1
for _, wal := range wals { for _, wal := range wals {
if wal.Generation != generation { if wal.Generation != generation {
continue continue
@@ -996,14 +1145,15 @@ func WALIndexAt(ctx context.Context, r Replica, generation string, maxIndex int,
} }
// If max index is specified but not found, return an error. // If max index is specified but not found, return an error.
if maxIndex != math.MaxInt64 && index != maxIndex { if maxIndex != math.MaxInt32 && index != maxIndex {
return index, fmt.Errorf("unable to locate index %d in generation %q, highest index was %d", maxIndex, generation, index) return index, fmt.Errorf("unable to locate index %d in generation %q, highest index was %d", maxIndex, generation, index)
} }
return index, nil return index, nil
} }
// compressFile compresses a file and replaces it with a new file with a .lz4 extension. // compressWALFile compresses a file and replaces it with a new file with a .lz4 extension.
func compressFile(src, dst string, uid, gid int) error { // Do not use this on database files because of issues with non-OFD locks.
func compressWALFile(src, dst string, uid, gid int) error {
r, err := os.Open(src) r, err := os.Open(src)
if err != nil { if err != nil {
return err return err
@@ -1053,8 +1203,7 @@ func ValidateReplica(ctx context.Context, r Replica) error {
// Compute checksum of primary database under lock. This prevents a // Compute checksum of primary database under lock. This prevents a
// sync from occurring and the database will not be written. // sync from occurring and the database will not be written.
primaryPath := filepath.Join(tmpdir, "primary") chksum0, pos, err := db.CRC64(ctx)
chksum0, pos, err := db.CRC64()
if err != nil { if err != nil {
return fmt.Errorf("cannot compute checksum: %w", err) return fmt.Errorf("cannot compute checksum: %w", err)
} }
@@ -1076,10 +1225,19 @@ func ValidateReplica(ctx context.Context, r Replica) error {
} }
// Open file handle for restored database. // Open file handle for restored database.
chksum1, err := checksumFile(restorePath) // NOTE: This open is ok as the restored database is not managed by litestream.
f, err := os.Open(restorePath)
if err != nil { if err != nil {
return err return err
} }
defer f.Close()
// Read entire file into checksum.
h := crc64.New(crc64.MakeTable(crc64.ISO))
if _, err := io.Copy(h, f); err != nil {
return err
}
chksum1 := h.Sum64()
status := "ok" status := "ok"
mismatch := chksum0 != chksum1 mismatch := chksum0 != chksum1
@@ -1091,15 +1249,6 @@ func ValidateReplica(ctx context.Context, r Replica) error {
// Validate checksums match. // Validate checksums match.
if mismatch { if mismatch {
internal.ReplicaValidationTotalCounterVec.WithLabelValues(db.Path(), r.Name(), "error").Inc() internal.ReplicaValidationTotalCounterVec.WithLabelValues(db.Path(), r.Name(), "error").Inc()
// Compress mismatched databases and report temporary path for investigation.
if err := compressFile(primaryPath, primaryPath+".lz4", db.uid, db.gid); err != nil {
return fmt.Errorf("cannot compress primary db: %w", err)
} else if err := compressFile(restorePath, restorePath+".lz4", db.uid, db.gid); err != nil {
return fmt.Errorf("cannot compress replica db: %w", err)
}
log.Printf("%s(%s): validator: mismatch files @ %s", db.Path(), r.Name(), tmpdir)
return ErrChecksumMismatch return ErrChecksumMismatch
} }

View File

@@ -15,7 +15,7 @@ func TestFileReplica_Sync(t *testing.T) {
r := NewTestFileReplica(t, db) r := NewTestFileReplica(t, db)
// Sync database & then sync replica. // Sync database & then sync replica.
if err := db.Sync(); err != nil { if err := db.Sync(context.Background()); err != nil {
t.Fatal(err) t.Fatal(err)
} else if err := r.Sync(context.Background()); err != nil { } else if err := r.Sync(context.Background()); err != nil {
t.Fatal(err) t.Fatal(err)
@@ -47,7 +47,7 @@ func TestFileReplica_Sync(t *testing.T) {
// Sync periodically. // Sync periodically.
if i%100 == 0 || i == n-1 { if i%100 == 0 || i == n-1 {
if err := db.Sync(); err != nil { if err := db.Sync(context.Background()); err != nil {
t.Fatal(err) t.Fatal(err)
} else if err := r.Sync(context.Background()); err != nil { } else if err := r.Sync(context.Background()); err != nil {
t.Fatal(err) t.Fatal(err)

194
s3/s3.go
View File

@@ -3,12 +3,16 @@ package s3
import ( import (
"bytes" "bytes"
"context" "context"
"crypto/tls"
"fmt" "fmt"
"io" "io"
"io/ioutil" "io/ioutil"
"log" "log"
"net"
"net/http"
"os" "os"
"path" "path"
"regexp"
"sync" "sync"
"time" "time"
@@ -50,6 +54,9 @@ type Replica struct {
snapshotMu sync.Mutex snapshotMu sync.Mutex
pos litestream.Pos // last position pos litestream.Pos // last position
muf sync.Mutex
f *os.File // long-lived read-only db file descriptor
wg sync.WaitGroup wg sync.WaitGroup
cancel func() cancel func()
@@ -72,10 +79,16 @@ type Replica struct {
Region string Region string
Bucket string Bucket string
Path string Path string
Endpoint string
ForcePathStyle bool
SkipVerify bool
// Time between syncs with the shadow WAL. // Time between syncs with the shadow WAL.
SyncInterval time.Duration SyncInterval time.Duration
// Frequency to create new snapshots.
SnapshotInterval time.Duration
// Time to keep snapshots and related WAL files. // Time to keep snapshots and related WAL files.
// Database is snapshotted after interval and older WAL files are discarded. // Database is snapshotted after interval and older WAL files are discarded.
Retention time.Duration Retention time.Duration
@@ -410,29 +423,47 @@ func (r *Replica) WALs(ctx context.Context) ([]*litestream.WALInfo, error) {
} }
// Start starts replication for a given generation. // Start starts replication for a given generation.
func (r *Replica) Start(ctx context.Context) { func (r *Replica) Start(ctx context.Context) (err error) {
// Ignore if replica is being used sychronously. // Ignore if replica is being used sychronously.
if !r.MonitorEnabled { if !r.MonitorEnabled {
return return nil
} }
// Stop previous replication. // Stop previous replication.
r.Stop() r.Stop(false)
// Wrap context with cancelation. // Wrap context with cancelation.
ctx, r.cancel = context.WithCancel(ctx) ctx, r.cancel = context.WithCancel(ctx)
// Start goroutines to manage replica data. // Start goroutines to manage replica data.
r.wg.Add(3) r.wg.Add(4)
go func() { defer r.wg.Done(); r.monitor(ctx) }() go func() { defer r.wg.Done(); r.monitor(ctx) }()
go func() { defer r.wg.Done(); r.retainer(ctx) }() go func() { defer r.wg.Done(); r.retainer(ctx) }()
go func() { defer r.wg.Done(); r.snapshotter(ctx) }()
go func() { defer r.wg.Done(); r.validator(ctx) }() go func() { defer r.wg.Done(); r.validator(ctx) }()
return nil
} }
// Stop cancels any outstanding replication and blocks until finished. // Stop cancels any outstanding replication and blocks until finished.
func (r *Replica) Stop() { //
// Performing a hard stop will close the DB file descriptor which could release
// locks on per-process locks. Hard stops should only be performed when
// stopping the entire process.
func (r *Replica) Stop(hard bool) (err error) {
r.cancel() r.cancel()
r.wg.Wait() r.wg.Wait()
r.muf.Lock()
defer r.muf.Unlock()
if hard && r.f != nil {
if e := r.f.Close(); e != nil && err == nil {
err = e
}
}
return err
} }
// monitor runs in a separate goroutine and continuously replicates the DB. // monitor runs in a separate goroutine and continuously replicates the DB.
@@ -475,7 +506,18 @@ func (r *Replica) monitor(ctx context.Context) {
// retainer runs in a separate goroutine and handles retention. // retainer runs in a separate goroutine and handles retention.
func (r *Replica) retainer(ctx context.Context) { func (r *Replica) retainer(ctx context.Context) {
ticker := time.NewTicker(r.RetentionCheckInterval) // Disable retention enforcement if retention period is non-positive.
if r.Retention <= 0 {
return
}
// Ensure check interval is not longer than retention period.
checkInterval := r.RetentionCheckInterval
if checkInterval > r.Retention {
checkInterval = r.Retention
}
ticker := time.NewTicker(checkInterval)
defer ticker.Stop() defer ticker.Stop()
for { for {
@@ -491,6 +533,28 @@ func (r *Replica) retainer(ctx context.Context) {
} }
} }
// snapshotter runs in a separate goroutine and handles snapshotting.
func (r *Replica) snapshotter(ctx context.Context) {
if r.SnapshotInterval <= 0 {
return
}
ticker := time.NewTicker(r.SnapshotInterval)
defer ticker.Stop()
for {
select {
case <-ctx.Done():
return
case <-ticker.C:
if err := r.Snapshot(ctx); err != nil && err != litestream.ErrNoGeneration {
log.Printf("%s(%s): snapshotter error: %s", r.db.Path(), r.Name(), err)
continue
}
}
}
}
// validator runs in a separate goroutine and handles periodic validation. // validator runs in a separate goroutine and handles periodic validation.
func (r *Replica) validator(ctx context.Context) { func (r *Replica) validator(ctx context.Context) {
// Initialize counters since validation occurs infrequently. // Initialize counters since validation occurs infrequently.
@@ -568,8 +632,28 @@ func (r *Replica) CalcPos(ctx context.Context, generation string) (pos litestrea
return pos, nil return pos, nil
} }
// Snapshot copies the entire database to the replica path.
func (r *Replica) Snapshot(ctx context.Context) error {
// Find current position of database.
pos, err := r.db.Pos()
if err != nil {
return fmt.Errorf("cannot determine current db generation: %w", err)
} else if pos.IsZero() {
return litestream.ErrNoGeneration
}
return r.snapshot(ctx, pos.Generation, pos.Index)
}
// snapshot copies the entire database to the replica path. // snapshot copies the entire database to the replica path.
func (r *Replica) snapshot(ctx context.Context, generation string, index int) error { func (r *Replica) snapshot(ctx context.Context, generation string, index int) error {
r.muf.Lock()
defer r.muf.Unlock()
// Issue a passive checkpoint to flush any pages to disk before snapshotting.
if _, err := r.db.SQLDB().ExecContext(ctx, `PRAGMA wal_checkpoint(PASSIVE);`); err != nil {
return fmt.Errorf("pre-snapshot checkpoint: %w", err)
}
// Acquire a read lock on the database during snapshot to prevent checkpoints. // Acquire a read lock on the database during snapshot to prevent checkpoints.
tx, err := r.db.SQLDB().Begin() tx, err := r.db.SQLDB().Begin()
if err != nil { if err != nil {
@@ -580,14 +664,21 @@ func (r *Replica) snapshot(ctx context.Context, generation string, index int) er
} }
defer func() { _ = tx.Rollback() }() defer func() { _ = tx.Rollback() }()
// Open database file handle. // Open long-lived file descriptor on database.
f, err := os.Open(r.db.Path()) if r.f == nil {
if err != nil { if r.f, err = os.Open(r.db.Path()); err != nil {
return err return err
} }
defer f.Close() }
fi, err := f.Stat() // Move the file descriptor to the beginning. We only use one long lived
// file descriptor because some operating systems will remove the database
// lock when closing a separate file descriptor on the DB.
if _, err := r.f.Seek(0, io.SeekStart); err != nil {
return err
}
fi, err := r.f.Stat()
if err != nil { if err != nil {
return err return err
} }
@@ -595,7 +686,7 @@ func (r *Replica) snapshot(ctx context.Context, generation string, index int) er
pr, pw := io.Pipe() pr, pw := io.Pipe()
zw := lz4.NewWriter(pw) zw := lz4.NewWriter(pw)
go func() { go func() {
if _, err := io.Copy(zw, f); err != nil { if _, err := io.Copy(zw, r.f); err != nil {
_ = pw.CloseWithError(err) _ = pw.CloseWithError(err)
return return
} }
@@ -616,8 +707,7 @@ func (r *Replica) snapshot(ctx context.Context, generation string, index int) er
r.putOperationTotalCounter.Inc() r.putOperationTotalCounter.Inc()
r.putOperationBytesCounter.Add(float64(fi.Size())) r.putOperationBytesCounter.Add(float64(fi.Size()))
log.Printf("%s(%s): snapshot: creating %s/%08x t=%s", r.db.Path(), r.Name(), generation, index, time.Since(startTime)) log.Printf("%s(%s): snapshot: creating %s/%08x t=%s", r.db.Path(), r.Name(), generation, index, time.Since(startTime).Truncate(time.Millisecond))
return nil return nil
} }
@@ -646,17 +736,25 @@ func (r *Replica) Init(ctx context.Context) (err error) {
return nil return nil
} }
// Look up region if not specified. // Look up region if not specified and no endpoint is used.
// Endpoints are typically used for non-S3 object stores and do not
// necessarily require a region.
region := r.Region region := r.Region
if region == "" { if region == "" {
if r.Endpoint == "" {
if region, err = r.findBucketRegion(ctx, r.Bucket); err != nil { if region, err = r.findBucketRegion(ctx, r.Bucket); err != nil {
return fmt.Errorf("cannot lookup bucket region: %w", err) return fmt.Errorf("cannot lookup bucket region: %w", err)
} }
} else {
region = "us-east-1" // default for non-S3 object stores
}
} }
// Create new AWS session. // Create new AWS session.
config := r.config() config := r.config()
if region != "" {
config.Region = aws.String(region) config.Region = aws.String(region)
}
sess, err := session.NewSession(config) sess, err := session.NewSession(config)
if err != nil { if err != nil {
return fmt.Errorf("cannot create aws session: %w", err) return fmt.Errorf("cannot create aws session: %w", err)
@@ -673,6 +771,18 @@ func (r *Replica) config() *aws.Config {
if r.AccessKeyID != "" || r.SecretAccessKey != "" { if r.AccessKeyID != "" || r.SecretAccessKey != "" {
config.Credentials = credentials.NewStaticCredentials(r.AccessKeyID, r.SecretAccessKey, "") config.Credentials = credentials.NewStaticCredentials(r.AccessKeyID, r.SecretAccessKey, "")
} }
if r.Endpoint != "" {
config.Endpoint = aws.String(r.Endpoint)
}
if r.ForcePathStyle {
config.S3ForcePathStyle = aws.Bool(r.ForcePathStyle)
}
if r.SkipVerify {
config.HTTPClient = &http.Client{Transport: &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
}}
}
return config return config
} }
@@ -1027,6 +1137,60 @@ func (r *Replica) deleteGenerationBefore(ctx context.Context, generation string,
return nil return nil
} }
// ParseHost extracts data from a hostname depending on the service provider.
func ParseHost(s string) (bucket, region, endpoint string, forcePathStyle bool) {
// Extract port if one is specified.
host, port, err := net.SplitHostPort(s)
if err != nil {
host = s
}
// Default to path-based URLs, except for with AWS S3 itself.
forcePathStyle = true
// Extract fields from provider-specific host formats.
scheme := "https"
if a := localhostRegex.FindStringSubmatch(host); a != nil {
bucket, region = a[1], "us-east-1"
scheme, endpoint = "http", "localhost"
} else if a := gcsRegex.FindStringSubmatch(host); a != nil {
bucket, region = a[1], "us-east-1"
endpoint = "storage.googleapis.com"
} else if a := digitalOceanRegex.FindStringSubmatch(host); a != nil {
bucket, region = a[1], a[2]
endpoint = fmt.Sprintf("%s.digitaloceanspaces.com", region)
} else if a := linodeRegex.FindStringSubmatch(host); a != nil {
bucket, region = a[1], a[2]
endpoint = fmt.Sprintf("%s.linodeobjects.com", region)
} else if a := backblazeRegex.FindStringSubmatch(host); a != nil {
bucket, region = a[1], a[2]
endpoint = fmt.Sprintf("s3.%s.backblazeb2.com", region)
} else {
bucket = host
forcePathStyle = false
}
// Add port back to endpoint, if available.
if endpoint != "" && port != "" {
endpoint = net.JoinHostPort(endpoint, port)
}
// Prepend scheme to endpoint.
if endpoint != "" {
endpoint = scheme + "://" + endpoint
}
return bucket, region, endpoint, forcePathStyle
}
var (
localhostRegex = regexp.MustCompile(`^(?:(.+)\.)?localhost$`)
digitalOceanRegex = regexp.MustCompile(`^(?:(.+)\.)?([^.]+)\.digitaloceanspaces.com$`)
linodeRegex = regexp.MustCompile(`^(?:(.+)\.)?([^.]+)\.linodeobjects.com$`)
backblazeRegex = regexp.MustCompile(`^(?:(.+)\.)?s3.([^.]+)\.backblazeb2.com$`)
gcsRegex = regexp.MustCompile(`^(?:(.+)\.)?storage.googleapis.com$`)
)
// S3 metrics. // S3 metrics.
var ( var (
operationTotalCounterVec = promauto.NewCounterVec(prometheus.CounterOpts{ operationTotalCounterVec = promauto.NewCounterVec(prometheus.CounterOpts{

80
s3/s3_test.go Normal file
View File

@@ -0,0 +1,80 @@
package s3_test
import (
"testing"
"github.com/benbjohnson/litestream/s3"
)
func TestParseHost(t *testing.T) {
// Ensure non-specific hosts return as buckets.
t.Run("S3", func(t *testing.T) {
bucket, region, endpoint, forcePathStyle := s3.ParseHost(`test.litestream.io`)
if got, want := bucket, `test.litestream.io`; got != want {
t.Fatalf("bucket=%q, want %q", got, want)
} else if got, want := region, ``; got != want {
t.Fatalf("region=%q, want %q", got, want)
} else if got, want := endpoint, ``; got != want {
t.Fatalf("endpoint=%q, want %q", got, want)
} else if got, want := forcePathStyle, false; got != want {
t.Fatalf("forcePathStyle=%v, want %v", got, want)
}
})
// Ensure localhosts use an HTTP endpoint and extract the bucket name.
t.Run("Localhost", func(t *testing.T) {
t.Run("WithPort", func(t *testing.T) {
bucket, region, endpoint, forcePathStyle := s3.ParseHost(`test.localhost:9000`)
if got, want := bucket, `test`; got != want {
t.Fatalf("bucket=%q, want %q", got, want)
} else if got, want := region, `us-east-1`; got != want {
t.Fatalf("region=%q, want %q", got, want)
} else if got, want := endpoint, `http://localhost:9000`; got != want {
t.Fatalf("endpoint=%q, want %q", got, want)
} else if got, want := forcePathStyle, true; got != want {
t.Fatalf("forcePathStyle=%v, want %v", got, want)
}
})
t.Run("WithoutPort", func(t *testing.T) {
bucket, region, endpoint, forcePathStyle := s3.ParseHost(`test.localhost`)
if got, want := bucket, `test`; got != want {
t.Fatalf("bucket=%q, want %q", got, want)
} else if got, want := region, `us-east-1`; got != want {
t.Fatalf("region=%q, want %q", got, want)
} else if got, want := endpoint, `http://localhost`; got != want {
t.Fatalf("endpoint=%q, want %q", got, want)
} else if got, want := forcePathStyle, true; got != want {
t.Fatalf("forcePathStyle=%v, want %v", got, want)
}
})
})
// Ensure backblaze B2 URLs extract bucket, region, & endpoint from host.
t.Run("Backblaze", func(t *testing.T) {
bucket, region, endpoint, forcePathStyle := s3.ParseHost(`test-123.s3.us-west-000.backblazeb2.com`)
if got, want := bucket, `test-123`; got != want {
t.Fatalf("bucket=%q, want %q", got, want)
} else if got, want := region, `us-west-000`; got != want {
t.Fatalf("region=%q, want %q", got, want)
} else if got, want := endpoint, `https://s3.us-west-000.backblazeb2.com`; got != want {
t.Fatalf("endpoint=%q, want %q", got, want)
} else if got, want := forcePathStyle, true; got != want {
t.Fatalf("forcePathStyle=%v, want %v", got, want)
}
})
// Ensure GCS URLs extract bucket & endpoint from host.
t.Run("GCS", func(t *testing.T) {
bucket, region, endpoint, forcePathStyle := s3.ParseHost(`litestream.io.storage.googleapis.com`)
if got, want := bucket, `litestream.io`; got != want {
t.Fatalf("bucket=%q, want %q", got, want)
} else if got, want := region, `us-east-1`; got != want {
t.Fatalf("region=%q, want %q", got, want)
} else if got, want := endpoint, `https://storage.googleapis.com`; got != want {
t.Fatalf("endpoint=%q, want %q", got, want)
} else if got, want := forcePathStyle, true; got != want {
t.Fatalf("forcePathStyle=%v, want %v", got, want)
}
})
}