Compare commits

272 Commits

Author SHA1 Message Date
c2946ef0d0 moved metapath 2024-10-12 11:21:45 +01:00
Ben Johnson
977d4a5ee4 Fix replica-without-db logger (#512) 2023-10-24 09:51:17 -05:00
Ben Johnson
c81010e7ab Fix darwin make target 2023-10-23 15:01:27 -06:00
guangwu
c1ae968188 fix: typo (#509) 2023-10-19 18:29:04 -06:00
Toni Spets
9f0e50ddf7 Add missing slog calls to commands (#508) 2023-10-19 18:28:49 -06:00
Toni Spets
fe9ab5c517 Force truncation checkpoint if WAL becomes runaway (#473) 2023-10-19 18:27:51 -06:00
Toni Spets
d02ba97453 Sync replica snapshots to previous (#480) 2023-10-19 18:27:15 -06:00
Toni Spets
b1abd6bd99 Use structured logging with slog (#475) 2023-10-16 15:05:22 -06:00
Ben Johnson
fd892eef6d Re-add arm/v7 build to Docker (#502) 2023-08-15 07:37:32 -06:00
Markus Schanz
1bfcaa4a17 Recognize Scaleway S3 replica URLs (#499) 2023-08-13 18:07:19 -06:00
Alex Garcia
a369b05ee4 Build darwin-arm64 build during make dist-macos (#500) 2023-08-09 13:00:50 -06:00
Toni Spets
e0493f979a Copy WAL frames through temp file to shadow (#474) 2023-08-08 11:40:43 -06:00
Ben Johnson
016546a3d5 Static release builds only (#497) 2023-08-08 11:33:08 -06:00
Ben Johnson
10f97f90f2 Fix GitHub Action OS (#496) 2023-08-08 10:39:41 -06:00
Ben Johnson
3de4391349 Update dependencies (#495) 2023-08-08 09:31:59 -06:00
Toni Spets
2512d35d8d Prevent checkpoints during snapshots (#477) 2023-08-07 20:20:27 -06:00
Ben Toews
749bc0d95a Allow DB.MetaPath to be configured (#485) 2023-06-09 10:26:51 -06:00
Ben Toews
2045363cd1 Don't kill exec process immediately (#484) 2023-06-08 10:57:50 -06:00
Evan Phoenix
18760d2a7a Plumb a custom logger through the core rather than only in Restore (#481) 2023-05-19 20:34:50 -06:00
Ben Johnson
ad3d65382f Update GitHub Actions (#467) 2023-05-02 17:11:41 -06:00
Tyler Davis
4abb3d15f2 fix: update go versions in mod and docker
- Update Go module to v1.19 format
- Docker builder pinned to Go v1.20.1
- Alpine image pinned to 3.17.2 (rather than `latest`)
2023-05-02 16:07:46 -06:00
Erik Kristensen
3368b7cf44 fix: remove debug code 2023-05-02 16:03:31 -06:00
Erik Kristensen
ae670b0d27 fix: aws credential chain by using aws.Config 2023-05-02 16:03:31 -06:00
Lincoln Stoll
5afd0bf161 Handle errors when deleting objects from S3
I recently noticed that the cost for ListBucket calls was increasing for an
application that was using Litestream. After investigating it seemed that the
bucket had retained the entire history of data, while Litestream was
continually logging that it was deleting the same data:

```
2022-10-30T12:00:27Z (s3): wal segmented deleted before 0792d3393bf79ced/00000233: n=1428
<snip>
2022-10-30T13:00:24Z (s3): wal segmented deleted before 0792d3393bf79ced/00000233: n=1428
```

This is occurring because DeleteObjects is a batch call that returns
the individual object deletion errors in the response[1]. The S3 replica client
discards the response and only handles errors from the API call itself. I had
a misconfigured IAM policy that meant all deletes were failing, but this never
bubbled up as a real error.

To fix this, I added a check of the response body to handle any errors the
operation might have encountered. Because this may include a large number of
errors (in this case 1428 of them), the output is summarized to avoid an overly
large error message. Items that are not found do not return an error[2];
they are still marked as deleted, so this change should be in line with
the original intent of this code.

1: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjects.html#API_DeleteObjects_Example_2
2: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjects.html
2023-05-02 16:03:31 -06:00
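The fix described above boils down to inspecting the `Errors` field of the `DeleteObjects` response. A minimal sketch using the v1 aws-sdk-go API (the `deleteObjects` helper and its summarized error format are illustrative, not Litestream's actual code):

```go
package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// deleteObjects issues a batch delete and surfaces per-object failures.
// DeleteObjects can succeed as an API call while every individual deletion
// fails (e.g. due to a misconfigured IAM policy), so the response body
// must be checked as well.
func deleteObjects(svc *s3.S3, bucket string, keys []string) error {
	objects := make([]*s3.ObjectIdentifier, 0, len(keys))
	for _, key := range keys {
		objects = append(objects, &s3.ObjectIdentifier{Key: aws.String(key)})
	}

	out, err := svc.DeleteObjects(&s3.DeleteObjectsInput{
		Bucket: aws.String(bucket),
		Delete: &s3.Delete{Objects: objects},
	})
	if err != nil {
		return err // the batch call itself failed
	}

	// Summarize per-object errors so 1,400+ failures don't produce an
	// enormous error message.
	if n := len(out.Errors); n > 0 {
		e := out.Errors[0]
		return fmt.Errorf("s3: deleted %d objects, %d failed; first error: %s (%s)",
			len(out.Deleted), n, aws.StringValue(e.Key), aws.StringValue(e.Code))
	}
	return nil
}
```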
Jose Diaz-Gonzalez
6b93b6012a Update readme to note that this tool is for disaster recovery, not streaming replication
Refs #411
2023-05-02 16:03:31 -06:00
Ben Johnson
cca838b671 Use sqlite3_file_control(SQLITE_FCNTL_PERSIST_WAL) to persist WAL
Previously, Litestream would avoid closing the SQLite3 connection
in order to ensure that the WAL file was not cleaned up by the
database if it was the last connection. This commit changes the
behavior by introducing a file control call to perform the same
action. This allows us to close the database file normally in all
cases.
2023-05-02 16:03:31 -06:00
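For reference, the file-control call looks roughly like this from Go. This is a sketch, not the commit's code: it assumes the mattn/go-sqlite3 driver, whose `SetFileControlInt` wraps `sqlite3_file_control`; the opcode value 10 is `SQLITE_FCNTL_PERSIST_WAL` as defined in `sqlite3.h`:

```go
package example

import (
	"context"
	"database/sql"
	"fmt"

	sqlite3 "github.com/mattn/go-sqlite3"
)

// sqlite3.h: #define SQLITE_FCNTL_PERSIST_WAL 10
const sqliteFcntlPersistWAL = 10

// persistWAL tells SQLite to leave the -wal file in place when the last
// connection closes, so the database connection can be closed normally.
func persistWAL(ctx context.Context, db *sql.DB) error {
	conn, err := db.Conn(ctx)
	if err != nil {
		return err
	}
	defer conn.Close()

	return conn.Raw(func(driverConn any) error {
		c, ok := driverConn.(*sqlite3.SQLiteConn)
		if !ok {
			return fmt.Errorf("not a sqlite3 connection")
		}
		// "main" targets the primary database; arg 1 enables persistence.
		return c.SetFileControlInt("main", sqliteFcntlPersistWAL, 1)
	})
}
```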
Toni Spets
a34a92c0b9 Client side encryption support for remote storage (#468) 2023-05-01 10:06:01 -06:00
Ben Johnson
68e60cbfdf README (#466) 2023-03-31 17:55:14 -07:00
Ben Johnson
366cfc6baa Upgrade golang.org/x/sync v0.0.0-20210220032951-036812b2e83c => v0.0.0-20220722155255-886fb9371eb4 2022-07-25 13:16:26 -06:00
Ben Johnson
adf971f669 Upgrade github.com/prometheus/client_golang v1.9.0 => v1.12.2 2022-07-25 13:16:26 -06:00
Ben Johnson
fa3f8a21c8 Upgrade github.com/pierrec/lz4/v4@v4.1.15 2022-07-25 13:16:26 -06:00
Ben Johnson
fafe08ed90 Add Docker build for v0.3.x 2022-07-25 12:39:12 -06:00
Ben Johnson
360183dc96 Fix up v0.3.x CI tests 2022-03-10 11:25:56 -07:00
Ben Johnson
cb1b1a0afe Upgrade go-sqlite3 to v1.14.12 2022-03-10 11:25:56 -07:00
Ben Johnson
393317b6f8 Fix FindMinSnapshotByGeneration() loop ref bug 2021-12-05 09:42:49 -07:00
Ben Johnson
1e6878998c Reduce snapshot check frequency
Previously, a bug was introduced that added a `LIST` operation
on every replica sync which significantly increased the cost of
running Litestream against S3. This changes the behavior to only
issue the `LIST` operation when the generation has changed.
2021-10-12 09:47:29 -06:00
Ben Johnson
55c17b9d8e Move WAL checksum validation message to trace logging
Checksum mismatch can regularly occur now that write locks have
been removed during WAL sync. This does not pose any corruption
risk but does sound scary to end users. Moving this to trace
logging instead.
2021-06-06 09:12:29 -06:00
Ben Johnson
4d41652c12 Add acknowledgements 2021-06-06 09:04:43 -06:00
Ben Johnson
8b70e3d8a8 Add replica logging 2021-06-06 08:43:31 -06:00
Ben Johnson
8fb9c910f0 Add simple subprocess execution
This commit adds the ability to run a subcommand through Litestream.
Shutting down the subcommand will cause Litestream to gracefully
shutdown. Litestream will forward interrupt signals and wait for
the subprocess to shutdown.
2021-06-03 15:30:54 -06:00
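A sketch of that pattern with hypothetical names (`runSubprocess` is not Litestream's actual function): start the child, forward interrupts to it instead of exiting, and let the child's exit drive shutdown:

```go
package example

import (
	"os"
	"os/exec"
	"os/signal"
)

// runSubprocess starts the command, forwards interrupt signals to it,
// and returns when the child exits so the caller can shut down.
func runSubprocess(name string, args ...string) error {
	cmd := exec.Command(name, args...)
	cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
	if err := cmd.Start(); err != nil {
		return err
	}

	// Forward interrupts to the child rather than dying immediately.
	ch := make(chan os.Signal, 2)
	signal.Notify(ch, os.Interrupt)
	defer signal.Stop(ch)
	go func() {
		for sig := range ch {
			_ = cmd.Process.Signal(sig)
		}
	}()

	// The child's exit triggers the parent's own graceful shutdown.
	return cmd.Wait()
}
```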
Ben Johnson
c06997789b Add support for filebase replica URL 2021-06-02 15:09:08 -06:00
Ben Johnson
403959218d Remove debug code 2021-06-02 13:41:46 -06:00
Ben Johnson
b2233cf4de Support Azure account key via environment variable 2021-06-02 13:41:01 -06:00
Ben Johnson
1c0c69a5ab Unify replica client metrics 2021-06-01 18:19:56 -06:00
Ben Johnson
88909e3bd0 Add -if-db-not-exists restore flag
This commit adds a flag to `litestream restore` to skip the restore
if the database file already exists. It is useful when using the
official Litestream Docker image and you don't have the ability to
add a script to check for the existence of the file.
2021-05-31 15:05:35 -06:00
Ben Johnson
59b025d3da Update Go version for release builds 2021-05-25 17:02:35 -06:00
Ben Johnson
48cd11a361 Disable Azure default timeout 2021-05-25 16:58:31 -06:00
Ben Johnson
18e8805798 Reduce S3 sync interval
Previously, S3 would default to sync every 10s when using a config
file but only every 1s when using the replica URL on the command
line. Obviously, this is confusing.

A sync interval of 1s could incur a cost of $1.30/month, however,
in practice applications are not receiving a constant stream of
writes so the cost is typically only a few pennies. This lower
sync interval will provide a smaller window for data loss in the
event of a catastrophic failure at a negligible cost.
2021-05-24 14:39:45 -06:00
Ben Johnson
d1ac03bd8c Add SFTP replica type 2021-05-24 14:29:57 -06:00
Ben Johnson
31da780ed3 Change GitHub Actions to run on Ubuntu 18.04 2021-05-23 08:09:02 -06:00
Ben Johnson
84dc68c09c Add Azure Blob Storage replica type 2021-05-23 07:58:45 -06:00
Ben Johnson
ac32e8e089 Add Google Cloud Storage replica 2021-05-22 07:16:14 -06:00
Ben Johnson
6c865e37f1 Move path functions to litestream package 2021-05-21 10:39:22 -06:00
Ben Johnson
fb80bc10ae Refactor replica system 2021-05-21 07:44:36 -06:00
Ben Johnson
8685e9f2d1 Fix Windows expandEnv flag 2021-04-28 08:34:06 -06:00
Ben Johnson
9019aceef8 Add version to Dockerfile 2021-04-28 08:33:51 -06:00
Ben Johnson
2e3dda89ad Merge pull request #174 from benbjohnson/fix-snapshot-only-restore
Fix snapshot-only restore
2021-04-24 07:51:50 -06:00
Ben Johnson
331f6072bf Fix snapshot-only restore
This commit fixes a bug introduced by parallel restore (03831e2)
where snapshot-only restores were not being handled correctly and
Litestream would hang indefinitely. Now the restore will check
explicitly for snapshot-only restores and exit the restore process
early to avoid WAL handling completely.
2021-04-24 07:48:25 -06:00
Ben Johnson
6acfbcbc64 Add Docker shield to README 2021-04-23 16:04:44 -06:00
Ben Johnson
f21ebcda28 Merge pull request #171 from benbjohnson/remove-release-binary
Remove uncompressed release binary
2021-04-22 16:50:11 -06:00
Ben Johnson
e366b348cd Remove uncompressed release binary
This commit removes the uncompressed binary from the release page.
It was originally added to simplify Docker but it turns out that
having to chmod the binary will double its size.
2021-04-22 16:46:01 -06:00
Ben Johnson
064158b060 Merge pull request #170 from benbjohnson/remove-sync-lock-2
Remove SQLite write lock during WAL sync
2021-04-22 16:40:29 -06:00
Ben Johnson
1d1fd6e686 Remove SQLite write lock during WAL sync (again)
This commit reattempts a change to remove the write lock that was
previously tried in 998e831. This change will reduce the number of
locks on the database which should help reduce error messages that
applications see when they do not have busy_timeout set.

In addition to the lock removal, a passive checkpoint is issued
immediately before the read lock is obtained to prevent additional
checkpoints by the application itself. SQLite does not support
checkpoints from an active transaction so it cannot be done afterward.
2021-04-22 16:35:04 -06:00
Ben Johnson
73f8de23a6 Merge pull request #169 from benbjohnson/credentials
Allow use of LITESTREAM prefixed environment variables
2021-04-22 16:01:23 -06:00
Ben Johnson
7f4325e814 Allow use of LITESTREAM prefixed environment variables
This commit adds optional `LITESTREAM_ACCESS_KEY_ID` and
`LITESTREAM_SECRET_ACCESS_KEY` environment variables that can
be used instead of their `AWS` counterparts. The AWS-prefixed
variables have caused some confusion with users who were not
using AWS S3.
2021-04-22 15:58:17 -06:00
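The lookup is a simple fallback chain; a sketch (helper name illustrative):

```go
package example

import "os"

// accessKeyID prefers the LITESTREAM-prefixed variable and falls back
// to the conventional AWS name; the secret key works the same way.
func accessKeyID() string {
	if v := os.Getenv("LITESTREAM_ACCESS_KEY_ID"); v != "" {
		return v
	}
	return os.Getenv("AWS_ACCESS_KEY_ID")
}
```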
Ben Johnson
ad4b84410d Merge pull request #168 from benbjohnson/relicense
Relicense to Apache 2
2021-04-21 16:34:50 -06:00
Ben Johnson
0b7906aaac APLv2 2021-04-21 16:32:05 -06:00
Ben Johnson
6fc6d151b1 Merge pull request #167 from benbjohnson/parallel-restore
Download WAL files in parallel during restore
2021-04-21 16:27:09 -06:00
Ben Johnson
03831e2d06 Download WAL files in parallel during restore
This commit changes the restore to download multiple WAL files to
the local disk in parallel while another goroutine applies those
files in order. Downloading and applying the WAL files serially limits
total throughput because a restore typically involves many small WAL files.
2021-04-21 16:07:29 -06:00
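The shape of that pipeline, sketched with hypothetical `downloadWAL`/`applyWAL` helpers standing in for the real replica client calls: downloads run concurrently with bounded parallelism, while a single consumer applies results strictly in index order:

```go
package example

import "context"

// Hypothetical helpers: fetch a WAL segment to a temp file / apply it.
func downloadWAL(ctx context.Context, index int) (path string, err error) { panic("stub") }
func applyWAL(ctx context.Context, path string) error                     { panic("stub") }

// restoreWALs downloads up to `parallelism` WAL files at once while
// applying them to the database in order.
func restoreWALs(ctx context.Context, indexes []int) error {
	const parallelism = 8

	type result struct {
		path string
		err  error
	}
	results := make([]chan result, len(indexes))
	for i := range results {
		results[i] = make(chan result, 1)
	}

	// Producer: start bounded concurrent downloads.
	go func() {
		sem := make(chan struct{}, parallelism)
		for i, index := range indexes {
			sem <- struct{}{}
			go func(i, index int) {
				defer func() { <-sem }()
				path, err := downloadWAL(ctx, index)
				results[i] <- result{path, err}
			}(i, index)
		}
	}()

	// Consumer: apply in index order as downloads complete.
	for i := range results {
		r := <-results[i]
		if r.err != nil {
			return r.err
		}
		if err := applyWAL(ctx, r.path); err != nil {
			return err
		}
	}
	return nil
}
```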
Ben Johnson
257b625749 Merge pull request #166 from benbjohnson/fix-restore-to-index
Fix snapshot selection during restore-by-index
2021-04-21 12:15:19 -06:00
Ben Johnson
1c01af4e69 Fix snapshot selection during restore-by-index
This commit fixes a bug where restoring to a specific index will
incorrectly choose the latest snapshot instead of choosing the
latest snapshot that occurred before the given index.
2021-04-21 12:09:05 -06:00
Ben Johnson
bbd0f3b33c Merge pull request #165 from benbjohnson/restore-hex-index
Use hex encoding for restore's index flag
2021-04-19 12:07:17 -06:00
Ben Johnson
9439822763 Use hex-encoding for restore's index flag
This commit changes the `-index` flag on the `restore` command by
parsing it as a hex number instead of a decimal number. This is
done because the index is represented in hex form everywhere else
in the application.
2021-04-19 12:05:38 -06:00
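Parsing the flag as hex is a one-liner with `strconv`; for example, the index `00000233` seen in hex-encoded WAL names is decimal 563:

```go
package main

import (
	"fmt"
	"log"
	"strconv"
)

func main() {
	// Base 16 to match the hex-encoded index used in WAL file names.
	index, err := strconv.ParseInt("00000233", 16, 64)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(index) // 563
}
```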
Ben Johnson
63e51d2050 Merge pull request #164 from benbjohnson/improve-restore-logging
Improve restoration logging
2021-04-18 09:35:56 -06:00
Ben Johnson
84830bc4ad Improve restoration logging
This commit splits out logging for downloading a WAL file and applying
the WAL file to the database to get more accurate timing measurements.
2021-04-18 09:33:53 -06:00
Ben Johnson
ce0a5d2820 Merge pull request #163 from benbjohnson/remove-dry-run
Remove -dry-run flag in restore
2021-04-18 09:26:01 -06:00
Ben Johnson
3ad157d841 Remove -dry-run flag in restore
This flag is being removed because it's not actually that useful
in practice and it just makes the restoration code more complicated.
2021-04-18 09:21:50 -06:00
Ben Johnson
a20e35c5cc Merge pull request #162 from benbjohnson/log-micro
Use microsecond resolution for logging
2021-04-18 09:21:27 -06:00
Ben Johnson
029921299c Use microsecond resolution for logging
This commit changes `log` from second to microsecond resolution to
improve debugging when restoring WAL files.
2021-04-18 09:18:44 -06:00
Ben Johnson
f8d6969a4f Merge pull request #161 from benbjohnson/fix-flakey-wal-updated-at-test
Ensure minimum wait time for TestDB_UpdatedAt/WAL test
2021-04-17 09:41:59 -06:00
Ben Johnson
1e8bce029f Ensure minimum wait time for TestDB_UpdatedAt/WAL test
This commit fixes an issue where the test can be flaky if run on
a system with a higher time resolution. It now waits at least 100ms.
2021-04-17 09:40:07 -06:00
Ben Johnson
b29fb7e2ba Merge pull request #160 from benbjohnson/netgo
Add osusergo & netgo tags for static builds
2021-04-17 09:21:09 -06:00
Ben Johnson
dbb69786d3 Add osusergo & netgo tags for static builds 2021-04-17 09:18:03 -06:00
Ben Johnson
c70e9c0ba8 Merge pull request #159 from benbjohnson/strip
Reduce binary size
2021-04-17 09:14:05 -06:00
Ben Johnson
dd8fdd8c8c Reduce binary size 2021-04-17 09:12:38 -06:00
Ben Johnson
dfd1b1b92d Merge pull request #158 from benbjohnson/sqlite-flags
Omit load extensions for static builds
2021-04-17 09:08:36 -06:00
Ben Johnson
c4c30e394d Omit load extensions for static builds 2021-04-17 09:06:54 -06:00
Ben Johnson
28673aeb01 Merge pull request #157 from benbjohnson/expand-env
Configuration file environment variable expansion
2021-04-16 09:32:11 -06:00
Ben Johnson
04ae010378 Configuration file environment variable expansion
This commit adds simple variable expansion using either `$FOO`
or `${FOO}` when evaluating the config file. This can be disabled
by any command by using the `-no-expand-env` flag.
2021-04-16 09:28:01 -06:00
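`os.ExpandEnv` from the standard library handles both the `$FOO` and `${FOO}` forms, so the core of the feature can be sketched like this (the `noExpandEnv` flag variable is assumed):

```go
package example

import "os"

// loadConfig reads the raw config file and optionally expands
// environment variables before YAML parsing.
func loadConfig(path string, noExpandEnv bool) (string, error) {
	raw, err := os.ReadFile(path)
	if err != nil {
		return "", err
	}
	config := string(raw)
	if !noExpandEnv { // disabled via the -no-expand-env flag
		config = os.ExpandEnv(config) // expands $FOO and ${FOO}
	}
	return config, nil
}
```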
Ben Johnson
cefbcb0460 Merge pull request #156 from benbjohnson/docker
Docker
2021-04-16 08:37:30 -06:00
Ben Johnson
01407c3c25 Add Docker image 2021-04-16 08:35:22 -06:00
Ben Johnson
66fdb208c7 Merge pull request #154 from benbjohnson/fix-not-database-error-message
Remove reference to "wal" in first db init command
2021-04-15 11:59:28 -06:00
Ben Johnson
247896b8b7 Remove reference to "wal" in first db init command
This commit changes the error message of the first SQL command
executed during initialization. Typically, it wraps the error with
a message of "enable wal" since it is enabling the WAL mode but
that can be confusing if the DB connection or file is invalid.

Instead, the error is returned as-is and we can determine the
source of the error since it is the only unwrapped DB-related error.
2021-04-15 11:51:22 -06:00
Ben Johnson
1e6e741f55 Merge pull request #151 from benbjohnson/arm6
Add arm6 release builds
2021-04-12 09:47:54 -06:00
Ben Johnson
b31daabf52 Add arm6 release builds 2021-04-11 10:27:45 -06:00
Ben Johnson
1ccb4ef922 Merge pull request #149 from benbjohnson/arm-cgo
Enable CGO for cross compilation
2021-04-10 12:49:00 -06:00
Ben Johnson
54f0659c7b Enable CGO for cross compilation 2021-04-10 12:35:43 -06:00
Ben Johnson
5c0b8536f0 Merge pull request #148 from benbjohnson/arm
Support ARM release builds
2021-04-10 08:48:32 -06:00
Ben Johnson
462330ead6 Support ARM release builds 2021-04-10 08:39:10 -06:00
Ben Johnson
178cf836b1 Move pull request template 2021-04-07 16:24:49 -06:00
Ben Johnson
f45c0d8560 Create pull_request_template.md 2021-04-07 16:23:11 -06:00
Ben Johnson
bb146e2c09 Merge pull request #141 from benbjohnson/skip-verify
Add skip-verify flag for using self-signed certificates
2021-03-27 08:02:55 -06:00
Ben Johnson
f1d2df3e73 Add skip-verify flag for using self-signed certificates
This commit adds a `skip-verify` flag to the replica configuration
so that it can be used with self-signed certificates. This is useful
when running a local instance of MinIO with TLS for testing.
2021-03-27 08:00:09 -06:00
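Under the hood this maps to Go's `tls.Config`; a minimal sketch of what the flag enables on the replica's HTTP client:

```go
package example

import (
	"crypto/tls"
	"net/http"
)

// newHTTPClient disables certificate verification when skipVerify is
// set, allowing self-signed certificates (e.g. a local MinIO with TLS).
func newHTTPClient(skipVerify bool) *http.Client {
	return &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{InsecureSkipVerify: skipVerify},
		},
	}
}
```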
Ben Johnson
ef39987cc7 Merge pull request #133 from benbjohnson/sigterm
Catch sigterm & add shutdown logging
2021-03-21 09:38:38 -06:00
Ben Johnson
ee0c4c62d8 Catch sigterm & add shutdown logging
This commit changes the signal handler for `replicate` to catch
`syscall.SIGTERM` for non-Windows installations. It also adds some
logging to indicate when a shutdown has been initiated and when it
has finished.
2021-03-21 09:36:13 -06:00
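On modern Go this is commonly written with `signal.NotifyContext`; a sketch of the shutdown path (log wording illustrative):

```go
package main

import (
	"context"
	"log"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	// Cancel ctx on SIGINT or, on non-Windows platforms, SIGTERM.
	ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
	defer stop()

	<-ctx.Done()
	log.Println("signal received, shutting down")
	// ... the final DB sync & replica sync described in "Sync on close"
	// below would run here ...
	log.Println("shutdown complete")
}
```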
Ben Johnson
e2de7e852c Merge pull request #132 from benbjohnson/sync-on-close
Sync on close
2021-03-21 08:45:26 -06:00
Ben Johnson
0529ce74b7 Sync on close
This commit changes the `replicate` command so that it performs a
final DB sync & replica sync before it exits to ensure it has
backed up all WAL frames at the time of exit.
2021-03-21 08:43:55 -06:00
Ben Johnson
421693130c Merge pull request #131 from benbjohnson/if-replica-exists
Add `-if-replica-exists` flag to restore
2021-03-21 08:09:57 -06:00
Ben Johnson
4a17c81b91 Add -if-replica-exists flag to restore
This commit adds a flag to change the exit code when restoring
from a replica where there is no existing backup. When set,
finding no backup will return a `0` exit code. The command will
still fail if other errors occur.
2021-03-21 08:08:11 -06:00
Ben Johnson
ba068ea3f8 Upload raw release binaries 2021-03-20 09:50:48 -06:00
Ben Johnson
085974fe1d Merge pull request #130 from benbjohnson/static
Add static release builds
2021-03-20 09:42:53 -06:00
Ben Johnson
18598a10e6 Add static release builds 2021-03-20 09:41:19 -06:00
Ben Johnson
16c50d1d2e Merge pull request #120 from benbjohnson/default-force-path-style
Default to force path style if endpoint set
2021-03-11 15:29:18 -07:00
Ben Johnson
929a66314c Default to force path style if endpoint set
This commit changes the replica configuration behavior to default
the `force-path-style` field to `true` when an `endpoint` is set.
This works because the only service that does not use the path
style is AWS S3, which does not use an endpoint.
2021-03-11 15:26:41 -07:00
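With the v1 aws-sdk-go, that default is a small conditional when building the session (the `endpoint` parameter stands in for the replica's configured endpoint):

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
)

// newSession enables path-style addressing whenever a custom endpoint
// is configured, since virtual-hosted style is only needed for AWS S3.
func newSession(endpoint string) (*session.Session, error) {
	cfg := aws.Config{}
	if endpoint != "" {
		cfg.Endpoint = aws.String(endpoint)
		cfg.S3ForcePathStyle = aws.Bool(true)
	}
	return session.NewSession(&cfg)
}
```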
Ben Johnson
2e7a6ae715 Merge pull request #118 from benbjohnson/default-region 2021-03-09 15:42:04 -07:00
Ben Johnson
896aef070c Default region if endpoint specified 2021-03-09 15:38:32 -07:00
Ben Johnson
3598d8b572 Merge pull request #111 from benbjohnson/linode
Add support for Linode Object Storage replica URLs
2021-03-07 08:55:29 -07:00
Ben Johnson
3183cf0e2e Add support for Linode Object Storage replica URLs
This commit adds the ability to specify Linode Object Storage
as replica URLs in the command line and configuration file:

	s3://MYBKT.us-east-1.linodeobjects.com/MYPATH
2021-03-07 08:47:24 -07:00
Ben Johnson
a59ee6ed63 Merge pull request #110 from benbjohnson/digitalocean
Add support for DigitalOcean Spaces replica URLs
2021-03-07 08:29:23 -07:00
Ben Johnson
e4c1a82eb2 Add support for DigitalOcean Spaces replica URLs
This commit adds the ability to specify DigitalOcean Spaces as
replica URLs in the command line and configuration file:

	s3://mybkt.nyc3.digitaloceanspaces.com/mypath
2021-03-07 08:25:26 -07:00
Ben Johnson
aa54e4698d Merge pull request #109 from benbjohnson/wal-mismatch-validation-info
Add WAL validation debug information
2021-03-07 07:55:02 -07:00
Ben Johnson
43e40ce8d3 Merge pull request #108 from benbjohnson/revert-lock-removal
Revert sync lock removal
2021-03-07 07:52:49 -07:00
Ben Johnson
0bd1b13b94 Add wal validation debug information on error
This commit adds the WAL header and shadow path to "wal header mismatch"
errors to help debug issues. The mismatch seems to happen more often
than I would expect on restart. This error doesn't cause any corruption;
it simply causes a generation to restart which requires a snapshot.
2021-03-07 07:48:43 -07:00
Ben Johnson
1c16aae550 Revert sync lock removal
This commit reverts the removal of the SQLite write lock during
WAL sync (998e831c5c). The change
caused validation mismatch errors during the long-running test
although the restored database did not appear to be corrupted so
perhaps it's simply a locking issue during validation.
2021-03-07 07:30:25 -07:00
Ben Johnson
49f47ea87f Merge pull request #105 from benbjohnson/db-config-fields
Expose additional DB configuration settings
2021-03-06 08:37:02 -07:00
Ben Johnson
8947adc312 Expose additional DB configuration settings
This commit exposes the monitor interval, checkpoint interval,
minimum checkpoint page count, and maximum checkpoint page count
via the YAML configuration file.
2021-03-06 08:33:19 -07:00
Ben Johnson
9341863bdb Merge pull request #104 from benbjohnson/remove-sync-lock
Remove SQLite write lock during WAL sync
2021-03-06 08:08:23 -07:00
Ben Johnson
998e831c5c Remove SQLite write lock during WAL sync
Originally, Litestream relied on a SQLite write lock to ensure
transactions were atomically replicated. However, this was changed
so that Litestream itself now validates the transaction boundaries.
As such, the write lock on the database is no longer needed. The
read lock is sufficient to prevent WAL rollover and the WAL is
append-only, so it is safe to read up to a known position calculated
via fstat().

WAL validation change was made in 031a526b9a

The locking code, however, was moved in this commit to the
post-checkpoint copy to ensure the end-of-file is not overwritten
by aggressive writers.
2021-03-06 07:51:35 -07:00
Ben Johnson
b2ca113fb5 Merge pull request #103 from benbjohnson/fix-addr-log
Fix logged hostport for metrics endpoint
2021-03-06 07:30:36 -07:00
Ben Johnson
b211e82ed2 Fix logged hostport for metrics endpoint
This commit fixes a bug where the bind address is not reported
correctly in the log if a hostname is specified. Previously it
would always report the host as "localhost" even if a host was
specified (such as "0.0.0.0:9090").

This commit also adds validation to require the port to be
specified and only specifying a hostname will return an error.
2021-03-06 07:23:09 -07:00
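A sketch of the corrected validation and logging using `net.SplitHostPort` (function name illustrative):

```go
package example

import (
	"fmt"
	"net"
)

// metricsAddr validates the bind address, requiring an explicit port,
// and reports the configured host instead of assuming localhost.
func metricsAddr(addr string) (string, error) {
	host, port, err := net.SplitHostPort(addr)
	if err != nil {
		return "", fmt.Errorf("metrics addr must be host:port: %w", err)
	}
	if host == "" {
		host = "localhost" // only default when no host was given
	}
	return net.JoinHostPort(host, port), nil
}
```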
Ben Johnson
e2779169a0 README 2021-03-02 08:11:51 -07:00
Ben Johnson
ec2f9c84d5 Merge pull request #96 from benbjohnson/acknowledgements
Acknowledgments
2021-02-28 09:19:44 -07:00
Ben Johnson
78eb8dcc53 Acknowledgments 2021-02-28 09:16:35 -07:00
Ben Johnson
cafa0f5942 Merge pull request #94 from benbjohnson/prevent-config-and-replica-url
Prevent user from specifying replica URL & config flag
2021-02-28 08:31:29 -07:00
Ben Johnson
325482a97c Prevent user from specifying replica URL & config flag
Previously, if a replica URL was specified then the `-config` flag
was silently ignored. This commit changes this behavior so that
specifying both the URL & config flag will now return an error.
2021-02-28 08:09:24 -07:00
Ben Johnson
9cee1285b9 Merge pull request #93 from benbjohnson/non-ofd-locks
Fix release of non-OFD locks
2021-02-28 07:28:00 -07:00
Ben Johnson
a14a74d678 Fix release of non-OFD locks
This commit removes short-lived `os.Open()` calls on the database
file because this can cause locks to be released when `os.File.Close()`
is later called if the operating system does not support OFD
(Open File Descriptor) locks.
2021-02-28 06:44:02 -07:00
Ben Johnson
f652186adf Merge pull request #84 from benbjohnson/snapshot-interval 2021-02-25 15:41:58 -07:00
Ben Johnson
afb8731ead Add snapshot interval
This commit adds the ability to periodically perform snapshots on
an interval that is separate from retention. For example, this lets
you retain backups for 24 hours but you can snapshot your database
every six hours to improve recovery time.
2021-02-25 15:34:13 -07:00
Ben Johnson
ce2d54cc20 Merge pull request #82 from benbjohnson/fix-db-init-failure
Fix error handling when DB.init() fails
2021-02-24 15:47:58 -07:00
Ben Johnson
d802e15b4f Fix error handling when DB.init() fails
The `DB.init()` can fail temporarily for a variety of reasons such
as the database being locked. Previously, the DB would save the
`*sql.DB` connection even if a step failed and this prevented the
database from attempting initialization again. This change makes it
so that the connection is only saved if initialization is successful.
On failure, the initialization process will be retried on next sync.
2021-02-24 15:43:28 -07:00
Ben Johnson
d6ece0b826 Merge pull request #73 from benbjohnson/fix-example-yml
Fix example litestream.yml replica configuration
2021-02-22 07:54:31 -07:00
Ben Johnson
cb007762be Fix example litestream.yml replica configuration 2021-02-22 07:52:56 -07:00
Ben Johnson
6a90714bbe Merge pull request #70 from benbjohnson/fix-global-settings
Fix global settings propagation
2021-02-22 06:40:16 -07:00
Ben Johnson
622ba82ebb Fix global settings propagation
This commit fixes an issue caused by a refactor where setting global
or local AWS credentials in a config file fails.
2021-02-22 06:37:40 -07:00
Ben Johnson
6ca010e9db Merge pull request #66 from benbjohnson/s3-compatible 2021-02-21 10:00:43 -07:00
Ben Johnson
ad9ce43127 Add support for S3-compatible object storage.
This commit adds support for non-AWS S3-compatible storage such as
MinIO, Backblaze B2, & Google Cloud Storage (GCS). Other backends
should also work, and some code has been added to make URL-based
configurations work more easily.
2021-02-21 09:40:48 -07:00
Ben Johnson
167d333fcd Merge pull request #65 from benbjohnson/windows
Add Windows Service & MSI builds
2021-02-19 16:25:10 -07:00
Ben Johnson
c5390dec1d Add Windows Service & MSI builds 2021-02-19 16:21:04 -07:00
Ben Johnson
e2cbd5fb63 README 2021-02-12 08:14:55 -07:00
Ben Johnson
8d083f7a2d README 2021-02-09 07:07:23 -07:00
Ben Johnson
37442babfb Revert validation mismatch temp file persistence
This commit reverts 4e469f8 which was used for debugging the validation
stall corruption issue. It can cause the disk to fill with temporary
files though so it is being reverted.
2021-02-09 06:44:42 -07:00
Ben Johnson
962a2a894b Fix tabwriter 2021-02-08 15:55:15 -07:00
Ben Johnson
0c61c9f7fe Merge pull request #39 from benbjohnson/replicate-s3-sync-interval
Reduce s3 sync interval when using replica URL
2021-02-08 14:14:20 -07:00
Ben Johnson
267b140fab Reduce s3 sync interval when using replica URL
This commit changes the default sync interval from 10s to 1s
when replicating using the inline replica URL. This approach is
used when users are first testing the software so a faster
replication interval makes it easier to see results.
2021-02-08 14:09:01 -07:00
Ben Johnson
1b194535e6 Merge pull request #38 from benbjohnson/trace-flag
Add trace file to replicate command
2021-02-06 07:34:02 -07:00
Ben Johnson
58a6c765fe Add trace file to replicate command
This commit removes the verbose flag (`-v`) and replaces it with
the trace flag (`-trace PATH`). This moves tracing to a separate
file instead of writing to STDOUT.
2021-02-06 07:31:19 -07:00
Ben Johnson
2604052a9f Merge pull request #37 from benbjohnson/fix-shadow-write
Fix shadow wal corruption on stalled validation
2021-02-06 07:31:05 -07:00
Ben Johnson
7f81890bae Fix shadow wal corruption on stalled validation
This commit fixes a timing bug that occurs in a specific scenario
where the shadow wal sync stalls because of an s3 validation and
the catch up write to the shadow wal is large enough to allow a
window between WAL reads and the final copy.

The file copy has been replaced by direct writes of the frame
buffer to the shadow to ensure that every validated byte is exactly
what is being written to the shadow wal. The one downside to this
change is that the frame buffer will grow with the transaction
size so it will use additional heap. This can be replaced by a
spill-to-disk implementation but this should work well in the
short term.
2021-02-06 07:28:15 -07:00
Ben Johnson
2ff073c735 Merge pull request #36 from benbjohnson/max-index
Enforce max WAL index
2021-02-02 15:16:42 -07:00
Ben Johnson
6fd11ccab5 Enforce max WAL index.
This commit sets a hard upper limit of (1<<31)-1 for the WAL index.
The index is hex-encoded in file names as a 4-byte unsigned integer,
so this limit ensures all index values fit within that encoding and
are unaffected by any signed 32-bit integer limit.

A WAL file is typically at least 4MB so you would need to write
8 petabytes to reach this upper limit.
2021-02-02 15:11:50 -07:00
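The arithmetic checks out; a runnable sanity check of the limit and the write volume needed to reach it:

```go
package main

import "fmt"

// Hard cap: fits a 4-byte unsigned, hex-encoded file name and stays
// below any signed 32-bit limit.
const maxWALIndex = 1<<31 - 1

func main() {
	fmt.Printf("max index: %08x\n", maxWALIndex) // 7fffffff
	// At ~4 MB per WAL file: 2^31 files * 4 MiB = 2^53 bytes = 8 PiB.
	const bytesToCap int64 = (1 << 31) * (4 << 20)
	fmt.Println(bytesToCap>>50, "PiB") // 8 PiB
}
```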
Ben Johnson
6c49fba592 Check checkpoint result during restore 2021-02-02 15:04:20 -07:00
Ben Johnson
922fa0798e Merge pull request #34 from benbjohnson/windows 2021-01-31 10:09:51 -07:00
Ben Johnson
976df182c0 Fix Windows build 2021-01-31 10:08:28 -07:00
Ben Johnson
0e28a650e6 Merge pull request #33 from benbjohnson/log-wal-checksum-mismatch
Log WAL frame checksum mismatch
2021-01-31 08:57:51 -07:00
Ben Johnson
f17768e830 Log WAL frame checksum mismatch
Currently, the WAL copy function can encounter a checksum mismatch in a
WAL frame and it will return an error. This can occur for partial writes
and is recovered from moments later. This commit changes the error to a
log write instead.
2021-01-31 08:52:12 -07:00
Ben Johnson
2c142d3a0c Merge pull request #32 from benbjohnson/persist-mismatch-validation-data
Persist primary/replica copies after validation mismatch
2021-01-31 08:50:15 -07:00
Ben Johnson
4e469f8b02 Persist primary/replica copies after validation mismatch
This commit changes `ValidateReplica()` to persist copies of the
primary & replica databases for inspection if a validation mismatch
occurs.
2021-01-31 08:47:06 -07:00
Ben Johnson
3f268b70f8 Merge pull request #31 from benbjohnson/adjust-logging
Reduce logging output
2021-01-31 08:14:57 -07:00
Ben Johnson
ad7bf7f974 Reduce logging output
Previously, there were excessive log messages for checkpoints and
retention. These have been removed or combined into a single log
message where appropriate.
2021-01-31 08:12:18 -07:00
Ben Johnson
778451f09f CONTRIBUTING 2021-01-28 13:31:28 -07:00
Ben Johnson
8e9a15933b README 2021-01-27 08:01:48 -07:00
Ben Johnson
da1d7c3183 README 2021-01-27 07:59:12 -07:00
Ben Johnson
a178ef4714 Merge pull request #27 from benbjohnson/generations-replica-url
Allow replica URLs for generations command
2021-01-27 07:50:29 -07:00
Ben Johnson
7ca2e193b9 Allow replica URLs for generations command 2021-01-27 07:48:56 -07:00
Ben Johnson
39a6fabb9f Fix restore logging. 2021-01-26 17:01:00 -07:00
Ben Johnson
0249b4e4f5 Merge pull request #25 from benbjohnson/replica-url 2021-01-26 16:40:17 -07:00
Ben Johnson
67eeb49101 Allow replica URL to be used for commands
This commit refactors the commands to allow a replica URL when
restoring a database. If the first CLI arg is a URL with a scheme,
then it is treated as a replica URL.
2021-01-26 16:33:16 -07:00
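The scheme check is straightforward with `net/url`; a sketch:

```go
package example

import "net/url"

// isReplicaURL reports whether the first CLI argument should be treated
// as a replica URL (it parses and carries a scheme such as "s3" or
// "file") rather than a local database path.
func isReplicaURL(arg string) bool {
	u, err := url.Parse(arg)
	return err == nil && u.Scheme != ""
}
```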
Ben Johnson
f7213ed35c Allow replication without config file.
This commit changes `litestream replicate` to accept a database
path and a replica URL instead of using the config file. This allows
people to quickly try out the tool instead of learning the config
file syntax.
2021-01-25 10:33:50 -07:00
Ben Johnson
a532a0198e README 2021-01-24 10:09:54 -07:00
Ben Johnson
16f79e5814 Merge pull request #24 from benbjohnson/document-retention-period
Document retention period configuration
2021-01-24 09:32:31 -07:00
Ben Johnson
39aefc2c02 Document retention period configuration 2021-01-24 09:28:57 -07:00
Ben Johnson
0b08669bca Merge pull request #23 from benbjohnson/disable-metrics-by-default
Disable prometheus metrics by default
2021-01-24 09:18:37 -07:00
Ben Johnson
8f5761ee13 Disable prometheus metrics by default
The HTTP server should only be enabled if a user explicitly sets a
port for it.
2021-01-24 09:16:23 -07:00
Ben Johnson
d2eb4fa5ba Remove PR action 2021-01-24 08:54:29 -07:00
Ben Johnson
ca489c5e73 Merge pull request #22 from benbjohnson/notorize
Add signed homebrew install
2021-01-24 08:50:01 -07:00
Ben Johnson
f0ae48af4c Add signed homebrew install 2021-01-24 08:47:16 -07:00
Ben Johnson
9eae39e2fa README 2021-01-21 15:01:30 -07:00
Ben Johnson
42ab293ffb README 2021-01-21 14:53:21 -07:00
Ben Johnson
c8b72bf16b Fix release action 2021-01-21 14:39:23 -07:00
Ben Johnson
9c4de6c520 Debian release action 2021-01-21 14:34:42 -07:00
Ben Johnson
94411923a7 Fix unit test 2021-01-21 13:52:35 -07:00
Ben Johnson
e92db9ef4b Enforce stricter validation on restart.
Previously, the sync would validate the last page written to ensure
that replication picked up from the last position. However, a large
WAL file followed by a series of shorter checkpointed WAL files means
that the last page could be the same even if multiple checkpoints
have occurred.

To fix this, the WAL header must match the shadow WAL header when
starting litestream since there are no guarantees about checkpoints.
2021-01-21 13:44:05 -07:00
Ben Johnson
031a526b9a Only copy committed WAL pages 2021-01-21 12:44:11 -07:00
Ben Johnson
2244be885d README 2021-01-20 17:05:47 -07:00
Ben Johnson
95bcaa5927 Fix file replica compression 2021-01-19 09:25:38 -07:00
Ben Johnson
1935ebd6f0 Fix S3 GET bytes metric 2021-01-19 06:46:13 -07:00
Ben Johnson
7fb98df240 cleanup 2021-01-18 15:58:49 -07:00
Ben Johnson
f31c22af62 Remove s3 bucket lookup log 2021-01-18 15:27:16 -07:00
Ben Johnson
139d836d7a Fix file/dir mode 2021-01-18 15:23:28 -07:00
Ben Johnson
14dad1fd5a Switch from gzip to lz4 2021-01-18 14:45:12 -07:00
Ben Johnson
35d755e7f2 Remove debugging code 2021-01-18 10:33:30 -07:00
Ben Johnson
358dcd4650 Copy shadow WAL immediately after init 2021-01-18 10:01:16 -07:00
Ben Johnson
2ce4052300 Remove write lock during db checksum 2021-01-18 07:05:27 -07:00
Ben Johnson
44af75fa98 Fix missing WAL reader error 2021-01-18 07:05:17 -07:00
Ben Johnson
3c4fd152c9 Add more checksum logging 2021-01-18 06:38:03 -07:00
Ben Johnson
d259d9b9e3 Fix checksum logging 2021-01-17 10:19:39 -07:00
Ben Johnson
90a1d959d4 Remove size from s3 filenames 2021-01-17 10:02:06 -07:00
Ben Johnson
04d75507e3 Fix checksum hex padding 2021-01-17 09:52:09 -07:00
Ben Johnson
4b65e6a88f Log validation position 2021-01-17 07:38:13 -07:00
Ben Johnson
07a65cbac7 Fix crc64 unit test 2021-01-16 10:04:03 -07:00
Ben Johnson
6ac6a8536d Obtain write lock during validation. 2021-01-16 09:27:43 -07:00
Ben Johnson
71ab15e50a Fix S3 GET stats 2021-01-16 09:22:33 -07:00
Ben Johnson
b4e5079760 Add .deb packaging 2021-01-16 09:22:02 -07:00
Ben Johnson
78563f821d Do not require databases when starting replication 2021-01-16 09:15:16 -07:00
Ben Johnson
e65536f81d Stop waiting for replica if generation changes 2021-01-16 07:47:02 -07:00
Ben Johnson
25fec29e1a Clear last position on replica sync error 2021-01-16 07:45:08 -07:00
Ben Johnson
cbc2dce6dc Add busy timeout 2021-01-16 07:33:32 -07:00
Ben Johnson
1b8cfc8a41 Add validation interval 2021-01-15 16:37:04 -07:00
Ben Johnson
290e06e60d Reduce s3 LIST operations 2021-01-15 13:31:04 -07:00
Ben Johnson
b94ee366e5 Fix snapshot only restore 2021-01-15 13:12:15 -07:00
Ben Johnson
743aeb83e1 Revert gzip compression level, fix s3 wal upload 2021-01-15 13:04:21 -07:00
Ben Johnson
a7ec05ad7a Allow global AWS settings in config. 2021-01-15 12:27:41 -07:00
Ben Johnson
28dd7b564e Lookup s3 bucket region if not specified 2021-01-15 12:18:07 -07:00
Ben Johnson
43dda4315f Allow URLs for replica config path 2021-01-15 12:04:23 -07:00
Ben Johnson
0655bf420a Fix unit tests 2021-01-14 16:13:19 -07:00
Ben Johnson
8c113cf260 Add file & s3 replica metrics 2021-01-14 16:10:02 -07:00
Ben Johnson
daa74f87b4 Add file replica metrics 2021-01-14 15:47:58 -07:00
Ben Johnson
e1c9e09161 Update wal segment naming 2021-01-14 15:26:29 -07:00
Ben Johnson
1e4e9633cc Add s3 sync interval 2021-01-14 15:04:26 -07:00
Ben Johnson
294846cce2 Add context to s3 2021-01-13 16:38:00 -07:00
Ben Johnson
9eb7bd41c2 S3 reader & retention enforcement 2021-01-13 16:21:42 -07:00
Ben Johnson
1ac4adb272 Basic s3 replication working 2021-01-13 14:23:41 -07:00
Ben Johnson
a42f83f3cb Add LITESTREAM_CONFIG env var 2021-01-13 13:17:38 -07:00
Ben Johnson
57a02a8628 S3 replica 2021-01-13 10:14:54 -07:00
Ben Johnson
faa5765745 Add retention policy, remove WAL subdir 2021-01-12 15:22:37 -07:00
Ben Johnson
1fa1313b0b Add trace logging. 2021-01-11 11:04:29 -07:00
Ben Johnson
bcdb553267 Use database owner/group 2021-01-11 09:39:08 -07:00
Ben Johnson
9828b4c1dd Rename 'databases' to 'dbs' in config 2021-01-10 10:07:07 -07:00
Ben Johnson
dde9d1042d Update generation lag calc 2021-01-10 09:54:05 -07:00
Ben Johnson
8f30ff7d93 Fix negative duration truncation. 2021-01-10 09:52:04 -07:00
Ben Johnson
aa136a17ee Fix duration truncation. 2021-01-10 09:46:39 -07:00
Ben Johnson
60cb2c97ca Set default max checkpoint. 2021-01-10 09:46:27 -07:00
Ben Johnson
0abe09526d Fix local build 2021-01-09 08:59:08 -07:00
Ben Johnson
b0a3440356 make dist 2021-01-08 16:05:07 -07:00
Ben Johnson
a8d63b54aa README 2021-01-08 10:07:26 -07:00
Ben Johnson
b22f3f100d Add FileReplica.Sync() unit tests. 2021-01-05 15:48:50 -07:00
Ben Johnson
3075b2e92b Fix WAL mod time test 2021-01-05 15:15:28 -07:00
Ben Johnson
7c3272c96f Revert "Test 1.16beta1"
This reverts commit 4294fcf4b4.
2021-01-05 15:11:29 -07:00
Ben Johnson
4294fcf4b4 Test 1.16beta1 2021-01-05 15:10:09 -07:00
Ben Johnson
ae0f51eaa9 Fix setup-go 2021-01-05 14:34:14 -07:00
Ben Johnson
8871d75a8e Fix max checkpoint size check 2021-01-05 14:17:13 -07:00
Ben Johnson
c22eea13ad Add checkpoint tests 2021-01-05 14:07:17 -07:00
Ben Johnson
f4d055916a Add DB sync tests 2021-01-05 13:59:16 -07:00
Ben Johnson
979cabcdb9 Add some DB.Sync() tests 2021-01-01 10:02:03 -07:00
Ben Johnson
5134bc3328 Add test coverage for DB.CRC64 2021-01-01 09:26:23 -07:00
Ben Johnson
78d9de6512 Add DB path tests 2021-01-01 09:00:23 -07:00
Ben Johnson
065f641526 Change validation to use CRC-64 2021-01-01 08:24:11 -07:00
Ben Johnson
f4d0d87fa7 Add DB.UpdatedAt() tests 2021-01-01 08:20:40 -07:00
Ben Johnson
9d0e79c2cf Add db metrics 2020-12-31 16:30:56 -07:00
Ben Johnson
da5087c14c Fix vet issue 2020-12-31 10:56:29 -07:00
Ben Johnson
4ac0829bf7 go mod tidy 2020-12-31 10:38:25 -07:00
Ben Johnson
d6de916c66 Rename replicator.go 2020-12-31 10:37:51 -07:00
Ben Johnson
3b9275488d Add 'validate' command 2020-12-31 10:36:48 -07:00
Ben Johnson
cff778464e Add 'databases' command. 2020-12-30 16:10:48 -07:00
Ben Johnson
11d7d22383 Add 'wal' command 2020-12-30 16:03:23 -07:00
Ben Johnson
8a7d8175fc Restrict generations command to single db 2020-12-30 15:44:21 -07:00
Ben Johnson
ffc25e2654 Add 'snapshots' command. 2020-12-30 15:31:35 -07:00
Ben Johnson
5cc78fafa0 Clean shadow WAL 2020-12-30 14:48:58 -07:00
Ben Johnson
0b12efb135 Add checkpoint interval 2020-12-30 12:07:02 -07:00
Ben Johnson
d4891f33da Refactor DB.checkpoint() to accept any mode.
This was originally meant to add a TRUNCATE checkpoint before starting
a new generation; however, there is a write lock that blocks the
checkpoint and it is more complicated to roll it back and attempt the
truncation.
2020-12-29 17:02:33 -07:00
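After the refactor the checkpoint mode is just a parameter; a sketch of the pragma call, which also scans SQLite's (busy, log, checkpointed) result row:

```go
package example

import (
	"database/sql"
	"fmt"
)

// checkpoint runs a WAL checkpoint in the given mode: PASSIVE, FULL,
// RESTART, or TRUNCATE.
func checkpoint(db *sql.DB, mode string) error {
	var busy, logFrames, checkpointed int
	row := db.QueryRow(fmt.Sprintf(`PRAGMA wal_checkpoint(%s);`, mode))
	if err := row.Scan(&busy, &logFrames, &checkpointed); err != nil {
		return err
	}
	if busy != 0 {
		return fmt.Errorf("checkpoint blocked (mode=%s)", mode)
	}
	return nil
}
```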
Ben Johnson
42a33cccf4 Group WAL files in file replica 2020-12-29 16:40:28 -07:00
50 changed files with 10077 additions and 2197 deletions

.github/CONTRIBUTING.md (new file, +18)

@@ -0,0 +1,18 @@
## Contribution Policy

Initially, Litestream was closed to outside contributions. The goal was to
reduce burnout by limiting the maintenance overhead of reviewing and validating
third-party code. However, this policy is overly broad and has prevented small,
easily testable patches from being contributed.

Litestream is now open to code contributions for bug fixes only. Features carry
a long-term maintenance burden so they will not be accepted at this time.
Please [submit an issue][new-issue] if you have a feature you'd like to
request.

If you find mistakes in the documentation, please submit a fix to the
[documentation repository][docs].

[new-issue]: https://github.com/benbjohnson/litestream/issues/new
[docs]: https://github.com/benbjohnson/litestream.io

.github/workflows/commit.yml (new file, +121)

@@ -0,0 +1,121 @@
on: push
jobs:
  build:
    name: Build & Unit Test
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-go@v2
        with:
          go-version: '1.21'
      - uses: actions/cache@v2
        with:
          path: ~/go/pkg/mod
          key: ${{ inputs.os }}-go-${{ hashFiles('**/go.sum') }}
          restore-keys: ${{ inputs.os }}-go-
      - run: go env
      - run: go install ./cmd/litestream
      - run: go test -v ./...
# - name: Build integration test
# run: go test -c ./integration
#
# - uses: actions/upload-artifact@v2
# with:
# name: integration.test
# path: integration.test
# if-no-files-found: error
# long-running-test:
# name: Run Long Running Unit Test
# runs-on: ubuntu-latest
# steps:
# - uses: actions/checkout@v2
# - uses: actions/setup-go@v2
# with:
# go-version: '1.20'
# - uses: actions/cache@v2
# with:
# path: ~/go/pkg/mod
# key: ${{ inputs.os }}-go-${{ hashFiles('**/go.sum') }}
# restore-keys: ${{ inputs.os }}-go-
#
# - run: go install ./cmd/litestream
# - run: go test -v -run=TestCmd_Replicate_LongRunning ./integration -long-running-duration 1m
# s3-integration-test:
# name: Run S3 Integration Tests
# runs-on: ubuntu-latest
# needs: build
# steps:
# - uses: actions/download-artifact@v2
# with:
# name: integration.test
# - run: chmod +x integration.test
#
# - run: ./integration.test -test.v -test.run=TestReplicaClient -replica-type s3
# env:
# LITESTREAM_S3_ACCESS_KEY_ID: ${{ secrets.LITESTREAM_S3_ACCESS_KEY_ID }}
# LITESTREAM_S3_SECRET_ACCESS_KEY: ${{ secrets.LITESTREAM_S3_SECRET_ACCESS_KEY }}
# LITESTREAM_S3_REGION: us-east-1
# LITESTREAM_S3_BUCKET: integration.litestream.io
# gcp-integration-test:
# name: Run GCP Integration Tests
# runs-on: ubuntu-latest
# needs: build
# steps:
# - name: Extract GCP credentials
# run: 'echo "$GOOGLE_APPLICATION_CREDENTIALS" > /opt/gcp.json'
# shell: bash
# env:
# GOOGLE_APPLICATION_CREDENTIALS: ${{secrets.GOOGLE_APPLICATION_CREDENTIALS}}
#
# - uses: actions/download-artifact@v2
# with:
# name: integration.test
# - run: chmod +x integration.test
#
# - run: ./integration.test -test.v -test.run=TestReplicaClient -replica-type gcs
# env:
# GOOGLE_APPLICATION_CREDENTIALS: /opt/gcp.json
# LITESTREAM_GCS_BUCKET: integration.litestream.io
# abs-integration-test:
# name: Run Azure Blob Store Integration Tests
# runs-on: ubuntu-latest
# needs: build
# steps:
# - uses: actions/download-artifact@v2
# with:
# name: integration.test
# - run: chmod +x integration.test
#
# - run: ./integration.test -test.v -test.run=TestReplicaClient -replica-type abs
# env:
# LITESTREAM_ABS_ACCOUNT_NAME: ${{ secrets.LITESTREAM_ABS_ACCOUNT_NAME }}
# LITESTREAM_ABS_ACCOUNT_KEY: ${{ secrets.LITESTREAM_ABS_ACCOUNT_KEY }}
# LITESTREAM_ABS_BUCKET: integration
# sftp-integration-test:
# name: Run SFTP Integration Tests
# runs-on: ubuntu-latest
# needs: build
# steps:
# - name: Extract SSH key
# run: 'echo "$LITESTREAM_SFTP_KEY" > /opt/id_ed25519'
# shell: bash
# env:
# LITESTREAM_SFTP_KEY: ${{secrets.LITESTREAM_SFTP_KEY}}
#
# - name: Run sftp tests
# run: go test -v -run=TestReplicaClient ./integration -replica-type sftp
# env:
# LITESTREAM_SFTP_HOST: ${{ secrets.LITESTREAM_SFTP_HOST }}
# LITESTREAM_SFTP_USER: ${{ secrets.LITESTREAM_SFTP_USER }}
# LITESTREAM_SFTP_KEY_PATH: /opt/id_ed25519
# LITESTREAM_SFTP_PATH: ${{ secrets.LITESTREAM_SFTP_PATH }}

.github/workflows/release.docker.yml (new file, +51)

@@ -0,0 +1,51 @@
on:
  release:
    types:
      - published
  pull_request:
    types:
      - opened
      - synchronize
      - reopened
    branches-ignore:
      - "dependabot/**"
name: Release (Docker)
jobs:
  docker:
    runs-on: ubuntu-latest
    env:
      PLATFORMS: "linux/amd64,linux/arm64,linux/arm/v7"
      VERSION: "${{ github.event_name == 'release' && github.event.release.name || github.sha }}"
    steps:
      - uses: actions/checkout@v2
      - uses: docker/setup-qemu-action@v1
      - uses: docker/setup-buildx-action@v1
      - uses: docker/login-action@v1
        with:
          username: benbjohnson
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - id: meta
        uses: docker/metadata-action@v3
        with:
          images: litestream/litestream
          tags: |
            type=ref,event=branch
            type=ref,event=pr
            type=sha
            type=sha,format=long
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}.{{minor}}
      - uses: docker/build-push-action@v2
        with:
          context: .
          push: true
          platforms: ${{ env.PLATFORMS }}
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          build-args: |
            LITESTREAM_VERSION=${{ env.VERSION }}

.github/workflows/release.linux.yml (new file, +81)

@@ -0,0 +1,81 @@
on:
  release:
    types:
      - created
name: release (linux)
jobs:
  build:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        include:
          - arch: amd64
            cc: gcc
          - arch: arm64
            cc: aarch64-linux-gnu-gcc
          - arch: arm
            arm: 6
            cc: arm-linux-gnueabi-gcc
          - arch: arm
            arm: 7
            cc: arm-linux-gnueabihf-gcc
    env:
      GOOS: linux
      GOARCH: ${{ matrix.arch }}
      GOARM: ${{ matrix.arm }}
      CC: ${{ matrix.cc }}
    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-go@v2
        with:
          go-version: '1.21'
      - id: release
        uses: bruceadams/get-release@v1.2.2
        env:
          GITHUB_TOKEN: ${{ github.token }}
      - name: Install cross-compilers
        run: |
          sudo apt-get update
          sudo apt-get install -y gcc-aarch64-linux-gnu gcc-arm-linux-gnueabihf gcc-arm-linux-gnueabi
      - name: Install nfpm
        run: |
          wget https://github.com/goreleaser/nfpm/releases/download/v2.2.3/nfpm_2.2.3_Linux_x86_64.tar.gz
          tar zxvf nfpm_2.2.3_Linux_x86_64.tar.gz
      - name: Build litestream
        run: |
          rm -rf dist
          mkdir -p dist
          cp etc/litestream.yml etc/litestream.service dist
          cat etc/nfpm.yml | LITESTREAM_VERSION=${{ steps.release.outputs.tag_name }} envsubst > dist/nfpm.yml
          CGO_ENABLED=1 go build -ldflags "-s -w -extldflags "-static" -X 'main.Version=${{ steps.release.outputs.tag_name }}'" -tags osusergo,netgo,sqlite_omit_load_extension -o dist/litestream ./cmd/litestream
          cd dist
          tar -czvf litestream-${{ steps.release.outputs.tag_name }}-${{ env.GOOS }}-${{ env.GOARCH }}${{ env.GOARM }}.tar.gz litestream
          ../nfpm pkg --config nfpm.yml --packager deb --target litestream-${{ steps.release.outputs.tag_name }}-${{ env.GOOS }}-${{ env.GOARCH }}${{ env.GOARM }}.deb
      - name: Upload release tarball
        uses: actions/upload-release-asset@v1.0.2
        env:
          GITHUB_TOKEN: ${{ github.token }}
        with:
          upload_url: ${{ steps.release.outputs.upload_url }}
          asset_path: ./dist/litestream-${{ steps.release.outputs.tag_name }}-${{ env.GOOS }}-${{ env.GOARCH }}${{ env.GOARM }}.tar.gz
          asset_name: litestream-${{ steps.release.outputs.tag_name }}-${{ env.GOOS }}-${{ env.GOARCH }}${{ env.GOARM }}.tar.gz
          asset_content_type: application/gzip
      - name: Upload debian package
        uses: actions/upload-release-asset@v1.0.2
        env:
          GITHUB_TOKEN: ${{ github.token }}
        with:
          upload_url: ${{ steps.release.outputs.upload_url }}
          asset_path: ./dist/litestream-${{ steps.release.outputs.tag_name }}-${{ env.GOOS }}-${{ env.GOARCH }}${{ env.GOARM }}.deb
          asset_name: litestream-${{ steps.release.outputs.tag_name }}-${{ env.GOOS }}-${{ env.GOARCH }}${{ env.GOARM }}.deb
          asset_content_type: application/octet-stream

Deleted file (previous release workflow)

@@ -1,32 +0,0 @@
on:
  release:
    types:
      - created
name: release
jobs:
  release:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-go@v2
      - id: release
        uses: bruceadams/get-release@v1.2.2
        env:
          GITHUB_TOKEN: ${{ github.token }}
      - name: Build litestream
        run: |
          go build -ldflags "-X 'main.Version=${{ steps.release.outputs.tag_name }}'" -o litestream ./cmd/litestream
          tar -czvf litestream-${{ steps.release.outputs.tag_name }}-linux-amd64.tar.gz litestream
      - name: Upload release binary
        uses: actions/upload-release-asset@v1.0.2
        env:
          GITHUB_TOKEN: ${{ github.token }}
        with:
          upload_url: ${{ steps.release.outputs.upload_url }}
          asset_path: ./litestream-${{ steps.release.outputs.tag_name }}-linux-amd64.tar.gz
          asset_name: litestream-${{ steps.release.outputs.tag_name }}-linux-amd64.tar.gz
          asset_content_type: application/gzip

Deleted file (previous test workflow)

@@ -1,19 +0,0 @@
on: [push, pull_request]
name: test
jobs:
  test:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/setup-go@v2
      - uses: actions/checkout@v2
      - uses: actions/cache@v2
        with:
          path: ~/go/pkg/mod
          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
          restore-keys: |
            ${{ runner.os }}-go-
      - name: Run unit tests
        run: go test -v ./...

.gitignore (new file, +2)

@@ -0,0 +1,2 @@
.DS_Store
/dist

Dockerfile (new file, +16)

@@ -0,0 +1,16 @@
FROM golang:1.21.3 as builder
WORKDIR /src/litestream
COPY . .
ARG LITESTREAM_VERSION=latest
RUN --mount=type=cache,target=/root/.cache/go-build \
    --mount=type=cache,target=/go/pkg \
    go build -ldflags "-s -w -X 'main.Version=${LITESTREAM_VERSION}' -extldflags '-static'" -tags osusergo,netgo,sqlite_omit_load_extension -o /usr/local/bin/litestream ./cmd/litestream
FROM alpine:3.17.2
COPY --from=builder /usr/local/bin/litestream /usr/local/bin/litestream
ENTRYPOINT ["/usr/local/bin/litestream"]
CMD []

LICENSE (876 lines changed)

@@ -1,674 +1,202 @@
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU General Public License is a free, copyleft license for
software and other kinds of works.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users. We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors. You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights. Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received. You must make sure that they, too, receive
or can get the source code. And you must show them these terms so they
know their rights.
Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.
For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software. For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.
Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so. This is fundamentally incompatible with the aim of
protecting users' freedom to change the software. The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable. Therefore, we
have designed this version of the GPL to prohibit the practice for those
products. If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.
Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary. To prevent this, the GPL assures that
patents cannot be used to render the program non-free.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Use with the GNU Affero General Public License.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If the program does terminal interaction, make it output a short
notice like this when it starts in an interactive mode:
<program> Copyright (C) <year> <name of author>
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, your program's commands
might be different; for a GUI interface, you would use an "about box".
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
<https://www.gnu.org/licenses/>.
The GNU General Public License does not permit incorporating your program
into proprietary programs. If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License. But first, please read
<https://www.gnu.org/licenses/why-not-lgpl.html>.
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

Makefile Normal file

@@ -0,0 +1,37 @@
default:

docker:
	docker build -t litestream .

dist-linux:
	mkdir -p dist
	cp etc/litestream.yml dist/litestream.yml
	docker run --rm -v "${PWD}":/usr/src/litestream -w /usr/src/litestream -e GOOS=linux -e GOARCH=amd64 golang:1.16 go build -v -ldflags "-s -w" -o dist/litestream ./cmd/litestream
	tar -cz -f dist/litestream-linux-amd64.tar.gz -C dist litestream

dist-linux-arm:
	docker run --rm -v "${PWD}":/usr/src/litestream -w /usr/src/litestream -e CGO_ENABLED=1 -e CC=arm-linux-gnueabihf-gcc -e GOOS=linux -e GOARCH=arm golang-xc:1.16 go build -v -o dist/litestream-linux-arm ./cmd/litestream

dist-linux-arm64:
	docker run --rm -v "${PWD}":/usr/src/litestream -w /usr/src/litestream -e CGO_ENABLED=1 -e CC=aarch64-linux-gnu-gcc -e GOOS=linux -e GOARCH=arm64 golang-xc:1.16 go build -v -o dist/litestream-linux-arm64 ./cmd/litestream

dist-macos:
ifndef LITESTREAM_VERSION
	$(error LITESTREAM_VERSION is undefined)
endif
	mkdir -p dist
	GOOS=darwin GOARCH=amd64 CC="gcc -target amd64-apple-macos11" CGO_ENABLED=1 go build -v -ldflags "-s -w -X 'main.Version=${LITESTREAM_VERSION}'" -o dist/litestream ./cmd/litestream
	gon etc/gon.hcl
	mv dist/litestream.zip dist/litestream-${LITESTREAM_VERSION}-darwin-amd64.zip
	openssl dgst -sha256 dist/litestream-${LITESTREAM_VERSION}-darwin-amd64.zip
	GOOS=darwin GOARCH=arm64 CC="gcc -target arm64-apple-macos11" CGO_ENABLED=1 go build -v -ldflags "-s -w -X 'main.Version=${LITESTREAM_VERSION}'" -o dist/litestream ./cmd/litestream
	gon etc/gon.hcl
	mv dist/litestream.zip dist/litestream-${LITESTREAM_VERSION}-darwin-arm64.zip
	openssl dgst -sha256 dist/litestream-${LITESTREAM_VERSION}-darwin-arm64.zip

clean:
	rm -rf dist

.PHONY: default dist-linux dist-macos clean


@@ -1,17 +1,61 @@
litestream
Litestream
![GitHub release (latest by date)](https://img.shields.io/github/v/release/benbjohnson/litestream)
![Status](https://img.shields.io/badge/status-beta-blue)
![GitHub](https://img.shields.io/github/license/benbjohnson/litestream)
[![Docker Pulls](https://img.shields.io/docker/pulls/litestream/litestream.svg?maxAge=604800)](https://hub.docker.com/r/litestream/litestream/)
![test](https://github.com/benbjohnson/litestream/workflows/test/badge.svg)
==========
Streaming replication for SQLite.
Litestream is a standalone disaster recovery tool for SQLite. It runs as a
background process and safely replicates changes incrementally to another file
or S3. Litestream only communicates with SQLite through the SQLite API so it
will not corrupt your database.
If you need support or have ideas for improving Litestream, please join the
[Litestream Slack][slack] or visit the [GitHub Discussions](https://github.com/benbjohnson/litestream/discussions).
Please visit the [Litestream web site](https://litestream.io) for installation
instructions and documentation.
If you find this project interesting, please consider starring the project on
GitHub.
[slack]: https://join.slack.com/t/litestream/shared_invite/zt-n0j4s3ci-lx1JziR3bV6L2NMF723H3Q
## Questions
## Acknowledgements
- How to avoid WAL checkpointing on close?
While the Litestream project does not accept external code patches, many
of the most valuable contributions are in the forms of testing, feedback, and
documentation. These help harden the software and streamline usage for other users.
I want to give special thanks to individuals who invest much of their time and
energy into the project to help make it better:
- Thanks to [Cory LaNou](https://twitter.com/corylanou) for giving early feedback and testing when Litestream was still pre-release.
- Thanks to [Michael Lynch](https://github.com/mtlynch) for digging into issues and contributing to the documentation.
- Thanks to [Kurt Mackey](https://twitter.com/mrkurt) for feedback and testing.
- Thanks to [Sam Weston](https://twitter.com/cablespaghetti) for figuring out how to run Litestream on Kubernetes and writing up the docs for it.
- Thanks to [Rafael](https://github.com/netstx) & [Jungle Boogie](https://github.com/jungle-boogie) for helping to get OpenBSD release builds working.
- Thanks to [Simon Gottschlag](https://github.com/simongottschlag), [Marin](https://github.com/supermarin), [Victor Björklund](https://github.com/victorbjorklund), [Jonathan Beri](https://twitter.com/beriberikix), [Yuri](https://github.com/yurivish), [Nathan Probst](https://github.com/nprbst), [Yann Coleu](https://github.com/yanc0), and [Nicholas Grilly](https://twitter.com/ngrilly) for frequent feedback, testing, & support.
Huge thanks to fly.io for their support and for contributing credits for testing and development!
## Notes
## Contribution Policy
Initially, Litestream was closed to outside contributions. The goal was to
reduce burnout by limiting the maintenance overhead of reviewing and validating
third-party code. However, this policy is overly broad and has prevented small,
easily testable patches from being contributed.
Litestream is now open to code contributions for bug fixes only. Features carry
a long-term maintenance burden so they will not be accepted at this time.
Please [submit an issue][new-issue] if you have a feature you'd like to
request.
If you find mistakes in the documentation, please submit a fix to the
[documentation repository][docs].
[new-issue]: https://github.com/benbjohnson/litestream/issues/new
[docs]: https://github.com/benbjohnson/litestream.io
```sql
-- Disable autocheckpointing.
PRAGMA wal_autocheckpoint = 0
```
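The pragma above disables SQLite's automatic WAL checkpointing. As a minimal sketch of applying it from application code (assuming the mattn/go-sqlite3 driver; the database path is illustrative), a Go program might run:

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/mattn/go-sqlite3" // assumed driver for this sketch
)

func main() {
	db, err := sql.Open("sqlite3", "app.db") // illustrative path
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Keep the database in WAL mode and disable automatic checkpoints so an
	// external replication process can control checkpointing itself.
	if _, err := db.Exec(`PRAGMA journal_mode = wal`); err != nil {
		log.Fatal(err)
	}
	if _, err := db.Exec(`PRAGMA wal_autocheckpoint = 0`); err != nil {
		log.Fatal(err)
	}
}
```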

abs/replica_client.go Normal file

@@ -0,0 +1,565 @@
package abs
import (
"context"
"fmt"
"io"
"net/url"
"os"
"path"
"strings"
"sync"
"time"
"github.com/Azure/azure-storage-blob-go/azblob"
"github.com/benbjohnson/litestream"
"github.com/benbjohnson/litestream/internal"
"golang.org/x/sync/errgroup"
)
// ReplicaClientType is the client type for this package.
const ReplicaClientType = "abs"
var _ litestream.ReplicaClient = (*ReplicaClient)(nil)
// ReplicaClient is a client for writing snapshots & WAL segments to Azure Blob Storage.
type ReplicaClient struct {
mu sync.Mutex
containerURL *azblob.ContainerURL
// Azure credentials
AccountName string
AccountKey string
Endpoint string
// Azure Blob Storage container information
Bucket string
Path string
}
// NewReplicaClient returns a new instance of ReplicaClient.
func NewReplicaClient() *ReplicaClient {
return &ReplicaClient{}
}
// Type returns "abs" as the client type.
func (c *ReplicaClient) Type() string {
return ReplicaClientType
}
// Init initializes the connection to Azure. No-op if already initialized.
func (c *ReplicaClient) Init(ctx context.Context) (err error) {
c.mu.Lock()
defer c.mu.Unlock()
if c.containerURL != nil {
return nil
}
// Read account key from environment, if available.
accountKey := c.AccountKey
if accountKey == "" {
accountKey = os.Getenv("LITESTREAM_AZURE_ACCOUNT_KEY")
}
// Authenticate to Azure Blob Storage.
credential, err := azblob.NewSharedKeyCredential(c.AccountName, accountKey)
if err != nil {
return err
}
// Construct & parse endpoint unless already set.
endpoint := c.Endpoint
if endpoint == "" {
endpoint = fmt.Sprintf("https://%s.blob.core.windows.net", c.AccountName)
}
endpointURL, err := url.Parse(endpoint)
if err != nil {
return fmt.Errorf("cannot parse azure endpoint: %w", err)
}
// Build pipeline and reference to container.
pipeline := azblob.NewPipeline(credential, azblob.PipelineOptions{
Retry: azblob.RetryOptions{
TryTimeout: 24 * time.Hour,
},
})
containerURL := azblob.NewServiceURL(*endpointURL, pipeline).NewContainerURL(c.Bucket)
c.containerURL = &containerURL
return nil
}
// Generations returns a list of available generation names.
func (c *ReplicaClient) Generations(ctx context.Context) ([]string, error) {
if err := c.Init(ctx); err != nil {
return nil, err
}
var generations []string
var marker azblob.Marker
for marker.NotDone() {
internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "LIST").Inc()
resp, err := c.containerURL.ListBlobsHierarchySegment(ctx, marker, "/", azblob.ListBlobsSegmentOptions{
Prefix: litestream.GenerationsPath(c.Path) + "/",
})
if err != nil {
return nil, err
}
marker = resp.NextMarker
for _, prefix := range resp.Segment.BlobPrefixes {
name := path.Base(strings.TrimSuffix(prefix.Name, "/"))
if !litestream.IsGenerationName(name) {
continue
}
generations = append(generations, name)
}
}
return generations, nil
}
// DeleteGeneration deletes all snapshots & WAL segments within a generation.
func (c *ReplicaClient) DeleteGeneration(ctx context.Context, generation string) error {
if err := c.Init(ctx); err != nil {
return err
}
dir, err := litestream.GenerationPath(c.Path, generation)
if err != nil {
return fmt.Errorf("cannot determine generation path: %w", err)
}
var marker azblob.Marker
for marker.NotDone() {
internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "LIST").Inc()
resp, err := c.containerURL.ListBlobsFlatSegment(ctx, marker, azblob.ListBlobsSegmentOptions{Prefix: dir + "/"})
if err != nil {
return err
}
marker = resp.NextMarker
for _, item := range resp.Segment.BlobItems {
internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "DELETE").Inc()
blobURL := c.containerURL.NewBlobURL(item.Name)
if _, err := blobURL.Delete(ctx, azblob.DeleteSnapshotsOptionNone, azblob.BlobAccessConditions{}); isNotExists(err) {
continue
} else if err != nil {
return err
}
}
}
return nil
}
// Snapshots returns an iterator over all available snapshots for a generation.
func (c *ReplicaClient) Snapshots(ctx context.Context, generation string) (litestream.SnapshotIterator, error) {
if err := c.Init(ctx); err != nil {
return nil, err
}
return newSnapshotIterator(ctx, generation, c), nil
}
// WriteSnapshot writes LZ4 compressed data from rd to the object storage.
func (c *ReplicaClient) WriteSnapshot(ctx context.Context, generation string, index int, rd io.Reader) (info litestream.SnapshotInfo, err error) {
if err := c.Init(ctx); err != nil {
return info, err
}
key, err := litestream.SnapshotPath(c.Path, generation, index)
if err != nil {
return info, fmt.Errorf("cannot determine snapshot path: %w", err)
}
startTime := time.Now()
rc := internal.NewReadCounter(rd)
blobURL := c.containerURL.NewBlockBlobURL(key)
if _, err := azblob.UploadStreamToBlockBlob(ctx, rc, blobURL, azblob.UploadStreamToBlockBlobOptions{
BlobHTTPHeaders: azblob.BlobHTTPHeaders{ContentType: "application/octet-stream"},
BlobAccessTier: azblob.DefaultAccessTier,
}); err != nil {
return info, err
}
internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "PUT").Inc()
internal.OperationBytesCounterVec.WithLabelValues(ReplicaClientType, "PUT").Add(float64(rc.N()))
return litestream.SnapshotInfo{
Generation: generation,
Index: index,
Size: rc.N(),
CreatedAt: startTime.UTC(),
}, nil
}
// SnapshotReader returns a reader for snapshot data at the given generation/index.
func (c *ReplicaClient) SnapshotReader(ctx context.Context, generation string, index int) (io.ReadCloser, error) {
if err := c.Init(ctx); err != nil {
return nil, err
}
key, err := litestream.SnapshotPath(c.Path, generation, index)
if err != nil {
return nil, fmt.Errorf("cannot determine snapshot path: %w", err)
}
blobURL := c.containerURL.NewBlobURL(key)
resp, err := blobURL.Download(ctx, 0, 0, azblob.BlobAccessConditions{}, false, azblob.ClientProvidedKeyOptions{})
if isNotExists(err) {
return nil, os.ErrNotExist
} else if err != nil {
return nil, fmt.Errorf("cannot start new reader for %q: %w", key, err)
}
internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "GET").Inc()
internal.OperationBytesCounterVec.WithLabelValues(ReplicaClientType, "GET").Add(float64(resp.ContentLength()))
return resp.Body(azblob.RetryReaderOptions{}), nil
}
// DeleteSnapshot deletes a snapshot with the given generation & index.
func (c *ReplicaClient) DeleteSnapshot(ctx context.Context, generation string, index int) error {
if err := c.Init(ctx); err != nil {
return err
}
key, err := litestream.SnapshotPath(c.Path, generation, index)
if err != nil {
return fmt.Errorf("cannot determine snapshot path: %w", err)
}
internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "DELETE").Inc()
blobURL := c.containerURL.NewBlobURL(key)
if _, err := blobURL.Delete(ctx, azblob.DeleteSnapshotsOptionNone, azblob.BlobAccessConditions{}); isNotExists(err) {
return nil
} else if err != nil {
return fmt.Errorf("cannot delete snapshot %q: %w", key, err)
}
return nil
}
// WALSegments returns an iterator over all available WAL segments for a generation.
func (c *ReplicaClient) WALSegments(ctx context.Context, generation string) (litestream.WALSegmentIterator, error) {
if err := c.Init(ctx); err != nil {
return nil, err
}
return newWALSegmentIterator(ctx, generation, c), nil
}
// WriteWALSegment writes LZ4 compressed data from rd to the object storage.
func (c *ReplicaClient) WriteWALSegment(ctx context.Context, pos litestream.Pos, rd io.Reader) (info litestream.WALSegmentInfo, err error) {
if err := c.Init(ctx); err != nil {
return info, err
}
key, err := litestream.WALSegmentPath(c.Path, pos.Generation, pos.Index, pos.Offset)
if err != nil {
return info, fmt.Errorf("cannot determine wal segment path: %w", err)
}
startTime := time.Now()
rc := internal.NewReadCounter(rd)
blobURL := c.containerURL.NewBlockBlobURL(key)
if _, err := azblob.UploadStreamToBlockBlob(ctx, rc, blobURL, azblob.UploadStreamToBlockBlobOptions{
BlobHTTPHeaders: azblob.BlobHTTPHeaders{ContentType: "application/octet-stream"},
BlobAccessTier: azblob.DefaultAccessTier,
}); err != nil {
return info, err
}
internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "PUT").Inc()
internal.OperationBytesCounterVec.WithLabelValues(ReplicaClientType, "PUT").Add(float64(rc.N()))
return litestream.WALSegmentInfo{
Generation: pos.Generation,
Index: pos.Index,
Offset: pos.Offset,
Size: rc.N(),
CreatedAt: startTime.UTC(),
}, nil
}
// WALSegmentReader returns a reader for a section of WAL data at the given index.
// Returns os.ErrNotExist if no matching index/offset is found.
func (c *ReplicaClient) WALSegmentReader(ctx context.Context, pos litestream.Pos) (io.ReadCloser, error) {
if err := c.Init(ctx); err != nil {
return nil, err
}
key, err := litestream.WALSegmentPath(c.Path, pos.Generation, pos.Index, pos.Offset)
if err != nil {
return nil, fmt.Errorf("cannot determine wal segment path: %w", err)
}
blobURL := c.containerURL.NewBlobURL(key)
resp, err := blobURL.Download(ctx, 0, 0, azblob.BlobAccessConditions{}, false, azblob.ClientProvidedKeyOptions{})
if isNotExists(err) {
return nil, os.ErrNotExist
} else if err != nil {
return nil, fmt.Errorf("cannot start new reader for %q: %w", key, err)
}
internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "GET").Inc()
internal.OperationBytesCounterVec.WithLabelValues(ReplicaClientType, "GET").Add(float64(resp.ContentLength()))
return resp.Body(azblob.RetryReaderOptions{}), nil
}
// DeleteWALSegments deletes WAL segments at the given positions.
func (c *ReplicaClient) DeleteWALSegments(ctx context.Context, a []litestream.Pos) error {
if err := c.Init(ctx); err != nil {
return err
}
for _, pos := range a {
key, err := litestream.WALSegmentPath(c.Path, pos.Generation, pos.Index, pos.Offset)
if err != nil {
return fmt.Errorf("cannot determine wal segment path: %w", err)
}
internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "DELETE").Inc()
blobURL := c.containerURL.NewBlobURL(key)
if _, err := blobURL.Delete(ctx, azblob.DeleteSnapshotsOptionNone, azblob.BlobAccessConditions{}); isNotExists(err) {
continue
} else if err != nil {
return fmt.Errorf("cannot delete wal segment %q: %w", key, err)
}
}
return nil
}
type snapshotIterator struct {
client *ReplicaClient
generation string
ch chan litestream.SnapshotInfo
g errgroup.Group
ctx context.Context
cancel func()
info litestream.SnapshotInfo
err error
}
func newSnapshotIterator(ctx context.Context, generation string, client *ReplicaClient) *snapshotIterator {
itr := &snapshotIterator{
client: client,
generation: generation,
ch: make(chan litestream.SnapshotInfo),
}
itr.ctx, itr.cancel = context.WithCancel(ctx)
itr.g.Go(itr.fetch)
return itr
}
// fetch runs in a separate goroutine to fetch pages of objects and stream them to a channel.
func (itr *snapshotIterator) fetch() error {
defer close(itr.ch)
dir, err := litestream.SnapshotsPath(itr.client.Path, itr.generation)
if err != nil {
return fmt.Errorf("cannot determine snapshots path: %w", err)
}
var marker azblob.Marker
for marker.NotDone() {
internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "LIST").Inc()
resp, err := itr.client.containerURL.ListBlobsFlatSegment(itr.ctx, marker, azblob.ListBlobsSegmentOptions{Prefix: dir + "/"})
if err != nil {
return err
}
marker = resp.NextMarker
for _, item := range resp.Segment.BlobItems {
key := path.Base(item.Name)
index, err := litestream.ParseSnapshotPath(key)
if err != nil {
continue
}
info := litestream.SnapshotInfo{
Generation: itr.generation,
Index: index,
Size: *item.Properties.ContentLength,
CreatedAt: item.Properties.CreationTime.UTC(),
}
select {
case <-itr.ctx.Done():
case itr.ch <- info:
}
}
}
return nil
}
func (itr *snapshotIterator) Close() (err error) {
err = itr.err
// Cancel context and wait for error group to finish.
itr.cancel()
if e := itr.g.Wait(); e != nil && err == nil {
err = e
}
return err
}
func (itr *snapshotIterator) Next() bool {
// Exit if an error has already occurred.
if itr.err != nil {
return false
}
// Return false if context was canceled or if there are no more snapshots.
// Otherwise fetch the next snapshot and store it on the iterator.
select {
case <-itr.ctx.Done():
return false
case info, ok := <-itr.ch:
if !ok {
return false
}
itr.info = info
return true
}
}
func (itr *snapshotIterator) Err() error { return itr.err }
func (itr *snapshotIterator) Snapshot() litestream.SnapshotInfo {
return itr.info
}
type walSegmentIterator struct {
client *ReplicaClient
generation string
ch chan litestream.WALSegmentInfo
g errgroup.Group
ctx context.Context
cancel func()
info litestream.WALSegmentInfo
err error
}
func newWALSegmentIterator(ctx context.Context, generation string, client *ReplicaClient) *walSegmentIterator {
itr := &walSegmentIterator{
client: client,
generation: generation,
ch: make(chan litestream.WALSegmentInfo),
}
itr.ctx, itr.cancel = context.WithCancel(ctx)
itr.g.Go(itr.fetch)
return itr
}
// fetch runs in a separate goroutine to fetch pages of objects and stream them to a channel.
func (itr *walSegmentIterator) fetch() error {
defer close(itr.ch)
dir, err := litestream.WALPath(itr.client.Path, itr.generation)
if err != nil {
return fmt.Errorf("cannot determine wal path: %w", err)
}
var marker azblob.Marker
for marker.NotDone() {
internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "LIST").Inc()
resp, err := itr.client.containerURL.ListBlobsFlatSegment(itr.ctx, marker, azblob.ListBlobsSegmentOptions{Prefix: dir + "/"})
if err != nil {
return err
}
marker = resp.NextMarker
for _, item := range resp.Segment.BlobItems {
key := path.Base(item.Name)
index, offset, err := litestream.ParseWALSegmentPath(key)
if err != nil {
continue
}
info := litestream.WALSegmentInfo{
Generation: itr.generation,
Index: index,
Offset: offset,
Size: *item.Properties.ContentLength,
CreatedAt: item.Properties.CreationTime.UTC(),
}
select {
case <-itr.ctx.Done():
case itr.ch <- info:
}
}
}
return nil
}
func (itr *walSegmentIterator) Close() (err error) {
err = itr.err
// Cancel context and wait for error group to finish.
itr.cancel()
if e := itr.g.Wait(); e != nil && err == nil {
err = e
}
return err
}
func (itr *walSegmentIterator) Next() bool {
// Exit if an error has already occurred.
if itr.err != nil {
return false
}
// Return false if context was canceled or if there are no more segments.
// Otherwise fetch the next segment and store it on the iterator.
select {
case <-itr.ctx.Done():
return false
case info, ok := <-itr.ch:
if !ok {
return false
}
itr.info = info
return true
}
}
func (itr *walSegmentIterator) Err() error { return itr.err }
func (itr *walSegmentIterator) WALSegment() litestream.WALSegmentInfo {
return itr.info
}
func isNotExists(err error) bool {
switch err := err.(type) {
case azblob.StorageError:
return err.ServiceCode() == azblob.ServiceCodeBlobNotFound
default:
return false
}
}
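
A hedged sketch of where a helper like isNotExists is typically used with this SDK: treating deletion of an already-missing blob as a no-op (blobURL is a hypothetical azblob.BlobURL):

```go
if _, err := blobURL.Delete(ctx, azblob.DeleteSnapshotsOptionNone, azblob.BlobAccessConditions{}); err != nil && !isNotExists(err) {
	return fmt.Errorf("cannot delete blob: %w", err)
}
```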


@@ -0,0 +1,81 @@
package main
import (
"context"
"flag"
"fmt"
"os"
"strings"
"text/tabwriter"
)
// DatabasesCommand is a command for listing managed databases.
type DatabasesCommand struct{}
// Run executes the command.
func (c *DatabasesCommand) Run(ctx context.Context, args []string) (err error) {
fs := flag.NewFlagSet("litestream-databases", flag.ContinueOnError)
configPath, noExpandEnv := registerConfigFlag(fs)
fs.Usage = c.Usage
if err := fs.Parse(args); err != nil {
return err
} else if fs.NArg() != 0 {
return fmt.Errorf("too many arguments")
}
// Load configuration.
if *configPath == "" {
*configPath = DefaultConfigPath()
}
config, err := ReadConfigFile(*configPath, !*noExpandEnv)
if err != nil {
return err
}
// List all databases.
w := tabwriter.NewWriter(os.Stdout, 0, 8, 2, ' ', 0)
defer w.Flush()
fmt.Fprintln(w, "path\treplicas")
for _, dbConfig := range config.DBs {
db, err := NewDBFromConfig(dbConfig)
if err != nil {
return err
}
var replicaNames []string
for _, r := range db.Replicas {
replicaNames = append(replicaNames, r.Name())
}
fmt.Fprintf(w, "%s\t%s\n",
db.Path(),
strings.Join(replicaNames, ","),
)
}
return nil
}
// Usage prints the help screen to STDOUT.
func (c *DatabasesCommand) Usage() {
fmt.Printf(`
The databases command lists all databases in the configuration file.
Usage:
litestream databases [arguments]
Arguments:
-config PATH
Specifies the configuration file.
Defaults to %s
-no-expand-env
Disables environment variable expansion in configuration file.
`[1:],
DefaultConfigPath(),
)
}
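
Hypothetical invocations, in the style of the Examples sections of the other commands:

```
# List databases and their replicas from the default config.
$ litestream databases

# Use an alternate configuration file.
$ litestream databases -config /opt/litestream.yml
```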


@@ -2,132 +2,163 @@ package main
import (
"context"
"errors"
"flag"
"fmt"
"log"
"os"
"path/filepath"
"text/tabwriter"
"time"
"github.com/benbjohnson/litestream"
)
type GenerationsCommand struct {
ConfigPath string
Config Config
DBPath string
}
func NewGenerationsCommand() *GenerationsCommand {
return &GenerationsCommand{}
}
// GenerationsCommand represents a command to list all generations for a database.
type GenerationsCommand struct{}
// Run executes the command.
func (c *GenerationsCommand) Run(ctx context.Context, args []string) (err error) {
fs := flag.NewFlagSet("litestream-generations", flag.ContinueOnError)
registerConfigFlag(fs, &c.ConfigPath)
configPath, noExpandEnv := registerConfigFlag(fs)
replicaName := fs.String("replica", "", "replica name")
fs.Usage = c.Usage
if err := fs.Parse(args); err != nil {
return err
} else if fs.NArg() == 0 || fs.Arg(0) == "" {
return fmt.Errorf("database path or replica URL required")
} else if fs.NArg() > 1 {
return fmt.Errorf("too many arguments")
}
// Load configuration.
if c.ConfigPath == "" {
return errors.New("-config required")
}
config, err := ReadConfigFile(c.ConfigPath)
if err != nil {
return err
}
// Determine absolute path for database, if specified.
if c.DBPath = fs.Arg(0); c.DBPath != "" {
if c.DBPath, err = filepath.Abs(c.DBPath); err != nil {
var db *litestream.DB
var r *litestream.Replica
dbUpdatedAt := time.Now()
if isURL(fs.Arg(0)) {
if *configPath != "" {
return fmt.Errorf("cannot specify a replica URL and the -config flag")
}
if r, err = NewReplicaFromConfig(&ReplicaConfig{URL: fs.Arg(0)}, nil); err != nil {
return err
}
}
// List each generation.
w := tabwriter.NewWriter(os.Stdout, 0, 8, 1, '\t', 0)
fmt.Fprintln(w, "db\tname\tgeneration\tlag\tstart\tend")
for _, dbConfig := range config.DBs {
// Filter database, if specified in the arguments.
if c.DBPath != "" && dbConfig.Path != c.DBPath {
continue
} else {
if *configPath == "" {
*configPath = DefaultConfigPath()
}
// Instantiate DB from configuration.
db, err := newDBFromConfig(dbConfig)
// Load configuration.
config, err := ReadConfigFile(*configPath, !*noExpandEnv)
if err != nil {
return err
}
// Lookup database from configuration file by path.
if path, err := expand(fs.Arg(0)); err != nil {
return err
} else if dbc := config.DBConfig(path); dbc == nil {
return fmt.Errorf("database not found in config: %s", path)
} else if db, err = NewDBFromConfig(dbc); err != nil {
return err
}
// Filter by replica, if specified.
if *replicaName != "" {
if r = db.Replica(*replicaName); r == nil {
return fmt.Errorf("replica %q not found for database %q", *replicaName, db.Path())
}
}
// Determine last time database or WAL was updated.
updatedAt, err := db.UpdatedAt()
if err != nil {
if dbUpdatedAt, err = db.UpdatedAt(); err != nil {
return err
}
}
// Iterate over each replica in the database.
for _, r := range db.Replicas {
generations, err := r.Generations(ctx)
var replicas []*litestream.Replica
if r != nil {
replicas = []*litestream.Replica{r}
} else {
replicas = db.Replicas
}
// List each generation.
w := tabwriter.NewWriter(os.Stdout, 0, 8, 2, ' ', 0)
defer w.Flush()
fmt.Fprintln(w, "name\tgeneration\tlag\tstart\tend")
for _, r := range replicas {
generations, err := r.Client.Generations(ctx)
if err != nil {
r.Logger().Error("cannot list generations", "error", err)
continue
}
// Iterate over each generation for the replica.
for _, generation := range generations {
createdAt, updatedAt, err := r.GenerationTimeBounds(ctx, generation)
if err != nil {
log.Printf("%s: cannot list generations: %s", r.Name(), err)
r.Logger().Error("cannot determine generation time bounds", "error", err)
continue
}
// Iterate over each generation for the replica.
for _, generation := range generations {
stats, err := r.GenerationStats(ctx, generation)
if err != nil {
log.Printf("%s: cannot find generation stats: %s", r.Name(), err)
continue
}
fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\n",
db.Path(),
r.Name(),
generation,
truncateDuration(stats.UpdatedAt.Sub(updatedAt)).String(),
stats.CreatedAt.Format(time.RFC3339),
stats.UpdatedAt.Format(time.RFC3339),
)
}
fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n",
r.Name(),
generation,
truncateDuration(dbUpdatedAt.Sub(updatedAt)).String(),
createdAt.Format(time.RFC3339),
updatedAt.Format(time.RFC3339),
)
}
}
w.Flush()
return nil
}
// Usage prints the help message to STDOUT.
func (c *GenerationsCommand) Usage() {
fmt.Printf(`
The generations command lists all generations across all replicas along with
stats about their lag behind the primary database and the time range they cover.
The generations command lists all generations for a database or replica. It also
lists stats about their lag behind the primary database and the time range they
cover.
Usage:
litestream generations [arguments] DB
litestream generations [arguments] DB_PATH
litestream generations [arguments] REPLICA_URL
Arguments:
-config PATH
Specifies the configuration file. Defaults to %s
Specifies the configuration file.
Defaults to %s
-no-expand-env
Disables environment variable expansion in configuration file.
-replica NAME
Optional, filters by replica.
`[1:],
DefaultConfigPath,
DefaultConfigPath(),
)
}
func truncateDuration(d time.Duration) time.Duration {
if d > time.Hour {
return d.Truncate(time.Hour)
} else if d > time.Minute {
return d.Truncate(time.Minute)
} else if d > time.Second {
if d < 0 {
if d < -10*time.Second {
return d.Truncate(time.Second)
} else if d < -time.Second {
return d.Truncate(time.Second / 10)
} else if d < -time.Millisecond {
return d.Truncate(time.Millisecond)
} else if d < -time.Microsecond {
return d.Truncate(time.Microsecond)
}
return d
}
if d > 10*time.Second {
return d.Truncate(time.Second)
} else if d > time.Second {
return d.Truncate(time.Second / 10)
} else if d > time.Millisecond {
return d.Truncate(time.Millisecond)
} else if d > time.Microsecond {


@@ -2,16 +2,29 @@ package main
import (
"context"
"errors"
"flag"
"fmt"
"io/ioutil"
"log"
"log/slog"
"net/url"
"os"
"os/user"
"path"
"path/filepath"
"regexp"
"strconv"
"strings"
"time"
"filippo.io/age"
"github.com/benbjohnson/litestream"
"github.com/benbjohnson/litestream/abs"
"github.com/benbjohnson/litestream/file"
"github.com/benbjohnson/litestream/gcs"
"github.com/benbjohnson/litestream/s3"
"github.com/benbjohnson/litestream/sftp"
_ "github.com/mattn/go-sqlite3"
"gopkg.in/yaml.v2"
)
@@ -20,42 +33,98 @@ var (
Version = "(development build)"
)
// DefaultConfigPath is the default configuration path.
const DefaultConfigPath = "/etc/litestream.yml"
// errStop is a terminal error indicating the program should quit.
var errStop = errors.New("stop")
func main() {
log.SetFlags(0)
m := NewMain()
if err := m.Run(context.Background(), os.Args[1:]); err == flag.ErrHelp {
if err := m.Run(context.Background(), os.Args[1:]); err == flag.ErrHelp || err == errStop {
os.Exit(1)
} else if err != nil {
fmt.Fprintln(os.Stderr, err)
slog.Error("failed to run", "error", err)
os.Exit(1)
}
}
// Main represents the main program execution.
type Main struct{}
// NewMain returns a new instance of Main.
func NewMain() *Main {
return &Main{}
}
// Run executes the program.
func (m *Main) Run(ctx context.Context, args []string) (err error) {
// Execute replication command if running as a Windows service.
if isService, err := isWindowsService(); err != nil {
return err
} else if isService {
return runWindowsService(ctx)
}
// Copy "LITESTEAM" environment credentials.
applyLitestreamEnv()
// Extract command name.
var cmd string
if len(args) > 0 {
cmd, args = args[0], args[1:]
}
switch cmd {
case "databases":
return (&DatabasesCommand{}).Run(ctx, args)
case "generations":
return (&GenerationsCommand{}).Run(ctx, args)
case "replicate":
return (&ReplicateCommand{}).Run(ctx, args)
c := NewReplicateCommand()
if err := c.ParseFlags(ctx, args); err != nil {
return err
}
// Setup signal handler.
signalCh := signalChan()
if err := c.Run(); err != nil {
return err
}
// Wait for signal to stop program.
select {
case err = <-c.execCh:
slog.Info("subprocess exited, litestream shutting down")
case sig := <-signalCh:
slog.Info("signal received, litestream shutting down")
if c.cmd != nil {
slog.Info("sending signal to exec process")
if err := c.cmd.Process.Signal(sig); err != nil {
return fmt.Errorf("cannot signal exec process: %w", err)
}
slog.Info("waiting for exec process to close")
if err := <-c.execCh; err != nil && !strings.HasPrefix(err.Error(), "signal:") {
return fmt.Errorf("cannot wait for exec process: %w", err)
}
}
}
// Gracefully close.
if e := c.Close(); e != nil && err == nil {
err = e
}
slog.Info("litestream shut down")
return err
case "restore":
return (&RestoreCommand{}).Run(ctx, args)
case "snapshots":
return (&SnapshotsCommand{}).Run(ctx, args)
case "version":
return (&VersionCommand{}).Run(ctx, args)
case "wal":
return (&WALCommand{}).Run(ctx, args)
default:
if cmd == "" || cmd == "help" || strings.HasPrefix(cmd, "-") {
m.Usage()
@@ -65,6 +134,7 @@ func (m *Main) Run(ctx context.Context, args []string) (err error) {
}
}
// Usage prints the help screen to STDOUT.
func (m *Main) Usage() {
fmt.Println(`
litestream is a tool for replicating SQLite databases.
@@ -75,16 +145,55 @@ Usage:
The commands are:
generations list available generations across all dbs & replicas
databases list databases specified in config file
generations list available generations for a database
replicate runs a server to replicate databases
restore recovers database backup from a replica
version prints the version
snapshots list available snapshots for a database
version prints the binary version
wal list available WAL files for a database
`[1:])
}
// Config represents a configuration file for the litestream daemon.
type Config struct {
DBs []*DBConfig `yaml:"databases"`
// Bind address for serving metrics.
Addr string `yaml:"addr"`
// List of databases to manage.
DBs []*DBConfig `yaml:"dbs"`
// Subcommand to execute during replication.
// Litestream will shut down when the subcommand exits.
Exec string `yaml:"exec"`
// Global S3 settings
AccessKeyID string `yaml:"access-key-id"`
SecretAccessKey string `yaml:"secret-access-key"`
// Logging
Logging LoggingConfig `yaml:"logging"`
}
// LoggingConfig configures logging.
type LoggingConfig struct {
Level string `yaml:"level"`
Type string `yaml:"type"`
Stderr bool `yaml:"stderr"`
}
// propagateGlobalSettings copies global S3 settings to replica configs.
func (c *Config) propagateGlobalSettings() {
for _, dbc := range c.DBs {
for _, rc := range dbc.Replicas {
if rc.AccessKeyID == "" {
rc.AccessKeyID = c.AccessKeyID
}
if rc.SecretAccessKey == "" {
rc.SecretAccessKey = c.SecretAccessKey
}
}
}
}
// DefaultConfig returns a new instance of Config with defaults set.
@@ -92,6 +201,7 @@ func DefaultConfig() Config {
return Config{}
}
// DBConfig returns database configuration by path.
func (c *Config) DBConfig(path string) *DBConfig {
for _, dbConfig := range c.DBs {
if dbConfig.Path == path {
@@ -102,54 +212,118 @@ func (c *Config) DBConfig(path string) *DBConfig {
}
// ReadConfigFile unmarshals config from filename. Expands path if needed.
func ReadConfigFile(filename string) (Config, error) {
// If expandEnv is true then environment variables are expanded in the config.
func ReadConfigFile(filename string, expandEnv bool) (_ Config, err error) {
config := DefaultConfig()
// Expand filename, if necessary.
if prefix := "~" + string(os.PathSeparator); strings.HasPrefix(filename, prefix) {
u, err := user.Current()
if err != nil {
return config, err
} else if u.HomeDir == "" {
return config, fmt.Errorf("home directory unset")
}
filename = filepath.Join(u.HomeDir, strings.TrimPrefix(filename, prefix))
filename, err = expand(filename)
if err != nil {
return config, err
}
// Read & deserialize configuration.
if buf, err := ioutil.ReadFile(filename); os.IsNotExist(err) {
// Read configuration.
buf, err := ioutil.ReadFile(filename)
if os.IsNotExist(err) {
return config, fmt.Errorf("config file not found: %s", filename)
} else if err != nil {
return config, err
} else if err := yaml.Unmarshal(buf, &config); err != nil {
}
// Expand environment variables, if enabled.
if expandEnv {
buf = []byte(os.ExpandEnv(string(buf)))
}
if err := yaml.Unmarshal(buf, &config); err != nil {
return config, err
}
// Normalize paths.
for _, dbConfig := range config.DBs {
if dbConfig.Path, err = expand(dbConfig.Path); err != nil {
return config, err
}
}
// Propagate settings from global config to replica configs.
config.propagateGlobalSettings()
// Configure logging.
logOutput := os.Stdout
if config.Logging.Stderr {
logOutput = os.Stderr
}
logOptions := slog.HandlerOptions{
Level: slog.LevelInfo,
}
switch strings.ToUpper(config.Logging.Level) {
case "DEBUG":
logOptions.Level = slog.LevelDebug
case "WARN", "WARNING":
logOptions.Level = slog.LevelWarn
case "ERROR":
logOptions.Level = slog.LevelError
}
var logHandler slog.Handler
switch config.Logging.Type {
case "json":
logHandler = slog.NewJSONHandler(logOutput, &logOptions)
case "text", "":
logHandler = slog.NewTextHandler(logOutput, &logOptions)
}
// Set global default logger.
slog.SetDefault(slog.New(logHandler))
return config, nil
}
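
A hedged sketch of the corresponding configuration values (field names taken from LoggingConfig above): debug-level JSON logs written to stderr.

```go
cfg := DefaultConfig()
cfg.Logging = LoggingConfig{
	Level:  "debug", // case-insensitive; unrecognized values fall back to info
	Type:   "json",  // "text" (or empty) selects the text handler
	Stderr: true,    // default output is stdout
}
```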
// DBConfig represents the configuration for a single database.
type DBConfig struct {
Path string `yaml:"path"`
Path string `yaml:"path"`
MetaPath *string `yaml:"meta-path"`
MonitorInterval *time.Duration `yaml:"monitor-interval"`
CheckpointInterval *time.Duration `yaml:"checkpoint-interval"`
MinCheckpointPageN *int `yaml:"min-checkpoint-page-count"`
MaxCheckpointPageN *int `yaml:"max-checkpoint-page-count"`
Replicas []*ReplicaConfig `yaml:"replicas"`
}
type ReplicaConfig struct {
Type string `yaml:"type"` // "file", "s3"
Name string `yaml:"name"` // name of replica, optional.
Path string `yaml:"path"` // used for file replicas
}
// NewDBFromConfig instantiates a DB based on a configuration.
func NewDBFromConfig(dbc *DBConfig) (*litestream.DB, error) {
path, err := expand(dbc.Path)
if err != nil {
return nil, err
}
func registerConfigFlag(fs *flag.FlagSet, p *string) {
fs.StringVar(p, "config", DefaultConfigPath, "config path")
}
// newDBFromConfig instantiates a DB based on a configuration.
func newDBFromConfig(config *DBConfig) (*litestream.DB, error) {
// Initialize database with given path.
db := litestream.NewDB(config.Path)
db := litestream.NewDB(path)
// Override default database settings if specified in configuration.
if dbc.MetaPath != nil {
db.SetMetaPath(*dbc.MetaPath)
}
if dbc.MonitorInterval != nil {
db.MonitorInterval = *dbc.MonitorInterval
}
if dbc.CheckpointInterval != nil {
db.CheckpointInterval = *dbc.CheckpointInterval
}
if dbc.MinCheckpointPageN != nil {
db.MinCheckpointPageN = *dbc.MinCheckpointPageN
}
if dbc.MaxCheckpointPageN != nil {
db.MaxCheckpointPageN = *dbc.MaxCheckpointPageN
}
// Instantiate and attach replicas.
for _, rconfig := range config.Replicas {
r, err := newReplicaFromConfig(db, rconfig)
for _, rc := range dbc.Replicas {
r, err := NewReplicaFromConfig(rc, db)
if err != nil {
return nil, err
}
@@ -159,20 +333,445 @@ func newDBFromConfig(config *DBConfig) (*litestream.DB, error) {
return db, nil
}
// newReplicaFromConfig instantiates a replica for a DB based on a config.
func newReplicaFromConfig(db *litestream.DB, config *ReplicaConfig) (litestream.Replica, error) {
switch config.Type {
case "", "file":
return newFileReplicaFromConfig(db, config)
// ReplicaConfig represents the configuration for a single replica in a database.
type ReplicaConfig struct {
Type string `yaml:"type"` // "file", "s3"
Name string `yaml:"name"` // name of replica, optional.
Path string `yaml:"path"`
URL string `yaml:"url"`
Retention *time.Duration `yaml:"retention"`
RetentionCheckInterval *time.Duration `yaml:"retention-check-interval"`
SyncInterval *time.Duration `yaml:"sync-interval"`
SnapshotInterval *time.Duration `yaml:"snapshot-interval"`
ValidationInterval *time.Duration `yaml:"validation-interval"`
// S3 settings
AccessKeyID string `yaml:"access-key-id"`
SecretAccessKey string `yaml:"secret-access-key"`
Region string `yaml:"region"`
Bucket string `yaml:"bucket"`
Endpoint string `yaml:"endpoint"`
ForcePathStyle *bool `yaml:"force-path-style"`
SkipVerify bool `yaml:"skip-verify"`
// ABS settings
AccountName string `yaml:"account-name"`
AccountKey string `yaml:"account-key"`
// SFTP settings
Host string `yaml:"host"`
User string `yaml:"user"`
Password string `yaml:"password"`
KeyPath string `yaml:"key-path"`
// Encryption identities and recipients
Age struct {
Identities []string `yaml:"identities"`
Recipients []string `yaml:"recipients"`
} `yaml:"age"`
}
// NewReplicaFromConfig instantiates a replica for a DB based on a config.
func NewReplicaFromConfig(c *ReplicaConfig, db *litestream.DB) (_ *litestream.Replica, err error) {
// Ensure user did not specify URL in path.
if isURL(c.Path) {
return nil, fmt.Errorf("replica path cannot be a url, please use the 'url' field instead: %s", c.Path)
}
// Build replica.
r := litestream.NewReplica(db, c.Name)
if v := c.Retention; v != nil {
r.Retention = *v
}
if v := c.RetentionCheckInterval; v != nil {
r.RetentionCheckInterval = *v
}
if v := c.SyncInterval; v != nil {
r.SyncInterval = *v
}
if v := c.SnapshotInterval; v != nil {
r.SnapshotInterval = *v
}
if v := c.ValidationInterval; v != nil {
r.ValidationInterval = *v
}
for _, str := range c.Age.Identities {
identities, err := age.ParseIdentities(strings.NewReader(str))
if err != nil {
return nil, err
}
r.AgeIdentities = append(r.AgeIdentities, identities...)
}
for _, str := range c.Age.Recipients {
recipients, err := age.ParseRecipients(strings.NewReader(str))
if err != nil {
return nil, err
}
r.AgeRecipients = append(r.AgeRecipients, recipients...)
}
// Build and set client on replica.
switch c.ReplicaType() {
case "file":
if r.Client, err = newFileReplicaClientFromConfig(c, r); err != nil {
return nil, err
}
case "s3":
if r.Client, err = newS3ReplicaClientFromConfig(c, r); err != nil {
return nil, err
}
case "gcs":
if r.Client, err = newGCSReplicaClientFromConfig(c, r); err != nil {
return nil, err
}
case "abs":
if r.Client, err = newABSReplicaClientFromConfig(c, r); err != nil {
return nil, err
}
case "sftp":
if r.Client, err = newSFTPReplicaClientFromConfig(c, r); err != nil {
return nil, err
}
default:
return nil, fmt.Errorf("unknown replica type in config: %q", config.Type)
return nil, fmt.Errorf("unknown replica type in config: %q", c.Type)
}
return r, nil
}
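
As exercised by the tests further down, a replica can be built from a bare URL and a nil DB; the URL scheme selects the client type. A minimal sketch:

```go
r, err := NewReplicaFromConfig(&ReplicaConfig{URL: "s3://mybkt/db"}, nil)
if err != nil {
	return err
}
fmt.Println(r.Client.Type()) // "s3"
```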
// newFileReplicaClientFromConfig returns a new instance of file.ReplicaClient built from config.
func newFileReplicaClientFromConfig(c *ReplicaConfig, r *litestream.Replica) (_ *file.ReplicaClient, err error) {
// Ensure URL & path are not both specified.
if c.URL != "" && c.Path != "" {
return nil, fmt.Errorf("cannot specify url & path for file replica")
}
// Parse path from URL, if specified.
path := c.Path
if c.URL != "" {
if _, _, path, err = ParseReplicaURL(c.URL); err != nil {
return nil, err
}
}
// Ensure path is set explicitly or derived from URL field.
if path == "" {
return nil, fmt.Errorf("file replica path required")
}
// Expand home prefix and return absolute path.
if path, err = expand(path); err != nil {
return nil, err
}
// Instantiate the file replica client.
client := file.NewReplicaClient(path)
client.Replica = r
return client, nil
}
// newS3ReplicaClientFromConfig returns a new instance of s3.ReplicaClient built from config.
func newS3ReplicaClientFromConfig(c *ReplicaConfig, r *litestream.Replica) (_ *s3.ReplicaClient, err error) {
// Ensure URL & constituent parts are not both specified.
if c.URL != "" && c.Path != "" {
return nil, fmt.Errorf("cannot specify url & path for s3 replica")
} else if c.URL != "" && c.Bucket != "" {
return nil, fmt.Errorf("cannot specify url & bucket for s3 replica")
}
bucket, path := c.Bucket, c.Path
region, endpoint, skipVerify := c.Region, c.Endpoint, c.SkipVerify
// Use path style if an endpoint is explicitly set. This works because the
// only service that does not use path style is AWS, which does not use an endpoint.
forcePathStyle := (endpoint != "")
if v := c.ForcePathStyle; v != nil {
forcePathStyle = *v
}
// Apply settings from URL, if specified.
if c.URL != "" {
_, host, upath, err := ParseReplicaURL(c.URL)
if err != nil {
return nil, err
}
ubucket, uregion, uendpoint, uforcePathStyle := s3.ParseHost(host)
// Only apply URL parts to fields that have not been overridden.
if path == "" {
path = upath
}
if bucket == "" {
bucket = ubucket
}
if region == "" {
region = uregion
}
if endpoint == "" {
endpoint = uendpoint
}
if !forcePathStyle {
forcePathStyle = uforcePathStyle
}
}
// Ensure required settings are set.
if bucket == "" {
return nil, fmt.Errorf("bucket required for s3 replica")
}
// Build replica.
client := s3.NewReplicaClient()
client.AccessKeyID = c.AccessKeyID
client.SecretAccessKey = c.SecretAccessKey
client.Bucket = bucket
client.Path = path
client.Region = region
client.Endpoint = endpoint
client.ForcePathStyle = forcePathStyle
client.SkipVerify = skipVerify
return client, nil
}
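
Grounded in the MinIO test case below: how s3.ParseHost splits a combined host into its parts when the URL form is used.

```go
bucket, region, endpoint, pathStyle := s3.ParseHost("foo.localhost:9000")
fmt.Println(bucket, region, endpoint, pathStyle)
// "foo" "us-east-1" "http://localhost:9000" true — matching TestNewS3ReplicaFromConfig/MinIO.
```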
// newGCSReplicaClientFromConfig returns a new instance of gcs.ReplicaClient built from config.
func newGCSReplicaClientFromConfig(c *ReplicaConfig, r *litestream.Replica) (_ *gcs.ReplicaClient, err error) {
// Ensure URL & constituent parts are not both specified.
if c.URL != "" && c.Path != "" {
return nil, fmt.Errorf("cannot specify url & path for gcs replica")
} else if c.URL != "" && c.Bucket != "" {
return nil, fmt.Errorf("cannot specify url & bucket for gcs replica")
}
bucket, path := c.Bucket, c.Path
// Apply settings from URL, if specified.
if c.URL != "" {
_, uhost, upath, err := ParseReplicaURL(c.URL)
if err != nil {
return nil, err
}
// Only apply URL parts to fields that have not been overridden.
if path == "" {
path = upath
}
if bucket == "" {
bucket = uhost
}
}
// Ensure required settings are set.
if bucket == "" {
return nil, fmt.Errorf("bucket required for gcs replica")
}
// Build replica.
client := gcs.NewReplicaClient()
client.Bucket = bucket
client.Path = path
return client, nil
}
// newABSReplicaClientFromConfig returns a new instance of abs.ReplicaClient built from config.
func newABSReplicaClientFromConfig(c *ReplicaConfig, r *litestream.Replica) (_ *abs.ReplicaClient, err error) {
// Ensure URL & constituent parts are not both specified.
if c.URL != "" && c.Path != "" {
return nil, fmt.Errorf("cannot specify url & path for abs replica")
} else if c.URL != "" && c.Bucket != "" {
return nil, fmt.Errorf("cannot specify url & bucket for abs replica")
}
// Build replica.
client := abs.NewReplicaClient()
client.AccountName = c.AccountName
client.AccountKey = c.AccountKey
client.Bucket = c.Bucket
client.Path = c.Path
client.Endpoint = c.Endpoint
// Apply settings from URL, if specified.
if c.URL != "" {
u, err := url.Parse(c.URL)
if err != nil {
return nil, err
}
if client.AccountName == "" && u.User != nil {
client.AccountName = u.User.Username()
}
if client.Bucket == "" {
client.Bucket = u.Host
}
if client.Path == "" {
client.Path = strings.TrimPrefix(path.Clean(u.Path), "/")
}
}
// Ensure required settings are set.
if client.Bucket == "" {
return nil, fmt.Errorf("bucket required for abs replica")
}
return client, nil
}
// newSFTPReplicaClientFromConfig returns a new instance of sftp.ReplicaClient built from config.
func newSFTPReplicaClientFromConfig(c *ReplicaConfig, r *litestream.Replica) (_ *sftp.ReplicaClient, err error) {
// Ensure URL & constituent parts are not both specified.
if c.URL != "" && c.Path != "" {
return nil, fmt.Errorf("cannot specify url & path for sftp replica")
} else if c.URL != "" && c.Host != "" {
return nil, fmt.Errorf("cannot specify url & host for sftp replica")
}
host, user, password, path := c.Host, c.User, c.Password, c.Path
// Apply settings from URL, if specified.
if c.URL != "" {
u, err := url.Parse(c.URL)
if err != nil {
return nil, err
}
// Only apply URL parts to fields that have not been overridden.
if host == "" {
host = u.Host
}
if user == "" && u.User != nil {
user = u.User.Username()
}
if password == "" && u.User != nil {
password, _ = u.User.Password()
}
if path == "" {
path = u.Path
}
}
// Ensure required settings are set.
if host == "" {
return nil, fmt.Errorf("host required for sftp replica")
} else if user == "" {
return nil, fmt.Errorf("user required for sftp replica")
}
// Build replica.
client := sftp.NewReplicaClient()
client.Host = host
client.User = user
client.Password = password
client.Path = path
client.KeyPath = c.KeyPath
return client, nil
}
// applyLitestreamEnv copies "LITESTREAM"-prefixed environment variables to
// their AWS counterparts, as the "AWS" prefix can be confusing when using a
// non-AWS S3-compatible service.
func applyLitestreamEnv() {
if v, ok := os.LookupEnv("LITESTREAM_ACCESS_KEY_ID"); ok {
if _, ok := os.LookupEnv("AWS_ACCESS_KEY_ID"); !ok {
os.Setenv("AWS_ACCESS_KEY_ID", v)
}
}
if v, ok := os.LookupEnv("LITESTREAM_SECRET_ACCESS_KEY"); ok {
if _, ok := os.LookupEnv("AWS_SECRET_ACCESS_KEY"); !ok {
os.Setenv("AWS_SECRET_ACCESS_KEY", v)
}
}
}
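
A hedged sketch of the precedence this gives: LITESTREAM_* values are copied only when the AWS_* variables are unset, so explicit AWS settings always win.

```go
os.Setenv("LITESTREAM_ACCESS_KEY_ID", "minio") // hypothetical value
os.Unsetenv("AWS_ACCESS_KEY_ID")
applyLitestreamEnv()
fmt.Println(os.Getenv("AWS_ACCESS_KEY_ID")) // "minio"
```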
// newFileReplicaFromConfig returns a new instance of FileReplica build from config.
func newFileReplicaFromConfig(db *litestream.DB, config *ReplicaConfig) (*litestream.FileReplica, error) {
if config.Path == "" {
return nil, fmt.Errorf("file replica path require for db %q", db.Path())
// ParseReplicaURL parses a replica URL.
func ParseReplicaURL(s string) (scheme, host, urlpath string, err error) {
u, err := url.Parse(s)
if err != nil {
return "", "", "", err
}
switch u.Scheme {
case "file":
scheme, u.Scheme = u.Scheme, ""
return scheme, "", path.Clean(u.String()), nil
case "":
return u.Scheme, u.Host, u.Path, fmt.Errorf("replica url scheme required: %s", s)
default:
return u.Scheme, u.Host, strings.TrimPrefix(path.Clean(u.Path), "/"), nil
}
return litestream.NewFileReplica(db, config.Name, config.Path), nil
}
// isURL returns true if s can be parsed and has a scheme.
func isURL(s string) bool {
return regexp.MustCompile(`^\w+:\/\/`).MatchString(s)
}
// ReplicaType returns the type based on the type field or extracted from the URL.
func (c *ReplicaConfig) ReplicaType() string {
scheme, _, _, _ := ParseReplicaURL(c.URL)
if scheme != "" {
return scheme
} else if c.Type != "" {
return c.Type
}
return "file"
}
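
Hedged examples of the resolution order: a URL scheme wins, then the explicit type field, then "file" as the default.

```go
fmt.Println((&ReplicaConfig{URL: "gcs://foo/bar"}).ReplicaType()) // "gcs"
fmt.Println((&ReplicaConfig{Type: "s3"}).ReplicaType())           // "s3"
fmt.Println((&ReplicaConfig{Path: "/backup"}).ReplicaType())      // "file"
```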
// DefaultConfigPath returns the default config path.
func DefaultConfigPath() string {
if v := os.Getenv("LITESTREAM_CONFIG"); v != "" {
return v
}
return defaultConfigPath
}
func registerConfigFlag(fs *flag.FlagSet) (configPath *string, noExpandEnv *bool) {
return fs.String("config", "", "config path"),
fs.Bool("no-expand-env", false, "do not expand env vars in config")
}
// expand returns an absolute path for s.
func expand(s string) (string, error) {
// Just expand to absolute path if there is no home directory prefix.
prefix := "~" + string(os.PathSeparator)
if s != "~" && !strings.HasPrefix(s, prefix) {
return filepath.Abs(s)
}
// Look up home directory.
u, err := user.Current()
if err != nil {
return "", err
} else if u.HomeDir == "" {
return "", fmt.Errorf("cannot expand path %s, no home directory available", s)
}
// Return path with tilde replaced by the home directory.
if s == "~" {
return u.HomeDir, nil
}
return filepath.Join(u.HomeDir, strings.TrimPrefix(s, prefix)), nil
}
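
A brief sketch of the expansion rules (the home directory shown is hypothetical):

```go
p, _ := expand("~/data/app.db") // e.g. "/home/alice/data/app.db"
q, _ := expand("data/app.db")   // absolute form of the working-directory path
fmt.Println(p, q)
```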
// indexVar allows the flag package to parse index flags as 4-byte hexadecimal values.
type indexVar int
// Ensure type implements interface.
var _ flag.Value = (*indexVar)(nil)
// String returns an 8-character hexadecimal value.
func (v *indexVar) String() string {
return fmt.Sprintf("%08x", int(*v))
}
// Set parses s into an integer from a hexadecimal value.
func (v *indexVar) Set(s string) error {
i, err := strconv.ParseInt(s, 16, 32)
if err != nil {
return fmt.Errorf("invalid hexadecimal format")
}
*v = indexVar(i)
return nil
}
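
A minimal usage sketch: indexVar lets the -index flag accept the 8-character hex WAL indexes that the other commands print.

```go
var idx indexVar
fs := flag.NewFlagSet("example", flag.ContinueOnError)
fs.Var(&idx, "index", "wal index")
_ = fs.Parse([]string{"-index", "00000233"})
fmt.Println(idx.String()) // "00000233"
```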


@@ -0,0 +1,26 @@
//go:build !windows
package main
import (
"context"
"os"
"os/signal"
"syscall"
)
const defaultConfigPath = "/etc/litestream.yml"
func isWindowsService() (bool, error) {
return false, nil
}
func runWindowsService(ctx context.Context) error {
panic("cannot run windows service as unix process")
}
func signalChan() <-chan os.Signal {
ch := make(chan os.Signal, 2)
signal.Notify(ch, syscall.SIGINT, syscall.SIGTERM)
return ch
}

cmd/litestream/main_test.go (new file, 177 lines)

@@ -0,0 +1,177 @@
package main_test
import (
"io/ioutil"
"os"
"path/filepath"
"testing"
main "github.com/benbjohnson/litestream/cmd/litestream"
"github.com/benbjohnson/litestream/file"
"github.com/benbjohnson/litestream/gcs"
"github.com/benbjohnson/litestream/s3"
)
func TestReadConfigFile(t *testing.T) {
// Ensure global AWS settings are propagated down to replica configurations.
t.Run("PropagateGlobalSettings", func(t *testing.T) {
filename := filepath.Join(t.TempDir(), "litestream.yml")
if err := ioutil.WriteFile(filename, []byte(`
access-key-id: XXX
secret-access-key: YYY
dbs:
- path: /path/to/db
replicas:
- url: s3://foo/bar
`[1:]), 0666); err != nil {
t.Fatal(err)
}
config, err := main.ReadConfigFile(filename, true)
if err != nil {
t.Fatal(err)
} else if got, want := config.AccessKeyID, `XXX`; got != want {
t.Fatalf("AccessKeyID=%v, want %v", got, want)
} else if got, want := config.SecretAccessKey, `YYY`; got != want {
t.Fatalf("SecretAccessKey=%v, want %v", got, want)
} else if got, want := config.DBs[0].Replicas[0].AccessKeyID, `XXX`; got != want {
t.Fatalf("Replica.AccessKeyID=%v, want %v", got, want)
} else if got, want := config.DBs[0].Replicas[0].SecretAccessKey, `YYY`; got != want {
t.Fatalf("Replica.SecretAccessKey=%v, want %v", got, want)
}
})
// Ensure environment variables are expanded.
t.Run("ExpandEnv", func(t *testing.T) {
os.Setenv("LITESTREAM_TEST_0129380", "/path/to/db")
os.Setenv("LITESTREAM_TEST_1872363", "s3://foo/bar")
filename := filepath.Join(t.TempDir(), "litestream.yml")
if err := ioutil.WriteFile(filename, []byte(`
dbs:
- path: $LITESTREAM_TEST_0129380
replicas:
- url: ${LITESTREAM_TEST_1872363}
- url: ${LITESTREAM_TEST_NO_SUCH_ENV}
`[1:]), 0666); err != nil {
t.Fatal(err)
}
config, err := main.ReadConfigFile(filename, true)
if err != nil {
t.Fatal(err)
} else if got, want := config.DBs[0].Path, `/path/to/db`; got != want {
t.Fatalf("DB.Path=%v, want %v", got, want)
} else if got, want := config.DBs[0].Replicas[0].URL, `s3://foo/bar`; got != want {
t.Fatalf("Replica[0].URL=%v, want %v", got, want)
} else if got, want := config.DBs[0].Replicas[1].URL, ``; got != want {
t.Fatalf("Replica[1].URL=%v, want %v", got, want)
}
})
// Ensure environment variables are not expanded.
t.Run("NoExpandEnv", func(t *testing.T) {
os.Setenv("LITESTREAM_TEST_9847533", "s3://foo/bar")
filename := filepath.Join(t.TempDir(), "litestream.yml")
if err := ioutil.WriteFile(filename, []byte(`
dbs:
- path: /path/to/db
replicas:
- url: ${LITESTREAM_TEST_9847533}
`[1:]), 0666); err != nil {
t.Fatal(err)
}
config, err := main.ReadConfigFile(filename, false)
if err != nil {
t.Fatal(err)
} else if got, want := config.DBs[0].Replicas[0].URL, `${LITESTREAM_TEST_9847533}`; got != want {
t.Fatalf("Replica.URL=%v, want %v", got, want)
}
})
}
func TestNewFileReplicaFromConfig(t *testing.T) {
r, err := main.NewReplicaFromConfig(&main.ReplicaConfig{Path: "/foo"}, nil)
if err != nil {
t.Fatal(err)
} else if client, ok := r.Client.(*file.ReplicaClient); !ok {
t.Fatal("unexpected replica type")
} else if got, want := client.Path(), "/foo"; got != want {
t.Fatalf("Path=%s, want %s", got, want)
}
}
func TestNewS3ReplicaFromConfig(t *testing.T) {
t.Run("URL", func(t *testing.T) {
r, err := main.NewReplicaFromConfig(&main.ReplicaConfig{URL: "s3://foo/bar"}, nil)
if err != nil {
t.Fatal(err)
} else if client, ok := r.Client.(*s3.ReplicaClient); !ok {
t.Fatal("unexpected replica type")
} else if got, want := client.Bucket, "foo"; got != want {
t.Fatalf("Bucket=%s, want %s", got, want)
} else if got, want := client.Path, "bar"; got != want {
t.Fatalf("Path=%s, want %s", got, want)
} else if got, want := client.Region, ""; got != want {
t.Fatalf("Region=%s, want %s", got, want)
} else if got, want := client.Endpoint, ""; got != want {
t.Fatalf("Endpoint=%s, want %s", got, want)
} else if got, want := client.ForcePathStyle, false; got != want {
t.Fatalf("ForcePathStyle=%v, want %v", got, want)
}
})
t.Run("MinIO", func(t *testing.T) {
r, err := main.NewReplicaFromConfig(&main.ReplicaConfig{URL: "s3://foo.localhost:9000/bar"}, nil)
if err != nil {
t.Fatal(err)
} else if client, ok := r.Client.(*s3.ReplicaClient); !ok {
t.Fatal("unexpected replica type")
} else if got, want := client.Bucket, "foo"; got != want {
t.Fatalf("Bucket=%s, want %s", got, want)
} else if got, want := client.Path, "bar"; got != want {
t.Fatalf("Path=%s, want %s", got, want)
} else if got, want := client.Region, "us-east-1"; got != want {
t.Fatalf("Region=%s, want %s", got, want)
} else if got, want := client.Endpoint, "http://localhost:9000"; got != want {
t.Fatalf("Endpoint=%s, want %s", got, want)
} else if got, want := client.ForcePathStyle, true; got != want {
t.Fatalf("ForcePathStyle=%v, want %v", got, want)
}
})
t.Run("Backblaze", func(t *testing.T) {
r, err := main.NewReplicaFromConfig(&main.ReplicaConfig{URL: "s3://foo.s3.us-west-000.backblazeb2.com/bar"}, nil)
if err != nil {
t.Fatal(err)
} else if client, ok := r.Client.(*s3.ReplicaClient); !ok {
t.Fatal("unexpected replica type")
} else if got, want := client.Bucket, "foo"; got != want {
t.Fatalf("Bucket=%s, want %s", got, want)
} else if got, want := client.Path, "bar"; got != want {
t.Fatalf("Path=%s, want %s", got, want)
} else if got, want := client.Region, "us-west-000"; got != want {
t.Fatalf("Region=%s, want %s", got, want)
} else if got, want := client.Endpoint, "https://s3.us-west-000.backblazeb2.com"; got != want {
t.Fatalf("Endpoint=%s, want %s", got, want)
} else if got, want := client.ForcePathStyle, true; got != want {
t.Fatalf("ForcePathStyle=%v, want %v", got, want)
}
})
}
func TestNewGCSReplicaFromConfig(t *testing.T) {
r, err := main.NewReplicaFromConfig(&main.ReplicaConfig{URL: "gcs://foo/bar"}, nil)
if err != nil {
t.Fatal(err)
} else if client, ok := r.Client.(*gcs.ReplicaClient); !ok {
t.Fatal("unexpected replica type")
} else if got, want := client.Bucket, "foo"; got != want {
t.Fatalf("Bucket=%s, want %s", got, want)
} else if got, want := client.Path, "bar"; got != want {
t.Fatalf("Path=%s, want %s", got, want)
}
}


@@ -0,0 +1,111 @@
//go:build windows
package main
import (
"context"
"io"
"os"
"os/signal"
"golang.org/x/sys/windows"
"golang.org/x/sys/windows/svc"
"golang.org/x/sys/windows/svc/eventlog"
)
const defaultConfigPath = `C:\Litestream\litestream.yml`
// serviceName is the Windows Service name.
const serviceName = "Litestream"
// isWindowsService returns true if currently executing within a Windows service.
func isWindowsService() (bool, error) {
return svc.IsWindowsService()
}
func runWindowsService(ctx context.Context) error {
// Attempt to install new log service. This will fail if already installed.
// We don't log the error because we don't have anywhere to log until we open the log.
_ = eventlog.InstallAsEventCreate(serviceName, eventlog.Error|eventlog.Warning|eventlog.Info)
elog, err := eventlog.Open(serviceName)
if err != nil {
return err
}
defer elog.Close()
// Set eventlog as log writer while running.
log.SetOutput((*eventlogWriter)(elog))
defer log.SetOutput(os.Stderr)
log.Print("Litestream service starting")
if err := svc.Run(serviceName, &windowsService{ctx: ctx}); err != nil {
return errStop
}
log.Print("Litestream service stopped")
return nil
}
// windowsService is an interface adapter for svc.Handler.
type windowsService struct {
ctx context.Context
}
func (s *windowsService) Execute(args []string, r <-chan svc.ChangeRequest, statusCh chan<- svc.Status) (svcSpecificEC bool, exitCode uint32) {
var err error
// Notify Windows that the service is starting up.
statusCh <- svc.Status{State: svc.StartPending}
// Instantiate replication command and load configuration.
c := NewReplicateCommand()
if c.Config, err = ReadConfigFile(DefaultConfigPath(), true); err != nil {
slog.Error("cannot load configuration", "error", err)
return true, 1
}
// Execute replication command.
if err := c.Run(s.ctx); err != nil {
slog.Error("cannot replicate", "error", err)
statusCh <- svc.Status{State: svc.StopPending}
return true, 2
}
// Notify Windows that the service is now running.
statusCh <- svc.Status{State: svc.Running, Accepts: svc.AcceptStop}
for {
select {
case req := <-r:
switch req.Cmd {
case svc.Stop:
c.Close()
statusCh <- svc.Status{State: svc.StopPending}
return false, windows.NO_ERROR
case svc.Interrogate:
statusCh <- req.CurrentStatus
default:
slog.Error("Litestream service received unexpected change request", "cmd", req.Cmd)
}
}
}
}
// Ensure implementation implements io.Writer interface.
var _ io.Writer = (*eventlogWriter)(nil)
// eventlogWriter is an adapter for using eventlog.Log as an io.Writer.
type eventlogWriter eventlog.Log
func (w *eventlogWriter) Write(p []byte) (n int, err error) {
elog := (*eventlog.Log)(w)
return 0, elog.Info(1, string(p))
}
func signalChan() <-chan os.Signal {
ch := make(chan os.Signal, 1)
signal.Notify(ch, os.Interrupt)
return ch
}


@@ -2,60 +2,98 @@ package main
import (
"context"
"errors"
"flag"
"fmt"
"log/slog"
"net"
"net/http"
_ "net/http/pprof"
"os"
"os/signal"
"os/exec"
"github.com/benbjohnson/litestream"
"github.com/benbjohnson/litestream/abs"
"github.com/benbjohnson/litestream/file"
"github.com/benbjohnson/litestream/gcs"
"github.com/benbjohnson/litestream/s3"
"github.com/benbjohnson/litestream/sftp"
"github.com/mattn/go-shellwords"
"github.com/prometheus/client_golang/prometheus/promhttp"
)
// ReplicateCommand represents a command that continuously replicates SQLite databases.
type ReplicateCommand struct {
ConfigPath string
Config Config
cmd *exec.Cmd // subcommand
execCh chan error // subcommand error channel
Config Config
// List of managed databases specified in the config.
DBs []*litestream.DB
}
func NewReplicateCommand() *ReplicateCommand {
return &ReplicateCommand{}
return &ReplicateCommand{
execCh: make(chan error),
}
}
// Run loads all databases specified in the configuration.
func (c *ReplicateCommand) Run(ctx context.Context, args []string) (err error) {
// ParseFlags parses the CLI flags and loads the configuration file.
func (c *ReplicateCommand) ParseFlags(ctx context.Context, args []string) (err error) {
fs := flag.NewFlagSet("litestream-replicate", flag.ContinueOnError)
registerConfigFlag(fs, &c.ConfigPath)
execFlag := fs.String("exec", "", "execute subcommand")
configPath, noExpandEnv := registerConfigFlag(fs)
fs.Usage = c.Usage
if err := fs.Parse(args); err != nil {
return err
}
// Load configuration.
if c.ConfigPath == "" {
return errors.New("-config required")
}
config, err := ReadConfigFile(c.ConfigPath)
if err != nil {
return err
// Load configuration or use CLI args to build db/replica.
if fs.NArg() == 1 {
return fmt.Errorf("must specify at least one replica URL for %s", fs.Arg(0))
} else if fs.NArg() > 1 {
if *configPath != "" {
return fmt.Errorf("cannot specify a replica URL and the -config flag")
}
dbConfig := &DBConfig{Path: fs.Arg(0)}
for _, u := range fs.Args()[1:] {
syncInterval := litestream.DefaultSyncInterval
dbConfig.Replicas = append(dbConfig.Replicas, &ReplicaConfig{
URL: u,
SyncInterval: &syncInterval,
})
}
c.Config.DBs = []*DBConfig{dbConfig}
} else {
if *configPath == "" {
*configPath = DefaultConfigPath()
}
if c.Config, err = ReadConfigFile(*configPath, !*noExpandEnv); err != nil {
return err
}
}
// Setup signal handler.
ctx, cancel := context.WithCancel(context.Background())
ch := make(chan os.Signal, 1)
signal.Notify(ch, os.Interrupt)
go func() { <-ch; cancel() }()
// Override config exec command, if specified.
if *execFlag != "" {
c.Config.Exec = *execFlag
}
return nil
}
// Run loads all databases specified in the configuration.
func (c *ReplicateCommand) Run() (err error) {
// Display version information.
fmt.Printf("litestream %s\n", Version)
slog.Info("litestream", "version", Version)
if len(config.DBs) == 0 {
return errors.New("configuration must specify at least one database")
// Setup databases.
if len(c.Config.DBs) == 0 {
slog.Error("no databases specified in configuration")
}
for _, dbConfig := range config.DBs {
db, err := newDBFromConfig(dbConfig)
for _, dbConfig := range c.Config.DBs {
db, err := NewDBFromConfig(dbConfig)
if err != nil {
return err
}
@@ -68,16 +106,60 @@ func (c *ReplicateCommand) Run(ctx context.Context, args []string) (err error) {
}
// Notify user that initialization is done.
fmt.Printf("Initialized with %d databases.\n", len(c.DBs))
for _, db := range c.DBs {
slog.Info("initialized db", "path", db.Path())
for _, r := range db.Replicas {
slog := slog.With("name", r.Name(), "type", r.Client.Type(), "sync-interval", r.SyncInterval)
switch client := r.Client.(type) {
case *file.ReplicaClient:
slog.Info("replicating to", "path", client.Path())
case *s3.ReplicaClient:
slog.Info("replicating to", "bucket", client.Bucket, "path", client.Path, "region", client.Region, "endpoint", client.Endpoint)
case *gcs.ReplicaClient:
slog.Info("replicating to", "bucket", client.Bucket, "path", client.Path)
case *abs.ReplicaClient:
slog.Info("replicating to", "bucket", client.Bucket, "path", client.Path, "endpoint", client.Endpoint)
case *sftp.ReplicaClient:
slog.Info("replicating to", "host", client.Host, "user", client.User, "path", client.Path)
default:
slog.Info("replicating to")
}
}
}
// Wait for signal to stop program.
<-ctx.Done()
signal.Reset()
// Serve metrics over HTTP if enabled.
if c.Config.Addr != "" {
hostport := c.Config.Addr
if host, port, _ := net.SplitHostPort(c.Config.Addr); port == "" {
return fmt.Errorf("must specify port for bind address: %q", c.Config.Addr)
} else if host == "" {
hostport = net.JoinHostPort("localhost", port)
}
// Gracefully close
if err := c.Close(); err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
slog.Info("serving metrics on", "url", fmt.Sprintf("http://%s/metrics", hostport))
go func() {
http.Handle("/metrics", promhttp.Handler())
if err := http.ListenAndServe(c.Config.Addr, nil); err != nil {
slog.Error("cannot start metrics server", "error", err)
}
}()
}
// Parse exec commands args & start subprocess.
if c.Config.Exec != "" {
execArgs, err := shellwords.Parse(c.Config.Exec)
if err != nil {
return fmt.Errorf("cannot parse exec command: %w", err)
}
c.cmd = exec.Command(execArgs[0], execArgs[1:]...)
c.cmd.Env = os.Environ()
c.cmd.Stdout = os.Stdout
c.cmd.Stderr = os.Stderr
if err := c.cmd.Start(); err != nil {
return fmt.Errorf("cannot start exec command: %w", err)
}
go func() { c.execCh <- c.cmd.Wait() }()
}
return nil
@@ -86,8 +168,8 @@ func (c *ReplicateCommand) Run(ctx context.Context, args []string) (err error) {
// Close closes all open databases.
func (c *ReplicateCommand) Close() (err error) {
for _, db := range c.DBs {
if e := db.SoftClose(); e != nil {
fmt.Printf("error closing db: path=%s err=%s\n", db.Path(), e)
if e := db.Close(); e != nil {
db.Logger.Error("error closing db", "error", e)
if err == nil {
err = e
}
@@ -96,19 +178,32 @@ func (c *ReplicateCommand) Close() (err error) {
return err
}
// Usage prints the help screen to STDOUT.
func (c *ReplicateCommand) Usage() {
fmt.Printf(`
The replicate command starts a server to monitor & replicate databases
specified in your configuration file.
The replicate command starts a server to monitor & replicate databases.
You can specify your database & replicas in a configuration file or you can
replicate a single database file by specifying its path and its replicas in the
command line arguments.
Usage:
litestream replicate [arguments]
litestream replicate [arguments] DB_PATH REPLICA_URL [REPLICA_URL...]
Arguments:
-config PATH
Specifies the configuration file. Defaults to %s
Specifies the configuration file.
Defaults to %s
`[1:], DefaultConfigPath)
-exec CMD
Executes a subcommand. Litestream will exit when the child
process exits. Useful for simple process management.
-no-expand-env
Disables environment variable expansion in configuration file.
`[1:], DefaultConfigPath())
}
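
Hypothetical invocations, in the style of the Examples sections of the other commands:

```
# Replicate databases listed in the default configuration file.
$ litestream replicate

# Replicate a single database to S3 without a config file.
$ litestream replicate /path/to/db s3://mybkt/db

# Supervise an application; litestream exits when it exits.
$ litestream replicate -exec "myapp -addr :8080"
```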


@@ -5,51 +5,40 @@ import (
"errors"
"flag"
"fmt"
"log"
"log/slog"
"os"
"path/filepath"
"strconv"
"time"
"github.com/benbjohnson/litestream"
)
type RestoreCommand struct {
DBPath string
}
func NewRestoreCommand() *RestoreCommand {
return &RestoreCommand{}
}
// RestoreCommand represents a command to restore a database from a backup.
type RestoreCommand struct{}
// Run executes the command.
func (c *RestoreCommand) Run(ctx context.Context, args []string) (err error) {
var configPath string
var opt litestream.RestoreOptions
opt := litestream.NewRestoreOptions()
fs := flag.NewFlagSet("litestream-restore", flag.ContinueOnError)
registerConfigFlag(fs, &configPath)
configPath, noExpandEnv := registerConfigFlag(fs)
fs.StringVar(&opt.OutputPath, "o", "", "output path")
fs.StringVar(&opt.ReplicaName, "replica", "", "replica name")
fs.StringVar(&opt.Generation, "generation", "", "generation name")
fs.BoolVar(&opt.DryRun, "dry-run", false, "dry run")
fs.Var((*indexVar)(&opt.Index), "index", "wal index")
fs.IntVar(&opt.Parallelism, "parallelism", opt.Parallelism, "parallelism")
ifDBNotExists := fs.Bool("if-db-not-exists", false, "")
ifReplicaExists := fs.Bool("if-replica-exists", false, "")
timestampStr := fs.String("timestamp", "", "timestamp")
verbose := fs.Bool("v", false, "verbose output")
fs.Usage = c.Usage
if err := fs.Parse(args); err != nil {
return err
} else if fs.NArg() == 0 || fs.Arg(0) == "" {
return fmt.Errorf("database path required")
return fmt.Errorf("database path or replica URL required")
} else if fs.NArg() > 1 {
return fmt.Errorf("too many arguments")
}
// Load configuration.
if configPath == "" {
return errors.New("-config required")
}
config, err := ReadConfigFile(configPath)
if err != nil {
return err
}
// Parse timestamp, if specified.
if *timestampStr != "" {
if opt.Timestamp, err = time.Parse(time.RFC3339, *timestampStr); err != nil {
@@ -57,41 +46,117 @@ func (c *RestoreCommand) Run(ctx context.Context, args []string) (err error) {
}
}
// Verbose output is automatically enabled if dry run is specified.
if opt.DryRun {
*verbose = true
// Determine replica & generation to restore from.
var r *litestream.Replica
if isURL(fs.Arg(0)) {
if *configPath != "" {
return fmt.Errorf("cannot specify a replica URL and the -config flag")
}
if r, err = c.loadFromURL(ctx, fs.Arg(0), *ifDBNotExists, &opt); err == errSkipDBExists {
slog.Info("database already exists, skipping")
return nil
} else if err != nil {
return err
}
} else {
if *configPath == "" {
*configPath = DefaultConfigPath()
}
if r, err = c.loadFromConfig(ctx, fs.Arg(0), *configPath, !*noExpandEnv, *ifDBNotExists, &opt); err == errSkipDBExists {
slog.Info("database already exists, skipping")
return nil
} else if err != nil {
return err
}
}
// Instantiate logger if verbose output is enabled.
if *verbose {
opt.Logger = log.New(os.Stderr, "", log.LstdFlags)
// Return an error if no matching targets are found.
// If the optional flag is set, return success. Useful for automated recovery.
if opt.Generation == "" {
if *ifReplicaExists {
slog.Info("no matching backups found")
return nil
}
return fmt.Errorf("no matching backups found")
}
// Determine absolute path for database, if specified.
if c.DBPath, err = filepath.Abs(fs.Arg(0)); err != nil {
return err
}
// Instantiate DB.
dbConfig := config.DBConfig(c.DBPath)
if dbConfig == nil {
return fmt.Errorf("database not found in config: %s", c.DBPath)
}
db, err := newDBFromConfig(dbConfig)
if err != nil {
return err
}
return db.Restore(ctx, opt)
return r.Restore(ctx, opt)
}
// loadFromURL creates a replica & updates the restore options from a replica URL.
func (c *RestoreCommand) loadFromURL(ctx context.Context, replicaURL string, ifDBNotExists bool, opt *litestream.RestoreOptions) (*litestream.Replica, error) {
if opt.OutputPath == "" {
return nil, fmt.Errorf("output path required")
}
// Exit successfully if the output file already exists.
if _, err := os.Stat(opt.OutputPath); !os.IsNotExist(err) && ifDBNotExists {
return nil, errSkipDBExists
}
syncInterval := litestream.DefaultSyncInterval
r, err := NewReplicaFromConfig(&ReplicaConfig{
URL: replicaURL,
SyncInterval: &syncInterval,
}, nil)
if err != nil {
return nil, err
}
opt.Generation, _, err = r.CalcRestoreTarget(ctx, *opt)
return r, err
}
// loadFromConfig returns a replica & updates the restore options from a DB reference.
func (c *RestoreCommand) loadFromConfig(ctx context.Context, dbPath, configPath string, expandEnv, ifDBNotExists bool, opt *litestream.RestoreOptions) (*litestream.Replica, error) {
// Load configuration.
config, err := ReadConfigFile(configPath, expandEnv)
if err != nil {
return nil, err
}
// Lookup database from configuration file by path.
if dbPath, err = expand(dbPath); err != nil {
return nil, err
}
dbConfig := config.DBConfig(dbPath)
if dbConfig == nil {
return nil, fmt.Errorf("database not found in config: %s", dbPath)
}
db, err := NewDBFromConfig(dbConfig)
if err != nil {
return nil, err
}
// Restore into original database path if not specified.
if opt.OutputPath == "" {
opt.OutputPath = dbPath
}
// Exit successfully if the output file already exists.
if _, err := os.Stat(opt.OutputPath); !os.IsNotExist(err) && ifDBNotExists {
return nil, errSkipDBExists
}
// Determine the appropriate replica & generation to restore from.
r, generation, err := db.CalcRestoreTarget(ctx, *opt)
if err != nil {
return nil, err
}
opt.Generation = generation
return r, nil
}
// Usage prints the help screen to STDOUT.
func (c *RestoreCommand) Usage() {
fmt.Printf(`
The restore command recovers a database from a previous snapshot and WAL.
Usage:
litestream restore [arguments] DB
litestream restore [arguments] DB_PATH
litestream restore [arguments] REPLICA_URL
Arguments:
@@ -99,6 +164,9 @@ Arguments:
Specifies the configuration file.
Defaults to %s
-no-expand-env
Disables environment variable expansion in configuration file.
-replica NAME
Restore from a specific replica.
Defaults to replica with latest data.
@@ -107,6 +175,10 @@ Arguments:
Restore from a specific generation.
Defaults to generation with latest data.
-index NUM
Restore up to a specific hex-encoded WAL index (inclusive).
Defaults to use the highest available index.
-timestamp TIMESTAMP
Restore to a specific point-in-time.
Defaults to use the latest available backup.
@@ -115,9 +187,15 @@ Arguments:
Output path of the restored database.
Defaults to original DB path.
-dry-run
Prints all log output as if it were running but does
not perform actual restore.
-if-db-not-exists
Returns exit code of 0 if the database already exists.
-if-replica-exists
Returns exit code of 0 if no backups found.
-parallelism NUM
Determines the number of WAL files downloaded in parallel.
Defaults to `+strconv.Itoa(litestream.DefaultRestoreParallelism)+`.
-v
Verbose output.
@@ -141,6 +219,8 @@ Examples:
$ litestream restore -replica s3 -generation xxxxxxxx /path/to/db
`[1:],
DefaultConfigPath,
DefaultConfigPath(),
)
}
var errSkipDBExists = errors.New("database already exists, skipping")

cmd/litestream/snapshots.go (new file, 139 lines)

@@ -0,0 +1,139 @@
package main
import (
"context"
"flag"
"fmt"
"log/slog"
"os"
"text/tabwriter"
"time"
"github.com/benbjohnson/litestream"
)
// SnapshotsCommand represents a command to list snapshots for a database.
type SnapshotsCommand struct{}
// Run executes the command.
func (c *SnapshotsCommand) Run(ctx context.Context, args []string) (err error) {
fs := flag.NewFlagSet("litestream-snapshots", flag.ContinueOnError)
configPath, noExpandEnv := registerConfigFlag(fs)
replicaName := fs.String("replica", "", "replica name")
fs.Usage = c.Usage
if err := fs.Parse(args); err != nil {
return err
} else if fs.NArg() == 0 || fs.Arg(0) == "" {
return fmt.Errorf("database path required")
} else if fs.NArg() > 1 {
return fmt.Errorf("too many arguments")
}
var db *litestream.DB
var r *litestream.Replica
if isURL(fs.Arg(0)) {
if *configPath != "" {
return fmt.Errorf("cannot specify a replica URL and the -config flag")
}
if r, err = NewReplicaFromConfig(&ReplicaConfig{URL: fs.Arg(0)}, nil); err != nil {
return err
}
} else {
if *configPath == "" {
*configPath = DefaultConfigPath()
}
// Load configuration.
config, err := ReadConfigFile(*configPath, !*noExpandEnv)
if err != nil {
return err
}
// Lookup database from configuration file by path.
if path, err := expand(fs.Arg(0)); err != nil {
return err
} else if dbc := config.DBConfig(path); dbc == nil {
return fmt.Errorf("database not found in config: %s", path)
} else if db, err = NewDBFromConfig(dbc); err != nil {
return err
}
// Filter by replica, if specified.
if *replicaName != "" {
if r = db.Replica(*replicaName); r == nil {
return fmt.Errorf("replica %q not found for database %q", *replicaName, db.Path())
}
}
}
// Find snapshots by db or replica.
var replicas []*litestream.Replica
if r != nil {
replicas = []*litestream.Replica{r}
} else {
replicas = db.Replicas
}
// List all snapshots.
w := tabwriter.NewWriter(os.Stdout, 0, 8, 2, ' ', 0)
defer w.Flush()
fmt.Fprintln(w, "replica\tgeneration\tindex\tsize\tcreated")
for _, r := range replicas {
infos, err := r.Snapshots(ctx)
if err != nil {
slog.Error("cannot determine snapshots", "error", err)
continue
}
for _, info := range infos {
fmt.Fprintf(w, "%s\t%s\t%d\t%d\t%s\n",
r.Name(),
info.Generation,
info.Index,
info.Size,
info.CreatedAt.Format(time.RFC3339),
)
}
}
return nil
}
// Usage prints the help screen to STDOUT.
func (c *SnapshotsCommand) Usage() {
fmt.Printf(`
The snapshots command lists all snapshots available for a database or replica.
Usage:
litestream snapshots [arguments] DB_PATH
litestream snapshots [arguments] REPLICA_URL
Arguments:
-config PATH
Specifies the configuration file.
Defaults to %s
-no-expand-env
Disables environment variable expansion in configuration file.
-replica NAME
Optional, filter by a specific replica.
Examples:
# List all snapshots for a database.
$ litestream snapshots /path/to/db
# List all snapshots on S3.
$ litestream snapshots -replica s3 /path/to/db
# List all snapshots by replica URL.
$ litestream snapshots s3://mybkt/db
`[1:],
DefaultConfigPath(),
)
}


@@ -6,8 +6,10 @@ import (
"fmt"
)
// VersionCommand represents a command to print the current version.
type VersionCommand struct{}
// Run executes the command.
func (c *VersionCommand) Run(ctx context.Context, args []string) (err error) {
fs := flag.NewFlagSet("litestream-version", flag.ContinueOnError)
fs.Usage = c.Usage
@@ -20,6 +22,7 @@ func (c *VersionCommand) Run(ctx context.Context, args []string) (err error) {
return nil
}
// Usage prints the help screen to STDOUT.
func (c *VersionCommand) Usage() {
fmt.Println(`
Prints the version.

164
cmd/litestream/wal.go Normal file
View File

@@ -0,0 +1,164 @@
package main
import (
"context"
"flag"
"fmt"
"os"
"text/tabwriter"
"time"
"github.com/benbjohnson/litestream"
)
// WALCommand represents a command to list WAL files for a database.
type WALCommand struct{}
// Run executes the command.
func (c *WALCommand) Run(ctx context.Context, args []string) (err error) {
fs := flag.NewFlagSet("litestream-wal", flag.ContinueOnError)
configPath, noExpandEnv := registerConfigFlag(fs)
replicaName := fs.String("replica", "", "replica name")
generation := fs.String("generation", "", "generation name")
fs.Usage = c.Usage
if err := fs.Parse(args); err != nil {
return err
} else if fs.NArg() == 0 || fs.Arg(0) == "" {
return fmt.Errorf("database path required")
} else if fs.NArg() > 1 {
return fmt.Errorf("too many arguments")
}
var db *litestream.DB
var r *litestream.Replica
if isURL(fs.Arg(0)) {
if *configPath != "" {
return fmt.Errorf("cannot specify a replica URL and the -config flag")
}
if r, err = NewReplicaFromConfig(&ReplicaConfig{URL: fs.Arg(0)}, nil); err != nil {
return err
}
} else {
if *configPath == "" {
*configPath = DefaultConfigPath()
}
// Load configuration.
config, err := ReadConfigFile(*configPath, !*noExpandEnv)
if err != nil {
return err
}
// Lookup database from configuration file by path.
if path, err := expand(fs.Arg(0)); err != nil {
return err
} else if dbc := config.DBConfig(path); dbc == nil {
return fmt.Errorf("database not found in config: %s", path)
} else if db, err = NewDBFromConfig(dbc); err != nil {
return err
}
// Filter by replica, if specified.
if *replicaName != "" {
if r = db.Replica(*replicaName); r == nil {
return fmt.Errorf("replica %q not found for database %q", *replicaName, db.Path())
}
}
}
// Find WAL files by db or replica.
var replicas []*litestream.Replica
if r != nil {
replicas = []*litestream.Replica{r}
} else {
replicas = db.Replicas
}
// List all WAL files.
w := tabwriter.NewWriter(os.Stdout, 0, 8, 2, ' ', 0)
defer w.Flush()
fmt.Fprintln(w, "replica\tgeneration\tindex\toffset\tsize\tcreated")
for _, r := range replicas {
var generations []string
if *generation != "" {
generations = []string{*generation}
} else {
if generations, err = r.Client.Generations(ctx); err != nil {
r.Logger().Error("cannot determine generations", "error", err)
continue
}
}
for _, generation := range generations {
if err := func() error {
itr, err := r.Client.WALSegments(ctx, generation)
if err != nil {
return err
}
defer itr.Close()
for itr.Next() {
info := itr.WALSegment()
fmt.Fprintf(w, "%s\t%s\t%d\t%d\t%d\t%s\n",
r.Name(),
info.Generation,
info.Index,
info.Offset,
info.Size,
info.CreatedAt.Format(time.RFC3339),
)
}
return itr.Close()
}(); err != nil {
r.Logger().Error("cannot fetch wal segments", "error", err)
continue
}
}
}
return nil
}
// Usage prints the help screen to STDOUT.
func (c *WALCommand) Usage() {
fmt.Printf(`
The wal command lists all WAL segments available for a database.
Usage:
litestream wal [arguments] DB_PATH
litestream wal [arguments] REPLICA_URL
Arguments:
-config PATH
Specifies the configuration file.
Defaults to %s
-no-expand-env
Disables environment variable expansion in configuration file.
-replica NAME
Optional, filter by a specific replica.
-generation NAME
Optional, filter by a specific generation.
Examples:
# List all WAL segments for a database.
$ litestream wal /path/to/db
# List all WAL segments on S3 for a specific generation.
$ litestream wal -replica s3 -generation xxxxxxxx /path/to/db
# List all WAL segments for replica URL.
$ litestream wal s3://mybkt/db
`[1:],
DefaultConfigPath(),
)
}

1269
db.go

File diff suppressed because it is too large

654
db_test.go Normal file
View File

@@ -0,0 +1,654 @@
package litestream_test
import (
"context"
"database/sql"
"io/ioutil"
"os"
"path/filepath"
"strings"
"testing"
"time"
"github.com/benbjohnson/litestream"
)
func TestDB_Path(t *testing.T) {
db := litestream.NewDB("/tmp/db")
if got, want := db.Path(), `/tmp/db`; got != want {
t.Fatalf("Path()=%v, want %v", got, want)
}
}
func TestDB_WALPath(t *testing.T) {
db := litestream.NewDB("/tmp/db")
if got, want := db.WALPath(), `/tmp/db-wal`; got != want {
t.Fatalf("WALPath()=%v, want %v", got, want)
}
}
func TestDB_MetaPath(t *testing.T) {
t.Run("Absolute", func(t *testing.T) {
db := litestream.NewDB("/tmp/db")
if got, want := db.MetaPath(), `/tmp/.db-litestream`; got != want {
t.Fatalf("MetaPath()=%v, want %v", got, want)
}
})
t.Run("Relative", func(t *testing.T) {
db := litestream.NewDB("db")
if got, want := db.MetaPath(), `.db-litestream`; got != want {
t.Fatalf("MetaPath()=%v, want %v", got, want)
}
})
}
func TestDB_GenerationNamePath(t *testing.T) {
db := litestream.NewDB("/tmp/db")
if got, want := db.GenerationNamePath(), `/tmp/.db-litestream/generation`; got != want {
t.Fatalf("GenerationNamePath()=%v, want %v", got, want)
}
}
func TestDB_GenerationPath(t *testing.T) {
db := litestream.NewDB("/tmp/db")
if got, want := db.GenerationPath("xxxx"), `/tmp/.db-litestream/generations/xxxx`; got != want {
t.Fatalf("GenerationPath()=%v, want %v", got, want)
}
}
func TestDB_ShadowWALDir(t *testing.T) {
db := litestream.NewDB("/tmp/db")
if got, want := db.ShadowWALDir("xxxx"), `/tmp/.db-litestream/generations/xxxx/wal`; got != want {
t.Fatalf("ShadowWALDir()=%v, want %v", got, want)
}
}
func TestDB_ShadowWALPath(t *testing.T) {
db := litestream.NewDB("/tmp/db")
if got, want := db.ShadowWALPath("xxxx", 1000), `/tmp/.db-litestream/generations/xxxx/wal/000003e8.wal`; got != want {
t.Fatalf("ShadowWALPath()=%v, want %v", got, want)
}
}
// Ensure we can check the last modified time of the real database and its WAL.
func TestDB_UpdatedAt(t *testing.T) {
t.Run("ErrNotExist", func(t *testing.T) {
db := MustOpenDB(t)
defer MustCloseDB(t, db)
if _, err := db.UpdatedAt(); !os.IsNotExist(err) {
t.Fatalf("unexpected error: %#v", err)
}
})
t.Run("DB", func(t *testing.T) {
db, sqldb := MustOpenDBs(t)
defer MustCloseDBs(t, db, sqldb)
if t0, err := db.UpdatedAt(); err != nil {
t.Fatal(err)
} else if time.Since(t0) > 10*time.Second {
t.Fatalf("unexpected updated at time: %s", t0)
}
})
t.Run("WAL", func(t *testing.T) {
db, sqldb := MustOpenDBs(t)
defer MustCloseDBs(t, db, sqldb)
t0, err := db.UpdatedAt()
if err != nil {
t.Fatal(err)
}
sleepTime := 100 * time.Millisecond
if os.Getenv("CI") != "" {
sleepTime = 1 * time.Second
}
time.Sleep(sleepTime)
if _, err := sqldb.Exec(`CREATE TABLE t (id INT);`); err != nil {
t.Fatal(err)
}
if t1, err := db.UpdatedAt(); err != nil {
t.Fatal(err)
} else if !t1.After(t0) {
t.Fatalf("expected newer updated at time: %s > %s", t1, t0)
}
})
}
// Ensure we can compute a checksum on the real database.
func TestDB_CRC64(t *testing.T) {
t.Run("ErrNotExist", func(t *testing.T) {
db := MustOpenDB(t)
defer MustCloseDB(t, db)
if _, _, err := db.CRC64(context.Background()); !os.IsNotExist(err) {
t.Fatalf("unexpected error: %#v", err)
}
})
t.Run("DB", func(t *testing.T) {
db, sqldb := MustOpenDBs(t)
defer MustCloseDBs(t, db, sqldb)
if err := db.Sync(context.Background()); err != nil {
t.Fatal(err)
}
chksum0, _, err := db.CRC64(context.Background())
if err != nil {
t.Fatal(err)
}
// Issue a change that only lands in the WAL. The checksum should still change.
if _, err := sqldb.Exec(`CREATE TABLE t (id INT);`); err != nil {
t.Fatal(err)
} else if chksum1, _, err := db.CRC64(context.Background()); err != nil {
t.Fatal(err)
} else if chksum0 == chksum1 {
t.Fatal("expected different checksum event after WAL change")
}
// Checkpoint change into database. Checksum should change.
if err := db.Checkpoint(context.Background(), litestream.CheckpointModeTruncate); err != nil {
t.Fatal(err)
}
if chksum2, _, err := db.CRC64(context.Background()); err != nil {
t.Fatal(err)
} else if chksum0 == chksum2 {
t.Fatal("expected different checksums after checkpoint")
}
})
}
// Ensure we can sync the real WAL to the shadow WAL.
func TestDB_Sync(t *testing.T) {
// Ensure sync is skipped if no database exists.
t.Run("NoDB", func(t *testing.T) {
db := MustOpenDB(t)
defer MustCloseDB(t, db)
if err := db.Sync(context.Background()); err != nil {
t.Fatal(err)
}
})
// Ensure sync can successfully run on the initial sync.
t.Run("Initial", func(t *testing.T) {
db, sqldb := MustOpenDBs(t)
defer MustCloseDBs(t, db, sqldb)
if err := db.Sync(context.Background()); err != nil {
t.Fatal(err)
}
// Verify page size is now available.
if db.PageSize() == 0 {
t.Fatal("expected page size after initial sync")
}
// Obtain real WAL size.
fi, err := os.Stat(db.WALPath())
if err != nil {
t.Fatal(err)
}
// Ensure position now available.
if pos, err := db.Pos(); err != nil {
t.Fatal(err)
} else if pos.Generation == "" {
t.Fatal("expected generation")
} else if got, want := pos.Index, 0; got != want {
t.Fatalf("pos.Index=%v, want %v", got, want)
} else if got, want := pos.Offset, fi.Size(); got != want {
t.Fatalf("pos.Offset=%v, want %v", got, want)
}
})
// Ensure DB can keep in sync across multiple Sync() invocations.
t.Run("MultiSync", func(t *testing.T) {
db, sqldb := MustOpenDBs(t)
defer MustCloseDBs(t, db, sqldb)
// Execute a query to force a write to the WAL.
if _, err := sqldb.Exec(`CREATE TABLE foo (bar TEXT);`); err != nil {
t.Fatal(err)
}
// Perform initial sync & grab initial position.
if err := db.Sync(context.Background()); err != nil {
t.Fatal(err)
}
pos0, err := db.Pos()
if err != nil {
t.Fatal(err)
}
// Insert into table.
if _, err := sqldb.Exec(`INSERT INTO foo (bar) VALUES ('baz');`); err != nil {
t.Fatal(err)
}
// Sync to ensure position moves forward one page.
if err := db.Sync(context.Background()); err != nil {
t.Fatal(err)
} else if pos1, err := db.Pos(); err != nil {
t.Fatal(err)
} else if pos0.Generation != pos1.Generation {
t.Fatal("expected the same generation")
} else if got, want := pos1.Index, pos0.Index; got != want {
t.Fatalf("Index=%v, want %v", got, want)
} else if got, want := pos1.Offset, pos0.Offset+4096+litestream.WALFrameHeaderSize; got != want {
t.Fatalf("Offset=%v, want %v", got, want)
}
})
// Ensure a WAL file is created if one does not already exist.
t.Run("NoWAL", func(t *testing.T) {
db, sqldb := MustOpenDBs(t)
defer MustCloseDBs(t, db, sqldb)
// Issue initial sync and truncate WAL.
if err := db.Sync(context.Background()); err != nil {
t.Fatal(err)
}
// Obtain initial position.
pos0, err := db.Pos()
if err != nil {
t.Fatal(err)
}
// Checkpoint & fully close which should close WAL file.
if err := db.Checkpoint(context.Background(), litestream.CheckpointModeTruncate); err != nil {
t.Fatal(err)
} else if err := db.Close(); err != nil {
t.Fatal(err)
} else if err := sqldb.Close(); err != nil {
t.Fatal(err)
}
// Remove WAL file.
if err := os.Remove(db.WALPath()); err != nil {
t.Fatal(err)
}
// Reopen the managed database.
db = MustOpenDBAt(t, db.Path())
defer MustCloseDB(t, db)
// Re-sync and ensure new generation has been created.
if err := db.Sync(context.Background()); err != nil {
t.Fatal(err)
}
// Obtain initial position.
if pos1, err := db.Pos(); err != nil {
t.Fatal(err)
} else if pos0.Generation == pos1.Generation {
t.Fatal("expected new generation after truncation")
}
})
// Ensure DB can start new generation if it detects it cannot verify last position.
t.Run("OverwritePrevPosition", func(t *testing.T) {
db, sqldb := MustOpenDBs(t)
defer MustCloseDBs(t, db, sqldb)
// Execute a query to force a write to the WAL.
if _, err := sqldb.Exec(`CREATE TABLE foo (bar TEXT);`); err != nil {
t.Fatal(err)
}
// Issue initial sync and truncate WAL.
if err := db.Sync(context.Background()); err != nil {
t.Fatal(err)
}
// Obtain initial position.
pos0, err := db.Pos()
if err != nil {
t.Fatal(err)
}
// Fully close which should close WAL file.
if err := db.Close(); err != nil {
t.Fatal(err)
} else if err := sqldb.Close(); err != nil {
t.Fatal(err)
}
// Verify WAL does not exist.
if _, err := os.Stat(db.WALPath()); !os.IsNotExist(err) {
t.Fatal(err)
}
// Insert into table multiple times to move past old offset
sqldb = MustOpenSQLDB(t, db.Path())
defer MustCloseSQLDB(t, sqldb)
for i := 0; i < 100; i++ {
if _, err := sqldb.Exec(`INSERT INTO foo (bar) VALUES ('baz');`); err != nil {
t.Fatal(err)
}
}
// Reopen the managed database.
db = MustOpenDBAt(t, db.Path())
defer MustCloseDB(t, db)
// Re-sync and ensure new generation has been created.
if err := db.Sync(context.Background()); err != nil {
t.Fatal(err)
}
// Obtain initial position.
if pos1, err := db.Pos(); err != nil {
t.Fatal(err)
} else if pos0.Generation == pos1.Generation {
t.Fatal("expected new generation after truncation")
}
})
// Ensure DB can handle a mismatched WAL header and start a new generation.
t.Run("WALHeaderMismatch", func(t *testing.T) {
db, sqldb := MustOpenDBs(t)
defer MustCloseDBs(t, db, sqldb)
// Execute a query to force a write to the WAL and then sync.
if _, err := sqldb.Exec(`CREATE TABLE foo (bar TEXT);`); err != nil {
t.Fatal(err)
} else if err := db.Sync(context.Background()); err != nil {
t.Fatal(err)
}
// Grab initial position & close.
pos0, err := db.Pos()
if err != nil {
t.Fatal(err)
} else if err := db.Close(); err != nil {
t.Fatal(err)
}
// Read existing file, update header checksum, and write back only header
// to simulate a header with a mismatched checksum.
shadowWALPath := db.ShadowWALPath(pos0.Generation, pos0.Index)
if buf, err := ioutil.ReadFile(shadowWALPath); err != nil {
t.Fatal(err)
} else if err := ioutil.WriteFile(shadowWALPath, append(buf[:litestream.WALHeaderSize-8], 0, 0, 0, 0, 0, 0, 0, 0), 0600); err != nil {
t.Fatal(err)
}
// Reopen managed database & ensure sync will still work.
db = MustOpenDBAt(t, db.Path())
defer MustCloseDB(t, db)
if err := db.Sync(context.Background()); err != nil {
t.Fatal(err)
}
// Verify a new generation was started.
if pos1, err := db.Pos(); err != nil {
t.Fatal(err)
} else if pos0.Generation == pos1.Generation {
t.Fatal("expected new generation")
}
})
// Ensure DB can handle partial shadow WAL header write.
t.Run("PartialShadowWALHeader", func(t *testing.T) {
db, sqldb := MustOpenDBs(t)
defer MustCloseDBs(t, db, sqldb)
// Execute a query to force a write to the WAL and then sync.
if _, err := sqldb.Exec(`CREATE TABLE foo (bar TEXT);`); err != nil {
t.Fatal(err)
} else if err := db.Sync(context.Background()); err != nil {
t.Fatal(err)
}
pos0, err := db.Pos()
if err != nil {
t.Fatal(err)
}
// Close & truncate shadow WAL to simulate a partial header write.
if err := db.Close(); err != nil {
t.Fatal(err)
} else if err := os.Truncate(db.ShadowWALPath(pos0.Generation, pos0.Index), litestream.WALHeaderSize-1); err != nil {
t.Fatal(err)
}
// Reopen managed database & ensure sync will still work.
db = MustOpenDBAt(t, db.Path())
defer MustCloseDB(t, db)
if err := db.Sync(context.Background()); err != nil {
t.Fatal(err)
}
// Verify a new generation was started.
if pos1, err := db.Pos(); err != nil {
t.Fatal(err)
} else if pos0.Generation == pos1.Generation {
t.Fatal("expected new generation")
}
})
// Ensure DB can handle partial shadow WAL writes.
t.Run("PartialShadowWALFrame", func(t *testing.T) {
db, sqldb := MustOpenDBs(t)
defer MustCloseDBs(t, db, sqldb)
// Execute a query to force a write to the WAL and then sync.
if _, err := sqldb.Exec(`CREATE TABLE foo (bar TEXT);`); err != nil {
t.Fatal(err)
} else if err := db.Sync(context.Background()); err != nil {
t.Fatal(err)
}
pos0, err := db.Pos()
if err != nil {
t.Fatal(err)
}
// Obtain current shadow WAL size.
fi, err := os.Stat(db.ShadowWALPath(pos0.Generation, pos0.Index))
if err != nil {
t.Fatal(err)
}
// Close & truncate shadow WAL to simulate a partial frame write.
if err := db.Close(); err != nil {
t.Fatal(err)
} else if err := os.Truncate(db.ShadowWALPath(pos0.Generation, pos0.Index), fi.Size()-1); err != nil {
t.Fatal(err)
}
// Reopen managed database & ensure sync will still work.
db = MustOpenDBAt(t, db.Path())
defer MustCloseDB(t, db)
if err := db.Sync(context.Background()); err != nil {
t.Fatal(err)
}
// Verify same generation is kept.
if pos1, err := db.Pos(); err != nil {
t.Fatal(err)
} else if got, want := pos1, pos0; got != want {
t.Fatalf("Pos()=%s want %s", got, want)
}
// Ensure shadow WAL has recovered.
if fi0, err := os.Stat(db.ShadowWALPath(pos0.Generation, pos0.Index)); err != nil {
t.Fatal(err)
} else if got, want := fi0.Size(), fi.Size(); got != want {
t.Fatalf("Size()=%v, want %v", got, want)
}
})
// Ensure DB can handle a generation directory with a missing shadow WAL.
t.Run("NoShadowWAL", func(t *testing.T) {
db, sqldb := MustOpenDBs(t)
defer MustCloseDBs(t, db, sqldb)
// Execute a query to force a write to the WAL and then sync.
if _, err := sqldb.Exec(`CREATE TABLE foo (bar TEXT);`); err != nil {
t.Fatal(err)
} else if err := db.Sync(context.Background()); err != nil {
t.Fatal(err)
}
pos0, err := db.Pos()
if err != nil {
t.Fatal(err)
}
// Close & delete shadow WAL to simulate dir created but not WAL.
if err := db.Close(); err != nil {
t.Fatal(err)
} else if err := os.Remove(db.ShadowWALPath(pos0.Generation, pos0.Index)); err != nil {
t.Fatal(err)
}
// Reopen managed database & ensure sync will still work.
db = MustOpenDBAt(t, db.Path())
defer MustCloseDB(t, db)
if err := db.Sync(context.Background()); err != nil {
t.Fatal(err)
}
// Verify new generation created but index/offset the same.
if pos1, err := db.Pos(); err != nil {
t.Fatal(err)
} else if pos0.Generation == pos1.Generation {
t.Fatal("expected new generation")
} else if got, want := pos1.Index, pos0.Index; got != want {
t.Fatalf("Index=%v want %v", got, want)
} else if got, want := pos1.Offset, pos0.Offset; got != want {
t.Fatalf("Offset=%v want %v", got, want)
}
})
// Ensure DB checkpoints after minimum number of pages.
t.Run("MinCheckpointPageN", func(t *testing.T) {
db, sqldb := MustOpenDBs(t)
defer MustCloseDBs(t, db, sqldb)
// Execute a query to force a write to the WAL and then sync.
if _, err := sqldb.Exec(`CREATE TABLE foo (bar TEXT);`); err != nil {
t.Fatal(err)
} else if err := db.Sync(context.Background()); err != nil {
t.Fatal(err)
}
// Write at least minimum number of pages to trigger rollover.
for i := 0; i < db.MinCheckpointPageN; i++ {
if _, err := sqldb.Exec(`INSERT INTO foo (bar) VALUES ('baz');`); err != nil {
t.Fatal(err)
}
}
// Sync to shadow WAL.
if err := db.Sync(context.Background()); err != nil {
t.Fatal(err)
}
// Ensure position is now on the second index.
if pos, err := db.Pos(); err != nil {
t.Fatal(err)
} else if got, want := pos.Index, 1; got != want {
t.Fatalf("Index=%v, want %v", got, want)
}
})
// Ensure DB checkpoints after interval.
t.Run("CheckpointInterval", func(t *testing.T) {
db, sqldb := MustOpenDBs(t)
defer MustCloseDBs(t, db, sqldb)
// Execute a query to force a write to the WAL and then sync.
if _, err := sqldb.Exec(`CREATE TABLE foo (bar TEXT);`); err != nil {
t.Fatal(err)
} else if err := db.Sync(context.Background()); err != nil {
t.Fatal(err)
}
// Reduce checkpoint interval to ensure a rollover is triggered.
db.CheckpointInterval = 1 * time.Nanosecond
// Write to WAL & sync.
if _, err := sqldb.Exec(`INSERT INTO foo (bar) VALUES ('baz');`); err != nil {
t.Fatal(err)
} else if err := db.Sync(context.Background()); err != nil {
t.Fatal(err)
}
// Ensure position is now on the second index.
if pos, err := db.Pos(); err != nil {
t.Fatal(err)
} else if got, want := pos.Index, 1; got != want {
t.Fatalf("Index=%v, want %v", got, want)
}
})
}
// MustOpenDBs returns a new instance of a DB & associated SQL DB.
func MustOpenDBs(tb testing.TB) (*litestream.DB, *sql.DB) {
tb.Helper()
db := MustOpenDB(tb)
return db, MustOpenSQLDB(tb, db.Path())
}
// MustCloseDBs closes db & sqldb and removes the parent directory.
func MustCloseDBs(tb testing.TB, db *litestream.DB, sqldb *sql.DB) {
tb.Helper()
MustCloseDB(tb, db)
MustCloseSQLDB(tb, sqldb)
}
// MustOpenDB returns a new instance of a DB.
func MustOpenDB(tb testing.TB) *litestream.DB {
dir := tb.TempDir()
return MustOpenDBAt(tb, filepath.Join(dir, "db"))
}
// MustOpenDBAt returns a new instance of a DB for a given path.
func MustOpenDBAt(tb testing.TB, path string) *litestream.DB {
tb.Helper()
db := litestream.NewDB(path)
db.MonitorInterval = 0 // disable background goroutine
if err := db.Open(); err != nil {
tb.Fatal(err)
}
return db
}
// MustCloseDB closes db and removes its parent directory.
func MustCloseDB(tb testing.TB, db *litestream.DB) {
tb.Helper()
if err := db.Close(); err != nil && !strings.Contains(err.Error(), `database is closed`) {
tb.Fatal(err)
} else if err := os.RemoveAll(filepath.Dir(db.Path())); err != nil {
tb.Fatal(err)
}
}
// MustOpenSQLDB returns a database/sql DB.
func MustOpenSQLDB(tb testing.TB, path string) *sql.DB {
tb.Helper()
d, err := sql.Open("sqlite3", path)
if err != nil {
tb.Fatal(err)
} else if _, err := d.Exec(`PRAGMA journal_mode = wal;`); err != nil {
tb.Fatal(err)
}
return d
}
// MustCloseSQLDB closes a database/sql DB.
func MustCloseSQLDB(tb testing.TB, d *sql.DB) {
tb.Helper()
if err := d.Close(); err != nil {
tb.Fatal(err)
}
}

View File

@@ -1,88 +0,0 @@
DESIGN
======
Litestream is a sidecar process that replicates the write ahead log (WAL) for
a SQLite database. To ensure that it can replicate every page, litestream takes
control over the checkpointing process by issuing a long-running read
transaction against the database to prevent checkpointing. It then releases
this transaction once it obtains a write lock and issues the checkpoint itself.
The daemon polls the database on an interval to briefly obtain a write
transaction lock and copy over new WAL pages. Once the WAL has reached a
threshold size, litestream will issue a checkpoint and a single page write
to a table called `_litestream` to start the new WAL.
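As a rough illustration of that checkpoint-control pattern, here is a minimal, hypothetical Go sketch. This is not Litestream's actual implementation; the helper name and SQL statements are assumptions chosen for illustration only.
```
package sketch

import (
	"database/sql"

	_ "github.com/mattn/go-sqlite3"
)

// forceCheckpoint sketches the pattern described above: hold a read
// transaction to block other checkpointers, release it, then issue the
// checkpoint ourselves. Hypothetical helper, not Litestream's real code.
func forceCheckpoint(db *sql.DB) error {
	tx, err := db.Begin()
	if err != nil {
		return err
	}
	// A read materializes the transaction's shared lock and pins the WAL.
	var n int
	if err := tx.QueryRow(`SELECT COUNT(1) FROM sqlite_master;`).Scan(&n); err != nil {
		_ = tx.Rollback()
		return err
	}
	// Release the read lock, then perform the checkpoint ourselves.
	if err := tx.Rollback(); err != nil {
		return err
	}
	_, err = db.Exec(`PRAGMA wal_checkpoint(RESTART);`)
	return err
}
```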
## Workflow
When litestream first loads a database, it checks if there is an existing
sidecar directory which is named `.<DB>-litestream`. If not, it initializes
the directory and starts a new generation.
A generation is a snapshot of the database followed by a continuous stream of
WAL files. A new generation is started on initialization & whenever litestream
cannot verify that it has a continuous record of WAL files. This could happen
if litestream is stopped and another process checkpoints the WAL. In this case,
a new generation ID is randomly created and a snapshot is replicated to the
appropriate destinations.
Generations also prevent two servers from replicating to the same destination
and corrupting each other's data. In this case, each server would replicate
to a different generation directory. On recovery, there will be duplicate
databases and the end user can choose which generation to recover but each
database will be uncorrupted.
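For concreteness, a generation ID of this form can be produced with a few lines of Go. This is an illustrative sketch, not necessarily how Litestream derives its generation names.
```
package sketch

import (
	"crypto/rand"
	"encoding/hex"
)

// newGenerationName returns a random 16-character hex generation ID of the
// kind used throughout this document (e.g. "0123456701234567"). Sketch only.
func newGenerationName() (string, error) {
	buf := make([]byte, 8)
	if _, err := rand.Read(buf); err != nil {
		return "", err
	}
	return hex.EncodeToString(buf), nil
}
```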
## File Layout
Litestream maintains a shadow WAL which is a historical record of all previous
WAL files. These files can be deleted after a time or size threshold but should
be replicated before being deleted.
### Local
Given a database file named `db`, SQLite will create a WAL file called `db-wal`.
Litestream will then create a hidden directory called `.db-litestream` that
contains the historical record of all WAL files for the current generation.
```
db                     # SQLite database
db-wal                 # SQLite WAL
.db-litestream/
  generation           # current generation number
  generations/
    xxxxxxxx/
      wal/             # WAL files
        000000000000001.wal
        000000000000002.wal
        000000000000003.wal  # active WAL
```
### Remote (S3)
```
bkt/
  db/                                # database path
    generations/
      xxxxxxxx/
        snapshots/                   # snapshots w/ timestamp+offset
          20000101T000000Z-000000000000023.snapshot
        wal/                         # compressed WAL files
          000000000000001-0.wal.gz
          000000000000001-<offset>.wal.gz
          000000000000002-0.wal.gz
      00000002/
        snapshot/
          000000000000000.snapshot
        scheduled/
          daily/
            20000101T000000Z-000000000000023.snapshot
            20000102T000000Z-000000000000036.snapshot
          monthly/
            20000101T000000Z-000000000000023.snapshot
        wal/
          000000000000001.wal.gz
```

17
etc/build.ps1 Normal file
View File

@@ -0,0 +1,17 @@
[CmdletBinding()]
Param (
[Parameter(Mandatory = $true)]
[String] $Version
)
$ErrorActionPreference = "Stop"
# Update working directory.
Push-Location $PSScriptRoot
Trap {
Pop-Location
}
Invoke-Expression "candle.exe -nologo -arch x64 -ext WixUtilExtension -out litestream.wixobj -dVersion=`"$Version`" litestream.wxs"
Invoke-Expression "light.exe -nologo -spdb -ext WixUtilExtension -out `"litestream-${Version}.msi`" litestream.wixobj"
Pop-Location

15
etc/gon.hcl Normal file
View File

@@ -0,0 +1,15 @@
source = ["./dist/litestream"]
bundle_id = "com.middlemost.litestream"
apple_id {
username = "benbjohnson@yahoo.com"
password = "@env:AC_PASSWORD"
}
sign {
application_identity = "Developer ID Application: Middlemost Systems, LLC"
}
zip {
output_path = "dist/litestream.zip"
}

9
etc/litestream.service Normal file
View File

@@ -0,0 +1,9 @@
[Unit]
Description=Litestream
[Service]
Restart=always
ExecStart=/usr/bin/litestream replicate
[Install]
WantedBy=multi-user.target

89
etc/litestream.wxs Normal file
View File

@@ -0,0 +1,89 @@
<?xml version="1.0" encoding="utf-8"?>
<Wix
xmlns="http://schemas.microsoft.com/wix/2006/wi"
xmlns:util="http://schemas.microsoft.com/wix/UtilExtension"
>
<?if $(sys.BUILDARCH)=x64 ?>
<?define PlatformProgramFiles = "ProgramFiles64Folder" ?>
<?else ?>
<?define PlatformProgramFiles = "ProgramFilesFolder" ?>
<?endif ?>
<Product
Id="*"
UpgradeCode="5371367e-58b3-4e52-be0d-46945eb71ce6"
Name="Litestream"
Version="$(var.Version)"
Manufacturer="Litestream"
Language="1033"
Codepage="1252"
>
<Package
Id="*"
Manufacturer="Litestream"
InstallScope="perMachine"
InstallerVersion="500"
Description="Litestream $(var.Version) installer"
Compressed="yes"
/>
<Media Id="1" Cabinet="litestream.cab" EmbedCab="yes"/>
<MajorUpgrade
Schedule="afterInstallInitialize"
DowngradeErrorMessage="A later version of [ProductName] is already installed. Setup will now exit."
/>
<Directory Id="TARGETDIR" Name="SourceDir">
<Directory Id="$(var.PlatformProgramFiles)">
<Directory Id="APPLICATIONROOTDIRECTORY" Name="Litestream"/>
</Directory>
</Directory>
<ComponentGroup Id="Files">
<Component Directory="APPLICATIONROOTDIRECTORY">
<File
Id="litestream.exe"
Name="litestream.exe"
Source="litestream.exe"
KeyPath="yes"
/>
<ServiceInstall
Id="InstallService"
Name="Litestream"
DisplayName="Litestream"
Description="Replicates SQLite databases"
ErrorControl="normal"
Start="auto"
Type="ownProcess"
>
<util:ServiceConfig
FirstFailureActionType="restart"
SecondFailureActionType="restart"
ThirdFailureActionType="restart"
RestartServiceDelayInSeconds="60"
/>
<ServiceDependency Id="wmiApSrv" />
</ServiceInstall>
<ServiceControl
Id="ServiceStateControl"
Name="Litestream"
Remove="uninstall"
Start="install"
Stop="both"
/>
<util:EventSource
Log="Application"
Name="Litestream"
EventMessageFile="%SystemRoot%\System32\EventCreate.exe"
/>
</Component>
</ComponentGroup>
<Feature Id="DefaultFeature" Level="1">
<ComponentGroupRef Id="Files" />
</Feature>
</Product>
</Wix>

10
etc/litestream.yml Normal file
View File

@@ -0,0 +1,10 @@
# AWS credentials
# access-key-id: AKIAxxxxxxxxxxxxxxxx
# secret-access-key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx/xxxxxxxxx
# dbs:
# - path: /path/to/primary/db # Database to replicate from
# replicas:
# - path: /path/to/replica # File-based replication
# - url: s3://my.bucket.com/db # S3-based replication

19
etc/nfpm.yml Normal file
View File

@@ -0,0 +1,19 @@
name: litestream
arch: "${GOARCH}"
platform: "${GOOS}"
version: "${LITESTREAM_VERSION}"
section: "default"
priority: "extra"
maintainer: "Ben Johnson <benbjohnson@yahoo.com>"
description: Litestream is a tool for real-time replication of SQLite databases.
homepage: "https://github.com/benbjohnson/litestream"
license: "Apache 2"
contents:
- src: ./litestream
dst: /usr/bin/litestream
- src: ./litestream.yml
dst: /etc/litestream.yml
type: config
- src: ./litestream.service
dst: /usr/lib/systemd/system/litestream.service
type: config

381
file/replica_client.go Normal file
View File

@@ -0,0 +1,381 @@
package file
import (
"context"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"sort"
"github.com/benbjohnson/litestream"
"github.com/benbjohnson/litestream/internal"
)
// ReplicaClientType is the client type for this package.
const ReplicaClientType = "file"
var _ litestream.ReplicaClient = (*ReplicaClient)(nil)
// ReplicaClient is a client for writing snapshots & WAL segments to disk.
type ReplicaClient struct {
path string // destination path
Replica *litestream.Replica
}
// NewReplicaClient returns a new instance of ReplicaClient.
func NewReplicaClient(path string) *ReplicaClient {
return &ReplicaClient{
path: path,
}
}
// db returns the database, if available.
func (c *ReplicaClient) db() *litestream.DB {
if c.Replica == nil {
return nil
}
return c.Replica.DB()
}
// Type returns "file" as the client type.
func (c *ReplicaClient) Type() string {
return ReplicaClientType
}
// Path returns the destination path to replicate the database to.
func (c *ReplicaClient) Path() string {
return c.path
}
// GenerationsDir returns the path to a generation root directory.
func (c *ReplicaClient) GenerationsDir() (string, error) {
if c.path == "" {
return "", fmt.Errorf("file replica path required")
}
return filepath.Join(c.path, "generations"), nil
}
// GenerationDir returns the path to a generation's root directory.
func (c *ReplicaClient) GenerationDir(generation string) (string, error) {
dir, err := c.GenerationsDir()
if err != nil {
return "", err
} else if generation == "" {
return "", fmt.Errorf("generation required")
}
return filepath.Join(dir, generation), nil
}
// SnapshotsDir returns the path to a generation's snapshot directory.
func (c *ReplicaClient) SnapshotsDir(generation string) (string, error) {
dir, err := c.GenerationDir(generation)
if err != nil {
return "", err
}
return filepath.Join(dir, "snapshots"), nil
}
// SnapshotPath returns the path to an uncompressed snapshot file.
func (c *ReplicaClient) SnapshotPath(generation string, index int) (string, error) {
dir, err := c.SnapshotsDir(generation)
if err != nil {
return "", err
}
return filepath.Join(dir, litestream.FormatSnapshotPath(index)), nil
}
// WALDir returns the path to a generation's WAL directory.
func (c *ReplicaClient) WALDir(generation string) (string, error) {
dir, err := c.GenerationDir(generation)
if err != nil {
return "", err
}
return filepath.Join(dir, "wal"), nil
}
// WALSegmentPath returns the path to a WAL segment file.
func (c *ReplicaClient) WALSegmentPath(generation string, index int, offset int64) (string, error) {
dir, err := c.WALDir(generation)
if err != nil {
return "", err
}
return filepath.Join(dir, litestream.FormatWALSegmentPath(index, offset)), nil
}
// Generations returns a list of available generation names.
func (c *ReplicaClient) Generations(ctx context.Context) ([]string, error) {
root, err := c.GenerationsDir()
if err != nil {
return nil, fmt.Errorf("cannot determine generations path: %w", err)
}
fis, err := ioutil.ReadDir(root)
if os.IsNotExist(err) {
return nil, nil
} else if err != nil {
return nil, err
}
var generations []string
for _, fi := range fis {
if !litestream.IsGenerationName(fi.Name()) {
continue
} else if !fi.IsDir() {
continue
}
generations = append(generations, fi.Name())
}
return generations, nil
}
// DeleteGeneration deletes all snapshots & WAL segments within a generation.
func (c *ReplicaClient) DeleteGeneration(ctx context.Context, generation string) error {
dir, err := c.GenerationDir(generation)
if err != nil {
return fmt.Errorf("cannot determine generation path: %w", err)
}
if err := os.RemoveAll(dir); err != nil && !os.IsNotExist(err) {
return err
}
return nil
}
// Snapshots returns an iterator over all available snapshots for a generation.
func (c *ReplicaClient) Snapshots(ctx context.Context, generation string) (litestream.SnapshotIterator, error) {
dir, err := c.SnapshotsDir(generation)
if err != nil {
return nil, fmt.Errorf("cannot determine snapshots path: %w", err)
}
f, err := os.Open(dir)
if os.IsNotExist(err) {
return litestream.NewSnapshotInfoSliceIterator(nil), nil
} else if err != nil {
return nil, err
}
defer f.Close()
fis, err := f.Readdir(-1)
if err != nil {
return nil, err
}
// Iterate over every file and convert to metadata.
infos := make([]litestream.SnapshotInfo, 0, len(fis))
for _, fi := range fis {
// Parse index from filename.
index, err := litestream.ParseSnapshotPath(fi.Name())
if err != nil {
continue
}
infos = append(infos, litestream.SnapshotInfo{
Generation: generation,
Index: index,
Size: fi.Size(),
CreatedAt: fi.ModTime().UTC(),
})
}
sort.Sort(litestream.SnapshotInfoSlice(infos))
return litestream.NewSnapshotInfoSliceIterator(infos), nil
}
// WriteSnapshot writes LZ4 compressed data from rd into a file on disk.
func (c *ReplicaClient) WriteSnapshot(ctx context.Context, generation string, index int, rd io.Reader) (info litestream.SnapshotInfo, err error) {
filename, err := c.SnapshotPath(generation, index)
if err != nil {
return info, fmt.Errorf("cannot determine snapshot path: %w", err)
}
var fileInfo, dirInfo os.FileInfo
if db := c.db(); db != nil {
fileInfo, dirInfo = db.FileInfo(), db.DirInfo()
}
// Ensure parent directory exists.
if err := internal.MkdirAll(filepath.Dir(filename), dirInfo); err != nil {
return info, err
}
// Write snapshot to temporary file next to destination path.
f, err := internal.CreateFile(filename+".tmp", fileInfo)
if err != nil {
return info, err
}
defer f.Close()
if _, err := io.Copy(f, rd); err != nil {
return info, err
} else if err := f.Sync(); err != nil {
return info, err
} else if err := f.Close(); err != nil {
return info, err
}
// Build metadata.
fi, err := os.Stat(filename + ".tmp")
if err != nil {
return info, err
}
info = litestream.SnapshotInfo{
Generation: generation,
Index: index,
Size: fi.Size(),
CreatedAt: fi.ModTime().UTC(),
}
// Move snapshot to final path when it has been fully written & synced to disk.
if err := os.Rename(filename+".tmp", filename); err != nil {
return info, err
}
return info, nil
}
// SnapshotReader returns a reader for snapshot data at the given generation/index.
// Returns os.ErrNotExist if no matching index is found.
func (c *ReplicaClient) SnapshotReader(ctx context.Context, generation string, index int) (io.ReadCloser, error) {
filename, err := c.SnapshotPath(generation, index)
if err != nil {
return nil, fmt.Errorf("cannot determine snapshot path: %w", err)
}
return os.Open(filename)
}
// DeleteSnapshot deletes a snapshot with the given generation & index.
func (c *ReplicaClient) DeleteSnapshot(ctx context.Context, generation string, index int) error {
filename, err := c.SnapshotPath(generation, index)
if err != nil {
return fmt.Errorf("cannot determine snapshot path: %w", err)
}
if err := os.Remove(filename); err != nil && !os.IsNotExist(err) {
return err
}
return nil
}
// WALSegments returns an iterator over all available WAL files for a generation.
func (c *ReplicaClient) WALSegments(ctx context.Context, generation string) (litestream.WALSegmentIterator, error) {
dir, err := c.WALDir(generation)
if err != nil {
return nil, fmt.Errorf("cannot determine wal path: %w", err)
}
f, err := os.Open(dir)
if os.IsNotExist(err) {
return litestream.NewWALSegmentInfoSliceIterator(nil), nil
} else if err != nil {
return nil, err
}
defer f.Close()
fis, err := f.Readdir(-1)
if err != nil {
return nil, err
}
// Iterate over every file and convert to metadata.
infos := make([]litestream.WALSegmentInfo, 0, len(fis))
for _, fi := range fis {
// Parse index from filename.
index, offset, err := litestream.ParseWALSegmentPath(fi.Name())
if err != nil {
continue
}
infos = append(infos, litestream.WALSegmentInfo{
Generation: generation,
Index: index,
Offset: offset,
Size: fi.Size(),
CreatedAt: fi.ModTime().UTC(),
})
}
sort.Sort(litestream.WALSegmentInfoSlice(infos))
return litestream.NewWALSegmentInfoSliceIterator(infos), nil
}
// WriteWALSegment writes LZ4 compressed data from rd into a file on disk.
func (c *ReplicaClient) WriteWALSegment(ctx context.Context, pos litestream.Pos, rd io.Reader) (info litestream.WALSegmentInfo, err error) {
filename, err := c.WALSegmentPath(pos.Generation, pos.Index, pos.Offset)
if err != nil {
return info, fmt.Errorf("cannot determine wal segment path: %w", err)
}
var fileInfo, dirInfo os.FileInfo
if db := c.db(); db != nil {
fileInfo, dirInfo = db.FileInfo(), db.DirInfo()
}
// Ensure parent directory exists.
if err := internal.MkdirAll(filepath.Dir(filename), dirInfo); err != nil {
return info, err
}
// Write WAL segment to temporary file next to destination path.
f, err := internal.CreateFile(filename+".tmp", fileInfo)
if err != nil {
return info, err
}
defer f.Close()
if _, err := io.Copy(f, rd); err != nil {
return info, err
} else if err := f.Sync(); err != nil {
return info, err
} else if err := f.Close(); err != nil {
return info, err
}
// Build metadata.
fi, err := os.Stat(filename + ".tmp")
if err != nil {
return info, err
}
info = litestream.WALSegmentInfo{
Generation: pos.Generation,
Index: pos.Index,
Offset: pos.Offset,
Size: fi.Size(),
CreatedAt: fi.ModTime().UTC(),
}
// Move WAL segment to final path when it has been written & synced to disk.
if err := os.Rename(filename+".tmp", filename); err != nil {
return info, err
}
return info, nil
}
// WALSegmentReader returns a reader for a section of WAL data at the given position.
// Returns os.ErrNotExist if no matching index/offset is found.
func (c *ReplicaClient) WALSegmentReader(ctx context.Context, pos litestream.Pos) (io.ReadCloser, error) {
filename, err := c.WALSegmentPath(pos.Generation, pos.Index, pos.Offset)
if err != nil {
return nil, fmt.Errorf("cannot determine wal segment path: %w", err)
}
return os.Open(filename)
}
// DeleteWALSegments deletes WAL segments at the given positions.
func (c *ReplicaClient) DeleteWALSegments(ctx context.Context, a []litestream.Pos) error {
for _, pos := range a {
filename, err := c.WALSegmentPath(pos.Generation, pos.Index, pos.Offset)
if err != nil {
return fmt.Errorf("cannot determine wal segment path: %w", err)
}
if err := os.Remove(filename); err != nil && !os.IsNotExist(err) {
return err
}
}
return nil
}
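For orientation, here is a short usage sketch that drives this client directly through the API shown above. The replica directory path is made up for illustration.
```
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/benbjohnson/litestream/file"
)

func main() {
	ctx := context.Background()

	// Hypothetical replica directory.
	c := file.NewReplicaClient("/var/lib/litestream/replica")

	// List generation directories under <path>/generations.
	generations, err := c.Generations(ctx)
	if err != nil {
		log.Fatal(err)
	}
	for _, g := range generations {
		fmt.Println("generation:", g)
	}
}
```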

223
file/replica_client_test.go Normal file
View File

@@ -0,0 +1,223 @@
package file_test
import (
"testing"
"github.com/benbjohnson/litestream/file"
)
func TestReplicaClient_Path(t *testing.T) {
c := file.NewReplicaClient("/foo/bar")
if got, want := c.Path(), "/foo/bar"; got != want {
t.Fatalf("Path()=%v, want %v", got, want)
}
}
func TestReplicaClient_Type(t *testing.T) {
if got, want := file.NewReplicaClient("").Type(), "file"; got != want {
t.Fatalf("Type()=%v, want %v", got, want)
}
}
func TestReplicaClient_GenerationsDir(t *testing.T) {
t.Run("OK", func(t *testing.T) {
if got, err := file.NewReplicaClient("/foo").GenerationsDir(); err != nil {
t.Fatal(err)
} else if want := "/foo/generations"; got != want {
t.Fatalf("GenerationsDir()=%v, want %v", got, want)
}
})
t.Run("ErrNoPath", func(t *testing.T) {
if _, err := file.NewReplicaClient("").GenerationsDir(); err == nil || err.Error() != `file replica path required` {
t.Fatalf("unexpected error: %v", err)
}
})
}
func TestReplicaClient_GenerationDir(t *testing.T) {
t.Run("OK", func(t *testing.T) {
if got, err := file.NewReplicaClient("/foo").GenerationDir("0123456701234567"); err != nil {
t.Fatal(err)
} else if want := "/foo/generations/0123456701234567"; got != want {
t.Fatalf("GenerationDir()=%v, want %v", got, want)
}
})
t.Run("ErrNoPath", func(t *testing.T) {
if _, err := file.NewReplicaClient("").GenerationDir("0123456701234567"); err == nil || err.Error() != `file replica path required` {
t.Fatalf("expected error: %v", err)
}
})
t.Run("ErrNoGeneration", func(t *testing.T) {
if _, err := file.NewReplicaClient("/foo").GenerationDir(""); err == nil || err.Error() != `generation required` {
t.Fatalf("expected error: %v", err)
}
})
}
func TestReplicaClient_SnapshotsDir(t *testing.T) {
t.Run("OK", func(t *testing.T) {
if got, err := file.NewReplicaClient("/foo").SnapshotsDir("0123456701234567"); err != nil {
t.Fatal(err)
} else if want := "/foo/generations/0123456701234567/snapshots"; got != want {
t.Fatalf("SnapshotsDir()=%v, want %v", got, want)
}
})
t.Run("ErrNoPath", func(t *testing.T) {
if _, err := file.NewReplicaClient("").SnapshotsDir("0123456701234567"); err == nil || err.Error() != `file replica path required` {
t.Fatalf("unexpected error: %v", err)
}
})
t.Run("ErrNoGeneration", func(t *testing.T) {
if _, err := file.NewReplicaClient("/foo").SnapshotsDir(""); err == nil || err.Error() != `generation required` {
t.Fatalf("unexpected error: %v", err)
}
})
}
func TestReplicaClient_SnapshotPath(t *testing.T) {
t.Run("OK", func(t *testing.T) {
if got, err := file.NewReplicaClient("/foo").SnapshotPath("0123456701234567", 1000); err != nil {
t.Fatal(err)
} else if want := "/foo/generations/0123456701234567/snapshots/000003e8.snapshot.lz4"; got != want {
t.Fatalf("SnapshotPath()=%v, want %v", got, want)
}
})
t.Run("ErrNoPath", func(t *testing.T) {
if _, err := file.NewReplicaClient("").SnapshotPath("0123456701234567", 1000); err == nil || err.Error() != `file replica path required` {
t.Fatalf("unexpected error: %v", err)
}
})
t.Run("ErrNoGeneration", func(t *testing.T) {
if _, err := file.NewReplicaClient("/foo").SnapshotPath("", 1000); err == nil || err.Error() != `generation required` {
t.Fatalf("unexpected error: %v", err)
}
})
}
func TestReplicaClient_WALDir(t *testing.T) {
t.Run("OK", func(t *testing.T) {
if got, err := file.NewReplicaClient("/foo").WALDir("0123456701234567"); err != nil {
t.Fatal(err)
} else if want := "/foo/generations/0123456701234567/wal"; got != want {
t.Fatalf("WALDir()=%v, want %v", got, want)
}
})
t.Run("ErrNoPath", func(t *testing.T) {
if _, err := file.NewReplicaClient("").WALDir("0123456701234567"); err == nil || err.Error() != `file replica path required` {
t.Fatalf("unexpected error: %v", err)
}
})
t.Run("ErrNoGeneration", func(t *testing.T) {
if _, err := file.NewReplicaClient("/foo").WALDir(""); err == nil || err.Error() != `generation required` {
t.Fatalf("unexpected error: %v", err)
}
})
}
func TestReplicaClient_WALSegmentPath(t *testing.T) {
t.Run("OK", func(t *testing.T) {
if got, err := file.NewReplicaClient("/foo").WALSegmentPath("0123456701234567", 1000, 1001); err != nil {
t.Fatal(err)
} else if want := "/foo/generations/0123456701234567/wal/000003e8_000003e9.wal.lz4"; got != want {
t.Fatalf("WALPath()=%v, want %v", got, want)
}
})
t.Run("ErrNoPath", func(t *testing.T) {
if _, err := file.NewReplicaClient("").WALSegmentPath("0123456701234567", 1000, 0); err == nil || err.Error() != `file replica path required` {
t.Fatalf("unexpected error: %v", err)
}
})
t.Run("ErrNoGeneration", func(t *testing.T) {
if _, err := file.NewReplicaClient("/foo").WALSegmentPath("", 1000, 0); err == nil || err.Error() != `generation required` {
t.Fatalf("unexpected error: %v", err)
}
})
}
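The expected paths in these tests follow a zero-padded hex convention: index 1000 encodes as "000003e8" and offset 1001 as "000003e9". A sketch of that convention, not Litestream's actual helper:
```
package sketch

import "fmt"

// formatWALSegmentName shows the naming scheme exercised above: index and
// offset rendered as zero-padded hex (1000 -> "000003e8", 1001 -> "000003e9").
func formatWALSegmentName(index int, offset int64) string {
	return fmt.Sprintf("%08x_%08x.wal.lz4", index, offset)
}
```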
/*
func TestReplica_Sync(t *testing.T) {
// Ensure replica can successfully sync after DB has sync'd.
t.Run("InitialSync", func(t *testing.T) {
db, sqldb := MustOpenDBs(t)
defer MustCloseDBs(t, db, sqldb)
r := litestream.NewReplica(db, "", file.NewReplicaClient(t.TempDir()))
r.MonitorEnabled = false
db.Replicas = []*litestream.Replica{r}
// Sync database & then sync replica.
if err := db.Sync(context.Background()); err != nil {
t.Fatal(err)
} else if err := r.Sync(context.Background()); err != nil {
t.Fatal(err)
}
// Ensure positions match.
if want, err := db.Pos(); err != nil {
t.Fatal(err)
} else if got, err := r.Pos(context.Background()); err != nil {
t.Fatal(err)
} else if got != want {
t.Fatalf("Pos()=%v, want %v", got, want)
}
})
// Ensure replica can successfully sync multiple times.
t.Run("MultiSync", func(t *testing.T) {
db, sqldb := MustOpenDBs(t)
defer MustCloseDBs(t, db, sqldb)
r := litestream.NewReplica(db, "", file.NewReplicaClient(t.TempDir()))
r.MonitorEnabled = false
db.Replicas = []*litestream.Replica{r}
if _, err := sqldb.Exec(`CREATE TABLE foo (bar TEXT);`); err != nil {
t.Fatal(err)
}
// Write to the database multiple times and sync after each write.
for i, n := 0, db.MinCheckpointPageN*2; i < n; i++ {
if _, err := sqldb.Exec(`INSERT INTO foo (bar) VALUES ('baz')`); err != nil {
t.Fatal(err)
}
// Sync periodically.
if i%100 == 0 || i == n-1 {
if err := db.Sync(context.Background()); err != nil {
t.Fatal(err)
} else if err := r.Sync(context.Background()); err != nil {
t.Fatal(err)
}
}
}
// Ensure positions match.
pos, err := db.Pos()
if err != nil {
t.Fatal(err)
} else if got, want := pos.Index, 2; got != want {
t.Fatalf("Index=%v, want %v", got, want)
}
if want, err := r.Pos(context.Background()); err != nil {
t.Fatal(err)
} else if got := pos; got != want {
t.Fatalf("Pos()=%v, want %v", got, want)
}
})
// Ensure replica returns an error if there is no generation available from the DB.
t.Run("ErrNoGeneration", func(t *testing.T) {
db, sqldb := MustOpenDBs(t)
defer MustCloseDBs(t, db, sqldb)
r := litestream.NewReplica(db, "", file.NewReplicaClient(t.TempDir()))
r.MonitorEnabled = false
db.Replicas = []*litestream.Replica{r}
if err := r.Sync(context.Background()); err == nil || err.Error() != `no generation, waiting for data` {
t.Fatal(err)
}
})
}
*/

428
gcs/replica_client.go Normal file
View File

@@ -0,0 +1,428 @@
package gcs
import (
"context"
"fmt"
"io"
"os"
"path"
"strings"
"sync"
"time"
"cloud.google.com/go/storage"
"github.com/benbjohnson/litestream"
"github.com/benbjohnson/litestream/internal"
"google.golang.org/api/iterator"
)
// ReplicaClientType is the client type for this package.
const ReplicaClientType = "gcs"
var _ litestream.ReplicaClient = (*ReplicaClient)(nil)
// ReplicaClient is a client for writing snapshots & WAL segments to Google Cloud Storage.
type ReplicaClient struct {
mu sync.Mutex
client *storage.Client // gcs client
bkt *storage.BucketHandle // gcs bucket handle
// GCS bucket information
Bucket string
Path string
}
// NewReplicaClient returns a new instance of ReplicaClient.
func NewReplicaClient() *ReplicaClient {
return &ReplicaClient{}
}
// Type returns "gcs" as the client type.
func (c *ReplicaClient) Type() string {
return ReplicaClientType
}
// Init initializes the connection to GCS. No-op if already initialized.
func (c *ReplicaClient) Init(ctx context.Context) (err error) {
c.mu.Lock()
defer c.mu.Unlock()
if c.client != nil {
return nil
}
if c.client, err = storage.NewClient(ctx); err != nil {
return err
}
c.bkt = c.client.Bucket(c.Bucket)
return nil
}
// Generations returns a list of available generation names.
func (c *ReplicaClient) Generations(ctx context.Context) ([]string, error) {
if err := c.Init(ctx); err != nil {
return nil, err
}
// Construct query to only pull generation directory names.
query := &storage.Query{
Delimiter: "/",
Prefix: litestream.GenerationsPath(c.Path) + "/",
}
// Loop over results and only build list of generation-formatted names.
it := c.bkt.Objects(ctx, query)
var generations []string
for {
attrs, err := it.Next()
if err == iterator.Done {
break
} else if err != nil {
return nil, err
}
name := path.Base(strings.TrimSuffix(attrs.Prefix, "/"))
if !litestream.IsGenerationName(name) {
continue
}
generations = append(generations, name)
}
return generations, nil
}
// DeleteGeneration deletes all snapshots & WAL segments within a generation.
func (c *ReplicaClient) DeleteGeneration(ctx context.Context, generation string) error {
if err := c.Init(ctx); err != nil {
return err
}
dir, err := litestream.GenerationPath(c.Path, generation)
if err != nil {
return fmt.Errorf("cannot determine generation path: %w", err)
}
// Iterate over every object in generation and delete it.
internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "LIST").Inc()
for it := c.bkt.Objects(ctx, &storage.Query{Prefix: dir + "/"}); ; {
attrs, err := it.Next()
if err == iterator.Done {
break
} else if err != nil {
return err
}
if err := c.bkt.Object(attrs.Name).Delete(ctx); isNotExists(err) {
continue
} else if err != nil {
return fmt.Errorf("cannot delete object %q: %w", attrs.Name, err)
}
internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "DELETE").Inc()
}
// log.Printf("%s(%s): retainer: deleting generation: %s", r.db.Path(), r.Name(), generation)
return nil
}
// Snapshots returns an iterator over all available snapshots for a generation.
func (c *ReplicaClient) Snapshots(ctx context.Context, generation string) (litestream.SnapshotIterator, error) {
if err := c.Init(ctx); err != nil {
return nil, err
}
dir, err := litestream.SnapshotsPath(c.Path, generation)
if err != nil {
return nil, fmt.Errorf("cannot determine snapshots path: %w", err)
}
return newSnapshotIterator(generation, c.bkt.Objects(ctx, &storage.Query{Prefix: dir + "/"})), nil
}
// WriteSnapshot writes LZ4 compressed data from rd to the object storage.
func (c *ReplicaClient) WriteSnapshot(ctx context.Context, generation string, index int, rd io.Reader) (info litestream.SnapshotInfo, err error) {
if err := c.Init(ctx); err != nil {
return info, err
}
key, err := litestream.SnapshotPath(c.Path, generation, index)
if err != nil {
return info, fmt.Errorf("cannot determine snapshot path: %w", err)
}
startTime := time.Now()
w := c.bkt.Object(key).NewWriter(ctx)
defer w.Close()
n, err := io.Copy(w, rd)
if err != nil {
return info, err
} else if err := w.Close(); err != nil {
return info, err
}
internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "PUT").Inc()
internal.OperationBytesCounterVec.WithLabelValues(ReplicaClientType, "PUT").Add(float64(n))
// log.Printf("%s(%s): snapshot: creating %s/%08x t=%s", r.db.Path(), r.Name(), generation, index, time.Since(startTime).Truncate(time.Millisecond))
return litestream.SnapshotInfo{
Generation: generation,
Index: index,
Size: n,
CreatedAt: startTime.UTC(),
}, nil
}
// SnapshotReader returns a reader for snapshot data at the given generation/index.
func (c *ReplicaClient) SnapshotReader(ctx context.Context, generation string, index int) (io.ReadCloser, error) {
if err := c.Init(ctx); err != nil {
return nil, err
}
key, err := litestream.SnapshotPath(c.Path, generation, index)
if err != nil {
return nil, fmt.Errorf("cannot determine snapshot path: %w", err)
}
r, err := c.bkt.Object(key).NewReader(ctx)
if isNotExists(err) {
return nil, os.ErrNotExist
} else if err != nil {
return nil, fmt.Errorf("cannot start new reader for %q: %w", key, err)
}
internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "GET").Inc()
internal.OperationBytesCounterVec.WithLabelValues(ReplicaClientType, "GET").Add(float64(r.Attrs.Size))
return r, nil
}
// DeleteSnapshot deletes a snapshot with the given generation & index.
func (c *ReplicaClient) DeleteSnapshot(ctx context.Context, generation string, index int) error {
if err := c.Init(ctx); err != nil {
return err
}
key, err := litestream.SnapshotPath(c.Path, generation, index)
if err != nil {
return fmt.Errorf("cannot determine snapshot path: %w", err)
}
if err := c.bkt.Object(key).Delete(ctx); err != nil && !isNotExists(err) {
return fmt.Errorf("cannot delete snapshot %q: %w", key, err)
}
internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "DELETE").Inc()
return nil
}
// WALSegments returns an iterator over all available WAL files for a generation.
func (c *ReplicaClient) WALSegments(ctx context.Context, generation string) (litestream.WALSegmentIterator, error) {
if err := c.Init(ctx); err != nil {
return nil, err
}
dir, err := litestream.WALPath(c.Path, generation)
if err != nil {
return nil, fmt.Errorf("cannot determine wal path: %w", err)
}
return newWALSegmentIterator(generation, c.bkt.Objects(ctx, &storage.Query{Prefix: dir + "/"})), nil
}
// WriteWALSegment writes LZ4 compressed data from rd into a file on disk.
func (c *ReplicaClient) WriteWALSegment(ctx context.Context, pos litestream.Pos, rd io.Reader) (info litestream.WALSegmentInfo, err error) {
if err := c.Init(ctx); err != nil {
return info, err
}
key, err := litestream.WALSegmentPath(c.Path, pos.Generation, pos.Index, pos.Offset)
if err != nil {
return info, fmt.Errorf("cannot determine wal segment path: %w", err)
}
startTime := time.Now()
w := c.bkt.Object(key).NewWriter(ctx)
defer w.Close()
n, err := io.Copy(w, rd)
if err != nil {
return info, err
} else if err := w.Close(); err != nil {
return info, err
}
internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "PUT").Inc()
internal.OperationBytesCounterVec.WithLabelValues(ReplicaClientType, "PUT").Add(float64(n))
return litestream.WALSegmentInfo{
Generation: pos.Generation,
Index: pos.Index,
Offset: pos.Offset,
Size: n,
CreatedAt: startTime.UTC(),
}, nil
}
// WALSegmentReader returns a reader for a section of WAL data at the given position.
// Returns os.ErrNotExist if no matching index/offset is found.
func (c *ReplicaClient) WALSegmentReader(ctx context.Context, pos litestream.Pos) (io.ReadCloser, error) {
if err := c.Init(ctx); err != nil {
return nil, err
}
key, err := litestream.WALSegmentPath(c.Path, pos.Generation, pos.Index, pos.Offset)
if err != nil {
return nil, fmt.Errorf("cannot determine wal segment path: %w", err)
}
r, err := c.bkt.Object(key).NewReader(ctx)
if isNotExists(err) {
return nil, os.ErrNotExist
} else if err != nil {
return nil, err
}
internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "GET").Inc()
internal.OperationBytesCounterVec.WithLabelValues(ReplicaClientType, "GET").Add(float64(r.Attrs.Size))
return r, nil
}
// DeleteWALSegments deletes WAL segments at the given positions.
func (c *ReplicaClient) DeleteWALSegments(ctx context.Context, a []litestream.Pos) error {
if err := c.Init(ctx); err != nil {
return err
}
for _, pos := range a {
key, err := litestream.WALSegmentPath(c.Path, pos.Generation, pos.Index, pos.Offset)
if err != nil {
return fmt.Errorf("cannot determine wal segment path: %w", err)
}
if err := c.bkt.Object(key).Delete(ctx); err != nil && !isNotExists(err) {
return fmt.Errorf("cannot delete wal segment %q: %w", key, err)
}
internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "DELETE").Inc()
}
return nil
}
type snapshotIterator struct {
generation string
it *storage.ObjectIterator
info litestream.SnapshotInfo
err error
}
func newSnapshotIterator(generation string, it *storage.ObjectIterator) *snapshotIterator {
return &snapshotIterator{
generation: generation,
it: it,
}
}
func (itr *snapshotIterator) Close() (err error) {
return itr.err
}
func (itr *snapshotIterator) Next() bool {
// Exit if an error has already occurred.
if itr.err != nil {
return false
}
for {
// Fetch next object.
attrs, err := itr.it.Next()
if err == iterator.Done {
return false
} else if err != nil {
itr.err = err
return false
}
// Parse index, otherwise skip to the next object.
index, err := litestream.ParseSnapshotPath(path.Base(attrs.Name))
if err != nil {
continue
}
// Store current snapshot and return.
itr.info = litestream.SnapshotInfo{
Generation: itr.generation,
Index: index,
Size: attrs.Size,
CreatedAt: attrs.Created.UTC(),
}
return true
}
}
func (itr *snapshotIterator) Err() error { return itr.err }
func (itr *snapshotIterator) Snapshot() litestream.SnapshotInfo { return itr.info }
type walSegmentIterator struct {
generation string
it *storage.ObjectIterator
info litestream.WALSegmentInfo
err error
}
func newWALSegmentIterator(generation string, it *storage.ObjectIterator) *walSegmentIterator {
return &walSegmentIterator{
generation: generation,
it: it,
}
}
func (itr *walSegmentIterator) Close() (err error) {
return itr.err
}
func (itr *walSegmentIterator) Next() bool {
// Exit if an error has already occurred.
if itr.err != nil {
return false
}
for {
// Fetch next object.
attrs, err := itr.it.Next()
if err == iterator.Done {
return false
} else if err != nil {
itr.err = err
return false
}
// Parse index & offset, otherwise skip to the next object.
index, offset, err := litestream.ParseWALSegmentPath(path.Base(attrs.Name))
if err != nil {
continue
}
// Store current WAL segment and return.
itr.info = litestream.WALSegmentInfo{
Generation: itr.generation,
Index: index,
Offset: offset,
Size: attrs.Size,
CreatedAt: attrs.Created.UTC(),
}
return true
}
}
func (itr *walSegmentIterator) Err() error { return itr.err }
func (itr *walSegmentIterator) WALSegment() litestream.WALSegmentInfo {
return itr.info
}
func isNotExists(err error) bool {
return err == storage.ErrObjectNotExist
}
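For orientation, a short usage sketch of this client and its WAL segment iterator as exposed above. The bucket, prefix, and generation values are made up for illustration.
```
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/benbjohnson/litestream/gcs"
)

func main() {
	ctx := context.Background()

	c := gcs.NewReplicaClient()
	c.Bucket = "my-bucket" // hypothetical bucket
	c.Path = "db"          // hypothetical object prefix

	// Iterate WAL segments for a hypothetical generation.
	itr, err := c.WALSegments(ctx, "0123456701234567")
	if err != nil {
		log.Fatal(err)
	}
	defer itr.Close()

	for itr.Next() {
		info := itr.WALSegment()
		fmt.Println(info.Generation, info.Index, info.Offset, info.Size)
	}
	if err := itr.Err(); err != nil {
		log.Fatal(err)
	}
}
```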

52
go.mod
View File

@@ -1,9 +1,55 @@
module github.com/benbjohnson/litestream
go 1.15
go 1.21
require (
github.com/mattn/go-sqlite3 v1.14.5
github.com/pelletier/go-toml v1.8.1
cloud.google.com/go/storage v1.31.0
filippo.io/age v1.1.1
github.com/Azure/azure-storage-blob-go v0.15.0
github.com/aws/aws-sdk-go v1.44.318
github.com/mattn/go-shellwords v1.0.12
github.com/mattn/go-sqlite3 v1.14.17
github.com/pierrec/lz4/v4 v4.1.18
github.com/pkg/sftp v1.13.5
github.com/prometheus/client_golang v1.16.0
golang.org/x/crypto v0.12.0
golang.org/x/sync v0.3.0
golang.org/x/sys v0.11.0
google.golang.org/api v0.135.0
gopkg.in/yaml.v2 v2.4.0
)
require (
cloud.google.com/go v0.110.7 // indirect
cloud.google.com/go/compute v1.23.0 // indirect
cloud.google.com/go/compute/metadata v0.2.3 // indirect
cloud.google.com/go/iam v1.1.1 // indirect
github.com/Azure/azure-pipeline-go v0.2.3 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/google/go-cmp v0.5.9 // indirect
github.com/google/s2a-go v0.1.4 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.2.5 // indirect
github.com/googleapis/gax-go/v2 v2.12.0 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/kr/fs v0.1.0 // indirect
github.com/mattn/go-ieproxy v0.0.11 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/prometheus/client_model v0.4.0 // indirect
github.com/prometheus/common v0.44.0 // indirect
github.com/prometheus/procfs v0.11.1 // indirect
go.opencensus.io v0.24.0 // indirect
golang.org/x/net v0.14.0 // indirect
golang.org/x/oauth2 v0.11.0 // indirect
golang.org/x/text v0.12.0 // indirect
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20230807174057-1744710a1577 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20230807174057-1744710a1577 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20230807174057-1744710a1577 // indirect
google.golang.org/grpc v1.57.0 // indirect
google.golang.org/protobuf v1.31.0 // indirect
)

go.sum

@@ -1,38 +1,291 @@
github.com/Julusian/godocdown v0.0.0-20170816220326-6d19f8ff2df8/go.mod h1:INZr5t32rG59/5xeltqoCJoNY7e5x/3xoY9WSWVWg74=
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.110.7 h1:rJyC7nWRg2jWGZ4wSJ5nY65GTdYJkg0cd/uXb+ACI6o=
cloud.google.com/go v0.110.7/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI=
cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY=
cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM=
cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
cloud.google.com/go/iam v1.1.1 h1:lW7fzj15aVIXYHREOqjRBV9PsH0Z6u8Y46a1YGvQP4Y=
cloud.google.com/go/iam v1.1.1/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU=
cloud.google.com/go/storage v1.31.0 h1:+S3LjjEN2zZ+L5hOwj4+1OkGCsLVe0NzpXKQ1pSdTCI=
cloud.google.com/go/storage v1.31.0/go.mod h1:81ams1PrhW16L4kF7qg+4mTq7SRs5HsbDTM0bWvrwJ0=
filippo.io/age v1.1.1 h1:pIpO7l151hCnQ4BdyBujnGP2YlUo0uj6sAVNHGBvXHg=
filippo.io/age v1.1.1/go.mod h1:l03SrzDUrBkdBx8+IILdnn2KZysqQdbEBUQ4p3sqEQE=
github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVtCdfBE21U=
github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k=
github.com/Azure/azure-storage-blob-go v0.15.0 h1:rXtgp8tN1p29GvpGgfJetavIG0V7OgcSXPpwp3tx6qk=
github.com/Azure/azure-storage-blob-go v0.15.0/go.mod h1:vbjsVbX0dlxnRc4FFMPsS9BsJWPcne7GB7onqlPvz58=
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest/autorest/adal v0.9.13 h1:Mp5hbtOePIzM8pJVRa3YLrWWmZtoxRXqUEzCfJt3+/Q=
github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=
github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg=
github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/aws/aws-sdk-go v1.44.318 h1:Yl66rpbQHFUbxe9JBKLcvOvRivhVgP6+zH0b9KzARX8=
github.com/aws/aws-sdk-go v1.44.318/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dvyukov/go-fuzz v0.0.0-20200318091601-be3528f3a813/go.mod h1:11Gm+ccJnvAhCNLlf5+cS9KjtbaD5I5zaZpFMsTHWTw=
github.com/elazarl/go-bindata-assetfs v1.0.0/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4=
github.com/mattn/go-sqlite3 v1.14.5 h1:1IdxlwTNazvbKJQSxoJ5/9ECbEeaTTyeU7sEAZ5KKTQ=
github.com/mattn/go-sqlite3 v1.14.5/go.mod h1:WVKg1VTActs4Qso6iwGbiFih2UIHo0ENGwNd0Lj+XmI=
github.com/pelletier/go-toml v1.8.1 h1:1Nf83orprkJyknT6h7zbuEGUEjcyVlCxSUGTENmNCRM=
github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/form3tech-oss/jwt-go v3.2.2+incompatible h1:TcekIExNqud5crz4xD2pavyTgWiPvpYe4Xau31I0PRk=
github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/martian/v3 v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw=
github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
github.com/google/s2a-go v0.1.4 h1:1kZ/sQM3srePvKs3tXAvQzo66XfcReoqFpIpIccE7Oc=
github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/enterprise-certificate-proxy v0.2.5 h1:UR4rDjcgpgEnqpIEvkiqTYKBCKLNmlge2eVjoZfySzM=
github.com/googleapis/enterprise-certificate-proxy v0.2.5/go.mod h1:RxW0N9901Cko1VOCW3SXCpWP+mlIEkk2tP7jnHy9a3w=
github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas=
github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8=
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E=
github.com/mattn/go-ieproxy v0.0.11 h1:MQ/5BuGSgDAHZOJe6YY80IF2UVCfGkwfo6AeD7HtHYo=
github.com/mattn/go-ieproxy v0.0.11/go.mod h1:/NsJd+kxZBmjMc5hrJCKMbP57B84rvq9BiDRbtO9AS0=
github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk=
github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
github.com/mattn/go-sqlite3 v1.14.17 h1:mCRHCLDUBXgpKAqIKsaAaAsrAlbkeomtRFKXh2L6YIM=
github.com/mattn/go-sqlite3 v1.14.17/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/pierrec/lz4/v4 v4.1.18 h1:xaKrnTkyoqfh1YItXl56+6KJNVYWlEEPuAQW9xsplYQ=
github.com/pierrec/lz4/v4 v4.1.18/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/sftp v1.13.5 h1:a3RLUqkyjYRtBTZJZ1VRrKbN3zhuPLlUc3sphVz81go=
github.com/pkg/sftp v1.13.5/go.mod h1:wHDZ0IZX6JcBYRK1TH9bcVq8G7TLpVHYIGJRFnmPfxg=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/robertkrimen/godocdown v0.0.0-20130622164427-0bfa04905481/go.mod h1:C9WhFzY47SzYBIvzFqSvHIR6ROgDo4TtdTuRaOMjF/s=
github.com/stephens2424/writerset v1.0.2/go.mod h1:aS2JhsMn6eA7e82oNmW4rfsgAOp9COBTTl8mzkwADnc=
github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8=
github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY=
github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY=
github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY=
github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI=
github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c h1:u6SKchux2yDvFQnDHS3lPnIRmfVJ5Sxy3ao2SIdysLQ=
github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk=
golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14=
golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU=
golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191210023423-ac6580df4449 h1:gSbV7h1NRL2G1xTg/owz62CST1oJBmxy4QpMMregXVQ=
golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.11.0 h1:F9tnn/DA/Im8nCwm+fX+1/eBwi4qFjRT++MhtVC4ZX0=
golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc=
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200423201157-2723c5de0d66/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk=
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
google.golang.org/api v0.135.0 h1:6Vgfj6uPMXcyy66waYWBwmkeNB+9GmUlJDOzkukPQYQ=
google.golang.org/api v0.135.0/go.mod h1:Bp77uRFgwsSKI0BWH573F5Q6wSlznwI2NFayLOp/7mQ=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20230807174057-1744710a1577 h1:Tyk/35yqszRCvaragTn5NnkY6IiKk/XvHzEWepo71N0=
google.golang.org/genproto v0.0.0-20230807174057-1744710a1577/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4=
google.golang.org/genproto/googleapis/api v0.0.0-20230807174057-1744710a1577 h1:xv8KoglAClYGkprUSmDTKaILtzfD8XzG9NYVXMprjKo=
google.golang.org/genproto/googleapis/api v0.0.0-20230807174057-1744710a1577/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230807174057-1744710a1577 h1:wukfNtZmZUurLN/atp2hiIeTKn7QJWIQdHzqmsOnAOk=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230807174057-1744710a1577/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
google.golang.org/grpc v1.57.0 h1:kfzNeI/klCGD2YPMUlaGNT3pxvYfga7smW3Vth8Zsiw=
google.golang.org/grpc v1.57.0/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=

internal/internal.go (new file)

@@ -0,0 +1,141 @@
package internal
import (
"io"
"os"
"syscall"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
// ReadCloser wraps a reader to also attach a separate closer.
type ReadCloser struct {
r io.Reader
c io.Closer
}
// NewReadCloser returns a new instance of ReadCloser.
func NewReadCloser(r io.Reader, c io.Closer) *ReadCloser {
return &ReadCloser{r, c}
}
// Read reads bytes from the underlying reader into p.
func (r *ReadCloser) Read(p []byte) (n int, err error) {
return r.r.Read(p)
}
// Close closes the underlying reader (if it implements io.Closer) and then the attached closer.
func (r *ReadCloser) Close() error {
if rc, ok := r.r.(io.Closer); ok {
if err := rc.Close(); err != nil {
r.c.Close()
return err
}
}
return r.c.Close()
}
// ReadCounter wraps an io.Reader and counts the total number of bytes read.
type ReadCounter struct {
r io.Reader
n int64
}
// NewReadCounter returns a new instance of ReadCounter that wraps r.
func NewReadCounter(r io.Reader) *ReadCounter {
return &ReadCounter{r: r}
}
// Read reads from the underlying reader into p and adds the bytes read to the counter.
func (r *ReadCounter) Read(p []byte) (int, error) {
n, err := r.r.Read(p)
r.n += int64(n)
return n, err
}
// N returns the total number of bytes read.
func (r *ReadCounter) N() int64 { return r.n }
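
A quick sketch of ReadCounter in isolation; only the standard library is assumed:

```go
package main

import (
	"fmt"
	"io"
	"log"
	"strings"

	"github.com/benbjohnson/litestream/internal"
)

func main() {
	// Wrap any io.Reader; N() reports how many bytes passed through.
	cr := internal.NewReadCounter(strings.NewReader("hello, wal"))
	if _, err := io.Copy(io.Discard, cr); err != nil {
		log.Fatal(err)
	}
	fmt.Println(cr.N()) // 10
}
```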
// CreateFile creates the file and matches the mode & uid/gid of fi.
func CreateFile(filename string, fi os.FileInfo) (*os.File, error) {
mode := os.FileMode(0600)
if fi != nil {
mode = fi.Mode()
}
f, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode)
if err != nil {
return nil, err
}
uid, gid := Fileinfo(fi)
_ = f.Chown(uid, gid)
return f, nil
}
// MkdirAll is a copy of os.MkdirAll() except that it attempts to set the
// mode/uid/gid to match fi for each created directory.
func MkdirAll(path string, fi os.FileInfo) error {
uid, gid := Fileinfo(fi)
// Fast path: if we can tell whether path is a directory or file, stop with success or error.
dir, err := os.Stat(path)
if err == nil {
if dir.IsDir() {
return nil
}
return &os.PathError{Op: "mkdir", Path: path, Err: syscall.ENOTDIR}
}
// Slow path: make sure parent exists and then call Mkdir for path.
i := len(path)
for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator.
i--
}
j := i
for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element.
j--
}
if j > 1 {
// Create parent.
err = MkdirAll(fixRootDirectory(path[:j-1]), fi)
if err != nil {
return err
}
}
// Parent now exists; invoke Mkdir and use its result.
mode := os.FileMode(0700)
if fi != nil {
mode = fi.Mode()
}
err = os.Mkdir(path, mode)
if err != nil {
// Handle arguments like "foo/." by
// double-checking that directory doesn't exist.
dir, err1 := os.Lstat(path)
if err1 == nil && dir.IsDir() {
_ = os.Chown(path, uid, gid)
return nil
}
return err
}
_ = os.Chown(path, uid, gid)
return nil
}
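
A hedged sketch of how MkdirAll and CreateFile might combine during a restore to mirror the source database's mode and ownership onto the target; the paths and wiring here are illustrative, not the actual restore code:

```go
package main

import (
	"log"
	"os"
	"path/filepath"

	"github.com/benbjohnson/litestream/internal"
)

func main() {
	src := "db.sqlite" // illustrative source path
	fi, err := os.Stat(src)
	if err != nil {
		log.Fatal(err)
	}
	dirInfo, err := os.Stat(filepath.Dir(src))
	if err != nil {
		log.Fatal(err)
	}

	// Recreate the directory tree with the source directory's mode/uid/gid,
	// then create the target file with the source file's mode/uid/gid.
	dst := filepath.Join("restore", "db.sqlite")
	if err := internal.MkdirAll(filepath.Dir(dst), dirInfo); err != nil {
		log.Fatal(err)
	}
	f, err := internal.CreateFile(dst, fi)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
}
```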
// Shared replica metrics.
var (
OperationTotalCounterVec = promauto.NewCounterVec(prometheus.CounterOpts{
Name: "litestream_replica_operation_total",
Help: "The number of replica operations performed",
}, []string{"replica_type", "operation"})
OperationBytesCounterVec = promauto.NewCounterVec(prometheus.CounterOpts{
Name: "litestream_replica_operation_bytes",
Help: "The number of bytes used by replica operations",
}, []string{"replica_type", "operation"})
)
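
Both vectors are labeled by replica type and operation, as in the GCS client's DELETE accounting above; a minimal sketch with illustrative label values:

```go
package main

import "github.com/benbjohnson/litestream/internal"

func main() {
	// Record one GET operation and the bytes it transferred.
	// "gcs"/"GET" are illustrative label values.
	internal.OperationTotalCounterVec.WithLabelValues("gcs", "GET").Inc()
	internal.OperationBytesCounterVec.WithLabelValues("gcs", "GET").Add(4096)
}
```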

internal/internal_unix.go (new file)

@@ -0,0 +1,22 @@
//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
package internal
import (
"os"
"syscall"
)
// Fileinfo returns syscall fields from a FileInfo object.
func Fileinfo(fi os.FileInfo) (uid, gid int) {
if fi == nil {
return -1, -1
}
stat := fi.Sys().(*syscall.Stat_t)
return int(stat.Uid), int(stat.Gid)
}
func fixRootDirectory(p string) string {
return p
}
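
A small sketch of Fileinfo on Unix (the path is illustrative); the Windows variant below returns -1/-1 because ownership there does not map to uid/gid:

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/benbjohnson/litestream/internal"
)

func main() {
	fi, err := os.Stat("db.sqlite") // illustrative path
	if err != nil {
		log.Fatal(err)
	}
	uid, gid := internal.Fileinfo(fi)
	fmt.Println(uid, gid) // file owner on Unix; -1 -1 on Windows or when fi is nil
}
```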

internal/internal_windows.go (new file)

@@ -0,0 +1,23 @@
//go:build windows
// +build windows
package internal
import (
"os"
)
// Fileinfo returns syscall fields from a FileInfo object.
func Fileinfo(fi os.FileInfo) (uid, gid int) {
return -1, -1
}
// fixRootDirectory is copied from the standard library for use with MkdirAll()
func fixRootDirectory(p string) string {
if len(p) == len(`\\?\c:`) {
if os.IsPathSeparator(p[0]) && os.IsPathSeparator(p[1]) && p[2] == '?' && os.IsPathSeparator(p[3]) && p[5] == ':' {
return p + `\`
}
}
return p
}

litestream.go

@@ -1,37 +1,296 @@
package litestream
import (
"compress/gzip"
"database/sql"
"encoding/binary"
"encoding/hex"
"errors"
"fmt"
"io"
"os"
"path"
"path/filepath"
"regexp"
"strconv"
"strings"
"time"
_ "github.com/mattn/go-sqlite3"
"github.com/mattn/go-sqlite3"
)
// Naming constants.
const (
MetaDirSuffix = "-litestream"
WALDirName = "wal"
WALExt = ".wal"
SnapshotExt = ".snapshot"
WALDirName = "wal"
WALExt = ".wal"
WALSegmentExt = ".wal.lz4"
SnapshotExt = ".snapshot.lz4"
GenerationNameLen = 16
)
// SQLite checkpoint modes.
const (
CheckpointModePassive = "PASSIVE"
CheckpointModeFull = "FULL"
CheckpointModeRestart = "RESTART"
CheckpointModeTruncate = "TRUNCATE"
)
// Litestream errors.
var (
ErrNoSnapshots = errors.New("no snapshots available")
ErrNoGeneration = errors.New("no generation available")
ErrNoSnapshots = errors.New("no snapshots available")
ErrChecksumMismatch = errors.New("invalid replica, checksum mismatch")
)
var (
// LogWriter is the destination writer for all logging.
LogWriter = os.Stdout
// LogFlags are the flags passed to log.New().
LogFlags = 0
)
func init() {
sql.Register("litestream-sqlite3", &sqlite3.SQLiteDriver{
ConnectHook: func(conn *sqlite3.SQLiteConn) error {
if err := conn.SetFileControlInt("main", sqlite3.SQLITE_FCNTL_PERSIST_WAL, 1); err != nil {
return fmt.Errorf("cannot set file control: %w", err)
}
return nil
},
})
}
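
Because init registers the hook under the "litestream-sqlite3" driver name, any database/sql consumer can open connections with persistent WAL enabled; a minimal sketch (the database path is illustrative):

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/benbjohnson/litestream" // registers the "litestream-sqlite3" driver
)

func main() {
	db, err := sql.Open("litestream-sqlite3", "db.sqlite") // illustrative path
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// The ConnectHook sets SQLITE_FCNTL_PERSIST_WAL on every new connection.
	if err := db.Ping(); err != nil {
		log.Fatal(err)
	}
}
```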
// SnapshotIterator represents an iterator over a collection of snapshot metadata.
type SnapshotIterator interface {
io.Closer
// Prepares the next snapshot for reading with the Snapshot() method.
// Returns true if another snapshot is available. Returns false if no more
// snapshots are available or if an error occurred.
Next() bool
// Returns an error that occurred during iteration.
Err() error
// Returns metadata for the currently positioned snapshot.
Snapshot() SnapshotInfo
}
// SliceSnapshotIterator returns all snapshots from an iterator as a slice.
func SliceSnapshotIterator(itr SnapshotIterator) ([]SnapshotInfo, error) {
var a []SnapshotInfo
for itr.Next() {
a = append(a, itr.Snapshot())
}
return a, itr.Close()
}
var _ SnapshotIterator = (*SnapshotInfoSliceIterator)(nil)
// SnapshotInfoSliceIterator represents an iterator for iterating over a slice of snapshots.
type SnapshotInfoSliceIterator struct {
init bool
a []SnapshotInfo
}
// NewSnapshotInfoSliceIterator returns a new instance of SnapshotInfoSliceIterator.
func NewSnapshotInfoSliceIterator(a []SnapshotInfo) *SnapshotInfoSliceIterator {
return &SnapshotInfoSliceIterator{a: a}
}
// Close always returns nil.
func (itr *SnapshotInfoSliceIterator) Close() error { return nil }
// Next moves to the next snapshot. Returns true if another snapshot is available.
func (itr *SnapshotInfoSliceIterator) Next() bool {
if !itr.init {
itr.init = true
return len(itr.a) > 0
}
itr.a = itr.a[1:]
return len(itr.a) > 0
}
// Err always returns nil.
func (itr *SnapshotInfoSliceIterator) Err() error { return nil }
// Snapshot returns the metadata from the currently positioned snapshot.
func (itr *SnapshotInfoSliceIterator) Snapshot() SnapshotInfo {
if len(itr.a) == 0 {
return SnapshotInfo{}
}
return itr.a[0]
}
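
The slice-backed iterator and SliceSnapshotIterator invert each other, which is convenient in tests; a round-trip sketch:

```go
package main

import (
	"fmt"
	"log"

	"github.com/benbjohnson/litestream"
)

func main() {
	infos := []litestream.SnapshotInfo{
		{Generation: "0123456701234567", Index: 1},
		{Generation: "0123456701234567", Index: 2},
	}
	itr := litestream.NewSnapshotInfoSliceIterator(infos)
	out, err := litestream.SliceSnapshotIterator(itr) // drains the iterator back into a slice
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(out)) // 2
}
```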
// WALSegmentIterator represents an iterator over a collection of WAL segments.
type WALSegmentIterator interface {
io.Closer
// Prepares the next WAL segment for reading with the WALSegment() method.
// Returns true if another segment is available. Returns false if no more
// WAL segments are available or if an error occurred.
Next() bool
// Returns an error that occurred during iteration.
Err() error
// Returns metadata for the currently positioned WAL segment file.
WALSegment() WALSegmentInfo
}
// SliceWALSegmentIterator returns all WAL segment files from an iterator as a slice.
func SliceWALSegmentIterator(itr WALSegmentIterator) ([]WALSegmentInfo, error) {
var a []WALSegmentInfo
for itr.Next() {
a = append(a, itr.WALSegment())
}
return a, itr.Close()
}
var _ WALSegmentIterator = (*WALSegmentInfoSliceIterator)(nil)
// WALSegmentInfoSliceIterator represents an iterator for iterating over a slice of wal segments.
type WALSegmentInfoSliceIterator struct {
init bool
a []WALSegmentInfo
}
// NewWALSegmentInfoSliceIterator returns a new instance of WALSegmentInfoSliceIterator.
func NewWALSegmentInfoSliceIterator(a []WALSegmentInfo) *WALSegmentInfoSliceIterator {
return &WALSegmentInfoSliceIterator{a: a}
}
// Close always returns nil.
func (itr *WALSegmentInfoSliceIterator) Close() error { return nil }
// Next moves to the next wal segment. Returns true if another segment is available.
func (itr *WALSegmentInfoSliceIterator) Next() bool {
if !itr.init {
itr.init = true
return len(itr.a) > 0
}
itr.a = itr.a[1:]
return len(itr.a) > 0
}
// Err always returns nil.
func (itr *WALSegmentInfoSliceIterator) Err() error { return nil }
// WALSegment returns the metadata from the currently positioned wal segment.
func (itr *WALSegmentInfoSliceIterator) WALSegment() WALSegmentInfo {
if len(itr.a) == 0 {
return WALSegmentInfo{}
}
return itr.a[0]
}
// SnapshotInfo represents file information about a snapshot.
type SnapshotInfo struct {
Generation string
Index int
Size int64
CreatedAt time.Time
}
// Pos returns the WAL position when the snapshot was made.
func (info *SnapshotInfo) Pos() Pos {
return Pos{Generation: info.Generation, Index: info.Index}
}
// SnapshotInfoSlice represents a slice of snapshot metadata.
type SnapshotInfoSlice []SnapshotInfo
func (a SnapshotInfoSlice) Len() int { return len(a) }
func (a SnapshotInfoSlice) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a SnapshotInfoSlice) Less(i, j int) bool {
if a[i].Generation != a[j].Generation {
return a[i].Generation < a[j].Generation
}
return a[i].Index < a[j].Index
}
// FilterSnapshotsAfter returns all snapshots that were created on or after t.
func FilterSnapshotsAfter(a []SnapshotInfo, t time.Time) []SnapshotInfo {
other := make([]SnapshotInfo, 0, len(a))
for _, snapshot := range a {
if !snapshot.CreatedAt.Before(t) {
other = append(other, snapshot)
}
}
return other
}
// FindMinSnapshotByGeneration finds the snapshot with the lowest index in a generation.
func FindMinSnapshotByGeneration(a []SnapshotInfo, generation string) *SnapshotInfo {
var min *SnapshotInfo
for i := range a {
snapshot := &a[i]
if snapshot.Generation != generation {
continue
} else if min == nil || snapshot.Index < min.Index {
min = snapshot
}
}
return min
}
// WALInfo represents file information about a WAL file.
type WALInfo struct {
Generation string
Index int
CreatedAt time.Time
}
// WALInfoSlice represents a slice of WAL metadata.
type WALInfoSlice []WALInfo
func (a WALInfoSlice) Len() int { return len(a) }
func (a WALInfoSlice) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a WALInfoSlice) Less(i, j int) bool {
if a[i].Generation != a[j].Generation {
return a[i].Generation < a[j].Generation
}
return a[i].Index < a[j].Index
}
// WALSegmentInfo represents file information about a WAL segment file.
type WALSegmentInfo struct {
Generation string
Index int
Offset int64
Size int64
CreatedAt time.Time
}
// Pos returns the WAL position when the segment was made.
func (info *WALSegmentInfo) Pos() Pos {
return Pos{Generation: info.Generation, Index: info.Index, Offset: info.Offset}
}
// WALSegmentInfoSlice represents a slice of WAL segment metadata.
type WALSegmentInfoSlice []WALSegmentInfo
func (a WALSegmentInfoSlice) Len() int { return len(a) }
func (a WALSegmentInfoSlice) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a WALSegmentInfoSlice) Less(i, j int) bool {
if a[i].Generation != a[j].Generation {
return a[i].Generation < a[j].Generation
} else if a[i].Index != a[j].Index {
return a[i].Index < a[j].Index
}
return a[i].Offset < a[j].Offset
}
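
Since these slice types implement sort.Interface, segment lists can be put into replay order (generation, then index, then offset) with sort.Sort; for example:

```go
package main

import (
	"fmt"
	"sort"

	"github.com/benbjohnson/litestream"
)

func main() {
	a := litestream.WALSegmentInfoSlice{
		{Generation: "0123456701234567", Index: 1, Offset: 4096},
		{Generation: "0123456701234567", Index: 0, Offset: 0},
		{Generation: "0123456701234567", Index: 1, Offset: 0},
	}
	sort.Sort(a) // orders by generation, then index, then offset
	for _, info := range a {
		fmt.Println(info.Pos())
	}
	// 0123456701234567/00000000:0
	// 0123456701234567/00000001:0
	// 0123456701234567/00000001:4096
}
```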
// Pos is a position in the WAL for a generation.
type Pos struct {
Generation string // generation name
@@ -42,9 +301,9 @@ type Pos struct {
// String returns a string representation.
func (p Pos) String() string {
if p.IsZero() {
return "<>"
return ""
}
return fmt.Sprintf("<%s,%d,%d>", p.Generation, p.Index, p.Offset)
return fmt.Sprintf("%s/%08x:%d", p.Generation, p.Index, p.Offset)
}
// IsZero returns true if p is the zero value.
@@ -52,6 +311,11 @@ func (p Pos) IsZero() bool {
return p == (Pos{})
}
// Truncate returns p with the offset truncated to zero.
func (p Pos) Truncate() Pos {
return Pos{Generation: p.Generation, Index: p.Index}
}
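
A quick sketch of the reworked String format (generation/index:offset) and of Truncate:

```go
package main

import (
	"fmt"

	"github.com/benbjohnson/litestream"
)

func main() {
	p := litestream.Pos{Generation: "0123456701234567", Index: 0x233, Offset: 128}
	fmt.Println(p)            // 0123456701234567/00000233:128
	fmt.Println(p.Truncate()) // 0123456701234567/00000233:0
}
```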
// Checksum computes a running SQLite checksum over a byte slice.
func Checksum(bo binary.ByteOrder, s0, s1 uint32, b []byte) (uint32, uint32) {
assert(len(b)%8 == 0, "misaligned checksum byte slice")
@@ -98,12 +362,9 @@ func readWALHeader(filename string) ([]byte, error) {
return buf[:n], err
}
func readCheckpointSeqNo(hdr []byte) uint32 {
return binary.BigEndian.Uint32(hdr[12:])
}
// readFileAt reads a slice from a file.
func readFileAt(filename string, offset, n int64) ([]byte, error) {
// readWALFileAt reads a slice from a file. Do not use this with database files
// as it causes problems with non-OFD locks.
func readWALFileAt(filename string, offset, n int64) ([]byte, error) {
f, err := os.Open(filename)
if err != nil {
return nil, err
@@ -119,19 +380,6 @@ func readFileAt(filename string, offset, n int64) ([]byte, error) {
return buf, nil
}
func ParseWALFilename(name string) (index int, err error) {
v, err := strconv.ParseInt(strings.TrimSuffix(name, WALExt), 16, 64)
if err != nil {
return 0, fmt.Errorf("invalid wal filename: %q", name)
}
return int(v), nil
}
func FormatWALFilename(index int) string {
assert(index >= 0, "wal index must be non-negative")
return fmt.Sprintf("%016x%s", index, WALExt)
}
// removeTmpFiles recursively finds and removes .tmp files.
func removeTmpFiles(root string) error {
return filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
@@ -159,6 +407,56 @@ func IsGenerationName(s string) bool {
return true
}
// GenerationsPath returns the path to a generation root directory.
func GenerationsPath(root string) string {
return path.Join(root, "generations")
}
// GenerationPath returns the path to a generation's root directory.
func GenerationPath(root, generation string) (string, error) {
dir := GenerationsPath(root)
if generation == "" {
return "", fmt.Errorf("generation required")
}
return path.Join(dir, generation), nil
}
// SnapshotsPath returns the path to a generation's snapshot directory.
func SnapshotsPath(root, generation string) (string, error) {
dir, err := GenerationPath(root, generation)
if err != nil {
return "", err
}
return path.Join(dir, "snapshots"), nil
}
// SnapshotPath returns the path to an uncompressed snapshot file.
func SnapshotPath(root, generation string, index int) (string, error) {
dir, err := SnapshotsPath(root, generation)
if err != nil {
return "", err
}
return path.Join(dir, FormatSnapshotPath(index)), nil
}
// WALPath returns the path to a generation's WAL directory
func WALPath(root, generation string) (string, error) {
dir, err := GenerationPath(root, generation)
if err != nil {
return "", err
}
return path.Join(dir, "wal"), nil
}
// WALSegmentPath returns the path to a WAL segment file.
func WALSegmentPath(root, generation string, index int, offset int64) (string, error) {
dir, err := WALPath(root, generation)
if err != nil {
return "", err
}
return path.Join(dir, FormatWALSegmentPath(index, offset)), nil
}
// IsSnapshotPath returns true if s is a path to a snapshot file.
func IsSnapshotPath(s string) bool {
return snapshotPathRegex.MatchString(s)
@@ -166,96 +464,82 @@ func IsSnapshotPath(s string) bool {
// ParseSnapshotPath returns the index for the snapshot.
// Returns an error if the path is not a valid snapshot path.
func ParseSnapshotPath(s string) (index int, typ, ext string, err error) {
func ParseSnapshotPath(s string) (index int, err error) {
s = filepath.Base(s)
a := snapshotPathRegex.FindStringSubmatch(s)
if a == nil {
return 0, "", "", fmt.Errorf("invalid snapshot path: %s", s)
return 0, fmt.Errorf("invalid snapshot path: %s", s)
}
i64, _ := strconv.ParseUint(a[1], 16, 64)
return int(i64), a[2], a[3], nil
return int(i64), nil
}
var snapshotPathRegex = regexp.MustCompile(`^([0-9a-f]{16})(?:-(\w+))?(.snapshot(?:.gz)?)$`)
// FormatSnapshotPath formats a snapshot filename with a given index.
func FormatSnapshotPath(index int) string {
assert(index >= 0, "snapshot index must be non-negative")
return fmt.Sprintf("%08x%s", index, SnapshotExt)
}
var snapshotPathRegex = regexp.MustCompile(`^([0-9a-f]{8})\.snapshot\.lz4$`)
// IsWALPath returns true if s is a path to a WAL file.
func IsWALPath(s string) bool {
return walPathRegex.MatchString(s)
}
// ParseWALPath returns the index & offset for the WAL file.
// Returns an error if the path is not a valid snapshot path.
func ParseWALPath(s string) (index int, offset int64, ext string, err error) {
// ParseWALPath returns the index for the WAL file.
// Returns an error if the path is not a valid WAL path.
func ParseWALPath(s string) (index int, err error) {
s = filepath.Base(s)
a := walPathRegex.FindStringSubmatch(s)
if a == nil {
return 0, 0, "", fmt.Errorf("invalid wal path: %s", s)
return 0, fmt.Errorf("invalid wal path: %s", s)
}
i64, _ := strconv.ParseUint(a[1], 16, 64)
return int(i64), nil
}
// FormatWALPath formats a WAL filename with a given index.
func FormatWALPath(index int) string {
assert(index >= 0, "wal index must be non-negative")
return fmt.Sprintf("%08x%s", index, WALExt)
}
var walPathRegex = regexp.MustCompile(`^([0-9a-f]{8})\.wal$`)
// ParseWALSegmentPath returns the index & offset for the WAL segment file.
// Returns an error if the path is not a valid wal segment path.
func ParseWALSegmentPath(s string) (index int, offset int64, err error) {
s = filepath.Base(s)
a := walSegmentPathRegex.FindStringSubmatch(s)
if a == nil {
return 0, 0, fmt.Errorf("invalid wal segment path: %s", s)
}
i64, _ := strconv.ParseUint(a[1], 16, 64)
off64, _ := strconv.ParseUint(a[2], 16, 64)
return int(i64), int64(off64), a[3], nil
return int(i64), int64(off64), nil
}
var walPathRegex = regexp.MustCompile(`^([0-9a-f]{16})(?:_([0-9a-f]{16}))?(.wal(?:.gz)?)$`)
// FormatWALSegmentPath formats a WAL segment filename with a given index & offset.
func FormatWALSegmentPath(index int, offset int64) string {
assert(index >= 0, "wal index must be non-negative")
assert(offset >= 0, "wal offset must be non-negative")
return fmt.Sprintf("%08x_%08x%s", index, offset, WALSegmentExt)
}
var walSegmentPathRegex = regexp.MustCompile(`^([0-9a-f]{8})(?:_([0-9a-f]{8}))\.wal\.lz4$`)
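
Formatting and parsing round-trip; a quick sketch:

```go
package main

import (
	"fmt"
	"log"

	"github.com/benbjohnson/litestream"
)

func main() {
	name := litestream.FormatWALSegmentPath(0x3e8, 0x3e9)
	fmt.Println(name) // 000003e8_000003e9.wal.lz4

	index, offset, err := litestream.ParseWALSegmentPath(name)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(index, offset) // 1000 1001
}
```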
// isHexChar returns true if ch is a lowercase hex character.
func isHexChar(ch rune) bool {
return (ch >= '0' && ch <= '9') || (ch >= 'a' && ch <= 'f')
}
// gzipReadCloser wraps gzip.Reader to also close the underlying reader on close.
type gzipReadCloser struct {
r *gzip.Reader
closer io.ReadCloser
}
func (r *gzipReadCloser) Read(p []byte) (n int, err error) {
return r.r.Read(p)
}
func (r *gzipReadCloser) Close() error {
if err := r.r.Close(); err != nil {
r.closer.Close()
return err
}
return r.closer.Close()
}
// HexDump returns hexdump output but with duplicate lines removed.
func HexDump(b []byte) string {
const prefixN = len("00000000")
var output []string
var prev string
var ellipsis bool
lines := strings.Split(strings.TrimSpace(hex.Dump(b)), "\n")
for i, line := range lines {
// Add line to output if it is not repeating or the last line.
if i == 0 || i == len(lines)-1 || trimPrefixN(line, prefixN) != trimPrefixN(prev, prefixN) {
output = append(output, line)
prev, ellipsis = line, false
continue
}
// Add an ellipsis for the first duplicate line.
if !ellipsis {
output = append(output, "...")
ellipsis = true
continue
}
}
return strings.Join(output, "\n")
}
func trimPrefixN(s string, n int) string {
if len(s) < n {
return ""
}
return s[n:]
}
func assert(condition bool, message string) {
if !condition {
panic("assertion failed: " + message)

litestream_test.go

@@ -6,6 +6,7 @@ import (
"testing"
"github.com/benbjohnson/litestream"
_ "github.com/mattn/go-sqlite3"
)
func TestChecksum(t *testing.T) {
@@ -39,6 +40,104 @@ func TestChecksum(t *testing.T) {
})
}
func TestGenerationsPath(t *testing.T) {
t.Run("OK", func(t *testing.T) {
if got, want := litestream.GenerationsPath("foo"), "foo/generations"; got != want {
t.Fatalf("GenerationsPath()=%v, want %v", got, want)
}
})
t.Run("NoPath", func(t *testing.T) {
if got, want := litestream.GenerationsPath(""), "generations"; got != want {
t.Fatalf("GenerationsPath()=%v, want %v", got, want)
}
})
}
func TestGenerationPath(t *testing.T) {
t.Run("OK", func(t *testing.T) {
if got, err := litestream.GenerationPath("foo", "0123456701234567"); err != nil {
t.Fatal(err)
} else if want := "foo/generations/0123456701234567"; got != want {
t.Fatalf("GenerationPath()=%v, want %v", got, want)
}
})
t.Run("ErrNoGeneration", func(t *testing.T) {
if _, err := litestream.GenerationPath("foo", ""); err == nil || err.Error() != `generation required` {
t.Fatalf("expected error: %v", err)
}
})
}
func TestSnapshotsPath(t *testing.T) {
t.Run("OK", func(t *testing.T) {
if got, err := litestream.SnapshotsPath("foo", "0123456701234567"); err != nil {
t.Fatal(err)
} else if want := "foo/generations/0123456701234567/snapshots"; got != want {
t.Fatalf("SnapshotsPath()=%v, want %v", got, want)
}
})
t.Run("ErrNoGeneration", func(t *testing.T) {
if _, err := litestream.SnapshotsPath("foo", ""); err == nil || err.Error() != `generation required` {
t.Fatalf("unexpected error: %v", err)
}
})
}
func TestSnapshotPath(t *testing.T) {
t.Run("OK", func(t *testing.T) {
if got, err := litestream.SnapshotPath("foo", "0123456701234567", 1000); err != nil {
t.Fatal(err)
} else if want := "foo/generations/0123456701234567/snapshots/000003e8.snapshot.lz4"; got != want {
t.Fatalf("SnapshotPath()=%v, want %v", got, want)
}
})
t.Run("ErrNoGeneration", func(t *testing.T) {
if _, err := litestream.SnapshotPath("foo", "", 1000); err == nil || err.Error() != `generation required` {
t.Fatalf("unexpected error: %v", err)
}
})
}
func TestWALPath(t *testing.T) {
t.Run("OK", func(t *testing.T) {
if got, err := litestream.WALPath("foo", "0123456701234567"); err != nil {
t.Fatal(err)
} else if want := "foo/generations/0123456701234567/wal"; got != want {
t.Fatalf("WALPath()=%v, want %v", got, want)
}
})
t.Run("ErrNoGeneration", func(t *testing.T) {
if _, err := litestream.WALPath("foo", ""); err == nil || err.Error() != `generation required` {
t.Fatalf("unexpected error: %v", err)
}
})
}
func TestWALSegmentPath(t *testing.T) {
t.Run("OK", func(t *testing.T) {
if got, err := litestream.WALSegmentPath("foo", "0123456701234567", 1000, 1001); err != nil {
t.Fatal(err)
} else if want := "foo/generations/0123456701234567/wal/000003e8_000003e9.wal.lz4"; got != want {
t.Fatalf("WALPath()=%v, want %v", got, want)
}
})
t.Run("ErrNoGeneration", func(t *testing.T) {
if _, err := litestream.WALSegmentPath("foo", "", 1000, 0); err == nil || err.Error() != `generation required` {
t.Fatalf("unexpected error: %v", err)
}
})
}
func TestFindMinSnapshotByGeneration(t *testing.T) {
infos := []litestream.SnapshotInfo{
{Generation: "29cf4bced74e92ab", Index: 0},
{Generation: "5dfeb4aa03232553", Index: 24},
}
if got, want := litestream.FindMinSnapshotByGeneration(infos, "29cf4bced74e92ab"), &infos[0]; got != want {
t.Fatalf("info=%#v, want %#v", got, want)
}
}
func MustDecodeHexString(s string) []byte {
b, err := hex.DecodeString(s)
if err != nil {

mock/replica_client.go (new file)

@@ -0,0 +1,65 @@
package mock
import (
"context"
"io"
"github.com/benbjohnson/litestream"
)
var _ litestream.ReplicaClient = (*ReplicaClient)(nil)
type ReplicaClient struct {
GenerationsFunc func(ctx context.Context) ([]string, error)
DeleteGenerationFunc func(ctx context.Context, generation string) error
SnapshotsFunc func(ctx context.Context, generation string) (litestream.SnapshotIterator, error)
WriteSnapshotFunc func(ctx context.Context, generation string, index int, r io.Reader) (litestream.SnapshotInfo, error)
DeleteSnapshotFunc func(ctx context.Context, generation string, index int) error
SnapshotReaderFunc func(ctx context.Context, generation string, index int) (io.ReadCloser, error)
WALSegmentsFunc func(ctx context.Context, generation string) (litestream.WALSegmentIterator, error)
WriteWALSegmentFunc func(ctx context.Context, pos litestream.Pos, r io.Reader) (litestream.WALSegmentInfo, error)
DeleteWALSegmentsFunc func(ctx context.Context, a []litestream.Pos) error
WALSegmentReaderFunc func(ctx context.Context, pos litestream.Pos) (io.ReadCloser, error)
}
func (c *ReplicaClient) Type() string { return "mock" }
func (c *ReplicaClient) Generations(ctx context.Context) ([]string, error) {
return c.GenerationsFunc(ctx)
}
func (c *ReplicaClient) DeleteGeneration(ctx context.Context, generation string) error {
return c.DeleteGenerationFunc(ctx, generation)
}
func (c *ReplicaClient) Snapshots(ctx context.Context, generation string) (litestream.SnapshotIterator, error) {
return c.SnapshotsFunc(ctx, generation)
}
func (c *ReplicaClient) WriteSnapshot(ctx context.Context, generation string, index int, r io.Reader) (litestream.SnapshotInfo, error) {
return c.WriteSnapshotFunc(ctx, generation, index, r)
}
func (c *ReplicaClient) DeleteSnapshot(ctx context.Context, generation string, index int) error {
return c.DeleteSnapshotFunc(ctx, generation, index)
}
func (c *ReplicaClient) SnapshotReader(ctx context.Context, generation string, index int) (io.ReadCloser, error) {
return c.SnapshotReaderFunc(ctx, generation, index)
}
func (c *ReplicaClient) WALSegments(ctx context.Context, generation string) (litestream.WALSegmentIterator, error) {
return c.WALSegmentsFunc(ctx, generation)
}
func (c *ReplicaClient) WriteWALSegment(ctx context.Context, pos litestream.Pos, r io.Reader) (litestream.WALSegmentInfo, error) {
return c.WriteWALSegmentFunc(ctx, pos, r)
}
func (c *ReplicaClient) DeleteWALSegments(ctx context.Context, a []litestream.Pos) error {
return c.DeleteWALSegmentsFunc(ctx, a)
}
func (c *ReplicaClient) WALSegmentReader(ctx context.Context, pos litestream.Pos) (io.ReadCloser, error) {
return c.WALSegmentReaderFunc(ctx, pos)
}

replica.go Normal file (1432 lines)

File diff suppressed because it is too large

replica_client.go Normal file

@@ -0,0 +1,48 @@
package litestream
import (
"context"
"io"
)
// ReplicaClient represents a client for connecting to a Replica.
type ReplicaClient interface {
// Returns the type of client.
Type() string
// Returns a list of available generations.
Generations(ctx context.Context) ([]string, error)
// Deletes all snapshots & WAL segments within a generation.
DeleteGeneration(ctx context.Context, generation string) error
// Returns an iterator of all snapshots within a generation on the replica.
Snapshots(ctx context.Context, generation string) (SnapshotIterator, error)
// Writes LZ4 compressed snapshot data to the replica at a given index
// within a generation. Returns metadata for the snapshot.
WriteSnapshot(ctx context.Context, generation string, index int, r io.Reader) (SnapshotInfo, error)
// Deletes a snapshot with the given generation & index.
DeleteSnapshot(ctx context.Context, generation string, index int) error
// Returns a reader that contains LZ4 compressed snapshot data for a
// given index within a generation. Returns an os.ErrNotExist error if
// the snapshot does not exist.
SnapshotReader(ctx context.Context, generation string, index int) (io.ReadCloser, error)
// Returns an iterator of all WAL segments within a generation on the replica.
WALSegments(ctx context.Context, generation string) (WALSegmentIterator, error)
// Writes an LZ4 compressed WAL segment at a given position.
// Returns metadata for the written segment.
WriteWALSegment(ctx context.Context, pos Pos, r io.Reader) (WALSegmentInfo, error)
// Deletes one or more WAL segments at the given positions.
DeleteWALSegments(ctx context.Context, a []Pos) error
// Returns a reader that contains an LZ4 compressed WAL segment at a given
// index/offset within a generation. Returns an os.ErrNotExist error if the
// WAL segment does not exist.
WALSegmentReader(ctx context.Context, pos Pos) (io.ReadCloser, error)
}

replica_client_test.go Normal file

@@ -0,0 +1,573 @@
package litestream_test
import (
"context"
"flag"
"fmt"
"io/ioutil"
"math/rand"
"os"
"path"
"reflect"
"sort"
"strings"
"testing"
"time"
"github.com/benbjohnson/litestream"
"github.com/benbjohnson/litestream/abs"
"github.com/benbjohnson/litestream/file"
"github.com/benbjohnson/litestream/gcs"
"github.com/benbjohnson/litestream/s3"
"github.com/benbjohnson/litestream/sftp"
)
func init() {
rand.Seed(time.Now().UnixNano())
}
var (
// Enables integration tests.
integration = flag.String("integration", "file", "")
)
// S3 settings
var (
// Replica client settings
s3AccessKeyID = flag.String("s3-access-key-id", os.Getenv("LITESTREAM_S3_ACCESS_KEY_ID"), "")
s3SecretAccessKey = flag.String("s3-secret-access-key", os.Getenv("LITESTREAM_S3_SECRET_ACCESS_KEY"), "")
s3Region = flag.String("s3-region", os.Getenv("LITESTREAM_S3_REGION"), "")
s3Bucket = flag.String("s3-bucket", os.Getenv("LITESTREAM_S3_BUCKET"), "")
s3Path = flag.String("s3-path", os.Getenv("LITESTREAM_S3_PATH"), "")
s3Endpoint = flag.String("s3-endpoint", os.Getenv("LITESTREAM_S3_ENDPOINT"), "")
s3ForcePathStyle = flag.Bool("s3-force-path-style", os.Getenv("LITESTREAM_S3_FORCE_PATH_STYLE") == "true", "")
s3SkipVerify = flag.Bool("s3-skip-verify", os.Getenv("LITESTREAM_S3_SKIP_VERIFY") == "true", "")
)
// Google cloud storage settings
var (
gcsBucket = flag.String("gcs-bucket", os.Getenv("LITESTREAM_GCS_BUCKET"), "")
gcsPath = flag.String("gcs-path", os.Getenv("LITESTREAM_GCS_PATH"), "")
)
// Azure blob storage settings
var (
absAccountName = flag.String("abs-account-name", os.Getenv("LITESTREAM_ABS_ACCOUNT_NAME"), "")
absAccountKey = flag.String("abs-account-key", os.Getenv("LITESTREAM_ABS_ACCOUNT_KEY"), "")
absBucket = flag.String("abs-bucket", os.Getenv("LITESTREAM_ABS_BUCKET"), "")
absPath = flag.String("abs-path", os.Getenv("LITESTREAM_ABS_PATH"), "")
)
// SFTP settings
var (
sftpHost = flag.String("sftp-host", os.Getenv("LITESTREAM_SFTP_HOST"), "")
sftpUser = flag.String("sftp-user", os.Getenv("LITESTREAM_SFTP_USER"), "")
sftpPassword = flag.String("sftp-password", os.Getenv("LITESTREAM_SFTP_PASSWORD"), "")
sftpKeyPath = flag.String("sftp-key-path", os.Getenv("LITESTREAM_SFTP_KEY_PATH"), "")
sftpPath = flag.String("sftp-path", os.Getenv("LITESTREAM_SFTP_PATH"), "")
)
func TestReplicaClient_Generations(t *testing.T) {
RunWithReplicaClient(t, "OK", func(t *testing.T, c litestream.ReplicaClient) {
t.Parallel()
// Write snapshots.
if _, err := c.WriteSnapshot(context.Background(), "5efbd8d042012dca", 0, strings.NewReader(`foo`)); err != nil {
t.Fatal(err)
} else if _, err := c.WriteSnapshot(context.Background(), "b16ddcf5c697540f", 0, strings.NewReader(`bar`)); err != nil {
t.Fatal(err)
} else if _, err := c.WriteSnapshot(context.Background(), "155fe292f8333c72", 0, strings.NewReader(`baz`)); err != nil {
t.Fatal(err)
}
// Verify returned generations.
if got, err := c.Generations(context.Background()); err != nil {
t.Fatal(err)
} else if want := []string{"155fe292f8333c72", "5efbd8d042012dca", "b16ddcf5c697540f"}; !reflect.DeepEqual(got, want) {
t.Fatalf("Generations()=%v, want %v", got, want)
}
})
RunWithReplicaClient(t, "NoGenerationsDir", func(t *testing.T, c litestream.ReplicaClient) {
t.Parallel()
if generations, err := c.Generations(context.Background()); err != nil {
t.Fatal(err)
} else if got, want := len(generations), 0; got != want {
t.Fatalf("len(Generations())=%v, want %v", got, want)
}
})
}
func TestReplicaClient_Snapshots(t *testing.T) {
RunWithReplicaClient(t, "OK", func(t *testing.T, c litestream.ReplicaClient) {
t.Parallel()
// Write snapshots.
if _, err := c.WriteSnapshot(context.Background(), "5efbd8d042012dca", 1, strings.NewReader(``)); err != nil {
t.Fatal(err)
} else if _, err := c.WriteSnapshot(context.Background(), "b16ddcf5c697540f", 5, strings.NewReader(`x`)); err != nil {
t.Fatal(err)
} else if _, err := c.WriteSnapshot(context.Background(), "b16ddcf5c697540f", 10, strings.NewReader(`xyz`)); err != nil {
t.Fatal(err)
}
// Fetch all snapshots by generation.
itr, err := c.Snapshots(context.Background(), "b16ddcf5c697540f")
if err != nil {
t.Fatal(err)
}
defer itr.Close()
// Read all snapshots into a slice so they can be sorted.
a, err := litestream.SliceSnapshotIterator(itr)
if err != nil {
t.Fatal(err)
} else if got, want := len(a), 2; got != want {
t.Fatalf("len=%v, want %v", got, want)
}
sort.Sort(litestream.SnapshotInfoSlice(a))
// Verify first snapshot metadata.
if got, want := a[0].Generation, "b16ddcf5c697540f"; got != want {
t.Fatalf("Generation=%v, want %v", got, want)
} else if got, want := a[0].Index, 5; got != want {
t.Fatalf("Index=%v, want %v", got, want)
} else if got, want := a[0].Size, int64(1); got != want {
t.Fatalf("Size=%v, want %v", got, want)
} else if a[0].CreatedAt.IsZero() {
t.Fatalf("expected CreatedAt")
}
// Verify second snapshot metadata.
if got, want := a[1].Generation, "b16ddcf5c697540f"; got != want {
t.Fatalf("Generation=%v, want %v", got, want)
} else if got, want := a[1].Index, 10; got != want {
t.Fatalf("Index=%v, want %v", got, want)
} else if got, want := a[1].Size, int64(3); got != want {
t.Fatalf("Size=%v, want %v", got, want)
} else if a[1].CreatedAt.IsZero() {
t.Fatalf("expected CreatedAt")
}
// Ensure close is clean.
if err := itr.Close(); err != nil {
t.Fatal(err)
}
})
RunWithReplicaClient(t, "NoGenerationDir", func(t *testing.T, c litestream.ReplicaClient) {
t.Parallel()
itr, err := c.Snapshots(context.Background(), "5efbd8d042012dca")
if err != nil {
t.Fatal(err)
}
defer itr.Close()
if itr.Next() {
t.Fatal("expected no snapshots")
}
})
RunWithReplicaClient(t, "ErrNoGeneration", func(t *testing.T, c litestream.ReplicaClient) {
t.Parallel()
itr, err := c.Snapshots(context.Background(), "")
if err == nil {
err = itr.Close()
}
if err == nil || err.Error() != `cannot determine snapshots path: generation required` {
t.Fatalf("unexpected error: %v", err)
}
})
}
func TestReplicaClient_WriteSnapshot(t *testing.T) {
RunWithReplicaClient(t, "OK", func(t *testing.T, c litestream.ReplicaClient) {
t.Parallel()
if _, err := c.WriteSnapshot(context.Background(), "b16ddcf5c697540f", 1000, strings.NewReader(`foobar`)); err != nil {
t.Fatal(err)
}
if r, err := c.SnapshotReader(context.Background(), "b16ddcf5c697540f", 1000); err != nil {
t.Fatal(err)
} else if buf, err := ioutil.ReadAll(r); err != nil {
t.Fatal(err)
} else if err := r.Close(); err != nil {
t.Fatal(err)
} else if got, want := string(buf), `foobar`; got != want {
t.Fatalf("data=%q, want %q", got, want)
}
})
RunWithReplicaClient(t, "ErrNoGeneration", func(t *testing.T, c litestream.ReplicaClient) {
t.Parallel()
if _, err := c.WriteSnapshot(context.Background(), "", 0, nil); err == nil || err.Error() != `cannot determine snapshot path: generation required` {
t.Fatalf("unexpected error: %v", err)
}
})
}
func TestReplicaClient_SnapshotReader(t *testing.T) {
RunWithReplicaClient(t, "OK", func(t *testing.T, c litestream.ReplicaClient) {
t.Parallel()
if _, err := c.WriteSnapshot(context.Background(), "5efbd8d042012dca", 10, strings.NewReader(`foo`)); err != nil {
t.Fatal(err)
}
r, err := c.SnapshotReader(context.Background(), "5efbd8d042012dca", 10)
if err != nil {
t.Fatal(err)
}
defer r.Close()
if buf, err := ioutil.ReadAll(r); err != nil {
t.Fatal(err)
} else if got, want := string(buf), "foo"; got != want {
t.Fatalf("ReadAll=%v, want %v", got, want)
}
})
RunWithReplicaClient(t, "ErrNotFound", func(t *testing.T, c litestream.ReplicaClient) {
t.Parallel()
if _, err := c.SnapshotReader(context.Background(), "5efbd8d042012dca", 1); !os.IsNotExist(err) {
t.Fatalf("expected not exist, got %#v", err)
}
})
RunWithReplicaClient(t, "ErrNoGeneration", func(t *testing.T, c litestream.ReplicaClient) {
t.Parallel()
if _, err := c.SnapshotReader(context.Background(), "", 1); err == nil || err.Error() != `cannot determine snapshot path: generation required` {
t.Fatalf("unexpected error: %v", err)
}
})
}
func TestReplicaClient_WALs(t *testing.T) {
RunWithReplicaClient(t, "OK", func(t *testing.T, c litestream.ReplicaClient) {
t.Parallel()
if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "5efbd8d042012dca", Index: 1, Offset: 0}, strings.NewReader(``)); err != nil {
t.Fatal(err)
}
if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "b16ddcf5c697540f", Index: 2, Offset: 0}, strings.NewReader(`12345`)); err != nil {
t.Fatal(err)
} else if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "b16ddcf5c697540f", Index: 2, Offset: 5}, strings.NewReader(`67`)); err != nil {
t.Fatal(err)
} else if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "b16ddcf5c697540f", Index: 3, Offset: 0}, strings.NewReader(`xyz`)); err != nil {
t.Fatal(err)
}
itr, err := c.WALSegments(context.Background(), "b16ddcf5c697540f")
if err != nil {
t.Fatal(err)
}
defer itr.Close()
// Read all WAL segment files into a slice so they can be sorted.
a, err := litestream.SliceWALSegmentIterator(itr)
if err != nil {
t.Fatal(err)
} else if got, want := len(a), 3; got != want {
t.Fatalf("len=%v, want %v", got, want)
}
sort.Sort(litestream.WALSegmentInfoSlice(a))
// Verify first WAL segment metadata.
if got, want := a[0].Generation, "b16ddcf5c697540f"; got != want {
t.Fatalf("Generation=%v, want %v", got, want)
} else if got, want := a[0].Index, 2; got != want {
t.Fatalf("Index=%v, want %v", got, want)
} else if got, want := a[0].Offset, int64(0); got != want {
t.Fatalf("Offset=%v, want %v", got, want)
} else if got, want := a[0].Size, int64(5); got != want {
t.Fatalf("Size=%v, want %v", got, want)
} else if a[0].CreatedAt.IsZero() {
t.Fatalf("expected CreatedAt")
}
// Verify second WAL segment metadata.
if got, want := a[1].Generation, "b16ddcf5c697540f"; got != want {
t.Fatalf("Generation=%v, want %v", got, want)
} else if got, want := a[1].Index, 2; got != want {
t.Fatalf("Index=%v, want %v", got, want)
} else if got, want := a[1].Offset, int64(5); got != want {
t.Fatalf("Offset=%v, want %v", got, want)
} else if got, want := a[1].Size, int64(2); got != want {
t.Fatalf("Size=%v, want %v", got, want)
} else if a[1].CreatedAt.IsZero() {
t.Fatalf("expected CreatedAt")
}
// Verify third WAL segment metadata.
if got, want := a[2].Generation, "b16ddcf5c697540f"; got != want {
t.Fatalf("Generation=%v, want %v", got, want)
} else if got, want := a[2].Index, 3; got != want {
t.Fatalf("Index=%v, want %v", got, want)
} else if got, want := a[2].Offset, int64(0); got != want {
t.Fatalf("Offset=%v, want %v", got, want)
} else if got, want := a[2].Size, int64(3); got != want {
t.Fatalf("Size=%v, want %v", got, want)
} else if a[2].CreatedAt.IsZero() {
t.Fatalf("expected CreatedAt")
}
// Ensure close is clean.
if err := itr.Close(); err != nil {
t.Fatal(err)
}
})
RunWithReplicaClient(t, "NoGenerationDir", func(t *testing.T, c litestream.ReplicaClient) {
t.Parallel()
itr, err := c.WALSegments(context.Background(), "5efbd8d042012dca")
if err != nil {
t.Fatal(err)
}
defer itr.Close()
if itr.Next() {
t.Fatal("expected no wal files")
}
})
RunWithReplicaClient(t, "NoWALs", func(t *testing.T, c litestream.ReplicaClient) {
t.Parallel()
if _, err := c.WriteSnapshot(context.Background(), "5efbd8d042012dca", 0, strings.NewReader(`foo`)); err != nil {
t.Fatal(err)
}
itr, err := c.WALSegments(context.Background(), "5efbd8d042012dca")
if err != nil {
t.Fatal(err)
}
defer itr.Close()
if itr.Next() {
t.Fatal("expected no wal files")
}
})
RunWithReplicaClient(t, "ErrNoGeneration", func(t *testing.T, c litestream.ReplicaClient) {
t.Parallel()
itr, err := c.WALSegments(context.Background(), "")
if err == nil {
err = itr.Close()
}
if err == nil || err.Error() != `cannot determine wal path: generation required` {
t.Fatalf("unexpected error: %v", err)
}
})
}
func TestReplicaClient_WriteWALSegment(t *testing.T) {
RunWithReplicaClient(t, "OK", func(t *testing.T, c litestream.ReplicaClient) {
t.Parallel()
if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "b16ddcf5c697540f", Index: 1000, Offset: 2000}, strings.NewReader(`foobar`)); err != nil {
t.Fatal(err)
}
if r, err := c.WALSegmentReader(context.Background(), litestream.Pos{Generation: "b16ddcf5c697540f", Index: 1000, Offset: 2000}); err != nil {
t.Fatal(err)
} else if buf, err := ioutil.ReadAll(r); err != nil {
t.Fatal(err)
} else if err := r.Close(); err != nil {
t.Fatal(err)
} else if got, want := string(buf), `foobar`; got != want {
t.Fatalf("data=%q, want %q", got, want)
}
})
RunWithReplicaClient(t, "ErrNoGeneration", func(t *testing.T, c litestream.ReplicaClient) {
t.Parallel()
if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "", Index: 0, Offset: 0}, nil); err == nil || err.Error() != `cannot determine wal segment path: generation required` {
t.Fatalf("unexpected error: %v", err)
}
})
}
func TestReplicaClient_WALReader(t *testing.T) {
RunWithReplicaClient(t, "OK", func(t *testing.T, c litestream.ReplicaClient) {
t.Parallel()
if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "5efbd8d042012dca", Index: 10, Offset: 5}, strings.NewReader(`foobar`)); err != nil {
t.Fatal(err)
}
r, err := c.WALSegmentReader(context.Background(), litestream.Pos{Generation: "5efbd8d042012dca", Index: 10, Offset: 5})
if err != nil {
t.Fatal(err)
}
defer r.Close()
if buf, err := ioutil.ReadAll(r); err != nil {
t.Fatal(err)
} else if got, want := string(buf), "foobar"; got != want {
t.Fatalf("ReadAll=%v, want %v", got, want)
}
})
RunWithReplicaClient(t, "ErrNotFound", func(t *testing.T, c litestream.ReplicaClient) {
t.Parallel()
if _, err := c.WALSegmentReader(context.Background(), litestream.Pos{Generation: "5efbd8d042012dca", Index: 1, Offset: 0}); !os.IsNotExist(err) {
t.Fatalf("expected not exist, got %#v", err)
}
})
}
func TestReplicaClient_DeleteWALSegments(t *testing.T) {
RunWithReplicaClient(t, "OK", func(t *testing.T, c litestream.ReplicaClient) {
t.Parallel()
if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "b16ddcf5c697540f", Index: 1, Offset: 2}, strings.NewReader(`foo`)); err != nil {
t.Fatal(err)
} else if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "5efbd8d042012dca", Index: 3, Offset: 4}, strings.NewReader(`bar`)); err != nil {
t.Fatal(err)
}
if err := c.DeleteWALSegments(context.Background(), []litestream.Pos{
{Generation: "b16ddcf5c697540f", Index: 1, Offset: 2},
{Generation: "5efbd8d042012dca", Index: 3, Offset: 4},
}); err != nil {
t.Fatal(err)
}
if _, err := c.WALSegmentReader(context.Background(), litestream.Pos{Generation: "b16ddcf5c697540f", Index: 1, Offset: 2}); !os.IsNotExist(err) {
t.Fatalf("expected not exist, got %#v", err)
} else if _, err := c.WALSegmentReader(context.Background(), litestream.Pos{Generation: "5efbd8d042012dca", Index: 3, Offset: 4}); !os.IsNotExist(err) {
t.Fatalf("expected not exist, got %#v", err)
}
})
RunWithReplicaClient(t, "ErrNoGeneration", func(t *testing.T, c litestream.ReplicaClient) {
t.Parallel()
if err := c.DeleteWALSegments(context.Background(), []litestream.Pos{{}}); err == nil || err.Error() != `cannot determine wal segment path: generation required` {
t.Fatalf("unexpected error: %v", err)
}
})
}
// RunWithReplicaClient executes fn with each replica specified by the -integration flag.
func RunWithReplicaClient(t *testing.T, name string, fn func(*testing.T, litestream.ReplicaClient)) {
t.Run(name, func(t *testing.T) {
for _, typ := range strings.Split(*integration, ",") {
t.Run(typ, func(t *testing.T) {
c := NewReplicaClient(t, typ)
defer MustDeleteAll(t, c)
fn(t, c)
})
}
})
}
// NewReplicaClient returns a new client for integration testing by type name.
func NewReplicaClient(tb testing.TB, typ string) litestream.ReplicaClient {
tb.Helper()
switch typ {
case file.ReplicaClientType:
return NewFileReplicaClient(tb)
case s3.ReplicaClientType:
return NewS3ReplicaClient(tb)
case gcs.ReplicaClientType:
return NewGCSReplicaClient(tb)
case abs.ReplicaClientType:
return NewABSReplicaClient(tb)
case sftp.ReplicaClientType:
return NewSFTPReplicaClient(tb)
default:
tb.Fatalf("invalid replica client type: %q", typ)
return nil
}
}
// NewFileReplicaClient returns a new client for integration testing.
func NewFileReplicaClient(tb testing.TB) *file.ReplicaClient {
tb.Helper()
return file.NewReplicaClient(tb.TempDir())
}
// NewS3ReplicaClient returns a new client for integration testing.
func NewS3ReplicaClient(tb testing.TB) *s3.ReplicaClient {
tb.Helper()
c := s3.NewReplicaClient()
c.AccessKeyID = *s3AccessKeyID
c.SecretAccessKey = *s3SecretAccessKey
c.Region = *s3Region
c.Bucket = *s3Bucket
c.Path = path.Join(*s3Path, fmt.Sprintf("%016x", rand.Uint64()))
c.Endpoint = *s3Endpoint
c.ForcePathStyle = *s3ForcePathStyle
c.SkipVerify = *s3SkipVerify
return c
}
// NewGCSReplicaClient returns a new client for integration testing.
func NewGCSReplicaClient(tb testing.TB) *gcs.ReplicaClient {
tb.Helper()
c := gcs.NewReplicaClient()
c.Bucket = *gcsBucket
c.Path = path.Join(*gcsPath, fmt.Sprintf("%016x", rand.Uint64()))
return c
}
// NewABSReplicaClient returns a new client for integration testing.
func NewABSReplicaClient(tb testing.TB) *abs.ReplicaClient {
tb.Helper()
c := abs.NewReplicaClient()
c.AccountName = *absAccountName
c.AccountKey = *absAccountKey
c.Bucket = *absBucket
c.Path = path.Join(*absPath, fmt.Sprintf("%016x", rand.Uint64()))
return c
}
// NewSFTPReplicaClient returns a new client for integration testing.
func NewSFTPReplicaClient(tb testing.TB) *sftp.ReplicaClient {
tb.Helper()
c := sftp.NewReplicaClient()
c.Host = *sftpHost
c.User = *sftpUser
c.Password = *sftpPassword
c.KeyPath = *sftpKeyPath
c.Path = path.Join(*sftpPath, fmt.Sprintf("%016x", rand.Uint64()))
return c
}
// MustDeleteAll deletes all objects under the client's path.
func MustDeleteAll(tb testing.TB, c litestream.ReplicaClient) {
tb.Helper()
generations, err := c.Generations(context.Background())
if err != nil {
tb.Fatalf("cannot list generations for deletion: %s", err)
}
for _, generation := range generations {
if err := c.DeleteGeneration(context.Background(), generation); err != nil {
tb.Fatalf("cannot delete generation: %s", err)
}
}
switch c := c.(type) {
case *sftp.ReplicaClient:
if err := c.Cleanup(context.Background()); err != nil {
tb.Fatalf("cannot cleanup sftp: %s", err)
}
}
}

replica_test.go Normal file

@@ -0,0 +1,144 @@
package litestream_test
import (
"bytes"
"context"
"io"
"os"
"testing"
"github.com/benbjohnson/litestream"
"github.com/benbjohnson/litestream/file"
"github.com/benbjohnson/litestream/mock"
"github.com/pierrec/lz4/v4"
)
func TestReplica_Name(t *testing.T) {
t.Run("WithName", func(t *testing.T) {
if got, want := litestream.NewReplica(nil, "NAME").Name(), "NAME"; got != want {
t.Fatalf("Name()=%v, want %v", got, want)
}
})
t.Run("WithoutName", func(t *testing.T) {
r := litestream.NewReplica(nil, "")
r.Client = &mock.ReplicaClient{}
if got, want := r.Name(), "mock"; got != want {
t.Fatalf("Name()=%v, want %v", got, want)
}
})
}
func TestReplica_Sync(t *testing.T) {
db, sqldb := MustOpenDBs(t)
defer MustCloseDBs(t, db, sqldb)
// Execute a query to force a write to the WAL.
if _, err := sqldb.Exec(`CREATE TABLE foo (bar TEXT);`); err != nil {
t.Fatal(err)
}
// Issue initial database sync to setup generation.
if err := db.Sync(context.Background()); err != nil {
t.Fatal(err)
}
// Fetch current database position.
dpos, err := db.Pos()
if err != nil {
t.Fatal(err)
}
c := file.NewReplicaClient(t.TempDir())
r := litestream.NewReplica(db, "")
c.Replica, r.Client = r, c
if err := r.Sync(context.Background()); err != nil {
t.Fatal(err)
}
// Verify client generation matches database.
generations, err := c.Generations(context.Background())
if err != nil {
t.Fatal(err)
} else if got, want := len(generations), 1; got != want {
t.Fatalf("len(generations)=%v, want %v", got, want)
} else if got, want := generations[0], dpos.Generation; got != want {
t.Fatalf("generations[0]=%v, want %v", got, want)
}
// Verify WAL matches replica WAL.
if b0, err := os.ReadFile(db.Path() + "-wal"); err != nil {
t.Fatal(err)
} else if r, err := c.WALSegmentReader(context.Background(), litestream.Pos{Generation: generations[0], Index: 0, Offset: 0}); err != nil {
t.Fatal(err)
} else if b1, err := io.ReadAll(lz4.NewReader(r)); err != nil {
t.Fatal(err)
} else if err := r.Close(); err != nil {
t.Fatal(err)
} else if !bytes.Equal(b0, b1) {
t.Fatalf("wal mismatch: len(%d), len(%d)", len(b0), len(b1))
}
}
func TestReplica_Snapshot(t *testing.T) {
db, sqldb := MustOpenDBs(t)
defer MustCloseDBs(t, db, sqldb)
c := file.NewReplicaClient(t.TempDir())
r := litestream.NewReplica(db, "")
r.Client = c
// Execute a query to force a write to the WAL.
if _, err := sqldb.Exec(`CREATE TABLE foo (bar TEXT);`); err != nil {
t.Fatal(err)
} else if err := db.Sync(context.Background()); err != nil {
t.Fatal(err)
} else if err := r.Sync(context.Background()); err != nil {
t.Fatal(err)
}
// Fetch current database position & snapshot.
pos0, err := db.Pos()
if err != nil {
t.Fatal(err)
} else if info, err := r.Snapshot(context.Background()); err != nil {
t.Fatal(err)
} else if got, want := info.Pos(), pos0.Truncate(); got != want {
t.Fatalf("pos=%s, want %s", got, want)
}
// Sync database and then replica.
if err := db.Sync(context.Background()); err != nil {
t.Fatal(err)
} else if err := r.Sync(context.Background()); err != nil {
t.Fatal(err)
}
// Execute a query to force a write to the WAL & truncate to start new index.
if _, err := sqldb.Exec(`INSERT INTO foo (bar) VALUES ('baz');`); err != nil {
t.Fatal(err)
} else if err := db.Checkpoint(context.Background(), litestream.CheckpointModeTruncate); err != nil {
t.Fatal(err)
}
// Fetch current database position & snapshot.
pos1, err := db.Pos()
if err != nil {
t.Fatal(err)
} else if info, err := r.Snapshot(context.Background()); err != nil {
t.Fatal(err)
} else if got, want := info.Pos(), pos1.Truncate(); got != want {
t.Fatalf("pos=%v, want %v", got, want)
}
// Verify two snapshots exist.
if infos, err := r.Snapshots(context.Background()); err != nil {
t.Fatal(err)
} else if got, want := len(infos), 2; got != want {
t.Fatalf("len=%v, want %v", got, want)
} else if got, want := infos[0].Pos(), pos0.Truncate(); got != want {
t.Fatalf("info[0]=%s, want %s", got, want)
} else if got, want := infos[1].Pos(), pos1.Truncate(); got != want {
t.Fatalf("info[1]=%s, want %s", got, want)
}
}


@@ -1,640 +0,0 @@
package litestream
import (
"compress/gzip"
"context"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"path/filepath"
"sort"
"strings"
"sync"
"time"
)
// Replica represents a remote destination to replicate the database & WAL.
type Replica interface {
// The name of the replica. Defaults to type if no name specified.
Name() string
// String identifier for the type of replica ("file", "s3", etc).
Type() string
// Starts replicating in a background goroutine.
Start(ctx context.Context)
// Stops all replication processing. Blocks until processing stopped.
Stop()
// Returns a list of generation names for the replica.
Generations(ctx context.Context) ([]string, error)
// Returns basic information about a generation including the number of
// snapshot & WAL files as well as the time range covered.
GenerationStats(ctx context.Context, generation string) (GenerationStats, error)
// Returns the highest index for a snapshot within a generation that occurs
// before timestamp. If timestamp is zero, returns the latest snapshot.
SnapshotIndexAt(ctx context.Context, generation string, timestamp time.Time) (int, error)
// Returns the highest index for a WAL file that occurs before timestamp.
// If timestamp is zero, returns the highest WAL index.
WALIndexAt(ctx context.Context, generation string, timestamp time.Time) (int, error)
// Returns a reader for snapshot data at the given generation/index.
SnapshotReader(ctx context.Context, generation string, index int) (io.ReadCloser, error)
// Returns a reader for WAL data at the given position.
WALReader(ctx context.Context, generation string, index int) (io.ReadCloser, error)
}
var _ Replica = (*FileReplica)(nil)
// FileReplica is a replica that replicates a DB to a local file path.
type FileReplica struct {
db *DB // source database
name string // replica name, optional
dst string // destination path
// mu sync.RWMutex
wg sync.WaitGroup
ctx context.Context
cancel func()
}
// NewFileReplica returns a new instance of FileReplica.
func NewFileReplica(db *DB, name, dst string) *FileReplica {
return &FileReplica{
db: db,
name: name,
dst: dst,
cancel: func() {},
}
}
// Name returns the name of the replica. Returns the type if no name set.
func (r *FileReplica) Name() string {
if r.name != "" {
return r.name
}
return r.Type()
}
// Type returns the type of replica.
func (r *FileReplica) Type() string {
return "file"
}
// SnapshotDir returns the path to a generation's snapshot directory.
func (r *FileReplica) SnapshotDir(generation string) string {
return filepath.Join(r.dst, "generations", generation, "snapshots")
}
// SnapshotPath returns the path to a snapshot file.
func (r *FileReplica) SnapshotPath(generation string, index int) string {
return filepath.Join(r.SnapshotDir(generation), fmt.Sprintf("%016x.snapshot.gz", index))
}
// WALDir returns the path to a generation's WAL directory.
func (r *FileReplica) WALDir(generation string) string {
return filepath.Join(r.dst, "generations", generation, "wal")
}
// WALPath returns the path to a WAL file.
func (r *FileReplica) WALPath(generation string, index int) string {
return filepath.Join(r.WALDir(generation), fmt.Sprintf("%016x.wal", index))
}
// Generations returns a list of available generation names.
func (r *FileReplica) Generations(ctx context.Context) ([]string, error) {
fis, err := ioutil.ReadDir(filepath.Join(r.dst, "generations"))
if os.IsNotExist(err) {
return nil, nil
} else if err != nil {
return nil, err
}
var generations []string
for _, fi := range fis {
if !IsGenerationName(fi.Name()) {
continue
} else if !fi.IsDir() {
continue
}
generations = append(generations, fi.Name())
}
return generations, nil
}
// GenerationStats returns stats for a generation.
func (r *FileReplica) GenerationStats(ctx context.Context, generation string) (stats GenerationStats, err error) {
// Determine stats for all snapshots.
n, min, max, err := r.snapshotStats(generation)
if err != nil {
return stats, err
}
stats.SnapshotN = n
stats.CreatedAt, stats.UpdatedAt = min, max
// Update stats if we have WAL files.
n, min, max, err = r.walStats(generation)
if err != nil {
return stats, err
} else if n == 0 {
return stats, nil
}
stats.WALN = n
if stats.CreatedAt.IsZero() || min.Before(stats.CreatedAt) {
stats.CreatedAt = min
}
if stats.UpdatedAt.IsZero() || max.After(stats.UpdatedAt) {
stats.UpdatedAt = max
}
return stats, nil
}
func (r *FileReplica) snapshotStats(generation string) (n int, min, max time.Time, err error) {
fis, err := ioutil.ReadDir(r.SnapshotDir(generation))
if os.IsNotExist(err) {
return n, min, max, nil
} else if err != nil {
return n, min, max, err
}
for _, fi := range fis {
if !IsSnapshotPath(fi.Name()) {
continue
}
modTime := fi.ModTime().UTC()
n++
if min.IsZero() || modTime.Before(min) {
min = modTime
}
if max.IsZero() || modTime.After(max) {
max = modTime
}
}
return n, min, max, nil
}
func (r *FileReplica) walStats(generation string) (n int, min, max time.Time, err error) {
fis, err := ioutil.ReadDir(r.WALDir(generation))
if os.IsNotExist(err) {
return n, min, max, nil
} else if err != nil {
return n, min, max, err
}
for _, fi := range fis {
if !IsWALPath(fi.Name()) {
continue
}
modTime := fi.ModTime().UTC()
n++
if min.IsZero() || modTime.Before(min) {
min = modTime
}
if max.IsZero() || modTime.After(max) {
max = modTime
}
}
return n, min, max, nil
}
type GenerationStats struct {
SnapshotN int
WALN int
CreatedAt time.Time
UpdatedAt time.Time
}
// Start starts replication for a given generation.
func (r *FileReplica) Start(ctx context.Context) {
// Stop previous replication.
r.Stop()
// Wrap context with cancelation.
ctx, r.cancel = context.WithCancel(ctx)
// Start goroutine to replicate data.
r.wg.Add(1)
go func() { defer r.wg.Done(); r.monitor(ctx) }()
}
// Stop cancels any outstanding replication and blocks until finished.
func (r *FileReplica) Stop() {
r.cancel()
r.wg.Wait()
}
// monitor runs in a separate goroutine and continuously replicates the DB.
func (r *FileReplica) monitor(ctx context.Context) {
// Clear old temporary files that may have been left from a crash.
if err := removeTmpFiles(r.dst); err != nil {
log.Printf("%s(%s): cannot remove tmp files: %s", r.db.Path(), r.Name(), err)
}
// Continuously check for new data to replicate.
ch := make(chan struct{})
close(ch)
var notify <-chan struct{} = ch
var pos Pos
var err error
for {
select {
case <-ctx.Done():
return
case <-notify:
}
// Fetch new notify channel before replicating data.
notify = r.db.Notify()
// Determine position, if necessary.
if pos.IsZero() {
if pos, err = r.pos(); err != nil {
log.Printf("%s(%s): cannot determine position: %s", r.db.Path(), r.Name(), err)
continue
} else if pos.IsZero() {
log.Printf("%s(%s): no generation, waiting for data", r.db.Path(), r.Name())
continue
}
}
// If we have no replicated WALs, start from last index in shadow WAL.
if pos.Index == 0 && pos.Offset == 0 {
if pos.Index, err = r.db.CurrentShadowWALIndex(pos.Generation); err != nil {
log.Printf("%s(%s): cannot determine latest shadow wal index: %s", r.db.Path(), r.Name(), err)
continue
}
}
// Synchronize the shadow wal into the replication directory.
if pos, err = r.sync(ctx, pos); err != nil {
log.Printf("%s(%s): sync error: %s", r.db.Path(), r.Name(), err)
continue
}
// Gzip any old WAL files.
if pos.Generation != "" {
if err := r.compress(ctx, pos.Generation); err != nil {
log.Printf("%s(%s): compress error: %s", r.db.Path(), r.Name(), err)
continue
}
}
}
}
// pos returns the position for the replica for the current generation.
// Returns a zero value if there is no active generation.
func (r *FileReplica) pos() (pos Pos, err error) {
// Find the current generation from the DB. Return zero pos if no generation.
generation, err := r.db.CurrentGeneration()
if err != nil {
return pos, err
} else if generation == "" {
return pos, nil // empty position
}
pos.Generation = generation
// Find the max WAL file.
dir := r.WALDir(generation)
fis, err := ioutil.ReadDir(dir)
if os.IsNotExist(err) {
return pos, nil // no replicated wal, start at beginning of generation
} else if err != nil {
return pos, err
}
index := -1
for _, fi := range fis {
name := fi.Name()
name = strings.TrimSuffix(name, ".gz")
if !strings.HasSuffix(name, WALExt) {
continue
}
if v, err := ParseWALFilename(filepath.Base(name)); err != nil {
continue // invalid wal filename
} else if index == -1 || v > index {
index = v
}
}
if index == -1 {
return pos, nil // wal directory exists but no wal files, return beginning pos
}
pos.Index = index
// Determine current offset.
fi, err := os.Stat(filepath.Join(dir, FormatWALFilename(pos.Index)))
if err != nil {
return pos, err
}
pos.Offset = fi.Size()
return pos, nil
}
// snapshot copies the entire database to the replica path.
func (r *FileReplica) snapshot(ctx context.Context, generation string, index int) error {
// Acquire a read lock on the database during snapshot to prevent checkpoints.
tx, err := r.db.db.Begin()
if err != nil {
return err
} else if _, err := tx.ExecContext(ctx, `SELECT COUNT(1) FROM _litestream_seq;`); err != nil {
tx.Rollback()
return err
}
defer tx.Rollback()
// Ignore if we already have a snapshot for the given WAL index.
snapshotPath := r.SnapshotPath(generation, index)
if _, err := os.Stat(snapshotPath); err == nil {
return nil
}
if err := os.MkdirAll(filepath.Dir(snapshotPath), 0700); err != nil {
return err
}
return compressFile(r.db.Path(), snapshotPath)
}
// snapshotN returns the number of snapshots for a generation.
func (r *FileReplica) snapshotN(generation string) (int, error) {
fis, err := ioutil.ReadDir(r.SnapshotDir(generation))
if os.IsNotExist(err) {
return 0, nil
} else if err != nil {
return 0, err
}
var n int
for _, fi := range fis {
if _, _, _, err := ParseSnapshotPath(fi.Name()); err == nil {
n++
}
}
return n, nil
}
func (r *FileReplica) sync(ctx context.Context, pos Pos) (_ Pos, err error) {
// Read all WAL files since the last position.
for {
if pos, err = r.syncNext(ctx, pos); err == io.EOF {
return pos, nil
} else if err != nil {
return pos, err
}
}
}
func (r *FileReplica) syncNext(ctx context.Context, pos Pos) (_ Pos, err error) {
rd, err := r.db.ShadowWALReader(pos)
if err == io.EOF {
return pos, err
} else if err != nil {
return pos, fmt.Errorf("wal reader: %w", err)
}
defer rd.Close()
// Create snapshot if no snapshots exist.
if n, err := r.snapshotN(rd.Pos().Generation); err != nil {
return pos, err
} else if n == 0 {
if err := r.snapshot(ctx, rd.Pos().Generation, rd.Pos().Index); err != nil {
return pos, err
}
}
// Ensure parent directory exists for WAL file.
filename := r.WALPath(rd.Pos().Generation, rd.Pos().Index)
if err := os.MkdirAll(filepath.Dir(filename), 0700); err != nil {
return pos, err
}
// Open the WAL file at its final path; the seek below resumes writing at the reader's offset.
w, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0600)
if err != nil {
return pos, err
}
defer w.Close()
// Seek, copy & sync WAL contents.
if _, err := w.Seek(rd.Pos().Offset, io.SeekStart); err != nil {
return pos, err
} else if _, err := io.Copy(w, rd); err != nil {
return pos, err
} else if err := w.Sync(); err != nil {
return pos, err
} else if err := w.Close(); err != nil {
return pos, err
}
// Return ending position of the reader.
return rd.Pos(), nil
}
// compress gzips all WAL files before the current one.
func (r *FileReplica) compress(ctx context.Context, generation string) error {
dir := r.WALDir(generation)
filenames, err := filepath.Glob(filepath.Join(dir, "*.wal"))
if err != nil {
return err
} else if len(filenames) <= 1 {
return nil // no uncompressed wal files or only one active file
}
// Ensure filenames are sorted & remove the last (active) WAL.
sort.Strings(filenames)
filenames = filenames[:len(filenames)-1]
// Compress each file from oldest to newest.
for _, filename := range filenames {
select {
case <-ctx.Done():
return ctx.Err()
default:
}
dst := filename + ".gz"
if err := compressFile(filename, dst); err != nil {
return err
} else if err := os.Remove(filename); err != nil {
return err
}
}
return nil
}
// SnapshotIndexAt returns the highest index for a snapshot within a generation
// that occurs before timestamp. If timestamp is zero, returns the latest snapshot.
func (r *FileReplica) SnapshotIndexAt(ctx context.Context, generation string, timestamp time.Time) (int, error) {
fis, err := ioutil.ReadDir(r.SnapshotDir(generation))
if os.IsNotExist(err) {
return 0, ErrNoSnapshots
} else if err != nil {
return 0, err
}
index := -1
var max time.Time
for _, fi := range fis {
// Read index from snapshot filename.
idx, _, _, err := ParseSnapshotPath(fi.Name())
if err != nil {
continue // not a snapshot, skip
} else if !timestamp.IsZero() && fi.ModTime().After(timestamp) {
continue // after timestamp, skip
}
// Use snapshot if it is newer.
if max.IsZero() || fi.ModTime().After(max) {
index, max = idx, fi.ModTime()
}
}
if index == -1 {
return 0, ErrNoSnapshots
}
return index, nil
}
// WALIndexAt returns the highest index for a WAL file that occurs before timestamp.
// If timestamp is zero, returns the highest WAL index.
func (r *FileReplica) WALIndexAt(ctx context.Context, generation string, timestamp time.Time) (int, error) {
fis, err := ioutil.ReadDir(r.WALDir(generation))
if os.IsNotExist(err) {
return 0, nil
} else if err != nil {
return 0, err
}
index := -1
for _, fi := range fis {
// Read index from WAL filename.
idx, _, _, err := ParseWALPath(fi.Name())
if err != nil {
continue // not a wal file, skip
} else if !timestamp.IsZero() && fi.ModTime().After(timestamp) {
continue // after timestamp, skip
} else if idx < index {
continue // earlier index, skip
}
index = idx
}
if index == -1 {
return 0, nil
}
return index, nil
}
// SnapshotReader returns a reader for snapshot data at the given generation/index.
// Returns os.ErrNotExist if no matching index is found.
func (r *FileReplica) SnapshotReader(ctx context.Context, generation string, index int) (io.ReadCloser, error) {
dir := r.SnapshotDir(generation)
fis, err := ioutil.ReadDir(dir)
if err != nil {
return nil, err
}
for _, fi := range fis {
// Parse index from snapshot filename. Skip if no match.
idx, _, ext, err := ParseSnapshotPath(fi.Name())
if err != nil || index != idx {
continue
}
// Open & return the file handle if uncompressed.
f, err := os.Open(filepath.Join(dir, fi.Name()))
if err != nil {
return nil, err
} else if ext == ".snapshot" {
return f, nil // not compressed, return as-is.
}
assert(ext == ".snapshot.gz", "invalid snapshot extension")
// If compressed, wrap in a gzip reader and return with wrapper to
// ensure that the underlying file is closed.
r, err := gzip.NewReader(f)
if err != nil {
f.Close()
return nil, err
}
return &gzipReadCloser{r: r, closer: f}, nil
}
return nil, os.ErrNotExist
}
// WALReader returns a reader for WAL data at the given index.
// Returns os.ErrNotExist if no matching index is found.
func (r *FileReplica) WALReader(ctx context.Context, generation string, index int) (io.ReadCloser, error) {
filename := r.WALPath(generation, index)
// Attempt to read uncompressed file first.
f, err := os.Open(filename)
if err == nil {
return f, nil // file exists, return as-is
} else if err != nil && !os.IsNotExist(err) {
return nil, err
}
// Otherwise read the compressed file. Return error if file doesn't exist.
f, err = os.Open(filename + ".gz")
if err != nil {
return nil, err
}
// If compressed, wrap in a gzip reader and return with wrapper to
// ensure that the underlying file is closed.
rd, err := gzip.NewReader(f)
if err != nil {
f.Close()
return nil, err
}
return &gzipReadCloser{r: rd, closer: f}, nil
}
// compressFile gzip-compresses src into dst, writing through a temporary file.
func compressFile(src, dst string) error {
r, err := os.Open(src)
if err != nil {
return err
}
defer r.Close()
w, err := os.Create(dst + ".tmp")
if err != nil {
return err
}
defer w.Close()
gz := gzip.NewWriter(w)
defer gz.Close()
// Copy & compress file contents to temporary file.
if _, err := io.Copy(gz, r); err != nil {
return err
} else if err := gz.Close(); err != nil {
return err
} else if err := w.Sync(); err != nil {
return err
} else if err := w.Close(); err != nil {
return err
}
// Move compressed file to final location.
return os.Rename(dst+".tmp", dst)
}

s3/replica_client.go Normal file

@@ -0,0 +1,771 @@
package s3
import (
"context"
"crypto/tls"
"fmt"
"io"
"net"
"net/http"
"os"
"path"
"regexp"
"sync"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
"github.com/benbjohnson/litestream"
"github.com/benbjohnson/litestream/internal"
"golang.org/x/sync/errgroup"
)
// ReplicaClientType is the client type for this package.
const ReplicaClientType = "s3"
// MaxKeys is the number of keys S3 can operate on per batch.
const MaxKeys = 1000
// DefaultRegion is the region used if one is not specified.
const DefaultRegion = "us-east-1"
var _ litestream.ReplicaClient = (*ReplicaClient)(nil)
// ReplicaClient is a client for writing snapshots & WAL segments to an S3-compatible object store.
type ReplicaClient struct {
mu sync.Mutex
s3 *s3.S3 // s3 service
uploader *s3manager.Uploader
// AWS authentication keys.
AccessKeyID string
SecretAccessKey string
// S3 bucket information
Region string
Bucket string
Path string
Endpoint string
ForcePathStyle bool
SkipVerify bool
}
// NewReplicaClient returns a new instance of ReplicaClient.
func NewReplicaClient() *ReplicaClient {
return &ReplicaClient{}
}
// Type returns "s3" as the client type.
func (c *ReplicaClient) Type() string {
return ReplicaClientType
}
// Init initializes the connection to S3. No-op if already initialized.
func (c *ReplicaClient) Init(ctx context.Context) (err error) {
c.mu.Lock()
defer c.mu.Unlock()
if c.s3 != nil {
return nil
}
// Look up region if not specified and no endpoint is used.
// Endpoints are typically used for non-S3 object stores and do not
// necessarily require a region.
region := c.Region
if region == "" {
if c.Endpoint == "" {
if region, err = c.findBucketRegion(ctx, c.Bucket); err != nil {
return fmt.Errorf("cannot lookup bucket region: %w", err)
}
} else {
region = DefaultRegion // default for non-S3 object stores
}
}
// Create new AWS session.
config := c.config()
if region != "" {
config.Region = aws.String(region)
}
sess, err := session.NewSession(config)
if err != nil {
return fmt.Errorf("cannot create aws session: %w", err)
}
c.s3 = s3.New(sess)
c.uploader = s3manager.NewUploader(sess)
return nil
}
// config returns the AWS configuration. Uses the default credential chain
// unless a key/secret are explicitly set.
func (c *ReplicaClient) config() *aws.Config {
config := &aws.Config{}
if c.AccessKeyID != "" || c.SecretAccessKey != "" {
config.Credentials = credentials.NewStaticCredentials(c.AccessKeyID, c.SecretAccessKey, "")
}
if c.Endpoint != "" {
config.Endpoint = aws.String(c.Endpoint)
}
if c.ForcePathStyle {
config.S3ForcePathStyle = aws.Bool(c.ForcePathStyle)
}
if c.SkipVerify {
config.HTTPClient = &http.Client{Transport: &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
}}
}
return config
}
func (c *ReplicaClient) findBucketRegion(ctx context.Context, bucket string) (string, error) {
// Connect to US standard region to fetch info.
config := c.config()
config.Region = aws.String(DefaultRegion)
sess, err := session.NewSession(config)
if err != nil {
return "", err
}
// Fetch bucket location, if possible. Must be bucket owner.
// This call can return a nil location which means it's in us-east-1.
if out, err := s3.New(sess).GetBucketLocation(&s3.GetBucketLocationInput{
Bucket: aws.String(bucket),
}); err != nil {
return "", err
} else if out.LocationConstraint != nil {
return *out.LocationConstraint, nil
}
return DefaultRegion, nil
}
// Generations returns a list of available generation names.
func (c *ReplicaClient) Generations(ctx context.Context) ([]string, error) {
if err := c.Init(ctx); err != nil {
return nil, err
}
var generations []string
if err := c.s3.ListObjectsPagesWithContext(ctx, &s3.ListObjectsInput{
Bucket: aws.String(c.Bucket),
Prefix: aws.String(litestream.GenerationsPath(c.Path) + "/"),
Delimiter: aws.String("/"),
}, func(page *s3.ListObjectsOutput, lastPage bool) bool {
internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "LIST").Inc()
for _, prefix := range page.CommonPrefixes {
name := path.Base(*prefix.Prefix)
if !litestream.IsGenerationName(name) {
continue
}
generations = append(generations, name)
}
return true
}); err != nil {
return nil, err
}
return generations, nil
}
// DeleteGeneration deletes all snapshots & WAL segments within a generation.
func (c *ReplicaClient) DeleteGeneration(ctx context.Context, generation string) error {
if err := c.Init(ctx); err != nil {
return err
}
dir, err := litestream.GenerationPath(c.Path, generation)
if err != nil {
return fmt.Errorf("cannot determine generation path: %w", err)
}
// Collect all files for the generation.
var objIDs []*s3.ObjectIdentifier
if err := c.s3.ListObjectsPagesWithContext(ctx, &s3.ListObjectsInput{
Bucket: aws.String(c.Bucket),
Prefix: aws.String(dir),
}, func(page *s3.ListObjectsOutput, lastPage bool) bool {
internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "LIST").Inc()
for _, obj := range page.Contents {
objIDs = append(objIDs, &s3.ObjectIdentifier{Key: obj.Key})
}
return true
}); err != nil {
return err
}
// Delete all files in batches.
for len(objIDs) > 0 {
n := MaxKeys
if len(objIDs) < n {
n = len(objIDs)
}
out, err := c.s3.DeleteObjectsWithContext(ctx, &s3.DeleteObjectsInput{
Bucket: aws.String(c.Bucket),
Delete: &s3.Delete{Objects: objIDs[:n], Quiet: aws.Bool(true)},
})
if err != nil {
return err
}
if err := deleteOutputError(out); err != nil {
return err
}
internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "DELETE").Inc()
objIDs = objIDs[n:]
}
// log.Printf("%s(%s): retainer: deleting generation: %s", r.db.Path(), r.Name(), generation)
return nil
}
// Snapshots returns an iterator over all available snapshots for a generation.
func (c *ReplicaClient) Snapshots(ctx context.Context, generation string) (litestream.SnapshotIterator, error) {
if err := c.Init(ctx); err != nil {
return nil, err
}
return newSnapshotIterator(ctx, c, generation), nil
}
// WriteSnapshot writes LZ4 compressed data from rd to the snapshot path on S3.
func (c *ReplicaClient) WriteSnapshot(ctx context.Context, generation string, index int, rd io.Reader) (info litestream.SnapshotInfo, err error) {
if err := c.Init(ctx); err != nil {
return info, err
}
key, err := litestream.SnapshotPath(c.Path, generation, index)
if err != nil {
return info, fmt.Errorf("cannot determine snapshot path: %w", err)
}
startTime := time.Now()
rc := internal.NewReadCounter(rd)
if _, err := c.uploader.UploadWithContext(ctx, &s3manager.UploadInput{
Bucket: aws.String(c.Bucket),
Key: aws.String(key),
Body: rc,
}); err != nil {
return info, err
}
internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "PUT").Inc()
internal.OperationBytesCounterVec.WithLabelValues(ReplicaClientType, "PUT").Add(float64(rc.N()))
// log.Printf("%s(%s): snapshot: creating %s/%08x t=%s", r.db.Path(), r.Name(), generation, index, time.Since(startTime).Truncate(time.Millisecond))
return litestream.SnapshotInfo{
Generation: generation,
Index: index,
Size: rc.N(),
CreatedAt: startTime.UTC(),
}, nil
}
// SnapshotReader returns a reader for snapshot data at the given generation/index.
func (c *ReplicaClient) SnapshotReader(ctx context.Context, generation string, index int) (io.ReadCloser, error) {
if err := c.Init(ctx); err != nil {
return nil, err
}
key, err := litestream.SnapshotPath(c.Path, generation, index)
if err != nil {
return nil, fmt.Errorf("cannot determine snapshot path: %w", err)
}
out, err := c.s3.GetObjectWithContext(ctx, &s3.GetObjectInput{
Bucket: aws.String(c.Bucket),
Key: aws.String(key),
})
if isNotExists(err) {
return nil, os.ErrNotExist
} else if err != nil {
return nil, err
}
internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "GET").Inc()
internal.OperationBytesCounterVec.WithLabelValues(ReplicaClientType, "GET").Add(float64(*out.ContentLength))
return out.Body, nil
}
// DeleteSnapshot deletes a snapshot with the given generation & index.
func (c *ReplicaClient) DeleteSnapshot(ctx context.Context, generation string, index int) error {
if err := c.Init(ctx); err != nil {
return err
}
key, err := litestream.SnapshotPath(c.Path, generation, index)
if err != nil {
return fmt.Errorf("cannot determine snapshot path: %w", err)
}
out, err := c.s3.DeleteObjectsWithContext(ctx, &s3.DeleteObjectsInput{
Bucket: aws.String(c.Bucket),
Delete: &s3.Delete{Objects: []*s3.ObjectIdentifier{{Key: &key}}, Quiet: aws.Bool(true)},
})
if err != nil {
return err
}
if err := deleteOutputError(out); err != nil {
return err
}
internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "DELETE").Inc()
return nil
}
// WALSegments returns an iterator over all available WAL files for a generation.
func (c *ReplicaClient) WALSegments(ctx context.Context, generation string) (litestream.WALSegmentIterator, error) {
if err := c.Init(ctx); err != nil {
return nil, err
}
return newWALSegmentIterator(ctx, c, generation), nil
}
// WriteWALSegment writes LZ4 compressed data from rd to the WAL segment path on S3.
func (c *ReplicaClient) WriteWALSegment(ctx context.Context, pos litestream.Pos, rd io.Reader) (info litestream.WALSegmentInfo, err error) {
if err := c.Init(ctx); err != nil {
return info, err
}
key, err := litestream.WALSegmentPath(c.Path, pos.Generation, pos.Index, pos.Offset)
if err != nil {
return info, fmt.Errorf("cannot determine wal segment path: %w", err)
}
startTime := time.Now()
rc := internal.NewReadCounter(rd)
if _, err := c.uploader.UploadWithContext(ctx, &s3manager.UploadInput{
Bucket: aws.String(c.Bucket),
Key: aws.String(key),
Body: rc,
}); err != nil {
return info, err
}
internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "PUT").Inc()
internal.OperationBytesCounterVec.WithLabelValues(ReplicaClientType, "PUT").Add(float64(rc.N()))
return litestream.WALSegmentInfo{
Generation: pos.Generation,
Index: pos.Index,
Offset: pos.Offset,
Size: rc.N(),
CreatedAt: startTime.UTC(),
}, nil
}
// WALSegmentReader returns a reader for a section of WAL data at the given index.
// Returns os.ErrNotExist if no matching index/offset is found.
func (c *ReplicaClient) WALSegmentReader(ctx context.Context, pos litestream.Pos) (io.ReadCloser, error) {
if err := c.Init(ctx); err != nil {
return nil, err
}
key, err := litestream.WALSegmentPath(c.Path, pos.Generation, pos.Index, pos.Offset)
if err != nil {
return nil, fmt.Errorf("cannot determine wal segment path: %w", err)
}
out, err := c.s3.GetObjectWithContext(ctx, &s3.GetObjectInput{
Bucket: aws.String(c.Bucket),
Key: aws.String(key),
})
if isNotExists(err) {
return nil, os.ErrNotExist
} else if err != nil {
return nil, err
}
internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "GET").Inc()
internal.OperationBytesCounterVec.WithLabelValues(ReplicaClientType, "GET").Add(float64(*out.ContentLength))
return out.Body, nil
}
// DeleteWALSegments deletes WAL segments at the given positions.
func (c *ReplicaClient) DeleteWALSegments(ctx context.Context, a []litestream.Pos) error {
if err := c.Init(ctx); err != nil {
return err
}
objIDs := make([]*s3.ObjectIdentifier, MaxKeys)
for len(a) > 0 {
n := MaxKeys
if len(a) < n {
n = len(a)
}
// Generate a batch of object IDs for deleting the WAL segments.
for i, pos := range a[:n] {
key, err := litestream.WALSegmentPath(c.Path, pos.Generation, pos.Index, pos.Offset)
if err != nil {
return fmt.Errorf("cannot determine wal segment path: %w", err)
}
objIDs[i] = &s3.ObjectIdentifier{Key: &key}
}
// Delete S3 objects in bulk.
out, err := c.s3.DeleteObjectsWithContext(ctx, &s3.DeleteObjectsInput{
Bucket: aws.String(c.Bucket),
Delete: &s3.Delete{Objects: objIDs[:n], Quiet: aws.Bool(true)},
})
if err != nil {
return err
}
if err := deleteOutputError(out); err != nil {
return err
}
internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "DELETE").Inc()
a = a[n:]
}
return nil
}
// DeleteAll deletes everything on the remote path. Mainly used for testing.
func (c *ReplicaClient) DeleteAll(ctx context.Context) error {
if err := c.Init(ctx); err != nil {
return err
}
prefix := c.Path
if prefix != "" {
prefix += "/"
}
// Collect all files under the replica path.
var objIDs []*s3.ObjectIdentifier
if err := c.s3.ListObjectsPagesWithContext(ctx, &s3.ListObjectsInput{
Bucket: aws.String(c.Bucket),
Prefix: aws.String(prefix),
}, func(page *s3.ListObjectsOutput, lastPage bool) bool {
internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "LIST").Inc()
for _, obj := range page.Contents {
objIDs = append(objIDs, &s3.ObjectIdentifier{Key: obj.Key})
}
return true
}); err != nil {
return err
}
// Delete all files in batches.
for len(objIDs) > 0 {
n := MaxKeys
if len(objIDs) < n {
n = len(objIDs)
}
out, err := c.s3.DeleteObjectsWithContext(ctx, &s3.DeleteObjectsInput{
Bucket: aws.String(c.Bucket),
Delete: &s3.Delete{Objects: objIDs[:n], Quiet: aws.Bool(true)},
})
if err != nil {
return err
}
if err := deleteOutputError(out); err != nil {
return err
}
internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "DELETE").Inc()
objIDs = objIDs[n:]
}
return nil
}
type snapshotIterator struct {
client *ReplicaClient
generation string
ch chan litestream.SnapshotInfo
g errgroup.Group
ctx context.Context
cancel func()
info litestream.SnapshotInfo
err error
}
func newSnapshotIterator(ctx context.Context, client *ReplicaClient, generation string) *snapshotIterator {
itr := &snapshotIterator{
client: client,
generation: generation,
ch: make(chan litestream.SnapshotInfo),
}
itr.ctx, itr.cancel = context.WithCancel(ctx)
itr.g.Go(itr.fetch)
return itr
}
// fetch runs in a separate goroutine to fetch pages of objects and stream them to a channel.
func (itr *snapshotIterator) fetch() error {
defer close(itr.ch)
dir, err := litestream.SnapshotsPath(itr.client.Path, itr.generation)
if err != nil {
return fmt.Errorf("cannot determine snapshots path: %w", err)
}
return itr.client.s3.ListObjectsPagesWithContext(itr.ctx, &s3.ListObjectsInput{
Bucket: aws.String(itr.client.Bucket),
Prefix: aws.String(dir + "/"),
Delimiter: aws.String("/"),
}, func(page *s3.ListObjectsOutput, lastPage bool) bool {
internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "LIST").Inc()
for _, obj := range page.Contents {
key := path.Base(*obj.Key)
index, err := litestream.ParseSnapshotPath(key)
if err != nil {
continue
}
info := litestream.SnapshotInfo{
Generation: itr.generation,
Index: index,
Size: *obj.Size,
CreatedAt: obj.LastModified.UTC(),
}
select {
case <-itr.ctx.Done():
case itr.ch <- info:
}
}
return true
})
}
func (itr *snapshotIterator) Close() (err error) {
err = itr.err
// Cancel context and wait for error group to finish.
itr.cancel()
if e := itr.g.Wait(); e != nil && err == nil {
err = e
}
return err
}
func (itr *snapshotIterator) Next() bool {
// Exit if an error has already occurred.
if itr.err != nil {
return false
}
// Return false if context was canceled or if there are no more snapshots.
// Otherwise fetch the next snapshot and store it on the iterator.
select {
case <-itr.ctx.Done():
return false
case info, ok := <-itr.ch:
if !ok {
return false
}
itr.info = info
return true
}
}
func (itr *snapshotIterator) Err() error { return itr.err }
func (itr *snapshotIterator) Snapshot() litestream.SnapshotInfo {
return itr.info
}
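// A minimal consumption sketch for the iterator above; walSegmentIterator
// below follows the same Next/Close contract. Note that the listing error from
// the fetch goroutine surfaces through Close, not Err. The client, generation,
// and print format here are placeholders.
//
//	itr := newSnapshotIterator(ctx, client, generation)
//	for itr.Next() {
//		info := itr.Snapshot()
//		fmt.Printf("snapshot %s/%08x (%d bytes)\n", info.Generation, info.Index, info.Size)
//	}
//	if err := itr.Close(); err != nil {
//		return err // any ListObjects failure from the fetch goroutine
//	}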
type walSegmentIterator struct {
client *ReplicaClient
generation string
ch chan litestream.WALSegmentInfo
g errgroup.Group
ctx context.Context
cancel func()
info litestream.WALSegmentInfo
err error
}
func newWALSegmentIterator(ctx context.Context, client *ReplicaClient, generation string) *walSegmentIterator {
itr := &walSegmentIterator{
client: client,
generation: generation,
ch: make(chan litestream.WALSegmentInfo),
}
itr.ctx, itr.cancel = context.WithCancel(ctx)
itr.g.Go(itr.fetch)
return itr
}
// fetch runs in a separate goroutine to fetch pages of objects and stream them to a channel.
func (itr *walSegmentIterator) fetch() error {
defer close(itr.ch)
dir, err := litestream.WALPath(itr.client.Path, itr.generation)
if err != nil {
return fmt.Errorf("cannot determine wal path: %w", err)
}
return itr.client.s3.ListObjectsPagesWithContext(itr.ctx, &s3.ListObjectsInput{
Bucket: aws.String(itr.client.Bucket),
Prefix: aws.String(dir + "/"),
Delimiter: aws.String("/"),
}, func(page *s3.ListObjectsOutput, lastPage bool) bool {
internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "LIST").Inc()
for _, obj := range page.Contents {
key := path.Base(*obj.Key)
index, offset, err := litestream.ParseWALSegmentPath(key)
if err != nil {
continue
}
info := litestream.WALSegmentInfo{
Generation: itr.generation,
Index: index,
Offset: offset,
Size: *obj.Size,
CreatedAt: obj.LastModified.UTC(),
}
select {
case <-itr.ctx.Done():
return false
case itr.ch <- info:
}
}
return true
})
}
func (itr *walSegmentIterator) Close() (err error) {
err = itr.err
// Cancel context and wait for error group to finish.
itr.cancel()
if e := itr.g.Wait(); e != nil && err == nil {
err = e
}
return err
}
func (itr *walSegmentIterator) Next() bool {
// Exit if an error has already occurred.
if itr.err != nil {
return false
}
// Return false if context was canceled or if there are no more segments.
// Otherwise fetch the next segment and store it on the iterator.
select {
case <-itr.ctx.Done():
return false
case info, ok := <-itr.ch:
if !ok {
return false
}
itr.info = info
return true
}
}
func (itr *walSegmentIterator) Err() error { return itr.err }
func (itr *walSegmentIterator) WALSegment() litestream.WALSegmentInfo {
return itr.info
}
// ParseHost extracts the bucket, region, endpoint, and addressing style from a hostname, depending on the service provider.
func ParseHost(s string) (bucket, region, endpoint string, forcePathStyle bool) {
// Extract port if one is specified.
host, port, err := net.SplitHostPort(s)
if err != nil {
host = s
}
// Default to path-based URLs, except for AWS S3 itself.
forcePathStyle = true
// Extract fields from provider-specific host formats.
scheme := "https"
if a := localhostRegex.FindStringSubmatch(host); a != nil {
bucket, region = a[1], "us-east-1"
scheme, endpoint = "http", "localhost"
} else if a := backblazeRegex.FindStringSubmatch(host); a != nil {
bucket, region = a[1], a[2]
endpoint = fmt.Sprintf("s3.%s.backblazeb2.com", region)
} else if a := filebaseRegex.FindStringSubmatch(host); a != nil {
bucket, endpoint = a[1], "s3.filebase.com"
} else if a := digitalOceanRegex.FindStringSubmatch(host); a != nil {
bucket, region = a[1], a[2]
endpoint = fmt.Sprintf("%s.digitaloceanspaces.com", region)
} else if a := scalewayRegex.FindStringSubmatch(host); a != nil {
bucket, region = a[1], a[2]
endpoint = fmt.Sprintf("s3.%s.scw.cloud", region)
} else if a := linodeRegex.FindStringSubmatch(host); a != nil {
bucket, region = a[1], a[2]
endpoint = fmt.Sprintf("%s.linodeobjects.com", region)
} else {
bucket = host
forcePathStyle = false
}
// Add port back to endpoint, if available.
if endpoint != "" && port != "" {
endpoint = net.JoinHostPort(endpoint, port)
}
// Prepend scheme to endpoint.
if endpoint != "" {
endpoint = scheme + "://" + endpoint
}
return bucket, region, endpoint, forcePathStyle
}
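// Illustrative expectations, derived from the regexes below (example values,
// not an exhaustive list):
//
//	ParseHost("mybkt.localhost:9000")
//	  → bucket="mybkt", region="us-east-1",
//	    endpoint="http://localhost:9000", forcePathStyle=true
//	ParseHost("mybkt.s3.us-west-000.backblazeb2.com")
//	  → bucket="mybkt", region="us-west-000",
//	    endpoint="https://s3.us-west-000.backblazeb2.com", forcePathStyle=true
//	ParseHost("mybkt")
//	  → bucket="mybkt", empty region/endpoint, forcePathStyle=false (plain AWS S3)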
var (
localhostRegex = regexp.MustCompile(`^(?:(.+)\.)?localhost$`)
backblazeRegex = regexp.MustCompile(`^(?:(.+)\.)?s3\.([^.]+)\.backblazeb2\.com$`)
filebaseRegex = regexp.MustCompile(`^(?:(.+)\.)?s3\.filebase\.com$`)
digitalOceanRegex = regexp.MustCompile(`^(?:(.+)\.)?([^.]+)\.digitaloceanspaces\.com$`)
scalewayRegex = regexp.MustCompile(`^(?:(.+)\.)?s3\.([^.]+)\.scw\.cloud$`)
linodeRegex = regexp.MustCompile(`^(?:(.+)\.)?([^.]+)\.linodeobjects\.com$`)
)
func isNotExists(err error) bool {
switch err := err.(type) {
case awserr.Error:
return err.Code() == `NoSuchKey`
default:
return false
}
}
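// Typical use (sketch): callers translate the S3-specific error into the
// standard library sentinel so the rest of litestream can check os.ErrNotExist.
//
//	if _, err := c.s3.GetObjectWithContext(ctx, input); isNotExists(err) {
//		return nil, os.ErrNotExist
//	}

// deleteOutputError returns an error if the DeleteObjects response reports
// per-object failures, summarizing them to keep the message bounded.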
func deleteOutputError(out *s3.DeleteObjectsOutput) error {
switch len(out.Errors) {
case 0:
return nil
case 1:
return fmt.Errorf("deleting object %s: %s - %s", *out.Errors[0].Key, *out.Errors[0].Code, *out.Errors[0].Message)
default:
return fmt.Errorf("%d errors occurred deleting objects, %s: %s - %s (and %d others)",
len(out.Errors), *out.Errors[0].Key, *out.Errors[0].Code, *out.Errors[0].Message, len(out.Errors)-1)
}
}
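// A hypothetical failure, showing how partial DeleteObjects errors are
// summarized rather than silently discarded (key/code values are made up):
//
//	out := &s3.DeleteObjectsOutput{Errors: []*s3.Error{{
//		Key:     aws.String("0792d3393bf79ced/wal/00000233_00000000.wal.lz4"),
//		Code:    aws.String("AccessDenied"),
//		Message: aws.String("Access Denied"),
//	}}}
//	err := deleteOutputError(out)
//	// err: deleting object 0792d3393bf79ced/wal/00000233_00000000.wal.lz4: AccessDenied - Access Denied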

495
sftp/replica_client.go Normal file

@@ -0,0 +1,495 @@
package sftp
import (
"context"
"errors"
"fmt"
"io"
"net"
"os"
"path"
"sort"
"sync"
"time"
"github.com/benbjohnson/litestream"
"github.com/benbjohnson/litestream/internal"
"github.com/pkg/sftp"
"golang.org/x/crypto/ssh"
)
// ReplicaClientType is the client type for this package.
const ReplicaClientType = "sftp"
// Default settings for replica client.
const (
DefaultDialTimeout = 30 * time.Second
)
var _ litestream.ReplicaClient = (*ReplicaClient)(nil)
// ReplicaClient is a client for writing snapshots & WAL segments to an SFTP server.
type ReplicaClient struct {
mu sync.Mutex
sshClient *ssh.Client
sftpClient *sftp.Client
// SFTP connection info
Host string
User string
Password string
Path string
KeyPath string
DialTimeout time.Duration
}
// NewReplicaClient returns a new instance of ReplicaClient.
func NewReplicaClient() *ReplicaClient {
return &ReplicaClient{
DialTimeout: DefaultDialTimeout,
}
}
// Type returns "gcs" as the client type.
func (c *ReplicaClient) Type() string {
return ReplicaClientType
}
// Init initializes the connection to the SFTP server. No-op if already initialized.
func (c *ReplicaClient) Init(ctx context.Context) (_ *sftp.Client, err error) {
c.mu.Lock()
defer c.mu.Unlock()
if c.sftpClient != nil {
return c.sftpClient, nil
}
if c.User == "" {
return nil, fmt.Errorf("sftp user required")
}
// Build SSH configuration & auth methods
config := &ssh.ClientConfig{
User: c.User,
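// Note: InsecureIgnoreHostKey accepts any host key, so the server's identity is not verified.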
HostKeyCallback: ssh.InsecureIgnoreHostKey(),
BannerCallback: ssh.BannerDisplayStderr(),
}
if c.Password != "" {
config.Auth = append(config.Auth, ssh.Password(c.Password))
}
if c.KeyPath != "" {
buf, err := os.ReadFile(c.KeyPath)
if err != nil {
return nil, fmt.Errorf("cannot read sftp key path: %w", err)
}
signer, err := ssh.ParsePrivateKey(buf)
if err != nil {
return nil, fmt.Errorf("cannot parse sftp key path: %w", err)
}
config.Auth = append(config.Auth, ssh.PublicKeys(signer))
}
// Append standard port, if necessary.
host := c.Host
if _, _, err := net.SplitHostPort(c.Host); err != nil {
host = net.JoinHostPort(c.Host, "22")
}
// Connect via SSH.
if c.sshClient, err = ssh.Dial("tcp", host, config); err != nil {
return nil, err
}
// Wrap connection with an SFTP client.
if c.sftpClient, err = sftp.NewClient(c.sshClient); err != nil {
c.sshClient.Close()
c.sshClient = nil
return nil, err
}
return c.sftpClient, nil
}
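// A minimal wiring sketch (placeholder values; not part of this file):
//
//	client := NewReplicaClient()
//	client.Host = "backup.example.com:2222" // ":22" is appended when no port is given
//	client.User = "litestream"
//	client.KeyPath = "/etc/litestream/id_ed25519"
//	client.Path = "backups/db"
//	if _, err := client.Init(context.Background()); err != nil {
//		log.Fatal(err)
//	}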
// Generations returns a list of available generation names.
func (c *ReplicaClient) Generations(ctx context.Context) (_ []string, err error) {
defer func() { c.resetOnConnError(err) }()
sftpClient, err := c.Init(ctx)
if err != nil {
return nil, err
}
fis, err := sftpClient.ReadDir(litestream.GenerationsPath(c.Path))
if os.IsNotExist(err) {
return nil, nil
} else if err != nil {
return nil, err
}
var generations []string
for _, fi := range fis {
if !fi.IsDir() {
continue
}
name := path.Base(fi.Name())
if !litestream.IsGenerationName(name) {
continue
}
generations = append(generations, name)
}
sort.Strings(generations)
return generations, nil
}
// DeleteGeneration deletes all snapshots & WAL segments within a generation.
func (c *ReplicaClient) DeleteGeneration(ctx context.Context, generation string) (err error) {
defer func() { c.resetOnConnError(err) }()
sftpClient, err := c.Init(ctx)
if err != nil {
return err
}
dir, err := litestream.GenerationPath(c.Path, generation)
if err != nil {
return fmt.Errorf("cannot determine generation path: %w", err)
}
var dirs []string
walker := sftpClient.Walk(dir)
for walker.Step() {
if err := walker.Err(); err != nil {
return fmt.Errorf("cannot walk path %q: %w", walker.Path(), err)
}
if walker.Stat().IsDir() {
dirs = append(dirs, walker.Path())
continue
}
if err := sftpClient.Remove(walker.Path()); err != nil && !os.IsNotExist(err) {
return fmt.Errorf("cannot delete file %q: %w", walker.Path(), err)
}
internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "DELETE").Inc()
}
// Remove directories in reverse order after they have been emptied.
for i := len(dirs) - 1; i >= 0; i-- {
filename := dirs[i]
if err := sftpClient.RemoveDirectory(filename); err != nil && !os.IsNotExist(err) {
return fmt.Errorf("cannot delete directory %q: %w", filename, err)
}
}
return nil
}
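// Walk-order note (illustrative): sftp.Walk visits parents before children, so
// dirs is ordered shallow-to-deep, e.g.
//
//	[".../generations/xxxx", ".../xxxx/snapshots", ".../xxxx/wal"]
//
// and the reverse loop above removes "wal" and "snapshots" before the
// generation root, so each directory is already empty when it is removed.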
// Snapshots returns an iterator over all available snapshots for a generation.
func (c *ReplicaClient) Snapshots(ctx context.Context, generation string) (_ litestream.SnapshotIterator, err error) {
defer func() { c.resetOnConnError(err) }()
sftpClient, err := c.Init(ctx)
if err != nil {
return nil, err
}
dir, err := litestream.SnapshotsPath(c.Path, generation)
if err != nil {
return nil, fmt.Errorf("cannot determine snapshots path: %w", err)
}
fis, err := sftpClient.ReadDir(dir)
if os.IsNotExist(err) {
return litestream.NewSnapshotInfoSliceIterator(nil), nil
} else if err != nil {
return nil, err
}
// Iterate over every file and convert to metadata.
infos := make([]litestream.SnapshotInfo, 0, len(fis))
for _, fi := range fis {
// Parse index from filename.
index, err := litestream.ParseSnapshotPath(path.Base(fi.Name()))
if err != nil {
continue
}
infos = append(infos, litestream.SnapshotInfo{
Generation: generation,
Index: index,
Size: fi.Size(),
CreatedAt: fi.ModTime().UTC(),
})
}
sort.Sort(litestream.SnapshotInfoSlice(infos))
return litestream.NewSnapshotInfoSliceIterator(infos), nil
}
// WriteSnapshot writes LZ4 compressed data from rd to the SFTP server.
func (c *ReplicaClient) WriteSnapshot(ctx context.Context, generation string, index int, rd io.Reader) (info litestream.SnapshotInfo, err error) {
defer func() { c.resetOnConnError(err) }()
sftpClient, err := c.Init(ctx)
if err != nil {
return info, err
}
filename, err := litestream.SnapshotPath(c.Path, generation, index)
if err != nil {
return info, fmt.Errorf("cannot determine snapshot path: %w", err)
}
startTime := time.Now()
if err := sftpClient.MkdirAll(path.Dir(filename)); err != nil {
return info, fmt.Errorf("cannot make parent wal segment directory %q: %w", path.Dir(filename), err)
}
f, err := sftpClient.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC)
if err != nil {
return info, fmt.Errorf("cannot open snapshot file for writing: %w", err)
}
defer f.Close()
n, err := io.Copy(f, rd)
if err != nil {
return info, err
} else if err := f.Close(); err != nil {
return info, err
}
internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "PUT").Inc()
internal.OperationBytesCounterVec.WithLabelValues(ReplicaClientType, "PUT").Add(float64(n))
return litestream.SnapshotInfo{
Generation: generation,
Index: index,
Size: n,
CreatedAt: startTime.UTC(),
}, nil
}
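// Caller-side sketch: the reader handed to WriteSnapshot already carries
// LZ4-compressed bytes. One assumed way to produce it, using the
// github.com/pierrec/lz4/v4 package; dbReader is a placeholder source:
//
//	pr, pw := io.Pipe()
//	go func() {
//		zw := lz4.NewWriter(pw)
//		_, err := io.Copy(zw, dbReader)
//		if err == nil {
//			err = zw.Close()
//		}
//		pw.CloseWithError(err)
//	}()
//	info, err := client.WriteSnapshot(ctx, generation, index, pr)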
// SnapshotReader returns a reader for snapshot data at the given generation/index.
func (c *ReplicaClient) SnapshotReader(ctx context.Context, generation string, index int) (_ io.ReadCloser, err error) {
defer func() { c.resetOnConnError(err) }()
sftpClient, err := c.Init(ctx)
if err != nil {
return nil, err
}
filename, err := litestream.SnapshotPath(c.Path, generation, index)
if err != nil {
return nil, fmt.Errorf("cannot determine snapshot path: %w", err)
}
f, err := sftpClient.Open(filename)
if err != nil {
return nil, err
}
internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "GET").Inc()
return f, nil
}
// DeleteSnapshot deletes a snapshot with the given generation & index.
func (c *ReplicaClient) DeleteSnapshot(ctx context.Context, generation string, index int) (err error) {
defer func() { c.resetOnConnError(err) }()
sftpClient, err := c.Init(ctx)
if err != nil {
return err
}
filename, err := litestream.SnapshotPath(c.Path, generation, index)
if err != nil {
return fmt.Errorf("cannot determine snapshot path: %w", err)
}
if err := sftpClient.Remove(filename); err != nil && !os.IsNotExist(err) {
return fmt.Errorf("cannot delete snapshot %q: %w", filename, err)
}
internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "DELETE").Inc()
return nil
}
// WALSegments returns an iterator over all available WAL files for a generation.
func (c *ReplicaClient) WALSegments(ctx context.Context, generation string) (_ litestream.WALSegmentIterator, err error) {
defer func() { c.resetOnConnError(err) }()
sftpClient, err := c.Init(ctx)
if err != nil {
return nil, err
}
dir, err := litestream.WALPath(c.Path, generation)
if err != nil {
return nil, fmt.Errorf("cannot determine wal path: %w", err)
}
fis, err := sftpClient.ReadDir(dir)
if os.IsNotExist(err) {
return litestream.NewWALSegmentInfoSliceIterator(nil), nil
} else if err != nil {
return nil, err
}
// Iterate over every file and convert to metadata.
infos := make([]litestream.WALSegmentInfo, 0, len(fis))
for _, fi := range fis {
index, offset, err := litestream.ParseWALSegmentPath(path.Base(fi.Name()))
if err != nil {
continue
}
infos = append(infos, litestream.WALSegmentInfo{
Generation: generation,
Index: index,
Offset: offset,
Size: fi.Size(),
CreatedAt: fi.ModTime().UTC(),
})
}
sort.Sort(litestream.WALSegmentInfoSlice(infos))
return litestream.NewWALSegmentInfoSliceIterator(infos), nil
}
// WriteWALSegment writes LZ4 compressed data from rd into a file on the SFTP server.
func (c *ReplicaClient) WriteWALSegment(ctx context.Context, pos litestream.Pos, rd io.Reader) (info litestream.WALSegmentInfo, err error) {
defer func() { c.resetOnConnError(err) }()
sftpClient, err := c.Init(ctx)
if err != nil {
return info, err
}
filename, err := litestream.WALSegmentPath(c.Path, pos.Generation, pos.Index, pos.Offset)
if err != nil {
return info, fmt.Errorf("cannot determine wal segment path: %w", err)
}
startTime := time.Now()
if err := sftpClient.MkdirAll(path.Dir(filename)); err != nil {
return info, fmt.Errorf("cannot make parent snapshot directory %q: %w", path.Dir(filename), err)
}
f, err := sftpClient.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC)
if err != nil {
return info, fmt.Errorf("cannot open snapshot file for writing: %w", err)
}
defer f.Close()
n, err := io.Copy(f, rd)
if err != nil {
return info, err
} else if err := f.Close(); err != nil {
return info, err
}
internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "PUT").Inc()
internal.OperationBytesCounterVec.WithLabelValues(ReplicaClientType, "PUT").Add(float64(n))
return litestream.WALSegmentInfo{
Generation: pos.Generation,
Index: pos.Index,
Offset: pos.Offset,
Size: n,
CreatedAt: startTime.UTC(),
}, nil
}
// WALSegmentReader returns a reader for a section of WAL data at the given position.
// Returns os.ErrNotExist if no matching index/offset is found.
func (c *ReplicaClient) WALSegmentReader(ctx context.Context, pos litestream.Pos) (_ io.ReadCloser, err error) {
defer func() { c.resetOnConnError(err) }()
sftpClient, err := c.Init(ctx)
if err != nil {
return nil, err
}
filename, err := litestream.WALSegmentPath(c.Path, pos.Generation, pos.Index, pos.Offset)
if err != nil {
return nil, fmt.Errorf("cannot determine wal segment path: %w", err)
}
f, err := sftpClient.Open(filename)
if err != nil {
return nil, err
}
internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "GET").Inc()
return f, nil
}
// DeleteWALSegments deletes WAL segments at the given positions.
func (c *ReplicaClient) DeleteWALSegments(ctx context.Context, a []litestream.Pos) (err error) {
defer func() { c.resetOnConnError(err) }()
sftpClient, err := c.Init(ctx)
if err != nil {
return err
}
for _, pos := range a {
filename, err := litestream.WALSegmentPath(c.Path, pos.Generation, pos.Index, pos.Offset)
if err != nil {
return fmt.Errorf("cannot determine wal segment path: %w", err)
}
if err := sftpClient.Remove(filename); err != nil && !os.IsNotExist(err) {
return fmt.Errorf("cannot delete wal segment %q: %w", filename, err)
}
internal.OperationTotalCounterVec.WithLabelValues(ReplicaClientType, "DELETE").Inc()
}
return nil
}
// Cleanup deletes the path & generations directories once they are empty.
func (c *ReplicaClient) Cleanup(ctx context.Context) (err error) {
defer func() { c.resetOnConnError(err) }()
sftpClient, err := c.Init(ctx)
if err != nil {
return err
}
if err := sftpClient.RemoveDirectory(litestream.GenerationsPath(c.Path)); err != nil && !os.IsNotExist(err) {
return fmt.Errorf("cannot delete generations path: %w", err)
} else if err := sftpClient.RemoveDirectory(c.Path); err != nil && !os.IsNotExist(err) {
return fmt.Errorf("cannot delete path: %w", err)
}
return nil
}
// resetOnConnError closes & clears the client if a connection error occurs.
func (c *ReplicaClient) resetOnConnError(err error) {
if !errors.Is(err, sftp.ErrSSHFxConnectionLost) {
return
}
if c.sftpClient != nil {
c.sftpClient.Close()
c.sftpClient = nil
}
if c.sshClient != nil {
c.sshClient.Close()
c.sshClient = nil
}
}
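// Reconnect sketch: resetOnConnError only clears the cached clients, so the
// next call that goes through Init re-dials. A hypothetical retry wrapper:
//
//	func withRetry(fn func() error) error {
//		err := fn()
//		if errors.Is(err, sftp.ErrSSHFxConnectionLost) {
//			// the clients were reset, so this second attempt re-dials via Init
//			err = fn()
//		}
//		return err
//	}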