Compare commits
99 Commits
| Author | SHA1 | Date |
|---|---|---|
| | a178ef4714 | |
| | 7ca2e193b9 | |
| | 39a6fabb9f | |
| | 0249b4e4f5 | |
| | 67eeb49101 | |
| | f7213ed35c | |
| | a532a0198e | |
| | 16f79e5814 | |
| | 39aefc2c02 | |
| | 0b08669bca | |
| | 8f5761ee13 | |
| | d2eb4fa5ba | |
| | ca489c5e73 | |
| | f0ae48af4c | |
| | 9eae39e2fa | |
| | 42ab293ffb | |
| | c8b72bf16b | |
| | 9c4de6c520 | |
| | 94411923a7 | |
| | e92db9ef4b | |
| | 031a526b9a | |
| | 2244be885d | |
| | 95bcaa5927 | |
| | 1935ebd6f0 | |
| | 7fb98df240 | |
| | f31c22af62 | |
| | 139d836d7a | |
| | 14dad1fd5a | |
| | 35d755e7f2 | |
| | 358dcd4650 | |
| | 2ce4052300 | |
| | 44af75fa98 | |
| | 3c4fd152c9 | |
| | d259d9b9e3 | |
| | 90a1d959d4 | |
| | 04d75507e3 | |
| | 4b65e6a88f | |
| | 07a65cbac7 | |
| | 6ac6a8536d | |
| | 71ab15e50a | |
| | b4e5079760 | |
| | 78563f821d | |
| | e65536f81d | |
| | 25fec29e1a | |
| | cbc2dce6dc | |
| | 1b8cfc8a41 | |
| | 290e06e60d | |
| | b94ee366e5 | |
| | 743aeb83e1 | |
| | a7ec05ad7a | |
| | 28dd7b564e | |
| | 43dda4315f | |
| | 0655bf420a | |
| | 8c113cf260 | |
| | daa74f87b4 | |
| | e1c9e09161 | |
| | 1e4e9633cc | |
| | 294846cce2 | |
| | 9eb7bd41c2 | |
| | 1ac4adb272 | |
| | a42f83f3cb | |
| | 57a02a8628 | |
| | faa5765745 | |
| | 1fa1313b0b | |
| | bcdb553267 | |
| | 9828b4c1dd | |
| | dde9d1042d | |
| | 8f30ff7d93 | |
| | aa136a17ee | |
| | 60cb2c97ca | |
| | 0abe09526d | |
| | b0a3440356 | |
| | a8d63b54aa | |
| | b22f3f100d | |
| | 3075b2e92b | |
| | 7c3272c96f | |
| | 4294fcf4b4 | |
| | ae0f51eaa9 | |
| | 8871d75a8e | |
| | c22eea13ad | |
| | f4d055916a | |
| | 979cabcdb9 | |
| | 5134bc3328 | |
| | 78d9de6512 | |
| | 065f641526 | |
| | f4d0d87fa7 | |
| | 9d0e79c2cf | |
| | da5087c14c | |
| | 4ac0829bf7 | |
| | d6de916c66 | |
| | 3b9275488d | |
| | cff778464e | |
| | 11d7d22383 | |
| | 8a7d8175fc | |
| | ffc25e2654 | |
| | 5cc78fafa0 | |
| | 0b12efb135 | |
| | d4891f33da | |
| | 42a33cccf4 | |
.github/workflows/release.yml (27 changes, vendored)

```yaml
@@ -5,7 +5,7 @@ on:
name: release
jobs:
  release:
  linux:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
@@ -16,10 +16,21 @@ jobs:
        env:
          GITHUB_TOKEN: ${{ github.token }}

      - name: Install nfpm
        run: |
          wget https://github.com/goreleaser/nfpm/releases/download/v2.2.3/nfpm_2.2.3_Linux_x86_64.tar.gz
          tar zxvf nfpm_2.2.3_Linux_x86_64.tar.gz

      - name: Build litestream
        run: |
          go build -ldflags "-X 'main.Version=${{ steps.release.outputs.tag_name }}'" -o litestream ./cmd/litestream
          mkdir -p dist
          cp etc/litestream.yml etc/litestream.service dist
          cat etc/nfpm.yml | LITESTREAM_VERSION=${{ steps.release.outputs.tag_name }} envsubst > dist/nfpm.yml
          go build -ldflags "-X 'main.Version=${{ steps.release.outputs.tag_name }}'" -o dist/litestream ./cmd/litestream

          cd dist
          tar -czvf litestream-${{ steps.release.outputs.tag_name }}-linux-amd64.tar.gz litestream
          ../nfpm pkg --config nfpm.yml --packager deb --target litestream-${{ steps.release.outputs.tag_name }}-linux-amd64.deb

      - name: Upload release binary
        uses: actions/upload-release-asset@v1.0.2
@@ -27,6 +38,16 @@ jobs:
          GITHUB_TOKEN: ${{ github.token }}
        with:
          upload_url: ${{ steps.release.outputs.upload_url }}
          asset_path: ./litestream-${{ steps.release.outputs.tag_name }}-linux-amd64.tar.gz
          asset_path: ./dist/litestream-${{ steps.release.outputs.tag_name }}-linux-amd64.tar.gz
          asset_name: litestream-${{ steps.release.outputs.tag_name }}-linux-amd64.tar.gz
          asset_content_type: application/gzip

      - name: Upload debian package
        uses: actions/upload-release-asset@v1.0.2
        env:
          GITHUB_TOKEN: ${{ github.token }}
        with:
          upload_url: ${{ steps.release.outputs.upload_url }}
          asset_path: ./dist/litestream-${{ steps.release.outputs.tag_name }}-linux-amd64.deb
          asset_name: litestream-${{ steps.release.outputs.tag_name }}-linux-amd64.deb
          asset_content_type: application/octet-stream
```
.github/workflows/test.yml (4 changes, vendored)

```yaml
@@ -1,10 +1,12 @@
on: [push, pull_request]
on: push
name: test
jobs:
  test:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/setup-go@v2
        with:
          go-version: '1.15'

      - uses: actions/checkout@v2
```
.gitignore (2 changes, vendored, new file)

```
@@ -0,0 +1,2 @@
.DS_Store
/dist
```
Makefile (22 changes, new file)

```makefile
@@ -0,0 +1,22 @@
default:

dist-linux:
	mkdir -p dist
	cp etc/litestream.yml dist/litestream.yml
	docker run --rm -v "${PWD}":/usr/src/litestream -w /usr/src/litestream -e GOOS=linux -e GOARCH=amd64 golang:1.15 go build -v -o dist/litestream ./cmd/litestream
	tar -cz -f dist/litestream-linux-amd64.tar.gz -C dist litestream

dist-macos:
ifndef LITESTREAM_VERSION
	$(error LITESTREAM_VERSION is undefined)
endif
	mkdir -p dist
	go build -v -ldflags "-X 'main.Version=${LITESTREAM_VERSION}'" -o dist/litestream ./cmd/litestream
	gon etc/gon.hcl
	mv dist/litestream.zip dist/litestream-${LITESTREAM_VERSION}-darwin-amd64.zip
	openssl dgst -sha256 dist/litestream-${LITESTREAM_VERSION}-darwin-amd64.zip

clean:
	rm -rf dist

.PHONY: default dist-linux dist-macos clean
```
README.md (297 changes)

@@ -1,17 +1,296 @@
litestream
Litestream




==========

Streaming replication for SQLite.
Litestream is a standalone streaming replication tool for SQLite. It runs as a
background process and safely replicates changes incrementally to another file
or S3. Litestream only communicates with SQLite through the SQLite API so it
will not corrupt your database.

If you need support or have ideas for improving Litestream, please visit the
[GitHub Discussions](https://github.com/benbjohnson/litestream/discussions) to
chat.

If you find this project interesting, please consider starring the project on
GitHub.

## Questions
## Installation

- How to avoid WAL checkpointing on close?
### Mac OS (Homebrew)

To install from homebrew, first add the Litestream tap and then install:

## Notes

```sql
-- Disable autocheckpointing.
PRAGMA wal_autocheckpoint = 0
```sh
$ brew install benbjohnson/litestream/litestream
```

### Linux (Debian)

You can download the `.deb` file from the [Releases page][releases] page and
then run the following:

```sh
$ sudo dpkg -i litestream-v0.3.0-linux-amd64.deb
```

Once installed, you'll need to enable & start the service:

```sh
$ sudo systemctl enable litestream
$ sudo systemctl start litestream
```

### Release binaries

You can also download the release binary for your system from the
[releases page][releases] and run it as a standalone application.

### Building from source

Download and install the [Go toolchain](https://golang.org/) and then run:

```sh
$ go install ./cmd/litestream
```

The `litestream` binary should be in your `$GOPATH/bin` folder.

## Quick Start

Litestream provides a configuration file that can be used for production
deployments but you can also specify a single database and replica on the
command line for testing.

First, you'll need to create an S3 bucket that we'll call `"mybkt"` in this
example. You'll also need to set your AWS credentials:

```sh
$ export AWS_ACCESS_KEY_ID=AKIAxxxxxxxxxxxxxxxx
$ export AWS_SECRET_ACCESS_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx/xxxxxxxxx
```

Next you can run the `litestream replicate` command with the path to the
database you want to backup and the URL of your replica destination:

```sh
$ litestream replicate /path/to/db s3://mybkt/db
```

If you make changes to your local database, those changes will be replicated
to S3 every 10 seconds. From another terminal window, you can restore your
database from your S3 replica:

```
$ litestream restore -v -o /path/to/restored/db s3://mybkt/db
```

Voila! 🎉

Your database should be restored to the last replicated state that
was sent to S3. You can adjust your replication frequency and other options by
using a configuration-based approach specified below.

## Configuration

A configuration-based install gives you more replication options. By default,
the config file lives at `/etc/litestream.yml` but you can pass in a different
path to any `litestream` command using the `-config PATH` flag. You can also
set the `LITESTREAM_CONFIG` environment variable to specify a new path.

The configuration specifies one or more `dbs` and a list of one or more replica
locations for each db. Below are some common configurations:

### Replicate to S3

This will replicate the database at `/path/to/db` to the `"/db"` path inside
the S3 bucket named `"mybkt"`.

```yaml
access-key-id: AKIAxxxxxxxxxxxxxxxx
secret-access-key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx/xxxxxxxxx

dbs:
  - path: /path/to/db
    replicas:
      - url: s3://mybkt/db
```

### Replicate to another file path

This will replicate the database at `/path/to/db` to a directory named
`/path/to/replica`.

```yaml
dbs:
  - path: /path/to/db
    replicas:
      - path: /path/to/replica
```

### Retention period

By default, replicas will retain a snapshot & subsequent WAL changes for 24
hours. When the snapshot age exceeds the retention threshold, a new snapshot
is taken and uploaded and the previous snapshot and WAL files are removed.

You can configure this setting per-replica. Times are parsed using [Go's
duration](https://golang.org/pkg/time/#ParseDuration) so time units of hours
(`h`), minutes (`m`), and seconds (`s`) are allowed but days, weeks, months, and
years are not.

```yaml
db:
  - path: /path/to/db
    replicas:
      - url: s3://mybkt/db
        retention: 1h # 1 hour retention
```
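Because retention values go through Go's duration parser, a few lines of Go are enough to sanity-check a value (a minimal sketch, not part of Litestream itself). It shows that hour-based values parse while a day suffix is rejected, so a three-day retention must be written as `72h`:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Hours, minutes, and seconds are valid duration units.
	d, err := time.ParseDuration("1h")
	fmt.Println(d, err) // 1h0m0s <nil>

	// There is no day unit, so "3d" fails with an unknown-unit error;
	// write "72h" instead for a three-day retention.
	_, err = time.ParseDuration("3d")
	fmt.Println(err)
}
```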
### Monitoring replication

You can also enable a Prometheus metrics endpoint to monitor replication by
specifying a bind address with the `addr` field:

```yml
addr: ":9090"
```

This will make metrics available at: http://localhost:9090/metrics
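If you want to check the endpoint programmatically rather than in a browser, a few lines of Go will do (a minimal sketch that assumes Litestream is running locally with `addr: ":9090"` as configured above):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
)

func main() {
	// Fetch the Prometheus metrics exposed by the replicate command.
	resp, err := http.Get("http://localhost:9090/metrics")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s", body)
}
```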
### Other configuration options

These are some additional configuration options available on replicas:

- `type`—Specify the type of replica (`"file"` or `"s3"`). Derived from `"path"`.
- `name`—Specify an optional name for the replica if you are using multiple replicas.
- `path`—File path to the replica location.
- `url`—URL to the replica location.
- `retention-check-interval`—Time between retention enforcement checks. Defaults to `1h`.
- `validation-interval`—Interval between periodic checks to ensure restored backup matches current database. Disabled by default.

These replica options are only available for S3 replicas:

- `bucket`—S3 bucket name. Derived from `"path"`.
- `region`—S3 bucket region. Looked up on startup if unspecified.
- `sync-interval`—Replication sync frequency.

## Usage

### Replication

Once your configuration is saved, you'll need to begin replication. If you
installed the `.deb` file then run:

```sh
$ sudo systemctl restart litestream
```

To run litestream on its own, run:

```sh
# Replicate using the /etc/litestream.yml configuration.
$ litestream replicate

# Replicate using a different configuration path.
$ litestream replicate -config /path/to/litestream.yml
```

The `litestream` command will initialize and then wait indefinitely for changes.
You should see your destination replica path is now populated with a
`generations` directory. Inside there should be a 16-character hex generation
directory and inside there should be snapshots & WAL files. As you make changes
to your source database, changes will be copied over to your replica incrementally.

### Restoring a backup

Litestream can restore a previous snapshot and replay all replicated WAL files.
By default, it will restore up to the latest WAL file but you can also perform
point-in-time restores.

A database can only be restored to a path that does not exist so you don't need
to worry about accidentally overwriting your current database.

```sh
# Restore database to original path.
$ litestream restore /path/to/db

# Restore database to a new location.
$ litestream restore -o /path/to/restored/db /path/to/db

# Restore from a replica URL.
$ litestream restore -o /path/to/restored/db s3://mybkt/db

# Restore database to a specific point-in-time.
$ litestream restore -timestamp 2020-01-01T00:00:00Z /path/to/db
```

Point-in-time restores only have the resolution of the timestamp of the WAL file
itself. By default, litestream will start a new WAL file every minute so
point-in-time restores are only accurate to the minute.

## How it works

SQLite provides a WAL (write-ahead log) journaling mode which writes pages to
a `-wal` file before eventually being copied over to the original database file.
This copying process is known as checkpointing. The WAL file works as a circular
buffer so when the WAL reaches a certain size then it restarts from the beginning.

Litestream works by taking over the checkpointing process and controlling when
it is restarted to ensure that it copies every new page. Checkpointing is only
allowed when there are no read transactions so Litestream maintains a
long-running read transaction against each database until it is ready to
checkpoint.

The SQLite WAL file is copied to a separate location called the shadow WAL which
ensures that it will not be overwritten by SQLite. This shadow WAL acts as a
temporary buffer so that replicas can replicate to their destination (e.g.
another file path or to S3). The shadow WAL files are removed once they have
been fully replicated. You can find the shadow directory as a hidden directory
next to your database file. If you database file is named `/var/lib/my.db` then
the shadow directory will be `/var/lib/.my.db-litestream`.

Litestream groups a snapshot and all subsequent WAL changes into "generations".
A generation is started on initial replication of a database and a new
generation will be started if litestream detects that the WAL replication is
no longer contiguous. This can occur if the `litestream` process is stopped and
another process is allowed to checkpoint the WAL.
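The checkpointing control described above can be sketched in a few lines of Go using the `github.com/mattn/go-sqlite3` driver that the project already imports. This is only an illustration of the mechanism (disable autocheckpointing, hold a read transaction while copying WAL frames, then checkpoint on your own schedule), not Litestream's actual implementation:

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/mattn/go-sqlite3"
)

func main() {
	db, err := sql.Open("sqlite3", "/path/to/db")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
	// Use a single connection so the per-connection PRAGMAs below apply
	// to the same connection that later runs the checkpoint.
	db.SetMaxOpenConns(1)

	// Enable WAL mode and disable SQLite's automatic checkpointing so the
	// WAL is only restarted when we decide to restart it.
	if _, err := db.Exec(`PRAGMA journal_mode = wal`); err != nil {
		log.Fatal(err)
	}
	if _, err := db.Exec(`PRAGMA wal_autocheckpoint = 0`); err != nil {
		log.Fatal(err)
	}

	// Hold a long-running read transaction. While it is open, the WAL
	// cannot be restarted from the beginning, so new frames stay available
	// for copying.
	tx, err := db.Begin()
	if err != nil {
		log.Fatal(err)
	}
	var n int
	if err := tx.QueryRow(`SELECT COUNT(1) FROM sqlite_master`).Scan(&n); err != nil {
		log.Fatal(err)
	}

	// ... copy any new WAL frames to a shadow location here ...

	// Release the read lock, then checkpoint on our own schedule.
	if err := tx.Rollback(); err != nil {
		log.Fatal(err)
	}
	var busy, logFrames, checkpointed int
	if err := db.QueryRow(`PRAGMA wal_checkpoint(TRUNCATE)`).Scan(&busy, &logFrames, &checkpointed); err != nil {
		log.Fatal(err)
	}
	fmt.Println("checkpointed frames:", checkpointed, "of", logFrames)
}
```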
## Open-source, not open-contribution

[Similar to SQLite](https://www.sqlite.org/copyright.html), litestream is open
source but closed to contributions. This keeps the code base free of proprietary
or licensed code but it also helps me continue to maintain and build litestream.

As the author of [BoltDB](https://github.com/boltdb/bolt), I found that
accepting and maintaining third party patches contributed to my burn out and
I eventually archived the project. Writing databases & low-level replication
tools involves nuance and simple one line changes can have profound and
unexpected changes in correctness and performance. Small contributions
typically required hours of my time to properly test and validate them.

I am grateful for community involvement, bug reports, & feature requests. I do
not wish to come off as anything but welcoming, however, I've
made the decision to keep this project closed to contributions for my own
mental health and long term viability of the project.

[releases]: https://github.com/benbjohnson/litestream/releases
cmd/litestream/databases.go (79 changes, new file)

```go
@@ -0,0 +1,79 @@
package main

import (
	"context"
	"errors"
	"flag"
	"fmt"
	"os"
	"strings"
	"text/tabwriter"
)

// DatabasesCommand is a command for listing managed databases.
type DatabasesCommand struct{}

// Run executes the command.
func (c *DatabasesCommand) Run(ctx context.Context, args []string) (err error) {
	var configPath string
	fs := flag.NewFlagSet("litestream-databases", flag.ContinueOnError)
	registerConfigFlag(fs, &configPath)
	fs.Usage = c.Usage
	if err := fs.Parse(args); err != nil {
		return err
	} else if fs.NArg() != 0 {
		return fmt.Errorf("too many argument")
	}

	// Load configuration.
	if configPath == "" {
		return errors.New("-config required")
	}
	config, err := ReadConfigFile(configPath)
	if err != nil {
		return err
	}

	// List all databases.
	w := tabwriter.NewWriter(os.Stdout, 0, 8, 1, '\t', 0)
	fmt.Fprintln(w, "path\treplicas")
	for _, dbConfig := range config.DBs {
		db, err := newDBFromConfig(&config, dbConfig)
		if err != nil {
			return err
		}

		var replicaNames []string
		for _, r := range db.Replicas {
			replicaNames = append(replicaNames, r.Name())
		}

		fmt.Fprintf(w, "%s\t%s\n",
			db.Path(),
			strings.Join(replicaNames, ","),
		)
	}
	w.Flush()

	return nil
}

// Usage prints the help screen to STDOUT.
func (c *DatabasesCommand) Usage() {
	fmt.Printf(`
The databases command lists all databases in the configuration file.

Usage:

	litestream databases [arguments]

Arguments:

	-config PATH
	    Specifies the configuration file.
	    Defaults to %s

`[1:],
		DefaultConfigPath(),
	)
}
```
cmd/litestream/generations.go

```go
@@ -7,94 +7,101 @@ import (
"fmt"
"log"
"os"
"path/filepath"
"text/tabwriter"
"time"

"github.com/benbjohnson/litestream"
)

type GenerationsCommand struct {
ConfigPath string
Config Config

DBPath string
}

func NewGenerationsCommand() *GenerationsCommand {
return &GenerationsCommand{}
}
// GenerationsCommand represents a command to list all generations for a database.
type GenerationsCommand struct{}

// Run executes the command.
func (c *GenerationsCommand) Run(ctx context.Context, args []string) (err error) {
var configPath string
fs := flag.NewFlagSet("litestream-generations", flag.ContinueOnError)
registerConfigFlag(fs, &c.ConfigPath)
registerConfigFlag(fs, &configPath)
replicaName := fs.String("replica", "", "replica name")
fs.Usage = c.Usage
if err := fs.Parse(args); err != nil {
return err
} else if fs.NArg() == 0 || fs.Arg(0) == "" {
return fmt.Errorf("database path or replica URL required")
} else if fs.NArg() > 1 {
return fmt.Errorf("too many arguments")
}

// Load configuration.
if c.ConfigPath == "" {
return errors.New("-config required")
}
config, err := ReadConfigFile(c.ConfigPath)
if err != nil {
return err
}

// Determine absolute path for database, if specified.
if c.DBPath = fs.Arg(0); c.DBPath != "" {
if c.DBPath, err = filepath.Abs(c.DBPath); err != nil {
var db *litestream.DB
var r litestream.Replica
updatedAt := time.Now()
if isURL(fs.Arg(0)) {
if r, err = NewReplicaFromURL(fs.Arg(0)); err != nil {
return err
}
} else if configPath != "" {
// Load configuration.
config, err := ReadConfigFile(configPath)
if err != nil {
return err
}

// Lookup database from configuration file by path.
if path, err := expand(fs.Arg(0)); err != nil {
return err
} else if dbc := config.DBConfig(path); dbc == nil {
return fmt.Errorf("database not found in config: %s", path)
} else if db, err = newDBFromConfig(&config, dbc); err != nil {
return err
}

// Filter by replica, if specified.
if *replicaName != "" {
if r = db.Replica(*replicaName); r == nil {
return fmt.Errorf("replica %q not found for database %q", *replicaName, db.Path())
}
}

// Determine last time database or WAL was updated.
if updatedAt, err = db.UpdatedAt(); err != nil {
return err
}
} else {
return errors.New("config path or replica URL required")
}

var replicas []litestream.Replica
if r != nil {
replicas = []litestream.Replica{r}
} else {
replicas = db.Replicas
}

// List each generation.
w := tabwriter.NewWriter(os.Stdout, 0, 8, 1, '\t', 0)
fmt.Fprintln(w, "db\tname\tgeneration\tlag\tstart\tend")
for _, dbConfig := range config.DBs {
// Filter database, if specified in the arguments.
if c.DBPath != "" && dbConfig.Path != c.DBPath {
fmt.Fprintln(w, "name\tgeneration\tlag\tstart\tend")
for _, r := range replicas {
generations, err := r.Generations(ctx)
if err != nil {
log.Printf("%s: cannot list generations: %s", r.Name(), err)
continue
}

// Instantiate DB from from configuration.
db, err := newDBFromConfig(dbConfig)
if err != nil {
return err
}

// Determine last time database or WAL was updated.
updatedAt, err := db.UpdatedAt()
if err != nil {
return err
}

// Iterate over each replica in the database.
for _, r := range db.Replicas {
generations, err := r.Generations(ctx)
// Iterate over each generation for the replica.
for _, generation := range generations {
stats, err := r.GenerationStats(ctx, generation)
if err != nil {
log.Printf("%s: cannot list generations: %s", r.Name(), err)
log.Printf("%s: cannot find generation stats: %s", r.Name(), err)
continue
}

// Iterate over each generation for the replica.
for _, generation := range generations {
stats, err := r.GenerationStats(ctx, generation)
if err != nil {
log.Printf("%s: cannot find generation stats: %s", r.Name(), err)
continue
}

fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\n",
db.Path(),
r.Name(),
generation,
truncateDuration(stats.UpdatedAt.Sub(updatedAt)).String(),
stats.CreatedAt.Format(time.RFC3339),
stats.UpdatedAt.Format(time.RFC3339),
)
}
fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n",
r.Name(),
generation,
truncateDuration(updatedAt.Sub(stats.UpdatedAt)).String(),
stats.CreatedAt.Format(time.RFC3339),
stats.UpdatedAt.Format(time.RFC3339),
)
w.Flush()
}
}
w.Flush()
@@ -102,32 +109,51 @@ func (c *GenerationsCommand) Run(ctx context.Context, args []string) (err error)
return nil
}

// Usage prints the help message to STDOUT.
func (c *GenerationsCommand) Usage() {
fmt.Printf(`
The generations command lists all generations across all replicas along with
stats about their lag behind the primary database and the time range they cover.
The generations command lists all generations for a database or replica. It also
lists stats about their lag behind the primary database and the time range they
cover.

Usage:

litestream generations [arguments] DB
litestream generations [arguments] DB_PATH

litestream generations [arguments] REPLICA_URL

Arguments:

-config PATH
Specifies the configuration file. Defaults to %s
Specifies the configuration file.
Defaults to %s

-replica NAME
Optional, filters by replica.

`[1:],
DefaultConfigPath,
DefaultConfigPath(),
)
}

func truncateDuration(d time.Duration) time.Duration {
if d > time.Hour {
return d.Truncate(time.Hour)
} else if d > time.Minute {
return d.Truncate(time.Minute)
} else if d > time.Second {
if d < 0 {
if d < -10*time.Second {
return d.Truncate(time.Second)
} else if d < -time.Second {
return d.Truncate(time.Second / 10)
} else if d < -time.Millisecond {
return d.Truncate(time.Millisecond)
} else if d < -time.Microsecond {
return d.Truncate(time.Microsecond)
}
return d
}

if d > 10*time.Second {
return d.Truncate(time.Second)
} else if d > time.Second {
return d.Truncate(time.Second / 10)
} else if d > time.Millisecond {
return d.Truncate(time.Millisecond)
} else if d > time.Microsecond {
```
cmd/litestream/main.go

```go
@@ -6,12 +6,17 @@ import (
"fmt"
"io/ioutil"
"log"
"net/url"
"os"
"os/user"
"path"
"path/filepath"
"strings"
"time"

"github.com/benbjohnson/litestream"
"github.com/benbjohnson/litestream/s3"
_ "github.com/mattn/go-sqlite3"
"gopkg.in/yaml.v2"
)

@@ -20,9 +25,6 @@ var (
Version = "(development build)"
)

// DefaultConfigPath is the default configuration path.
const DefaultConfigPath = "/etc/litestream.yml"

func main() {
log.SetFlags(0)

@@ -35,12 +37,15 @@ func main() {
}
}

// Main represents the main program execution.
type Main struct{}

// NewMain returns a new instance of Main.
func NewMain() *Main {
return &Main{}
}

// Run executes the program.
func (m *Main) Run(ctx context.Context, args []string) (err error) {
var cmd string
if len(args) > 0 {
@@ -48,14 +53,20 @@ func (m *Main) Run(ctx context.Context, args []string) (err error) {
}

switch cmd {
case "databases":
return (&DatabasesCommand{}).Run(ctx, args)
case "generations":
return (&GenerationsCommand{}).Run(ctx, args)
case "replicate":
return (&ReplicateCommand{}).Run(ctx, args)
case "restore":
return (&RestoreCommand{}).Run(ctx, args)
case "snapshots":
return (&SnapshotsCommand{}).Run(ctx, args)
case "version":
return (&VersionCommand{}).Run(ctx, args)
case "wal":
return (&WALCommand{}).Run(ctx, args)
default:
if cmd == "" || cmd == "help" || strings.HasPrefix(cmd, "-") {
m.Usage()
@@ -65,6 +76,7 @@ func (m *Main) Run(ctx context.Context, args []string) (err error) {
}
}

// Usage prints the help screen to STDOUT.
func (m *Main) Usage() {
fmt.Println(`
litestream is a tool for replicating SQLite databases.
@@ -75,16 +87,29 @@ Usage:

The commands are:

generations list available generations across all dbs & replicas
databases list databases specified in config file
generations list available generations for a database
replicate runs a server to replicate databases
restore recovers database backup from a replica
version prints the version
snapshots list available snapshots for a database
version prints the binary version
wal list available WAL files for a database
`[1:])
}

// Config represents a configuration file for the litestream daemon.
type Config struct {
DBs []*DBConfig `yaml:"databases"`
// Bind address for serving metrics.
Addr string `yaml:"addr"`

// List of databases to manage.
DBs []*DBConfig `yaml:"dbs"`

// Global S3 settings
AccessKeyID string `yaml:"access-key-id"`
SecretAccessKey string `yaml:"secret-access-key"`
Region string `yaml:"region"`
Bucket string `yaml:"bucket"`
}

// DefaultConfig returns a new instance of Config with defaults set.
@@ -92,6 +117,7 @@ func DefaultConfig() Config {
return Config{}
}

// DBConfig returns database configuration by path.
func (c *Config) DBConfig(path string) *DBConfig {
for _, dbConfig := range c.DBs {
if dbConfig.Path == path {
@@ -102,18 +128,13 @@ func (c *Config) DBConfig(path string) *DBConfig {
}

// ReadConfigFile unmarshals config from filename. Expands path if needed.
func ReadConfigFile(filename string) (Config, error) {
func ReadConfigFile(filename string) (_ Config, err error) {
config := DefaultConfig()

// Expand filename, if necessary.
if prefix := "~" + string(os.PathSeparator); strings.HasPrefix(filename, prefix) {
u, err := user.Current()
if err != nil {
return config, err
} else if u.HomeDir == "" {
return config, fmt.Errorf("home directory unset")
}
filename = filepath.Join(u.HomeDir, strings.TrimPrefix(filename, prefix))
filename, err = expand(filename)
if err != nil {
return config, err
}

// Read & deserialize configuration.
@@ -124,32 +145,123 @@ func ReadConfigFile(filename string) (Config, error) {
} else if err := yaml.Unmarshal(buf, &config); err != nil {
return config, err
}

// Normalize paths.
for _, dbConfig := range config.DBs {
if dbConfig.Path, err = expand(dbConfig.Path); err != nil {
return config, err
}
}

return config, nil
}

// DBConfig represents the configuration for a single database.
type DBConfig struct {
Path string `yaml:"path"`
Replicas []*ReplicaConfig `yaml:"replicas"`
}

// ReplicaConfig represents the configuration for a single replica in a database.
type ReplicaConfig struct {
Type string `yaml:"type"` // "file", "s3"
Name string `yaml:"name"` // name of replica, optional.
Path string `yaml:"path"` // used for file replicas
Type string `yaml:"type"` // "file", "s3"
Name string `yaml:"name"` // name of replica, optional.
Path string `yaml:"path"`
URL string `yaml:"url"`
Retention time.Duration `yaml:"retention"`
RetentionCheckInterval time.Duration `yaml:"retention-check-interval"`
SyncInterval time.Duration `yaml:"sync-interval"` // s3 only
ValidationInterval time.Duration `yaml:"validation-interval"`

// S3 settings
AccessKeyID string `yaml:"access-key-id"`
SecretAccessKey string `yaml:"secret-access-key"`
Region string `yaml:"region"`
Bucket string `yaml:"bucket"`
}

// NewReplicaFromURL returns a new Replica instance configured from a URL.
// The replica's database is not set.
func NewReplicaFromURL(s string) (litestream.Replica, error) {
scheme, host, path, err := ParseReplicaURL(s)
if err != nil {
return nil, err
}

switch scheme {
case "file":
return litestream.NewFileReplica(nil, "", path), nil
case "s3":
r := s3.NewReplica(nil, "")
r.Bucket, r.Path = host, path
return r, nil
default:
return nil, fmt.Errorf("invalid replica url type: %s", s)
}
}

// ParseReplicaURL parses a replica URL.
func ParseReplicaURL(s string) (scheme, host, urlpath string, err error) {
u, err := url.Parse(s)
if err != nil {
return "", "", "", err
}

switch u.Scheme {
case "file":
scheme, u.Scheme = u.Scheme, ""
return scheme, "", path.Clean(u.String()), nil

case "":
return u.Scheme, u.Host, u.Path, fmt.Errorf("replica url scheme required: %s", s)

default:
return u.Scheme, u.Host, strings.TrimPrefix(path.Clean(u.Path), "/"), nil
}
}

// isURL returns true if s can be parsed and has a scheme.
func isURL(s string) bool {
u, err := url.Parse(s)
return err == nil && u.Scheme != ""
}

// ReplicaType returns the type based on the type field or extracted from the URL.
func (c *ReplicaConfig) ReplicaType() string {
typ, _, _, _ := ParseReplicaURL(c.URL)
if typ != "" {
return typ
} else if c.Type != "" {
return c.Type
}
return "file"
}

// DefaultConfigPath returns the default config path.
func DefaultConfigPath() string {
if v := os.Getenv("LITESTREAM_CONFIG"); v != "" {
return v
}
return "/etc/litestream.yml"
}

func registerConfigFlag(fs *flag.FlagSet, p *string) {
fs.StringVar(p, "config", DefaultConfigPath, "config path")
fs.StringVar(p, "config", DefaultConfigPath(), "config path")
}

// newDBFromConfig instantiates a DB based on a configuration.
func newDBFromConfig(config *DBConfig) (*litestream.DB, error) {
func newDBFromConfig(c *Config, dbc *DBConfig) (*litestream.DB, error) {
path, err := expand(dbc.Path)
if err != nil {
return nil, err
}

// Initialize database with given path.
db := litestream.NewDB(config.Path)
db := litestream.NewDB(path)

// Instantiate and attach replicas.
for _, rconfig := range config.Replicas {
r, err := newReplicaFromConfig(db, rconfig)
for _, rc := range dbc.Replicas {
r, err := newReplicaFromConfig(db, c, dbc, rc)
if err != nil {
return nil, err
}
@@ -160,19 +272,129 @@ func newDBFromConfig(config *DBConfig) (*litestream.DB, error) {
}

// newReplicaFromConfig instantiates a replica for a DB based on a config.
func newReplicaFromConfig(db *litestream.DB, config *ReplicaConfig) (litestream.Replica, error) {
switch config.Type {
case "", "file":
return newFileReplicaFromConfig(db, config)
func newReplicaFromConfig(db *litestream.DB, c *Config, dbc *DBConfig, rc *ReplicaConfig) (litestream.Replica, error) {
// Ensure user did not specify URL in path.
if isURL(rc.Path) {
return nil, fmt.Errorf("replica path cannot be a url, please use the 'url' field instead: %s", rc.Path)
}

switch rc.ReplicaType() {
case "file":
return newFileReplicaFromConfig(db, c, dbc, rc)
case "s3":
return newS3ReplicaFromConfig(db, c, dbc, rc)
default:
return nil, fmt.Errorf("unknown replica type in config: %q", config.Type)
return nil, fmt.Errorf("unknown replica type in config: %q", rc.Type)
}
}

// newFileReplicaFromConfig returns a new instance of FileReplica build from config.
func newFileReplicaFromConfig(db *litestream.DB, config *ReplicaConfig) (*litestream.FileReplica, error) {
if config.Path == "" {
return nil, fmt.Errorf("file replica path require for db %q", db.Path())
func newFileReplicaFromConfig(db *litestream.DB, c *Config, dbc *DBConfig, rc *ReplicaConfig) (_ *litestream.FileReplica, err error) {
path := rc.Path
if rc.URL != "" {
_, _, path, err = ParseReplicaURL(rc.URL)
if err != nil {
return nil, err
}
}
return litestream.NewFileReplica(db, config.Name, config.Path), nil

if path == "" {
return nil, fmt.Errorf("%s: file replica path required", db.Path())
}

if path, err = expand(path); err != nil {
return nil, err
}

r := litestream.NewFileReplica(db, rc.Name, path)
if v := rc.Retention; v > 0 {
r.Retention = v
}
if v := rc.RetentionCheckInterval; v > 0 {
r.RetentionCheckInterval = v
}
if v := rc.ValidationInterval; v > 0 {
r.ValidationInterval = v
}
return r, nil
}

// newS3ReplicaFromConfig returns a new instance of S3Replica build from config.
func newS3ReplicaFromConfig(db *litestream.DB, c *Config, dbc *DBConfig, rc *ReplicaConfig) (_ *s3.Replica, err error) {
bucket := c.Bucket
if v := rc.Bucket; v != "" {
bucket = v
}

path := rc.Path
if rc.URL != "" {
_, bucket, path, err = ParseReplicaURL(rc.URL)
if err != nil {
return nil, err
}
}

// Use global or replica-specific S3 settings.
accessKeyID := c.AccessKeyID
if v := rc.AccessKeyID; v != "" {
accessKeyID = v
}
secretAccessKey := c.SecretAccessKey
if v := rc.SecretAccessKey; v != "" {
secretAccessKey = v
}
region := c.Region
if v := rc.Region; v != "" {
region = v
}

// Ensure required settings are set.
if bucket == "" {
return nil, fmt.Errorf("%s: s3 bucket required", db.Path())
}

// Build replica.
r := s3.NewReplica(db, rc.Name)
r.AccessKeyID = accessKeyID
r.SecretAccessKey = secretAccessKey
r.Region = region
r.Bucket = bucket
r.Path = path

if v := rc.Retention; v > 0 {
r.Retention = v
}
if v := rc.RetentionCheckInterval; v > 0 {
r.RetentionCheckInterval = v
}
if v := rc.SyncInterval; v > 0 {
r.SyncInterval = v
}
if v := rc.ValidationInterval; v > 0 {
r.ValidationInterval = v
}
return r, nil
}

// expand returns an absolute path for s.
func expand(s string) (string, error) {
// Just expand to absolute path if there is no home directory prefix.
prefix := "~" + string(os.PathSeparator)
if s != "~" && !strings.HasPrefix(s, prefix) {
return filepath.Abs(s)
}

// Look up home directory.
u, err := user.Current()
if err != nil {
return "", err
} else if u.HomeDir == "" {
return "", fmt.Errorf("cannot expand path %s, no home directory available", s)
}

// Return path with tilde replaced by the home directory.
if s == "~" {
return u.HomeDir, nil
}
return filepath.Join(u.HomeDir, strings.TrimPrefix(s, prefix)), nil
}
```
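As a minimal usage sketch of the helpers above (a hypothetical test file in the same `cmd/litestream` package, not part of the change itself), this shows how `ParseReplicaURL` splits an S3 replica URL into scheme, bucket, and path, and how `isURL` distinguishes replica URLs from plain database paths:

```go
package main

import "testing"

func TestParseReplicaURL_S3(t *testing.T) {
	scheme, host, path, err := ParseReplicaURL("s3://mybkt/db")
	if err != nil {
		t.Fatal(err)
	}
	// Per the parsing logic above: the scheme selects the replica type,
	// the URL host is the bucket, and the leading slash is trimmed from
	// the object path.
	if scheme != "s3" || host != "mybkt" || path != "db" {
		t.Fatalf("unexpected result: %q %q %q", scheme, host, path)
	}

	// A bare filesystem path has no scheme, so isURL reports false and the
	// commands fall back to looking the path up in the configuration file.
	if isURL("/path/to/db") {
		t.Fatal("expected plain path not to be treated as a replica URL")
	}
}
```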
cmd/litestream/replicate.go

```go
@@ -5,12 +5,19 @@ import (
"errors"
"flag"
"fmt"
"log"
"net"
"net/http"
_ "net/http/pprof"
"os"
"os/signal"

"github.com/benbjohnson/litestream"
"github.com/benbjohnson/litestream/s3"
"github.com/prometheus/client_golang/prometheus/promhttp"
)

// ReplicateCommand represents a command that continuously replicates SQLite databases.
type ReplicateCommand struct {
ConfigPath string
Config Config
@@ -19,30 +26,42 @@ type ReplicateCommand struct {
DBs []*litestream.DB
}

func NewReplicateCommand() *ReplicateCommand {
return &ReplicateCommand{}
}

// Run loads all databases specified in the configuration.
func (c *ReplicateCommand) Run(ctx context.Context, args []string) (err error) {
fs := flag.NewFlagSet("litestream-replicate", flag.ContinueOnError)
verbose := fs.Bool("v", false, "verbose logging")
registerConfigFlag(fs, &c.ConfigPath)
fs.Usage = c.Usage
if err := fs.Parse(args); err != nil {
return err
}

// Load configuration.
if c.ConfigPath == "" {
return errors.New("-config required")
// Load configuration or use CLI args to build db/replica.
var config Config
if fs.NArg() == 1 {
return fmt.Errorf("must specify at least one replica URL for %s", fs.Arg(0))
} else if fs.NArg() > 1 {
dbConfig := &DBConfig{Path: fs.Arg(0)}
for _, u := range fs.Args()[1:] {
dbConfig.Replicas = append(dbConfig.Replicas, &ReplicaConfig{URL: u})
}
config.DBs = []*DBConfig{dbConfig}
} else if c.ConfigPath != "" {
config, err = ReadConfigFile(c.ConfigPath)
if err != nil {
return err
}
} else {
return errors.New("-config flag or database/replica arguments required")
}
config, err := ReadConfigFile(c.ConfigPath)
if err != nil {
return err

// Enable trace logging.
if *verbose {
litestream.Tracef = log.Printf
}

// Setup signal handler.
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(ctx)
ch := make(chan os.Signal, 1)
signal.Notify(ch, os.Interrupt)
go func() { <-ch; cancel() }()
@@ -51,11 +70,11 @@ func (c *ReplicateCommand) Run(ctx context.Context, args []string) (err error) {
fmt.Printf("litestream %s\n", Version)

if len(config.DBs) == 0 {
return errors.New("configuration must specify at least one database")
fmt.Println("no databases specified in configuration")
}

for _, dbConfig := range config.DBs {
db, err := newDBFromConfig(dbConfig)
db, err := newDBFromConfig(&config, dbConfig)
if err != nil {
return err
}
@@ -68,7 +87,31 @@ func (c *ReplicateCommand) Run(ctx context.Context, args []string) (err error) {
}

// Notify user that initialization is done.
fmt.Printf("Initialized with %d databases.\n", len(c.DBs))
for _, db := range c.DBs {
fmt.Printf("initialized db: %s\n", db.Path())
for _, r := range db.Replicas {
switch r := r.(type) {
case *litestream.FileReplica:
fmt.Printf("replicating to: name=%q type=%q path=%q\n", r.Name(), r.Type(), r.Path())
case *s3.Replica:
fmt.Printf("replicating to: name=%q type=%q bucket=%q path=%q region=%q\n", r.Name(), r.Type(), r.Bucket, r.Path, r.Region)
default:
fmt.Printf("replicating to: name=%q type=%q\n", r.Name(), r.Type())
}
}
}

// Serve metrics over HTTP if enabled.
if config.Addr != "" {
_, port, _ := net.SplitHostPort(config.Addr)
fmt.Printf("serving metrics on http://localhost:%s/metrics\n", port)
go func() {
http.Handle("/metrics", promhttp.Handler())
if err := http.ListenAndServe(config.Addr, nil); err != nil {
log.Printf("cannot start metrics server: %s", err)
}
}()
}

// Wait for signal to stop program.
<-ctx.Done()
@@ -96,19 +139,28 @@ func (c *ReplicateCommand) Close() (err error) {
return err
}

// Usage prints the help screen to STDOUT.
func (c *ReplicateCommand) Usage() {
fmt.Printf(`
The replicate command starts a server to monitor & replicate databases
specified in your configuration file.
The replicate command starts a server to monitor & replicate databases.
You can specify your database & replicas in a configuration file or you can
replicate a single database file by specifying its path and its replicas in the
command line arguments.

Usage:

litestream replicate [arguments]

litestream replicate [arguments] DB_PATH REPLICA_URL [REPLICA_URL...]

Arguments:

-config PATH
Specifies the configuration file. Defaults to %s
Specifies the configuration file.
Defaults to %s

`[1:], DefaultConfigPath)
-v
Enable verbose logging output.

`[1:], DefaultConfigPath())
}
```
cmd/litestream/restore.go

```go
@@ -7,28 +7,24 @@ import (
"fmt"
"log"
"os"
"path/filepath"
"time"

"github.com/benbjohnson/litestream"
)

type RestoreCommand struct {
DBPath string
}

func NewRestoreCommand() *RestoreCommand {
return &RestoreCommand{}
}
// RestoreCommand represents a command to restore a database from a backup.
type RestoreCommand struct{}

// Run executes the command.
func (c *RestoreCommand) Run(ctx context.Context, args []string) (err error) {
var configPath string
var opt litestream.RestoreOptions
opt := litestream.NewRestoreOptions()
fs := flag.NewFlagSet("litestream-restore", flag.ContinueOnError)
registerConfigFlag(fs, &configPath)
fs.StringVar(&opt.OutputPath, "o", "", "output path")
fs.StringVar(&opt.ReplicaName, "replica", "", "replica name")
fs.StringVar(&opt.Generation, "generation", "", "generation name")
fs.IntVar(&opt.Index, "index", opt.Index, "wal index")
fs.BoolVar(&opt.DryRun, "dry-run", false, "dry run")
timestampStr := fs.String("timestamp", "", "timestamp")
verbose := fs.Bool("v", false, "verbose output")
@@ -36,20 +32,11 @@ func (c *RestoreCommand) Run(ctx context.Context, args []string) (err error) {
if err := fs.Parse(args); err != nil {
return err
} else if fs.NArg() == 0 || fs.Arg(0) == "" {
return fmt.Errorf("database path required")
return fmt.Errorf("database path or replica URL required")
} else if fs.NArg() > 1 {
return fmt.Errorf("too many arguments")
}

// Load configuration.
if configPath == "" {
return errors.New("-config required")
}
config, err := ReadConfigFile(configPath)
if err != nil {
return err
}

// Parse timestamp, if specified.
if *timestampStr != "" {
if opt.Timestamp, err = time.Parse(time.RFC3339, *timestampStr); err != nil {
@@ -67,31 +54,84 @@ func (c *RestoreCommand) Run(ctx context.Context, args []string) (err error) {
opt.Logger = log.New(os.Stderr, "", log.LstdFlags)
}

// Determine absolute path for database, if specified.
if c.DBPath, err = filepath.Abs(fs.Arg(0)); err != nil {
return err
// Determine replica & generation to restore from.
var r litestream.Replica
if isURL(fs.Arg(0)) {
if r, err = c.loadFromURL(ctx, fs.Arg(0), &opt); err != nil {
return err
}
} else if configPath != "" {
if r, err = c.loadFromConfig(ctx, fs.Arg(0), configPath, &opt); err != nil {
return err
}
} else {
return errors.New("config path or replica URL required")
}

// Instantiate DB.
dbConfig := config.DBConfig(c.DBPath)
if dbConfig == nil {
return fmt.Errorf("database not found in config: %s", c.DBPath)
}
db, err := newDBFromConfig(dbConfig)
if err != nil {
return err
// Return an error if no matching targets found.
if opt.Generation == "" {
return fmt.Errorf("no matching backups found")
}

return db.Restore(ctx, opt)
return litestream.RestoreReplica(ctx, r, opt)
}

// loadFromURL creates a replica & updates the restore options from a replica URL.
func (c *RestoreCommand) loadFromURL(ctx context.Context, replicaURL string, opt *litestream.RestoreOptions) (litestream.Replica, error) {
r, err := NewReplicaFromURL(replicaURL)
if err != nil {
return nil, err
}
opt.Generation, _, err = litestream.CalcReplicaRestoreTarget(ctx, r, *opt)
return r, err
}

// loadFromConfig returns a replica & updates the restore options from a DB reference.
func (c *RestoreCommand) loadFromConfig(ctx context.Context, dbPath, configPath string, opt *litestream.RestoreOptions) (litestream.Replica, error) {
// Load configuration.
config, err := ReadConfigFile(configPath)
if err != nil {
return nil, err
}

// Lookup database from configuration file by path.
if dbPath, err = expand(dbPath); err != nil {
return nil, err
}
dbConfig := config.DBConfig(dbPath)
if dbConfig == nil {
return nil, fmt.Errorf("database not found in config: %s", dbPath)
}
db, err := newDBFromConfig(&config, dbConfig)
if err != nil {
return nil, err
}

// Restore into original database path if not specified.
if opt.OutputPath == "" {
opt.OutputPath = dbPath
}

// Determine the appropriate replica & generation to restore from,
r, generation, err := db.CalcRestoreTarget(ctx, *opt)
if err != nil {
return nil, err
}
opt.Generation = generation

return r, nil
}

// Usage prints the help screen to STDOUT.
func (c *RestoreCommand) Usage() {
fmt.Printf(`
The restore command recovers a database from a previous snapshot and WAL.

Usage:

litestream restore [arguments] DB
litestream restore [arguments] DB_PATH

litestream restore [arguments] REPLICA_URL

Arguments:

@@ -107,6 +147,10 @@ Arguments:
Restore from a specific generation.
Defaults to generation with latest data.

-index NUM
Restore up to a specific WAL index (inclusive).
Defaults to use the highest available index.

-timestamp TIMESTAMP
Restore to a specific point-in-time.
Defaults to use the latest available backup.
@@ -141,6 +185,6 @@ Examples:
$ litestream restore -replica s3 -generation xxxxxxxx /path/to/db

`[1:],
DefaultConfigPath,
DefaultConfigPath(),
)
}
```
cmd/litestream/snapshots.go (128 changes, new file)

```go
@@ -0,0 +1,128 @@
package main

import (
	"context"
	"errors"
	"flag"
	"fmt"
	"os"
	"text/tabwriter"
	"time"

	"github.com/benbjohnson/litestream"
)

// SnapshotsCommand represents a command to list snapshots for a command.
type SnapshotsCommand struct{}

// Run executes the command.
func (c *SnapshotsCommand) Run(ctx context.Context, args []string) (err error) {
	var configPath string
	fs := flag.NewFlagSet("litestream-snapshots", flag.ContinueOnError)
	registerConfigFlag(fs, &configPath)
	replicaName := fs.String("replica", "", "replica name")
	fs.Usage = c.Usage
	if err := fs.Parse(args); err != nil {
		return err
	} else if fs.NArg() == 0 || fs.Arg(0) == "" {
		return fmt.Errorf("database path required")
	} else if fs.NArg() > 1 {
		return fmt.Errorf("too many arguments")
	}

	var db *litestream.DB
	var r litestream.Replica
	if isURL(fs.Arg(0)) {
		if r, err = NewReplicaFromURL(fs.Arg(0)); err != nil {
			return err
		}
	} else if configPath != "" {
		// Load configuration.
		config, err := ReadConfigFile(configPath)
		if err != nil {
			return err
		}

		// Lookup database from configuration file by path.
		if path, err := expand(fs.Arg(0)); err != nil {
			return err
		} else if dbc := config.DBConfig(path); dbc == nil {
			return fmt.Errorf("database not found in config: %s", path)
		} else if db, err = newDBFromConfig(&config, dbc); err != nil {
			return err
		}

		// Filter by replica, if specified.
		if *replicaName != "" {
			if r = db.Replica(*replicaName); r == nil {
				return fmt.Errorf("replica %q not found for database %q", *replicaName, db.Path())
			}
		}
	} else {
		return errors.New("config path or replica URL required")
	}

	// Find snapshots by db or replica.
	var infos []*litestream.SnapshotInfo
	if r != nil {
		if infos, err = r.Snapshots(ctx); err != nil {
			return err
		}
	} else {
		if infos, err = db.Snapshots(ctx); err != nil {
			return err
		}
	}

	// List all snapshots.
	w := tabwriter.NewWriter(os.Stdout, 0, 8, 1, '\t', 0)
	fmt.Fprintln(w, "replica\tgeneration\tindex\tsize\tcreated")
	for _, info := range infos {
		fmt.Fprintf(w, "%s\t%s\t%d\t%d\t%s\n",
			info.Replica,
			info.Generation,
			info.Index,
			info.Size,
			info.CreatedAt.Format(time.RFC3339),
		)
	}
	w.Flush()

	return nil
}

// Usage prints the help screen to STDOUT.
func (c *SnapshotsCommand) Usage() {
	fmt.Printf(`
The snapshots command lists all snapshots available for a database or replica.

Usage:

	litestream snapshots [arguments] DB_PATH

	litestream snapshots [arguments] REPLICA_URL

Arguments:

	-config PATH
	    Specifies the configuration file.
	    Defaults to %s

	-replica NAME
	    Optional, filter by a specific replica.

Examples:

	# List all snapshots for a database.
	$ litestream snapshots /path/to/db

	# List all snapshots on S3.
	$ litestream snapshots -replica s3 /path/to/db

	# List all snapshots by replica URL.
	$ litestream snapshots s3://mybkt/db

`[1:],
		DefaultConfigPath(),
	)
}
```
cmd/litestream/version.go

```go
@@ -6,8 +6,10 @@ import (
"fmt"
)

// VersionCommand represents a command to print the current version.
type VersionCommand struct{}

// Run executes the command.
func (c *VersionCommand) Run(ctx context.Context, args []string) (err error) {
fs := flag.NewFlagSet("litestream-version", flag.ContinueOnError)
fs.Usage = c.Usage
@@ -20,6 +22,7 @@ func (c *VersionCommand) Run(ctx context.Context, args []string) (err error) {
return nil
}

// Usage prints the help screen to STDOUT.
func (c *VersionCommand) Usage() {
fmt.Println(`
Prints the version.
```
137
cmd/litestream/wal.go
Normal file
137
cmd/litestream/wal.go
Normal file
@@ -0,0 +1,137 @@
package main

import (
	"context"
	"errors"
	"flag"
	"fmt"
	"os"
	"text/tabwriter"
	"time"

	"github.com/benbjohnson/litestream"
)

// WALCommand represents a command to list WAL files for a database.
type WALCommand struct{}

// Run executes the command.
func (c *WALCommand) Run(ctx context.Context, args []string) (err error) {
	var configPath string
	fs := flag.NewFlagSet("litestream-wal", flag.ContinueOnError)
	registerConfigFlag(fs, &configPath)
	replicaName := fs.String("replica", "", "replica name")
	generation := fs.String("generation", "", "generation name")
	fs.Usage = c.Usage
	if err := fs.Parse(args); err != nil {
		return err
	} else if fs.NArg() == 0 || fs.Arg(0) == "" {
		return fmt.Errorf("database path required")
	} else if fs.NArg() > 1 {
		return fmt.Errorf("too many arguments")
	}

	var db *litestream.DB
	var r litestream.Replica
	if isURL(fs.Arg(0)) {
		if r, err = NewReplicaFromURL(fs.Arg(0)); err != nil {
			return err
		}
	} else if configPath != "" {
		// Load configuration.
		config, err := ReadConfigFile(configPath)
		if err != nil {
			return err
		}

		// Lookup database from configuration file by path.
		if path, err := expand(fs.Arg(0)); err != nil {
			return err
		} else if dbc := config.DBConfig(path); dbc == nil {
			return fmt.Errorf("database not found in config: %s", path)
		} else if db, err = newDBFromConfig(&config, dbc); err != nil {
			return err
		}

		// Filter by replica, if specified.
		if *replicaName != "" {
			if r = db.Replica(*replicaName); r == nil {
				return fmt.Errorf("replica %q not found for database %q", *replicaName, db.Path())
			}
		}
	} else {
		return errors.New("config path or replica URL required")
	}

	// Find WAL files by db or replica.
	var infos []*litestream.WALInfo
	if r != nil {
		if infos, err = r.WALs(ctx); err != nil {
			return err
		}
	} else {
		if infos, err = db.WALs(ctx); err != nil {
			return err
		}
	}

	// List all WAL files.
	w := tabwriter.NewWriter(os.Stdout, 0, 8, 1, '\t', 0)
	fmt.Fprintln(w, "replica\tgeneration\tindex\toffset\tsize\tcreated")
	for _, info := range infos {
		if *generation != "" && info.Generation != *generation {
			continue
		}

		fmt.Fprintf(w, "%s\t%s\t%d\t%d\t%d\t%s\n",
			info.Replica,
			info.Generation,
			info.Index,
			info.Offset,
			info.Size,
			info.CreatedAt.Format(time.RFC3339),
		)
	}
	w.Flush()

	return nil
}

// Usage prints the help screen to STDOUT.
func (c *WALCommand) Usage() {
	fmt.Printf(`
The wal command lists all wal files available for a database.

Usage:

	litestream wal [arguments] DB_PATH

	litestream wal [arguments] REPLICA_URL

Arguments:

	-config PATH
	    Specifies the configuration file.
	    Defaults to %s

	-replica NAME
	    Optional, filter by a specific replica.

	-generation NAME
	    Optional, filter by a specific generation.

Examples:

	# List all WAL files for a database.
	$ litestream wal /path/to/db

	# List all WAL files on S3 for a specific generation.
	$ litestream wal -replica s3 -generation xxxxxxxx /path/to/db

	# List all WAL files for replica URL.
	$ litestream wal s3://mybkt/db

`[1:],
		DefaultConfigPath(),
	)
}
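The same []*litestream.WALInfo slice lends itself to simple post-processing. A minimal sketch, assuming only the Generation and Size fields used above, that sums WAL bytes per generation (the int64 conversion keeps it compiling whether Size is int or int64):

```go
package main

import "github.com/benbjohnson/litestream"

// walSizeByGeneration sums WAL bytes per generation from the slice the
// wal command lists above.
func walSizeByGeneration(infos []*litestream.WALInfo) map[string]int64 {
	sizes := make(map[string]int64)
	for _, info := range infos {
		sizes[info.Generation] += int64(info.Size)
	}
	return sizes
}
```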
647
db_test.go
Normal file
@@ -0,0 +1,647 @@
package litestream_test

import (
	"database/sql"
	"io/ioutil"
	"os"
	"path/filepath"
	"testing"
	"time"

	"github.com/benbjohnson/litestream"
)

func TestDB_Path(t *testing.T) {
	db := litestream.NewDB("/tmp/db")
	if got, want := db.Path(), `/tmp/db`; got != want {
		t.Fatalf("Path()=%v, want %v", got, want)
	}
}

func TestDB_WALPath(t *testing.T) {
	db := litestream.NewDB("/tmp/db")
	if got, want := db.WALPath(), `/tmp/db-wal`; got != want {
		t.Fatalf("WALPath()=%v, want %v", got, want)
	}
}

func TestDB_MetaPath(t *testing.T) {
	t.Run("Absolute", func(t *testing.T) {
		db := litestream.NewDB("/tmp/db")
		if got, want := db.MetaPath(), `/tmp/.db-litestream`; got != want {
			t.Fatalf("MetaPath()=%v, want %v", got, want)
		}
	})
	t.Run("Relative", func(t *testing.T) {
		db := litestream.NewDB("db")
		if got, want := db.MetaPath(), `.db-litestream`; got != want {
			t.Fatalf("MetaPath()=%v, want %v", got, want)
		}
	})
}

func TestDB_GenerationNamePath(t *testing.T) {
	db := litestream.NewDB("/tmp/db")
	if got, want := db.GenerationNamePath(), `/tmp/.db-litestream/generation`; got != want {
		t.Fatalf("GenerationNamePath()=%v, want %v", got, want)
	}
}

func TestDB_GenerationPath(t *testing.T) {
	db := litestream.NewDB("/tmp/db")
	if got, want := db.GenerationPath("xxxx"), `/tmp/.db-litestream/generations/xxxx`; got != want {
		t.Fatalf("GenerationPath()=%v, want %v", got, want)
	}
}

func TestDB_ShadowWALDir(t *testing.T) {
	db := litestream.NewDB("/tmp/db")
	if got, want := db.ShadowWALDir("xxxx"), `/tmp/.db-litestream/generations/xxxx/wal`; got != want {
		t.Fatalf("ShadowWALDir()=%v, want %v", got, want)
	}
}

func TestDB_ShadowWALPath(t *testing.T) {
	db := litestream.NewDB("/tmp/db")
	if got, want := db.ShadowWALPath("xxxx", 1000), `/tmp/.db-litestream/generations/xxxx/wal/000003e8.wal`; got != want {
		t.Fatalf("ShadowWALPath()=%v, want %v", got, want)
	}
}

// Ensure we can check the last modified time of the real database and its WAL.
func TestDB_UpdatedAt(t *testing.T) {
	t.Run("ErrNotExist", func(t *testing.T) {
		db := MustOpenDB(t)
		defer MustCloseDB(t, db)
		if _, err := db.UpdatedAt(); !os.IsNotExist(err) {
			t.Fatalf("unexpected error: %#v", err)
		}
	})

	t.Run("DB", func(t *testing.T) {
		db, sqldb := MustOpenDBs(t)
		defer MustCloseDBs(t, db, sqldb)

		if t0, err := db.UpdatedAt(); err != nil {
			t.Fatal(err)
		} else if time.Since(t0) > 10*time.Second {
			t.Fatalf("unexpected updated at time: %s", t0)
		}
	})

	t.Run("WAL", func(t *testing.T) {
		db, sqldb := MustOpenDBs(t)
		defer MustCloseDBs(t, db, sqldb)

		t0, err := db.UpdatedAt()
		if err != nil {
			t.Fatal(err)
		}

		if os.Getenv("CI") != "" {
			time.Sleep(1 * time.Second)
		}
		if _, err := sqldb.Exec(`CREATE TABLE t (id INT);`); err != nil {
			t.Fatal(err)
		}

		if t1, err := db.UpdatedAt(); err != nil {
			t.Fatal(err)
		} else if !t1.After(t0) {
			t.Fatalf("expected newer updated at time: %s > %s", t1, t0)
		}
	})
}

// Ensure we can compute a checksum on the real database.
func TestDB_CRC64(t *testing.T) {
	t.Run("ErrNotExist", func(t *testing.T) {
		db := MustOpenDB(t)
		defer MustCloseDB(t, db)
		if _, _, err := db.CRC64(); !os.IsNotExist(err) {
			t.Fatalf("unexpected error: %#v", err)
		}
	})

	t.Run("DB", func(t *testing.T) {
		db, sqldb := MustOpenDBs(t)
		defer MustCloseDBs(t, db, sqldb)

		if err := db.Sync(); err != nil {
			t.Fatal(err)
		}

		chksum0, _, err := db.CRC64()
		if err != nil {
			t.Fatal(err)
		}

		// Issue change that is applied to the WAL. Checksum should not change.
		if _, err := sqldb.Exec(`CREATE TABLE t (id INT);`); err != nil {
			t.Fatal(err)
		} else if chksum1, _, err := db.CRC64(); err != nil {
			t.Fatal(err)
		} else if chksum0 == chksum1 {
			t.Fatal("expected different checksum event after WAL change")
		}

		// Checkpoint change into database. Checksum should change.
		if _, err := sqldb.Exec(`PRAGMA wal_checkpoint(TRUNCATE);`); err != nil {
			t.Fatal(err)
		}

		if chksum2, _, err := db.CRC64(); err != nil {
			t.Fatal(err)
		} else if chksum0 == chksum2 {
			t.Fatal("expected different checksums after checkpoint")
		}
	})
}

// Ensure we can sync the real WAL to the shadow WAL.
func TestDB_Sync(t *testing.T) {
	// Ensure sync is skipped if no database exists.
	t.Run("NoDB", func(t *testing.T) {
		db := MustOpenDB(t)
		defer MustCloseDB(t, db)
		if err := db.Sync(); err != nil {
			t.Fatal(err)
		}
	})

	// Ensure sync can successfully run on the initial sync.
	t.Run("Initial", func(t *testing.T) {
		db, sqldb := MustOpenDBs(t)
		defer MustCloseDBs(t, db, sqldb)

		if err := db.Sync(); err != nil {
			t.Fatal(err)
		}

		// Verify page size is now available.
		if db.PageSize() == 0 {
			t.Fatal("expected page size after initial sync")
		}

		// Obtain real WAL size.
		fi, err := os.Stat(db.WALPath())
		if err != nil {
			t.Fatal(err)
		}

		// Ensure position now available.
		if pos, err := db.Pos(); err != nil {
			t.Fatal(err)
		} else if pos.Generation == "" {
			t.Fatal("expected generation")
		} else if got, want := pos.Index, 0; got != want {
			t.Fatalf("pos.Index=%v, want %v", got, want)
		} else if got, want := pos.Offset, fi.Size(); got != want {
			t.Fatalf("pos.Offset=%v, want %v", got, want)
		}
	})

	// Ensure DB can keep in sync across multiple Sync() invocations.
	t.Run("MultiSync", func(t *testing.T) {
		db, sqldb := MustOpenDBs(t)
		defer MustCloseDBs(t, db, sqldb)

		// Execute a query to force a write to the WAL.
		if _, err := sqldb.Exec(`CREATE TABLE foo (bar TEXT);`); err != nil {
			t.Fatal(err)
		}

		// Perform initial sync & grab initial position.
		if err := db.Sync(); err != nil {
			t.Fatal(err)
		}

		pos0, err := db.Pos()
		if err != nil {
			t.Fatal(err)
		}

		// Insert into table.
		if _, err := sqldb.Exec(`INSERT INTO foo (bar) VALUES ('baz');`); err != nil {
			t.Fatal(err)
		}

		// Sync to ensure position moves forward one page.
		if err := db.Sync(); err != nil {
			t.Fatal(err)
		} else if pos1, err := db.Pos(); err != nil {
			t.Fatal(err)
		} else if pos0.Generation != pos1.Generation {
			t.Fatal("expected the same generation")
		} else if got, want := pos1.Index, pos0.Index; got != want {
			t.Fatalf("Index=%v, want %v", got, want)
		} else if got, want := pos1.Offset, pos0.Offset+4096+litestream.WALFrameHeaderSize; got != want {
			t.Fatalf("Offset=%v, want %v", got, want)
		}
	})

	// Ensure a WAL file is created if one does not already exist.
	t.Run("NoWAL", func(t *testing.T) {
		db, sqldb := MustOpenDBs(t)
		defer MustCloseDBs(t, db, sqldb)

		// Issue initial sync and truncate WAL.
		if err := db.Sync(); err != nil {
			t.Fatal(err)
		}

		// Obtain initial position.
		pos0, err := db.Pos()
		if err != nil {
			t.Fatal(err)
		}

		// Checkpoint & fully close which should close WAL file.
		if err := db.Checkpoint(litestream.CheckpointModeTruncate); err != nil {
			t.Fatal(err)
		} else if err := db.Close(); err != nil {
			t.Fatal(err)
		} else if err := sqldb.Close(); err != nil {
			t.Fatal(err)
		}

		// Verify WAL does not exist.
		if _, err := os.Stat(db.WALPath()); !os.IsNotExist(err) {
			t.Fatal(err)
		}

		// Reopen the managed database.
		db = MustOpenDBAt(t, db.Path())
		defer MustCloseDB(t, db)

		// Re-sync and ensure new generation has been created.
		if err := db.Sync(); err != nil {
			t.Fatal(err)
		}

		// Obtain initial position.
		if pos1, err := db.Pos(); err != nil {
			t.Fatal(err)
		} else if pos0.Generation == pos1.Generation {
			t.Fatal("expected new generation after truncation")
		}
	})

	// Ensure DB can start new generation if it detects it cannot verify last position.
	t.Run("OverwritePrevPosition", func(t *testing.T) {
		db, sqldb := MustOpenDBs(t)
		defer MustCloseDBs(t, db, sqldb)

		// Execute a query to force a write to the WAL.
		if _, err := sqldb.Exec(`CREATE TABLE foo (bar TEXT);`); err != nil {
			t.Fatal(err)
		}

		// Issue initial sync and truncate WAL.
		if err := db.Sync(); err != nil {
			t.Fatal(err)
		}

		// Obtain initial position.
		pos0, err := db.Pos()
		if err != nil {
			t.Fatal(err)
		}

		// Fully close which should close WAL file.
		if err := db.Close(); err != nil {
			t.Fatal(err)
		} else if err := sqldb.Close(); err != nil {
			t.Fatal(err)
		}

		// Verify WAL does not exist.
		if _, err := os.Stat(db.WALPath()); !os.IsNotExist(err) {
			t.Fatal(err)
		}

		// Insert into table multiple times to move past old offset
		sqldb = MustOpenSQLDB(t, db.Path())
		defer MustCloseSQLDB(t, sqldb)
		for i := 0; i < 100; i++ {
			if _, err := sqldb.Exec(`INSERT INTO foo (bar) VALUES ('baz');`); err != nil {
				t.Fatal(err)
			}
		}

		// Reopen the managed database.
		db = MustOpenDBAt(t, db.Path())
		defer MustCloseDB(t, db)

		// Re-sync and ensure new generation has been created.
		if err := db.Sync(); err != nil {
			t.Fatal(err)
		}

		// Obtain initial position.
		if pos1, err := db.Pos(); err != nil {
			t.Fatal(err)
		} else if pos0.Generation == pos1.Generation {
			t.Fatal("expected new generation after truncation")
		}
	})

	// Ensure DB can handle a mismatched header-only and start new generation.
	t.Run("WALHeaderMismatch", func(t *testing.T) {
		db, sqldb := MustOpenDBs(t)
		defer MustCloseDBs(t, db, sqldb)

		// Execute a query to force a write to the WAL and then sync.
		if _, err := sqldb.Exec(`CREATE TABLE foo (bar TEXT);`); err != nil {
			t.Fatal(err)
		} else if err := db.Sync(); err != nil {
			t.Fatal(err)
		}

		// Grab initial position & close.
		pos0, err := db.Pos()
		if err != nil {
			t.Fatal(err)
		} else if err := db.Close(); err != nil {
			t.Fatal(err)
		}

		// Read existing file, update header checksum, and write back only header
		// to simulate a header with a mismatched checksum.
		shadowWALPath := db.ShadowWALPath(pos0.Generation, pos0.Index)
		if buf, err := ioutil.ReadFile(shadowWALPath); err != nil {
			t.Fatal(err)
		} else if err := ioutil.WriteFile(shadowWALPath, append(buf[:litestream.WALHeaderSize-8], 0, 0, 0, 0, 0, 0, 0, 0), 0600); err != nil {
			t.Fatal(err)
		}

		// Reopen managed database & ensure sync will still work.
		db = MustOpenDBAt(t, db.Path())
		defer MustCloseDB(t, db)
		if err := db.Sync(); err != nil {
			t.Fatal(err)
		}

		// Verify a new generation was started.
		if pos1, err := db.Pos(); err != nil {
			t.Fatal(err)
		} else if pos0.Generation == pos1.Generation {
			t.Fatal("expected new generation")
		}
	})

	// Ensure DB can handle partial shadow WAL header write.
	t.Run("PartialShadowWALHeader", func(t *testing.T) {
		db, sqldb := MustOpenDBs(t)
		defer MustCloseDBs(t, db, sqldb)

		// Execute a query to force a write to the WAL and then sync.
		if _, err := sqldb.Exec(`CREATE TABLE foo (bar TEXT);`); err != nil {
			t.Fatal(err)
		} else if err := db.Sync(); err != nil {
			t.Fatal(err)
		}

		pos0, err := db.Pos()
		if err != nil {
			t.Fatal(err)
		}

		// Close & truncate shadow WAL to simulate a partial header write.
		if err := db.Close(); err != nil {
			t.Fatal(err)
		} else if err := os.Truncate(db.ShadowWALPath(pos0.Generation, pos0.Index), litestream.WALHeaderSize-1); err != nil {
			t.Fatal(err)
		}

		// Reopen managed database & ensure sync will still work.
		db = MustOpenDBAt(t, db.Path())
		defer MustCloseDB(t, db)
		if err := db.Sync(); err != nil {
			t.Fatal(err)
		}

		// Verify a new generation was started.
		if pos1, err := db.Pos(); err != nil {
			t.Fatal(err)
		} else if pos0.Generation == pos1.Generation {
			t.Fatal("expected new generation")
		}
	})

	// Ensure DB can handle partial shadow WAL writes.
	t.Run("PartialShadowWALFrame", func(t *testing.T) {
		db, sqldb := MustOpenDBs(t)
		defer MustCloseDBs(t, db, sqldb)

		// Execute a query to force a write to the WAL and then sync.
		if _, err := sqldb.Exec(`CREATE TABLE foo (bar TEXT);`); err != nil {
			t.Fatal(err)
		} else if err := db.Sync(); err != nil {
			t.Fatal(err)
		}

		pos0, err := db.Pos()
		if err != nil {
			t.Fatal(err)
		}

		// Obtain current shadow WAL size.
		fi, err := os.Stat(db.ShadowWALPath(pos0.Generation, pos0.Index))
		if err != nil {
			t.Fatal(err)
		}

		// Close & truncate shadow WAL to simulate a partial frame write.
		if err := db.Close(); err != nil {
			t.Fatal(err)
		} else if err := os.Truncate(db.ShadowWALPath(pos0.Generation, pos0.Index), fi.Size()-1); err != nil {
			t.Fatal(err)
		}

		// Reopen managed database & ensure sync will still work.
		db = MustOpenDBAt(t, db.Path())
		defer MustCloseDB(t, db)
		if err := db.Sync(); err != nil {
			t.Fatal(err)
		}

		// Verify same generation is kept.
		if pos1, err := db.Pos(); err != nil {
			t.Fatal(err)
		} else if got, want := pos1, pos0; got != want {
			t.Fatalf("Pos()=%s want %s", got, want)
		}

		// Ensure shadow WAL has recovered.
		if fi0, err := os.Stat(db.ShadowWALPath(pos0.Generation, pos0.Index)); err != nil {
			t.Fatal(err)
		} else if got, want := fi0.Size(), fi.Size(); got != want {
			t.Fatalf("Size()=%v, want %v", got, want)
		}
	})

	// Ensure DB can handle a generation directory with a missing shadow WAL.
	t.Run("NoShadowWAL", func(t *testing.T) {
		db, sqldb := MustOpenDBs(t)
		defer MustCloseDBs(t, db, sqldb)

		// Execute a query to force a write to the WAL and then sync.
		if _, err := sqldb.Exec(`CREATE TABLE foo (bar TEXT);`); err != nil {
			t.Fatal(err)
		} else if err := db.Sync(); err != nil {
			t.Fatal(err)
		}

		pos0, err := db.Pos()
		if err != nil {
			t.Fatal(err)
		}

		// Close & delete shadow WAL to simulate dir created but not WAL.
		if err := db.Close(); err != nil {
			t.Fatal(err)
		} else if err := os.Remove(db.ShadowWALPath(pos0.Generation, pos0.Index)); err != nil {
			t.Fatal(err)
		}

		// Reopen managed database & ensure sync will still work.
		db = MustOpenDBAt(t, db.Path())
		defer MustCloseDB(t, db)
		if err := db.Sync(); err != nil {
			t.Fatal(err)
		}

		// Verify new generation created but index/offset the same.
		if pos1, err := db.Pos(); err != nil {
			t.Fatal(err)
		} else if pos0.Generation == pos1.Generation {
			t.Fatal("expected new generation")
		} else if got, want := pos1.Index, pos0.Index; got != want {
			t.Fatalf("Index=%v want %v", got, want)
		} else if got, want := pos1.Offset, pos0.Offset; got != want {
			t.Fatalf("Offset=%v want %v", got, want)
		}
	})

	// Ensure DB checkpoints after minimum number of pages.
	t.Run("MinCheckpointPageN", func(t *testing.T) {
		db, sqldb := MustOpenDBs(t)
		defer MustCloseDBs(t, db, sqldb)

		// Execute a query to force a write to the WAL and then sync.
		if _, err := sqldb.Exec(`CREATE TABLE foo (bar TEXT);`); err != nil {
			t.Fatal(err)
		} else if err := db.Sync(); err != nil {
			t.Fatal(err)
		}

		// Write at least minimum number of pages to trigger rollover.
		for i := 0; i < db.MinCheckpointPageN; i++ {
			if _, err := sqldb.Exec(`INSERT INTO foo (bar) VALUES ('baz');`); err != nil {
				t.Fatal(err)
			}
		}

		// Sync to shadow WAL.
		if err := db.Sync(); err != nil {
			t.Fatal(err)
		}

		// Ensure position is now on the second index.
		if pos, err := db.Pos(); err != nil {
			t.Fatal(err)
		} else if got, want := pos.Index, 1; got != want {
			t.Fatalf("Index=%v, want %v", got, want)
		}
	})

	// Ensure DB checkpoints after interval.
	t.Run("CheckpointInterval", func(t *testing.T) {
		db, sqldb := MustOpenDBs(t)
		defer MustCloseDBs(t, db, sqldb)

		// Execute a query to force a write to the WAL and then sync.
		if _, err := sqldb.Exec(`CREATE TABLE foo (bar TEXT);`); err != nil {
			t.Fatal(err)
		} else if err := db.Sync(); err != nil {
			t.Fatal(err)
		}

		// Reduce checkpoint interval to ensure a rollover is triggered.
		db.CheckpointInterval = 1 * time.Nanosecond

		// Write to WAL & sync.
		if _, err := sqldb.Exec(`INSERT INTO foo (bar) VALUES ('baz');`); err != nil {
			t.Fatal(err)
		} else if err := db.Sync(); err != nil {
			t.Fatal(err)
		}

		// Ensure position is now on the second index.
		if pos, err := db.Pos(); err != nil {
			t.Fatal(err)
		} else if got, want := pos.Index, 1; got != want {
			t.Fatalf("Index=%v, want %v", got, want)
		}
	})
}

// MustOpenDBs returns a new instance of a DB & associated SQL DB.
func MustOpenDBs(tb testing.TB) (*litestream.DB, *sql.DB) {
	db := MustOpenDB(tb)
	return db, MustOpenSQLDB(tb, db.Path())
}

// MustCloseDBs closes db & sqldb and removes the parent directory.
func MustCloseDBs(tb testing.TB, db *litestream.DB, sqldb *sql.DB) {
	MustCloseDB(tb, db)
	MustCloseSQLDB(tb, sqldb)
}

// MustOpenDB returns a new instance of a DB.
func MustOpenDB(tb testing.TB) *litestream.DB {
	dir := tb.TempDir()
	return MustOpenDBAt(tb, filepath.Join(dir, "db"))
}

// MustOpenDBAt returns a new instance of a DB for a given path.
func MustOpenDBAt(tb testing.TB, path string) *litestream.DB {
	tb.Helper()
	db := litestream.NewDB(path)
	db.MonitorInterval = 0 // disable background goroutine
	if err := db.Open(); err != nil {
		tb.Fatal(err)
	}
	return db
}

// MustCloseDB closes db and removes its parent directory.
func MustCloseDB(tb testing.TB, db *litestream.DB) {
	tb.Helper()
	if err := db.Close(); err != nil {
		tb.Fatal(err)
	} else if err := os.RemoveAll(filepath.Dir(db.Path())); err != nil {
		tb.Fatal(err)
	}
}

// MustOpenSQLDB returns a database/sql DB.
func MustOpenSQLDB(tb testing.TB, path string) *sql.DB {
	tb.Helper()
	d, err := sql.Open("sqlite3", path)
	if err != nil {
		tb.Fatal(err)
	} else if _, err := d.Exec(`PRAGMA journal_mode = wal;`); err != nil {
		tb.Fatal(err)
	}
	return d
}

// MustCloseSQLDB closes a database/sql DB.
func MustCloseSQLDB(tb testing.TB, d *sql.DB) {
	tb.Helper()
	if err := d.Close(); err != nil {
		tb.Fatal(err)
	}
}
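The helpers above call sql.Open("sqlite3", path), which only works if a driver registered under that name is linked in; db_test.go itself does not import one, so presumably another file in the litestream_test package carries the blank import of github.com/mattn/go-sqlite3 that go.mod already lists. A minimal sketch of such a file (name and placement are assumptions):

```go
package litestream_test

// Registers the "sqlite3" driver that MustOpenSQLDB relies on.
import _ "github.com/mattn/go-sqlite3"
```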
@@ -1,88 +0,0 @@
DESIGN
======

Litestream is a sidecar process that replicates the write-ahead log (WAL) for
a SQLite database. To ensure that it can replicate every page, litestream takes
control over the checkpointing process by issuing a long-running read
transaction against the database to prevent checkpointing. It then releases
this transaction once it obtains a write lock and issues the checkpoint itself.

The daemon polls the database on an interval to briefly obtain a write
transaction lock and copy over new WAL pages. Once the WAL has reached a
threshold size, litestream will issue a checkpoint and a single page write
to a table called `_litestream` to start the new WAL.

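As an illustration of the trick described above (not litestream's actual code), a minimal sketch that holds a read transaction so SQLite cannot reset the WAL past it, then releases the reader and checkpoints explicitly with the same PRAGMA used in db_test.go above; the database path is a placeholder and the driver import is an assumption:

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/mattn/go-sqlite3" // driver assumed; go.mod already requires it
)

func main() {
	db, err := sql.Open("sqlite3", "/path/to/db") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Hold a read transaction: while this reader exists, a checkpoint cannot
	// reset the WAL, so every page remains available for copying.
	tx, err := db.Begin()
	if err != nil {
		log.Fatal(err)
	}
	var n int
	if err := tx.QueryRow(`SELECT COUNT(1) FROM sqlite_master`).Scan(&n); err != nil {
		log.Fatal(err)
	}

	// ... copy newly written WAL pages to the shadow WAL here ...

	// Release the read lock, then checkpoint explicitly so SQLite starts a
	// fresh WAL. (Litestream additionally coordinates this with a write lock,
	// which is omitted in this sketch.)
	if err := tx.Rollback(); err != nil {
		log.Fatal(err)
	}
	if _, err := db.Exec(`PRAGMA wal_checkpoint(TRUNCATE);`); err != nil {
		log.Fatal(err)
	}
}
```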
## Workflow

When litestream first loads a database, it checks if there is an existing
sidecar directory, which is named `.<DB>-litestream`. If not, it initializes
the directory and starts a new generation.

A generation is a snapshot of the database followed by a continuous stream of
WAL files. A new generation is started on initialization & whenever litestream
cannot verify that it has a continuous record of WAL files. This could happen
if litestream is stopped and another process checkpoints the WAL. In this case,
a new generation ID is randomly created and a snapshot is replicated to the
appropriate destinations.

Generations also prevent two servers from replicating to the same destination
and corrupting each other's data. In this case, each server would replicate
to a different generation directory. On recovery, there will be duplicate
databases and the end user can choose which generation to recover, but each
database will be uncorrupted.

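The current generation name is stored in the `generation` file inside the sidecar directory (see the layout below and TestDB_GenerationNamePath above). A minimal sketch of reading it; the path derivation and the trailing-whitespace trim are assumptions:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"path/filepath"
	"strings"
)

// currentGeneration reads the generation name for the database at dbPath,
// e.g. /tmp/db -> /tmp/.db-litestream/generation.
func currentGeneration(dbPath string) (string, error) {
	dir, file := filepath.Split(dbPath)
	namePath := filepath.Join(dir, "."+file+"-litestream", "generation")
	buf, err := ioutil.ReadFile(namePath)
	if err != nil {
		return "", err
	}
	return strings.TrimSpace(string(buf)), nil
}

func main() {
	gen, err := currentGeneration("/path/to/db") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("current generation:", gen)
}
```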
## File Layout

Litestream maintains a shadow WAL which is a historical record of all previous
WAL files. These files can be deleted after a time or size threshold but should
be replicated before being deleted.

### Local

Given a database file named `db`, SQLite will create a WAL file called `db-wal`.
Litestream will then create a hidden directory called `.db-litestream` that
contains the historical record of all WAL files for the current generation.

```
db                              # SQLite database
db-wal                          # SQLite WAL
.db-litestream/
  generation                    # current generation number
  generations/
    xxxxxxxx/
      wal/                      # WAL files
        000000000000001.wal
        000000000000002.wal
        000000000000003.wal     # active WAL
```

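The WAL names in the sketch above are illustrative; the tests earlier in this diff (TestDB_ShadowWALDir, TestDB_ShadowWALPath) show the implementation naming shadow WAL files by index as eight hex digits under `generations/<generation>/wal/`. A small sketch derived from those test expectations, not from the package's own helper:

```go
package main

import (
	"fmt"
	"path/filepath"
)

// shadowWALPath mirrors the expectations in TestDB_ShadowWALPath:
// <meta>/generations/<generation>/wal/<index as 8 hex digits>.wal
func shadowWALPath(metaDir, generation string, index int) string {
	return filepath.Join(metaDir, "generations", generation, "wal",
		fmt.Sprintf("%08x.wal", index))
}

func main() {
	// "/tmp/.db-litestream" and "xxxx" mirror the fixture values in the tests.
	fmt.Println(shadowWALPath("/tmp/.db-litestream", "xxxx", 1000))
	// Output: /tmp/.db-litestream/generations/xxxx/wal/000003e8.wal
}
```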
### Remote (S3)

```
bkt/
  db/                                # database path
    generations/
      xxxxxxxx/
        snapshots/                   # snapshots w/ timestamp+offset
          20000101T000000Z-000000000000023.snapshot
        wal/                         # compressed WAL files
          000000000000001-0.wal.gz
          000000000000001-<offset>.wal.gz
          000000000000002-0.wal.gz
    00000002/
      snapshot/
        000000000000000.snapshot
      scheduled/
        daily/
          20000101T000000Z-000000000000023.snapshot
          20000102T000000Z-000000000000036.snapshot
        monthly/
          20000101T000000Z-000000000000023.snapshot

      wal/
        000000000000001.wal.gz
```

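A sketch of composing a remote WAL object key following the first layout variant above. The 15-digit zero padding and the `.wal.gz` suffix are read off the example names in this (removed) design note and are assumptions; they may not match the actual replicator, which per go.mod pulls in LZ4 rather than gzip:

```go
package main

import (
	"fmt"
	"path"
)

// walKey builds <db>/generations/<generation>/wal/<index>-<offset>.wal.gz,
// as sketched in the layout above.
func walKey(dbPrefix, generation string, index, offset int64) string {
	return path.Join(dbPrefix, "generations", generation, "wal",
		fmt.Sprintf("%015d-%d.wal.gz", index, offset))
}

func main() {
	fmt.Println(walKey("db", "xxxxxxxx", 1, 0))
	// db/generations/xxxxxxxx/wal/000000000000001-0.wal.gz
}
```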
15
etc/gon.hcl
Normal file
@@ -0,0 +1,15 @@
source = ["./dist/litestream"]
bundle_id = "com.middlemost.litestream"

apple_id {
  username = "benbjohnson@yahoo.com"
  password = "@env:AC_PASSWORD"
}

sign {
  application_identity = "Developer ID Application: Middlemost Systems, LLC"
}

zip {
  output_path = "dist/litestream.zip"
}
9
etc/litestream.service
Normal file
@@ -0,0 +1,9 @@
[Unit]
Description=Litestream

[Service]
Restart=always
ExecStart=/usr/bin/litestream replicate

[Install]
WantedBy=multi-user.target
10
etc/litestream.yml
Normal file
@@ -0,0 +1,10 @@
# AWS credentials
# access-key-id: AKIAxxxxxxxxxxxxxxxx
# secret-access-key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx/xxxxxxxxx

# dbs:
#   - path: /path/to/primary/db        # Database to replicate from
#     replicas:
#       - path: /path/to/replica       # File-based replication
#       - path: s3://my.bucket.com/db  # S3-based replication

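A sketch of how a command in cmd/litestream resolves a *litestream.DB from this configuration, mirroring the snapshots and wal commands above. It reuses ReadConfigFile, Config.DBConfig, expand and newDBFromConfig exactly as they appear there, so it only compiles inside that package, and the function name itself is illustrative:

```go
// openDBFromConfig resolves a managed database from the configuration file,
// the same way the snapshots/wal commands do.
func openDBFromConfig(configPath, dbPath string) (*litestream.DB, error) {
	config, err := ReadConfigFile(configPath)
	if err != nil {
		return nil, err
	}

	path, err := expand(dbPath)
	if err != nil {
		return nil, err
	}

	dbc := config.DBConfig(path)
	if dbc == nil {
		return nil, fmt.Errorf("database not found in config: %s", path)
	}
	return newDBFromConfig(&config, dbc)
}
```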
19
etc/nfpm.yml
Normal file
@@ -0,0 +1,19 @@
name: litestream
arch: amd64
platform: linux
version: "${LITESTREAM_VERSION}"
section: "default"
priority: "extra"
maintainer: "Ben Johnson <benbjohnson@yahoo.com>"
description: Litestream is a tool for real-time replication of SQLite databases.
homepage: "https://github.com/benbjohnson/litestream"
license: "GPLv3"
contents:
  - src: ./litestream
    dst: /usr/bin/litestream
  - src: ./litestream.yml
    dst: /etc/litestream.yml
    type: config
  - src: ./litestream.service
    dst: /usr/lib/systemd/system/litestream.service
    type: config
5
go.mod
@@ -3,7 +3,10 @@ module github.com/benbjohnson/litestream
go 1.15

require (
	github.com/aws/aws-sdk-go v1.27.0
	github.com/davecgh/go-spew v1.1.1
	github.com/mattn/go-sqlite3 v1.14.5
	github.com/pelletier/go-toml v1.8.1
	github.com/pierrec/lz4/v4 v4.1.3
	github.com/prometheus/client_golang v1.9.0
	gopkg.in/yaml.v2 v2.4.0
)

404
go.sum
@@ -1,38 +1,410 @@
|
||||
github.com/Julusian/godocdown v0.0.0-20170816220326-6d19f8ff2df8/go.mod h1:INZr5t32rG59/5xeltqoCJoNY7e5x/3xoY9WSWVWg74=
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
|
||||
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
|
||||
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
|
||||
github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
|
||||
github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
|
||||
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
|
||||
github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
|
||||
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
|
||||
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
|
||||
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
|
||||
github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A=
|
||||
github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=
|
||||
github.com/aws/aws-sdk-go v1.27.0 h1:0xphMHGMLBrPMfxR2AmVjZKcMEESEgWF8Kru94BNByk=
|
||||
github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
|
||||
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
|
||||
github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=
|
||||
github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
|
||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
|
||||
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
|
||||
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||
github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/dvyukov/go-fuzz v0.0.0-20200318091601-be3528f3a813/go.mod h1:11Gm+ccJnvAhCNLlf5+cS9KjtbaD5I5zaZpFMsTHWTw=
|
||||
github.com/elazarl/go-bindata-assetfs v1.0.0/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4=
|
||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
|
||||
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
|
||||
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
|
||||
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
|
||||
github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
|
||||
github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
|
||||
github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4=
|
||||
github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
||||
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
|
||||
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
||||
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
||||
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
|
||||
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
|
||||
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
||||
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM=
|
||||
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
|
||||
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
|
||||
github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
|
||||
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
|
||||
github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=
|
||||
github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
|
||||
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
|
||||
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
|
||||
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
|
||||
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
|
||||
github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
|
||||
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
|
||||
github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
|
||||
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||
github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
|
||||
github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
|
||||
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
|
||||
github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
|
||||
github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
|
||||
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
|
||||
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
||||
github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
|
||||
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=
|
||||
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
|
||||
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
|
||||
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
|
||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
|
||||
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
|
||||
github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
|
||||
github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
|
||||
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
|
||||
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
||||
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
||||
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
|
||||
github.com/mattn/go-sqlite3 v1.14.5 h1:1IdxlwTNazvbKJQSxoJ5/9ECbEeaTTyeU7sEAZ5KKTQ=
|
||||
github.com/mattn/go-sqlite3 v1.14.5/go.mod h1:WVKg1VTActs4Qso6iwGbiFih2UIHo0ENGwNd0Lj+XmI=
|
||||
github.com/pelletier/go-toml v1.8.1 h1:1Nf83orprkJyknT6h7zbuEGUEjcyVlCxSUGTENmNCRM=
|
||||
github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
|
||||
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
|
||||
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
|
||||
github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
|
||||
github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
|
||||
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg=
|
||||
github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU=
|
||||
github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k=
|
||||
github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w=
|
||||
github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
|
||||
github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
|
||||
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
|
||||
github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs=
|
||||
github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
|
||||
github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
|
||||
github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis=
|
||||
github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74=
|
||||
github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
|
||||
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
|
||||
github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA=
|
||||
github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
|
||||
github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
|
||||
github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
|
||||
github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM=
|
||||
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
|
||||
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
|
||||
github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac=
|
||||
github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
|
||||
github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I=
|
||||
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
|
||||
github.com/pierrec/lz4 v2.6.0+incompatible h1:Ix9yFKn1nSPBLFl/yZknTp8TU5G4Ps0JDmguYK6iH1A=
|
||||
github.com/pierrec/lz4/v4 v4.1.3 h1:/dvQpkb0o1pVlSgKNQqfkavlnXaIK+hJ0LXsKRUN9D4=
|
||||
github.com/pierrec/lz4/v4 v4.1.3/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/robertkrimen/godocdown v0.0.0-20130622164427-0bfa04905481/go.mod h1:C9WhFzY47SzYBIvzFqSvHIR6ROgDo4TtdTuRaOMjF/s=
|
||||
github.com/stephens2424/writerset v1.0.2/go.mod h1:aS2JhsMn6eA7e82oNmW4rfsgAOp9COBTTl8mzkwADnc=
|
||||
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
|
||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
|
||||
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
|
||||
github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og=
|
||||
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
|
||||
github.com/prometheus/client_golang v1.9.0 h1:Rrch9mh17XcxvEu9D9DEpb4isxjGBtcevQjKvxPRQIU=
|
||||
github.com/prometheus/client_golang v1.9.0/go.mod h1:FqZLKOZnGdFAhOK4nqGHa7D66IdsO+O441Eve7ptJDU=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
|
||||
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=
|
||||
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
|
||||
github.com/prometheus/common v0.15.0 h1:4fgOnadei3EZvgRwxJ7RMpG1k1pOZth5Pc13tyspaKM=
|
||||
github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
|
||||
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
||||
github.com/prometheus/procfs v0.2.0 h1:wH4vA7pcjKuZzjF7lM8awk4fnuJO6idemZXoKnULUx4=
|
||||
github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
||||
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
|
||||
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
|
||||
github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
|
||||
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
|
||||
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
|
||||
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
|
||||
github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY=
|
||||
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
|
||||
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
|
||||
github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
|
||||
github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c h1:u6SKchux2yDvFQnDHS3lPnIRmfVJ5Sxy3ao2SIdysLQ=
|
||||
github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
|
||||
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
|
||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
||||
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
||||
go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
|
||||
go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
|
||||
go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
|
||||
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
|
||||
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
||||
go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
|
||||
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
|
||||
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
|
||||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
||||
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191210023423-ac6580df4449 h1:gSbV7h1NRL2G1xTg/owz62CST1oJBmxy4QpMMregXVQ=
|
||||
golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e h1:AyodaIpKjppX+cBfTASF2E1US3H2JFBj920Ot3rtDjs=
|
||||
golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20200423201157-2723c5de0d66/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
|
||||
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
||||
google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM=
|
||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
|
||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o=
|
||||
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
|
||||
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
||||
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
|
||||
sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU=
|
||||
|
||||
32	internal/internal.go	Normal file
@@ -0,0 +1,32 @@
package internal

import (
	"io"
)

// ReadCloser wraps a reader to also attach a separate closer.
type ReadCloser struct {
	r io.Reader
	c io.Closer
}

// NewReadCloser returns a new instance of ReadCloser.
func NewReadCloser(r io.Reader, c io.Closer) *ReadCloser {
	return &ReadCloser{r, c}
}

// Read reads bytes from the underlying reader into p.
func (r *ReadCloser) Read(p []byte) (n int, err error) {
	return r.r.Read(p)
}

// Close closes the reader (if it implements io.Closer) and then the attached closer.
func (r *ReadCloser) Close() error {
	if rc, ok := r.r.(io.Closer); ok {
		if err := rc.Close(); err != nil {
			r.c.Close()
			return err
		}
	}
	return r.c.Close()
}
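The ReadCloser helper above is what lets replica code hand back a decompressing reader while still guaranteeing the underlying file is closed. A minimal sketch of that usage, assuming the package is importable as github.com/benbjohnson/litestream/internal and using an illustrative file path:

package example

import (
	"compress/gzip"
	"io"
	"os"

	"github.com/benbjohnson/litestream/internal"
)

// openGzipped returns a single io.ReadCloser that closes both the gzip
// reader and the underlying file when Close is called.
func openGzipped(path string) (io.ReadCloser, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	zr, err := gzip.NewReader(f)
	if err != nil {
		f.Close()
		return nil, err
	}
	return internal.NewReadCloser(zr, f), nil
}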
44	internal/metrics.go	Normal file
@@ -0,0 +1,44 @@
package internal

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

// Shared replica metrics.
var (
	ReplicaSnapshotTotalGaugeVec = promauto.NewGaugeVec(prometheus.GaugeOpts{
		Namespace: "litestream",
		Subsystem: "replica",
		Name:      "snapshot_total",
		Help:      "The current number of snapshots",
	}, []string{"db", "name"})

	ReplicaWALBytesCounterVec = promauto.NewCounterVec(prometheus.CounterOpts{
		Namespace: "litestream",
		Subsystem: "replica",
		Name:      "wal_bytes",
		Help:      "The number of WAL bytes written",
	}, []string{"db", "name"})

	ReplicaWALIndexGaugeVec = promauto.NewGaugeVec(prometheus.GaugeOpts{
		Namespace: "litestream",
		Subsystem: "replica",
		Name:      "wal_index",
		Help:      "The current WAL index",
	}, []string{"db", "name"})

	ReplicaWALOffsetGaugeVec = promauto.NewGaugeVec(prometheus.GaugeOpts{
		Namespace: "litestream",
		Subsystem: "replica",
		Name:      "wal_offset",
		Help:      "The current WAL offset",
	}, []string{"db", "name"})

	ReplicaValidationTotalCounterVec = promauto.NewCounterVec(prometheus.CounterOpts{
		Namespace: "litestream",
		Subsystem: "replica",
		Name:      "validation_total",
		Help:      "The number of validations performed",
	}, []string{"db", "name", "status"})
)
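A hedged sketch of how a replica implementation might feed these shared collectors after writing WAL data; the WithLabelValues calls follow the ("db", "name") label sets declared above, while the function name and arguments are illustrative rather than the package's actual call site:

package example

import (
	"github.com/benbjohnson/litestream"
	"github.com/benbjohnson/litestream/internal"
)

// recordWALWrite is a sketch: after writing n WAL bytes at position pos,
// update the shared replica metrics for the given database path & replica name.
func recordWALWrite(dbPath, replicaName string, pos litestream.Pos, n int64) {
	internal.ReplicaWALBytesCounterVec.WithLabelValues(dbPath, replicaName).Add(float64(n))
	internal.ReplicaWALIndexGaugeVec.WithLabelValues(dbPath, replicaName).Set(float64(pos.Index))
	internal.ReplicaWALOffsetGaugeVec.WithLabelValues(dbPath, replicaName).Set(float64(pos.Offset))
}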
206	litestream.go
@@ -1,10 +1,8 @@
|
||||
package litestream
|
||||
|
||||
import (
|
||||
"compress/gzip"
|
||||
"database/sql"
|
||||
"encoding/binary"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
@@ -13,10 +11,11 @@ import (
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
_ "github.com/mattn/go-sqlite3"
|
||||
"syscall"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Naming constants.
|
||||
const (
|
||||
MetaDirSuffix = "-litestream"
|
||||
|
||||
@@ -27,11 +26,65 @@ const (
|
||||
GenerationNameLen = 16
|
||||
)
|
||||
|
||||
// SQLite checkpoint modes.
|
||||
const (
|
||||
CheckpointModePassive = "PASSIVE"
|
||||
CheckpointModeFull = "FULL"
|
||||
CheckpointModeRestart = "RESTART"
|
||||
CheckpointModeTruncate = "TRUNCATE"
|
||||
)
|
||||
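These constants mirror SQLite's wal_checkpoint modes. As a rough illustration (not the package's actual checkpoint code, which is not shown in this hunk), a mode string can be interpolated into the PRAGMA like so; the db handle and helper name are illustrative:

package example

import (
	"database/sql"
	"fmt"
)

// checkpoint issues a SQLite WAL checkpoint in the given mode ("PASSIVE",
// "FULL", "RESTART" or "TRUNCATE"). PRAGMA wal_checkpoint returns three
// columns: a busy flag, the WAL frame count, and the checkpointed frame count.
func checkpoint(db *sql.DB, mode string) (busy, logN, checkpointedN int, err error) {
	row := db.QueryRow(fmt.Sprintf(`PRAGMA wal_checkpoint(%s);`, mode))
	err = row.Scan(&busy, &logN, &checkpointedN)
	return busy, logN, checkpointedN, err
}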
|
||||
// Litestream errors.
|
||||
var (
|
||||
ErrNoSnapshots = errors.New("no snapshots available")
|
||||
ErrNoSnapshots = errors.New("no snapshots available")
|
||||
ErrChecksumMismatch = errors.New("invalid replica, checksum mismatch")
|
||||
)
|
||||
|
||||
// SnapshotInfo represents file information about a snapshot.
|
||||
type SnapshotInfo struct {
|
||||
Name string
|
||||
Replica string
|
||||
Generation string
|
||||
Index int
|
||||
Size int64
|
||||
CreatedAt time.Time
|
||||
}
|
||||
|
||||
// FilterSnapshotsAfter returns all snapshots that were created on or after t.
|
||||
func FilterSnapshotsAfter(a []*SnapshotInfo, t time.Time) []*SnapshotInfo {
|
||||
other := make([]*SnapshotInfo, 0, len(a))
|
||||
for _, snapshot := range a {
|
||||
if !snapshot.CreatedAt.Before(t) {
|
||||
other = append(other, snapshot)
|
||||
}
|
||||
}
|
||||
return other
|
||||
}
|
||||
|
||||
// FindMinSnapshotByGeneration finds the snapshot with the lowest index in a generation.
|
||||
func FindMinSnapshotByGeneration(a []*SnapshotInfo, generation string) *SnapshotInfo {
|
||||
var min *SnapshotInfo
|
||||
for _, snapshot := range a {
|
||||
if snapshot.Generation != generation {
|
||||
continue
|
||||
} else if min == nil || snapshot.Index < min.Index {
|
||||
min = snapshot
|
||||
}
|
||||
}
|
||||
return min
|
||||
}
|
||||
|
||||
// WALInfo represents file information about a WAL file.
|
||||
type WALInfo struct {
|
||||
Name string
|
||||
Replica string
|
||||
Generation string
|
||||
Index int
|
||||
Offset int64
|
||||
Size int64
|
||||
CreatedAt time.Time
|
||||
}
|
||||
|
||||
// Pos is a position in the WAL for a generation.
|
||||
type Pos struct {
|
||||
Generation string // generation name
|
||||
@@ -44,7 +97,7 @@ func (p Pos) String() string {
|
||||
if p.IsZero() {
|
||||
return "<>"
|
||||
}
|
||||
return fmt.Sprintf("<%s,%d,%d>", p.Generation, p.Index, p.Offset)
|
||||
return fmt.Sprintf("<%s,%08x,%d>", p.Generation, p.Index, p.Offset)
|
||||
}
|
||||
|
||||
// IsZero returns true if p is the zero value.
|
||||
@@ -98,10 +151,6 @@ func readWALHeader(filename string) ([]byte, error) {
|
||||
return buf[:n], err
|
||||
}
|
||||
|
||||
func readCheckpointSeqNo(hdr []byte) uint32 {
|
||||
return binary.BigEndian.Uint32(hdr[12:])
|
||||
}
|
||||
|
||||
// readFileAt reads a slice from a file.
|
||||
func readFileAt(filename string, offset, n int64) ([]byte, error) {
|
||||
f, err := os.Open(filename)
|
||||
@@ -119,19 +168,6 @@ func readFileAt(filename string, offset, n int64) ([]byte, error) {
|
||||
return buf, nil
|
||||
}
|
||||
|
||||
func ParseWALFilename(name string) (index int, err error) {
|
||||
v, err := strconv.ParseInt(strings.TrimSuffix(name, WALExt), 16, 64)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("invalid wal filename: %q", name)
|
||||
}
|
||||
return int(v), nil
|
||||
}
|
||||
|
||||
func FormatWALFilename(index int) string {
|
||||
assert(index >= 0, "wal index must be non-negative")
|
||||
return fmt.Sprintf("%016x%s", index, WALExt)
|
||||
}
|
||||
|
||||
// removeTmpFiles recursively finds and removes .tmp files.
|
||||
func removeTmpFiles(root string) error {
|
||||
return filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
|
||||
@@ -166,17 +202,19 @@ func IsSnapshotPath(s string) bool {
|
||||
|
||||
// ParseSnapshotPath returns the index for the snapshot.
|
||||
// Returns an error if the path is not a valid snapshot path.
|
||||
func ParseSnapshotPath(s string) (index int, typ, ext string, err error) {
|
||||
func ParseSnapshotPath(s string) (index int, ext string, err error) {
|
||||
s = filepath.Base(s)
|
||||
|
||||
a := snapshotPathRegex.FindStringSubmatch(s)
|
||||
if a == nil {
|
||||
return 0, "", "", fmt.Errorf("invalid snapshot path: %s", s)
|
||||
return 0, "", fmt.Errorf("invalid snapshot path: %s", s)
|
||||
}
|
||||
|
||||
i64, _ := strconv.ParseUint(a[1], 16, 64)
|
||||
return int(i64), a[2], a[3], nil
|
||||
return int(i64), a[2], nil
|
||||
}
|
||||
|
||||
var snapshotPathRegex = regexp.MustCompile(`^([0-9a-f]{16})(?:-(\w+))?(.snapshot(?:.gz)?)$`)
|
||||
var snapshotPathRegex = regexp.MustCompile(`^([0-9a-f]{8})(.snapshot(?:.lz4)?)$`)
|
||||
|
||||
// IsWALPath returns true if s is a path to a WAL file.
|
||||
func IsWALPath(s string) bool {
|
||||
@@ -186,6 +224,8 @@ func IsWALPath(s string) bool {
|
||||
// ParseWALPath returns the index & offset for the WAL file.
|
||||
// Returns an error if the path is not a valid snapshot path.
|
||||
func ParseWALPath(s string) (index int, offset int64, ext string, err error) {
|
||||
s = filepath.Base(s)
|
||||
|
||||
a := walPathRegex.FindStringSubmatch(s)
|
||||
if a == nil {
|
||||
return 0, 0, "", fmt.Errorf("invalid wal path: %s", s)
|
||||
@@ -196,65 +236,85 @@ func ParseWALPath(s string) (index int, offset int64, ext string, err error) {
|
||||
return int(i64), int64(off64), a[3], nil
|
||||
}
|
||||
|
||||
var walPathRegex = regexp.MustCompile(`^([0-9a-f]{16})(?:_([0-9a-f]{16}))?(.wal(?:.gz)?)$`)
|
||||
// FormatWALPath formats a WAL filename with a given index.
|
||||
func FormatWALPath(index int) string {
|
||||
assert(index >= 0, "wal index must be non-negative")
|
||||
return fmt.Sprintf("%08x%s", index, WALExt)
|
||||
}
|
||||
|
||||
// FormatWALPathWithOffset formats a WAL filename with a given index & offset.
|
||||
func FormatWALPathWithOffset(index int, offset int64) string {
|
||||
assert(index >= 0, "wal index must be non-negative")
|
||||
assert(offset >= 0, "wal offset must be non-negative")
|
||||
return fmt.Sprintf("%08x_%08x%s", index, offset, WALExt)
|
||||
}
|
||||
|
||||
var walPathRegex = regexp.MustCompile(`^([0-9a-f]{8})(?:_([0-9a-f]{8}))?(.wal(?:.lz4)?)$`)
|
||||
|
||||
// isHexChar returns true if ch is a lowercase hex character.
|
||||
func isHexChar(ch rune) bool {
|
||||
return (ch >= '0' && ch <= '9') || (ch >= 'a' && ch <= 'f')
|
||||
}
|
||||
|
||||
// gzipReadCloser wraps gzip.Reader to also close the underlying reader on close.
|
||||
type gzipReadCloser struct {
|
||||
r *gzip.Reader
|
||||
closer io.ReadCloser
|
||||
// createFile creates the file and attempts to set the UID/GID.
|
||||
func createFile(filename string, perm os.FileMode, uid, gid int) (*os.File, error) {
|
||||
f, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, perm)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_ = f.Chown(uid, gid)
|
||||
return f, nil
|
||||
}
|
||||
|
||||
func (r *gzipReadCloser) Read(p []byte) (n int, err error) {
|
||||
return r.r.Read(p)
|
||||
}
|
||||
// mkdirAll is a copy of os.MkdirAll() except that it attempts to set the
|
||||
// uid/gid for each created directory.
|
||||
func mkdirAll(path string, perm os.FileMode, uid, gid int) error {
|
||||
// Fast path: if we can tell whether path is a directory or file, stop with success or error.
|
||||
dir, err := os.Stat(path)
|
||||
if err == nil {
|
||||
if dir.IsDir() {
|
||||
return nil
|
||||
}
|
||||
return &os.PathError{Op: "mkdir", Path: path, Err: syscall.ENOTDIR}
|
||||
}
|
||||
|
||||
func (r *gzipReadCloser) Close() error {
|
||||
if err := r.r.Close(); err != nil {
|
||||
r.closer.Close()
|
||||
// Slow path: make sure parent exists and then call Mkdir for path.
|
||||
i := len(path)
|
||||
for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator.
|
||||
i--
|
||||
}
|
||||
|
||||
j := i
|
||||
for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element.
|
||||
j--
|
||||
}
|
||||
|
||||
if j > 1 {
|
||||
// Create parent.
|
||||
err = mkdirAll(fixRootDirectory(path[:j-1]), perm, uid, gid)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Parent now exists; invoke Mkdir and use its result.
|
||||
err = os.Mkdir(path, perm)
|
||||
if err != nil {
|
||||
// Handle arguments like "foo/." by
|
||||
// double-checking that directory doesn't exist.
|
||||
dir, err1 := os.Lstat(path)
|
||||
if err1 == nil && dir.IsDir() {
|
||||
_ = os.Chown(path, uid, gid)
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
return r.closer.Close()
|
||||
_ = os.Chown(path, uid, gid)
|
||||
return nil
|
||||
}
|
||||
|
||||
// HexDump returns hexdump output but with duplicate lines removed.
|
||||
func HexDump(b []byte) string {
|
||||
const prefixN = len("00000000")
|
||||
|
||||
var output []string
|
||||
var prev string
|
||||
var ellipsis bool
|
||||
|
||||
lines := strings.Split(strings.TrimSpace(hex.Dump(b)), "\n")
|
||||
for i, line := range lines {
|
||||
// Add line to output if it is not repeating or the last line.
|
||||
if i == 0 || i == len(lines)-1 || trimPrefixN(line, prefixN) != trimPrefixN(prev, prefixN) {
|
||||
output = append(output, line)
|
||||
prev, ellipsis = line, false
|
||||
continue
|
||||
}
|
||||
|
||||
// Add an ellipsis for the first duplicate line.
|
||||
if !ellipsis {
|
||||
output = append(output, "...")
|
||||
ellipsis = true
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
return strings.Join(output, "\n")
|
||||
}
|
||||
|
||||
func trimPrefixN(s string, n int) string {
|
||||
if len(s) < n {
|
||||
return ""
|
||||
}
|
||||
return s[n:]
|
||||
}
|
||||
// Tracef is used for low-level tracing.
|
||||
var Tracef = func(format string, a ...interface{}) {}
|
||||
|
||||
func assert(condition bool, message string) {
|
||||
if !condition {
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/benbjohnson/litestream"
|
||||
_ "github.com/mattn/go-sqlite3"
|
||||
)
|
||||
|
||||
func TestChecksum(t *testing.T) {
|
||||
|
||||
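The WAL naming helpers added to litestream.go above round-trip cleanly. A small sketch, assuming WALExt is ".wal" as the walPathRegex suggests:

package main

import (
	"fmt"

	"github.com/benbjohnson/litestream"
)

func main() {
	// Format a WAL path for index 3 at offset 4152, then parse it back.
	name := litestream.FormatWALPathWithOffset(3, 4152) // "00000003_00001038.wal" (assuming WALExt == ".wal")
	index, offset, ext, err := litestream.ParseWALPath(name)
	if err != nil {
		panic(err)
	}
	fmt.Println(index, offset, ext) // 3 4152 .wal
}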
18	litestream_unix.go	Normal file
@@ -0,0 +1,18 @@
// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris

package litestream

import (
	"os"
	"syscall"
)

// fileinfo returns syscall fields from a FileInfo object.
func fileinfo(fi os.FileInfo) (uid, gid int) {
	stat := fi.Sys().(*syscall.Stat_t)
	return int(stat.Uid), int(stat.Gid)
}

func fixRootDirectory(p string) string {
	return p
}
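Within the package, fileinfo pairs with the createFile helper from litestream.go so replica files can inherit the source database's ownership. A package-internal sketch (copyOwnership is a hypothetical helper, not part of this changeset):

package litestream

import "os"

// copyOwnership is a sketch: create dst with the same permissions and
// uid/gid as src. As with createFile above, the ownership change is
// best-effort and its error is ignored.
func copyOwnership(src, dst string) (*os.File, error) {
	fi, err := os.Stat(src)
	if err != nil {
		return nil, err
	}
	uid, gid := fileinfo(fi)
	return createFile(dst, fi.Mode().Perm(), uid, gid)
}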
23	litestream_windows.go	Normal file
@@ -0,0 +1,23 @@
// +build windows

package litestream

import (
	"os"
)

// fileinfo returns syscall fields from a FileInfo object.
func fileinfo(fi os.FileInfo) (uid, gid int) {
	return -1, -1
}

// fixRootDirectory is copied from the standard library for use with mkdirAll()
func fixRootDirectory(p string) string {
	if len(p) == len(`\\?\c:`) {
		if os.IsPathSeparator(p[0]) && os.IsPathSeparator(p[1]) && p[2] == '?' && os.IsPathSeparator(p[3]) && p[5] == ':' {
			return p + `\`
		}
	}
	return p
}
1093	replica.go	Normal file
File diff suppressed because it is too large
90	replica_test.go	Normal file
@@ -0,0 +1,90 @@
|
||||
package litestream_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/benbjohnson/litestream"
|
||||
)
|
||||
|
||||
func TestFileReplica_Sync(t *testing.T) {
|
||||
// Ensure replica can successfully sync after DB has sync'd.
|
||||
t.Run("InitialSync", func(t *testing.T) {
|
||||
db, sqldb := MustOpenDBs(t)
|
||||
defer MustCloseDBs(t, db, sqldb)
|
||||
r := NewTestFileReplica(t, db)
|
||||
|
||||
// Sync database & then sync replica.
|
||||
if err := db.Sync(); err != nil {
|
||||
t.Fatal(err)
|
||||
} else if err := r.Sync(context.Background()); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Ensure positions match.
|
||||
if pos, err := db.Pos(); err != nil {
|
||||
t.Fatal(err)
|
||||
} else if got, want := r.LastPos(), pos; got != want {
|
||||
t.Fatalf("LastPos()=%v, want %v", got, want)
|
||||
}
|
||||
})
|
||||
|
||||
// Ensure replica can successfully sync multiple times.
|
||||
t.Run("MultiSync", func(t *testing.T) {
|
||||
db, sqldb := MustOpenDBs(t)
|
||||
defer MustCloseDBs(t, db, sqldb)
|
||||
r := NewTestFileReplica(t, db)
|
||||
|
||||
if _, err := sqldb.Exec(`CREATE TABLE foo (bar TEXT);`); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Write to the database multiple times and sync after each write.
|
||||
for i, n := 0, db.MinCheckpointPageN*2; i < n; i++ {
|
||||
if _, err := sqldb.Exec(`INSERT INTO foo (bar) VALUES ('baz')`); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Sync periodically.
|
||||
if i%100 == 0 || i == n-1 {
|
||||
if err := db.Sync(); err != nil {
|
||||
t.Fatal(err)
|
||||
} else if err := r.Sync(context.Background()); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure positions match.
|
||||
if pos, err := db.Pos(); err != nil {
|
||||
t.Fatal(err)
|
||||
} else if got, want := pos.Index, 2; got != want {
|
||||
t.Fatalf("Index=%v, want %v", got, want)
|
||||
} else if calcPos, err := r.CalcPos(context.Background(), pos.Generation); err != nil {
|
||||
t.Fatal(err)
|
||||
} else if got, want := calcPos, pos; got != want {
|
||||
t.Fatalf("CalcPos()=%v, want %v", got, want)
|
||||
} else if got, want := r.LastPos(), pos; got != want {
|
||||
t.Fatalf("LastPos()=%v, want %v", got, want)
|
||||
}
|
||||
})
|
||||
|
||||
// Ensure replica returns an error if there is no generation available from the DB.
|
||||
t.Run("ErrNoGeneration", func(t *testing.T) {
|
||||
db, sqldb := MustOpenDBs(t)
|
||||
defer MustCloseDBs(t, db, sqldb)
|
||||
r := NewTestFileReplica(t, db)
|
||||
|
||||
if err := r.Sync(context.Background()); err == nil || err.Error() != `no generation, waiting for data` {
|
||||
t.Fatal(err)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// NewTestFileReplica returns a new replica using a temp directory & with monitoring disabled.
|
||||
func NewTestFileReplica(tb testing.TB, db *litestream.DB) *litestream.FileReplica {
|
||||
r := litestream.NewFileReplica(db, "", tb.TempDir())
|
||||
r.MonitorEnabled = false
|
||||
db.Replicas = []litestream.Replica{r}
|
||||
return r
|
||||
}
|
||||
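Outside of tests, wiring a FileReplica to a database follows the same shape as NewTestFileReplica above. A hedged sketch: litestream.NewDB, db.Open, and db.Close are assumed from parts of the package not shown in this diff, and the paths are illustrative.

package main

import (
	"context"
	"log"

	"github.com/benbjohnson/litestream"
	_ "github.com/mattn/go-sqlite3"
)

func main() {
	db := litestream.NewDB("/var/lib/app/app.db") // assumed constructor, not shown in this diff
	if err := db.Open(); err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Replicate to a local directory and perform one DB + replica sync.
	r := litestream.NewFileReplica(db, "", "/var/backups/app.db")
	db.Replicas = []litestream.Replica{r}
	if err := db.Sync(); err != nil {
		log.Fatal(err)
	} else if err := r.Sync(context.Background()); err != nil {
		log.Fatal(err)
	}
}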
640	replicator.go
@@ -1,640 +0,0 @@
|
||||
package litestream
|
||||
|
||||
import (
|
||||
"compress/gzip"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Replica represents a remote destination to replicate the database & WAL.
|
||||
type Replica interface {
|
||||
// The name of the replica. Defaults to type if no name specified.
|
||||
Name() string
|
||||
|
||||
// String identifier for the type of replica ("file", "s3", etc).
|
||||
Type() string
|
||||
|
||||
// Starts replicating in a background goroutine.
|
||||
Start(ctx context.Context)
|
||||
|
||||
// Stops all replication processing. Blocks until processing stopped.
|
||||
Stop()
|
||||
|
||||
// Returns a list of generation names for the replica.
|
||||
Generations(ctx context.Context) ([]string, error)
|
||||
|
||||
// Returns basic information about a generation including the number of
|
||||
// snapshot & WAL files as well as the time range covered.
|
||||
GenerationStats(ctx context.Context, generation string) (GenerationStats, error)
|
||||
|
||||
// Returns the highest index for a snapshot within a generation that occurs
|
||||
// before timestamp. If timestamp is zero, returns the latest snapshot.
|
||||
SnapshotIndexAt(ctx context.Context, generation string, timestamp time.Time) (int, error)
|
||||
|
||||
// Returns the highest index for a WAL file that occurs before timestamp.
|
||||
// If timestamp is zero, returns the highest WAL index.
|
||||
WALIndexAt(ctx context.Context, generation string, timestamp time.Time) (int, error)
|
||||
|
||||
// Returns a reader for snapshot data at the given generation/index.
|
||||
SnapshotReader(ctx context.Context, generation string, index int) (io.ReadCloser, error)
|
||||
|
||||
// Returns a reader for WAL data at the given position.
|
||||
WALReader(ctx context.Context, generation string, index int) (io.ReadCloser, error)
|
||||
}
|
||||
|
||||
var _ Replica = (*FileReplica)(nil)
|
||||
|
||||
// FileReplica is a replica that replicates a DB to a local file path.
|
||||
type FileReplica struct {
|
||||
db *DB // source database
|
||||
name string // replica name, optional
|
||||
dst string // destination path
|
||||
|
||||
// mu sync.RWMutex
|
||||
wg sync.WaitGroup
|
||||
|
||||
ctx context.Context
|
||||
cancel func()
|
||||
}
|
||||
|
||||
// NewFileReplica returns a new instance of FileReplica.
|
||||
func NewFileReplica(db *DB, name, dst string) *FileReplica {
|
||||
return &FileReplica{
|
||||
db: db,
|
||||
name: name,
|
||||
dst: dst,
|
||||
cancel: func() {},
|
||||
}
|
||||
}
|
||||
|
||||
// Name returns the name of the replica. Returns the type if no name set.
|
||||
func (r *FileReplica) Name() string {
|
||||
if r.name != "" {
|
||||
return r.name
|
||||
}
|
||||
return r.Type()
|
||||
}
|
||||
|
||||
// Type returns the type of replica.
|
||||
func (r *FileReplica) Type() string {
|
||||
return "file"
|
||||
}
|
||||
|
||||
// SnapshotDir returns the path to a generation's snapshot directory.
|
||||
func (r *FileReplica) SnapshotDir(generation string) string {
|
||||
return filepath.Join(r.dst, "generations", generation, "snapshots")
|
||||
}
|
||||
|
||||
// SnapshotPath returns the path to a snapshot file.
|
||||
func (r *FileReplica) SnapshotPath(generation string, index int) string {
|
||||
return filepath.Join(r.SnapshotDir(generation), fmt.Sprintf("%016x.snapshot.gz", index))
|
||||
}
|
||||
|
||||
// WALDir returns the path to a generation's WAL directory
|
||||
func (r *FileReplica) WALDir(generation string) string {
|
||||
return filepath.Join(r.dst, "generations", generation, "wal")
|
||||
}
|
||||
|
||||
// WALPath returns the path to a WAL file.
|
||||
func (r *FileReplica) WALPath(generation string, index int) string {
|
||||
return filepath.Join(r.WALDir(generation), fmt.Sprintf("%016x.wal", index))
|
||||
}
|
||||
|
||||
// Generations returns a list of available generation names.
|
||||
func (r *FileReplica) Generations(ctx context.Context) ([]string, error) {
|
||||
fis, err := ioutil.ReadDir(filepath.Join(r.dst, "generations"))
|
||||
if os.IsNotExist(err) {
|
||||
return nil, nil
|
||||
} else if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var generations []string
|
||||
for _, fi := range fis {
|
||||
if !IsGenerationName(fi.Name()) {
|
||||
continue
|
||||
} else if !fi.IsDir() {
|
||||
continue
|
||||
}
|
||||
generations = append(generations, fi.Name())
|
||||
}
|
||||
return generations, nil
|
||||
}
|
||||
|
||||
// GenerationStats returns stats for a generation.
|
||||
func (r *FileReplica) GenerationStats(ctx context.Context, generation string) (stats GenerationStats, err error) {
|
||||
// Determine stats for all snapshots.
|
||||
n, min, max, err := r.snapshotStats(generation)
|
||||
if err != nil {
|
||||
return stats, err
|
||||
}
|
||||
stats.SnapshotN = n
|
||||
stats.CreatedAt, stats.UpdatedAt = min, max
|
||||
|
||||
// Update stats if we have WAL files.
|
||||
n, min, max, err = r.walStats(generation)
|
||||
if err != nil {
|
||||
return stats, err
|
||||
} else if n == 0 {
|
||||
return stats, nil
|
||||
}
|
||||
|
||||
stats.WALN = n
|
||||
if stats.CreatedAt.IsZero() || min.Before(stats.CreatedAt) {
|
||||
stats.CreatedAt = min
|
||||
}
|
||||
if stats.UpdatedAt.IsZero() || max.After(stats.UpdatedAt) {
|
||||
stats.UpdatedAt = max
|
||||
}
|
||||
return stats, nil
|
||||
}
|
||||
|
||||
func (r *FileReplica) snapshotStats(generation string) (n int, min, max time.Time, err error) {
|
||||
fis, err := ioutil.ReadDir(r.SnapshotDir(generation))
|
||||
if os.IsNotExist(err) {
|
||||
return n, min, max, nil
|
||||
} else if err != nil {
|
||||
return n, min, max, err
|
||||
}
|
||||
|
||||
for _, fi := range fis {
|
||||
if !IsSnapshotPath(fi.Name()) {
|
||||
continue
|
||||
}
|
||||
modTime := fi.ModTime().UTC()
|
||||
|
||||
n++
|
||||
if min.IsZero() || modTime.Before(min) {
|
||||
min = modTime
|
||||
}
|
||||
if max.IsZero() || modTime.After(max) {
|
||||
max = modTime
|
||||
}
|
||||
}
|
||||
return n, min, max, nil
|
||||
}
|
||||
|
||||
func (r *FileReplica) walStats(generation string) (n int, min, max time.Time, err error) {
|
||||
fis, err := ioutil.ReadDir(r.WALDir(generation))
|
||||
if os.IsNotExist(err) {
|
||||
return n, min, max, nil
|
||||
} else if err != nil {
|
||||
return n, min, max, err
|
||||
}
|
||||
|
||||
for _, fi := range fis {
|
||||
if !IsWALPath(fi.Name()) {
|
||||
continue
|
||||
}
|
||||
modTime := fi.ModTime().UTC()
|
||||
|
||||
n++
|
||||
if min.IsZero() || modTime.Before(min) {
|
||||
min = modTime
|
||||
}
|
||||
if max.IsZero() || modTime.After(max) {
|
||||
max = modTime
|
||||
}
|
||||
}
|
||||
return n, min, max, nil
|
||||
}
|
||||
|
||||
type GenerationStats struct {
|
||||
SnapshotN int
|
||||
WALN int
|
||||
CreatedAt time.Time
|
||||
UpdatedAt time.Time
|
||||
}
|
||||
|
||||
// Start starts replication for a given generation.
|
||||
func (r *FileReplica) Start(ctx context.Context) {
|
||||
// Stop previous replication.
|
||||
r.Stop()
|
||||
|
||||
// Wrap context with cancelation.
|
||||
ctx, r.cancel = context.WithCancel(ctx)
|
||||
|
||||
// Start goroutine to replicate data.
|
||||
r.wg.Add(1)
|
||||
go func() { defer r.wg.Done(); r.monitor(ctx) }()
|
||||
}
|
||||
|
||||
// Stop cancels any outstanding replication and blocks until finished.
|
||||
func (r *FileReplica) Stop() {
|
||||
r.cancel()
|
||||
r.wg.Wait()
|
||||
}
|
||||
|
||||
// monitor runs in a separate goroutine and continuously replicates the DB.
|
||||
func (r *FileReplica) monitor(ctx context.Context) {
|
||||
// Clear old temporary files that may have been left from a crash.
|
||||
if err := removeTmpFiles(r.dst); err != nil {
|
||||
log.Printf("%s(%s): cannot remove tmp files: %s", r.db.Path(), r.Name(), err)
|
||||
}
|
||||
|
||||
// Continuously check for new data to replicate.
|
||||
ch := make(chan struct{})
|
||||
close(ch)
|
||||
var notify <-chan struct{} = ch
|
||||
|
||||
var pos Pos
|
||||
var err error
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case <-notify:
|
||||
}
|
||||
|
||||
// Fetch new notify channel before replicating data.
|
||||
notify = r.db.Notify()
|
||||
|
||||
// Determine position, if necessary.
|
||||
if pos.IsZero() {
|
||||
if pos, err = r.pos(); err != nil {
|
||||
log.Printf("%s(%s): cannot determine position: %s", r.db.Path(), r.Name(), err)
|
||||
continue
|
||||
} else if pos.IsZero() {
|
||||
log.Printf("%s(%s): no generation, waiting for data", r.db.Path(), r.Name())
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// If we have no replicated WALs, start from last index in shadow WAL.
|
||||
if pos.Index == 0 && pos.Offset == 0 {
|
||||
if pos.Index, err = r.db.CurrentShadowWALIndex(pos.Generation); err != nil {
|
||||
log.Printf("%s(%s): cannot determine latest shadow wal index: %s", r.db.Path(), r.Name(), err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// Synchronize the shadow wal into the replication directory.
|
||||
if pos, err = r.sync(ctx, pos); err != nil {
|
||||
log.Printf("%s(%s): sync error: %s", r.db.Path(), r.Name(), err)
|
||||
continue
|
||||
}
|
||||
|
||||
// Gzip any old WAL files.
|
||||
if pos.Generation != "" {
|
||||
if err := r.compress(ctx, pos.Generation); err != nil {
|
||||
log.Printf("%s(%s): compress error: %s", r.db.Path(), r.Name(), err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// pos returns the position for the replica for the current generation.
|
||||
// Returns a zero value if there is no active generation.
|
||||
func (r *FileReplica) pos() (pos Pos, err error) {
|
||||
// Find the current generation from the DB. Return zero pos if no generation.
|
||||
generation, err := r.db.CurrentGeneration()
|
||||
if err != nil {
|
||||
return pos, err
|
||||
} else if generation == "" {
|
||||
return pos, nil // empty position
|
||||
}
|
||||
pos.Generation = generation
|
||||
|
||||
// Find the max WAL file.
|
||||
dir := r.WALDir(generation)
|
||||
fis, err := ioutil.ReadDir(dir)
|
||||
if os.IsNotExist(err) {
|
||||
return pos, nil // no replicated wal, start at beginning of generation
|
||||
} else if err != nil {
|
||||
return pos, err
|
||||
}
|
||||
|
||||
index := -1
|
||||
for _, fi := range fis {
|
||||
name := fi.Name()
|
||||
name = strings.TrimSuffix(name, ".gz")
|
||||
|
||||
if !strings.HasSuffix(name, WALExt) {
|
||||
continue
|
||||
}
|
||||
|
||||
if v, err := ParseWALFilename(filepath.Base(name)); err != nil {
|
||||
continue // invalid wal filename
|
||||
} else if index == -1 || v > index {
|
||||
index = v
|
||||
}
|
||||
}
|
||||
if index == -1 {
|
||||
return pos, nil // wal directory exists but no wal files, return beginning pos
|
||||
}
|
||||
pos.Index = index
|
||||
|
||||
// Determine current offset.
|
||||
fi, err := os.Stat(filepath.Join(dir, FormatWALFilename(pos.Index)))
|
||||
if err != nil {
|
||||
return pos, err
|
||||
}
|
||||
pos.Offset = fi.Size()
|
||||
|
||||
return pos, nil
|
||||
}
|
||||
|
||||
// snapshot copies the entire database to the replica path.
|
||||
func (r *FileReplica) snapshot(ctx context.Context, generation string, index int) error {
|
||||
// Acquire a read lock on the database during snapshot to prevent checkpoints.
|
||||
tx, err := r.db.db.Begin()
|
||||
if err != nil {
|
||||
return err
|
||||
} else if _, err := tx.ExecContext(ctx, `SELECT COUNT(1) FROM _litestream_seq;`); err != nil {
|
||||
tx.Rollback()
|
||||
return err
|
||||
}
|
||||
defer tx.Rollback()
|
||||
|
||||
// Ignore if we already have a snapshot for the given WAL index.
|
||||
snapshotPath := r.SnapshotPath(generation, index)
|
||||
if _, err := os.Stat(snapshotPath); err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := os.MkdirAll(filepath.Dir(snapshotPath), 0700); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return compressFile(r.db.Path(), snapshotPath)
|
||||
}
|
||||
|
||||
// snapshotN returns the number of snapshots for a generation.
|
||||
func (r *FileReplica) snapshotN(generation string) (int, error) {
|
||||
fis, err := ioutil.ReadDir(r.SnapshotDir(generation))
|
||||
if os.IsNotExist(err) {
|
||||
return 0, nil
|
||||
} else if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
var n int
|
||||
for _, fi := range fis {
|
||||
if _, _, _, err := ParseSnapshotPath(fi.Name()); err == nil {
|
||||
n++
|
||||
}
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
func (r *FileReplica) sync(ctx context.Context, pos Pos) (_ Pos, err error) {
|
||||
// Read all WAL files since the last position.
|
||||
for {
|
||||
if pos, err = r.syncNext(ctx, pos); err == io.EOF {
|
||||
return pos, nil
|
||||
} else if err != nil {
|
||||
return pos, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (r *FileReplica) syncNext(ctx context.Context, pos Pos) (_ Pos, err error) {
|
||||
rd, err := r.db.ShadowWALReader(pos)
|
||||
if err == io.EOF {
|
||||
return pos, err
|
||||
} else if err != nil {
|
||||
return pos, fmt.Errorf("wal reader: %w", err)
|
||||
}
|
||||
defer rd.Close()
|
||||
|
||||
// Create snapshot if no snapshots exist.
|
||||
if n, err := r.snapshotN(rd.Pos().Generation); err != nil {
|
||||
return pos, err
|
||||
} else if n == 0 {
|
||||
if err := r.snapshot(ctx, rd.Pos().Generation, rd.Pos().Index); err != nil {
|
||||
return pos, err
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure parent directory exists for WAL file.
|
||||
filename := r.WALPath(rd.Pos().Generation, rd.Pos().Index)
|
||||
if err := os.MkdirAll(filepath.Dir(filename), 0700); err != nil {
|
||||
return pos, err
|
||||
}
|
||||
|
||||
// Create a temporary file to write into so we don't have partial writes.
|
||||
w, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0600)
|
||||
if err != nil {
|
||||
return pos, err
|
||||
}
|
||||
defer w.Close()
|
||||
|
||||
// Seek, copy & sync WAL contents.
|
||||
if _, err := w.Seek(rd.Pos().Offset, io.SeekStart); err != nil {
|
||||
return pos, err
|
||||
} else if _, err := io.Copy(w, rd); err != nil {
|
||||
return pos, err
|
||||
} else if err := w.Sync(); err != nil {
|
||||
return pos, err
|
||||
} else if err := w.Close(); err != nil {
|
||||
return pos, err
|
||||
}
|
||||
|
||||
// Return ending position of the reader.
|
||||
return rd.Pos(), nil
|
||||
}
|
||||
|
||||
// compress gzips all WAL files before the current one.
|
||||
func (r *FileReplica) compress(ctx context.Context, generation string) error {
|
||||
dir := r.WALDir(generation)
|
||||
filenames, err := filepath.Glob(filepath.Join(dir, "*.wal"))
|
||||
if err != nil {
|
||||
return err
|
||||
} else if len(filenames) <= 1 {
|
||||
return nil // no uncompressed wal files or only one active file
|
||||
}
|
||||
|
||||
// Ensure filenames are sorted & remove the last (active) WAL.
|
||||
sort.Strings(filenames)
|
||||
filenames = filenames[:len(filenames)-1]
|
||||
|
||||
// Compress each file from oldest to newest.
|
||||
for _, filename := range filenames {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return err
|
||||
default:
|
||||
}
|
||||
|
||||
dst := filename + ".gz"
|
||||
if err := compressFile(filename, dst); err != nil {
|
||||
return err
|
||||
} else if err := os.Remove(filename); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// SnapshotIndexAt returns the highest index for a snapshot within a generation
|
||||
// that occurs before timestamp. If timestamp is zero, returns the latest snapshot.
|
||||
func (r *FileReplica) SnapshotIndexAt(ctx context.Context, generation string, timestamp time.Time) (int, error) {
|
||||
fis, err := ioutil.ReadDir(r.SnapshotDir(generation))
|
||||
if os.IsNotExist(err) {
|
||||
return 0, ErrNoSnapshots
|
||||
} else if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
index := -1
|
||||
var max time.Time
|
||||
for _, fi := range fis {
|
||||
// Read index from snapshot filename.
|
||||
idx, _, _, err := ParseSnapshotPath(fi.Name())
|
||||
if err != nil {
|
||||
continue // not a snapshot, skip
|
||||
} else if !timestamp.IsZero() && fi.ModTime().After(timestamp) {
|
||||
continue // after timestamp, skip
|
||||
}
|
||||
|
||||
// Use the snapshot if it is newer.
|
||||
if max.IsZero() || fi.ModTime().After(max) {
|
||||
index, max = idx, fi.ModTime()
|
||||
}
|
||||
}
|
||||
|
||||
if index == -1 {
|
||||
return 0, ErrNoSnapshots
|
||||
}
|
||||
return index, nil
|
||||
}
|
||||
|
||||
// Returns the highest index for a WAL file that occurs before timestamp.
|
||||
// If timestamp is zero, returns the highest WAL index.
|
||||
func (r *FileReplica) WALIndexAt(ctx context.Context, generation string, timestamp time.Time) (int, error) {
|
||||
fis, err := ioutil.ReadDir(r.WALDir(generation))
|
||||
if os.IsNotExist(err) {
|
||||
return 0, nil
|
||||
} else if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
index := -1
|
||||
for _, fi := range fis {
|
||||
// Read index from snapshot filename.
|
||||
idx, _, _, err := ParseWALPath(fi.Name())
|
||||
if err != nil {
|
||||
continue // not a wal file, skip
|
||||
} else if !timestamp.IsZero() && fi.ModTime().After(timestamp) {
|
||||
continue // after timestamp, skip
|
||||
} else if idx < index {
|
||||
continue // earlier index, skip
|
||||
}
|
||||
|
||||
index = idx
|
||||
}
|
||||
|
||||
if index == -1 {
|
||||
return 0, nil
|
||||
}
|
||||
return index, nil
|
||||
}
|
||||
|
||||
// SnapshotReader returns a reader for snapshot data at the given generation/index.
|
||||
// Returns os.ErrNotExist if no matching index is found.
|
||||
func (r *FileReplica) SnapshotReader(ctx context.Context, generation string, index int) (io.ReadCloser, error) {
|
||||
dir := r.SnapshotDir(generation)
|
||||
fis, err := ioutil.ReadDir(dir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, fi := range fis {
|
||||
// Parse index from snapshot filename. Skip if no match.
|
||||
idx, _, ext, err := ParseSnapshotPath(fi.Name())
|
||||
if err != nil || index != idx {
|
||||
continue
|
||||
}
|
||||
|
||||
// Open & return the file handle if uncompressed.
|
||||
f, err := os.Open(filepath.Join(dir, fi.Name()))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
} else if ext == ".snapshot" {
|
||||
return f, nil // not compressed, return as-is.
|
||||
}
|
||||
assert(ext == ".snapshot.gz", "invalid snapshot extension")
|
||||
|
||||
// If compressed, wrap in a gzip reader and return with wrapper to
|
||||
// ensure that the underlying file is closed.
|
||||
r, err := gzip.NewReader(f)
|
||||
if err != nil {
|
||||
f.Close()
|
||||
return nil, err
|
||||
}
|
||||
return &gzipReadCloser{r: r, closer: f}, nil
|
||||
}
|
||||
return nil, os.ErrNotExist
|
||||
}
|
||||
|
||||
// WALReader returns a reader for WAL data at the given index.
|
||||
// Returns os.ErrNotExist if no matching index is found.
|
||||
func (r *FileReplica) WALReader(ctx context.Context, generation string, index int) (io.ReadCloser, error) {
|
||||
filename := r.WALPath(generation, index)
|
||||
|
||||
// Attempt to read uncompressed file first.
|
||||
f, err := os.Open(filename)
|
||||
if err == nil {
|
||||
return f, nil // file exists, return
|
||||
} else if err != nil && !os.IsNotExist(err) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Otherwise read the compressed file. Return error if file doesn't exist.
|
||||
f, err = os.Open(filename + ".gz")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// If compressed, wrap in a gzip reader and return with wrapper to
|
||||
// ensure that the underlying file is closed.
|
||||
rd, err := gzip.NewReader(f)
|
||||
if err != nil {
|
||||
f.Close()
|
||||
return nil, err
|
||||
}
|
||||
return &gzipReadCloser{r: rd, closer: f}, nil
|
||||
}
|
||||
|
||||
// compressFile compresses a file and replaces it with a new file with a .gz extension.
|
||||
func compressFile(src, dst string) error {
|
||||
r, err := os.Open(src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer r.Close()
|
||||
|
||||
w, err := os.Create(dst + ".tmp")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer w.Close()
|
||||
|
||||
gz := gzip.NewWriter(w)
|
||||
defer gz.Close()
|
||||
|
||||
// Copy & compress file contents to temporary file.
|
||||
if _, err := io.Copy(gz, r); err != nil {
|
||||
return err
|
||||
} else if err := gz.Close(); err != nil {
|
||||
return err
|
||||
} else if err := w.Sync(); err != nil {
|
||||
return err
|
||||
} else if err := w.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Move compressed file to final location.
|
||||
return os.Rename(dst+".tmp", dst)
|
||||
}
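For reference, a consumer sketch against the Replica interface defined in the replicator.go removed here; its replacement in replica.go is suppressed above, so treat this as illustrative of the old API only. It restores the latest snapshot of a generation to a local file (SnapshotReader already handles decompression, as shown above).

package example

import (
	"context"
	"io"
	"os"
	"time"

	"github.com/benbjohnson/litestream"
)

// restoreSnapshot copies the most recent snapshot for a generation to dst.
func restoreSnapshot(ctx context.Context, r litestream.Replica, generation, dst string) error {
	// A zero timestamp asks for the latest snapshot index.
	index, err := r.SnapshotIndexAt(ctx, generation, time.Time{})
	if err != nil {
		return err
	}

	rd, err := r.SnapshotReader(ctx, generation, index)
	if err != nil {
		return err
	}
	defer rd.Close()

	f, err := os.Create(dst)
	if err != nil {
		return err
	}
	defer f.Close()

	if _, err := io.Copy(f, rd); err != nil {
		return err
	}
	return f.Sync()
}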