Compare commits: fix-stream...v0.4.0-alp (1 commit)

| Author | SHA1 | Date |
|---|---|---|
| | 6eb290720b | |
@@ -9,6 +9,7 @@ import (
"log"
"net/url"
"os"
"os/signal"
"os/user"
"path"
"path/filepath"

@@ -86,7 +87,8 @@ func (m *Main) Run(ctx context.Context, args []string) (err error) {

// Setup signal handler.
ctx, cancel := context.WithCancel(ctx)
signalCh := signalChan()
signalCh := make(chan os.Signal, 1)
signal.Notify(signalCh, notifySignals...)

if err := c.Run(ctx); err != nil {
return err

@@ -94,6 +96,8 @@ func (m *Main) Run(ctx context.Context, args []string) (err error) {

// Wait for signal to stop program.
select {
case <-ctx.Done():
fmt.Println("context done, litestream shutting down")
case err = <-c.execCh:
cancel()
fmt.Println("subprocess exited, litestream shutting down")

@@ -5,7 +5,6 @@ package main
import (
"context"
"os"
"os/signal"
"syscall"
)

@@ -19,8 +18,4 @@ func runWindowsService(ctx context.Context) error {
panic("cannot run windows service as unix process")
}

func signalChan() <-chan os.Signal {
ch := make(chan os.Signal, 2)
signal.Notify(ch, syscall.SIGINT, syscall.SIGTERM)
return ch
}
var notifySignals = []os.Signal{syscall.SIGINT, syscall.SIGTERM}

@@ -2,16 +2,22 @@ package main_test

import (
"io/ioutil"
"log"
"os"
"path/filepath"
"testing"

"github.com/benbjohnson/litestream"
main "github.com/benbjohnson/litestream/cmd/litestream"
"github.com/benbjohnson/litestream/file"
"github.com/benbjohnson/litestream/gcs"
"github.com/benbjohnson/litestream/s3"
)

func init() {
litestream.LogFlags = log.Lmsgprefix | log.Ldate | log.Ltime | log.Lmicroseconds | log.LUTC | log.Lshortfile
}

func TestReadConfigFile(t *testing.T) {
// Ensure global AWS settings are propagated down to replica configurations.
t.Run("PropagateGlobalSettings", func(t *testing.T) {

@@ -7,7 +7,6 @@ import (
"io"
"log"
"os"
"os/signal"

"golang.org/x/sys/windows"
"golang.org/x/sys/windows/svc"

@@ -105,8 +104,4 @@ func (w *eventlogWriter) Write(p []byte) (n int, err error) {
return 0, elog.Info(1, string(p))
}

func signalChan() <-chan os.Signal {
ch := make(chan os.Signal, 1)
signal.Notify(ch, os.Interrupt)
return ch
}
var notifySignals = []os.Signal{os.Interrupt}
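The two hunks above drop the per-platform signalChan() helpers and replace them with per-platform notifySignals variables that a single signal.Notify call in Run consumes (see the first hunk). A minimal, self-contained sketch of that pattern, assuming a single-file program instead of the build-tagged files used here:

```go
package main

import (
	"context"
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

// notifySignals would normally be declared once per platform via build tags:
// SIGINT/SIGTERM on Unix, os.Interrupt on Windows (as in the hunks above).
var notifySignals = []os.Signal{syscall.SIGINT, syscall.SIGTERM}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Single signal-handling path shared by all platforms.
	signalCh := make(chan os.Signal, 1)
	signal.Notify(signalCh, notifySignals...)

	select {
	case <-signalCh:
		fmt.Println("signal received, shutting down")
	case <-ctx.Done():
	}
}
```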
@@ -42,7 +42,6 @@ func NewReplicateCommand() *ReplicateCommand {
func (c *ReplicateCommand) ParseFlags(ctx context.Context, args []string) (err error) {
fs := flag.NewFlagSet("litestream-replicate", flag.ContinueOnError)
execFlag := fs.String("exec", "", "execute subcommand")
tracePath := fs.String("trace", "", "trace path")
configPath, noExpandEnv := registerConfigFlag(fs)
fs.Usage = c.Usage
if err := fs.Parse(args); err != nil {

@@ -80,16 +79,6 @@ func (c *ReplicateCommand) ParseFlags(ctx context.Context, args []string) (err e
c.Config.Exec = *execFlag
}

// Enable trace logging.
if *tracePath != "" {
f, err := os.Create(*tracePath)
if err != nil {
return err
}
defer f.Close()
litestream.Tracef = log.New(f, "", log.LstdFlags|log.Lmicroseconds|log.LUTC|log.Lshortfile).Printf
}

return nil
}

@@ -215,8 +204,5 @@ Arguments:
-no-expand-env
Disables environment variable expansion in configuration file.

-trace PATH
Write verbose trace logging to PATH.

`[1:], DefaultConfigPath())
}
cmd/litestream/replicate_test.go (new file, 135 lines)
@@ -0,0 +1,135 @@
package main_test

import (
"context"
"database/sql"
"errors"
"fmt"
"hash/crc64"
"io"
"os"
"path/filepath"
"runtime"
"testing"
"time"

main "github.com/benbjohnson/litestream/cmd/litestream"
"golang.org/x/sync/errgroup"
)

func TestReplicateCommand(t *testing.T) {
if testing.Short() {
t.Skip("long running test, skipping")
} else if runtime.GOOS != "linux" {
t.Skip("must run system tests on Linux, skipping")
}

const writeTime = 10 * time.Second

dir := t.TempDir()
configPath := filepath.Join(dir, "litestream.yml")
dbPath := filepath.Join(dir, "db")
restorePath := filepath.Join(dir, "restored")
replicaPath := filepath.Join(dir, "replica")

if err := os.WriteFile(configPath, []byte(`
dbs:
- path: `+dbPath+`
replicas:
- path: `+replicaPath+`
`), 0666); err != nil {
t.Fatal(err)
}

// Generate data into SQLite database from separate goroutine.
g, ctx := errgroup.WithContext(context.Background())
mainctx, cancel := context.WithCancel(ctx)
g.Go(func() error {
defer cancel()

db, err := sql.Open("sqlite3", dbPath)
if err != nil {
return err
}
defer db.Close()

if _, err := db.ExecContext(ctx, `PRAGMA journal_mode = WAL`); err != nil {
return fmt.Errorf("cannot enable wal: %w", err)
} else if _, err := db.ExecContext(ctx, `PRAGMA synchronous = NORMAL`); err != nil {
return fmt.Errorf("cannot enable wal: %w", err)
} else if _, err := db.ExecContext(ctx, `CREATE TABLE t (id INTEGER PRIMARY KEY)`); err != nil {
return fmt.Errorf("cannot create table: %w", err)
}

ticker := time.NewTicker(1 * time.Millisecond)
defer ticker.Stop()
timer := time.NewTimer(writeTime)
defer timer.Stop()

for i := 0; ; i++ {
select {
case <-ctx.Done():
return ctx.Err()
case <-timer.C:
return nil
case <-ticker.C:
if _, err := db.ExecContext(ctx, `INSERT INTO t (id) VALUES (?);`, i); err != nil {
return fmt.Errorf("cannot insert: i=%d err=%w", i, err)
}
}
}
})

// Replicate database unless the context is canceled.
g.Go(func() error {
return main.NewMain().Run(mainctx, []string{"replicate", "-config", configPath})
})

if err := g.Wait(); err != nil {
t.Fatal(err)
}

// Checkpoint database.
mustCheckpoint(t, dbPath)
chksum0 := mustChecksum(t, dbPath)

// Restore to another path.
if err := main.NewMain().Run(context.Background(), []string{"restore", "-config", configPath, "-o", restorePath, dbPath}); err != nil && !errors.Is(err, context.Canceled) {
t.Fatal(err)
}

// Verify contents match.
if chksum1 := mustChecksum(t, restorePath); chksum0 != chksum1 {
t.Fatal("restore mismatch")
}
}

func mustCheckpoint(tb testing.TB, path string) {
tb.Helper()

db, err := sql.Open("sqlite3", path)
if err != nil {
tb.Fatal(err)
}
defer db.Close()

if _, err := db.Exec(`PRAGMA wal_checkpoint(TRUNCATE)`); err != nil {
tb.Fatal(err)
}
}

func mustChecksum(tb testing.TB, path string) uint64 {
tb.Helper()

f, err := os.Open(path)
if err != nil {
tb.Fatal(err)
}
defer f.Close()

h := crc64.New(crc64.MakeTable(crc64.ISO))
if _, err := io.Copy(h, f); err != nil {
tb.Fatal(err)
}
return h.Sum64()
}
db_test.go (169 lines changed)
@@ -3,7 +3,6 @@ package litestream_test
import (
"context"
"database/sql"
"io/ioutil"
"os"
"path/filepath"
"strings"

@@ -30,13 +29,13 @@ func TestDB_WALPath(t *testing.T) {
func TestDB_MetaPath(t *testing.T) {
t.Run("Absolute", func(t *testing.T) {
db := litestream.NewDB("/tmp/db")
if got, want := db.MetaPath(), `/tmp/.db-litestream`; got != want {
if got, want := db.MetaPath(), `/tmp/db-litestream`; got != want {
t.Fatalf("MetaPath()=%v, want %v", got, want)
}
})
t.Run("Relative", func(t *testing.T) {
db := litestream.NewDB("db")
if got, want := db.MetaPath(), `.db-litestream`; got != want {
if got, want := db.MetaPath(), `db-litestream`; got != want {
t.Fatalf("MetaPath()=%v, want %v", got, want)
}
})

@@ -44,32 +43,25 @@ func TestDB_MetaPath(t *testing.T) {

func TestDB_GenerationNamePath(t *testing.T) {
db := litestream.NewDB("/tmp/db")
if got, want := db.GenerationNamePath(), `/tmp/.db-litestream/generation`; got != want {
if got, want := db.GenerationNamePath(), `/tmp/db-litestream/generation`; got != want {
t.Fatalf("GenerationNamePath()=%v, want %v", got, want)
}
}

func TestDB_GenerationPath(t *testing.T) {
db := litestream.NewDB("/tmp/db")
if got, want := db.GenerationPath("xxxx"), `/tmp/.db-litestream/generations/xxxx`; got != want {
if got, want := db.GenerationPath("xxxx"), `/tmp/db-litestream/generations/xxxx`; got != want {
t.Fatalf("GenerationPath()=%v, want %v", got, want)
}
}

func TestDB_ShadowWALDir(t *testing.T) {
db := litestream.NewDB("/tmp/db")
if got, want := db.ShadowWALDir("xxxx"), `/tmp/.db-litestream/generations/xxxx/wal`; got != want {
if got, want := db.ShadowWALDir("xxxx"), `/tmp/db-litestream/generations/xxxx/wal`; got != want {
t.Fatalf("ShadowWALDir()=%v, want %v", got, want)
}
}

func TestDB_ShadowWALPath(t *testing.T) {
db := litestream.NewDB("/tmp/db")
if got, want := db.ShadowWALPath("xxxx", 1000), `/tmp/.db-litestream/generations/xxxx/wal/000003e8.wal`; got != want {
t.Fatalf("ShadowWALPath()=%v, want %v", got, want)
}
}

// Ensure we can check the last modified time of the real database and its WAL.
func TestDB_UpdatedAt(t *testing.T) {
t.Run("ErrNotExist", func(t *testing.T) {

@@ -195,9 +187,7 @@ func TestDB_Sync(t *testing.T) {
}

// Ensure position now available.
if pos, err := db.Pos(); err != nil {
t.Fatal(err)
} else if pos.Generation == "" {
if pos := db.Pos(); pos.Generation == "" {
t.Fatal("expected generation")
} else if got, want := pos.Index, 0; got != want {
t.Fatalf("pos.Index=%v, want %v", got, want)

@@ -221,10 +211,7 @@ func TestDB_Sync(t *testing.T) {
t.Fatal(err)
}

pos0, err := db.Pos()
if err != nil {
t.Fatal(err)
}
pos0 := db.Pos()

// Insert into table.
if _, err := sqldb.Exec(`INSERT INTO foo (bar) VALUES ('baz');`); err != nil {

@@ -234,9 +221,7 @@ func TestDB_Sync(t *testing.T) {
// Sync to ensure position moves forward one page.
if err := db.Sync(context.Background()); err != nil {
t.Fatal(err)
} else if pos1, err := db.Pos(); err != nil {
t.Fatal(err)
} else if pos0.Generation != pos1.Generation {
} else if pos1 := db.Pos(); pos0.Generation != pos1.Generation {
t.Fatal("expected the same generation")
} else if got, want := pos1.Index, pos0.Index; got != want {
t.Fatalf("Index=%v, want %v", got, want)

@@ -256,10 +241,7 @@ func TestDB_Sync(t *testing.T) {
}

// Obtain initial position.
pos0, err := db.Pos()
if err != nil {
t.Fatal(err)
}
pos0 := db.Pos()

// Checkpoint & fully close which should close WAL file.
if err := db.Checkpoint(context.Background(), litestream.CheckpointModeTruncate); err != nil {

@@ -285,9 +267,7 @@ func TestDB_Sync(t *testing.T) {
}

// Obtain initial position.
if pos1, err := db.Pos(); err != nil {
t.Fatal(err)
} else if pos0.Generation == pos1.Generation {
if pos1 := db.Pos(); pos0.Generation == pos1.Generation {
t.Fatal("expected new generation after truncation")
}
})

@@ -308,10 +288,7 @@ func TestDB_Sync(t *testing.T) {
}

// Obtain initial position.
pos0, err := db.Pos()
if err != nil {
t.Fatal(err)
}
pos0 := db.Pos()

// Fully close which should close WAL file.
if err := db.Close(); err != nil {

@@ -344,13 +321,13 @@ func TestDB_Sync(t *testing.T) {
}

// Obtain initial position.
if pos1, err := db.Pos(); err != nil {
t.Fatal(err)
} else if pos0.Generation == pos1.Generation {
if pos1 := db.Pos(); pos0.Generation == pos1.Generation {
t.Fatal("expected new generation after truncation")
}
})

// TODO: Fix test to check for header mismatch
/*
// Ensure DB can handle a mismatched header-only and start new generation.
t.Run("WALHeaderMismatch", func(t *testing.T) {
db, sqldb := MustOpenDBs(t)

@@ -364,19 +341,17 @@ func TestDB_Sync(t *testing.T) {
}

// Grab initial position & close.
pos0, err := db.Pos()
if err != nil {
t.Fatal(err)
} else if err := db.Close(); err != nil {
pos0 := db.Pos()
if err := db.Close(); err != nil {
t.Fatal(err)
}

// Read existing file, update header checksum, and write back only header
// to simulate a header with a mismatched checksum.
shadowWALPath := db.ShadowWALPath(pos0.Generation, pos0.Index)
if buf, err := ioutil.ReadFile(shadowWALPath); err != nil {
if buf, err := os.ReadFile(shadowWALPath); err != nil {
t.Fatal(err)
} else if err := ioutil.WriteFile(shadowWALPath, append(buf[:litestream.WALHeaderSize-8], 0, 0, 0, 0, 0, 0, 0, 0), 0600); err != nil {
} else if err := os.WriteFile(shadowWALPath, append(buf[:litestream.WALHeaderSize-8], 0, 0, 0, 0, 0, 0, 0, 0), 0600); err != nil {
t.Fatal(err)
}

@@ -394,98 +369,10 @@ func TestDB_Sync(t *testing.T) {
t.Fatal("expected new generation")
}
})
*/

// Ensure DB can handle partial shadow WAL header write.
t.Run("PartialShadowWALHeader", func(t *testing.T) {
db, sqldb := MustOpenDBs(t)
defer MustCloseDBs(t, db, sqldb)

// Execute a query to force a write to the WAL and then sync.
if _, err := sqldb.Exec(`CREATE TABLE foo (bar TEXT);`); err != nil {
t.Fatal(err)
} else if err := db.Sync(context.Background()); err != nil {
t.Fatal(err)
}

pos0, err := db.Pos()
if err != nil {
t.Fatal(err)
}

// Close & truncate shadow WAL to simulate a partial header write.
if err := db.Close(); err != nil {
t.Fatal(err)
} else if err := os.Truncate(db.ShadowWALPath(pos0.Generation, pos0.Index), litestream.WALHeaderSize-1); err != nil {
t.Fatal(err)
}

// Reopen managed database & ensure sync will still work.
db = MustOpenDBAt(t, db.Path())
defer MustCloseDB(t, db)
if err := db.Sync(context.Background()); err != nil {
t.Fatal(err)
}

// Verify a new generation was started.
if pos1, err := db.Pos(); err != nil {
t.Fatal(err)
} else if pos0.Generation == pos1.Generation {
t.Fatal("expected new generation")
}
})

// Ensure DB can handle partial shadow WAL writes.
t.Run("PartialShadowWALFrame", func(t *testing.T) {
db, sqldb := MustOpenDBs(t)
defer MustCloseDBs(t, db, sqldb)

// Execute a query to force a write to the WAL and then sync.
if _, err := sqldb.Exec(`CREATE TABLE foo (bar TEXT);`); err != nil {
t.Fatal(err)
} else if err := db.Sync(context.Background()); err != nil {
t.Fatal(err)
}

pos0, err := db.Pos()
if err != nil {
t.Fatal(err)
}

// Obtain current shadow WAL size.
fi, err := os.Stat(db.ShadowWALPath(pos0.Generation, pos0.Index))
if err != nil {
t.Fatal(err)
}

// Close & truncate shadow WAL to simulate a partial frame write.
if err := db.Close(); err != nil {
t.Fatal(err)
} else if err := os.Truncate(db.ShadowWALPath(pos0.Generation, pos0.Index), fi.Size()-1); err != nil {
t.Fatal(err)
}

// Reopen managed database & ensure sync will still work.
db = MustOpenDBAt(t, db.Path())
defer MustCloseDB(t, db)
if err := db.Sync(context.Background()); err != nil {
t.Fatal(err)
}

// Verify same generation is kept.
if pos1, err := db.Pos(); err != nil {
t.Fatal(err)
} else if got, want := pos1, pos0; got != want {
t.Fatalf("Pos()=%s want %s", got, want)
}

// Ensure shadow WAL has recovered.
if fi0, err := os.Stat(db.ShadowWALPath(pos0.Generation, pos0.Index)); err != nil {
t.Fatal(err)
} else if got, want := fi0.Size(), fi.Size(); got != want {
t.Fatalf("Size()=%v, want %v", got, want)
}
})

// TODO: Fix test for segmented shadow WAL.
/*
// Ensure DB can handle a generation directory with a missing shadow WAL.
t.Run("NoShadowWAL", func(t *testing.T) {
db, sqldb := MustOpenDBs(t)

@@ -498,10 +385,7 @@ func TestDB_Sync(t *testing.T) {
t.Fatal(err)
}

pos0, err := db.Pos()
if err != nil {
t.Fatal(err)
}
pos0 := db.Pos()

// Close & delete shadow WAL to simulate dir created but not WAL.
if err := db.Close(); err != nil {

@@ -528,6 +412,7 @@ func TestDB_Sync(t *testing.T) {
t.Fatalf("Offset=%v want %v", got, want)
}
})
*/

// Ensure DB checkpoints after minimum number of pages.
t.Run("MinCheckpointPageN", func(t *testing.T) {

@@ -554,9 +439,7 @@ func TestDB_Sync(t *testing.T) {
}

// Ensure position is now on the second index.
if pos, err := db.Pos(); err != nil {
t.Fatal(err)
} else if got, want := pos.Index, 1; got != want {
if got, want := db.Pos().Index, 1; got != want {
t.Fatalf("Index=%v, want %v", got, want)
}
})

@@ -584,9 +467,7 @@ func TestDB_Sync(t *testing.T) {
}

// Ensure position is now on the second index.
if pos, err := db.Pos(); err != nil {
t.Fatal(err)
} else if got, want := pos.Index, 1; got != want {
if got, want := db.Pos().Index, 1; got != want {
t.Fatalf("Index=%v, want %v", got, want)
}
})
@@ -408,11 +408,6 @@ func (itr *walSegmentIterator) Next() bool {
}
itr.infos = itr.infos[:0] // otherwise clear infos

// Move to the next index unless this is the first time initializing.
if itr.infos != nil && len(itr.indexes) > 0 {
itr.indexes = itr.indexes[1:]
}

// If no indexes remain, stop iteration.
if len(itr.indexes) == 0 {
return false

@@ -420,6 +415,7 @@ func (itr *walSegmentIterator) Next() bool {

// Read segments into a cache for the current index.
index := itr.indexes[0]
itr.indexes = itr.indexes[1:]
f, err := os.Open(filepath.Join(itr.dir, litestream.FormatIndex(index)))
if err != nil {
itr.err = err

@@ -452,6 +448,9 @@ func (itr *walSegmentIterator) Next() bool {
})
}

// Ensure segments are sorted within index.
sort.Sort(litestream.WALSegmentInfoSlice(itr.infos))

if len(itr.infos) > 0 {
return true
}
@@ -133,91 +133,3 @@ func TestReplicaClient_WALSegmentPath(t *testing.T) {
}
})
}

/*
func TestReplica_Sync(t *testing.T) {
// Ensure replica can successfully sync after DB has sync'd.
t.Run("InitialSync", func(t *testing.T) {
db, sqldb := MustOpenDBs(t)
defer MustCloseDBs(t, db, sqldb)

r := litestream.NewReplica(db, "", file.NewReplicaClient(t.TempDir()))
r.MonitorEnabled = false
db.Replicas = []*litestream.Replica{r}

// Sync database & then sync replica.
if err := db.Sync(context.Background()); err != nil {
t.Fatal(err)
} else if err := r.Sync(context.Background()); err != nil {
t.Fatal(err)
}

// Ensure posistions match.
if want, err := db.Pos(); err != nil {
t.Fatal(err)
} else if got, err := r.Pos(context.Background()); err != nil {
t.Fatal(err)
} else if got != want {
t.Fatalf("Pos()=%v, want %v", got, want)
}
})

// Ensure replica can successfully sync multiple times.
t.Run("MultiSync", func(t *testing.T) {
db, sqldb := MustOpenDBs(t)
defer MustCloseDBs(t, db, sqldb)

r := litestream.NewReplica(db, "", file.NewReplicaClient(t.TempDir()))
r.MonitorEnabled = false
db.Replicas = []*litestream.Replica{r}

if _, err := sqldb.Exec(`CREATE TABLE foo (bar TEXT);`); err != nil {
t.Fatal(err)
}

// Write to the database multiple times and sync after each write.
for i, n := 0, db.MinCheckpointPageN*2; i < n; i++ {
if _, err := sqldb.Exec(`INSERT INTO foo (bar) VALUES ('baz')`); err != nil {
t.Fatal(err)
}

// Sync periodically.
if i%100 == 0 || i == n-1 {
if err := db.Sync(context.Background()); err != nil {
t.Fatal(err)
} else if err := r.Sync(context.Background()); err != nil {
t.Fatal(err)
}
}
}

// Ensure posistions match.
pos, err := db.Pos()
if err != nil {
t.Fatal(err)
} else if got, want := pos.Index, 2; got != want {
t.Fatalf("Index=%v, want %v", got, want)
}

if want, err := r.Pos(context.Background()); err != nil {
t.Fatal(err)
} else if got := pos; got != want {
t.Fatalf("Pos()=%v, want %v", got, want)
}
})

// Ensure replica returns an error if there is no generation available from the DB.
t.Run("ErrNoGeneration", func(t *testing.T) {
db, sqldb := MustOpenDBs(t)
defer MustCloseDBs(t, db, sqldb)

r := litestream.NewReplica(db, "", file.NewReplicaClient(t.TempDir()))
r.MonitorEnabled = false
db.Replicas = []*litestream.Replica{r}

if err := r.Sync(context.Background()); err == nil || err.Error() != `no generation, waiting for data` {
t.Fatal(err)
}
})
}
*/
@@ -39,6 +39,39 @@ func (r *ReadCloser) Close() error {
return r.c.Close()
}

// MultiReadCloser is a logical concatenation of io.ReadCloser.
// It works like io.MultiReader except all objects are closed when Close() is called.
type MultiReadCloser struct {
mr io.Reader
closers []io.Closer
}

// NewMultiReadCloser returns a new instance of MultiReadCloser.
func NewMultiReadCloser(a []io.ReadCloser) *MultiReadCloser {
readers := make([]io.Reader, len(a))
closers := make([]io.Closer, len(a))
for i, rc := range a {
readers[i] = rc
closers[i] = rc
}
return &MultiReadCloser{mr: io.MultiReader(readers...), closers: closers}
}

// Read reads from the next available reader.
func (mrc *MultiReadCloser) Read(p []byte) (n int, err error) {
return mrc.mr.Read(p)
}

// Close closes all underlying ReadClosers and returns first error encountered.
func (mrc *MultiReadCloser) Close() (err error) {
for _, c := range mrc.closers {
if e := c.Close(); e != nil && err == nil {
err = e
}
}
return err
}
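A usage sketch for the new MultiReadCloser: several io.ReadClosers are concatenated into one stream and released with a single Close call. The helper below is illustrative and assumes it is compiled into the same package that declares MultiReadCloser; the package name is an assumption.

```go
package internal // package name assumed; whichever package declares MultiReadCloser

import (
	"io"
	"os"
)

// concatFiles reads the given files as one continuous stream and closes every
// file through the single MultiReadCloser.Close call. Illustrative helper.
func concatFiles(paths []string) ([]byte, error) {
	rcs := make([]io.ReadCloser, 0, len(paths))
	for _, path := range paths {
		f, err := os.Open(path)
		if err != nil {
			// Close any files opened so far before returning.
			NewMultiReadCloser(rcs).Close()
			return nil, err
		}
		rcs = append(rcs, f)
	}

	mrc := NewMultiReadCloser(rcs)
	defer mrc.Close()

	return io.ReadAll(mrc)
}
```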

// ReadCounter wraps an io.Reader and counts the total number of bytes read.
type ReadCounter struct {
r io.Reader
@@ -40,6 +40,14 @@ var (
ErrChecksumMismatch = errors.New("invalid replica, checksum mismatch")
)

var (
// LogWriter is the destination writer for all logging.
LogWriter = os.Stderr

// LogFlags are the flags passed to log.New().
LogFlags = 0
)
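These new package-level variables feed the per-replica loggers created in NewReplica (see the replica.go changes below); the cmd/litestream test init shown earlier already overrides LogFlags. A small sketch of configuring both from calling code follows; the database path and flag combination are illustrative.

```go
package main

import (
	"log"
	"os"

	"github.com/benbjohnson/litestream"
)

func main() {
	// Route litestream's replica logging to stdout and add timestamps,
	// roughly mirroring the flags set in the cmd/litestream test init above.
	litestream.LogWriter = os.Stdout
	litestream.LogFlags = log.Lmsgprefix | log.Ldate | log.Ltime | log.LUTC | log.Lshortfile

	// Replicas created after this point pick up the new writer and flags
	// through log.New(LogWriter, prefix, LogFlags) in NewReplica.
	db := litestream.NewDB("/path/to/db")
	_ = litestream.NewReplica(db, "file-replica")
}
```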

// SnapshotIterator represents an iterator over a collection of snapshot metadata.
type SnapshotIterator interface {
io.Closer

@@ -291,6 +299,26 @@ func (p Pos) Truncate() Pos {
return Pos{Generation: p.Generation, Index: p.Index}
}

// ComparePos returns -1 if a is less than b, 1 if a is greater than b, and
// returns 0 if a and b are equal. Only index & offset are compared.
// Returns an error if generations are not equal.
func ComparePos(a, b Pos) (int, error) {
if a.Generation != b.Generation {
return 0, fmt.Errorf("generation mismatch")
}

if a.Index < b.Index {
return -1, nil
} else if a.Index > b.Index {
return 1, nil
} else if a.Offset < b.Offset {
return -1, nil
} else if a.Offset > b.Offset {
return 1, nil
}
return 0, nil
}
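A short sketch of how ComparePos is used: syncWAL in the replica.go changes below skips any WAL segment whose position the replica has already passed, and treats a generation mismatch as an error. The generation string and offsets here are illustrative values.

```go
package main

import (
	"fmt"
	"log"

	"github.com/benbjohnson/litestream"
)

func main() {
	// Current replica position vs. the position of a candidate WAL segment.
	// Generation and offsets are illustrative.
	replicaPos := litestream.Pos{Generation: "xxxx", Index: 2, Offset: 4096}
	segmentPos := litestream.Pos{Generation: "xxxx", Index: 1, Offset: 0}

	cmp, err := litestream.ComparePos(replicaPos, segmentPos)
	if err != nil {
		// Generations differ; the caller has to resynchronize from a snapshot.
		log.Fatal(err)
	}
	if cmp == 1 {
		// Replica is already past this segment, so replication can skip it,
		// mirroring the check in Replica.syncWAL below.
		fmt.Println("segment already replicated")
	}
}
```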

// Checksum computes a running SQLite checksum over a byte slice.
func Checksum(bo binary.ByteOrder, s0, s1 uint32, b []byte) (uint32, uint32) {
assert(len(b)%8 == 0, "misaligned checksum byte slice")
replica.go (193 lines changed)
@@ -2,7 +2,6 @@ package litestream

import (
"context"
"encoding/binary"
"fmt"
"hash/crc64"
"io"

@@ -67,6 +66,8 @@ type Replica struct {
// If true, replica monitors database for changes automatically.
// Set to false if replica is being used synchronously (such as in tests).
MonitorEnabled bool

Logger *log.Logger
}

func NewReplica(db *DB, name string) *Replica {

@@ -81,6 +82,12 @@ func NewReplica(db *DB, name string) *Replica {
MonitorEnabled: true,
}

prefix := fmt.Sprintf("%s: ", r.Name())
if db != nil {
prefix = fmt.Sprintf("%s(%s): ", db.Path(), r.Name())
}
r.Logger = log.New(LogWriter, prefix, LogFlags)

return r
}

@@ -149,16 +156,12 @@ func (r *Replica) Sync(ctx context.Context) (err error) {
}()

// Find current position of database.
dpos, err := r.db.Pos()
if err != nil {
return fmt.Errorf("cannot determine current generation: %w", err)
} else if dpos.IsZero() {
dpos := r.db.Pos()
if dpos.IsZero() {
return fmt.Errorf("no generation, waiting for data")
}
generation := dpos.Generation

Tracef("%s(%s): replica sync: db.pos=%s", r.db.Path(), r.Name(), dpos)

// Create snapshot if no snapshots exist for generation.
snapshotN, err := r.snapshotN(generation)
if err != nil {

@@ -180,117 +183,140 @@ func (r *Replica) Sync(ctx context.Context) (err error) {
return fmt.Errorf("cannot determine replica position: %s", err)
}

Tracef("%s(%s): replica sync: calc new pos: %s", r.db.Path(), r.Name(), pos)
r.mu.Lock()
r.pos = pos
r.mu.Unlock()
}

// Read all WAL files since the last position.
for {
if err = r.syncWAL(ctx); err == io.EOF {
break
} else if err != nil {
if err = r.syncWAL(ctx); err != nil {
return err
}
}

return nil
}

func (r *Replica) syncWAL(ctx context.Context) (err error) {
rd, err := r.db.ShadowWALReader(r.Pos())
if err == io.EOF {
pos := r.Pos()

itr, err := r.db.WALSegments(ctx, pos.Generation)
if err != nil {
return err
} else if err != nil {
return fmt.Errorf("replica wal reader: %w", err)
}
defer rd.Close()
defer itr.Close()

// Group segments by index.
var segments [][]WALSegmentInfo
for itr.Next() {
info := itr.WALSegment()
if cmp, err := ComparePos(pos, info.Pos()); err != nil {
return fmt.Errorf("compare pos: %w", err)
} else if cmp == 1 {
continue // already processed, skip
}

// Start a new chunk if index has changed.
if len(segments) == 0 || segments[len(segments)-1][0].Index != info.Index {
segments = append(segments, []WALSegmentInfo{info})
continue
}

// Add segment to the end of the current index, if matching.
segments[len(segments)-1] = append(segments[len(segments)-1], info)
}

// Write out segments to replica by index so they can be combined.
for i := range segments {
if err := r.writeIndexSegments(ctx, segments[i]); err != nil {
return fmt.Errorf("write index segments: index=%d err=%w", segments[i][0].Index, err)
}
}

return nil
}

func (r *Replica) writeIndexSegments(ctx context.Context, segments []WALSegmentInfo) (err error) {
assert(len(segments) > 0, "segments required for replication")

// First segment position must be equal to last replica position or
// the start of the next index.
if pos := r.Pos(); pos != segments[0].Pos() {
nextIndexPos := pos.Truncate()
nextIndexPos.Index++
if nextIndexPos != segments[0].Pos() {
return fmt.Errorf("replica skipped position: replica=%s initial=%s", pos, segments[0].Pos())
}
}

pos := segments[0].Pos()
initialPos := pos

// Copy shadow WAL to client write via io.Pipe().
pr, pw := io.Pipe()
defer func() { _ = pw.CloseWithError(err) }()

// Obtain initial position from shadow reader.
// It may have moved to the next index if previous position was at the end.
pos := rd.Pos()

// Copy through pipe into client from the starting position.
var g errgroup.Group
g.Go(func() error {
_, err := r.Client.WriteWALSegment(ctx, pos, pr)
_, err := r.Client.WriteWALSegment(ctx, initialPos, pr)
return err
})

// Wrap writer to LZ4 compress.
zw := lz4.NewWriter(pw)

// Track total WAL bytes written to replica client.
walBytesCounter := replicaWALBytesCounterVec.WithLabelValues(r.db.Path(), r.Name())

// Copy header if at offset zero.
var psalt uint64 // previous salt value
if pos := rd.Pos(); pos.Offset == 0 {
buf := make([]byte, WALHeaderSize)
if _, err := io.ReadFull(rd, buf); err != nil {
return err
// Write each segment out to the replica.
for _, info := range segments {
if err := func() error {
// Ensure segments are in order and no bytes are skipped.
if pos != info.Pos() {
return fmt.Errorf("non-contiguous segment: expected=%s current=%s", pos, info.Pos())
}

psalt = binary.BigEndian.Uint64(buf[16:24])

n, err := zw.Write(buf)
rc, err := r.db.WALSegmentReader(ctx, info.Pos())
if err != nil {
return err
}
walBytesCounter.Add(float64(n))
}
defer rc.Close()

// Copy frames.
for {
pos := rd.Pos()
assert(pos.Offset == frameAlign(pos.Offset, r.db.pageSize), "shadow wal reader not frame aligned")

buf := make([]byte, WALFrameHeaderSize+r.db.pageSize)
if _, err := io.ReadFull(rd, buf); err == io.EOF {
break
} else if err != nil {
return err
}

// Verify salt matches the previous frame/header read.
salt := binary.BigEndian.Uint64(buf[8:16])
if psalt != 0 && psalt != salt {
return fmt.Errorf("replica salt mismatch: %s", pos.String())
}
psalt = salt

n, err := zw.Write(buf)
n, err := io.Copy(zw, lz4.NewReader(rc))
if err != nil {
return err
}
walBytesCounter.Add(float64(n))
} else if err := rc.Close(); err != nil {
return err
}

// Flush LZ4 writer and close pipe.
// Track last position written.
pos = info.Pos()
pos.Offset += n

return nil
}(); err != nil {
return fmt.Errorf("wal segment: pos=%s err=%w", info.Pos(), err)
}
}

// Flush LZ4 writer, close pipe, and wait for write to finish.
if err := zw.Close(); err != nil {
return err
} else if err := pw.Close(); err != nil {
return err
}

// Wait for client to finish write.
if err := g.Wait(); err != nil {
return fmt.Errorf("client write: %w", err)
} else if err := g.Wait(); err != nil {
return err
}

// Save last replicated position.
r.mu.Lock()
r.pos = rd.Pos()
r.pos = pos
r.mu.Unlock()

// Track current position
replicaWALIndexGaugeVec.WithLabelValues(r.db.Path(), r.Name()).Set(float64(rd.Pos().Index))
replicaWALOffsetGaugeVec.WithLabelValues(r.db.Path(), r.Name()).Set(float64(rd.Pos().Offset))
replicaWALBytesCounterVec.WithLabelValues(r.db.Path(), r.Name()).Add(float64(pos.Offset - initialPos.Offset))

// Track total WAL bytes written to replica client.
replicaWALIndexGaugeVec.WithLabelValues(r.db.Path(), r.Name()).Set(float64(pos.Index))
replicaWALOffsetGaugeVec.WithLabelValues(r.db.Path(), r.Name()).Set(float64(pos.Offset))

r.Logger.Printf("wal segment written: %s sz=%d", initialPos, pos.Offset-initialPos.Offset)

return nil
}
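The new writeIndexSegments above streams every segment of one index through an io.Pipe: an errgroup goroutine uploads from the read end while the loop copies each stored segment (LZ4-decoded and re-encoded) into the write end. Reduced to a minimal, dependency-light sketch, with an in-memory buffer standing in for the replica client and the compression step omitted:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"

	"golang.org/x/sync/errgroup"
)

func main() {
	var uploaded bytes.Buffer // stands in for the replica client's destination

	pr, pw := io.Pipe()

	// Consumer: uploads whatever the producer writes into the pipe.
	var g errgroup.Group
	g.Go(func() error {
		_, err := io.Copy(&uploaded, pr)
		return err
	})

	// Producer: copy each "segment" into the pipe in order.
	var err error
	for _, segment := range []string{"segment 0 ", "segment 1"} {
		if _, err = io.Copy(pw, strings.NewReader(segment)); err != nil {
			break
		}
	}

	// Closing the write end (with any error) lets the consumer finish.
	pw.CloseWithError(err)

	if err := g.Wait(); err != nil {
		fmt.Println("upload failed:", err)
		return
	}
	fmt.Printf("uploaded %d bytes\n", uploaded.Len())
}
```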

@@ -448,10 +474,8 @@ func (r *Replica) Snapshot(ctx context.Context) (info SnapshotInfo, err error) {
defer func() { _ = tx.Rollback() }()

// Obtain current position.
pos, err := r.db.Pos()
if err != nil {
return info, fmt.Errorf("cannot determine db position: %w", err)
} else if pos.IsZero() {
pos := r.db.Pos()
if pos.IsZero() {
return info, ErrNoGeneration
}

@@ -491,7 +515,7 @@ func (r *Replica) Snapshot(ctx context.Context) (info SnapshotInfo, err error) {
return info, err
}

log.Printf("%s(%s): snapshot written %s/%08x", r.db.Path(), r.Name(), pos.Generation, pos.Index)
log.Printf("snapshot written %s/%08x", pos.Generation, pos.Index)

return info, nil
}

@@ -559,7 +583,7 @@ func (r *Replica) deleteSnapshotsBeforeIndex(ctx context.Context, generation str
if err := r.Client.DeleteSnapshot(ctx, info.Generation, info.Index); err != nil {
return fmt.Errorf("delete snapshot %s/%08x: %w", info.Generation, info.Index, err)
}
log.Printf("%s(%s): snapshot deleted %s/%08x", r.db.Path(), r.Name(), generation, index)
r.Logger.Printf("snapshot deleted %s/%08x", generation, index)
}

return itr.Close()

@@ -591,7 +615,10 @@ func (r *Replica) deleteWALSegmentsBeforeIndex(ctx context.Context, generation s
if err := r.Client.DeleteWALSegments(ctx, a); err != nil {
return fmt.Errorf("delete wal segments: %w", err)
}
log.Printf("%s(%s): wal segmented deleted before %s/%08x: n=%d", r.db.Path(), r.Name(), generation, index, len(a))

for _, pos := range a {
r.Logger.Printf("wal segmented deleted: %s", pos)
}

return nil
}

@@ -628,7 +655,7 @@ func (r *Replica) monitor(ctx context.Context) {

// Synchronize the shadow wal into the replication directory.
if err := r.Sync(ctx); err != nil {
log.Printf("%s(%s): monitor error: %s", r.db.Path(), r.Name(), err)
r.Logger.Printf("monitor error: %s", err)
continue
}
}

@@ -656,7 +683,7 @@ func (r *Replica) retainer(ctx context.Context) {
return
case <-ticker.C:
if err := r.EnforceRetention(ctx); err != nil {
log.Printf("%s(%s): retainer error: %s", r.db.Path(), r.Name(), err)
r.Logger.Printf("retainer error: %s", err)
continue
}
}

@@ -678,7 +705,7 @@ func (r *Replica) snapshotter(ctx context.Context) {
return
case <-ticker.C:
if _, err := r.Snapshot(ctx); err != nil && err != ErrNoGeneration {
log.Printf("%s(%s): snapshotter error: %s", r.db.Path(), r.Name(), err)
r.Logger.Printf("snapshotter error: %s", err)
continue
}
}

@@ -706,7 +733,7 @@ func (r *Replica) validator(ctx context.Context) {
return
case <-ticker.C:
if err := r.Validate(ctx); err != nil {
log.Printf("%s(%s): validation error: %s", r.db.Path(), r.Name(), err)
r.Logger.Printf("validation error: %s", err)
continue
}
}

@@ -768,7 +795,7 @@ func (r *Replica) Validate(ctx context.Context) error {
if mismatch {
status = "mismatch"
}
log.Printf("%s(%s): validator: status=%s db=%016x replica=%016x pos=%s", db.Path(), r.Name(), status, chksum0, chksum1, pos)
r.Logger.Printf("validator: status=%s db=%016x replica=%016x pos=%s", status, chksum0, chksum1, pos)

// Validate checksums match.
if mismatch {

@@ -786,8 +813,6 @@ func (r *Replica) Validate(ctx context.Context) error {

// waitForReplica blocks until replica reaches at least the given position.
func (r *Replica) waitForReplica(ctx context.Context, pos Pos) error {
db := r.DB()

ticker := time.NewTicker(500 * time.Millisecond)
defer ticker.Stop()

@@ -810,7 +835,7 @@ func (r *Replica) waitForReplica(ctx context.Context, pos Pos) error {
// Obtain current position of replica, check if past target position.
curr := r.Pos()
if curr.IsZero() {
log.Printf("%s(%s): validator: no replica position available", db.Path(), r.Name())
r.Logger.Printf("validator: no replica position available")
continue
}

@@ -43,10 +43,7 @@ func TestReplica_Sync(t *testing.T) {
}

// Fetch current database position.
dpos, err := db.Pos()
if err != nil {
t.Fatal(err)
}
dpos := db.Pos()

c := file.NewReplicaClient(t.TempDir())
r := litestream.NewReplica(db, "")

@@ -69,11 +66,11 @@ func TestReplica_Sync(t *testing.T) {
// Verify WAL matches replica WAL.
if b0, err := os.ReadFile(db.Path() + "-wal"); err != nil {
t.Fatal(err)
} else if r, err := c.WALSegmentReader(context.Background(), litestream.Pos{Generation: generations[0], Index: 0, Offset: 0}); err != nil {
} else if r0, err := c.WALSegmentReader(context.Background(), litestream.Pos{Generation: generations[0], Index: 0, Offset: 0}); err != nil {
t.Fatal(err)
} else if b1, err := io.ReadAll(lz4.NewReader(r)); err != nil {
} else if b1, err := io.ReadAll(lz4.NewReader(r0)); err != nil {
t.Fatal(err)
} else if err := r.Close(); err != nil {
} else if err := r0.Close(); err != nil {
t.Fatal(err)
} else if !bytes.Equal(b0, b1) {
t.Fatalf("wal mismatch: len(%d), len(%d)", len(b0), len(b1))

@@ -98,10 +95,8 @@ func TestReplica_Snapshot(t *testing.T) {
}

// Fetch current database position & snapshot.
pos0, err := db.Pos()
if err != nil {
t.Fatal(err)
} else if info, err := r.Snapshot(context.Background()); err != nil {
pos0 := db.Pos()
if info, err := r.Snapshot(context.Background()); err != nil {
t.Fatal(err)
} else if got, want := info.Pos(), pos0.Truncate(); got != want {
t.Fatalf("pos=%s, want %s", got, want)

@@ -122,10 +117,8 @@ func TestReplica_Snapshot(t *testing.T) {
}

// Fetch current database position & snapshot.
pos1, err := db.Pos()
if err != nil {
t.Fatal(err)
} else if info, err := r.Snapshot(context.Background()); err != nil {
pos1 := db.Pos()
if info, err := r.Snapshot(context.Background()); err != nil {
t.Fatal(err)
} else if got, want := info.Pos(), pos1.Truncate(); got != want {
t.Fatalf("pos=%v, want %v", got, want)