diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 4d3122b..aabaa6e 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -30,31 +30,31 @@ jobs: LITESTREAM_SFTP_KEY: ${{secrets.LITESTREAM_SFTP_KEY}} - name: Run unit tests - run: go test -v ./... + run: make testdata && go test -v ./... - name: Run aws s3 tests - run: go test -v -run=TestReplicaClient . -integration s3 + run: go test -v -run=TestReplicaClient ./integration -replica-type s3 env: LITESTREAM_S3_ACCESS_KEY_ID: ${{ secrets.LITESTREAM_S3_ACCESS_KEY_ID }} LITESTREAM_S3_SECRET_ACCESS_KEY: ${{ secrets.LITESTREAM_S3_SECRET_ACCESS_KEY }} - LITESTREAM_S3_REGION: ${{ secrets.LITESTREAM_S3_REGION }} - LITESTREAM_S3_BUCKET: ${{ secrets.LITESTREAM_S3_BUCKET }} + LITESTREAM_S3_REGION: us-east-1 + LITESTREAM_S3_BUCKET: integration.litestream.io - name: Run google cloud storage (gcs) tests - run: go test -v -run=TestReplicaClient . -integration gcs + run: go test -v -run=TestReplicaClient ./integration -replica-type gcs env: GOOGLE_APPLICATION_CREDENTIALS: /opt/gcp.json - LITESTREAM_GCS_BUCKET: ${{ secrets.LITESTREAM_GCS_BUCKET }} + LITESTREAM_GCS_BUCKET: integration.litestream.io - name: Run azure blob storage (abs) tests - run: go test -v -run=TestReplicaClient . -integration abs + run: go test -v -run=TestReplicaClient ./integration -replica-type abs env: LITESTREAM_ABS_ACCOUNT_NAME: ${{ secrets.LITESTREAM_ABS_ACCOUNT_NAME }} LITESTREAM_ABS_ACCOUNT_KEY: ${{ secrets.LITESTREAM_ABS_ACCOUNT_KEY }} - LITESTREAM_ABS_BUCKET: ${{ secrets.LITESTREAM_ABS_BUCKET }} + LITESTREAM_ABS_BUCKET: integration - name: Run sftp tests - run: go test -v -run=TestReplicaClient . -integration sftp + run: go test -v -run=TestReplicaClient ./integration -replica-type sftp env: LITESTREAM_SFTP_HOST: ${{ secrets.LITESTREAM_SFTP_HOST }} LITESTREAM_SFTP_USER: ${{ secrets.LITESTREAM_SFTP_USER }} diff --git a/Makefile b/Makefile index e3d75e4..70d3709 100644 --- a/Makefile +++ b/Makefile @@ -1,4 +1,9 @@ -default: +.PHONY: default +default: testdata + +.PHONY: testdata +testdata: + make -C testdata docker: docker build -t litestream . diff --git a/cmd/litestream/databases.go b/cmd/litestream/databases.go index 236c01e..dd7747c 100644 --- a/cmd/litestream/databases.go +++ b/cmd/litestream/databases.go @@ -10,12 +10,15 @@ import ( ) // DatabasesCommand is a command for listing managed databases. -type DatabasesCommand struct{} +type DatabasesCommand struct { + configPath string + noExpandEnv bool +} // Run executes the command. func (c *DatabasesCommand) Run(ctx context.Context, args []string) (err error) { fs := flag.NewFlagSet("litestream-databases", flag.ContinueOnError) - configPath, noExpandEnv := registerConfigFlag(fs) + registerConfigFlag(fs, &c.configPath, &c.noExpandEnv) fs.Usage = c.Usage if err := fs.Parse(args); err != nil { return err @@ -24,10 +27,10 @@ func (c *DatabasesCommand) Run(ctx context.Context, args []string) (err error) { } // Load configuration. - if *configPath == "" { - *configPath = DefaultConfigPath() + if c.configPath == "" { + c.configPath = DefaultConfigPath() } - config, err := ReadConfigFile(*configPath, !*noExpandEnv) + config, err := ReadConfigFile(c.configPath, !c.noExpandEnv) if err != nil { return err } diff --git a/cmd/litestream/generations.go b/cmd/litestream/generations.go index fefa40c..e4f9faf 100644 --- a/cmd/litestream/generations.go +++ b/cmd/litestream/generations.go @@ -13,12 +13,15 @@ import ( ) // GenerationsCommand represents a command to list all generations for a database. 
-type GenerationsCommand struct{} +type GenerationsCommand struct { + configPath string + noExpandEnv bool +} // Run executes the command. func (c *GenerationsCommand) Run(ctx context.Context, args []string) (err error) { fs := flag.NewFlagSet("litestream-generations", flag.ContinueOnError) - configPath, noExpandEnv := registerConfigFlag(fs) + registerConfigFlag(fs, &c.configPath, &c.noExpandEnv) replicaName := fs.String("replica", "", "replica name") fs.Usage = c.Usage if err := fs.Parse(args); err != nil { @@ -33,19 +36,19 @@ func (c *GenerationsCommand) Run(ctx context.Context, args []string) (err error) var r *litestream.Replica dbUpdatedAt := time.Now() if isURL(fs.Arg(0)) { - if *configPath != "" { + if c.configPath != "" { return fmt.Errorf("cannot specify a replica URL and the -config flag") } if r, err = NewReplicaFromConfig(&ReplicaConfig{URL: fs.Arg(0)}, nil); err != nil { return err } } else { - if *configPath == "" { - *configPath = DefaultConfigPath() + if c.configPath == "" { + c.configPath = DefaultConfigPath() } // Load configuration. - config, err := ReadConfigFile(*configPath, !*noExpandEnv) + config, err := ReadConfigFile(c.configPath, !c.noExpandEnv) if err != nil { return err } @@ -93,7 +96,7 @@ func (c *GenerationsCommand) Run(ctx context.Context, args []string) (err error) // Iterate over each generation for the replica. for _, generation := range generations { - createdAt, updatedAt, err := r.GenerationTimeBounds(ctx, generation) + createdAt, updatedAt, err := litestream.GenerationTimeBounds(ctx, r.Client, generation) if err != nil { log.Printf("%s: cannot determine generation time bounds: %s", r.Name(), err) continue diff --git a/cmd/litestream/main.go b/cmd/litestream/main.go index 783f73e..7f6f101 100644 --- a/cmd/litestream/main.go +++ b/cmd/litestream/main.go @@ -20,7 +20,6 @@ import ( "github.com/benbjohnson/litestream" "github.com/benbjohnson/litestream/abs" - "github.com/benbjohnson/litestream/file" "github.com/benbjohnson/litestream/gcs" "github.com/benbjohnson/litestream/s3" "github.com/benbjohnson/litestream/sftp" @@ -126,7 +125,7 @@ func (m *Main) Run(ctx context.Context, args []string) (err error) { return err case "restore": - return (&RestoreCommand{}).Run(ctx, args) + return NewRestoreCommand().Run(ctx, args) case "snapshots": return (&SnapshotsCommand{}).Run(ctx, args) case "version": @@ -383,8 +382,8 @@ func NewReplicaFromConfig(c *ReplicaConfig, db *litestream.DB) (_ *litestream.Re return r, nil } -// newFileReplicaClientFromConfig returns a new instance of file.ReplicaClient built from config. -func newFileReplicaClientFromConfig(c *ReplicaConfig, r *litestream.Replica) (_ *file.ReplicaClient, err error) { +// newFileReplicaClientFromConfig returns a new instance of FileReplicaClient built from config. +func newFileReplicaClientFromConfig(c *ReplicaConfig, r *litestream.Replica) (_ *litestream.FileReplicaClient, err error) { // Ensure URL & path are not both specified. if c.URL != "" && c.Path != "" { return nil, fmt.Errorf("cannot specify url & path for file replica") @@ -409,9 +408,7 @@ func newFileReplicaClientFromConfig(c *ReplicaConfig, r *litestream.Replica) (_ } // Instantiate replica and apply time fields, if set. - client := file.NewReplicaClient(path) - client.Replica = r - return client, nil + return litestream.NewFileReplicaClient(path), nil } // newS3ReplicaClientFromConfig returns a new instance of s3.ReplicaClient built from config. 
@@ -669,9 +666,9 @@ func DefaultConfigPath() string { return defaultConfigPath } -func registerConfigFlag(fs *flag.FlagSet) (configPath *string, noExpandEnv *bool) { - return fs.String("config", "", "config path"), - fs.Bool("no-expand-env", false, "do not expand env vars in config") +func registerConfigFlag(fs *flag.FlagSet, configPath *string, noExpandEnv *bool) { + fs.StringVar(configPath, "config", "", "config path") + fs.BoolVar(noExpandEnv, "no-expand-env", false, "do not expand env vars in config") } // expand returns an absolute path for s. diff --git a/cmd/litestream/main_test.go b/cmd/litestream/main_test.go index 75131e4..3886095 100644 --- a/cmd/litestream/main_test.go +++ b/cmd/litestream/main_test.go @@ -9,7 +9,6 @@ import ( "github.com/benbjohnson/litestream" main "github.com/benbjohnson/litestream/cmd/litestream" - "github.com/benbjohnson/litestream/file" "github.com/benbjohnson/litestream/gcs" "github.com/benbjohnson/litestream/s3" ) @@ -103,7 +102,7 @@ func TestNewFileReplicaFromConfig(t *testing.T) { r, err := main.NewReplicaFromConfig(&main.ReplicaConfig{Path: "/foo"}, nil) if err != nil { t.Fatal(err) - } else if client, ok := r.Client.(*file.ReplicaClient); !ok { + } else if client, ok := r.Client.(*litestream.FileReplicaClient); !ok { t.Fatal("unexpected replica type") } else if got, want := client.Path(), "/foo"; got != want { t.Fatalf("Path=%s, want %s", got, want) diff --git a/cmd/litestream/replicate.go b/cmd/litestream/replicate.go index fdaebd2..3da238f 100644 --- a/cmd/litestream/replicate.go +++ b/cmd/litestream/replicate.go @@ -13,7 +13,6 @@ import ( "github.com/benbjohnson/litestream" "github.com/benbjohnson/litestream/abs" - "github.com/benbjohnson/litestream/file" "github.com/benbjohnson/litestream/gcs" "github.com/benbjohnson/litestream/s3" "github.com/benbjohnson/litestream/sftp" @@ -23,6 +22,9 @@ import ( // ReplicateCommand represents a command that continuously replicates SQLite databases. 
type ReplicateCommand struct { + configPath string + noExpandEnv bool + cmd *exec.Cmd // subcommand execCh chan error // subcommand error channel @@ -42,7 +44,7 @@ func NewReplicateCommand() *ReplicateCommand { func (c *ReplicateCommand) ParseFlags(ctx context.Context, args []string) (err error) { fs := flag.NewFlagSet("litestream-replicate", flag.ContinueOnError) execFlag := fs.String("exec", "", "execute subcommand") - configPath, noExpandEnv := registerConfigFlag(fs) + registerConfigFlag(fs, &c.configPath, &c.noExpandEnv) fs.Usage = c.Usage if err := fs.Parse(args); err != nil { return err @@ -52,7 +54,7 @@ func (c *ReplicateCommand) ParseFlags(ctx context.Context, args []string) (err e if fs.NArg() == 1 { return fmt.Errorf("must specify at least one replica URL for %s", fs.Arg(0)) } else if fs.NArg() > 1 { - if *configPath != "" { + if c.configPath != "" { return fmt.Errorf("cannot specify a replica URL and the -config flag") } @@ -66,10 +68,10 @@ func (c *ReplicateCommand) ParseFlags(ctx context.Context, args []string) (err e } c.Config.DBs = []*DBConfig{dbConfig} } else { - if *configPath == "" { - *configPath = DefaultConfigPath() + if c.configPath == "" { + c.configPath = DefaultConfigPath() } - if c.Config, err = ReadConfigFile(*configPath, !*noExpandEnv); err != nil { + if c.Config, err = ReadConfigFile(c.configPath, !c.noExpandEnv); err != nil { return err } } @@ -110,7 +112,7 @@ func (c *ReplicateCommand) Run(ctx context.Context) (err error) { log.Printf("initialized db: %s", db.Path()) for _, r := range db.Replicas { switch client := r.Client.(type) { - case *file.ReplicaClient: + case *litestream.FileReplicaClient: log.Printf("replicating to: name=%q type=%q path=%q", r.Name(), client.Type(), client.Path()) case *s3.ReplicaClient: log.Printf("replicating to: name=%q type=%q bucket=%q path=%q region=%q endpoint=%q sync-interval=%s", r.Name(), client.Type(), client.Bucket, client.Path, client.Region, client.Endpoint, r.SyncInterval) diff --git a/cmd/litestream/restore.go b/cmd/litestream/restore.go index 28c20fc..9e3dca1 100644 --- a/cmd/litestream/restore.go +++ b/cmd/litestream/restore.go @@ -7,31 +7,46 @@ import ( "fmt" "log" "os" + "path/filepath" "strconv" - "time" "github.com/benbjohnson/litestream" ) // RestoreCommand represents a command to restore a database from a backup. -type RestoreCommand struct{} +type RestoreCommand struct { + snapshotIndex int // index of snapshot to start from + + // CLI options + configPath string // path to config file + noExpandEnv bool // if true, do not expand env variables in config + outputPath string // path to restore database to + replicaName string // optional, name of replica to restore from + generation string // optional, generation to restore + targetIndex int // optional, last WAL index to replay + ifDBNotExists bool // if true, skips restore if output path already exists + ifReplicaExists bool // if true, skips if no backups exist + opt litestream.RestoreOptions +} + +func NewRestoreCommand() *RestoreCommand { + return &RestoreCommand{ + targetIndex: -1, + opt: litestream.NewRestoreOptions(), + } +} // Run executes the command. 
func (c *RestoreCommand) Run(ctx context.Context, args []string) (err error) { - opt := litestream.NewRestoreOptions() - opt.Verbose = true - fs := flag.NewFlagSet("litestream-restore", flag.ContinueOnError) - configPath, noExpandEnv := registerConfigFlag(fs) - fs.StringVar(&opt.OutputPath, "o", "", "output path") - fs.StringVar(&opt.ReplicaName, "replica", "", "replica name") - fs.StringVar(&opt.Generation, "generation", "", "generation name") - fs.Var((*indexVar)(&opt.Index), "index", "wal index") - fs.IntVar(&opt.Parallelism, "parallelism", opt.Parallelism, "parallelism") - ifDBNotExists := fs.Bool("if-db-not-exists", false, "") - ifReplicaExists := fs.Bool("if-replica-exists", false, "") - timestampStr := fs.String("timestamp", "", "timestamp") - verbose := fs.Bool("v", false, "verbose output") + registerConfigFlag(fs, &c.configPath, &c.noExpandEnv) + fs.StringVar(&c.outputPath, "o", "", "output path") + fs.StringVar(&c.replicaName, "replica", "", "replica name") + fs.StringVar(&c.generation, "generation", "", "generation name") + fs.Var((*indexVar)(&c.targetIndex), "index", "wal index") + fs.IntVar(&c.opt.Parallelism, "parallelism", c.opt.Parallelism, "parallelism") + fs.BoolVar(&c.ifDBNotExists, "if-db-not-exists", false, "") + fs.BoolVar(&c.ifReplicaExists, "if-replica-exists", false, "") fs.Usage = c.Usage if err := fs.Parse(args); err != nil { return err @@ -40,83 +55,100 @@ func (c *RestoreCommand) Run(ctx context.Context, args []string) (err error) { } else if fs.NArg() > 1 { return fmt.Errorf("too many arguments") } + arg := fs.Arg(0) - // Parse timestamp, if specified. - if *timestampStr != "" { - if opt.Timestamp, err = time.Parse(time.RFC3339, *timestampStr); err != nil { - return errors.New("invalid -timestamp, must specify in ISO 8601 format (e.g. 2000-01-01T00:00:00Z)") - } + // Ensure a generation is specified if target index is specified. + if c.targetIndex != -1 && c.generation == "" { + return fmt.Errorf("must specify -generation when using -index flag") } - // Instantiate logger if verbose output is enabled. - if *verbose { - opt.Logger = log.New(os.Stderr, "", log.LstdFlags|log.Lmicroseconds) + // Default to original database path if output path not specified. + if !isURL(arg) && c.outputPath == "" { + c.outputPath = arg } - // Determine replica & generation to restore from. - var r *litestream.Replica - if isURL(fs.Arg(0)) { - if *configPath != "" { - return fmt.Errorf("cannot specify a replica URL and the -config flag") - } - if r, err = c.loadFromURL(ctx, fs.Arg(0), *ifDBNotExists, &opt); err == errSkipDBExists { - fmt.Println("database already exists, skipping") - return nil + // Exit successfully if the output file already exists and flag is set. + if _, err := os.Stat(c.outputPath); !os.IsNotExist(err) && c.ifDBNotExists { + fmt.Println("database already exists, skipping") + return nil + } + + // Create parent directory if it doesn't already exist. + if err := os.MkdirAll(filepath.Dir(c.outputPath), 0700); err != nil { + return fmt.Errorf("cannot create parent directory: %w", err) + } + + // Build replica from either a URL or config. + r, err := c.loadReplica(ctx, arg) + if err != nil { + return err + } + + // Determine latest generation if one is not specified. + if c.generation == "" { + if c.generation, err = litestream.FindLatestGeneration(ctx, r.Client); err == litestream.ErrNoGeneration { + // Return an error if no matching targets found. + // If optional flag set, return success. Useful for automated recovery. 
+			if c.ifReplicaExists {
+				fmt.Println("no matching backups found")
+				return nil
+			}
+			return fmt.Errorf("no matching backups found")
 		} else if err != nil {
-			return err
-		}
-	} else {
-		if *configPath == "" {
-			*configPath = DefaultConfigPath()
-		}
-		if r, err = c.loadFromConfig(ctx, fs.Arg(0), *configPath, !*noExpandEnv, *ifDBNotExists, &opt); err == errSkipDBExists {
-			fmt.Println("database already exists, skipping")
-			return nil
-		} else if err != nil {
-			return err
+			return fmt.Errorf("cannot determine latest generation: %w", err)
 		}
 	}

-	// Return an error if no matching targets found.
-	// If optional flag set, return success. Useful for automated recovery.
-	if opt.Generation == "" {
-		if *ifReplicaExists {
-			fmt.Println("no matching backups found")
-			return nil
+	// Determine the maximum available index for the generation if one is not specified.
+	if c.targetIndex == -1 {
+		if c.targetIndex, err = litestream.FindMaxIndexByGeneration(ctx, r.Client, c.generation); err != nil {
+			return fmt.Errorf("cannot determine latest index in generation %q: %w", c.generation, err)
 		}
-		return fmt.Errorf("no matching backups found")
 	}
-	return r.Restore(ctx, opt)

+	// Find latest snapshot that occurs before the index.
+	// TODO: Optionally allow -snapshot-index
+	if c.snapshotIndex, err = litestream.FindSnapshotForIndex(ctx, r.Client, c.generation, c.targetIndex); err != nil {
+		return fmt.Errorf("cannot find snapshot index: %w", err)
+	}
+
+	c.opt.Logger = log.New(os.Stderr, "", log.LstdFlags|log.Lmicroseconds)
+
+	return litestream.Restore(ctx, r.Client, c.outputPath, c.generation, c.snapshotIndex, c.targetIndex, c.opt)
 }

-// loadFromURL creates a replica & updates the restore options from a replica URL.
-func (c *RestoreCommand) loadFromURL(ctx context.Context, replicaURL string, ifDBNotExists bool, opt *litestream.RestoreOptions) (*litestream.Replica, error) {
-	if opt.OutputPath == "" {
+func (c *RestoreCommand) loadReplica(ctx context.Context, arg string) (*litestream.Replica, error) {
+	if isURL(arg) {
+		return c.loadReplicaFromURL(ctx, arg)
+	}
+	return c.loadReplicaFromConfig(ctx, arg)
+}
+
+// loadReplicaFromURL creates a replica & updates the restore options from a replica URL.
+func (c *RestoreCommand) loadReplicaFromURL(ctx context.Context, replicaURL string) (*litestream.Replica, error) {
+	if c.configPath != "" {
+		return nil, fmt.Errorf("cannot specify a replica URL and the -config flag")
+	} else if c.replicaName != "" {
+		return nil, fmt.Errorf("cannot specify a replica URL and the -replica flag")
+	} else if c.outputPath == "" {
 		return nil, fmt.Errorf("output path required")
 	}

-	// Exit successfully if the output file already exists.
-	if _, err := os.Stat(opt.OutputPath); !os.IsNotExist(err) && ifDBNotExists {
-		return nil, errSkipDBExists
-	}
-
 	syncInterval := litestream.DefaultSyncInterval
-	r, err := NewReplicaFromConfig(&ReplicaConfig{
+	return NewReplicaFromConfig(&ReplicaConfig{
 		URL:          replicaURL,
 		SyncInterval: &syncInterval,
 	}, nil)
-	if err != nil {
-		return nil, err
-	}
-	opt.Generation, _, err = r.CalcRestoreTarget(ctx, *opt)
-	return r, err
 }

-// loadFromConfig returns a replica & updates the restore options from a DB reference.
-func (c *RestoreCommand) loadFromConfig(ctx context.Context, dbPath, configPath string, expandEnv, ifDBNotExists bool, opt *litestream.RestoreOptions) (*litestream.Replica, error) {
+// loadReplicaFromConfig returns a replica based on the specified config path.
+func (c *RestoreCommand) loadReplicaFromConfig(ctx context.Context, dbPath string) (*litestream.Replica, error) { + if c.configPath == "" { + c.configPath = DefaultConfigPath() + } + // Load configuration. - config, err := ReadConfigFile(configPath, expandEnv) + config, err := ReadConfigFile(c.configPath, !c.noExpandEnv) if err != nil { return nil, err } @@ -132,25 +164,34 @@ func (c *RestoreCommand) loadFromConfig(ctx context.Context, dbPath, configPath db, err := NewDBFromConfig(dbConfig) if err != nil { return nil, err + } else if len(db.Replicas) == 0 { + return nil, fmt.Errorf("database has no replicas: %s", dbPath) } - // Restore into original database path if not specified. - if opt.OutputPath == "" { - opt.OutputPath = dbPath + // Filter by replica name if specified. + if c.replicaName != "" { + r := db.Replica(c.replicaName) + if r == nil { + return nil, fmt.Errorf("replica %q not found", c.replicaName) + } + return r, nil } - // Exit successfully if the output file already exists. - if _, err := os.Stat(opt.OutputPath); !os.IsNotExist(err) && ifDBNotExists { - return nil, errSkipDBExists + // Choose only replica if only one available and no name is specified. + if len(db.Replicas) == 1 { + return db.Replicas[0], nil } - // Determine the appropriate replica & generation to restore from, - r, generation, err := db.CalcRestoreTarget(ctx, *opt) + // A replica must be specified when restoring a specific generation with multiple replicas. + if c.generation != "" { + return nil, fmt.Errorf("must specify -replica when restoring from a specific generation") + } + + // Determine latest replica to restore from. + r, err := litestream.LatestReplica(ctx, db.Replicas) if err != nil { - return nil, err + return nil, fmt.Errorf("cannot determine latest replica: %w", err) } - opt.Generation = generation - return r, nil } @@ -186,10 +227,6 @@ Arguments: Restore up to a specific hex-encoded WAL index (inclusive). Defaults to use the highest available index. - -timestamp TIMESTAMP - Restore to a specific point-in-time. - Defaults to use the latest available backup. - -o PATH Output path of the restored database. Defaults to original DB path. @@ -213,9 +250,6 @@ Examples: # Restore latest replica for database to original location. $ litestream restore /path/to/db - # Restore replica for database to a given point in time. - $ litestream restore -timestamp 2020-01-01T00:00:00Z /path/to/db - # Restore latest replica for database to new /tmp directory $ litestream restore -o /tmp/db /path/to/db diff --git a/cmd/litestream/snapshots.go b/cmd/litestream/snapshots.go index 574ec64..d8f84fa 100644 --- a/cmd/litestream/snapshots.go +++ b/cmd/litestream/snapshots.go @@ -14,12 +14,15 @@ import ( ) // SnapshotsCommand represents a command to list snapshots for a command. -type SnapshotsCommand struct{} +type SnapshotsCommand struct { + configPath string + noExpandEnv bool +} // Run executes the command. 
func (c *SnapshotsCommand) Run(ctx context.Context, args []string) (err error) { fs := flag.NewFlagSet("litestream-snapshots", flag.ContinueOnError) - configPath, noExpandEnv := registerConfigFlag(fs) + registerConfigFlag(fs, &c.configPath, &c.noExpandEnv) replicaName := fs.String("replica", "", "replica name") fs.Usage = c.Usage if err := fs.Parse(args); err != nil { @@ -33,19 +36,19 @@ func (c *SnapshotsCommand) Run(ctx context.Context, args []string) (err error) { var db *litestream.DB var r *litestream.Replica if isURL(fs.Arg(0)) { - if *configPath != "" { + if c.configPath != "" { return fmt.Errorf("cannot specify a replica URL and the -config flag") } if r, err = NewReplicaFromConfig(&ReplicaConfig{URL: fs.Arg(0)}, nil); err != nil { return err } } else { - if *configPath == "" { - *configPath = DefaultConfigPath() + if c.configPath == "" { + c.configPath = DefaultConfigPath() } // Load configuration. - config, err := ReadConfigFile(*configPath, !*noExpandEnv) + config, err := ReadConfigFile(c.configPath, !c.noExpandEnv) if err != nil { return err } diff --git a/cmd/litestream/wal.go b/cmd/litestream/wal.go index 9b7b9ef..d3cc681 100644 --- a/cmd/litestream/wal.go +++ b/cmd/litestream/wal.go @@ -13,12 +13,15 @@ import ( ) // WALCommand represents a command to list WAL files for a database. -type WALCommand struct{} +type WALCommand struct { + configPath string + noExpandEnv bool +} // Run executes the command. func (c *WALCommand) Run(ctx context.Context, args []string) (err error) { fs := flag.NewFlagSet("litestream-wal", flag.ContinueOnError) - configPath, noExpandEnv := registerConfigFlag(fs) + registerConfigFlag(fs, &c.configPath, &c.noExpandEnv) replicaName := fs.String("replica", "", "replica name") generation := fs.String("generation", "", "generation name") fs.Usage = c.Usage @@ -33,19 +36,19 @@ func (c *WALCommand) Run(ctx context.Context, args []string) (err error) { var db *litestream.DB var r *litestream.Replica if isURL(fs.Arg(0)) { - if *configPath != "" { + if c.configPath != "" { return fmt.Errorf("cannot specify a replica URL and the -config flag") } if r, err = NewReplicaFromConfig(&ReplicaConfig{URL: fs.Arg(0)}, nil); err != nil { return err } } else { - if *configPath == "" { - *configPath = DefaultConfigPath() + if c.configPath == "" { + c.configPath = DefaultConfigPath() } // Load configuration. - config, err := ReadConfigFile(*configPath, !*noExpandEnv) + config, err := ReadConfigFile(c.configPath, !c.noExpandEnv) if err != nil { return err } diff --git a/db.go b/db.go index 8c224cc..f56bdc5 100644 --- a/db.go +++ b/db.go @@ -12,7 +12,6 @@ import ( "io" "io/ioutil" "log" - "math" "math/rand" "os" "path/filepath" @@ -62,8 +61,9 @@ type DB struct { chksum0, chksum1 uint32 byteOrder binary.ByteOrder - fileInfo os.FileInfo // db info cached during init - dirInfo os.FileInfo // parent dir info cached during init + fileMode os.FileMode // db mode cached during init + dirMode os.FileMode // parent dir mode cached during init + uid, gid int // db user & group id cached during init ctx context.Context cancel func() @@ -180,16 +180,6 @@ func (db *DB) ShadowWALDir(generation string) string { return filepath.Join(db.GenerationPath(generation), "wal") } -// FileInfo returns the cached file stats for the database file when it was initialized. -func (db *DB) FileInfo() os.FileInfo { - return db.fileInfo -} - -// DirInfo returns the cached file stats for the parent directory of the database file when it was initialized. 
-func (db *DB) DirInfo() os.FileInfo { - return db.dirInfo -} - // Replica returns a replica by name. func (db *DB) Replica(name string) *Replica { for _, r := range db.Replicas { @@ -505,13 +495,14 @@ func (db *DB) init() (err error) { } else if err != nil { return err } - db.fileInfo = fi + db.fileMode = fi.Mode() + db.uid, db.gid = internal.Fileinfo(fi) // Obtain permissions for parent directory. if fi, err = os.Stat(filepath.Dir(db.path)); err != nil { return err } - db.dirInfo = fi + db.dirMode = fi.Mode() dsn := db.path dsn += fmt.Sprintf("?_busy_timeout=%d", BusyTimeout.Milliseconds()) @@ -577,7 +568,7 @@ func (db *DB) init() (err error) { } // Ensure meta directory structure exists. - if err := internal.MkdirAll(db.MetaPath(), db.dirInfo); err != nil { + if err := internal.MkdirAll(db.MetaPath(), db.dirMode, db.uid, db.gid); err != nil { return err } @@ -785,7 +776,7 @@ func (db *DB) createGeneration(ctx context.Context) (string, error) { // Generate new directory. dir := filepath.Join(db.MetaPath(), "generations", generation) - if err := internal.MkdirAll(dir, db.dirInfo); err != nil { + if err := internal.MkdirAll(dir, db.dirMode, db.uid, db.gid); err != nil { return "", err } @@ -796,15 +787,10 @@ func (db *DB) createGeneration(ctx context.Context) (string, error) { // Atomically write generation name as current generation. generationNamePath := db.GenerationNamePath() - mode := os.FileMode(0600) - if db.fileInfo != nil { - mode = db.fileInfo.Mode() - } - if err := os.WriteFile(generationNamePath+".tmp", []byte(generation+"\n"), mode); err != nil { + if err := os.WriteFile(generationNamePath+".tmp", []byte(generation+"\n"), db.fileMode); err != nil { return "", fmt.Errorf("write generation temp file: %w", err) } - uid, gid := internal.Fileinfo(db.fileInfo) - _ = os.Chown(generationNamePath+".tmp", uid, gid) + _ = os.Chown(generationNamePath+".tmp", db.uid, db.gid) if err := os.Rename(generationNamePath+".tmp", generationNamePath); err != nil { return "", fmt.Errorf("rename generation file: %w", err) } @@ -1086,7 +1072,7 @@ func (db *DB) copyToShadowWAL(ctx context.Context) error { tempFilename := filepath.Join(db.ShadowWALDir(pos.Generation), FormatIndex(pos.Index), FormatOffset(pos.Offset)+".wal.tmp") defer os.Remove(tempFilename) - f, err := internal.CreateFile(tempFilename, db.fileInfo) + f, err := internal.CreateFile(tempFilename, db.fileMode, db.uid, db.gid) if err != nil { return err } @@ -1214,12 +1200,12 @@ func (db *DB) writeWALSegment(ctx context.Context, pos Pos, rd io.Reader) error filename := filepath.Join(db.ShadowWALDir(pos.Generation), FormatIndex(pos.Index), FormatOffset(pos.Offset)+".wal.lz4") // Ensure parent directory exists. - if err := internal.MkdirAll(filepath.Dir(filename), db.dirInfo); err != nil { + if err := internal.MkdirAll(filepath.Dir(filename), db.dirMode, db.uid, db.gid); err != nil { return err } // Write WAL segment to temporary file next to destination path. - f, err := internal.CreateFile(filename+".tmp", db.fileInfo) + f, err := internal.CreateFile(filename+".tmp", db.fileMode, db.uid, db.gid) if err != nil { return err } @@ -1542,39 +1528,10 @@ func (db *DB) monitor() { } } -// CalcRestoreTarget returns a replica & generation to restore from based on opt criteria. -func (db *DB) CalcRestoreTarget(ctx context.Context, opt RestoreOptions) (*Replica, string, error) { - var target struct { - replica *Replica - generation string - updatedAt time.Time - } - - for _, r := range db.Replicas { - // Skip replica if it does not match filter. 
- if opt.ReplicaName != "" && r.Name() != opt.ReplicaName { - continue - } - - generation, updatedAt, err := r.CalcRestoreTarget(ctx, opt) - if err != nil { - return nil, "", err - } - - // Use the latest replica if we have multiple candidates. - if !updatedAt.After(target.updatedAt) { - continue - } - - target.replica, target.generation, target.updatedAt = r, generation, updatedAt - } - return target.replica, target.generation, nil -} - -// applyWAL performs a truncating checkpoint on the given database. -func applyWAL(ctx context.Context, index int, dbPath string) error { +// ApplyWAL performs a truncating checkpoint on the given database. +func ApplyWAL(ctx context.Context, dbPath, walPath string) error { // Copy WAL file from it's staging path to the correct "-wal" location. - if err := os.Rename(fmt.Sprintf("%s-%08x-wal", dbPath, index), dbPath+"-wal"); err != nil { + if err := os.Rename(walPath, dbPath+"-wal"); err != nil { return err } @@ -1583,7 +1540,7 @@ func applyWAL(ctx context.Context, index int, dbPath string) error { if err != nil { return err } - defer d.Close() + defer func() { _ = d.Close() }() var row [3]int if err := d.QueryRow(`PRAGMA wal_checkpoint(TRUNCATE);`).Scan(&row[0], &row[1], &row[2]); err != nil { @@ -1660,47 +1617,6 @@ func formatWALPath(index int) string { var walPathRegex = regexp.MustCompile(`^([0-9a-f]{8})\.wal$`) -// DefaultRestoreParallelism is the default parallelism when downloading WAL files. -const DefaultRestoreParallelism = 8 - -// RestoreOptions represents options for DB.Restore(). -type RestoreOptions struct { - // Target path to restore into. - // If blank, the original DB path is used. - OutputPath string - - // Specific replica to restore from. - // If blank, all replicas are considered. - ReplicaName string - - // Specific generation to restore from. - // If blank, all generations considered. - Generation string - - // Specific index to restore from. - // Set to math.MaxInt32 to ignore index. - Index int - - // Point-in-time to restore database. - // If zero, database restore to most recent state available. - Timestamp time.Time - - // Specifies how many WAL files are downloaded in parallel during restore. - Parallelism int - - // Logging settings. - Logger *log.Logger - Verbose bool -} - -// NewRestoreOptions returns a new instance of RestoreOptions with defaults. -func NewRestoreOptions() RestoreOptions { - return RestoreOptions{ - Index: math.MaxInt32, - Parallelism: DefaultRestoreParallelism, - } -} - // ReadWALFields iterates over the header & frames in the WAL data in r. // Returns salt, checksum, byte order & the last frame. WAL data must start // from the beginning of the WAL header and must end on either the WAL header diff --git a/file/replica_client.go b/file_replica_client.go similarity index 64% rename from file/replica_client.go rename to file_replica_client.go index ef7d7b9..a8873f0 100644 --- a/file/replica_client.go +++ b/file_replica_client.go @@ -1,4 +1,4 @@ -package file +package litestream import ( "context" @@ -10,49 +10,46 @@ import ( "sort" "strings" - "github.com/benbjohnson/litestream" "github.com/benbjohnson/litestream/internal" ) -// ReplicaClientType is the client type for this package. -const ReplicaClientType = "file" +// FileReplicaClientType is the client type for file replica clients. +const FileReplicaClientType = "file" -var _ litestream.ReplicaClient = (*ReplicaClient)(nil) +var _ ReplicaClient = (*FileReplicaClient)(nil) -// ReplicaClient is a client for writing snapshots & WAL segments to disk. 
-type ReplicaClient struct { +// FileReplicaClient is a client for writing snapshots & WAL segments to disk. +type FileReplicaClient struct { path string // destination path - Replica *litestream.Replica + // File info + FileMode os.FileMode + DirMode os.FileMode + Uid, Gid int } -// NewReplicaClient returns a new instance of ReplicaClient. -func NewReplicaClient(path string) *ReplicaClient { - return &ReplicaClient{ +// NewFileReplicaClient returns a new instance of FileReplicaClient. +func NewFileReplicaClient(path string) *FileReplicaClient { + return &FileReplicaClient{ path: path, - } -} -// db returns the database, if available. -func (c *ReplicaClient) db() *litestream.DB { - if c.Replica == nil { - return nil + FileMode: 0600, + DirMode: 0700, } - return c.Replica.DB() } // Type returns "file" as the client type. -func (c *ReplicaClient) Type() string { - return ReplicaClientType +func (c *FileReplicaClient) Type() string { + return FileReplicaClientType } // Path returns the destination path to replicate the database to. -func (c *ReplicaClient) Path() string { +func (c *FileReplicaClient) Path() string { return c.path } // GenerationsDir returns the path to a generation root directory. -func (c *ReplicaClient) GenerationsDir() (string, error) { +func (c *FileReplicaClient) GenerationsDir() (string, error) { if c.path == "" { return "", fmt.Errorf("file replica path required") } @@ -60,7 +57,7 @@ func (c *ReplicaClient) GenerationsDir() (string, error) { } // GenerationDir returns the path to a generation's root directory. -func (c *ReplicaClient) GenerationDir(generation string) (string, error) { +func (c *FileReplicaClient) GenerationDir(generation string) (string, error) { dir, err := c.GenerationsDir() if err != nil { return "", err @@ -71,7 +68,7 @@ func (c *ReplicaClient) GenerationDir(generation string) (string, error) { } // SnapshotsDir returns the path to a generation's snapshot directory. -func (c *ReplicaClient) SnapshotsDir(generation string) (string, error) { +func (c *FileReplicaClient) SnapshotsDir(generation string) (string, error) { dir, err := c.GenerationDir(generation) if err != nil { return "", err @@ -80,16 +77,16 @@ func (c *ReplicaClient) SnapshotsDir(generation string) (string, error) { } // SnapshotPath returns the path to an uncompressed snapshot file. -func (c *ReplicaClient) SnapshotPath(generation string, index int) (string, error) { +func (c *FileReplicaClient) SnapshotPath(generation string, index int) (string, error) { dir, err := c.SnapshotsDir(generation) if err != nil { return "", err } - return filepath.Join(dir, litestream.FormatIndex(index)+".snapshot.lz4"), nil + return filepath.Join(dir, FormatIndex(index)+".snapshot.lz4"), nil } // WALDir returns the path to a generation's WAL directory -func (c *ReplicaClient) WALDir(generation string) (string, error) { +func (c *FileReplicaClient) WALDir(generation string) (string, error) { dir, err := c.GenerationDir(generation) if err != nil { return "", err @@ -98,16 +95,16 @@ func (c *ReplicaClient) WALDir(generation string) (string, error) { } // WALSegmentPath returns the path to a WAL segment file. 
-func (c *ReplicaClient) WALSegmentPath(generation string, index int, offset int64) (string, error) { +func (c *FileReplicaClient) WALSegmentPath(generation string, index int, offset int64) (string, error) { dir, err := c.WALDir(generation) if err != nil { return "", err } - return filepath.Join(dir, litestream.FormatIndex(index), fmt.Sprintf("%08x.wal.lz4", offset)), nil + return filepath.Join(dir, FormatIndex(index), fmt.Sprintf("%08x.wal.lz4", offset)), nil } // Generations returns a list of available generation names. -func (c *ReplicaClient) Generations(ctx context.Context) ([]string, error) { +func (c *FileReplicaClient) Generations(ctx context.Context) ([]string, error) { root, err := c.GenerationsDir() if err != nil { return nil, fmt.Errorf("cannot determine generations path: %w", err) @@ -122,7 +119,7 @@ func (c *ReplicaClient) Generations(ctx context.Context) ([]string, error) { var generations []string for _, fi := range fis { - if !litestream.IsGenerationName(fi.Name()) { + if !IsGenerationName(fi.Name()) { continue } else if !fi.IsDir() { continue @@ -133,7 +130,7 @@ func (c *ReplicaClient) Generations(ctx context.Context) ([]string, error) { } // DeleteGeneration deletes all snapshots & WAL segments within a generation. -func (c *ReplicaClient) DeleteGeneration(ctx context.Context, generation string) error { +func (c *FileReplicaClient) DeleteGeneration(ctx context.Context, generation string) error { dir, err := c.GenerationDir(generation) if err != nil { return fmt.Errorf("cannot determine generation path: %w", err) @@ -146,7 +143,7 @@ func (c *ReplicaClient) DeleteGeneration(ctx context.Context, generation string) } // Snapshots returns an iterator over all available snapshots for a generation. -func (c *ReplicaClient) Snapshots(ctx context.Context, generation string) (litestream.SnapshotIterator, error) { +func (c *FileReplicaClient) Snapshots(ctx context.Context, generation string) (SnapshotIterator, error) { dir, err := c.SnapshotsDir(generation) if err != nil { return nil, err @@ -154,7 +151,7 @@ func (c *ReplicaClient) Snapshots(ctx context.Context, generation string) (lites f, err := os.Open(dir) if os.IsNotExist(err) { - return litestream.NewSnapshotInfoSliceIterator(nil), nil + return NewSnapshotInfoSliceIterator(nil), nil } else if err != nil { return nil, err } @@ -166,7 +163,7 @@ func (c *ReplicaClient) Snapshots(ctx context.Context, generation string) (lites } // Iterate over every file and convert to metadata. - infos := make([]litestream.SnapshotInfo, 0, len(fis)) + infos := make([]SnapshotInfo, 0, len(fis)) for _, fi := range fis { // Parse index from filename. index, err := internal.ParseSnapshotPath(filepath.Base(fi.Name())) @@ -174,7 +171,7 @@ func (c *ReplicaClient) Snapshots(ctx context.Context, generation string) (lites continue } - infos = append(infos, litestream.SnapshotInfo{ + infos = append(infos, SnapshotInfo{ Generation: generation, Index: index, Size: fi.Size(), @@ -182,30 +179,25 @@ func (c *ReplicaClient) Snapshots(ctx context.Context, generation string) (lites }) } - sort.Sort(litestream.SnapshotInfoSlice(infos)) + sort.Sort(SnapshotInfoSlice(infos)) - return litestream.NewSnapshotInfoSliceIterator(infos), nil + return NewSnapshotInfoSliceIterator(infos), nil } // WriteSnapshot writes LZ4 compressed data from rd into a file on disk. 
-func (c *ReplicaClient) WriteSnapshot(ctx context.Context, generation string, index int, rd io.Reader) (info litestream.SnapshotInfo, err error) { +func (c *FileReplicaClient) WriteSnapshot(ctx context.Context, generation string, index int, rd io.Reader) (info SnapshotInfo, err error) { filename, err := c.SnapshotPath(generation, index) if err != nil { return info, err } - var fileInfo, dirInfo os.FileInfo - if db := c.db(); db != nil { - fileInfo, dirInfo = db.FileInfo(), db.DirInfo() - } - // Ensure parent directory exists. - if err := internal.MkdirAll(filepath.Dir(filename), dirInfo); err != nil { + if err := internal.MkdirAll(filepath.Dir(filename), c.DirMode, c.Uid, c.Gid); err != nil { return info, err } // Write snapshot to temporary file next to destination path. - f, err := internal.CreateFile(filename+".tmp", fileInfo) + f, err := internal.CreateFile(filename+".tmp", c.FileMode, c.Uid, c.Gid) if err != nil { return info, err } @@ -224,7 +216,7 @@ func (c *ReplicaClient) WriteSnapshot(ctx context.Context, generation string, in if err != nil { return info, err } - info = litestream.SnapshotInfo{ + info = SnapshotInfo{ Generation: generation, Index: index, Size: fi.Size(), @@ -241,7 +233,7 @@ func (c *ReplicaClient) WriteSnapshot(ctx context.Context, generation string, in // SnapshotReader returns a reader for snapshot data at the given generation/index. // Returns os.ErrNotExist if no matching index is found. -func (c *ReplicaClient) SnapshotReader(ctx context.Context, generation string, index int) (io.ReadCloser, error) { +func (c *FileReplicaClient) SnapshotReader(ctx context.Context, generation string, index int) (io.ReadCloser, error) { filename, err := c.SnapshotPath(generation, index) if err != nil { return nil, err @@ -250,7 +242,7 @@ func (c *ReplicaClient) SnapshotReader(ctx context.Context, generation string, i } // DeleteSnapshot deletes a snapshot with the given generation & index. -func (c *ReplicaClient) DeleteSnapshot(ctx context.Context, generation string, index int) error { +func (c *FileReplicaClient) DeleteSnapshot(ctx context.Context, generation string, index int) error { filename, err := c.SnapshotPath(generation, index) if err != nil { return fmt.Errorf("cannot determine snapshot path: %w", err) @@ -262,7 +254,7 @@ func (c *ReplicaClient) DeleteSnapshot(ctx context.Context, generation string, i } // WALSegments returns an iterator over all available WAL files for a generation. -func (c *ReplicaClient) WALSegments(ctx context.Context, generation string) (litestream.WALSegmentIterator, error) { +func (c *FileReplicaClient) WALSegments(ctx context.Context, generation string) (WALSegmentIterator, error) { dir, err := c.WALDir(generation) if err != nil { return nil, err @@ -270,7 +262,7 @@ func (c *ReplicaClient) WALSegments(ctx context.Context, generation string) (lit f, err := os.Open(dir) if os.IsNotExist(err) { - return litestream.NewWALSegmentInfoSliceIterator(nil), nil + return NewWALSegmentInfoSliceIterator(nil), nil } else if err != nil { return nil, err } @@ -284,7 +276,7 @@ func (c *ReplicaClient) WALSegments(ctx context.Context, generation string) (lit // Iterate over every file and convert to metadata. 
indexes := make([]int, 0, len(fis)) for _, fi := range fis { - index, err := litestream.ParseIndex(fi.Name()) + index, err := ParseIndex(fi.Name()) if err != nil || !fi.IsDir() { continue } @@ -293,28 +285,23 @@ func (c *ReplicaClient) WALSegments(ctx context.Context, generation string) (lit sort.Ints(indexes) - return newWALSegmentIterator(dir, generation, indexes), nil + return newFileWALSegmentIterator(dir, generation, indexes), nil } // WriteWALSegment writes LZ4 compressed data from rd into a file on disk. -func (c *ReplicaClient) WriteWALSegment(ctx context.Context, pos litestream.Pos, rd io.Reader) (info litestream.WALSegmentInfo, err error) { +func (c *FileReplicaClient) WriteWALSegment(ctx context.Context, pos Pos, rd io.Reader) (info WALSegmentInfo, err error) { filename, err := c.WALSegmentPath(pos.Generation, pos.Index, pos.Offset) if err != nil { return info, err } - var fileInfo, dirInfo os.FileInfo - if db := c.db(); db != nil { - fileInfo, dirInfo = db.FileInfo(), db.DirInfo() - } - // Ensure parent directory exists. - if err := internal.MkdirAll(filepath.Dir(filename), dirInfo); err != nil { + if err := internal.MkdirAll(filepath.Dir(filename), c.DirMode, c.Uid, c.Gid); err != nil { return info, err } // Write WAL segment to temporary file next to destination path. - f, err := internal.CreateFile(filename+".tmp", fileInfo) + f, err := internal.CreateFile(filename+".tmp", c.FileMode, c.Uid, c.Gid) if err != nil { return info, err } @@ -333,7 +320,7 @@ func (c *ReplicaClient) WriteWALSegment(ctx context.Context, pos litestream.Pos, if err != nil { return info, err } - info = litestream.WALSegmentInfo{ + info = WALSegmentInfo{ Generation: pos.Generation, Index: pos.Index, Offset: pos.Offset, @@ -351,7 +338,7 @@ func (c *ReplicaClient) WriteWALSegment(ctx context.Context, pos litestream.Pos, // WALSegmentReader returns a reader for a section of WAL data at the given position. // Returns os.ErrNotExist if no matching index/offset is found. -func (c *ReplicaClient) WALSegmentReader(ctx context.Context, pos litestream.Pos) (io.ReadCloser, error) { +func (c *FileReplicaClient) WALSegmentReader(ctx context.Context, pos Pos) (io.ReadCloser, error) { filename, err := c.WALSegmentPath(pos.Generation, pos.Index, pos.Offset) if err != nil { return nil, err @@ -360,7 +347,7 @@ func (c *ReplicaClient) WALSegmentReader(ctx context.Context, pos litestream.Pos } // DeleteWALSegments deletes WAL segments at the given positions. 
-func (c *ReplicaClient) DeleteWALSegments(ctx context.Context, a []litestream.Pos) error { +func (c *FileReplicaClient) DeleteWALSegments(ctx context.Context, a []Pos) error { for _, pos := range a { filename, err := c.WALSegmentPath(pos.Generation, pos.Index, pos.Offset) if err != nil { @@ -373,28 +360,28 @@ func (c *ReplicaClient) DeleteWALSegments(ctx context.Context, a []litestream.Po return nil } -type walSegmentIterator struct { +type fileWalSegmentIterator struct { dir string generation string indexes []int - infos []litestream.WALSegmentInfo + infos []WALSegmentInfo err error } -func newWALSegmentIterator(dir, generation string, indexes []int) *walSegmentIterator { - return &walSegmentIterator{ +func newFileWALSegmentIterator(dir, generation string, indexes []int) *fileWalSegmentIterator { + return &fileWalSegmentIterator{ dir: dir, generation: generation, indexes: indexes, } } -func (itr *walSegmentIterator) Close() (err error) { +func (itr *fileWalSegmentIterator) Close() (err error) { return itr.err } -func (itr *walSegmentIterator) Next() bool { +func (itr *fileWalSegmentIterator) Next() bool { // Exit if an error has already occurred. if itr.err != nil { return false @@ -416,7 +403,7 @@ func (itr *walSegmentIterator) Next() bool { // Read segments into a cache for the current index. index := itr.indexes[0] itr.indexes = itr.indexes[1:] - f, err := os.Open(filepath.Join(itr.dir, litestream.FormatIndex(index))) + f, err := os.Open(filepath.Join(itr.dir, FormatIndex(index))) if err != nil { itr.err = err return false @@ -438,12 +425,12 @@ func (itr *walSegmentIterator) Next() bool { continue } - offset, err := litestream.ParseOffset(strings.TrimSuffix(filename, ".wal.lz4")) + offset, err := ParseOffset(strings.TrimSuffix(filename, ".wal.lz4")) if err != nil { continue } - itr.infos = append(itr.infos, litestream.WALSegmentInfo{ + itr.infos = append(itr.infos, WALSegmentInfo{ Generation: itr.generation, Index: index, Offset: offset, @@ -453,7 +440,7 @@ func (itr *walSegmentIterator) Next() bool { } // Ensure segments are sorted within index. 
- sort.Sort(litestream.WALSegmentInfoSlice(itr.infos)) + sort.Sort(WALSegmentInfoSlice(itr.infos)) if len(itr.infos) > 0 { return true @@ -461,11 +448,11 @@ func (itr *walSegmentIterator) Next() bool { } } -func (itr *walSegmentIterator) Err() error { return itr.err } +func (itr *fileWalSegmentIterator) Err() error { return itr.err } -func (itr *walSegmentIterator) WALSegment() litestream.WALSegmentInfo { +func (itr *fileWalSegmentIterator) WALSegment() WALSegmentInfo { if len(itr.infos) == 0 { - return litestream.WALSegmentInfo{} + return WALSegmentInfo{} } return itr.infos[0] } diff --git a/file/replica_client_test.go b/file_replica_client_test.go similarity index 54% rename from file/replica_client_test.go rename to file_replica_client_test.go index 465e835..65dcb11 100644 --- a/file/replica_client_test.go +++ b/file_replica_client_test.go @@ -1,34 +1,34 @@ -package file_test +package litestream_test import ( "testing" - "github.com/benbjohnson/litestream/file" + "github.com/benbjohnson/litestream" ) func TestReplicaClient_Path(t *testing.T) { - c := file.NewReplicaClient("/foo/bar") + c := litestream.NewFileReplicaClient("/foo/bar") if got, want := c.Path(), "/foo/bar"; got != want { t.Fatalf("Path()=%v, want %v", got, want) } } func TestReplicaClient_Type(t *testing.T) { - if got, want := file.NewReplicaClient("").Type(), "file"; got != want { + if got, want := litestream.NewFileReplicaClient("").Type(), "file"; got != want { t.Fatalf("Type()=%v, want %v", got, want) } } func TestReplicaClient_GenerationsDir(t *testing.T) { t.Run("OK", func(t *testing.T) { - if got, err := file.NewReplicaClient("/foo").GenerationsDir(); err != nil { + if got, err := litestream.NewFileReplicaClient("/foo").GenerationsDir(); err != nil { t.Fatal(err) } else if want := "/foo/generations"; got != want { t.Fatalf("GenerationsDir()=%v, want %v", got, want) } }) t.Run("ErrNoPath", func(t *testing.T) { - if _, err := file.NewReplicaClient("").GenerationsDir(); err == nil || err.Error() != `file replica path required` { + if _, err := litestream.NewFileReplicaClient("").GenerationsDir(); err == nil || err.Error() != `file replica path required` { t.Fatalf("unexpected error: %v", err) } }) @@ -36,19 +36,19 @@ func TestReplicaClient_GenerationsDir(t *testing.T) { func TestReplicaClient_GenerationDir(t *testing.T) { t.Run("OK", func(t *testing.T) { - if got, err := file.NewReplicaClient("/foo").GenerationDir("0123456701234567"); err != nil { + if got, err := litestream.NewFileReplicaClient("/foo").GenerationDir("0123456701234567"); err != nil { t.Fatal(err) } else if want := "/foo/generations/0123456701234567"; got != want { t.Fatalf("GenerationDir()=%v, want %v", got, want) } }) t.Run("ErrNoPath", func(t *testing.T) { - if _, err := file.NewReplicaClient("").GenerationDir("0123456701234567"); err == nil || err.Error() != `file replica path required` { + if _, err := litestream.NewFileReplicaClient("").GenerationDir("0123456701234567"); err == nil || err.Error() != `file replica path required` { t.Fatalf("expected error: %v", err) } }) t.Run("ErrNoGeneration", func(t *testing.T) { - if _, err := file.NewReplicaClient("/foo").GenerationDir(""); err == nil || err.Error() != `generation required` { + if _, err := litestream.NewFileReplicaClient("/foo").GenerationDir(""); err == nil || err.Error() != `generation required` { t.Fatalf("expected error: %v", err) } }) @@ -56,19 +56,19 @@ func TestReplicaClient_GenerationDir(t *testing.T) { func TestReplicaClient_SnapshotsDir(t *testing.T) { t.Run("OK", func(t *testing.T) 
{ - if got, err := file.NewReplicaClient("/foo").SnapshotsDir("0123456701234567"); err != nil { + if got, err := litestream.NewFileReplicaClient("/foo").SnapshotsDir("0123456701234567"); err != nil { t.Fatal(err) } else if want := "/foo/generations/0123456701234567/snapshots"; got != want { t.Fatalf("SnapshotsDir()=%v, want %v", got, want) } }) t.Run("ErrNoPath", func(t *testing.T) { - if _, err := file.NewReplicaClient("").SnapshotsDir("0123456701234567"); err == nil || err.Error() != `file replica path required` { + if _, err := litestream.NewFileReplicaClient("").SnapshotsDir("0123456701234567"); err == nil || err.Error() != `file replica path required` { t.Fatalf("unexpected error: %v", err) } }) t.Run("ErrNoGeneration", func(t *testing.T) { - if _, err := file.NewReplicaClient("/foo").SnapshotsDir(""); err == nil || err.Error() != `generation required` { + if _, err := litestream.NewFileReplicaClient("/foo").SnapshotsDir(""); err == nil || err.Error() != `generation required` { t.Fatalf("unexpected error: %v", err) } }) @@ -76,19 +76,19 @@ func TestReplicaClient_SnapshotsDir(t *testing.T) { func TestReplicaClient_SnapshotPath(t *testing.T) { t.Run("OK", func(t *testing.T) { - if got, err := file.NewReplicaClient("/foo").SnapshotPath("0123456701234567", 1000); err != nil { + if got, err := litestream.NewFileReplicaClient("/foo").SnapshotPath("0123456701234567", 1000); err != nil { t.Fatal(err) } else if want := "/foo/generations/0123456701234567/snapshots/000003e8.snapshot.lz4"; got != want { t.Fatalf("SnapshotPath()=%v, want %v", got, want) } }) t.Run("ErrNoPath", func(t *testing.T) { - if _, err := file.NewReplicaClient("").SnapshotPath("0123456701234567", 1000); err == nil || err.Error() != `file replica path required` { + if _, err := litestream.NewFileReplicaClient("").SnapshotPath("0123456701234567", 1000); err == nil || err.Error() != `file replica path required` { t.Fatalf("unexpected error: %v", err) } }) t.Run("ErrNoGeneration", func(t *testing.T) { - if _, err := file.NewReplicaClient("/foo").SnapshotPath("", 1000); err == nil || err.Error() != `generation required` { + if _, err := litestream.NewFileReplicaClient("/foo").SnapshotPath("", 1000); err == nil || err.Error() != `generation required` { t.Fatalf("unexpected error: %v", err) } }) @@ -96,19 +96,19 @@ func TestReplicaClient_SnapshotPath(t *testing.T) { func TestReplicaClient_WALDir(t *testing.T) { t.Run("OK", func(t *testing.T) { - if got, err := file.NewReplicaClient("/foo").WALDir("0123456701234567"); err != nil { + if got, err := litestream.NewFileReplicaClient("/foo").WALDir("0123456701234567"); err != nil { t.Fatal(err) } else if want := "/foo/generations/0123456701234567/wal"; got != want { t.Fatalf("WALDir()=%v, want %v", got, want) } }) t.Run("ErrNoPath", func(t *testing.T) { - if _, err := file.NewReplicaClient("").WALDir("0123456701234567"); err == nil || err.Error() != `file replica path required` { + if _, err := litestream.NewFileReplicaClient("").WALDir("0123456701234567"); err == nil || err.Error() != `file replica path required` { t.Fatalf("unexpected error: %v", err) } }) t.Run("ErrNoGeneration", func(t *testing.T) { - if _, err := file.NewReplicaClient("/foo").WALDir(""); err == nil || err.Error() != `generation required` { + if _, err := litestream.NewFileReplicaClient("/foo").WALDir(""); err == nil || err.Error() != `generation required` { t.Fatalf("unexpected error: %v", err) } }) @@ -116,19 +116,19 @@ func TestReplicaClient_WALDir(t *testing.T) { func TestReplicaClient_WALSegmentPath(t 
*testing.T) { t.Run("OK", func(t *testing.T) { - if got, err := file.NewReplicaClient("/foo").WALSegmentPath("0123456701234567", 1000, 1001); err != nil { + if got, err := litestream.NewFileReplicaClient("/foo").WALSegmentPath("0123456701234567", 1000, 1001); err != nil { t.Fatal(err) } else if want := "/foo/generations/0123456701234567/wal/000003e8/000003e9.wal.lz4"; got != want { t.Fatalf("WALPath()=%v, want %v", got, want) } }) t.Run("ErrNoPath", func(t *testing.T) { - if _, err := file.NewReplicaClient("").WALSegmentPath("0123456701234567", 1000, 0); err == nil || err.Error() != `file replica path required` { + if _, err := litestream.NewFileReplicaClient("").WALSegmentPath("0123456701234567", 1000, 0); err == nil || err.Error() != `file replica path required` { t.Fatalf("unexpected error: %v", err) } }) t.Run("ErrNoGeneration", func(t *testing.T) { - if _, err := file.NewReplicaClient("/foo").WALSegmentPath("", 1000, 0); err == nil || err.Error() != `generation required` { + if _, err := litestream.NewFileReplicaClient("/foo").WALSegmentPath("", 1000, 0); err == nil || err.Error() != `generation required` { t.Fatalf("unexpected error: %v", err) } }) diff --git a/integration/replica_client_test.go b/integration/replica_client_test.go new file mode 100644 index 0000000..109f4f3 --- /dev/null +++ b/integration/replica_client_test.go @@ -0,0 +1,566 @@ +package integration_test + +import ( + "context" + "flag" + "fmt" + "io/ioutil" + "math/rand" + "os" + "path" + "reflect" + "sort" + "strings" + "testing" + "time" + + "github.com/benbjohnson/litestream" + "github.com/benbjohnson/litestream/abs" + "github.com/benbjohnson/litestream/gcs" + "github.com/benbjohnson/litestream/s3" + "github.com/benbjohnson/litestream/sftp" +) + +func init() { + rand.Seed(time.Now().UnixNano()) +} + +var ( + // Enables integration tests. 
+ replicaType = flag.String("replica-type", "file", "") +) + +// S3 settings +var ( + // Replica client settings + s3AccessKeyID = flag.String("s3-access-key-id", os.Getenv("LITESTREAM_S3_ACCESS_KEY_ID"), "") + s3SecretAccessKey = flag.String("s3-secret-access-key", os.Getenv("LITESTREAM_S3_SECRET_ACCESS_KEY"), "") + s3Region = flag.String("s3-region", os.Getenv("LITESTREAM_S3_REGION"), "") + s3Bucket = flag.String("s3-bucket", os.Getenv("LITESTREAM_S3_BUCKET"), "") + s3Path = flag.String("s3-path", os.Getenv("LITESTREAM_S3_PATH"), "") + s3Endpoint = flag.String("s3-endpoint", os.Getenv("LITESTREAM_S3_ENDPOINT"), "") + s3ForcePathStyle = flag.Bool("s3-force-path-style", os.Getenv("LITESTREAM_S3_FORCE_PATH_STYLE") == "true", "") + s3SkipVerify = flag.Bool("s3-skip-verify", os.Getenv("LITESTREAM_S3_SKIP_VERIFY") == "true", "") +) + +// Google cloud storage settings +var ( + gcsBucket = flag.String("gcs-bucket", os.Getenv("LITESTREAM_GCS_BUCKET"), "") + gcsPath = flag.String("gcs-path", os.Getenv("LITESTREAM_GCS_PATH"), "") +) + +// Azure blob storage settings +var ( + absAccountName = flag.String("abs-account-name", os.Getenv("LITESTREAM_ABS_ACCOUNT_NAME"), "") + absAccountKey = flag.String("abs-account-key", os.Getenv("LITESTREAM_ABS_ACCOUNT_KEY"), "") + absBucket = flag.String("abs-bucket", os.Getenv("LITESTREAM_ABS_BUCKET"), "") + absPath = flag.String("abs-path", os.Getenv("LITESTREAM_ABS_PATH"), "") +) + +// SFTP settings +var ( + sftpHost = flag.String("sftp-host", os.Getenv("LITESTREAM_SFTP_HOST"), "") + sftpUser = flag.String("sftp-user", os.Getenv("LITESTREAM_SFTP_USER"), "") + sftpPassword = flag.String("sftp-password", os.Getenv("LITESTREAM_SFTP_PASSWORD"), "") + sftpKeyPath = flag.String("sftp-key-path", os.Getenv("LITESTREAM_SFTP_KEY_PATH"), "") + sftpPath = flag.String("sftp-path", os.Getenv("LITESTREAM_SFTP_PATH"), "") +) + +func TestReplicaClient_Generations(t *testing.T) { + RunWithReplicaClient(t, "OK", func(t *testing.T, c litestream.ReplicaClient) { + t.Parallel() + + // Write snapshots. + if _, err := c.WriteSnapshot(context.Background(), "5efbd8d042012dca", 0, strings.NewReader(`foo`)); err != nil { + t.Fatal(err) + } else if _, err := c.WriteSnapshot(context.Background(), "b16ddcf5c697540f", 0, strings.NewReader(`bar`)); err != nil { + t.Fatal(err) + } else if _, err := c.WriteSnapshot(context.Background(), "155fe292f8333c72", 0, strings.NewReader(`baz`)); err != nil { + t.Fatal(err) + } + + // Verify returned generations. + if got, err := c.Generations(context.Background()); err != nil { + t.Fatal(err) + } else if want := []string{"155fe292f8333c72", "5efbd8d042012dca", "b16ddcf5c697540f"}; !reflect.DeepEqual(got, want) { + t.Fatalf("Generations()=%v, want %v", got, want) + } + }) + + RunWithReplicaClient(t, "NoGenerationsDir", func(t *testing.T, c litestream.ReplicaClient) { + t.Parallel() + + if generations, err := c.Generations(context.Background()); err != nil { + t.Fatal(err) + } else if got, want := len(generations), 0; got != want { + t.Fatalf("len(Generations())=%v, want %v", got, want) + } + }) +} + +func TestReplicaClient_Snapshots(t *testing.T) { + RunWithReplicaClient(t, "OK", func(t *testing.T, c litestream.ReplicaClient) { + t.Parallel() + + // Write snapshots. 
+ if _, err := c.WriteSnapshot(context.Background(), "5efbd8d042012dca", 1, strings.NewReader(``)); err != nil { + t.Fatal(err) + } else if _, err := c.WriteSnapshot(context.Background(), "b16ddcf5c697540f", 5, strings.NewReader(`x`)); err != nil { + t.Fatal(err) + } else if _, err := c.WriteSnapshot(context.Background(), "b16ddcf5c697540f", 10, strings.NewReader(`xyz`)); err != nil { + t.Fatal(err) + } + + // Fetch all snapshots by generation. + itr, err := c.Snapshots(context.Background(), "b16ddcf5c697540f") + if err != nil { + t.Fatal(err) + } + defer itr.Close() + + // Read all snapshots into a slice so they can be sorted. + a, err := litestream.SliceSnapshotIterator(itr) + if err != nil { + t.Fatal(err) + } else if got, want := len(a), 2; got != want { + t.Fatalf("len=%v, want %v", got, want) + } + sort.Sort(litestream.SnapshotInfoSlice(a)) + + // Verify first snapshot metadata. + if got, want := a[0].Generation, "b16ddcf5c697540f"; got != want { + t.Fatalf("Generation=%v, want %v", got, want) + } else if got, want := a[0].Index, 5; got != want { + t.Fatalf("Index=%v, want %v", got, want) + } else if got, want := a[0].Size, int64(1); got != want { + t.Fatalf("Size=%v, want %v", got, want) + } else if a[0].CreatedAt.IsZero() { + t.Fatalf("expected CreatedAt") + } + + // Verify second snapshot metadata. + if got, want := a[1].Generation, "b16ddcf5c697540f"; got != want { + t.Fatalf("Generation=%v, want %v", got, want) + } else if got, want := a[1].Index, 0xA; got != want { + t.Fatalf("Index=%v, want %v", got, want) + } else if got, want := a[1].Size, int64(3); got != want { + t.Fatalf("Size=%v, want %v", got, want) + } else if a[1].CreatedAt.IsZero() { + t.Fatalf("expected CreatedAt") + } + + // Ensure close is clean. + if err := itr.Close(); err != nil { + t.Fatal(err) + } + }) + + RunWithReplicaClient(t, "NoGenerationDir", func(t *testing.T, c litestream.ReplicaClient) { + t.Parallel() + + itr, err := c.Snapshots(context.Background(), "5efbd8d042012dca") + if err != nil { + t.Fatal(err) + } + defer itr.Close() + + if itr.Next() { + t.Fatal("expected no snapshots") + } + }) + + RunWithReplicaClient(t, "ErrNoGeneration", func(t *testing.T, c litestream.ReplicaClient) { + t.Parallel() + + itr, err := c.Snapshots(context.Background(), "") + if err == nil { + err = itr.Close() + } + if err == nil || err.Error() != `generation required` { + t.Fatalf("unexpected error: %v", err) + } + }) +} + +func TestReplicaClient_WriteSnapshot(t *testing.T) { + RunWithReplicaClient(t, "OK", func(t *testing.T, c litestream.ReplicaClient) { + t.Parallel() + + if _, err := c.WriteSnapshot(context.Background(), "b16ddcf5c697540f", 1000, strings.NewReader(`foobar`)); err != nil { + t.Fatal(err) + } + + if r, err := c.SnapshotReader(context.Background(), "b16ddcf5c697540f", 1000); err != nil { + t.Fatal(err) + } else if buf, err := ioutil.ReadAll(r); err != nil { + t.Fatal(err) + } else if err := r.Close(); err != nil { + t.Fatal(err) + } else if got, want := string(buf), `foobar`; got != want { + t.Fatalf("data=%q, want %q", got, want) + } + }) + + RunWithReplicaClient(t, "ErrNoGeneration", func(t *testing.T, c litestream.ReplicaClient) { + t.Parallel() + if _, err := c.WriteSnapshot(context.Background(), "", 0, nil); err == nil || err.Error() != `generation required` { + t.Fatalf("unexpected error: %v", err) + } + }) +} + +func TestReplicaClient_SnapshotReader(t *testing.T) { + RunWithReplicaClient(t, "OK", func(t *testing.T, c litestream.ReplicaClient) { + t.Parallel() + + if _, err := 
c.WriteSnapshot(context.Background(), "5efbd8d042012dca", 10, strings.NewReader(`foo`)); err != nil { + t.Fatal(err) + } + + r, err := c.SnapshotReader(context.Background(), "5efbd8d042012dca", 10) + if err != nil { + t.Fatal(err) + } + defer r.Close() + + if buf, err := ioutil.ReadAll(r); err != nil { + t.Fatal(err) + } else if got, want := string(buf), "foo"; got != want { + t.Fatalf("ReadAll=%v, want %v", got, want) + } + }) + + RunWithReplicaClient(t, "ErrNotFound", func(t *testing.T, c litestream.ReplicaClient) { + t.Parallel() + + if _, err := c.SnapshotReader(context.Background(), "5efbd8d042012dca", 1); !os.IsNotExist(err) { + t.Fatalf("expected not exist, got %#v", err) + } + }) + + RunWithReplicaClient(t, "ErrNoGeneration", func(t *testing.T, c litestream.ReplicaClient) { + t.Parallel() + + if _, err := c.SnapshotReader(context.Background(), "", 1); err == nil || err.Error() != `generation required` { + t.Fatalf("unexpected error: %v", err) + } + }) +} + +func TestReplicaClient_WALSegments(t *testing.T) { + RunWithReplicaClient(t, "OK", func(t *testing.T, c litestream.ReplicaClient) { + t.Parallel() + + if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "5efbd8d042012dca", Index: 1, Offset: 0}, strings.NewReader(``)); err != nil { + t.Fatal(err) + } + if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "b16ddcf5c697540f", Index: 2, Offset: 0}, strings.NewReader(`12345`)); err != nil { + t.Fatal(err) + } else if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "b16ddcf5c697540f", Index: 2, Offset: 5}, strings.NewReader(`67`)); err != nil { + t.Fatal(err) + } else if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "b16ddcf5c697540f", Index: 3, Offset: 0}, strings.NewReader(`xyz`)); err != nil { + t.Fatal(err) + } + + itr, err := c.WALSegments(context.Background(), "b16ddcf5c697540f") + if err != nil { + t.Fatal(err) + } + defer itr.Close() + + // Read all WAL segment files into a slice so they can be sorted. + a, err := litestream.SliceWALSegmentIterator(itr) + if err != nil { + t.Fatal(err) + } else if got, want := len(a), 3; got != want { + t.Fatalf("len=%v, want %v", got, want) + } + sort.Sort(litestream.WALSegmentInfoSlice(a)) + + // Verify first WAL segment metadata. + if got, want := a[0].Generation, "b16ddcf5c697540f"; got != want { + t.Fatalf("Generation=%v, want %v", got, want) + } else if got, want := a[0].Index, 2; got != want { + t.Fatalf("Index=%v, want %v", got, want) + } else if got, want := a[0].Offset, int64(0); got != want { + t.Fatalf("Offset=%v, want %v", got, want) + } else if got, want := a[0].Size, int64(5); got != want { + t.Fatalf("Size=%v, want %v", got, want) + } else if a[0].CreatedAt.IsZero() { + t.Fatalf("expected CreatedAt") + } + + // Verify first WAL segment metadata. + if got, want := a[1].Generation, "b16ddcf5c697540f"; got != want { + t.Fatalf("Generation=%v, want %v", got, want) + } else if got, want := a[1].Index, 2; got != want { + t.Fatalf("Index=%v, want %v", got, want) + } else if got, want := a[1].Offset, int64(5); got != want { + t.Fatalf("Offset=%v, want %v", got, want) + } else if got, want := a[1].Size, int64(2); got != want { + t.Fatalf("Size=%v, want %v", got, want) + } else if a[1].CreatedAt.IsZero() { + t.Fatalf("expected CreatedAt") + } + + // Verify third WAL segment metadata. 
+ if got, want := a[2].Generation, "b16ddcf5c697540f"; got != want { + t.Fatalf("Generation=%v, want %v", got, want) + } else if got, want := a[2].Index, 3; got != want { + t.Fatalf("Index=%v, want %v", got, want) + } else if got, want := a[2].Offset, int64(0); got != want { + t.Fatalf("Offset=%v, want %v", got, want) + } else if got, want := a[2].Size, int64(3); got != want { + t.Fatalf("Size=%v, want %v", got, want) + } else if a[1].CreatedAt.IsZero() { + t.Fatalf("expected CreatedAt") + } + + // Ensure close is clean. + if err := itr.Close(); err != nil { + t.Fatal(err) + } + }) + + RunWithReplicaClient(t, "NoGenerationDir", func(t *testing.T, c litestream.ReplicaClient) { + t.Parallel() + + itr, err := c.WALSegments(context.Background(), "5efbd8d042012dca") + if err != nil { + t.Fatal(err) + } + defer itr.Close() + + if itr.Next() { + t.Fatal("expected no wal files") + } + }) + + RunWithReplicaClient(t, "NoWALs", func(t *testing.T, c litestream.ReplicaClient) { + t.Parallel() + + if _, err := c.WriteSnapshot(context.Background(), "5efbd8d042012dca", 0, strings.NewReader(`foo`)); err != nil { + t.Fatal(err) + } + + itr, err := c.WALSegments(context.Background(), "5efbd8d042012dca") + if err != nil { + t.Fatal(err) + } + defer itr.Close() + + if itr.Next() { + t.Fatal("expected no wal files") + } + }) + + RunWithReplicaClient(t, "ErrNoGeneration", func(t *testing.T, c litestream.ReplicaClient) { + t.Parallel() + + itr, err := c.WALSegments(context.Background(), "") + if err == nil { + err = itr.Close() + } + if err == nil || err.Error() != `generation required` { + t.Fatalf("unexpected error: %v", err) + } + }) +} + +func TestReplicaClient_WriteWALSegment(t *testing.T) { + RunWithReplicaClient(t, "OK", func(t *testing.T, c litestream.ReplicaClient) { + t.Parallel() + + if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "b16ddcf5c697540f", Index: 1000, Offset: 2000}, strings.NewReader(`foobar`)); err != nil { + t.Fatal(err) + } + + if r, err := c.WALSegmentReader(context.Background(), litestream.Pos{Generation: "b16ddcf5c697540f", Index: 1000, Offset: 2000}); err != nil { + t.Fatal(err) + } else if buf, err := ioutil.ReadAll(r); err != nil { + t.Fatal(err) + } else if err := r.Close(); err != nil { + t.Fatal(err) + } else if got, want := string(buf), `foobar`; got != want { + t.Fatalf("data=%q, want %q", got, want) + } + }) + + RunWithReplicaClient(t, "ErrNoGeneration", func(t *testing.T, c litestream.ReplicaClient) { + t.Parallel() + if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "", Index: 0, Offset: 0}, nil); err == nil || err.Error() != `generation required` { + t.Fatalf("unexpected error: %v", err) + } + }) +} + +func TestReplicaClient_WALSegmentReader(t *testing.T) { + + RunWithReplicaClient(t, "OK", func(t *testing.T, c litestream.ReplicaClient) { + t.Parallel() + if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "5efbd8d042012dca", Index: 10, Offset: 5}, strings.NewReader(`foobar`)); err != nil { + t.Fatal(err) + } + + r, err := c.WALSegmentReader(context.Background(), litestream.Pos{Generation: "5efbd8d042012dca", Index: 10, Offset: 5}) + if err != nil { + t.Fatal(err) + } + defer r.Close() + + if buf, err := ioutil.ReadAll(r); err != nil { + t.Fatal(err) + } else if got, want := string(buf), "foobar"; got != want { + t.Fatalf("ReadAll=%v, want %v", got, want) + } + }) + + RunWithReplicaClient(t, "ErrNotFound", func(t *testing.T, c litestream.ReplicaClient) { + t.Parallel() + + if _, err 
:= c.WALSegmentReader(context.Background(), litestream.Pos{Generation: "5efbd8d042012dca", Index: 1, Offset: 0}); !os.IsNotExist(err) { + t.Fatalf("expected not exist, got %#v", err) + } + }) +} + +func TestReplicaClient_DeleteWALSegments(t *testing.T) { + RunWithReplicaClient(t, "OK", func(t *testing.T, c litestream.ReplicaClient) { + t.Parallel() + + if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "b16ddcf5c697540f", Index: 1, Offset: 2}, strings.NewReader(`foo`)); err != nil { + t.Fatal(err) + } else if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "5efbd8d042012dca", Index: 3, Offset: 4}, strings.NewReader(`bar`)); err != nil { + t.Fatal(err) + } + + if err := c.DeleteWALSegments(context.Background(), []litestream.Pos{ + {Generation: "b16ddcf5c697540f", Index: 1, Offset: 2}, + {Generation: "5efbd8d042012dca", Index: 3, Offset: 4}, + }); err != nil { + t.Fatal(err) + } + + if _, err := c.WALSegmentReader(context.Background(), litestream.Pos{Generation: "b16ddcf5c697540f", Index: 1, Offset: 2}); !os.IsNotExist(err) { + t.Fatalf("expected not exist, got %#v", err) + } else if _, err := c.WALSegmentReader(context.Background(), litestream.Pos{Generation: "5efbd8d042012dca", Index: 3, Offset: 4}); !os.IsNotExist(err) { + t.Fatalf("expected not exist, got %#v", err) + } + }) + + RunWithReplicaClient(t, "ErrNoGeneration", func(t *testing.T, c litestream.ReplicaClient) { + t.Parallel() + if err := c.DeleteWALSegments(context.Background(), []litestream.Pos{{}}); err == nil || err.Error() != `generation required` { + t.Fatalf("unexpected error: %v", err) + } + }) +} + +// RunWithReplicaClient executes fn with each replica specified by the -replica-type flag +func RunWithReplicaClient(t *testing.T, name string, fn func(*testing.T, litestream.ReplicaClient)) { + t.Run(name, func(t *testing.T) { + for _, typ := range strings.Split(*replicaType, ",") { + t.Run(typ, func(t *testing.T) { + c := NewReplicaClient(t, typ) + defer MustDeleteAll(t, c) + + fn(t, c) + }) + } + }) +} + +// NewReplicaClient returns a new client for integration testing by type name. +func NewReplicaClient(tb testing.TB, typ string) litestream.ReplicaClient { + tb.Helper() + + switch typ { + case litestream.FileReplicaClientType: + return litestream.NewFileReplicaClient(tb.TempDir()) + case s3.ReplicaClientType: + return NewS3ReplicaClient(tb) + case gcs.ReplicaClientType: + return NewGCSReplicaClient(tb) + case abs.ReplicaClientType: + return NewABSReplicaClient(tb) + case sftp.ReplicaClientType: + return NewSFTPReplicaClient(tb) + default: + tb.Fatalf("invalid replica client type: %q", typ) + return nil + } +} + +// NewS3ReplicaClient returns a new client for integration testing. +func NewS3ReplicaClient(tb testing.TB) *s3.ReplicaClient { + tb.Helper() + + c := s3.NewReplicaClient() + c.AccessKeyID = *s3AccessKeyID + c.SecretAccessKey = *s3SecretAccessKey + c.Region = *s3Region + c.Bucket = *s3Bucket + c.Path = path.Join(*s3Path, fmt.Sprintf("%016x", rand.Uint64())) + c.Endpoint = *s3Endpoint + c.ForcePathStyle = *s3ForcePathStyle + c.SkipVerify = *s3SkipVerify + return c +} + +// NewGCSReplicaClient returns a new client for integration testing. +func NewGCSReplicaClient(tb testing.TB) *gcs.ReplicaClient { + tb.Helper() + + c := gcs.NewReplicaClient() + c.Bucket = *gcsBucket + c.Path = path.Join(*gcsPath, fmt.Sprintf("%016x", rand.Uint64())) + return c +} + +// NewABSReplicaClient returns a new client for integration testing. 
+func NewABSReplicaClient(tb testing.TB) *abs.ReplicaClient { + tb.Helper() + + c := abs.NewReplicaClient() + c.AccountName = *absAccountName + c.AccountKey = *absAccountKey + c.Bucket = *absBucket + c.Path = path.Join(*absPath, fmt.Sprintf("%016x", rand.Uint64())) + return c +} + +// NewSFTPReplicaClient returns a new client for integration testing. +func NewSFTPReplicaClient(tb testing.TB) *sftp.ReplicaClient { + tb.Helper() + + c := sftp.NewReplicaClient() + c.Host = *sftpHost + c.User = *sftpUser + c.Password = *sftpPassword + c.KeyPath = *sftpKeyPath + c.Path = path.Join(*sftpPath, fmt.Sprintf("%016x", rand.Uint64())) + return c +} + +// MustDeleteAll deletes all objects under the client's path. +func MustDeleteAll(tb testing.TB, c litestream.ReplicaClient) { + tb.Helper() + + generations, err := c.Generations(context.Background()) + if err != nil { + tb.Fatalf("cannot list generations for deletion: %s", err) + } + + for _, generation := range generations { + if err := c.DeleteGeneration(context.Background(), generation); err != nil { + tb.Fatalf("cannot delete generation: %s", err) + } + } + + switch c := c.(type) { + case *sftp.ReplicaClient: + if err := c.Cleanup(context.Background()); err != nil { + tb.Fatalf("cannot cleanup sftp: %s", err) + } + } +} diff --git a/internal/internal.go b/internal/internal.go index 26d55aa..f8e5c60 100644 --- a/internal/internal.go +++ b/internal/internal.go @@ -94,27 +94,19 @@ func (r *ReadCounter) Read(p []byte) (int, error) { func (r *ReadCounter) N() int64 { return r.n } // CreateFile creates the file and matches the mode & uid/gid of fi. -func CreateFile(filename string, fi os.FileInfo) (*os.File, error) { - mode := os.FileMode(0600) - if fi != nil { - mode = fi.Mode() - } - +func CreateFile(filename string, mode os.FileMode, uid, gid int) (*os.File, error) { f, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode) if err != nil { return nil, err } - uid, gid := Fileinfo(fi) _ = f.Chown(uid, gid) return f, nil } // MkdirAll is a copy of os.MkdirAll() except that it attempts to set the // mode/uid/gid to match fi for each created directory. -func MkdirAll(path string, fi os.FileInfo) error { - uid, gid := Fileinfo(fi) - +func MkdirAll(path string, mode os.FileMode, uid, gid int) error { // Fast path: if we can tell whether path is a directory or file, stop with success or error. dir, err := os.Stat(path) if err == nil { @@ -137,17 +129,13 @@ func MkdirAll(path string, fi os.FileInfo) error { if j > 1 { // Create parent. - err = MkdirAll(fixRootDirectory(path[:j-1]), fi) + err = MkdirAll(fixRootDirectory(path[:j-1]), mode, uid, gid) if err != nil { return err } } // Parent now exists; invoke Mkdir and use its result. - mode := os.FileMode(0700) - if fi != nil { - mode = fi.Mode() - } err = os.Mkdir(path, mode) if err != nil { // Handle arguments like "foo/." by diff --git a/litestream.go b/litestream.go index 46eb033..e962f14 100644 --- a/litestream.go +++ b/litestream.go @@ -37,6 +37,7 @@ const ( var ( ErrNoGeneration = errors.New("no generation available") ErrNoSnapshots = errors.New("no snapshots available") + ErrNoWALSegments = errors.New("no wal segments available") ErrChecksumMismatch = errors.New("invalid replica, checksum mismatch") ) @@ -440,6 +441,20 @@ func ParseOffset(s string) (int64, error) { return v, nil } +// removeDBFiles deletes the database and related files (journal, shm, wal). 
+func removeDBFiles(filename string) error { + if err := os.Remove(filename); err != nil && !os.IsNotExist(err) { + return fmt.Errorf("cannot delete database %q: %w", filename, err) + } else if err := os.Remove(filename + "-journal"); err != nil && !os.IsNotExist(err) { + return fmt.Errorf("cannot delete journal for %q: %w", filename, err) + } else if err := os.Remove(filename + "-shm"); err != nil && !os.IsNotExist(err) { + return fmt.Errorf("cannot delete shared memory for %q: %w", filename, err) + } else if err := os.Remove(filename + "-wal"); err != nil && !os.IsNotExist(err) { + return fmt.Errorf("cannot delete wal for %q: %w", filename, err) + } + return nil +} + // isHexChar returns true if ch is a lowercase hex character. func isHexChar(ch rune) bool { return (ch >= '0' && ch <= '9') || (ch >= 'a' && ch <= 'f') diff --git a/litestream_test.go b/litestream_test.go index 93327df..9878fbd 100644 --- a/litestream_test.go +++ b/litestream_test.go @@ -1,12 +1,16 @@ package litestream_test import ( + "bytes" "encoding/binary" "encoding/hex" + "io" + "os" "testing" "github.com/benbjohnson/litestream" _ "github.com/mattn/go-sqlite3" + "github.com/pierrec/lz4/v4" ) func TestChecksum(t *testing.T) { @@ -26,14 +30,14 @@ func TestChecksum(t *testing.T) { // Ensure we get the same result as OnePass even if we split up into multiple calls. t.Run("Incremental", func(t *testing.T) { // Compute checksum for beginning of WAL header. - s0, s1 := litestream.Checksum(binary.LittleEndian, 0, 0, MustDecodeHexString("377f0682002de218000010000000000052382eac857b1a4e")) + s0, s1 := litestream.Checksum(binary.LittleEndian, 0, 0, decodeHexString(t, "377f0682002de218000010000000000052382eac857b1a4e")) if got, want := [2]uint32{s0, s1}, [2]uint32{0x81153b65, 0x87178e8f}; got != want { t.Fatalf("Checksum()=%x, want %x", got, want) } // Continue checksum with WAL frame header & frame contents. 
- s0a, s1a := litestream.Checksum(binary.LittleEndian, s0, s1, MustDecodeHexString("0000000200000002")) - s0b, s1b := litestream.Checksum(binary.LittleEndian, s0a, s1a, MustDecodeHexString(`0d000000080fe0000ffc0ff80ff40ff00fec0fe80fe40fe000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000208020902070209020602090205020902040209020302090202020902010209`)) + s0a, s1a := litestream.Checksum(binary.LittleEndian, s0, s1, decodeHexString(t, "0000000200000002")) + s0b, s1b := litestream.Checksum(binary.LittleEndian, s0a, s1a, decodeHexString(t, `0d000000080fe0000ffc0ff80ff40ff00fec0fe80fe40fe0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000208020902070209020602090205020902040209020302090202020902010209`)) if got, want := [2]uint32{s0b, s1b}, [2]uint32{0xdc2f3e84, 0x540488d3}; got != want { t.Fatalf("Checksum()=%x, want %x", got, want) } @@ -50,10 +54,52 @@ func TestFindMinSnapshotByGeneration(t *testing.T) { } } -func MustDecodeHexString(s string) []byte { +func decodeHexString(tb testing.TB, s string) []byte { + tb.Helper() + b, err := hex.DecodeString(s) if err != nil { - panic(err) + tb.Fatal(err) } return b } + +// fileEqual returns true if files at x and y have equal contents. 
+func fileEqual(tb testing.TB, x, y string) bool { + tb.Helper() + + bx, err := os.ReadFile(x) + if err != nil { + tb.Fatal(err) + } + + by, err := os.ReadFile(y) + if err != nil { + tb.Fatal(err) + } + + return bytes.Equal(bx, by) +} + +func compressLZ4(tb testing.TB, b []byte) []byte { + tb.Helper() + + var buf bytes.Buffer + zw := lz4.NewWriter(&buf) + if _, err := zw.Write(b); err != nil { + tb.Fatal(err) + } else if err := zw.Close(); err != nil { + tb.Fatal(err) + } + return buf.Bytes() +} + +func decompressLZ4(tb testing.TB, b []byte) []byte { + tb.Helper() + + buf, err := io.ReadAll(lz4.NewReader(bytes.NewReader(b))) + if err != nil { + tb.Fatal(err) + } + return buf +} diff --git a/mock/read_closer.go b/mock/read_closer.go new file mode 100644 index 0000000..a473e96 --- /dev/null +++ b/mock/read_closer.go @@ -0,0 +1,14 @@ +package mock + +type ReadCloser struct { + CloseFunc func() error + ReadFunc func([]byte) (int, error) +} + +func (r *ReadCloser) Close() error { + return r.CloseFunc() +} + +func (r *ReadCloser) Read(b []byte) (int, error) { + return r.ReadFunc(b) +} diff --git a/mock/snapshot_iterator.go b/mock/snapshot_iterator.go new file mode 100644 index 0000000..8f167d6 --- /dev/null +++ b/mock/snapshot_iterator.go @@ -0,0 +1,28 @@ +package mock + +import ( + "github.com/benbjohnson/litestream" +) + +type SnapshotIterator struct { + CloseFunc func() error + NextFunc func() bool + ErrFunc func() error + SnapshotFunc func() litestream.SnapshotInfo +} + +func (itr *SnapshotIterator) Close() error { + return itr.CloseFunc() +} + +func (itr *SnapshotIterator) Next() bool { + return itr.NextFunc() +} + +func (itr *SnapshotIterator) Err() error { + return itr.ErrFunc() +} + +func (itr *SnapshotIterator) Snapshot() litestream.SnapshotInfo { + return itr.SnapshotFunc() +} diff --git a/mock/wal_segment_iterator.go b/mock/wal_segment_iterator.go new file mode 100644 index 0000000..f1d62cd --- /dev/null +++ b/mock/wal_segment_iterator.go @@ -0,0 +1,28 @@ +package mock + +import ( + "github.com/benbjohnson/litestream" +) + +type WALSegmentIterator struct { + CloseFunc func() error + NextFunc func() bool + ErrFunc func() error + WALSegmentFunc func() litestream.WALSegmentInfo +} + +func (itr *WALSegmentIterator) Close() error { + return itr.CloseFunc() +} + +func (itr *WALSegmentIterator) Next() bool { + return itr.NextFunc() +} + +func (itr *WALSegmentIterator) Err() error { + return itr.ErrFunc() +} + +func (itr *WALSegmentIterator) WALSegment() litestream.WALSegmentInfo { + return itr.WALSegmentFunc() +} diff --git a/replica.go b/replica.go index c6d0f3f..67e9d14 100644 --- a/replica.go +++ b/replica.go @@ -7,14 +7,12 @@ import ( "io" "io/ioutil" "log" - "math" "os" "path/filepath" "sort" "sync" "time" - "github.com/benbjohnson/litestream/internal" "github.com/pierrec/lz4/v4" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" @@ -144,6 +142,15 @@ func (r *Replica) Stop(hard bool) (err error) { return err } +// logPrefix returns the prefix used when logging from the replica. +// This includes the replica name as well as the database path, if available. +func (r *Replica) logPrefix() string { + if db := r.DB(); db != nil { + return fmt.Sprintf("%s(%s): ", db.Path(), r.Name()) + } + return r.Name() + ": " +} + // Sync copies new WAL frames from the shadow WAL to the replica client. func (r *Replica) Sync(ctx context.Context) (err error) { // Clear last position if if an error occurs during sync. 
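
The mock package added above follows the function-field pattern: each interface method delegates to a swappable func, so a test stubs only the behavior it needs. As a rough usage sketch, not part of this patch (the test name and snapshot values are invented, and it assumes litestream.SliceSnapshotIterator simply drains the iterator in order, which is how the integration tests above use it), a unit test could feed canned snapshot metadata through mock.SnapshotIterator like this:

package litestream_test

import (
	"reflect"
	"testing"
	"time"

	"github.com/benbjohnson/litestream"
	"github.com/benbjohnson/litestream/mock"
)

// Hypothetical example: serve canned SnapshotInfo values through the
// function-field mock instead of a real replica, then verify they round-trip
// through SliceSnapshotIterator.
func TestSliceSnapshotIterator_Mock(t *testing.T) {
	want := []litestream.SnapshotInfo{
		{Generation: "5efbd8d042012dca", Index: 1, Size: 3, CreatedAt: time.Now()},
		{Generation: "5efbd8d042012dca", Index: 2, Size: 6, CreatedAt: time.Now()},
	}

	// Each Next() call advances the cursor; Snapshot() returns the current item.
	i := -1
	itr := &mock.SnapshotIterator{
		NextFunc:     func() bool { i++; return i < len(want) },
		SnapshotFunc: func() litestream.SnapshotInfo { return want[i] },
		ErrFunc:      func() error { return nil },
		CloseFunc:    func() error { return nil },
	}

	got, err := litestream.SliceSnapshotIterator(itr)
	if err != nil {
		t.Fatal(err)
	} else if !reflect.DeepEqual(got, want) {
		t.Fatalf("SliceSnapshotIterator()=%v, want %v", got, want)
	}
}

The same function-field shape applies to mock.WALSegmentIterator and mock.ReadCloser above.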
@@ -766,14 +773,18 @@ func (r *Replica) Validate(ctx context.Context) error { return fmt.Errorf("cannot wait for replica: %w", err) } + // Find lastest snapshot that occurs before the index. + snapshotIndex, err := FindSnapshotForIndex(ctx, r.Client, pos.Generation, pos.Index-1) + if err != nil { + return fmt.Errorf("cannot find snapshot index: %w", err) + } + restorePath := filepath.Join(tmpdir, "replica") - if err := r.Restore(ctx, RestoreOptions{ - OutputPath: restorePath, - ReplicaName: r.Name(), - Generation: pos.Generation, - Index: pos.Index - 1, - Logger: log.New(os.Stderr, "", 0), - }); err != nil { + opt := RestoreOptions{ + Logger: log.New(os.Stderr, "", 0), + LogPrefix: r.logPrefix(), + } + if err := Restore(ctx, r.Client, restorePath, pos.Generation, snapshotIndex, pos.Index-1, opt); err != nil { return fmt.Errorf("cannot restore: %w", err) } @@ -883,295 +894,6 @@ func (r *Replica) GenerationCreatedAt(ctx context.Context, generation string) (t return min, itr.Close() } -// GenerationTimeBounds returns the creation time & last updated time of a generation. -// Returns zero time if no snapshots or WAL segments exist. -func (r *Replica) GenerationTimeBounds(ctx context.Context, generation string) (createdAt, updatedAt time.Time, err error) { - // Iterate over snapshots. - sitr, err := r.Client.Snapshots(ctx, generation) - if err != nil { - return createdAt, updatedAt, err - } - defer sitr.Close() - - for sitr.Next() { - info := sitr.Snapshot() - if createdAt.IsZero() || info.CreatedAt.Before(createdAt) { - createdAt = info.CreatedAt - } - if updatedAt.IsZero() || info.CreatedAt.After(updatedAt) { - updatedAt = info.CreatedAt - } - } - if err := sitr.Close(); err != nil { - return createdAt, updatedAt, err - } - - // Iterate over WAL segments. - witr, err := r.Client.WALSegments(ctx, generation) - if err != nil { - return createdAt, updatedAt, err - } - defer witr.Close() - - for witr.Next() { - info := witr.WALSegment() - if createdAt.IsZero() || info.CreatedAt.Before(createdAt) { - createdAt = info.CreatedAt - } - if updatedAt.IsZero() || info.CreatedAt.After(updatedAt) { - updatedAt = info.CreatedAt - } - } - if err := witr.Close(); err != nil { - return createdAt, updatedAt, err - } - - return createdAt, updatedAt, nil -} - -// CalcRestoreTarget returns a generation to restore from. -func (r *Replica) CalcRestoreTarget(ctx context.Context, opt RestoreOptions) (generation string, updatedAt time.Time, err error) { - var target struct { - generation string - updatedAt time.Time - } - - generations, err := r.Client.Generations(ctx) - if err != nil { - return "", time.Time{}, fmt.Errorf("cannot fetch generations: %w", err) - } - - // Search generations for one that contains the requested timestamp. - for _, generation := range generations { - // Skip generation if it does not match filter. - if opt.Generation != "" && generation != opt.Generation { - continue - } - - // Determine the time bounds for the generation. - createdAt, updatedAt, err := r.GenerationTimeBounds(ctx, generation) - if err != nil { - return "", time.Time{}, fmt.Errorf("generation created at: %w", err) - } - - // Skip if it does not contain timestamp. - if !opt.Timestamp.IsZero() { - if opt.Timestamp.Before(createdAt) || opt.Timestamp.After(updatedAt) { - continue - } - } - - // Use the latest replica if we have multiple candidates. 
- if !updatedAt.After(target.updatedAt) { - continue - } - - target.generation = generation - target.updatedAt = updatedAt - } - - return target.generation, target.updatedAt, nil -} - -// Replica restores the database from a replica based on the options given. -// This method will restore into opt.OutputPath, if specified, or into the -// DB's original database path. It can optionally restore from a specific -// replica or generation or it will automatically choose the best one. Finally, -// a timestamp can be specified to restore the database to a specific -// point-in-time. -func (r *Replica) Restore(ctx context.Context, opt RestoreOptions) (err error) { - // Validate options. - if opt.OutputPath == "" { - if r.db.path == "" { - return fmt.Errorf("output path required") - } - opt.OutputPath = r.db.path - } else if opt.Generation == "" && opt.Index != math.MaxInt32 { - return fmt.Errorf("must specify generation when restoring to index") - } else if opt.Index != math.MaxInt32 && !opt.Timestamp.IsZero() { - return fmt.Errorf("cannot specify index & timestamp to restore") - } - - // Ensure logger exists. - logger := opt.Logger - if logger == nil { - logger = log.New(ioutil.Discard, "", 0) - } - - logPrefix := r.Name() - if db := r.DB(); db != nil { - logPrefix = fmt.Sprintf("%s(%s)", db.Path(), r.Name()) - } - - // Ensure output path does not already exist. - if _, err := os.Stat(opt.OutputPath); err == nil { - return fmt.Errorf("cannot restore, output path already exists: %s", opt.OutputPath) - } else if err != nil && !os.IsNotExist(err) { - return err - } - - // Find lastest snapshot that occurs before timestamp or index. - var minWALIndex int - if opt.Index < math.MaxInt32 { - if minWALIndex, err = r.SnapshotIndexByIndex(ctx, opt.Generation, opt.Index); err != nil { - return fmt.Errorf("cannot find snapshot index: %w", err) - } - } else { - if minWALIndex, err = r.SnapshotIndexAt(ctx, opt.Generation, opt.Timestamp); err != nil { - return fmt.Errorf("cannot find snapshot index by timestamp: %w", err) - } - } - - // Compute list of offsets for each WAL index. - walSegmentMap, err := r.walSegmentMap(ctx, opt.Generation, opt.Index, opt.Timestamp) - if err != nil { - return fmt.Errorf("cannot find max wal index for restore: %w", err) - } - - // Find the maximum WAL index that occurs before timestamp. - maxWALIndex := -1 - for index := range walSegmentMap { - if index > maxWALIndex { - maxWALIndex = index - } - } - - // Ensure that we found the specific index, if one was specified. - if opt.Index != math.MaxInt32 && opt.Index != opt.Index { - return fmt.Errorf("unable to locate index %d in generation %q, highest index was %d", opt.Index, opt.Generation, maxWALIndex) - } - - // If no WAL files were found, mark this as a snapshot-only restore. - snapshotOnly := maxWALIndex == -1 - - // Initialize starting position. - pos := Pos{Generation: opt.Generation, Index: minWALIndex} - tmpPath := opt.OutputPath + ".tmp" - - // Copy snapshot to output path. - logger.Printf("%s: restoring snapshot %s/%08x to %s", logPrefix, opt.Generation, minWALIndex, tmpPath) - if err := r.restoreSnapshot(ctx, pos.Generation, pos.Index, tmpPath); err != nil { - return fmt.Errorf("cannot restore snapshot: %w", err) - } - - // If no WAL files available, move snapshot to final path & exit early. - if snapshotOnly { - logger.Printf("%s: snapshot only, finalizing database", logPrefix) - return os.Rename(tmpPath, opt.OutputPath) - } - - // Begin processing WAL files. 
- logger.Printf("%s: restoring wal files: generation=%s index=[%08x,%08x]", logPrefix, opt.Generation, minWALIndex, maxWALIndex) - - // Fill input channel with all WAL indexes to be loaded in order. - // Verify every index has at least one offset. - ch := make(chan int, maxWALIndex-minWALIndex+1) - for index := minWALIndex; index <= maxWALIndex; index++ { - if len(walSegmentMap[index]) == 0 { - return fmt.Errorf("missing WAL index: %s/%08x", opt.Generation, index) - } - ch <- index - } - close(ch) - - // Track load state for each WAL. - var mu sync.Mutex - cond := sync.NewCond(&mu) - walStates := make([]walRestoreState, maxWALIndex-minWALIndex+1) - - parallelism := opt.Parallelism - if parallelism < 1 { - parallelism = 1 - } - - // Download WAL files to disk in parallel. - g, ctx := errgroup.WithContext(ctx) - for i := 0; i < parallelism; i++ { - g.Go(func() error { - for { - select { - case <-ctx.Done(): - cond.Broadcast() - return err - case index, ok := <-ch: - if !ok { - cond.Broadcast() - return nil - } - - startTime := time.Now() - - err := r.downloadWAL(ctx, opt.Generation, index, walSegmentMap[index], tmpPath) - if err != nil { - err = fmt.Errorf("cannot download wal %s/%08x: %w", opt.Generation, index, err) - } - - // Mark index as ready-to-apply and notify applying code. - mu.Lock() - walStates[index-minWALIndex] = walRestoreState{ready: true, err: err} - mu.Unlock() - cond.Broadcast() - - // Returning the error here will cancel the other goroutines. - if err != nil { - return err - } - - logger.Printf("%s: downloaded wal %s/%08x elapsed=%s", - logPrefix, opt.Generation, index, - time.Since(startTime).String(), - ) - } - } - }) - } - - // Apply WAL files in order as they are ready. - for index := minWALIndex; index <= maxWALIndex; index++ { - // Wait until next WAL file is ready to apply. - mu.Lock() - for !walStates[index-minWALIndex].ready { - if err := ctx.Err(); err != nil { - return err - } - cond.Wait() - } - if err := walStates[index-minWALIndex].err; err != nil { - return err - } - mu.Unlock() - - // Apply WAL to database file. - startTime := time.Now() - if err = applyWAL(ctx, index, tmpPath); err != nil { - return fmt.Errorf("cannot apply wal: %w", err) - } - logger.Printf("%s: applied wal %s/%08x elapsed=%s", - logPrefix, opt.Generation, index, - time.Since(startTime).String(), - ) - } - - // Ensure all goroutines finish. All errors should have been handled during - // the processing of WAL files but this ensures that all processing is done. - if err := g.Wait(); err != nil { - return err - } - - // Copy file to final location. - logger.Printf("%s: renaming database from temporary location", logPrefix) - if err := os.Rename(tmpPath, opt.OutputPath); err != nil { - return err - } - - return nil -} - -type walRestoreState struct { - ready bool - err error -} - // SnapshotIndexAt returns the highest index for a snapshot within a generation // that occurs before timestamp. If timestamp is zero, returns the latest snapshot. func (r *Replica) SnapshotIndexAt(ctx context.Context, generation string, timestamp time.Time) (int, error) { @@ -1202,137 +924,19 @@ func (r *Replica) SnapshotIndexAt(ctx context.Context, generation string, timest return snapshotIndex, nil } -// SnapshotIndexbyIndex returns the highest index for a snapshot within a generation -// that occurs before a given index. If index is MaxInt32, returns the latest snapshot. 
-func (r *Replica) SnapshotIndexByIndex(ctx context.Context, generation string, index int) (int, error) { - itr, err := r.Client.Snapshots(ctx, generation) - if err != nil { - return 0, err - } - defer itr.Close() - - snapshotIndex := -1 - for itr.Next() { - snapshot := itr.Snapshot() - - if index < math.MaxInt32 && snapshot.Index > index { - continue // after index, skip - } - - // Use snapshot if it newer. - if snapshotIndex == -1 || snapshotIndex >= snapshotIndex { - snapshotIndex = snapshot.Index - } - } - if err := itr.Close(); err != nil { - return 0, err - } else if snapshotIndex == -1 { - return 0, ErrNoSnapshots - } - return snapshotIndex, nil -} - -// walSegmentMap returns a map of WAL indices to their segments. -// Filters by a max timestamp or a max index. -func (r *Replica) walSegmentMap(ctx context.Context, generation string, maxIndex int, maxTimestamp time.Time) (map[int][]int64, error) { - itr, err := r.Client.WALSegments(ctx, generation) - if err != nil { - return nil, err - } - defer itr.Close() - - m := make(map[int][]int64) - for itr.Next() { - info := itr.WALSegment() - - // Exit if we go past the max timestamp or index. - if !maxTimestamp.IsZero() && info.CreatedAt.After(maxTimestamp) { - break // after max timestamp, skip - } else if info.Index > maxIndex { - break // after max index, skip - } - - // Verify offsets are added in order. - offsets := m[info.Index] - if len(offsets) == 0 && info.Offset != 0 { - return nil, fmt.Errorf("missing initial wal segment: generation=%s index=%08x offset=%d", generation, info.Index, info.Offset) - } else if len(offsets) > 0 && offsets[len(offsets)-1] >= info.Offset { - return nil, fmt.Errorf("wal segments out of order: generation=%s index=%08x offsets=(%d,%d)", generation, info.Index, offsets[len(offsets)-1], info.Offset) - } - - // Append to the end of the WAL file. - m[info.Index] = append(offsets, info.Offset) - } - return m, itr.Close() -} - -// restoreSnapshot copies a snapshot from the replica to a file. -func (r *Replica) restoreSnapshot(ctx context.Context, generation string, index int, filename string) error { - // Determine the user/group & mode based on the DB, if available. - var fileInfo, dirInfo os.FileInfo - if db := r.DB(); db != nil { - fileInfo, dirInfo = db.fileInfo, db.dirInfo - } - - if err := internal.MkdirAll(filepath.Dir(filename), dirInfo); err != nil { - return err - } - - f, err := internal.CreateFile(filename, fileInfo) - if err != nil { - return err - } - defer f.Close() - - rd, err := r.Client.SnapshotReader(ctx, generation, index) - if err != nil { - return err - } - defer rd.Close() - - if _, err := io.Copy(f, lz4.NewReader(rd)); err != nil { - return err - } else if err := f.Sync(); err != nil { - return err - } - return f.Close() -} - -// downloadWAL copies a WAL file from the replica to a local copy next to the DB. -// The WAL is later applied by applyWAL(). This function can be run in parallel -// to download multiple WAL files simultaneously. -func (r *Replica) downloadWAL(ctx context.Context, generation string, index int, offsets []int64, dbPath string) (err error) { - // Determine the user/group & mode based on the DB, if available. - var fileInfo os.FileInfo - if db := r.DB(); db != nil { - fileInfo = db.fileInfo - } - - // Open readers for every segment in the WAL file, in order. 
- var readers []io.Reader - for _, offset := range offsets { - rd, err := r.Client.WALSegmentReader(ctx, Pos{Generation: generation, Index: index, Offset: offset}) +// LatestReplica returns the most recently updated replica. +func LatestReplica(ctx context.Context, replicas []*Replica) (*Replica, error) { + var t time.Time + var r *Replica + for i := range replicas { + _, max, err := ReplicaClientTimeBounds(ctx, replicas[i].Client) if err != nil { - return err + return nil, err + } else if r == nil || max.After(t) { + r, t = replicas[i], max } - defer rd.Close() - readers = append(readers, lz4.NewReader(rd)) } - - // Open handle to destination WAL path. - f, err := internal.CreateFile(fmt.Sprintf("%s-%08x-wal", dbPath, index), fileInfo) - if err != nil { - return err - } - defer f.Close() - - // Combine segments together and copy WAL to target path. - if _, err := io.Copy(f, io.MultiReader(readers...)); err != nil { - return err - } else if err := f.Close(); err != nil { - return err - } - return nil + return r, nil } // Replica metrics. diff --git a/replica_client.go b/replica_client.go index 3a914e4..3bf01b1 100644 --- a/replica_client.go +++ b/replica_client.go @@ -2,9 +2,19 @@ package litestream import ( "context" + "fmt" "io" + "log" + "os" + "time" + + "github.com/benbjohnson/litestream/internal" + "github.com/pierrec/lz4/v4" ) +// DefaultRestoreParallelism is the default parallelism when downloading WAL files. +const DefaultRestoreParallelism = 8 + // ReplicaClient represents client to connect to a Replica. type ReplicaClient interface { // Returns the type of client. @@ -46,3 +56,382 @@ type ReplicaClient interface { // WAL segment does not exist. WALSegmentReader(ctx context.Context, pos Pos) (io.ReadCloser, error) } + +// FindSnapshotForIndex returns the highest index for a snapshot within a +// generation that occurs before a given index. +func FindSnapshotForIndex(ctx context.Context, client ReplicaClient, generation string, index int) (int, error) { + itr, err := client.Snapshots(ctx, generation) + if err != nil { + return 0, fmt.Errorf("snapshots: %w", err) + } + defer itr.Close() + + // Iterate over all snapshots to find the closest to our given index. + snapshotIndex := -1 + var n int + for ; itr.Next(); n++ { + info := itr.Snapshot() + if info.Index > index { + continue // after given index, skip + } + + // Use snapshot if it's more recent. + if info.Index >= snapshotIndex { + snapshotIndex = info.Index + } + } + if err := itr.Close(); err != nil { + return 0, fmt.Errorf("snapshot iteration: %w", err) + } + + // Ensure we find at least one snapshot and that it's before the given index. + if n == 0 { + return 0, ErrNoSnapshots + } else if snapshotIndex == -1 { + return 0, fmt.Errorf("no snapshots available at or before index %08x", index) + } + return snapshotIndex, nil +} + +// GenerationTimeBounds returns the creation time & last updated time of a generation. +// Returns ErrNoSnapshots if no data exists for the generation. +func GenerationTimeBounds(ctx context.Context, client ReplicaClient, generation string) (createdAt, updatedAt time.Time, err error) { + // Determine bounds for snapshots only first. + // This will return ErrNoSnapshots if no snapshots exist. + if createdAt, updatedAt, err = SnapshotTimeBounds(ctx, client, generation); err != nil { + return createdAt, updatedAt, err + } + + // Update ending time bounds if WAL segments exist after the last snapshot. 
+ _, max, err := WALTimeBounds(ctx, client, generation) + if err != nil && err != ErrNoWALSegments { + return createdAt, updatedAt, err + } else if max.After(updatedAt) { + updatedAt = max + } + + return createdAt, updatedAt, nil +} + +// SnapshotTimeBounds returns the minimum and maximum snapshot timestamps within a generation. +// Returns ErrNoSnapshots if no data exists for the generation. +func SnapshotTimeBounds(ctx context.Context, client ReplicaClient, generation string) (min, max time.Time, err error) { + itr, err := client.Snapshots(ctx, generation) + if err != nil { + return min, max, fmt.Errorf("snapshots: %w", err) + } + defer itr.Close() + + // Iterate over all snapshots to find the oldest and newest. + var n int + for ; itr.Next(); n++ { + info := itr.Snapshot() + if min.IsZero() || info.CreatedAt.Before(min) { + min = info.CreatedAt + } + if max.IsZero() || info.CreatedAt.After(max) { + max = info.CreatedAt + } + } + if err := itr.Close(); err != nil { + return min, max, fmt.Errorf("snapshot iteration: %w", err) + } + + // Return error if no snapshots exist. + if n == 0 { + return min, max, ErrNoSnapshots + } + return min, max, nil +} + +// WALTimeBounds returns the minimum and maximum snapshot timestamps. +// Returns ErrNoWALSegments if no data exists for the generation. +func WALTimeBounds(ctx context.Context, client ReplicaClient, generation string) (min, max time.Time, err error) { + itr, err := client.WALSegments(ctx, generation) + if err != nil { + return min, max, fmt.Errorf("wal segments: %w", err) + } + defer itr.Close() + + // Iterate over all WAL segments to find oldest and newest. + var n int + for ; itr.Next(); n++ { + info := itr.WALSegment() + if min.IsZero() || info.CreatedAt.Before(min) { + min = info.CreatedAt + } + if max.IsZero() || info.CreatedAt.After(max) { + max = info.CreatedAt + } + } + if err := itr.Close(); err != nil { + return min, max, fmt.Errorf("wal segment iteration: %w", err) + } + + if n == 0 { + return min, max, ErrNoWALSegments + } + return min, max, nil +} + +// FindLatestGeneration returns the most recent generation for a client. +func FindLatestGeneration(ctx context.Context, client ReplicaClient) (generation string, err error) { + generations, err := client.Generations(ctx) + if err != nil { + return "", fmt.Errorf("generations: %w", err) + } + + // Search generations for one latest updated. + var maxTime time.Time + for i := range generations { + // Determine the latest update for the generation. + _, updatedAt, err := GenerationTimeBounds(ctx, client, generations[i]) + if err != nil { + return "", fmt.Errorf("generation time bounds: %w", err) + } + + // Use the latest replica if we have multiple candidates. + if updatedAt.After(maxTime) { + maxTime = updatedAt + generation = generations[i] + } + } + + if generation == "" { + return "", ErrNoGeneration + } + return generation, nil +} + +// ReplicaClientTimeBounds returns time range covered by a replica client +// across all generations. It scans the time range of all generations and +// computes the lower and upper bounds of them. +func ReplicaClientTimeBounds(ctx context.Context, client ReplicaClient) (min, max time.Time, err error) { + generations, err := client.Generations(ctx) + if err != nil { + return min, max, fmt.Errorf("generations: %w", err) + } else if len(generations) == 0 { + return min, max, ErrNoGeneration + } + + // Iterate over generations to determine outer bounds. + for i := range generations { + // Determine the time range for the generation. 
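// The per-generation bounds gathered below are folded into one window for
// the whole client; LatestReplica relies on this to pick the most recently
// updated replica. A minimal caller-side sketch, assuming ctx and client
// are in scope:
//
//	_, updatedAt, err := ReplicaClientTimeBounds(ctx, client)
//	if err != nil {
//		return err
//	}
//	log.Printf("replica last updated at %s", updatedAt)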
+ createdAt, updatedAt, err := GenerationTimeBounds(ctx, client, generations[i]) + if err != nil { + return min, max, fmt.Errorf("generation time bounds: %w", err) + } + + // Update time bounds. + if min.IsZero() || createdAt.Before(min) { + min = createdAt + } + if max.IsZero() || updatedAt.After(max) { + max = updatedAt + } + } + + return min, max, nil +} + +// FindMaxIndexByGeneration returns the last index within a generation. +// Returns ErrNoSnapshots if no index exists on the replica for the generation. +func FindMaxIndexByGeneration(ctx context.Context, client ReplicaClient, generation string) (index int, err error) { + // Determine the highest available snapshot index. Returns an error if no + // snapshot are available as WALs are not useful without snapshots. + snapshotIndex, err := FindMaxSnapshotIndexByGeneration(ctx, client, generation) + if err == ErrNoSnapshots { + return index, err + } else if err != nil { + return index, fmt.Errorf("max snapshot index: %w", err) + } + + // Determine the highest available WAL index. + walIndex, err := FindMaxWALIndexByGeneration(ctx, client, generation) + if err != nil && err != ErrNoWALSegments { + return index, fmt.Errorf("max wal index: %w", err) + } + + // Use snapshot index if it's after the last WAL index. + if snapshotIndex > walIndex { + return snapshotIndex, nil + } + return walIndex, nil +} + +// FindMaxSnapshotIndexByGeneration returns the last snapshot index within a generation. +// Returns ErrNoSnapshots if no snapshots exist for the generation on the replica. +func FindMaxSnapshotIndexByGeneration(ctx context.Context, client ReplicaClient, generation string) (index int, err error) { + itr, err := client.Snapshots(ctx, generation) + if err != nil { + return 0, fmt.Errorf("snapshots: %w", err) + } + defer func() { _ = itr.Close() }() + + // Iterate over snapshots to find the highest index. + var n int + for ; itr.Next(); n++ { + if info := itr.Snapshot(); info.Index > index { + index = info.Index + } + } + if err := itr.Close(); err != nil { + return 0, fmt.Errorf("snapshot iteration: %w", err) + } + + // Return an error if no snapshots were found. + if n == 0 { + return 0, ErrNoSnapshots + } + return index, nil +} + +// FindMaxWALIndexByGeneration returns the last WAL index within a generation. +// Returns ErrNoWALSegments if no segments exist for the generation on the replica. +func FindMaxWALIndexByGeneration(ctx context.Context, client ReplicaClient, generation string) (index int, err error) { + itr, err := client.WALSegments(ctx, generation) + if err != nil { + return 0, fmt.Errorf("wal segments: %w", err) + } + defer func() { _ = itr.Close() }() + + // Iterate over WAL segments to find the highest index. + var n int + for ; itr.Next(); n++ { + if info := itr.WALSegment(); info.Index > index { + index = info.Index + } + } + if err := itr.Close(); err != nil { + return 0, fmt.Errorf("wal segment iteration: %w", err) + } + + // Return an error if no WAL segments were found. + if n == 0 { + return 0, ErrNoWALSegments + } + return index, nil +} + +// Restore restores the database to the given index on a generation. +func Restore(ctx context.Context, client ReplicaClient, filename, generation string, snapshotIndex, targetIndex int, opt RestoreOptions) (err error) { + // Validate options. 
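// A hedged end-to-end sketch of how a caller typically arrives at these
// arguments (the output path is illustrative; error handling abbreviated):
//
//	generation, err := FindLatestGeneration(ctx, client)
//	if err != nil {
//		return err
//	}
//	targetIndex, err := FindMaxIndexByGeneration(ctx, client, generation)
//	if err != nil {
//		return err
//	}
//	snapshotIndex, err := FindSnapshotForIndex(ctx, client, generation, targetIndex)
//	if err != nil {
//		return err
//	}
//	if err := Restore(ctx, client, "/var/lib/db-restored", generation, snapshotIndex, targetIndex, NewRestoreOptions()); err != nil {
//		return err
//	}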
+ if filename == "" { + return fmt.Errorf("restore path required") + } else if generation == "" { + return fmt.Errorf("generation required") + } else if snapshotIndex < 0 { + return fmt.Errorf("snapshot index required") + } else if targetIndex < 0 { + return fmt.Errorf("target index required") + } + + // Require a default level of parallelism. + if opt.Parallelism < 1 { + opt.Parallelism = DefaultRestoreParallelism + } + + // Ensure logger exists. + logger := opt.Logger + if logger == nil { + logger = log.New(io.Discard, "", 0) + } + + // Ensure output path does not already exist. + // If doesn't exist, also remove the journal, shm, & wal if left behind. + if _, err := os.Stat(filename); err == nil { + return fmt.Errorf("cannot restore, output path already exists: %s", filename) + } else if err != nil && !os.IsNotExist(err) { + return err + } else if err := removeDBFiles(filename); err != nil { + return err + } + + // Copy snapshot to output path. + tmpPath := filename + ".tmp" + logger.Printf("%srestoring snapshot %s/%08x to %s", opt.LogPrefix, generation, snapshotIndex, tmpPath) + if err := RestoreSnapshot(ctx, client, tmpPath, generation, snapshotIndex, opt.Mode, opt.Uid, opt.Gid); err != nil { + return fmt.Errorf("cannot restore snapshot: %w", err) + } + + // Download & apply all WAL files between the snapshot & the target index. + d := NewWALDownloader(client, tmpPath, generation, snapshotIndex, targetIndex) + d.Parallelism = opt.Parallelism + d.Mode = opt.Mode + d.Uid, d.Gid = opt.Uid, opt.Gid + + for { + // Read next WAL file from downloader. + walIndex, walPath, err := d.Next(ctx) + if err == io.EOF { + break + } + + // If we are only reading a single index, a WAL file may not be found. + if _, ok := err.(*WALNotFoundError); ok && snapshotIndex == targetIndex { + logger.Printf("%sno wal files found, snapshot only", opt.LogPrefix) + break + } else if err != nil { + return fmt.Errorf("cannot download WAL: %w", err) + } + + // Apply WAL file. + startTime := time.Now() + if err = ApplyWAL(ctx, tmpPath, walPath); err != nil { + return fmt.Errorf("cannot apply wal: %w", err) + } + logger.Printf("%sapplied wal %s/%08x elapsed=%s", opt.LogPrefix, generation, walIndex, time.Since(startTime).String()) + } + + // Copy file to final location. + logger.Printf("%srenaming database from temporary location", opt.LogPrefix) + if err := os.Rename(tmpPath, filename); err != nil { + return err + } + + return nil +} + +// RestoreOptions represents options for DB.Restore(). +type RestoreOptions struct { + // File info used for restored snapshot & WAL files. + Mode os.FileMode + Uid, Gid int + + // Specifies how many WAL files are downloaded in parallel during restore. + Parallelism int + + // Logging settings. + Logger *log.Logger + LogPrefix string +} + +// NewRestoreOptions returns a new instance of RestoreOptions with defaults. +func NewRestoreOptions() RestoreOptions { + return RestoreOptions{ + Mode: 0600, + Parallelism: DefaultRestoreParallelism, + } +} + +// RestoreSnapshot copies a snapshot from the replica client to a file. 
+func RestoreSnapshot(ctx context.Context, client ReplicaClient, filename, generation string, index int, mode os.FileMode, uid, gid int) error { + f, err := internal.CreateFile(filename, mode, uid, gid) + if err != nil { + return err + } + defer f.Close() + + rd, err := client.SnapshotReader(ctx, generation, index) + if err != nil { + return err + } + defer rd.Close() + + if _, err := io.Copy(f, lz4.NewReader(rd)); err != nil { + return err + } else if err := f.Sync(); err != nil { + return err + } + return f.Close() +} diff --git a/replica_client_test.go b/replica_client_test.go index ec2d841..65d5d81 100644 --- a/replica_client_test.go +++ b/replica_client_test.go @@ -2,572 +2,582 @@ package litestream_test import ( "context" - "flag" "fmt" - "io/ioutil" - "math/rand" "os" - "path" - "reflect" - "sort" + "path/filepath" "strings" "testing" "time" "github.com/benbjohnson/litestream" - "github.com/benbjohnson/litestream/abs" - "github.com/benbjohnson/litestream/file" - "github.com/benbjohnson/litestream/gcs" - "github.com/benbjohnson/litestream/s3" - "github.com/benbjohnson/litestream/sftp" + "github.com/benbjohnson/litestream/mock" ) -func init() { - rand.Seed(time.Now().UnixNano()) -} - -var ( - // Enables integration tests. - integration = flag.String("integration", "file", "") -) - -// S3 settings -var ( - // Replica client settings - s3AccessKeyID = flag.String("s3-access-key-id", os.Getenv("LITESTREAM_S3_ACCESS_KEY_ID"), "") - s3SecretAccessKey = flag.String("s3-secret-access-key", os.Getenv("LITESTREAM_S3_SECRET_ACCESS_KEY"), "") - s3Region = flag.String("s3-region", os.Getenv("LITESTREAM_S3_REGION"), "") - s3Bucket = flag.String("s3-bucket", os.Getenv("LITESTREAM_S3_BUCKET"), "") - s3Path = flag.String("s3-path", os.Getenv("LITESTREAM_S3_PATH"), "") - s3Endpoint = flag.String("s3-endpoint", os.Getenv("LITESTREAM_S3_ENDPOINT"), "") - s3ForcePathStyle = flag.Bool("s3-force-path-style", os.Getenv("LITESTREAM_S3_FORCE_PATH_STYLE") == "true", "") - s3SkipVerify = flag.Bool("s3-skip-verify", os.Getenv("LITESTREAM_S3_SKIP_VERIFY") == "true", "") -) - -// Google cloud storage settings -var ( - gcsBucket = flag.String("gcs-bucket", os.Getenv("LITESTREAM_GCS_BUCKET"), "") - gcsPath = flag.String("gcs-path", os.Getenv("LITESTREAM_GCS_PATH"), "") -) - -// Azure blob storage settings -var ( - absAccountName = flag.String("abs-account-name", os.Getenv("LITESTREAM_ABS_ACCOUNT_NAME"), "") - absAccountKey = flag.String("abs-account-key", os.Getenv("LITESTREAM_ABS_ACCOUNT_KEY"), "") - absBucket = flag.String("abs-bucket", os.Getenv("LITESTREAM_ABS_BUCKET"), "") - absPath = flag.String("abs-path", os.Getenv("LITESTREAM_ABS_PATH"), "") -) - -// SFTP settings -var ( - sftpHost = flag.String("sftp-host", os.Getenv("LITESTREAM_SFTP_HOST"), "") - sftpUser = flag.String("sftp-user", os.Getenv("LITESTREAM_SFTP_USER"), "") - sftpPassword = flag.String("sftp-password", os.Getenv("LITESTREAM_SFTP_PASSWORD"), "") - sftpKeyPath = flag.String("sftp-key-path", os.Getenv("LITESTREAM_SFTP_KEY_PATH"), "") - sftpPath = flag.String("sftp-path", os.Getenv("LITESTREAM_SFTP_PATH"), "") -) - -func TestReplicaClient_Generations(t *testing.T) { - RunWithReplicaClient(t, "OK", func(t *testing.T, c litestream.ReplicaClient) { - t.Parallel() - - // Write snapshots. 
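// The write/list round trip exercised here is the simplest ReplicaClient
// interaction: write a snapshot under a generation, then expect
// Generations to report that generation. A hedged sketch, assuming a
// client c is in scope:
//
//	if _, err := c.WriteSnapshot(context.Background(), "5efbd8d042012dca", 0, strings.NewReader(`foo`)); err != nil {
//		t.Fatal(err)
//	}
//	if got, err := c.Generations(context.Background()); err != nil {
//		t.Fatal(err)
//	} else if len(got) != 1 {
//		t.Fatalf("unexpected generations: %v", got)
//	}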
- if _, err := c.WriteSnapshot(context.Background(), "5efbd8d042012dca", 0, strings.NewReader(`foo`)); err != nil { +func TestFindSnapshotForIndex(t *testing.T) { + t.Run("BeforeIndex", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "find-snapshot-for-index", "ok")) + if snapshotIndex, err := litestream.FindSnapshotForIndex(context.Background(), client, "0000000000000000", 0x000007d0); err != nil { t.Fatal(err) - } else if _, err := c.WriteSnapshot(context.Background(), "b16ddcf5c697540f", 0, strings.NewReader(`bar`)); err != nil { - t.Fatal(err) - } else if _, err := c.WriteSnapshot(context.Background(), "155fe292f8333c72", 0, strings.NewReader(`baz`)); err != nil { - t.Fatal(err) - } - - // Verify returned generations. - if got, err := c.Generations(context.Background()); err != nil { - t.Fatal(err) - } else if want := []string{"155fe292f8333c72", "5efbd8d042012dca", "b16ddcf5c697540f"}; !reflect.DeepEqual(got, want) { - t.Fatalf("Generations()=%v, want %v", got, want) + } else if got, want := snapshotIndex, 0x000003e8; got != want { + t.Fatalf("index=%08x, want %08x", got, want) } }) - RunWithReplicaClient(t, "NoGenerationsDir", func(t *testing.T, c litestream.ReplicaClient) { - t.Parallel() - - if generations, err := c.Generations(context.Background()); err != nil { + t.Run("AtIndex", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "find-snapshot-for-index", "ok")) + if snapshotIndex, err := litestream.FindSnapshotForIndex(context.Background(), client, "0000000000000000", 0x000003e8); err != nil { t.Fatal(err) - } else if got, want := len(generations), 0; got != want { - t.Fatalf("len(Generations())=%v, want %v", got, want) + } else if got, want := snapshotIndex, 0x000003e8; got != want { + t.Fatalf("index=%08x, want %08x", got, want) + } + }) + + t.Run("ErrNoSnapshotsBeforeIndex", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "find-snapshot-for-index", "no-snapshots-before-index")) + _, err := litestream.FindSnapshotForIndex(context.Background(), client, "0000000000000000", 0x000003e8) + if err == nil || err.Error() != `no snapshots available at or before index 000003e8` { + t.Fatalf("unexpected error: %#v", err) + } + }) + + t.Run("ErrNoSnapshots", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "find-snapshot-for-index", "no-snapshots")) + _, err := litestream.FindSnapshotForIndex(context.Background(), client, "0000000000000000", 0x000003e8) + if err != litestream.ErrNoSnapshots { + t.Fatalf("unexpected error: %#v", err) + } + }) + + t.Run("ErrSnapshots", func(t *testing.T) { + var client mock.ReplicaClient + client.SnapshotsFunc = func(ctx context.Context, generation string) (litestream.SnapshotIterator, error) { + return nil, fmt.Errorf("marker") + } + _, err := litestream.FindSnapshotForIndex(context.Background(), &client, "0000000000000000", 0x000003e8) + if err == nil || err.Error() != `snapshots: marker` { + t.Fatalf("unexpected error: %#v", err) + } + }) + + t.Run("ErrSnapshotIterator", func(t *testing.T) { + var itr mock.SnapshotIterator + itr.NextFunc = func() bool { return false } + itr.CloseFunc = func() error { return fmt.Errorf("marker") } + + var client mock.ReplicaClient + client.SnapshotsFunc = func(ctx context.Context, generation string) (litestream.SnapshotIterator, error) { + return &itr, nil + } + + _, err := litestream.FindSnapshotForIndex(context.Background(), &client, "0000000000000000", 
0x000003e8) + if err == nil || err.Error() != `snapshot iteration: marker` { + t.Fatalf("unexpected error: %#v", err) } }) } -func TestReplicaClient_Snapshots(t *testing.T) { - RunWithReplicaClient(t, "OK", func(t *testing.T, c litestream.ReplicaClient) { - t.Parallel() - - // Write snapshots. - if _, err := c.WriteSnapshot(context.Background(), "5efbd8d042012dca", 1, strings.NewReader(``)); err != nil { - t.Fatal(err) - } else if _, err := c.WriteSnapshot(context.Background(), "b16ddcf5c697540f", 5, strings.NewReader(`x`)); err != nil { - t.Fatal(err) - } else if _, err := c.WriteSnapshot(context.Background(), "b16ddcf5c697540f", 10, strings.NewReader(`xyz`)); err != nil { - t.Fatal(err) - } - - // Fetch all snapshots by generation. - itr, err := c.Snapshots(context.Background(), "b16ddcf5c697540f") - if err != nil { - t.Fatal(err) - } - defer itr.Close() - - // Read all snapshots into a slice so they can be sorted. - a, err := litestream.SliceSnapshotIterator(itr) - if err != nil { - t.Fatal(err) - } else if got, want := len(a), 2; got != want { - t.Fatalf("len=%v, want %v", got, want) - } - sort.Sort(litestream.SnapshotInfoSlice(a)) - - // Verify first snapshot metadata. - if got, want := a[0].Generation, "b16ddcf5c697540f"; got != want { - t.Fatalf("Generation=%v, want %v", got, want) - } else if got, want := a[0].Index, 5; got != want { - t.Fatalf("Index=%v, want %v", got, want) - } else if got, want := a[0].Size, int64(1); got != want { - t.Fatalf("Size=%v, want %v", got, want) - } else if a[0].CreatedAt.IsZero() { - t.Fatalf("expected CreatedAt") - } - - // Verify second snapshot metadata. - if got, want := a[1].Generation, "b16ddcf5c697540f"; got != want { - t.Fatalf("Generation=%v, want %v", got, want) - } else if got, want := a[1].Index, 0xA; got != want { - t.Fatalf("Index=%v, want %v", got, want) - } else if got, want := a[1].Size, int64(3); got != want { - t.Fatalf("Size=%v, want %v", got, want) - } else if a[1].CreatedAt.IsZero() { - t.Fatalf("expected CreatedAt") - } - - // Ensure close is clean. 
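// Snapshot and WAL iterators throughout this file follow the same
// Next/Close discipline: Close is deferred for cleanup and then called
// again explicitly so its error can be checked. A hedged sketch, assuming
// c and generation are in scope:
//
//	itr, err := c.Snapshots(context.Background(), generation)
//	if err != nil {
//		t.Fatal(err)
//	}
//	defer itr.Close()
//	for itr.Next() {
//		_ = itr.Snapshot().Index
//	}
//	if err := itr.Close(); err != nil {
//		t.Fatal(err)
//	}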
- if err := itr.Close(); err != nil { +func TestSnapshotTimeBounds(t *testing.T) { + t.Run("OK", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "snapshot-time-bounds", "ok")) + if min, max, err := litestream.SnapshotTimeBounds(context.Background(), client, "0000000000000000"); err != nil { t.Fatal(err) + } else if got, want := min, time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC); !got.Equal(want) { + t.Fatalf("min=%s, want %s", got, want) + } else if got, want := max, time.Date(2000, 1, 3, 0, 0, 0, 0, time.UTC); !got.Equal(want) { + t.Fatalf("max=%s, want %s", got, want) } }) - RunWithReplicaClient(t, "NoGenerationDir", func(t *testing.T, c litestream.ReplicaClient) { - t.Parallel() - - itr, err := c.Snapshots(context.Background(), "5efbd8d042012dca") - if err != nil { - t.Fatal(err) - } - defer itr.Close() - - if itr.Next() { - t.Fatal("expected no snapshots") + t.Run("ErrNoSnapshots", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "snapshot-time-bounds", "no-snapshots")) + if _, _, err := litestream.SnapshotTimeBounds(context.Background(), client, "0000000000000000"); err != litestream.ErrNoSnapshots { + t.Fatalf("unexpected error: %#v", err) } }) - RunWithReplicaClient(t, "ErrNoGeneration", func(t *testing.T, c litestream.ReplicaClient) { - t.Parallel() - - itr, err := c.Snapshots(context.Background(), "") - if err == nil { - err = itr.Close() + t.Run("ErrSnapshots", func(t *testing.T) { + var client mock.ReplicaClient + client.SnapshotsFunc = func(ctx context.Context, generation string) (litestream.SnapshotIterator, error) { + return nil, fmt.Errorf("marker") } - if err == nil || err.Error() != `generation required` { - t.Fatalf("unexpected error: %v", err) + + _, _, err := litestream.SnapshotTimeBounds(context.Background(), &client, "0000000000000000") + if err == nil || err.Error() != `snapshots: marker` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrSnapshotIterator", func(t *testing.T) { + var itr mock.SnapshotIterator + itr.NextFunc = func() bool { return false } + itr.CloseFunc = func() error { return fmt.Errorf("marker") } + + var client mock.ReplicaClient + client.SnapshotsFunc = func(ctx context.Context, generation string) (litestream.SnapshotIterator, error) { + return &itr, nil + } + + _, _, err := litestream.SnapshotTimeBounds(context.Background(), &client, "0000000000000000") + if err == nil || err.Error() != `snapshot iteration: marker` { + t.Fatalf("unexpected error: %s", err) } }) } -func TestReplicaClient_WriteSnapshot(t *testing.T) { - RunWithReplicaClient(t, "OK", func(t *testing.T, c litestream.ReplicaClient) { - t.Parallel() - - if _, err := c.WriteSnapshot(context.Background(), "b16ddcf5c697540f", 1000, strings.NewReader(`foobar`)); err != nil { +func TestWALTimeBounds(t *testing.T) { + t.Run("OK", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "wal-time-bounds", "ok")) + if min, max, err := litestream.WALTimeBounds(context.Background(), client, "0000000000000000"); err != nil { t.Fatal(err) - } - - if r, err := c.SnapshotReader(context.Background(), "b16ddcf5c697540f", 1000); err != nil { - t.Fatal(err) - } else if buf, err := ioutil.ReadAll(r); err != nil { - t.Fatal(err) - } else if err := r.Close(); err != nil { - t.Fatal(err) - } else if got, want := string(buf), `foobar`; got != want { - t.Fatalf("data=%q, want %q", got, want) + } else if got, want := min, time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC); 
!got.Equal(want) { + t.Fatalf("min=%s, want %s", got, want) + } else if got, want := max, time.Date(2000, 1, 3, 0, 0, 0, 0, time.UTC); !got.Equal(want) { + t.Fatalf("max=%s, want %s", got, want) } }) - RunWithReplicaClient(t, "ErrNoGeneration", func(t *testing.T, c litestream.ReplicaClient) { - t.Parallel() - if _, err := c.WriteSnapshot(context.Background(), "", 0, nil); err == nil || err.Error() != `generation required` { - t.Fatalf("unexpected error: %v", err) + t.Run("ErrNoWALSegments", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "wal-time-bounds", "no-wal-segments")) + if _, _, err := litestream.WALTimeBounds(context.Background(), client, "0000000000000000"); err != litestream.ErrNoWALSegments { + t.Fatalf("unexpected error: %#v", err) + } + }) + + t.Run("ErrWALSegments", func(t *testing.T) { + var client mock.ReplicaClient + client.WALSegmentsFunc = func(ctx context.Context, generation string) (litestream.WALSegmentIterator, error) { + return nil, fmt.Errorf("marker") + } + + _, _, err := litestream.WALTimeBounds(context.Background(), &client, "0000000000000000") + if err == nil || err.Error() != `wal segments: marker` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrWALSegmentIterator", func(t *testing.T) { + var itr mock.WALSegmentIterator + itr.NextFunc = func() bool { return false } + itr.CloseFunc = func() error { return fmt.Errorf("marker") } + + var client mock.ReplicaClient + client.WALSegmentsFunc = func(ctx context.Context, generation string) (litestream.WALSegmentIterator, error) { + return &itr, nil + } + + _, _, err := litestream.WALTimeBounds(context.Background(), &client, "0000000000000000") + if err == nil || err.Error() != `wal segment iteration: marker` { + t.Fatalf("unexpected error: %s", err) } }) } -func TestReplicaClient_SnapshotReader(t *testing.T) { - RunWithReplicaClient(t, "OK", func(t *testing.T, c litestream.ReplicaClient) { - t.Parallel() - - if _, err := c.WriteSnapshot(context.Background(), "5efbd8d042012dca", 10, strings.NewReader(`foo`)); err != nil { +func TestGenerationTimeBounds(t *testing.T) { + t.Run("OK", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "generation-time-bounds", "ok")) + if min, max, err := litestream.GenerationTimeBounds(context.Background(), client, "0000000000000000"); err != nil { t.Fatal(err) - } - - r, err := c.SnapshotReader(context.Background(), "5efbd8d042012dca", 10) - if err != nil { - t.Fatal(err) - } - defer r.Close() - - if buf, err := ioutil.ReadAll(r); err != nil { - t.Fatal(err) - } else if got, want := string(buf), "foo"; got != want { - t.Fatalf("ReadAll=%v, want %v", got, want) + } else if got, want := min, time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC); !got.Equal(want) { + t.Fatalf("min=%s, want %s", got, want) + } else if got, want := max, time.Date(2000, 1, 3, 0, 0, 0, 0, time.UTC); !got.Equal(want) { + t.Fatalf("max=%s, want %s", got, want) } }) - RunWithReplicaClient(t, "ErrNotFound", func(t *testing.T, c litestream.ReplicaClient) { - t.Parallel() - - if _, err := c.SnapshotReader(context.Background(), "5efbd8d042012dca", 1); !os.IsNotExist(err) { - t.Fatalf("expected not exist, got %#v", err) + t.Run("SnapshotsOnly", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "generation-time-bounds", "snapshots-only")) + if min, max, err := litestream.GenerationTimeBounds(context.Background(), client, "0000000000000000"); err != nil { + t.Fatal(err) + } else if got, want 
:= min, time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC); !got.Equal(want) { + t.Fatalf("min=%s, want %s", got, want) + } else if got, want := max, time.Date(2000, 1, 2, 0, 0, 0, 0, time.UTC); !got.Equal(want) { + t.Fatalf("max=%s, want %s", got, want) } }) - RunWithReplicaClient(t, "ErrNoGeneration", func(t *testing.T, c litestream.ReplicaClient) { - t.Parallel() + t.Run("ErrNoSnapshots", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "generation-time-bounds", "no-snapshots")) + if _, _, err := litestream.GenerationTimeBounds(context.Background(), client, "0000000000000000"); err != litestream.ErrNoSnapshots { + t.Fatalf("unexpected error: %#v", err) + } + }) - if _, err := c.SnapshotReader(context.Background(), "", 1); err == nil || err.Error() != `generation required` { - t.Fatalf("unexpected error: %v", err) + t.Run("ErrWALSegments", func(t *testing.T) { + var snapshotN int + var itr mock.SnapshotIterator + itr.NextFunc = func() bool { + snapshotN++ + return snapshotN == 1 + } + itr.SnapshotFunc = func() litestream.SnapshotInfo { + return litestream.SnapshotInfo{CreatedAt: time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC)} + } + itr.CloseFunc = func() error { return nil } + + var client mock.ReplicaClient + client.SnapshotsFunc = func(ctx context.Context, generation string) (litestream.SnapshotIterator, error) { + return &itr, nil + } + client.WALSegmentsFunc = func(ctx context.Context, generation string) (litestream.WALSegmentIterator, error) { + return nil, fmt.Errorf("marker") + } + + _, _, err := litestream.GenerationTimeBounds(context.Background(), &client, "0000000000000000") + if err == nil || err.Error() != `wal segments: marker` { + t.Fatalf("unexpected error: %s", err) } }) } -func TestReplicaClient_WALSegments(t *testing.T) { - RunWithReplicaClient(t, "OK", func(t *testing.T, c litestream.ReplicaClient) { - t.Parallel() - - if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "5efbd8d042012dca", Index: 1, Offset: 0}, strings.NewReader(``)); err != nil { - t.Fatal(err) - } - if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "b16ddcf5c697540f", Index: 2, Offset: 0}, strings.NewReader(`12345`)); err != nil { - t.Fatal(err) - } else if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "b16ddcf5c697540f", Index: 2, Offset: 5}, strings.NewReader(`67`)); err != nil { - t.Fatal(err) - } else if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "b16ddcf5c697540f", Index: 3, Offset: 0}, strings.NewReader(`xyz`)); err != nil { - t.Fatal(err) - } - - itr, err := c.WALSegments(context.Background(), "b16ddcf5c697540f") - if err != nil { - t.Fatal(err) - } - defer itr.Close() - - // Read all WAL segment files into a slice so they can be sorted. - a, err := litestream.SliceWALSegmentIterator(itr) - if err != nil { - t.Fatal(err) - } else if got, want := len(a), 3; got != want { - t.Fatalf("len=%v, want %v", got, want) - } - sort.Sort(litestream.WALSegmentInfoSlice(a)) - - // Verify first WAL segment metadata. 
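// A WAL segment is addressed by litestream.Pos{Generation, Index, Offset};
// segments sharing an index are byte-offset slices of the same WAL file,
// which is why the two writes at offsets 0 and 5 above produce a single
// index with two entries. Hedged sketch (g is any generation name):
//
//	_, _ = c.WriteWALSegment(context.Background(), litestream.Pos{Generation: g, Index: 2, Offset: 0}, strings.NewReader(`12345`))
//	_, _ = c.WriteWALSegment(context.Background(), litestream.Pos{Generation: g, Index: 2, Offset: 5}, strings.NewReader(`67`))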
- if got, want := a[0].Generation, "b16ddcf5c697540f"; got != want { - t.Fatalf("Generation=%v, want %v", got, want) - } else if got, want := a[0].Index, 2; got != want { - t.Fatalf("Index=%v, want %v", got, want) - } else if got, want := a[0].Offset, int64(0); got != want { - t.Fatalf("Offset=%v, want %v", got, want) - } else if got, want := a[0].Size, int64(5); got != want { - t.Fatalf("Size=%v, want %v", got, want) - } else if a[0].CreatedAt.IsZero() { - t.Fatalf("expected CreatedAt") - } - - // Verify first WAL segment metadata. - if got, want := a[1].Generation, "b16ddcf5c697540f"; got != want { - t.Fatalf("Generation=%v, want %v", got, want) - } else if got, want := a[1].Index, 2; got != want { - t.Fatalf("Index=%v, want %v", got, want) - } else if got, want := a[1].Offset, int64(5); got != want { - t.Fatalf("Offset=%v, want %v", got, want) - } else if got, want := a[1].Size, int64(2); got != want { - t.Fatalf("Size=%v, want %v", got, want) - } else if a[1].CreatedAt.IsZero() { - t.Fatalf("expected CreatedAt") - } - - // Verify third WAL segment metadata. - if got, want := a[2].Generation, "b16ddcf5c697540f"; got != want { - t.Fatalf("Generation=%v, want %v", got, want) - } else if got, want := a[2].Index, 3; got != want { - t.Fatalf("Index=%v, want %v", got, want) - } else if got, want := a[2].Offset, int64(0); got != want { - t.Fatalf("Offset=%v, want %v", got, want) - } else if got, want := a[2].Size, int64(3); got != want { - t.Fatalf("Size=%v, want %v", got, want) - } else if a[1].CreatedAt.IsZero() { - t.Fatalf("expected CreatedAt") - } - - // Ensure close is clean. - if err := itr.Close(); err != nil { +func TestFindLatestGeneration(t *testing.T) { + t.Run("OK", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "find-latest-generation", "ok")) + if generation, err := litestream.FindLatestGeneration(context.Background(), client); err != nil { t.Fatal(err) + } else if got, want := generation, "0000000000000001"; got != want { + t.Fatalf("generation=%s, want %s", got, want) } }) - RunWithReplicaClient(t, "NoGenerationDir", func(t *testing.T, c litestream.ReplicaClient) { - t.Parallel() - - itr, err := c.WALSegments(context.Background(), "5efbd8d042012dca") - if err != nil { - t.Fatal(err) - } - defer itr.Close() - - if itr.Next() { - t.Fatal("expected no wal files") + t.Run("ErrNoSnapshots", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "find-latest-generation", "no-generations")) + if generation, err := litestream.FindLatestGeneration(context.Background(), client); err != litestream.ErrNoGeneration { + t.Fatalf("unexpected error: %s", err) + } else if got, want := generation, ""; got != want { + t.Fatalf("generation=%s, want %s", got, want) } }) - RunWithReplicaClient(t, "NoWALs", func(t *testing.T, c litestream.ReplicaClient) { - t.Parallel() - - if _, err := c.WriteSnapshot(context.Background(), "5efbd8d042012dca", 0, strings.NewReader(`foo`)); err != nil { - t.Fatal(err) + t.Run("ErrGenerations", func(t *testing.T) { + var client mock.ReplicaClient + client.GenerationsFunc = func(ctx context.Context) ([]string, error) { + return nil, fmt.Errorf("marker") } - itr, err := c.WALSegments(context.Background(), "5efbd8d042012dca") - if err != nil { - t.Fatal(err) - } - defer itr.Close() - - if itr.Next() { - t.Fatal("expected no wal files") + _, err := litestream.FindLatestGeneration(context.Background(), &client) + if err == nil || err.Error() != `generations: marker` { + t.Fatalf("unexpected 
error: %s", err) } }) - RunWithReplicaClient(t, "ErrNoGeneration", func(t *testing.T, c litestream.ReplicaClient) { - t.Parallel() - - itr, err := c.WALSegments(context.Background(), "") - if err == nil { - err = itr.Close() + t.Run("ErrSnapshots", func(t *testing.T) { + var client mock.ReplicaClient + client.GenerationsFunc = func(ctx context.Context) ([]string, error) { + return []string{"0000000000000000"}, nil } - if err == nil || err.Error() != `generation required` { - t.Fatalf("unexpected error: %v", err) + client.SnapshotsFunc = func(ctx context.Context, generation string) (litestream.SnapshotIterator, error) { + return nil, fmt.Errorf("marker") + } + + _, err := litestream.FindLatestGeneration(context.Background(), &client) + if err == nil || err.Error() != `generation time bounds: snapshots: marker` { + t.Fatalf("unexpected error: %s", err) } }) } -func TestReplicaClient_WriteWALSegment(t *testing.T) { - RunWithReplicaClient(t, "OK", func(t *testing.T, c litestream.ReplicaClient) { - t.Parallel() - - if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "b16ddcf5c697540f", Index: 1000, Offset: 2000}, strings.NewReader(`foobar`)); err != nil { +func TestReplicaClientTimeBounds(t *testing.T) { + t.Run("OK", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "find-latest-generation", "ok")) + if min, max, err := litestream.ReplicaClientTimeBounds(context.Background(), client); err != nil { t.Fatal(err) - } - - if r, err := c.WALSegmentReader(context.Background(), litestream.Pos{Generation: "b16ddcf5c697540f", Index: 1000, Offset: 2000}); err != nil { - t.Fatal(err) - } else if buf, err := ioutil.ReadAll(r); err != nil { - t.Fatal(err) - } else if err := r.Close(); err != nil { - t.Fatal(err) - } else if got, want := string(buf), `foobar`; got != want { - t.Fatalf("data=%q, want %q", got, want) + } else if got, want := min, time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC); !got.Equal(want) { + t.Fatalf("min=%s, want %s", got, want) + } else if got, want := max, time.Date(2000, 1, 3, 0, 0, 0, 0, time.UTC); !got.Equal(want) { + t.Fatalf("max=%s, want %s", got, want) } }) - RunWithReplicaClient(t, "ErrNoGeneration", func(t *testing.T, c litestream.ReplicaClient) { - t.Parallel() - if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "", Index: 0, Offset: 0}, nil); err == nil || err.Error() != `generation required` { - t.Fatalf("unexpected error: %v", err) + t.Run("ErrNoGeneration", func(t *testing.T) { + var client mock.ReplicaClient + client.GenerationsFunc = func(ctx context.Context) ([]string, error) { + return nil, nil + } + + _, _, err := litestream.ReplicaClientTimeBounds(context.Background(), &client) + if err != litestream.ErrNoGeneration { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrGenerations", func(t *testing.T) { + var client mock.ReplicaClient + client.GenerationsFunc = func(ctx context.Context) ([]string, error) { + return nil, fmt.Errorf("marker") + } + + _, _, err := litestream.ReplicaClientTimeBounds(context.Background(), &client) + if err == nil || err.Error() != `generations: marker` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrSnapshots", func(t *testing.T) { + var client mock.ReplicaClient + client.GenerationsFunc = func(ctx context.Context) ([]string, error) { + return []string{"0000000000000000"}, nil + } + client.SnapshotsFunc = func(ctx context.Context, generation string) (litestream.SnapshotIterator, error) { + return nil, 
fmt.Errorf("marker") + } + + _, _, err := litestream.ReplicaClientTimeBounds(context.Background(), &client) + if err == nil || err.Error() != `generation time bounds: snapshots: marker` { + t.Fatalf("unexpected error: %s", err) } }) } -func TestReplicaClient_WALSegmentReader(t *testing.T) { - - RunWithReplicaClient(t, "OK", func(t *testing.T, c litestream.ReplicaClient) { - t.Parallel() - if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "5efbd8d042012dca", Index: 10, Offset: 5}, strings.NewReader(`foobar`)); err != nil { +func TestFindMaxSnapshotIndexByGeneration(t *testing.T) { + t.Run("OK", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "max-snapshot-index", "ok")) + if index, err := litestream.FindMaxSnapshotIndexByGeneration(context.Background(), client, "0000000000000000"); err != nil { t.Fatal(err) - } - - r, err := c.WALSegmentReader(context.Background(), litestream.Pos{Generation: "5efbd8d042012dca", Index: 10, Offset: 5}) - if err != nil { - t.Fatal(err) - } - defer r.Close() - - if buf, err := ioutil.ReadAll(r); err != nil { - t.Fatal(err) - } else if got, want := string(buf), "foobar"; got != want { - t.Fatalf("ReadAll=%v, want %v", got, want) + } else if got, want := index, 0x000007d0; got != want { + t.Fatalf("index=%d, want %d", got, want) } }) - RunWithReplicaClient(t, "ErrNotFound", func(t *testing.T, c litestream.ReplicaClient) { - t.Parallel() + t.Run("ErrNoSnapshots", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "max-snapshot-index", "no-snapshots")) - if _, err := c.WALSegmentReader(context.Background(), litestream.Pos{Generation: "5efbd8d042012dca", Index: 1, Offset: 0}); !os.IsNotExist(err) { - t.Fatalf("expected not exist, got %#v", err) + _, err := litestream.FindMaxSnapshotIndexByGeneration(context.Background(), client, "0000000000000000") + if err != litestream.ErrNoSnapshots { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrSnapshots", func(t *testing.T) { + var client mock.ReplicaClient + client.SnapshotsFunc = func(ctx context.Context, generation string) (litestream.SnapshotIterator, error) { + return nil, fmt.Errorf("marker") + } + + _, err := litestream.FindMaxSnapshotIndexByGeneration(context.Background(), &client, "0000000000000000") + if err == nil || err.Error() != `snapshots: marker` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrSnapshotIteration", func(t *testing.T) { + var itr mock.SnapshotIterator + itr.NextFunc = func() bool { return false } + itr.CloseFunc = func() error { return fmt.Errorf("marker") } + + var client mock.ReplicaClient + client.SnapshotsFunc = func(ctx context.Context, generation string) (litestream.SnapshotIterator, error) { + return &itr, nil + } + + _, err := litestream.FindMaxSnapshotIndexByGeneration(context.Background(), &client, "0000000000000000") + if err == nil || err.Error() != `snapshot iteration: marker` { + t.Fatalf("unexpected error: %s", err) } }) } -func TestReplicaClient_DeleteWALSegments(t *testing.T) { - RunWithReplicaClient(t, "OK", func(t *testing.T, c litestream.ReplicaClient) { - t.Parallel() - - if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "b16ddcf5c697540f", Index: 1, Offset: 2}, strings.NewReader(`foo`)); err != nil { +func TestFindMaxWALIndexByGeneration(t *testing.T) { + t.Run("OK", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "max-wal-index", "ok")) + if index, err := 
litestream.FindMaxWALIndexByGeneration(context.Background(), client, "0000000000000000"); err != nil { t.Fatal(err) - } else if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "5efbd8d042012dca", Index: 3, Offset: 4}, strings.NewReader(`bar`)); err != nil { - t.Fatal(err) - } - - if err := c.DeleteWALSegments(context.Background(), []litestream.Pos{ - {Generation: "b16ddcf5c697540f", Index: 1, Offset: 2}, - {Generation: "5efbd8d042012dca", Index: 3, Offset: 4}, - }); err != nil { - t.Fatal(err) - } - - if _, err := c.WALSegmentReader(context.Background(), litestream.Pos{Generation: "b16ddcf5c697540f", Index: 1, Offset: 2}); !os.IsNotExist(err) { - t.Fatalf("expected not exist, got %#v", err) - } else if _, err := c.WALSegmentReader(context.Background(), litestream.Pos{Generation: "5efbd8d042012dca", Index: 3, Offset: 4}); !os.IsNotExist(err) { - t.Fatalf("expected not exist, got %#v", err) + } else if got, want := index, 1; got != want { + t.Fatalf("index=%d, want %d", got, want) } }) - RunWithReplicaClient(t, "ErrNoGeneration", func(t *testing.T, c litestream.ReplicaClient) { - t.Parallel() - if err := c.DeleteWALSegments(context.Background(), []litestream.Pos{{}}); err == nil || err.Error() != `generation required` { - t.Fatalf("unexpected error: %v", err) + t.Run("ErrNoWALSegments", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "max-wal-index", "no-wal")) + + _, err := litestream.FindMaxWALIndexByGeneration(context.Background(), client, "0000000000000000") + if err != litestream.ErrNoWALSegments { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrWALSegments", func(t *testing.T) { + var client mock.ReplicaClient + client.WALSegmentsFunc = func(ctx context.Context, generation string) (litestream.WALSegmentIterator, error) { + return nil, fmt.Errorf("marker") + } + + _, err := litestream.FindMaxWALIndexByGeneration(context.Background(), &client, "0000000000000000") + if err == nil || err.Error() != `wal segments: marker` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrWALSegmentIteration", func(t *testing.T) { + var itr mock.WALSegmentIterator + itr.NextFunc = func() bool { return false } + itr.CloseFunc = func() error { return fmt.Errorf("marker") } + + var client mock.ReplicaClient + client.WALSegmentsFunc = func(ctx context.Context, generation string) (litestream.WALSegmentIterator, error) { + return &itr, nil + } + + _, err := litestream.FindMaxWALIndexByGeneration(context.Background(), &client, "0000000000000000") + if err == nil || err.Error() != `wal segment iteration: marker` { + t.Fatalf("unexpected error: %s", err) } }) } -// RunWithReplicaClient executes fn with each replica specified by the -integration flag -func RunWithReplicaClient(t *testing.T, name string, fn func(*testing.T, litestream.ReplicaClient)) { - t.Run(name, func(t *testing.T) { - for _, typ := range strings.Split(*integration, ",") { - t.Run(typ, func(t *testing.T) { - c := NewReplicaClient(t, typ) - defer MustDeleteAll(t, c) +func TestFindMaxIndexByGeneration(t *testing.T) { + t.Run("OK", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "max-index", "ok")) + if index, err := litestream.FindMaxIndexByGeneration(context.Background(), client, "0000000000000000"); err != nil { + t.Fatal(err) + } else if got, want := index, 0x00000002; got != want { + t.Fatalf("index=%d, want %d", got, want) + } + }) - fn(t, c) - }) + t.Run("NoWAL", func(t *testing.T) { + client := 
litestream.NewFileReplicaClient(filepath.Join("testdata", "max-index", "no-wal")) + if index, err := litestream.FindMaxIndexByGeneration(context.Background(), client, "0000000000000000"); err != nil { + t.Fatal(err) + } else if got, want := index, 0x00000001; got != want { + t.Fatalf("index=%d, want %d", got, want) + } + }) + + t.Run("SnapshotLaterThanWAL", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "max-index", "snapshot-later-than-wal")) + if index, err := litestream.FindMaxIndexByGeneration(context.Background(), client, "0000000000000000"); err != nil { + t.Fatal(err) + } else if got, want := index, 0x00000001; got != want { + t.Fatalf("index=%d, want %d", got, want) + } + }) + + t.Run("ErrNoSnapshots", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "max-index", "no-snapshots")) + + _, err := litestream.FindMaxIndexByGeneration(context.Background(), client, "0000000000000000") + if err != litestream.ErrNoSnapshots { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrSnapshots", func(t *testing.T) { + var client mock.ReplicaClient + client.SnapshotsFunc = func(ctx context.Context, generation string) (litestream.SnapshotIterator, error) { + return nil, fmt.Errorf("marker") + } + + _, err := litestream.FindMaxIndexByGeneration(context.Background(), &client, "0000000000000000") + if err == nil || err.Error() != `max snapshot index: snapshots: marker` { + t.Fatalf("unexpected error: %s", err) + } + }) + + t.Run("ErrWALSegments", func(t *testing.T) { + var client mock.ReplicaClient + client.SnapshotsFunc = func(ctx context.Context, generation string) (litestream.SnapshotIterator, error) { + return litestream.NewSnapshotInfoSliceIterator([]litestream.SnapshotInfo{{Index: 0x00000001}}), nil + } + client.WALSegmentsFunc = func(ctx context.Context, generation string) (litestream.WALSegmentIterator, error) { + return nil, fmt.Errorf("marker") + } + + _, err := litestream.FindMaxIndexByGeneration(context.Background(), &client, "0000000000000000") + if err == nil || err.Error() != `max wal index: wal segments: marker` { + t.Fatalf("unexpected error: %s", err) } }) } -// NewReplicaClient returns a new client for integration testing by type name. -func NewReplicaClient(tb testing.TB, typ string) litestream.ReplicaClient { - tb.Helper() +func TestRestoreSnapshot(t *testing.T) { t.Skip("TODO") } - switch typ { - case file.ReplicaClientType: - return NewFileReplicaClient(tb) - case s3.ReplicaClientType: - return NewS3ReplicaClient(tb) - case gcs.ReplicaClientType: - return NewGCSReplicaClient(tb) - case abs.ReplicaClientType: - return NewABSReplicaClient(tb) - case sftp.ReplicaClientType: - return NewSFTPReplicaClient(tb) - default: - tb.Fatalf("invalid replica client type: %q", typ) - return nil - } -} +func TestRestore(t *testing.T) { + t.Run("OK", func(t *testing.T) { + testDir := filepath.Join("testdata", "restore", "ok") + tempDir := t.TempDir() -// NewFileReplicaClient returns a new client for integration testing. -func NewFileReplicaClient(tb testing.TB) *file.ReplicaClient { - tb.Helper() - return file.NewReplicaClient(tb.TempDir()) -} - -// NewS3ReplicaClient returns a new client for integration testing. 
-func NewS3ReplicaClient(tb testing.TB) *s3.ReplicaClient { - tb.Helper() - - c := s3.NewReplicaClient() - c.AccessKeyID = *s3AccessKeyID - c.SecretAccessKey = *s3SecretAccessKey - c.Region = *s3Region - c.Bucket = *s3Bucket - c.Path = path.Join(*s3Path, fmt.Sprintf("%016x", rand.Uint64())) - c.Endpoint = *s3Endpoint - c.ForcePathStyle = *s3ForcePathStyle - c.SkipVerify = *s3SkipVerify - return c -} - -// NewGCSReplicaClient returns a new client for integration testing. -func NewGCSReplicaClient(tb testing.TB) *gcs.ReplicaClient { - tb.Helper() - - c := gcs.NewReplicaClient() - c.Bucket = *gcsBucket - c.Path = path.Join(*gcsPath, fmt.Sprintf("%016x", rand.Uint64())) - return c -} - -// NewABSReplicaClient returns a new client for integration testing. -func NewABSReplicaClient(tb testing.TB) *abs.ReplicaClient { - tb.Helper() - - c := abs.NewReplicaClient() - c.AccountName = *absAccountName - c.AccountKey = *absAccountKey - c.Bucket = *absBucket - c.Path = path.Join(*absPath, fmt.Sprintf("%016x", rand.Uint64())) - return c -} - -// NewSFTPReplicaClient returns a new client for integration testing. -func NewSFTPReplicaClient(tb testing.TB) *sftp.ReplicaClient { - tb.Helper() - - c := sftp.NewReplicaClient() - c.Host = *sftpHost - c.User = *sftpUser - c.Password = *sftpPassword - c.KeyPath = *sftpKeyPath - c.Path = path.Join(*sftpPath, fmt.Sprintf("%016x", rand.Uint64())) - return c -} - -// MustDeleteAll deletes all objects under the client's path. -func MustDeleteAll(tb testing.TB, c litestream.ReplicaClient) { - tb.Helper() - - generations, err := c.Generations(context.Background()) - if err != nil { - tb.Fatalf("cannot list generations for deletion: %s", err) - } - - for _, generation := range generations { - if err := c.DeleteGeneration(context.Background(), generation); err != nil { - tb.Fatalf("cannot delete generation: %s", err) + client := litestream.NewFileReplicaClient(testDir) + if err := litestream.Restore(context.Background(), client, filepath.Join(tempDir, "db"), "0000000000000000", 0, 2, litestream.NewRestoreOptions()); err != nil { + t.Fatal(err) + } else if !fileEqual(t, filepath.Join(testDir, "00000002.db"), filepath.Join(tempDir, "db")) { + t.Fatalf("file mismatch") } - } + }) - switch c := c.(type) { - case *sftp.ReplicaClient: - if err := c.Cleanup(context.Background()); err != nil { - tb.Fatalf("cannot cleanup sftp: %s", err) + t.Run("SnapshotOnly", func(t *testing.T) { + testDir := filepath.Join("testdata", "restore", "snapshot-only") + tempDir := t.TempDir() + + client := litestream.NewFileReplicaClient(testDir) + if err := litestream.Restore(context.Background(), client, filepath.Join(tempDir, "db"), "0000000000000000", 0, 0, litestream.NewRestoreOptions()); err != nil { + t.Fatal(err) + } else if !fileEqual(t, filepath.Join(testDir, "00000000.db"), filepath.Join(tempDir, "db")) { + t.Fatalf("file mismatch") } - } + }) + + t.Run("DefaultParallelism", func(t *testing.T) { + testDir := filepath.Join("testdata", "restore", "ok") + tempDir := t.TempDir() + + client := litestream.NewFileReplicaClient(testDir) + opt := litestream.NewRestoreOptions() + opt.Parallelism = 0 + if err := litestream.Restore(context.Background(), client, filepath.Join(tempDir, "db"), "0000000000000000", 0, 2, opt); err != nil { + t.Fatal(err) + } else if !fileEqual(t, filepath.Join(testDir, "00000002.db"), filepath.Join(tempDir, "db")) { + t.Fatalf("file mismatch") + } + }) + + t.Run("ErrPathRequired", func(t *testing.T) { + var client mock.ReplicaClient + if err := 
litestream.Restore(context.Background(), &client, "", "0000000000000000", 0, 0, litestream.NewRestoreOptions()); err == nil || err.Error() != `restore path required` { + t.Fatalf("unexpected error: %#v", err) + } + }) + + t.Run("ErrGenerationRequired", func(t *testing.T) { + var client mock.ReplicaClient + if err := litestream.Restore(context.Background(), &client, t.TempDir(), "", 0, 0, litestream.NewRestoreOptions()); err == nil || err.Error() != `generation required` { + t.Fatalf("unexpected error: %#v", err) + } + }) + + t.Run("ErrSnapshotIndexRequired", func(t *testing.T) { + var client mock.ReplicaClient + if err := litestream.Restore(context.Background(), &client, t.TempDir(), "0000000000000000", -1, 0, litestream.NewRestoreOptions()); err == nil || err.Error() != `snapshot index required` { + t.Fatalf("unexpected error: %#v", err) + } + }) + + t.Run("ErrTargetIndexRequired", func(t *testing.T) { + var client mock.ReplicaClient + if err := litestream.Restore(context.Background(), &client, t.TempDir(), "0000000000000000", 0, -1, litestream.NewRestoreOptions()); err == nil || err.Error() != `target index required` { + t.Fatalf("unexpected error: %#v", err) + } + }) + + t.Run("ErrPathExists", func(t *testing.T) { + filename := filepath.Join(t.TempDir(), "db") + if err := os.WriteFile(filename, []byte("foo"), 0600); err != nil { + t.Fatal(err) + } + var client mock.ReplicaClient + if err := litestream.Restore(context.Background(), &client, filename, "0000000000000000", 0, 0, litestream.NewRestoreOptions()); err == nil || !strings.Contains(err.Error(), `cannot restore, output path already exists`) { + t.Fatalf("unexpected error: %#v", err) + } + }) + + t.Run("ErrPathPermissions", func(t *testing.T) { + dir := t.TempDir() + if err := os.Chmod(dir, 0000); err != nil { + t.Fatal(err) + } + + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "restore", "bad-permissions")) + if err := litestream.Restore(context.Background(), client, filepath.Join(dir, "db"), "0000000000000000", 0, 0, litestream.NewRestoreOptions()); err == nil || !strings.Contains(err.Error(), `permission denied`) { + t.Fatalf("unexpected error: %#v", err) + } + }) } diff --git a/replica_test.go b/replica_test.go index 1a64cc0..a0220bb 100644 --- a/replica_test.go +++ b/replica_test.go @@ -8,7 +8,6 @@ import ( "testing" "github.com/benbjohnson/litestream" - "github.com/benbjohnson/litestream/file" "github.com/benbjohnson/litestream/mock" "github.com/pierrec/lz4/v4" ) @@ -45,9 +44,9 @@ func TestReplica_Sync(t *testing.T) { // Fetch current database position. 
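// With the reworked wiring a replica only holds a forward reference to its
// client (no back-pointer from the client), so the setup below reduces to,
// roughly:
//
//	c := litestream.NewFileReplicaClient(t.TempDir())
//	r := litestream.NewReplica(db, "")
//	r.Client = c
//	if err := r.Sync(context.Background()); err != nil {
//		t.Fatal(err)
//	}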
dpos := db.Pos() - c := file.NewReplicaClient(t.TempDir()) + c := litestream.NewFileReplicaClient(t.TempDir()) r := litestream.NewReplica(db, "") - c.Replica, r.Client = r, c + r.Client = c if err := r.Sync(context.Background()); err != nil { t.Fatal(err) @@ -81,7 +80,7 @@ func TestReplica_Snapshot(t *testing.T) { db, sqldb := MustOpenDBs(t) defer MustCloseDBs(t, db, sqldb) - c := file.NewReplicaClient(t.TempDir()) + c := litestream.NewFileReplicaClient(t.TempDir()) r := litestream.NewReplica(db, "") r.Client = c diff --git a/testdata/Makefile b/testdata/Makefile new file mode 100644 index 0000000..b87ebd5 --- /dev/null +++ b/testdata/Makefile @@ -0,0 +1,8 @@ +.PHONY: default +default: + make -C find-latest-generation/ok + make -C generation-time-bounds/ok + make -C generation-time-bounds/snapshots-only + make -C replica-client-time-bounds/ok + make -C snapshot-time-bounds/ok + make -C wal-time-bounds/ok diff --git a/testdata/find-latest-generation/no-generations/.gitignore b/testdata/find-latest-generation/no-generations/.gitignore new file mode 100644 index 0000000..e69de29 diff --git a/testdata/find-latest-generation/ok/Makefile b/testdata/find-latest-generation/ok/Makefile new file mode 100644 index 0000000..847b844 --- /dev/null +++ b/testdata/find-latest-generation/ok/Makefile @@ -0,0 +1,7 @@ +.PHONY: default +default: + TZ=UTC touch -t 200001010000 generations/0000000000000000/snapshots/00000000.snapshot.lz4 + TZ=UTC touch -t 200001020000 generations/0000000000000000/snapshots/00000001.snapshot.lz4 + TZ=UTC touch -t 200001010000 generations/0000000000000001/snapshots/00000000.snapshot.lz4 + TZ=UTC touch -t 200001030000 generations/0000000000000001/snapshots/00000001.snapshot.lz4 + TZ=UTC touch -t 200001010000 generations/0000000000000002/snapshots/00000000.snapshot.lz4 diff --git a/testdata/find-latest-generation/ok/generations/0000000000000000/snapshots/00000000.snapshot.lz4 b/testdata/find-latest-generation/ok/generations/0000000000000000/snapshots/00000000.snapshot.lz4 new file mode 100644 index 0000000..7536340 Binary files /dev/null and b/testdata/find-latest-generation/ok/generations/0000000000000000/snapshots/00000000.snapshot.lz4 differ diff --git a/testdata/find-latest-generation/ok/generations/0000000000000000/snapshots/00000001.snapshot.lz4 b/testdata/find-latest-generation/ok/generations/0000000000000000/snapshots/00000001.snapshot.lz4 new file mode 100644 index 0000000..7536340 Binary files /dev/null and b/testdata/find-latest-generation/ok/generations/0000000000000000/snapshots/00000001.snapshot.lz4 differ diff --git a/testdata/find-latest-generation/ok/generations/0000000000000001/snapshots/00000000.snapshot.lz4 b/testdata/find-latest-generation/ok/generations/0000000000000001/snapshots/00000000.snapshot.lz4 new file mode 100644 index 0000000..7536340 Binary files /dev/null and b/testdata/find-latest-generation/ok/generations/0000000000000001/snapshots/00000000.snapshot.lz4 differ diff --git a/testdata/find-latest-generation/ok/generations/0000000000000001/snapshots/00000001.snapshot.lz4 b/testdata/find-latest-generation/ok/generations/0000000000000001/snapshots/00000001.snapshot.lz4 new file mode 100644 index 0000000..7536340 Binary files /dev/null and b/testdata/find-latest-generation/ok/generations/0000000000000001/snapshots/00000001.snapshot.lz4 differ diff --git a/testdata/find-latest-generation/ok/generations/0000000000000002/snapshots/00000000.snapshot.lz4 b/testdata/find-latest-generation/ok/generations/0000000000000002/snapshots/00000000.snapshot.lz4 new file 
mode 100644 index 0000000..7536340 Binary files /dev/null and b/testdata/find-latest-generation/ok/generations/0000000000000002/snapshots/00000000.snapshot.lz4 differ diff --git a/testdata/find-snapshot-for-index/no-snapshots-before-index/generations/0000000000000000/snapshots/00000bb8.snapshot.lz4 b/testdata/find-snapshot-for-index/no-snapshots-before-index/generations/0000000000000000/snapshots/00000bb8.snapshot.lz4 new file mode 100644 index 0000000..7536340 Binary files /dev/null and b/testdata/find-snapshot-for-index/no-snapshots-before-index/generations/0000000000000000/snapshots/00000bb8.snapshot.lz4 differ diff --git a/testdata/find-snapshot-for-index/ok/generations/0000000000000000/snapshots/00000000.snapshot.lz4 b/testdata/find-snapshot-for-index/ok/generations/0000000000000000/snapshots/00000000.snapshot.lz4 new file mode 100644 index 0000000..7536340 Binary files /dev/null and b/testdata/find-snapshot-for-index/ok/generations/0000000000000000/snapshots/00000000.snapshot.lz4 differ diff --git a/testdata/find-snapshot-for-index/ok/generations/0000000000000000/snapshots/000003e8.snapshot.lz4 b/testdata/find-snapshot-for-index/ok/generations/0000000000000000/snapshots/000003e8.snapshot.lz4 new file mode 100644 index 0000000..7536340 Binary files /dev/null and b/testdata/find-snapshot-for-index/ok/generations/0000000000000000/snapshots/000003e8.snapshot.lz4 differ diff --git a/testdata/find-snapshot-for-index/ok/generations/0000000000000000/snapshots/00000bb8.snapshot.lz4 b/testdata/find-snapshot-for-index/ok/generations/0000000000000000/snapshots/00000bb8.snapshot.lz4 new file mode 100644 index 0000000..7536340 Binary files /dev/null and b/testdata/find-snapshot-for-index/ok/generations/0000000000000000/snapshots/00000bb8.snapshot.lz4 differ diff --git a/testdata/generation-time-bounds/no-snapshots/generations/0000000000000000/snapshots/.gitignore b/testdata/generation-time-bounds/no-snapshots/generations/0000000000000000/snapshots/.gitignore new file mode 100644 index 0000000..e69de29 diff --git a/testdata/generation-time-bounds/ok/Makefile b/testdata/generation-time-bounds/ok/Makefile new file mode 100644 index 0000000..06d5044 --- /dev/null +++ b/testdata/generation-time-bounds/ok/Makefile @@ -0,0 +1,8 @@ +.PHONY: default +default: + TZ=UTC touch -t 200001010000 generations/0000000000000000/snapshots/00000000.snapshot.lz4 + TZ=UTC touch -t 200001020000 generations/0000000000000000/snapshots/00000001.snapshot.lz4 + TZ=UTC touch -t 200001010000 generations/0000000000000000/wal/00000000/00000000.wal.lz4 + TZ=UTC touch -t 200001020000 generations/0000000000000000/wal/00000000/00000001.wal.lz4 + TZ=UTC touch -t 200001030000 generations/0000000000000000/wal/00000001/00000000.wal.lz4 + diff --git a/testdata/generation-time-bounds/ok/generations/0000000000000000/snapshots/00000000.snapshot.lz4 b/testdata/generation-time-bounds/ok/generations/0000000000000000/snapshots/00000000.snapshot.lz4 new file mode 100644 index 0000000..7536340 Binary files /dev/null and b/testdata/generation-time-bounds/ok/generations/0000000000000000/snapshots/00000000.snapshot.lz4 differ diff --git a/testdata/generation-time-bounds/ok/generations/0000000000000000/snapshots/00000001.snapshot.lz4 b/testdata/generation-time-bounds/ok/generations/0000000000000000/snapshots/00000001.snapshot.lz4 new file mode 100644 index 0000000..7536340 Binary files /dev/null and b/testdata/generation-time-bounds/ok/generations/0000000000000000/snapshots/00000001.snapshot.lz4 differ diff --git 
a/testdata/generation-time-bounds/ok/generations/0000000000000000/wal/00000000/00000000.wal.lz4 b/testdata/generation-time-bounds/ok/generations/0000000000000000/wal/00000000/00000000.wal.lz4 new file mode 100644 index 0000000..7536340 Binary files /dev/null and b/testdata/generation-time-bounds/ok/generations/0000000000000000/wal/00000000/00000000.wal.lz4 differ diff --git a/testdata/generation-time-bounds/ok/generations/0000000000000000/wal/00000000/00000001.wal.lz4 b/testdata/generation-time-bounds/ok/generations/0000000000000000/wal/00000000/00000001.wal.lz4 new file mode 100644 index 0000000..7536340 Binary files /dev/null and b/testdata/generation-time-bounds/ok/generations/0000000000000000/wal/00000000/00000001.wal.lz4 differ diff --git a/testdata/generation-time-bounds/ok/generations/0000000000000000/wal/00000001/00000000.wal.lz4 b/testdata/generation-time-bounds/ok/generations/0000000000000000/wal/00000001/00000000.wal.lz4 new file mode 100644 index 0000000..7536340 Binary files /dev/null and b/testdata/generation-time-bounds/ok/generations/0000000000000000/wal/00000001/00000000.wal.lz4 differ diff --git a/testdata/generation-time-bounds/snapshots-only/Makefile b/testdata/generation-time-bounds/snapshots-only/Makefile new file mode 100644 index 0000000..18b382a --- /dev/null +++ b/testdata/generation-time-bounds/snapshots-only/Makefile @@ -0,0 +1,5 @@ +.PHONY: default +default: + TZ=UTC touch -t 200001010000 generations/0000000000000000/snapshots/00000000.snapshot.lz4 + TZ=UTC touch -t 200001020000 generations/0000000000000000/snapshots/00000001.snapshot.lz4 + diff --git a/testdata/generation-time-bounds/snapshots-only/generations/0000000000000000/snapshots/00000000.snapshot.lz4 b/testdata/generation-time-bounds/snapshots-only/generations/0000000000000000/snapshots/00000000.snapshot.lz4 new file mode 100644 index 0000000..7536340 Binary files /dev/null and b/testdata/generation-time-bounds/snapshots-only/generations/0000000000000000/snapshots/00000000.snapshot.lz4 differ diff --git a/testdata/generation-time-bounds/snapshots-only/generations/0000000000000000/snapshots/00000001.snapshot.lz4 b/testdata/generation-time-bounds/snapshots-only/generations/0000000000000000/snapshots/00000001.snapshot.lz4 new file mode 100644 index 0000000..7536340 Binary files /dev/null and b/testdata/generation-time-bounds/snapshots-only/generations/0000000000000000/snapshots/00000001.snapshot.lz4 differ diff --git a/testdata/max-index/no-snapshots/generations/0000000000000000/.gitignore b/testdata/max-index/no-snapshots/generations/0000000000000000/.gitignore new file mode 100644 index 0000000..e69de29 diff --git a/testdata/max-index/no-wal/generations/0000000000000000/snapshots/00000000.snapshot.lz4 b/testdata/max-index/no-wal/generations/0000000000000000/snapshots/00000000.snapshot.lz4 new file mode 100644 index 0000000..7536340 Binary files /dev/null and b/testdata/max-index/no-wal/generations/0000000000000000/snapshots/00000000.snapshot.lz4 differ diff --git a/testdata/max-index/no-wal/generations/0000000000000000/snapshots/00000001.snapshot.lz4 b/testdata/max-index/no-wal/generations/0000000000000000/snapshots/00000001.snapshot.lz4 new file mode 100644 index 0000000..7536340 Binary files /dev/null and b/testdata/max-index/no-wal/generations/0000000000000000/snapshots/00000001.snapshot.lz4 differ diff --git a/testdata/max-index/ok/generations/0000000000000000/snapshots/00000000.snapshot.lz4 b/testdata/max-index/ok/generations/0000000000000000/snapshots/00000000.snapshot.lz4 new file mode 100644 
index 0000000..7536340 Binary files /dev/null and b/testdata/max-index/ok/generations/0000000000000000/snapshots/00000000.snapshot.lz4 differ diff --git a/testdata/max-index/ok/generations/0000000000000000/snapshots/00000001.snapshot.lz4 b/testdata/max-index/ok/generations/0000000000000000/snapshots/00000001.snapshot.lz4 new file mode 100644 index 0000000..7536340 Binary files /dev/null and b/testdata/max-index/ok/generations/0000000000000000/snapshots/00000001.snapshot.lz4 differ diff --git a/testdata/max-index/ok/generations/0000000000000000/wal/00000000/00000000.wal.lz4 b/testdata/max-index/ok/generations/0000000000000000/wal/00000000/00000000.wal.lz4 new file mode 100644 index 0000000..7536340 Binary files /dev/null and b/testdata/max-index/ok/generations/0000000000000000/wal/00000000/00000000.wal.lz4 differ diff --git a/testdata/max-index/ok/generations/0000000000000000/wal/00000000/00001234.wal.lz4 b/testdata/max-index/ok/generations/0000000000000000/wal/00000000/00001234.wal.lz4 new file mode 100644 index 0000000..7536340 Binary files /dev/null and b/testdata/max-index/ok/generations/0000000000000000/wal/00000000/00001234.wal.lz4 differ diff --git a/testdata/max-index/ok/generations/0000000000000000/wal/00000001/00000000.wal.lz4 b/testdata/max-index/ok/generations/0000000000000000/wal/00000001/00000000.wal.lz4 new file mode 100644 index 0000000..7536340 Binary files /dev/null and b/testdata/max-index/ok/generations/0000000000000000/wal/00000001/00000000.wal.lz4 differ diff --git a/testdata/max-index/ok/generations/0000000000000000/wal/00000002/00000000.wal.lz4 b/testdata/max-index/ok/generations/0000000000000000/wal/00000002/00000000.wal.lz4 new file mode 100644 index 0000000..7536340 Binary files /dev/null and b/testdata/max-index/ok/generations/0000000000000000/wal/00000002/00000000.wal.lz4 differ diff --git a/testdata/max-index/snapshot-later-than-wal/generations/0000000000000000/snapshots/00000000.snapshot.lz4 b/testdata/max-index/snapshot-later-than-wal/generations/0000000000000000/snapshots/00000000.snapshot.lz4 new file mode 100644 index 0000000..7536340 Binary files /dev/null and b/testdata/max-index/snapshot-later-than-wal/generations/0000000000000000/snapshots/00000000.snapshot.lz4 differ diff --git a/testdata/max-index/snapshot-later-than-wal/generations/0000000000000000/snapshots/00000001.snapshot.lz4 b/testdata/max-index/snapshot-later-than-wal/generations/0000000000000000/snapshots/00000001.snapshot.lz4 new file mode 100644 index 0000000..7536340 Binary files /dev/null and b/testdata/max-index/snapshot-later-than-wal/generations/0000000000000000/snapshots/00000001.snapshot.lz4 differ diff --git a/testdata/max-index/snapshot-later-than-wal/generations/0000000000000000/wal/00000000/00000000.wal.lz4 b/testdata/max-index/snapshot-later-than-wal/generations/0000000000000000/wal/00000000/00000000.wal.lz4 new file mode 100644 index 0000000..7536340 Binary files /dev/null and b/testdata/max-index/snapshot-later-than-wal/generations/0000000000000000/wal/00000000/00000000.wal.lz4 differ diff --git a/testdata/max-index/snapshot-later-than-wal/generations/0000000000000000/wal/00000000/00001234.wal.lz4 b/testdata/max-index/snapshot-later-than-wal/generations/0000000000000000/wal/00000000/00001234.wal.lz4 new file mode 100644 index 0000000..7536340 Binary files /dev/null and b/testdata/max-index/snapshot-later-than-wal/generations/0000000000000000/wal/00000000/00001234.wal.lz4 differ diff --git a/testdata/max-snapshot-index/no-snapshots/generations/0000000000000000/.gitignore 
b/testdata/max-snapshot-index/no-snapshots/generations/0000000000000000/.gitignore new file mode 100644 index 0000000..e69de29 diff --git a/testdata/max-snapshot-index/ok/Makefile b/testdata/max-snapshot-index/ok/Makefile new file mode 100644 index 0000000..3d808b7 --- /dev/null +++ b/testdata/max-snapshot-index/ok/Makefile @@ -0,0 +1,6 @@ +.PHONY: default +default: + TZ=UTC touch -t 200001020000 generations/0000000000000000/snapshots/00000000.snapshot.lz4 + TZ=UTC touch -t 200001010000 generations/0000000000000001/snapshots/00000000.snapshot.lz4 + TZ=UTC touch -t 200001030000 generations/0000000000000001/snapshots/00000001.snapshot.lz4 + TZ=UTC touch -t 200001010000 generations/0000000000000002/snapshots/00000000.snapshot.lz4 diff --git a/testdata/max-snapshot-index/ok/generations/0000000000000000/snapshots/00000000.snapshot.lz4 b/testdata/max-snapshot-index/ok/generations/0000000000000000/snapshots/00000000.snapshot.lz4 new file mode 100644 index 0000000..7536340 Binary files /dev/null and b/testdata/max-snapshot-index/ok/generations/0000000000000000/snapshots/00000000.snapshot.lz4 differ diff --git a/testdata/max-snapshot-index/ok/generations/0000000000000000/snapshots/000003e8.snapshot.lz4 b/testdata/max-snapshot-index/ok/generations/0000000000000000/snapshots/000003e8.snapshot.lz4 new file mode 100644 index 0000000..7536340 Binary files /dev/null and b/testdata/max-snapshot-index/ok/generations/0000000000000000/snapshots/000003e8.snapshot.lz4 differ diff --git a/testdata/max-snapshot-index/ok/generations/0000000000000000/snapshots/000007d0.snapshot.lz4 b/testdata/max-snapshot-index/ok/generations/0000000000000000/snapshots/000007d0.snapshot.lz4 new file mode 100644 index 0000000..7536340 Binary files /dev/null and b/testdata/max-snapshot-index/ok/generations/0000000000000000/snapshots/000007d0.snapshot.lz4 differ diff --git a/testdata/max-wal-index/ok/generations/0000000000000000/wal/00000000/00000000.wal.lz4 b/testdata/max-wal-index/ok/generations/0000000000000000/wal/00000000/00000000.wal.lz4 new file mode 100644 index 0000000..7536340 Binary files /dev/null and b/testdata/max-wal-index/ok/generations/0000000000000000/wal/00000000/00000000.wal.lz4 differ diff --git a/testdata/max-wal-index/ok/generations/0000000000000000/wal/00000000/00001234.wal.lz4 b/testdata/max-wal-index/ok/generations/0000000000000000/wal/00000000/00001234.wal.lz4 new file mode 100644 index 0000000..7536340 Binary files /dev/null and b/testdata/max-wal-index/ok/generations/0000000000000000/wal/00000000/00001234.wal.lz4 differ diff --git a/testdata/max-wal-index/ok/generations/0000000000000000/wal/00000001/00000000.wal.lz4 b/testdata/max-wal-index/ok/generations/0000000000000000/wal/00000001/00000000.wal.lz4 new file mode 100644 index 0000000..7536340 Binary files /dev/null and b/testdata/max-wal-index/ok/generations/0000000000000000/wal/00000001/00000000.wal.lz4 differ diff --git a/testdata/replica-client-time-bounds/ok/Makefile b/testdata/replica-client-time-bounds/ok/Makefile new file mode 100644 index 0000000..3d808b7 --- /dev/null +++ b/testdata/replica-client-time-bounds/ok/Makefile @@ -0,0 +1,6 @@ +.PHONY: default +default: + TZ=UTC touch -t 200001020000 generations/0000000000000000/snapshots/00000000.snapshot.lz4 + TZ=UTC touch -t 200001010000 generations/0000000000000001/snapshots/00000000.snapshot.lz4 + TZ=UTC touch -t 200001030000 generations/0000000000000001/snapshots/00000001.snapshot.lz4 + TZ=UTC touch -t 200001010000 generations/0000000000000002/snapshots/00000000.snapshot.lz4 diff --git 
a/testdata/replica-client-time-bounds/ok/generations/0000000000000000/snapshots/00000000.snapshot.lz4 b/testdata/replica-client-time-bounds/ok/generations/0000000000000000/snapshots/00000000.snapshot.lz4 new file mode 100644 index 0000000..7536340 Binary files /dev/null and b/testdata/replica-client-time-bounds/ok/generations/0000000000000000/snapshots/00000000.snapshot.lz4 differ diff --git a/testdata/replica-client-time-bounds/ok/generations/0000000000000001/snapshots/00000000.snapshot.lz4 b/testdata/replica-client-time-bounds/ok/generations/0000000000000001/snapshots/00000000.snapshot.lz4 new file mode 100644 index 0000000..7536340 Binary files /dev/null and b/testdata/replica-client-time-bounds/ok/generations/0000000000000001/snapshots/00000000.snapshot.lz4 differ diff --git a/testdata/replica-client-time-bounds/ok/generations/0000000000000001/snapshots/00000001.snapshot.lz4 b/testdata/replica-client-time-bounds/ok/generations/0000000000000001/snapshots/00000001.snapshot.lz4 new file mode 100644 index 0000000..7536340 Binary files /dev/null and b/testdata/replica-client-time-bounds/ok/generations/0000000000000001/snapshots/00000001.snapshot.lz4 differ diff --git a/testdata/replica-client-time-bounds/ok/generations/0000000000000002/snapshots/00000000.snapshot.lz4 b/testdata/replica-client-time-bounds/ok/generations/0000000000000002/snapshots/00000000.snapshot.lz4 new file mode 100644 index 0000000..7536340 Binary files /dev/null and b/testdata/replica-client-time-bounds/ok/generations/0000000000000002/snapshots/00000000.snapshot.lz4 differ diff --git a/testdata/restore/bad-permissions/00000000.db b/testdata/restore/bad-permissions/00000000.db new file mode 100644 index 0000000..86bbea7 Binary files /dev/null and b/testdata/restore/bad-permissions/00000000.db differ diff --git a/testdata/restore/bad-permissions/README b/testdata/restore/bad-permissions/README new file mode 100644 index 0000000..9450f45 --- /dev/null +++ b/testdata/restore/bad-permissions/README @@ -0,0 +1,36 @@ +To reproduce this testdata, run sqlite3 and execute: + + PRAGMA journal_mode = WAL; + CREATE TABLE t (x); + INSERT INTO t (x) VALUES (1); + INSERT INTO t (x) VALUES (2); + + sl3 split -o generations/0000000000000000/wal/00000000 db-wal + cp db generations/0000000000000000/snapshots/00000000.snapshot + lz4 -c --rm generations/0000000000000000/snapshots/00000000.snapshot + + +Then execute: + + PRAGMA wal_checkpoint(TRUNCATE); + INSERT INTO t (x) VALUES (3); + + sl3 split -o generations/0000000000000000/wal/00000001 db-wal + + +Then execute: + + PRAGMA wal_checkpoint(TRUNCATE); + INSERT INTO t (x) VALUES (4); + INSERT INTO t (x) VALUES (5); + + sl3 split -o generations/0000000000000000/wal/00000002 db-wal + + +Finally, obtain the final snapshot: + + PRAGMA wal_checkpoint(TRUNCATE); + + cp db 00000002.db + rm db* + diff --git a/testdata/restore/bad-permissions/generations/0000000000000000/snapshots/00000000.snapshot.lz4 b/testdata/restore/bad-permissions/generations/0000000000000000/snapshots/00000000.snapshot.lz4 new file mode 100644 index 0000000..7536340 Binary files /dev/null and b/testdata/restore/bad-permissions/generations/0000000000000000/snapshots/00000000.snapshot.lz4 differ diff --git a/testdata/restore/ok/00000002.db b/testdata/restore/ok/00000002.db new file mode 100644 index 0000000..cfd2b8d Binary files /dev/null and b/testdata/restore/ok/00000002.db differ diff --git a/testdata/restore/ok/README b/testdata/restore/ok/README new file mode 100644 index 0000000..9450f45 --- /dev/null +++ 
b/testdata/restore/ok/README @@ -0,0 +1,36 @@ +To reproduce this testdata, run sqlite3 and execute: + + PRAGMA journal_mode = WAL; + CREATE TABLE t (x); + INSERT INTO t (x) VALUES (1); + INSERT INTO t (x) VALUES (2); + + sl3 split -o generations/0000000000000000/wal/00000000 db-wal + cp db generations/0000000000000000/snapshots/00000000.snapshot + lz4 -c --rm generations/0000000000000000/snapshots/00000000.snapshot + + +Then execute: + + PRAGMA wal_checkpoint(TRUNCATE); + INSERT INTO t (x) VALUES (3); + + sl3 split -o generations/0000000000000000/wal/00000001 db-wal + + +Then execute: + + PRAGMA wal_checkpoint(TRUNCATE); + INSERT INTO t (x) VALUES (4); + INSERT INTO t (x) VALUES (5); + + sl3 split -o generations/0000000000000000/wal/00000002 db-wal + + +Finally, obtain the final snapshot: + + PRAGMA wal_checkpoint(TRUNCATE); + + cp db 00000002.db + rm db* + diff --git a/testdata/restore/ok/generations/0000000000000000/snapshots/00000000.snapshot.lz4 b/testdata/restore/ok/generations/0000000000000000/snapshots/00000000.snapshot.lz4 new file mode 100644 index 0000000..7536340 Binary files /dev/null and b/testdata/restore/ok/generations/0000000000000000/snapshots/00000000.snapshot.lz4 differ diff --git a/testdata/restore/ok/generations/0000000000000000/wal/00000000/00000000.wal.lz4 b/testdata/restore/ok/generations/0000000000000000/wal/00000000/00000000.wal.lz4 new file mode 100644 index 0000000..37e1dcf Binary files /dev/null and b/testdata/restore/ok/generations/0000000000000000/wal/00000000/00000000.wal.lz4 differ diff --git a/testdata/restore/ok/generations/0000000000000000/wal/00000000/00002050.wal.lz4 b/testdata/restore/ok/generations/0000000000000000/wal/00000000/00002050.wal.lz4 new file mode 100644 index 0000000..3bd7ab7 Binary files /dev/null and b/testdata/restore/ok/generations/0000000000000000/wal/00000000/00002050.wal.lz4 differ diff --git a/testdata/restore/ok/generations/0000000000000000/wal/00000000/00003068.wal.lz4 b/testdata/restore/ok/generations/0000000000000000/wal/00000000/00003068.wal.lz4 new file mode 100644 index 0000000..c73bf2c Binary files /dev/null and b/testdata/restore/ok/generations/0000000000000000/wal/00000000/00003068.wal.lz4 differ diff --git a/testdata/restore/ok/generations/0000000000000000/wal/00000001/00000000.wal.lz4 b/testdata/restore/ok/generations/0000000000000000/wal/00000001/00000000.wal.lz4 new file mode 100644 index 0000000..64a4899 Binary files /dev/null and b/testdata/restore/ok/generations/0000000000000000/wal/00000001/00000000.wal.lz4 differ diff --git a/testdata/restore/ok/generations/0000000000000000/wal/00000002/00000000.wal.lz4 b/testdata/restore/ok/generations/0000000000000000/wal/00000002/00000000.wal.lz4 new file mode 100644 index 0000000..2265d0e Binary files /dev/null and b/testdata/restore/ok/generations/0000000000000000/wal/00000002/00000000.wal.lz4 differ diff --git a/testdata/restore/ok/generations/0000000000000000/wal/00000002/00001038.wal.lz4 b/testdata/restore/ok/generations/0000000000000000/wal/00000002/00001038.wal.lz4 new file mode 100644 index 0000000..c7dc94f Binary files /dev/null and b/testdata/restore/ok/generations/0000000000000000/wal/00000002/00001038.wal.lz4 differ diff --git a/testdata/restore/snapshot-only/00000000.db b/testdata/restore/snapshot-only/00000000.db new file mode 100644 index 0000000..86bbea7 Binary files /dev/null and b/testdata/restore/snapshot-only/00000000.db differ diff --git a/testdata/restore/snapshot-only/generations/0000000000000000/snapshots/00000000.snapshot.lz4 
b/testdata/restore/snapshot-only/generations/0000000000000000/snapshots/00000000.snapshot.lz4 new file mode 100644 index 0000000..7536340 Binary files /dev/null and b/testdata/restore/snapshot-only/generations/0000000000000000/snapshots/00000000.snapshot.lz4 differ diff --git a/testdata/snapshot-time-bounds/no-snapshots/generations/0000000000000000/snapshots/.gitignore b/testdata/snapshot-time-bounds/no-snapshots/generations/0000000000000000/snapshots/.gitignore new file mode 100644 index 0000000..e69de29 diff --git a/testdata/snapshot-time-bounds/ok/Makefile b/testdata/snapshot-time-bounds/ok/Makefile new file mode 100644 index 0000000..0a3ea13 --- /dev/null +++ b/testdata/snapshot-time-bounds/ok/Makefile @@ -0,0 +1,6 @@ +.PHONY: default +default: + TZ=UTC touch -t 200001010000 generations/0000000000000000/snapshots/00000000.snapshot.lz4 + TZ=UTC touch -t 200001020000 generations/0000000000000000/snapshots/00000001.snapshot.lz4 + TZ=UTC touch -t 200001030000 generations/0000000000000000/snapshots/00000002.snapshot.lz4 + diff --git a/testdata/snapshot-time-bounds/ok/generations/0000000000000000/snapshots/00000000.snapshot.lz4 b/testdata/snapshot-time-bounds/ok/generations/0000000000000000/snapshots/00000000.snapshot.lz4 new file mode 100644 index 0000000..7536340 Binary files /dev/null and b/testdata/snapshot-time-bounds/ok/generations/0000000000000000/snapshots/00000000.snapshot.lz4 differ diff --git a/testdata/snapshot-time-bounds/ok/generations/0000000000000000/snapshots/00000001.snapshot.lz4 b/testdata/snapshot-time-bounds/ok/generations/0000000000000000/snapshots/00000001.snapshot.lz4 new file mode 100644 index 0000000..7536340 Binary files /dev/null and b/testdata/snapshot-time-bounds/ok/generations/0000000000000000/snapshots/00000001.snapshot.lz4 differ diff --git a/testdata/snapshot-time-bounds/ok/generations/0000000000000000/snapshots/00000002.snapshot.lz4 b/testdata/snapshot-time-bounds/ok/generations/0000000000000000/snapshots/00000002.snapshot.lz4 new file mode 100644 index 0000000..7536340 Binary files /dev/null and b/testdata/snapshot-time-bounds/ok/generations/0000000000000000/snapshots/00000002.snapshot.lz4 differ diff --git a/testdata/wal-downloader/err-download-wal/generations/0000000000000000/wal/00000000/00000000.wal.lz4 b/testdata/wal-downloader/err-download-wal/generations/0000000000000000/wal/00000000/00000000.wal.lz4 new file mode 100644 index 0000000..03f56a3 Binary files /dev/null and b/testdata/wal-downloader/err-download-wal/generations/0000000000000000/wal/00000000/00000000.wal.lz4 differ diff --git a/testdata/wal-downloader/err-read-wal-segment/generations/0000000000000000/wal/00000000/00000000.wal.lz4 b/testdata/wal-downloader/err-read-wal-segment/generations/0000000000000000/wal/00000000/00000000.wal.lz4 new file mode 100644 index 0000000..03f56a3 Binary files /dev/null and b/testdata/wal-downloader/err-read-wal-segment/generations/0000000000000000/wal/00000000/00000000.wal.lz4 differ diff --git a/testdata/wal-downloader/err-write-wal/generations/0000000000000000/wal/00000000/00000000.wal.lz4 b/testdata/wal-downloader/err-write-wal/generations/0000000000000000/wal/00000000/00000000.wal.lz4 new file mode 100644 index 0000000..03f56a3 Binary files /dev/null and b/testdata/wal-downloader/err-write-wal/generations/0000000000000000/wal/00000000/00000000.wal.lz4 differ diff --git a/testdata/wal-downloader/missing-ending-index/generations/0000000000000000/wal/00000000/00000000.wal.lz4 
b/testdata/wal-downloader/missing-ending-index/generations/0000000000000000/wal/00000000/00000000.wal.lz4 new file mode 100644 index 0000000..03f56a3 Binary files /dev/null and b/testdata/wal-downloader/missing-ending-index/generations/0000000000000000/wal/00000000/00000000.wal.lz4 differ diff --git a/testdata/wal-downloader/missing-ending-index/generations/0000000000000000/wal/00000000/00002050.wal.lz4 b/testdata/wal-downloader/missing-ending-index/generations/0000000000000000/wal/00000000/00002050.wal.lz4 new file mode 100644 index 0000000..d8c9ab6 Binary files /dev/null and b/testdata/wal-downloader/missing-ending-index/generations/0000000000000000/wal/00000000/00002050.wal.lz4 differ diff --git a/testdata/wal-downloader/missing-ending-index/generations/0000000000000000/wal/00000000/00003068.wal.lz4 b/testdata/wal-downloader/missing-ending-index/generations/0000000000000000/wal/00000000/00003068.wal.lz4 new file mode 100644 index 0000000..16be189 Binary files /dev/null and b/testdata/wal-downloader/missing-ending-index/generations/0000000000000000/wal/00000000/00003068.wal.lz4 differ diff --git a/testdata/wal-downloader/missing-ending-index/generations/0000000000000000/wal/00000001/00000000.wal.lz4 b/testdata/wal-downloader/missing-ending-index/generations/0000000000000000/wal/00000001/00000000.wal.lz4 new file mode 100644 index 0000000..46d706b Binary files /dev/null and b/testdata/wal-downloader/missing-ending-index/generations/0000000000000000/wal/00000001/00000000.wal.lz4 differ diff --git a/testdata/wal-downloader/missing-initial-index/generations/0000000000000000/wal/00000001/00000000.wal.lz4 b/testdata/wal-downloader/missing-initial-index/generations/0000000000000000/wal/00000001/00000000.wal.lz4 new file mode 100644 index 0000000..46d706b Binary files /dev/null and b/testdata/wal-downloader/missing-initial-index/generations/0000000000000000/wal/00000001/00000000.wal.lz4 differ diff --git a/testdata/wal-downloader/missing-initial-index/generations/0000000000000000/wal/00000002/00000000.wal.lz4 b/testdata/wal-downloader/missing-initial-index/generations/0000000000000000/wal/00000002/00000000.wal.lz4 new file mode 100644 index 0000000..5366ae2 Binary files /dev/null and b/testdata/wal-downloader/missing-initial-index/generations/0000000000000000/wal/00000002/00000000.wal.lz4 differ diff --git a/testdata/wal-downloader/missing-initial-index/generations/0000000000000000/wal/00000002/00001038.wal.lz4 b/testdata/wal-downloader/missing-initial-index/generations/0000000000000000/wal/00000002/00001038.wal.lz4 new file mode 100644 index 0000000..6fdb481 Binary files /dev/null and b/testdata/wal-downloader/missing-initial-index/generations/0000000000000000/wal/00000002/00001038.wal.lz4 differ diff --git a/testdata/wal-downloader/missing-middle-index/generations/0000000000000000/wal/00000000/00000000.wal.lz4 b/testdata/wal-downloader/missing-middle-index/generations/0000000000000000/wal/00000000/00000000.wal.lz4 new file mode 100644 index 0000000..03f56a3 Binary files /dev/null and b/testdata/wal-downloader/missing-middle-index/generations/0000000000000000/wal/00000000/00000000.wal.lz4 differ diff --git a/testdata/wal-downloader/missing-middle-index/generations/0000000000000000/wal/00000000/00002050.wal.lz4 b/testdata/wal-downloader/missing-middle-index/generations/0000000000000000/wal/00000000/00002050.wal.lz4 new file mode 100644 index 0000000..d8c9ab6 Binary files /dev/null and b/testdata/wal-downloader/missing-middle-index/generations/0000000000000000/wal/00000000/00002050.wal.lz4 differ 
diff --git a/testdata/wal-downloader/missing-middle-index/generations/0000000000000000/wal/00000000/00003068.wal.lz4 b/testdata/wal-downloader/missing-middle-index/generations/0000000000000000/wal/00000000/00003068.wal.lz4 new file mode 100644 index 0000000..16be189 Binary files /dev/null and b/testdata/wal-downloader/missing-middle-index/generations/0000000000000000/wal/00000000/00003068.wal.lz4 differ diff --git a/testdata/wal-downloader/missing-middle-index/generations/0000000000000000/wal/00000002/00000000.wal.lz4 b/testdata/wal-downloader/missing-middle-index/generations/0000000000000000/wal/00000002/00000000.wal.lz4 new file mode 100644 index 0000000..5366ae2 Binary files /dev/null and b/testdata/wal-downloader/missing-middle-index/generations/0000000000000000/wal/00000002/00000000.wal.lz4 differ diff --git a/testdata/wal-downloader/missing-middle-index/generations/0000000000000000/wal/00000002/00001038.wal.lz4 b/testdata/wal-downloader/missing-middle-index/generations/0000000000000000/wal/00000002/00001038.wal.lz4 new file mode 100644 index 0000000..6fdb481 Binary files /dev/null and b/testdata/wal-downloader/missing-middle-index/generations/0000000000000000/wal/00000002/00001038.wal.lz4 differ diff --git a/testdata/wal-downloader/missing-offset/generations/0000000000000000/wal/00000000/00000000.wal.lz4 b/testdata/wal-downloader/missing-offset/generations/0000000000000000/wal/00000000/00000000.wal.lz4 new file mode 100644 index 0000000..03f56a3 Binary files /dev/null and b/testdata/wal-downloader/missing-offset/generations/0000000000000000/wal/00000000/00000000.wal.lz4 differ diff --git a/testdata/wal-downloader/missing-offset/generations/0000000000000000/wal/00000000/00003068.wal.lz4 b/testdata/wal-downloader/missing-offset/generations/0000000000000000/wal/00000000/00003068.wal.lz4 new file mode 100644 index 0000000..16be189 Binary files /dev/null and b/testdata/wal-downloader/missing-offset/generations/0000000000000000/wal/00000000/00003068.wal.lz4 differ diff --git a/testdata/wal-downloader/ok/00000000.wal b/testdata/wal-downloader/ok/00000000.wal new file mode 100644 index 0000000..c04d8f0 Binary files /dev/null and b/testdata/wal-downloader/ok/00000000.wal differ diff --git a/testdata/wal-downloader/ok/00000001.wal b/testdata/wal-downloader/ok/00000001.wal new file mode 100644 index 0000000..1a59daa Binary files /dev/null and b/testdata/wal-downloader/ok/00000001.wal differ diff --git a/testdata/wal-downloader/ok/00000002.wal b/testdata/wal-downloader/ok/00000002.wal new file mode 100644 index 0000000..e8bb526 Binary files /dev/null and b/testdata/wal-downloader/ok/00000002.wal differ diff --git a/testdata/wal-downloader/ok/README b/testdata/wal-downloader/ok/README new file mode 100644 index 0000000..63eda96 --- /dev/null +++ b/testdata/wal-downloader/ok/README @@ -0,0 +1,40 @@ +To reproduce this testdata, run sqlite3 and execute: + + PRAGMA journal_mode = WAL; + CREATE TABLE t (x); + INSERT INTO t (x) VALUES (1); + INSERT INTO t (x) VALUES (2); + +And copy & split the WAL into segments: + + sl3 split -o generations/0000000000000000/wal/00000000 db-wal + cp db-wal 00000000.wal + + +Then execute: + + PRAGMA wal_checkpoint(TRUNCATE); + INSERT INTO t (x) VALUES (3); + +And split again: + + sl3 split -o generations/0000000000000000/wal/00000001 db-wal + cp db-wal 00000001.wal + + +Then execute: + + PRAGMA wal_checkpoint(TRUNCATE); + INSERT INTO t (x) VALUES (4); + INSERT INTO t (x) VALUES (5); + +And split again: + + sl3 split -o generations/0000000000000000/wal/00000002 db-wal + 
cp db-wal 00000002.wal + + +Finally, remove the original database files: + + rm db* + diff --git a/testdata/wal-downloader/ok/generations/0000000000000000/wal/00000000/00000000.wal.lz4 b/testdata/wal-downloader/ok/generations/0000000000000000/wal/00000000/00000000.wal.lz4 new file mode 100644 index 0000000..03f56a3 Binary files /dev/null and b/testdata/wal-downloader/ok/generations/0000000000000000/wal/00000000/00000000.wal.lz4 differ diff --git a/testdata/wal-downloader/ok/generations/0000000000000000/wal/00000000/00002050.wal.lz4 b/testdata/wal-downloader/ok/generations/0000000000000000/wal/00000000/00002050.wal.lz4 new file mode 100644 index 0000000..d8c9ab6 Binary files /dev/null and b/testdata/wal-downloader/ok/generations/0000000000000000/wal/00000000/00002050.wal.lz4 differ diff --git a/testdata/wal-downloader/ok/generations/0000000000000000/wal/00000000/00003068.wal.lz4 b/testdata/wal-downloader/ok/generations/0000000000000000/wal/00000000/00003068.wal.lz4 new file mode 100644 index 0000000..16be189 Binary files /dev/null and b/testdata/wal-downloader/ok/generations/0000000000000000/wal/00000000/00003068.wal.lz4 differ diff --git a/testdata/wal-downloader/ok/generations/0000000000000000/wal/00000001/00000000.wal.lz4 b/testdata/wal-downloader/ok/generations/0000000000000000/wal/00000001/00000000.wal.lz4 new file mode 100644 index 0000000..46d706b Binary files /dev/null and b/testdata/wal-downloader/ok/generations/0000000000000000/wal/00000001/00000000.wal.lz4 differ diff --git a/testdata/wal-downloader/ok/generations/0000000000000000/wal/00000002/00000000.wal.lz4 b/testdata/wal-downloader/ok/generations/0000000000000000/wal/00000002/00000000.wal.lz4 new file mode 100644 index 0000000..5366ae2 Binary files /dev/null and b/testdata/wal-downloader/ok/generations/0000000000000000/wal/00000002/00000000.wal.lz4 differ diff --git a/testdata/wal-downloader/ok/generations/0000000000000000/wal/00000002/00001038.wal.lz4 b/testdata/wal-downloader/ok/generations/0000000000000000/wal/00000002/00001038.wal.lz4 new file mode 100644 index 0000000..6fdb481 Binary files /dev/null and b/testdata/wal-downloader/ok/generations/0000000000000000/wal/00000002/00001038.wal.lz4 differ diff --git a/testdata/wal-downloader/one/00000000.wal b/testdata/wal-downloader/one/00000000.wal new file mode 100644 index 0000000..c04d8f0 Binary files /dev/null and b/testdata/wal-downloader/one/00000000.wal differ diff --git a/testdata/wal-downloader/one/README b/testdata/wal-downloader/one/README new file mode 100644 index 0000000..afe550f --- /dev/null +++ b/testdata/wal-downloader/one/README @@ -0,0 +1,17 @@ +To reproduce this testdata, run sqlite3 and execute: + + PRAGMA journal_mode = WAL; + CREATE TABLE t (x); + INSERT INTO t (x) VALUES (1); + INSERT INTO t (x) VALUES (2); + +And copy & split the WAL into segments: + + sl3 split -o generations/0000000000000000/wal/00000000 db-wal + cp db-wal 00000000.wal + + +Finally, remove the original database files: + + rm db* + diff --git a/testdata/wal-downloader/one/generations/0000000000000000/wal/00000000/00000000.wal.lz4 b/testdata/wal-downloader/one/generations/0000000000000000/wal/00000000/00000000.wal.lz4 new file mode 100644 index 0000000..03f56a3 Binary files /dev/null and b/testdata/wal-downloader/one/generations/0000000000000000/wal/00000000/00000000.wal.lz4 differ diff --git a/testdata/wal-downloader/one/generations/0000000000000000/wal/00000000/00002050.wal.lz4 b/testdata/wal-downloader/one/generations/0000000000000000/wal/00000000/00002050.wal.lz4 new file mode 
100644 index 0000000..d8c9ab6 Binary files /dev/null and b/testdata/wal-downloader/one/generations/0000000000000000/wal/00000000/00002050.wal.lz4 differ diff --git a/testdata/wal-downloader/one/generations/0000000000000000/wal/00000000/00003068.wal.lz4 b/testdata/wal-downloader/one/generations/0000000000000000/wal/00000000/00003068.wal.lz4 new file mode 100644 index 0000000..16be189 Binary files /dev/null and b/testdata/wal-downloader/one/generations/0000000000000000/wal/00000000/00003068.wal.lz4 differ diff --git a/testdata/wal-time-bounds/no-wal-segments/generations/0000000000000000/wal/.gitignore b/testdata/wal-time-bounds/no-wal-segments/generations/0000000000000000/wal/.gitignore new file mode 100644 index 0000000..e69de29 diff --git a/testdata/wal-time-bounds/ok/Makefile b/testdata/wal-time-bounds/ok/Makefile new file mode 100644 index 0000000..875381c --- /dev/null +++ b/testdata/wal-time-bounds/ok/Makefile @@ -0,0 +1,6 @@ +.PHONY: default +default: + TZ=UTC touch -t 200001010000 generations/0000000000000000/wal/00000000/00000000.wal.lz4 + TZ=UTC touch -t 200001020000 generations/0000000000000000/wal/00000000/00000001.wal.lz4 + TZ=UTC touch -t 200001030000 generations/0000000000000000/wal/00000001/00000000.wal.lz4 + diff --git a/testdata/wal-time-bounds/ok/generations/0000000000000000/wal/00000000/00000000.wal.lz4 b/testdata/wal-time-bounds/ok/generations/0000000000000000/wal/00000000/00000000.wal.lz4 new file mode 100644 index 0000000..7536340 Binary files /dev/null and b/testdata/wal-time-bounds/ok/generations/0000000000000000/wal/00000000/00000000.wal.lz4 differ diff --git a/testdata/wal-time-bounds/ok/generations/0000000000000000/wal/00000000/00000001.wal.lz4 b/testdata/wal-time-bounds/ok/generations/0000000000000000/wal/00000000/00000001.wal.lz4 new file mode 100644 index 0000000..7536340 Binary files /dev/null and b/testdata/wal-time-bounds/ok/generations/0000000000000000/wal/00000000/00000001.wal.lz4 differ diff --git a/testdata/wal-time-bounds/ok/generations/0000000000000000/wal/00000001/00000000.wal.lz4 b/testdata/wal-time-bounds/ok/generations/0000000000000000/wal/00000001/00000000.wal.lz4 new file mode 100644 index 0000000..7536340 Binary files /dev/null and b/testdata/wal-time-bounds/ok/generations/0000000000000000/wal/00000001/00000000.wal.lz4 differ diff --git a/wal_downloader.go b/wal_downloader.go new file mode 100644 index 0000000..b87c245 --- /dev/null +++ b/wal_downloader.go @@ -0,0 +1,335 @@ +package litestream + +import ( + "context" + "fmt" + "io" + "os" + "sync" + + "github.com/benbjohnson/litestream/internal" + "github.com/pierrec/lz4/v4" + "golang.org/x/sync/errgroup" ) + +// WALDownloader represents a parallel downloader of WAL files from a replica client. +// +// It works on a per-index level so WAL files are always downloaded in their +// entirety and are not segmented. WAL files are downloaded from minIndex to +// maxIndex, inclusive, and are written to a path prefix. WAL files are named +// with the prefix and suffixed with the WAL index. It is the responsibility of +// the caller to clean up these WAL files. +// +// The purpose of the parallelization is that RTT & WAL apply time can consume +// much of the restore time, so it's useful to download multiple WAL files in +// the background to minimize the latency. While some WAL indexes may be +// downloaded out of order, the WALDownloader ensures that Next() always +// returns the WAL files sequentially.
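For orientation, here is a minimal, hypothetical usage sketch of the API added by this patch; the replica path, generation, index range, and parallelism values below are illustrative assumptions, not values taken from this change:

package main

import (
	"context"
	"fmt"
	"io"
	"log"

	"github.com/benbjohnson/litestream"
)

func main() {
	// Read WAL segments from a file-based replica (the path is an assumption).
	client := litestream.NewFileReplicaClient("/path/to/replica")

	// Download WAL indexes 0 through 10 (inclusive) of a generation; output
	// files are written next to the given prefix, suffixed with the WAL index.
	d := litestream.NewWALDownloader(client, "/tmp/restore/wal", "0000000000000000", 0, 10)
	d.Parallelism = 4 // fetch several WAL files concurrently
	defer d.Close()

	for {
		index, path, err := d.Next(context.Background())
		if err == io.EOF {
			break // every index in the range has been returned, in order
		} else if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("downloaded WAL %08x to %s\n", index, path)
		// The caller applies the WAL file and is responsible for removing it.
	}
}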
+type WALDownloader struct { + ctx context.Context // context used for early close/cancellation + cancel func() + + client ReplicaClient // client to read WAL segments with + generation string // generation to download WAL files from + minIndex int // starting WAL index (inclusive) + maxIndex int // ending WAL index (inclusive) + prefix string // output file prefix + + err error // error occurring during init, propagated to Next() + n int // number of WAL files returned by Next() + + // Concurrency coordination + mu sync.Mutex // used to serialize sending of next WAL index + cond *sync.Cond // used with mu above + g *errgroup.Group // manages worker goroutines for downloading + input chan walDownloadInput // holds ordered WAL indices w/ offsets + output chan walDownloadOutput // always sends next sequential WAL; used by Next() + nextIndex int // tracks next WAL index to send to output channel + + // File info used for downloaded WAL files. + Mode os.FileMode + Uid, Gid int + + // Number of downloads occurring in parallel. + Parallelism int +} + +// NewWALDownloader returns a new instance of WALDownloader. +func NewWALDownloader(client ReplicaClient, prefix string, generation string, minIndex, maxIndex int) *WALDownloader { + d := &WALDownloader{ + client: client, + prefix: prefix, + generation: generation, + minIndex: minIndex, + maxIndex: maxIndex, + + Mode: 0600, + Parallelism: 1, + } + + d.ctx, d.cancel = context.WithCancel(context.Background()) + d.cond = sync.NewCond(&d.mu) + + return d +} + +// Close cancels all downloads and returns any error that has occurred. +func (d *WALDownloader) Close() (err error) { + if d.err != nil { + err = d.err + } + + d.cancel() + + if d.g != nil { + if e := d.g.Wait(); err == nil && e != context.Canceled { + err = e + } + } + return err +} + +// init initializes the downloader on the first invocation only. It generates +// the input channel with all WAL indices & offsets needed, initializes +// the output channel that Next() waits on, and starts the worker goroutines +// that begin downloading WAL files in the background. +func (d *WALDownloader) init(ctx context.Context) error { + if d.input != nil { + return nil // already initialized + } else if d.minIndex < 0 { + return fmt.Errorf("minimum index required") + } else if d.maxIndex < 0 { + return fmt.Errorf("maximum index required") + } else if d.maxIndex < d.minIndex { + return fmt.Errorf("minimum index cannot be larger than maximum index") + } else if d.Parallelism < 1 { + return fmt.Errorf("parallelism must be at least one") + } + + // Populate input channel with indices & offsets. + if err := d.initInputCh(ctx); err != nil { + return err + } + d.nextIndex = d.minIndex + + // Generate output channel that Next() pulls from. + d.output = make(chan walDownloadOutput) + + // Spawn worker goroutines to download WALs. + d.g, d.ctx = errgroup.WithContext(d.ctx) + for i := 0; i < d.Parallelism; i++ { + d.g.Go(func() error { return d.downloader(d.ctx) }) + } + + return nil +} + +// initInputCh populates the input channel with each WAL index between minIndex +// and maxIndex. It also includes all offsets needed with the index.
+func (d *WALDownloader) initInputCh(ctx context.Context) error { + itr, err := d.client.WALSegments(ctx, d.generation) + if err != nil { + return fmt.Errorf("wal segments: %w", err) + } + defer func() { _ = itr.Close() }() + + d.input = make(chan walDownloadInput, d.maxIndex-d.minIndex+1) + defer close(d.input) + + index := d.minIndex - 1 + var offsets []int64 + for itr.Next() { + info := itr.WALSegment() + + // Restrict segments to within our index range. + if info.Index < d.minIndex { + continue // haven't reached minimum index, skip + } else if info.Index > d.maxIndex { + break // after max index, stop + } + + // Flush index & offsets when index changes. + if info.Index != index { + if info.Index != index+1 { // must be sequential + return &WALNotFoundError{Generation: d.generation, Index: index + 1} + } + + if len(offsets) > 0 { + d.input <- walDownloadInput{index: index, offsets: offsets} + offsets = make([]int64, 0) + } + + index = info.Index + } + + // Collect the segment offset; segments are appended to the WAL file in order. + offsets = append(offsets, info.Offset) + } + + // Ensure we read to the last index. + if index != d.maxIndex { + return &WALNotFoundError{Generation: d.generation, Index: index + 1} + } + + // Flush if we have remaining offsets. + if len(offsets) > 0 { + d.input <- walDownloadInput{index: index, offsets: offsets} + } + + return itr.Close() +} + +// N returns the number of WAL files returned by Next(). +func (d *WALDownloader) N() int { return d.n } + +// Next returns the index & local file path of the next downloaded WAL file. +func (d *WALDownloader) Next(ctx context.Context) (int, string, error) { + if d.err != nil { + return 0, "", d.err + } else if d.err = d.init(ctx); d.err != nil { + return 0, "", d.err + } + + select { + case <-ctx.Done(): + return 0, "", ctx.Err() + case <-d.ctx.Done(): + return 0, "", d.ctx.Err() + case v, ok := <-d.output: + if !ok { + return 0, "", io.EOF + } + + d.n++ + return v.index, v.path, v.err + } +} + +// downloader runs in a separate goroutine and downloads the next input index. +func (d *WALDownloader) downloader(ctx context.Context) error { + for { + select { + case <-ctx.Done(): + d.cond.Broadcast() + return ctx.Err() + + case input, ok := <-d.input: + if !ok { + return nil // no more input + } + + // Wait until next index equals input index and then send file to + // output to ensure sorted order. + if err := func() error { + walPath, err := d.downloadWAL(ctx, input.index, input.offsets) + + d.mu.Lock() + defer d.mu.Unlock() + + // Notify other downloader goroutines when we escape this + // anonymous function. + defer d.cond.Broadcast() + + // Keep looping until our index matches the next index to send. + for d.nextIndex != input.index { + if ctxErr := ctx.Err(); ctxErr != nil { + return ctxErr + } + d.cond.Wait() + } + + // Still under lock, wait until Next() requests the next index. + select { + case <-ctx.Done(): + return ctx.Err() + + case d.output <- walDownloadOutput{ + index: input.index, + path: walPath, + err: err, + }: + // At the last index, close out the output channel to notify + // the Next() method to return io.EOF. + if d.nextIndex == d.maxIndex { + close(d.output) + return nil + } + + // Update next expected index now that our send is successful. + d.nextIndex++ + } + + return err + }(); err != nil { + return err + } + } + } +} + +// downloadWAL sequentially downloads all the segments for a WAL index from the +// replica client and appends them to a single on-disk file. Returns the name +// of the on-disk file on success.
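To make the ordering scheme in downloader() above easier to follow, here is a stripped-down, standalone sketch of the same pattern (a shared next-index counter plus sync.Cond), with plain integers standing in for WAL downloads; all names here are illustrative and not part of the patch:

package main

import (
	"fmt"
	"math/rand"
	"sync"
	"time"
)

func main() {
	const n, workers = 8, 3

	var mu sync.Mutex
	cond := sync.NewCond(&mu)
	nextIndex := 0

	// Queue of work items, analogous to the downloader's input channel.
	input := make(chan int, n)
	for i := 0; i < n; i++ {
		input <- i
	}
	close(input)

	output := make(chan int)

	var wg sync.WaitGroup
	for w := 0; w < workers; w++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for i := range input {
				// Simulate work finishing out of order.
				time.Sleep(time.Duration(rand.Intn(10)) * time.Millisecond)

				mu.Lock()
				for nextIndex != i {
					cond.Wait() // not our turn yet; another worker must send first
				}
				output <- i // deliver results strictly in index order
				nextIndex++
				cond.Broadcast() // wake any workers waiting on a later index
				mu.Unlock()
			}
		}()
	}
	go func() { wg.Wait(); close(output) }()

	for i := range output {
		fmt.Println("received", i) // prints 0,1,2,... regardless of finish order
	}
}

As in the downloader, Broadcast is used rather than Signal because any of the waiting workers may be the one holding the next index.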
+func (d *WALDownloader) downloadWAL(ctx context.Context, index int, offsets []int64) (string, error) { + // Open handle to destination WAL path. + walPath := fmt.Sprintf("%s-%08x-wal", d.prefix, index) + f, err := internal.CreateFile(walPath, d.Mode, d.Uid, d.Gid) + if err != nil { + return "", err + } + defer f.Close() + + // Open readers for every segment in the WAL file, in order. + var written int64 + for _, offset := range offsets { + if err := func() error { + // Ensure next offset is our current position in the file. + if written != offset { + return fmt.Errorf("missing WAL offset: generation=%s index=%08x offset=%08x", d.generation, index, written) + } + + rd, err := d.client.WALSegmentReader(ctx, Pos{Generation: d.generation, Index: index, Offset: offset}) + if err != nil { + return fmt.Errorf("read WAL segment: %w", err) + } + defer rd.Close() + + n, err := io.Copy(f, lz4.NewReader(rd)) + if err != nil { + return fmt.Errorf("copy WAL segment: %w", err) + } + written += n + + return nil + }(); err != nil { + return "", err + } + } + + if err := f.Close(); err != nil { + return "", err + } + return walPath, nil +} + +type walDownloadInput struct { + index int + offsets []int64 +} + +type walDownloadOutput struct { + path string + index int + err error +} + +// WALNotFoundError is returned by WALDownloader if a WAL index is not found. +type WALNotFoundError struct { + Generation string + Index int +} + +// Error returns the error string. +func (e *WALNotFoundError) Error() string { + return fmt.Sprintf("wal not found: generation=%s index=%08x", e.Generation, e.Index) +} diff --git a/wal_downloader_test.go b/wal_downloader_test.go new file mode 100644 index 0000000..65d1c8b --- /dev/null +++ b/wal_downloader_test.go @@ -0,0 +1,534 @@ +package litestream_test + +import ( + "context" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/benbjohnson/litestream" + "github.com/benbjohnson/litestream/mock" +) + +// TestWALDownloader runs downloader tests against different levels of parallelism. +func TestWALDownloader(t *testing.T) { + for _, parallelism := range []int{1, 8, 1024} { + t.Run(fmt.Sprint(parallelism), func(t *testing.T) { + testWALDownloader(t, parallelism) + }) + } +} + +func testWALDownloader(t *testing.T, parallelism int) { + // Ensure WAL files can be downloaded from a file replica on disk.
+ t.Run("OK", func(t *testing.T) { + testDir := filepath.Join("testdata", "wal-downloader", "ok") + tempDir := t.TempDir() + + client := litestream.NewFileReplicaClient(testDir) + d := litestream.NewWALDownloader(client, filepath.Join(tempDir, "wal"), "0000000000000000", 0, 2) + defer d.Close() + + if index, filename, err := d.Next(context.Background()); err != nil { + t.Fatal(err) + } else if got, want := index, 0; got != want { + t.Fatalf("index=%d, want %d", got, want) + } else if !fileEqual(t, filepath.Join(testDir, "00000000.wal"), filename) { + t.Fatalf("output file mismatch: %s", filename) + } + + if index, filename, err := d.Next(context.Background()); err != nil { + t.Fatal(err) + } else if got, want := index, 1; got != want { + t.Fatalf("index=%d, want %d", got, want) + } else if !fileEqual(t, filepath.Join(testDir, "00000001.wal"), filename) { + t.Fatalf("output file mismatch: %s", filename) + } + + if index, filename, err := d.Next(context.Background()); err != nil { + t.Fatal(err) + } else if got, want := index, 2; got != want { + t.Fatalf("index=%d, want %d", got, want) + } else if !fileEqual(t, filepath.Join(testDir, "00000002.wal"), filename) { + t.Fatalf("output file mismatch: %s", filename) + } + + if _, _, err := d.Next(context.Background()); err != io.EOF { + t.Fatalf("unexpected error: %#v", err) + } else if got, want := d.N(), 3; got != want { + t.Fatalf("N=%d, want %d", got, want) + } + + if err := d.Close(); err != nil { + t.Fatal(err) + } + }) + + // Ensure a single WAL index can be downloaded. + t.Run("One", func(t *testing.T) { + testDir := filepath.Join("testdata", "wal-downloader", "one") + tempDir := t.TempDir() + + client := litestream.NewFileReplicaClient(testDir) + d := litestream.NewWALDownloader(client, filepath.Join(tempDir, "wal"), "0000000000000000", 0, 0) + defer d.Close() + + if index, filename, err := d.Next(context.Background()); err != nil { + t.Fatal(err) + } else if got, want := index, 0; got != want { + t.Fatalf("index=%d, want %d", got, want) + } else if !fileEqual(t, filepath.Join(testDir, "00000000.wal"), filename) { + t.Fatalf("output file mismatch: %s", filename) + } + + if _, _, err := d.Next(context.Background()); err != io.EOF { + t.Fatalf("unexpected error: %#v", err) + } else if err := d.Close(); err != nil { + t.Fatal(err) + } + }) + + // Ensure a subset of WAL indexes can be downloaded. + t.Run("Slice", func(t *testing.T) { + testDir := filepath.Join("testdata", "wal-downloader", "ok") + tempDir := t.TempDir() + + client := litestream.NewFileReplicaClient(testDir) + d := litestream.NewWALDownloader(client, filepath.Join(tempDir, "wal"), "0000000000000000", 1, 1) + defer d.Close() + + if index, filename, err := d.Next(context.Background()); err != nil { + t.Fatal(err) + } else if got, want := index, 1; got != want { + t.Fatalf("index=%d, want %d", got, want) + } else if !fileEqual(t, filepath.Join(testDir, "00000001.wal"), filename) { + t.Fatalf("output file mismatch: %s", filename) + } + + if _, _, err := d.Next(context.Background()); err != io.EOF { + t.Fatalf("unexpected error: %#v", err) + } else if err := d.Close(); err != nil { + t.Fatal(err) + } + }) + + // Ensure a subset of WAL indexes can be downloaded starting from zero. 
+ t.Run("SliceLeft", func(t *testing.T) { + testDir := filepath.Join("testdata", "wal-downloader", "ok") + tempDir := t.TempDir() + + client := litestream.NewFileReplicaClient(testDir) + d := litestream.NewWALDownloader(client, filepath.Join(tempDir, "wal"), "0000000000000000", 0, 1) + defer d.Close() + + if index, filename, err := d.Next(context.Background()); err != nil { + t.Fatal(err) + } else if got, want := index, 0; got != want { + t.Fatalf("index=%d, want %d", got, want) + } else if !fileEqual(t, filepath.Join(testDir, "00000000.wal"), filename) { + t.Fatalf("output file mismatch: %s", filename) + } + + if index, filename, err := d.Next(context.Background()); err != nil { + t.Fatal(err) + } else if got, want := index, 1; got != want { + t.Fatalf("index=%d, want %d", got, want) + } else if !fileEqual(t, filepath.Join(testDir, "00000001.wal"), filename) { + t.Fatalf("output file mismatch: %s", filename) + } + + if _, _, err := d.Next(context.Background()); err != io.EOF { + t.Fatalf("unexpected error: %#v", err) + } else if err := d.Close(); err != nil { + t.Fatal(err) + } + }) + + // Ensure a subset of WAL indexes can be downloaded ending at the last index. + t.Run("SliceRight", func(t *testing.T) { + testDir := filepath.Join("testdata", "wal-downloader", "ok") + tempDir := t.TempDir() + + client := litestream.NewFileReplicaClient(testDir) + d := litestream.NewWALDownloader(client, filepath.Join(tempDir, "wal"), "0000000000000000", 1, 2) + defer d.Close() + + if index, filename, err := d.Next(context.Background()); err != nil { + t.Fatal(err) + } else if got, want := index, 1; got != want { + t.Fatalf("index=%d, want %d", got, want) + } else if !fileEqual(t, filepath.Join(testDir, "00000001.wal"), filename) { + t.Fatalf("output file mismatch: %s", filename) + } + + if index, filename, err := d.Next(context.Background()); err != nil { + t.Fatal(err) + } else if got, want := index, 2; got != want { + t.Fatalf("index=%d, want %d", got, want) + } else if !fileEqual(t, filepath.Join(testDir, "00000002.wal"), filename) { + t.Fatalf("output file mismatch: %s", filename) + } + + if _, _, err := d.Next(context.Background()); err != io.EOF { + t.Fatalf("unexpected error: %#v", err) + } else if err := d.Close(); err != nil { + t.Fatal(err) + } + }) + + // Ensure a large, generated set of WAL files can be downloaded in the correct order. + t.Run("Large", func(t *testing.T) { + if testing.Short() { + t.Skip("short mode, skipping") + } + + // Generate WAL files. 
+ const n = 1000 + tempDir := t.TempDir() + for i := 0; i < n; i++ { + filename := filepath.Join(tempDir, "generations", "0000000000000000", "wal", fmt.Sprintf("%08x", i), "00000000.wal.lz4") + if err := os.MkdirAll(filepath.Dir(filename), 0777); err != nil { + t.Fatal(err) + } else if err := os.WriteFile(filename, compressLZ4(t, []byte(fmt.Sprint(i))), 0666); err != nil { + t.Fatal(err) + } + } + + client := litestream.NewFileReplicaClient(tempDir) + d := litestream.NewWALDownloader(client, filepath.Join(t.TempDir(), "wal"), "0000000000000000", 0, n-1) + d.Parallelism = parallelism + defer d.Close() + + for i := 0; i < n; i++ { + if index, filename, err := d.Next(context.Background()); err != nil { + t.Fatal(err) + } else if got, want := index, i; got != want { + t.Fatalf("index[%d]=%d, want %d", i, got, want) + } else if buf, err := os.ReadFile(filename); err != nil { + t.Fatal(err) + } else if got, want := fmt.Sprint(i), string(buf); got != want { + t.Fatalf("file[%d]=%q, want %q", i, got, want) + } + } + + if _, _, err := d.Next(context.Background()); err != io.EOF { + t.Fatalf("unexpected error: %#v", err) + } else if err := d.Close(); err != nil { + t.Fatal(err) + } + }) + + // Ensure a non-existent WAL directory returns error. + t.Run("ErrEmptyGenerationDir", func(t *testing.T) { + testDir := filepath.Join("testdata", "wal-downloader", "empty-generation-dir") + tempDir := t.TempDir() + + client := litestream.NewFileReplicaClient(testDir) + d := litestream.NewWALDownloader(client, filepath.Join(tempDir, "wal"), "0000000000000000", 0, 0) + defer d.Close() + + var e *litestream.WALNotFoundError + if _, _, err := d.Next(context.Background()); !errors.As(err, &e) { + t.Fatalf("unexpected error type: %#v", err) + } else if *e != (litestream.WALNotFoundError{Generation: "0000000000000000", Index: 0}) { + t.Fatalf("unexpected error: %#v", err) + } else if got, want := d.N(), 0; got != want { + t.Fatalf("N=%d, want %d", got, want) + } + + // Reinvoking Next() should return the same error. + if _, _, err := d.Next(context.Background()); !errors.As(err, &e) { + t.Fatalf("unexpected error type: %#v", err) + } else if *e != (litestream.WALNotFoundError{Generation: "0000000000000000", Index: 0}) { + t.Fatalf("unexpected error: %#v", err) + } + + // Close should return the same error. + if err := d.Close(); !errors.As(err, &e) { + t.Fatalf("unexpected error type: %#v", err) + } else if *e != (litestream.WALNotFoundError{Generation: "0000000000000000", Index: 0}) { + t.Fatalf("unexpected error: %#v", err) + } + }) + + // Ensure an empty WAL directory returns error. + t.Run("EmptyWALDir", func(t *testing.T) { + testDir := filepath.Join("testdata", "wal-downloader", "empty-wal-dir") + tempDir := t.TempDir() + + client := litestream.NewFileReplicaClient(testDir) + d := litestream.NewWALDownloader(client, filepath.Join(tempDir, "wal"), "0000000000000000", 0, 0) + defer d.Close() + + var e *litestream.WALNotFoundError + if _, _, err := d.Next(context.Background()); !errors.As(err, &e) { + t.Fatalf("unexpected error type: %#v", err) + } else if *e != (litestream.WALNotFoundError{Generation: "0000000000000000", Index: 0}) { + t.Fatalf("unexpected error: %#v", err) + } else if got, want := d.N(), 0; got != want { + t.Fatalf("N=%d, want %d", got, want) + } + }) + + // Ensure an empty WAL index directory returns an error.
+ t.Run("EmptyWALIndexDir", func(t *testing.T) { + testDir := filepath.Join("testdata", "wal-downloader", "empty-wal-index-dir") + tempDir := t.TempDir() + + client := litestream.NewFileReplicaClient(testDir) + d := litestream.NewWALDownloader(client, filepath.Join(tempDir, "wal"), "0000000000000000", 0, 0) + defer d.Close() + + var e *litestream.WALNotFoundError + if _, _, err := d.Next(context.Background()); !errors.As(err, &e) { + t.Fatalf("unexpected error type: %#v", err) + } else if *e != (litestream.WALNotFoundError{Generation: "0000000000000000", Index: 0}) { + t.Fatalf("unexpected error: %#v", err) + } else if got, want := d.N(), 0; got != want { + t.Fatalf("N=%d, want %d", got, want) + } + }) + + // Ensure closing downloader before calling Next() does not panic. + t.Run("CloseWithoutNext", func(t *testing.T) { + client := litestream.NewFileReplicaClient(t.TempDir()) + d := litestream.NewWALDownloader(client, filepath.Join(t.TempDir(), "wal"), "0000000000000000", 0, 2) + if err := d.Close(); err != nil { + t.Fatal(err) + } + }) + + // Ensure downloader closes successfully if invoked after Next() but before last index. + t.Run("CloseEarly", func(t *testing.T) { + testDir := filepath.Join("testdata", "wal-downloader", "ok") + tempDir := t.TempDir() + + client := litestream.NewFileReplicaClient(testDir) + d := litestream.NewWALDownloader(client, filepath.Join(tempDir, "wal"), "0000000000000000", 0, 2) + defer d.Close() + + if index, filename, err := d.Next(context.Background()); err != nil { + t.Fatal(err) + } else if got, want := index, 0; got != want { + t.Fatalf("index=%d, want %d", got, want) + } else if !fileEqual(t, filepath.Join(testDir, "00000000.wal"), filename) { + t.Fatalf("output file mismatch: %s", filename) + } + + if err := d.Close(); err != nil { + t.Fatal(err) + } + + if _, _, err := d.Next(context.Background()); err == nil { + t.Fatal("expected error") + } + }) + + // Ensure downloader without a minimum index returns an error. + t.Run("ErrMinIndexRequired", func(t *testing.T) { + d := litestream.NewWALDownloader(litestream.NewFileReplicaClient(t.TempDir()), t.TempDir(), "0000000000000000", -1, 2) + defer d.Close() + if _, _, err := d.Next(context.Background()); err == nil || err.Error() != `minimum index required` { + t.Fatalf("unexpected error: %#v", err) + } + }) + + // Ensure downloader without a maximum index returns an error. + t.Run("ErrMinIndexRequired", func(t *testing.T) { + d := litestream.NewWALDownloader(litestream.NewFileReplicaClient(t.TempDir()), t.TempDir(), "0000000000000000", 1, -1) + defer d.Close() + if _, _, err := d.Next(context.Background()); err == nil || err.Error() != `maximum index required` { + t.Fatalf("unexpected error: %#v", err) + } + }) + + // Ensure downloader with invalid min/max indexes returns an error. + t.Run("ErrMinIndexTooLarge", func(t *testing.T) { + d := litestream.NewWALDownloader(litestream.NewFileReplicaClient(t.TempDir()), t.TempDir(), "0000000000000000", 2, 1) + defer d.Close() + if _, _, err := d.Next(context.Background()); err == nil || err.Error() != `minimum index cannot be larger than maximum index` { + t.Fatalf("unexpected error: %#v", err) + } + }) + + // Ensure downloader returns error if parallelism field is invalid. 
+ t.Run("ErrParallelismRequired", func(t *testing.T) { + d := litestream.NewWALDownloader(litestream.NewFileReplicaClient(t.TempDir()), t.TempDir(), "0000000000000000", 0, 0) + d.Parallelism = -1 + defer d.Close() + if _, _, err := d.Next(context.Background()); err == nil || err.Error() != `parallelism must be at least one` { + t.Fatalf("unexpected error: %#v", err) + } + }) + + // Ensure a missing index at the beginning returns an error. + t.Run("ErrMissingInitialIndex", func(t *testing.T) { + testDir := filepath.Join("testdata", "wal-downloader", "missing-initial-index") + tempDir := t.TempDir() + + client := litestream.NewFileReplicaClient(testDir) + d := litestream.NewWALDownloader(client, filepath.Join(tempDir, "wal"), "0000000000000000", 0, 2) + defer d.Close() + + var e *litestream.WALNotFoundError + if _, _, err := d.Next(context.Background()); !errors.As(err, &e) { + t.Fatalf("unexpected error type: %#v", err) + } else if *e != (litestream.WALNotFoundError{Generation: "0000000000000000", Index: 0}) { + t.Fatalf("unexpected error: %#v", err) + } + }) + + // Ensure a gap in indicies returns an error. + t.Run("ErrMissingMiddleIndex", func(t *testing.T) { + testDir := filepath.Join("testdata", "wal-downloader", "missing-middle-index") + tempDir := t.TempDir() + + client := litestream.NewFileReplicaClient(testDir) + d := litestream.NewWALDownloader(client, filepath.Join(tempDir, "wal"), "0000000000000000", 0, 2) + defer d.Close() + + var e *litestream.WALNotFoundError + if _, _, err := d.Next(context.Background()); !errors.As(err, &e) { + t.Fatalf("unexpected error type: %#v", err) + } else if *e != (litestream.WALNotFoundError{Generation: "0000000000000000", Index: 1}) { + t.Fatalf("unexpected error: %#v", err) + } + }) + + // Ensure a missing index at the end returns an error. + t.Run("ErrMissingEndingIndex", func(t *testing.T) { + testDir := filepath.Join("testdata", "wal-downloader", "missing-ending-index") + tempDir := t.TempDir() + + client := litestream.NewFileReplicaClient(testDir) + d := litestream.NewWALDownloader(client, filepath.Join(tempDir, "wal"), "0000000000000000", 0, 2) + defer d.Close() + + var e *litestream.WALNotFoundError + if _, _, err := d.Next(context.Background()); !errors.As(err, &e) { + t.Fatalf("unexpected error type: %#v", err) + } else if *e != (litestream.WALNotFoundError{Generation: "0000000000000000", Index: 2}) { + t.Fatalf("unexpected error: %#v", err) + } + }) + + // Ensure downloader returns error WAL segment iterator creation returns error. + t.Run("ErrWALSegments", func(t *testing.T) { + var client mock.ReplicaClient + client.WALSegmentsFunc = func(ctx context.Context, generation string) (litestream.WALSegmentIterator, error) { + return nil, errors.New("marker") + } + + d := litestream.NewWALDownloader(&client, filepath.Join(t.TempDir(), "wal"), "0000000000000000", 0, 2) + defer d.Close() + + if _, _, err := d.Next(context.Background()); err == nil || err.Error() != `wal segments: marker` { + t.Fatalf("unexpected error: %#v", err) + } + }) + + // Ensure downloader returns error if WAL segments have a gap in offsets. 
+ t.Run("ErrMissingOffset", func(t *testing.T) { + testDir := filepath.Join("testdata", "wal-downloader", "missing-offset") + tempDir := t.TempDir() + + client := litestream.NewFileReplicaClient(testDir) + d := litestream.NewWALDownloader(client, filepath.Join(tempDir, "wal"), "0000000000000000", 0, 0) + defer d.Close() + + if _, _, err := d.Next(context.Background()); err == nil || err.Error() != `missing WAL offset: generation=0000000000000000 index=00000000 offset=00002050` { + t.Fatal(err) + } else if err := d.Close(); err != nil { + t.Fatal(err) + } + }) + + // Ensure downloader returns error if context is canceled. + t.Run("ErrContextCanceled", func(t *testing.T) { + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "wal-downloader", "ok")) + d := litestream.NewWALDownloader(client, filepath.Join(t.TempDir(), "wal"), "0000000000000000", 0, 2) + defer d.Close() + + ctx, cancel := context.WithCancel(context.Background()) + cancel() + + if _, _, err := d.Next(ctx); err != context.Canceled { + t.Fatal(err) + } else if err := d.Close(); err != nil { + t.Fatal(err) + } + }) + + // Ensure downloader returns error if error occurs while writing WAL to disk. + t.Run("ErrWriteWAL", func(t *testing.T) { + // Create a subdirectory that is not writable. + tempDir := t.TempDir() + if err := os.Mkdir(filepath.Join(tempDir, "nowrite"), 0000); err != nil { + t.Fatal(err) + } + + client := litestream.NewFileReplicaClient(filepath.Join("testdata", "wal-downloader", "err-write-wal")) + d := litestream.NewWALDownloader(client, filepath.Join(tempDir, "nowrite", "wal"), "0000000000000000", 0, 0) + defer d.Close() + + if _, _, err := d.Next(context.Background()); err == nil || !strings.Contains(err.Error(), `permission denied`) { + t.Fatal(err) + } else if err := d.Close(); err != nil { + t.Fatal(err) + } + }) + + // Ensure downloader returns error if error occurs while downloading WAL. + t.Run("ErrDownloadWAL", func(t *testing.T) { + fileClient := litestream.NewFileReplicaClient(filepath.Join("testdata", "wal-downloader", "err-download-wal")) + + var client mock.ReplicaClient + client.WALSegmentsFunc = fileClient.WALSegments + client.WALSegmentReaderFunc = func(ctx context.Context, pos litestream.Pos) (io.ReadCloser, error) { + return nil, fmt.Errorf("marker") + } + + d := litestream.NewWALDownloader(&client, filepath.Join(t.TempDir(), "wal"), "0000000000000000", 0, 0) + defer d.Close() + + if _, _, err := d.Next(context.Background()); err == nil || err.Error() != `read WAL segment: marker` { + t.Fatalf("unexpected error: %#v", err) + } else if err := d.Close(); err != nil { + t.Fatal(err) + } + }) + + // Ensure downloader returns error if reading the segment fails. 
+ t.Run("ErrReadWALSegment", func(t *testing.T) { + fileClient := litestream.NewFileReplicaClient(filepath.Join("testdata", "wal-downloader", "err-read-wal-segment")) + + var client mock.ReplicaClient + client.WALSegmentsFunc = fileClient.WALSegments + client.WALSegmentReaderFunc = func(ctx context.Context, pos litestream.Pos) (io.ReadCloser, error) { + var rc mock.ReadCloser + rc.ReadFunc = func([]byte) (int, error) { return 0, errors.New("marker") } + rc.CloseFunc = func() error { return nil } + return &rc, nil + } + + d := litestream.NewWALDownloader(&client, filepath.Join(t.TempDir(), "wal"), "0000000000000000", 0, 0) + defer d.Close() + + if _, _, err := d.Next(context.Background()); err == nil || err.Error() != `copy WAL segment: marker` { + t.Fatalf("unexpected error: %#v", err) + } else if err := d.Close(); err != nil { + t.Fatal(err) + } + }) +} + +func TestWALNotFoundError(t *testing.T) { + err := &litestream.WALNotFoundError{Generation: "0123456789abcdef", Index: 1000} + if got, want := err.Error(), `wal not found: generation=0123456789abcdef index=000003e8`; got != want { + t.Fatalf("Error()=%q, want %q", got, want) + } +}