Refactor replica system
s3/replica_client.go (new file, 746 lines added)
@@ -0,0 +1,746 @@
package s3

import (
	"context"
	"crypto/tls"
	"fmt"
	"io"
	"net/http"
	"os"
	"path"
	"sync"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/defaults"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/aws/aws-sdk-go/service/s3/s3manager"
	"github.com/benbjohnson/litestream"
	"github.com/benbjohnson/litestream/internal"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	"golang.org/x/sync/errgroup"
)

// MaxKeys is the number of keys S3 can operate on per batch.
const MaxKeys = 1000

// DefaultRegion is the region used if one is not specified.
const DefaultRegion = "us-east-1"

var _ litestream.ReplicaClient = (*ReplicaClient)(nil)

// ReplicaClient is a client for writing snapshots & WAL segments to S3.
type ReplicaClient struct {
	mu       sync.Mutex
	s3       *s3.S3 // s3 service
	uploader *s3manager.Uploader

	// AWS authentication keys.
	AccessKeyID     string
	SecretAccessKey string

	// S3 bucket information
	Region         string
	Bucket         string
	Path           string
	Endpoint       string
	ForcePathStyle bool
	SkipVerify     bool
}

// NewReplicaClient returns a new instance of ReplicaClient.
func NewReplicaClient() *ReplicaClient {
	return &ReplicaClient{}
}
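
For orientation, a minimal usage sketch of the client defined above (an editor's illustration, not part of this commit; the bucket name and key prefix are hypothetical). Calling Init explicitly is optional, since every exported operation calls it lazily:

	package main

	import (
		"context"
		"fmt"
		"log"

		"github.com/benbjohnson/litestream/s3"
	)

	func main() {
		c := s3.NewReplicaClient()
		c.Bucket = "my-backup-bucket" // hypothetical bucket name
		c.Path = "db"                 // hypothetical key prefix
		// Credentials resolve via the default AWS chain unless
		// AccessKeyID/SecretAccessKey are set explicitly.

		if err := c.Init(context.Background()); err != nil {
			log.Fatal(err)
		}

		generations, err := c.Generations(context.Background())
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println("generations:", generations)
	}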

// Type returns "s3" as the client type.
func (c *ReplicaClient) Type() string {
	return "s3"
}

// GenerationsDir returns the path to a generation root directory.
func (c *ReplicaClient) GenerationsDir() string {
	return path.Join(c.Path, "generations")
}

// GenerationDir returns the path to a generation's root directory.
func (c *ReplicaClient) GenerationDir(generation string) (string, error) {
	dir := c.GenerationsDir()
	if generation == "" {
		return "", fmt.Errorf("generation required")
	}
	return path.Join(dir, generation), nil
}

// SnapshotsDir returns the path to a generation's snapshot directory.
func (c *ReplicaClient) SnapshotsDir(generation string) (string, error) {
	dir, err := c.GenerationDir(generation)
	if err != nil {
		return "", err
	}
	return path.Join(dir, "snapshots"), nil
}

// SnapshotPath returns the path to a compressed snapshot file.
func (c *ReplicaClient) SnapshotPath(generation string, index int) (string, error) {
	dir, err := c.SnapshotsDir(generation)
	if err != nil {
		return "", err
	}
	return path.Join(dir, litestream.FormatSnapshotPath(index)), nil
}

// WALDir returns the path to a generation's WAL directory.
func (c *ReplicaClient) WALDir(generation string) (string, error) {
	dir, err := c.GenerationDir(generation)
	if err != nil {
		return "", err
	}
	return path.Join(dir, "wal"), nil
}

// WALSegmentPath returns the path to a WAL segment file.
func (c *ReplicaClient) WALSegmentPath(generation string, index int, offset int64) (string, error) {
	dir, err := c.WALDir(generation)
	if err != nil {
		return "", err
	}
	return path.Join(dir, litestream.FormatWALSegmentPath(index, offset)), nil
}

// Init initializes the connection to S3. No-op if already initialized.
func (c *ReplicaClient) Init(ctx context.Context) (err error) {
	c.mu.Lock()
	defer c.mu.Unlock()

	if c.s3 != nil {
		return nil
	}

	// Look up region if not specified and no endpoint is used.
	// Endpoints are typically used for non-S3 object stores and do not
	// necessarily require a region.
	region := c.Region
	if region == "" {
		if c.Endpoint == "" {
			if region, err = c.findBucketRegion(ctx, c.Bucket); err != nil {
				return fmt.Errorf("cannot lookup bucket region: %w", err)
			}
		} else {
			region = DefaultRegion // default for non-S3 object stores
		}
	}

	// Create new AWS session.
	config := c.config()
	if region != "" {
		config.Region = aws.String(region)
	}
	sess, err := session.NewSession(config)
	if err != nil {
		return fmt.Errorf("cannot create aws session: %w", err)
	}
	c.s3 = s3.New(sess)
	c.uploader = s3manager.NewUploader(sess)
	return nil
}

// config returns the AWS configuration. Uses the default credential chain
// unless a key/secret are explicitly set.
func (c *ReplicaClient) config() *aws.Config {
	config := defaults.Get().Config
	if c.AccessKeyID != "" || c.SecretAccessKey != "" {
		config.Credentials = credentials.NewStaticCredentials(c.AccessKeyID, c.SecretAccessKey, "")
	}
	if c.Endpoint != "" {
		config.Endpoint = aws.String(c.Endpoint)
	}
	if c.ForcePathStyle {
		config.S3ForcePathStyle = aws.Bool(c.ForcePathStyle)
	}
	if c.SkipVerify {
		config.HTTPClient = &http.Client{Transport: &http.Transport{
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		}}
	}

	return config
}
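
Because config() honors Endpoint, ForcePathStyle, and SkipVerify, the same client can point at S3-compatible object stores. A hedged sketch (editor's illustration, not part of this commit; the helper name, endpoint address, and credentials are placeholders):

	package example

	import "github.com/benbjohnson/litestream/s3"

	// newCompatClient shows the endpoint-related fields; every value here is a placeholder.
	func newCompatClient() *s3.ReplicaClient {
		c := s3.NewReplicaClient()
		c.Endpoint = "https://minio.example.com:9000"
		c.Region = "us-east-1" // optional; Init falls back to DefaultRegion when an endpoint is set
		c.Bucket = "litestream"
		c.Path = "db"
		c.AccessKeyID = "minioadmin"
		c.SecretAccessKey = "minioadmin"
		c.ForcePathStyle = true // many S3-compatible stores expect path-style URLs
		c.SkipVerify = true     // only for self-signed certificates in test setups
		return c
	}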

func (c *ReplicaClient) findBucketRegion(ctx context.Context, bucket string) (string, error) {
	// Connect to US standard region to fetch info.
	config := c.config()
	config.Region = aws.String(DefaultRegion)
	sess, err := session.NewSession(config)
	if err != nil {
		return "", err
	}

	// Fetch bucket location, if possible. Must be bucket owner.
	// This call can return a nil location which means it's in us-east-1.
	if out, err := s3.New(sess).GetBucketLocation(&s3.GetBucketLocationInput{
		Bucket: aws.String(bucket),
	}); err != nil {
		return "", err
	} else if out.LocationConstraint != nil {
		return *out.LocationConstraint, nil
	}
	return DefaultRegion, nil
}

// Generations returns a list of available generation names.
func (c *ReplicaClient) Generations(ctx context.Context) ([]string, error) {
	if err := c.Init(ctx); err != nil {
		return nil, err
	}

	var generations []string
	if err := c.s3.ListObjectsPagesWithContext(ctx, &s3.ListObjectsInput{
		Bucket:    aws.String(c.Bucket),
		Prefix:    aws.String(c.GenerationsDir() + "/"),
		Delimiter: aws.String("/"),
	}, func(page *s3.ListObjectsOutput, lastPage bool) bool {
		operationTotalCounterVec.WithLabelValues("LIST").Inc()

		for _, prefix := range page.CommonPrefixes {
			name := path.Base(*prefix.Prefix)
			if !litestream.IsGenerationName(name) {
				continue
			}
			generations = append(generations, name)
		}
		return true
	}); err != nil {
		return nil, err
	}

	return generations, nil
}

// DeleteGeneration deletes all snapshots & WAL segments within a generation.
func (c *ReplicaClient) DeleteGeneration(ctx context.Context, generation string) error {
	if err := c.Init(ctx); err != nil {
		return err
	}

	dir, err := c.GenerationDir(generation)
	if err != nil {
		return fmt.Errorf("cannot determine generation directory path: %w", err)
	}

	// Collect all files for the generation.
	var objIDs []*s3.ObjectIdentifier
	if err := c.s3.ListObjectsPagesWithContext(ctx, &s3.ListObjectsInput{
		Bucket: aws.String(c.Bucket),
		Prefix: aws.String(dir),
	}, func(page *s3.ListObjectsOutput, lastPage bool) bool {
		operationTotalCounterVec.WithLabelValues("LIST").Inc()

		for _, obj := range page.Contents {
			objIDs = append(objIDs, &s3.ObjectIdentifier{Key: obj.Key})
		}
		return true
	}); err != nil {
		return err
	}

	// Delete all files in batches.
	for len(objIDs) > 0 {
		n := MaxKeys
		if len(objIDs) < n {
			n = len(objIDs)
		}

		if _, err := c.s3.DeleteObjectsWithContext(ctx, &s3.DeleteObjectsInput{
			Bucket: aws.String(c.Bucket),
			Delete: &s3.Delete{Objects: objIDs[:n], Quiet: aws.Bool(true)},
		}); err != nil {
			return err
		}
		operationTotalCounterVec.WithLabelValues("DELETE").Inc()

		objIDs = objIDs[n:]
	}

	// log.Printf("%s(%s): retainer: deleting generation: %s", r.db.Path(), r.Name(), generation)

	return nil
}

// Snapshots returns an iterator over all available snapshots for a generation.
func (c *ReplicaClient) Snapshots(ctx context.Context, generation string) (litestream.SnapshotIterator, error) {
	if err := c.Init(ctx); err != nil {
		return nil, err
	}
	return newSnapshotIterator(ctx, c, generation), nil
}

// WriteSnapshot writes LZ4 compressed data from rd into a snapshot file on S3.
func (c *ReplicaClient) WriteSnapshot(ctx context.Context, generation string, index int, rd io.Reader) (info litestream.SnapshotInfo, err error) {
	if err := c.Init(ctx); err != nil {
		return info, err
	}

	key, err := c.SnapshotPath(generation, index)
	if err != nil {
		return info, fmt.Errorf("cannot determine snapshot path: %w", err)
	}
	startTime := time.Now()

	rc := internal.NewReadCounter(rd)
	if _, err := c.uploader.UploadWithContext(ctx, &s3manager.UploadInput{
		Bucket: aws.String(c.Bucket),
		Key:    aws.String(key),
		Body:   rc,
	}); err != nil {
		return info, err
	}

	operationTotalCounterVec.WithLabelValues("PUT").Inc()
	operationBytesCounterVec.WithLabelValues("PUT").Add(float64(rc.N()))

	// log.Printf("%s(%s): snapshot: creating %s/%08x t=%s", r.db.Path(), r.Name(), generation, index, time.Since(startTime).Truncate(time.Millisecond))

	return litestream.SnapshotInfo{
		Generation: generation,
		Index:      index,
		Size:       rc.N(),
		CreatedAt:  startTime.UTC(),
	}, nil
}

// SnapshotReader returns a reader for snapshot data at the given generation/index.
func (c *ReplicaClient) SnapshotReader(ctx context.Context, generation string, index int) (io.ReadCloser, error) {
	if err := c.Init(ctx); err != nil {
		return nil, err
	}

	key, err := c.SnapshotPath(generation, index)
	if err != nil {
		return nil, fmt.Errorf("cannot determine snapshot path: %w", err)
	}

	out, err := c.s3.GetObjectWithContext(ctx, &s3.GetObjectInput{
		Bucket: aws.String(c.Bucket),
		Key:    aws.String(key),
	})
	if isNotExists(err) {
		return nil, os.ErrNotExist
	} else if err != nil {
		return nil, err
	}
	operationTotalCounterVec.WithLabelValues("GET").Inc()
	operationBytesCounterVec.WithLabelValues("GET").Add(float64(*out.ContentLength))

	return out.Body, nil
}

// DeleteSnapshot deletes a snapshot with the given generation & index.
func (c *ReplicaClient) DeleteSnapshot(ctx context.Context, generation string, index int) error {
	if err := c.Init(ctx); err != nil {
		return err
	}

	key, err := c.SnapshotPath(generation, index)
	if err != nil {
		return fmt.Errorf("cannot determine snapshot path: %w", err)
	}

	if _, err := c.s3.DeleteObjectsWithContext(ctx, &s3.DeleteObjectsInput{
		Bucket: aws.String(c.Bucket),
		Delete: &s3.Delete{Objects: []*s3.ObjectIdentifier{{Key: &key}}, Quiet: aws.Bool(true)},
	}); err != nil {
		return err
	}

	operationTotalCounterVec.WithLabelValues("DELETE").Inc()
	return nil
}

// WALSegments returns an iterator over all available WAL files for a generation.
func (c *ReplicaClient) WALSegments(ctx context.Context, generation string) (litestream.WALSegmentIterator, error) {
	if err := c.Init(ctx); err != nil {
		return nil, err
	}
	return newWALSegmentIterator(ctx, c, generation), nil
}

// WriteWALSegment writes LZ4 compressed data from rd into a WAL segment file on S3.
func (c *ReplicaClient) WriteWALSegment(ctx context.Context, pos litestream.Pos, rd io.Reader) (info litestream.WALSegmentInfo, err error) {
	if err := c.Init(ctx); err != nil {
		return info, err
	}

	key, err := c.WALSegmentPath(pos.Generation, pos.Index, pos.Offset)
	if err != nil {
		return info, fmt.Errorf("cannot determine wal segment path: %w", err)
	}
	startTime := time.Now()

	rc := internal.NewReadCounter(rd)
	if _, err := c.uploader.UploadWithContext(ctx, &s3manager.UploadInput{
		Bucket: aws.String(c.Bucket),
		Key:    aws.String(key),
		Body:   rc,
	}); err != nil {
		return info, err
	}

	operationTotalCounterVec.WithLabelValues("PUT").Inc()
	operationBytesCounterVec.WithLabelValues("PUT").Add(float64(rc.N()))

	return litestream.WALSegmentInfo{
		Generation: pos.Generation,
		Index:      pos.Index,
		Offset:     pos.Offset,
		Size:       rc.N(),
		CreatedAt:  startTime.UTC(),
	}, nil
}

// WALSegmentReader returns a reader for a section of WAL data at the given index.
// Returns os.ErrNotExist if no matching index/offset is found.
func (c *ReplicaClient) WALSegmentReader(ctx context.Context, pos litestream.Pos) (io.ReadCloser, error) {
	if err := c.Init(ctx); err != nil {
		return nil, err
	}

	key, err := c.WALSegmentPath(pos.Generation, pos.Index, pos.Offset)
	if err != nil {
		return nil, fmt.Errorf("cannot determine wal segment path: %w", err)
	}

	out, err := c.s3.GetObjectWithContext(ctx, &s3.GetObjectInput{
		Bucket: aws.String(c.Bucket),
		Key:    aws.String(key),
	})
	if isNotExists(err) {
		return nil, os.ErrNotExist
	} else if err != nil {
		return nil, err
	}
	operationTotalCounterVec.WithLabelValues("GET").Inc()
	operationBytesCounterVec.WithLabelValues("GET").Add(float64(*out.ContentLength))

	return out.Body, nil
}

// DeleteWALSegments deletes WAL segments at the given positions.
func (c *ReplicaClient) DeleteWALSegments(ctx context.Context, a []litestream.Pos) error {
	if err := c.Init(ctx); err != nil {
		return err
	}

	objIDs := make([]*s3.ObjectIdentifier, MaxKeys)
	for len(a) > 0 {
		n := MaxKeys
		if len(a) < n {
			n = len(a)
		}

		// Generate a batch of object IDs for deleting the WAL segments.
		for i, pos := range a[:n] {
			key, err := c.WALSegmentPath(pos.Generation, pos.Index, pos.Offset)
			if err != nil {
				return fmt.Errorf("cannot determine wal segment path: %w", err)
			}
			objIDs[i] = &s3.ObjectIdentifier{Key: &key}
		}

		// Delete S3 objects in bulk.
		if _, err := c.s3.DeleteObjectsWithContext(ctx, &s3.DeleteObjectsInput{
			Bucket: aws.String(c.Bucket),
			Delete: &s3.Delete{Objects: objIDs[:n], Quiet: aws.Bool(true)},
		}); err != nil {
			return err
		}

		operationTotalCounterVec.WithLabelValues("DELETE").Inc()

		a = a[n:]
	}

	return nil
}

// DeleteAll deletes everything on the remote path. Mainly used for testing.
func (c *ReplicaClient) DeleteAll(ctx context.Context) error {
	if err := c.Init(ctx); err != nil {
		return err
	}

	prefix := c.Path
	if prefix != "" {
		prefix += "/"
	}

	// Collect all files under the path prefix.
	var objIDs []*s3.ObjectIdentifier
	if err := c.s3.ListObjectsPagesWithContext(ctx, &s3.ListObjectsInput{
		Bucket: aws.String(c.Bucket),
		Prefix: aws.String(prefix),
	}, func(page *s3.ListObjectsOutput, lastPage bool) bool {
		operationTotalCounterVec.WithLabelValues("LIST").Inc()

		for _, obj := range page.Contents {
			objIDs = append(objIDs, &s3.ObjectIdentifier{Key: obj.Key})
		}
		return true
	}); err != nil {
		return err
	}

	// Delete all files in batches.
	for len(objIDs) > 0 {
		n := MaxKeys
		if len(objIDs) < n {
			n = len(objIDs)
		}

		if _, err := c.s3.DeleteObjectsWithContext(ctx, &s3.DeleteObjectsInput{
			Bucket: aws.String(c.Bucket),
			Delete: &s3.Delete{Objects: objIDs[:n], Quiet: aws.Bool(true)},
		}); err != nil {
			return err
		}
		operationTotalCounterVec.WithLabelValues("DELETE").Inc()

		objIDs = objIDs[n:]
	}

	return nil
}

type snapshotIterator struct {
	client     *ReplicaClient
	generation string

	ch     chan litestream.SnapshotInfo
	g      errgroup.Group
	ctx    context.Context
	cancel func()

	info litestream.SnapshotInfo
	err  error
}

func newSnapshotIterator(ctx context.Context, client *ReplicaClient, generation string) *snapshotIterator {
	itr := &snapshotIterator{
		client:     client,
		generation: generation,
		ch:         make(chan litestream.SnapshotInfo),
	}

	itr.ctx, itr.cancel = context.WithCancel(ctx)
	itr.g.Go(itr.fetch)

	return itr
}

// fetch runs in a separate goroutine to fetch pages of objects and stream them to a channel.
func (itr *snapshotIterator) fetch() error {
	defer close(itr.ch)

	dir, err := itr.client.SnapshotsDir(itr.generation)
	if err != nil {
		return fmt.Errorf("cannot determine snapshot directory path: %w", err)
	}

	return itr.client.s3.ListObjectsPagesWithContext(itr.ctx, &s3.ListObjectsInput{
		Bucket:    aws.String(itr.client.Bucket),
		Prefix:    aws.String(dir + "/"),
		Delimiter: aws.String("/"),
	}, func(page *s3.ListObjectsOutput, lastPage bool) bool {
		operationTotalCounterVec.WithLabelValues("LIST").Inc()

		for _, obj := range page.Contents {
			key := path.Base(*obj.Key)
			index, err := litestream.ParseSnapshotPath(key)
			if err != nil {
				continue
			}

			info := litestream.SnapshotInfo{
				Generation: itr.generation,
				Index:      index,
				Size:       *obj.Size,
				CreatedAt:  obj.LastModified.UTC(),
			}

			select {
			case <-itr.ctx.Done():
			case itr.ch <- info:
			}
		}
		return true
	})
}

func (itr *snapshotIterator) Close() (err error) {
	err = itr.err

	// Cancel context and wait for error group to finish.
	itr.cancel()
	if e := itr.g.Wait(); e != nil && err == nil {
		err = e
	}

	return err
}

func (itr *snapshotIterator) Next() bool {
	// Exit if an error has already occurred.
	if itr.err != nil {
		return false
	}

	// Return false if context was canceled or if there are no more snapshots.
	// Otherwise fetch the next snapshot and store it on the iterator.
	select {
	case <-itr.ctx.Done():
		return false
	case info, ok := <-itr.ch:
		if !ok {
			return false
		}
		itr.info = info
		return true
	}
}

func (itr *snapshotIterator) Err() error { return itr.err }

func (itr *snapshotIterator) Snapshot() litestream.SnapshotInfo {
	return itr.info
}

type walSegmentIterator struct {
	client     *ReplicaClient
	generation string

	ch     chan litestream.WALSegmentInfo
	g      errgroup.Group
	ctx    context.Context
	cancel func()

	info litestream.WALSegmentInfo
	err  error
}

func newWALSegmentIterator(ctx context.Context, client *ReplicaClient, generation string) *walSegmentIterator {
	itr := &walSegmentIterator{
		client:     client,
		generation: generation,
		ch:         make(chan litestream.WALSegmentInfo),
	}

	itr.ctx, itr.cancel = context.WithCancel(ctx)
	itr.g.Go(itr.fetch)

	return itr
}

// fetch runs in a separate goroutine to fetch pages of objects and stream them to a channel.
func (itr *walSegmentIterator) fetch() error {
	defer close(itr.ch)

	dir, err := itr.client.WALDir(itr.generation)
	if err != nil {
		return fmt.Errorf("cannot determine wal directory path: %w", err)
	}

	return itr.client.s3.ListObjectsPagesWithContext(itr.ctx, &s3.ListObjectsInput{
		Bucket:    aws.String(itr.client.Bucket),
		Prefix:    aws.String(dir + "/"),
		Delimiter: aws.String("/"),
	}, func(page *s3.ListObjectsOutput, lastPage bool) bool {
		operationTotalCounterVec.WithLabelValues("LIST").Inc()

		for _, obj := range page.Contents {
			key := path.Base(*obj.Key)
			index, offset, err := litestream.ParseWALSegmentPath(key)
			if err != nil {
				continue
			}

			info := litestream.WALSegmentInfo{
				Generation: itr.generation,
				Index:      index,
				Offset:     offset,
				Size:       *obj.Size,
				CreatedAt:  obj.LastModified.UTC(),
			}

			select {
			case <-itr.ctx.Done():
				return false
			case itr.ch <- info:
			}
		}
		return true
	})
}

func (itr *walSegmentIterator) Close() (err error) {
	err = itr.err

	// Cancel context and wait for error group to finish.
	itr.cancel()
	if e := itr.g.Wait(); e != nil && err == nil {
		err = e
	}

	return err
}

func (itr *walSegmentIterator) Next() bool {
	// Exit if an error has already occurred.
	if itr.err != nil {
		return false
	}

	// Return false if context was canceled or if there are no more segments.
	// Otherwise fetch the next segment and store it on the iterator.
	select {
	case <-itr.ctx.Done():
		return false
	case info, ok := <-itr.ch:
		if !ok {
			return false
		}
		itr.info = info
		return true
	}
}

func (itr *walSegmentIterator) Err() error { return itr.err }

func (itr *walSegmentIterator) WALSegment() litestream.WALSegmentInfo {
	return itr.info
}

func isNotExists(err error) bool {
	switch err := err.(type) {
	case awserr.Error:
		return err.Code() == `NoSuchKey`
	default:
		return false
	}
}

// S3 metrics.
var (
	operationTotalCounterVec = promauto.NewCounterVec(prometheus.CounterOpts{
		Name: "litestream_s3_operation_total",
		Help: "The number of S3 operations performed",
	}, []string{"type"})

	operationBytesCounterVec = promauto.NewCounterVec(prometheus.CounterOpts{
		Name: "litestream_s3_operation_bytes",
		Help: "The number of bytes used by S3 operations",
	}, []string{"type"})
)
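
The two promauto counters above register themselves with the default Prometheus registry at package load, so they only need an HTTP endpoint to be scraped. A minimal sketch (editor's illustration; the metrics path and listen address are arbitrary choices, not from this commit):

	package main

	import (
		"log"
		"net/http"

		"github.com/prometheus/client_golang/prometheus/promhttp"
	)

	func main() {
		// Serves litestream_s3_operation_total and litestream_s3_operation_bytes
		// along with everything else registered on the default registry.
		http.Handle("/metrics", promhttp.Handler())
		log.Fatal(http.ListenAndServe(":9090", nil))
	}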

s3/replica_client_test.go (new file, 605 lines added)
@@ -0,0 +1,605 @@
package s3_test

import (
	"context"
	"flag"
	"fmt"
	"io/ioutil"
	"math/rand"
	"os"
	"path"
	"reflect"
	"sort"
	"strings"
	"testing"

	"github.com/benbjohnson/litestream"
	"github.com/benbjohnson/litestream/s3"
)

var (
	// Enables integration tests.
	integration = flag.Bool("integration", false, "")

	// Replica client settings
	accessKeyID     = flag.String("access-key-id", os.Getenv("LITESTREAM_S3_ACCESS_KEY_ID"), "")
	secretAccessKey = flag.String("secret-access-key", os.Getenv("LITESTREAM_S3_SECRET_ACCESS_KEY"), "")
	region          = flag.String("region", os.Getenv("LITESTREAM_S3_REGION"), "")
	bucket          = flag.String("bucket", os.Getenv("LITESTREAM_S3_BUCKET"), "")
	pathFlag        = flag.String("path", os.Getenv("LITESTREAM_S3_PATH"), "")
	endpoint        = flag.String("endpoint", os.Getenv("LITESTREAM_S3_ENDPOINT"), "")
	forcePathStyle  = flag.Bool("force-path-style", os.Getenv("LITESTREAM_S3_FORCE_PATH_STYLE") == "true", "")
	skipVerify      = flag.Bool("skip-verify", os.Getenv("LITESTREAM_S3_SKIP_VERIFY") == "true", "")
)
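
The flags above default to the LITESTREAM_S3_* environment variables, so once those are exported an integration run only needs the -integration switch; for example (editor's note; the bucket and region values are placeholders):

	go test ./s3 -integration -bucket my-test-bucket -region us-east-1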

func TestReplicaClient_Type(t *testing.T) {
	if got, want := s3.NewReplicaClient().Type(), "s3"; got != want {
		t.Fatalf("Type()=%v, want %v", got, want)
	}
}

func TestReplicaClient_GenerationsDir(t *testing.T) {
	t.Run("OK", func(t *testing.T) {
		c := s3.NewReplicaClient()
		c.Path = "foo"
		if got, want := c.GenerationsDir(), "foo/generations"; got != want {
			t.Fatalf("GenerationsDir()=%v, want %v", got, want)
		}
	})
	t.Run("NoPath", func(t *testing.T) {
		if got, want := s3.NewReplicaClient().GenerationsDir(), "generations"; got != want {
			t.Fatalf("GenerationsDir()=%v, want %v", got, want)
		}
	})
}

func TestReplicaClient_GenerationDir(t *testing.T) {
	t.Run("OK", func(t *testing.T) {
		c := s3.NewReplicaClient()
		c.Path = "foo"
		if got, err := c.GenerationDir("0123456701234567"); err != nil {
			t.Fatal(err)
		} else if want := "foo/generations/0123456701234567"; got != want {
			t.Fatalf("GenerationDir()=%v, want %v", got, want)
		}
	})
	t.Run("ErrNoGeneration", func(t *testing.T) {
		if _, err := s3.NewReplicaClient().GenerationDir(""); err == nil || err.Error() != `generation required` {
			t.Fatalf("unexpected error: %v", err)
		}
	})
}

func TestReplicaClient_SnapshotsDir(t *testing.T) {
	t.Run("OK", func(t *testing.T) {
		c := s3.NewReplicaClient()
		c.Path = "foo"
		if got, err := c.SnapshotsDir("0123456701234567"); err != nil {
			t.Fatal(err)
		} else if want := "foo/generations/0123456701234567/snapshots"; got != want {
			t.Fatalf("SnapshotsDir()=%v, want %v", got, want)
		}
	})
	t.Run("ErrNoGeneration", func(t *testing.T) {
		if _, err := s3.NewReplicaClient().SnapshotsDir(""); err == nil || err.Error() != `generation required` {
			t.Fatalf("unexpected error: %v", err)
		}
	})
}

func TestReplicaClient_SnapshotPath(t *testing.T) {
	t.Run("OK", func(t *testing.T) {
		c := s3.NewReplicaClient()
		c.Path = "foo"
		if got, err := c.SnapshotPath("0123456701234567", 1000); err != nil {
			t.Fatal(err)
		} else if want := "foo/generations/0123456701234567/snapshots/000003e8.snapshot.lz4"; got != want {
			t.Fatalf("SnapshotPath()=%v, want %v", got, want)
		}
	})
	t.Run("ErrNoGeneration", func(t *testing.T) {
		if _, err := s3.NewReplicaClient().SnapshotPath("", 1000); err == nil || err.Error() != `generation required` {
			t.Fatalf("unexpected error: %v", err)
		}
	})
}

func TestReplicaClient_WALDir(t *testing.T) {
	t.Run("OK", func(t *testing.T) {
		c := s3.NewReplicaClient()
		c.Path = "foo"
		if got, err := c.WALDir("0123456701234567"); err != nil {
			t.Fatal(err)
		} else if want := "foo/generations/0123456701234567/wal"; got != want {
			t.Fatalf("WALDir()=%v, want %v", got, want)
		}
	})
	t.Run("ErrNoGeneration", func(t *testing.T) {
		if _, err := s3.NewReplicaClient().WALDir(""); err == nil || err.Error() != `generation required` {
			t.Fatalf("unexpected error: %v", err)
		}
	})
}

func TestReplicaClient_WALSegmentPath(t *testing.T) {
	t.Run("OK", func(t *testing.T) {
		c := s3.NewReplicaClient()
		c.Path = "foo"
		if got, err := c.WALSegmentPath("0123456701234567", 1000, 1001); err != nil {
			t.Fatal(err)
		} else if want := "foo/generations/0123456701234567/wal/000003e8_000003e9.wal.lz4"; got != want {
			t.Fatalf("WALPath()=%v, want %v", got, want)
		}
	})
	t.Run("ErrNoGeneration", func(t *testing.T) {
		if _, err := s3.NewReplicaClient().WALSegmentPath("", 1000, 0); err == nil || err.Error() != `generation required` {
			t.Fatalf("unexpected error: %v", err)
		}
	})
}

func TestReplicaClient_Generations(t *testing.T) {
	t.Run("OK", func(t *testing.T) {
		t.Parallel()

		c := NewIntegrationReplicaClient(t)
		defer MustDeleteAll(t, c)

		// Write snapshots.
		if _, err := c.WriteSnapshot(context.Background(), "5efbd8d042012dca", 0, strings.NewReader(`foo`)); err != nil {
			t.Fatal(err)
		} else if _, err := c.WriteSnapshot(context.Background(), "b16ddcf5c697540f", 0, strings.NewReader(`bar`)); err != nil {
			t.Fatal(err)
		} else if _, err := c.WriteSnapshot(context.Background(), "155fe292f8333c72", 0, strings.NewReader(`baz`)); err != nil {
			t.Fatal(err)
		}

		// Verify returned generations.
		if got, err := c.Generations(context.Background()); err != nil {
			t.Fatal(err)
		} else if want := []string{"155fe292f8333c72", "5efbd8d042012dca", "b16ddcf5c697540f"}; !reflect.DeepEqual(got, want) {
			t.Fatalf("Generations()=%v, want %v", got, want)
		}
	})

	t.Run("NoGenerationsDir", func(t *testing.T) {
		t.Parallel()

		c := NewIntegrationReplicaClient(t)
		defer MustDeleteAll(t, c)
		if generations, err := c.Generations(context.Background()); err != nil {
			t.Fatal(err)
		} else if got, want := len(generations), 0; got != want {
			t.Fatalf("len(Generations())=%v, want %v", got, want)
		}
	})
}

func TestReplicaClient_Snapshots(t *testing.T) {
	t.Run("OK", func(t *testing.T) {
		t.Parallel()

		c := NewIntegrationReplicaClient(t)
		defer MustDeleteAll(t, c)

		// Write snapshots.
		if _, err := c.WriteSnapshot(context.Background(), "5efbd8d042012dca", 1, strings.NewReader(``)); err != nil {
			t.Fatal(err)
		} else if _, err := c.WriteSnapshot(context.Background(), "b16ddcf5c697540f", 5, strings.NewReader(`x`)); err != nil {
			t.Fatal(err)
		} else if _, err := c.WriteSnapshot(context.Background(), "b16ddcf5c697540f", 10, strings.NewReader(`xyz`)); err != nil {
			t.Fatal(err)
		}

		// Fetch all snapshots by generation.
		itr, err := c.Snapshots(context.Background(), "b16ddcf5c697540f")
		if err != nil {
			t.Fatal(err)
		}
		defer itr.Close()

		// Read all snapshots into a slice so they can be sorted.
		a, err := litestream.SliceSnapshotIterator(itr)
		if err != nil {
			t.Fatal(err)
		} else if got, want := len(a), 2; got != want {
			t.Fatalf("len=%v, want %v", got, want)
		}
		sort.Sort(litestream.SnapshotInfoSlice(a))

		// Verify first snapshot metadata.
		if got, want := a[0].Generation, "b16ddcf5c697540f"; got != want {
			t.Fatalf("Generation=%v, want %v", got, want)
		} else if got, want := a[0].Index, 5; got != want {
			t.Fatalf("Index=%v, want %v", got, want)
		} else if got, want := a[0].Size, int64(1); got != want {
			t.Fatalf("Size=%v, want %v", got, want)
		} else if a[0].CreatedAt.IsZero() {
			t.Fatalf("expected CreatedAt")
		}

		// Verify second snapshot metadata.
		if got, want := a[1].Generation, "b16ddcf5c697540f"; got != want {
			t.Fatalf("Generation=%v, want %v", got, want)
		} else if got, want := a[1].Index, 0xA; got != want {
			t.Fatalf("Index=%v, want %v", got, want)
		} else if got, want := a[1].Size, int64(3); got != want {
			t.Fatalf("Size=%v, want %v", got, want)
		} else if a[1].CreatedAt.IsZero() {
			t.Fatalf("expected CreatedAt")
		}

		// Ensure close is clean.
		if err := itr.Close(); err != nil {
			t.Fatal(err)
		}
	})

	t.Run("NoGenerationDir", func(t *testing.T) {
		t.Parallel()

		c := NewIntegrationReplicaClient(t)
		defer MustDeleteAll(t, c)

		itr, err := c.Snapshots(context.Background(), "5efbd8d042012dca")
		if err != nil {
			t.Fatal(err)
		}
		defer itr.Close()

		if itr.Next() {
			t.Fatal("expected no snapshots")
		}
	})

	t.Run("ErrNoGeneration", func(t *testing.T) {
		t.Parallel()

		c := NewIntegrationReplicaClient(t)
		defer MustDeleteAll(t, c)

		if itr, err := c.Snapshots(context.Background(), ""); err != nil {
			t.Fatal(err)
		} else if err := itr.Close(); err == nil || err.Error() != `cannot determine snapshot directory path: generation required` {
			t.Fatalf("unexpected error: %v", err)
		}
	})
}

func TestReplicaClient_WriteSnapshot(t *testing.T) {
	t.Run("OK", func(t *testing.T) {
		t.Parallel()

		c := NewIntegrationReplicaClient(t)
		defer MustDeleteAll(t, c)

		if _, err := c.WriteSnapshot(context.Background(), "b16ddcf5c697540f", 1000, strings.NewReader(`foobar`)); err != nil {
			t.Fatal(err)
		}

		if r, err := c.SnapshotReader(context.Background(), "b16ddcf5c697540f", 1000); err != nil {
			t.Fatal(err)
		} else if buf, err := ioutil.ReadAll(r); err != nil {
			t.Fatal(err)
		} else if err := r.Close(); err != nil {
			t.Fatal(err)
		} else if got, want := string(buf), `foobar`; got != want {
			t.Fatalf("data=%q, want %q", got, want)
		}
	})

	t.Run("ErrNoGeneration", func(t *testing.T) {
		t.Parallel()
		if _, err := NewIntegrationReplicaClient(t).WriteSnapshot(context.Background(), "", 0, nil); err == nil || err.Error() != `cannot determine snapshot path: generation required` {
			t.Fatalf("unexpected error: %v", err)
		}
	})
}

func TestReplicaClient_SnapshotReader(t *testing.T) {
	t.Run("OK", func(t *testing.T) {
		t.Parallel()

		c := NewIntegrationReplicaClient(t)
		defer MustDeleteAll(t, c)

		if _, err := c.WriteSnapshot(context.Background(), "5efbd8d042012dca", 10, strings.NewReader(`foo`)); err != nil {
			t.Fatal(err)
		}

		r, err := c.SnapshotReader(context.Background(), "5efbd8d042012dca", 10)
		if err != nil {
			t.Fatal(err)
		}
		defer r.Close()

		if buf, err := ioutil.ReadAll(r); err != nil {
			t.Fatal(err)
		} else if got, want := string(buf), "foo"; got != want {
			t.Fatalf("ReadAll=%v, want %v", got, want)
		}
	})

	t.Run("ErrNotFound", func(t *testing.T) {
		t.Parallel()

		c := NewIntegrationReplicaClient(t)
		defer MustDeleteAll(t, c)

		if _, err := c.SnapshotReader(context.Background(), "5efbd8d042012dca", 1); !os.IsNotExist(err) {
			t.Fatalf("expected not exist, got %#v", err)
		}
	})

	t.Run("ErrGeneration", func(t *testing.T) {
		t.Parallel()

		c := NewIntegrationReplicaClient(t)
		defer MustDeleteAll(t, c)

		if _, err := c.SnapshotReader(context.Background(), "", 1); err == nil || err.Error() != `cannot determine snapshot path: generation required` {
			t.Fatalf("unexpected error: %v", err)
		}
	})
}

func TestReplicaClient_WALs(t *testing.T) {
	t.Run("OK", func(t *testing.T) {
		t.Parallel()

		c := NewIntegrationReplicaClient(t)
		defer MustDeleteAll(t, c)

		if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "5efbd8d042012dca", Index: 1, Offset: 0}, strings.NewReader(``)); err != nil {
			t.Fatal(err)
		}
		if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "b16ddcf5c697540f", Index: 2, Offset: 0}, strings.NewReader(`12345`)); err != nil {
			t.Fatal(err)
		} else if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "b16ddcf5c697540f", Index: 2, Offset: 5}, strings.NewReader(`67`)); err != nil {
			t.Fatal(err)
		} else if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "b16ddcf5c697540f", Index: 3, Offset: 0}, strings.NewReader(`xyz`)); err != nil {
			t.Fatal(err)
		}

		itr, err := c.WALSegments(context.Background(), "b16ddcf5c697540f")
		if err != nil {
			t.Fatal(err)
		}
		defer itr.Close()

		// Read all WAL segment files into a slice so they can be sorted.
		a, err := litestream.SliceWALSegmentIterator(itr)
		if err != nil {
			t.Fatal(err)
		} else if got, want := len(a), 3; got != want {
			t.Fatalf("len=%v, want %v", got, want)
		}
		sort.Sort(litestream.WALSegmentInfoSlice(a))

		// Verify first WAL segment metadata.
		if got, want := a[0].Generation, "b16ddcf5c697540f"; got != want {
			t.Fatalf("Generation=%v, want %v", got, want)
		} else if got, want := a[0].Index, 2; got != want {
			t.Fatalf("Index=%v, want %v", got, want)
		} else if got, want := a[0].Offset, int64(0); got != want {
			t.Fatalf("Offset=%v, want %v", got, want)
		} else if got, want := a[0].Size, int64(5); got != want {
			t.Fatalf("Size=%v, want %v", got, want)
		} else if a[0].CreatedAt.IsZero() {
			t.Fatalf("expected CreatedAt")
		}

		// Verify second WAL segment metadata.
		if got, want := a[1].Generation, "b16ddcf5c697540f"; got != want {
			t.Fatalf("Generation=%v, want %v", got, want)
		} else if got, want := a[1].Index, 2; got != want {
			t.Fatalf("Index=%v, want %v", got, want)
		} else if got, want := a[1].Offset, int64(5); got != want {
			t.Fatalf("Offset=%v, want %v", got, want)
		} else if got, want := a[1].Size, int64(2); got != want {
			t.Fatalf("Size=%v, want %v", got, want)
		} else if a[1].CreatedAt.IsZero() {
			t.Fatalf("expected CreatedAt")
		}

		// Verify third WAL segment metadata.
		if got, want := a[2].Generation, "b16ddcf5c697540f"; got != want {
			t.Fatalf("Generation=%v, want %v", got, want)
		} else if got, want := a[2].Index, 3; got != want {
			t.Fatalf("Index=%v, want %v", got, want)
		} else if got, want := a[2].Offset, int64(0); got != want {
			t.Fatalf("Offset=%v, want %v", got, want)
		} else if got, want := a[2].Size, int64(3); got != want {
			t.Fatalf("Size=%v, want %v", got, want)
		} else if a[2].CreatedAt.IsZero() {
			t.Fatalf("expected CreatedAt")
		}

		// Ensure close is clean.
		if err := itr.Close(); err != nil {
			t.Fatal(err)
		}
	})

	t.Run("NoGenerationDir", func(t *testing.T) {
		t.Parallel()

		c := NewIntegrationReplicaClient(t)
		defer MustDeleteAll(t, c)

		itr, err := c.WALSegments(context.Background(), "5efbd8d042012dca")
		if err != nil {
			t.Fatal(err)
		}
		defer itr.Close()

		if itr.Next() {
			t.Fatal("expected no wal files")
		}
	})

	t.Run("NoWALs", func(t *testing.T) {
		t.Parallel()

		c := NewIntegrationReplicaClient(t)
		defer MustDeleteAll(t, c)

		if _, err := c.WriteSnapshot(context.Background(), "5efbd8d042012dca", 0, strings.NewReader(`foo`)); err != nil {
			t.Fatal(err)
		}

		itr, err := c.WALSegments(context.Background(), "5efbd8d042012dca")
		if err != nil {
			t.Fatal(err)
		}
		defer itr.Close()

		if itr.Next() {
			t.Fatal("expected no wal files")
		}
	})

	t.Run("ErrNoGeneration", func(t *testing.T) {
		t.Parallel()

		c := NewIntegrationReplicaClient(t)
		defer MustDeleteAll(t, c)

		if itr, err := c.WALSegments(context.Background(), ""); err != nil {
			t.Fatal(err)
		} else if err := itr.Close(); err == nil || err.Error() != `cannot determine wal directory path: generation required` {
			t.Fatalf("unexpected error: %v", err)
		}
	})
}

func TestReplicaClient_WriteWALSegment(t *testing.T) {
	t.Run("OK", func(t *testing.T) {
		t.Parallel()

		c := NewIntegrationReplicaClient(t)
		defer MustDeleteAll(t, c)

		if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "b16ddcf5c697540f", Index: 1000, Offset: 2000}, strings.NewReader(`foobar`)); err != nil {
			t.Fatal(err)
		}

		if r, err := c.WALSegmentReader(context.Background(), litestream.Pos{Generation: "b16ddcf5c697540f", Index: 1000, Offset: 2000}); err != nil {
			t.Fatal(err)
		} else if buf, err := ioutil.ReadAll(r); err != nil {
			t.Fatal(err)
		} else if err := r.Close(); err != nil {
			t.Fatal(err)
		} else if got, want := string(buf), `foobar`; got != want {
			t.Fatalf("data=%q, want %q", got, want)
		}
	})

	t.Run("ErrNoGeneration", func(t *testing.T) {
		t.Parallel()
		if _, err := NewIntegrationReplicaClient(t).WriteWALSegment(context.Background(), litestream.Pos{Generation: "", Index: 0, Offset: 0}, nil); err == nil || err.Error() != `cannot determine wal segment path: generation required` {
			t.Fatalf("unexpected error: %v", err)
		}
	})
}

func TestReplicaClient_WALReader(t *testing.T) {
	t.Parallel()

	c := NewIntegrationReplicaClient(t)
	defer MustDeleteAll(t, c)

	if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "5efbd8d042012dca", Index: 10, Offset: 5}, strings.NewReader(`foobar`)); err != nil {
		t.Fatal(err)
	}

	t.Run("OK", func(t *testing.T) {
		r, err := c.WALSegmentReader(context.Background(), litestream.Pos{Generation: "5efbd8d042012dca", Index: 10, Offset: 5})
		if err != nil {
			t.Fatal(err)
		}
		defer r.Close()

		if buf, err := ioutil.ReadAll(r); err != nil {
			t.Fatal(err)
		} else if got, want := string(buf), "foobar"; got != want {
			t.Fatalf("ReadAll=%v, want %v", got, want)
		}
	})

	t.Run("ErrNotFound", func(t *testing.T) {
		t.Parallel()

		c := NewIntegrationReplicaClient(t)
		defer MustDeleteAll(t, c)

		if _, err := c.WALSegmentReader(context.Background(), litestream.Pos{Generation: "5efbd8d042012dca", Index: 1, Offset: 0}); !os.IsNotExist(err) {
			t.Fatalf("expected not exist, got %#v", err)
		}
	})
}

func TestReplicaClient_DeleteWALSegments(t *testing.T) {
	t.Run("OK", func(t *testing.T) {
		t.Parallel()

		c := NewIntegrationReplicaClient(t)
		defer MustDeleteAll(t, c)

		if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "b16ddcf5c697540f", Index: 1, Offset: 2}, strings.NewReader(`foo`)); err != nil {
			t.Fatal(err)
		} else if _, err := c.WriteWALSegment(context.Background(), litestream.Pos{Generation: "5efbd8d042012dca", Index: 3, Offset: 4}, strings.NewReader(`bar`)); err != nil {
			t.Fatal(err)
		}

		if err := c.DeleteWALSegments(context.Background(), []litestream.Pos{
			{Generation: "b16ddcf5c697540f", Index: 1, Offset: 2},
			{Generation: "5efbd8d042012dca", Index: 3, Offset: 4},
		}); err != nil {
			t.Fatal(err)
		}

		if _, err := c.WALSegmentReader(context.Background(), litestream.Pos{Generation: "b16ddcf5c697540f", Index: 1, Offset: 2}); !os.IsNotExist(err) {
			t.Fatalf("expected not exist, got %#v", err)
		} else if _, err := c.WALSegmentReader(context.Background(), litestream.Pos{Generation: "5efbd8d042012dca", Index: 3, Offset: 4}); !os.IsNotExist(err) {
			t.Fatalf("expected not exist, got %#v", err)
		}
	})

	t.Run("ErrNoGeneration", func(t *testing.T) {
		t.Parallel()
		if err := NewIntegrationReplicaClient(t).DeleteWALSegments(context.Background(), []litestream.Pos{{}}); err == nil || err.Error() != `cannot determine wal segment path: generation required` {
			t.Fatalf("unexpected error: %v", err)
		}
	})
}

// NewIntegrationReplicaClient returns a new client for integration testing.
// If integration flag is not set then test/benchmark is skipped.
func NewIntegrationReplicaClient(tb testing.TB) *s3.ReplicaClient {
	tb.Helper()

	if !*integration {
		tb.Skip("integration tests disabled")
	}

	c := s3.NewReplicaClient()
	c.AccessKeyID = *accessKeyID
	c.SecretAccessKey = *secretAccessKey
	c.Region = *region
	c.Bucket = *bucket
	c.Path = path.Join(*pathFlag, fmt.Sprintf("%016x", rand.Uint64()))
	c.Endpoint = *endpoint
	c.ForcePathStyle = *forcePathStyle
	c.SkipVerify = *skipVerify

	return c
}

// MustDeleteAll deletes all objects under the client's path.
func MustDeleteAll(tb testing.TB, c *s3.ReplicaClient) {
	tb.Helper()
	if err := c.DeleteAll(context.Background()); err != nil {
		tb.Fatal(err)
	}
}