package litestream
import (
"bytes"
"context"
"database/sql"
"encoding/binary"
"encoding/hex"
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"math/rand"
"os"
"path/filepath"
"sort"
"strings"
"sync"
"time"
"github.com/benbjohnson/litestream/internal"
"github.com/pierrec/lz4/v4"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"golang.org/x/sync/errgroup"
)
// Default DB settings.
const (
DefaultMonitorDelayInterval = 10 * time.Millisecond
DefaultCheckpointInterval = 1 * time.Minute
DefaultMinCheckpointPageN = 1000
DefaultMaxCheckpointPageN = 10000
DefaultShadowRetentionN = 32
)
// MaxIndex is the maximum possible WAL index.
// If this index is reached then a new generation will be started.
const MaxIndex = 0x7FFFFFFF
// BusyTimeout is the timeout to wait for EBUSY from SQLite.
const BusyTimeout = 1 * time.Second
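// BusyTimeout is applied through the "_busy_timeout" DSN parameter when
// init() opens the connection, so statements issued by Litestream wait up to
// this long for competing locks instead of failing immediately with
// SQLITE_BUSY.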
// DB represents a managed instance of a SQLite database in the file system.
type DB struct {
mu sync.RWMutex
path string // path to database
db *sql.DB // target database
f *os.File // long-running db file descriptor
rtx *sql.Tx // long running read transaction
pos Pos // cached position
pageSize int // page size, in bytes
notifyCh chan struct{} // notifies DB of changes
// Cached salt & checksum from current shadow header.
hdr []byte
frame []byte
salt0, salt1 uint32
chksum0, chksum1 uint32
byteOrder binary.ByteOrder
fileMode os.FileMode // db mode cached during init
dirMode os.FileMode // parent dir mode cached during init
uid, gid int // db user & group id cached during init
ctx context.Context
cancel func()
g errgroup.Group
// Metrics
dbSizeGauge prometheus.Gauge
walSizeGauge prometheus.Gauge
totalWALBytesCounter prometheus.Counter
shadowWALIndexGauge prometheus.Gauge
shadowWALSizeGauge prometheus.Gauge
syncNCounter prometheus.Counter
syncErrorNCounter prometheus.Counter
syncSecondsCounter prometheus.Counter
checkpointNCounterVec *prometheus.CounterVec
checkpointErrorNCounterVec *prometheus.CounterVec
checkpointSecondsCounterVec *prometheus.CounterVec
// Minimum threshold of WAL size, in pages, before a passive checkpoint.
// A passive checkpoint will attempt a checkpoint but fail if there are
// active transactions occurring at the same time.
MinCheckpointPageN int
// Maximum threshold of WAL size, in pages, before a forced checkpoint.
// A forced checkpoint will block new transactions and wait for existing
// transactions to finish before issuing a checkpoint and resetting the WAL.
//
// If zero, no checkpoints are forced. This can cause the WAL to grow
// unbounded if there are always read transactions occurring.
MaxCheckpointPageN int
// Number of shadow WAL indexes to retain. This keeps files long enough for
// live replicas to retrieve the data but allows files to eventually be removed.
ShadowRetentionN int
// Time after receiving change notification before reading next WAL segment.
// Used for batching changes into fewer files instead of every transaction
// creating its own file.
MonitorDelayInterval time.Duration
// Time between automatic checkpoints in the WAL. This is done to allow
// more fine-grained WAL files so that restores can be performed with
// better precision.
CheckpointInterval time.Duration
// List of replicas for the database.
// Must be set before calling Open().
Replicas []*Replica
Logger *log.Logger
}
// NewDB returns a new instance of DB for a given path.
func NewDB(path string) *DB {
db := &DB{
path: path,
notifyCh: make(chan struct{}, 1),
MinCheckpointPageN: DefaultMinCheckpointPageN,
MaxCheckpointPageN: DefaultMaxCheckpointPageN,
ShadowRetentionN: DefaultShadowRetentionN,
MonitorDelayInterval: DefaultMonitorDelayInterval,
CheckpointInterval: DefaultCheckpointInterval,
Logger: log.New(LogWriter, fmt.Sprintf("%s: ", logPrefixPath(path)), LogFlags),
}
db.dbSizeGauge = dbSizeGaugeVec.WithLabelValues(db.path)
db.walSizeGauge = walSizeGaugeVec.WithLabelValues(db.path)
db.totalWALBytesCounter = totalWALBytesCounterVec.WithLabelValues(db.path)
db.shadowWALIndexGauge = shadowWALIndexGaugeVec.WithLabelValues(db.path)
db.shadowWALSizeGauge = shadowWALSizeGaugeVec.WithLabelValues(db.path)
db.syncNCounter = syncNCounterVec.WithLabelValues(db.path)
db.syncErrorNCounter = syncErrorNCounterVec.WithLabelValues(db.path)
db.syncSecondsCounter = syncSecondsCounterVec.WithLabelValues(db.path)
db.checkpointNCounterVec = checkpointNCounterVec.MustCurryWith(prometheus.Labels{"db": db.path})
db.checkpointErrorNCounterVec = checkpointErrorNCounterVec.MustCurryWith(prometheus.Labels{"db": db.path})
db.checkpointSecondsCounterVec = checkpointSecondsCounterVec.MustCurryWith(prometheus.Labels{"db": db.path})
db.ctx, db.cancel = context.WithCancel(context.Background())
return db
}
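// Typical wiring, as a sketch (replica construction and error handling are
// elided since they depend on the replica client in use):
//
//	db := litestream.NewDB("/var/lib/app/db.sqlite")
//	db.Replicas = append(db.Replicas, r) // r: a previously configured *Replica
//	if err := db.Open(); err != nil {
//		// handle error
//	}
//	defer db.Close()
//
// Replicas must be attached before Open is called, and Close performs a final
// sync before releasing the long-running read lock.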
// SQLDB returns a reference to the underlying sql.DB connection.
func (db *DB) SQLDB() *sql.DB {
return db.db
}
// Path returns the path to the database.
func (db *DB) Path() string {
return db.path
}
// WALPath returns the path to the database's WAL file.
func (db *DB) WALPath() string {
return db.path + "-wal"
}
// SHMPath returns the path to the database's shared memory file.
func (db *DB) SHMPath() string {
return db.path + "-shm"
}
// MetaPath returns the path to the database metadata.
func (db *DB) MetaPath() string {
dir, file := filepath.Split(db.path)
return filepath.Join(dir, file+MetaDirSuffix)
}
// GenerationNamePath returns the path of the file that stores the current generation name.
func (db *DB) GenerationNamePath() string {
return filepath.Join(db.MetaPath(), "generation")
}
// GenerationPath returns the path of a single generation.
// Panics if generation is blank.
func (db *DB) GenerationPath(generation string) string {
assert(generation != "", "generation name required")
return filepath.Join(db.MetaPath(), "generations", generation)
}
// PositionPath returns the path of the file that stores the current position.
// This file is only used to communicate state to external processes.
func (db *DB) PositionPath() string {
return filepath.Join(db.MetaPath(), "position")
}
// ShadowWALDir returns the path of the shadow wal directory.
// Panics if generation is blank.
func (db *DB) ShadowWALDir(generation string) string {
return filepath.Join(db.GenerationPath(generation), "wal")
}
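// Shadow WAL segments for a generation are stored beneath this directory with
// the layout used by WALReader and walSegmentOffsetsByIndex:
//
//	<meta dir>/generations/<generation>/wal/<index>/<offset>.wal.lz4
//
// where <index> and <offset> are the formatted WAL index and starting byte
// offset of each LZ4-compressed segment.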
// Replica returns a replica by name.
func (db *DB) Replica(name string) *Replica {
for _, r := range db.Replicas {
if r.Name() == name {
return r
}
}
return nil
}
// Pos returns the cached position of the database.
// Returns a zero position if no position has been calculated or if there is no generation.
func (db *DB) Pos() Pos {
db.mu.RLock()
defer db.mu.RUnlock()
return db.pos
}
// reset clears all cached data.
func (db *DB) reset() {
db.pos = Pos{}
db.hdr, db.frame = nil, nil
db.salt0, db.salt1 = 0, 0
db.chksum0, db.chksum1 = 0, 0
db.byteOrder = nil
}
// invalidate refreshes cached position, salt, & checksum from on-disk data.
func (db *DB) invalidate(ctx context.Context) (err error) {
// Clear cached data before starting.
db.reset()
// If any error occurs, ensure all cached data is cleared.
defer func() {
if err != nil {
db.reset()
}
}()
// Determine the last position of the current generation.
if err := db.invalidatePos(ctx); err != nil {
return fmt.Errorf("cannot determine pos: %w", err)
} else if db.pos.IsZero() {
db.Logger.Printf("init: no wal files available, clearing generation")
if err := db.clearGeneration(ctx); err != nil {
return fmt.Errorf("clear generation: %w", err)
}
return nil // no position, exit
}
// Determine salt & last checksum.
if err := db.invalidateChecksum(ctx); err != nil {
return fmt.Errorf("cannot determine last salt/checksum: %w", err)
}
return nil
}
func (db *DB) invalidatePos(ctx context.Context) error {
// Determine generation based off "generation" file in meta directory.
generation, err := db.CurrentGeneration()
if err != nil {
return err
} else if generation == "" {
return nil
}
// Iterate over all segments to find the last one.
itr, err := db.walSegments(context.Background(), generation)
if err != nil {
return err
}
defer itr.Close()
var pos Pos
for itr.Next() {
info := itr.WALSegment()
pos = info.Pos()
}
if err := itr.Close(); err != nil {
return err
}
// Exit if no WAL segments exist.
if pos.IsZero() {
return nil
}
// Read size of last segment to determine ending position.
rd, err := db.WALSegmentReader(ctx, pos)
if err != nil {
return fmt.Errorf("cannot read last wal segment: %w", err)
}
defer rd.Close()
n, err := io.Copy(ioutil.Discard, lz4.NewReader(rd))
if err != nil {
return err
}
pos.Offset += n
// Save position to cache.
db.pos = pos
return nil
}
func (db *DB) invalidateChecksum(ctx context.Context) error {
assert(!db.pos.IsZero(), "position required to invalidate checksum")
// Read entire WAL from combined segments.
rc, err := db.WALReader(ctx, db.pos.Generation, db.pos.Index)
if err != nil {
return fmt.Errorf("cannot read last wal: %w", err)
}
defer func() { _ = rc.Close() }()
// Ensure we don't read past our position.
r := &io.LimitedReader{R: rc, N: db.pos.Offset}
// Determine cache values from the current WAL file.
db.salt0, db.salt1, db.chksum0, db.chksum1, db.byteOrder, db.hdr, db.frame, err = ReadWALFields(r, db.pageSize)
if err != nil {
return fmt.Errorf("calc checksum: %w", err)
}
return nil
}
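// The header, last frame, salts, and rolling checksum cached above mark the
// exact point at which the shadow WAL left off; later syncs compare against
// this state (see verifyHeadersMatch) to detect a WAL that was restarted or
// rewritten outside Litestream. The LimitedReader keeps frames beyond the
// replicated offset out of the cached state.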
// WALReader returns the entire uncompressed WAL file for a given index.
func (db *DB) WALReader(ctx context.Context, generation string, index int) (_ io.ReadCloser, err error) {
// If any error occurs, we need to clean up all open handles.
var rcs []io.ReadCloser
defer func() {
if err != nil {
for _, rc := range rcs {
rc.Close()
}
}
}()
offsets, err := db.walSegmentOffsetsByIndex(generation, index)
if err != nil {
return nil, fmt.Errorf("wal segment offsets: %w", err)
}
for _, offset := range offsets {
f, err := os.Open(filepath.Join(db.ShadowWALDir(generation), FormatIndex(index), FormatOffset(offset)+".wal.lz4"))
if err != nil {
return nil, err
}
rcs = append(rcs, internal.NewReadCloser(lz4.NewReader(f), f))
}
return internal.NewMultiReadCloser(rcs), nil
}
func (db *DB) walSegmentOffsetsByIndex(generation string, index int) ([]int64, error) {
// Read files from index directory.
ents, err := os.ReadDir(filepath.Join(db.ShadowWALDir(generation), FormatIndex(index)))
if err != nil {
return nil, err
}
var offsets []int64
for _, ent := range ents {
if !strings.HasSuffix(ent.Name(), ".wal.lz4") {
continue
}
offset, err := ParseOffset(strings.TrimSuffix(filepath.Base(ent.Name()), ".wal.lz4"))
if err != nil {
continue
}
offsets = append(offsets, offset)
}
// Sort before returning.
sort.Slice(offsets, func(i, j int) bool { return offsets[i] < offsets[j] })
return offsets, nil
}
// NotifyCh returns a channel that can be used to signal changes in the DB.
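// The channel is buffered with a capacity of one (see NewDB), so repeated
// notifications coalesce while a sync is pending; after a notification the
// monitor waits MonitorDelayInterval before reading the WAL so that nearby
// changes are batched together.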
func (db *DB) NotifyCh() chan<- struct{} {
return db.notifyCh
}
// PageSize returns the page size of the underlying database.
// Only valid after the database exists & init() has successfully run.
func (db *DB) PageSize() int {
db.mu.RLock()
defer db.mu.RUnlock()
return db.pageSize
}
// Open initializes the background monitoring goroutine.
func (db *DB) Open() (err error) {
// Validate fields on database.
if db.MinCheckpointPageN <= 0 {
return fmt.Errorf("minimum checkpoint page count required")
}
// Validate that all replica names are unique.
m := make(map[string]struct{})
for _, r := range db.Replicas {
if _, ok := m[r.Name()]; ok {
return fmt.Errorf("duplicate replica name: %q", r.Name())
}
m[r.Name()] = struct{}{}
}
// Clear old temporary files that may have been left from a crash.
if err := removeTmpFiles(db.MetaPath()); err != nil {
return fmt.Errorf("cannot remove tmp files: %w", err)
}
// Continually monitor local changes in a separate goroutine.
db.g.Go(func() error { return db.monitor(db.ctx) })
return nil
}
// Close flushes outstanding WAL writes to replicas, releases the read lock,
// and closes the database.
func (db *DB) Close() (err error) {
db.cancel()
if e := db.g.Wait(); e != nil && err == nil {
err = e
}
// Start a new context for shutdown since we canceled the DB context.
ctx := context.Background()
// Perform a final db sync, if initialized.
if db.db != nil {
if e := db.Sync(ctx); e != nil && err == nil {
err = e
}
}
// Ensure replicas stop replicating and perform a final sync.
for _, r := range db.Replicas {
// Stop normal background sync.
r.Stop()
// Force one final sync if DB is open.
if db.db != nil {
if e := r.Sync(ctx); e != nil && err == nil {
err = e
}
}
// Close out replica.
if e := r.Close(); e != nil && err == nil {
err = e
}
}
// Release the read lock to allow other applications to handle checkpointing.
if db.rtx != nil {
if e := db.releaseReadLock(); e != nil && err == nil {
err = e
}
}
if db.db != nil {
if e := db.db.Close(); e != nil && err == nil {
err = e
}
}
return err
}
// UpdatedAt returns the last modified time of the database or WAL file.
func (db *DB) UpdatedAt() (time.Time, error) {
// Determine database modified time.
fi, err := os.Stat(db.Path())
if err != nil {
return time.Time{}, err
}
t := fi.ModTime().UTC()
// Use WAL modified time, if available & later.
if fi, err := os.Stat(db.WALPath()); os.IsNotExist(err) {
return t, nil
} else if err != nil {
return t, err
} else if fi.ModTime().After(t) {
t = fi.ModTime().UTC()
}
return t, nil
}
// init initializes the connection to the database. Skipped if already
// initialized or if the database file does not exist.
func (db *DB) init() (err error) {
// Exit if already initialized.
if db.db != nil {
return nil
}
// Exit if no database file exists.
if _, err := os.Stat(db.path); os.IsNotExist(err) {
return nil
} else if err != nil {
return err
}
// Obtain permissions for parent directory.
fi, err := os.Stat(filepath.Dir(db.path))
if err != nil {
return err
}
db.dirMode = fi.Mode()
dsn := db.path
dsn += fmt.Sprintf("?_busy_timeout=%d", BusyTimeout.Milliseconds())
// Connect to SQLite database. Use the driver registered with a hook to
// prevent WAL files from being removed.
if db.db, err = sql.Open("litestream-sqlite3", dsn); err != nil {
return err
}
// Enable WAL and ensure it is set. New mode should be returned on success:
// https://www.sqlite.org/pragma.html#pragma_journal_mode
var mode string
if err := db.db.QueryRow(`PRAGMA journal_mode = wal;`).Scan(&mode); err != nil {
return err
} else if mode != "wal" {
return fmt.Errorf("enable wal failed, mode=%q", mode)
}
// Disable autocheckpoint for litestream's connection.
if _, err := db.db.ExecContext(db.ctx, `PRAGMA wal_autocheckpoint = 0;`); err != nil {
return fmt.Errorf("disable autocheckpoint: %w", err)
}
// Create a table to force writes to the WAL when empty.
// There should only ever be one row with id=1.
if _, err := db.db.ExecContext(db.ctx, `CREATE TABLE IF NOT EXISTS _litestream_seq (id INTEGER PRIMARY KEY, seq INTEGER);`); err != nil {
return fmt.Errorf("create _litestream_seq table: %w", err)
}
// Create a lock table to force write locks during sync.
// The sync write transaction always rolls back so no data should be in this table.
if _, err := db.db.ExecContext(db.ctx, `CREATE TABLE IF NOT EXISTS _litestream_lock (id INTEGER);`); err != nil {
return fmt.Errorf("create _litestream_lock table: %w", err)
}
// Open long-running database file descriptor. Required for non-OFD locks.
if db.f, err = os.Open(db.path); err != nil {
return fmt.Errorf("open db file descriptor: %w", err)
}
// Ensure database is closed if init fails.
// Initialization can retry on next sync.
defer func() {
if err != nil {
_ = db.releaseReadLock()
db.db.Close()
db.f.Close()
db.db, db.f = nil, nil
}
}()
// Obtain file info once we know the database exists.
fi, err = os.Stat(db.path)
if err != nil {
return fmt.Errorf("init file stat: %w", err)
}
db.fileMode = fi.Mode()
db.uid, db.gid = internal.Fileinfo(fi)
// Pass permissions to file replicas, if they exist.
for _, r := range db.Replicas {
if client, ok := r.Client().(*FileReplicaClient); ok {
client.FileMode = db.fileMode
client.DirMode = db.dirMode
client.Uid = db.uid
client.Gid = db.gid
}
}
// Start a long-running read transaction to prevent other transactions
// from checkpointing.
if err := db.acquireReadLock(); err != nil {
return fmt.Errorf("acquire read lock: %w", err)
}
// Read page size.
if err := db.db.QueryRowContext(db.ctx, `PRAGMA page_size;`).Scan(&db.pageSize); err != nil {
return fmt.Errorf("read page size: %w", err)
} else if db.pageSize <= 0 {
return fmt.Errorf("invalid db page size: %d", db.pageSize)
}
// Ensure meta directory structure exists.
if err := internal.MkdirAll(db.MetaPath(), db.dirMode, db.uid, db.gid); err != nil {
return err
}
// Determine current position, if available.
if err := db.invalidate(db.ctx); err != nil {
return fmt.Errorf("invalidate: %w", err)
}
// If we have an existing shadow WAL, ensure the headers match.
if err := db.verifyHeadersMatch(); err != nil {
db.Logger.Printf("init: cannot determine last wal position, clearing generation; %s", err)
if err := db.clearGeneration(db.ctx); err != nil {
return fmt.Errorf("clear generation: %w", err)
}
}
// Clean up previous generations.
if err := db.clean(db.ctx); err != nil {
return fmt.Errorf("clean: %w", err)
}
// Start replication.
for _, r := range db.Replicas {
r.Start(db.ctx)
}
return nil
}
func (db *DB) clearGeneration(ctx context.Context) error {
if err := os.Remove(db.GenerationNamePath()); err != nil && !os.IsNotExist(err) {
return err
}
return nil
}
// verifyHeadersMatch returns an error if the primary WAL header does not match the cached shadow WAL header.
func (db *DB) verifyHeadersMatch() error {
// Skip verification if we have no current position.
if db.pos.IsZero() {
return nil
}
// Read header from the real WAL file.
hdr, err := readWALHeader(db.WALPath())
if os.IsNotExist(err) {
return fmt.Errorf("no primary wal: %w", err)
} else if err != nil {
return fmt.Errorf("primary wal header: %w", err)
}
// Compare real WAL header with shadow WAL header.
// If there is a mismatch then the real WAL has been restarted outside Litestream.
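// The 32-byte WAL header includes the salt values, which SQLite changes
// whenever the WAL is reset, so a restart performed by another process shows
// up as a header mismatch here.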
if !bytes.Equal(hdr, db.hdr) {
return fmt.Errorf("wal header mismatch at %s", db.pos.Truncate())
}
return nil
}
// clean removes old generations & WAL files.
func (db *DB) clean(ctx context.Context) error {
if err := db.cleanGenerations(ctx); err != nil {
return err
}
return db.cleanWAL(ctx)
}
// cleanGenerations removes old generations.
func (db *DB) cleanGenerations(ctx context.Context) error {
generation, err := db.CurrentGeneration()
if err != nil {
return err
}
dir := filepath.Join(db.MetaPath(), "generations")
fis, err := ioutil.ReadDir(dir)
if os.IsNotExist(err) {
return nil
} else if err != nil {
return err
}
for _, fi := range fis {
// Skip the current generation.
if filepath.Base(fi.Name()) == generation {
continue
}
// Delete all other generations.
if err := os.RemoveAll(filepath.Join(dir, fi.Name())); err != nil {
return err
}
}
return nil
}
// cleanWAL removes WAL files that have been replicated.
func (db *DB) cleanWAL(ctx context.Context) error {
generation, err := db.CurrentGeneration()
if err != nil {
return fmt.Errorf("current generation: %w", err)
} else if generation == "" {
return nil
}
// Determine lowest index that's been replicated to all replicas.
minReplicaIndex := -1
for _, r := range db.Replicas {
pos := r.Pos().Truncate()
if pos.Generation != generation {
continue // different generation, skip
} else if minReplicaIndex == -1 || pos.Index < minReplicaIndex {
minReplicaIndex = pos.Index
}
}
// Retain a minimum number of recent WAL indexes so that live replicas can still retrieve them.
minRetentionIndex := db.pos.Index - db.ShadowRetentionN
// Skip if we have replicas but none have replicated this generation yet.
if len(db.Replicas) > 0 && minReplicaIndex <= 0 {
return nil
}
// Delete all WAL index directories below the minimum position.
dir := db.ShadowWALDir(generation)
ents, err := os.ReadDir(dir)
if err != nil {
return err
}
for _, ent := range ents {
index, err := ParseIndex(ent.Name())
if err != nil {
continue
} else if len(db.Replicas) > 0 && index >= minReplicaIndex {
continue // not replicated yet, skip
} else if index >= minRetentionIndex {
continue // retain certain number of indexes, skip
}
if err := os.RemoveAll(filepath.Join(dir, FormatIndex(index))); err != nil {
return err
}
db.Logger.Printf("remove shadow index: %s/%s", generation, FormatIndex(index))
}
return nil
}
// acquireReadLock begins a read transaction on the database to prevent checkpointing.
func (db *DB) acquireReadLock() error {
if db.rtx != nil {
return nil
}
// Start long running read-transaction to prevent checkpoints.
tx, err := db.db.Begin()
if err != nil {
return err
}
// Execute read query to obtain read lock.
if _, err := tx.Exec(`SELECT COUNT(1) FROM _litestream_seq;`); err != nil {
_ = tx.Rollback()
return err
}
// Track transaction so we can release it later before checkpoint.
db.rtx = tx
return nil
}
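// While this read transaction is held, SQLite cannot checkpoint past the
// reader's snapshot or reset the WAL, which guarantees Litestream sees every
// WAL frame before it is moved into the main database file.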
// releaseReadLock rolls back the long-running read transaction.
func (db *DB) releaseReadLock() error {
// Ignore if we do not have a read lock.
if db.rtx == nil {
return nil
}
// Rollback & clear read transaction.
err := db.rtx.Rollback()
db.rtx = nil
return err
}
// CurrentGeneration returns the name of the generation saved to the "generation"
// file in the metadata directory. Returns an empty string if none exists.
func (db *DB) CurrentGeneration() (string, error) {
buf, err := os.ReadFile(db.GenerationNamePath())
if os.IsNotExist(err) {
return "", nil
} else if err != nil {
return "", err
}
generation := strings.TrimSpace(string(buf))
if len(generation) != GenerationNameLen {
return "", nil
}
return generation, nil
}
// createGeneration starts a new generation by creating the generation
// directory, snapshotting to each replica, and updating the current
// generation name.
func (db *DB) createGeneration(ctx context.Context) (string, error) {
// Generate random generation hex name.
buf := make([]byte, GenerationNameLen/2)
_, _ = rand.New(rand.NewSource(time.Now().UnixNano())).Read(buf)
generation := hex.EncodeToString(buf)
// Create the new generation directory.
dir := filepath.Join(db.MetaPath(), "generations", generation)
if err := internal.MkdirAll(dir, db.dirMode, db.uid, db.gid); err != nil {
return "", err
}
// Initialize shadow WAL with copy of header.
if err := db.initShadowWALIndex(ctx, Pos{Generation: generation}); err != nil {
return "", fmt.Errorf("initialize shadow wal: %w", err)
}
// Atomically write generation name as current generation.
generationNamePath := db.GenerationNamePath()
if err := os.WriteFile(generationNamePath+".tmp", []byte(generation+"\n"), db.fileMode); err != nil {
return "", fmt.Errorf("write generation temp file: %w", err)
}
_ = os.Chown(generationNamePath+".tmp", db.uid, db.gid)
if err := os.Rename(generationNamePath+".tmp", generationNamePath); err != nil {
return "", fmt.Errorf("rename generation file: %w", err)
}
// Remove old generations.
if err := db.clean(db.ctx); err != nil {
return "", err
}
return generation, nil
}
// Sync copies pending data from the WAL to the shadow WAL.
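// It retries the locked sync operation up to five times, logging each
// failure, and always returns nil; a persistent error simply reappears the
// next time the monitor triggers a sync.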
func (db *DB) Sync(ctx context.Context) error {
const retryN = 5
for i := 0; i < retryN; i++ {
if err := func() error {
db.mu.Lock()
defer db.mu.Unlock()
return db.sync(ctx)
}(); err != nil {
db.Logger.Printf("sync error, retrying: %s", err)
} else {
break
}
}
return nil
}
func (db *DB) sync(ctx context.Context) (err error) {
// Initialize database, if necessary. Exit if no DB exists.
if err := db.init(); err != nil {
return err
} else if db.db == nil {
return nil
}
// Ensure the cached position exists.
if db.pos.IsZero() {
if err := db.invalidate(ctx); err != nil {
return fmt.Errorf("invalidate: %w", err)
}
}
// If sync fails, reset position & cache.
defer func() {
if err != nil {
db.reset()
}
}()
// Track total sync metrics.
t := time.Now()
defer func() {
db.syncNCounter.Inc()
if err != nil {
db.syncErrorNCounter.Inc()
}
db.syncSecondsCounter.Add(float64(time.Since(t).Seconds()))
}()
// Ensure WAL has at least one frame in it.
if err := db.ensureWALExists(); err != nil {
return fmt.Errorf("ensure wal exists: %w", err)
}
// Verify our last sync matches the current state of the WAL.
// This ensures that we have an existing generation & that the last sync
// position of the real WAL hasn't been overwritten by another process.
info, err := db.verify()
if err != nil {
return fmt.Errorf("cannot verify wal state: %w", err)
}
// If we are unable to verify the WAL state then we start a new generation.
if info.reason != "" {
// Start new generation & notify user via log message.
if info.generation, err = db.createGeneration(ctx); err != nil {
return fmt.Errorf("create generation: %w", err)
}
db.Logger.Printf("sync: new generation %q, %s", info.generation, info.reason)
// Clear shadow wal info.
info.restart = false
info.reason = ""
}
// Synchronize real WAL with current shadow WAL.
if err := db.copyToShadowWAL(ctx); err != nil {
return fmt.Errorf("cannot copy to shadow wal: %w", err)
}
// If we are at the end of the WAL file, start a new index.
if info.restart {
// Move to beginning of next index.
pos := db.pos.Truncate()
pos.Index++
// Attempt to restart WAL from beginning of new index.
// Position is only committed to cache if successful.
if err := db.initShadowWALIndex(ctx, pos); err != nil {
return fmt.Errorf("cannot init shadow wal: pos=%s err=%w", pos, err)
}
}
// If WAL size is greater than max threshold, force checkpoint.
// If WAL size is greater than min threshold, attempt checkpoint.
var checkpoint bool
checkpointMode := CheckpointModePassive
if db.MaxCheckpointPageN > 0 && db.pos.Offset >= calcWALSize(db.pageSize, db.MaxCheckpointPageN) {
checkpoint, checkpointMode = true, CheckpointModeRestart
} else if db.pos.Offset >= calcWALSize(db.pageSize, db.MinCheckpointPageN) {
checkpoint = true
} else if db.CheckpointInterval > 0 && !info.dbModTime.IsZero() && time.Since(info.dbModTime) > db.CheckpointInterval && db.pos.Offset > calcWALSize(db.pageSize, 1) {
checkpoint = true
}
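// calcWALSize is assumed here to convert a page/frame count into a WAL byte
// size (header plus n frames of frame header + page data), so these
// comparisons effectively ask whether the WAL has grown past N pages.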
// Issue the checkpoint.
if checkpoint {
// Under rare circumstances, a checkpoint can be unable to verify continuity
// and will require a restart.
if err := db.checkpoint(ctx, info.generation, checkpointMode); errors.Is(err, errRestartGeneration) {
generation, err := db.createGeneration(ctx)
if err != nil {
return fmt.Errorf("create generation: %w", err)
}
db.Logger.Printf("sync: new generation %q, possible WAL overrun occurred", generation)
} else if err != nil {
return fmt.Errorf("checkpoint: mode=%v err=%w", checkpointMode, err)
}
}
// Clean up any old files.
if err := db.clean(ctx); err != nil {
return fmt.Errorf("cannot clean: %w", err)
}
// Compute current index and total shadow WAL size.
// This is only for metrics so we ignore any errors that occur.
db.shadowWALIndexGauge.Set(float64(db.pos.Index))
db.shadowWALSizeGauge.Set(float64(db.pos.Offset))
return nil
}
// ensureWALExists checks that the real WAL exists and has a header.
func (db *DB) ensureWALExists() (err error) {
// Exit early if WAL header exists.
if fi, err := os.Stat(db.WALPath()); err == nil && fi.Size() >= WALHeaderSize {
return nil
}
// Otherwise create transaction that updates the internal litestream table.
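// The upsert always touches the single row with id=1 in _litestream_seq,
// which commits at least one frame into the WAL and gives it a header.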
_, err = db.db.Exec(`INSERT INTO _litestream_seq (id, seq) VALUES (1, 1) ON CONFLICT (id) DO UPDATE SET seq = seq + 1`)
return err
}
// verify ensures the current shadow WAL state matches where it left off from
// the real WAL. Returns generation & WAL sync information. If info.reason is
// not blank, verification failed and a new generation should be started.
func (db *DB) verify() (info syncInfo, err error) {
// Look up existing generation.
generation, err := db.CurrentGeneration()
if err != nil {
return info, fmt.Errorf("cannot find current generation: %w", err)
} else if generation == "" {
info.reason = "no generation exists"
return info, nil
}
info.generation = generation