package litestream
import (
"bytes"
"context"
"database/sql"
"encoding/binary"
"encoding/hex"
"errors"
"fmt"
"hash/crc64"
"io"
"log/slog"
"math"
"math/rand"
"os"
"path/filepath"
"strings"
"sync"
"time"
"github.com/benbjohnson/litestream/internal"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
// Default DB settings.
const (
DefaultMonitorInterval = 1 * time.Second
DefaultCheckpointInterval = 1 * time.Minute
DefaultMinCheckpointPageN = 1000
DefaultMaxCheckpointPageN = 10000
DefaultTruncatePageN = 500000
)
// MaxIndex is the maximum possible WAL index.
// If this index is reached then a new generation will be started.
const MaxIndex = 0x7FFFFFFF
// BusyTimeout is how long to wait when SQLite reports the database is busy (SQLITE_BUSY).
const BusyTimeout = 1 * time.Second
// DB represents a managed instance of a SQLite database in the file system.
type DB struct {
mu sync.RWMutex
path string // path to the database file
metaPath string // Path to the database metadata.
db *sql.DB // target database
f *os.File // long-running db file descriptor
rtx *sql.Tx // long running read transaction
pageSize int // page size, in bytes
notify chan struct{} // closes on WAL change
chkMu sync.Mutex // checkpoint lock
fileInfo os.FileInfo // db info cached during init
dirInfo os.FileInfo // parent dir info cached during init
ctx context.Context
cancel func()
wg sync.WaitGroup
// Metrics
dbSizeGauge prometheus.Gauge
walSizeGauge prometheus.Gauge
totalWALBytesCounter prometheus.Counter
shadowWALIndexGauge prometheus.Gauge
shadowWALSizeGauge prometheus.Gauge
syncNCounter prometheus.Counter
syncErrorNCounter prometheus.Counter
syncSecondsCounter prometheus.Counter
checkpointNCounterVec *prometheus.CounterVec
checkpointErrorNCounterVec *prometheus.CounterVec
checkpointSecondsCounterVec *prometheus.CounterVec
// Minimum threshold of WAL size, in pages, before a passive checkpoint.
// A passive checkpoint will attempt a checkpoint but fail if there are
// active transactions occurring at the same time.
MinCheckpointPageN int
// Maximum threshold of WAL size, in pages, before a forced checkpoint.
// A forced checkpoint will block new transactions and wait for existing
// transactions to finish before issuing a checkpoint and resetting the WAL.
//
// If zero, no checkpoints are forced. This can cause the WAL to grow
// unbounded if there are always read transactions occurring.
MaxCheckpointPageN int
// Threshold of WAL size, in pages, before a forced truncation checkpoint.
// A forced truncation checkpoint will block new transactions and wait for
// existing transactions to finish before issuing a checkpoint and
// truncating the WAL.
//
// If zero, no truncates are forced. This can cause the WAL to grow
// unbounded if there's a sudden spike of changes between other
// checkpoints.
TruncatePageN int
// Time between automatic checkpoints in the WAL. This is done to allow
// more fine-grained WAL files so that restores can be performed with
// better precision.
CheckpointInterval time.Duration
// Frequency at which to perform db sync.
MonitorInterval time.Duration
// List of replicas for the database.
// Must be set before calling Open().
Replicas []*Replica
// Where to send log messages; defaults to the global slog logger with the database path attached.
Logger *slog.Logger
}
// NewDB returns a new instance of DB for a given path.
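//
// Illustrative usage sketch (caller-side code, not part of this file; the
// replica value is assumed to come from one of the replica client packages):
//
//	db := litestream.NewDB("/var/lib/app/app.db")
//	db.Replicas = append(db.Replicas, replica) // must be set before Open()
//	if err := db.Open(); err != nil {
//		log.Fatal(err)
//	}
//	defer db.Close(context.Background())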
func NewDB(path string) *DB {
dir, file := filepath.Split(path)
db := &DB{
path: path,
metaPath: filepath.Join(dir, "."+file+MetaDirSuffix),
notify: make(chan struct{}),
MinCheckpointPageN: DefaultMinCheckpointPageN,
MaxCheckpointPageN: DefaultMaxCheckpointPageN,
TruncatePageN: DefaultTruncatePageN,
CheckpointInterval: DefaultCheckpointInterval,
MonitorInterval: DefaultMonitorInterval,
Logger: slog.With("db", path),
}
db.dbSizeGauge = dbSizeGaugeVec.WithLabelValues(db.path)
db.walSizeGauge = walSizeGaugeVec.WithLabelValues(db.path)
db.totalWALBytesCounter = totalWALBytesCounterVec.WithLabelValues(db.path)
db.shadowWALIndexGauge = shadowWALIndexGaugeVec.WithLabelValues(db.path)
db.shadowWALSizeGauge = shadowWALSizeGaugeVec.WithLabelValues(db.path)
db.syncNCounter = syncNCounterVec.WithLabelValues(db.path)
db.syncErrorNCounter = syncErrorNCounterVec.WithLabelValues(db.path)
db.syncSecondsCounter = syncSecondsCounterVec.WithLabelValues(db.path)
db.checkpointNCounterVec = checkpointNCounterVec.MustCurryWith(prometheus.Labels{"db": db.path})
db.checkpointErrorNCounterVec = checkpointErrorNCounterVec.MustCurryWith(prometheus.Labels{"db": db.path})
db.checkpointSecondsCounterVec = checkpointSecondsCounterVec.MustCurryWith(prometheus.Labels{"db": db.path})
db.ctx, db.cancel = context.WithCancel(context.Background())
return db
}
// SQLDB returns a reference to the underlying sql.DB connection.
func (db *DB) SQLDB() *sql.DB {
return db.db
}
// Path returns the path to the database.
func (db *DB) Path() string {
return db.path
}
// WALPath returns the path to the database's WAL file.
func (db *DB) WALPath() string {
return db.path + "-wal"
}
// MetaPath returns the path to the database metadata.
func (db *DB) MetaPath() string {
return db.metaPath
}
// SetMetaPath sets the path to database metadata.
func (db *DB) SetMetaPath(mp string) {
db.metaPath = mp
}
// GenerationNamePath returns the path of the file that stores the current generation name.
func (db *DB) GenerationNamePath() string {
return filepath.Join(db.metaPath, "generation")
}
// GenerationPath returns the path of a single generation.
// Panics if generation is blank.
func (db *DB) GenerationPath(generation string) string {
assert(generation != "", "generation name required")
return filepath.Join(db.metaPath, "generations", generation)
}
// ShadowWALDir returns the path of the shadow wal directory.
// Panics if generation is blank.
func (db *DB) ShadowWALDir(generation string) string {
return filepath.Join(db.GenerationPath(generation), "wal")
}
// ShadowWALPath returns the path of a single shadow WAL file.
// Panics if generation is blank or index is negative.
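//
// Together with GenerationNamePath, GenerationPath, and ShadowWALDir, the
// paths form a meta directory layout roughly like the following (illustrative;
// the exact MetaDirSuffix, generation name, and WAL filename format are
// assumptions based on the helpers referenced above):
//
//	.app.db-litestream/
//	    generation                  # file holding the current generation name
//	    generations/
//	        b87fd1a60c8a02f3/
//	            wal/
//	                00000000.wal    # shadow WAL for index 0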
func (db *DB) ShadowWALPath(generation string, index int) string {
assert(index >= 0, "shadow wal index cannot be negative")
return filepath.Join(db.ShadowWALDir(generation), FormatWALPath(index))
}
// CurrentShadowWALPath returns the path to the last shadow WAL in a generation.
func (db *DB) CurrentShadowWALPath(generation string) (string, error) {
index, _, err := db.CurrentShadowWALIndex(generation)
if err != nil {
return "", err
}
return db.ShadowWALPath(generation, index), nil
}
// CurrentShadowWALIndex returns the current WAL index & total size.
func (db *DB) CurrentShadowWALIndex(generation string) (index int, size int64, err error) {
des, err := os.ReadDir(filepath.Join(db.GenerationPath(generation), "wal"))
if os.IsNotExist(err) {
return 0, 0, nil // no wal files written for generation
} else if err != nil {
return 0, 0, err
}
// Find highest wal index.
for _, de := range des {
fi, err := de.Info()
if os.IsNotExist(err) {
continue // file was deleted after os.ReadDir returned
} else if err != nil {
return 0, 0, err
}
if v, err := ParseWALPath(fi.Name()); err != nil {
continue // invalid wal filename
} else if v > index {
index = v
}
size += fi.Size()
}
return index, size, nil
}
// FileInfo returns the cached file stats for the database file when it was initialized.
func (db *DB) FileInfo() os.FileInfo {
return db.fileInfo
}
// DirInfo returns the cached file stats for the parent directory of the database file when it was initialized.
func (db *DB) DirInfo() os.FileInfo {
return db.dirInfo
}
// Replica returns a replica by name.
func (db *DB) Replica(name string) *Replica {
for _, r := range db.Replicas {
if r.Name() == name {
return r
}
}
return nil
}
// Pos returns the current position of the database.
func (db *DB) Pos() (Pos, error) {
generation, err := db.CurrentGeneration()
if err != nil {
return Pos{}, err
} else if generation == "" {
return Pos{}, nil
}
index, _, err := db.CurrentShadowWALIndex(generation)
if err != nil {
return Pos{}, err
}
fi, err := os.Stat(db.ShadowWALPath(generation, index))
if os.IsNotExist(err) {
return Pos{Generation: generation, Index: index}, nil
} else if err != nil {
return Pos{}, err
}
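// frameAlign is defined elsewhere in this package; it is assumed to round the
// raw file size down to the end of the last complete frame (roughly
// WALHeaderSize + n*(WALFrameHeaderSize+pageSize) for the largest whole n),
// so a partially written trailing frame is not reported in the offset.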
return Pos{Generation: generation, Index: index, Offset: frameAlign(fi.Size(), db.pageSize)}, nil
}
// Notify returns a channel that closes when the shadow WAL changes.
func (db *DB) Notify() <-chan struct{} {
db.mu.RLock()
defer db.mu.RUnlock()
return db.notify
}
// PageSize returns the page size of the underlying database.
// Only valid after the database exists & init() has successfully run.
func (db *DB) PageSize() int {
db.mu.RLock()
defer db.mu.RUnlock()
return db.pageSize
}
// Open initializes the background monitoring goroutine.
func (db *DB) Open() (err error) {
// Validate fields on database.
if db.MinCheckpointPageN <= 0 {
return fmt.Errorf("minimum checkpoint page count required")
}
// Validate that all replica names are unique.
m := make(map[string]struct{})
for _, r := range db.Replicas {
if _, ok := m[r.Name()]; ok {
return fmt.Errorf("duplicate replica name: %q", r.Name())
}
m[r.Name()] = struct{}{}
}
// Clear old temporary files that may have been left behind by a crash.
if err := removeTmpFiles(db.metaPath); err != nil {
return fmt.Errorf("cannot remove tmp files: %w", err)
}
// Start monitoring SQLite database in a separate goroutine.
if db.MonitorInterval > 0 {
db.wg.Add(1)
go func() { defer db.wg.Done(); db.monitor() }()
}
return nil
}
// Close flushes outstanding WAL writes to replicas, releases the read lock,
// and closes the database. Takes a context for final sync.
func (db *DB) Close(ctx context.Context) (err error) {
db.cancel()
db.wg.Wait()
// Perform a final db sync, if initialized.
if db.db != nil {
if e := db.Sync(ctx); e != nil && err == nil {
err = e
}
}
// Ensure replicas perform a final sync and stop replicating.
for _, r := range db.Replicas {
if db.db != nil {
if e := r.Sync(ctx); e != nil && err == nil {
err = e
}
}
r.Stop(true)
}
// Release the read lock to allow other applications to handle checkpointing.
if db.rtx != nil {
if e := db.releaseReadLock(); e != nil && err == nil {
err = e
}
}
if db.db != nil {
if e := db.db.Close(); e != nil && err == nil {
err = e
}
}
if db.f != nil {
if e := db.f.Close(); e != nil && err == nil {
err = e
}
}
return err
}
// UpdatedAt returns the last modified time of the database or WAL file.
func (db *DB) UpdatedAt() (time.Time, error) {
// Determine database modified time.
fi, err := os.Stat(db.Path())
if err != nil {
return time.Time{}, err
}
t := fi.ModTime().UTC()
// Use WAL modified time, if available & later.
if fi, err := os.Stat(db.WALPath()); os.IsNotExist(err) {
return t, nil
} else if err != nil {
return t, err
} else if fi.ModTime().After(t) {
t = fi.ModTime().UTC()
}
return t, nil
}
// init initializes the connection to the database.
// Skipped if already initialized or if the database file does not exist.
func (db *DB) init() (err error) {
// Exit if already initialized.
if db.db != nil {
return nil
}
// Exit if no database file exists.
fi, err := os.Stat(db.path)
if os.IsNotExist(err) {
return nil
} else if err != nil {
return err
}
db.fileInfo = fi
// Obtain permissions for parent directory.
if fi, err = os.Stat(filepath.Dir(db.path)); err != nil {
return err
}
db.dirInfo = fi
dsn := db.path
dsn += fmt.Sprintf("?_busy_timeout=%d", BusyTimeout.Milliseconds())
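// e.g. "/var/lib/app/app.db?_busy_timeout=1000" with the default BusyTimeout
// (illustrative path).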
// Connect to SQLite database. Use the driver registered with a hook to
// prevent WAL files from being removed.
if db.db, err = sql.Open("litestream-sqlite3", dsn); err != nil {
return err
}
// Open long-running database file descriptor. Required for non-OFD locks.
if db.f, err = os.Open(db.path); err != nil {
return fmt.Errorf("open db file descriptor: %w", err)
}
// Ensure database is closed if init fails.
// Initialization can retry on next sync.
defer func() {
if err != nil {
_ = db.releaseReadLock()
db.db.Close()
db.f.Close()
db.db, db.f = nil, nil
}
}()
// Enable WAL and ensure it is set. New mode should be returned on success:
// https://www.sqlite.org/pragma.html#pragma_journal_mode
var mode string
if err := db.db.QueryRow(`PRAGMA journal_mode = wal;`).Scan(&mode); err != nil {
return err
} else if mode != "wal" {
return fmt.Errorf("enable wal failed, mode=%q", mode)
}
// Disable autocheckpoint for litestream's connection.
if _, err := db.db.ExecContext(db.ctx, `PRAGMA wal_autocheckpoint = 0;`); err != nil {
return fmt.Errorf("disable autocheckpoint: %w", err)
}
// Create a table to force writes to the WAL when empty.
// There should only ever be one row with id=1.
if _, err := db.db.Exec(`CREATE TABLE IF NOT EXISTS _litestream_seq (id INTEGER PRIMARY KEY, seq INTEGER);`); err != nil {
return fmt.Errorf("create _litestream_seq table: %w", err)
}
// Create a lock table to force write locks during sync.
// The sync write transaction always rolls back so no data should be in this table.
if _, err := db.db.Exec(`CREATE TABLE IF NOT EXISTS _litestream_lock (id INTEGER);`); err != nil {
return fmt.Errorf("create _litestream_lock table: %w", err)
}
// Start a long-running read transaction to prevent other transactions
// from checkpointing.
if err := db.acquireReadLock(); err != nil {
return fmt.Errorf("acquire read lock: %w", err)
}
// Read page size.
if err := db.db.QueryRow(`PRAGMA page_size;`).Scan(&db.pageSize); err != nil {
return fmt.Errorf("read page size: %w", err)
} else if db.pageSize <= 0 {
return fmt.Errorf("invalid db page size: %d", db.pageSize)
}
// Ensure meta directory structure exists.
if err := internal.MkdirAll(db.metaPath, db.dirInfo); err != nil {
return err
}
// If we have an existing shadow WAL, ensure the headers match.
if err := db.verifyHeadersMatch(); err != nil {
db.Logger.Warn("init: cannot determine last wal position, clearing generation", "error", err)
if err := os.Remove(db.GenerationNamePath()); err != nil && !os.IsNotExist(err) {
return fmt.Errorf("remove generation name: %w", err)
}
}
// Clean up previous generations.
if err := db.clean(); err != nil {
return fmt.Errorf("clean: %w", err)
}
// Start replication.
for _, r := range db.Replicas {
r.Start(db.ctx)
}
return nil
}
// verifyHeadersMatch returns an error if the primary WAL header does not match
// the header of the last shadow WAL file.
func (db *DB) verifyHeadersMatch() error {
// Determine current generation.
generation, err := db.CurrentGeneration()
if err != nil {
return err
} else if generation == "" {
return nil
}
// Find current generation & latest shadow WAL.
shadowWALPath, err := db.CurrentShadowWALPath(generation)
if err != nil {
return fmt.Errorf("cannot determine current shadow wal path: %w", err)
}
hdr0, err := readWALHeader(db.WALPath())
if os.IsNotExist(err) {
return fmt.Errorf("no primary wal: %w", err)
} else if err != nil {
return fmt.Errorf("primary wal header: %w", err)
}
hdr1, err := readWALHeader(shadowWALPath)
if os.IsNotExist(err) {
return fmt.Errorf("no shadow wal")
} else if err != nil {
return fmt.Errorf("shadow wal header: %w", err)
}
if !bytes.Equal(hdr0, hdr1) {
return fmt.Errorf("wal header mismatch %x <> %x on %s", hdr0, hdr1, shadowWALPath)
}
return nil
}
// clean removes old generations & WAL files.
func (db *DB) clean() error {
if err := db.cleanGenerations(); err != nil {
return err
}
return db.cleanWAL()
}
// cleanGenerations removes old generations.
func (db *DB) cleanGenerations() error {
generation, err := db.CurrentGeneration()
if err != nil {
return err
}
dir := filepath.Join(db.metaPath, "generations")
fis, err := os.ReadDir(dir)
if os.IsNotExist(err) {
return nil
} else if err != nil {
return err
}
for _, fi := range fis {
// Skip the current generation.
if filepath.Base(fi.Name()) == generation {
continue
}
// Delete all other generations.
if err := os.RemoveAll(filepath.Join(dir, fi.Name())); err != nil {
return err
}
}
return nil
}
// cleanWAL removes WAL files that have been replicated.
func (db *DB) cleanWAL() error {
generation, err := db.CurrentGeneration()
if err != nil {
return err
}
// Determine lowest index that's been replicated to all replicas.
min := -1
for _, r := range db.Replicas {
pos := r.Pos()
if pos.Generation != generation {
pos = Pos{} // different generation, reset index to zero
}
if min == -1 || pos.Index < min {
min = pos.Index
}
}
// Skip if our lowest index is too small.
if min <= 0 {
return nil
}
min-- // Keep an extra WAL file.
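// Worked example (illustrative): with two replicas at indexes 7 and 5 on the
// current generation, min starts at 5 and becomes 4 after the decrement, so
// only shadow WAL files with an index below 4 are removed below.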
// Remove all WAL files for the generation before the lowest index.
dir := db.ShadowWALDir(generation)
fis, err := os.ReadDir(dir)
if os.IsNotExist(err) {
return nil
} else if err != nil {
return err
}
for _, fi := range fis {
if idx, err := ParseWALPath(fi.Name()); err != nil || idx >= min {
continue
}
if err := os.Remove(filepath.Join(dir, fi.Name())); err != nil {
return err
}
}
return nil
}
// acquireReadLock begins a read transaction on the database to prevent checkpointing.
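//
// The effect is roughly equivalent to holding the following SQL open on the
// litestream connection (a sketch; the real code uses a database/sql
// transaction, rolled back later by releaseReadLock):
//
//	BEGIN;                                -- open a read snapshot
//	SELECT COUNT(1) FROM _litestream_seq; -- touch a page to take the read lock
//	-- checkpoints from other connections now stop at the reader's position
//	ROLLBACK;                             -- releases the lock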
func (db *DB) acquireReadLock() error {
if db.rtx != nil {
return nil
}
// Start long running read-transaction to prevent checkpoints.
tx, err := db.db.Begin()
if err != nil {
return err
}
// Execute read query to obtain read lock.
if _, err := tx.ExecContext(db.ctx, `SELECT COUNT(1) FROM _litestream_seq;`); err != nil {
_ = tx.Rollback()
return err
}
// Track transaction so we can release it later before checkpoint.
db.rtx = tx
return nil
}
// releaseReadLock rolls back the long-running read transaction.
func (db *DB) releaseReadLock() error {
// Ignore if we do not have a read lock.
if db.rtx == nil {
return nil
}
// Rollback & clear read transaction.
err := db.rtx.Rollback()
db.rtx = nil
return err
}
// CurrentGeneration returns the name of the generation saved to the "generation"
// file in the meta data directory. Returns empty string if none exists.
func (db *DB) CurrentGeneration() (string, error) {
buf, err := os.ReadFile(db.GenerationNamePath())
if os.IsNotExist(err) {
return "", nil
} else if err != nil {
return "", err
}
// TODO: Verify if generation directory exists. If not, delete name file.
generation := strings.TrimSpace(string(buf))
if len(generation) != GenerationNameLen {
return "", nil
}
return generation, nil
}
// createGeneration starts a new generation by creating the generation
// directory, snapshotting to each replica, and updating the current
// generation name.
func (db *DB) createGeneration() (string, error) {
// Generate random generation hex name.
buf := make([]byte, GenerationNameLen/2)
_, _ = rand.New(rand.NewSource(time.Now().UnixNano())).Read(buf)
generation := hex.EncodeToString(buf)
// Generate new directory.
dir := filepath.Join(db.metaPath, "generations", generation)
if err := internal.MkdirAll(dir, db.dirInfo); err != nil {
return "", err
}
// Initialize shadow WAL with copy of header.
if _, err := db.initShadowWALFile(db.ShadowWALPath(generation, 0)); err != nil {
return "", fmt.Errorf("initialize shadow wal: %w", err)
}
// Atomically write generation name as current generation.
generationNamePath := db.GenerationNamePath()
mode := os.FileMode(0600)
if db.fileInfo != nil {
mode = db.fileInfo.Mode()
}
if err := os.WriteFile(generationNamePath+".tmp", []byte(generation+"\n"), mode); err != nil {
return "", fmt.Errorf("write generation temp file: %w", err)
}
uid, gid := internal.Fileinfo(db.fileInfo)
_ = os.Chown(generationNamePath+".tmp", uid, gid)
if err := os.Rename(generationNamePath+".tmp", generationNamePath); err != nil {
return "", fmt.Errorf("rename generation file: %w", err)
}
// Remove old generations.
if err := db.clean(); err != nil {
return "", err
}
return generation, nil
}
// Sync copies pending data from the WAL to the shadow WAL.
func (db *DB) Sync(ctx context.Context) (err error) {
db.mu.Lock()
defer db.mu.Unlock()
// Initialize database, if necessary. Exit if no DB exists.
if err := db.init(); err != nil {
return err
} else if db.db == nil {
db.Logger.Debug("sync: no database found")
return nil
}
// Track total sync metrics.
t := time.Now()
defer func() {
db.syncNCounter.Inc()
if err != nil {
db.syncErrorNCounter.Inc()
}
db.syncSecondsCounter.Add(float64(time.Since(t).Seconds()))
}()
// Ensure WAL has at least one frame in it.
if err := db.ensureWALExists(); err != nil {
return fmt.Errorf("ensure wal exists: %w", err)
}
// Verify our last sync matches the current state of the WAL.
// This ensures that we have an existing generation & that the last sync
// position of the real WAL hasn't been overwritten by another process.
info, err := db.verify()
if err != nil {
return fmt.Errorf("cannot verify wal state: %w", err)
}
db.Logger.Debug("sync", "info", &info)
// Track if anything in the shadow WAL changes and then notify at the end.
changed := info.walSize != info.shadowWALSize || info.restart || info.reason != ""
// If we are unable to verify the WAL state then we start a new generation.
if info.reason != "" {
// Start new generation & notify user via log message.
if info.generation, err = db.createGeneration(); err != nil {
return fmt.Errorf("create generation: %w", err)
}
db.Logger.Info("sync: new generation", "generation", info.generation, "reason", info.reason)
// Clear shadow wal info.
info.shadowWALPath = db.ShadowWALPath(info.generation, 0)
info.shadowWALSize = WALHeaderSize
info.restart = false
info.reason = ""
}
// Synchronize real WAL with current shadow WAL.
origWALSize, newWALSize, err := db.syncWAL(info)
if err != nil {
return fmt.Errorf("sync wal: %w", err)
}
// If the WAL size is greater than the max threshold, force a checkpoint.
// If the WAL size is greater than the min threshold, attempt a checkpoint.
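// Worked example (illustrative, assuming a 4096-byte page size and that
// calcWALSize(pageSize, n) is roughly n*(WALFrameHeaderSize+pageSize)): the
// defaults of 1,000 / 10,000 / 500,000 pages correspond to roughly 4 MB for a
// passive checkpoint, 41 MB for a RESTART checkpoint, and 2 GB for a TRUNCATE
// checkpoint.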
var checkpoint bool
checkpointMode := CheckpointModePassive
if db.TruncatePageN > 0 && origWALSize >= calcWALSize(db.pageSize, db.TruncatePageN) {
checkpoint, checkpointMode = true, CheckpointModeTruncate
} else if db.MaxCheckpointPageN > 0 && newWALSize >= calcWALSize(db.pageSize, db.MaxCheckpointPageN) {
checkpoint, checkpointMode = true, CheckpointModeRestart
} else if newWALSize >= calcWALSize(db.pageSize, db.MinCheckpointPageN) {
checkpoint = true
} else if db.CheckpointInterval > 0 && !info.dbModTime.IsZero() && time.Since(info.dbModTime) > db.CheckpointInterval && newWALSize > calcWALSize(db.pageSize, 1) {
checkpoint = true
}
// Issue the checkpoint.
if checkpoint {
changed = true
if err := db.checkpoint(ctx, info.generation, checkpointMode); err != nil {
return fmt.Errorf("checkpoint: mode=%v err=%w", checkpointMode, err)
}
}
// Clean up any old files.
if err := db.clean(); err != nil {
return fmt.Errorf("cannot clean: %w", err)
}
// Compute current index and total shadow WAL size.
// This is only for metrics so we ignore any errors that occur.
index, size, _ := db.CurrentShadowWALIndex(info.generation)
db.shadowWALIndexGauge.Set(float64(index))
db.shadowWALSizeGauge.Set(float64(size))
// Notify replicas of WAL changes.
if changed {
close(db.notify)
db.notify = make(chan struct{})
}
db.Logger.Debug("sync: ok")
return nil
}
// ensureWALExists checks that the real WAL exists and has a header.
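//
// Sketch of the effect: the first call inserts the row (id=1, seq=1) into
// _litestream_seq and each later call bumps seq via the upsert below, which
// forces SQLite to append at least one frame (and therefore a header) to the
// real WAL.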
func (db *DB) ensureWALExists() (err error) {
// Exit early if WAL header exists.
if fi, err := os.Stat(db.WALPath()); err == nil && fi.Size() >= WALHeaderSize {
return nil
}
// Otherwise create transaction that updates the internal litestream table.
_, err = db.db.Exec(`INSERT INTO _litestream_seq (id, seq) VALUES (1, 1) ON CONFLICT (id) DO UPDATE SET seq = seq + 1`)
return err
}
// verify ensures the current shadow WAL state matches where it left off from
// the real WAL. Returns generation & WAL sync information. If info.reason is
// not blank, verification failed and a new generation should be started.
func (db *DB) verify() (info syncInfo, err error) {
// Look up existing generation.
generation, err := db.CurrentGeneration()
if err != nil {
return info, fmt.Errorf("cannot find current generation: %w", err)
} else if generation == "" {
info.reason = "no generation exists"
return info, nil
}
info.generation = generation
// Determine total bytes of real DB for metrics.
fi, err := os.Stat(db.Path())
if err != nil {
return info, err
}
info.dbModTime = fi.ModTime()
db.dbSizeGauge.Set(float64(fi.Size()))
// Determine total bytes of real WAL.
fi, err = os.Stat(db.WALPath())
if err != nil {
return info, err
}
info.walSize = frameAlign(fi.Size(), db.pageSize)
info.walModTime = fi.ModTime()
db.walSizeGauge.Set(float64(fi.Size()))
// Determine the current shadow WAL index so we can append to the latest file.
index, _, err := db.CurrentShadowWALIndex(info.generation)
if err != nil {
return info, fmt.Errorf("cannot determine shadow WAL index: %w", err)
} else if index >= MaxIndex {
info.reason = "max index exceeded"
return info, nil
}
info.shadowWALPath = db.ShadowWALPath(generation, index)
// Determine shadow WAL current size.
fi, err = os.Stat(info.shadowWALPath)
if os.IsNotExist(err) {
info.reason = "no shadow wal"
return info, nil
} else if err != nil {
return info, err
}
info.shadowWALSize = frameAlign(fi.Size(), db.pageSize)
// Exit if shadow WAL does not contain a full header.
if info.shadowWALSize < WALHeaderSize {
info.reason = "short shadow wal"
return info, nil
}
// If shadow WAL is larger than real WAL then the WAL has been truncated
// so we cannot determine our last state.
if info.shadowWALSize > info.walSize {
info.reason = "wal truncated by another process"
return info, nil
}
// Compare WAL headers. Start a new shadow WAL if they are mismatched.
if hdr0, err := readWALHeader(db.WALPath()); err != nil {
return info, fmt.Errorf("cannot read wal header: %w", err)
} else if hdr1, err := readWALHeader(info.shadowWALPath); err != nil {
return info, fmt.Errorf("cannot read shadow wal header: %w", err)
} else if !bytes.Equal(hdr0, hdr1) {
info.restart = true
}
// If the shadow WAL contains only a header and that header does not match the
// real WAL, we cannot resume copying and must start a new generation.
if info.shadowWALSize == WALHeaderSize && info.restart {
info.reason = "wal header only, mismatched"
return info, nil
}
// Verify last page synced still matches.
if info.shadowWALSize > WALHeaderSize {
offset := info.shadowWALSize - int64(db.pageSize+WALFrameHeaderSize)
if buf0, err := readWALFileAt(db.WALPath(), offset, int64(db.pageSize+WALFrameHeaderSize)); err != nil {
return info, fmt.Errorf("cannot read last synced wal page: %w", err)
} else if buf1, err := readWALFileAt(info.shadowWALPath, offset, int64(db.pageSize+WALFrameHeaderSize)); err != nil {
return info, fmt.Errorf("cannot read last synced shadow wal page: %w", err)
} else if !bytes.Equal(buf0, buf1) {
info.reason = "wal overwritten by another process"
return info, nil
}
}
return info, nil
}
type syncInfo struct {
generation string // generation name
dbModTime time.Time // last modified date of real DB file
walSize int64 // size of real WAL file
walModTime time.Time // last modified date of real WAL file
shadowWALPath string // name of last shadow WAL file
shadowWALSize int64 // size of last shadow WAL file
restart bool // if true, real WAL header does not match shadow WAL
reason string // if non-blank, reason for sync failure
}
// syncWAL copies pending bytes from the real WAL to the shadow WAL.
func (db *DB) syncWAL(info syncInfo) (origSize int64, newSize int64, err error) {
// Copy WAL starting from end of shadow WAL. Exit if no new shadow WAL needed.
origSize, newSize, err = db.copyToShadowWAL(info.shadowWALPath)
if err != nil {
return origSize, newSize, fmt.Errorf("cannot copy to shadow wal: %w", err)
} else if !info.restart {
return origSize, newSize, nil // If no restart required, exit.
}
// Parse index of current shadow WAL file.
dir, base := filepath.Split(info.shadowWALPath)
index, err := ParseWALPath(base)
if err != nil {
return 0, 0, fmt.Errorf("cannot parse shadow wal filename: %s", base)
}
// Start a new shadow WAL file with next index.
newShadowWALPath := filepath.Join(dir, FormatWALPath(index+1))
newSize, err = db.initShadowWALFile(newShadowWALPath)
if err != nil {
return 0, 0, fmt.Errorf("cannot init shadow wal file: name=%s err=%w", newShadowWALPath, err)
}
return origSize, newSize, nil
}
func (db *DB) initShadowWALFile(filename string) (int64, error) {
hdr, err := readWALHeader(db.WALPath())
if err != nil {
return 0, fmt.Errorf("read header: %w", err)
}
// Determine byte order for checksumming from header magic.
bo, err := headerByteOrder(hdr)
if err != nil {
return 0, err
}
// Verify checksum.
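// Per the SQLite WAL format, the 32-byte header stores two 32-bit checksum
// words at byte offsets 24 and 28, computed over the first 24 header bytes
// using the byte order indicated by the header's magic number.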
s0 := binary.BigEndian.Uint32(hdr[24:])
s1 := binary.BigEndian.Uint32(hdr[28:])
if v0, v1 := Checksum(bo, 0, 0, hdr[:24]); v0 != s0 || v1 != s1 {
return 0, fmt.Errorf("invalid header checksum: (%x,%x) != (%x,%x)", v0, v1, s0, s1)
}
// Write header to the new shadow WAL file.
mode := os.FileMode(0600)
if fi := db.fileInfo; fi != nil {
mode = fi.Mode()
}
if err := internal.MkdirAll(filepath.Dir(filename), db.dirInfo); err != nil {
return 0, err
} else if err := os.WriteFile(filename, hdr, mode); err != nil {
return 0, err
}
uid, gid := internal.Fileinfo(db.fileInfo)
_ = os.Chown(filename, uid, gid)