Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

WIP: New schema for orders, authorizations, and validations #7773

Draft
wants to merge 5 commits into
base: main
Choose a base branch
from
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 2 additions & 1 deletion core/util.go
Original file line number Diff line number Diff line change
Expand Up @@ -27,9 +27,10 @@ import (
"unicode"

"github.com/go-jose/go-jose/v4"
"github.com/letsencrypt/boulder/identifier"
"google.golang.org/protobuf/types/known/durationpb"
"google.golang.org/protobuf/types/known/timestamppb"

"github.com/letsencrypt/boulder/identifier"
)

const Unspecified = "Unspecified"
Expand Down
16 changes: 16 additions & 0 deletions features/features.go
Original file line number Diff line number Diff line change
Expand Up @@ -109,6 +109,22 @@ type Config struct {
// get the AUTO_INCREMENT ID of each new authz without relying on MariaDB's
// unique "INSERT ... RETURNING" functionality.
InsertAuthzsIndividually bool

// ReadNewOrderSchema causes the SA to attempt to read from the new orders,
// authorizations, and validations tables. This allows us to continue reading
// from these tables even if we have to roll back the flag which causes us
// to write to them.
// - Simple selects-by-id go to whichever schema hosts the row being selected
// - Complex queries go solely to the new schema (this means that authz and
// order reuse work only in the new schema).
ReadNewOrderSchema bool

// WriteNewOrderSchema causes the SA to write to the new orders,
// authorizations, and validations tables. Do not enable this flag unless
// ReadNewOrderSchema is also enabled.
// - Inserts go solely to the new schema
// - Updates go to whichever schema hosts the row being updated
WriteNewOrderSchema bool
}

var fMu = new(sync.RWMutex)
Expand Down
4 changes: 4 additions & 0 deletions sa/database.go
Original file line number Diff line number Diff line change
Expand Up @@ -291,6 +291,10 @@ func initTables(dbMap *borp.DbMap) {
dbMap.AddTableWithName(revokedCertModel{}, "revokedCertificates").SetKeys(true, "ID")
dbMap.AddTableWithName(replacementOrderModel{}, "replacementOrders").SetKeys(true, "ID")
dbMap.AddTableWithName(pausedModel{}, "paused")
dbMap.AddTableWithName(orders2Model{}, "orders2")
dbMap.AddTableWithName(authorizationsModel{}, "authorizations")
dbMap.AddTableWithName(validationsModel{}, "validations")
dbMap.AddTableWithName(authzReuseModel{}, "authzReuse")

// Read-only maps used for selecting subsets of columns.
dbMap.AddTableWithName(CertStatusMetadata{}, "certificateStatus")
Expand Down
80 changes: 80 additions & 0 deletions sa/db-next/boulder_sa/20240801000000_OrderSchema.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1,80 @@
-- +migrate Up
-- SQL in section 'Up' is executed when this migration is applied

-- The orders2 table holds one row per ACME Order object. The authorizationIDs
-- column contains an opaque JSON blob which the SA can use to find the
-- associated authorizations without requiring db-level foreign keys. Most
-- orders are created with status "pending", but may be created with status
-- "ready" if all of their authorizations are reused and already valid. Orders
-- transition to status "processing" when finalization begins. The error field
-- is populated only if an error occurs during finalization and the order moves
-- to the "invalid" state; errors during validation are reflected elsewhere.
--
-- IDs are generated by the SA (see newRandomID) with the current "epoch" in the
-- top byte, so old data can eventually be dropped by splitting and pruning
-- range partitions.
CREATE TABLE `orders2` (
  `id` bigint(20) UNSIGNED NOT NULL,
  `registrationID` bigint(20) UNSIGNED NOT NULL,
  `created` datetime NOT NULL,
  `expires` datetime NOT NULL,
  `authorizationIDs` json NOT NULL,
  `profile` varchar(255) NOT NULL,
  `beganProcessing` boolean NOT NULL,
  `error` mediumblob DEFAULT NULL,
  `certificateSerial` varchar(255) DEFAULT NULL,
  -- No trailing comma after the last item: MySQL rejects it.
  PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4
PARTITION BY RANGE(`id`)
(PARTITION p_start VALUES LESS THAN (MAXVALUE));

-- The authorizations table holds one row per ACME Authorization object and
-- associated challenges. It is always created with status "pending". After
-- one of its challenges is attempted, it will transition into either status
-- "valid" or "invalid", and the validations column will be updated to point
-- to a new row in the validations table containing the record of that attempt.
CREATE TABLE `authorizations` (
  `id` bigint(20) UNSIGNED NOT NULL,
  `registrationID` bigint(20) UNSIGNED NOT NULL,
  `identifierType` tinyint(4) NOT NULL,
  `identifierValue` varchar(255) NOT NULL,
  `created` datetime NOT NULL,
  `expires` datetime NOT NULL,
  `profile` varchar(255) NOT NULL,
  `challenges` tinyint(4) NOT NULL,
  `token` binary(32) NOT NULL,
  `status` tinyint(4) NOT NULL,
  `validationIDs` json DEFAULT NULL,
  PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4
PARTITION BY RANGE(`id`)
(PARTITION p_start VALUES LESS THAN (MAXVALUE));


-- The validations table holds records of completed validation attempts,
-- including the validation method used, the resulting status (valid or
-- invalid), and an opaque blob of our audit record.
CREATE TABLE `validations` (
  `id` bigint(20) UNSIGNED NOT NULL,
  `challenge` tinyint(4) NOT NULL,
  `attemptedAt` datetime NOT NULL,
  `status` tinyint(4) NOT NULL,
  `record` json NOT NULL,
  PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4
PARTITION BY RANGE(`id`)
(PARTITION p_start VALUES LESS THAN (MAXVALUE));

-- The authzReuse table exists solely to allow cheap lookups of reusable authz
-- IDs. This allows us to not have expensive indices on the authorizations
-- table. The non-unique KEY on accountID_identifier provides that cheap
-- lookup without constraining the partitioning scheme (unique keys on a
-- partitioned table would have to include the partitioning column).
-- Partitioning is by authzID, which carries the same epoch prefix as the
-- other tables' IDs, so old partitions can be dropped in step with them.
CREATE TABLE `authzReuse` (
  `accountID_identifier` VARCHAR(300) NOT NULL,
  `authzID` bigint(20) UNSIGNED NOT NULL,
  `expires` DATETIME NOT NULL,
  KEY `accountID_identifier` (`accountID_identifier`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4
PARTITION BY RANGE(`authzID`)
(PARTITION p_start VALUES LESS THAN (MAXVALUE));

-- +migrate Down
-- SQL section 'Down' is executed when this migration is rolled back

DROP TABLE `authzReuse`;
DROP TABLE `validations`;
DROP TABLE `authorizations`;
DROP TABLE `orders2`;
95 changes: 95 additions & 0 deletions sa/model.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,10 +2,12 @@ package sa

import (
"context"
"crypto/rand"
"crypto/sha256"
"crypto/x509"
"database/sql"
"encoding/base64"
"encoding/binary"
"encoding/json"
"errors"
"fmt"
Expand All @@ -17,6 +19,7 @@ import (
"time"

"github.com/go-jose/go-jose/v4"
"github.com/jmhodges/clock"
"google.golang.org/protobuf/types/known/timestamppb"

"github.com/letsencrypt/boulder/core"
Expand Down Expand Up @@ -59,6 +62,54 @@ func badJSONError(msg string, jsonData []byte, err error) error {
}
}

// newRandomID generates a 64-bit ID whose top byte encodes the current
// 90-day "epoch" and whose remaining 56 bits are cryptographically random.
// The epoch prefix exists so that old rows can be cheaply identified and
// dropped later. The clock is passed in so tests can control the epoch.
func newRandomID(clk clock.Clock) (int64, error) {
	// One uint64 worth of space: byte 0 is the epoch, bytes 1-7 are random.
	var buf [8]byte
	if _, err := rand.Read(buf[1:]); err != nil {
		return 0, fmt.Errorf("while generating unique database id: %w", err)
	}

	// Epochs are 90-day chunks counted from the start of 2024, giving
	// 127 * 90 days (~31 years) of usable epochs before rollover.
	epochStart := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC)
	epoch := uint8(clk.Now().Sub(epochStart) / (90 * 24 * time.Hour))
	if epoch&0x80 != 0 {
		// A set high bit means the clock is before the epoch start date or too
		// far in the future; either way, refuse to mint a negative int64 ID.
		return 0, fmt.Errorf("invalid epoch: %d", epoch)
	}
	buf[0] = epoch

	return int64(binary.BigEndian.Uint64(buf[:])), nil
}

// looksLikeRandomID returns true if the input ID looks like it might belong to
// the new schema which uses epoch-prefixed random IDs instead of auto-increment
// columns. This is only necessary during the migration period when we are
// reading from both the old and new schemas simultaneously.
//
// It checks whether the top byte of the ID matches the current or previous
// 90-day epoch (see newRandomID). If it matches neither, the ID came from
// somewhere unknown and the caller should consult the old schema just in case.
func looksLikeRandomID(id int64, clk clock.Clock) bool {
	currEpoch := uint8(clk.Now().Sub(time.Date(2024, 01, 01, 00, 00, 00, 00, time.UTC)) / (90 * 24 * time.Hour))
	prevEpoch := uint8(clk.Now().Add(-90*24*time.Hour).Sub(time.Date(2024, 01, 01, 00, 00, 00, 00, time.UTC)) / (90 * 24 * time.Hour))

	// The epoch lives in the most significant (big-endian first) byte.
	topByte := uint8(uint64(id) >> 56)
	return topByte == currEpoch || topByte == prevEpoch
}

const regFields = "id, jwk, jwk_sha256, contact, agreement, initialIP, createdAt, LockCol, status"

// ClearEmail removes the provided email address from one specified registration. If
Expand Down Expand Up @@ -1412,3 +1463,47 @@ type pausedModel struct {
PausedAt time.Time `db:"pausedAt"`
UnpausedAt *time.Time `db:"unpausedAt"`
}

// orders2Model represents a row in the "orders2" table.
//
// IDs are epoch-prefixed random values produced by newRandomID, not
// AUTO_INCREMENT values; see the 20240801000000_OrderSchema migration for the
// column definitions.
type orders2Model struct {
	ID               int64
	RegistrationID   int64
	Created          time.Time
	Expires          time.Time
	// Actually a JSON list of ints, stored in the json-typed
	// "authorizationIDs" column.
	// NOTE(review): borp does not marshal []int64 to JSON automatically —
	// confirm this field has custom (de)serialization before use.
	AuthorizationIDs []int64
	Profile          string
	BeganProcessing  bool
	// Error holds the serialized finalization error, if any; NULL in the db
	// maps to a nil slice.
	Error             []byte
	CertificateSerial string
}

// authorizationsModel represents a row in the "authorizations" table.
//
// IDs are epoch-prefixed random values produced by newRandomID; see the
// 20240801000000_OrderSchema migration for the column definitions.
type authorizationsModel struct {
	ID             int64
	RegistrationID int64
	// IdentifierType and IdentifierValue correspond to the tinyint/varchar
	// identifier columns. NOTE(review): presumably the small-int encoding of
	// the identifier.IdentifierType enum — confirm the mapping.
	IdentifierType  uint8
	IdentifierValue string
	Created         time.Time
	Expires         time.Time
	Profile         string
	// Challenges is a single tinyint; presumably a bitmap of offered
	// challenge types — TODO confirm against the writer.
	Challenges uint8
	// Token is the raw 32-byte challenge token (binary(32) column).
	Token  []byte
	Status uint8
	// Actually a JSON list of ints, stored in the json-typed "validationIDs"
	// column; NULL until a validation attempt has been recorded.
	// NOTE(review): as with orders2Model.AuthorizationIDs, confirm custom
	// JSON (de)serialization.
	ValidationIDs []int64
}

// validationsModel represents a row in the "validations" table, which records
// one completed validation attempt (see the 20240801000000_OrderSchema
// migration).
type validationsModel struct {
	ID int64
	// Challenge is the tinyint encoding of the validation method attempted.
	Challenge   uint8
	AttemptedAt time.Time
	// Status is the tinyint encoding of the resulting status (valid/invalid).
	Status uint8
	// Record is the opaque JSON audit record of the attempt.
	Record string
}

// authzReuseModel represents a row in the "authzReuse" table.
type authzReuseModel struct {
ID int64 `db:"accountID_identifier"`
AuthzID int64
Expires time.Time
}
54 changes: 54 additions & 0 deletions sa/model_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@ import (
"crypto/x509/pkix"
"database/sql"
"encoding/base64"
"encoding/binary"
"fmt"
"math/big"
"net"
Expand All @@ -28,6 +29,59 @@ import (
"github.com/letsencrypt/boulder/test"
)

func TestNewRandomID(t *testing.T) {
	t.Parallel()

	// Each case pins the fake clock to a date and checks either the epoch
	// byte that newRandomID puts in the top byte of the ID, or the error it
	// returns for dates outside the valid epoch range.
	for _, tc := range []struct {
		name         string
		date         time.Time
		expectPrefix uint8
		expectError  string
	}{
		{
			name:        "in the past",
			date:        time.Date(2023, 01, 01, 00, 00, 00, 00, time.UTC),
			expectError: "invalid epoch",
		},
		{
			name:         "first epoch",
			date:         time.Date(2024, 05, 01, 00, 00, 00, 00, time.UTC),
			expectPrefix: 1,
		},
		{
			name:         "last epoch",
			date:         time.Date(2055, 07, 01, 00, 00, 00, 00, time.UTC),
			expectPrefix: 127,
		},
		{
			name:        "far future",
			date:        time.Date(2056, 01, 01, 00, 00, 00, 00, time.UTC),
			expectError: "invalid epoch",
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()
			clk := clock.NewFake()
			clk.Set(tc.date)
			id, err := newRandomID(clk)

			if tc.expectError != "" {
				test.AssertError(t, err, "expected error")
				test.AssertContains(t, err.Error(), tc.expectError)
			}

			if tc.expectPrefix != 0 {
				test.AssertNotError(t, err, "expected success")
				var got [8]byte
				binary.BigEndian.PutUint64(got[:], uint64(id))
				test.AssertEquals(t, got[0], tc.expectPrefix)
			}
		})
	}
}

func TestRegistrationModelToPb(t *testing.T) {
badCases := []struct {
name string
Expand Down
8 changes: 8 additions & 0 deletions sa/sa.go
Original file line number Diff line number Diff line change
Expand Up @@ -458,6 +458,14 @@ func (ssa *SQLStorageAuthority) DeactivateAuthorization2(ctx context.Context, re
// authorizations are created, but then their corresponding order is never
// created, leading to "invisible" pending authorizations.
func (ssa *SQLStorageAuthority) NewOrderAndAuthzs(ctx context.Context, req *sapb.NewOrderAndAuthzsRequest) (*corepb.Order, error) {
	// When the WriteNewOrderSchema feature flag is off, use the legacy write
	// path against the old schema.
	if !features.Get().WriteNewOrderSchema {
		return ssa.deprecatedNewOrderAndAuthzs(ctx, req)
	}

	// NOTE(review): WIP placeholder. Returning (nil, nil) reads as success
	// with a nil Order to callers; before enabling WriteNewOrderSchema this
	// must be replaced with the new-schema write path (or at minimum an
	// explicit "not implemented" error).
	return nil, nil
}

func (ssa *SQLStorageAuthority) deprecatedNewOrderAndAuthzs(ctx context.Context, req *sapb.NewOrderAndAuthzsRequest) (*corepb.Order, error) {
if req.NewOrder == nil {
return nil, errIncompleteRequest
}
Expand Down
Loading
Loading