diff --git a/.dockerignore b/.dockerignore
index 1b4e5f1f0f9..3647c99be6d 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -3,14 +3,15 @@
docker
packages/cosmic-swingset/t[0-9]
packages/cosmic-swingset/t[0-9].*
-packages/cosmic-swingset/lib/lib*.h
-packages/cosmic-swingset/lib/lib*.so
+golang/cosmos/build
+packages/deployment
packages/swingset-runner
packages/stat-logger
**/deployment.json
**/vars.tf
**/*.log
**/build
+**/bundles
**/__pycache__
**/*.egg-info
**/swingset-kernel-state
diff --git a/.github/actions/restore-golang/action.yml b/.github/actions/restore-golang/action.yml
index e2524c71056..6fd719e84f0 100644
--- a/.github/actions/restore-golang/action.yml
+++ b/.github/actions/restore-golang/action.yml
@@ -5,6 +5,10 @@ inputs:
go-version:
description: 'The version of Go to use'
required: true
+ path:
+ description: 'The relative path to the agoric-sdk directory'
+ required: false
+ default: '.'
runs:
using: composite
@@ -14,10 +18,12 @@ runs:
shell: bash
- uses: actions/checkout@v3
with:
+ path: ${{ inputs.path }}
clean: 'false'
submodules: 'true'
- uses: actions/setup-go@v4
with:
+ cache-dependency-path: ${{ inputs.path }}/golang/cosmos/go.sum
go-version: ${{ inputs.go-version }}
- uses: kenchan0130/actions-system-info@master
id: system-info
@@ -26,10 +32,11 @@ runs:
uses: actions/cache@v3
with:
path: ${{ env.GOPATH }}/pkg/mod
- key: ${{ runner.os }}-${{ runner.arch }}-${{ steps.system-info.outputs.release }}-go-${{ inputs.go-version }}-built-${{ hashFiles('go.sum') }}
+ key: ${{ runner.os }}-${{ runner.arch }}-${{ steps.system-info.outputs.release }}-go-${{ inputs.go-version }}-built-${{ hashFiles('golang/**/go.sum') }}
restore-keys: |
${{ runner.os }}-${{ runner.arch }}-${{ steps.system-info.outputs.release }}-go-${{ inputs.go-version }}-built-
- name: go mod download
+ working-directory: ${{ inputs.path }}/golang/cosmos
run: go mod download
shell: bash
if: steps.cache.outputs.cache-hit != 'true'
diff --git a/.github/workflows/deployment-test.yml b/.github/workflows/deployment-test.yml
index 07f413162f4..c32253ee49e 100644
--- a/.github/workflows/deployment-test.yml
+++ b/.github/workflows/deployment-test.yml
@@ -25,13 +25,17 @@ jobs:
- uses: actions/checkout@v3
with:
submodules: 'true'
+ path: ./agoric-sdk
- run: sudo packages/deployment/scripts/install-deps.sh
- - uses: ./.github/actions/restore-golang
+ working-directory: ./agoric-sdk
+ - uses: ./agoric-sdk/.github/actions/restore-golang
with:
go-version: '1.20'
- - uses: ./.github/actions/restore-node
+ path: ./agoric-sdk
+ - uses: ./agoric-sdk/.github/actions/restore-node
with:
node-version: 18.x
+ path: ./agoric-sdk
# Forces xsnap to initialize all memory to random data, which increases
# the chances the content of snapshots may deviate between validators
xsnap-random-init: '1'
@@ -61,54 +65,55 @@ jobs:
uses: actions/checkout@v3
with:
repository: Agoric/testnet-load-generator
- path: testnet-load-generator
+ path: ./testnet-load-generator
ref: ${{steps.get-loadgen-branch.outputs.result}}
- - name: Put repos under /usr/src where scripts expect them
- run: |
- set -e
- sudo mv "$GITHUB_WORKSPACE/testnet-load-generator" /usr/src/testnet-load-generator
- sudo cp -a "$GITHUB_WORKSPACE" /usr/src/agoric-sdk
- ln -s /usr/src/agoric-sdk/packages/deployment/bin/ag-setup-cosmos /usr/local/bin/ag-setup-cosmos
- working-directory: /
-
- name: Build cosmic-swingset dependencies
+ working-directory: ./agoric-sdk
run: |
set -e
cd packages/cosmic-swingset
make install
- working-directory: /usr/src/agoric-sdk
- - run: /usr/src/agoric-sdk/packages/deployment/scripts/integration-test.sh
+ - name: Make networks directory
+ run: |
+ set -e
+ mkdir networks
+ - name: Run integration test
+ working-directory: ./networks
+ run: |
+ set -xe
+ DOCKER_VOLUMES="$PWD/../agoric-sdk:/usr/src/agoric-sdk" \
+ LOADGEN=1 \
+ ../agoric-sdk/packages/deployment/scripts/integration-test.sh
timeout-minutes: 90
- working-directory: /usr/src/agoric-sdk
env:
NETWORK_NAME: chaintest
- name: capture results
if: always()
+ working-directory: ./networks
run: |
NOW=$(date -u +%Y%m%dT%H%M%S)
echo "NOW=$NOW" >> "$GITHUB_ENV"
# Stop the chain from running.
- packages/deployment/scripts/setup.sh play stop || true
+ ../agoric-sdk/packages/deployment/scripts/setup.sh play stop || true
# Get the results.
- packages/deployment/scripts/capture-integration-results.sh "${{ job.status == 'failure' }}"
+ ../agoric-sdk/packages/deployment/scripts/capture-integration-results.sh "${{ job.status == 'failure' }}"
# Tear down the nodes.
- echo yes | packages/deployment/scripts/setup.sh destroy || true
- working-directory: /usr/src/agoric-sdk
+ echo yes | ../agoric-sdk/packages/deployment/scripts/setup.sh destroy || true
env:
NETWORK_NAME: chaintest
- uses: actions/upload-artifact@v3
if: always()
with:
name: deployment-test-results-${{ env.NOW }}
- path: /usr/src/agoric-sdk/chaintest/results
+ path: ./networks/chaintest/results
- name: notify on failure
if: failure() && github.event_name != 'pull_request'
- uses: ./.github/actions/notify-status
+ uses: ./agoric-sdk/.github/actions/notify-status
with:
webhook: ${{ secrets.SLACK_WEBHOOK_URL }}
from: ${{ secrets.NOTIFY_EMAIL_FROM }}
diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml
index b59eaa15b37..7d1e387affe 100644
--- a/.github/workflows/golangci-lint.yml
+++ b/.github/workflows/golangci-lint.yml
@@ -24,15 +24,22 @@ jobs:
go-version: '>=1.20'
cache: false
check-latest: true
+ # https://github.com/golangci/golangci-lint/issues/3862#issuecomment-1572973588
+ - run: echo "GOROOT=$(go env GOROOT)" >> $GITHUB_ENV
- name: golangci-lint
uses: golangci/golangci-lint-action@v3
with:
- # golangci-lint version and command line arguments
- # v1.52.3 has a bug that causes it to fail depguard with:
- # level=error msg="[linter] depguard: create analyzer:
- # couldn't expand $gostd: could not read GOROOT directory:
- # open src: no such file or directory\nmust have an Allow and/or Deny package list"
- version: 'v1.52.2' # FIXME: change back to latest when it is fixed
+ version: latest
args: --timeout=3m
# for pull requests, show only new issues
- only-new-issues: true
+ # Too bad it's incompatible with working-directory.
+ # only-new-issues: true
+ only-new-issues: false
+ working-directory: ./golang/cosmos
+ - name: forbid %w error-wrapping format specifier
+ run: |
+ set -e
+ if find ./golang/cosmos -name '*.go' ! -name '*_test.go' -print0 | xargs -0 grep '%w'; then
+ echo "Found %w in ./golang/cosmos; please use %s instead."
+ exit 1
+ fi
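
The new lint step above forbids the `%w` error-wrapping verb in non-test Go files under `./golang/cosmos`. The diff doesn't spell out the rationale, but the semantic difference the check pins down is illustrated by this standalone sketch (illustrative only, not part of the PR):

```go
package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("not found")

func main() {
	// %w wraps: the cause stays programmatically discoverable via errors.Is/As.
	wrapped := fmt.Errorf("lookup failed: %w", errNotFound)
	// %s formats: only the text of the cause is kept.
	flattened := fmt.Errorf("lookup failed: %s", errNotFound)

	fmt.Println(errors.Is(wrapped, errNotFound))   // true
	fmt.Println(errors.Is(flattened, errNotFound)) // false
}
```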
diff --git a/.github/workflows/test-all-packages.yml b/.github/workflows/test-all-packages.yml
index 2027e01e5eb..a1a6c167974 100644
--- a/.github/workflows/test-all-packages.yml
+++ b/.github/workflows/test-all-packages.yml
@@ -569,11 +569,22 @@ jobs:
matrix:
bootstrap-version: ['test', 'main']
steps:
+ - name: free up additional worker space
+ run: |
+ # Workaround to provide additional free space for testing.
+ # https://github.com/actions/virtual-environments/issues/2840
+ df -h
+ sudo rm -rf /usr/share/dotnet
+ sudo rm -rf /opt/ghc
+ sudo rm -rf "/usr/local/share/boost"
+ sudo rm -rf "$AGENT_TOOLSDIRECTORY"
+ df -h
+
- uses: actions/checkout@v3
- name: docker build (sdk)
run: cd packages/deployment && ./scripts/test-docker-build.sh | $TEST_COLLECT
- name: docker build upgrade test
- run: cd packages/deployment/upgrade-test && docker build --build-arg BOOTSTRAP_MODE=${{ matrix.bootstrap-version }} -t docker-upgrade-test:latest -f Dockerfile upgrade-test-scripts
+ run: cd packages/deployment/upgrade-test && docker build --build-arg BOOTSTRAP_MODE=${{ matrix.bootstrap-version }} --build-arg DEST_IMAGE=ghcr.io/agoric/agoric-sdk:latest -t docker-upgrade-test:latest -f Dockerfile upgrade-test-scripts
- name: docker run upgrade final stage
run: docker run --env "DEST=0" docker-upgrade-test:latest
- uses: ./.github/actions/post-test
diff --git a/.github/workflows/test-golang.yml b/.github/workflows/test-golang.yml
index 5661c7a0657..c5cba644049 100644
--- a/.github/workflows/test-golang.yml
+++ b/.github/workflows/test-golang.yml
@@ -20,7 +20,7 @@ jobs:
with:
go-version: '1.20'
- name: go test
- run: cd golang && go test -coverprofile=coverage.txt -covermode=atomic ./...
+ run: cd golang/cosmos && go test -coverprofile=coverage.txt -covermode=atomic ./...
- uses: ./.github/actions/post-test
if: (success() || failure())
continue-on-error: true
diff --git a/bin/agd b/bin/agd
index 69812305f85..e6a922b3476 100755
--- a/bin/agd
+++ b/bin/agd
@@ -148,7 +148,7 @@ fi
print=()
fi
print+=( -print )
- src=$(find go.* "$GOLANG_DIR" \( -name '*.go' -o -name 'go.*' \) "${print[@]}" | head -1 || true)
+ src=$(find "$GOLANG_DIR" \( -name '*.go' -o -name 'go.*' \) "${print[@]}" | head -1 || true)
test -z "$src" || {
echo "At least $src is newer than $stamp"
@@ -184,11 +184,6 @@ fi
}
)
-if $BUILD_ONLY; then
- echo "Build complete." 1>&2
- exit 0
-fi
-
# the xsnap binary lives in a platform-specific directory
unameOut="$(uname -s)"
case "${unameOut}" in
@@ -196,9 +191,15 @@ case "${unameOut}" in
Darwin*) platform=mac;;
*) platform=win;;
esac
+
# check the xsnap version against our baked-in notion of what version we should be using
xsnap_version=$("${thisdir}/../packages/xsnap/xsnap-native/xsnap/build/bin/${platform}/release/xsnap-worker" -n)
-[[ "${xsnap_version}" == "agoric-upgrade-10" ]] || fatal "xsnap out of date"
+[[ "${xsnap_version}" == "${XSNAP_VERSION}" ]] || fatal "xsnap version mismatch; expected ${XSNAP_VERSION}, got ${xsnap_version}"
+
+if $BUILD_ONLY; then
+ echo "Build complete." 1>&2
+ exit 0
+fi
# Run the built Cosmos daemon.
# shellcheck disable=SC2031
diff --git a/docs/architecture/state-sync.md b/docs/architecture/state-sync.md
new file mode 100644
index 00000000000..5d403707586
--- /dev/null
+++ b/docs/architecture/state-sync.md
@@ -0,0 +1,326 @@
+# State-sync
+
+## Creating Snapshot
+
+```mermaid
+sequenceDiagram
+ box whitesmoke Main goroutine
+ participant TM as Tendermint
+ participant A-M as App
+ participant MS-M as MultiStore
+ participant SSES-M as SwingSet ExtensionSnapshotter
+ participant SSEH-M as SwingStoreExportsHandler
+ end
+
+ box whitesmoke App snapshot goroutine
+ participant SSEH-AS as SwingStoreExportsHandler
+ participant SSES-AS as SwingSet ExtensionSnapshotter
+ participant A-AS as App
+ participant SM-AS as Snapshot manager
+ end
+
+ box whitesmoke Cosmos snapshot goroutine
+ participant SM-CS as Snapshot manager
+ participant MS-CS as MultiStore
+ participant SSES-CS as SwingSet ExtensionSnapshotter
+ participant SSEH-CS as SwingStoreExportsHandler
+ participant D-CS as Disk
+ end
+
+ box whitesmoke JS Main process
+ participant CM as Chain Main
+ participant D as Disk
+ end
+
+ box whitesmoke JS Export process
+ participant SSE as SwingStoreExport
+ participant Exporter as Exporter
+ participant D-E as Disk
+ end
+
+ TM->>+A-M: Commit
+ A-M->>+SSEH-M: WaitUntilSwingStoreExportStarted()
+ SSEH-M-->>-A-M:
+ A-M->>+CM: COMMIT_BLOCK
+ CM->>CM: swingStore.commit()
+ CM-->>-A-M:
+ A-M->>A-M: BaseApp.CommitWithoutSnapshot()
+ A-M->>+CM: AFTER_COMMIT_BLOCK
+ CM-->>-A-M:
+ A-M->>A-M: isSnapshotHeight: false
+ A-M-->>-TM:
+
+ TM->>+A-M: BeginBlock
+ A-M->>+CM: BEGIN_BLOCK
+ CM-->>-A-M:
+ A-M-->>-TM:
+
+ TM->>+A-M: EndBlock
+ A-M->>+CM: END_BLOCK
+ CM->>CM: runKernel()
+ CM-)A-M: swingset->swingStoreUpdateExportData(exportDataEntries)
+ A-M->>A-M: swingStore := NewPrefixStore("swingStore.")
+ loop each data entry
+ alt has value
+ A-M->>+MS-M: swingStore.Set(key, value)
+ else no value
+ A-M->>+MS-M: swingStore.Delete(key)
+ end
+ MS-M-->>-A-M:
+ end
+ CM-->>-A-M:
+ A-M-->>-TM:
+
+ TM->>+A-M: Commit
+ A-M->>+SSEH-M: WaitUntilSwingStoreExportStarted()
+ SSEH-M-->>-A-M:
+ A-M->>+CM: COMMIT_BLOCK
+ CM->>CM: swingStore.commit()
+ CM-->>-A-M:
+ A-M->>A-M: BaseApp.CommitWithoutSnapshot()
+ A-M->>+CM: AFTER_COMMIT_BLOCK
+ CM-->>-A-M:
+ A-M->>A-M: isSnapshotHeight: true
+ A-M->>+SSES-M: InitiateSnapshot()
+ SSES-M->>+SSEH-M: InitiateExport()
+ SSEH-M->>SSEH-M: checkNotActive()
+ SSEH-M->>SSEH-M: activeOperation = operationDetails{}
+ SSEH-M-)+SSEH-AS: go
+ SSEH-M-->>-SSES-M:
+ SSES-M-->>-A-M:
+ A-M-->>-TM:
+
+ par App Snapshot
+ SSEH-AS->>+CM: SWING_STORE_EXPORT/initiate
+ CM->>+D: MkDir(exportDir)
+ D-->>-CM:
+ CM-)+SSE: initiateSwingStoreExport(exportDir)
+ CM->>CM: await started<br/>(blocking)
+ CM-->>-SSEH-AS:
+ alt not initiated
+ SSEH-AS-)SSEH-M: exportStartedResult <- err<br/>close(exportStartedResult)
+ SSEH-AS-)SSEH-M: exportDone <- err
+ else initiated
+ SSEH-AS-)SSEH-M: close(exportStartedResult)
+ alt retrieval
+ SSEH-AS->>+SSES-AS: OnExportStarted()
+ SSES-AS->>+A-AS: BaseApp.Snapshot()
+ A-AS->>+SM-AS: Create()
+ SM-AS-)+SM-CS: go createSnapshot()
+ SM-CS->>+MS-CS: Snapshot()
+ loop each IAVL node
+ MS-CS->>+SM-CS: WriteMsg()
+ SM-CS-)SM-AS: chunks <- chunk
+ SM-CS-->>-MS-CS:
+ end
+ MS-CS-->>-SM-CS:
+ SM-CS->>+SSES-CS: SnapshotExtension()
+ SSES-CS->>+SSEH-CS: retrieveExport()
+ SSEH-CS->>+CM: SWING_STORE_EXPORT/retrieve
+ CM->>CM: await done<br/>(blocking)
+ CM-->>-SSEH-CS: exportDir
+ SSEH-CS->>+D-CS: Read(export-manifest.json)
+ D-CS-->>-SSEH-CS:
+ SSEH-CS->>+SSES-CS: OnExportRetrieved()
+ loop
+ SSES-CS->>+SSEH-CS: provider.ReadNextArtifact()
+ SSEH-CS->>+D-CS: Read(artifactFile)
+ D-CS-->>-SSEH-CS:
+ SSEH-CS-->>-SSES-CS: artifact{name, data}
+ SSES-CS->>+SM-CS: payloadWriter(artifact)
+ SM-CS-)SM-AS: chunks <- chunk
+ SM-CS-->>-SSES-CS:
+ end
+ SSES-CS-->>-SSEH-CS:
+ SSEH-CS->>+D-CS: Delete(exportDir)
+ D-CS-->>-SSEH-CS:
+ SSEH-CS-->>-SSES-CS:
+ SSES-CS-->>-SM-CS:
+ SM-CS-)-SM-AS: close(chunks)
+ SM-AS->>SM-AS: Save()
+ SM-AS-->>-A-AS:
+ A-AS-->>-SSES-AS:
+ SSES-AS-->>-SSEH-AS:
+ else no retrieval
+ SSEH-AS->>+SSES-AS: OnExportStarted()
+ SSES-AS-->>-SSEH-AS:
+ SSEH-AS->>+CM: SWING_STORE_EXPORT/discard
+ CM-)SSE: Stop()
+ SSE-)CM: done::reject()
+ CM->>CM: await done
+ CM->>+D: Delete(exportDir)
+ D-->>-CM:
+ CM-->>-SSEH-AS:
+ SSEH-AS-)SSEH-M: exportDone <- err
+ end
+ end
+ SSEH-AS-)SSEH-M: close(exportDone)
+ deactivate SSEH-AS
+ end
+
+ par JS SwingStore export
+ SSE->>Exporter: makeExporter()
+ Exporter->>SSE:
+ SSE-)CM: started::resolve()
+ opt Export Data, not used in state-sync
+ SSE->>Exporter: getExportData()
+ Exporter-)SSE: export data iterator
+ loop each data entry
+ SSE->>+D-E: Append(export-data.jsonl, "JSON(entry tuple)\n")
+ D-E-->>-SSE:
+ end
+ end
+ SSE->>Exporter: getArtifactNames()
+ Exporter--)SSE: names async iterator
+ loop each artifact name
+ SSE->>Exporter: getArtifact(name)
+ Exporter--)SSE: artifactStream
+ SSE->>+D-E: Write(name, artifactStream)
+ D-E-->>-SSE:
+ end
+ SSE->>+D-E: Write(export-manifest.jsonl, manifest)
+ D-E-->>-SSE:
+ SSE-)CM: done::resolve()
+ deactivate SSE
+ end
+
+ Note over TM, A-M: BeginBlock, EndBlock
+
+ TM->>+A-M: Commit
+ A-M->>+SSEH-M: WaitUntilSwingStoreExportStarted()
+ SSEH-M->>SSEH-M: err = <-exportStartedResult<br/>(blocking)
+ SSEH-M-->>-A-M:
+ A-M->>+CM: COMMIT_BLOCK
+ CM->>CM: await started<br/>(blocking)
+ CM->>CM: swingStore.commit()
+ CM-->>-A-M:
+ A-M->>A-M: BaseApp.CommitWithoutSnapshot()
+ A-M->>+CM: AFTER_COMMIT_BLOCK
+ CM-->>-A-M:
+ A-M->>A-M: isSnapshotHeight: false
+ A-M-->>-TM:
+```
+
+## Restoring Snapshot
+
+```mermaid
+sequenceDiagram
+ box whitesmoke Main goroutine
+ participant TM as Tendermint
+ participant A-M as BaseApp
+ participant SM-M as Snapshot Manager
+ end
+
+ box whitesmoke Cosmos snapshot goroutine
+ participant SM-CS as Snapshot manager
+ participant MS-CS as MultiStore
+ participant SSES-CS as SwingSet ExtensionSnapshotter
+ participant SSEH-CS as SwingStoreExportsHandler
+ participant D-CS as Disk
+ end
+
+ box whitesmoke JS Main process
+ participant CM as Chain Main
+ participant D as Disk
+ participant SSI as StateSyncImport
+ participant ISS as importSwingStore
+ participant SS as SwingStore
+ end
+
+ TM->>+A-M: OfferSnapshot
+ A-M->>+SM-M: Restore()
+ SM-M-)+SM-CS: go restoreSnapshot()
+ SM-M-->>-A-M:
+ A-M-->>-TM:
+
+ par Snapshot Restore
+ SM-CS->>+MS-CS: Restore()
+ loop IAVL snapshot items
+ MS-CS->>+SM-CS: protoReader.ReadMsg()
+ SM-CS->>+SM-M: chunk = <-chunks
+ SM-M-->>-SM-CS:
+ SM-CS-->>-MS-CS:
+ MS-CS->>MS-CS: importer.Add(node)
+ end
+ MS-CS-->>-SM-CS:
+
+ opt loop over extensions
+ SM-CS->>+SSES-CS: RestoreExtension()
+ SSES-CS->>+SSEH-CS: RestoreExport()
+ SSEH-CS->>SSEH-CS: checkNotActive()
+ SSEH-CS->>SSEH-CS: activeOperation = operationDetails{}
+ SSEH-CS->>+D-CS: MkDir(exportDir)
+ D-CS-->>-SSEH-CS:
+ SSEH-CS->>+SSES-CS: provider.GetExportDataReader()
+ SSES-CS->>MS-CS: PrefixStore.Iterator()<br/>("swingStore.")
+ MS-CS--)SSES-CS: sdk.Iterator
+ SSES-CS--)-SSEH-CS: export data reader
+ loop each data entry
+ SSEH-CS->>+D-CS: Append(export-data.jsonl,<br/>"JSON(entry tuple)\n")
+ D-CS-->>-SSEH-CS:
+ end
+ loop extension snapshot items
+ SSEH-CS->>+SSES-CS: provider.ReadNextArtifact()
+ SSES-CS->>+SM-CS: payloadReader()
+ SM-CS->>+SM-M: chunk = <-chunks
+ SM-M-->>-SM-CS:
+ SM-CS-->>-SSES-CS: extension payloadBytes
+ SSES-CS->>SSES-CS: artifact = parse(payloadBytes)
+ SSES-CS->>-SSEH-CS: artifact
+ SSEH-CS->>+D-CS: Write(sanitizedFilename, artifact.data)
+ D-CS-->>-SSEH-CS:
+ end
+ SSEH-CS->>+D-CS: Write(export-manifest.jsonl, manifest)
+ D-CS-->>-SSEH-CS:
+ SSEH-CS->>+CM: SWING_STORE_EXPORT/restore
+ CM->>+SSI: performStateSyncImport()
+ SSI->>+D: Read(export-manifest.json)
+ D-->>-SSI:
+ SSI->>+ISS: importSwingStore()
+ ISS->>ISS: initSwingStore()
+ ISS->>+SSI: exporter.getExportData()
+ SSI->>+D: Read(export-data.json)
+ D-->>-SSI:
+ SSI-->>-ISS: export data iterator
+ ISS->>+SS: restore kv and metadata
+ SS-->>-ISS:
+ ISS->>+SSI: exporter.getArtifactNames()
+ SSI--)-ISS: names async iterator
+ loop each artifact name
+ ISS->>+SSI: provider.getArtifact()
+ SSI->>+D: Read(artifactFilename)
+ D-->>-SSI:
+ SSI--)-ISS: artifactStream
+ ISS->>+SS: restore artifact
+ SS-->>-ISS:
+ end
+ ISS-->>-SSI:
+ SSI->>+SS: set(host.blockHeight)
+ SS-->>-SSI:
+ SSI-->>-CM:
+ CM-->>-SSEH-CS:
+ SSEH-CS->>+D-CS: Delete(exportDir)
+ D-CS-->>-SSEH-CS:
+ SSEH-CS-->>-SSES-CS:
+ SSES-CS-->>-SM-CS:
+ end
+ SM-CS-)-SM-M: chRestoreDone <- restoreDone{}<br/>close(chRestoreDone)
+ end
+
+ TM->>+A-M: ApplySnapshotChunk
+ A-M->>+SM-M: RestoreChunk()
+ SM-M->>SM-M: select chRestoreDone, default
+ alt done (abnormal)
+ SM-M-->>A-M: false, error
+ else normal
+ SM-M-)SM-M: chunks <- chunk
+ alt chunks remaining
+ SM-M-->>A-M: false
+ else last chunk
+ SM-M->>SM-M: <-chRestoreDone<br/>(blocking)
+ SM-M-->>-A-M: true
+ end
+ end
+ A-M-->>-TM:
+
+```
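
The diagrams above lean on two channels, `exportStartedResult` and `exportDone`, to let `Commit` block until an in-flight swing-store export has started (and, where needed, finished). A minimal, self-contained Go sketch of that handshake, with names simplified from the diagram rather than taken from the actual agoric-sdk code:

```go
package main

import (
	"errors"
	"fmt"
)

// exportOp sketches the handshake: the main goroutine initiates an export,
// then later blocks until the export goroutine has at least *started*
// (so commits cannot outrun the export); completion is signaled separately.
type exportOp struct {
	started chan error // receives/closes once the export has begun (or failed)
	done    chan error // receives/closes once the export has finished
}

func initiateExport(fail bool) *exportOp {
	op := &exportOp{started: make(chan error, 1), done: make(chan error, 1)}
	go func() {
		defer close(op.done)
		if fail {
			err := errors.New("initiate failed")
			op.started <- err
			close(op.started)
			op.done <- err
			return
		}
		close(op.started) // export is underway; commits may proceed
		// ... write export artifacts here ...
	}()
	return op
}

// waitUntilStarted plays the role of WaitUntilSwingStoreExportStarted:
// Commit blocks here so a commit can never outrun the export's snapshot.
func (op *exportOp) waitUntilStarted() error { return <-op.started }

// waitUntilDone plays the role of WaitUntilSwingStoreExportDone.
func (op *exportOp) waitUntilDone() error { return <-op.done }

func main() {
	op := initiateExport(false)
	fmt.Println("started:", op.waitUntilStarted()) // <nil>: safe to commit
	fmt.Println("done:", op.waitUntilDone())       // <nil>: artifacts complete
}
```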
diff --git a/golang/cosmos/Makefile b/golang/cosmos/Makefile
index 2e4250d5859..02696a0284b 100644
--- a/golang/cosmos/Makefile
+++ b/golang/cosmos/Makefile
@@ -38,15 +38,12 @@ ldflags += -compressdwarf=false
gcflags += -N -l
endif
-ldflags_helper = $(ldflags) \
- -X github.com/cosmos/cosmos-sdk/version.AppName=ag-cosmos-helper
BUILD_FLAGS := -tags "$(build_tags)" -gcflags '$(gcflags)' -ldflags '$(ldflags)'
-BUILD_FLAGS_HELPER := -buildmode=exe -tags "$(build_tags)" -gcflags '$(gcflags)' -ldflags '$(ldflags_helper)'
all: compile-chain
-compile-chain: compile-agd compile-helper compile-daemon
-compile-go: compile-agd compile-helper compile-libdaemon
+compile-chain: compile-agd compile-daemon
+compile-go: compile-agd compile-libdaemon
compile-node: node-compile-gyp
compile-daemon: compile-libdaemon node-compile-gyp
@@ -60,10 +57,10 @@ node-compile-gyp:
fi
compile-agd: go-mod-cache
- go build -v $(MOD_READONLY) $(BUILD_FLAGS_HELPER) -o build/agd ./cmd/agd
+ go build -v $(MOD_READONLY) $(BUILD_FLAGS) -buildmode=exe -o build/agd ./cmd/agd
install-agd: go-mod-cache
- go install -v $(MOD_READONLY) $(BUILD_FLAGS_HELPER) ./cmd/agd
+ go install -v $(MOD_READONLY) $(BUILD_FLAGS) -buildmode=exe ./cmd/agd
# Only run from the package.json build:gyp script.
compile-gyp:
@@ -71,18 +68,14 @@ compile-gyp:
node-gyp configure build $(GYP_DEBUG) || { status=$$?; rm -f binding.gyp; exit $$status; }
rm -f binding.gyp
-compile-helper: go-mod-cache
- go build -v $(MOD_READONLY) $(BUILD_FLAGS_HELPER) -o build/ag-cosmos-helper ./cmd/helper
-
compile-libdaemon: go-mod-cache
go build -v $(MOD_READONLY) $(BUILD_FLAGS) -buildmode=c-shared -o build/libagcosmosdaemon.so ./cmd/libdaemon/main.go
- test "`uname -s 2>/dev/null`" != Darwin || install_name_tool -id $$PWD/build/libagcosmosdaemon.so build/libagcosmosdaemon.so
-go-mod-cache: ../../go.sum
+go-mod-cache: go.sum
@echo "--> Download go modules to local cache"
@go mod download
-../../go.sum: ../../go.mod
+go.sum: go.mod
@echo "--> Ensure dependencies have not been modified"
GO111MODULE=on go mod verify
@@ -140,7 +133,11 @@ BUF_VERSION ?= 0.56.0
PROTOC_VERSION ?= 3.11.2
ifeq ($(UNAME_S),Linux)
- PROTOC_ZIP ?= protoc-${PROTOC_VERSION}-linux-x86_64.zip
+ ifeq ($(UNAME_M),aarch64)
+ PROTOC_ZIP ?= protoc-${PROTOC_VERSION}-linux-aarch_64.zip
+ else
+ PROTOC_ZIP ?= protoc-${PROTOC_VERSION}-linux-x86_64.zip
+ endif
endif
ifeq ($(UNAME_S),Darwin)
PROTOC_ZIP ?= protoc-${PROTOC_VERSION}-osx-x86_64.zip
diff --git a/golang/cosmos/app/app.go b/golang/cosmos/app/app.go
index 1f26380e46d..f5932d90c95 100644
--- a/golang/cosmos/app/app.go
+++ b/golang/cosmos/app/app.go
@@ -8,6 +8,7 @@ import (
"net/http"
"os"
"path/filepath"
+ "runtime/debug"
"time"
"github.com/cosmos/cosmos-sdk/baseapp"
@@ -21,6 +22,7 @@ import (
servertypes "github.com/cosmos/cosmos-sdk/server/types"
"github.com/cosmos/cosmos-sdk/simapp"
sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/types/errors"
"github.com/cosmos/cosmos-sdk/types/module"
"github.com/cosmos/cosmos-sdk/version"
"github.com/cosmos/cosmos-sdk/x/auth"
@@ -100,11 +102,13 @@ import (
tmjson "github.com/tendermint/tendermint/libs/json"
"github.com/tendermint/tendermint/libs/log"
tmos "github.com/tendermint/tendermint/libs/os"
+ tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
dbm "github.com/tendermint/tm-db"
gaiaappparams "github.com/Agoric/agoric-sdk/golang/cosmos/app/params"
appante "github.com/Agoric/agoric-sdk/golang/cosmos/ante"
+ agorictypes "github.com/Agoric/agoric-sdk/golang/cosmos/types"
"github.com/Agoric/agoric-sdk/golang/cosmos/vm"
"github.com/Agoric/agoric-sdk/golang/cosmos/x/lien"
"github.com/Agoric/agoric-sdk/golang/cosmos/x/swingset"
@@ -122,6 +126,14 @@ import (
const appName = "agoric"
+// FlagSwingStoreExportDir defines the config flag used to specify where a
+// genesis swing-store export is expected. For start from genesis, the default
+// value is config/swing-store in the home directory. For genesis export, the
+// value is always a "swing-store" directory sibling to the exported
+// genesis.json file.
+// TODO: document this flag in config, likely alongside the genesis path
+const FlagSwingStoreExportDir = "swing-store-export-dir"
+
var (
// DefaultNodeHome default home directories for the application daemon
DefaultNodeHome string
@@ -196,11 +208,15 @@ type GaiaApp struct { // nolint: golint
interfaceRegistry types.InterfaceRegistry
controllerInited bool
+ bootstrapNeeded bool
lienPort int
+ swingsetPort int
vbankPort int
vibcPort int
vstoragePort int
+ upgradePlan *upgradetypes.Plan
+
invCheckPeriod uint
// keys to access the substores
@@ -228,12 +244,13 @@ type GaiaApp struct { // nolint: golint
FeeGrantKeeper feegrantkeeper.Keeper
AuthzKeeper authzkeeper.Keeper
- SwingSetKeeper swingset.Keeper
- SwingSetSnapshotter swingset.Snapshotter
- VstorageKeeper vstorage.Keeper
- VibcKeeper vibc.Keeper
- VbankKeeper vbank.Keeper
- LienKeeper lien.Keeper
+ SwingStoreExportsHandler swingset.SwingStoreExportsHandler
+ SwingSetSnapshotter swingset.ExtensionSnapshotter
+ SwingSetKeeper swingset.Keeper
+ VstorageKeeper vstorage.Keeper
+ VibcKeeper vibc.Keeper
+ VbankKeeper vbank.Keeper
+ LienKeeper lien.Keeper
// make scoped keepers public for test purposes
ScopedIBCKeeper capabilitykeeper.ScopedKeeper
@@ -429,17 +446,20 @@ func NewAgoricApp(
// This function is tricky to get right, so we build it ourselves.
callToController := func(ctx sdk.Context, str string) (string, error) {
+ app.CheckControllerInited(true)
// We use SwingSet-level metering to charge the user for the call.
- app.MustInitController(ctx)
defer vm.SetControllerContext(ctx)()
return sendToController(true, str)
}
+ setBootstrapNeeded := func() {
+ app.bootstrapNeeded = true
+ }
+
app.VstorageKeeper = vstorage.NewKeeper(
keys[vstorage.StoreKey],
)
- vm.RegisterPortHandler("vstorage", vstorage.NewStorageHandler(app.VstorageKeeper))
- app.vstoragePort = vm.GetPort("vstorage")
+ app.vstoragePort = vm.RegisterPortHandler("vstorage", vstorage.NewStorageHandler(app.VstorageKeeper))
// The SwingSetKeeper is the Keeper from the SwingSet module
app.SwingSetKeeper = swingset.NewKeeper(
@@ -448,11 +468,36 @@ func NewAgoricApp(
app.VstorageKeeper, vbanktypes.ReservePoolName,
callToController,
)
+ app.swingsetPort = vm.RegisterPortHandler("swingset", swingset.NewPortHandler(app.SwingSetKeeper))
+
+ app.SwingStoreExportsHandler = *swingsetkeeper.NewSwingStoreExportsHandler(
+ app.Logger(),
+ func(action vm.Jsonable, mustNotBeInited bool) (string, error) {
+ if mustNotBeInited {
+ app.CheckControllerInited(false)
+ }
+
+ bz, err := json.Marshal(action)
+ if err != nil {
+ return "", err
+ }
+ return sendToController(true, string(bz))
+ },
+ )
- app.SwingSetSnapshotter = swingsetkeeper.NewSwingsetSnapshotter(
+ getSwingStoreExportDataShadowCopyReader := func(height int64) agorictypes.KVEntryReader {
+ ctx := app.NewUncachedContext(false, tmproto.Header{Height: height})
+ exportDataIterator := app.SwingSetKeeper.GetSwingStore(ctx).Iterator(nil, nil)
+ if !exportDataIterator.Valid() {
+ exportDataIterator.Close()
+ return nil
+ }
+ return agorictypes.NewKVIteratorReader(exportDataIterator)
+ }
+ app.SwingSetSnapshotter = *swingsetkeeper.NewExtensionSnapshotter(
bApp,
- app.SwingSetKeeper,
- sendToController,
+ &app.SwingStoreExportsHandler,
+ getSwingStoreExportDataShadowCopyReader,
)
app.VibcKeeper = vibc.NewKeeper(
@@ -551,6 +596,7 @@ func NewAgoricApp(
app.EvidenceKeeper = *evidenceKeeper
skipGenesisInvariants := cast.ToBool(appOpts.Get(crisis.FlagSkipGenesisInvariants))
+ swingStoreExportDir := cast.ToString(appOpts.Get(FlagSwingStoreExportDir))
// NOTE: Any module instantiated in the module manager that is later modified
// must be passed by reference here.
@@ -580,7 +626,7 @@ func NewAgoricApp(
transferModule,
icaModule,
vstorage.NewAppModule(app.VstorageKeeper),
- swingset.NewAppModule(app.SwingSetKeeper),
+ swingset.NewAppModule(app.SwingSetKeeper, &app.SwingStoreExportsHandler, setBootstrapNeeded, app.ensureControllerInited, swingStoreExportDir),
vibcModule,
vbankModule,
lienModule,
@@ -613,6 +659,8 @@ func NewAgoricApp(
paramstypes.ModuleName,
vestingtypes.ModuleName,
vstorage.ModuleName,
+ // This will cause the swingset controller to init if it hasn't yet, passing
+ // any upgrade plan or bootstrap flag when starting at an upgrade height.
swingset.ModuleName,
vibc.ModuleName,
vbank.ModuleName,
@@ -744,11 +792,11 @@ func NewAgoricApp(
app.UpgradeKeeper.SetUpgradeHandler(
upgradeName,
- upgrade10Handler(app, upgradeName),
+ upgrade11Handler(app, upgradeName),
)
app.UpgradeKeeper.SetUpgradeHandler(
upgradeNameTest,
- upgrade10Handler(app, upgradeNameTest),
+ upgrade11Handler(app, upgradeNameTest),
)
if loadLatest {
@@ -771,39 +819,115 @@ func NewAgoricApp(
return app
}
-func upgrade10Handler(app *GaiaApp, targetUpgrade string) func(sdk.Context, upgradetypes.Plan, module.VersionMap) (module.VersionMap, error) {
- return func(ctx sdk.Context, plan upgradetypes.Plan, fromVm module.VersionMap) (module.VersionMap, error) {
- // change bootrap gov parameter to correct vaults parameter
+type swingStoreMigrationEventHandler struct {
+ swingStore sdk.KVStore
+}
- prevParams := app.SwingSetKeeper.GetParams(ctx)
+func (eventHandler swingStoreMigrationEventHandler) OnExportStarted(height uint64, retrieveSwingStoreExport func() error) error {
+ return retrieveSwingStoreExport()
+}
- ctx.Logger().Info("Pre-upgrade swingset params", "BeansPerUnit", fmt.Sprintf("%v", prevParams.BeansPerUnit), "BootstrapVatConfig", prevParams.BootstrapVatConfig)
+func (eventHandler swingStoreMigrationEventHandler) OnExportRetrieved(provider swingsetkeeper.SwingStoreExportProvider) (err error) {
+ exportDataReader, err := provider.GetExportDataReader()
+ if err != nil {
+ return err
+ }
+ defer exportDataReader.Close()
+
+ var hasExportData bool
- switch targetUpgrade {
- case upgradeName:
- prevParams.BootstrapVatConfig = "@agoric/vats/decentral-main-vaults-config.json"
- case upgradeNameTest:
- prevParams.BootstrapVatConfig = "@agoric/vats/decentral-test-vaults-config.json"
- default:
- return fromVm, fmt.Errorf("invalid upgrade name")
+ for {
+ entry, err := exportDataReader.Read()
+ if err == io.EOF {
+ break
+ } else if err != nil {
+ return err
+ }
+ hasExportData = true
+ if !entry.HasValue() {
+ return fmt.Errorf("no value for export data key %s", entry.Key())
+ }
+ eventHandler.swingStore.Set([]byte(entry.Key()), []byte(entry.StringValue()))
+ }
+ if !hasExportData {
+ return fmt.Errorf("export data had no entries")
+ }
+ return nil
+}
+
+// upgrade11Handler performs standard upgrade actions plus custom actions for upgrade-11.
+func upgrade11Handler(app *GaiaApp, targetUpgrade string) func(sdk.Context, upgradetypes.Plan, module.VersionMap) (module.VersionMap, error) {
+ return func(ctx sdk.Context, plan upgradetypes.Plan, fromVm module.VersionMap) (module.VersionMap, error) {
+ app.CheckControllerInited(false)
+ // Record the plan to send to SwingSet
+ app.upgradePlan = &plan
+
+ // Perform swing-store migrations. We do this in the app upgrade handler
+ // since it involves multiple modules (x/vstorage and x/swingset) which
+ // don't strictly have a version change on their own.
+
+ // We are at the beginning of the upgrade block, so all stores are committed
+ // as of the end of the previous block.
+ savedBlockHeight := uint64(ctx.BlockHeight() - 1)
+
+ // First, repair swing-store metadata in case this node was previously
+ // initialized from a state-sync snapshot. This is done with a check on the
+ // block height to catch any lingering mismatch early.
+ // Only entries related to missing historical metadata are imported, but we
+ // don't know here which entries those are, so we provide all of them.
+ getSwingStoreExportDataFromVstorage := func() (reader agorictypes.KVEntryReader, err error) {
+ return agorictypes.NewVstorageDataEntriesReader(
+ app.VstorageKeeper.ExportStorageFromPrefix(ctx, swingsetkeeper.StoragePathSwingStore),
+ ), nil
+ }
+
+ // We're not restoring any artifacts to swing-store, nor do we have any to provide.
+ readNoArtifact := func() (artifact swingsettypes.SwingStoreArtifact, err error) {
+ return artifact, io.EOF
}
- app.SwingSetKeeper.SetParams(ctx, prevParams)
- ctx.Logger().Info("Post-upgrade swingset params", "BeansPerUnit", fmt.Sprintf("%v", prevParams.BeansPerUnit), "BootstrapVatConfig", prevParams.BootstrapVatConfig)
+ err := app.SwingStoreExportsHandler.RestoreExport(
+ swingsetkeeper.SwingStoreExportProvider{
+ BlockHeight: savedBlockHeight,
+ GetExportDataReader: getSwingStoreExportDataFromVstorage,
+ ReadNextArtifact: readNoArtifact,
+ },
+ swingsetkeeper.SwingStoreRestoreOptions{
+ ArtifactMode: swingsetkeeper.SwingStoreArtifactModeNone,
+ ExportDataMode: swingsetkeeper.SwingStoreExportDataModeRepairMetadata,
+ },
+ )
+ if err != nil {
+ return nil, err
+ }
- app.VstorageKeeper.MigrateNoDataPlaceholders(ctx) // upgrade-10 only
- normalizeModuleAccount(ctx, app.AccountKeeper, vbanktypes.ProvisionPoolName)
- normalizeModuleAccount(ctx, app.AccountKeeper, vbanktypes.ReservePoolName)
+ // Then migrate the swing-store shadow copy:
+ // 1. Remove the swing-store "export data" shadow-copy entries from vstorage.
+ // 2. Export swing-store "export-data" (as of the previous block) through a
+ // handler that writes every entry into the swingset module's new Store.
+ app.VstorageKeeper.RemoveEntriesWithPrefix(ctx, swingsetkeeper.StoragePathSwingStore)
+ err = app.SwingStoreExportsHandler.InitiateExport(
+ savedBlockHeight,
+ swingStoreMigrationEventHandler{swingStore: app.SwingSetKeeper.GetSwingStore(ctx)},
+ swingsetkeeper.SwingStoreExportOptions{
+ ArtifactMode: swingsetkeeper.SwingStoreArtifactModeNone,
+ ExportDataMode: swingsetkeeper.SwingStoreExportDataModeAll,
+ },
+ )
+ if err == nil {
+ err = swingsetkeeper.WaitUntilSwingStoreExportDone()
+ }
+ if err != nil {
+ return nil, err
+ }
+ // Always run module migrations
mvm, err := app.mm.RunMigrations(ctx, app.configurator, fromVm)
if err != nil {
return mvm, err
}
- // Just run the SwingSet kernel to finish bootstrap and get ready to open for
- // business.
- stdlog.Println("Rebooting SwingSet")
- return mvm, swingset.BootSwingset(ctx, app.SwingSetKeeper)
+ return mvm, nil
}
}
@@ -825,55 +949,94 @@ func normalizeModuleAccount(ctx sdk.Context, ak authkeeper.AccountKeeper, name s
}
type cosmosInitAction struct {
- Type string `json:"type"`
- ChainID string `json:"chainID"`
- Params swingset.Params `json:"params"`
- StoragePort int `json:"storagePort"`
- SupplyCoins sdk.Coins `json:"supplyCoins"`
- VibcPort int `json:"vibcPort"`
- VbankPort int `json:"vbankPort"`
- LienPort int `json:"lienPort"`
+ Type string `json:"type"`
+ ChainID string `json:"chainID"`
+ BlockTime int64 `json:"blockTime,omitempty"`
+ IsBootstrap bool `json:"isBootstrap"`
+ Params swingset.Params `json:"params"`
+ SupplyCoins sdk.Coins `json:"supplyCoins"`
+ UpgradePlan *upgradetypes.Plan `json:"upgradePlan,omitempty"`
+ LienPort int `json:"lienPort"`
+ StoragePort int `json:"storagePort"`
+ SwingsetPort int `json:"swingsetPort"`
+ VbankPort int `json:"vbankPort"`
+ VibcPort int `json:"vibcPort"`
}
// Name returns the name of the App
func (app *GaiaApp) Name() string { return app.BaseApp.Name() }
-func (app *GaiaApp) MustInitController(ctx sdk.Context) {
- if app.controllerInited {
- return
+// CheckControllerInited exits if the controller initialization state does not match `expected`.
+func (app *GaiaApp) CheckControllerInited(expected bool) {
+ if app.controllerInited != expected {
+ fmt.Fprintf(os.Stderr, "controllerInited != %t\n", expected)
+ debug.PrintStack()
+ os.Exit(1)
}
+}
+
+// initController sends the initialization message to the VM.
+// Exits if the controller has already been initialized.
+// The init message will contain any upgrade plan if we're starting after an
+// upgrade, and a flag indicating whether this is a bootstrap of the controller.
+func (app *GaiaApp) initController(ctx sdk.Context, bootstrap bool) {
+ app.CheckControllerInited(false)
app.controllerInited = true
+
+ var blockTime int64 = 0
+ if bootstrap || app.upgradePlan != nil {
+ blockTime = ctx.BlockTime().Unix()
+ }
+
// Begin initializing the controller here.
action := &cosmosInitAction{
- Type: "AG_COSMOS_INIT",
- ChainID: ctx.ChainID(),
- Params: app.SwingSetKeeper.GetParams(ctx),
- StoragePort: app.vstoragePort,
- SupplyCoins: sdk.NewCoins(app.BankKeeper.GetSupply(ctx, "uist")),
- VibcPort: app.vibcPort,
- VbankPort: app.vbankPort,
- LienPort: app.lienPort,
+ Type: "AG_COSMOS_INIT",
+ ChainID: ctx.ChainID(),
+ BlockTime: blockTime,
+ IsBootstrap: bootstrap,
+ Params: app.SwingSetKeeper.GetParams(ctx),
+ SupplyCoins: sdk.NewCoins(app.BankKeeper.GetSupply(ctx, "uist")),
+ UpgradePlan: app.upgradePlan,
+ LienPort: app.lienPort,
+ StoragePort: app.vstoragePort,
+ SwingsetPort: app.swingsetPort,
+ VbankPort: app.vbankPort,
+ VibcPort: app.vibcPort,
}
+ // This really abuses `BlockingSend` to get back at `sendToController`
out, err := app.SwingSetKeeper.BlockingSend(ctx, action)
// fmt.Fprintf(os.Stderr, "AG_COSMOS_INIT Returned from SwingSet: %s, %v\n", out, err)
if err != nil {
- fmt.Fprintln(os.Stderr, "Cannot initialize Controller", err)
- os.Exit(1)
+ panic(errors.Wrap(err, "cannot initialize Controller"))
}
var res bool
err = json.Unmarshal([]byte(out), &res)
if err != nil {
- fmt.Fprintln(os.Stderr, "Cannot unmarshal Controller init response", out, err)
- os.Exit(1)
+ panic(errors.Wrapf(err, "cannot unmarshal Controller init response: %s", out))
}
if !res {
- fmt.Fprintln(os.Stderr, "Controller negative init response")
- os.Exit(1)
+ panic(fmt.Errorf("controller negative init response"))
}
}
+// ensureControllerInited inits the controller if needed. It's used by the
+// x/swingset module's BeginBlock to lazily start the JS controller.
+// We cannot init early since, when starting the software, we don't know if
+// this is a simple restart, or a chain init from genesis or upgrade, both of
+// which require the controller to not be inited yet.
+func (app *GaiaApp) ensureControllerInited(ctx sdk.Context) {
+ if app.controllerInited {
+ return
+ }
+
+ // While we don't expect it anymore, some upgrade may want to throw away
+ // the current JS state and bootstrap again (bulldozer). In that case the
+ // upgrade handler can just set the bootstrapNeeded flag.
+ app.initController(ctx, app.bootstrapNeeded)
+}
+
// BeginBlocker application updates every begin block
func (app *GaiaApp) BeginBlocker(ctx sdk.Context, req abci.RequestBeginBlock) abci.ResponseBeginBlock {
return app.mm.BeginBlock(ctx, req)
@@ -894,6 +1057,16 @@ func (app *GaiaApp) InitChainer(ctx sdk.Context, req abci.RequestInitChain) abci
app.UpgradeKeeper.SetModuleVersionMap(ctx, app.mm.GetVersionMap())
res := app.mm.InitGenesis(ctx, app.appCodec, genesisState)
+ // initialize the provision and reserve module accounts, to avoid their implicit creation
+ // as a default account upon receiving a transfer. See BlockedAddrs().
+ normalizeModuleAccount(ctx, app.AccountKeeper, vbanktypes.ProvisionPoolName)
+ normalizeModuleAccount(ctx, app.AccountKeeper, vbanktypes.ReservePoolName)
+
+ // Init early (before first BeginBlock) to run the potentially lengthy bootstrap
+ if app.bootstrapNeeded {
+ app.initController(ctx, true)
+ }
+
// Agoric: report the genesis time explicitly.
genTime := req.GetTime()
if genTime.After(time.Now()) {
@@ -901,20 +1074,15 @@ func (app *GaiaApp) InitChainer(ctx sdk.Context, req abci.RequestInitChain) abci
stdlog.Printf("Genesis time %s is in %s\n", genTime, d)
}
- // initialize the provision and reserve module accounts, to avoid their implicit creation
- // as a default account upon receiving a transfer. See BockedAddrs().
- normalizeModuleAccount(ctx, app.AccountKeeper, vbanktypes.ProvisionPoolName)
- normalizeModuleAccount(ctx, app.AccountKeeper, vbanktypes.ReservePoolName)
-
return res
}
// Commit tells the controller that the block is commited
func (app *GaiaApp) Commit() abci.ResponseCommit {
- err := app.SwingSetSnapshotter.WaitUntilSnapshotStarted()
+ err := swingsetkeeper.WaitUntilSwingStoreExportStarted()
if err != nil {
- app.Logger().Error("swingset snapshot failed to start", "err", err)
+ app.Logger().Error("swing-store export failed to start", "err", err)
}
// Frontrun the BaseApp's Commit method
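
To summarize the controller lifecycle changes in this file: init is deferred until the first `BeginBlock` (or `InitChainer` when bootstrapping), so the app can tell a plain restart from a genesis bootstrap or an upgrade. A stripped-down sketch of that state machine, with hypothetical names standing in for the PR's actual fields:

```go
package main

import "fmt"

// app sketches the lazy-init guards: InitChainer sets bootstrapNeeded, an
// upgrade handler records a plan, and the first x/swingset BeginBlock then
// inits the controller exactly once.
type app struct {
	controllerInited bool
	bootstrapNeeded  bool
	upgradePlanName  string // stand-in for *upgradetypes.Plan
}

func (a *app) checkControllerInited(expected bool) {
	if a.controllerInited != expected {
		panic(fmt.Sprintf("controllerInited != %t", expected))
	}
}

func (a *app) initController(bootstrap bool) {
	a.checkControllerInited(false)
	a.controllerInited = true
	// Stand-in for sending the AG_COSMOS_INIT action to the JS controller.
	fmt.Printf("AG_COSMOS_INIT isBootstrap=%t upgradePlan=%q\n",
		bootstrap, a.upgradePlanName)
}

func (a *app) ensureControllerInited() {
	if a.controllerInited {
		return
	}
	a.initController(a.bootstrapNeeded)
}

func main() {
	// Upgrade restart: the handler records the plan, BeginBlock inits lazily.
	a := &app{upgradePlanName: "agoric-upgrade-11"}
	a.ensureControllerInited() // first BeginBlock: sends init
	a.ensureControllerInited() // later blocks: no-op
}
```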
diff --git a/golang/cosmos/app/const.go b/golang/cosmos/app/const.go
index 15dd38b8b70..2e7f4a2f391 100644
--- a/golang/cosmos/app/const.go
+++ b/golang/cosmos/app/const.go
@@ -1,6 +1,6 @@
package gaia
const (
- upgradeName = "agoric-upgrade-10"
- upgradeNameTest = "agorictest-upgrade-10"
+ upgradeName = "agoric-upgrade-11"
+ upgradeNameTest = "agorictest-upgrade-11"
)
diff --git a/golang/cosmos/binding.gyp.in b/golang/cosmos/binding.gyp.in
index 2c08dd5b535..1b95d034276 100644
--- a/golang/cosmos/binding.gyp.in
+++ b/golang/cosmos/binding.gyp.in
@@ -2,6 +2,9 @@
"targets": [
{
"target_name": "agcosmosdaemon",
+ 'variables': {
+ "target_lib": "lib<(_target_name).so",
+ },
"cflags!": [ "-fno-exceptions" ],
"cflags_cc!": [ "-fno-exceptions" ],
"xcode_settings": {
@@ -18,17 +21,39 @@
" github.com/agoric-labs/cosmos-sdk v0.45.
replace github.com/cosmos/gaia/v7 => github.com/Agoric/ag0/v7 v7.0.2-alpha.agoric.1
// For testing against a local cosmos-sdk or tendermint
-// replace github.com/cosmos/cosmos-sdk => ../forks/cosmos-sdk
+// replace github.com/cosmos/cosmos-sdk => ../../../forks/cosmos-sdk
-// replace github.com/tendermint/tendermint => ../forks/tendermint
+// replace github.com/tendermint/tendermint => ../../../forks/tendermint
diff --git a/go.sum b/golang/cosmos/go.sum
similarity index 100%
rename from go.sum
rename to golang/cosmos/go.sum
diff --git a/golang/cosmos/package.json b/golang/cosmos/package.json
index 14315ee24be..60676206c74 100644
--- a/golang/cosmos/package.json
+++ b/golang/cosmos/package.json
@@ -11,18 +11,18 @@
},
"scripts": {
"test": "exit 0",
+ "build:all": "make",
"build:gyp": "make compile-gyp",
"build:gyp-debug": "make compile-gyp GYP_DEBUG=--debug",
"test:xs": "exit 0",
+ "prepack": "git rev-parse --short HEAD > git-revision.txt && rm -rf build",
+ "postpack": "git clean -f git-revision.txt",
"build": "exit 0",
"lint-fix": "yarn lint:eslint --fix",
"lint": "eslint '**/*.{cjs,js}'"
},
"dependencies": {
- "bindings": "^1.2.1"
- },
- "devDependencies": {
- "esm": "agoric-labs/esm#Agoric-built",
+ "bindings": "^1.2.1",
"napi-thread-safe-callback": "0.0.6",
"node-addon-api": "^1.7.1"
},
@@ -32,18 +32,6 @@
"url": "https://github.com/Agoric/agoric-sdk/issues"
},
"homepage": "https://github.com/Agoric/agoric-sdk/tree/HEAD/golang/cosmos",
- "files": [
- "Makefile*",
- "app",
- "binding.gyp.in",
- "cmd",
- "daemon",
- "proto",
- "scripts",
- "src",
- "third_party",
- "x"
- ],
"publishConfig": {
"access": "public"
}
diff --git a/golang/cosmos/proto/agoric/swingset/genesis.proto b/golang/cosmos/proto/agoric/swingset/genesis.proto
index 46cefb2b69a..8a178e4e12e 100644
--- a/golang/cosmos/proto/agoric/swingset/genesis.proto
+++ b/golang/cosmos/proto/agoric/swingset/genesis.proto
@@ -13,4 +13,14 @@ message GenesisState {
Params params = 2 [(gogoproto.nullable) = false];
State state = 3 [(gogoproto.nullable) = false];
+
+ repeated SwingStoreExportDataEntry swing_store_export_data = 4 [
+ (gogoproto.jsontag) = "swingStoreExportData"
+ ];
+}
+
+// A SwingStore "export data" entry.
+message SwingStoreExportDataEntry {
+ string key = 1;
+ string value = 2;
}
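
For reference, protoc-gen-gogo turns the new message into a Go struct roughly like the following (a paraphrased sketch, not the actual generated file; exact tags depend on generator options). This is the `swingsettypes.SwingStoreExportDataEntry` consumed by the KV-entry helpers added later in this PR:

```go
// Paraphrased shape of the generated type (tags abbreviated).
type SwingStoreExportDataEntry struct {
	Key   string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
	Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
}
```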
diff --git a/golang/cosmos/proto/agoric/swingset/swingset.proto b/golang/cosmos/proto/agoric/swingset/swingset.proto
index 991738b9569..c1a238edfea 100644
--- a/golang/cosmos/proto/agoric/swingset/swingset.proto
+++ b/golang/cosmos/proto/agoric/swingset/swingset.proto
@@ -150,8 +150,11 @@ message Egress {
];
}
-// The payload messages used by swingset state-sync
-message ExtensionSnapshotterArtifactPayload {
+// SwingStoreArtifact encodes an artifact of a swing-store export.
+// Artifacts may be stored or transmitted in any order. Most handlers do
+// maintain the artifact order from their original source as an effect of how
+// they handle the artifacts.
+message SwingStoreArtifact {
option (gogoproto.equal) = false;
string name = 1 [
(gogoproto.jsontag) = "name",
diff --git a/golang/cosmos/types/kv_entry.go b/golang/cosmos/types/kv_entry.go
new file mode 100644
index 00000000000..44448ad25b6
--- /dev/null
+++ b/golang/cosmos/types/kv_entry.go
@@ -0,0 +1,114 @@
+package types
+
+import (
+ "encoding/json"
+ "fmt"
+)
+
+var _ json.Marshaler = &KVEntry{}
+var _ json.Unmarshaler = &KVEntry{}
+
+// KVEntry represents a string key / string value pair, where the value may be
+// missing, which is different from an empty value.
+// The semantics of a missing value are purpose-dependent rather than specified
+// here, but frequently correspond with deletion/incompleteness/etc.
+// A KVEntry with an empty key is considered invalid.
+type KVEntry struct {
+ key string
+ value *string
+}
+
+// NewKVEntry creates a KVEntry with the provided key and value
+func NewKVEntry(key string, value string) KVEntry {
+ return KVEntry{key, &value}
+}
+
+// NewKVEntryWithNoValue creates a KVEntry with the provided key and no value
+func NewKVEntryWithNoValue(key string) KVEntry {
+ return KVEntry{key, nil}
+}
+
+// UnmarshalJSON updates a KVEntry from JSON text corresponding with a
+// [key: string, value?: string | null] shape, or returns an error indicating
+// invalid input.
+// The key must be a non-empty string, and the value (if present) must be a
+// string or null.
+//
+// Implements json.Unmarshaler
+// Note: unlike other methods, this accepts a pointer to satisfy
+// the Unmarshaler semantics.
+func (entry *KVEntry) UnmarshalJSON(input []byte) (err error) {
+ var generic []*string
+ err = json.Unmarshal(input, &generic)
+ if err != nil {
+ return err
+ }
+
+ length := len(generic)
+
+ if generic == nil {
+ return fmt.Errorf("KVEntry cannot be null")
+ }
+ if length != 1 && length != 2 {
+ return fmt.Errorf("KVEntry must be an array of length 1 or 2 (not %d)", length)
+ }
+
+ key := generic[0]
+ if key == nil || *key == "" {
+ return fmt.Errorf("KVEntry key must be a non-empty string: %v", key)
+ }
+
+ var value *string
+ if length == 2 {
+ value = generic[1]
+ }
+
+ entry.key = *key
+ entry.value = value
+
+ return nil
+}
+
+// MarshalJSON encodes the KVEntry into a JSON array of [key: string, value?: string],
+// with the value missing (array length of 1) if the entry has no value.
+//
+// Implements json.Marshaler
+func (entry KVEntry) MarshalJSON() ([]byte, error) {
+ if !entry.IsValidKey() {
+ return nil, fmt.Errorf("cannot marshal invalid KVEntry")
+ }
+ if entry.value != nil {
+ return json.Marshal([2]string{entry.key, *entry.value})
+ } else {
+ return json.Marshal([1]string{entry.key})
+ }
+}
+
+// IsValidKey returns whether the KVEntry has a non-empty key.
+func (entry KVEntry) IsValidKey() bool {
+ return entry.key != ""
+}
+
+// Key returns the string key.
+func (entry KVEntry) Key() string {
+ return entry.key
+}
+
+// HasValue returns whether the KVEntry has a value or not.
+func (entry KVEntry) HasValue() bool {
+ return entry.value != nil
+}
+
+// Value returns a pointer to the string value or nil if the entry has no value.
+func (entry KVEntry) Value() *string {
+ return entry.value
+}
+
+// StringValue returns the string value, or the empty string if the entry has no value.
+// Note that the result therefore does not differentiate an empty string value from no value.
+func (entry KVEntry) StringValue() string {
+ if entry.value != nil {
+ return *entry.value
+ }
+ return ""
+}
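
A small usage sketch of the `KVEntry` marshaling contract (the import alias matches the one app.go uses; the program itself is illustrative, not part of the PR):

```go
package main

import (
	"encoding/json"
	"fmt"

	agorictypes "github.com/Agoric/agoric-sdk/golang/cosmos/types"
)

func main() {
	entries := []agorictypes.KVEntry{
		agorictypes.NewKVEntry("foo", "bar"),     // marshals to ["foo","bar"]
		agorictypes.NewKVEntryWithNoValue("baz"), // marshals to ["baz"]
	}
	for _, entry := range entries {
		bz, err := json.Marshal(entry)
		if err != nil {
			panic(err)
		}
		fmt.Println(string(bz))

		var decoded agorictypes.KVEntry
		if err := json.Unmarshal(bz, &decoded); err != nil {
			panic(err)
		}
		// HasValue distinguishes ["baz"] (no value) from ["baz",""] (empty value).
		fmt.Println(decoded.Key(), decoded.HasValue(), decoded.StringValue())
	}
}
```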
diff --git a/golang/cosmos/types/kv_entry_helpers.go b/golang/cosmos/types/kv_entry_helpers.go
new file mode 100644
index 00000000000..7ee16de189a
--- /dev/null
+++ b/golang/cosmos/types/kv_entry_helpers.go
@@ -0,0 +1,234 @@
+package types
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ swingsettypes "github.com/Agoric/agoric-sdk/golang/cosmos/x/swingset/types"
+ vstoragetypes "github.com/Agoric/agoric-sdk/golang/cosmos/x/vstorage/types"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+)
+
+// These helpers facilitate handling KVEntry streams, in particular for the
+// swing-store "export data" use case. The goal is to avoid passing around
+// large slices of key/value pairs.
+//
+// Handling of these streams is primarily accomplished through a KVEntryReader
+// interface, with multiple implementations for different backing sources, as
+// well as a helper function to consume a reader and write the entries into a
+// byte Writer as line terminated json encoded KVEntry.
+
+// We attempt to pass sdk.Iterator around as much as possible to abstract a
+// stream of Key/Value pairs without requiring the whole slice to be held in
+// memory. Cosmos SDK defines iterators as yielding Key/Value
+// pairs, both as byte slices.
+//
+// More precisely, we define here the following:
+// - A KVEntryReader interface for Reading KVEntry values one by one from an
+// underlying source.
+// - Multiple implementations of the KVEntryReader interface:
+// - NewKVIteratorReader constructs a reader which consumes an sdk.Iterator.
+// Keys and values are converted from byte slices to strings, and nil values
+// are preserved as KVEntry instances with no value.
+// - A generic reader which uses a slice of key/value data, and a conversion
+// function from that data type to a KVEntry. The reader does bounds
+// checking and keeps track of the current position. The following data
+// types are available:
+// - NewVstorageDataEntriesReader constructs a reader from a slice of
+// vstorage DataEntry values.
+// - NewSwingStoreExportDataEntriesReader constructs a reader from a slice
+// of SwingStoreExportDataEntry values.
+// - NewJsonRawMessageKVEntriesReader constructs a reader from a slice of
+// [key: string, value?: string | null] JSON array values.
+// - NewJsonlKVEntryDecoderReader constructs a reader from an io.ReadCloser
+// (like a file) containing JSON Lines in which each item is a
+// [key: string, value?: string | null] array.
+// - EncodeKVEntryReaderToJsonl consumes a KVEntryReader and writes its entries
+// into an io.Writer as a sequence of single-line JSON texts. The encoding of
+// each line is [key, value] if the KVEntry has a value, and [key] otherwise.
+// This format terminates each line, but is still compatible with JSON Lines
+// (which is line feed *separated*) for Go and JS decoders.
+
+// KVEntryReader is an abstraction for iteratively reading KVEntry data.
+type KVEntryReader interface {
+ // Read returns the next KVEntry, or an error.
+ // An `io.EOF` error indicates that the previous Read() returned the final KVEntry.
+ Read() (KVEntry, error)
+ // Close frees the underlying resource (such as a slice or file descriptor).
+ Close() error
+}
+
+var _ KVEntryReader = &kvIteratorReader{}
+
+// kvIteratorReader is a KVEntryReader backed by an sdk.Iterator
+type kvIteratorReader struct {
+ iter sdk.Iterator
+}
+
+// NewKVIteratorReader returns a KVEntryReader backed by an sdk.Iterator.
+func NewKVIteratorReader(iter sdk.Iterator) KVEntryReader {
+ return &kvIteratorReader{
+ iter: iter,
+ }
+}
+
+// Read yields the next KVEntry from the source iterator
+// Implements KVEntryReader
+func (ir kvIteratorReader) Read() (next KVEntry, err error) {
+ if !ir.iter.Valid() {
+ // There is unfortunately no way to differentiate completion from iteration
+ // errors with cosmos-sdk's Iterator implementation, since iter.Error()
+ // returns an error in both cases.
+ return KVEntry{}, io.EOF
+ }
+
+ key := ir.iter.Key()
+ if len(key) == 0 {
+ return KVEntry{}, fmt.Errorf("nil or empty key yielded by iterator")
+ }
+
+ value := ir.iter.Value()
+ ir.iter.Next()
+ if value == nil {
+ return NewKVEntryWithNoValue(string(key)), nil
+ } else {
+ return NewKVEntry(string(key), string(value)), nil
+ }
+}
+
+func (ir kvIteratorReader) Close() error {
+ return ir.iter.Close()
+}
+
+var _ KVEntryReader = &kvEntriesReader[any]{}
+
+// kvEntriesReader is the KVEntryReader using an underlying slice of generic
+// kv entries. It reads from the slice sequentially using a type specific
+// toKVEntry func, performing bounds checks, and tracking the position.
+type kvEntriesReader[T any] struct {
+ entries []T
+ toKVEntry func(T) (KVEntry, error)
+ nextIndex int
+}
+
+// Read yields the next KVEntry from the source
+// Implements KVEntryReader
+func (reader *kvEntriesReader[T]) Read() (next KVEntry, err error) {
+ if reader.entries == nil {
+ return KVEntry{}, fmt.Errorf("reader closed")
+ }
+
+ length := len(reader.entries)
+
+ if reader.nextIndex < length {
+ entry, err := reader.toKVEntry(reader.entries[reader.nextIndex])
+ reader.nextIndex += 1
+ if err != nil {
+ return KVEntry{}, err
+ }
+ if !entry.IsValidKey() {
+ return KVEntry{}, fmt.Errorf("source yielded a KVEntry with an invalid key")
+ }
+ return entry, err
+ } else if reader.nextIndex == length {
+ reader.nextIndex += 1
+ return KVEntry{}, io.EOF
+ } else {
+ return KVEntry{}, fmt.Errorf("index %d is out of source bounds (length %d)", reader.nextIndex, length)
+ }
+}
+
+// Close releases the source slice
+// Implements KVEntryReader
+func (reader *kvEntriesReader[T]) Close() error {
+ reader.entries = nil
+ return nil
+}
+
+// NewVstorageDataEntriesReader creates a KVEntryReader backed by a
+// vstorage DataEntry slice
+func NewVstorageDataEntriesReader(vstorageDataEntries []*vstoragetypes.DataEntry) KVEntryReader {
+ return &kvEntriesReader[*vstoragetypes.DataEntry]{
+ entries: vstorageDataEntries,
+ toKVEntry: func(sourceEntry *vstoragetypes.DataEntry) (KVEntry, error) {
+ return NewKVEntry(sourceEntry.Path, sourceEntry.Value), nil
+ },
+ }
+}
+
+// NewSwingStoreExportDataEntriesReader creates a KVEntryReader backed by
+// a SwingStoreExportDataEntry slice
+func NewSwingStoreExportDataEntriesReader(exportDataEntries []*swingsettypes.SwingStoreExportDataEntry) KVEntryReader {
+ return &kvEntriesReader[*swingsettypes.SwingStoreExportDataEntry]{
+ entries: exportDataEntries,
+ toKVEntry: func(sourceEntry *swingsettypes.SwingStoreExportDataEntry) (KVEntry, error) {
+ return NewKVEntry(sourceEntry.Key, sourceEntry.Value), nil
+ },
+ }
+}
+
+// NewJsonRawMessageKVEntriesReader creates a KVEntryReader backed by
+// a json.RawMessage slice
+func NewJsonRawMessageKVEntriesReader(jsonEntries []json.RawMessage) KVEntryReader {
+ return &kvEntriesReader[json.RawMessage]{
+ entries: jsonEntries,
+ toKVEntry: func(sourceEntry json.RawMessage) (entry KVEntry, err error) {
+ err = json.Unmarshal(sourceEntry, &entry)
+ return entry, err
+ },
+ }
+}
+
+var _ KVEntryReader = &jsonlKVEntryDecoderReader{}
+
+// jsonlKVEntryDecoderReader is a KVEntryReader that decodes
+// jsonl-like encoded key/value pairs.
+type jsonlKVEntryDecoderReader struct {
+ closer io.Closer
+ decoder *json.Decoder
+}
+
+// Read yields the next decoded KVEntry
+// Implements KVEntryReader
+func (reader jsonlKVEntryDecoderReader) Read() (next KVEntry, err error) {
+ err = reader.decoder.Decode(&next)
+ return next, err
+}
+
+// Close releases the underlying resource backing the decoder.
+// Implements KVEntryReader
+func (reader jsonlKVEntryDecoderReader) Close() error {
+ return reader.closer.Close()
+}
+
+// NewJsonlKVEntryDecoderReader creates a KVEntryReader over a byte
+// stream reader that decodes each line as a JSON-encoded KVEntry. The entries
+// are yielded in the order they appear in the stream.
+func NewJsonlKVEntryDecoderReader(byteReader io.ReadCloser) KVEntryReader {
+ return &jsonlKVEntryDecoderReader{
+ closer: byteReader,
+ decoder: json.NewDecoder(byteReader),
+ }
+}
+
+// EncodeKVEntryReaderToJsonl consumes a KVEntryReader and JSON-encodes each
+// KVEntry, terminating each entry with a newline.
+// It does not Close the reader when done.
+func EncodeKVEntryReaderToJsonl(reader KVEntryReader, bytesWriter io.Writer) (err error) {
+ encoder := json.NewEncoder(bytesWriter)
+ encoder.SetEscapeHTML(false)
+ for {
+ entry, err := reader.Read()
+ if err == io.EOF {
+ return nil
+ } else if err != nil {
+ return err
+ }
+
+ err = encoder.Encode(entry)
+ if err != nil {
+ return err
+ }
+ }
+}
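
And a round-trip sketch tying the helpers together: encode entries with `EncodeKVEntryReaderToJsonl`, then read them back with `NewJsonlKVEntryDecoderReader` (illustrative only, not part of the PR):

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"

	agorictypes "github.com/Agoric/agoric-sdk/golang/cosmos/types"
)

func main() {
	// Encode two entries using the line-terminated JSON framing.
	src := agorictypes.NewJsonRawMessageKVEntriesReader([]json.RawMessage{
		json.RawMessage(`["foo", "bar"]`),
		json.RawMessage(`["baz"]`),
	})
	defer src.Close()

	var buf bytes.Buffer
	if err := agorictypes.EncodeKVEntryReaderToJsonl(src, &buf); err != nil {
		panic(err)
	}
	fmt.Print(buf.String()) // ["foo","bar"] and ["baz"], each on its own line

	// Read the entries back through the JSON Lines decoder.
	back := agorictypes.NewJsonlKVEntryDecoderReader(io.NopCloser(&buf))
	defer back.Close()
	for {
		entry, err := back.Read()
		if err == io.EOF {
			break
		} else if err != nil {
			panic(err)
		}
		fmt.Println(entry.Key(), entry.HasValue())
	}
}
```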
diff --git a/golang/cosmos/types/kv_entry_helpers_test.go b/golang/cosmos/types/kv_entry_helpers_test.go
new file mode 100644
index 00000000000..3037b5f024d
--- /dev/null
+++ b/golang/cosmos/types/kv_entry_helpers_test.go
@@ -0,0 +1,237 @@
+package types
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "strings"
+ "testing"
+
+ sdk "github.com/cosmos/cosmos-sdk/types"
+)
+
+func toKVEntryIdentity(entry KVEntry) (KVEntry, error) {
+ return entry, nil
+}
+
+func toKVEntryError(err error) (KVEntry, error) {
+ return KVEntry{}, err
+}
+
+func checkSameKVEntry(t *testing.T, got KVEntry, expected KVEntry) {
+ if got.key != expected.key {
+ t.Errorf("got key %s, expected key %s", got.key, expected.key)
+ }
+ if got.value == nil && expected.value != nil {
+ t.Errorf("got nil value, expected string %s", *expected.value)
+ } else if got.value != nil && expected.value == nil {
+ t.Errorf("got string value %s, expected nil", *got.value)
+ } else if got.value != nil && expected.value != nil {
+ if *got.value != *expected.value {
+ t.Errorf("got string value %s, expected %s", *got.value, *expected.value)
+ }
+ }
+}
+
+func TestKVEntriesReaderNormal(t *testing.T) {
+ source := []KVEntry{NewKVEntry("foo", "bar"), NewKVEntryWithNoValue("baz")}
+ reader := kvEntriesReader[KVEntry]{entries: source, toKVEntry: toKVEntryIdentity}
+
+ got1, err := reader.Read()
+ if err != nil {
+ t.Errorf("unexpected error: %v", err)
+ }
+ checkSameKVEntry(t, got1, source[0])
+
+ got2, err := reader.Read()
+ if err != nil {
+ t.Errorf("unexpected error: %v", err)
+ }
+ checkSameKVEntry(t, got2, source[1])
+
+ _, err = reader.Read()
+ if err != io.EOF {
+ t.Errorf("expected error io.EOF, got %v", err)
+ }
+
+ _, err = reader.Read()
+ if err == nil || !strings.Contains(err.Error(), "bounds") {
+ t.Errorf("expected out of bounds error, got %v", err)
+ }
+
+ err = reader.Close()
+ if err != nil {
+ t.Errorf("unexpected error: %v", err)
+ }
+
+ _, err = reader.Read()
+ if err == nil || !strings.Contains(err.Error(), "reader closed") {
+ t.Errorf("expected reader closed error, got %v", err)
+ }
+}
+
+func TestKVEntriesReaderErrors(t *testing.T) {
+ source := []error{errors.New("foo"), errors.New("bar")}
+ reader := kvEntriesReader[error]{entries: source, toKVEntry: toKVEntryError}
+
+ _, err := reader.Read()
+ if err != source[0] {
+ t.Errorf("got error %v, expected error %v", err, source[0])
+ }
+
+ // Nothing in the reader prevents reading after previous errors
+ _, err = reader.Read()
+ if err != source[1] {
+ t.Errorf("got error %v, expected error %v", err, source[1])
+ }
+
+ _, err = reader.Read()
+ if err != io.EOF {
+ t.Errorf("expected error io.EOF, got %v", err)
+ }
+}
+
+type kvEntryReaderIterator struct {
+ reader KVEntryReader
+ current KVEntry
+ err error
+}
+
+// newKVEntryReaderIterator creates an sdk.Iterator over a KVEntryReader.
+// The reader's KVEntry keys and values are reported in order as []byte.
+func newKVEntryReaderIterator(reader KVEntryReader) sdk.Iterator {
+ iter := &kvEntryReaderIterator{
+ reader: reader,
+ }
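+	// Prime the iterator: a fresh sdk.Iterator is expected to be positioned on
+	// its first entry, so read it (or capture io.EOF / an error) immediately.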
+ iter.Next()
+ return iter
+}
+
+// Domain implements sdk.Iterator
+func (iter *kvEntryReaderIterator) Domain() (start []byte, end []byte) {
+ return nil, nil
+}
+
+// Valid returns whether the current iterator is valid. Once invalid, the
+// Iterator remains invalid forever.
+func (iter *kvEntryReaderIterator) Valid() bool {
+ if iter.err == io.EOF {
+ return false
+ } else if iter.err != nil {
+ panic(iter.err)
+ }
+ return true
+}
+
+// checkValid implements the validity invariants of sdk.Iterator methods.
+func (iter *kvEntryReaderIterator) checkValid() {
+ if !iter.Valid() {
+ panic("invalid iterator")
+ }
+}
+
+// Next moves the iterator to the next entry from the reader.
+// If Valid() returns false, this method will panic.
+func (iter *kvEntryReaderIterator) Next() {
+ iter.checkValid()
+
+ iter.current, iter.err = iter.reader.Read()
+}
+
+// Key returns the key at the current position. Panics if the iterator is invalid.
+// CONTRACT: key readonly []byte
+func (iter *kvEntryReaderIterator) Key() (key []byte) {
+ iter.checkValid()
+
+ return []byte(iter.current.Key())
+}
+
+// Value returns the value at the current position. Panics if the iterator is invalid.
+// CONTRACT: value readonly []byte
+func (iter *kvEntryReaderIterator) Value() (value []byte) {
+ iter.checkValid()
+
+ if !iter.current.HasValue() {
+ return nil
+ } else {
+ return []byte(iter.current.StringValue())
+ }
+}
+
+// Error returns the last error encountered by the iterator, if any.
+func (iter *kvEntryReaderIterator) Error() error {
+ err := iter.err
+ if err == io.EOF {
+ return nil
+ }
+
+ return err
+}
+
+// Close closes the iterator, releasing any allocated resources.
+func (iter *kvEntryReaderIterator) Close() error {
+ return iter.reader.Close()
+}
+
+func TestKVIteratorReader(t *testing.T) {
+ source := []KVEntry{NewKVEntry("foo", "bar"), NewKVEntryWithNoValue("baz")}
+ iterator := newKVEntryReaderIterator(&kvEntriesReader[KVEntry]{entries: source, toKVEntry: toKVEntryIdentity})
+ reader := NewKVIteratorReader(iterator)
+
+ got1, err := reader.Read()
+ if err != nil {
+ t.Errorf("unexpected error: %v", err)
+ }
+ checkSameKVEntry(t, got1, source[0])
+
+ got2, err := reader.Read()
+ if err != nil {
+ t.Errorf("unexpected error: %v", err)
+ }
+ checkSameKVEntry(t, got2, source[1])
+
+ _, err = reader.Read()
+ if err != io.EOF {
+ t.Errorf("expected error io.EOF, got %v", err)
+ }
+
+ err = reader.Close()
+ if err != nil {
+ t.Errorf("unexpected error: %v", err)
+ }
+}
+
+func TestJsonlEncodeAndReadBack(t *testing.T) {
+ source := []KVEntry{NewKVEntry("foo", "bar"), NewKVEntryWithNoValue("baz")}
+ sourceReader := &kvEntriesReader[KVEntry]{entries: source, toKVEntry: toKVEntryIdentity}
+
+ var encodedKVEntries bytes.Buffer
+ err := EncodeKVEntryReaderToJsonl(sourceReader, &encodedKVEntries)
+ if err != nil {
+ t.Errorf("unexpected encode error %v", err)
+ }
+
+ jsonlReader := NewJsonlKVEntryDecoderReader(io.NopCloser(&encodedKVEntries))
+
+ got1, err := jsonlReader.Read()
+ if err != nil {
+ t.Errorf("unexpected error: %v", err)
+ }
+ checkSameKVEntry(t, got1, source[0])
+
+ got2, err := jsonlReader.Read()
+ if err != nil {
+ t.Errorf("unexpected error: %v", err)
+ }
+ checkSameKVEntry(t, got2, source[1])
+
+ _, err = jsonlReader.Read()
+ if err != io.EOF {
+ t.Errorf("expected error io.EOF, got %v", err)
+ }
+
+ err = jsonlReader.Close()
+ if err != nil {
+ t.Errorf("unexpected error: %v", err)
+ }
+}
diff --git a/golang/cosmos/types/kv_entry_test.go b/golang/cosmos/types/kv_entry_test.go
new file mode 100644
index 00000000000..2a5c5b1e859
--- /dev/null
+++ b/golang/cosmos/types/kv_entry_test.go
@@ -0,0 +1,143 @@
+package types
+
+import (
+ "encoding/json"
+ "errors"
+ "strings"
+ "testing"
+)
+
+func checkEntry(t *testing.T, label string, entry KVEntry, isValidKey bool, expectedKey string, hasValue bool, expectedValue string) {
+ gotValidKey := entry.IsValidKey()
+ if gotValidKey != isValidKey {
+ t.Errorf("%s: valid key is %v, expected %v", label, gotValidKey, isValidKey)
+ }
+
+ gotKey := entry.Key()
+ if gotKey != expectedKey {
+ t.Errorf("%s: got %q, want %q", label, gotKey, expectedKey)
+ }
+
+ if entry.HasValue() {
+ if !hasValue {
+			t.Errorf("%s: got a value, expected none", label)
+ }
+
+ gotValue := *entry.Value()
+ if gotValue != expectedValue {
+ t.Errorf("%s: got %q, want %q", label, gotValue, expectedValue)
+ }
+ } else {
+ if hasValue {
+			t.Errorf("%s: got no value, expected one", label)
+ }
+
+ gotValuePointer := entry.Value()
+ if gotValuePointer != nil {
+ t.Errorf("%s: got %#v, want nil", label, gotValuePointer)
+ }
+ }
+
+ gotValue := entry.StringValue()
+ if gotValue != expectedValue {
+ t.Errorf("%s: got %q, want %q", label, gotValue, expectedValue)
+ }
+}
+
+func TestKVEntry(t *testing.T) {
+ type testCase struct {
+ label string
+ entry KVEntry
+ isValidKey bool
+ expectedKey string
+ hasValue bool
+ expectedValue string
+ }
+ cases := []testCase{
+ {label: "normal", entry: NewKVEntry("foo", "bar"), isValidKey: true, expectedKey: "foo", hasValue: true, expectedValue: "bar"},
+ {label: "empty string value", entry: NewKVEntry("foo", ""), isValidKey: true, expectedKey: "foo", hasValue: true, expectedValue: ""},
+ {label: "no value", entry: NewKVEntryWithNoValue("foo"), isValidKey: true, expectedKey: "foo", hasValue: false, expectedValue: ""},
+ {label: "empty key", entry: NewKVEntryWithNoValue(""), isValidKey: false, expectedKey: "", hasValue: false, expectedValue: ""},
+ }
+ for _, desc := range cases {
+ checkEntry(t, desc.label, desc.entry, desc.isValidKey, desc.expectedKey, desc.hasValue, desc.expectedValue)
+ }
+}
+
+func TestKVEntryMarshall(t *testing.T) {
+ type testCase struct {
+ label string
+ entry KVEntry
+ expectedError error
+ expectedEncoding string
+ }
+ cases := []testCase{
+ {label: "normal", entry: NewKVEntry("foo", "bar"), expectedEncoding: `["foo","bar"]`},
+ {label: "empty string value", entry: NewKVEntry("foo", ""), expectedEncoding: `["foo",""]`},
+ {label: "no value", entry: NewKVEntryWithNoValue("foo"), expectedEncoding: `["foo"]`},
+ {label: "empty key", entry: NewKVEntryWithNoValue(""), expectedError: errors.New("cannot marshal invalid KVEntry")},
+ }
+ for _, desc := range cases {
+ marshalled, err := json.Marshal(desc.entry)
+ if desc.expectedError != nil && err == nil {
+ t.Errorf("%s: got nil error, expected marshal error: %q", desc.label, desc.expectedError.Error())
+ } else if err != nil {
+ if desc.expectedError == nil {
+ t.Errorf("%s: got error %v, expected no error", desc.label, err)
+ } else if !strings.Contains(err.Error(), desc.expectedError.Error()) {
+ t.Errorf("%s: got error %q, expected error %q", desc.label, err.Error(), desc.expectedError.Error())
+ }
+ continue
+ }
+ if string(marshalled) != desc.expectedEncoding {
+ t.Errorf("%s: got %q, want %q", desc.label, string(marshalled), desc.expectedEncoding)
+ }
+ }
+}
+
+func TestKVEntryUnmarshall(t *testing.T) {
+ type testCase struct {
+ label string
+ encoded string
+ expectedError error
+ expectedKey string
+ hasValue bool
+ expectedValue string
+ }
+ cases := []testCase{
+ {label: "normal", encoded: `["foo","bar"]`, expectedKey: "foo", hasValue: true, expectedValue: "bar"},
+ {label: "empty string value", encoded: `["foo",""]`, expectedKey: "foo", hasValue: true, expectedValue: ""},
+ {label: "no value", encoded: `["foo"]`, expectedKey: "foo", hasValue: false, expectedValue: ""},
+ {label: "null value", encoded: `["foo",null]`, expectedKey: "foo", hasValue: false, expectedValue: ""},
+ {label: "null", encoded: `null`, expectedError: errors.New("KVEntry cannot be null")},
+ {label: "string", encoded: `"foo"`, expectedError: errors.New("json")},
+ {label: "empty array", encoded: `[]`, expectedError: errors.New("KVEntry must be an array of length 1 or 2 (not 0)")},
+ {label: "[null, null] array", encoded: `[null,null]`, expectedError: errors.New("KVEntry key must be a non-empty string")},
+ {label: "invalid key array", encoded: `[42]`, expectedError: errors.New("json")},
+ {label: "empty key", encoded: `["",null]`, expectedError: errors.New("KVEntry key must be a non-empty string")},
+ {label: "too many entries array", encoded: `["foo","bar",null]`, expectedError: errors.New("KVEntry must be an array of length 1 or 2 (not 3)")},
+ {label: "invalid value array", encoded: `["foo",42]`, expectedError: errors.New("json")},
+ }
+ for _, desc := range cases {
+ unmarshalled := NewKVEntry("untouched", "untouched")
+ err := json.Unmarshal([]byte(desc.encoded), &unmarshalled)
+ if desc.expectedError != nil && err == nil {
+ t.Errorf("%s: got nil error, expected unmarshal error: %q", desc.label, desc.expectedError.Error())
+ } else if err != nil {
+ if unmarshalled.Key() != "untouched" {
+ t.Errorf("%s: expected error to not modify target key, got %s", desc.label, unmarshalled.Key())
+ }
+ if unmarshalled.StringValue() != "untouched" {
+ t.Errorf("%s: expected error to not modify target value, got %v", desc.label, unmarshalled.Value())
+ }
+ if desc.expectedError == nil {
+ t.Errorf("%s: got error %v, expected no error", desc.label, err)
+ } else if !strings.Contains(err.Error(), desc.expectedError.Error()) {
+ t.Errorf("%s: got error %q, expected error %q", desc.label, err.Error(), desc.expectedError.Error())
+ }
+ continue
+ }
+
+ checkEntry(t, desc.label, unmarshalled, true, desc.expectedKey, desc.hasValue, desc.expectedValue)
+ }
+}
diff --git a/golang/cosmos/vm/controller.go b/golang/cosmos/vm/controller.go
index bf4f317f402..acc70f3e573 100644
--- a/golang/cosmos/vm/controller.go
+++ b/golang/cosmos/vm/controller.go
@@ -8,7 +8,6 @@ import (
type ControllerContext struct {
Context sdk.Context
- StoragePort int
IBCChannelHandlerPort int
}
@@ -74,7 +73,7 @@ func UnregisterPortHandler(portNum int) error {
func ReceiveFromController(portNum int, msg string) (string, error) {
handler := portToHandler[portNum]
if handler == nil {
- return "", fmt.Errorf("Unregistered port %d", portNum)
+ return "", fmt.Errorf("unregistered port %d", portNum)
}
return handler.Receive(&controllerContext, msg)
}
diff --git a/golang/cosmos/x/lien/lien.go b/golang/cosmos/x/lien/lien.go
index a2cd60dea75..3ac507d27fb 100644
--- a/golang/cosmos/x/lien/lien.go
+++ b/golang/cosmos/x/lien/lien.go
@@ -147,7 +147,7 @@ func (ch portHandler) handleGetStaking(ctx sdk.Context, msg portMessage) (string
}
bz, err := json.Marshal(&reply)
if err != nil {
- return "", fmt.Errorf("cannot marshal %v: %w", reply, err)
+ return "", fmt.Errorf("cannot marshal %v: %s", reply, err)
}
return string(bz), nil
}
@@ -157,11 +157,11 @@ func (ch portHandler) handleGetStaking(ctx sdk.Context, msg portMessage) (string
func (ch portHandler) handleGetAccountState(ctx sdk.Context, msg portMessage) (string, error) {
addr, err := sdk.AccAddressFromBech32(msg.Address)
if err != nil {
- return "", fmt.Errorf("cannot convert %s to address: %w", msg.Address, err)
+ return "", fmt.Errorf("cannot convert %s to address: %s", msg.Address, err)
}
denom := msg.Denom
if err = sdk.ValidateDenom(denom); err != nil {
- return "", fmt.Errorf("invalid denom %s: %w", denom, err)
+ return "", fmt.Errorf("invalid denom %s: %s", denom, err)
}
state := ch.keeper.GetAccountState(ctx, addr)
reply := msgAccountState{
@@ -174,7 +174,7 @@ func (ch portHandler) handleGetAccountState(ctx sdk.Context, msg portMessage) (s
}
bz, err := json.Marshal(&reply)
if err != nil {
- return "", fmt.Errorf("cannot marshal %v: %w", reply, err)
+ return "", fmt.Errorf("cannot marshal %v: %s", reply, err)
}
return string(bz), nil
}
@@ -184,11 +184,11 @@ func (ch portHandler) handleGetAccountState(ctx sdk.Context, msg portMessage) (s
func (ch portHandler) handleChangeLiened(ctx sdk.Context, msg portMessage) (string, error) {
addr, err := sdk.AccAddressFromBech32(msg.Address)
if err != nil {
- return "", fmt.Errorf("cannot convert %s to address: %w", msg.Address, err)
+ return "", fmt.Errorf("cannot convert %s to address: %s", msg.Address, err)
}
denom := msg.Denom
if err = sdk.ValidateDenom(denom); err != nil {
- return "", fmt.Errorf("invalid denom %s: %w", denom, err)
+ return "", fmt.Errorf("invalid denom %s: %s", denom, err)
}
newAmt, err := ch.keeper.ChangeLien(ctx, addr, denom, msg.Delta)
@@ -197,7 +197,7 @@ func (ch portHandler) handleChangeLiened(ctx sdk.Context, msg portMessage) (stri
}
bz, err := json.Marshal(&newAmt)
if err != nil {
- return "", fmt.Errorf("cannot marshal %v: %w", newAmt, err)
+ return "", fmt.Errorf("cannot marshal %v: %s", newAmt, err)
}
return string(bz), nil
}
diff --git a/golang/cosmos/x/lien/module.go b/golang/cosmos/x/lien/module.go
index d2eceea250c..b8108a896a1 100644
--- a/golang/cosmos/x/lien/module.go
+++ b/golang/cosmos/x/lien/module.go
@@ -45,7 +45,7 @@ func (AppModuleBasic) DefaultGenesis(cdc codec.JSONCodec) json.RawMessage {
func (AppModuleBasic) ValidateGenesis(cdc codec.JSONCodec, config client.TxEncodingConfig, data json.RawMessage) error {
var genesisState types.GenesisState
if err := cdc.UnmarshalJSON(data, &genesisState); err != nil {
- return fmt.Errorf("failed to unmarshal %s genesis state: %w", types.ModuleName, err)
+ return fmt.Errorf("failed to unmarshal %s genesis state: %s", types.ModuleName, err)
}
return ValidateGenesis(genesisState)
}
diff --git a/golang/cosmos/x/swingset/abci.go b/golang/cosmos/x/swingset/abci.go
index 96f00a3011f..567975c995f 100644
--- a/golang/cosmos/x/swingset/abci.go
+++ b/golang/cosmos/x/swingset/abci.go
@@ -9,13 +9,11 @@ import (
sdk "github.com/cosmos/cosmos-sdk/types"
abci "github.com/tendermint/tendermint/abci/types"
- "github.com/Agoric/agoric-sdk/golang/cosmos/vm"
"github.com/Agoric/agoric-sdk/golang/cosmos/x/swingset/types"
)
type beginBlockAction struct {
Type string `json:"type"`
- StoragePort int `json:"storagePort"`
BlockHeight int64 `json:"blockHeight"`
BlockTime int64 `json:"blockTime"`
ChainID string `json:"chainID"`
@@ -39,7 +37,6 @@ func BeginBlock(ctx sdk.Context, req abci.RequestBeginBlock, keeper Keeper) erro
action := &beginBlockAction{
Type: "BEGIN_BLOCK",
- StoragePort: vm.GetPort("vstorage"),
BlockHeight: ctx.BlockHeight(),
BlockTime: ctx.BlockTime().Unix(),
ChainID: ctx.ChainID(),
@@ -115,7 +112,7 @@ func AfterCommitBlock(keeper Keeper) error {
// fmt.Fprintf(os.Stderr, "AFTER_COMMIT_BLOCK Returned from SwingSet: %s, %v\n", out, err)
if err != nil {
// Panic here, in the hopes that a replay from scratch will fix the problem.
- panic(fmt.Errorf("AFTER_COMMIT_BLOCK failed: %w. Swingset is in an irrecoverable inconsistent state", err))
+ panic(fmt.Errorf("AFTER_COMMIT_BLOCK failed: %s. Swingset is in an irrecoverable inconsistent state", err))
}
return err
}
diff --git a/golang/cosmos/x/swingset/alias.go b/golang/cosmos/x/swingset/alias.go
index 9c4b80c96e8..117a284a1c6 100644
--- a/golang/cosmos/x/swingset/alias.go
+++ b/golang/cosmos/x/swingset/alias.go
@@ -21,10 +21,11 @@ var (
)
type (
- Keeper = keeper.Keeper
- Snapshotter = keeper.SwingsetSnapshotter
- Egress = types.Egress
- MsgDeliverInbound = types.MsgDeliverInbound
- MsgProvision = types.MsgProvision
- Params = types.Params
+ Keeper = keeper.Keeper
+ SwingStoreExportsHandler = keeper.SwingStoreExportsHandler
+ ExtensionSnapshotter = keeper.ExtensionSnapshotter
+ Egress = types.Egress
+ MsgDeliverInbound = types.MsgDeliverInbound
+ MsgProvision = types.MsgProvision
+ Params = types.Params
)
diff --git a/golang/cosmos/x/swingset/genesis.go b/golang/cosmos/x/swingset/genesis.go
index 8205d4836ba..a738e1497c7 100644
--- a/golang/cosmos/x/swingset/genesis.go
+++ b/golang/cosmos/x/swingset/genesis.go
@@ -3,18 +3,13 @@ package swingset
import (
// "os"
"fmt"
- stdlog "log"
- "github.com/Agoric/agoric-sdk/golang/cosmos/vm"
+ agoric "github.com/Agoric/agoric-sdk/golang/cosmos/types"
+ "github.com/Agoric/agoric-sdk/golang/cosmos/x/swingset/keeper"
"github.com/Agoric/agoric-sdk/golang/cosmos/x/swingset/types"
sdk "github.com/cosmos/cosmos-sdk/types"
- abci "github.com/tendermint/tendermint/abci/types"
)
-func NewGenesisState() *types.GenesisState {
- return &types.GenesisState{}
-}
-
func ValidateGenesis(data *types.GenesisState) error {
if data == nil {
return fmt.Errorf("swingset genesis data cannot be nil")
@@ -27,49 +22,120 @@ func ValidateGenesis(data *types.GenesisState) error {
func DefaultGenesisState() *types.GenesisState {
return &types.GenesisState{
- Params: types.DefaultParams(),
+ Params: types.DefaultParams(),
+ State: types.State{},
+ SwingStoreExportData: []*types.SwingStoreExportDataEntry{},
}
}
-type bootstrapBlockAction struct {
- Type string `json:"type"`
- BlockTime int64 `json:"blockTime"`
- StoragePort int `json:"storagePort"`
-}
+// InitGenesis initializes the (Cosmos-side) SwingSet state from the GenesisState.
+// Returns whether the app should send a bootstrap action to the controller.
+func InitGenesis(ctx sdk.Context, k Keeper, swingStoreExportsHandler *SwingStoreExportsHandler, swingStoreExportDir string, data *types.GenesisState) bool {
+ k.SetParams(ctx, data.GetParams())
+ k.SetState(ctx, data.GetState())
-func BootSwingset(ctx sdk.Context, keeper Keeper) error {
- // Just run the SwingSet kernel to finish bootstrap and get ready to open for
- // business.
- action := &bootstrapBlockAction{
- Type: "BOOTSTRAP_BLOCK",
- BlockTime: ctx.BlockTime().Unix(),
- StoragePort: vm.GetPort("vstorage"),
+ swingStoreExportData := data.GetSwingStoreExportData()
+ if len(swingStoreExportData) == 0 {
+ return true
}
- _, err := keeper.BlockingSend(ctx, action)
- return err
-}
+ artifactProvider, err := keeper.OpenSwingStoreExportDirectory(swingStoreExportDir)
+ if err != nil {
+ panic(err)
+ }
+
+ swingStore := k.GetSwingStore(ctx)
+
+ for _, entry := range swingStoreExportData {
+ swingStore.Set([]byte(entry.Key), []byte(entry.Value))
+ }
+
+ snapshotHeight := uint64(ctx.BlockHeight())
-func InitGenesis(ctx sdk.Context, keeper Keeper, data *types.GenesisState) []abci.ValidatorUpdate {
- keeper.SetParams(ctx, data.GetParams())
- keeper.SetState(ctx, data.GetState())
+ getExportDataReader := func() (agoric.KVEntryReader, error) {
+ exportDataIterator := swingStore.Iterator(nil, nil)
+ return agoric.NewKVIteratorReader(exportDataIterator), nil
+ }
- stdlog.Println("Running SwingSet until bootstrap is ready")
- err := BootSwingset(ctx, keeper)
-
- // fmt.Fprintf(os.Stderr, "BOOTSTRAP_BLOCK Returned from swingset: %s, %v\n", out, err)
+ err = swingStoreExportsHandler.RestoreExport(
+ keeper.SwingStoreExportProvider{
+ BlockHeight: snapshotHeight,
+ GetExportDataReader: getExportDataReader,
+ ReadNextArtifact: artifactProvider.ReadNextArtifact,
+ },
+ keeper.SwingStoreRestoreOptions{
+ ArtifactMode: keeper.SwingStoreArtifactModeReplay,
+ ExportDataMode: keeper.SwingStoreExportDataModeAll,
+ },
+ )
if err != nil {
- // NOTE: A failed BOOTSTRAP_BLOCK means that the SwingSet state is inconsistent.
- // Panic here, in the hopes that a replay from scratch will fix the problem.
panic(err)
}
- return []abci.ValidatorUpdate{}
+ return false
}
-func ExportGenesis(ctx sdk.Context, k Keeper) *types.GenesisState {
- gs := NewGenesisState()
- gs.Params = k.GetParams(ctx)
- gs.State = k.GetState(ctx)
+func ExportGenesis(ctx sdk.Context, k Keeper, swingStoreExportsHandler *SwingStoreExportsHandler, swingStoreExportDir string) *types.GenesisState {
+ gs := &types.GenesisState{
+ Params: k.GetParams(ctx),
+ State: k.GetState(ctx),
+ SwingStoreExportData: []*types.SwingStoreExportDataEntry{},
+ }
+
+ exportDataIterator := k.GetSwingStore(ctx).Iterator(nil, nil)
+ defer exportDataIterator.Close()
+ for ; exportDataIterator.Valid(); exportDataIterator.Next() {
+ entry := types.SwingStoreExportDataEntry{
+ Key: string(exportDataIterator.Key()),
+ Value: string(exportDataIterator.Value()),
+ }
+ gs.SwingStoreExportData = append(gs.SwingStoreExportData, &entry)
+ }
+
+ snapshotHeight := uint64(ctx.BlockHeight())
+
+ err := swingStoreExportsHandler.InitiateExport(
+		// The export will fail if a historical height is requested
+ snapshotHeight,
+ swingStoreGenesisEventHandler{exportDir: swingStoreExportDir, snapshotHeight: snapshotHeight},
+ // The export will fail if the swing-store does not contain all replay artifacts
+ keeper.SwingStoreExportOptions{
+ ArtifactMode: keeper.SwingStoreArtifactModeReplay,
+ ExportDataMode: keeper.SwingStoreExportDataModeSkip,
+ },
+ )
+ if err != nil {
+ panic(err)
+ }
+
+ err = keeper.WaitUntilSwingStoreExportDone()
+ if err != nil {
+ panic(err)
+ }
+
return gs
}
+
+type swingStoreGenesisEventHandler struct {
+ exportDir string
+ snapshotHeight uint64
+}
+
+func (eventHandler swingStoreGenesisEventHandler) OnExportStarted(height uint64, retrieveSwingStoreExport func() error) error {
+ return retrieveSwingStoreExport()
+}
+
+func (eventHandler swingStoreGenesisEventHandler) OnExportRetrieved(provider keeper.SwingStoreExportProvider) error {
+ if eventHandler.snapshotHeight != provider.BlockHeight {
+ return fmt.Errorf("snapshot block height (%d) doesn't match requested height (%d)", provider.BlockHeight, eventHandler.snapshotHeight)
+ }
+
+ artifactsProvider := keeper.SwingStoreExportProvider{
+ GetExportDataReader: func() (agoric.KVEntryReader, error) {
+ return nil, nil
+ },
+ ReadNextArtifact: provider.ReadNextArtifact,
+ }
+
+ return keeper.WriteSwingStoreExportToDirectory(artifactsProvider, eventHandler.exportDir)
+}
diff --git a/golang/cosmos/x/swingset/keeper/extension_snapshotter.go b/golang/cosmos/x/swingset/keeper/extension_snapshotter.go
new file mode 100644
index 00000000000..0e73dc59970
--- /dev/null
+++ b/golang/cosmos/x/swingset/keeper/extension_snapshotter.go
@@ -0,0 +1,309 @@
+package keeper
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+
+ agoric "github.com/Agoric/agoric-sdk/golang/cosmos/types"
+ "github.com/Agoric/agoric-sdk/golang/cosmos/x/swingset/types"
+ "github.com/cosmos/cosmos-sdk/baseapp"
+ snapshots "github.com/cosmos/cosmos-sdk/snapshots/types"
+ "github.com/tendermint/tendermint/libs/log"
+)
+
+// This module implements a Cosmos ExtensionSnapshotter to capture and restore
+// state-sync Swingset state that is not part of the Cosmos DB.
+// See docs/architecture/state-sync.md for a sequence diagram of how this
+// module fits within the state-sync process.
+
+var _ snapshots.ExtensionSnapshotter = &ExtensionSnapshotter{}
+var _ SwingStoreExportEventHandler = &ExtensionSnapshotter{}
+
+// SnapshotFormat 1 defines all extension payloads to be SwingStoreArtifact proto messages
+const SnapshotFormat = 1
+
+// snapshotDetails describes an in-progress state-sync snapshot
+type snapshotDetails struct {
+ // blockHeight is the block height of this in-progress snapshot.
+ blockHeight uint64
+ // logger is the destination for this snapshot's log messages.
+ logger log.Logger
+	// retrieveExport is the callback provided by the SwingStoreExportsHandler to
+	// retrieve the SwingStore's export provider, which allows reading the export's
+	// artifacts used to populate this state-sync extension's payloads.
+ retrieveExport func() error
+ // payloadWriter is the callback provided by the state-sync snapshot manager
+ // for an extension to write a payload into the under-construction snapshot
+ // stream. It may be called multiple times, and often is (currently once per
+ // SwingStore export artifact).
+ payloadWriter snapshots.ExtensionPayloadWriter
+}
+
+// ExtensionSnapshotter is the cosmos state-sync extension snapshotter for the
+// x/swingset module.
+// It handles the SwingSet state that is not part of the Cosmos DB. Currently
+// that state is solely composed of the SwingStore artifacts, as a copy of the
+// SwingStore "export data" is streamed into the cosmos DB during execution.
+// When performing a snapshot, the extension leverages the SwingStoreExportsHandler
+// to retrieve the needed SwingStore artifacts. When restoring a snapshot,
+// the extension combines the artifacts from the state-sync snapshot with the
+// SwingStore "export data" from the already restored cosmos DB, to produce a
+// full SwingStore export that can be imported to create a new JS swing-store DB.
+//
+// Since the swing-store cannot open its DB at historical commit points,
+// the export operation must start before new changes are committed, i.e. before
+// SwingSet is instructed to commit the next block. For that reason the cosmos
+// snapshot operation is currently mediated by the SwingStoreExportsHandler,
+// which helps with the synchronization needed to generate consistent exports,
+// while allowing SwingSet activity to proceed for the next block. This relies
+// on the application calling WaitUntilSwingStoreExportStarted before
+// instructing SwingSet to commit a new block.
+type ExtensionSnapshotter struct {
+ isConfigured func() bool
+ // takeAppSnapshot is called by OnExportStarted when creating a snapshot
+ takeAppSnapshot func(height int64)
+ swingStoreExportsHandler *SwingStoreExportsHandler
+ getSwingStoreExportDataShadowCopyReader func(height int64) agoric.KVEntryReader
+ logger log.Logger
+ activeSnapshot *snapshotDetails
+}
+
+// NewExtensionSnapshotter creates a new swingset ExtensionSnapshotter
+func NewExtensionSnapshotter(
+ app *baseapp.BaseApp,
+ swingStoreExportsHandler *SwingStoreExportsHandler,
+ getSwingStoreExportDataShadowCopyReader func(height int64) agoric.KVEntryReader,
+) *ExtensionSnapshotter {
+ return &ExtensionSnapshotter{
+ isConfigured: func() bool { return app.SnapshotManager() != nil },
+ takeAppSnapshot: app.Snapshot,
+ logger: app.Logger().With("module", fmt.Sprintf("x/%s", types.ModuleName), "submodule", "extension snapshotter"),
+ swingStoreExportsHandler: swingStoreExportsHandler,
+ getSwingStoreExportDataShadowCopyReader: getSwingStoreExportDataShadowCopyReader,
+ activeSnapshot: nil,
+ }
+}
+
+// SnapshotName returns the name of the snapshotter; it should be unique in the manager.
+// Implements ExtensionSnapshotter
+func (snapshotter *ExtensionSnapshotter) SnapshotName() string {
+ return types.ModuleName
+}
+
+// SnapshotFormat returns the extension specific format used to encode the
+// extension payloads when creating a snapshot. It's independent of the format
+// used for the overall state-sync snapshot.
+// Implements ExtensionSnapshotter
+func (snapshotter *ExtensionSnapshotter) SnapshotFormat() uint32 {
+ return SnapshotFormat
+}
+
+// SupportedFormats returns a list of extension specific payload formats it can
+// restore from.
+// Implements ExtensionSnapshotter
+func (snapshotter *ExtensionSnapshotter) SupportedFormats() []uint32 {
+ return []uint32{SnapshotFormat}
+}
+
+// InitiateSnapshot initiates a snapshot for the given block height.
+// If a snapshot is already in progress, or if no snapshot manager is
+// configured, this will fail.
+//
+// The snapshot operation is performed in a goroutine.
+// Use WaitUntilSwingStoreExportStarted to synchronize commit boundaries.
+func (snapshotter *ExtensionSnapshotter) InitiateSnapshot(height int64) error {
+ if !snapshotter.isConfigured() {
+ return fmt.Errorf("snapshot manager not configured")
+ }
+ if height <= 0 {
+ return fmt.Errorf("block height must not be negative or 0")
+ }
+
+ blockHeight := uint64(height)
+
+ return snapshotter.swingStoreExportsHandler.InitiateExport(blockHeight, snapshotter, SwingStoreExportOptions{
+ ArtifactMode: SwingStoreArtifactModeReplay,
+ ExportDataMode: SwingStoreExportDataModeSkip,
+ })
+}
+
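+// A sketch of the commit-boundary synchronization described above, as seen
+// from the application (the wiring is illustrative and not part of this file):
+//
+//	// after committing a block at a snapshot height:
+//	if err := extensionSnapshotter.InitiateSnapshot(height); err != nil {
+//		logger.Error("failed to initiate state-sync snapshot", "err", err)
+//	}
+//	// before instructing SwingSet to commit the next block:
+//	if err := WaitUntilSwingStoreExportStarted(); err != nil {
+//		logger.Error("swing-store export failed to start", "err", err)
+//	}
+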
+// OnExportStarted performs the actual cosmos state-sync app snapshot.
+// The cosmos implementation will ultimately call SnapshotExtension, which can
+// retrieve and process the SwingStore artifacts.
+// This method is invoked by the SwingStoreExportsHandler in a goroutine
+// started by InitiateExport, only if no other SwingStore export operation is
+// already in progress.
+//
+// Implements SwingStoreExportEventHandler
+func (snapshotter *ExtensionSnapshotter) OnExportStarted(blockHeight uint64, retrieveExport func() error) error {
+ logger := snapshotter.logger.With("height", blockHeight)
+
+ if blockHeight > math.MaxInt64 {
+ return fmt.Errorf("snapshot block height %d is higher than max int64", blockHeight)
+ }
+ height := int64(blockHeight)
+
+	// We assume the SwingStoreExportsHandler correctly guarded against concurrent snapshots
+ snapshotDetails := snapshotDetails{
+ blockHeight: blockHeight,
+ logger: logger,
+ retrieveExport: retrieveExport,
+ }
+ snapshotter.activeSnapshot = &snapshotDetails
+
+ snapshotter.takeAppSnapshot(height)
+
+ snapshotter.activeSnapshot = nil
+
+ // Unfortunately Cosmos BaseApp.Snapshot() does not report its errors.
+ return nil
+}
+
+// SnapshotExtension is the method invoked by cosmos to write extension payloads
+// into the underlying protobuf stream of the state-sync snapshot.
+// This method is invoked by the cosmos snapshot manager in a goroutine it
+// started during the call to OnExportStarted. However the snapshot manager
+// fully synchronizes its goroutine with the goroutine started by the
+// SwingStoreSnapshotter, making it safe to invoke callbacks of the
+// SwingStoreSnapshotter. SnapshotExtension actually delegates writing
+// extension payloads to OnExportRetrieved.
+//
+// Implements ExtensionSnapshotter
+func (snapshotter *ExtensionSnapshotter) SnapshotExtension(blockHeight uint64, payloadWriter snapshots.ExtensionPayloadWriter) error {
+ logError := func(err error) error {
+		// The cosmos layers do a poor job of reporting errors; however,
+		// SwingStoreExportsHandler arranges to report retrieve errors swallowed by
+		// takeAppSnapshot, so we manually report unexpected errors.
+ snapshotter.logger.Error("swingset snapshot extension failed", "err", err)
+ return err
+ }
+
+ snapshotDetails := snapshotter.activeSnapshot
+ if snapshotDetails == nil {
+ // shouldn't happen, but return an error if it does
+ return logError(errors.New("no active swingset snapshot"))
+ }
+
+ if snapshotDetails.blockHeight != blockHeight {
+ return logError(fmt.Errorf("swingset extension snapshot requested for unexpected height %d (expected %d)", blockHeight, snapshotDetails.blockHeight))
+ }
+
+ snapshotDetails.payloadWriter = payloadWriter
+
+ return snapshotDetails.retrieveExport()
+}
+
+// OnExportRetrieved handles the SwingStore export retrieved by the SwingStoreExportsHandler
+// and writes it out to the SnapshotExtension's payloadWriter.
+// This operation is invoked by the SwingStoreExportsHandler from the snapshot
+// manager's goroutine, which is synchronized with the SwingStoreExportsHandler's
+// own goroutine.
+//
+// Implements SwingStoreExportEventHandler
+func (snapshotter *ExtensionSnapshotter) OnExportRetrieved(provider SwingStoreExportProvider) error {
+ snapshotDetails := snapshotter.activeSnapshot
+ if snapshotDetails == nil || snapshotDetails.payloadWriter == nil {
+ // shouldn't happen, but return an error if it does
+ return errors.New("no active swingset snapshot")
+ }
+
+ if snapshotDetails.blockHeight != provider.BlockHeight {
+ return fmt.Errorf("SwingStore export received for unexpected block height %d (app snapshot height is %d)", provider.BlockHeight, snapshotDetails.blockHeight)
+ }
+
+ writeArtifactToPayload := func(artifact types.SwingStoreArtifact) error {
+ payloadBytes, err := artifact.Marshal()
+ if err != nil {
+ return err
+ }
+
+ err = snapshotDetails.payloadWriter(payloadBytes)
+ if err != nil {
+ return err
+ }
+
+ return nil
+ }
+
+ for {
+ artifact, err := provider.ReadNextArtifact()
+ if err == io.EOF {
+ break
+ } else if err != nil {
+ return err
+ }
+
+ err = writeArtifactToPayload(artifact)
+ if err != nil {
+ return err
+ }
+ }
+
+ exportDataReader, err := provider.GetExportDataReader()
+ if err != nil {
+ return err
+ }
+ if exportDataReader == nil {
+ return nil
+ }
+ defer exportDataReader.Close()
+
+ // For debugging, write out any retrieved export data as a single untrusted artifact
+ // which has the same encoding as the internal SwingStore export data representation:
+	// a sequence of [key, value] JSON arrays, each terminated by a newline.
+ exportDataArtifact := types.SwingStoreArtifact{Name: UntrustedExportDataArtifactName}
+
+ var encodedExportData bytes.Buffer
+ err = agoric.EncodeKVEntryReaderToJsonl(exportDataReader, &encodedExportData)
+ if err != nil {
+ return err
+ }
+ exportDataArtifact.Data = encodedExportData.Bytes()
+
+ err = writeArtifactToPayload(exportDataArtifact)
+ encodedExportData.Reset()
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// RestoreExtension restores an extension state snapshot.
+// The payload reader returns io.EOF when it reaches the extension boundaries.
+// Implements ExtensionSnapshotter
+func (snapshotter *ExtensionSnapshotter) RestoreExtension(blockHeight uint64, format uint32, payloadReader snapshots.ExtensionPayloadReader) error {
+ if format != SnapshotFormat {
+ return snapshots.ErrUnknownFormat
+ }
+
+ if blockHeight > math.MaxInt64 {
+ return fmt.Errorf("snapshot block height %d is higher than max int64", blockHeight)
+ }
+ height := int64(blockHeight)
+
+ // Retrieve the SwingStore "ExportData" from the verified vstorage data.
+ // At this point the content of the cosmos DB has been verified against the
+ // AppHash, which means the SwingStore data it contains can be used as the
+ // trusted root against which to validate the artifacts.
+ getExportDataReader := func() (agoric.KVEntryReader, error) {
+ exportDataReader := snapshotter.getSwingStoreExportDataShadowCopyReader(height)
+ return exportDataReader, nil
+ }
+
+ readNextArtifact := func() (artifact types.SwingStoreArtifact, err error) {
+ payloadBytes, err := payloadReader()
+ if err != nil {
+ return artifact, err
+ }
+
+ err = artifact.Unmarshal(payloadBytes)
+ return artifact, err
+ }
+
+ return snapshotter.swingStoreExportsHandler.RestoreExport(
+ SwingStoreExportProvider{BlockHeight: blockHeight, GetExportDataReader: getExportDataReader, ReadNextArtifact: readNextArtifact},
+ SwingStoreRestoreOptions{ArtifactMode: SwingStoreArtifactModeReplay, ExportDataMode: SwingStoreExportDataModeAll},
+ )
+}
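+
+// Wiring sketch: registering this extension with the cosmos snapshot manager
+// during app construction (illustrative; the real registration lives in the
+// application setup, not in this file):
+//
+//	snapshotter := NewExtensionSnapshotter(app, swingStoreExportsHandler, getReader)
+//	if err := app.SnapshotManager().RegisterExtensions(snapshotter); err != nil {
+//		panic(err)
+//	}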
diff --git a/golang/cosmos/x/swingset/keeper/extension_snapshotter_test.go b/golang/cosmos/x/swingset/keeper/extension_snapshotter_test.go
new file mode 100644
index 00000000000..2f20b1662f1
--- /dev/null
+++ b/golang/cosmos/x/swingset/keeper/extension_snapshotter_test.go
@@ -0,0 +1,104 @@
+package keeper
+
+import (
+ "io"
+ "testing"
+
+ "github.com/tendermint/tendermint/libs/log"
+)
+
+func newTestExtensionSnapshotter() *ExtensionSnapshotter {
+ logger := log.NewNopLogger() // log.NewTMLogger(log.NewSyncWriter( /* os.Stdout*/ io.Discard)).With("module", "sdk/app")
+ return &ExtensionSnapshotter{
+ isConfigured: func() bool { return true },
+ logger: logger,
+ swingStoreExportsHandler: newTestSwingStoreExportsHandler(),
+ }
+}
+
+func TestExtensionSnapshotterInProgress(t *testing.T) {
+ extensionSnapshotter := newTestExtensionSnapshotter()
+ ch := make(chan struct{})
+ extensionSnapshotter.takeAppSnapshot = func(height int64) {
+ <-ch
+ }
+ err := extensionSnapshotter.InitiateSnapshot(123)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = WaitUntilSwingStoreExportStarted()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = extensionSnapshotter.InitiateSnapshot(456)
+ if err == nil {
+ t.Error("wanted error for snapshot in progress")
+ }
+
+ err = extensionSnapshotter.RestoreExtension(
+ 456, SnapshotFormat,
+ func() ([]byte, error) {
+ return nil, io.EOF
+ })
+ if err == nil {
+ t.Error("wanted error for snapshot in progress")
+ }
+
+ close(ch)
+ err = WaitUntilSwingStoreExportDone()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = extensionSnapshotter.InitiateSnapshot(456)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = WaitUntilSwingStoreExportDone()
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestExtensionSnapshotterNotConfigured(t *testing.T) {
+ extensionSnapshotter := newTestExtensionSnapshotter()
+ extensionSnapshotter.isConfigured = func() bool { return false }
+ err := extensionSnapshotter.InitiateSnapshot(123)
+ if err == nil {
+ t.Error("wanted error for unconfigured snapshot manager")
+ }
+}
+
+func TestExtensionSnapshotterSecondCommit(t *testing.T) {
+ extensionSnapshotter := newTestExtensionSnapshotter()
+
+ // Use a channel to block the snapshot goroutine after it has started but before it exits.
+ ch := make(chan struct{})
+ extensionSnapshotter.takeAppSnapshot = func(height int64) {
+ <-ch
+ }
+
+ // First run through app.Commit()
+ err := WaitUntilSwingStoreExportStarted()
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = extensionSnapshotter.InitiateSnapshot(123)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Second run through app.Commit() - should return right away
+ err = WaitUntilSwingStoreExportStarted()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // close the signaling channel to let goroutine exit
+ close(ch)
+ err = WaitUntilSwingStoreExportDone()
+ if err != nil {
+ t.Fatal(err)
+ }
+}
diff --git a/golang/cosmos/x/swingset/keeper/keeper.go b/golang/cosmos/x/swingset/keeper/keeper.go
index 2640e7176e0..29139e8e5fd 100644
--- a/golang/cosmos/x/swingset/keeper/keeper.go
+++ b/golang/cosmos/x/swingset/keeper/keeper.go
@@ -11,15 +11,16 @@ import (
"github.com/cosmos/cosmos-sdk/baseapp"
"github.com/cosmos/cosmos-sdk/codec"
+ "github.com/cosmos/cosmos-sdk/store/prefix"
sdk "github.com/cosmos/cosmos-sdk/types"
bankkeeper "github.com/cosmos/cosmos-sdk/x/bank/keeper"
paramtypes "github.com/cosmos/cosmos-sdk/x/params/types"
"github.com/Agoric/agoric-sdk/golang/cosmos/ante"
+ agoric "github.com/Agoric/agoric-sdk/golang/cosmos/types"
"github.com/Agoric/agoric-sdk/golang/cosmos/vm"
"github.com/Agoric/agoric-sdk/golang/cosmos/x/swingset/types"
vstoragekeeper "github.com/Agoric/agoric-sdk/golang/cosmos/x/vstorage/keeper"
- vstoragetypes "github.com/Agoric/agoric-sdk/golang/cosmos/x/vstorage/types"
)
// Top-level paths for chain storage should remain synchronized with
@@ -36,7 +37,10 @@ const (
StoragePathSwingStore = "swingStore"
)
-const stateKey string = "state"
+const (
+ stateKey = "state"
+ swingStoreKeyPrefix = "swingStore."
+)
// Contextual information about the message source of an action on an inbound queue.
// This context should be unique per inboundQueueRecord.
@@ -261,7 +265,7 @@ func getBeansOwingPathForAddress(addr sdk.AccAddress) string {
func (k Keeper) GetBeansOwing(ctx sdk.Context, addr sdk.AccAddress) sdk.Uint {
path := getBeansOwingPathForAddress(addr)
entry := k.vstorageKeeper.GetEntry(ctx, path)
- if !entry.HasData() {
+ if !entry.HasValue() {
return sdk.ZeroUint()
}
return sdk.NewUintFromString(entry.StringValue())
@@ -271,7 +275,7 @@ func (k Keeper) GetBeansOwing(ctx sdk.Context, addr sdk.AccAddress) sdk.Uint {
// feeCollector but has not yet paid.
func (k Keeper) SetBeansOwing(ctx sdk.Context, addr sdk.AccAddress, beans sdk.Uint) {
path := getBeansOwingPathForAddress(addr)
- k.vstorageKeeper.SetStorage(ctx, vstoragetypes.NewStorageEntry(path, beans.String()))
+ k.vstorageKeeper.SetStorage(ctx, agoric.NewKVEntry(path, beans.String()))
}
// ChargeBeans charges the given address the given number of beans. It divides
@@ -375,7 +379,7 @@ func (k Keeper) ChargeForProvisioning(ctx sdk.Context, submitter, addr sdk.AccAd
func (k Keeper) GetEgress(ctx sdk.Context, addr sdk.AccAddress) types.Egress {
path := StoragePathEgress + "." + addr.String()
entry := k.vstorageKeeper.GetEntry(ctx, path)
- if !entry.HasData() {
+ if !entry.HasValue() {
return types.Egress{}
}
@@ -398,7 +402,7 @@ func (k Keeper) SetEgress(ctx sdk.Context, egress *types.Egress) error {
}
// FIXME: We should use just SetStorageAndNotify here, but solo needs legacy for now.
- k.vstorageKeeper.LegacySetStorageAndNotify(ctx, vstoragetypes.NewStorageEntry(path, string(bz)))
+ k.vstorageKeeper.LegacySetStorageAndNotify(ctx, agoric.NewKVEntry(path, string(bz)))
// Now make sure the corresponding account has been initialised.
if acc := k.accountKeeper.GetAccount(ctx, egress.Peer); acc != nil {
@@ -431,11 +435,12 @@ func (k Keeper) GetMailbox(ctx sdk.Context, peer string) string {
func (k Keeper) SetMailbox(ctx sdk.Context, peer string, mailbox string) {
path := StoragePathMailbox + "." + peer
// FIXME: We should use just SetStorageAndNotify here, but solo needs legacy for now.
- k.vstorageKeeper.LegacySetStorageAndNotify(ctx, vstoragetypes.NewStorageEntry(path, mailbox))
+ k.vstorageKeeper.LegacySetStorageAndNotify(ctx, agoric.NewKVEntry(path, mailbox))
}
-func (k Keeper) ExportSwingStore(ctx sdk.Context) []*vstoragetypes.DataEntry {
- return k.vstorageKeeper.ExportStorageFromPrefix(ctx, StoragePathSwingStore)
+func (k Keeper) GetSwingStore(ctx sdk.Context) sdk.KVStore {
+ store := ctx.KVStore(k.storeKey)
+ return prefix.NewStore(store, []byte(swingStoreKeyPrefix))
}
func (k Keeper) PathToEncodedKey(path string) []byte {
diff --git a/golang/cosmos/x/swingset/keeper/keeper_test.go b/golang/cosmos/x/swingset/keeper/keeper_test.go
index 15b2fc21775..643b8dd0b79 100644
--- a/golang/cosmos/x/swingset/keeper/keeper_test.go
+++ b/golang/cosmos/x/swingset/keeper/keeper_test.go
@@ -1,10 +1,17 @@
package keeper
import (
+ "fmt"
+ "reflect"
"testing"
"github.com/Agoric/agoric-sdk/golang/cosmos/x/swingset/types"
+ "github.com/cosmos/cosmos-sdk/store"
+ prefixstore "github.com/cosmos/cosmos-sdk/store/prefix"
+ storetypes "github.com/cosmos/cosmos-sdk/store/types"
sdk "github.com/cosmos/cosmos-sdk/types"
+
+ dbm "github.com/tendermint/tm-db"
)
func mkcoin(denom string) func(amt int64) sdk.Coin {
@@ -181,3 +188,73 @@ func Test_calculateFees(t *testing.T) {
})
}
}
+
+var (
+ swingsetStoreKey = storetypes.NewKVStoreKey(types.StoreKey)
+)
+
+func makeTestStore() sdk.KVStore {
+ db := dbm.NewMemDB()
+ ms := store.NewCommitMultiStore(db)
+ ms.MountStoreWithDB(swingsetStoreKey, sdk.StoreTypeIAVL, db)
+ err := ms.LoadLatestVersion()
+ if err != nil {
+ panic(err)
+ }
+ kvStore := ms.GetKVStore(swingsetStoreKey)
+ prefixStore := prefixstore.NewStore(kvStore, []byte("swingStore."))
+ return prefixStore
+}
+
+func TestSwingStore(t *testing.T) {
+ store := makeTestStore()
+
+ // Test that we can store and retrieve a value.
+ store.Set([]byte("someKey"), []byte("someValue"))
+ if got := string(store.Get([]byte("someKey"))); got != "someValue" {
+ t.Errorf("got %q, want %q", got, "someValue")
+ }
+
+ // Test that we can update and retrieve an updated value.
+ store.Set([]byte("someKey"), []byte("someNewValue"))
+ if got := string(store.Get([]byte("someKey"))); got != "someNewValue" {
+ t.Errorf("got %q, want %q", got, "someNewValue")
+ }
+
+ // Test that we can store and retrieve empty values
+ store.Set([]byte("someEmptyKey"), []byte(""))
+ if got := store.Get([]byte("someEmptyKey")); got == nil || string(got) != "" {
+ t.Errorf("got %#v, want empty string", got)
+ }
+
+ // Test that we can store and delete values.
+ store.Set([]byte("someOtherKey"), []byte("someOtherValue"))
+ store.Delete([]byte("someOtherKey"))
+ if store.Has([]byte("someOtherKey")) {
+ t.Errorf("has value, expected not")
+ }
+
+	// Test that we can delete non-existent keys (e.g. deleting twice)
+ store.Delete([]byte("someMissingKey"))
+
+ // Check the iterated values
+ expectedEntries := [][2]string{
+ {"someEmptyKey", "[]byte{}"},
+ {"someKey", "[]byte{0x73, 0x6f, 0x6d, 0x65, 0x4e, 0x65, 0x77, 0x56, 0x61, 0x6c, 0x75, 0x65}"},
+ }
+
+ iter := store.Iterator(nil, nil)
+ gotEntries := [][2]string{}
+ for ; iter.Valid(); iter.Next() {
+ entry := [2]string{
+ string(iter.Key()),
+ fmt.Sprintf("%#v", iter.Value()),
+ }
+ gotEntries = append(gotEntries, entry)
+ }
+ iter.Close()
+
+ if !reflect.DeepEqual(gotEntries, expectedEntries) {
+ t.Errorf("got export %q, want %q", gotEntries, expectedEntries)
+ }
+}
diff --git a/golang/cosmos/x/swingset/keeper/msg_server.go b/golang/cosmos/x/swingset/keeper/msg_server.go
index 65f3d108f05..0d959ce23cc 100644
--- a/golang/cosmos/x/swingset/keeper/msg_server.go
+++ b/golang/cosmos/x/swingset/keeper/msg_server.go
@@ -25,7 +25,6 @@ type deliverInboundAction struct {
Peer string `json:"peer"`
Messages [][]interface{} `json:"messages"`
Ack uint64 `json:"ack"`
- StoragePort int `json:"storagePort"`
BlockHeight int64 `json:"blockHeight"`
BlockTime int64 `json:"blockTime"`
}
@@ -58,7 +57,6 @@ func (keeper msgServer) DeliverInbound(goCtx context.Context, msg *types.MsgDeli
Peer: msg.Submitter.String(),
Messages: messages,
Ack: msg.Ack,
- StoragePort: vm.GetPort("vstorage"),
BlockHeight: ctx.BlockHeight(),
BlockTime: ctx.BlockTime().Unix(),
}
diff --git a/golang/cosmos/x/swingset/keeper/querier.go b/golang/cosmos/x/swingset/keeper/querier.go
index ca678950371..3195b40885b 100644
--- a/golang/cosmos/x/swingset/keeper/querier.go
+++ b/golang/cosmos/x/swingset/keeper/querier.go
@@ -80,7 +80,7 @@ func queryMailbox(ctx sdk.Context, path []string, req abci.RequestQuery, keeper
// nolint: unparam
func legacyQueryStorage(ctx sdk.Context, path string, req abci.RequestQuery, keeper Keeper, legacyQuerierCdc *codec.LegacyAmino) (res []byte, err error) {
entry := keeper.vstorageKeeper.GetEntry(ctx, path)
- if !entry.HasData() {
+ if !entry.HasValue() {
return []byte{}, sdkerrors.Wrapf(sdkerrors.ErrUnknownRequest, "could not get swingset %+v", path)
}
diff --git a/golang/cosmos/x/swingset/keeper/snapshotter.go b/golang/cosmos/x/swingset/keeper/snapshotter.go
deleted file mode 100644
index 2cba32bcf6f..00000000000
--- a/golang/cosmos/x/swingset/keeper/snapshotter.go
+++ /dev/null
@@ -1,491 +0,0 @@
-package keeper
-
-import (
- "encoding/json"
- "errors"
- "fmt"
- "io"
- "os"
- "path/filepath"
- "regexp"
-
- "github.com/Agoric/agoric-sdk/golang/cosmos/vm"
- "github.com/Agoric/agoric-sdk/golang/cosmos/x/swingset/types"
- vstoragetypes "github.com/Agoric/agoric-sdk/golang/cosmos/x/vstorage/types"
- "github.com/cosmos/cosmos-sdk/baseapp"
- snapshots "github.com/cosmos/cosmos-sdk/snapshots/types"
- sdk "github.com/cosmos/cosmos-sdk/types"
- "github.com/tendermint/tendermint/libs/log"
- tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
-)
-
-var _ snapshots.ExtensionSnapshotter = &SwingsetSnapshotter{}
-
-// SnapshotFormat 1 is a proto message containing an artifact name, and the binary artifact data
-const SnapshotFormat = 1
-
-// The manifest filename must be synchronized with the JS export/import tooling
-const ExportManifestFilename = "export-manifest.json"
-const ExportDataFilename = "export-data.jsonl"
-const UntrustedExportDataArtifactName = "UNTRUSTED-EXPORT-DATA"
-const UntrustedExportDataFilename = "untrusted-export-data.jsonl"
-const ExportedFilesMode = 0644
-
-var disallowedArtifactNameChar = regexp.MustCompile(`[^-_.a-zA-Z0-9]`)
-
-// sanitizeArtifactName searches a string for all characters
-// other than ASCII alphanumerics, hyphens, underscores, and dots,
-// and replaces each of them with a hyphen.
-func sanitizeArtifactName(name string) string {
- return disallowedArtifactNameChar.ReplaceAllString(name, "-")
-}
-
-type activeSnapshot struct {
- // The block height of the snapshot in progress
- height int64
- // The logger for this snapshot
- logger log.Logger
- // Use to synchronize the commit boundary
- startedResult chan error
- // Internal flag indicating whether the cosmos driven snapshot process completed
- // Only read or written by the snapshot worker goroutine.
- retrieved bool
- // Closed when this snapshot is complete
- done chan struct{}
-}
-
-type exportManifest struct {
- BlockHeight uint64 `json:"blockHeight,omitempty"`
- // The filename of the export data
- Data string `json:"data,omitempty"`
- // The list of artifact names and their corresponding filenames
- Artifacts [][2]string `json:"artifacts"`
-}
-
-type SwingStoreExporter interface {
- ExportSwingStore(ctx sdk.Context) []*vstoragetypes.DataEntry
-}
-
-type SwingsetSnapshotter struct {
- isConfigured func() bool
- takeSnapshot func(height int64)
- newRestoreContext func(height int64) sdk.Context
- logger log.Logger
- exporter SwingStoreExporter
- blockingSend func(action vm.Jsonable) (string, error)
- // Only modified by the main goroutine.
- activeSnapshot *activeSnapshot
-}
-
-type snapshotAction struct {
- Type string `json:"type"` // COSMOS_SNAPSHOT
- BlockHeight int64 `json:"blockHeight"`
- Request string `json:"request"` // "initiate", "discard", "retrieve", or "restore"
- Args []json.RawMessage `json:"args,omitempty"`
-}
-
-func NewSwingsetSnapshotter(app *baseapp.BaseApp, exporter SwingStoreExporter, sendToController func(bool, string) (string, error)) SwingsetSnapshotter {
- // The sendToController performed by this submodule are non-deterministic.
- // This submodule will send messages to JS from goroutines at unpredictable
- // times, but this is safe because when handling the messages, the JS side
- // does not perform operations affecting consensus and ignores state changes
- // since committing the previous block.
- // Since this submodule implements block level commit synchronization, the
- // processing and results are both insensitive to sub-block timing of messages.
-
- blockingSend := func(action vm.Jsonable) (string, error) {
- bz, err := json.Marshal(action)
- if err != nil {
- return "", err
- }
- return sendToController(true, string(bz))
- }
-
- return SwingsetSnapshotter{
- isConfigured: func() bool { return app.SnapshotManager() != nil },
- takeSnapshot: app.Snapshot,
- newRestoreContext: func(height int64) sdk.Context {
- return app.NewUncachedContext(false, tmproto.Header{Height: height})
- },
- logger: app.Logger().With("module", fmt.Sprintf("x/%s", types.ModuleName), "submodule", "snapshotter"),
- exporter: exporter,
- blockingSend: blockingSend,
- activeSnapshot: nil,
- }
-}
-
-// InitiateSnapshot synchronously initiates a snapshot for the given height.
-// If a snapshot is already in progress, or if no snapshot manager is configured,
-// this will fail.
-// The snapshot operation is performed in a goroutine, and synchronized with the
-// main thread through the `WaitUntilSnapshotStarted` method.
-func (snapshotter *SwingsetSnapshotter) InitiateSnapshot(height int64) error {
- if snapshotter.activeSnapshot != nil {
- select {
- case <-snapshotter.activeSnapshot.done:
- snapshotter.activeSnapshot = nil
- default:
- return fmt.Errorf("snapshot already in progress for height %d", snapshotter.activeSnapshot.height)
- }
- }
-
- if !snapshotter.isConfigured() {
- return fmt.Errorf("snapshot manager not configured")
- }
-
- logger := snapshotter.logger.With("height", height)
-
- // Indicate that a snapshot has been initiated by setting `activeSnapshot`.
- // This structure is used to synchronize with the goroutine spawned below.
- // It's nilled-out before exiting (and is the only code that does so).
- active := &activeSnapshot{
- height: height,
- logger: logger,
- startedResult: make(chan error, 1),
- retrieved: false,
- done: make(chan struct{}),
- }
- snapshotter.activeSnapshot = active
-
- go func() {
- defer close(active.done)
-
- action := &snapshotAction{
- Type: "COSMOS_SNAPSHOT",
- BlockHeight: height,
- Request: "initiate",
- }
-
- // blockingSend for COSMOS_SNAPSHOT action is safe to call from a goroutine
- _, err := snapshotter.blockingSend(action)
-
- if err != nil {
- // First indicate a snapshot is no longer in progress if the call to
- // `WaitUntilSnapshotStarted` has't happened yet.
- // Then signal the current snapshot operation if a call to
- // `WaitUntilSnapshotStarted` was already waiting.
- active.startedResult <- err
- close(active.startedResult)
- logger.Error("failed to initiate swingset snapshot", "err", err)
- return
- }
-
- // Signal that the snapshot operation has started in the goroutine. Calls to
- // `WaitUntilSnapshotStarted` will no longer block.
- close(active.startedResult)
-
- // In production this should indirectly call SnapshotExtension().
- snapshotter.takeSnapshot(height)
-
- // Check whether the cosmos Snapshot() method successfully handled our extension
- if active.retrieved {
- return
- }
-
- logger.Error("failed to make swingset snapshot")
- action = &snapshotAction{
- Type: "COSMOS_SNAPSHOT",
- BlockHeight: height,
- Request: "discard",
- }
- _, err = snapshotter.blockingSend(action)
-
- if err != nil {
- logger.Error("failed to discard swingset snapshot", "err", err)
- }
- }()
-
- return nil
-}
-
-// WaitUntilSnapshotStarted synchronizes with a snapshot in progress, if any.
-// The JS SwingStore export must have started before a new block is committed.
-// The app must call this method before sending a commit action to SwingSet.
-//
-// Waits for a just initiated snapshot to have started in its goroutine.
-// If no snapshot is in progress (`InitiateSnapshot` hasn't been called or
-// already completed), or if we previously checked if the snapshot had started,
-// returns immediately.
-func (snapshotter *SwingsetSnapshotter) WaitUntilSnapshotStarted() error {
- activeSnapshot := snapshotter.activeSnapshot
- if activeSnapshot == nil {
- return nil
- }
- // Block until the active snapshot has started, saving the result.
- // The snapshot goroutine only produces a value in case of an error,
- // and closes the channel once the snapshot has started or failed.
- // Only the first call after a snapshot was initiated will report an error.
- startErr := <-activeSnapshot.startedResult
-
- // Check if the active snapshot is done, and if so, nil it out so future
- // calls are faster.
- select {
- case <-activeSnapshot.done:
- snapshotter.activeSnapshot = nil
- default:
- // don't wait for it to finish
- }
-
- return startErr
-}
-
-// SnapshotName returns the name of snapshotter, it should be unique in the manager.
-// Implements ExtensionSnapshotter
-func (snapshotter *SwingsetSnapshotter) SnapshotName() string {
- return types.ModuleName
-}
-
-// SnapshotFormat returns the default format the extension snapshotter uses to encode the
-// payloads when taking a snapshot.
-// It's defined within the extension, different from the global format for the whole state-sync snapshot.
-// Implements ExtensionSnapshotter
-func (snapshotter *SwingsetSnapshotter) SnapshotFormat() uint32 {
- return SnapshotFormat
-}
-
-// SupportedFormats returns a list of formats it can restore from.
-// Implements ExtensionSnapshotter
-func (snapshotter *SwingsetSnapshotter) SupportedFormats() []uint32 {
- return []uint32{SnapshotFormat}
-}
-
-// SnapshotExtension writes extension payloads into the underlying protobuf stream.
-// This operation is invoked by the snapshot manager in the goroutine started by
-// `InitiateSnapshot`.
-// Implements ExtensionSnapshotter
-func (snapshotter *SwingsetSnapshotter) SnapshotExtension(height uint64, payloadWriter snapshots.ExtensionPayloadWriter) (err error) {
- defer func() {
- // Since the cosmos layers do a poor job of reporting errors, do our own reporting
- // `err` will be set correctly regardless if it was explicitly assigned or
- // a value was provided to a `return` statement.
- // See https://go.dev/blog/defer-panic-and-recover for details
- if err != nil {
- var logger log.Logger
- if snapshotter.activeSnapshot != nil {
- logger = snapshotter.activeSnapshot.logger
- } else {
- logger = snapshotter.logger
- }
-
- logger.Error("swingset snapshot extension failed", "err", err)
- }
- }()
-
- activeSnapshot := snapshotter.activeSnapshot
- if activeSnapshot == nil {
- // shouldn't happen, but return an error if it does
- return errors.New("no active swingset snapshot")
- }
-
- if activeSnapshot.height != int64(height) {
- return fmt.Errorf("swingset snapshot requested for unexpected height %d (expected %d)", height, activeSnapshot.height)
- }
-
- action := &snapshotAction{
- Type: "COSMOS_SNAPSHOT",
- BlockHeight: activeSnapshot.height,
- Request: "retrieve",
- }
- out, err := snapshotter.blockingSend(action)
-
- if err != nil {
- return err
- }
-
- var exportDir string
- err = json.Unmarshal([]byte(out), &exportDir)
- if err != nil {
- return err
- }
-
- defer os.RemoveAll(exportDir)
-
- rawManifest, err := os.ReadFile(filepath.Join(exportDir, ExportManifestFilename))
- if err != nil {
- return err
- }
-
- var manifest exportManifest
- err = json.Unmarshal(rawManifest, &manifest)
- if err != nil {
- return err
- }
-
- if manifest.BlockHeight != height {
- return fmt.Errorf("snapshot manifest blockHeight (%d) doesn't match (%d)", manifest.BlockHeight, height)
- }
-
- writeFileToPayload := func(fileName string, artifactName string) error {
- payload := types.ExtensionSnapshotterArtifactPayload{Name: artifactName}
-
- payload.Data, err = os.ReadFile(filepath.Join(exportDir, fileName))
- if err != nil {
- return err
- }
-
- payloadBytes, err := payload.Marshal()
- if err != nil {
- return err
- }
-
- err = payloadWriter(payloadBytes)
- if err != nil {
- return err
- }
-
- return nil
- }
-
- if manifest.Data != "" {
- err = writeFileToPayload(manifest.Data, UntrustedExportDataArtifactName)
- if err != nil {
- return err
- }
- }
-
- for _, artifactInfo := range manifest.Artifacts {
- artifactName := artifactInfo[0]
- fileName := artifactInfo[1]
- if artifactName == UntrustedExportDataArtifactName {
- return fmt.Errorf("unexpected artifact name %s", artifactName)
- }
- err = writeFileToPayload(fileName, artifactName)
- if err != nil {
- return err
- }
- }
-
- activeSnapshot.retrieved = true
- activeSnapshot.logger.Info("retrieved snapshot", "exportDir", exportDir)
-
- return nil
-}
-
-// RestoreExtension restores an extension state snapshot,
-// the payload reader returns `io.EOF` when it reaches the extension boundaries.
-// Implements ExtensionSnapshotter
-func (snapshotter *SwingsetSnapshotter) RestoreExtension(height uint64, format uint32, payloadReader snapshots.ExtensionPayloadReader) error {
- if format != SnapshotFormat {
- return snapshots.ErrUnknownFormat
- }
-
- ctx := snapshotter.newRestoreContext(int64(height))
-
- exportDir, err := os.MkdirTemp("", fmt.Sprintf("agd-state-sync-restore-%d-*", height))
- if err != nil {
- return err
- }
- defer os.RemoveAll(exportDir)
-
- manifest := exportManifest{
- BlockHeight: height,
- Data: ExportDataFilename,
- }
-
- exportDataFile, err := os.OpenFile(filepath.Join(exportDir, ExportDataFilename), os.O_CREATE|os.O_WRONLY, ExportedFilesMode)
- if err != nil {
- return err
- }
- defer exportDataFile.Close()
-
- // Retrieve the SwingStore "ExportData" from the verified vstorage data.
- // At this point the content of the cosmos DB has been verified against the
- // AppHash, which means the SwingStore data it contains can be used as the
- // trusted root against which to validate the artifacts.
- swingStoreEntries := snapshotter.exporter.ExportSwingStore(ctx)
-
- if len(swingStoreEntries) > 0 {
- encoder := json.NewEncoder(exportDataFile)
- encoder.SetEscapeHTML(false)
- for _, dataEntry := range swingStoreEntries {
- entry := []string{dataEntry.Path, dataEntry.Value}
- err := encoder.Encode(entry)
- if err != nil {
- return err
- }
- }
- }
-
- writeExportFile := func(filename string, data []byte) error {
- return os.WriteFile(filepath.Join(exportDir, filename), data, ExportedFilesMode)
- }
-
- for {
- payloadBytes, err := payloadReader()
- if err == io.EOF {
- break
- } else if err != nil {
- return err
- }
-
- payload := types.ExtensionSnapshotterArtifactPayload{}
- if err = payload.Unmarshal(payloadBytes); err != nil {
- return err
- }
-
- switch {
- case payload.Name != UntrustedExportDataArtifactName:
- // Artifact verifiable on import from the export data
- // Since we cannot trust the state-sync payload at this point, we generate
- // a safe and unique filename from the artifact name we received, by
- // substituting any non letters-digits-hyphen-underscore-dot by a hyphen,
- // and prefixing with an incremented id.
- // The filename is not used for any purpose in the snapshotting logic.
- filename := sanitizeArtifactName(payload.Name)
- filename = fmt.Sprintf("%d-%s", len(manifest.Artifacts), filename)
- manifest.Artifacts = append(manifest.Artifacts, [2]string{payload.Name, filename})
- err = writeExportFile(filename, payload.Data)
-
- case len(swingStoreEntries) > 0:
- // Pseudo artifact containing untrusted export data which may have been
- // saved separately for debugging purposes (not referenced from the manifest)
- err = writeExportFile(UntrustedExportDataFilename, payload.Data)
-
- default:
- // There is no trusted export data
- err = errors.New("cannot restore from untrusted export data")
- // snapshotter.logger.Info("using untrusted export data for swingstore restore")
- // _, err = exportDataFile.Write(payload.Data)
- }
-
- if err != nil {
- return err
- }
- }
-
- err = exportDataFile.Sync()
- if err != nil {
- return err
- }
- exportDataFile.Close()
-
- manifestBytes, err := json.MarshalIndent(manifest, "", " ")
- if err != nil {
- return err
- }
- err = writeExportFile(ExportManifestFilename, manifestBytes)
- if err != nil {
- return err
- }
-
- encodedExportDir, err := json.Marshal(exportDir)
- if err != nil {
- return err
- }
-
- action := &snapshotAction{
- Type: "COSMOS_SNAPSHOT",
- BlockHeight: int64(height),
- Request: "restore",
- Args: []json.RawMessage{encodedExportDir},
- }
-
- _, err = snapshotter.blockingSend(action)
- if err != nil {
- return err
- }
-
- snapshotter.logger.Info("restored snapshot", "exportDir", exportDir, "height", height)
-
- return nil
-}
diff --git a/golang/cosmos/x/swingset/keeper/snapshotter_test.go b/golang/cosmos/x/swingset/keeper/snapshotter_test.go
deleted file mode 100644
index e1b6f36dc94..00000000000
--- a/golang/cosmos/x/swingset/keeper/snapshotter_test.go
+++ /dev/null
@@ -1,185 +0,0 @@
-package keeper
-
-import (
- "errors"
- "testing"
-
- "github.com/Agoric/agoric-sdk/golang/cosmos/vm"
- sdk "github.com/cosmos/cosmos-sdk/types"
- "github.com/tendermint/tendermint/libs/log"
-)
-
-func newTestSnapshotter() SwingsetSnapshotter {
- logger := log.NewNopLogger() // log.NewTMLogger(log.NewSyncWriter( /* os.Stdout*/ io.Discard)).With("module", "sdk/app")
- return SwingsetSnapshotter{
- isConfigured: func() bool { return true },
- takeSnapshot: func(height int64) {},
- newRestoreContext: func(height int64) sdk.Context { return sdk.Context{} },
- logger: logger,
- blockingSend: func(action vm.Jsonable) (string, error) { return "", nil },
- }
-}
-
-func TestSnapshotInProgress(t *testing.T) {
- swingsetSnapshotter := newTestSnapshotter()
- ch := make(chan struct{})
- swingsetSnapshotter.takeSnapshot = func(height int64) {
- <-ch
- }
- err := swingsetSnapshotter.InitiateSnapshot(123)
- if err != nil {
- t.Fatal(err)
- }
- err = swingsetSnapshotter.WaitUntilSnapshotStarted()
- if err != nil {
- t.Fatal(err)
- }
-
- err = swingsetSnapshotter.InitiateSnapshot(456)
- if err == nil {
- t.Error("wanted error for snapshot in progress")
- }
-
- close(ch)
- <-swingsetSnapshotter.activeSnapshot.done
- err = swingsetSnapshotter.InitiateSnapshot(456)
- if err != nil {
- t.Fatal(err)
- }
-}
-
-func TestNotConfigured(t *testing.T) {
- swingsetSnapshotter := newTestSnapshotter()
- swingsetSnapshotter.isConfigured = func() bool { return false }
- err := swingsetSnapshotter.InitiateSnapshot(123)
- if err == nil {
- t.Error("wanted error for unconfigured snapshot manager")
- }
-}
-
-func TestSecondCommit(t *testing.T) {
- swingsetSnapshotter := newTestSnapshotter()
-
- // Use a channel to block the snapshot goroutine after it has started but before it exits.
- ch := make(chan struct{})
- swingsetSnapshotter.takeSnapshot = func(height int64) {
- <-ch
- }
-
- // First run through app.Commit()
- err := swingsetSnapshotter.WaitUntilSnapshotStarted()
- if err != nil {
- t.Fatal(err)
- }
- err = swingsetSnapshotter.InitiateSnapshot(123)
- if err != nil {
- t.Fatal(err)
- }
-
- // Second run through app.Commit() - should return right away
- err = swingsetSnapshotter.WaitUntilSnapshotStarted()
- if err != nil {
- t.Fatal(err)
- }
-
- // close the signaling channel to let goroutine exit
- close(ch)
- <-swingsetSnapshotter.activeSnapshot.done
-}
-
-func TestInitiateFails(t *testing.T) {
- swingsetSnapshotter := newTestSnapshotter()
- swingsetSnapshotter.blockingSend = func(action vm.Jsonable) (string, error) {
- if action.(*snapshotAction).Request == "initiate" {
- return "", errors.New("initiate failed")
- }
- return "", nil
- }
-
- err := swingsetSnapshotter.InitiateSnapshot(123)
- if err != nil {
- t.Fatal(err)
- }
- err = swingsetSnapshotter.WaitUntilSnapshotStarted()
- if err == nil {
- t.Fatal("wanted initiation error")
- }
- if err.Error() != "initiate failed" {
- t.Errorf(`wanted error "initiate failed", got "%s"`, err.Error())
- }
- // another wait should succeed without error
- err = swingsetSnapshotter.WaitUntilSnapshotStarted()
- if err != nil {
- t.Error(err)
- }
-}
-
-func TestRetrievalFails(t *testing.T) {
- swingsetSnapshotter := newTestSnapshotter()
- swingsetSnapshotter.blockingSend = func(action vm.Jsonable) (string, error) {
- if action.(*snapshotAction).Request == "retrieve" {
- return "", errors.New("retrieve failed")
- }
- return "", nil
- }
- nilWriter := func(_ []byte) error { return nil }
- var savedErr error
- ch := make(chan struct{})
- swingsetSnapshotter.takeSnapshot = func(height int64) {
- // shortcut to the snapshot manager calling the extension
- savedErr = swingsetSnapshotter.SnapshotExtension(uint64(height), nilWriter)
- close(ch)
- }
-
- err := swingsetSnapshotter.InitiateSnapshot(123)
- if err != nil {
- t.Fatal(err)
- }
- err = swingsetSnapshotter.WaitUntilSnapshotStarted()
- if err != nil {
- t.Fatal(err)
- }
-
- <-ch
- if savedErr == nil {
- t.Fatal("wanted retrieval error")
- }
- if savedErr.Error() != "retrieve failed" {
- t.Errorf(`wanted error "retrieve failed", got "%s"`, savedErr.Error())
- }
-}
-
-func TestDiscard(t *testing.T) {
- discardCalled := false
- swingsetSnapshotter := newTestSnapshotter()
- swingsetSnapshotter.blockingSend = func(action vm.Jsonable) (string, error) {
- if action.(*snapshotAction).Request == "discard" {
- discardCalled = true
- }
- return "", nil
- }
-
- // simulate a normal Snapshot() call which calls SnapshotExtension()
- swingsetSnapshotter.takeSnapshot = func(height int64) {
- swingsetSnapshotter.activeSnapshot.retrieved = true
- }
- err := swingsetSnapshotter.InitiateSnapshot(123)
- if err != nil {
- t.Fatal(err)
- }
- <-swingsetSnapshotter.activeSnapshot.done
- if discardCalled {
- t.Error("didn't want discard called")
- }
-
- // simulate a Snapshot() call which doesn't call SnapshotExtension()
- swingsetSnapshotter.takeSnapshot = func(height int64) {}
- err = swingsetSnapshotter.InitiateSnapshot(456)
- if err != nil {
- t.Fatal(err)
- }
- <-swingsetSnapshotter.activeSnapshot.done
- if !discardCalled {
- t.Error("wanted discard called")
- }
-}
diff --git a/golang/cosmos/x/swingset/keeper/swing_store_exports_handler.go b/golang/cosmos/x/swingset/keeper/swing_store_exports_handler.go
new file mode 100644
index 00000000000..2cc7e83f588
--- /dev/null
+++ b/golang/cosmos/x/swingset/keeper/swing_store_exports_handler.go
@@ -0,0 +1,857 @@
+package keeper
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "regexp"
+
+ agoric "github.com/Agoric/agoric-sdk/golang/cosmos/types"
+ "github.com/Agoric/agoric-sdk/golang/cosmos/vm"
+ "github.com/Agoric/agoric-sdk/golang/cosmos/x/swingset/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ "github.com/tendermint/tendermint/libs/log"
+)
+
+// This module abstracts the generation and handling of swing-store exports,
+// including the communication with the JS side to generate and restore them.
+//
+// Its interface derives from the following requirements:
+// - Multiple golang components may perform swing-store export or import
+//   operations, but the JS side does not support concurrent operations, as
+//   there is no legitimate use case for them.
+// - Some components cannot block the main execution while performing an export
+// operation. In particular, cosmos's state-sync snapshot process cannot
+// block the logic handling tendermint events.
+// - The JS swing-store cannot access historical states. To generate
+// deterministic exports, the export operations that cannot block must be able
+// to synchronize with commit points that will change the JS swing-store.
+// - The JS swing-store export logic does however support mutation of the
+// JS swing-store state after an export operation has started. Such mutations
+// do not affect the export that is produced, and can span multiple blocks.
+// - This implies the commit synchronization is only necessary until the JS
+// side of the export operation has started.
+// - Some components, in particular state-sync, may need to perform other work
+// alongside generating a swing-store export. This work similarly cannot block
+// the main execution, but must allow for the swing-store synchronization
+//   that enables generating deterministic exports. For state-sync, this work
+// happens before the generated swing-store export can be consumed.
+//
+// The general approach taken is to implement a SwingStoreExportsHandler that
+// implements the communication with the JS side, enforces that no concurrent
+// operations take place, defers the consumption of the export to a provided
+// SwingStoreExportEventHandler, and provides some synchronization methods to
+// let the application enforce mutation boundaries.
+//
+// There should be a single SwingStoreExportsHandler instance, and all its method
+// calls should be performed from the same goroutine (no mutex enforcement).
+//
+// The process of generating a SwingStore export proceeds as follows:
+// - The component invokes swingStoreExportsHandler.InitiateExport with an
+// eventHandler for the export.
+// - InitiateExport verifies no other export operation is in progress and
+// starts a goroutine to perform the export operation. It requests the JS
+// side to start generating an export of the swing-store, and calls the
+// eventHandler's OnExportStarted method with a function param allowing it to
+// retrieve the export.
+// - The cosmos app will call WaitUntilSwingStoreExportStarted before
+// instructing the JS controller to commit its work, satisfying the
+// deterministic exports requirement.
+// - OnExportStarted must call the retrieve function before returning, though
+//   it may perform other work first. For cosmos state-sync snapshots,
+// OnExportStarted will call app.Snapshot which will invoke the swingset
+// module's ExtensionSnapshotter that will retrieve and process the
+// swing-store export.
+// - When the retrieve function is called, it blocks until the JS export is
+//   ready, then creates a SwingStoreExportProvider that abstracts access to
+// the content of the export. The eventHandler's OnExportRetrieved is called
+// with the export provider.
+// - OnExportRetrieved reads the export using the provider.
+//
+// Restoring a swing-store export does not have similar non-blocking requirements.
+// The component simply invokes swingStoreExportsHandler.RestoreExport with a
+// SwingStoreExportProvider representing the swing-store export to
+// be restored, and RestoreExport will consume it and block until the JS side
+// has completed the restore before returning.
+
+// exportManifest represents the content of the JS swing-store export manifest.
+// The export is exchanged between Cosmos and JS using the file system, and only
+// the directory containing the export is exchanged with a blockingSend. The
+// manifest is a JSON file with the agreed upon file name of
+// "export-manifest.json" in the export directory. It contains the file names
+// for the "export data" (described in the godoc for exportDataFilename), and
+// for the opaque artifacts of the export.
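+//
+// An illustrative manifest (the artifact name and file name are hypothetical):
+//
+//	{
+//	  "blockHeight": 123,
+//	  "data": "export-data.jsonl",
+//	  "artifacts": [
+//	    ["some-artifact-name", "0-some-artifact-name"]
+//	  ]
+//	}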
+type exportManifest struct {
+ // BlockHeight is the block height of the manifest.
+ BlockHeight uint64 `json:"blockHeight,omitempty"`
+ // Data is the filename of the export data.
+ Data string `json:"data,omitempty"`
+ // Artifacts is the list of [artifact name, file name] pairs.
+ Artifacts [][2]string `json:"artifacts"`
+}
+
+// ExportManifestFilename is the manifest filename which must be synchronized with the JS export/import tooling
+// See packages/cosmic-swingset/src/export-kernel-db.js and packages/cosmic-swingset/src/import-kernel-db.js
+const ExportManifestFilename = "export-manifest.json"
+
+// For restore operations, the swing-store "export data" is exchanged with the
+// JS side as a file which encodes "export data" entries as a sequence of
+// [key, value] JSON arrays each terminated by a new line.
+// NB: this is not technically jsonlines since the entries are new line
+// terminated instead of being new line separated, however the parsers in both
+// JS and golang handle such extra whitespace.
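+//
+// For example, a file with two entries (hypothetical keys and values):
+//
+//	["key1","value1"]
+//	["key2","value2"]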
+const exportDataFilename = "export-data.jsonl"
+
+// UntrustedExportDataArtifactName is a special artifact name that the provider
+// and consumer of an export can use to indicate the presence of a synthetic
+// artifact containing untrusted "export data". This artifact must not end up in
+// the list of artifacts imported by the JS import tooling (which would fail).
+const UntrustedExportDataArtifactName = "UNTRUSTED-EXPORT-DATA"
+const untrustedExportDataFilename = "untrusted-export-data.jsonl"
+
+const exportedFilesMode = 0644
+
+// swingStoreExportActionType is the action type used for all swing-store
+// export blockingSend, and synchronized with the JS side in
+// packages/internal/src/action-types.js
+const swingStoreExportActionType = "SWING_STORE_EXPORT"
+
+// initiateRequest is the request type for initiating an export
+const initiateRequest = "initiate"
+
+type swingStoreInitiateExportAction struct {
+ Type string `json:"type"` // "SWING_STORE_EXPORT"
+ Request string `json:"request"` // "initiate"
+ BlockHeight uint64 `json:"blockHeight,omitempty"` // empty if no blockHeight requested (latest)
+ Args [1]SwingStoreExportOptions `json:"args"`
+}
+
+// retrieveRequest is the request type for retrieving an initiated export
+const retrieveRequest = "retrieve"
+
+type swingStoreRetrieveExportAction struct {
+ Type string `json:"type"` // "SWING_STORE_EXPORT"
+ Request string `json:"request"` // "retrieve"
+}
+type swingStoreRetrieveResult = string
+
+// discardRequest is the request type for discarding an initiated export that
+// was not retrieved
+const discardRequest = "discard"
+
+type swingStoreDiscardExportAction struct {
+ Type string `json:"type"` // "SWING_STORE_EXPORT"
+ Request string `json:"request"` // "discard"
+}
+
+// restoreRequest is the request type for restoring an export
+const restoreRequest = "restore"
+
+type swingStoreRestoreExportAction struct {
+ Type string `json:"type"` // "SWING_STORE_EXPORT"
+ Request string `json:"request"` // "restore"
+ BlockHeight uint64 `json:"blockHeight,omitempty"` // empty if deferring blockHeight to the manifest
+ Args [1]swingStoreImportOptions `json:"args"`
+}
+
+const (
+ // SwingStoreArtifactModeNone means that no artifacts are part of the
+ // export / import.
+ SwingStoreArtifactModeNone = "none"
+
+ // SwingStoreArtifactModeOperational represents the minimal set of artifacts
+ // needed to operate a node.
+ SwingStoreArtifactModeOperational = "operational"
+
+ // SwingStoreArtifactModeReplay represents the set of artifacts needed to
+ // replay the current incarnation of every vat.
+ SwingStoreArtifactModeReplay = "replay"
+
+ // SwingStoreArtifactModeArchival represents the set of all artifacts
+ // providing all available historical state.
+ SwingStoreArtifactModeArchival = "archival"
+
+ // SwingStoreArtifactModeDebug represents the maximal set of artifacts
+ // available in the JS swing-store, including any kept around for debugging
+ // purposes only (like previous XS heap snapshots)
+ SwingStoreArtifactModeDebug = "debug"
+)
+
+const (
+ // SwingStoreExportDataModeSkip indicates "export data" should be excluded from
+ // an export. ArtifactMode cannot be "none" in this case.
+ SwingStoreExportDataModeSkip = "skip"
+
+ // SwingStoreExportDataModeRepairMetadata indicates the "export data" should be
+ // used to repair the metadata of an existing swing-store for an import
+ // operation. ArtifactMode must be "none" in this case.
+ SwingStoreExportDataModeRepairMetadata = "repair-metadata"
+
+ // SwingStoreExportDataModeAll indicates "export data" should be part of the
+ // export or import. For import, ArtifactMode cannot be "none".
+ SwingStoreExportDataModeAll = "all"
+)
+
+// SwingStoreExportOptions are configurable options provided to the JS swing-store export
+type SwingStoreExportOptions struct {
+ // ArtifactMode controls the set of artifacts that should be included in the
+ // swing-store export. Any SwingStoreArtifactMode* const value can be used
+ // (None, Operational, Replay, Archival, Debug).
+ // See packages/cosmic-swingset/src/export-kernel-db.js initiateSwingStoreExport
+ ArtifactMode string `json:"artifactMode,omitempty"`
+ // ExportDataMode selects whether to include "export data" in the swing-store
+ // export or not. Use the value SwingStoreExportDataModeSkip or
+ // SwingStoreExportDataModeAll. If "skip", the reader returned by
+ // SwingStoreExportProvider's GetExportDataReader will be nil.
+ ExportDataMode string `json:"exportDataMode,omitempty"`
+}
+
+// SwingStoreRestoreOptions are configurable options provided to the JS swing-store import
+type SwingStoreRestoreOptions struct {
+ // ArtifactMode controls the set of artifacts that should be restored in
+ // swing-store. Any SwingStoreArtifactMode* const value can be used
+ // (None, Operational, Replay, Archival, Debug).
+ // See packages/cosmic-swingset/src/import-kernel-db.js performStateSyncImport
+ ArtifactMode string `json:"artifactMode,omitempty"`
+ // ExportDataMode selects the purpose of the restore, to recreate a
+ // swing-store (SwingStoreExportDataModeAll), or just to import missing
+ // metadata (SwingStoreExportDataModeRepairMetadata).
+ // If RepairMetadata, ArtifactMode should be SwingStoreArtifactModeNone.
+ // If All, ArtifactMode must be at least SwingStoreArtifactModeOperational.
+ ExportDataMode string `json:"exportDataMode,omitempty"`
+}
+
+type swingStoreImportOptions struct {
+ // ExportDir is the directory created by RestoreExport that JS swing-store
+ // should import from.
+ ExportDir string `json:"exportDir"`
+ // ArtifactMode is a copy of SwingStoreRestoreOptions.ArtifactMode
+ ArtifactMode string `json:"artifactMode,omitempty"`
+ // ExportDataMode is a copy of SwingStoreRestoreOptions.ExportDataMode
+ ExportDataMode string `json:"exportDataMode,omitempty"`
+}
+
+var disallowedArtifactNameChar = regexp.MustCompile(`[^-_.a-zA-Z0-9]`)
+
+// sanitizeArtifactName searches a string for all characters
+// other than ASCII alphanumerics, hyphens, underscores, and dots,
+// and replaces each of them with a hyphen.
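+// For example, sanitizeArtifactName("some/artifact name") returns
+// "some-artifact-name" (a hypothetical input).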
+func sanitizeArtifactName(name string) string {
+ return disallowedArtifactNameChar.ReplaceAllString(name, "-")
+}
+
+type operationDetails struct {
+ // isRestore indicates whether the operation in progress is a restore.
+ // It is assigned at creation and never mutated.
+ isRestore bool
+ // blockHeight is the block height of this in-progress operation.
+ // It is assigned at creation and never mutated.
+ blockHeight uint64
+ // logger is the destination for this operation's log messages.
+ // It is assigned at creation and never mutated.
+ logger log.Logger
+	// exportStartedResult is used to synchronize the commit boundary by the
+	// component performing the export operation, to ensure export determinism.
+	// Unused for restore operations.
+ // It is assigned at creation and never mutated. The started goroutine
+ // writes into the channel and closes it. The main goroutine reads from the
+ // channel.
+ exportStartedResult chan error
+ // exportRetrieved is an internal flag indicating whether the JS generated
+ // export was retrieved. It can be false regardless of the component's
+ // eventHandler reporting an error or not. It is only indicative of whether
+ // the component called retrieveExport, and used to control whether to send
+ // a discard request if the JS side stayed responsible for the generated but
+ // un-retrieved export.
+ // It is only read or written by the export operation's goroutine.
+ exportRetrieved bool
+ // exportDone is a channel that is closed when the active export operation
+ // is complete.
+ // It is assigned at creation and never mutated. The started goroutine
+ // writes into the channel and closes it. The main goroutine reads from the
+ // channel.
+ exportDone chan error
+}
+
+// activeOperation is a global variable reflecting a swing-store import or
+// export in progress on the JS side.
+// This variable is only assigned to through calls of the public methods of
+// SwingStoreExportsHandler, which rely on the exportDone channel getting
+// closed to nil this variable.
+// Only the calls to InitiateExport and RestoreExport set this to a non-nil
+// value. The goroutine in which these calls occur is referred to as the
+// "main goroutine". That goroutine may be different over time, but it's the
+// caller's responsibility to ensure those goroutines do not overlap calls to
+// the SwingStoreExportsHandler public methods.
+// See also the details of each field for the conditions under which they are
+// accessed.
+var activeOperation *operationDetails
+
+// WaitUntilSwingStoreExportStarted synchronizes with an export operation in
+// progress, if any.
+// The JS swing-store export must have started before a new block is committed
+// to ensure the content of the export is the one expected. The app must call
+// this method before sending a commit action to the JS controller.
+//
+// Waits for a just initiated export operation to have started in its goroutine.
+// If no operation is in progress (InitiateExport hasn't been called or
+// already completed), or if we previously checked if the operation had started,
+// returns immediately.
+//
+// Must be called by the main goroutine
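+//
+// Sketch of the expected ordering in the app's commit logic, where
+// commitToController is a hypothetical stand-in for sending the commit
+// action to the JS controller:
+//
+//	if err := WaitUntilSwingStoreExportStarted(); err != nil {
+//		// handle or log the export initiation error
+//	}
+//	commitToController()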
+func WaitUntilSwingStoreExportStarted() error {
+ operationDetails := activeOperation
+ if operationDetails == nil {
+ return nil
+ }
+ // Block until the active operation has started, saving the result.
+ // The operation's goroutine only produces a value in case of an error,
+ // and closes the channel once the export has started or failed.
+ // Only the first call after an export was initiated will report an error.
+ startErr := <-operationDetails.exportStartedResult
+
+ // Check if the active export operation is done, and if so, nil it out so
+ // future calls are faster.
+ select {
+ case <-operationDetails.exportDone:
+ // If there was a start error, the channel is already closed at this point.
+ activeOperation = nil
+ default:
+ // don't wait for it to finish
+ // If there is no start error, the operation may take an arbitrary amount
+ // of time to terminate, likely spanning multiple blocks. However this
+ // function will only ever observe the expected activeOperation since the
+		// internal checkNotActive() called immediately by InitiateExport will
+		// nil-out activeOperation if a stale value was still sitting around.
+ }
+
+ return startErr
+}
+
+// WaitUntilSwingStoreExportDone synchronizes with the completion of an export
+// operation in progress, if any.
+// Only a single swing-store operation may execute at a time. Calling
+// InitiateExport or RestoreExport will fail if a swing-store operation is
+// already in progress. Furthermore, a component may need to know once an
+// export it initiated has completed. Once this method call returns, the
+// goroutine is guaranteed to have terminated, and the SwingStoreExportEventHandler
+// provided to InitiateExport is no longer in use.
+//
+// Reports any error that may have occurred from InitiateExport.
+// If no export operation is in progress (InitiateExport hasn't been called or
+// already completed), or if we previously checked if an export had completed,
+// returns immediately.
+//
+// Must be called by the main goroutine
+func WaitUntilSwingStoreExportDone() error {
+ operationDetails := activeOperation
+ if operationDetails == nil {
+ return nil
+ }
+ // Block until the active export has completed.
+ // The export operation's goroutine only produces a value in case of an error,
+ // and closes the channel once the export has completed or failed.
+ // Only the first call after an export was initiated will report an error.
+ exportErr := <-operationDetails.exportDone
+ activeOperation = nil
+
+ return exportErr
+}
+
+// checkNotActive returns an error if there is an active operation.
+//
+// Always internally called by the main goroutine
+func checkNotActive() error {
+ operationDetails := activeOperation
+ if operationDetails != nil {
+ select {
+ case <-operationDetails.exportDone:
+ // nil-out any stale operation
+ activeOperation = nil
+ default:
+ if operationDetails.isRestore {
+ return fmt.Errorf("restore operation already in progress for height %d", operationDetails.blockHeight)
+ } else {
+ return fmt.Errorf("export operation already in progress for height %d", operationDetails.blockHeight)
+ }
+ }
+ }
+ return nil
+}
+
+// SwingStoreExportProvider gives access to a SwingStore "export data" and the
+// related artifacts.
+// A JS swing-store export is composed of optional "export data" (a set of
+// key/value pairs), and opaque artifacts (a name and data as bytes) that
+// complement the "export data".
+// The abstraction is similar to the JS side swing-store export abstraction,
+// but without the ability to list artifacts or random access them.
+//
+// A swing-store export for creating a state-sync snapshot will not contain any
+// "export data" since this information is reflected every block into the
+// verified cosmos DB.
+// On state-sync snapshot restore, the swingset ExtensionSnapshotter will
+// synthesize a provider for this module with "export data" sourced from the
+// restored cosmos DB, and artifacts from the extension's payloads. When
+// importing, the JS swing-store will verify that the artifacts match hashes
+// contained in the trusted "export data".
+type SwingStoreExportProvider struct {
+ // BlockHeight is the block height of the SwingStore export.
+ BlockHeight uint64
+ // GetExportDataReader returns a KVEntryReader for the "export data" of the
+ // SwingStore export, or nil if the "export data" is not part of this export.
+ GetExportDataReader func() (agoric.KVEntryReader, error)
+ // ReadNextArtifact is a function to return the next unread artifact in the SwingStore export.
+ // It errors with io.EOF upon reaching the end of the list of available artifacts.
+ ReadNextArtifact func() (types.SwingStoreArtifact, error)
+}
+
+// SwingStoreExportEventHandler is used to handle events that occur while generating
+// a swing-store export. It is provided to SwingStoreExportsHandler.InitiateExport.
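+//
+// A minimal handler (sketch, mirroring this package's test helper) retrieves
+// the export synchronously and drains its artifacts:
+//
+//	type drainHandler struct{}
+//
+//	func (drainHandler) OnExportStarted(height uint64, retrieveExport func() error) error {
+//		return retrieveExport()
+//	}
+//
+//	func (drainHandler) OnExportRetrieved(provider SwingStoreExportProvider) error {
+//		for {
+//			if _, err := provider.ReadNextArtifact(); err == io.EOF {
+//				return nil
+//			} else if err != nil {
+//				return err
+//			}
+//		}
+//	}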
+type SwingStoreExportEventHandler interface {
+ // OnExportStarted is called by InitiateExport in a goroutine after the
+ // swing-store export has successfully started.
+	// This is where the component performing the export must initiate its own
+	// work off the main goroutine, which results in retrieving and processing
+	// the swing-store export.
+ //
+ // Must call the retrieveExport function before returning, which will in turn
+ // synchronously invoke OnExportRetrieved once the swing-store export is ready.
+ OnExportStarted(blockHeight uint64, retrieveExport func() error) error
+ // OnExportRetrieved is called when the swing-store export has been retrieved,
+ // during the retrieveExport invocation.
+	// The provider is passed to this method instead of being returned by
+	// retrieveExport, so that errors can be reported even by components that
+	// are unable to propagate errors back to the OnExportStarted result, like
+	// the cosmos state-sync ExtensionSnapshotter.
+ // The implementation must synchronously consume the provider, which becomes
+ // invalid after the method returns.
+ OnExportRetrieved(provider SwingStoreExportProvider) error
+}
+
+// SwingStoreExportsHandler exclusively manages the communication with the JS side
+// related to swing-store exports, ensuring insensitivity to sub-block timing,
+// and enforcing concurrency requirements.
+// The caller of this submodule must arrange block level commit synchronization,
+// to ensure the results are deterministic.
+//
+// Some blockingSend calls performed by this submodule are non-deterministic.
+// This submodule will send messages to JS from goroutines at unpredictable
+// times, but this is safe because when handling the messages, the JS side
+// does not perform operations affecting consensus and ignores state changes
+// since committing the previous block.
+// Some other blockingSend calls however do change the JS swing-store and
+// must happen before the Swingset controller on the JS side is inited, in
+// which case the mustNotBeInited parameter will be set to true.
+type SwingStoreExportsHandler struct {
+ logger log.Logger
+ blockingSend func(action vm.Jsonable, mustNotBeInited bool) (string, error)
+}
+
+// NewSwingStoreExportsHandler creates a SwingStoreExportsHandler
+func NewSwingStoreExportsHandler(logger log.Logger, blockingSend func(action vm.Jsonable, mustNotBeInited bool) (string, error)) *SwingStoreExportsHandler {
+ return &SwingStoreExportsHandler{
+ logger: logger.With("module", fmt.Sprintf("x/%s", types.ModuleName), "submodule", "SwingStoreExportsHandler"),
+ blockingSend: blockingSend,
+ }
+}
+
+// InitiateExport synchronously verifies that there is not already an export or
+// import operation in progress and initiates a new export in a goroutine,
+// via a dedicated SWING_STORE_EXPORT blockingSend action independent of other
+// block related blockingSends, calling the given eventHandler when a related
+// blockingSend completes. If the eventHandler doesn't retrieve the export,
+// then it sends another blockingSend action to discard it.
+//
+// eventHandler is invoked solely from the spawned goroutine.
+// The "started" and "done" events can be used for synchronization with an
+// active operation taking place in the goroutine, by calling respectively the
+// WaitUntilSwingStoreExportStarted and WaitUntilSwingStoreExportDone methods
+// from the goroutine that initiated the export.
+//
+// Must be called by the main goroutine
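+//
+// Typical usage (sketch; handler implements SwingStoreExportEventHandler):
+//
+//	err := exportsHandler.InitiateExport(height, handler, SwingStoreExportOptions{
+//		ArtifactMode:   SwingStoreArtifactModeReplay,
+//		ExportDataMode: SwingStoreExportDataModeAll,
+//	})
+//	if err == nil {
+//		err = WaitUntilSwingStoreExportStarted()
+//	}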
+func (exportsHandler SwingStoreExportsHandler) InitiateExport(blockHeight uint64, eventHandler SwingStoreExportEventHandler, exportOptions SwingStoreExportOptions) error {
+ err := checkNotActive()
+ if err != nil {
+ return err
+ }
+
+ var logger log.Logger
+ if blockHeight != 0 {
+ logger = exportsHandler.logger.With("height", blockHeight)
+ } else {
+ logger = exportsHandler.logger.With("height", "latest")
+ }
+
+ // Indicate that an export operation has been initiated by setting the global
+ // activeOperation var.
+ // This structure is used to synchronize with the goroutine spawned below.
+ operationDetails := &operationDetails{
+ blockHeight: blockHeight,
+ logger: logger,
+ exportStartedResult: make(chan error, 1),
+ exportRetrieved: false,
+ exportDone: make(chan error, 1),
+ }
+ activeOperation = operationDetails
+
+ go func() {
+ var err error
+ var startedErr error
+ defer func() {
+ if err == nil {
+ err = startedErr
+ }
+ if err != nil {
+ operationDetails.exportDone <- err
+ }
+ // First, indicate an export is no longer in progress. This ensures that
+ // for an operation with a start error, a call to WaitUntilSwingStoreExportStarted
+ // waiting on exportStartedResult will always find the operation has
+ // completed, and clear the active operation instead of racing if the
+ // channel close order was reversed.
+ close(operationDetails.exportDone)
+ // Then signal the current export operation that it failed to start,
+ // which will be reported to a waiting WaitUntilSwingStoreExportStarted,
+ // or the next call otherwise.
+ if startedErr != nil {
+ operationDetails.exportStartedResult <- startedErr
+ close(operationDetails.exportStartedResult)
+ }
+ }()
+
+ initiateAction := &swingStoreInitiateExportAction{
+ Type: swingStoreExportActionType,
+ BlockHeight: blockHeight,
+ Request: initiateRequest,
+ Args: [1]SwingStoreExportOptions{exportOptions},
+ }
+
+ // blockingSend for SWING_STORE_EXPORT action is safe to call from a goroutine
+ _, startedErr = exportsHandler.blockingSend(initiateAction, false)
+
+ if startedErr != nil {
+ logger.Error("failed to initiate swing-store export", "err", startedErr)
+ // The deferred function will communicate the error and close channels
+ // in the appropriate order.
+ return
+ }
+
+ // Signal that the export operation has started successfully in the goroutine.
+ // Calls to WaitUntilSwingStoreExportStarted will no longer block.
+ close(operationDetails.exportStartedResult)
+
+ // The user provided OnExportStarted function should call retrieveExport()
+ var retrieveErr error
+ err = eventHandler.OnExportStarted(blockHeight, func() error {
+ activeOperationDetails := activeOperation
+ if activeOperationDetails != operationDetails || operationDetails.exportRetrieved {
+ // shouldn't happen, but return an error if it does
+ return errors.New("export operation no longer active")
+ }
+
+ retrieveErr = exportsHandler.retrieveExport(eventHandler.OnExportRetrieved)
+
+ return retrieveErr
+ })
+
+ // Restore any retrieve error swallowed by OnExportStarted
+ if err == nil {
+ err = retrieveErr
+ }
+ if err != nil {
+ logger.Error("failed to process swing-store export", "err", err)
+ }
+
+ // Check whether the JS generated export was retrieved by eventHandler
+ if operationDetails.exportRetrieved {
+ return
+ }
+
+	// We are discarding the export, so invalidate retrieveExport
+ operationDetails.exportRetrieved = true
+
+ discardAction := &swingStoreDiscardExportAction{
+ Type: swingStoreExportActionType,
+ Request: discardRequest,
+ }
+ _, discardErr := exportsHandler.blockingSend(discardAction, false)
+
+ if discardErr != nil {
+ logger.Error("failed to discard swing-store export", "err", err)
+ }
+
+ if err == nil {
+ err = discardErr
+ } else if discardErr != nil {
+ // Safe to wrap error and use detailed error info since this error
+ // will not go back into swingset layers
+ err = sdkerrors.Wrapf(err, "failed to discard swing-store export after failing to process export: %+v", discardErr)
+ }
+ }()
+
+ return nil
+}
+
+// retrieveExport retrieves an initiated export then invokes onExportRetrieved
+// with the retrieved export.
+//
+// It performs a SWING_STORE_EXPORT blockingSend which on success returns a
+// string of the directory containing the JS swing-store export. It then reads
+// the export manifest generated by the JS side, and synthesizes a
+// SwingStoreExportProvider for the onExportRetrieved callback to access the
+// retrieved swing-store export.
+// The export manifest format is described by the exportManifest struct.
+//
+// After calling onExportRetrieved, the export directory and its contents are
+// deleted.
+//
+// This will block until the export is ready. Internally invoked by the
+// InitiateExport logic in the export operation's goroutine.
+func (exportsHandler SwingStoreExportsHandler) retrieveExport(onExportRetrieved func(provider SwingStoreExportProvider) error) (err error) {
+ operationDetails := activeOperation
+ if operationDetails == nil {
+ // shouldn't happen, but return an error if it does
+ return errors.New("no active swing-store export operation")
+ }
+
+ blockHeight := operationDetails.blockHeight
+
+ action := &swingStoreRetrieveExportAction{
+ Type: swingStoreExportActionType,
+ Request: retrieveRequest,
+ }
+ out, err := exportsHandler.blockingSend(action, false)
+
+ if err != nil {
+ return err
+ }
+ operationDetails.exportRetrieved = true
+
+ var exportDir swingStoreRetrieveResult
+ err = json.Unmarshal([]byte(out), &exportDir)
+ if err != nil {
+ return err
+ }
+
+ defer os.RemoveAll(exportDir)
+
+ provider, err := OpenSwingStoreExportDirectory(exportDir)
+ if err != nil {
+ return err
+ }
+
+ if blockHeight != 0 && provider.BlockHeight != blockHeight {
+ return fmt.Errorf("export manifest blockHeight (%d) doesn't match (%d)", provider.BlockHeight, blockHeight)
+ }
+
+ err = onExportRetrieved(provider)
+ if err != nil {
+ return err
+ }
+
+ operationDetails.logger.Info("retrieved swing-store export", "exportDir", exportDir)
+
+ return nil
+}
+
+// OpenSwingStoreExportDirectory creates an export provider from a swing-store
+// export saved on disk in the provided directory. It expects the export manifest
+// to be present in that directory. The provider's function will read the
+// export's data and artifacts from disk on demand. Each artifact is using a
+// dedicated file, and the export data is read from a jsonl-like file, if any.
+// The export manifest filename and overall export format is common with the JS
+// swing-store import/export logic.
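+//
+// Example (sketch):
+//
+//	provider, err := OpenSwingStoreExportDirectory(exportDir)
+//	if err != nil {
+//		return err
+//	}
+//	artifact, err := provider.ReadNextArtifact() // io.EOF once exhausted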
+func OpenSwingStoreExportDirectory(exportDir string) (SwingStoreExportProvider, error) {
+ rawManifest, err := os.ReadFile(filepath.Join(exportDir, ExportManifestFilename))
+ if err != nil {
+ return SwingStoreExportProvider{}, err
+ }
+
+ var manifest exportManifest
+ err = json.Unmarshal(rawManifest, &manifest)
+ if err != nil {
+ return SwingStoreExportProvider{}, err
+ }
+
+ getExportDataReader := func() (agoric.KVEntryReader, error) {
+ if manifest.Data == "" {
+ return nil, nil
+ }
+
+ dataFile, err := os.Open(filepath.Join(exportDir, manifest.Data))
+ if err != nil {
+ return nil, err
+ }
+ exportDataReader := agoric.NewJsonlKVEntryDecoderReader(dataFile)
+ return exportDataReader, nil
+ }
+
+ nextArtifact := 0
+
+ readNextArtifact := func() (artifact types.SwingStoreArtifact, err error) {
+ if nextArtifact == len(manifest.Artifacts) {
+ return artifact, io.EOF
+ } else if nextArtifact > len(manifest.Artifacts) {
+ return artifact, fmt.Errorf("exceeded expected artifact count: %d > %d", nextArtifact, len(manifest.Artifacts))
+ }
+
+ artifactEntry := manifest.Artifacts[nextArtifact]
+ nextArtifact++
+
+ artifactName := artifactEntry[0]
+ fileName := artifactEntry[1]
+ if artifactName == UntrustedExportDataArtifactName {
+ return artifact, fmt.Errorf("unexpected export artifact name %s", artifactName)
+ }
+ artifact.Name = artifactName
+ artifact.Data, err = os.ReadFile(filepath.Join(exportDir, fileName))
+
+ return artifact, err
+ }
+
+ return SwingStoreExportProvider{BlockHeight: manifest.BlockHeight, GetExportDataReader: getExportDataReader, ReadNextArtifact: readNextArtifact}, nil
+}
+
+// RestoreExport restores the JS swing-store using previously exported data and artifacts.
+//
+// Must be called by the main goroutine
+func (exportsHandler SwingStoreExportsHandler) RestoreExport(provider SwingStoreExportProvider, restoreOptions SwingStoreRestoreOptions) error {
+ err := checkNotActive()
+ if err != nil {
+ return err
+ }
+
+ blockHeight := provider.BlockHeight
+
+ // We technically don't need to create an active operation here since both
+ // InitiateExport and RestoreExport should only be called from the main
+ // goroutine, but it doesn't cost much to add in case things go wrong.
+ operationDetails := &operationDetails{
+ isRestore: true,
+ blockHeight: blockHeight,
+ logger: exportsHandler.logger,
+ // goroutine synchronization is unnecessary since anything checking should
+ // be called from the same goroutine.
+ // Effectively WaitUntilSwingStoreExportStarted would block infinitely and
+ // exportsHandler.InitiateExport will error when calling checkNotActive.
+ exportStartedResult: nil,
+ exportDone: nil,
+ }
+ activeOperation = operationDetails
+ defer func() {
+ activeOperation = nil
+ }()
+
+ exportDir, err := os.MkdirTemp("", fmt.Sprintf("agd-swing-store-restore-%d-*", blockHeight))
+ if err != nil {
+ return err
+ }
+ defer os.RemoveAll(exportDir)
+
+ err = WriteSwingStoreExportToDirectory(provider, exportDir)
+ if err != nil {
+ return err
+ }
+
+ action := &swingStoreRestoreExportAction{
+ Type: swingStoreExportActionType,
+ BlockHeight: blockHeight,
+ Request: restoreRequest,
+ Args: [1]swingStoreImportOptions{{
+ ExportDir: exportDir,
+ ArtifactMode: restoreOptions.ArtifactMode,
+ ExportDataMode: restoreOptions.ExportDataMode,
+ }},
+ }
+
+ _, err = exportsHandler.blockingSend(action, true)
+ if err != nil {
+ return err
+ }
+
+ exportsHandler.logger.Info("restored swing-store export", "exportDir", exportDir, "height", blockHeight)
+
+ return nil
+}
+
+// WriteSwingStoreExportToDirectory consumes a provider and saves a swing-store
+// export to disk in the provided directory. It creates files for each artifact
+// deriving a filename from the artifact name, and stores any "export data" in
+// a jsonl-like file, before saving the export manifest linking these together.
+// The export manifest filename and overall export format is common with the JS
+// swing-store import/export logic.
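+//
+// It is the counterpart of OpenSwingStoreExportDirectory: writing an export
+// and re-opening the same directory yields an equivalent provider (sketch):
+//
+//	if err := WriteSwingStoreExportToDirectory(provider, dir); err != nil {
+//		return err
+//	}
+//	roundTripped, err := OpenSwingStoreExportDirectory(dir)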
+func WriteSwingStoreExportToDirectory(provider SwingStoreExportProvider, exportDir string) error {
+ manifest := exportManifest{
+ BlockHeight: provider.BlockHeight,
+ }
+
+ exportDataReader, err := provider.GetExportDataReader()
+ if err != nil {
+ return err
+ }
+
+ if exportDataReader != nil {
+ defer exportDataReader.Close()
+
+ manifest.Data = exportDataFilename
+ exportDataFile, err := os.OpenFile(filepath.Join(exportDir, exportDataFilename), os.O_CREATE|os.O_WRONLY, exportedFilesMode)
+ if err != nil {
+ return err
+ }
+ defer exportDataFile.Close()
+
+ err = agoric.EncodeKVEntryReaderToJsonl(exportDataReader, exportDataFile)
+ if err != nil {
+ return err
+ }
+
+ err = exportDataFile.Sync()
+ if err != nil {
+ return err
+ }
+ }
+
+ writeExportFile := func(filename string, data []byte) error {
+ return os.WriteFile(filepath.Join(exportDir, filename), data, exportedFilesMode)
+ }
+
+ for {
+ artifact, err := provider.ReadNextArtifact()
+ if err == io.EOF {
+ break
+ } else if err != nil {
+ return err
+ }
+
+ if artifact.Name != UntrustedExportDataArtifactName {
+ // An artifact is only verifiable by the JS swing-store import using the
+ // information contained in the "export data".
+ // Since we cannot trust the source of the artifact at this point,
+ // including that the artifact's name is genuine, we generate a safe and
+ // unique filename from the artifact's name we received, by substituting
+ // any non letters-digits-hyphen-underscore-dot by a hyphen, and
+ // prefixing with an incremented id.
+ // The filename is not used for any purpose in the import logic.
+ filename := sanitizeArtifactName(artifact.Name)
+ filename = fmt.Sprintf("%d-%s", len(manifest.Artifacts), filename)
+ manifest.Artifacts = append(manifest.Artifacts, [2]string{artifact.Name, filename})
+ err = writeExportFile(filename, artifact.Data)
+ } else {
+ // Pseudo artifact containing untrusted export data which may have been
+ // saved separately for debugging purposes (not referenced from the manifest)
+ err = writeExportFile(untrustedExportDataFilename, artifact.Data)
+ }
+ if err != nil {
+ return err
+ }
+ }
+
+ manifestBytes, err := json.MarshalIndent(manifest, "", " ")
+ if err != nil {
+ return err
+ }
+ return writeExportFile(ExportManifestFilename, manifestBytes)
+}
diff --git a/golang/cosmos/x/swingset/keeper/swing_store_exports_handler_test.go b/golang/cosmos/x/swingset/keeper/swing_store_exports_handler_test.go
new file mode 100644
index 00000000000..c13951c9414
--- /dev/null
+++ b/golang/cosmos/x/swingset/keeper/swing_store_exports_handler_test.go
@@ -0,0 +1,247 @@
+package keeper
+
+import (
+ "errors"
+ "io"
+ "testing"
+
+ "github.com/Agoric/agoric-sdk/golang/cosmos/vm"
+ "github.com/tendermint/tendermint/libs/log"
+)
+
+func newTestSwingStoreExportsHandler() *SwingStoreExportsHandler {
+ logger := log.NewNopLogger() // log.NewTMLogger(log.NewSyncWriter( /* os.Stdout*/ io.Discard)).With("module", "sdk/app")
+ return &SwingStoreExportsHandler{
+ logger: logger,
+ blockingSend: func(action vm.Jsonable, mustNotBeInited bool) (string, error) { return "", nil },
+ }
+}
+
+var _ SwingStoreExportEventHandler = testSwingStoreEventHandler{}
+
+type testSwingStoreEventHandler struct {
+ onExportStarted func(height uint64, retrieveExport func() error) error
+ onExportRetrieved func(provider SwingStoreExportProvider) error
+}
+
+func newTestSwingStoreEventHandler() testSwingStoreEventHandler {
+ return testSwingStoreEventHandler{
+ onExportStarted: func(height uint64, retrieveExport func() error) error {
+ return retrieveExport()
+ },
+ onExportRetrieved: func(provider SwingStoreExportProvider) error {
+ for {
+ _, err := provider.ReadNextArtifact()
+ if err == io.EOF {
+ return nil
+ } else if err != nil {
+ return err
+ }
+ }
+ },
+ }
+}
+
+func (taker testSwingStoreEventHandler) OnExportStarted(height uint64, retrieveExport func() error) error {
+ return taker.onExportStarted(height, retrieveExport)
+}
+
+func (taker testSwingStoreEventHandler) OnExportRetrieved(provider SwingStoreExportProvider) error {
+ return taker.onExportRetrieved(provider)
+}
+
+func TestSwingStoreSnapshotterInProgress(t *testing.T) {
+ exportsHandler := newTestSwingStoreExportsHandler()
+ ch := make(chan struct{})
+ exportEventHandler := newTestSwingStoreEventHandler()
+ exportEventHandler.onExportStarted = func(height uint64, retrieveExport func() error) error {
+ <-ch
+ return nil
+ }
+ err := exportsHandler.InitiateExport(123, exportEventHandler, SwingStoreExportOptions{})
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = WaitUntilSwingStoreExportStarted()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = exportsHandler.InitiateExport(456, newTestSwingStoreEventHandler(), SwingStoreExportOptions{})
+ if err == nil {
+ t.Error("wanted error for export operation in progress")
+ }
+
+ err = exportsHandler.RestoreExport(SwingStoreExportProvider{BlockHeight: 456}, SwingStoreRestoreOptions{})
+ if err == nil {
+ t.Error("wanted error for export operation in progress")
+ }
+
+ close(ch)
+ err = WaitUntilSwingStoreExportDone()
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = exportsHandler.InitiateExport(456, exportEventHandler, SwingStoreExportOptions{})
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = WaitUntilSwingStoreExportDone()
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestSwingStoreSnapshotterSecondCommit(t *testing.T) {
+ exportsHandler := newTestSwingStoreExportsHandler()
+
+ exportEventHandler := newTestSwingStoreEventHandler()
+ // Use a channel to block the snapshot goroutine after it has started but before it exits.
+ ch := make(chan struct{})
+ exportEventHandler.onExportStarted = func(height uint64, retrieveExport func() error) error {
+ <-ch
+ return nil
+ }
+
+ // First run through app.Commit()
+ err := WaitUntilSwingStoreExportStarted()
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = exportsHandler.InitiateExport(123, exportEventHandler, SwingStoreExportOptions{})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Second run through app.Commit() - should return right away
+ err = WaitUntilSwingStoreExportStarted()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // close the signaling channel to let goroutine exit
+ close(ch)
+ err = WaitUntilSwingStoreExportDone()
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestSwingStoreSnapshotterInitiateFails(t *testing.T) {
+ exportsHandler := newTestSwingStoreExportsHandler()
+ exportEventHandler := newTestSwingStoreEventHandler()
+ exportsHandler.blockingSend = func(action vm.Jsonable, mustNotBeInited bool) (string, error) {
+ initiateAction, ok := action.(*swingStoreInitiateExportAction)
+ if ok && initiateAction.Request == "initiate" {
+ return "", errors.New("initiate failed")
+ }
+ return "", nil
+ }
+
+ err := exportsHandler.InitiateExport(123, exportEventHandler, SwingStoreExportOptions{})
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = WaitUntilSwingStoreExportStarted()
+ if err == nil {
+ t.Fatal("wanted initiation error")
+ }
+ if err.Error() != "initiate failed" {
+ t.Errorf(`wanted error "initiate failed", got "%s"`, err.Error())
+ }
+ // another wait should succeed without error
+ err = WaitUntilSwingStoreExportStarted()
+ if err != nil {
+ t.Error(err)
+ }
+ err = WaitUntilSwingStoreExportDone()
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestSwingStoreSnapshotterRetrievalFails(t *testing.T) {
+ exportsHandler := newTestSwingStoreExportsHandler()
+ var retrieveError error
+ exportsHandler.blockingSend = func(action vm.Jsonable, mustNotBeInited bool) (string, error) {
+ retrieveAction, ok := action.(*swingStoreRetrieveExportAction)
+ if ok && retrieveAction.Request == "retrieve" {
+ retrieveError = errors.New("retrieve failed")
+ return "", retrieveError
+ }
+ return "", nil
+ }
+ exportEventHandler := newTestSwingStoreEventHandler()
+ var savedErr error
+ ch := make(chan struct{})
+ exportEventHandler.onExportStarted = func(height uint64, retrieveExport func() error) error {
+ savedErr = retrieveExport()
+ <-ch
+ return savedErr
+ }
+
+ err := exportsHandler.InitiateExport(123, exportEventHandler, SwingStoreExportOptions{})
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = WaitUntilSwingStoreExportStarted()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ close(ch)
+ if savedErr != retrieveError {
+ t.Errorf(`wanted retrieval error, got "%v"`, savedErr)
+ }
+ err = WaitUntilSwingStoreExportDone()
+ if err != retrieveError {
+ t.Errorf(`wanted retrieval error, got "%v"`, err)
+ }
+}
+
+func TestSwingStoreSnapshotterDiscard(t *testing.T) {
+ discardCalled := false
+ exportsHandler := newTestSwingStoreExportsHandler()
+ exportsHandler.blockingSend = func(action vm.Jsonable, mustNotBeInited bool) (string, error) {
+ discardAction, ok := action.(*swingStoreDiscardExportAction)
+ if ok && discardAction.Request == "discard" {
+ discardCalled = true
+ }
+ return "", nil
+ }
+
+ // simulate an onExportStarted which successfully calls retrieveExport()
+ exportEventHandler := newTestSwingStoreEventHandler()
+ exportEventHandler.onExportStarted = func(height uint64, retrieveExport func() error) error {
+ activeOperation.exportRetrieved = true
+ return nil
+ }
+ err := exportsHandler.InitiateExport(123, exportEventHandler, SwingStoreExportOptions{})
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = WaitUntilSwingStoreExportDone()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if discardCalled {
+ t.Error("didn't want discard called")
+ }
+
+ // simulate an onExportStarted which doesn't call retrieveExport()
+ exportEventHandler = newTestSwingStoreEventHandler()
+ exportEventHandler.onExportStarted = func(height uint64, retrieveExport func() error) error {
+ return nil
+ }
+ err = exportsHandler.InitiateExport(456, exportEventHandler, SwingStoreExportOptions{})
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = WaitUntilSwingStoreExportDone()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !discardCalled {
+ t.Error("wanted discard called")
+ }
+}
diff --git a/golang/cosmos/x/swingset/module.go b/golang/cosmos/x/swingset/module.go
index 8e3f030ccf9..ec6f3b4fd45 100644
--- a/golang/cosmos/x/swingset/module.go
+++ b/golang/cosmos/x/swingset/module.go
@@ -80,14 +80,22 @@ func (AppModuleBasic) GetTxCmd() *cobra.Command {
type AppModule struct {
AppModuleBasic
- keeper Keeper
+ keeper Keeper
+ swingStoreExportsHandler *SwingStoreExportsHandler
+ setBootstrapNeeded func()
+ ensureControllerInited func(sdk.Context)
+ swingStoreExportDir string
}
// NewAppModule creates a new AppModule Object
-func NewAppModule(k Keeper) AppModule {
+func NewAppModule(k Keeper, swingStoreExportsHandler *SwingStoreExportsHandler, setBootstrapNeeded func(), ensureControllerInited func(sdk.Context), swingStoreExportDir string) AppModule {
am := AppModule{
- AppModuleBasic: AppModuleBasic{},
- keeper: k,
+ AppModuleBasic: AppModuleBasic{},
+ keeper: k,
+ swingStoreExportsHandler: swingStoreExportsHandler,
+ setBootstrapNeeded: setBootstrapNeeded,
+ ensureControllerInited: ensureControllerInited,
+ swingStoreExportDir: swingStoreExportDir,
}
return am
}
@@ -125,6 +133,8 @@ func (am AppModule) RegisterServices(cfg module.Configurator) {
func (AppModule) ConsensusVersion() uint64 { return 2 }
func (am AppModule) BeginBlock(ctx sdk.Context, req abci.RequestBeginBlock) {
+ am.ensureControllerInited(ctx)
+
err := BeginBlock(ctx, req, am.keeper)
if err != nil {
fmt.Println("BeginBlock error:", err)
@@ -144,13 +154,25 @@ func (am AppModule) EndBlock(ctx sdk.Context, req abci.RequestEndBlock) []abci.V
return []abci.ValidatorUpdate{}
}
+func (am AppModule) checkSwingStoreExportSetup() {
+ if am.swingStoreExportDir == "" {
+ panic(fmt.Errorf("SwingStore export dir not set"))
+ }
+}
+
func (am AppModule) InitGenesis(ctx sdk.Context, cdc codec.JSONCodec, data json.RawMessage) []abci.ValidatorUpdate {
var genesisState types.GenesisState
cdc.MustUnmarshalJSON(data, &genesisState)
- return InitGenesis(ctx, am.keeper, &genesisState)
+ am.checkSwingStoreExportSetup()
+ bootstrapNeeded := InitGenesis(ctx, am.keeper, am.swingStoreExportsHandler, am.swingStoreExportDir, &genesisState)
+ if bootstrapNeeded {
+ am.setBootstrapNeeded()
+ }
+ return []abci.ValidatorUpdate{}
}
func (am AppModule) ExportGenesis(ctx sdk.Context, cdc codec.JSONCodec) json.RawMessage {
- gs := ExportGenesis(ctx, am.keeper)
+ am.checkSwingStoreExportSetup()
+ gs := ExportGenesis(ctx, am.keeper, am.swingStoreExportsHandler, am.swingStoreExportDir)
return cdc.MustMarshalJSON(gs)
}
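
The widened `NewAppModule` signature implies matching wiring in the application constructor. A hypothetical sketch, where every name except `swingset.NewAppModule` stands in for the application's own state (the actual `app.go` changes are not part of this excerpt):

```go
// Hypothetical app.go wiring for the new AppModule dependencies.
func (app *App) newSwingsetModule() swingset.AppModule {
	return swingset.NewAppModule(
		app.SwingSetKeeper,           // the swingset Keeper
		app.swingStoreExportsHandler, // coordinates swing-store export operations
		app.setBootstrapNeeded,       // records that InitGenesis requires a bootstrap
		app.ensureControllerInited,   // lazily initializes the JS controller
		app.swingStoreExportDir,      // directory for genesis swing-store exports
	)
}
```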
diff --git a/golang/cosmos/x/swingset/swingset.go b/golang/cosmos/x/swingset/swingset.go
new file mode 100644
index 00000000000..8281e938932
--- /dev/null
+++ b/golang/cosmos/x/swingset/swingset.go
@@ -0,0 +1,71 @@
+package swingset
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ agoric "github.com/Agoric/agoric-sdk/golang/cosmos/types"
+ "github.com/Agoric/agoric-sdk/golang/cosmos/vm"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+)
+
+// portHandler implements vm.PortHandler
+// for processing inbound messages from Swingset.
+type portHandler struct {
+ keeper Keeper
+}
+
+type swingsetMessage struct {
+ Method string `json:"method"`
+ Args []json.RawMessage `json:"args"`
+}
+
+const (
+ SwingStoreUpdateExportData = "swingStoreUpdateExportData"
+)
+
+// NewPortHandler returns a port handler for a swingset Keeper.
+func NewPortHandler(k Keeper) vm.PortHandler {
+ return portHandler{keeper: k}
+}
+
+// Receive implements the vm.PortHandler method.
+// It receives and processes an inbound message, returning the
+// JSON-serialized response or an error.
+func (ph portHandler) Receive(ctx *vm.ControllerContext, str string) (string, error) {
+ var msg swingsetMessage
+ err := json.Unmarshal([]byte(str), &msg)
+ if err != nil {
+ return "", err
+ }
+
+ switch msg.Method {
+ case SwingStoreUpdateExportData:
+ return ph.handleSwingStoreUpdateExportData(ctx.Context, msg.Args)
+
+ default:
+ return "", fmt.Errorf("unrecognized swingset method %s", msg.Method)
+ }
+}
+
+func (ph portHandler) handleSwingStoreUpdateExportData(ctx sdk.Context, entries []json.RawMessage) (ret string, err error) {
+ store := ph.keeper.GetSwingStore(ctx)
+ exportDataReader := agoric.NewJsonRawMessageKVEntriesReader(entries)
+ defer exportDataReader.Close()
+ for {
+ entry, err := exportDataReader.Read()
+ if err == io.EOF {
+ return "true", nil
+ } else if err != nil {
+ return ret, err
+ }
+
+ key := []byte(entry.Key())
+ if !entry.HasValue() {
+ store.Delete(key)
+ } else {
+ store.Set(key, []byte(entry.StringValue()))
+ }
+ }
+}
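
The handler above expects each element of `args` to decode as a KV entry. Assuming the JSON form of a KV entry is a `[key, value]` tuple with `null` marking deletion (consistent with the `[path, value?]` arrays the removed `UnmarshalStorageEntry` accepted later in this diff), an inbound message might look like this sketch, where `keeper` and `cctx` are placeholders for a configured Keeper and `*vm.ControllerContext`:

```go
// Illustrative inbound message; the key names and value payloads are made up.
msg := `{
  "method": "swingStoreUpdateExportData",
  "args": [
    ["swingStore.someKey", "someValue"],
    ["swingStore.obsoleteKey", null]
  ]
}`
ret, err := NewPortHandler(keeper).Receive(cctx, msg)
// On success ret is "true": the first entry was set, the second deleted.
_, _ = ret, err
```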
diff --git a/golang/cosmos/x/swingset/types/genesis.pb.go b/golang/cosmos/x/swingset/types/genesis.pb.go
index acaa75a5272..47d94b1e51f 100644
--- a/golang/cosmos/x/swingset/types/genesis.pb.go
+++ b/golang/cosmos/x/swingset/types/genesis.pb.go
@@ -25,8 +25,9 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
// The initial or exported state.
type GenesisState struct {
- Params Params `protobuf:"bytes,2,opt,name=params,proto3" json:"params"`
- State State `protobuf:"bytes,3,opt,name=state,proto3" json:"state"`
+ Params Params `protobuf:"bytes,2,opt,name=params,proto3" json:"params"`
+ State State `protobuf:"bytes,3,opt,name=state,proto3" json:"state"`
+ SwingStoreExportData []*SwingStoreExportDataEntry `protobuf:"bytes,4,rep,name=swing_store_export_data,json=swingStoreExportData,proto3" json:"swingStoreExportData"`
}
func (m *GenesisState) Reset() { *m = GenesisState{} }
@@ -76,29 +77,96 @@ func (m *GenesisState) GetState() State {
return State{}
}
+func (m *GenesisState) GetSwingStoreExportData() []*SwingStoreExportDataEntry {
+ if m != nil {
+ return m.SwingStoreExportData
+ }
+ return nil
+}
+
+// A SwingStore "export data" entry.
+type SwingStoreExportDataEntry struct {
+ Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+ Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (m *SwingStoreExportDataEntry) Reset() { *m = SwingStoreExportDataEntry{} }
+func (m *SwingStoreExportDataEntry) String() string { return proto.CompactTextString(m) }
+func (*SwingStoreExportDataEntry) ProtoMessage() {}
+func (*SwingStoreExportDataEntry) Descriptor() ([]byte, []int) {
+ return fileDescriptor_49b057311de9d296, []int{1}
+}
+func (m *SwingStoreExportDataEntry) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SwingStoreExportDataEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_SwingStoreExportDataEntry.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *SwingStoreExportDataEntry) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SwingStoreExportDataEntry.Merge(m, src)
+}
+func (m *SwingStoreExportDataEntry) XXX_Size() int {
+ return m.Size()
+}
+func (m *SwingStoreExportDataEntry) XXX_DiscardUnknown() {
+ xxx_messageInfo_SwingStoreExportDataEntry.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SwingStoreExportDataEntry proto.InternalMessageInfo
+
+func (m *SwingStoreExportDataEntry) GetKey() string {
+ if m != nil {
+ return m.Key
+ }
+ return ""
+}
+
+func (m *SwingStoreExportDataEntry) GetValue() string {
+ if m != nil {
+ return m.Value
+ }
+ return ""
+}
+
func init() {
proto.RegisterType((*GenesisState)(nil), "agoric.swingset.GenesisState")
+ proto.RegisterType((*SwingStoreExportDataEntry)(nil), "agoric.swingset.SwingStoreExportDataEntry")
}
func init() { proto.RegisterFile("agoric/swingset/genesis.proto", fileDescriptor_49b057311de9d296) }
var fileDescriptor_49b057311de9d296 = []byte{
- // 234 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4d, 0x4c, 0xcf, 0x2f,
- 0xca, 0x4c, 0xd6, 0x2f, 0x2e, 0xcf, 0xcc, 0x4b, 0x2f, 0x4e, 0x2d, 0xd1, 0x4f, 0x4f, 0xcd, 0x4b,
- 0x2d, 0xce, 0x2c, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x87, 0x48, 0xeb, 0xc1, 0xa4,
- 0xa5, 0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0x72, 0xfa, 0x20, 0x16, 0x44, 0x99, 0x94, 0x1c, 0xba,
- 0x29, 0x30, 0x06, 0x44, 0x5e, 0xa9, 0x9e, 0x8b, 0xc7, 0x1d, 0x62, 0x6e, 0x70, 0x49, 0x62, 0x49,
- 0xaa, 0x90, 0x29, 0x17, 0x5b, 0x41, 0x62, 0x51, 0x62, 0x6e, 0xb1, 0x04, 0x93, 0x02, 0xa3, 0x06,
- 0xb7, 0x91, 0xb8, 0x1e, 0x9a, 0x3d, 0x7a, 0x01, 0x60, 0x69, 0x27, 0x96, 0x13, 0xf7, 0xe4, 0x19,
- 0x82, 0xa0, 0x8a, 0x85, 0x8c, 0xb8, 0x58, 0x8b, 0x41, 0xfa, 0x25, 0x98, 0xc1, 0xba, 0xc4, 0x30,
- 0x74, 0x81, 0x4d, 0x87, 0x6a, 0x82, 0x28, 0xb5, 0x62, 0x79, 0xb1, 0x40, 0x9e, 0xc1, 0x29, 0xf4,
- 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f, 0x3c, 0x92, 0x63, 0x9c, 0xf0, 0x58, 0x8e,
- 0xe1, 0xc2, 0x63, 0x39, 0x86, 0x1b, 0x8f, 0xe5, 0x18, 0xa2, 0xac, 0xd3, 0x33, 0x4b, 0x32, 0x4a,
- 0x93, 0xf4, 0x92, 0xf3, 0x73, 0xf5, 0x1d, 0x21, 0xbe, 0x80, 0x98, 0xaa, 0x5b, 0x9c, 0x92, 0xad,
- 0x9f, 0x9e, 0x9f, 0x93, 0x98, 0x97, 0xae, 0x9f, 0x9c, 0x5f, 0x9c, 0x9b, 0x5f, 0xac, 0x5f, 0x81,
- 0xf0, 0x60, 0x49, 0x65, 0x41, 0x6a, 0x71, 0x12, 0x1b, 0xd8, 0x7b, 0xc6, 0x80, 0x00, 0x00, 0x00,
- 0xff, 0xff, 0x65, 0xe6, 0xb9, 0x87, 0x46, 0x01, 0x00, 0x00,
+ // 334 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0xcf, 0x4b, 0x02, 0x41,
+ 0x1c, 0xc5, 0x77, 0xf2, 0x07, 0x38, 0x06, 0xc5, 0x22, 0xb9, 0x09, 0x8d, 0xe2, 0x49, 0x82, 0x76,
+ 0xc0, 0xe8, 0x52, 0xa7, 0x2c, 0xe9, 0x1a, 0x2b, 0x5d, 0xba, 0xc8, 0xa8, 0xc3, 0xb4, 0xa8, 0x3b,
+ 0xcb, 0x7c, 0xc7, 0x52, 0xfa, 0x27, 0xfa, 0x13, 0xfa, 0x73, 0x3c, 0x7a, 0xec, 0x24, 0xa1, 0x97,
+ 0xe8, 0x6f, 0xe8, 0x10, 0x3b, 0xa3, 0x04, 0x6a, 0xb7, 0xb7, 0xfb, 0x79, 0xef, 0x0d, 0x33, 0x0f,
+ 0x9f, 0x30, 0x21, 0x55, 0xd8, 0xa5, 0xf0, 0x12, 0x46, 0x02, 0xb8, 0xa6, 0x82, 0x47, 0x1c, 0x42,
+ 0xf0, 0x63, 0x25, 0xb5, 0x74, 0x0f, 0x2c, 0xf6, 0xd7, 0xb8, 0x54, 0x10, 0x52, 0x48, 0xc3, 0x68,
+ 0xa2, 0xac, 0xad, 0x44, 0x36, 0x5b, 0xd6, 0xc2, 0xf2, 0xea, 0x0f, 0xc2, 0xfb, 0x77, 0xb6, 0xb8,
+ 0xa5, 0x99, 0xe6, 0xee, 0x05, 0xce, 0xc6, 0x4c, 0xb1, 0x21, 0x78, 0x7b, 0x15, 0x54, 0xcb, 0xd7,
+ 0x8b, 0xfe, 0xc6, 0x41, 0xfe, 0xbd, 0xc1, 0x8d, 0xf4, 0x74, 0x5e, 0x76, 0x82, 0x95, 0xd9, 0xad,
+ 0xe3, 0x0c, 0x24, 0x79, 0x2f, 0x65, 0x52, 0x47, 0x5b, 0x29, 0xd3, 0xbe, 0x0a, 0x59, 0xab, 0xfb,
+ 0x8a, 0x8b, 0x06, 0xb7, 0x41, 0x4b, 0xc5, 0xdb, 0x7c, 0x1c, 0x4b, 0xa5, 0xdb, 0x3d, 0xa6, 0x99,
+ 0x97, 0xae, 0xa4, 0x6a, 0xf9, 0xfa, 0xe9, 0x76, 0x4b, 0x22, 0x5a, 0x89, 0xbd, 0x69, 0xdc, 0xb7,
+ 0x4c, 0xb3, 0x66, 0xa4, 0xd5, 0xa4, 0xe1, 0x7d, 0xcf, 0xcb, 0x05, 0xd8, 0x81, 0x83, 0x9d, 0x7f,
+ 0x2f, 0xd3, 0x5f, 0xef, 0x65, 0xa7, 0x7a, 0x83, 0x8f, 0xff, 0xad, 0x74, 0x0f, 0x71, 0xaa, 0xcf,
+ 0x27, 0x1e, 0xaa, 0xa0, 0x5a, 0x2e, 0x48, 0xa4, 0x5b, 0xc0, 0x99, 0x67, 0x36, 0x18, 0x71, 0xf3,
+ 0x36, 0xb9, 0xc0, 0x7e, 0x34, 0x1e, 0xa6, 0x0b, 0x82, 0x66, 0x0b, 0x82, 0x3e, 0x17, 0x04, 0xbd,
+ 0x2d, 0x89, 0x33, 0x5b, 0x12, 0xe7, 0x63, 0x49, 0x9c, 0xc7, 0x2b, 0x11, 0xea, 0xa7, 0x51, 0xc7,
+ 0xef, 0xca, 0x21, 0xbd, 0xb6, 0x43, 0xd8, 0x1b, 0x9d, 0x41, 0xaf, 0x4f, 0x85, 0x1c, 0xb0, 0x48,
+ 0xd0, 0xae, 0x84, 0xa1, 0x04, 0x3a, 0xfe, 0xdb, 0x48, 0x4f, 0x62, 0x0e, 0x9d, 0xac, 0x59, 0xe8,
+ 0xfc, 0x37, 0x00, 0x00, 0xff, 0xff, 0x94, 0xe9, 0x22, 0x36, 0x09, 0x02, 0x00, 0x00,
}
func (m *GenesisState) Marshal() (dAtA []byte, err error) {
@@ -121,6 +189,20 @@ func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
+ if len(m.SwingStoreExportData) > 0 {
+ for iNdEx := len(m.SwingStoreExportData) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.SwingStoreExportData[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenesis(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ }
{
size, err := m.State.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
@@ -144,6 +226,43 @@ func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
+func (m *SwingStoreExportDataEntry) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SwingStoreExportDataEntry) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SwingStoreExportDataEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Value) > 0 {
+ i -= len(m.Value)
+ copy(dAtA[i:], m.Value)
+ i = encodeVarintGenesis(dAtA, i, uint64(len(m.Value)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Key) > 0 {
+ i -= len(m.Key)
+ copy(dAtA[i:], m.Key)
+ i = encodeVarintGenesis(dAtA, i, uint64(len(m.Key)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
func encodeVarintGenesis(dAtA []byte, offset int, v uint64) int {
offset -= sovGenesis(v)
base := offset
@@ -165,6 +284,29 @@ func (m *GenesisState) Size() (n int) {
n += 1 + l + sovGenesis(uint64(l))
l = m.State.Size()
n += 1 + l + sovGenesis(uint64(l))
+ if len(m.SwingStoreExportData) > 0 {
+ for _, e := range m.SwingStoreExportData {
+ l = e.Size()
+ n += 1 + l + sovGenesis(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *SwingStoreExportDataEntry) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Key)
+ if l > 0 {
+ n += 1 + l + sovGenesis(uint64(l))
+ }
+ l = len(m.Value)
+ if l > 0 {
+ n += 1 + l + sovGenesis(uint64(l))
+ }
return n
}
@@ -269,6 +411,154 @@ func (m *GenesisState) Unmarshal(dAtA []byte) error {
return err
}
iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SwingStoreExportData", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenesis
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenesis
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenesis
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.SwingStoreExportData = append(m.SwingStoreExportData, &SwingStoreExportDataEntry{})
+ if err := m.SwingStoreExportData[len(m.SwingStoreExportData)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenesis(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenesis
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SwingStoreExportDataEntry) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenesis
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SwingStoreExportDataEntry: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SwingStoreExportDataEntry: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenesis
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenesis
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenesis
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Key = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenesis
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenesis
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenesis
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Value = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenesis(dAtA[iNdEx:])
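
The generated marshaling above can be exercised with a simple round trip; this is a sanity sketch over the generated API, not part of the diff:

```go
// Round-trip sketch for the generated SwingStoreExportDataEntry.
entry := &SwingStoreExportDataEntry{Key: "swingStore.someKey", Value: "someValue"}
bz, err := entry.Marshal()
if err != nil {
	panic(err)
}
var decoded SwingStoreExportDataEntry
if err := decoded.Unmarshal(bz); err != nil {
	panic(err)
}
// decoded now equals *entry: Key "swingStore.someKey", Value "someValue".
```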
diff --git a/golang/cosmos/x/swingset/types/swingset.pb.go b/golang/cosmos/x/swingset/types/swingset.pb.go
index 940e367d844..061101daa2c 100644
--- a/golang/cosmos/x/swingset/types/swingset.pb.go
+++ b/golang/cosmos/x/swingset/types/swingset.pb.go
@@ -495,24 +495,27 @@ func (m *Egress) GetPowerFlags() []string {
return nil
}
-// The payload messages used by swingset state-sync
-type ExtensionSnapshotterArtifactPayload struct {
+// SwingStoreArtifact encodes an artifact of a swing-store export.
+// Artifacts may be stored or transmitted in any order. In practice, most
+// handlers preserve the artifact order of their original source as a side
+// effect of how they process artifacts.
+type SwingStoreArtifact struct {
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name" yaml:"name"`
Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data" yaml:"data"`
}
-func (m *ExtensionSnapshotterArtifactPayload) Reset() { *m = ExtensionSnapshotterArtifactPayload{} }
-func (m *ExtensionSnapshotterArtifactPayload) String() string { return proto.CompactTextString(m) }
-func (*ExtensionSnapshotterArtifactPayload) ProtoMessage() {}
-func (*ExtensionSnapshotterArtifactPayload) Descriptor() ([]byte, []int) {
+func (m *SwingStoreArtifact) Reset() { *m = SwingStoreArtifact{} }
+func (m *SwingStoreArtifact) String() string { return proto.CompactTextString(m) }
+func (*SwingStoreArtifact) ProtoMessage() {}
+func (*SwingStoreArtifact) Descriptor() ([]byte, []int) {
return fileDescriptor_ff9c341e0de15f8b, []int{8}
}
-func (m *ExtensionSnapshotterArtifactPayload) XXX_Unmarshal(b []byte) error {
+func (m *SwingStoreArtifact) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
-func (m *ExtensionSnapshotterArtifactPayload) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+func (m *SwingStoreArtifact) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
- return xxx_messageInfo_ExtensionSnapshotterArtifactPayload.Marshal(b, m, deterministic)
+ return xxx_messageInfo_SwingStoreArtifact.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
@@ -522,26 +525,26 @@ func (m *ExtensionSnapshotterArtifactPayload) XXX_Marshal(b []byte, deterministi
return b[:n], nil
}
}
-func (m *ExtensionSnapshotterArtifactPayload) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ExtensionSnapshotterArtifactPayload.Merge(m, src)
+func (m *SwingStoreArtifact) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SwingStoreArtifact.Merge(m, src)
}
-func (m *ExtensionSnapshotterArtifactPayload) XXX_Size() int {
+func (m *SwingStoreArtifact) XXX_Size() int {
return m.Size()
}
-func (m *ExtensionSnapshotterArtifactPayload) XXX_DiscardUnknown() {
- xxx_messageInfo_ExtensionSnapshotterArtifactPayload.DiscardUnknown(m)
+func (m *SwingStoreArtifact) XXX_DiscardUnknown() {
+ xxx_messageInfo_SwingStoreArtifact.DiscardUnknown(m)
}
-var xxx_messageInfo_ExtensionSnapshotterArtifactPayload proto.InternalMessageInfo
+var xxx_messageInfo_SwingStoreArtifact proto.InternalMessageInfo
-func (m *ExtensionSnapshotterArtifactPayload) GetName() string {
+func (m *SwingStoreArtifact) GetName() string {
if m != nil {
return m.Name
}
return ""
}
-func (m *ExtensionSnapshotterArtifactPayload) GetData() []byte {
+func (m *SwingStoreArtifact) GetData() []byte {
if m != nil {
return m.Data
}
@@ -557,67 +560,66 @@ func init() {
proto.RegisterType((*PowerFlagFee)(nil), "agoric.swingset.PowerFlagFee")
proto.RegisterType((*QueueSize)(nil), "agoric.swingset.QueueSize")
proto.RegisterType((*Egress)(nil), "agoric.swingset.Egress")
- proto.RegisterType((*ExtensionSnapshotterArtifactPayload)(nil), "agoric.swingset.ExtensionSnapshotterArtifactPayload")
+ proto.RegisterType((*SwingStoreArtifact)(nil), "agoric.swingset.SwingStoreArtifact")
}
func init() { proto.RegisterFile("agoric/swingset/swingset.proto", fileDescriptor_ff9c341e0de15f8b) }
var fileDescriptor_ff9c341e0de15f8b = []byte{
- // 858 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x55, 0xbd, 0x6f, 0x23, 0x45,
- 0x14, 0xf7, 0x62, 0x3b, 0xc4, 0xcf, 0xbe, 0xe4, 0x18, 0x22, 0x9d, 0x89, 0x38, 0x4f, 0xb4, 0x14,
- 0x44, 0x3a, 0x9d, 0x7d, 0x01, 0x21, 0x24, 0x9f, 0x28, 0xbc, 0x91, 0x4f, 0x27, 0x21, 0x90, 0xd9,
- 0x28, 0x14, 0x08, 0xb4, 0x1a, 0xaf, 0xc7, 0x7b, 0x93, 0xac, 0x67, 0xf6, 0x66, 0x26, 0x5f, 0xd7,
- 0x23, 0x68, 0x90, 0x10, 0x15, 0x65, 0x6a, 0xfe, 0x92, 0x2b, 0xaf, 0x44, 0x14, 0x0b, 0x4a, 0x1a,
- 0x94, 0xd2, 0x25, 0x12, 0x12, 0x9a, 0x99, 0xf5, 0xc6, 0x22, 0x48, 0xa4, 0xb9, 0x6a, 0xdf, 0xe7,
- 0xef, 0xbd, 0xf7, 0x7b, 0x33, 0x3b, 0xd0, 0x21, 0x89, 0x90, 0x2c, 0xee, 0xa9, 0x13, 0xc6, 0x13,
- 0x45, 0x75, 0x29, 0x74, 0x33, 0x29, 0xb4, 0x40, 0xeb, 0xce, 0xdf, 0x5d, 0x98, 0x37, 0x37, 0x12,
- 0x91, 0x08, 0xeb, 0xeb, 0x19, 0xc9, 0x85, 0x6d, 0x76, 0x62, 0xa1, 0x66, 0x42, 0xf5, 0xc6, 0x44,
- 0xd1, 0xde, 0xf1, 0xce, 0x98, 0x6a, 0xb2, 0xd3, 0x8b, 0x05, 0xe3, 0xce, 0xef, 0x7f, 0xe7, 0xc1,
- 0xdd, 0x5d, 0x21, 0xe9, 0xf0, 0x98, 0xa4, 0x23, 0x29, 0x32, 0xa1, 0x48, 0x8a, 0x36, 0xa0, 0xae,
- 0x99, 0x4e, 0x69, 0xdb, 0xdb, 0xf2, 0xb6, 0x1b, 0xa1, 0x53, 0xd0, 0x16, 0x34, 0x27, 0x54, 0xc5,
- 0x92, 0x65, 0x9a, 0x09, 0xde, 0x7e, 0xc3, 0xfa, 0x96, 0x4d, 0xe8, 0x23, 0xa8, 0xd3, 0x63, 0x92,
- 0xaa, 0x76, 0x75, 0xab, 0xba, 0xdd, 0xfc, 0xe0, 0x9d, 0xee, 0xbf, 0x7a, 0xec, 0x2e, 0x2a, 0x05,
- 0xb5, 0x97, 0x39, 0xae, 0x84, 0x2e, 0xba, 0x5f, 0xfb, 0xfe, 0x1c, 0x57, 0x7c, 0x05, 0xab, 0x0b,
- 0x37, 0xea, 0x43, 0xeb, 0x40, 0x09, 0x1e, 0x65, 0x54, 0xce, 0x98, 0x56, 0xae, 0x8f, 0xe0, 0xde,
- 0x3c, 0xc7, 0x6f, 0x9f, 0x91, 0x59, 0xda, 0xf7, 0x97, 0xbd, 0x7e, 0xd8, 0x34, 0xea, 0xc8, 0x69,
- 0xe8, 0x01, 0xbc, 0x79, 0xa0, 0xa2, 0x58, 0x4c, 0xa8, 0x6b, 0x31, 0x40, 0xf3, 0x1c, 0xaf, 0x2d,
- 0xd2, 0xac, 0xc3, 0x0f, 0x57, 0x0e, 0xd4, 0xae, 0x11, 0x7e, 0xa8, 0xc2, 0xca, 0x88, 0x48, 0x32,
- 0x53, 0xe8, 0x29, 0xac, 0x8d, 0x29, 0xe1, 0xca, 0xc0, 0x46, 0x47, 0x9c, 0xe9, 0xb6, 0x67, 0xa7,
- 0x78, 0xf7, 0xc6, 0x14, 0x7b, 0x5a, 0x32, 0x9e, 0x04, 0x26, 0xb8, 0x18, 0xa4, 0x65, 0x33, 0x47,
- 0x54, 0xee, 0x73, 0xa6, 0xd1, 0x73, 0x58, 0x9b, 0x52, 0x6a, 0x31, 0xa2, 0x4c, 0xb2, 0xd8, 0x34,
- 0xe2, 0xf8, 0x70, 0xcb, 0xe8, 0x9a, 0x65, 0x74, 0x8b, 0x65, 0x74, 0x77, 0x05, 0xe3, 0xc1, 0x23,
- 0x03, 0xf3, 0xcb, 0xef, 0x78, 0x3b, 0x61, 0xfa, 0xd9, 0xd1, 0xb8, 0x1b, 0x8b, 0x59, 0xaf, 0xd8,
- 0x9c, 0xfb, 0x3c, 0x54, 0x93, 0xc3, 0x9e, 0x3e, 0xcb, 0xa8, 0xb2, 0x09, 0x2a, 0x6c, 0x4d, 0x29,
- 0x35, 0xd5, 0x46, 0xa6, 0x00, 0x7a, 0x04, 0x1b, 0x63, 0x21, 0xb4, 0xd2, 0x92, 0x64, 0xd1, 0x31,
- 0xd1, 0x51, 0x2c, 0xf8, 0x94, 0x25, 0xed, 0xaa, 0x5d, 0x12, 0x2a, 0x7d, 0x5f, 0x12, 0xbd, 0x6b,
- 0x3d, 0xe8, 0x53, 0x58, 0xcf, 0xc4, 0x09, 0x95, 0xd1, 0x34, 0x25, 0x49, 0x34, 0xa5, 0x54, 0xb5,
- 0x6b, 0xb6, 0xcb, 0xfb, 0x37, 0xe6, 0x1d, 0x99, 0xb8, 0x27, 0x29, 0x49, 0x9e, 0x50, 0x5a, 0x0c,
- 0x7c, 0x27, 0x5b, 0xb2, 0x29, 0xf4, 0x09, 0x34, 0x9e, 0x1f, 0xd1, 0x23, 0x1a, 0xcd, 0xc8, 0x69,
- 0xbb, 0x6e, 0x61, 0x36, 0x6f, 0xc0, 0x7c, 0x61, 0x22, 0xf6, 0xd8, 0x8b, 0x05, 0xc6, 0xaa, 0x4d,
- 0xf9, 0x8c, 0x9c, 0xf6, 0x57, 0x7f, 0x3e, 0xc7, 0x95, 0x3f, 0xcf, 0xb1, 0xe7, 0x7f, 0x0e, 0xf5,
- 0x3d, 0x4d, 0x34, 0x45, 0x43, 0xb8, 0xe3, 0x10, 0x49, 0x9a, 0x8a, 0x13, 0x3a, 0x29, 0x96, 0xf1,
- 0xff, 0xa8, 0x2d, 0x9b, 0x36, 0x70, 0x59, 0x7e, 0x0a, 0xcd, 0xa5, 0x6d, 0xa1, 0xbb, 0x50, 0x3d,
- 0xa4, 0x67, 0xc5, 0xb1, 0x36, 0x22, 0x1a, 0x42, 0xdd, 0xee, 0xae, 0x38, 0x2b, 0x3d, 0x83, 0xf1,
- 0x5b, 0x8e, 0xdf, 0xbf, 0xc5, 0x1e, 0xf6, 0x19, 0xd7, 0xa1, 0xcb, 0xee, 0xd7, 0x6c, 0xf7, 0x3f,
- 0x79, 0xd0, 0x5a, 0x26, 0x0b, 0xdd, 0x07, 0xb8, 0x26, 0xb9, 0x28, 0xdb, 0x28, 0xa9, 0x43, 0xdf,
- 0x40, 0x75, 0x4a, 0x5f, 0xcb, 0xe9, 0x30, 0xb8, 0x45, 0x53, 0x1f, 0x43, 0xa3, 0xe4, 0xe8, 0x3f,
- 0x08, 0x40, 0x50, 0x53, 0xec, 0x85, 0xbb, 0x2b, 0xf5, 0xd0, 0xca, 0x45, 0xe2, 0xdf, 0x1e, 0xac,
- 0x0c, 0x13, 0x49, 0x95, 0x42, 0x8f, 0x61, 0x95, 0xb3, 0xf8, 0x90, 0x93, 0x59, 0xf1, 0x4f, 0x08,
- 0xf0, 0x55, 0x8e, 0x4b, 0xdb, 0x3c, 0xc7, 0xeb, 0xee, 0x82, 0x2d, 0x2c, 0x7e, 0x58, 0x3a, 0xd1,
- 0xd7, 0x50, 0xcb, 0x28, 0x95, 0xb6, 0x42, 0x2b, 0x78, 0x7a, 0x95, 0x63, 0xab, 0xcf, 0x73, 0xdc,
- 0x74, 0x49, 0x46, 0xf3, 0xff, 0xca, 0xf1, 0xc3, 0x5b, 0x8c, 0x37, 0x88, 0xe3, 0xc1, 0x64, 0x62,
- 0x9a, 0x0a, 0x2d, 0x0a, 0x0a, 0xa1, 0x79, 0x4d, 0xb1, 0xfb, 0xf3, 0x34, 0x82, 0x9d, 0x8b, 0x1c,
- 0x43, 0xb9, 0x09, 0x75, 0x95, 0x63, 0x28, 0x59, 0x57, 0xf3, 0x1c, 0xbf, 0x55, 0x14, 0x2e, 0x6d,
- 0x7e, 0xb8, 0x14, 0x60, 0xe7, 0xaf, 0xf8, 0xdf, 0x7a, 0xf0, 0xde, 0xf0, 0x54, 0x53, 0xae, 0x98,
- 0xe0, 0x7b, 0x9c, 0x64, 0xea, 0x99, 0xd0, 0x9a, 0xca, 0x81, 0xd4, 0x6c, 0x4a, 0x62, 0x3d, 0x22,
- 0x67, 0xa9, 0x20, 0x13, 0xf4, 0x00, 0x6a, 0x4b, 0xc4, 0xdc, 0x33, 0xf3, 0x15, 0xa4, 0x14, 0xf3,
- 0x39, 0x42, 0xac, 0xd1, 0x04, 0x4f, 0x88, 0x26, 0x05, 0x19, 0x36, 0xd8, 0xe8, 0xd7, 0xc1, 0x46,
- 0xf3, 0x43, 0x6b, 0x74, 0x7d, 0x04, 0xfb, 0x2f, 0x2f, 0x3a, 0xde, 0xab, 0x8b, 0x8e, 0xf7, 0xc7,
- 0x45, 0xc7, 0xfb, 0xf1, 0xb2, 0x53, 0x79, 0x75, 0xd9, 0xa9, 0xfc, 0x7a, 0xd9, 0xa9, 0x7c, 0xf5,
- 0x78, 0x89, 0xb0, 0x81, 0x7b, 0x2e, 0xdc, 0xf5, 0xb0, 0x84, 0x25, 0x22, 0x25, 0x3c, 0x59, 0x30,
- 0x79, 0x7a, 0xfd, 0x92, 0x58, 0x26, 0xc7, 0x2b, 0xf6, 0x01, 0xf8, 0xf0, 0x9f, 0x00, 0x00, 0x00,
- 0xff, 0xff, 0xd2, 0xc0, 0xc3, 0x71, 0x69, 0x06, 0x00, 0x00,
+ // 842 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x55, 0xcf, 0x6f, 0xe3, 0x44,
+ 0x14, 0x8e, 0x49, 0x52, 0x9a, 0x97, 0x6c, 0xbb, 0x0c, 0x95, 0x36, 0x54, 0x6c, 0xa6, 0xf2, 0x85,
+ 0x4a, 0xab, 0x8d, 0xb7, 0x20, 0x84, 0x94, 0x15, 0x87, 0xb8, 0xea, 0x6a, 0x25, 0x04, 0x0a, 0x8e,
+ 0xca, 0x01, 0x81, 0xac, 0x89, 0x33, 0x31, 0xd3, 0x3a, 0x1e, 0xef, 0xcc, 0xf4, 0xd7, 0xfe, 0x03,
+ 0x70, 0x41, 0x42, 0x9c, 0x38, 0xf6, 0xcc, 0x5f, 0xb2, 0xc7, 0x3d, 0x22, 0x0e, 0x06, 0xb5, 0x17,
+ 0xd4, 0x63, 0x8e, 0x48, 0x48, 0x68, 0x66, 0x1c, 0xc7, 0xa2, 0x48, 0xf4, 0xc2, 0x29, 0xf3, 0x7e,
+ 0x7d, 0xef, 0x7d, 0xdf, 0x1b, 0x4f, 0xa0, 0x47, 0x62, 0x2e, 0x58, 0xe4, 0xc9, 0x33, 0x96, 0xc6,
+ 0x92, 0xaa, 0xf2, 0xd0, 0xcf, 0x04, 0x57, 0x1c, 0x6d, 0xda, 0x78, 0x7f, 0xe9, 0xde, 0xde, 0x8a,
+ 0x79, 0xcc, 0x4d, 0xcc, 0xd3, 0x27, 0x9b, 0xb6, 0xdd, 0x8b, 0xb8, 0x9c, 0x73, 0xe9, 0x4d, 0x88,
+ 0xa4, 0xde, 0xe9, 0xde, 0x84, 0x2a, 0xb2, 0xe7, 0x45, 0x9c, 0xa5, 0x36, 0xee, 0x7e, 0xeb, 0xc0,
+ 0xfd, 0x7d, 0x2e, 0xe8, 0xc1, 0x29, 0x49, 0x46, 0x82, 0x67, 0x5c, 0x92, 0x04, 0x6d, 0x41, 0x53,
+ 0x31, 0x95, 0xd0, 0xae, 0xb3, 0xe3, 0xec, 0xb6, 0x02, 0x6b, 0xa0, 0x1d, 0x68, 0x4f, 0xa9, 0x8c,
+ 0x04, 0xcb, 0x14, 0xe3, 0x69, 0xf7, 0x0d, 0x13, 0xab, 0xba, 0xd0, 0x87, 0xd0, 0xa4, 0xa7, 0x24,
+ 0x91, 0xdd, 0xfa, 0x4e, 0x7d, 0xb7, 0xfd, 0xfe, 0x3b, 0xfd, 0x7f, 0xcc, 0xd8, 0x5f, 0x76, 0xf2,
+ 0x1b, 0xaf, 0x72, 0x5c, 0x0b, 0x6c, 0xf6, 0xa0, 0xf1, 0xdd, 0x25, 0xae, 0xb9, 0x12, 0xd6, 0x97,
+ 0x61, 0x34, 0x80, 0xce, 0x91, 0xe4, 0x69, 0x98, 0x51, 0x31, 0x67, 0x4a, 0xda, 0x39, 0xfc, 0x07,
+ 0x8b, 0x1c, 0xbf, 0x7d, 0x41, 0xe6, 0xc9, 0xc0, 0xad, 0x46, 0xdd, 0xa0, 0xad, 0xcd, 0x91, 0xb5,
+ 0xd0, 0x23, 0x78, 0xf3, 0x48, 0x86, 0x11, 0x9f, 0x52, 0x3b, 0xa2, 0x8f, 0x16, 0x39, 0xde, 0x58,
+ 0x96, 0x99, 0x80, 0x1b, 0xac, 0x1d, 0xc9, 0x7d, 0x7d, 0xf8, 0xbe, 0x0e, 0x6b, 0x23, 0x22, 0xc8,
+ 0x5c, 0xa2, 0xe7, 0xb0, 0x31, 0xa1, 0x24, 0x95, 0x1a, 0x36, 0x3c, 0x49, 0x99, 0xea, 0x3a, 0x86,
+ 0xc5, 0xbb, 0xb7, 0x58, 0x8c, 0x95, 0x60, 0x69, 0xec, 0xeb, 0xe4, 0x82, 0x48, 0xc7, 0x54, 0x8e,
+ 0xa8, 0x38, 0x4c, 0x99, 0x42, 0x2f, 0x60, 0x63, 0x46, 0xa9, 0xc1, 0x08, 0x33, 0xc1, 0x22, 0x3d,
+ 0x88, 0xd5, 0xc3, 0x2e, 0xa3, 0xaf, 0x97, 0xd1, 0x2f, 0x96, 0xd1, 0xdf, 0xe7, 0x2c, 0xf5, 0x9f,
+ 0x68, 0x98, 0x9f, 0x7f, 0xc3, 0xbb, 0x31, 0x53, 0xdf, 0x9c, 0x4c, 0xfa, 0x11, 0x9f, 0x7b, 0xc5,
+ 0xe6, 0xec, 0xcf, 0x63, 0x39, 0x3d, 0xf6, 0xd4, 0x45, 0x46, 0xa5, 0x29, 0x90, 0x41, 0x67, 0x46,
+ 0xa9, 0xee, 0x36, 0xd2, 0x0d, 0xd0, 0x13, 0xd8, 0x9a, 0x70, 0xae, 0xa4, 0x12, 0x24, 0x0b, 0x4f,
+ 0x89, 0x0a, 0x23, 0x9e, 0xce, 0x58, 0xdc, 0xad, 0x9b, 0x25, 0xa1, 0x32, 0xf6, 0x05, 0x51, 0xfb,
+ 0x26, 0x82, 0x3e, 0x81, 0xcd, 0x8c, 0x9f, 0x51, 0x11, 0xce, 0x12, 0x12, 0x87, 0x33, 0x4a, 0x65,
+ 0xb7, 0x61, 0xa6, 0x7c, 0x78, 0x8b, 0xef, 0x48, 0xe7, 0x3d, 0x4b, 0x48, 0xfc, 0x8c, 0xd2, 0x82,
+ 0xf0, 0xbd, 0xac, 0xe2, 0x93, 0xe8, 0x63, 0x68, 0xbd, 0x38, 0xa1, 0x27, 0x34, 0x9c, 0x93, 0xf3,
+ 0x6e, 0xd3, 0xc0, 0x6c, 0xdf, 0x82, 0xf9, 0x5c, 0x67, 0x8c, 0xd9, 0xcb, 0x25, 0xc6, 0xba, 0x29,
+ 0xf9, 0x94, 0x9c, 0x0f, 0xd6, 0x7f, 0xba, 0xc4, 0xb5, 0x3f, 0x2e, 0xb1, 0xe3, 0x7e, 0x06, 0xcd,
+ 0xb1, 0x22, 0x8a, 0xa2, 0x03, 0xb8, 0x67, 0x11, 0x49, 0x92, 0xf0, 0x33, 0x3a, 0x2d, 0x96, 0xf1,
+ 0xdf, 0xa8, 0x1d, 0x53, 0x36, 0xb4, 0x55, 0x6e, 0x02, 0xed, 0xca, 0xb6, 0xd0, 0x7d, 0xa8, 0x1f,
+ 0xd3, 0x8b, 0xe2, 0x5a, 0xeb, 0x23, 0x3a, 0x80, 0xa6, 0xd9, 0x5d, 0x71, 0x57, 0x3c, 0x8d, 0xf1,
+ 0x6b, 0x8e, 0xdf, 0xbb, 0xc3, 0x1e, 0x0e, 0x59, 0xaa, 0x02, 0x5b, 0x3d, 0x68, 0x98, 0xe9, 0x7f,
+ 0x74, 0xa0, 0x53, 0x15, 0x0b, 0x3d, 0x04, 0x58, 0x89, 0x5c, 0xb4, 0x6d, 0x95, 0xd2, 0xa1, 0xaf,
+ 0xa1, 0x3e, 0xa3, 0xff, 0xcb, 0xed, 0xd0, 0xb8, 0xc5, 0x50, 0x1f, 0x41, 0xab, 0xd4, 0xe8, 0x5f,
+ 0x04, 0x40, 0xd0, 0x90, 0xec, 0xa5, 0xfd, 0x56, 0x9a, 0x81, 0x39, 0x17, 0x85, 0x7f, 0x39, 0xb0,
+ 0x76, 0x10, 0x0b, 0x2a, 0x25, 0x7a, 0x0a, 0xeb, 0x29, 0x8b, 0x8e, 0x53, 0x32, 0x2f, 0xde, 0x04,
+ 0x1f, 0xdf, 0xe4, 0xb8, 0xf4, 0x2d, 0x72, 0xbc, 0x69, 0x3f, 0xb0, 0xa5, 0xc7, 0x0d, 0xca, 0x20,
+ 0xfa, 0x0a, 0x1a, 0x19, 0xa5, 0xc2, 0x74, 0xe8, 0xf8, 0xcf, 0x6f, 0x72, 0x6c, 0xec, 0x45, 0x8e,
+ 0xdb, 0xb6, 0x48, 0x5b, 0xee, 0x9f, 0x39, 0x7e, 0x7c, 0x07, 0x7a, 0xc3, 0x28, 0x1a, 0x4e, 0xa7,
+ 0x7a, 0xa8, 0xc0, 0xa0, 0xa0, 0x00, 0xda, 0x2b, 0x89, 0xed, 0xcb, 0xd3, 0xf2, 0xf7, 0xae, 0x72,
+ 0x0c, 0xe5, 0x26, 0xe4, 0x4d, 0x8e, 0xa1, 0x54, 0x5d, 0x2e, 0x72, 0xfc, 0x56, 0xd1, 0xb8, 0xf4,
+ 0xb9, 0x41, 0x25, 0xc1, 0xf0, 0xaf, 0xb9, 0x0a, 0xd0, 0x58, 0xdf, 0xb2, 0xb1, 0xe2, 0x82, 0x0e,
+ 0x85, 0x62, 0x33, 0x12, 0x29, 0xf4, 0x08, 0x1a, 0x15, 0x19, 0x1e, 0x68, 0x36, 0x85, 0x04, 0x05,
+ 0x1b, 0x4b, 0xdf, 0x38, 0x75, 0xf2, 0x94, 0x28, 0x52, 0x50, 0x37, 0xc9, 0xda, 0x5e, 0x25, 0x6b,
+ 0xcb, 0x0d, 0x8c, 0xd3, 0x76, 0xf5, 0x0f, 0x5f, 0x5d, 0xf5, 0x9c, 0xd7, 0x57, 0x3d, 0xe7, 0xf7,
+ 0xab, 0x9e, 0xf3, 0xc3, 0x75, 0xaf, 0xf6, 0xfa, 0xba, 0x57, 0xfb, 0xe5, 0xba, 0x57, 0xfb, 0xf2,
+ 0x69, 0x45, 0x9e, 0xa1, 0xfd, 0x73, 0xb0, 0x1f, 0x83, 0x91, 0x27, 0xe6, 0x09, 0x49, 0xe3, 0xa5,
+ 0x6e, 0xe7, 0xab, 0xff, 0x0d, 0xa3, 0xdb, 0x64, 0xcd, 0x3c, 0xf7, 0x1f, 0xfc, 0x1d, 0x00, 0x00,
+ 0xff, 0xff, 0xee, 0x34, 0x5f, 0xf6, 0x57, 0x06, 0x00, 0x00,
}
func (this *Params) Equal(that interface{}) bool {
@@ -1138,7 +1140,7 @@ func (m *Egress) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
-func (m *ExtensionSnapshotterArtifactPayload) Marshal() (dAtA []byte, err error) {
+func (m *SwingStoreArtifact) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -1148,12 +1150,12 @@ func (m *ExtensionSnapshotterArtifactPayload) Marshal() (dAtA []byte, err error)
return dAtA[:n], nil
}
-func (m *ExtensionSnapshotterArtifactPayload) MarshalTo(dAtA []byte) (int, error) {
+func (m *SwingStoreArtifact) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *ExtensionSnapshotterArtifactPayload) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *SwingStoreArtifact) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
@@ -1351,7 +1353,7 @@ func (m *Egress) Size() (n int) {
return n
}
-func (m *ExtensionSnapshotterArtifactPayload) Size() (n int) {
+func (m *SwingStoreArtifact) Size() (n int) {
if m == nil {
return 0
}
@@ -2419,7 +2421,7 @@ func (m *Egress) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *ExtensionSnapshotterArtifactPayload) Unmarshal(dAtA []byte) error {
+func (m *SwingStoreArtifact) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -2442,10 +2444,10 @@ func (m *ExtensionSnapshotterArtifactPayload) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: ExtensionSnapshotterArtifactPayload: wiretype end group for non-group")
+ return fmt.Errorf("proto: SwingStoreArtifact: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: ExtensionSnapshotterArtifactPayload: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: SwingStoreArtifact: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
diff --git a/golang/cosmos/x/vbank/vbank.go b/golang/cosmos/x/vbank/vbank.go
index 75ea273e11f..cdd3527a2e4 100644
--- a/golang/cosmos/x/vbank/vbank.go
+++ b/golang/cosmos/x/vbank/vbank.go
@@ -135,10 +135,10 @@ func (ch portHandler) Receive(ctx *vm.ControllerContext, str string) (ret string
case "VBANK_GET_BALANCE":
addr, err := sdk.AccAddressFromBech32(msg.Address)
if err != nil {
- return "", fmt.Errorf("cannot convert %s to address: %w", msg.Address, err)
+ return "", fmt.Errorf("cannot convert %s to address: %s", msg.Address, err)
}
if err = sdk.ValidateDenom(msg.Denom); err != nil {
- return "", fmt.Errorf("invalid denom %s: %w", msg.Denom, err)
+ return "", fmt.Errorf("invalid denom %s: %s", msg.Denom, err)
}
coin := keeper.GetBalance(ctx.Context, addr, msg.Denom)
packet := coin.Amount.String()
@@ -152,10 +152,10 @@ func (ch portHandler) Receive(ctx *vm.ControllerContext, str string) (ret string
case "VBANK_GRAB":
addr, err := sdk.AccAddressFromBech32(msg.Sender)
if err != nil {
- return "", fmt.Errorf("cannot convert %s to address: %w", msg.Sender, err)
+ return "", fmt.Errorf("cannot convert %s to address: %s", msg.Sender, err)
}
if err = sdk.ValidateDenom(msg.Denom); err != nil {
- return "", fmt.Errorf("invalid denom %s: %w", msg.Denom, err)
+ return "", fmt.Errorf("invalid denom %s: %s", msg.Denom, err)
}
value, ok := sdk.NewIntFromString(msg.Amount)
if !ok {
@@ -163,7 +163,7 @@ func (ch portHandler) Receive(ctx *vm.ControllerContext, str string) (ret string
}
coins := sdk.NewCoins(sdk.NewCoin(msg.Denom, value))
if err := keeper.GrabCoins(ctx.Context, addr, coins); err != nil {
- return "", fmt.Errorf("cannot grab %s coins: %w", coins.Sort().String(), err)
+ return "", fmt.Errorf("cannot grab %s coins: %s", coins.Sort().String(), err)
}
addressToBalances := make(map[string]sdk.Coins, 1)
addressToBalances[msg.Sender] = sdk.NewCoins(sdk.NewInt64Coin(msg.Denom, 1))
@@ -180,10 +180,10 @@ func (ch portHandler) Receive(ctx *vm.ControllerContext, str string) (ret string
case "VBANK_GIVE":
addr, err := sdk.AccAddressFromBech32(msg.Recipient)
if err != nil {
- return "", fmt.Errorf("cannot convert %s to address: %w", msg.Recipient, err)
+ return "", fmt.Errorf("cannot convert %s to address: %s", msg.Recipient, err)
}
if err = sdk.ValidateDenom(msg.Denom); err != nil {
- return "", fmt.Errorf("invalid denom %s: %w", msg.Denom, err)
+ return "", fmt.Errorf("invalid denom %s: %s", msg.Denom, err)
}
value, ok := sdk.NewIntFromString(msg.Amount)
if !ok {
@@ -191,7 +191,7 @@ func (ch portHandler) Receive(ctx *vm.ControllerContext, str string) (ret string
}
coins := sdk.NewCoins(sdk.NewCoin(msg.Denom, value))
if err := keeper.SendCoins(ctx.Context, addr, coins); err != nil {
- return "", fmt.Errorf("cannot give %s coins: %w", coins.Sort().String(), err)
+ return "", fmt.Errorf("cannot give %s coins: %s", coins.Sort().String(), err)
}
addressToBalances := make(map[string]sdk.Coins, 1)
addressToBalances[msg.Recipient] = sdk.NewCoins(sdk.NewInt64Coin(msg.Denom, 1))
@@ -212,7 +212,7 @@ func (ch portHandler) Receive(ctx *vm.ControllerContext, str string) (ret string
}
coins := sdk.NewCoins(sdk.NewCoin(msg.Denom, value))
if err := keeper.StoreRewardCoins(ctx.Context, coins); err != nil {
- return "", fmt.Errorf("cannot store reward %s coins: %w", coins.Sort().String(), err)
+ return "", fmt.Errorf("cannot store reward %s coins: %s", coins.Sort().String(), err)
}
if err != nil {
return "", err
diff --git a/golang/cosmos/x/vstorage/genesis.go b/golang/cosmos/x/vstorage/genesis.go
index 2d596166829..970a70a8e7e 100644
--- a/golang/cosmos/x/vstorage/genesis.go
+++ b/golang/cosmos/x/vstorage/genesis.go
@@ -22,9 +22,6 @@ func ValidateGenesis(data *types.GenesisState) error {
if err := types.ValidatePath(entry.Path); err != nil {
return fmt.Errorf("genesis vstorage.data entry %q has invalid path format: %s", entry.Path, err)
}
- if entry.Value == "" {
- return fmt.Errorf("genesis vstorage.data entry %q has no data", entry.Path)
- }
}
return nil
}
diff --git a/golang/cosmos/x/vstorage/keeper/keeper.go b/golang/cosmos/x/vstorage/keeper/keeper.go
index ea6eca0797e..fb5a831fe8b 100644
--- a/golang/cosmos/x/vstorage/keeper/keeper.go
+++ b/golang/cosmos/x/vstorage/keeper/keeper.go
@@ -35,7 +35,7 @@ type ProposedChange struct {
}
type ChangeManager interface {
- Track(ctx sdk.Context, k Keeper, entry types.StorageEntry, isLegacy bool)
+ Track(ctx sdk.Context, k Keeper, entry agoric.KVEntry, isLegacy bool)
EmitEvents(ctx sdk.Context, k Keeper)
Rollback(ctx sdk.Context)
}
@@ -65,8 +65,8 @@ type Keeper struct {
storeKey sdk.StoreKey
}
-func (bcm *BatchingChangeManager) Track(ctx sdk.Context, k Keeper, entry types.StorageEntry, isLegacy bool) {
- path := entry.Path()
+func (bcm *BatchingChangeManager) Track(ctx sdk.Context, k Keeper, entry agoric.KVEntry, isLegacy bool) {
+ path := entry.Key()
// TODO: differentiate between deletion and setting empty string?
// Using empty string for deletion for backwards compatibility
value := entry.StringValue()
@@ -177,32 +177,56 @@ func (k Keeper) ImportStorage(ctx sdk.Context, entries []*types.DataEntry) {
for _, entry := range entries {
// This set does the bookkeeping for us in case the entries aren't a
// complete tree.
- k.SetStorage(ctx, types.NewStorageEntry(entry.Path, entry.Value))
+ k.SetStorage(ctx, agoric.NewKVEntry(entry.Path, entry.Value))
}
}
-func (k Keeper) MigrateNoDataPlaceholders(ctx sdk.Context) {
+func getEncodedKeysWithPrefixFromIterator(iterator sdk.Iterator, prefix string) [][]byte {
+ keys := make([][]byte, 0)
+ defer iterator.Close()
+ for ; iterator.Valid(); iterator.Next() {
+ key := iterator.Key()
+ path := types.EncodedKeyToPath(key)
+ if strings.HasPrefix(path, prefix) {
+ keys = append(keys, key)
+ }
+ }
+ return keys
+}
+
+// RemoveEntriesWithPrefix removes all storage entries starting with the
+// supplied pathPrefix, which must not be empty.
+// It has the same effect as listing children of the prefix and removing each
+// descendant recursively.
+func (k Keeper) RemoveEntriesWithPrefix(ctx sdk.Context, pathPrefix string) {
store := ctx.KVStore(k.storeKey)
+ if len(pathPrefix) == 0 {
+ panic("cannot remove all content")
+ }
+ if err := types.ValidatePath(pathPrefix); err != nil {
+ panic(err)
+ }
+ descendantPrefix := pathPrefix + types.PathSeparator
+
+	// Since vstorage encodes keys with a prefix indicating the number of path
+	// elements, we cannot use a simple prefix iterator. Instead we iterate over
+	// the whole vstorage content and check whether each entry matches the
+	// descendantPrefix. This choice assumes most entries will be deleted. An
+	// alternative implementation would be to recursively list all children
+	// under the descendantPrefix and delete them.
+
iterator := sdk.KVStorePrefixIterator(store, nil)
- // Copy empty keys first since cosmos stores do not support writing keys
- // while an iterator is open over the domain
- emptyKeys := [][]byte{}
- for ; iterator.Valid(); iterator.Next() {
- rawValue := iterator.Value()
- if bytes.Equal(rawValue, types.EncodedDataPrefix) {
- key := iterator.Key()
- clonedKey := make([]byte, len(key))
- copy(clonedKey, key)
- emptyKeys = append(emptyKeys, clonedKey)
- }
- }
- iterator.Close()
+ keys := getEncodedKeysWithPrefixFromIterator(iterator, descendantPrefix)
- for _, key := range emptyKeys {
- store.Set(key, types.EncodedNoDataValue)
+ for _, key := range keys {
+ store.Delete(key)
}
+
+ // Update the prefix entry itself with SetStorage, which will effectively
+ // delete it and all necessary ancestors.
+ k.SetStorage(ctx, agoric.NewKVEntryWithNoValue(pathPrefix))
}
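
The comment in `RemoveEntriesWithPrefix` mentions a recursive alternative. A rough sketch of that approach, relying only on `GetChildren` and `SetStorage` as used elsewhere in this keeper (the method name `removeSubtree` is hypothetical, and `path` is assumed non-empty):

```go
// Hypothetical recursive alternative to the full-scan deletion above:
// walk children depth-first, then clear each path via SetStorage, which
// also prunes placeholder ancestors. Cheaper when few entries match the
// prefix, more expensive when most of the store is being deleted.
func (k Keeper) removeSubtree(ctx sdk.Context, path string) {
	for _, child := range k.GetChildren(ctx, path).Children {
		k.removeSubtree(ctx, path+types.PathSeparator+child)
	}
	k.SetStorage(ctx, agoric.NewKVEntryWithNoValue(path))
}
```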
func (k Keeper) EmitChange(ctx sdk.Context, change *ProposedChange) {
@@ -229,22 +253,22 @@ func (k Keeper) EmitChange(ctx sdk.Context, change *ProposedChange) {
}
// GetEntry gets generic storage. The default value is an empty string.
-func (k Keeper) GetEntry(ctx sdk.Context, path string) types.StorageEntry {
+func (k Keeper) GetEntry(ctx sdk.Context, path string) agoric.KVEntry {
//fmt.Printf("GetEntry(%s)\n", path);
store := ctx.KVStore(k.storeKey)
encodedKey := types.PathToEncodedKey(path)
rawValue := store.Get(encodedKey)
if len(rawValue) == 0 {
- return types.NewStorageEntryWithNoData(path)
+ return agoric.NewKVEntryWithNoValue(path)
}
if bytes.Equal(rawValue, types.EncodedNoDataValue) {
- return types.NewStorageEntryWithNoData(path)
+ return agoric.NewKVEntryWithNoValue(path)
}
value, hasPrefix := cutPrefix(rawValue, types.EncodedDataPrefix)
if !hasPrefix {
panic(fmt.Errorf("value at path %q starts with unexpected prefix", path))
}
- return types.NewStorageEntry(path, string(value))
+ return agoric.NewKVEntry(path, string(value))
}
func (k Keeper) getKeyIterator(ctx sdk.Context, path string) db.Iterator {
@@ -273,7 +297,7 @@ func (k Keeper) GetChildren(ctx sdk.Context, path string) *types.Children {
// (just an empty string) and exist only to provide linkage to subnodes with
// data.
func (k Keeper) HasStorage(ctx sdk.Context, path string) bool {
- return k.GetEntry(ctx, path).HasData()
+ return k.GetEntry(ctx, path).HasValue()
}
// HasEntry tells if a given path has either subnodes or data.
@@ -302,12 +326,12 @@ func (k Keeper) FlushChangeEvents(ctx sdk.Context) {
k.changeManager.Rollback(ctx)
}
-func (k Keeper) SetStorageAndNotify(ctx sdk.Context, entry types.StorageEntry) {
+func (k Keeper) SetStorageAndNotify(ctx sdk.Context, entry agoric.KVEntry) {
k.changeManager.Track(ctx, k, entry, false)
k.SetStorage(ctx, entry)
}
-func (k Keeper) LegacySetStorageAndNotify(ctx sdk.Context, entry types.StorageEntry) {
+func (k Keeper) LegacySetStorageAndNotify(ctx sdk.Context, entry agoric.KVEntry) {
k.changeManager.Track(ctx, k, entry, true)
k.SetStorage(ctx, entry)
}
@@ -332,7 +356,7 @@ func (k Keeper) AppendStorageValueAndNotify(ctx sdk.Context, path, value string)
if err != nil {
return err
}
- k.SetStorageAndNotify(ctx, types.NewStorageEntry(path, string(bz)))
+ k.SetStorageAndNotify(ctx, agoric.NewKVEntry(path, string(bz)))
return nil
}
@@ -344,12 +368,12 @@ func componentsToPath(components []string) string {
//
// Maintains the invariant: path entries exist if and only if self or some
// descendant has non-empty storage
-func (k Keeper) SetStorage(ctx sdk.Context, entry types.StorageEntry) {
+func (k Keeper) SetStorage(ctx sdk.Context, entry agoric.KVEntry) {
store := ctx.KVStore(k.storeKey)
- path := entry.Path()
+ path := entry.Key()
encodedKey := types.PathToEncodedKey(path)
- if !entry.HasData() {
+ if !entry.HasValue() {
if !k.HasChildren(ctx, path) {
// We have no children, can delete.
store.Delete(encodedKey)
@@ -364,7 +388,7 @@ func (k Keeper) SetStorage(ctx sdk.Context, entry types.StorageEntry) {
// Update our other parent children.
pathComponents := strings.Split(path, types.PathSeparator)
- if !entry.HasData() {
+ if !entry.HasValue() {
// delete placeholder ancestors if they're no longer needed
for i := len(pathComponents) - 1; i >= 0; i-- {
ancestor := componentsToPath(pathComponents[0:i])
@@ -405,7 +429,7 @@ func (k Keeper) GetNoDataValue() []byte {
func (k Keeper) getIntValue(ctx sdk.Context, path string) (sdk.Int, error) {
indexEntry := k.GetEntry(ctx, path)
- if !indexEntry.HasData() {
+ if !indexEntry.HasValue() {
return sdk.NewInt(0), nil
}
@@ -444,10 +468,10 @@ func (k Keeper) PushQueueItem(ctx sdk.Context, queuePath string, value string) e
// Set the vstorage corresponding to the queue entry for the current tail.
path := queuePath + "." + tail.String()
- k.SetStorage(ctx, types.NewStorageEntry(path, value))
+ k.SetStorage(ctx, agoric.NewKVEntry(path, value))
// Update the tail to point to the next available entry.
path = queuePath + ".tail"
- k.SetStorage(ctx, types.NewStorageEntry(path, nextTail.String()))
+ k.SetStorage(ctx, agoric.NewKVEntry(path, nextTail.String()))
return nil
}
diff --git a/golang/cosmos/x/vstorage/keeper/keeper_test.go b/golang/cosmos/x/vstorage/keeper/keeper_test.go
index d53c67ee842..38fcdb7e7f6 100644
--- a/golang/cosmos/x/vstorage/keeper/keeper_test.go
+++ b/golang/cosmos/x/vstorage/keeper/keeper_test.go
@@ -4,6 +4,7 @@ import (
"reflect"
"testing"
+ agoric "github.com/Agoric/agoric-sdk/golang/cosmos/types"
"github.com/Agoric/agoric-sdk/golang/cosmos/x/vstorage/types"
"github.com/cosmos/cosmos-sdk/store"
@@ -57,19 +58,19 @@ func TestStorage(t *testing.T) {
ctx, keeper := testKit.ctx, testKit.vstorageKeeper
// Test that we can store and retrieve a value.
- keeper.SetStorage(ctx, types.NewStorageEntry("inited", "initValue"))
+ keeper.SetStorage(ctx, agoric.NewKVEntry("inited", "initValue"))
if got := keeper.GetEntry(ctx, "inited").StringValue(); got != "initValue" {
t.Errorf("got %q, want %q", got, "initValue")
}
// Test that unknown children return empty string.
- if got := keeper.GetEntry(ctx, "unknown"); got.HasData() || got.StringValue() != "" {
+ if got := keeper.GetEntry(ctx, "unknown"); got.HasValue() || got.StringValue() != "" {
t.Errorf("got %q, want no value", got.StringValue())
}
// Test that we can store and retrieve an empty string value.
- keeper.SetStorage(ctx, types.NewStorageEntry("inited", ""))
- if got := keeper.GetEntry(ctx, "inited"); !got.HasData() || got.StringValue() != "" {
+ keeper.SetStorage(ctx, agoric.NewKVEntry("inited", ""))
+ if got := keeper.GetEntry(ctx, "inited"); !got.HasValue() || got.StringValue() != "" {
t.Errorf("got %q, want empty string", got.StringValue())
}
@@ -78,18 +79,18 @@ func TestStorage(t *testing.T) {
t.Errorf("got %q children, want [inited]", got.Children)
}
- keeper.SetStorage(ctx, types.NewStorageEntry("key1", "value1"))
+ keeper.SetStorage(ctx, agoric.NewKVEntry("key1", "value1"))
if got := keeper.GetChildren(ctx, ""); !childrenEqual(got.Children, []string{"inited", "key1"}) {
t.Errorf("got %q children, want [inited,key1]", got.Children)
}
// Check alphabetical.
- keeper.SetStorage(ctx, types.NewStorageEntry("alpha2", "value2"))
+ keeper.SetStorage(ctx, agoric.NewKVEntry("alpha2", "value2"))
if got := keeper.GetChildren(ctx, ""); !childrenEqual(got.Children, []string{"alpha2", "inited", "key1"}) {
t.Errorf("got %q children, want [alpha2,inited,key1]", got.Children)
}
- keeper.SetStorage(ctx, types.NewStorageEntry("beta3", "value3"))
+ keeper.SetStorage(ctx, agoric.NewKVEntry("beta3", "value3"))
if got := keeper.GetChildren(ctx, ""); !childrenEqual(got.Children, []string{"alpha2", "beta3", "inited", "key1"}) {
t.Errorf("got %q children, want [alpha2,beta3,inited,key1]", got.Children)
}
@@ -99,7 +100,7 @@ func TestStorage(t *testing.T) {
}
// Check adding children.
- keeper.SetStorage(ctx, types.NewStorageEntry("key1.child1", "value1child"))
+ keeper.SetStorage(ctx, agoric.NewKVEntry("key1.child1", "value1child"))
if got := keeper.GetEntry(ctx, "key1.child1").StringValue(); got != "value1child" {
t.Errorf("got %q, want %q", got, "value1child")
}
@@ -109,7 +110,7 @@ func TestStorage(t *testing.T) {
}
// Add a grandchild.
- keeper.SetStorage(ctx, types.NewStorageEntry("key1.child1.grandchild1", "value1grandchild"))
+ keeper.SetStorage(ctx, agoric.NewKVEntry("key1.child1.grandchild1", "value1grandchild"))
if got := keeper.GetEntry(ctx, "key1.child1.grandchild1").StringValue(); got != "value1grandchild" {
t.Errorf("got %q, want %q", got, "value1grandchild")
}
@@ -119,7 +120,7 @@ func TestStorage(t *testing.T) {
}
// Delete the child's contents.
- keeper.SetStorage(ctx, types.NewStorageEntryWithNoData("key1.child1"))
+ keeper.SetStorage(ctx, agoric.NewKVEntryWithNoValue("key1.child1"))
if got := keeper.GetChildren(ctx, "key1"); !childrenEqual(got.Children, []string{"child1"}) {
t.Errorf("got %q children, want [child1]", got.Children)
}
@@ -129,7 +130,7 @@ func TestStorage(t *testing.T) {
}
// Delete the grandchild's contents.
- keeper.SetStorage(ctx, types.NewStorageEntryWithNoData("key1.child1.grandchild1"))
+ keeper.SetStorage(ctx, agoric.NewKVEntryWithNoValue("key1.child1.grandchild1"))
if got := keeper.GetChildren(ctx, "key1.child1"); !childrenEqual(got.Children, []string{}) {
t.Errorf("got %q children, want []", got.Children)
}
@@ -139,13 +140,13 @@ func TestStorage(t *testing.T) {
}
// See about deleting the parent.
- keeper.SetStorage(ctx, types.NewStorageEntryWithNoData("key1"))
+ keeper.SetStorage(ctx, agoric.NewKVEntryWithNoValue("key1"))
if got := keeper.GetChildren(ctx, ""); !childrenEqual(got.Children, []string{"alpha2", "beta3", "inited"}) {
t.Errorf("got %q children, want [alpha2,beta3,inited]", got.Children)
}
// Do a deep set.
- keeper.SetStorage(ctx, types.NewStorageEntry("key2.child2.grandchild2", "value2grandchild"))
+ keeper.SetStorage(ctx, agoric.NewKVEntry("key2.child2.grandchild2", "value2grandchild"))
if got := keeper.GetChildren(ctx, ""); !childrenEqual(got.Children, []string{"alpha2", "beta3", "inited", "key2"}) {
t.Errorf("got %q children, want [alpha2,beta3,inited,key2]", got.Children)
}
@@ -157,7 +158,7 @@ func TestStorage(t *testing.T) {
}
// Do another deep set.
- keeper.SetStorage(ctx, types.NewStorageEntry("key2.child2.grandchild2a", "value2grandchilda"))
+ keeper.SetStorage(ctx, agoric.NewKVEntry("key2.child2.grandchild2a", "value2grandchilda"))
if got := keeper.GetChildren(ctx, "key2.child2"); !childrenEqual(got.Children, []string{"grandchild2", "grandchild2a"}) {
t.Errorf("got %q children, want [grandchild2,grandchild2a]", got.Children)
}
@@ -184,19 +185,37 @@ func TestStorage(t *testing.T) {
t.Errorf("got export %q, want %q", got, expectedKey2Export)
}
+ keeper.RemoveEntriesWithPrefix(ctx, "key2.child2")
+ if keeper.HasEntry(ctx, "key2") {
+ t.Errorf("got leftover entries for key2 after removal")
+ }
+ expectedRemainingExport := []*types.DataEntry{
+ {Path: "alpha2", Value: "value2"},
+ {Path: "beta3", Value: "value3"},
+ {Path: "inited", Value: ""},
+ }
+ gotRemainingExport := keeper.ExportStorage(ctx)
+ if !reflect.DeepEqual(gotRemainingExport, expectedRemainingExport) {
+ t.Errorf("got remaining export %q, want %q", expectedRemainingExport, expectedRemainingExport)
+ }
+
keeper.ImportStorage(ctx, gotExport)
+ gotExport = keeper.ExportStorage(ctx)
+ if !reflect.DeepEqual(gotExport, expectedExport) {
+ t.Errorf("got export %q after import, want %q", gotExport, expectedExport)
+ }
}
func TestStorageNotify(t *testing.T) {
tk := makeTestKit()
ctx, keeper := tk.ctx, tk.vstorageKeeper
- keeper.SetStorageAndNotify(ctx, types.NewStorageEntry("notify.noLegacy", "noLegacyValue"))
- keeper.LegacySetStorageAndNotify(ctx, types.NewStorageEntry("notify.legacy", "legacyValue"))
- keeper.SetStorageAndNotify(ctx, types.NewStorageEntry("notify.noLegacy2", "noLegacyValue2"))
- keeper.SetStorageAndNotify(ctx, types.NewStorageEntry("notify.legacy2", "legacyValue2"))
- keeper.LegacySetStorageAndNotify(ctx, types.NewStorageEntry("notify.legacy2", "legacyValue2b"))
- keeper.SetStorageAndNotify(ctx, types.NewStorageEntry("notify.noLegacy2", "noLegacyValue2b"))
+ keeper.SetStorageAndNotify(ctx, agoric.NewKVEntry("notify.noLegacy", "noLegacyValue"))
+ keeper.LegacySetStorageAndNotify(ctx, agoric.NewKVEntry("notify.legacy", "legacyValue"))
+ keeper.SetStorageAndNotify(ctx, agoric.NewKVEntry("notify.noLegacy2", "noLegacyValue2"))
+ keeper.SetStorageAndNotify(ctx, agoric.NewKVEntry("notify.legacy2", "legacyValue2"))
+ keeper.LegacySetStorageAndNotify(ctx, agoric.NewKVEntry("notify.legacy2", "legacyValue2b"))
+ keeper.SetStorageAndNotify(ctx, agoric.NewKVEntry("notify.noLegacy2", "noLegacyValue2b"))
// Check the batched events.
expectedBeforeFlushEvents := sdk.Events{}
@@ -273,44 +292,3 @@ func TestStorageNotify(t *testing.T) {
t.Errorf("got after second flush events %#v, want %#v", got, expectedAfterFlushEvents)
}
}
-
-func TestStorageMigrate(t *testing.T) {
- testKit := makeTestKit()
- ctx, keeper := testKit.ctx, testKit.vstorageKeeper
-
- // Simulate a pre-migration storage with empty string as placeholders
- keeper.SetStorage(ctx, types.NewStorageEntry("key1", "value1"))
- keeper.SetStorage(ctx, types.NewStorageEntry("key1.child1.grandchild1", "value1grandchild"))
- keeper.SetStorage(ctx, types.NewStorageEntry("key1.child1", ""))
-
- // Do a deep set.
- keeper.SetStorage(ctx, types.NewStorageEntry("key2.child2.grandchild2", "value2grandchild"))
- keeper.SetStorage(ctx, types.NewStorageEntry("key2.child2.grandchild2a", "value2grandchilda"))
- keeper.SetStorage(ctx, types.NewStorageEntry("key2.child2", ""))
- keeper.SetStorage(ctx, types.NewStorageEntry("key2", ""))
-
- keeper.MigrateNoDataPlaceholders(ctx)
-
- if keeper.HasStorage(ctx, "key1.child1") {
- t.Errorf("has key1.child1, want no value")
- }
- if keeper.HasStorage(ctx, "key2.child2") {
- t.Errorf("has key2.child2, want no value")
- }
- if keeper.HasStorage(ctx, "key2") {
- t.Errorf("has key2, want no value")
- }
-
- // Check the export.
- expectedExport := []*types.DataEntry{
- {Path: "key1", Value: "value1"},
- {Path: "key1.child1.grandchild1", Value: "value1grandchild"},
- {Path: "key2.child2.grandchild2", Value: "value2grandchild"},
- {Path: "key2.child2.grandchild2a", Value: "value2grandchilda"},
- }
- got := keeper.ExportStorage(ctx)
- if !reflect.DeepEqual(got, expectedExport) {
- t.Errorf("got export %q, want %q", got, expectedExport)
- }
- keeper.ImportStorage(ctx, got)
-}
diff --git a/golang/cosmos/x/vstorage/keeper/querier.go b/golang/cosmos/x/vstorage/keeper/querier.go
index 698d61fac3b..44a8a8d40b4 100644
--- a/golang/cosmos/x/vstorage/keeper/querier.go
+++ b/golang/cosmos/x/vstorage/keeper/querier.go
@@ -35,7 +35,7 @@ func NewQuerier(keeper Keeper, legacyQuerierCdc *codec.LegacyAmino) sdk.Querier
// nolint: unparam
func queryData(ctx sdk.Context, path string, req abci.RequestQuery, keeper Keeper, legacyQuerierCdc *codec.LegacyAmino) (res []byte, err error) {
entry := keeper.GetEntry(ctx, path)
- if !entry.HasData() {
+ if !entry.HasValue() {
return nil, sdkerrors.Wrap(sdkerrors.ErrUnknownRequest, "could not get vstorage path")
}
diff --git a/golang/cosmos/x/vstorage/types/types.go b/golang/cosmos/x/vstorage/types/types.go
index b8cb3174c16..c65fe0948ea 100644
--- a/golang/cosmos/x/vstorage/types/types.go
+++ b/golang/cosmos/x/vstorage/types/types.go
@@ -1,10 +1,5 @@
package types
-import (
- "encoding/json"
- "fmt"
-)
-
func NewData() *Data {
return &Data{}
}
@@ -12,64 +7,3 @@ func NewData() *Data {
func NewChildren() *Children {
return &Children{}
}
-
-type StorageEntry struct {
- path string
- value *string
-}
-
-func NewStorageEntry(path string, value string) StorageEntry {
- return StorageEntry{path, &value}
-}
-
-func NewStorageEntryWithNoData(path string) StorageEntry {
- return StorageEntry{path, nil}
-}
-
-// UnmarshalStorageEntry interprets its argument as a [key: string, value?: string | null]
-// JSON array and returns a corresponding StorageEntry.
-// The key must be a string, and the value (if present) must be a string or null.
-func UnmarshalStorageEntry(msg json.RawMessage) (entry StorageEntry, err error) {
- var generic [2]interface{}
- err = json.Unmarshal(msg, &generic)
-
- if err != nil {
- return
- }
-
- path, ok := generic[0].(string)
- if !ok {
- err = fmt.Errorf("invalid storage entry path: %q", generic[0])
- return
- }
-
- switch generic[1].(type) {
- case string:
- entry = NewStorageEntry(path, generic[1].(string))
- case nil:
- entry = NewStorageEntryWithNoData(path)
- default:
- err = fmt.Errorf("invalid storage entry value: %q", generic[1])
- }
- return
-}
-
-func (se StorageEntry) HasData() bool {
- return se.value != nil
-}
-
-func (se StorageEntry) Path() string {
- return se.path
-}
-
-func (se StorageEntry) Value() *string {
- return se.value
-}
-
-func (se StorageEntry) StringValue() string {
- if se.value != nil {
- return *se.value
- } else {
- return ""
- }
-}
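
This diff replaces `types.StorageEntry` with `agoric.KVEntry` from `golang/cosmos/types`, whose definition is not part of this excerpt. Judging from its call sites here (`NewKVEntry`, `NewKVEntryWithNoValue`, `Key()`, `Value() *string`, `StringValue()`, `HasValue()`, plus direct `json.Unmarshal`/`json.Marshal` of `[key, value?]` tuples in vstorage.go below), its shape is presumably close to this sketch:

```go
// Presumed shape of agoric.KVEntry, inferred from its call sites in this
// diff; the real type also appears to implement JSON (un)marshaling as a
// [key, value-or-null] tuple.
type KVEntry struct {
	key   string
	value *string // nil means "no value", distinct from an empty string
}

func NewKVEntry(key string, value string) KVEntry {
	return KVEntry{key, &value}
}

func NewKVEntryWithNoValue(key string) KVEntry {
	return KVEntry{key, nil}
}

func (e KVEntry) Key() string    { return e.key }
func (e KVEntry) HasValue() bool { return e.value != nil }
func (e KVEntry) Value() *string { return e.value }

func (e KVEntry) StringValue() string {
	if e.value != nil {
		return *e.value
	}
	return ""
}
```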
diff --git a/golang/cosmos/x/vstorage/vstorage.go b/golang/cosmos/x/vstorage/vstorage.go
index b2120948a30..3df0da359de 100644
--- a/golang/cosmos/x/vstorage/vstorage.go
+++ b/golang/cosmos/x/vstorage/vstorage.go
@@ -7,8 +7,8 @@ import (
sdk "github.com/cosmos/cosmos-sdk/types"
+ agoric "github.com/Agoric/agoric-sdk/golang/cosmos/types"
"github.com/Agoric/agoric-sdk/golang/cosmos/vm"
- "github.com/Agoric/agoric-sdk/golang/cosmos/x/vstorage/types"
)
type vstorageHandler struct {
@@ -69,8 +69,8 @@ func (sh vstorageHandler) Receive(cctx *vm.ControllerContext, str string) (ret s
switch msg.Method {
case "set":
for _, arg := range msg.Args {
- var entry types.StorageEntry
- entry, err = types.UnmarshalStorageEntry(arg)
+ var entry agoric.KVEntry
+ err = json.Unmarshal(arg, &entry)
if err != nil {
return
}
@@ -83,8 +83,8 @@ func (sh vstorageHandler) Receive(cctx *vm.ControllerContext, str string) (ret s
// FIXME: Use just "set" and remove this case.
case "legacySet":
for _, arg := range msg.Args {
- var entry types.StorageEntry
- entry, err = types.UnmarshalStorageEntry(arg)
+ var entry agoric.KVEntry
+ err = json.Unmarshal(arg, &entry)
if err != nil {
return
}
@@ -95,8 +95,8 @@ func (sh vstorageHandler) Receive(cctx *vm.ControllerContext, str string) (ret s
case "setWithoutNotify":
for _, arg := range msg.Args {
- var entry types.StorageEntry
- entry, err = types.UnmarshalStorageEntry(arg)
+ var entry agoric.KVEntry
+ err = json.Unmarshal(arg, &entry)
if err != nil {
return
}
@@ -106,16 +106,16 @@ func (sh vstorageHandler) Receive(cctx *vm.ControllerContext, str string) (ret s
case "append":
for _, arg := range msg.Args {
- var entry types.StorageEntry
- entry, err = types.UnmarshalStorageEntry(arg)
+ var entry agoric.KVEntry
+ err = json.Unmarshal(arg, &entry)
if err != nil {
return
}
- if !entry.HasData() {
- err = fmt.Errorf("no value for append entry with path: %q", entry.Path())
+ if !entry.HasValue() {
+ err = fmt.Errorf("no value for append entry with path: %q", entry.Key())
return
}
- err = keeper.AppendStorageValueAndNotify(cctx.Context, entry.Path(), entry.StringValue())
+ err = keeper.AppendStorageValueAndNotify(cctx.Context, entry.Key(), entry.StringValue())
if err != nil {
return
}
@@ -131,10 +131,7 @@ func (sh vstorageHandler) Receive(cctx *vm.ControllerContext, str string) (ret s
}
entry := keeper.GetEntry(cctx.Context, path)
- if !entry.HasData() {
- return "null", nil
- }
- bz, err := json.Marshal(entry.StringValue())
+ bz, err := json.Marshal(entry.Value())
if err != nil {
return "", err
}
@@ -194,13 +191,13 @@ func (sh vstorageHandler) Receive(cctx *vm.ControllerContext, str string) (ret s
return
}
children := keeper.GetChildren(cctx.Context, path)
- entries := make([][]interface{}, len(children.Children))
+ entries := make([]agoric.KVEntry, len(children.Children))
for i, child := range children.Children {
entry := keeper.GetEntry(cctx.Context, fmt.Sprintf("%s.%s", path, child))
- if !entry.HasData() {
- entries[i] = []interface{}{child}
+ if !entry.HasValue() {
+ entries[i] = agoric.NewKVEntryWithNoValue(child)
} else {
- entries[i] = []interface{}{child, entry.Value()}
+ entries[i] = agoric.NewKVEntry(child, entry.StringValue())
}
}
bytes, err := json.Marshal(entries)
@@ -216,9 +213,9 @@ func (sh vstorageHandler) Receive(cctx *vm.ControllerContext, str string) (ret s
return
}
children := keeper.GetChildren(cctx.Context, path)
- vals := make([]string, len(children.Children))
+ vals := make([]*string, len(children.Children))
for i, child := range children.Children {
- vals[i] = keeper.GetEntry(cctx.Context, fmt.Sprintf("%s.%s", path, child)).StringValue()
+ vals[i] = keeper.GetEntry(cctx.Context, fmt.Sprintf("%s.%s", path, child)).Value()
}
bytes, err := json.Marshal(vals)
if err != nil {
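
The vstorage handler now speaks a single KVEntry wire shape in both directions: each entry is a JSON array of `[key]` (no value) or `[key, value]`, with `null` standing in for "no data". A minimal sketch of that round-trip from the JS side, assuming only plain JSON (the helper names here are illustrative, not part of the SDK):

```js
// Illustrative encode/decode for the KVEntry wire shape used by the
// "set"/"legacySet"/"setWithoutNotify"/"append" args and the "entries" reply.
const encodeKVEntry = (key, value) =>
  value == null ? [key] : [key, value];

const decodeKVEntry = entry => {
  const [key, value = null] = entry;
  if (typeof key !== 'string') throw Error(`invalid key ${key}`);
  if (value !== null && typeof value !== 'string') {
    throw Error(`invalid value ${value}`);
  }
  return { key, value }; // value === null corresponds to Go's !HasValue()
};

console.log(decodeKVEntry(JSON.parse('["foo","bar"]'))); // { key: 'foo', value: 'bar' }
console.log(decodeKVEntry(JSON.parse('["top.empty"]'))); // { key: 'top.empty', value: null }
```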
diff --git a/golang/cosmos/x/vstorage/vstorage_test.go b/golang/cosmos/x/vstorage/vstorage_test.go
index 02e478aea09..5817e1ade25 100644
--- a/golang/cosmos/x/vstorage/vstorage_test.go
+++ b/golang/cosmos/x/vstorage/vstorage_test.go
@@ -70,10 +70,10 @@ func TestGetAndHas(t *testing.T) {
kit := makeTestKit()
keeper, handler, ctx, cctx := kit.keeper, kit.handler, kit.ctx, kit.cctx
- keeper.SetStorage(ctx, types.NewStorageEntry("foo", "bar"))
- keeper.SetStorage(ctx, types.NewStorageEntry("empty", ""))
- keeper.SetStorage(ctx, types.NewStorageEntry("top.empty-non-terminal.leaf", ""))
- keeper.SetStorage(ctx, types.NewStorageEntryWithNoData("top.empty-non-terminal"))
+ keeper.SetStorage(ctx, agorictypes.NewKVEntry("foo", "bar"))
+ keeper.SetStorage(ctx, agorictypes.NewKVEntry("empty", ""))
+ keeper.SetStorage(ctx, agorictypes.NewKVEntry("top.empty-non-terminal.leaf", ""))
+ keeper.SetStorage(ctx, agorictypes.NewKVEntryWithNoValue("top.empty-non-terminal"))
type testCase struct {
label string
@@ -153,7 +153,7 @@ func doTestSet(t *testing.T, method string, expectNotify bool) {
// TODO: Fully validate input before making changes
// args: []interface{}{[]string{"foo", "X"}, []interface{}{42, "new"}},
args: []interface{}{[]interface{}{42, "new"}},
- errContains: ptr("path"),
+ errContains: ptr("json"),
},
{label: "non-string value",
// TODO: Fully validate input before making changes
@@ -259,15 +259,15 @@ func TestEntries(t *testing.T) {
kit := makeTestKit()
keeper, handler, ctx, cctx := kit.keeper, kit.handler, kit.ctx, kit.cctx
- keeper.SetStorage(ctx, types.NewStorageEntry("key1", "value1"))
- keeper.SetStorage(ctx, types.NewStorageEntry("key1.child1.grandchild1", "value1grandchild"))
- keeper.SetStorage(ctx, types.NewStorageEntryWithNoData("key1.child1.grandchild2"))
- keeper.SetStorage(ctx, types.NewStorageEntryWithNoData("key1.child1"))
- keeper.SetStorage(ctx, types.NewStorageEntry("key1.child1.empty-non-terminal.leaf", ""))
- keeper.SetStorage(ctx, types.NewStorageEntryWithNoData("key2"))
- keeper.SetStorage(ctx, types.NewStorageEntryWithNoData("key2.child2"))
- keeper.SetStorage(ctx, types.NewStorageEntry("key2.child2.grandchild2", "value2grandchild"))
- keeper.SetStorage(ctx, types.NewStorageEntry("key2.child2.grandchild2a", "value2grandchilda"))
+ keeper.SetStorage(ctx, agorictypes.NewKVEntry("key1", "value1"))
+ keeper.SetStorage(ctx, agorictypes.NewKVEntry("key1.child1.grandchild1", "value1grandchild"))
+ keeper.SetStorage(ctx, agorictypes.NewKVEntryWithNoValue("key1.child1.grandchild2"))
+ keeper.SetStorage(ctx, agorictypes.NewKVEntryWithNoValue("key1.child1"))
+ keeper.SetStorage(ctx, agorictypes.NewKVEntry("key1.child1.empty-non-terminal.leaf", ""))
+ keeper.SetStorage(ctx, agorictypes.NewKVEntryWithNoValue("key2"))
+ keeper.SetStorage(ctx, agorictypes.NewKVEntryWithNoValue("key2.child2"))
+ keeper.SetStorage(ctx, agorictypes.NewKVEntry("key2.child2.grandchild2", "value2grandchild"))
+ keeper.SetStorage(ctx, agorictypes.NewKVEntry("key2.child2.grandchild2a", "value2grandchilda"))
type testCase struct {
path string
diff --git a/packages/agoric-cli/src/helpers.js b/packages/agoric-cli/src/helpers.js
index db9abdd0043..cbb9904108a 100644
--- a/packages/agoric-cli/src/helpers.js
+++ b/packages/agoric-cli/src/helpers.js
@@ -13,8 +13,7 @@ export const getSDKBinaries = ({
return {
agSolo: new URL(`${jsPfx}/solo/src/entrypoint.js`, myUrl).pathname,
agSoloBuild: ['yarn', '--cwd', xsnap, `build:from-env`],
- cosmosChain: new URL(`${jsPfx}/cosmic-swingset/bin/ag-chain-cosmos`, myUrl)
- .pathname,
+ cosmosChain: new URL(`${goPfx}/cosmos/build/agd`, myUrl).pathname,
cosmosChainBuild: cosmosBuild,
cosmosClientBuild: cosmosBuild,
cosmosHelper: new URL(`${goPfx}/cosmos/build/agd`, myUrl).pathname,
diff --git a/packages/cosmic-swingset/Makefile b/packages/cosmic-swingset/Makefile
index 7b8192ce8fa..be464ea28f6 100644
--- a/packages/cosmic-swingset/Makefile
+++ b/packages/cosmic-swingset/Makefile
@@ -81,7 +81,7 @@ all: build-chain install-nobuild
client: build-helper install-nobuild
-install-nobuild: install-local install-agd install-helper
+install-nobuild: install-local install-agd
install: all install-nobuild
@@ -363,10 +363,6 @@ install-agd:
mkdir -p "$(BIN)"
ln -sf "$(SDK_ROOT)/bin/agd" "$(BIN)/agd"
-install-helper:
- mkdir -p "$(BIN)"
- ln -sf "$(SDK_ROOT)/bin/agd" "$(BIN)/ag-cosmos-helper"
-
install-local:
mkdir -p "$(BIN)"
ln -sf "$(SDK_ROOT)/bin/agd" "$(BIN)/ag-chain-cosmos"
diff --git a/packages/cosmic-swingset/package.json b/packages/cosmic-swingset/package.json
index 4e804413047..d14cc7ab655 100644
--- a/packages/cosmic-swingset/package.json
+++ b/packages/cosmic-swingset/package.json
@@ -4,7 +4,7 @@
"description": "Agoric's Cosmos blockchain integration",
"type": "module",
"bin": {
- "ag-chain-cosmos": "./src/entrypoint.js"
+ "ag-chain-cosmos": "src/entrypoint.js"
},
"main": "src/chain-main.js",
"repository": "https://github.com/Agoric/agoric-sdk",
diff --git a/packages/cosmic-swingset/src/chain-main.js b/packages/cosmic-swingset/src/chain-main.js
index 89f383e4214..287b23a324d 100644
--- a/packages/cosmic-swingset/src/chain-main.js
+++ b/packages/cosmic-swingset/src/chain-main.js
@@ -39,14 +39,18 @@ import stringify from './helpers/json-stable-stringify.js';
import { launch } from './launch-chain.js';
import { getTelemetryProviders } from './kernel-stats.js';
import { makeProcessValue } from './helpers/process-value.js';
-import { spawnSwingStoreExport } from './export-kernel-db.js';
-import { performStateSyncImport } from './import-kernel-db.js';
+import {
+ spawnSwingStoreExport,
+ validateExporterOptions,
+} from './export-kernel-db.js';
+import {
+ performStateSyncImport,
+ validateImporterOptions,
+} from './import-kernel-db.js';
// eslint-disable-next-line no-unused-vars
let whenHellFreezesOver = null;
-const AG_COSMOS_INIT = 'AG_COSMOS_INIT';
-
const TELEMETRY_SERVICE_NAME = 'agd-cosmos';
const toNumber = specimen => {
@@ -240,11 +244,11 @@ export default async function main(progname, args, { env, homedir, agcc }) {
/** @type {((obj: object) => void) | undefined} */
let writeSlogObject;
- // this storagePort changes for every single message. We define it out here
- // so the 'externalStorage' object can close over the single mutable
- // instance, and we update the 'portNums.storage' value each time toSwingSet is called
+  // the storagePort used to change for every single message. It's defined out
+  // here so 'sendToChainStorage' can close over the single mutable instance
+  // from when we updated the 'portNums.storage' value on each toSwingSet call.
async function launchAndInitializeSwingSet(bootMsg) {
- const sendToChain = msg => chainSend(portNums.storage, msg);
+ const sendToChainStorage = msg => chainSend(portNums.storage, msg);
// this object is used to store the mailbox state.
const fromBridgeMailbox = data => {
const ack = toNumber(data.ack);
@@ -253,7 +257,7 @@ export default async function main(progname, args, { env, homedir, agcc }) {
};
const mailboxStorage = makeReadCachingStorage(
makePrefixedBridgeStorage(
- sendToChain,
+ sendToChainStorage,
`${STORAGE_PATH.MAILBOX}.`,
'legacySet',
val => fromBridgeMailbox(JSON.parse(val)),
@@ -263,7 +267,7 @@ export default async function main(progname, args, { env, homedir, agcc }) {
const makeQueueStorage = queuePath => {
const { kvStore, commit, abort } = makeBufferedStorage(
makePrefixedBridgeStorage(
- sendToChain,
+ sendToChainStorage,
`${queuePath}.`,
'setWithoutNotify',
x => x,
@@ -294,20 +298,20 @@ export default async function main(progname, args, { env, homedir, agcc }) {
if (typeof key !== 'string') {
throw Fail`Unexpected swingStore exported key ${q(key)}`;
}
- const path = `${STORAGE_PATH.SWING_STORE}.${key}`;
if (value == null) {
- return [path];
+ return [key];
}
if (typeof value !== 'string') {
throw Fail`Unexpected ${typeof value} value for swingStore exported key ${q(
key,
)}`;
}
- return [path, value];
+ return [key, value];
});
- sendToChain(
+ chainSend(
+ portNums.swingset,
stringify({
- method: 'setWithoutNotify',
+ method: 'swingStoreUpdateExportData',
args: entries,
}),
);
@@ -493,26 +497,50 @@ export default async function main(progname, args, { env, homedir, agcc }) {
};
}
- async function handleCosmosSnapshot(blockHeight, request, requestArgs) {
+  /** @type {Awaited<ReturnType<typeof launch>>['blockingSend'] | undefined} */
+ let blockingSend;
+
+ async function handleSwingStoreExport(blockHeight, request, requestArgs) {
switch (request) {
case 'restore': {
- const exportDir = requestArgs[0];
- if (typeof exportDir !== 'string') {
- throw Fail`Invalid exportDir argument ${q(exportDir)}`;
- }
+ const requestOptions =
+ typeof requestArgs[0] === 'string'
+ ? { exportDir: requestArgs[0] }
+ : requestArgs[0] || {};
+ const options = {
+ ...requestOptions,
+ stateDir: stateDBDir,
+ blockHeight,
+ };
+ validateImporterOptions(options);
+ !stateSyncExport ||
+ Fail`Snapshot already in progress for ${stateSyncExport.blockHeight}`;
+ !blockingSend || Fail`Cannot restore snapshot after init`;
console.info(
'Restoring SwingSet state from snapshot at block height',
blockHeight,
+ 'with options',
+ JSON.stringify(requestOptions),
);
- return performStateSyncImport(
- { exportDir, stateDir: stateDBDir, blockHeight },
- { fs: { ...fs, ...fsPromises }, pathResolve, log: null },
- );
+ return performStateSyncImport(options, {
+ fs: { ...fs, ...fsPromises },
+ pathResolve,
+ log: null,
+ });
}
case 'initiate': {
!stateSyncExport ||
Fail`Snapshot already in progress for ${stateSyncExport.blockHeight}`;
+ const requestOptions = requestArgs[0] || {};
+
+ validateExporterOptions({
+ ...requestOptions,
+ stateDir: stateDBDir,
+ exportDir: '',
+ blockHeight,
+ });
+
const exportData =
+        /** @type {Required<NonNullable<typeof stateSyncExport>>} */ ({
blockHeight,
@@ -553,12 +581,15 @@ export default async function main(progname, args, { env, homedir, agcc }) {
);
});
- console.info(
+ console.warn(
'Initiating SwingSet state snapshot at block height',
blockHeight,
+ 'with options',
+ JSON.stringify(requestOptions),
);
exportData.exporter = spawnSwingStoreExport(
{
+ ...requestOptions,
stateDir: stateDBDir,
exportDir: exportData.exportDir,
blockHeight,
@@ -607,74 +638,90 @@ export default async function main(progname, args, { env, homedir, agcc }) {
}
}
-  /** @type {Awaited<ReturnType<typeof launch>>['blockingSend'] | undefined} */
- let blockingSend;
-
async function toSwingSet(action, _replier) {
// console.log(`toSwingSet`, action);
- if (action.vibcPort) {
- portNums.dibc = action.vibcPort;
- }
- if (action.storagePort) {
- // Initialize the storage for this particular transaction.
- // console.log(` setting portNums.storage to`, action.storagePort);
- portNums.storage = action.storagePort;
- }
+ await null;
- if (action.vbankPort) {
- portNums.bank = action.vbankPort;
- }
+ switch (action.type) {
+ case ActionType.AG_COSMOS_INIT: {
+ // console.error('got AG_COSMOS_INIT', action);
- if (action.lienPort) {
- portNums.lien = action.lienPort;
- }
+ !blockingSend || Fail`Swingset already initialized`;
- // Snapshot actions are specific to cosmos chains and handled here
- if (action.type === ActionType.COSMOS_SNAPSHOT) {
- const { blockHeight, request, args: requestArgs } = action;
- writeSlogObject?.({
- type: 'cosmic-swingset-snapshot-start',
- blockHeight,
- request,
- args: requestArgs,
- });
+ if (action.swingsetPort) {
+ portNums.swingset = action.swingsetPort;
+ }
- const resultP = handleCosmosSnapshot(blockHeight, request, requestArgs);
+ if (action.vibcPort) {
+ portNums.dibc = action.vibcPort;
+ }
- resultP.then(
- result => {
- writeSlogObject?.({
- type: 'cosmic-swingset-snapshot-finish',
- blockHeight,
- request,
- args: requestArgs,
- result,
- });
- },
- error => {
- writeSlogObject?.({
- type: 'cosmic-swingset-snapshot-finish',
- blockHeight,
- request,
- args: requestArgs,
- error,
- });
- },
- );
+ if (action.storagePort) {
+ portNums.storage = action.storagePort;
+ }
- return resultP;
- }
+ if (action.vbankPort) {
+ portNums.bank = action.vbankPort;
+ }
- // Ensure that initialization has completed.
- blockingSend = await (blockingSend || launchAndInitializeSwingSet(action));
+ if (action.lienPort) {
+ portNums.lien = action.lienPort;
+ }
+ harden(portNums);
- if (action.type === AG_COSMOS_INIT) {
- // console.error('got AG_COSMOS_INIT', action);
- return true;
- }
+ // Ensure that initialization has completed.
+ blockingSend = await launchAndInitializeSwingSet(action);
- // Block related actions are processed by `blockingSend`
- return blockingSend(action);
+ return blockingSend(action);
+ }
+
+ // Snapshot actions are specific to cosmos chains and handled here
+ case ActionType.SWING_STORE_EXPORT: {
+ const { blockHeight, request, args: requestArgs } = action;
+ writeSlogObject?.({
+ type: 'cosmic-swingset-snapshot-start',
+ blockHeight,
+ request,
+ args: requestArgs,
+ });
+
+ const resultP = handleSwingStoreExport(
+ blockHeight,
+ request,
+ requestArgs,
+ );
+
+ resultP.then(
+ result => {
+ writeSlogObject?.({
+ type: 'cosmic-swingset-snapshot-finish',
+ blockHeight,
+ request,
+ args: requestArgs,
+ result,
+ });
+ },
+ error => {
+ writeSlogObject?.({
+ type: 'cosmic-swingset-snapshot-finish',
+ blockHeight,
+ request,
+ args: requestArgs,
+ error,
+ });
+ },
+ );
+
+ return resultP;
+ }
+
+ default: {
+ if (!blockingSend) throw Fail`Swingset not initialized`;
+
+ // Block related actions are processed by `blockingSend`
+ return blockingSend(action);
+ }
+ }
}
}
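
The rewritten `toSwingSet` reduces to a small state machine: `AG_COSMOS_INIT` must arrive exactly once (wiring ports and launching SwingSet), `SWING_STORE_EXPORT` is handled host-side, and every other action requires an initialized `blockingSend`. A toy model of that dispatch, with simplified action shapes that are not the real API:

```js
// Toy model of the toSwingSet dispatch: init-once, host-side export handling,
// and pass-through of block actions to blockingSend.
const makeToSwingSet = ({ launchAndInitialize, handleSwingStoreExport }) => {
  let blockingSend; // undefined until AG_COSMOS_INIT arrives
  return async action => {
    switch (action.type) {
      case 'AG_COSMOS_INIT': {
        if (blockingSend) throw Error('Swingset already initialized');
        blockingSend = await launchAndInitialize(action);
        return blockingSend(action);
      }
      case 'SWING_STORE_EXPORT':
        return handleSwingStoreExport(action);
      default: {
        if (!blockingSend) throw Error('Swingset not initialized');
        return blockingSend(action);
      }
    }
  };
};
```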
diff --git a/packages/cosmic-swingset/src/export-kernel-db.js b/packages/cosmic-swingset/src/export-kernel-db.js
index 337083945b4..b9920f355b4 100755
--- a/packages/cosmic-swingset/src/export-kernel-db.js
+++ b/packages/cosmic-swingset/src/export-kernel-db.js
@@ -11,7 +11,7 @@ import pathPower from 'path';
import { fileURLToPath } from 'url';
import { makePromiseKit } from '@endo/promise-kit';
-import { Fail } from '@agoric/assert';
+import { Fail, q } from '@agoric/assert';
import { makeAggregateError } from '@agoric/internal';
import { makeShutdown } from '@agoric/internal/src/node/shutdown.js';
import { openSwingStore, makeSwingStoreExporter } from '@agoric/swing-store';
@@ -19,22 +19,66 @@ import { openSwingStore, makeSwingStoreExporter } from '@agoric/swing-store';
import { isEntrypoint } from './helpers/is-entrypoint.js';
import { makeProcessValue } from './helpers/process-value.js';
-/** @typedef {'current' | 'archival' | 'debug'} SwingStoreExportMode */
+// ExportManifestFileName is the manifest filename, which must be kept in sync
+// with the golang SwingStoreExportsHandler in golang/cosmos/x/swingset/keeper/swing_store_exports_handler.go
+export const ExportManifestFileName = 'export-manifest.json';
-// eslint-disable-next-line jsdoc/require-returns-check
/**
- * @param {string | undefined} mode
- * @returns {asserts mode is SwingStoreExportMode | undefined}
+ * @typedef {'none' // No artifacts included
+ * | import("@agoric/swing-store").ArtifactMode
+ * } SwingStoreArtifactMode
*/
-const checkExportMode = mode => {
- switch (mode) {
- case 'current':
+
+/**
+ * @typedef {'skip' // Do not include any "export data" (artifacts only)
+ * | 'repair-metadata' // Add missing artifact metadata (import only)
+ * | 'all' // Include all export data, create new swing-store on import
+ * } SwingStoreExportDataMode
+ */
+
+/**
+ * @param {SwingStoreArtifactMode | undefined} artifactMode
+ * @returns {import("@agoric/swing-store").ArtifactMode}
+ */
+export const getEffectiveArtifactMode = artifactMode => {
+ switch (artifactMode) {
+ case 'none':
+ case 'operational':
+ return 'operational';
+ case undefined:
+ case 'replay':
+ return 'replay';
case 'archival':
case 'debug':
+ return artifactMode;
+ default:
+ throw Fail`Invalid value ${q(artifactMode)} for "artifact-mode"`;
+ }
+};
+
+/** @type {(artifactMode: string | undefined) => asserts artifactMode is SwingStoreArtifactMode | undefined} */
+export const checkArtifactMode = getEffectiveArtifactMode;
+
+/**
+ * @param {string | undefined} mode
+ * @param {boolean} [isImport]
+ * @returns {asserts mode is SwingStoreExportDataMode | undefined}
+ */
+export const checkExportDataMode = (mode, isImport = false) => {
+ switch (mode) {
+ case 'skip':
case undefined:
break;
+ case 'all':
+ break;
+ case 'repair-metadata': {
+ if (isImport) {
+ break;
+ }
+ // Fall through
+ }
default:
- throw Fail`Invalid value ${mode} for "export-mode"`;
+ throw Fail`Invalid value ${q(mode)} for "export-data-mode"`;
}
};
@@ -50,7 +94,7 @@ const checkExportMode = mode => {
*
* @typedef {object} StateSyncManifest
* @property {number} blockHeight the block height corresponding to this export
- * @property {SwingStoreExportMode} [mode]
+ * @property {SwingStoreArtifactMode} [artifactMode]
* @property {string} [data] file name containing the swingStore "export data"
* @property {Array<[artifactName: string, fileName: string]>} artifacts
* List of swingStore export artifacts which can be validated by the export data
@@ -69,10 +113,31 @@ const checkExportMode = mode => {
* @property {string} stateDir the directory containing the SwingStore to export
* @property {string} exportDir the directory in which to place the exported artifacts and manifest
* @property {number} [blockHeight] block height to check for
- * @property {SwingStoreExportMode} [exportMode] whether to include historical or debug artifacts in the export
- * @property {boolean} [includeExportData] whether to include an artifact for the export data in the export
+ * @property {SwingStoreArtifactMode} [artifactMode] the level of artifacts to include in the export
+ * @property {SwingStoreExportDataMode} [exportDataMode] whether to include a synthetic artifact for the export data in the export
*/
+/**
+ * @param {object} options
+ * @returns {asserts options is StateSyncExporterOptions}
+ */
+export const validateExporterOptions = options => {
+ typeof options === 'object' || Fail`options is not an object`;
+ typeof options.stateDir === 'string' ||
+ Fail`required stateDir option not a string`;
+ typeof options.exportDir === 'string' ||
+ Fail`required exportDir option not a string`;
+ options.blockHeight == null ||
+ typeof options.blockHeight === 'number' ||
+ Fail`optional blockHeight option not a number`;
+ checkArtifactMode(options.artifactMode);
+ checkExportDataMode(options.exportDataMode);
+
+ options.includeExportData === undefined ||
+ Fail`deprecated includeExportData option found`;
+ options.exportMode === undefined || Fail`deprecated exportMode option found`;
+};
+
/**
* @param {StateSyncExporterOptions} options
* @param {object} powers
@@ -84,7 +149,7 @@ const checkExportMode = mode => {
* @returns {StateSyncExporter}
*/
export const initiateSwingStoreExport = (
- { stateDir, exportDir, blockHeight, exportMode, includeExportData },
+ { stateDir, exportDir, blockHeight, artifactMode, exportDataMode },
{
fs: { open, writeFile },
pathResolve,
@@ -93,11 +158,7 @@ export const initiateSwingStoreExport = (
log = console.log,
},
) => {
- const effectiveExportMode = exportMode ?? 'current';
- if (effectiveExportMode !== 'current' && !includeExportData) {
- throw Fail`Must include export data if export mode not "current"`;
- }
-
+ const effectiveArtifactMode = getEffectiveArtifactMode(artifactMode);
/** @type {number | undefined} */
let savedBlockHeight;
@@ -113,11 +174,13 @@ export const initiateSwingStoreExport = (
const cleanup = [];
const exportDone = (async () => {
- const manifestPath = pathResolve(exportDir, 'export-manifest.json');
+ const manifestPath = pathResolve(exportDir, ExportManifestFileName);
const manifestFile = await open(manifestPath, 'wx');
cleanup.push(async () => manifestFile.close());
- const swingStoreExporter = makeExporter(stateDir, exportMode);
+ const swingStoreExporter = makeExporter(stateDir, {
+ artifactMode: effectiveArtifactMode,
+ });
cleanup.push(async () => swingStoreExporter.close());
const { hostStorage } = openDB(stateDir);
@@ -127,7 +190,9 @@ export const initiateSwingStoreExport = (
if (blockHeight) {
blockHeight === savedBlockHeight ||
- Fail`DB at unexpected block height ${savedBlockHeight} (expected ${blockHeight})`;
+ Fail`DB at unexpected block height ${q(savedBlockHeight)} (expected ${q(
+ blockHeight,
+ )})`;
}
abortIfStopped();
@@ -137,11 +202,11 @@ export const initiateSwingStoreExport = (
/** @type {StateSyncManifest} */
const manifest = {
blockHeight: savedBlockHeight,
- mode: exportMode,
+ artifactMode: artifactMode || effectiveArtifactMode,
artifacts: [],
};
- if (includeExportData) {
+ if (exportDataMode === 'all') {
log?.(`Writing Export Data`);
const fileName = `export-data.jsonl`;
// eslint-disable-next-line @jessie.js/no-nested-await
@@ -157,14 +222,16 @@ export const initiateSwingStoreExport = (
}
abortIfStopped();
- for await (const artifactName of swingStoreExporter.getArtifactNames()) {
- abortIfStopped();
- log?.(`Writing artifact: ${artifactName}`);
- const artifactData = swingStoreExporter.getArtifact(artifactName);
- // Use artifactName as the file name as we trust swingStore to generate
- // artifact names that are valid file names.
- await writeFile(pathResolve(exportDir, artifactName), artifactData);
- manifest.artifacts.push([artifactName, artifactName]);
+ if (artifactMode !== 'none') {
+ for await (const artifactName of swingStoreExporter.getArtifactNames()) {
+ abortIfStopped();
+ log?.(`Writing artifact: ${artifactName}`);
+ const artifactData = swingStoreExporter.getArtifact(artifactName);
+ // Use artifactName as the file name as we trust swingStore to generate
+ // artifact names that are valid file names.
+ await writeFile(pathResolve(exportDir, artifactName), artifactData);
+ manifest.artifacts.push([artifactName, artifactName]);
+ }
}
await manifestFile.write(JSON.stringify(manifest, null, 2));
@@ -248,11 +315,22 @@ export const main = async (
/** @type {string} */ (processValue.getFlag('export-dir', '.')),
);
- const includeExportData = processValue.getBoolean({
- flagName: 'include-export-data',
- });
- const exportMode = processValue.getFlag('export-mode');
- checkExportMode(exportMode);
+ const artifactMode = /** @type {SwingStoreArtifactMode | undefined} */ (
+ processValue.getFlag('artifact-mode')
+ );
+ checkArtifactMode(artifactMode);
+
+ const exportDataMode = processValue.getFlag('export-data-mode');
+ checkExportDataMode(exportDataMode);
+
+ if (
+ processValue.getBoolean({ flagName: 'include-export-data' }) !== undefined
+ ) {
+ throw Fail`deprecated "include-export-data" options, use "export-data-mode" instead`;
+ }
+ if (processValue.getFlag('export-mode') !== undefined) {
+ throw Fail`deprecated "export-mode" options, use "artifact-mode" instead`;
+ }
const checkBlockHeight = processValue.getInteger({
flagName: 'check-block-height',
@@ -269,13 +347,13 @@ export const main = async (
stateDir,
exportDir,
blockHeight: checkBlockHeight,
- exportMode,
- includeExportData,
+ artifactMode,
+ exportDataMode,
},
{
fs,
pathResolve,
- log: verbose ? console.log : null,
+ log: verbose ? console.warn : null,
},
);
@@ -311,7 +389,7 @@ export const main = async (
* @returns {StateSyncExporter}
*/
export const spawnSwingStoreExport = (
- { stateDir, exportDir, blockHeight, exportMode, includeExportData },
+ { stateDir, exportDir, blockHeight, artifactMode, exportDataMode },
{ fork, verbose },
) => {
const args = ['--state-dir', stateDir, '--export-dir', exportDir];
@@ -320,12 +398,12 @@ export const spawnSwingStoreExport = (
args.push('--check-block-height', String(blockHeight));
}
- if (exportMode) {
- args.push('--export-mode', exportMode);
+ if (artifactMode) {
+ args.push('--artifact-mode', artifactMode);
}
- if (includeExportData) {
- args.push('--include-export-data');
+ if (exportDataMode) {
+ args.push('--export-data-mode', exportDataMode);
}
if (verbose) {
@@ -377,7 +455,7 @@ export const spawnSwingStoreExport = (
}
default: {
// @ts-expect-error exhaustive check
- Fail`Unexpected ${msg.type} message`;
+ Fail`Unexpected ${q(msg.type)} message`;
}
}
};
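
As a quick reference for the two new knobs: `--artifact-mode` controls which artifacts land in the export, `--export-data-mode` controls the synthetic export-data artifact, and the old `--export-mode`/`--include-export-data` flags now fail fast. A small sketch of how the exported `getEffectiveArtifactMode` collapses the CLI-facing modes onto the swing-store exporter's modes, assuming it runs inside this package:

```js
import { getEffectiveArtifactMode } from './export-kernel-db.js';

console.log(getEffectiveArtifactMode(undefined));  // 'replay' (the default)
console.log(getEffectiveArtifactMode('none'));     // 'operational'; metadata still needs minimal artifacts
console.log(getEffectiveArtifactMode('archival')); // 'archival', passed through (as is 'debug')
// getEffectiveArtifactMode('bogus');              // throws: Invalid value "bogus" for "artifact-mode"
```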
diff --git a/packages/cosmic-swingset/src/import-kernel-db.js b/packages/cosmic-swingset/src/import-kernel-db.js
index 4acd7152841..d41e6309cd1 100755
--- a/packages/cosmic-swingset/src/import-kernel-db.js
+++ b/packages/cosmic-swingset/src/import-kernel-db.js
@@ -12,35 +12,109 @@ import fsPromisesPower from 'fs/promises';
import pathPower from 'path';
import BufferLineTransform from '@agoric/internal/src/node/buffer-line-transform.js';
-import { Fail } from '@agoric/assert';
-import { importSwingStore } from '@agoric/swing-store';
+import { Fail, q } from '@agoric/assert';
+import { importSwingStore, openSwingStore } from '@agoric/swing-store';
import { isEntrypoint } from './helpers/is-entrypoint.js';
import { makeProcessValue } from './helpers/process-value.js';
+import {
+ ExportManifestFileName,
+ checkExportDataMode,
+ checkArtifactMode,
+} from './export-kernel-db.js';
/**
* @typedef {object} StateSyncImporterOptions
* @property {string} stateDir the directory containing the SwingStore to export
* @property {string} exportDir the directory where to place the exported artifacts and manifest
* @property {number} [blockHeight] block height to check for
- * @property {boolean} [includeHistorical] whether to include historical artifacts in the export
+ * @property {import('./export-kernel-db.js').SwingStoreExportDataMode} [exportDataMode] how to handle export data
+ * @property {import('./export-kernel-db.js').SwingStoreArtifactMode} [artifactMode] the level of historical artifacts to import
*/
+/**
+ * @param {object} options
+ * @returns {asserts options is StateSyncImporterOptions}
+ */
+export const validateImporterOptions = options => {
+ typeof options === 'object' || Fail`options is not an object`;
+ typeof options.stateDir === 'string' ||
+ Fail`required stateDir option not a string`;
+ typeof options.exportDir === 'string' ||
+ Fail`required exportDir option not a string`;
+ options.blockHeight == null ||
+ typeof options.blockHeight === 'number' ||
+ Fail`optional blockHeight option not a number`;
+ checkExportDataMode(options.exportDataMode, true);
+ checkArtifactMode(options.artifactMode);
+ options.includeHistorical === undefined ||
+ Fail`deprecated includeHistorical option found`;
+};
+
+/**
+ * @param {Pick<StateSyncImporterOptions, 'artifactMode' | 'exportDataMode'>} options
+ * @param {Readonly<import('./export-kernel-db.js').StateSyncManifest>} manifest
+ * @returns {import('@agoric/swing-store').ImportSwingStoreOptions}
+ */
+const checkAndGetImportSwingStoreOptions = (options, manifest) => {
+ typeof manifest.blockHeight === 'number' ||
+ Fail`Cannot restore snapshot without block height`;
+
+ manifest.data || Fail`State-sync manifest missing export data`;
+
+ const { artifactMode = manifest.artifactMode || 'replay' } = options;
+
+ if (artifactMode === 'none') {
+ throw Fail`Cannot import "export data" without at least "operational" artifacts`;
+ }
+
+ manifest.artifacts?.length ||
+ Fail`State-sync manifest missing required artifacts`;
+
+ switch (artifactMode) {
+ case 'debug':
+ // eslint-disable-next-line no-fallthrough
+ case 'operational':
+ if (manifest.artifactMode === 'operational') break;
+ // eslint-disable-next-line no-fallthrough
+ case 'replay':
+ if (manifest.artifactMode === 'replay') break;
+ // eslint-disable-next-line no-fallthrough
+ case 'archival':
+ if (manifest.artifactMode === 'archival') break;
+ if (
+ manifest.artifactMode === undefined ||
+ manifest.artifactMode === 'debug'
+ ) {
+ // assume the export has sufficient data
+ break;
+ }
+ throw Fail`State-sync manifest has insufficient artifacts: requested import artifact mode: ${q(
+ artifactMode,
+ )}, manifest has ${q(manifest.artifactMode)} artifacts`;
+ default:
+ throw Fail`Unexpected artifactMode ${q(artifactMode)}`;
+ }
+ return { artifactMode };
+};
+
/**
* @param {StateSyncImporterOptions} options
* @param {object} powers
* @param {Pick & Pick} powers.fs
* @param {import('path')['resolve']} powers.pathResolve
* @param {typeof import('@agoric/swing-store')['importSwingStore']} [powers.importSwingStore]
+ * @param {typeof import('@agoric/swing-store')['openSwingStore']} [powers.openSwingStore]
* @param {null | ((...args: any[]) => void)} [powers.log]
+ * @returns {Promise<void>}
*/
export const performStateSyncImport = async (
- { stateDir, exportDir, blockHeight, includeHistorical },
+ { stateDir, exportDir, blockHeight, exportDataMode = 'all', artifactMode },
{
fs: { createReadStream, readFile },
pathResolve,
importSwingStore: importDB = importSwingStore,
+ openSwingStore: openDB = openSwingStore,
log = console.log,
},
) => {
@@ -48,37 +122,25 @@ export const performStateSyncImport = async (
const safeExportFileResolve = allegedRelativeFilename => {
const resolvedPath = pathResolve(exportDir, allegedRelativeFilename);
resolvedPath.startsWith(exportDir) ||
- Fail`Exported file ${allegedRelativeFilename} must be in export dir ${exportDir}`;
+ Fail`Exported file ${q(
+ allegedRelativeFilename,
+ )} must be in export dir ${q(exportDir)}`;
return resolvedPath;
};
- const manifestPath = safeExportFileResolve('export-manifest.json');
+ const manifestPath = safeExportFileResolve(ExportManifestFileName);
  /** @type {Readonly<import('./export-kernel-db.js').StateSyncManifest>} */
const manifest = await readFile(manifestPath, { encoding: 'utf-8' }).then(
data => JSON.parse(data),
);
if (blockHeight !== undefined && manifest.blockHeight !== blockHeight) {
- Fail`State-sync manifest for unexpected block height ${manifest.blockHeight} (expected ${blockHeight})`;
- }
-
- if (!manifest.data) {
- throw Fail`State-sync manifest missing export data`;
- }
-
- if (!manifest.artifacts) {
- throw Fail`State-sync manifest missing required artifacts`;
+ Fail`State-sync manifest for unexpected block height ${q(
+ manifest.blockHeight,
+ )} (expected ${q(blockHeight)})`;
}
- const artifacts = harden(Object.fromEntries(manifest.artifacts));
-
- if (
- includeHistorical &&
- manifest.mode !== 'archival' &&
- manifest.mode !== 'debug'
- ) {
- throw Fail`State-sync manifest missing historical artifacts`;
- }
+ const artifacts = harden(Object.fromEntries(manifest.artifacts || []));
// Represent the data in `exportDir` as a SwingSetExporter object.
/** @type {import('@agoric/swing-store').SwingStoreExporter} */
@@ -109,7 +171,7 @@ export const performStateSyncImport = async (
log?.(`importing artifact ${name}`);
const fileName = artifacts[name];
if (!fileName) {
- Fail`invalid artifact ${name}`;
+ Fail`invalid artifact ${q(name)}`;
}
const stream = createReadStream(safeExportFileResolve(fileName));
yield* stream;
@@ -122,13 +184,47 @@ export const performStateSyncImport = async (
},
});
- const swingstore = await importDB(exporter, stateDir, { includeHistorical });
+ if (exportDataMode === 'all') {
+ const importOptions = checkAndGetImportSwingStoreOptions(
+ { artifactMode, exportDataMode },
+ manifest,
+ );
+
+ const swingstore = await importDB(exporter, stateDir, importOptions);
+
+ const { hostStorage } = swingstore;
+
+ hostStorage.kvStore.set('host.height', String(manifest.blockHeight));
+ await hostStorage.commit();
+ await hostStorage.close();
+ } else if (exportDataMode === 'repair-metadata') {
+ blockHeight !== 0 || Fail`repair metadata requires a block height`;
+
+ manifest.data || Fail`State-sync manifest missing export data`;
+
+ artifactMode === 'none' ||
+ Fail`Cannot restore artifacts while repairing metadata`;
+
+ const { hostStorage } = openDB(stateDir);
- const { hostStorage } = swingstore;
+ const savedBlockHeight =
+ Number(hostStorage.kvStore.get('host.height')) || 0;
- hostStorage.kvStore.set('host.height', String(manifest.blockHeight));
- await hostStorage.commit();
- await hostStorage.close();
+ if (blockHeight !== savedBlockHeight) {
+ throw Fail`block height doesn't match. requested=${q(
+ blockHeight,
+ )}, current=${q(savedBlockHeight)}`;
+ }
+
+ await hostStorage.repairMetadata(exporter);
+
+ await hostStorage.commit();
+ await hostStorage.close();
+ } else if (exportDataMode === 'skip') {
+ throw Fail`Repopulation of artifacts not yet supported`;
+ } else {
+    throw Fail`Unknown export-data-mode ${q(exportDataMode)}`;
+ }
};
/**
@@ -163,9 +259,20 @@ export const main = async (
/** @type {string} */ (processValue.getFlag('export-dir', '.')),
);
- const includeHistorical = processValue.getBoolean({
- flagName: 'include-historical',
- });
+ const artifactMode =
+ /** @type {import('./export-kernel-db.js').SwingStoreArtifactMode | undefined} */ (
+ processValue.getFlag('artifact-mode')
+ );
+ checkArtifactMode(artifactMode);
+
+ const exportDataMode = processValue.getFlag('export-data-mode');
+ checkExportDataMode(exportDataMode, true);
+
+ if (
+ processValue.getBoolean({ flagName: 'include-historical' }) !== undefined
+ ) {
+ throw Fail`deprecated "include-historical" options, use "artifact-mode" instead`;
+ }
const checkBlockHeight = processValue.getInteger({
flagName: 'check-block-height',
@@ -180,7 +287,8 @@ export const main = async (
stateDir,
exportDir,
blockHeight: checkBlockHeight,
- includeHistorical,
+ artifactMode,
+ exportDataMode,
},
{
fs,
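
Putting the new importer options together, a hedged usage sketch of the `repair-metadata` path; the directory paths and block height are placeholders, and the importer itself enforces that the height matches the DB's saved `host.height` and that `artifactMode` is `'none'`:

```js
import fs from 'fs';
import fsPromises from 'fs/promises';
import path from 'path';
import { performStateSyncImport } from './import-kernel-db.js';

await performStateSyncImport(
  {
    stateDir: '/tmp/agoric-state', // placeholder: existing swing-store to repair
    exportDir: '/tmp/state-sync',  // placeholder: must contain export-manifest.json
    blockHeight: 12345,            // placeholder: must equal the DB's host.height
    exportDataMode: 'repair-metadata',
    artifactMode: 'none',          // artifacts may not be restored while repairing
  },
  {
    fs: { ...fs, ...fsPromises },
    pathResolve: path.resolve,
    log: console.log,
  },
);
```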
diff --git a/packages/cosmic-swingset/src/launch-chain.js b/packages/cosmic-swingset/src/launch-chain.js
index 0699c39bba7..f7e1531525f 100644
--- a/packages/cosmic-swingset/src/launch-chain.js
+++ b/packages/cosmic-swingset/src/launch-chain.js
@@ -673,14 +673,9 @@ export async function launch({
// Handle block related actions
// Some actions that are integration specific may be handled by the caller
- // For example COSMOS_SNAPSHOT and AG_COSMOS_INIT are handled in chain-main.js
- async function blockingSend(action) {
- if (decohered) {
- throw decohered;
- }
-
- await afterCommitWorkDone;
-
+ // For example SWING_STORE_EXPORT is handled in chain-main.js
+ async function doBlockingSend(action) {
+ await null;
// blockManagerConsole.warn(
// 'FIGME: blockHeight',
// action.blockHeight,
@@ -688,38 +683,55 @@ export async function launch({
// action.type,
// );
switch (action.type) {
- case ActionType.BOOTSTRAP_BLOCK: {
+ case ActionType.AG_COSMOS_INIT: {
+ const { isBootstrap, upgradePlan, blockTime } = action;
// This only runs for the very first block on the chain.
- const { blockTime } = action;
- verboseBlocks && blockManagerConsole.info('block bootstrap');
- if (savedHeight !== 0) {
- throw Error(`Cannot run a bootstrap block at height ${savedHeight}`);
+ if (isBootstrap) {
+ verboseBlocks && blockManagerConsole.info('block bootstrap');
+ savedHeight === 0 ||
+ Fail`Cannot run a bootstrap block at height ${savedHeight}`;
+ const blockHeight = 0;
+ const runNum = 0;
+ controller.writeSlogObject({
+ type: 'cosmic-swingset-bootstrap-block-start',
+ blockTime,
+ });
+ controller.writeSlogObject({
+ type: 'cosmic-swingset-run-start',
+ blockHeight,
+ runNum,
+ });
+ await processAction(action.type, async () =>
+ bootstrapBlock(blockHeight, blockTime),
+ );
+ controller.writeSlogObject({
+ type: 'cosmic-swingset-run-finish',
+ blockHeight,
+ runNum,
+ });
+ controller.writeSlogObject({
+ type: 'cosmic-swingset-bootstrap-block-finish',
+ blockTime,
+ });
}
- const blockHeight = 0;
- const runNum = 0;
- controller.writeSlogObject({
- type: 'cosmic-swingset-bootstrap-block-start',
- blockTime,
- });
- controller.writeSlogObject({
- type: 'cosmic-swingset-run-start',
- blockHeight,
- runNum,
- });
- await processAction(action.type, async () =>
- bootstrapBlock(blockHeight, blockTime),
- );
- controller.writeSlogObject({
- type: 'cosmic-swingset-run-finish',
- blockHeight,
- runNum,
- });
- await pendingSwingStoreExport;
- controller.writeSlogObject({
- type: 'cosmic-swingset-bootstrap-block-finish',
- blockTime,
- });
- return undefined;
+ if (upgradePlan) {
+ const blockHeight = upgradePlan.height;
+ if (blockNeedsExecution(blockHeight)) {
+ controller.writeSlogObject({
+ type: 'cosmic-swingset-upgrade-start',
+ blockHeight,
+ blockTime,
+ upgradePlan,
+ });
+ // TODO: Process upgrade plan
+ controller.writeSlogObject({
+ type: 'cosmic-swingset-upgrade-finish',
+ blockHeight,
+ blockTime,
+ });
+ }
+ }
+ return true;
}
case ActionType.COMMIT_BLOCK: {
@@ -826,7 +838,7 @@ export async function launch({
// We write out our on-chain state as a number of chainSends.
const start = Date.now();
- await Promise.all([saveChainState(), pendingSwingStoreExport]);
+ await saveChainState();
chainTime = Date.now() - start;
// Advance our saved state variables.
@@ -849,6 +861,15 @@ export async function launch({
}
}
}
+ async function blockingSend(action) {
+ if (decohered) {
+ throw decohered;
+ }
+
+ await afterCommitWorkDone;
+
+ return doBlockingSend(action).finally(() => pendingSwingStoreExport);
+ }
async function shutdown() {
return controller.shutdown();
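
For reference, the consolidated `AG_COSMOS_INIT` action that `doBlockingSend` now handles covers both the one-time bootstrap and an optional upgrade plan. Illustrative shapes with fake values, using only fields named in this diff:

```js
// Bootstrap (previously the separate BOOTSTRAP_BLOCK action):
const bootstrapInit = {
  type: 'AG_COSMOS_INIT',
  blockTime: 1690000000, // fake timestamp
  isBootstrap: true,     // run the one-time bootstrap block at height 0
};

// Post-upgrade restart: the plan is acted on only if
// blockNeedsExecution(upgradePlan.height) reports the block as unseen.
const upgradeInit = {
  type: 'AG_COSMOS_INIT',
  blockTime: 1690000600, // fake timestamp
  isBootstrap: false,
  upgradePlan: { height: 12345 },
};
```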
diff --git a/packages/cosmic-swingset/src/sim-chain.js b/packages/cosmic-swingset/src/sim-chain.js
index f1e5b0e8f71..685c4674b37 100644
--- a/packages/cosmic-swingset/src/sim-chain.js
+++ b/packages/cosmic-swingset/src/sim-chain.js
@@ -202,11 +202,12 @@ export async function connectToFakeChain(basedir, GCI, delay, inbound) {
return;
}
// The before-first-block is special... do it now.
- // This emulates what x/swingset does to run a BOOTSTRAP_BLOCK
+ // This emulates what x/swingset does when bootstrapping
// before continuing with the real initialHeight.
await blockingSend({
- type: 'BOOTSTRAP_BLOCK',
+ type: 'AG_COSMOS_INIT',
blockTime: scaleBlockTime(Date.now()),
+ isBootstrap: true,
});
blockHeight = initialHeight;
};
diff --git a/packages/cosmic-swingset/test/scenario2.js b/packages/cosmic-swingset/test/scenario2.js
index 1a54b80e9af..bc5656da90e 100644
--- a/packages/cosmic-swingset/test/scenario2.js
+++ b/packages/cosmic-swingset/test/scenario2.js
@@ -80,7 +80,10 @@ export const makeScenario2 = ({ pspawnMake, pspawnAgd, log }) => {
return runMake(['scenario2-run-rosetta-ci'], { stdio: onlyStderr });
},
export: () =>
- pspawnAgd(['export', '--home=t1/n0'], { stdio: onlyStderr }).exit,
+ pspawnAgd(
+ ['export', '--home=t1/n0', '--export-dir=t1/n0/genesis-export'],
+ { stdio: onlyStderr },
+ ).exit,
});
};
diff --git a/packages/deployment/Dockerfile b/packages/deployment/Dockerfile
index 75925e15b8c..88bbde7a0c2 100644
--- a/packages/deployment/Dockerfile
+++ b/packages/deployment/Dockerfile
@@ -2,22 +2,13 @@ ARG TAG=latest
ARG REPO=agoric/agoric-sdk
ARG REGISTRY=ghcr.io
-# FIXME: Journalbeat compilation is currently broken, but non-essential.
-# Removed from the build.
-# FROM golang:1.20 AS go-build
-
-# WORKDIR /usr/src/journalbeat
-# RUN apt-get update -y && apt-get install -y libsystemd-dev
-# RUN go get github.com/mheese/journalbeat
-
FROM $REGISTRY/$REPO:$TAG
+COPY ./scripts/install-deps.sh /usr/src/agoric-sdk/packages/deployment/scripts/
RUN /usr/src/agoric-sdk/packages/deployment/scripts/install-deps.sh
-# # Copy journalbeat for logging support
-# COPY --from=go-build /go/bin/journalbeat /usr/local/bin/
-
WORKDIR /usr/src/agoric-sdk/packages/deployment
+COPY . .
RUN ln -sf $PWD/bin/ag-setup-cosmos /usr/local/bin/ag-setup-cosmos
WORKDIR /data/chains
diff --git a/packages/deployment/Dockerfile.sdk b/packages/deployment/Dockerfile.sdk
index 3298108fdb0..71b0ad65307 100644
--- a/packages/deployment/Dockerfile.sdk
+++ b/packages/deployment/Dockerfile.sdk
@@ -1,9 +1,9 @@
###########################
# The golang build container
-FROM golang:1.20 as cosmos-go
+FROM golang:1.20-bullseye as cosmos-go
WORKDIR /usr/src/agoric-sdk/golang/cosmos
-COPY go.mod go.sum ../../
+COPY golang/cosmos/go.mod golang/cosmos/go.sum ./
RUN go mod download
COPY golang/cosmos ./
@@ -16,7 +16,7 @@ RUN make GIT_COMMIT="$GIT_COMMIT" GIT_REVISION="$GIT_REVISION" MOD_READONLY= com
# OTEL fetch
# from https://github.com/open-telemetry/opentelemetry-collector-releases/releases
-FROM node:lts AS otel
+FROM node:18-bullseye AS otel
ARG OTEL_VERSION=0.48.0
ARG OTEL_HASH_arm64=846852f4c34f6e494abe202402fdf1d17e2ec3c7a7f96985b6011126ae553249
@@ -32,7 +32,7 @@ RUN set -eux; \
###############################
# The js build container
-FROM node:lts AS build-js
+FROM node:18-bullseye AS build-js
# When changing/adding entries here, make sure to search the whole project for
# `@@AGORIC_DOCKER_SUBMODULES@@`
@@ -62,13 +62,13 @@ RUN \
yarn build
# Remove dev dependencies.
-RUN rm -rf packages/xsnap/moddable
+RUN rm -rf packages/xsnap/moddable packages/xsnap/xsnap-native/build/tmp
# FIXME: This causes bundling differences. https://github.com/endojs/endo/issues/919
# RUN yarn install --frozen-lockfile --production --network-timeout 100000
###############################
# The install container.
-FROM node:lts AS install
+FROM node:18-bullseye AS install
# Install some conveniences.
RUN apt-get --allow-releaseinfo-change update && apt-get install -y vim jq less && apt-get clean -y
@@ -77,7 +77,6 @@ WORKDIR /usr/src
COPY --from=build-js /usr/src/agoric-sdk agoric-sdk
COPY --from=otel /otelcol-contrib /usr/local/bin/
RUN ln -s /usr/src/agoric-sdk/bin/agd /usr/local/bin/
-RUN ln -s /usr/src/agoric-sdk/bin/agd /usr/local/bin/ag-cosmos-helper
RUN ln -s /usr/src/agoric-sdk/packages/cosmic-swingset/bin/ag-chain-cosmos /usr/local/bin/
RUN ln -s /usr/src/agoric-sdk/packages/solo/bin/ag-solo /usr/local/bin/
RUN ln -s /usr/src/agoric-sdk/packages/agoric-cli/bin/agoric /usr/local/bin/
@@ -87,11 +86,11 @@ ARG GIT_REVISION=unknown
RUN echo "$GIT_REVISION" > /usr/src/agoric-sdk/packages/solo/public/git-revision.txt
# Compatibility links for older containers.
-RUN ln -s /data /agoric
-RUN ln -s /data/solo /usr/src/agoric-sdk/packages/cosmic-swingset/solo
-RUN ln -s /data/chains /usr/src/agoric-sdk/packages/cosmic-swingset/chains
+RUN ln -sf /data /agoric
+RUN ln -sf /data/solo /usr/src/agoric-sdk/packages/cosmic-swingset/solo
+RUN ln -sf /data/chains /usr/src/agoric-sdk/packages/cosmic-swingset/chains
-RUN /usr/src/agoric-sdk/packages/deployment/scripts/smoketest-binaries.sh
+RUN /usr/src/agoric-sdk/scripts/smoketest-binaries.sh
# By default, run the daemon with specified arguments.
WORKDIR /root
diff --git a/packages/deployment/Makefile b/packages/deployment/Makefile
index d718079ea6b..0f9316a9929 100644
--- a/packages/deployment/Makefile
+++ b/packages/deployment/Makefile
@@ -5,7 +5,7 @@ VERSION := $(shell node -e 'console.log(require("../../package.json").version??"
TAG := $(if $(VERSION),$(VERSION),latest)
GIT_REVISION := $(shell hash=$$(git rev-parse --short HEAD); \
- dirty=`git diff --quiet || echo -dirty`; \
+ dirty=`git diff --quiet 2>/dev/null || echo -dirty`; \
echo "$$hash$$dirty")
# Don't push alpha tags as ":$(TAG)".
diff --git a/packages/deployment/ansible/roles/copy/tasks/main.yml b/packages/deployment/ansible/roles/copy/tasks/main.yml
index 217bc184635..75ab448daab 100644
--- a/packages/deployment/ansible/roles/copy/tasks/main.yml
+++ b/packages/deployment/ansible/roles/copy/tasks/main.yml
@@ -7,15 +7,14 @@
use_ssh_args: true
when: HELPER_BINARY is defined
-- name: Synchronize Agoric SDK
+- name: 'Synchronize Agoric SDK {{ AGORIC_SDK }}'
synchronize:
src: '{{ AGORIC_SDK }}/'
dest: '/usr/src/agoric-sdk/'
- # dirs: yes
delete: yes
- rsync_opts:
- - "--partial-dir=/usr/src/agoric-sdk/.rsync-partial/{{ hostvars[inventory_hostname]['ansible_host'] | default(inventory_hostname) }}"
- - '--exclude=.rsync-partial'
+ #rsync_opts:
+ # - "--partial-dir=/usr/src/agoric-sdk/.rsync-partial/{{ hostvars[inventory_hostname]['ansible_host'] | default(inventory_hostname) }}"
+ # - '--exclude=.rsync-partial'
# checksum: yes
mode: push
use_ssh_args: true
diff --git a/packages/deployment/docker/ag-setup-cosmos b/packages/deployment/docker/ag-setup-cosmos
index 8bf3694c0e3..7404290420e 100755
--- a/packages/deployment/docker/ag-setup-cosmos
+++ b/packages/deployment/docker/ag-setup-cosmos
@@ -1,5 +1,4 @@
#! /bin/sh
-PORT=26657
NETWORK_NAME=${NETWORK_NAME-agoric}
SETUP_HOME=${SETUP_HOME-$NETWORK_NAME}
IMAGE=ghcr.io/agoric/cosmic-swingset-setup:${TAG-latest}
@@ -21,10 +20,18 @@ show-*)
TTY=-i
;;
esac
+setup_volume=
+if test -f "$PWD/$SETUP_HOME/setup/deployment.json"; then
+ setup_volume=--volume="$PWD/$SETUP_HOME/setup:/data/chains/$SETUP_HOME"
+elif test -f deployment.json; then
+ setup_volume=--volume="$PWD:/data/chains/$SETUP_HOME"
+fi
exec docker run --rm $TTY $FLAGS \
--volume=ag-setup-cosmos-chains:/data/chains \
--volume=ag-chain-cosmos-state:/root/.ag-chain-cosmos \
--volume=/var/run/docker.sock:/var/run/docker.sock \
+ $setup_volume \
+ --env AGD_HOME=/root/.ag-chain-cosmos \
--env AG_SETUP_COSMOS_NAME=$NETWORK_NAME \
--env AG_SETUP_COSMOS_HOME=$SETUP_HOME \
--env AG_SETUP_COSMOS_BACKEND=$AG_SETUP_COSMOS_BACKEND \
diff --git a/packages/deployment/docker/integration-test.sh b/packages/deployment/docker/integration-test.sh
new file mode 100755
index 00000000000..663eb9a4972
--- /dev/null
+++ b/packages/deployment/docker/integration-test.sh
@@ -0,0 +1,38 @@
+#! /bin/sh
+NETWORK_NAME=${NETWORK_NAME-localtest}
+SETUP_HOME=${SETUP_HOME-$NETWORK_NAME}
+IMAGE=ghcr.io/agoric/cosmic-swingset-setup:${TAG-latest}
+TTY=-i
+test -t 0 && test -t 1 && TTY=-it
+FLAGS=--entrypoint=/bin/bash
+case "$1" in
+--pull)
+ shift
+ docker pull "$IMAGE"
+ ;;
+esac
+case "$1" in
+shell) shift ;;
+*)
+ set /usr/src/agoric-sdk/packages/deployment/scripts/integration-test.sh ${1+"$@"}
+ ;;
+esac
+
+setup_volume=
+if test -f "$PWD/$SETUP_HOME/setup/deployment.json"; then
+ setup_volume=--volume="$PWD/$SETUP_HOME/setup:/data/chains/$SETUP_HOME"
+elif test -f deployment.json; then
+ setup_volume=--volume="$PWD:/data/chains/$SETUP_HOME"
+fi
+if test -n "$LOADGEN"; then
+ setup_volume="$setup_volume --volume=$LOADGEN:/usr/src/testnet-load-generator"
+fi
+exec docker run --rm $TTY $FLAGS \
+ --volume=ag-setup-cosmos-chains:/data/chains \
+ --volume=ag-chain-cosmos-state:/root/.ag-chain-cosmos \
+ --volume=/var/run/docker.sock:/var/run/docker.sock \
+ $setup_volume \
+ --env AGD_HOME=/root/.ag-chain-cosmos \
+ --env NETWORK_NAME=$NETWORK_NAME \
+ -w /data/chains \
+ "$IMAGE" ${1+"$@"}
diff --git a/packages/deployment/scripts/install-deps.sh b/packages/deployment/scripts/install-deps.sh
index c63b88853b8..7221748619c 100755
--- a/packages/deployment/scripts/install-deps.sh
+++ b/packages/deployment/scripts/install-deps.sh
@@ -46,6 +46,9 @@ case $VERSION_CODENAME in
bullseye)
VERSION_CODENAME=focal
;;
+ bookworm)
+ VERSION_CODENAME=jammy
+ ;;
esac
# Install Ansible.
diff --git a/packages/deployment/scripts/integration-test.sh b/packages/deployment/scripts/integration-test.sh
index 542322aa13b..59025808a07 100755
--- a/packages/deployment/scripts/integration-test.sh
+++ b/packages/deployment/scripts/integration-test.sh
@@ -7,6 +7,28 @@ thisdir=$(cd "$(dirname -- "$real0")" > /dev/null && pwd -P)
export GOBIN="$thisdir/../../../golang/cosmos/build"
export NETWORK_NAME=${NETWORK_NAME-localtest}
+SDK_SRC=${SDK_SRC-$(cd "$thisdir/../../.." > /dev/null && pwd -P)}
+
+LOADGEN=${LOADGEN-""}
+if [ -z "$LOADGEN" ] || [ "x$LOADGEN" = "x1" ]; then
+ for dir in "$SDK_SRC/../testnet-load-generator" /usr/src/testnet-load-generator; do
+ if [ -d "$dir" ]; then
+ LOADGEN="$dir"
+ break
+ fi
+ done
+fi
+
+if [ -d "$LOADGEN" ]; then
+ # Get the absolute path.
+ LOADGEN=$(cd "$LOADGEN" > /dev/null && pwd -P)
+elif [ -n "$LOADGEN" ]; then
+ echo "Cannot find loadgen (\$LOADGEN=$LOADGEN)" >&2
+ exit 2
+else
+ echo "Running chain without loadgen" >&2
+fi
+
SOLO_ADDR=
VAT_CONFIG=
RESULTSDIR=${RESULTSDIR-"$NETWORK_NAME/results"}
@@ -21,19 +43,16 @@ cd "$NETWORK_NAME/setup"
export AG_SETUP_COSMOS_HOME=${AG_SETUP_COSMOS_HOME-$PWD}
export AG_SETUP_COSMOS_STATE_SYNC_INTERVAL=20
-AGORIC_SDK_PATH=${AGORIC_SDK_PATH-$(cd "$thisdir/../../.." > /dev/null && pwd -P)}
-if [ -d /usr/src/testnet-load-generator ]
+if [ -n "$LOADGEN" ]
then
- solodir=/usr/src/testnet-load-generator/_agstate/agoric-servers/testnet-8000
+ solodir="$LOADGEN"/_agstate/agoric-servers/testnet-8000
"$thisdir/../../solo/bin/ag-solo" init "$solodir" --webport=8000
SOLO_ADDR=$(cat "$solodir/ag-cosmos-helper-address")
VAT_CONFIG="@agoric/vats/decentral-demo-config.json"
fi
-# Speed up the docker deployment by pre-mounting /usr/src/agoric-sdk.
-DOCKER_VOLUMES="$AGORIC_SDK_PATH:/usr/src/agoric-sdk" \
- "$thisdir/docker-deployment.cjs" > deployment.json
+"$thisdir/docker-deployment.cjs" > deployment.json
# Set up the network from our above deployment.json.
"$thisdir/setup.sh" init --noninteractive
@@ -44,17 +63,18 @@ VAULT_FACTORY_CONTROLLER_ADDR="$SOLO_ADDR" \
CHAIN_BOOTSTRAP_VAT_CONFIG="$VAT_CONFIG" \
"$thisdir/setup.sh" bootstrap ${1+"$@"}
-if [ -d /usr/src/testnet-load-generator ]
+if [ -n "$LOADGEN" ]
then
- /usr/src/agoric-sdk/packages/deployment/scripts/setup.sh show-config > "$RESULTSDIR/network-config"
+ "$SDK_SRC/packages/deployment/scripts/setup.sh" show-config > "$RESULTSDIR/network-config"
cp ag-chain-cosmos/data/genesis.json "$RESULTSDIR/genesis.json"
cp "$AG_SETUP_COSMOS_HOME/ag-chain-cosmos/data/genesis.json" "$RESULTSDIR/genesis.json"
- cd /usr/src/testnet-load-generator
- SOLO_COINS=40000000000uist \
+ cd "$LOADGEN"
+ SOLO_COINS=40000000000uist PATH="$thisdir/../bin:$PATH" \
"$AG_SETUP_COSMOS_HOME/faucet-helper.sh" add-egress loadgen "$SOLO_ADDR"
- SLOGSENDER=@agoric/telemetry/src/otel-trace.js SOLO_SLOGSENDER= \
+ SLOGSENDER=@agoric/telemetry/src/otel-trace.js SOLO_SLOGSENDER="" \
SLOGSENDER_FAIL_ON_ERROR=1 SLOGSENDER_AGENT=process \
- SDK_BUILD=0 MUST_USE_PUBLISH_BUNDLE=1 SDK_SRC=/usr/src/agoric-sdk OUTPUT_DIR="$RESULTSDIR" ./start.sh \
+ AG_CHAIN_COSMOS_HOME=$HOME/.agoric \
+ SDK_BUILD=0 MUST_USE_PUBLISH_BUNDLE=1 SDK_SRC=$SDK_SRC OUTPUT_DIR="$RESULTSDIR" ./start.sh \
--no-stage.save-storage \
--stages=3 --stage.duration=10 --stage.loadgen.cycles=4 \
--stage.loadgen.faucet.interval=6 --stage.loadgen.faucet.limit=4 \
diff --git a/packages/deployment/scripts/setup.sh b/packages/deployment/scripts/setup.sh
index 8be0a710a20..8dcb79466ea 100755
--- a/packages/deployment/scripts/setup.sh
+++ b/packages/deployment/scripts/setup.sh
@@ -11,8 +11,7 @@ export NETWORK_NAME
export AG_SETUP_COSMOS_NAME=$NETWORK_NAME
export AG_SETUP_COSMOS_HOME=${AG_SETUP_COSMOS_HOME-"$PWD/$NETWORK_NAME/setup"}
-# Put our bindir into the PATH so that children can find ag-setup-cosmos.
+# Put GOBIN into the PATH so that children can find ag-setup-cosmos.
export PATH="$thisdir/../bin:${GOBIN-${GOPATH-/usr/local}/bin}:$PATH"
-# Run our setup command.
exec ag-setup-cosmos ${1+"$@"}
diff --git a/packages/deployment/src/init.js b/packages/deployment/src/init.js
index 981de84cb7f..bea22fc5ceb 100644
--- a/packages/deployment/src/init.js
+++ b/packages/deployment/src/init.js
@@ -493,7 +493,7 @@ const doInit =
config.OFFSETS[PLACEMENT] = offset;
}
Object.values(ROLE_INSTANCE).some(i => i > 0) ||
- Fail`Aborting due to no nodes configured!`;
+ Fail`Aborting due to no nodes configured! (${ROLE_INSTANCE})`;
await wr.createFile(
`vars.tf`,
diff --git a/packages/deployment/upgrade-test/Dockerfile b/packages/deployment/upgrade-test/Dockerfile
index 57468df48aa..6ef270a54e3 100644
--- a/packages/deployment/upgrade-test/Dockerfile
+++ b/packages/deployment/upgrade-test/Dockerfile
@@ -1,24 +1,25 @@
-ARG DEST_IMAGE=ghcr.io/agoric/agoric-sdk:latest
+ARG DEST_IMAGE=ghcr.io/agoric/agoric-sdk:dev
ARG BOOTSTRAP_MODE=main
# on agoric-upgrade-7-2, with upgrade to agoric-upgrade-8
FROM ghcr.io/agoric/ag0:agoric-upgrade-7-2 as agoric-upgrade-7-2
ARG BOOTSTRAP_MODE
-ENV UPGRADE_TO=agoric-upgrade-8 THIS_NAME=agoric-upgrade-7-2 BOOTSTRAP_MODE=${BOOTSTRAP_MODE}
+ARG UPGRADE_INFO_8
+ENV UPGRADE_TO=agoric-upgrade-8 UPGRADE_INFO=${UPGRADE_INFO_8} THIS_NAME=agoric-upgrade-7-2 BOOTSTRAP_MODE=${BOOTSTRAP_MODE}
RUN echo "${BOOTSTRAP_MODE}"
RUN mkdir -p /usr/src/agoric-sdk/upgrade-test-scripts
WORKDIR /usr/src/agoric-sdk/
-COPY ./*.sh ./upgrade-test-scripts/
+COPY ./start_ag0.sh ./upgrade-test-scripts/
+COPY ./bash_entrypoint.sh ./env_setup.sh ./start_to_to.sh ./upgrade-test-scripts/
SHELL ["/bin/bash", "-c"]
RUN . ./upgrade-test-scripts/start_ag0.sh
-ARG BOOTSTRAP_MODE
## this is agoric-upgrade-8 aka pismoA
FROM ghcr.io/agoric/agoric-sdk:29 as agoric-upgrade-8
ARG BOOTSTRAP_MODE
ENV THIS_NAME=agoric-upgrade-8 BOOTSTRAP_MODE=${BOOTSTRAP_MODE}
WORKDIR /usr/src/agoric-sdk/
-COPY ./*.sh ./upgrade-test-scripts/
+COPY ./bash_entrypoint.sh ./env_setup.sh ./start_to_to.sh ./upgrade-test-scripts/
COPY ./${THIS_NAME} ./upgrade-test-scripts/${THIS_NAME}/
COPY --from=agoric-upgrade-7-2 /root/.agoric /root/.agoric
RUN chmod +x ./upgrade-test-scripts/*.sh
@@ -28,11 +29,11 @@ RUN . ./upgrade-test-scripts/start_to_to.sh
ARG DEST_IMAGE
#this is agoric-upgrade-8-1 aka pismoB
FROM ghcr.io/agoric/agoric-sdk:30 as agoric-upgrade-8-1
-ARG BOOTSTRAP_MODE
-ENV THIS_NAME=agoric-upgrade-8-1 UPGRADE_TO=agoric-upgrade-9 BOOTSTRAP_MODE=${BOOTSTRAP_MODE}
+ARG BOOTSTRAP_MODE UPGRADE_INFO_9
+ENV THIS_NAME=agoric-upgrade-8-1 UPGRADE_TO=agoric-upgrade-9 UPGRADE_INFO=${UPGRADE_INFO_9} BOOTSTRAP_MODE=${BOOTSTRAP_MODE}
WORKDIR /usr/src/agoric-sdk/
-COPY ./*.sh ./upgrade-test-scripts/
+COPY ./bash_entrypoint.sh ./env_setup.sh ./start_to_to.sh ./upgrade-test-scripts/
COPY ./${THIS_NAME} ./upgrade-test-scripts/${THIS_NAME}/
COPY --from=agoric-upgrade-8 /root/.agoric /root/.agoric
RUN chmod +x ./upgrade-test-scripts/*.sh
@@ -42,11 +43,11 @@ RUN . ./upgrade-test-scripts/start_to_to.sh
ARG DEST_IMAGE
# this is agoric-upgrade-9 / pismoC with upgrade to agoric-upgrade-10
FROM ghcr.io/agoric/agoric-sdk:31 as agoric-upgrade-9
-ARG BOOTSTRAP_MODE
-ENV THIS_NAME=agoric-upgrade-9 UPGRADE_TO=agoric-upgrade-10 BOOTSTRAP_MODE=${BOOTSTRAP_MODE}
+ARG BOOTSTRAP_MODE UPGRADE_INFO_10
+ENV THIS_NAME=agoric-upgrade-9 UPGRADE_TO=agoric-upgrade-10 UPGRADE_INFO=${UPGRADE_INFO_10} BOOTSTRAP_MODE=${BOOTSTRAP_MODE}
WORKDIR /usr/src/agoric-sdk/
-COPY ./*.sh ./upgrade-test-scripts/
+COPY ./bash_entrypoint.sh ./env_setup.sh ./start_to_to.sh ./upgrade-test-scripts/
COPY ./${THIS_NAME} ./upgrade-test-scripts/${THIS_NAME}/
COPY --from=agoric-upgrade-8-1 /root/.agoric /root/.agoric
WORKDIR /usr/src/agoric-sdk/
@@ -57,18 +58,35 @@ RUN . ./upgrade-test-scripts/start_to_to.sh
ARG DEST_IMAGE
#this is agoric-upgrade-10 / vaults
-FROM ${DEST_IMAGE} as agoric-upgrade-10
+FROM ghcr.io/agoric/agoric-sdk:35 as agoric-upgrade-10
ARG BOOTSTRAP_MODE
ENV THIS_NAME=agoric-upgrade-10 BOOTSTRAP_MODE=${BOOTSTRAP_MODE}
WORKDIR /usr/src/agoric-sdk/
-COPY ./*.sh ./upgrade-test-scripts/
+COPY ./bash_entrypoint.sh ./env_setup.sh ./start_to_to.sh ./upgrade-test-scripts/
COPY ./${THIS_NAME} ./upgrade-test-scripts/${THIS_NAME}/
COPY --from=agoric-upgrade-9 /root/.agoric /root/.agoric
RUN chmod +x ./upgrade-test-scripts/*.sh
SHELL ["/bin/bash", "-c"]
RUN . ./upgrade-test-scripts/start_to_to.sh
+ARG DEST_IMAGE
+#this is agoric-upgrade-10 upgrading to 11
+#it's a separate target because agoric-upgrade-10 takes so long to test
+FROM ghcr.io/agoric/agoric-sdk:35 as agoric-upgrade-10-to-11
+# This default UPGRADE_INFO_11 is to test core proposals like the network vat.
+# TODO: Maybe replace with a Zoe core proposal, or remove when other paths test it.
+ARG BOOTSTRAP_MODE UPGRADE_INFO_11='{"coreProposals":["@agoric/vats/scripts/init-network.js"]}'
+ENV THIS_NAME=agoric-upgrade-10-to-11 UPGRADE_TO=agoric-upgrade-11 UPGRADE_INFO=${UPGRADE_INFO_11} BOOTSTRAP_MODE=${BOOTSTRAP_MODE}
+
+WORKDIR /usr/src/agoric-sdk/
+COPY ./bash_entrypoint.sh ./env_setup.sh ./start_to_to.sh ./upgrade-test-scripts/
+COPY ./${THIS_NAME} ./upgrade-test-scripts/${THIS_NAME}/
+COPY --from=agoric-upgrade-10 /root/.agoric /root/.agoric
+RUN chmod +x ./upgrade-test-scripts/*.sh
+SHELL ["/bin/bash", "-c"]
+RUN . ./upgrade-test-scripts/start_to_to.sh
+
ARG DEST_IMAGE
#this is agoric-upgrade-11 / vaults+1
FROM ${DEST_IMAGE} as agoric-upgrade-11
@@ -77,9 +95,9 @@ ENV THIS_NAME=agoric-upgrade-11 BOOTSTRAP_MODE=${BOOTSTRAP_MODE}
# this boot doesn't need an upgrade
WORKDIR /usr/src/agoric-sdk/
-COPY ./*.sh ./upgrade-test-scripts/
+COPY ./bash_entrypoint.sh ./env_setup.sh ./start_to_to.sh ./upgrade-test-scripts/
COPY ./${THIS_NAME} ./upgrade-test-scripts/${THIS_NAME}/
-COPY --from=agoric-upgrade-10 /root/.agoric /root/.agoric
+COPY --from=agoric-upgrade-10-to-11 /root/.agoric /root/.agoric
RUN apt install -y tmux
SHELL ["/bin/bash", "-c"]
RUN chmod +x ./upgrade-test-scripts/*.sh
diff --git a/packages/deployment/upgrade-test/Makefile b/packages/deployment/upgrade-test/Makefile
index 33e0b3199aa..8514195248f 100644
--- a/packages/deployment/upgrade-test/Makefile
+++ b/packages/deployment/upgrade-test/Makefile
@@ -1,22 +1,60 @@
REPOSITORY = agoric/upgrade-test
-dockerLabel = latest
-ifdef TARGET
-buildTargetFlag = --target $(TARGET)
-dockerLabel = $(TARGET)
+# use :dev (latest prerelease image) unless we build local sdk
+DEST_IMAGE ?= $(if $(findstring local_sdk,$(MAKECMDGOALS)),ghcr.io/agoric/agoric-sdk:latest,ghcr.io/agoric/agoric-sdk:dev)
+BOOTSTRAP_MODE?=main
+TARGET?=agoric-upgrade-11
+dockerLabel?=$(TARGET)
+ifdef TMUX_CC
+ tmuxCC=1
+else
+ tmuxCC=0
endif
-@echo buildTargetFlag: $(buildTargetFlag)
+@echo target: $(TARGET)
local_sdk:
(cd ../ && make docker-build-sdk)
+BUILD = docker build --progress=plain $(BUILD_OPTS) \
+ --build-arg BOOTSTRAP_MODE=$(BOOTSTRAP_MODE) --build-arg DEST_IMAGE=$(DEST_IMAGE) \
+ -f Dockerfile upgrade-test-scripts
+
+agoric-upgrade-7-2:
+ $(BUILD) --target agoric-upgrade-7-2 -t $(REPOSITORY):agoric-upgrade-7-2
+
+agoric-upgrade-8: agoric-upgrade-7-2
+ $(BUILD) --target agoric-upgrade-8 -t $(REPOSITORY):agoric-upgrade-8
+
+agoric-upgrade-8-1: agoric-upgrade-8
+ $(BUILD) --target agoric-upgrade-8-1 -t $(REPOSITORY):agoric-upgrade-8-1
+
+agoric-upgrade-9: agoric-upgrade-8-1
+ $(BUILD) --target agoric-upgrade-9 -t $(REPOSITORY):agoric-upgrade-9
+
+agoric-upgrade-10: agoric-upgrade-9
+ $(BUILD) --target agoric-upgrade-10 -t $(REPOSITORY):agoric-upgrade-10
+
+agoric-upgrade-10-to-11: agoric-upgrade-10
+ $(BUILD) --target agoric-upgrade-10-to-11 -t $(REPOSITORY):agoric-upgrade-10-to-11
+
+agoric-upgrade-11: agoric-upgrade-10-to-11
+ $(BUILD) --target agoric-upgrade-11 -t $(REPOSITORY):agoric-upgrade-11
+
# build main bootstrap
-build:
- docker build --build-arg BOOTSTRAP_MODE=main --progress=plain $(buildTargetFlag) -t $(REPOSITORY):$(dockerLabel) -f Dockerfile upgrade-test-scripts
+build: $(TARGET)
# build test bootstrap
-build_test:
- docker build --build-arg BOOTSTRAP_MODE=test --progress=plain $(buildTargetFlag) -t $(REPOSITORY):$(dockerLabel) -f Dockerfile upgrade-test-scripts
+build_test: BOOTSTRAP_MODE=test
+build_test: $(TARGET)
+
+DEBUG ?= SwingSet:ls,SwingSet:vat
+RUN = docker run --rm -it \
+ -p 26656:26656 -p 26657:26657 -p 1317:1317 \
+ -v "$${PWD}:/workspace" \
+ -e "DEST=1" -e "DEBUG=$(DEBUG)"
run:
- docker run --rm -it -e "DEST=1" -p 26656:26656 -p 26657:26657 -p 1317:1317 --entrypoint "/usr/src/agoric-sdk/upgrade-test-scripts/start_to_to.sh" -v "$${PWD}:/workspace" $(REPOSITORY):$(dockerLabel)
+ $(RUN) -e "TMUX_USE_CC=$(tmuxCC)" \
+ --entrypoint /usr/src/agoric-sdk/upgrade-test-scripts/start_to_to.sh \
+ $(REPOSITORY):$(dockerLabel)
+.PHONY: local_sdk agoric-upgrade-7-2 agoric-upgrade-8 agoric-upgrade-8-1 agoric-upgrade-9 agoric-upgrade-10 agoric-upgrade-10-to-11 agoric-upgrade-11 build build_test run
diff --git a/packages/deployment/upgrade-test/Readme.md b/packages/deployment/upgrade-test/Readme.md
index 9a66a76f604..3b3e233ecb4 100644
--- a/packages/deployment/upgrade-test/Readme.md
+++ b/packages/deployment/upgrade-test/Readme.md
@@ -20,12 +20,40 @@ This will build all previous upgrades and upgrade each one.
make build
```
+By default, pre-releases use the latest image tagged `dev` in our [container repository](https://github.com/agoric/agoric-sdk/pkgs/container/agoric-sdk). To use
+a specific build:
+
+```shell
+docker pull ghcr.io/agoric/agoric-sdk:20230515033839-e56ae7
+DEST_IMAGE=ghcr.io/agoric/agoric-sdk:20230515033839-e56ae7 make build
+```
+To use a build based on local changes:
+```shell
+# build ghcr.io/agoric/agoric-sdk:latest
+make local_sdk build
+# or DEST_IMAGE=ghcr.io/agoric/agoric-sdk:latest make build
+```
+
**To run the latest upgrade interactively**
```shell
make run
```
+This will start a container running tmux: window `0` shows the chain logs (`agd start`) and window `1`, the current window, is a bash shell. Switch windows with the tmux prefix followed by the window number, e.g. `Ctrl-B 1` (`Ctrl-B` is the default prefix). For more shortcuts see [tmux shortcuts & cheatsheet](https://gist.github.com/MohamedAlaa/2961058#list-all-shortcuts).
+
+The container and chain will halt once you detach from the session.
+
+### Using tmux control mode
+
+If you use iTerm, you can run [tmux with native integration](https://iterm2.com/documentation-tmux-integration.html), called control mode, which makes the tmux session appear as a native window. Pass `TMUX_CC=1`:
+
+```shell
+TMUX_CC=1 make run
+```
+
+**Note:** If your terminal does not support control mode, do not use this. It will show raw control codes, garbling your terminal.
+
+### Troubleshooting
If you get an error about port 26656 already in use, you have a local chain running on your OS.
If you run into other problems, you might have a local `agoric-sdk:latest` that
@@ -49,6 +77,16 @@ docker ps
docker attach sweet_edison
```
+**To pass specific `software-upgrade --upgrade-info`**
+
+```shell
+json='{"some":"json","here":123}'
+make build BUILD_OPTS="--build-arg UPGRADE_INFO_11='$json'"
+```
+
+Search this directory for `UPGRADE_INFO` if you want to see how it is plumbed
+through.
+
**To test CLI**
You can point your local CLI tools to the chain running in Docker. Our Docker config binds the same port (26656) as a local chain, so you can use the agoric-cli commands against the Docker chain the same way. Note that the Cosmos account keys will differ from those in your dev keyring.
diff --git a/packages/deployment/upgrade-test/upgrade-test-scripts/agoric-upgrade-10-to-11/.keep b/packages/deployment/upgrade-test/upgrade-test-scripts/agoric-upgrade-10-to-11/.keep
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/packages/deployment/upgrade-test/upgrade-test-scripts/agoric-upgrade-10/actions.sh b/packages/deployment/upgrade-test/upgrade-test-scripts/agoric-upgrade-10/actions.sh
index 75a27d1de9c..cc53553a99b 100644
--- a/packages/deployment/upgrade-test/upgrade-test-scripts/agoric-upgrade-10/actions.sh
+++ b/packages/deployment/upgrade-test/upgrade-test-scripts/agoric-upgrade-10/actions.sh
@@ -10,6 +10,9 @@ set -x
# agoric wallet show --from $GOV1ADDR
waitForBlock 20
+# user1 has no mailbox provisioned; later we test that this was discarded
+submitDeliverInbound user1
+
# provision a new user wallet
agd keys add user2 --keyring-backend=test 2>&1 | tee "$HOME/.agoric/user2.out"
@@ -171,6 +174,28 @@ OFFER=$(mktemp -t agops.XXX)
agops vaults close --vaultId vault2 --giveMinted 5.75 --from $USER2ADDR --keyring-backend="test" >|"$OFFER"
agops perf satisfaction --from "$USER2ADDR" --executeOffer "$OFFER" --keyring-backend=test
-# # TODO test bidding
-# # TODO liquidations
-# # agops inter bid by-price --price 1 --give 1.0IST --from $GOV1ADDR --keyring-backend test
+# replicate state-sync of node
+# this will cause the swing-store to prune some data
+# we will save the pruned artifact for later
+killAgd
+EXPORT_DIR=$(mktemp -t -d swing-store-export-upgrade-10-XXX)
+make_swing_store_snapshot $EXPORT_DIR || fail "Couldn't make swing-store snapshot"
+test_val "$(compare_swing_store_export_data $EXPORT_DIR)" "match" "swing-store export data"
+EXPORT_DIR_ALL_ARTIFACTS=$(mktemp -t -d swing-store-export-upgrade-10-all-artifacts-XXX)
+make_swing_store_snapshot $EXPORT_DIR_ALL_ARTIFACTS --export-mode archival || fail "Couldn't make swing-store snapshot for historical artifacts"
+restore_swing_store_snapshot $EXPORT_DIR || fail "Couldn't restore swing-store snapshot"
+(
+ cd $EXPORT_DIR_ALL_ARTIFACTS
+ mkdir $HOME/.agoric/data/agoric/swing-store-historical-artifacts
+ for i in *; do
+ [ -f $EXPORT_DIR/$i ] && continue
+ mv $i $HOME/.agoric/data/agoric/swing-store-historical-artifacts/
+ done
+)
+rm -rf $EXPORT_DIR
+rm -rf $EXPORT_DIR_ALL_ARTIFACTS
+startAgd
+
+# # TODO fully test bidding
+# # TODO test liquidations
+agops inter bid by-price --price 1 --give 1.0IST --from $GOV1ADDR --keyring-backend test
diff --git a/packages/deployment/upgrade-test/upgrade-test-scripts/agoric-upgrade-10/env_setup.sh b/packages/deployment/upgrade-test/upgrade-test-scripts/agoric-upgrade-10/env_setup.sh
new file mode 100644
index 00000000000..22d6425887f
--- /dev/null
+++ b/packages/deployment/upgrade-test/upgrade-test-scripts/agoric-upgrade-10/env_setup.sh
@@ -0,0 +1,156 @@
+#!/bin/bash
+
+# agoric-upgrade-10 specific env here...
+export USER2ADDR=$($binary keys show user2 -a --keyring-backend="test" 2> /dev/null)
+
+printKeys() {
+ echo "========== GOVERNANCE KEYS =========="
+ echo "gov1: $GOV1ADDR"
+ cat ~/.agoric/gov1.key || true
+ echo "gov2: $GOV2ADDR"
+ cat ~/.agoric/gov2.key || true
+ echo "gov3: $GOV3ADDR"
+ cat ~/.agoric/gov3.key || true
+ echo "validator: $VALIDATORADDR"
+ cat ~/.agoric/validator.key || true
+ echo "user1: $USER1ADDR"
+ cat ~/.agoric/user1.key || true
+ echo "user2: $USER2ADDR"
+ cat ~/.agoric/user2.key || true
+ echo "========== GOVERNANCE KEYS =========="
+}
+
+pushPrice () {
+ echo ACTIONS pushPrice $1
+ newPrice="${1:-10.00}"
+ for oracleNum in {1..2}; do
+ if [[ ! -e "$HOME/.agoric/lastOracle" ]]; then
+ echo "$GOV1ADDR" > "$HOME/.agoric/lastOracle"
+ fi
+
+ lastOracle=$(cat "$HOME/.agoric/lastOracle")
+ nextOracle="$GOV1ADDR"
+ if [[ "$lastOracle" == "$GOV1ADDR" ]]; then
+ nextOracle="$GOV2ADDR"
+ fi
+ echo "Pushing Price from oracle $nextOracle"
+
+ oid="${nextOracle}_ORACLE"
+ offer=$(mktemp -t pushPrice.XXX)
+ agops oracle pushPriceRound --price "$newPrice" --oracleAdminAcceptOfferId "${!oid}" >|"$offer"
+ sleep 1
+ timeout --preserve-status 15 yarn run --silent agops perf satisfaction --from $nextOracle --executeOffer "$offer" --keyring-backend test
+ if [ $? -ne 0 ]; then
+ echo "WARNING: pushPrice for $nextOracle failed!"
+ fi
+ echo "$nextOracle" > "$HOME/.agoric/lastOracle"
+ done
+}
+
+
+# variant of pushPrice() that figures out which oracle to send from
+# WIP because it doesn't always work
+pushPriceOnce () {
+ echo ACTIONS pushPrice $1
+ newPrice="${1:-10.00}"
+ timeout 3 agoric follow -lF :published.priceFeed.ATOM-USD_price_feed.latestRound -ojson > "$HOME/.agoric/latestRound-ATOM.json"
+
+ lastStartedBy=$(jq -r .startedBy "$HOME/.agoric/latestRound-ATOM.json" || echo null)
+ echo lastStartedBy $lastStartedBy
+ nextOracle="ERROR"
+ # cycle to next among oracles (first of the two governance accounts)
+ case $lastStartedBy in
+ "$GOV1ADDR") nextOracle=$GOV2ADDR;;
+ "$GOV2ADDR") nextOracle=$GOV1ADDR;;
+ *)
+ echo last price was pushed by a different account, using GOV1
+ nextOracle=$GOV1ADDR
+ ;;
+ esac
+ echo nextOracle $nextOracle
+
+ adminOfferId="${nextOracle}_ORACLE"
+
+ echo "Pushing Price from oracle $nextOracle with offer $adminOfferId"
+
+ offer=$(mktemp -t pushPrice.XXX)
+ agops oracle pushPriceRound --price "$newPrice" --oracleAdminAcceptOfferId "${adminOfferId}" >|"$offer"
+ cat "$offer"
+ sleep 1
+ timeout --preserve-status 15 yarn run --silent agops perf satisfaction --from $nextOracle --executeOffer "$offer" --keyring-backend test
+ if [ $? -eq 0 ]; then
+ echo SUCCESS
+ else
+ echo "ERROR: pushPrice failed (using $nextOracle)"
+ fi
+}
+
+# submit a DeliverInbound transaction
+#
+# see {agoric.swingset.MsgDeliverInbound} in swingset/msgs.proto
+# https://github.com/Agoric/agoric-sdk/blob/5cc5ec8836dcd0c6e11b10799966b6e74601295d/golang/cosmos/proto/agoric/swingset/msgs.proto#L23
+submitDeliverInbound() {
+ sender="${1:-user1}"
+
+ # ag-solo is a client that sends DeliverInbound transactions using a golang client
+ # @see {connectToChain} in chain-cosmos-sdk.js
+ # runHelper
+ # https://github.com/Agoric/agoric-sdk/blob/5cc5ec8836dcd0c6e11b10799966b6e74601295d/packages/solo/src/chain-cosmos-sdk.js
+
+ # The payload is JSON.stringify([messages, highestAck])
+ # https://github.com/Agoric/agoric-sdk/blob/5cc5ec8836dcd0c6e11b10799966b6e74601295d/packages/solo/src/chain-cosmos-sdk.js#L625
+ # for example, this json was captured from a running `agoric start local-solo`
+ json='[[[1,"1:0:deliver:ro+1:rp-44;#[\"getConfiguration\",[]]"]],0]'
+
+ agd tx swingset deliver "${json}" \
+ --chain-id="$CHAINID" -ojson --yes \
+ --from="$sender" --keyring-backend=test -b block
+}
+
+make_swing_store_snapshot() {( set -euo pipefail
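+  # Export the swing-store, then swap its export-data file: keep the store's
+  # own copy as "*-untrusted.jsonl" and regenerate the trusted copy from the
+  # cosmos state via `agd export`.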
+ EXPORT_DIR="$1"
+ shift
+ /usr/src/agoric-sdk/packages/cosmic-swingset/src/export-kernel-db.js --home "$HOME/.agoric" --export-dir "$EXPORT_DIR" --verbose --include-export-data "$@"
+
+ EXPORT_MANIFEST_FILE="$EXPORT_DIR/export-manifest.json"
+ EXPORT_DATA_FILE="$EXPORT_DIR/$(cat "$EXPORT_MANIFEST_FILE" | jq -r .data)"
+ EXPORT_DATA_UNTRUSTED_FILE="${EXPORT_DATA_FILE%.*}-untrusted.jsonl"
+ EXPORT_HEIGHT=$(cat "$EXPORT_MANIFEST_FILE" | jq -r .blockHeight)
+ EXPORT_MANIFEST="$(cat $EXPORT_MANIFEST_FILE)"
+
+ mv "$EXPORT_DATA_FILE" "$EXPORT_DATA_UNTRUSTED_FILE"
+ agd export --height $EXPORT_HEIGHT | jq -cr '.app_state.vstorage.data[] | if .path | startswith("swingStore.") then [.path[11:],.value] else empty end' > "$EXPORT_DATA_FILE"
+
+ jq -n "$EXPORT_MANIFEST | .untrustedData=\"$(basename -- "$EXPORT_DATA_UNTRUSTED_FILE")\"" > "$EXPORT_MANIFEST_FILE"
+
+ echo "Successful swing-store export for block $EXPORT_HEIGHT"
+)}
+
+restore_swing_store_snapshot() {( set -euo pipefail
+ rm -f $HOME/.agoric/data/agoric/swingstore.sqlite
+
+ /usr/src/agoric-sdk/packages/cosmic-swingset/src/import-kernel-db.js --home "$HOME/.agoric" --export-dir "$1" --verbose
+)}
+
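+# Compare the swing-store's own export data against the copy recovered from
+# the cosmos state; prints "match" or "mismatch" for use with test_val.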
+compare_swing_store_export_data() {
+ EXPORT_DIR="$1"
+ EXPORT_MANIFEST_FILE="$EXPORT_DIR/export-manifest.json"
+ EXPORT_DATA_FILE="$(cat "$EXPORT_MANIFEST_FILE" | jq -r .data)"
+ EXPORT_DATA_UNTRUSTED_FILE="$(cat "$EXPORT_MANIFEST_FILE" | jq -r .untrustedData)"
+
+ if [ -z "$EXPORT_DATA_FILE" ]; then
+ echo "missing-export-data"
+ return
+ fi
+
+ if [ -z "$EXPORT_DATA_UNTRUSTED_FILE" ]; then
+ echo "missing-untrusted-export-data"
+ return
+ fi
+
+ diff <(cat "$EXPORT_DIR/$EXPORT_DATA_FILE" | sort) <(cat "$EXPORT_DIR/$EXPORT_DATA_UNTRUSTED_FILE" | sort) >&2 && {
+ echo "match"
+ } || {
+ echo "mismatch"
+ }
+}
diff --git a/packages/deployment/upgrade-test/upgrade-test-scripts/agoric-upgrade-10/test.sh b/packages/deployment/upgrade-test/upgrade-test-scripts/agoric-upgrade-10/test.sh
index 34b83c16ad5..633716f5b4a 100644
--- a/packages/deployment/upgrade-test/upgrade-test-scripts/agoric-upgrade-10/test.sh
+++ b/packages/deployment/upgrade-test/upgrade-test-scripts/agoric-upgrade-10/test.sh
@@ -2,6 +2,11 @@
. ./upgrade-test-scripts/env_setup.sh
+# DeliverInbound from un-provisioned account is discarded
+# Note: sending to a provisioned account resulted in an .outbox of
+# [[1,"1:1:resolve:fulfill:rp+44:ro-20;#\"$0.Alleged: notifier\""]]
+test_val $(agd query swingset mailbox $USER1ADDR -o json | jq '.value |fromjson |.outbox') '[]' "DeliverInbound (getConfiguration) is discarded"
+
# provision pool has right balance
test_val $(agd query bank balances agoric1megzytg65cyrgzs6fvzxgrcqvwwl7ugpt62346 -o json | jq -r '.balances | first | .amount ') "18750000"
@@ -30,3 +35,11 @@ test_val $(agoric follow -l -F :published.vaultFactory.managers.manager0.vaults.
test_val $(agoric follow -l -F :published.vaultFactory.managers.manager0.vaults.vault2 -o jsonlines | jq -r '.vaultState') "closed" "vault2 is closed"
test_val $(agoric follow -l -F :published.vaultFactory.managers.manager0.vaults.vault2 -o jsonlines | jq -r '.locked.value') "0" "vault2 contains no collateral"
test_val $(agoric follow -l -F :published.vaultFactory.managers.manager0.vaults.vault2 -o jsonlines | jq -r '.debtSnapshot.debt.value') "0" "vault2 has no debt"
+
+# verify state-sync would be broken
+killAgd
+EXPORT_DIR=$(mktemp -t -d swing-store-export-upgrade-10-XXX)
+make_swing_store_snapshot $EXPORT_DIR || fail "Couldn't make swing-store snapshot"
+test_val "$(compare_swing_store_export_data $EXPORT_DIR)" "mismatch" "swing-store broken state-sync"
+rm -rf $EXPORT_DIR
+startAgd
\ No newline at end of file
diff --git a/packages/deployment/upgrade-test/upgrade-test-scripts/agoric-upgrade-11/actions.sh b/packages/deployment/upgrade-test/upgrade-test-scripts/agoric-upgrade-11/actions.sh
index 53a9292d65a..9d5987b8ef6 100644
--- a/packages/deployment/upgrade-test/upgrade-test-scripts/agoric-upgrade-11/actions.sh
+++ b/packages/deployment/upgrade-test/upgrade-test-scripts/agoric-upgrade-11/actions.sh
@@ -2,4 +2,46 @@
. ./upgrade-test-scripts/env_setup.sh
-# Core-eval contract upgrade
+# Enable debugging
+set -x
+
+# CWD is agoric-sdk
+upgrade11=./upgrade-test-scripts/agoric-upgrade-11
+
+# hacky restore of pruned artifacts
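+# (append the artifacts saved aside during agoric-upgrade-10 back onto the
+# export manifest, so the import repopulates the previously pruned history)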
+killAgd
+EXPORT_DIR=$(mktemp -t -d swing-store-export-upgrade-11-XXX)
+WITHOUT_GENESIS_EXPORT=1 make_swing_store_snapshot $EXPORT_DIR --artifact-mode debug || fail "Couldn't make swing-store snapshot"
+HISTORICAL_ARTIFACTS="$(cd $HOME/.agoric/data/agoric/swing-store-historical-artifacts/; for i in *; do echo -n "[\"$i\",\"$i\"],"; done)"
+mv -n $HOME/.agoric/data/agoric/swing-store-historical-artifacts/* $EXPORT_DIR || fail "some historical artifacts not pruned"
+mv $EXPORT_DIR/export-manifest.json $EXPORT_DIR/export-manifest-original.json
+cat $EXPORT_DIR/export-manifest-original.json | jq -r ".artifacts = .artifacts + [${HISTORICAL_ARTIFACTS%%,}] | del(.artifactMode)" > $EXPORT_DIR/export-manifest.json
+restore_swing_store_snapshot $EXPORT_DIR || fail "Couldn't restore swing-store snapshot"
+rm -rf $EXPORT_DIR
+startAgd
+
+test_not_val "$(agops vaults list --from $GOV1ADDR)" "" "gov1 has no vaults"
+
+# open up a vault
+OFFER=$(mktemp -t agops.XXX)
+agops vaults open --wantMinted 7.00 --giveCollateral 11.0 >|"$OFFER"
+agops perf satisfaction --from "$GOV1ADDR" --executeOffer "$OFFER" --keyring-backend=test
+
+# put some IST in
+OFFER=$(mktemp -t agops.XXX)
+agops vaults adjust --vaultId vault3 --giveMinted 1.5 --from $GOV1ADDR --keyring-backend=test >|"$OFFER"
+agops perf satisfaction --from "$GOV1ADDR" --executeOffer "$OFFER" --keyring-backend=test
+
+# add some collateral
+OFFER=$(mktemp -t agops.XXX)
+agops vaults adjust --vaultId vault3 --giveCollateral 2.0 --from $GOV1ADDR --keyring-backend="test" >|"$OFFER"
+agops perf satisfaction --from "$GOV1ADDR" --executeOffer "$OFFER" --keyring-backend=test
+
+# close out
+OFFER=$(mktemp -t agops.XXX)
+agops vaults close --vaultId vault3 --giveMinted 5.75 --from $GOV1ADDR --keyring-backend="test" >|"$OFFER"
+agops perf satisfaction --from "$GOV1ADDR" --executeOffer "$OFFER" --keyring-backend=test
+
+test_val $(agoric follow -l -F :published.vaultFactory.managers.manager0.vaults.vault3 -o jsonlines | jq -r '.vaultState') "closed" "vault3 is closed"
+test_val $(agoric follow -l -F :published.vaultFactory.managers.manager0.vaults.vault3 -o jsonlines | jq -r '.locked.value') "0" "vault3 contains no collateral"
+test_val $(agoric follow -l -F :published.vaultFactory.managers.manager0.vaults.vault3 -o jsonlines | jq -r '.debtSnapshot.debt.value') "0" "vault3 has no debt"
diff --git a/packages/deployment/upgrade-test/upgrade-test-scripts/agoric-upgrade-11/env_setup.sh b/packages/deployment/upgrade-test/upgrade-test-scripts/agoric-upgrade-11/env_setup.sh
new file mode 100644
index 00000000000..3a9537719f1
--- /dev/null
+++ b/packages/deployment/upgrade-test/upgrade-test-scripts/agoric-upgrade-11/env_setup.sh
@@ -0,0 +1,153 @@
+#!/bin/bash
+
+# agoric-upgrade-11 specific env here...
+export USER2ADDR=$($binary keys show user2 -a --keyring-backend="test" 2> /dev/null)
+
+printKeys() {
+ echo "========== GOVERNANCE KEYS =========="
+ echo "gov1: $GOV1ADDR"
+ cat ~/.agoric/gov1.key || true
+ echo "gov2: $GOV2ADDR"
+ cat ~/.agoric/gov2.key || true
+ echo "gov3: $GOV3ADDR"
+ cat ~/.agoric/gov3.key || true
+ echo "validator: $VALIDATORADDR"
+ cat ~/.agoric/validator.key || true
+ echo "user1: $USER1ADDR"
+ cat ~/.agoric/user1.key || true
+ echo "user2: $USER2ADDR"
+ cat ~/.agoric/user2.key || true
+ echo "========== GOVERNANCE KEYS =========="
+}
+
+pushPrice () {
+ echo ACTIONS pushPrice $1
+ newPrice="${1:-10.00}"
+ for oracleNum in {1..2}; do
+ if [[ ! -e "$HOME/.agoric/lastOracle" ]]; then
+ echo "$GOV1ADDR" > "$HOME/.agoric/lastOracle"
+ fi
+
+ lastOracle=$(cat "$HOME/.agoric/lastOracle")
+ nextOracle="$GOV1ADDR"
+ if [[ "$lastOracle" == "$GOV1ADDR" ]]; then
+ nextOracle="$GOV2ADDR"
+ fi
+ echo "Pushing Price from oracle $nextOracle"
+
+ oid="${nextOracle}_ORACLE"
+ offer=$(mktemp -t pushPrice.XXX)
+ agops oracle pushPriceRound --price "$newPrice" --oracleAdminAcceptOfferId "${!oid}" >|"$offer"
+ sleep 1
+ timeout --preserve-status 15 yarn run --silent agops perf satisfaction --from $nextOracle --executeOffer "$offer" --keyring-backend test
+ if [ $? -ne 0 ]; then
+ echo "WARNING: pushPrice for $nextOracle failed!"
+ fi
+ echo "$nextOracle" > "$HOME/.agoric/lastOracle"
+ done
+}
+
+
+# variant of pushPrice() that figures out which oracle to send from
+# WIP because it doesn't always work
+pushPriceOnce () {
+ echo ACTIONS pushPrice $1
+ newPrice="${1:-10.00}"
+ timeout 3 agoric follow -lF :published.priceFeed.ATOM-USD_price_feed.latestRound -ojson > "$HOME/.agoric/latestRound-ATOM.json"
+
+ lastStartedBy=$(jq -r .startedBy "$HOME/.agoric/latestRound-ATOM.json" || echo null)
+ echo lastStartedBy $lastStartedBy
+ nextOracle="ERROR"
+ # cycle to next among oracles (first of the two governance accounts)
+ case $lastStartedBy in
+ "$GOV1ADDR") nextOracle=$GOV2ADDR;;
+ "$GOV2ADDR") nextOracle=$GOV1ADDR;;
+ *)
+ echo last price was pushed by a different account, using GOV1
+ nextOracle=$GOV1ADDR
+ ;;
+ esac
+ echo nextOracle $nextOracle
+
+ adminOfferId="${nextOracle}_ORACLE"
+
+ echo "Pushing Price from oracle $nextOracle with offer $adminOfferId"
+
+ offer=$(mktemp -t pushPrice.XXX)
+ agops oracle pushPriceRound --price "$newPrice" --oracleAdminAcceptOfferId "${adminOfferId}" >|"$offer"
+ cat "$offer"
+ sleep 1
+ timeout --preserve-status 15 yarn run --silent agops perf satisfaction --from $nextOracle --executeOffer "$offer" --keyring-backend test
+ if [ $? -eq 0 ]; then
+ echo SUCCESS
+ else
+ echo "ERROR: pushPrice failed (using $nextOracle)"
+ fi
+}
+
+export_genesis() {
+ GENESIS_EXPORT_DIR="$1"
+ shift
+ GENESIS_HEIGHT_ARG=
+
+ if [ -n "$1" ]; then
+ GENESIS_HEIGHT_ARG="--height $1"
+ shift
+ fi
+
+ agd export --export-dir "$GENESIS_EXPORT_DIR" $GENESIS_HEIGHT_ARG "$@"
+}
+
+make_swing_store_snapshot() {( set -euo pipefail
+ EXPORT_DIR="$1"
+ shift
+ /usr/src/agoric-sdk/packages/cosmic-swingset/src/export-kernel-db.js --home "$HOME/.agoric" --export-dir "$EXPORT_DIR" --verbose --artifact-mode replay --export-data-mode all "$@"
+
+ EXPORT_MANIFEST_FILE="$EXPORT_DIR/export-manifest.json"
+ EXPORT_HEIGHT=$(cat "$EXPORT_MANIFEST_FILE" | jq -r .blockHeight)
+
+ [ "x${WITHOUT_GENESIS_EXPORT:-0}" = "x1" ] || {
+ EXPORT_DATA_FILE="$EXPORT_DIR/$(cat "$EXPORT_MANIFEST_FILE" | jq -r .data)"
+ EXPORT_DATA_UNTRUSTED_FILE="${EXPORT_DATA_FILE%.*}-untrusted.jsonl"
+ EXPORT_MANIFEST="$(cat $EXPORT_MANIFEST_FILE)"
+
+ mv "$EXPORT_DATA_FILE" "$EXPORT_DATA_UNTRUSTED_FILE"
+ export_genesis "$EXPORT_DIR/genesis-export" $EXPORT_HEIGHT
+ cat $EXPORT_DIR/genesis-export/genesis.json | jq -cr '.app_state.swingset.swing_store_export_data[] | [.key,.value]' > "$EXPORT_DATA_FILE"
+
+ jq -n "$EXPORT_MANIFEST | .untrustedData=\"$(basename -- "$EXPORT_DATA_UNTRUSTED_FILE")\"" > "$EXPORT_MANIFEST_FILE"
+ }
+
+ echo "Successful swing-store export for block $EXPORT_HEIGHT"
+)}
+
+restore_swing_store_snapshot() {( set -euo pipefail
+ rm -f $HOME/.agoric/data/agoric/swingstore.sqlite
+ EXPORT_DIR="$1"
+ shift
+
+ /usr/src/agoric-sdk/packages/cosmic-swingset/src/import-kernel-db.js --home "$HOME/.agoric" --export-dir "$EXPORT_DIR" --verbose --artifact-mode replay --export-data-mode all "$@"
+)}
+
+compare_swing_store_export_data() {
+ EXPORT_DIR="$1"
+ EXPORT_MANIFEST_FILE="$EXPORT_DIR/export-manifest.json"
+ EXPORT_DATA_FILE="$(cat "$EXPORT_MANIFEST_FILE" | jq -r .data)"
+ EXPORT_DATA_UNTRUSTED_FILE="$(cat "$EXPORT_MANIFEST_FILE" | jq -r .untrustedData)"
+
+ if [ -z "$EXPORT_DATA_FILE" ]; then
+ echo "missing-export-data"
+ return
+ fi
+
+ if [ -z "$EXPORT_DATA_UNTRUSTED_FILE" ]; then
+ echo "missing-untrusted-export-data"
+ return
+ fi
+
+ diff <(cat "$EXPORT_DIR/$EXPORT_DATA_FILE" | sort) <(cat "$EXPORT_DIR/$EXPORT_DATA_UNTRUSTED_FILE" | sort) >&2 && {
+ echo "match"
+ } || {
+ echo "mismatch"
+ }
+}
diff --git a/packages/deployment/upgrade-test/upgrade-test-scripts/agoric-upgrade-11/pre_test.sh b/packages/deployment/upgrade-test/upgrade-test-scripts/agoric-upgrade-11/pre_test.sh
new file mode 100755
index 00000000000..1db5c34bbdc
--- /dev/null
+++ b/packages/deployment/upgrade-test/upgrade-test-scripts/agoric-upgrade-11/pre_test.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+. ./upgrade-test-scripts/env_setup.sh
+
+echo Wait for upgrade to settle
+waitForBlock 5
+
+# CWD is agoric-sdk
+upgrade11=./upgrade-test-scripts/agoric-upgrade-11
+
+# validate agoric-upgrade-10 metrics after update
+
+test_val $(agd q vstorage children published.vaultFactory.managers.manager0.vaults -o json | jq -r '.children | length') 3 "we have three vaults"
+test_val $(agoric follow -l -F :published.vaultFactory.managers.manager0.metrics -o jsonlines | jq -r '.numActiveVaults') 1 "only one vault is active"
+
+test_val $(agoric follow -l -F :published.vaultFactory.managers.manager0.metrics -o jsonlines | jq -r '.totalDebt.value') "6030000" "totalDebt is correct"
+test_val $(agoric follow -l -F :published.vaultFactory.managers.manager0.metrics -o jsonlines | jq -r '.totalCollateral.value') "8000000" "totalCollateral is correct"
+
+test_val $(agoric follow -l -F :published.vaultFactory.managers.manager0.vaults.vault0 -o jsonlines | jq -r '.vaultState') "active" "vault0 is open"
+test_val $(agoric follow -l -F :published.vaultFactory.managers.manager0.vaults.vault0 -o jsonlines | jq -r '.locked.value') "8000000" "vault0 contains 8 ATOM collateral"
+test_val $(agoric follow -l -F :published.vaultFactory.managers.manager0.vaults.vault0 -o jsonlines | jq -r '.debtSnapshot.debt.value') "6030000" "vault0 debt is 6.03 IST"
+
+test_val $(agoric follow -l -F :published.vaultFactory.managers.manager0.vaults.vault1 -o jsonlines | jq -r '.vaultState') "closed" "vault1 is closed"
+test_val $(agoric follow -l -F :published.vaultFactory.managers.manager0.vaults.vault1 -o jsonlines | jq -r '.locked.value') "0" "vault1 contains no collateral"
+test_val $(agoric follow -l -F :published.vaultFactory.managers.manager0.vaults.vault1 -o jsonlines | jq -r '.debtSnapshot.debt.value') "0" "vault1 has no debt"
+
+test_val $(agoric follow -l -F :published.vaultFactory.managers.manager0.vaults.vault2 -o jsonlines | jq -r '.vaultState') "closed" "vault2 is closed"
+test_val $(agoric follow -l -F :published.vaultFactory.managers.manager0.vaults.vault2 -o jsonlines | jq -r '.locked.value') "0" "vault2 contains no collateral"
+test_val $(agoric follow -l -F :published.vaultFactory.managers.manager0.vaults.vault2 -o jsonlines | jq -r '.debtSnapshot.debt.value') "0" "vault2 has no debt"
+
diff --git a/packages/deployment/upgrade-test/upgrade-test-scripts/agoric-upgrade-11/test.sh b/packages/deployment/upgrade-test/upgrade-test-scripts/agoric-upgrade-11/test.sh
new file mode 100755
index 00000000000..86dcbf2d057
--- /dev/null
+++ b/packages/deployment/upgrade-test/upgrade-test-scripts/agoric-upgrade-11/test.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+
+. ./upgrade-test-scripts/env_setup.sh
+
+echo Wait for actions to settle
+waitForBlock 2
+
+# CWD is agoric-sdk
+upgrade11=./upgrade-test-scripts/agoric-upgrade-11
+
+# verify swing-store export-data is consistent and perform genesis style "upgrade"
+killAgd
+EXPORT_DIR=$(mktemp -t -d swing-store-export-upgrade-11-XXX)
+make_swing_store_snapshot $EXPORT_DIR --artifact-mode none || fail "Couldn't make swing-store snapshot"
+test_val "$(compare_swing_store_export_data $EXPORT_DIR)" "match" "swing-store consistent cosmos kvstore"
+
+TMP_GENESIS_DIR=$EXPORT_DIR/genesis-export
+cp $HOME/.agoric/config/genesis.json $TMP_GENESIS_DIR/old_genesis.json
+cp $HOME/.agoric/data/priv_validator_state.json $TMP_GENESIS_DIR/priv_validator_state.json
+rm -rf $HOME/.agoric/data
+mkdir $HOME/.agoric/data
+mv $TMP_GENESIS_DIR/priv_validator_state.json $HOME/.agoric/data
+mv $TMP_GENESIS_DIR/* $HOME/.agoric/config/
+rm -rf $EXPORT_DIR
+startAgd
diff --git a/packages/deployment/upgrade-test/upgrade-test-scripts/bash_entrypoint.sh b/packages/deployment/upgrade-test/upgrade-test-scripts/bash_entrypoint.sh
index 06dbbdb5918..9f0161bccad 100644
--- a/packages/deployment/upgrade-test/upgrade-test-scripts/bash_entrypoint.sh
+++ b/packages/deployment/upgrade-test/upgrade-test-scripts/bash_entrypoint.sh
@@ -2,6 +2,12 @@
cd /usr/src/agoric-sdk/ || exit 1
tmux -V || apt install -y tmux
-tmux \
+if [[ $TMUX_USE_CC == "1" ]]; then
+ TMUX_FLAGS="-CC -u"
+else
+ TMUX_FLAGS=""
+fi
+
+tmux $TMUX_FLAGS \
new-session 'SLOGFILE=slog.slog ./upgrade-test-scripts/start_to_to.sh' \; \
new-window 'bash -i'
diff --git a/packages/deployment/upgrade-test/upgrade-test-scripts/env_setup.sh b/packages/deployment/upgrade-test/upgrade-test-scripts/env_setup.sh
index 21d91a9fd7a..feb055da6ec 100644
--- a/packages/deployment/upgrade-test/upgrade-test-scripts/env_setup.sh
+++ b/packages/deployment/upgrade-test/upgrade-test-scripts/env_setup.sh
@@ -23,10 +23,6 @@ export GOV3ADDR=$($binary keys show gov3 -a --keyring-backend="test")
export VALIDATORADDR=$($binary keys show validator -a --keyring-backend="test")
export USER1ADDR=$($binary keys show user1 -a --keyring-backend="test")
-if [[ $THIS_NAME == "agoric-upgrade-10" || $THIS_NAME == "agoric-upgrade-11" ]]; then
- export USER2ADDR=$($binary keys show user2 -a --keyring-backend="test" 2> /dev/null)
-fi
-
if [[ "$binary" == "agd" ]]; then
# Support testnet addresses
sed -i "s/agoric1ldmtatp24qlllgxmrsjzcpe20fvlkp448zcuce/$GOV1ADDR/g" /usr/src/agoric-sdk/packages/vats/*.json
@@ -58,6 +54,26 @@ if [[ "$binary" == "agd" ]]; then
sed -i 's/minSubmissionCount": 3/minSubmissionCount": 1/g' /usr/src/agoric-sdk/packages/vats/*.json
fi
+startAgd() {
+ agd start --log_level warn "$@" &
+ AGD_PID=$!
+ echo $AGD_PID > $HOME/.agoric/agd.pid
+ wait_for_bootstrap
+ waitForBlock 2
+}
+
+killAgd() {
+ AGD_PID=$(cat $HOME/.agoric/agd.pid)
+ kill $AGD_PID
+ rm $HOME/.agoric/agd.pid
+ wait $AGD_PID || true
+}
+
+waitAgd() {
+ wait $(cat $HOME/.agoric/agd.pid)
+ rm $HOME/.agoric/agd.pid
+}
+
provisionSmartWallet() {
i="$1"
amount="$2"
@@ -186,12 +202,18 @@ voteLatestProposalAndWait() {
while true; do
status=$($binary q gov proposal $proposal -ojson | jq -r .status)
- if [ "$status" == "PROPOSAL_STATUS_PASSED" ]; then
+ case $status in
+ PROPOSAL_STATUS_PASSED)
break
- else
- echo "Waiting for proposal to pass"
+ ;;
+ PROPOSAL_STATUS_REJECTED)
+ echo "Proposal rejected"
+ exit 1
+ ;;
+ *)
+ echo "Waiting for proposal to pass (status=$status)"
sleep 1
- fi
+ esac
done
}
@@ -211,78 +233,9 @@ printKeys() {
cat ~/.agoric/validator.key || true
echo "user1: $USER1ADDR"
cat ~/.agoric/user1.key || true
- if [[ $THIS_NAME == "agoric-upgrade-10" || $THIS_NAME == "agoric-upgrade-11" ]]; then
- cat ~/.agoric/user2.key || true
- fi
echo "========== GOVERNANCE KEYS =========="
}
-echo ENV_SETUP finished
-
-pushPrice () {
- echo ACTIONS pushPrice $1
- newPrice="${1:-10.00}"
- for oracleNum in {1..2}; do
- if [[ ! -e "$HOME/.agoric/lastOracle" ]]; then
- echo "$GOV1ADDR" > "$HOME/.agoric/lastOracle"
- fi
-
- lastOracle=$(cat "$HOME/.agoric/lastOracle")
- nextOracle="$GOV1ADDR"
- if [[ "$lastOracle" == "$GOV1ADDR" ]]; then
- nextOracle="$GOV2ADDR"
- fi
- echo "Pushing Price from oracle $nextOracle"
-
- oid="${nextOracle}_ORACLE"
- offer=$(mktemp -t pushPrice.XXX)
- agops oracle pushPriceRound --price "$newPrice" --oracleAdminAcceptOfferId "${!oid}" >|"$offer"
- sleep 1
- timeout --preserve-status 15 yarn run --silent agops perf satisfaction --from $nextOracle --executeOffer "$offer" --keyring-backend test
- if [ $? -ne 0 ]; then
- echo "WARNING: pushPrice for $nextOracle failed!"
- fi
- echo "$nextOracle" > "$HOME/.agoric/lastOracle"
- done
-}
-
-
-# variant of pushPrice() that figures out which oracle to send from
-# WIP because it doesn't always work
-pushPriceOnce () {
- echo ACTIONS pushPrice $1
- newPrice="${1:-10.00}"
- timeout 3 agoric follow -lF :published.priceFeed.ATOM-USD_price_feed.latestRound -ojson > "$HOME/.agoric/latestRound-ATOM.json"
-
- lastStartedBy=$(jq -r .startedBy "$HOME/.agoric/latestRound-ATOM.json" || echo null)
- echo lastStartedBy $lastStartedBy
- nextOracle="ERROR"
- # cycle to next among oracles (first of the two governance accounts)
- case $lastStartedBy in
- "$GOV1ADDR") nextOracle=$GOV2ADDR;;
- "$GOV2ADDR") nextOracle=$GOV1ADDR;;
- *)
- echo last price was pushed by a different account, using GOV1
- nextOracle=$GOV1ADDR
- ;;
- esac
- echo nextOracle $nextOracle
-
- adminOfferId="${nextOracle}_ORACLE"
-
- echo "Pushing Price from oracle $nextOracle with offer $adminOfferId"
-
- offer=$(mktemp -t pushPrice.XXX)
- agops oracle pushPriceRound --price "$newPrice" --oracleAdminAcceptOfferId "${adminOfferId}" >|"$offer"
- cat "$offer"
- sleep 1
- timeout --preserve-status 15 yarn run --silent agops perf satisfaction --from $nextOracle --executeOffer "$offer" --keyring-backend test
- if [ $? -eq 0 ]; then
- echo SUCCESS
- else
- echo "ERROR: pushPrice failed (using $nextOracle)"
- fi
-}
export USDC_DENOM="ibc/toyusdc"
# Recent transfer to Emerynet
@@ -293,3 +246,12 @@ if [[ "$BOOTSTRAP_MODE" == "main" ]]; then
export ATOM_DENOM="ibc/BA313C4A19DFBF943586C0387E6B11286F9E416B4DD27574E6909CABE0E342FA"
export PSM_PAIR="IST.USDC_axl"
fi
+
+# additional env specific to a version
+if test -f ./upgrade-test-scripts/$THIS_NAME/env_setup.sh; then
+ echo ENV_SETUP found $THIS_NAME specific env, importing...
+ . ./upgrade-test-scripts/$THIS_NAME/env_setup.sh
+ echo ENV_SETUP imported $THIS_NAME specific env
+fi
+
+echo ENV_SETUP finished
diff --git a/packages/deployment/upgrade-test/upgrade-test-scripts/start_ag0.sh b/packages/deployment/upgrade-test/upgrade-test-scripts/start_ag0.sh
index 251df7e4015..4281ae92318 100644
--- a/packages/deployment/upgrade-test/upgrade-test-scripts/start_ag0.sh
+++ b/packages/deployment/upgrade-test/upgrade-test-scripts/start_ag0.sh
@@ -65,7 +65,18 @@ if [[ "$BOOTSTRAP_MODE" == "test" ]]; then
UPGRADE_TO=${UPGRADE_TO//agoric-/agorictest-}
fi
-ag0 tx gov submit-proposal software-upgrade "$UPGRADE_TO" --upgrade-height="$height" --title="Upgrade to ${UPGRADE_TO}" --description="upgrades" --from=validator --chain-id="$CHAINID" --yes --keyring-backend=test
+info=${UPGRADE_INFO-"{}"}
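+# `jq .` both validates and echoes the JSON; abort with jq's exit status
+# if the upgrade info does not parse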
+if echo "$info" | jq .; then :
+else
+ status=$?
+ echo "Upgrade info is not valid JSON: $info"
+ exit $status
+fi
+ag0 tx gov submit-proposal software-upgrade "$UPGRADE_TO" \
+ --upgrade-height="$height" --upgrade-info="$info" \
+ --title="Upgrade to ${UPGRADE_TO}" --description="upgrades" \
+ --from=validator --chain-id="$CHAINID" \
+ --yes --keyring-backend=test
waitForBlock
voteLatestProposalAndWait
diff --git a/packages/deployment/upgrade-test/upgrade-test-scripts/start_to_to.sh b/packages/deployment/upgrade-test/upgrade-test-scripts/start_to_to.sh
index bdb316469e0..2ba2d2a8e13 100644
--- a/packages/deployment/upgrade-test/upgrade-test-scripts/start_to_to.sh
+++ b/packages/deployment/upgrade-test/upgrade-test-scripts/start_to_to.sh
@@ -3,7 +3,7 @@
grep -qF 'env_setup.sh' /root/.bashrc || echo ". ./upgrade-test-scripts/env_setup.sh" >> /root/.bashrc
grep -qF 'printKeys' /root/.bashrc || echo "printKeys" >> /root/.bashrc
-tmux -V || apt install -y tmux
+tmux -V 2>/dev/null || apt-get install -y tmux
if [[ "$DEST" == "1" ]] && [[ "$TMUX" == "" ]]; then
echo "launching entrypoint"
@@ -14,10 +14,7 @@ fi
. ./upgrade-test-scripts/env_setup.sh
-agd start --log_level warn &
-AGD_PID=$!
-wait_for_bootstrap
-waitForBlock 2
+startAgd
if ! test -f "$HOME/.agoric/runActions-${THIS_NAME}"; then
runActions "pre_test"
@@ -42,7 +39,18 @@ if [[ "$DEST" != "1" ]]; then
voting_period_s=10
latest_height=$(agd status | jq -r .SyncInfo.latest_block_height)
height=$(( $latest_height + $voting_period_s + 10 ))
- agd tx gov submit-proposal software-upgrade "$UPGRADE_TO" --upgrade-height="$height" --title="Upgrade to ${UPGRADE_TO}" --description="upgrades" --from=validator --chain-id="$CHAINID" --yes --keyring-backend=test
+ info=${UPGRADE_INFO-"{}"}
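+  # `jq .` both validates and echoes the JSON; abort with jq's exit status
+  # if the upgrade info does not parse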
+ if echo "$info" | jq .; then :
+ else
+ status=$?
+ echo "Upgrade info is not valid JSON: $info"
+ exit $status
+ fi
+ agd tx gov submit-proposal software-upgrade "$UPGRADE_TO" \
+ --upgrade-height="$height" --upgrade-info="$info" \
+ --title="Upgrade to ${UPGRADE_TO}" --description="upgrades" \
+ --from=validator --chain-id="$CHAINID" \
+ --yes --keyring-backend=test
waitForBlock
voteLatestProposalAndWait
@@ -61,9 +69,9 @@ if [[ "$DEST" != "1" ]]; then
done
sleep 2
- kill $AGD_PID
+ killAgd
echo "ready for upgrade to $UPGRADE_TO"
else
- wait $AGD_PID
+ waitAgd
fi
\ No newline at end of file
diff --git a/packages/internal/src/action-types.js b/packages/internal/src/action-types.js
index 2d5a3d77ce2..a7dec994dff 100644
--- a/packages/internal/src/action-types.js
+++ b/packages/internal/src/action-types.js
@@ -1,7 +1,7 @@
// @jessie-check
-export const BOOTSTRAP_BLOCK = 'BOOTSTRAP_BLOCK';
-export const COSMOS_SNAPSHOT = 'COSMOS_SNAPSHOT';
+export const AG_COSMOS_INIT = 'AG_COSMOS_INIT';
+export const SWING_STORE_EXPORT = 'SWING_STORE_EXPORT';
export const BEGIN_BLOCK = 'BEGIN_BLOCK';
export const CALCULATE_FEES_IN_BEANS = 'CALCULATE_FEES_IN_BEANS';
export const CORE_EVAL = 'CORE_EVAL';
diff --git a/packages/solo/src/init-basedir.js b/packages/solo/src/init-basedir.js
index e704e2e7346..384d81c3b84 100644
--- a/packages/solo/src/init-basedir.js
+++ b/packages/solo/src/init-basedir.js
@@ -55,7 +55,7 @@ export default function initBasedir(
`${JSON.stringify(connections)}\n`,
);
const dstHtmldir = path.join(basedir, 'html');
- fs.mkdirSync(dstHtmldir);
+ fs.mkdirSync(dstHtmldir, { recursive: true });
// Save the configuration options.
fs.writeFileSync(path.join(basedir, 'options.json'), JSON.stringify(options));
diff --git a/packages/swing-store/docs/data-export.md b/packages/swing-store/docs/data-export.md
index c538448fa21..ecdecda6267 100644
--- a/packages/swing-store/docs/data-export.md
+++ b/packages/swing-store/docs/data-export.md
@@ -16,7 +16,7 @@ The SwingStore export protocol defines two stages (effectively two datasets). Th
Each time a SwingStore API is used to modify the state somehow (e.g. adding/changing/deleting a `kvStore` entry, or pushing a new item on to a transcript), the contents of both datasets may change. New first-stage entries can be created, existing ones may be modified or deleted. And the set of second-stage artifacts may change.
-These export data/artifact changes can happen when calling into the kernel (e.g. invoking the external API of a device, causing the device code to change its own state or push messages onto the run-queue), or by normal kernel operations as it runs (any time `controller.run()` is executing). When the kernel is idle (after `controller.run()` has completed), the kernel will not make any changes to the SwingStore, and both datasets will be stable.
+These export data/artifact changes can happen when calling into the kernel (e.g. invoking the external API of a device, causing the device code to change its own state or push messages onto the run-queue), or by normal kernel operations as it runs (any time `controller.run()` is executing). When the kernel is idle (after `controller.run()` has completed), and `hostStorage.commit()` is called, the kernel will not make any changes to the SwingStore, and both datasets will be stable.
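+
+A minimal sketch of that stable point, reusing the `controller` and `hostStorage` names from this document:
+
+```js
+// the kernel mutates the swing-store only while it is executing
+await controller.run();
+// once the host commits, both datasets are stable until the next run
+await hostStorage.commit();
+```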
Among other things, the SwingStore records a transcript of deliveries for each vat. The collection of all deliveries to a particular vat since its last heap snapshot was written is called the "current span". For each vat, the first-stage export data will record a single record that remembers the extent and the hash of the current span. This record then refers to a second-stage export artifact that contains the actual transcript contents.
@@ -83,6 +83,8 @@ So, to include SwingStore data in this state-sync snapshot, we need a way to get
To support this, SwingStore has an "incremental export" mode. This is activated when the host application supplies an "export callback" option to the SwingStore instance constructor. Instead of retrieving the entire first-stage export data at the end of the block, the host application will be continuously notified about changes to this data as the kernel executes. The host application can then incorporate those entries into an existing hashed Merkle tree (e.g. the cosmos-sdk IAVL tree), whose root hash is included in the consensus block hash. Every time the callback is given `(key, value)`, the host should add a new (or modify some existing) IAVL entry, using an IAVL key within some range dedicated to the SwingStore first-stage export data. When the callback receives `(key, undefined)` or `(key, null)`, it should delete the entry. In this way, the IAVL tree maintains a "shadow copy" of the first-stage export data at all times, making the contents both covered by the consensus hash, and automatically included in the cosmos-sdk IAVL tree where it will become available to the new validator as it begins to reconstruct the SwingStore.
+The export callback must be established from the very beginning, so it includes all changes made during kernel initialization.
+
All validator nodes use this export callback, even if they never perform the rest of the export process, to ensure that the consensus state includes the entire first-stage dataset. (Note that the first stage data is generally smaller than the full dataset, making this relatively inexpensive).
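+
+A sketch of such a callback, following the `(key, value)` convention described above (the `exportCallback` option name, the `dbDir` path, and the use of a `Map` in place of the host's IAVL tree are all illustrative assumptions):
+
+```js
+import { openSwingStore } from '@agoric/swing-store';
+
+const shadow = new Map(); // stand-in for the host's hashed Merkle tree
+const exportCallback = (key, value) => {
+  if (value == null) {
+    shadow.delete(key); // (key, undefined) or (key, null) requests deletion
+  } else {
+    shadow.set(key, value);
+  }
+};
+const { kernelStorage, hostStorage } = openSwingStore(dbDir, { exportCallback });
+```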
Then, on the few occasions when the application needs to build a full state-sync snapshot, it can ask the SwingStore (after block commit) for the full set of artifacts that match the most recent commit.
@@ -177,18 +179,41 @@ As a result, for each active vat, the first-stage Export Data contains a record
The `openSwingStore()` function has an option named `keepTranscripts` (which defaults to `true`), which causes the transcriptStore to retain the old transcript items. A second option named `keepSnapshots` (which defaults to `false`) causes the snapStore to retain the old heap snapshots. Opening the swingStore with a `false` option does not necessarily delete the old items immediately, but they'll probably get deleted the next time the kernel triggers a heap snapshot or transcript-span rollover. Validators who care about minimizing their disk usage will want to set both to `false`. In the future, we will arrange the SwingStore SQLite tables to provide easy `sqlite3` CLI commands that will delete the old data, so validators can also periodically use the CLI command to prune it.
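+
+For example, a disk-conscious validator might open the store like this (a sketch; `dbDir` is a hypothetical path):
+
+```js
+import { openSwingStore } from '@agoric/swing-store';
+
+const { kernelStorage, hostStorage } = openSwingStore(dbDir, {
+  keepTranscripts: false, // prune old transcript spans at rollover
+  keepSnapshots: false, // prune heap snapshots once they are replaced
+});
+```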
-The `getArtifactNames()` API includes an option named `includeHistorical`. If `true`, all available historical artifacts will be included in the export (limited by what the `openSwingStore` options have deleted). If `false`, none will be included. Note that the "export data" is necessarily unaffected: if we *ever* want to validate this optional data, the hashes are mandatory. But the `getArtifactNames()` list will be smaller if you set `includeHistorical = false`. Also, re-exporting from a pruned copy will lack the old data, even if the re-export uses `includeHistorical = true`, because the second SwingStore cannot magically reconstruct the missing data.
+When exporting, the `makeSwingStoreExporter()` function takes an `artifactMode` option (in an options bag). This serves to both limit, and provide some minimal guarantees about, the set of artifacts that will be provided in the export. The defined values of `artifactMode` each build upon the previous one:
+
+* `operational`: include only the current transcript span and current snapshot for each vat: just the minimum set necessary for current operations
+* `replay`: add all transcript spans for the current incarnation
+* `archival`: add all available transcript spans, even for old incarnations
+* `debug`: add all available snapshots, giving you everything. The old snapshots are never necessary for normal operations, nor are they likely to be useful for extreme upgrade scenarios, but they might be useful for some unusual debugging operations or investigations
+
+For each mode, the export will fail if the data necessary for those artifacts is not available (e.g. it was previously pruned). For example, an export with `artifactMode: 'replay'` will fail unless every vat has all the transcript entries for its current incarnation. The `archival` mode will fail to export unless every vat has *every* transcript entry, back to the very first incarnation.
+
+However the `debug` export mode will never fail: it merely dumps everything in the swingstore, without limits or completeness checks.
+
+Note that `artifactMode` does not affect the Export Data generated by the exporter (because if we *ever* want to validate this optional data, the hashes are mandatory). It only affects the names returned by `getArtifactNames()`: `operational` returns a subset of `replay`, which returns a subset of `archival`. And re-exporting from a previously-pruned copy under `archival` mode will fail, because the second SwingStore cannot magically reconstruct the missing data.
+
+Also note that when a vat is terminated, we delete all information about it, including transcript items and snapshots, both current and old. This will remove all the Export Data records, as well as the matching artifacts from `getArtifactNames`.
+
+When importing, the `importSwingStore()` function's options bag takes a property named `artifactMode`, with the same meanings as for export. Importing with the `operational` mode will ignore any artifacts other than those needed for current operations, and will fail unless all such artifacts were available. Importing with `replay` will ignore spans from old incarnations, but will fail unless all spans from current incarnations are present. Importing with `archival` will fail unless all spans from all incarnations are present. There is no `debug` option during import.
+
+`importSwingStore()` returns a swingstore, which means its options bag also contains the same options as `openSwingStore()`, including the `keepTranscripts` option. This defaults to `true`, but if it were overridden to `false`, then the new swingstore will delete transcript spans as soon as they are no longer needed for operational purposes (e.g. when `transcriptStore.rolloverSpan()` is called).
+
+So, to avoid pruning current-incarnation historical transcript spans when exporting from one swingstore to another, you must set (or avoid overriding) the following options along the way:
-Note that when a vat is terminated, we delete all information about it, including transcript items and snapshots, both current and old. This will remove all the Export Data records, and well as the matching artifacts from `getArtifactNames`.
+* the original swingstore must not be opened with `{ keepTranscripts: false }`, otherwise the old spans will be pruned immediately
+* the export must use `makeSwingStoreExporter(dirpath, { artifactMode: 'replay'})`, otherwise the export will omit the old spans
+* the import must use `importSwingStore(exporter, dirPath, { artifactMode: 'replay'})`, otherwise the import will ignore the old spans
+ * the `importSwingStore` call (and all subsequent `openSwingStore` calls) must not use `keepTranscripts: false`, otherwise the new swingstore will prune historical spans as new ones are created (during `rolloverSpan`).
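+
+Put together, a history-preserving copy might look like the following sketch (directory names are hypothetical):
+
+```js
+import { makeSwingStoreExporter, importSwingStore } from '@agoric/swing-store';
+
+const exporter = makeSwingStoreExporter(oldDbDir, { artifactMode: 'replay' });
+const swingStore = await importSwingStore(exporter, newDbDir, { artifactMode: 'replay' });
+await swingStore.hostStorage.commit();
+```
+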
## Implementation Details
-SwingStore contains components to accomodate all the various kinds of state that the SwingSet kernel needs to store. This currently consists of three portions:
+SwingStore contains components to accommodate all the various kinds of state that the SwingSet kernel needs to store. This currently consists of four portions:
* `kvStore`, a general-purpose string/string key-value table
* `transcriptStore`: append-only vat deliveries, broken into "spans", delimited by heap snapshot events
* `snapshotStore`: binary blobs containing JS engine heap state, to limit transcript replay depth
+* `bundleStore`: code bundles that can be imported with `@endo/import-bundle`
-Currently, the SwingStore treats transcript spans and heap snapshots as export artifacts, with hashes recorded in the export data for validation (and to remember exactly which artifacts are necessary). The `kvStore` is copied one-to-one into the export data (i.e. we keep a full shadow copy in IAVL), because that is the fastest way to ensure the `kvStore` data is fully available and validated.
+Currently, the SwingStore treats transcript spans, heap snapshots, and bundles as export artifacts, with hashes recorded in the export data for validation (and to remember exactly which artifacts are necessary). The `kvStore` is copied one-to-one into the export data (i.e. we keep a full shadow copy in IAVL), because that is the fastest way to ensure the `kvStore` data is fully available and validated.
If some day we implement an IAVL-like Merkle tree inside SwingStore, and use it to automatically generate a root hash for the `kvStore` at the end of each block, we will replace this (large) shadow copy with a single `kvStoreRootHash` entry, and add a new export artifact to contain the full contents of the kvStore. This would reduce the size of the IAVL tree, as well as the rate of IAVL updates during block execution, at the cost of increased CPU and complexity within SwingStore.
diff --git a/packages/swing-store/docs/swingstore.md b/packages/swing-store/docs/swingstore.md
new file mode 100644
index 00000000000..56bd174dd28
--- /dev/null
+++ b/packages/swing-store/docs/swingstore.md
@@ -0,0 +1,52 @@
+# SwingStore Data Model
+
+The "SwingStore" provides a database to hold SwingSet kernel state, with an API crafted to help both the kernel and the host application mutate, commit, export, and import this state.
+
+The state is broken up into several pieces, or "stores":
+
+* `bundleStore`: a string-keyed Bundle-value table, holding source bundles which can be evaluated by `importBundle` to create vats, or new Compartments within a vat
+* `transcriptStore`: records a linear sequence of deliveries and syscalls (with results), collectively known as "transcript entries", for each vat
+* `snapStore`: records one or more XS heap snapshots for each vat, to rebuild a worker more efficiently than replaying all transcript entries from the beginning
+* `kvStore`: a string-keyed string-valued table, which holds everything else. Currently, this holds each vat's c-list and vatstore data, as well as the kernel-wide object and promise tables, and run-queues.
+
+## Incarnations, Spans, Snapshots
+
+The kernel tracks the state of one or more vats. Each vat's execution is split into "incarnations", which are separated by a "vat upgrade" (a call to `E(vatAdminFacet).upgrade(newBundleCap, options)`, see https://github.com/Agoric/agoric-sdk/blob/master/packages/SwingSet/docs/vat-upgrade.md for details). Each incarnation gets a new worker, which erases the heap state and only retains durable vatstore data across the upgrade. Every active vat has a "current incarnation", and zero or more "historic incarnations". Only the current incarnation is instantiated.
+
+Within each incarnation, execution is broken into one or more "spans", with a "current span" and zero or more "historic spans". This breaks up the transcript into corresponding spans.
+
+Each historic span ends with a `save-snapshot` entry which records the creation and saving of an XS heap snapshot. The initial span starts with a `start-worker` entry, while all non-initial spans start with a `load-snapshot` entry. The final span of each historic incarnation ends with a `shutdown-worker` entry.
+
+Each `save-snapshot` entry adds a new snapshot to the `snapStore`, so each vat has zero or more snapshots, of which the last one is called the "current" or "in-use" snapshot, and the earlier ones are called "historical snapshots".
+
+(note: the `deliveryNum` counter is scoped to the vat and does not reset at incarnation or span boundaries)
+
+## Artifacts
+
+The import/export process (using `makeSwingStoreExporter` and `importSwingStore`) defines some number of "artifacts" to contain much of the SwingStore data. Each bundle is a separate artifact, as is each heap snapshot. Each transcript span is a separate artifact (an aggregate of the individual transcript entries comprising that span).
+
+During export, the `getArtifactNames()` method provides a list of all available artifacts, while `getArtifact(name)` is used to retrieve the actual data. The import function processes each artifact separately.
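+
+A sketch of the consuming side (hypothetical host code assembling a state-sync snapshot; it assumes the exporter provides a `close()` method):
+
+```js
+import { makeSwingStoreExporter } from '@agoric/swing-store';
+
+const exporter = makeSwingStoreExporter(dbDir, { artifactMode: 'operational' });
+for await (const name of exporter.getArtifactNames()) {
+  for await (const chunk of exporter.getArtifact(name)) {
+    // write the artifact's bytes into the snapshot under `name`
+  }
+}
+await exporter.close();
+```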
+
+## Populated vs Pruned
+
+For normal operation, the kernel does not require historical incarnations, spans, or snapshots. It only needs the ability to reconstruct a worker for the current incarnation of each vat, which means loading the current snapshot (if any), and replaying the contents of the current transcript span.
+
+For this reason, the swingstore must always contain the current transcript span, and the current snapshot (if any), for every vat.
+
+However, to save space, historical spans/snapshots might be pruned, by deleting their contents from the database (but retaining the metadata, which includes a hash of the contents for later validation). Historical snapshots are pruned by default (unless `openSwingStore()` is given an options bag with `keepSnapshots: true`). Historical spans are not currently pruned (the `keepTranscripts` option defaults to `true`), but that may change.
+
+In addition, `importSwingStore()` can be used to create a SwingStore from data exported out of some other SwingStore. The export-then-import process might result in a pruned DB in one of three ways:
+
+* the import-time options might instruct the import process to ignore some of the available data
+* the export-time options might have done the same
+* the original DB was itself already pruned, so the data was not available in the first place
+
+In the future, a separate SwingStore API will exist to allow previously-pruned artifacts to be repopulated. Every artifact has a metadata record which *is* included in the export (in the `exportData` section, but separate from the kvStore shadow table entries, see [data-export.md](./data-export.md)), regardless of pruning modes, to ensure that this API can check the integrity of these repopulated artifacts. This reduces the reliance set and trust burden of the repopulation process (we can safely use untrusted artifact providers).
+
+When a snapshot is pruned, the `snapshots` SQL table row is modified, replacing its `compressedSnapshot` BLOB with a NULL. The other columns are left alone, especially the `hash` column, which retains the integrity-checking metadata to support a future repopulation.
+
+When a transcript span is pruned, the `transcriptSpans` row is left alone, but the collection of `transcriptItems` rows are deleted. Any span for which all the `transcriptItems` rows are present is said to be "populated", while any span that is missing one or more `transcriptItems` rows is said to be "pruned". (There is no good reason for a span to be only partially pruned, but until we compress historical spans into a single row, in some new table, there remains the possibility of partial pruning).
+
+During import, we create the metadata first (as the export-data is parsed), then later, we fill in the details as the artifacts are read.
+
+Bundles are never pruned; however, during import, the `bundles` table will temporarily contain rows whose `bundle` BLOB is NULL.
diff --git a/packages/swing-store/package.json b/packages/swing-store/package.json
index 7626c5bec46..b597852effd 100644
--- a/packages/swing-store/package.json
+++ b/packages/swing-store/package.json
@@ -3,7 +3,10 @@
"version": "0.9.1",
"description": "Persistent storage for SwingSet",
"type": "module",
- "main": "src/swingStore.js",
+ "main": "./src/index.js",
+ "exports": {
+ ".": "./src/index.js"
+ },
"repository": "https://github.com/Agoric/agoric-sdk",
"author": "Agoric",
"license": "Apache-2.0",
diff --git a/packages/swing-store/src/assertComplete.js b/packages/swing-store/src/assertComplete.js
new file mode 100644
index 00000000000..73a8d0f7292
--- /dev/null
+++ b/packages/swing-store/src/assertComplete.js
@@ -0,0 +1,21 @@
+/**
+ * @param {import('./internal.js').SwingStoreInternal} internal
+ * @param {Omit<import('./internal.js').ArtifactMode, 'debug'>} checkMode
+ * @returns {void}
+ */
+export function assertComplete(internal, checkMode) {
+ // every bundle must be populated
+ internal.bundleStore.assertComplete(checkMode);
+
+ // every 'isCurrent' transcript span must have all items
+ // TODO: every vat with any data must have a isCurrent transcript
+ // span
+ internal.transcriptStore.assertComplete(checkMode);
+
+ // every 'inUse' snapshot must be populated
+ internal.snapStore.assertComplete(checkMode);
+
+ // TODO: every isCurrent span that starts with load-snapshot has a
+ // matching snapshot (counter-argument: swing-store should not know
+ // those details about transcript entries)
+}
diff --git a/packages/swing-store/src/bundleStore.js b/packages/swing-store/src/bundleStore.js
index 2ccfb69c7b0..2a6f0f20fd0 100644
--- a/packages/swing-store/src/bundleStore.js
+++ b/packages/swing-store/src/bundleStore.js
@@ -7,7 +7,6 @@ import { checkBundle } from '@endo/check-bundle/lite.js';
import { Nat } from '@endo/nat';
import { Fail, q } from '@agoric/assert';
import { createSHA256 } from './hasher.js';
-import { buffer } from './util.js';
/**
* @typedef { { moduleFormat: 'getExport', source: string, sourceMap?: string } } GetExportBundle
@@ -16,7 +15,8 @@ import { buffer } from './util.js';
* @typedef { EndoZipBase64Bundle | GetExportBundle | NestedEvaluateBundle } Bundle
*/
/**
- * @typedef { import('./swingStore').SwingStoreExporter } SwingStoreExporter
+ * @typedef { import('./exporter').SwingStoreExporter } SwingStoreExporter
+ * @typedef { import('./internal.js').ArtifactMode } ArtifactMode
*
* @typedef {{
* addBundle: (bundleID: string, bundle: Bundle) => void;
@@ -27,7 +27,10 @@ import { buffer } from './util.js';
*
* @typedef {{
 * exportBundle: (name: string) => AsyncIterableIterator<Uint8Array>,
- * importBundle: (artifactName: string, exporter: SwingStoreExporter, bundleID: string) => void,
+ * repairBundleRecord: (key: string, value: string) => void,
+ * importBundleRecord: (key: string, value: string) => void,
+ * importBundle: (name: string, dataProvider: () => Promise<Buffer>) => Promise<void>,
+ * assertComplete: (checkMode: Omit<ArtifactMode, 'debug'>) => void,
 * getExportRecords: () => IterableIterator<readonly [key: string, value: string]>,
 * getArtifactNames: () => AsyncIterableIterator<string>,
 * getBundleIDs: () => IterableIterator<string>,
@@ -39,6 +42,18 @@ import { buffer } from './util.js';
*
*/
+function bundleIDFromName(name) {
+ typeof name === 'string' || Fail`artifact name must be a string`;
+ const [tag, ...pieces] = name.split('.');
+ if (tag !== 'bundle' || pieces.length !== 1) {
+ Fail`expected artifact name of the form 'bundle.{bundleID}', saw ${q(
+ name,
+ )}`;
+ }
+ const bundleID = pieces[0];
+ return bundleID;
+}
+
/**
* @param {*} db
* @param {() => void} ensureTxn
@@ -54,6 +69,9 @@ export function makeBundleStore(db, ensureTxn, noteExport = () => {}) {
)
`);
+ // A populated record contains both bundleID and bundle, while a
+ // pruned record has a bundle of NULL.
+
function bundleArtifactName(bundleID) {
return `bundle.${bundleID}`;
}
@@ -62,20 +80,36 @@ export function makeBundleStore(db, ensureTxn, noteExport = () => {}) {
return `b${Nat(version)}-${hash}`;
}
- const sqlAddBundle = db.prepare(`
- INSERT OR REPLACE INTO bundles
- (bundleID, bundle)
- VALUES (?, ?)
+ // the PRIMARY KEY constraint requires the bundleID not already
+ // exist
+ const sqlAddBundleRecord = db.prepare(`
+ INSERT INTO bundles (bundleID, bundle) VALUES (?, NULL)
`);
- /**
- * Store a bundle. Here the bundle itself is presumed valid.
- *
- * @param {string} bundleID
- * @param {Bundle} bundle
- */
- function addBundle(bundleID, bundle) {
+ // this sees both populated and pruned (not-yet-populated) records
+ const sqlHasBundleRecord = db.prepare(`
+ SELECT count(*)
+ FROM bundles
+ WHERE bundleID = ?
+ `);
+ sqlHasBundleRecord.pluck();
+
+ const sqlPopulateBundleRecord = db.prepare(`
+ UPDATE bundles SET bundle = $serialized WHERE bundleID = $bundleID
+ `);
+
+ function addBundleRecord(bundleID) {
+ ensureTxn();
+ sqlAddBundleRecord.run(bundleID);
+ }
+
+ function populateBundle(bundleID, serialized) {
ensureTxn();
+ sqlHasBundleRecord.get(bundleID) || Fail`missing ${bundleID}`;
+ sqlPopulateBundleRecord.run({ bundleID, serialized });
+ }
+
+ function serializeBundle(bundleID, bundle) {
const { moduleFormat } = bundle;
let serialized;
if (bundleID.startsWith('b0-')) {
@@ -98,19 +132,55 @@ export function makeBundleStore(db, ensureTxn, noteExport = () => {}) {
} else {
throw Fail`unsupported BundleID ${bundleID}`;
}
- sqlAddBundle.run(bundleID, serialized);
+ return serialized;
+ }
+
+ /**
+ * Store a complete bundle in a single operation, used by runtime
+ * (i.e. not an import). We rely upon the caller to provide a
+ * correct bundle (e.g. no unexpected properties), but we still
+ * check the ID against the contents.
+ *
+ * @param {string} bundleID
+ * @param {Bundle} bundle
+ */
+ function addBundle(bundleID, bundle) {
+ const serialized = serializeBundle(bundleID, bundle);
+ addBundleRecord(bundleID);
+ populateBundle(bundleID, serialized);
noteExport(bundleArtifactName(bundleID), bundleID);
}
- const sqlHasBundle = db.prepare(`
+ const sqlGetPrunedBundles = db.prepare(`
+ SELECT bundleID
+ FROM bundles
+ WHERE bundle IS NULL
+ ORDER BY bundleID
+ `);
+ sqlGetPrunedBundles.pluck();
+
+ function getPrunedBundles() {
+ return sqlGetPrunedBundles.all();
+ }
+
+ function assertComplete(checkMode) {
+ assert(checkMode !== 'debug', checkMode);
+ const pruned = getPrunedBundles();
+ if (pruned.length) {
+ throw Fail`missing bundles for: ${pruned.join(',')}`;
+ }
+ }
+
+ const sqlHasPopulatedBundle = db.prepare(`
SELECT count(*)
FROM bundles
WHERE bundleID = ?
+ AND bundle IS NOT NULL
`);
- sqlHasBundle.pluck(true);
+ sqlHasPopulatedBundle.pluck(true);
function hasBundle(bundleID) {
- const count = sqlHasBundle.get(bundleID);
+ const count = sqlHasPopulatedBundle.get(bundleID);
return count !== 0;
}
@@ -119,15 +189,15 @@ export function makeBundleStore(db, ensureTxn, noteExport = () => {}) {
FROM bundles
WHERE bundleID = ?
`);
- sqlGetBundle.pluck(true);
/**
* @param {string} bundleID
* @returns {Bundle}
*/
function getBundle(bundleID) {
- const rawBundle = sqlGetBundle.get(bundleID);
- rawBundle || Fail`bundle ${q(bundleID)} not found`;
+ const row =
+ sqlGetBundle.get(bundleID) || Fail`bundle ${q(bundleID)} not found`;
+ const rawBundle = row.bundle || Fail`bundle ${q(bundleID)} pruned`;
if (bundleID.startsWith('b0-')) {
return harden(JSON.parse(rawBundle));
} else if (bundleID.startsWith('b1-')) {
@@ -153,6 +223,31 @@ export function makeBundleStore(db, ensureTxn, noteExport = () => {}) {
}
}
+ // take an export-data record (id/hash but not bundle contents) and
+ // insert something in the DB
+ function importBundleRecord(key, value) {
+ const bundleID = bundleIDFromName(key);
+ assert.equal(bundleID, value);
+ addBundleRecord(bundleID);
+ }
+
+ function repairBundleRecord(key, value) {
+ // Bundle records have no metadata, and all bundles must be
+ // present (there's no notion of "historical bundle"). So there's
+ // no "repair", and if the repair process supplies a bundle record
+ // that isn't already present, we throw an error. The repair
+ // process doesn't get artifacts, so adding a new record here
+ // would fail the subsequent completeness check anyways.
+
+ const bundleID = bundleIDFromName(key);
+ assert.equal(bundleID, value);
+ if (sqlHasBundleRecord.get(bundleID)) {
+ // record is present, there's no metadata to mismatch, so ignore quietly
+ return;
+ }
+ throw Fail`unexpected new bundle record for ${bundleID} during repair`;
+ }
+
/**
* Read a bundle and return it as a stream of data suitable for export to
* another store.
@@ -166,14 +261,10 @@ export function makeBundleStore(db, ensureTxn, noteExport = () => {}) {
 * @returns {AsyncIterableIterator<Uint8Array>}
*/
async function* exportBundle(name) {
- typeof name === 'string' || Fail`artifact name must be a string`;
- const parts = name.split('.');
- const [type, bundleID] = parts;
- // prettier-ignore
- (parts.length === 2 && type === 'bundle') ||
- Fail`expected artifact name of the form 'bundle.{bundleID}', saw ${q(name)}`;
- const rawBundle = sqlGetBundle.get(bundleID);
- rawBundle || Fail`bundle ${q(name)} not available`;
+ const bundleID = bundleIDFromName(name);
+ const row =
+ sqlGetBundle.get(bundleID) || Fail`bundle ${q(bundleID)} not found`;
+ const rawBundle = row.bundle || Fail`bundle ${q(bundleID)} pruned`;
yield* Readable.from(Buffer.from(rawBundle));
}
@@ -209,23 +300,17 @@ export function makeBundleStore(db, ensureTxn, noteExport = () => {}) {
}
/**
- * @param {string} name Artifact name of the bundle
- * @param {SwingStoreExporter} exporter Whence to get the bits
- * @param {string} bundleID Bundle ID of the bundle
+ * Call addBundleRecord() first, then this importBundle() will
+ * populate the record.
+ *
+ * @param {string} name Artifact name, `bundle.${bundleID}`
+ * @param {() => Promise<Buffer>} dataProvider Function to get bundle bytes
 * @returns {Promise<void>}
*/
- async function importBundle(name, exporter, bundleID) {
+ async function importBundle(name, dataProvider) {
await 0; // no synchronous prefix
- const parts = name.split('.');
- const [type, bundleIDkey] = parts;
- // prettier-ignore
- parts.length === 2 && type === 'bundle' ||
- Fail`expected artifact name of the form 'bundle.{bundleID}', saw '${q(name)}'`;
- bundleIDkey === bundleID ||
- Fail`bundle artifact name ${name} doesn't match bundleID ${bundleID}`;
- const artifactChunks = exporter.getArtifact(name);
- const inStream = Readable.from(artifactChunks);
- const data = await buffer(inStream);
+ const bundleID = bundleIDFromName(name);
+ const data = await dataProvider();
if (bundleID.startsWith('b0-')) {
// we dissect and reassemble the bundle, to exclude unexpected properties
const { moduleFormat, source, sourceMap } = JSON.parse(data.toString());
@@ -234,7 +319,7 @@ export function makeBundleStore(db, ensureTxn, noteExport = () => {}) {
const serialized = JSON.stringify(bundle);
bundleID === bundleIdFromHash(0, createSHA256(serialized).finish()) ||
Fail`bundleID ${q(bundleID)} does not match bundle artifact`;
- addBundle(bundleID, bundle);
+ populateBundle(bundleID, serialized);
} else if (bundleID.startsWith('b1-')) {
/** @type {EndoZipBase64Bundle} */
const bundle = harden({
@@ -245,7 +330,7 @@ export function makeBundleStore(db, ensureTxn, noteExport = () => {}) {
// Assert that the bundle contents match the ID and hash
// eslint-disable-next-line @jessie.js/no-nested-await
await checkBundle(bundle, computeSha512, bundleID);
- addBundle(bundleID, bundle);
+ populateBundle(bundleID, serializeBundle(bundleID, bundle));
} else {
Fail`unsupported BundleID ${q(bundleID)}`;
}
@@ -265,7 +350,7 @@ export function makeBundleStore(db, ensureTxn, noteExport = () => {}) {
const dump = {};
for (const row of sql.iterate()) {
const { bundleID, bundle } = row;
- dump[bundleID] = encodeBase64(bundle);
+ dump[bundleID] = encodeBase64(Buffer.from(bundle, 'utf-8'));
}
return dump;
}
@@ -282,15 +367,20 @@ export function makeBundleStore(db, ensureTxn, noteExport = () => {}) {
}
return harden({
+ importBundleRecord,
+ importBundle,
+ assertComplete,
+
addBundle,
hasBundle,
getBundle,
deleteBundle,
+
getExportRecords,
getArtifactNames,
exportBundle,
- importBundle,
getBundleIDs,
+ repairBundleRecord,
dumpBundles,
});
diff --git a/packages/swing-store/src/exporter.js b/packages/swing-store/src/exporter.js
new file mode 100644
index 00000000000..777340c4d0f
--- /dev/null
+++ b/packages/swing-store/src/exporter.js
@@ -0,0 +1,181 @@
+import sqlite3 from 'better-sqlite3';
+
+import { Fail, q } from '@agoric/assert';
+
+import { dbFileInDirectory } from './util.js';
+import { getKeyType } from './kvStore.js';
+import { makeBundleStore } from './bundleStore.js';
+import { makeSnapStore } from './snapStore.js';
+import { makeSnapStoreIO } from './snapStoreIO.js';
+import { makeTranscriptStore } from './transcriptStore.js';
+import { assertComplete } from './assertComplete.js';
+import { validateArtifactMode } from './internal.js';
+
+/**
+ * @template T
+ * @typedef { Iterable<T> | AsyncIterable<T> } AnyIterable
+ */
+/**
+ * @template T
+ * @typedef { IterableIterator<T> | AsyncIterableIterator<T> } AnyIterableIterator
+ */
+
+/**
+ *
+ * @typedef {readonly [
+ * key: string,
+ * value?: string | null | undefined,
+ * ]} KVPair
+ *
+ * @typedef {object} SwingStoreExporter
+ *
+ * Allows export of data from a swingStore as a fixed view onto the content as
+ * of the most recent commit point at the time the exporter was created. The
+ * exporter may be used while another SwingStore instance is active for the same
+ * DB, possibly in another thread or process. It guarantees that regardless of
+ * the concurrent activity of other swingStore instances, the data representing
+ * the commit point will stay consistent and available.
+ *
+ * @property {() => AnyIterableIterator<KVPair>} getExportData
+ *
+ * Get a full copy of the first-stage export data (key-value pairs) from the
+ * swingStore. This represents both the contents of the KVStore (excluding host
+ * and local prefixes), as well as any data needed to validate all artifacts,
+ * both current and historical. As such it represents the root of trust for the
+ * application.
+ *
+ * Content of validation data (with supporting entries for indexing):
+ * - kv.${key} = ${value} // ordinary kvStore data entry
+ * - snapshot.${vatID}.${snapPos} = ${{ vatID, snapPos, hash }};
+ * - snapshot.${vatID}.current = `snapshot.${vatID}.${snapPos}`
+ * - transcript.${vatID}.${startPos} = ${{ vatID, startPos, endPos, hash }}
+ * - transcript.${vatID}.current = ${{ vatID, startPos, endPos, hash }}
+ *
+ * @property {() => AnyIterableIterator<string>} getArtifactNames
+ *
+ * Get a list of name of artifacts available from the swingStore. A name
+ * returned by this method guarantees that a call to `getArtifact` on the same
+ * exporter instance will succeed. The `artifactMode` option to
+ * `makeSwingStoreExporter` controls the filtering of the artifact names
+ * yielded.
+ *
+ * Artifact names:
+ * - transcript.${vatID}.${startPos}.${endPos}
+ * - snapshot.${vatID}.${snapPos}
+ * - bundle.${bundleID}
+ *
+ * @property {(name: string) => AnyIterableIterator<Uint8Array>} getArtifact
+ *
+ * Retrieve an artifact by name as a sequence of binary chunks. May throw if
+ * the artifact is not available, which can occur if the artifact is historical
+ * and wasn't preserved.
+ *
+ * @property {() => Promise<void>} close
+ *
+ * Dispose of all resources held by this exporter. Any further operation on this
+ * exporter or its outstanding iterators will fail.
+ */
+
+/**
+ * @typedef { object } ExportSwingStoreOptions
+ * @property { import('./internal.js').ArtifactMode } [artifactMode] What artifacts should/must the exporter provide?
+ */
+
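+// Typical consumption, as a sketch (error handling elided; the path is
+// hypothetical):
+//
+//   const exporter = makeSwingStoreExporter('/path/to/swingstore');
+//   for await (const [key, value] of exporter.getExportData()) {
+//     // ... record first-stage export data
+//   }
+//   for await (const name of exporter.getArtifactNames()) {
+//     for await (const chunk of exporter.getArtifact(name)) {
+//       // ... append chunk to an artifact file named `name`
+//     }
+//   }
+//   await exporter.close();
+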
+/**
+ * @param {string} dirPath
+ * @param { ExportSwingStoreOptions } [options]
+ * @returns {SwingStoreExporter}
+ */
+export function makeSwingStoreExporter(dirPath, options = {}) {
+ typeof dirPath === 'string' || Fail`dirPath must be a string`;
+ const { artifactMode = 'operational' } = options;
+ validateArtifactMode(artifactMode);
+
+ const filePath = dbFileInDirectory(dirPath);
+ const db = sqlite3(filePath);
+
+ // Execute the data export in a (read) transaction, to ensure that we are
+ // capturing the state of the database at a single point in time. Our close()
+ // will ROLLBACK the txn just in case some bug tried to change the DB.
+ const sqlBeginTransaction = db.prepare('BEGIN TRANSACTION');
+ sqlBeginTransaction.run();
+
+ // ensureTxn can be a dummy, we just started one
+ const ensureTxn = () => {};
+ const snapStore = makeSnapStore(db, ensureTxn, makeSnapStoreIO());
+ const bundleStore = makeBundleStore(db, ensureTxn);
+ const transcriptStore = makeTranscriptStore(db, ensureTxn, () => {});
+
+ if (artifactMode !== 'debug') {
+ // throw early if this DB will not be able to create all the desired artifacts
+ const internal = { snapStore, bundleStore, transcriptStore };
+ assertComplete(internal, artifactMode);
+ }
+
+ const sqlGetAllKVData = db.prepare(`
+ SELECT key, value
+ FROM kvStore
+ ORDER BY key
+ `);
+
+ /**
+ * @returns {AsyncIterableIterator<KVPair>}
+ * @yields {KVPair}
+ */
+ async function* getExportData() {
+ for (const { key, value } of sqlGetAllKVData.iterate()) {
+ if (getKeyType(key) === 'consensus') {
+ yield [`kv.${key}`, value];
+ }
+ }
+ yield* snapStore.getExportRecords(true);
+ yield* transcriptStore.getExportRecords(true);
+ yield* bundleStore.getExportRecords();
+ }
+
+ /**
+ * @returns {AsyncIterableIterator<string>}
+ * @yields {string}
+ */
+ async function* getArtifactNames() {
+ yield* snapStore.getArtifactNames(artifactMode);
+ yield* transcriptStore.getArtifactNames(artifactMode);
+ yield* bundleStore.getArtifactNames();
+ }
+
+ /**
+ * @param {string} name
+ * @returns {AsyncIterableIterator<Uint8Array>}
+ */
+ function getArtifact(name) {
+ typeof name === 'string' || Fail`artifact name must be a string`;
+ const [type] = name.split('.', 1);
+
+ if (type === 'snapshot') {
+ return snapStore.exportSnapshot(name);
+ } else if (type === 'transcript') {
+ return transcriptStore.exportSpan(name);
+ } else if (type === 'bundle') {
+ return bundleStore.exportBundle(name);
+ } else {
+ throw Fail`invalid type in artifact name ${q(name)}`;
+ }
+ }
+
+ const sqlAbort = db.prepare('ROLLBACK');
+
+ async function close() {
+ // After all the data has been extracted, always abort the export
+ // transaction to ensure that the export was read-only (i.e., that no bugs
+ // inadvertently modified the database).
+ sqlAbort.run();
+ db.close();
+ }
+
+ return harden({
+ getExportData,
+ getArtifactNames,
+ getArtifact,
+ close,
+ });
+}
diff --git a/packages/swing-store/src/importer.js b/packages/swing-store/src/importer.js
new file mode 100644
index 00000000000..a6cedb25e5d
--- /dev/null
+++ b/packages/swing-store/src/importer.js
@@ -0,0 +1,125 @@
+import { Fail, q } from '@agoric/assert';
+
+import { makeSwingStore } from './swingStore.js';
+import { buffer } from './util.js';
+import { validateArtifactMode } from './internal.js';
+import { assertComplete } from './assertComplete.js';
+
+/**
+ * @typedef { object } ImportSwingStoreOptions
+ * @property { import('./internal.js').ArtifactMode } [artifactMode] What artifacts should the importer use and require?
+ */
+
+/**
+ * Function used to create a new swingStore from an object implementing the
+ * exporter API. The exporter API may be provided by a swingStore instance, or
+ * implemented by a host to restore data that was previously exported.
+ *
+ * @param {import('./exporter').SwingStoreExporter} exporter
+ * @param {string | null} [dirPath]
+ * @param {ImportSwingStoreOptions} [options]
+ * @returns {Promise<import('./swingStore.js').SwingStore>}
+ */
+export async function importSwingStore(exporter, dirPath = null, options = {}) {
+ if (dirPath && typeof dirPath !== 'string') {
+ Fail`dirPath must be a string`;
+ }
+ const { artifactMode = 'operational', ...makeSwingStoreOptions } = options;
+ validateArtifactMode(artifactMode);
+
+ const store = makeSwingStore(dirPath, true, makeSwingStoreOptions);
+ const { kernelStorage, internal } = store;
+
+ // For every exportData entry, we add a DB record. 'kv' entries are
+ // the "kvStore shadow table", and are not associated with any
+ // artifacts. All other entries are associated with an artifact,
+ // however the import may or may not contain that artifact (the
+ // dataset can be incomplete: either the original DB was pruned at
+ // some point, or the exporter did not choose to include
+ // everything). The DB records we add are marked as incomplete (as
+ // if they had been pruned locally), and can be populated later when
+ // the artifact is retrieved.
+
+ // While unlikely, the getExportData() protocol *is* allowed to
+ // deliver multiple values for the same key (last one wins), or use
+ // 'null' to delete a previously-defined key. So our first pass both
+ // installs the kvStore shadow records, and de-dups/deletes the
+ // metadata records into this Map.
+
+ const allMetadata = new Map();
+
+ for await (const [key, value] of exporter.getExportData()) {
+ const [tag] = key.split('.', 1);
+ if (tag === 'kv') {
+ // 'kv' keys contain individual kvStore entries
+ const subKey = key.substring(tag.length + 1);
+ if (value == null) {
+ // Note '==' rather than '===': any nullish value implies deletion
+ kernelStorage.kvStore.delete(subKey);
+ } else {
+ kernelStorage.kvStore.set(subKey, value);
+ }
+ } else if (value == null) {
+ allMetadata.delete(key);
+ } else {
+ allMetadata.set(key, value);
+ }
+ }
+
+ // Now take each metadata record and install the stub/pruned entry
+ // into the DB.
+
+ for (const [key, value] of allMetadata.entries()) {
+ const [tag] = key.split('.', 1);
+ if (tag === 'bundle') {
+ internal.bundleStore.importBundleRecord(key, value);
+ } else if (tag === 'snapshot') {
+ internal.snapStore.importSnapshotRecord(key, value);
+ } else if (tag === 'transcript') {
+ internal.transcriptStore.importTranscriptSpanRecord(key, value);
+ } else {
+ Fail`unknown export-data type ${q(tag)} on import`;
+ }
+ }
+
+ // All the metadata is now installed, and we're prepared for
+ // artifacts. We walk `getArtifactNames()` and offer each one to the
+ // submodule, which may ignore it according to `artifactMode`, but
+ // otherwise validates and accepts it. This is an initial import, so
+ // we don't need to check if we already have the data, but the
+ // submodule function is free to do such checks.
+
+ for await (const name of exporter.getArtifactNames()) {
+ const makeChunkIterator = () => exporter.getArtifact(name);
+ const dataProvider = async () => buffer(makeChunkIterator());
+ const [tag] = name.split('.', 1);
+ // TODO: pass the same args to all artifact importers, and let
+ // stores register their functions by
+ // 'type'. https://github.com/Agoric/agoric-sdk/pull/8075#discussion_r1285265453
+ if (tag === 'bundle') {
+ await internal.bundleStore.importBundle(name, dataProvider);
+ } else if (tag === 'snapshot') {
+ await internal.snapStore.populateSnapshot(name, makeChunkIterator, {
+ artifactMode,
+ });
+ } else if (tag === 'transcript') {
+ await internal.transcriptStore.populateTranscriptSpan(
+ name,
+ makeChunkIterator,
+ { artifactMode },
+ );
+ } else {
+ Fail`unknown artifact type ${q(tag)} on import`;
+ }
+ }
+
+ // We've installed all the artifacts that we could, now do a
+ // completeness check. Enforce at least 'operational' completeness,
+ // even if the given mode was 'debug'.
+
+ const checkMode = artifactMode === 'debug' ? 'operational' : artifactMode;
+ assertComplete(internal, checkMode);
+
+ await exporter.close();
+ return store;
+}
diff --git a/packages/swing-store/src/index.js b/packages/swing-store/src/index.js
new file mode 100644
index 00000000000..f2144bc43ee
--- /dev/null
+++ b/packages/swing-store/src/index.js
@@ -0,0 +1,11 @@
+export { initSwingStore, openSwingStore, isSwingStore } from './swingStore.js';
+export { makeSwingStoreExporter } from './exporter.js';
+export { importSwingStore } from './importer.js';
+
+// temporary, for the benefit of SwingSet/misc-tools/replay-transcript.js
+export { makeSnapStore } from './snapStore.js';
+// and less temporary, for SwingSet/test/vat-warehouse/test-reload-snapshot.js
+export { makeSnapStoreIO } from './snapStoreIO.js';
+
+// eslint-disable-next-line import/export
+export * from './types.js';
diff --git a/packages/swing-store/src/internal.js b/packages/swing-store/src/internal.js
new file mode 100644
index 00000000000..18ece829261
--- /dev/null
+++ b/packages/swing-store/src/internal.js
@@ -0,0 +1,22 @@
+import { Fail, q } from '@agoric/assert';
+
+/**
+ * @typedef { import('./snapStore').SnapStoreInternal } SnapStoreInternal
+ * @typedef { import('./transcriptStore').TranscriptStoreInternal } TranscriptStoreInternal
+ * @typedef { import('./bundleStore').BundleStoreInternal } BundleStoreInternal
+ *
+ * @typedef {{
+ * transcriptStore: TranscriptStoreInternal,
+ * snapStore: SnapStoreInternal,
+ * bundleStore: BundleStoreInternal,
+ * }} SwingStoreInternal
+ *
+ * @typedef {'operational' | 'replay' | 'archival' | 'debug'} ArtifactMode
+ */
+
+export const artifactModes = ['operational', 'replay', 'archival', 'debug'];
+export function validateArtifactMode(artifactMode) {
+ if (!artifactModes.includes(artifactMode)) {
+ Fail`invalid artifactMode ${q(artifactMode)}`;
+ }
+}
diff --git a/packages/swing-store/src/kvStore.js b/packages/swing-store/src/kvStore.js
new file mode 100644
index 00000000000..bf3e80e740c
--- /dev/null
+++ b/packages/swing-store/src/kvStore.js
@@ -0,0 +1,172 @@
+// @ts-check
+import { Fail } from '@agoric/assert';
+
+/**
+ * @typedef {{
+ * has: (key: string) => boolean,
+ * get: (key: string) => string | undefined,
+ * getNextKey: (previousKey: string) => string | undefined,
+ * set: (key: string, value: string, bypassHash?: boolean ) => void,
+ * delete: (key: string) => void,
+ * }} KVStore
+ */
+
+/**
+ * @param {string} key
+ */
+export function getKeyType(key) {
+ if (key.startsWith('local.')) {
+ return 'local';
+ } else if (key.startsWith('host.')) {
+ return 'host';
+ }
+ return 'consensus';
+}
+
+/**
+ * @param {object} db The SQLite database connection.
+ * @param {() => void} ensureTxn Called before mutating methods to establish a DB transaction
+ * @param {(...args: string[]) => void} trace Called after sets/gets to record a debug log
+ * @returns { KVStore }
+ */
+
+export function makeKVStore(db, ensureTxn, trace) {
+ db.exec(`
+ CREATE TABLE IF NOT EXISTS kvStore (
+ key TEXT,
+ value TEXT,
+ PRIMARY KEY (key)
+ )
+ `);
+
+ const sqlKVGet = db.prepare(`
+ SELECT value
+ FROM kvStore
+ WHERE key = ?
+ `);
+ sqlKVGet.pluck(true);
+
+ /**
+ * Obtain the value stored for a given key.
+ *
+ * @param {string} key The key whose value is sought.
+ *
+ * @returns {string | undefined} the (string) value for the given key, or
+ * undefined if there is no such value.
+ *
+ * @throws if key is not a string.
+ */
+ function get(key) {
+ typeof key === 'string' || Fail`key must be a string`;
+ return sqlKVGet.get(key);
+ }
+
+ const sqlKVGetNextKey = db.prepare(`
+ SELECT key
+ FROM kvStore
+ WHERE key > ?
+ LIMIT 1
+ `);
+ sqlKVGetNextKey.pluck(true);
+
+ /**
+ * getNextKey enables callers to iterate over all keys within a
+ * given range. To build an iterator of all keys from start
+ * (inclusive) to end (exclusive), do:
+ *
+ * function* iterate(start, end) {
+ * if (kvStore.has(start)) {
+ * yield start;
+ * }
+ * let prev = start;
+ * while (true) {
+ * let next = kvStore.getNextKey(prev);
+ * if (!next || next >= end) {
+ * break;
+ * }
+ * yield next;
+ * prev = next;
+ * }
+ * }
+ *
+ * @param {string} previousKey The key returned will always be later than this one.
+ *
+ * @returns {string | undefined} a key string, or undefined if we reach the end of the store
+ *
+ * @throws if previousKey is not a string
+ */
+
+ function getNextKey(previousKey) {
+ typeof previousKey === 'string' || Fail`previousKey must be a string`;
+ return sqlKVGetNextKey.get(previousKey);
+ }
+
+ /**
+ * Test if the state contains a value for a given key.
+ *
+ * @param {string} key The key that is of interest.
+ *
+ * @returns {boolean} true if a value is stored for the key, false if not.
+ *
+ * @throws if key is not a string.
+ */
+ function has(key) {
+ typeof key === 'string' || Fail`key must be a string`;
+ return get(key) !== undefined;
+ }
+
+ const sqlKVSet = db.prepare(`
+ INSERT INTO kvStore (key, value)
+ VALUES (?, ?)
+ ON CONFLICT DO UPDATE SET value = excluded.value
+ `);
+
+ /**
+ * Store a value for a given key. The value will replace any prior value if
+ * there was one.
+ *
+ * @param {string} key The key whose value is being set.
+ * @param {string} value The value to set the key to.
+ *
+ * @throws if either parameter is not a string.
+ */
+ function set(key, value) {
+ typeof key === 'string' || Fail`key must be a string`;
+ typeof value === 'string' || Fail`value must be a string`;
+ // synchronous read after write within a transaction is safe
+ // The transaction's overall success will be awaited during commit
+ ensureTxn();
+ sqlKVSet.run(key, value);
+ trace('set', key, value);
+ }
+
+ const sqlKVDel = db.prepare(`
+ DELETE FROM kvStore
+ WHERE key = ?
+ `);
+
+ /**
+ * Remove any stored value for a given key. It is permissible for there to
+ * be no existing stored value for the key.
+ *
+ * @param {string} key The key whose value is to be deleted
+ *
+ * @throws if key is not a string.
+ */
+ function del(key) {
+ typeof key === 'string' || Fail`key must be a string`;
+ ensureTxn();
+ sqlKVDel.run(key);
+ trace('del', key);
+ }
+
+ const kvStore = {
+ has,
+ get,
+ getNextKey,
+ set,
+ delete: del,
+ };
+
+ return kvStore;
+}
diff --git a/packages/swing-store/src/repairMetadata.js b/packages/swing-store/src/repairMetadata.js
new file mode 100644
index 00000000000..8ebc53fc4d7
--- /dev/null
+++ b/packages/swing-store/src/repairMetadata.js
@@ -0,0 +1,67 @@
+import { Fail, q } from '@agoric/assert';
+import { assertComplete } from './assertComplete.js';
+
+/**
+ * Given a pre-existing swingstore and a SwingStoreExporter, read in
+ * all the metadata from the exporter and use it to regenerate any
+ * missing metadata records. This can be used to fix the damage caused
+ * by #8025.
+ *
+ * The repair method will call `exporter.getExportData` and examine
+ * all entries to do one of three things:
+ *
+ * 1: kvStore records are ignored (they are not metadata)
+ * 2: bundle/snapshot/transcript records whose keys already exist will
+ * be compared against the existing data, and an error thrown if
+ * they do not match
+ * 3: new snapshot/transcript records will be silently added to
+ * the swingstore (new bundle records are an error, since we do not
+ * tolerate pruned bundles)
+ *
+ * It will not call `exporter.getArtifactNames` or `getArtifacts`.
+ *
+ * At the end of the process, the DB will contain pending changes in
+ * an open transaction. The caller is responsible for calling
+ * `hostStorage.commit()` when they are ready.
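+ *
+ * A host-side usage sketch (the `repairMetadata` facet on
+ * `hostStorage`, added elsewhere in this PR, wraps this function):
+ *
+ *   const exporter = makeSwingStoreExporter(exportDirPath);
+ *   await hostStorage.repairMetadata(exporter);
+ *   await hostStorage.commit();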
+ *
+ * @param {import('./internal.js').SwingStoreInternal} internal
+ * @param {import('./exporter').SwingStoreExporter} exporter
+ * @returns {Promise<void>}
+ */
+export async function doRepairMetadata(internal, exporter) {
+ // first we strip kvStore entries and deduplicate the rest
+
+ const allMetadata = new Map();
+
+ for await (const [key, value] of exporter.getExportData()) {
+ const [tag] = key.split('.', 1);
+ if (tag === 'kv') {
+ continue;
+ } else if (value == null) {
+ allMetadata.delete(key);
+ } else {
+ allMetadata.set(key, value);
+ }
+ }
+
+ // then process the metadata records
+
+ for (const [key, value] of allMetadata.entries()) {
+ const [tag] = key.split('.', 1);
+ if (tag === 'bundle') {
+ internal.bundleStore.repairBundleRecord(key, value);
+ } else if (tag === 'snapshot') {
+ internal.snapStore.repairSnapshotRecord(key, value);
+ } else if (tag === 'transcript') {
+ internal.transcriptStore.repairTranscriptSpanRecord(key, value);
+ } else {
+ Fail`unknown export-data type in key ${q(key)} on repairMetadata`;
+ }
+ }
+
+ // and do a completeness check
+ /** @type { import('./internal.js').ArtifactMode } */
+ const artifactMode = 'operational';
+ assertComplete(internal, artifactMode);
+ await exporter.close();
+}
diff --git a/packages/swing-store/src/snapStore.js b/packages/swing-store/src/snapStore.js
index b3862fa0784..a201d3a5405 100644
--- a/packages/swing-store/src/snapStore.js
+++ b/packages/swing-store/src/snapStore.js
@@ -25,7 +25,13 @@ import { buffer } from './util.js';
*/
/**
- * @typedef { import('./swingStore').SwingStoreExporter } SwingStoreExporter
+ * @template T
+ * @typedef { import('./exporter').AnyIterableIterator<T> } AnyIterableIterator
+ */
+
+/**
+ * @typedef { import('./exporter').SwingStoreExporter } SwingStoreExporter
+ * @typedef { import('./internal.js').ArtifactMode } ArtifactMode
*
* @typedef {{
 * loadSnapshot: (vatID: string) => AsyncIterableIterator<Uint8Array>,
@@ -37,10 +43,13 @@ import { buffer } from './util.js';
* }} SnapStore
*
* @typedef {{
- * exportSnapshot: (name: string, includeHistorical: boolean) => AsyncIterableIterator<Uint8Array>,
- * importSnapshot: (artifactName: string, exporter: SwingStoreExporter, artifactMetadata: Map) => void,
+ * exportSnapshot: (name: string) => AsyncIterableIterator<Uint8Array>,
 * getExportRecords: (includeHistorical: boolean) => IterableIterator<readonly [key: string, value: string]>,
- * getArtifactNames: (includeHistorical: boolean) => AsyncIterableIterator<string>,
+ * getArtifactNames: (artifactMode: ArtifactMode) => AsyncIterableIterator<string>,
+ * importSnapshotRecord: (key: string, value: string) => void,
+ * populateSnapshot: (name: string, makeChunkIterator: () => AnyIterableIterator<Uint8Array>, options: { artifactMode: ArtifactMode }) => Promise<void>,
+ * assertComplete: (checkMode: Omit<ArtifactMode, 'debug'>) => void,
+ * repairSnapshotRecord: (key: string, value: string) => void,
* }} SnapStoreInternal
*
* @typedef {{
@@ -81,11 +90,26 @@ export function makeSnapStore(
compressedSize INTEGER,
compressedSnapshot BLOB,
PRIMARY KEY (vatID, snapPos),
- UNIQUE (vatID, inUse),
- CHECK(compressedSnapshot is not null or inUse is null)
+ UNIQUE (vatID, inUse)
)
`);
+ // NOTE: there are two versions of this schema. The original, which
+ // we'll call "version 1A", has a:
+ // CHECK(compressedSnapshot is not null or inUse is null)
+ // in the table. Version 1B is missing that constraint. Any DB
+ // created by the original code will use 1A. Any DB created by the
+ // new version will use 1B. The import process needs to temporarily
+ // violate that check, but any DB created by `importSwingStore` is
+ // (by definition) new, so it will use 1B, which doesn't enforce the
+ // check. We expect to implement schema migration
+ // (https://github.com/Agoric/agoric-sdk/issues/8089) soon, which
+ // will upgrade both 1A and 1B to "version 2", which will omit the
+ // check (in addition to any other changes we need at that point)
+
+ // pruned snapshots will have compressedSnapshot of NULL, and might
+ // also have NULL for uncompressedSize and compressedSize
+
const sqlDeleteAllUnusedSnapshots = db.prepare(`
DELETE FROM snapshots
WHERE inUse is null
@@ -98,6 +122,12 @@ export function makeSnapStore(
function deleteAllUnusedSnapshots() {
ensureTxn();
sqlDeleteAllUnusedSnapshots.run();
+
+ // NOTE: this is more than pruning the snapshot data, it deletes
+ // the metadata/hash as well, making it impossible to safely
+ // repopulate the snapshot data from an untrusted source. We need
+ // to replace this with a method that merely nulls out the
+ // 'compressedSnapshot' field.
}
function snapshotArtifactName(rec) {
@@ -255,10 +285,9 @@ export function makeSnapStore(
* `snapshot.${vatID}.${startPos}`
*
* @param {string} name
- * @param {boolean} includeHistorical
 * @returns {AsyncIterableIterator<Uint8Array>}
*/
- function exportSnapshot(name, includeHistorical) {
+ function exportSnapshot(name) {
typeof name === 'string' || Fail`artifact name must be a string`;
const parts = name.split('.');
const [type, vatID, pos] = parts;
@@ -268,9 +297,8 @@ export function makeSnapStore(
const snapPos = Number(pos);
const snapshotInfo = sqlGetSnapshot.get(vatID, snapPos);
snapshotInfo || Fail`snapshot ${q(name)} not available`;
- const { inUse, compressedSnapshot } = snapshotInfo;
+ const { compressedSnapshot } = snapshotInfo;
compressedSnapshot || Fail`artifact ${q(name)} is not available`;
- inUse || includeHistorical || Fail`artifact ${q(name)} is not available`;
// weird construct here is because we need to be able to throw before the generator starts
async function* exporter() {
const gzReader = Readable.from(compressedSnapshot);
@@ -412,6 +440,13 @@ export function makeSnapStore(
ORDER BY vatID, snapPos
`);
+ const sqlGetAvailableSnapshots = db.prepare(`
+ SELECT vatID, snapPos, hash, uncompressedSize, compressedSize, inUse
+ FROM snapshots
+ WHERE inUse IS ? AND compressedSnapshot is not NULL
+ ORDER BY vatID, snapPos
+ `);
+
/**
 * Obtain artifact metadata records for snapshots contained in this store.
*
@@ -447,41 +482,113 @@ export function makeSnapStore(
}
}
- async function* getArtifactNames(includeHistorical) {
- for (const rec of sqlGetSnapshotMetadata.iterate(1)) {
+ async function* getArtifactNames(artifactMode) {
+ for (const rec of sqlGetAvailableSnapshots.iterate(1)) {
yield snapshotArtifactName(rec);
}
- if (includeHistorical) {
- for (const rec of sqlGetSnapshotMetadata.iterate(null)) {
+ if (artifactMode === 'debug') {
+ for (const rec of sqlGetAvailableSnapshots.iterate(null)) {
yield snapshotArtifactName(rec);
}
}
}
+ const sqlAddSnapshotRecord = db.prepare(`
+ INSERT INTO snapshots (vatID, snapPos, hash, inUse)
+ VALUES (?, ?, ?, ?)
+ `);
+
+ function importSnapshotRecord(key, value) {
+ ensureTxn();
+ const [tag, ...pieces] = key.split('.');
+ assert.equal(tag, 'snapshot');
+ const [_vatID, endPos] = pieces;
+ if (endPos === 'current') {
+ // metadata['snapshot.v1.current'] = 'snapshot.v1.5' , i.e. it
+ // points to the name of the current artifact. We could
+ // conceivably remember this and compare it against the .inUse
+ // property of that record, but it's not worth the effort (we
+ // might encounter the records in either order).
+ return;
+ }
+ const metadata = JSON.parse(value);
+ const { vatID, snapPos, hash, inUse } = metadata;
+ vatID || Fail`snapshot metadata missing vatID: ${metadata}`;
+ snapPos !== undefined ||
+ Fail`snapshot metadata missing snapPos: ${metadata}`;
+ hash || Fail`snapshot metadata missing hash: ${metadata}`;
+ inUse !== undefined || Fail`snapshot metadata missing inUse: ${metadata}`;
+
+ sqlAddSnapshotRecord.run(vatID, snapPos, hash, inUse ? 1 : null);
+ }
+
+ const sqlGetSnapshotHashFor = db.prepare(`
+ SELECT hash, inUse
+ FROM snapshots
+ WHERE vatID = ? AND snapPos = ?
+ `);
+
+ function repairSnapshotRecord(key, value) {
+ ensureTxn();
+ const [tag, keyVatID, keySnapPos] = key.split('.');
+ assert.equal(tag, 'snapshot');
+ if (keySnapPos === 'current') {
+ // "snapshot.${vatID}.current" entries are meta-metadata: they
+ // point to the metadata key of the current snapshot, to avoid
+ // the need for an expensive search
+ return;
+ }
+ const metadata = JSON.parse(value);
+ const { vatID, snapPos, hash, inUse } = metadata;
+ assert.equal(keyVatID, vatID);
+ assert.equal(Number(keySnapPos), snapPos);
+ const existing = sqlGetSnapshotHashFor.get(vatID, snapPos);
+ if (existing) {
+ if (
+ Boolean(existing.inUse) !== Boolean(inUse) ||
+ existing.hash !== hash
+ ) {
+ throw Fail`repairSnapshotRecord metadata mismatch: ${existing} vs ${metadata}`;
+ }
+ } else {
+ sqlAddSnapshotRecord.run(vatID, snapPos, hash, inUse ? 1 : null);
+ }
+ }
+
+ const sqlPopulateSnapshot = db.prepare(`
+ UPDATE snapshots SET
+ uncompressedSize = ?, compressedSize = ?, compressedSnapshot = ?
+ WHERE vatID = ? AND snapPos = ?
+ `);
+
/**
* @param {string} name Artifact name of the snapshot
- * @param {SwingStoreExporter} exporter Whence to get the bits
- * @param {object} info Metadata describing the artifact
+ * @param {() => AnyIterableIterator<Uint8Array>} makeChunkIterator get an iterator of snapshot byte chunks
+ * @param {object} options
+ * @param {ArtifactMode} options.artifactMode
 * @returns {Promise<void>}
*/
- async function importSnapshot(name, exporter, info) {
+ async function populateSnapshot(name, makeChunkIterator, options) {
+ ensureTxn();
+ const { artifactMode } = options;
const parts = name.split('.');
const [type, vatID, rawEndPos] = parts;
// prettier-ignore
parts.length === 3 && type === 'snapshot' ||
Fail`expected snapshot name of the form 'snapshot.{vatID}.{snapPos}', saw '${q(name)}'`;
- // prettier-ignore
- info.vatID === vatID ||
- Fail`snapshot name says vatID ${q(vatID)}, metadata says ${q(info.vatID)}`;
const snapPos = Number(rawEndPos);
- // prettier-ignore
- info.snapPos === snapPos ||
- Fail`snapshot name says snapPos ${q(snapPos)}, metadata says ${q(info.snapPos)}`;
+ const metadata =
+ sqlGetSnapshotHashFor.get(vatID, snapPos) ||
+ Fail`no metadata for snapshot ${name}`;
- const artifactChunks = exporter.getArtifact(name);
+ if (!metadata.inUse && artifactMode !== 'debug') {
+ return; // ignore old snapshots
+ }
+
+ const artifactChunks = makeChunkIterator();
const inStream = Readable.from(artifactChunks);
- let size = 0;
- inStream.on('data', chunk => (size += chunk.length));
+ let uncompressedSize = 0;
+ inStream.on('data', chunk => (uncompressedSize += chunk.length));
const hashStream = createHash('sha256');
const gzip = createGzip();
inStream.pipe(hashStream);
@@ -489,21 +596,37 @@ export function makeSnapStore(
const compressedArtifact = await buffer(gzip);
await finished(inStream);
const hash = hashStream.digest('hex');
+
+ // validate against the previously-established metadata
// prettier-ignore
- info.hash === hash ||
- Fail`snapshot ${q(name)} hash is ${q(hash)}, metadata says ${q(info.hash)}`;
- ensureTxn();
- sqlSaveSnapshot.run(
- vatID,
- snapPos,
- info.inUse ? 1 : null,
- info.hash,
- size,
+ metadata.hash === hash ||
+ Fail`snapshot ${q(name)} hash is ${q(hash)}, metadata says ${q(metadata.hash)}`;
+
+ sqlPopulateSnapshot.run(
+ uncompressedSize,
compressedArtifact.length,
compressedArtifact,
+ vatID,
+ snapPos,
);
}
+ const sqlListPrunedCurrentSnapshots = db.prepare(`
+ SELECT vatID FROM snapshots
+ WHERE inUse = 1 AND compressedSnapshot IS NULL
+ ORDER BY vatID
+ `);
+ sqlListPrunedCurrentSnapshots.pluck();
+
+ function assertComplete(checkMode) {
+ assert(checkMode !== 'debug', checkMode);
+ // every 'inUse' snapshot must be populated
+ const vatIDs = sqlListPrunedCurrentSnapshots.all();
+ if (vatIDs.length) {
+ throw Fail`current snapshots are pruned for vats ${vatIDs.join(',')}`;
+ }
+ }
+
const sqlListAllSnapshots = db.prepare(`
SELECT vatID, snapPos, inUse, hash, uncompressedSize, compressedSize
FROM snapshots
@@ -563,10 +686,15 @@ export function makeSnapStore(
deleteVatSnapshots,
stopUsingLastSnapshot,
getSnapshotInfo,
+
getExportRecords,
getArtifactNames,
exportSnapshot,
- importSnapshot,
+
+ importSnapshotRecord,
+ populateSnapshot,
+ assertComplete,
+ repairSnapshotRecord,
hasHash,
listAllSnapshots,
diff --git a/packages/swing-store/src/snapStoreIO.js b/packages/swing-store/src/snapStoreIO.js
new file mode 100644
index 00000000000..ddbeb28c64c
--- /dev/null
+++ b/packages/swing-store/src/snapStoreIO.js
@@ -0,0 +1,8 @@
+import { performance } from 'perf_hooks';
+import { makeMeasureSeconds } from '@agoric/internal';
+
+export function makeSnapStoreIO() {
+ return {
+ measureSeconds: makeMeasureSeconds(performance.now),
+ };
+}
diff --git a/packages/swing-store/src/swingStore.js b/packages/swing-store/src/swingStore.js
index 4cf7027d211..368fb1c5a54 100644
--- a/packages/swing-store/src/swingStore.js
+++ b/packages/swing-store/src/swingStore.js
@@ -2,74 +2,34 @@
/* global Buffer */
import fs from 'fs';
import path from 'path';
-import { performance } from 'perf_hooks';
import sqlite3 from 'better-sqlite3';
-import { assert, Fail, q } from '@agoric/assert';
-import { makeMeasureSeconds } from '@agoric/internal';
+import { Fail, q } from '@agoric/assert';
+import { dbFileInDirectory } from './util.js';
+import { makeKVStore, getKeyType } from './kvStore.js';
import { makeTranscriptStore } from './transcriptStore.js';
import { makeSnapStore } from './snapStore.js';
import { makeBundleStore } from './bundleStore.js';
import { createSHA256 } from './hasher.js';
-
-export { makeSnapStore, makeBundleStore };
-
-/**
- * This is a polyfill for the `buffer` function from Node's
- * 'stream/consumers' package, which unfortunately only exists in newer versions
- * of Node.
- *
- * @param {AsyncIterable} inStream
- */
-export const buffer = async inStream => {
- const chunks = [];
- for await (const chunk of inStream) {
- chunks.push(chunk);
- }
- return Buffer.concat(chunks);
-};
-
-export function makeSnapStoreIO() {
- return {
- measureSeconds: makeMeasureSeconds(performance.now),
- };
-}
-
-/**
- * @param {string} key
- */
-function getKeyType(key) {
- if (key.startsWith('local.')) {
- return 'local';
- } else if (key.startsWith('host.')) {
- return 'host';
- }
- return 'consensus';
-}
+import { makeSnapStoreIO } from './snapStoreIO.js';
+import { doRepairMetadata } from './repairMetadata.js';
/**
- * @typedef {{
- * has: (key: string) => boolean,
- * get: (key: string) => string | undefined,
- * getNextKey: (previousKey: string) => string | undefined,
- * set: (key: string, value: string, bypassHash?: boolean ) => void,
- * delete: (key: string) => void,
- * }} KVStore
+ * @typedef { import('./kvStore').KVStore } KVStore
*
* @typedef { import('./snapStore').SnapStore } SnapStore
- * @typedef { import('./snapStore').SnapStoreInternal } SnapStoreInternal
* @typedef { import('./snapStore').SnapshotResult } SnapshotResult
*
* @typedef { import('./transcriptStore').TranscriptStore } TranscriptStore
- * @typedef { import('./transcriptStore').TranscriptStoreInternal } TranscriptStoreInternal
* @typedef { import('./transcriptStore').TranscriptStoreDebug } TranscriptStoreDebug
*
* @typedef { import('./bundleStore').BundleStore } BundleStore
- * @typedef { import('./bundleStore').BundleStoreInternal } BundleStoreInternal
* @typedef { import('./bundleStore').BundleStoreDebug } BundleStoreDebug
*
+ * @typedef { import('./exporter').KVPair } KVPair
+ *
* @typedef {{
* kvStore: KVStore, // a key-value API object to load and store data on behalf of the kernel
* transcriptStore: TranscriptStore, // a stream-oriented API object to append and read transcript entries
@@ -89,6 +49,7 @@ function getKeyType(key) {
 * close: () => Promise<void>, // shutdown the store, abandoning any uncommitted changes
* diskUsage?: () => number, // optional stats method
* setExportCallback: (cb: (updates: KVPair[]) => void) => void, // Set a callback invoked by swingStore when new serializable data is available for export
+ * repairMetadata: (exporter: import('./exporter').SwingStoreExporter) => Promise,
* }} SwingStoreHostStorage
*/
@@ -105,184 +66,13 @@ function getKeyType(key) {
* }} SwingStoreDebugTools
*
* @typedef {{
- * transcriptStore: TranscriptStoreInternal,
- * snapStore: SnapStoreInternal,
- * bundleStore: BundleStoreInternal,
- * }} SwingStoreInternal
- *
- * @typedef {{
* kernelStorage: SwingStoreKernelStorage,
* hostStorage: SwingStoreHostStorage,
* debug: SwingStoreDebugTools,
- * internal: SwingStoreInternal,
+ * internal: import('./internal.js').SwingStoreInternal,
* }} SwingStore
*/
-/**
- * @template T
- * @typedef { Iterable<T> | AsyncIterable<T> } AnyIterable
- */
-/**
- * @template T
- * @typedef { IterableIterator<T> | AsyncIterableIterator<T> } AnyIterableIterator
- */
-
-/**
- * @typedef {readonly [
- * key: string,
- * value?: string | null | undefined,
- * ]} KVPair
- *
- * @typedef {object} SwingStoreExporter
- *
- * Allows export of data from a swingStore as a fixed view onto the content as
- * of the most recent commit point at the time the exporter was created. The
- * exporter may be used while another SwingStore instance is active for the same
- * DB, possibly in another thread or process. It guarantees that regardless of
- * the concurrent activity of other swingStore instances, the data representing
- * the commit point will stay consistent and available.
- *
- * @property {() => AnyIterableIterator<KVPair>} getExportData
- *
- * Get a full copy of the first-stage export data (key-value pairs) from the
- * swingStore. This represents both the contents of the KVStore (excluding host
- * and local prefixes), as well as any data needed to validate all artifacts,
- * both current and historical. As such it represents the root of trust for the
- * application.
- *
- * Content of validation data (with supporting entries for indexing):
- * - kv.${key} = ${value} // ordinary kvStore data entry
- * - snapshot.${vatID}.${snapPos} = ${{ vatID, snapPos, hash });
- * - snapshot.${vatID}.current = `snapshot.${vatID}.${snapPos}`
- * - transcript.${vatID}.${startPos} = ${{ vatID, startPos, endPos, hash }}
- * - transcript.${vatID}.current = ${{ vatID, startPos, endPos, hash }}
- *
- * @property {() => AnyIterableIterator<string>} getArtifactNames
- *
- * Get a list of name of artifacts available from the swingStore. A name returned
- * by this method guarantees that a call to `getArtifact` on the same exporter
- * instance will succeed. Options control the filtering of the artifact names
- * yielded.
- *
- * Artifact names:
- * - transcript.${vatID}.${startPos}.${endPos}
- * - snapshot.${vatID}.${snapPos}
- *
- * @property {(name: string) => AnyIterableIterator<Uint8Array>} getArtifact
- *
- * Retrieve an artifact by name. May throw if the artifact is not available,
- * which can occur if the artifact is historical and wasn't been preserved.
- *
- * @property {() => Promise<void>} close
- *
- * Dispose of all resources held by this exporter. Any further operation on this
- * exporter or its outstanding iterators will fail.
- */
-
-/**
- * @param {string} dirPath
- * @param {string} exportMode
- * @returns {SwingStoreExporter}
- */
-export function makeSwingStoreExporter(dirPath, exportMode = 'current') {
- typeof dirPath === 'string' || Fail`dirPath must be a string`;
- exportMode === 'current' ||
- exportMode === 'archival' ||
- exportMode === 'debug' ||
- Fail`invalid exportMode ${q(exportMode)}`;
- const exportHistoricalSnapshots = exportMode === 'debug';
- const exportHistoricalTranscripts = exportMode !== 'current';
- const filePath = path.join(dirPath, 'swingstore.sqlite');
- const db = sqlite3(filePath);
-
- // Execute the data export in a (read) transaction, to ensure that we are
- // capturing the state of the database at a single point in time.
- const sqlBeginTransaction = db.prepare('BEGIN TRANSACTION');
- sqlBeginTransaction.run();
-
- // ensureTxn can be a dummy, we just started one
- const ensureTxn = () => {};
- const snapStore = makeSnapStore(db, ensureTxn, makeSnapStoreIO());
- const bundleStore = makeBundleStore(db, ensureTxn);
- const transcriptStore = makeTranscriptStore(db, ensureTxn, () => {});
-
- const sqlGetAllKVData = db.prepare(`
- SELECT key, value
- FROM kvStore
- ORDER BY key
- `);
-
- /**
- * @returns {AsyncIterableIterator<KVPair>}
- * @yields {KVPair}
- */
- async function* getExportData() {
- const kvPairs = sqlGetAllKVData.iterate();
- for (const kv of kvPairs) {
- if (getKeyType(kv.key) === 'consensus') {
- yield [`kv.${kv.key}`, kv.value];
- }
- }
- yield* snapStore.getExportRecords(true);
- yield* transcriptStore.getExportRecords(true);
- yield* bundleStore.getExportRecords();
- }
-
- /**
- * @returns {AsyncIterableIterator<string>}
- * @yields {string}
- */
- async function* getArtifactNames() {
- yield* snapStore.getArtifactNames(exportHistoricalSnapshots);
- yield* transcriptStore.getArtifactNames(exportHistoricalTranscripts);
- yield* bundleStore.getArtifactNames();
- }
-
- /**
- * @param {string} name
- * @returns {AsyncIterableIterator<Uint8Array>}
- */
- function getArtifact(name) {
- typeof name === 'string' || Fail`artifact name must be a string`;
- const [type] = name.split('.', 1);
-
- if (type === 'snapshot') {
- return snapStore.exportSnapshot(name, exportHistoricalSnapshots);
- } else if (type === 'transcript') {
- return transcriptStore.exportSpan(name, exportHistoricalTranscripts);
- } else if (type === 'bundle') {
- return bundleStore.exportBundle(name);
- } else {
- throw Fail`invalid artifact type ${q(type)}`;
- }
- }
-
- const sqlAbort = db.prepare('ROLLBACK');
-
- async function close() {
- // After all the data has been extracted, always abort the export
- // transaction to ensure that the export was read-only (i.e., that no bugs
- // inadvertantly modified the database).
- sqlAbort.run();
- db.close();
- }
-
- return harden({
- getExportData,
- getArtifactNames,
- getArtifact,
- close,
- });
-}
-
-/**
- * Function used to create a new swingStore from an object implementing the
- * exporter API. The exporter API may be provided by a swingStore instance, or
- * implemented by a host to restore data that was previously exported.
- *
- * @typedef {(exporter: SwingStoreExporter) => Promise<SwingStore>} ImportSwingStore
- */
-
/**
* A swing store holds the state of a swingset instance. This "store" is
* actually several different stores of different types that travel as a flock
@@ -342,7 +132,7 @@ export function makeSwingStoreExporter(dirPath, exportMode = 'current') {
*
* @returns {SwingStore}
*/
-function makeSwingStore(dirPath, forceReset, options = {}) {
+export function makeSwingStore(dirPath, forceReset, options = {}) {
const { serialized } = options;
if (serialized) {
Buffer.isBuffer(serialized) || Fail`options.serialized must be Buffer`;
@@ -374,7 +164,7 @@ function makeSwingStore(dirPath, forceReset, options = {}) {
}
}
fs.mkdirSync(dirPath, { recursive: true });
- filePath = path.join(dirPath, 'swingstore.sqlite');
+ filePath = dbFileInDirectory(dirPath);
} else {
filePath = ':memory:';
}
@@ -449,13 +239,6 @@ function makeSwingStore(dirPath, forceReset, options = {}) {
// Perform all database initialization in a single transaction
sqlBeginTransaction.run();
- db.exec(`
- CREATE TABLE IF NOT EXISTS kvStore (
- key TEXT,
- value TEXT,
- PRIMARY KEY (key)
- )
- `);
db.exec(`
CREATE TABLE IF NOT EXISTS pendingExports (
@@ -465,10 +248,32 @@ function makeSwingStore(dirPath, forceReset, options = {}) {
)
`);
+ let exportCallback;
+ function setExportCallback(cb) {
+ typeof cb === 'function' || Fail`callback must be a function`;
+ exportCallback = cb;
+ }
+ if (options.exportCallback) {
+ setExportCallback(options.exportCallback);
+ }
+
+ const sqlAddPendingExport = db.prepare(`
+ INSERT INTO pendingExports (key, value)
+ VALUES (?, ?)
+ ON CONFLICT DO UPDATE SET value = excluded.value
+ `);
+
+ function noteExport(key, value) {
+ if (exportCallback) {
+ sqlAddPendingExport.run(key, value);
+ }
+ }
+
+ const kvStore = makeKVStore(db, ensureTxn, trace);
+
const { dumpTranscripts, ...transcriptStore } = makeTranscriptStore(
db,
ensureTxn,
- // eslint-disable-next-line no-use-before-define
noteExport,
{
keepTranscripts,
@@ -478,7 +283,6 @@ function makeSwingStore(dirPath, forceReset, options = {}) {
db,
ensureTxn,
makeSnapStoreIO(),
- // eslint-disable-next-line no-use-before-define
noteExport,
{
keepSnapshots,
@@ -487,7 +291,6 @@ function makeSwingStore(dirPath, forceReset, options = {}) {
const { dumpBundles, ...bundleStore } = makeBundleStore(
db,
ensureTxn,
- // eslint-disable-next-line no-use-before-define
noteExport,
);
@@ -496,20 +299,11 @@ function makeSwingStore(dirPath, forceReset, options = {}) {
// At this point, all database initialization should be complete, so commit now.
sqlCommit.run();
- let exportCallback;
- function setExportCallback(cb) {
- typeof cb === 'function' || Fail`callback must be a function`;
- exportCallback = cb;
- }
- if (options.exportCallback) {
- setExportCallback(options.exportCallback);
- }
-
let inCrank = false;
function diskUsage() {
if (dirPath) {
- const dataFilePath = `${dirPath}/swingstore.sqlite`;
+ const dataFilePath = dbFileInDirectory(dirPath);
const stat = fs.statSync(dataFilePath);
return stat.size;
} else {
@@ -517,154 +311,13 @@ function makeSwingStore(dirPath, forceReset, options = {}) {
}
}
- const sqlKVGet = db.prepare(`
- SELECT value
- FROM kvStore
- WHERE key = ?
- `);
- sqlKVGet.pluck(true);
-
- /**
- * Obtain the value stored for a given key.
- *
- * @param {string} key The key whose value is sought.
- *
- * @returns {string | undefined} the (string) value for the given key, or
- * undefined if there is no such value.
- *
- * @throws if key is not a string.
- */
- function get(key) {
- typeof key === 'string' || Fail`key must be a string`;
- return sqlKVGet.get(key);
- }
-
- const sqlKVGetNextKey = db.prepare(`
- SELECT key
- FROM kvStore
- WHERE key > ?
- LIMIT 1
- `);
- sqlKVGetNextKey.pluck(true);
-
- /**
- * getNextKey enables callers to iterate over all keys within a
- * given range. To build an iterator of all keys from start
- * (inclusive) to end (exclusive), do:
- *
- * function* iterate(start, end) {
- * if (kvStore.has(start)) {
- * yield start;
- * }
- * let prev = start;
- * while (true) {
- * let next = kvStore.getNextKey(prev);
- * if (!next || next >= end) {
- * break;
- * }
- * yield next;
- * prev = next;
- * }
- * }
- *
- * @param {string} previousKey The key returned will always be later than this one.
- *
- * @returns {string | undefined} a key string, or undefined if we reach the end of the store
- *
- * @throws if previousKey is not a string
- */
-
- function getNextKey(previousKey) {
- typeof previousKey === 'string' || Fail`previousKey must be a string`;
- return sqlKVGetNextKey.get(previousKey);
- }
-
- /**
- * Test if the state contains a value for a given key.
- *
- * @param {string} key The key that is of interest.
- *
- * @returns {boolean} true if a value is stored for the key, false if not.
- *
- * @throws if key is not a string.
- */
- function has(key) {
- typeof key === 'string' || Fail`key must be a string`;
- return get(key) !== undefined;
- }
-
- const sqlKVSet = db.prepare(`
- INSERT INTO kvStore (key, value)
- VALUES (?, ?)
- ON CONFLICT DO UPDATE SET value = excluded.value
- `);
-
- /**
- * Store a value for a given key. The value will replace any prior value if
- * there was one.
- *
- * @param {string} key The key whose value is being set.
- * @param {string} value The value to set the key to.
- *
- * @throws if either parameter is not a string.
- */
- function set(key, value) {
- typeof key === 'string' || Fail`key must be a string`;
- typeof value === 'string' || Fail`value must be a string`;
- // synchronous read after write within a transaction is safe
- // The transaction's overall success will be awaited during commit
- ensureTxn();
- sqlKVSet.run(key, value);
- trace('set', key, value);
- }
-
- const sqlKVDel = db.prepare(`
- DELETE FROM kvStore
- WHERE key = ?
- `);
-
- /**
- * Remove any stored value for a given key. It is permissible for there to
- * be no existing stored value for the key.
- *
- * @param {string} key The key whose value is to be deleted
- *
- * @throws if key is not a string.
- */
- function del(key) {
- typeof key === 'string' || Fail`key must be a string`;
- ensureTxn();
- sqlKVDel.run(key);
- trace('del', key);
- }
-
- const kvStore = {
- has,
- get,
- getNextKey,
- set,
- delete: del,
- };
-
- const sqlAddPendingExport = db.prepare(`
- INSERT INTO pendingExports (key, value)
- VALUES (?, ?)
- ON CONFLICT DO UPDATE SET value = excluded.value
- `);
-
- function noteExport(key, value) {
- if (exportCallback) {
- sqlAddPendingExport.run(key, value);
- }
- }
-
const kernelKVStore = {
...kvStore,
set(key, value) {
typeof key === 'string' || Fail`key must be a string`;
const keyType = getKeyType(key);
keyType !== 'host' || Fail`kernelKVStore refuses host keys`;
- set(key, value);
+ kvStore.set(key, value);
if (keyType === 'consensus') {
noteExport(`kv.${key}`, value);
crankhasher.add('add');
@@ -679,7 +332,7 @@ function makeSwingStore(dirPath, forceReset, options = {}) {
typeof key === 'string' || Fail`key must be a string`;
const keyType = getKeyType(key);
keyType !== 'host' || Fail`kernelKVStore refuses host keys`;
- del(key);
+ kvStore.delete(key);
if (keyType === 'consensus') {
noteExport(`kv.${key}`, undefined);
crankhasher.add('delete');
@@ -695,12 +348,12 @@ function makeSwingStore(dirPath, forceReset, options = {}) {
set(key, value) {
const keyType = getKeyType(key);
keyType === 'host' || Fail`hostKVStore requires host keys`;
- set(key, value);
+ kvStore.set(key, value);
},
delete(key) {
const keyType = getKeyType(key);
keyType === 'host' || Fail`hostKVStore requires host keys`;
- del(key);
+ kvStore.delete(key);
},
};
@@ -740,7 +393,7 @@ function makeSwingStore(dirPath, forceReset, options = {}) {
resetCrankhash();
// Get the old activityhash
- let oldActivityhash = get('activityhash');
+ let oldActivityhash = kvStore.get('activityhash');
if (oldActivityhash === undefined) {
oldActivityhash = '';
}
@@ -756,7 +409,7 @@ function makeSwingStore(dirPath, forceReset, options = {}) {
// Store the new activityhash
const activityhash = hasher.finish();
- set('activityhash', activityhash);
+ kvStore.set('activityhash', activityhash);
// Need to explicitly call noteExport here because activityhash is written
// directly to the low-level store to avoid recursive hashing, which
// bypasses the normal notification mechanism
@@ -766,7 +419,7 @@ function makeSwingStore(dirPath, forceReset, options = {}) {
}
function getActivityhash() {
- return get('activityhash') || '';
+ return kvStore.get('activityhash') || '';
}
const sqlExportsGet = db.prepare(`
@@ -823,6 +476,13 @@ function makeSwingStore(dirPath, forceReset, options = {}) {
stopTrace();
}
+ /** @type {import('./internal.js').SwingStoreInternal} */
+ const internal = harden({ snapStore, transcriptStore, bundleStore });
+
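+ // Repair any artifact metadata this store is missing by replaying an
+ // exporter's export-data stream; the actual work happens in
+ // doRepairMetadata, which receives the store's internal facets.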
+ async function repairMetadata(exporter) {
+ return doRepairMetadata(internal, exporter);
+ }
+
/**
* Return a Buffer with the entire DB state, useful for cloning a
* small swingstore in unit tests.
@@ -894,6 +554,7 @@ function makeSwingStore(dirPath, forceReset, options = {}) {
getActivityhash,
};
const hostStorage = {
+ repairMetadata,
kvStore: hostKVStore,
commit,
close,
@@ -904,11 +565,6 @@ function makeSwingStore(dirPath, forceReset, options = {}) {
serialize,
dump,
};
- const internal = {
- snapStore,
- transcriptStore,
- bundleStore,
- };
return harden({
kernelStorage,
@@ -941,198 +597,6 @@ export function initSwingStore(dirPath = null, options = {}) {
return makeSwingStore(dirPath, true, options);
}
-function parseVatArtifactExportKey(key) {
- const parts = key.split('.');
- const [_type, vatID, rawPos] = parts;
- // prettier-ignore
- parts.length === 3 ||
- Fail`expected artifact name of the form '{type}.{vatID}.{pos}', saw ${q(key)}`;
- const isCurrent = rawPos === 'current';
- let pos;
- if (isCurrent) {
- pos = -1;
- } else {
- pos = Number(rawPos);
- }
-
- return { vatID, isCurrent, pos };
-}
-
-function artifactKey(type, vatID, pos) {
- return `${type}.${vatID}.${pos}`;
-}
-
-/**
- * @param {SwingStoreExporter} exporter
- * @param {string | null} [dirPath]
- * @param {object} options
- * @returns {Promise<SwingStore>}
- */
-export async function importSwingStore(exporter, dirPath = null, options = {}) {
- if (dirPath) {
- typeof dirPath === 'string' || Fail`dirPath must be a string`;
- }
- const { includeHistorical = false } = options;
- const store = makeSwingStore(dirPath, true, options);
- const { kernelStorage, internal } = store;
-
- // Artifact metadata, keyed as `${type}.${vatID}.${pos}`
- //
- // Note that this key is almost but not quite the artifact name, since the
- // names of transcript span artifacts also include the endPos, but the endPos
- // value is in flux until the span is complete.
- const artifactMetadata = new Map();
-
- // Each vat requires a transcript span and (usually) a snapshot. This table
- // tracks which of these we've seen, keyed by vatID.
- // vatID -> { snapshotKey: metadataKey, transcriptKey: metadataKey }
- const vatArtifacts = new Map();
- const bundleArtifacts = new Map();
-
- for await (const [key, value] of exporter.getExportData()) {
- const [tag] = key.split('.', 1);
- const subKey = key.substring(tag.length + 1);
- if (tag === 'kv') {
- // 'kv' keys contain individual kvStore entries
- if (value == null) {
- // Note '==' rather than '===': any nullish value implies deletion
- kernelStorage.kvStore.delete(subKey);
- } else {
- kernelStorage.kvStore.set(subKey, value);
- }
- } else if (tag === 'bundle') {
- // 'bundle' keys contain bundle IDs
- if (value == null) {
- bundleArtifacts.delete(key);
- } else {
- bundleArtifacts.set(key, value);
- }
- } else if (tag === 'transcript' || tag === 'snapshot') {
- // 'transcript' and 'snapshot' keys contain artifact description info.
- assert(value); // make TypeScript shut up
- const { vatID, isCurrent, pos } = parseVatArtifactExportKey(key);
- if (isCurrent) {
- const vatInfo = vatArtifacts.get(vatID) || {};
- if (tag === 'snapshot') {
- // `export.snapshot.{vatID}.current` directly identifies the current snapshot artifact
- vatInfo.snapshotKey = value;
- } else if (tag === 'transcript') {
- // `export.transcript.${vatID}.current` contains a metadata record for the current
- // state of the current transcript span as of the time of export
- const metadata = JSON.parse(value);
- vatInfo.transcriptKey = artifactKey(tag, vatID, metadata.startPos);
- artifactMetadata.set(vatInfo.transcriptKey, metadata);
- }
- vatArtifacts.set(vatID, vatInfo);
- } else {
- artifactMetadata.set(artifactKey(tag, vatID, pos), JSON.parse(value));
- }
- } else {
- Fail`unknown artifact type tag ${q(tag)} on import`;
- }
- }
-
- // At this point we should have acquired the entire KV store state, plus
- // sufficient metadata to identify the complete set of artifacts we'll need to
- // fetch along with the information required to validate each of them after
- // fetching.
- //
- // Depending on how the export was parameterized, the metadata may also include
- // information about historical artifacts that we might or might not actually
- // fetch depending on how this import was parameterized
-
- // Fetch the set of current artifacts.
-
- // Keep track of fetched artifacts in this set so we don't fetch them a second
- // time if we are trying for historical artifacts also.
- const fetchedArtifacts = new Set();
-
- for await (const [vatID, vatInfo] of vatArtifacts.entries()) {
- // For each vat, we *must* have a transcript span. If this is not the very
- // first transcript span in the history of that vat, then we also must have
- // a snapshot for the state of the vat immediately prior to when the
- // transcript span begins.
- vatInfo.transcriptKey ||
- Fail`missing current transcript key for vat ${q(vatID)}`;
- const transcriptInfo = artifactMetadata.get(vatInfo.transcriptKey);
- transcriptInfo || Fail`missing transcript metadata for vat ${q(vatID)}`;
- let snapshotInfo;
- if (vatInfo.snapshotKey) {
- snapshotInfo = artifactMetadata.get(vatInfo.snapshotKey);
- snapshotInfo || Fail`missing snapshot metadata for vat ${q(vatID)}`;
- }
- if (!snapshotInfo) {
- transcriptInfo.startPos === 0 ||
- Fail`missing current snapshot for vat ${q(vatID)}`;
- } else {
- snapshotInfo.snapPos + 1 === transcriptInfo.startPos ||
- Fail`current transcript for vat ${q(vatID)} doesn't go with snapshot`;
- fetchedArtifacts.add(vatInfo.snapshotKey);
- }
- await (!snapshotInfo ||
- internal.snapStore.importSnapshot(
- vatInfo.snapshotKey,
- exporter,
- snapshotInfo,
- ));
-
- const transcriptArtifactName = `${vatInfo.transcriptKey}.${transcriptInfo.endPos}`;
- await internal.transcriptStore.importSpan(
- transcriptArtifactName,
- exporter,
- transcriptInfo,
- );
- fetchedArtifacts.add(transcriptArtifactName);
- }
- const bundleArtifactNames = Array.from(bundleArtifacts.keys()).sort();
- for await (const bundleArtifactName of bundleArtifactNames) {
- await internal.bundleStore.importBundle(
- bundleArtifactName,
- exporter,
- bundleArtifacts.get(bundleArtifactName),
- );
- }
-
- if (!includeHistorical) {
- // eslint-disable-next-line @jessie.js/no-nested-await
- await exporter.close();
- return store;
- }
-
- // If we're also importing historical artifacts, have the exporter enumerate
- // the complete set of artifacts it has and fetch all of them except for the
- // ones we've already fetched.
- for await (const artifactName of exporter.getArtifactNames()) {
- if (fetchedArtifacts.has(artifactName)) {
- continue;
- }
- let fetchedP;
- if (artifactName.startsWith('snapshot.')) {
- fetchedP = internal.snapStore.importSnapshot(
- artifactName,
- exporter,
- artifactMetadata.get(artifactName),
- );
- } else if (artifactName.startsWith('transcript.')) {
- // strip endPos off artifact name
- const metadataKey = artifactName.split('.').slice(0, 3).join('.');
- fetchedP = internal.transcriptStore.importSpan(
- artifactName,
- exporter,
- artifactMetadata.get(metadataKey),
- );
- } else if (artifactName.startsWith('bundle.')) {
- // already taken care of
- continue;
- } else {
- Fail`unknown artifact type: ${artifactName}`;
- }
- await fetchedP;
- }
- await exporter.close();
- return store;
-}
-
/**
* Open a persistent swingset store. If there is no existing store at the given
* `dirPath`, a new, empty store will be created.
@@ -1163,7 +627,7 @@ export function openSwingStore(dirPath, options = {}) {
export function isSwingStore(dirPath) {
typeof dirPath === 'string' || Fail`dirPath must be a string`;
if (fs.existsSync(dirPath)) {
- const storeFile = path.resolve(dirPath, 'swingstore.sqlite');
+ const storeFile = dbFileInDirectory(dirPath);
if (fs.existsSync(storeFile)) {
return true;
}
diff --git a/packages/swing-store/src/transcriptStore.js b/packages/swing-store/src/transcriptStore.js
index a8f2b4f3ea7..ee30af5d4d6 100644
--- a/packages/swing-store/src/transcriptStore.js
+++ b/packages/swing-store/src/transcriptStore.js
@@ -6,7 +6,12 @@ import BufferLineTransform from '@agoric/internal/src/node/buffer-line-transform
import { createSHA256 } from './hasher.js';
/**
- * @typedef { import('./swingStore').SwingStoreExporter } SwingStoreExporter
+ * @template T
+ * @typedef { IterableIterator<T> | AsyncIterableIterator<T> } AnyIterableIterator
+ */
+
+/**
+ * @typedef { import('./internal.js').ArtifactMode } ArtifactMode
*
* @typedef {{
* initTranscript: (vatID: string) => void,
@@ -19,10 +24,13 @@ import { createSHA256 } from './hasher.js';
* }} TranscriptStore
*
* @typedef {{
- * exportSpan: (name: string, includeHistorical: boolean) => AsyncIterableIterator<Uint8Array>
- * importSpan: (artifactName: string, exporter: SwingStoreExporter, artifactMetadata: Map) => Promise<void>,
+ * exportSpan: (name: string) => AsyncIterableIterator<Uint8Array>
* getExportRecords: (includeHistorical: boolean) => IterableIterator<readonly [key: string, value: string]>,
- * getArtifactNames: (includeHistorical: boolean) => AsyncIterableIterator<string>,
+ * getArtifactNames: (artifactMode: ArtifactMode) => AsyncIterableIterator<string>,
+ * importTranscriptSpanRecord: (key: string, value: string) => void,
+ * populateTranscriptSpan: (name: string, makeChunkIterator: () => AnyIterableIterator<Uint8Array>, options: { artifactMode: ArtifactMode }) => Promise<void>,
+ * assertComplete: (checkMode: Omit<ArtifactMode, 'debug'>) => void,
+ * repairTranscriptSpanRecord: (key: string, value: string) => void,
* readFullVatTranscript: (vatID: string) => Iterable<{position: number, item: string}>
* }} TranscriptStoreInternal
*
@@ -82,7 +90,7 @@ export function makeTranscriptStore(
//
// The transcriptItems associated with historical spans may or may not exist,
// depending on pruning. However, the items associated with the current span
- // must always be present
+ // must always be present.
db.exec(`
CREATE TABLE IF NOT EXISTS transcriptSpans (
@@ -325,6 +333,13 @@ export function makeTranscriptStore(
ORDER BY vatID, startPos
`);
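+ // All spans belonging to one incarnation of one vat, used by the
+ // 'replay' artifact mode below.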
+ const sqlGetIncarnationSpanMetadata = db.prepare(`
+ SELECT vatID, startPos, endPos, hash, isCurrent, incarnation
+ FROM transcriptSpans
+ WHERE vatID=? AND incarnation=?
+ ORDER BY vatID, startPos
+ `);
+
const sqlGetCurrentSpanMetadata = db.prepare(`
SELECT vatID, startPos, endPos, hash, isCurrent, incarnation
FROM transcriptSpans
@@ -332,6 +347,11 @@ export function makeTranscriptStore(
ORDER BY vatID, startPos
`);
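+ // Convert a raw transcriptSpans row into the export-data record shape
+ // produced by spanRec().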
+ function dbRecToExportRec(dbRec) {
+ const { vatID, startPos, endPos, hash, isCurrent, incarnation } = dbRec;
+ return spanRec(vatID, startPos, endPos, hash, isCurrent, incarnation);
+ }
+
/**
* Obtain artifact metadata records for spans contained in this store.
*
@@ -354,45 +374,77 @@ export function makeTranscriptStore(
* replay will never be required or because such replay would be prohibitively
* expensive regardless of need and therefore other repair strategies are employed.
*
+ * The only code path which could use 'false' would be `swingstore.dump()`,
+ * which takes the same flag.
+ *
* @yields {readonly [key: string, value: string]}
* @returns {IterableIterator<readonly [key: string, value: string]>}
* An iterator over pairs of [spanMetadataKey, rec], where `rec` is a
* JSON-encoded metadata record for the span named by `spanMetadataKey`.
*/
function* getExportRecords(includeHistorical = true) {
- const sql = includeHistorical
- ? sqlGetAllSpanMetadata
- : sqlGetCurrentSpanMetadata;
- for (const rec of sql.iterate()) {
- const { vatID, startPos, endPos, hash, isCurrent, incarnation } = rec;
- const exportRec = spanRec(
- vatID,
- startPos,
- endPos,
- hash,
- isCurrent,
- incarnation,
- );
- yield [spanMetadataKey(rec), JSON.stringify(exportRec)];
+ if (includeHistorical) {
+ for (const rec of sqlGetAllSpanMetadata.iterate()) {
+ yield [spanMetadataKey(rec), JSON.stringify(dbRecToExportRec(rec))];
+ }
+ } else {
+ for (const rec of sqlGetCurrentSpanMetadata.iterate()) {
+ yield [spanMetadataKey(rec), JSON.stringify(dbRecToExportRec(rec))];
+ }
}
}
+ const sqlCountSpanItems = db.prepare(`
+ SELECT COUNT(*) FROM transcriptItems
+ WHERE vatID = ? AND position >= ? AND position < ?
+ `);
+ sqlCountSpanItems.pluck();
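+ // .pluck() makes the prepared statement return the bare COUNT(*) value
+ // rather than a row object, so results compare directly against
+ // endPos - startPos.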
+
/**
* Obtain artifact names for spans contained in this store.
*
- * @param {boolean} includeHistorical If true, include all spans that are
- * present in the store regardless of their currency; if false, only include
- * the current span for each vat.
- *
+ * @param {ArtifactMode} artifactMode Control which artifacts should be exported.
+ * At 'operational', only include current spans. At 'replay',
+ * include all spans of the current incarnation for each vat. At
+ * 'archival' and 'debug', include all spans.
* @yields {string}
* @returns {AsyncIterableIterator<string>} An iterator over the names of all the artifacts requested
*/
- async function* getArtifactNames(includeHistorical) {
- const sql = includeHistorical
- ? sqlGetAllSpanMetadata
- : sqlGetCurrentSpanMetadata;
- for (const rec of sql.iterate()) {
- yield spanArtifactName(rec);
+ async function* getArtifactNames(artifactMode) {
+ // for all non-'debug' modes, the exporter asserts that all
+ // requested items are present (i.e. the artifacts will be
+ // complete), so we don't need to check that ourselves
+ if (artifactMode === 'operational') {
+ for (const rec of sqlGetCurrentSpanMetadata.iterate()) {
+ yield spanArtifactName(rec);
+ }
+ } else if (artifactMode === 'replay') {
+ for (const curRec of sqlGetCurrentSpanMetadata.iterate()) {
+ const { vatID, incarnation } = curRec;
+ for (const rec of sqlGetIncarnationSpanMetadata.iterate(
+ vatID,
+ incarnation,
+ )) {
+ yield spanArtifactName(rec);
+ }
+ }
+ } else if (artifactMode === 'archival') {
+ // everything
+ for (const rec of sqlGetAllSpanMetadata.iterate()) {
+ yield spanArtifactName(rec);
+ }
+ } else if (artifactMode === 'debug') {
+ // everything that is a complete span
+ for (const rec of sqlGetAllSpanMetadata.iterate()) {
+ const { vatID, startPos, endPos } = rec;
+ const count = sqlCountSpanItems.get(vatID, startPos, endPos);
+ if (count !== endPos - startPos) {
+ // skip incomplete spans, because the exporter did not
+ // already do a completeness check in 'debug' mode
+ continue;
+ }
+ yield spanArtifactName(rec);
+ }
}
}
@@ -432,15 +484,22 @@ export function makeTranscriptStore(
}
}
startPos <= endPos || Fail`${q(startPos)} <= ${q(endPos)}`;
+ const expectedCount = endPos - startPos;
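+ // Count the items as reader() streams them out: a shortfall means the
+ // span was pruned or damaged, so fail loudly rather than yield a
+ // truncated artifact.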
function* reader() {
+ let count = 0;
for (const { item } of sqlReadSpanItems.iterate(
vatID,
startPos,
endPos,
)) {
yield item;
+ count += 1;
}
+ count === expectedCount ||
+ Fail`read ${q(count)} transcript entries (expected ${q(
+ expectedCount,
+ )})`;
}
if (startPos === endPos) {
@@ -465,12 +524,10 @@ export function makeTranscriptStore(
* `transcript.${vatID}.${startPos}.${endPos}`
*
* @param {string} name The name of the transcript artifact to be read
- * @param {boolean} includeHistorical If true, allow non-current spans to be fetched
- *
* @returns {AsyncIterableIterator<Uint8Array>}
* @yields {Uint8Array}
*/
- async function* exportSpan(name, includeHistorical) {
+ async function* exportSpan(name) {
typeof name === 'string' || Fail`artifact name must be a string`;
const parts = name.split('.');
const [type, vatID, pos] = parts;
@@ -479,9 +536,6 @@ export function makeTranscriptStore(
Fail`expected artifact name of the form 'transcript.{vatID}.{startPos}.{endPos}', saw ${q(name)}`;
const isCurrent = sqlGetSpanIsCurrent.get(vatID, pos);
isCurrent !== undefined || Fail`transcript span ${q(name)} not available`;
- isCurrent ||
- includeHistorical ||
- Fail`transcript span ${q(name)} not available`;
const startPos = Number(pos);
for (const entry of readSpan(vatID, startPos)) {
yield Buffer.from(`${entry}\n`);
@@ -516,33 +570,104 @@ export function makeTranscriptStore(
noteExport(spanMetadataKey(rec), JSON.stringify(rec));
};
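+ // Ingest a 'transcript.*' export-data record during import: check that
+ // the key agrees with the metadata payload, then create the span row
+ // (the items themselves arrive later via populateTranscriptSpan).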
+ function importTranscriptSpanRecord(key, value) {
+ ensureTxn();
+ const [tag, keyVatID, keyStartPos] = key.split('.');
+ assert.equal(tag, 'transcript');
+ const metadata = JSON.parse(value);
+ if (key.endsWith('.current') !== Boolean(metadata.isCurrent)) {
+ throw Fail`transcript key ${key} mismatches metadata ${metadata}`;
+ }
+ const { vatID, startPos, endPos, hash, isCurrent, incarnation } = metadata;
+ vatID || Fail`transcript metadata missing vatID: ${metadata}`;
+ startPos !== undefined ||
+ Fail`transcript metadata missing startPos: ${metadata}`;
+ endPos !== undefined ||
+ Fail`transcript metadata missing endPos: ${metadata}`;
+ hash || Fail`transcript metadata missing hash: ${metadata}`;
+ isCurrent !== undefined ||
+ Fail`transcript metadata missing isCurrent: ${metadata}`;
+ incarnation !== undefined ||
+ Fail`transcript metadata missing incarnation: ${metadata}`;
+ if (keyStartPos !== 'current') {
+ if (Number(keyStartPos) !== startPos) {
+ Fail`transcript key ${key} mismatches metadata ${metadata}`;
+ }
+ }
+ keyVatID === vatID ||
+ Fail`transcript key ${key} mismatches metadata ${metadata}`;
+
+ // sqlWriteSpan is an INSERT, so the PRIMARY KEY (vatID, position)
+ // constraint will catch broken export-data errors like trying to
+ // add two different versions of the same span (e.g. one holding
+ // items 4..8, a second holding 4..9)
+
+ sqlWriteSpan.run(
+ vatID,
+ startPos,
+ endPos,
+ hash,
+ isCurrent ? 1 : null,
+ incarnation,
+ );
+ }
+
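+ // Fetch previously-imported metadata for the span starting at startPos,
+ // used below to validate incoming artifacts and repair records.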
+ const sqlGetSpanMetadataFor = db.prepare(`
+ SELECT hash, isCurrent, incarnation, endPos
+ FROM transcriptSpans
+ WHERE vatID = ? AND startPos = ?
+ `);
+
+ const sqlGetStartOfIncarnation = db.prepare(`
+ SELECT startPos
+ FROM transcriptSpans
+ WHERE vatID=? AND incarnation=?
+ ORDER BY startPos ASC LIMIT 1
+ `);
+ sqlGetStartOfIncarnation.pluck();
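+ // The startPos of the first span in a given incarnation, which bounds
+ // the 'replay' checks in populateTranscriptSpan and assertComplete.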
+
/**
* Import a transcript span from another store.
*
* @param {string} name Artifact Name of the transcript span
- * @param {SwingStoreExporter} exporter Exporter from which to get the span data
- * @param {object} info Metadata describing the span
+ * @param {() => AnyIterableIterator<Uint8Array>} makeChunkIterator get an iterator of transcript byte chunks
+ * @param {object} options
+ * @param {ArtifactMode} options.artifactMode
*
* @returns {Promise<void>}
*/
- async function importSpan(name, exporter, info) {
+ async function populateTranscriptSpan(name, makeChunkIterator, options) {
+ ensureTxn();
+ const { artifactMode } = options;
const parts = name.split('.');
const [type, vatID, rawStartPos, rawEndPos] = parts;
// prettier-ignore
parts.length === 4 && type === 'transcript' ||
Fail`expected artifact name of the form 'transcript.{vatID}.{startPos}.{endPos}', saw '${q(name)}'`;
- // prettier-ignore
- info.vatID === vatID ||
- Fail`artifact name says vatID ${q(vatID)}, metadata says ${q(info.vatID)}`;
const startPos = Number(rawStartPos);
- // prettier-ignore
- info.startPos === startPos ||
- Fail`artifact name says startPos ${q(startPos)}, metadata says ${q(info.startPos)}`;
const endPos = Number(rawEndPos);
- // prettier-ignore
- info.endPos === endPos ||
- Fail`artifact name says endPos ${q(endPos)}, metadata says ${q(info.endPos)}`;
- const artifactChunks = exporter.getArtifact(name);
+
+ const metadata =
+ sqlGetSpanMetadataFor.get(vatID, startPos) ||
+ Fail`no metadata for transcript span ${name}`;
+ assert.equal(metadata.endPos, endPos);
+
+ if (artifactMode === 'operational') {
+ if (!metadata.isCurrent) {
+ return; // ignore old spans
+ }
+ }
+ if (artifactMode === 'replay') {
+ // ignore spans that aren't for the current incarnation
+ const { incarnation } = sqlGetCurrentSpanBounds.get(vatID);
+ const incStart = sqlGetStartOfIncarnation.get(vatID, incarnation);
+ if (startPos < incStart) {
+ return;
+ }
+ }
+ // 'archival' and 'debug' modes accept all spans
+
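+ // Stream the artifact through a line splitter, inserting each
+ // transcript item and folding it into the running span hash.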
+ const artifactChunks = await makeChunkIterator();
const inStream = Readable.from(artifactChunks);
const lineTransform = new BufferLineTransform();
const lineStream = inStream.pipe(lineTransform).setEncoding('utf8');
@@ -550,21 +675,90 @@ export function makeTranscriptStore(
let pos = startPos;
for await (const line of lineStream) {
const item = line.trimEnd();
- sqlAddItem.run(vatID, item, pos, info.incarnation);
+ sqlAddItem.run(vatID, item, pos, metadata.incarnation);
hash = updateSpanHash(hash, item);
pos += 1;
}
- pos === endPos || Fail`artifact ${name} is not available`;
- info.hash === hash ||
- Fail`artifact ${name} hash is ${q(hash)}, metadata says ${q(info.hash)}`;
- sqlWriteSpan.run(
- info.vatID,
- info.startPos,
- info.endPos,
- info.hash,
- info.isCurrent ? 1 : null,
- info.incarnation,
- );
+ pos === endPos || Fail`artifact ${name} is not complete`;
+
+ // validate against the previously-established metadata
+
+ // prettier-ignore
+ metadata.hash === hash ||
+ Fail`artifact ${name} hash is ${q(hash)}, metadata says ${q(metadata.hash)}`;
+
+ // If that passes, the not-yet-committed data is good. If it
+ // fails, the thrown error will flunk the import and inhibit a
+ // commit. So we're done.
+ }
+
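+ // Repair path: like importTranscriptSpanRecord, but tolerate a record
+ // that already exists, insisting only that it match the supplied
+ // metadata exactly.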
+ function repairTranscriptSpanRecord(key, value) {
+ ensureTxn();
+ const [tag, keyVatID, keyStartPos] = key.split('.');
+ assert.equal(tag, 'transcript');
+ const metadata = JSON.parse(value);
+ const { vatID, startPos, endPos, hash, isCurrent, incarnation } = metadata;
+ assert.equal(keyVatID, vatID);
+ if (keyStartPos !== 'current') {
+ if (Number(keyStartPos) !== startPos) {
+ Fail`transcript key ${key} mismatches metadata ${metadata}`;
+ }
+ }
+
+ const existing = sqlGetSpanMetadataFor.get(vatID, startPos);
+ if (existing) {
+ if (
+ Boolean(existing.isCurrent) !== Boolean(isCurrent) ||
+ existing.hash !== hash ||
+ existing.incarnation !== incarnation ||
+ existing.endPos !== endPos
+ ) {
+ throw Fail`repairTranscriptSpanRecord metadata mismatch: ${existing} vs ${metadata}`;
+ }
+ } else {
+ sqlWriteSpan.run(
+ vatID,
+ startPos,
+ endPos,
+ hash,
+ isCurrent ? 1 : null,
+ incarnation,
+ );
+ }
+ }
+
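+ // Verify that the transcript items retained on disk can satisfy the
+ // given artifactMode; both the export and import paths use this to
+ // reject an incomplete store.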
+ function assertComplete(checkMode) {
+ assert(checkMode !== 'debug', checkMode);
+ for (const rec of sqlGetCurrentSpanMetadata.iterate()) {
+ const { vatID, startPos, endPos, incarnation } = rec;
+
+ if (checkMode === 'operational') {
+ // at 'operational', every 'isCurrent' transcript span must
+ // have all items
+ const count = sqlCountSpanItems.get(vatID, startPos, endPos);
+ if (count !== endPos - startPos) {
+ throw Fail`incomplete current transcript span: ${count} items, ${rec}`;
+ }
+ } else if (checkMode === 'replay') {
+ // at 'replay', every vat's current incarnation must be fully
+ // populated (which implies 'operational')
+ const incStart = sqlGetStartOfIncarnation.get(vatID, incarnation);
+ const incCount = sqlCountSpanItems.get(vatID, incStart, endPos);
+ if (incCount !== endPos - incStart) {
+ throw Fail`incomplete current incarnation transcript: ${incCount} items`;
+ }
+ } else if (checkMode === 'archival') {
+ // at 'archival', every incarnation must be fully populated,
+ // which means position=0 up through endPos-1 (which implies
+ // 'replay')
+ const arcCount = sqlCountSpanItems.get(vatID, 0, endPos);
+ if (arcCount !== endPos) {
+ throw Fail`incomplete archival transcript: ${arcCount} vs ${endPos}`;
+ }
+ } else {
+ throw Fail`unknown checkMode ${checkMode}`;
+ }
+ }
}
return harden({
@@ -577,10 +771,14 @@ export function makeTranscriptStore(
deleteVatTranscripts,
exportSpan,
- importSpan,
getExportRecords,
getArtifactNames,
+ importTranscriptSpanRecord,
+ populateTranscriptSpan,
+ assertComplete,
+ repairTranscriptSpanRecord,
+
dumpTranscripts,
readFullVatTranscript,
});
diff --git a/packages/swing-store/src/types.d.ts b/packages/swing-store/src/types.d.ts
new file mode 100644
index 00000000000..e8f9d7d84fd
--- /dev/null
+++ b/packages/swing-store/src/types.d.ts
@@ -0,0 +1,15 @@
+export type {
+ SwingStore,
+ SwingStoreKernelStorage,
+ SwingStoreHostStorage,
+} from './swingStore.js';
+export type { KVStore } from './kvStore.js';
+export type { BundleStore } from './bundleStore.js';
+export type { SnapStore, SnapshotResult, SnapshotInfo } from './snapStore.js';
+export type { TranscriptStore } from './transcriptStore.js';
+export type { ArtifactMode } from './internal.js';
+export type { ImportSwingStoreOptions } from './importer.js';
+export type {
+ SwingStoreExporter,
+ ExportSwingStoreOptions,
+} from './exporter.js';
diff --git a/packages/swing-store/src/types.js b/packages/swing-store/src/types.js
new file mode 100644
index 00000000000..a863ae2d2fa
--- /dev/null
+++ b/packages/swing-store/src/types.js
@@ -0,0 +1,6 @@
+// Types for the public API
+
+// Everything this "exports" actually comes from the neighboring types.d.ts file
+
+// Ensure this is a module.
+export {};
diff --git a/packages/swing-store/src/util.js b/packages/swing-store/src/util.js
index 4d4c765288e..f22f974d325 100644
--- a/packages/swing-store/src/util.js
+++ b/packages/swing-store/src/util.js
@@ -1,3 +1,4 @@
+import path from 'path';
import { Buffer } from 'buffer';
/**
@@ -5,7 +6,7 @@ import { Buffer } from 'buffer';
* 'stream/consumers' package, which unfortunately only exists in newer versions
* of Node.
*
- * @param {import('./swingStore').AnyIterable<Uint8Array>} inStream
+ * @param {import('./exporter').AnyIterable<Uint8Array>} inStream
*/
export const buffer = async inStream => {
const chunks = [];
@@ -14,3 +15,8 @@ export const buffer = async inStream => {
}
return Buffer.concat(chunks);
};
+
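+// A swingstore is a single SQLite database kept under a well-known
+// filename within its directory.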
+export function dbFileInDirectory(dirPath) {
+ const filePath = path.resolve(dirPath, 'swingstore.sqlite');
+ return filePath;
+}
diff --git a/packages/swing-store/test/test-bundles.js b/packages/swing-store/test/test-bundles.js
index c622583210b..b181611a129 100644
--- a/packages/swing-store/test/test-bundles.js
+++ b/packages/swing-store/test/test-bundles.js
@@ -4,11 +4,9 @@ import test from 'ava';
import tmp from 'tmp';
import { Buffer } from 'buffer';
import { createSHA256 } from '../src/hasher.js';
-import {
- importSwingStore,
- initSwingStore,
- makeSwingStoreExporter,
-} from '../src/swingStore.js';
+import { initSwingStore } from '../src/swingStore.js';
+import { makeSwingStoreExporter } from '../src/exporter.js';
+import { importSwingStore } from '../src/importer.js';
import { buffer } from '../src/util.js';
function makeB0ID(bundle) {
@@ -116,7 +114,9 @@ test('b0 import', async t => {
t.is(name, nameA);
yield Buffer.from(JSON.stringify(b0A));
},
- getArtifactNames: () => assert.fail('import should not query all names'),
+ async *getArtifactNames() {
+ yield* [nameA];
+ },
close: async () => undefined,
};
const { kernelStorage } = await importSwingStore(exporter);
@@ -138,7 +138,9 @@ test('b0 bad import', async t => {
t.is(name, nameA);
yield Buffer.from(JSON.stringify(b0Abogus));
},
- getArtifactNames: () => assert.fail('import should not query all names'),
+ async *getArtifactNames() {
+ yield* [nameA];
+ },
close: async () => undefined,
};
await t.throwsAsync(async () => importSwingStore(exporter), {
diff --git a/packages/swing-store/test/test-export.js b/packages/swing-store/test/test-export.js
new file mode 100644
index 00000000000..fae8e295c00
--- /dev/null
+++ b/packages/swing-store/test/test-export.js
@@ -0,0 +1,343 @@
+import '@endo/init/debug.js';
+
+import test from 'ava';
+
+import { buffer } from '../src/util.js';
+import { initSwingStore, makeSwingStoreExporter } from '../src/index.js';
+
+import { tmpDir, getSnapshotStream, makeB0ID } from './util.js';
+
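+// Each artifact mode is a superset of the ones ranked below it, so the
+// expected artifacts accumulate as rank increases; 'debug-on-pruned'
+// runs the 'debug' exporter against a store whose old snapshots were
+// pruned.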
+const rank = {
+ operational: 1,
+ replay: 2,
+ archival: 3,
+ debug: 4,
+ 'debug-on-pruned': 4,
+};
+
+const snapshotData = 'snapshot data';
+// this snapHash was computed manually
+const snapHash =
+ 'e7dee7266896538616b630a5da40a90e007726a383e005a9c9c5dd0c2daf9329';
+
+/** @type {import('../src/bundleStore.js').Bundle} */
+const bundle0 = { moduleFormat: 'nestedEvaluate', source: '1+1' };
+const bundle0ID = makeB0ID(bundle0);
+
+const exportTest = test.macro(async (t, mode) => {
+ const [dbDir, cleanup] = await tmpDir('testdb');
+ t.teardown(cleanup);
+ // const dbDir = 't-db';
+
+ const options = {};
+ if (mode === 'debug') {
+ options.keepSnapshots = true; // else old snapshots are deleted
+ }
+ const ss1 = initSwingStore(dbDir, options);
+ const ks = ss1.kernelStorage;
+
+ // build a DB with four spans (one in an old incarnation, two historical
+ // spans in the current incarnation, and only one inUse) and two
+ // snapshots (only one inUse)
+
+ ks.kvStore.set('key1', 'value1');
+ ks.bundleStore.addBundle(bundle0ID, bundle0);
+ ks.transcriptStore.initTranscript('v1');
+
+ // incarnation 0
+ ks.transcriptStore.addItem('v1', 'start-worker'); // 0
+ ks.transcriptStore.addItem('v1', 'shutdown-worker'); // 1
+ ks.transcriptStore.rolloverIncarnation('v1');
+ const spanHash0 =
+ '5bee0f44eca02f23eab03703e84ed2647d5d117fed99e1c30a3b424b7f082ab9';
+
+ // incarnation 1
+ ks.transcriptStore.addItem('v1', 'start-worker'); // 2
+ ks.transcriptStore.addItem('v1', 'delivery1'); // 3
+ await ks.snapStore.saveSnapshot('v1', 4, getSnapshotStream(snapshotData));
+ ks.transcriptStore.addItem('v1', 'save-snapshot'); // 4
+ ks.transcriptStore.rolloverSpan('v1'); // range= 2..5
+ const spanHash1 =
+ '57152efdd7fdf75c03371d2b4f1088d5bf3eae7fe643babce527ff81df38998c';
+
+ ks.transcriptStore.addItem('v1', 'load-snapshot'); // 5
+ ks.transcriptStore.addItem('v1', 'delivery2'); // 6
+ await ks.snapStore.saveSnapshot('v1', 7, getSnapshotStream(snapshotData));
+ ks.transcriptStore.addItem('v1', 'save-snapshot'); // 7
+ ks.transcriptStore.rolloverSpan('v1'); // range= 5..8
+ const spanHash2 =
+ '1947001e78e01bd1e773feb22b4ffc530447373b9de9274d5d5fbda3f23dbf2b';
+
+ ks.transcriptStore.addItem('v1', 'load-snapshot'); // 8
+ ks.transcriptStore.addItem('v1', 'delivery3'); // 9
+ const spanHash3 =
+ 'e6b42c6a3fb94285a93162f25a9fc0145fd4c5bb144917dc572c50ae2d02ee69';
+ // current range= 8..10
+
+ await ss1.hostStorage.commit();
+
+ // create an export, and assert that the pieces match what we
+ // expect. artifactMode='operational' means we get all metadata, no
+ // historical transcript spans, and no historical snapshots
+
+ assert.typeof(mode, 'string');
+ /** @typedef {import('../src/internal.js').ArtifactMode} ArtifactMode */
+ let artifactMode = /** @type {ArtifactMode} */ (mode);
+ if (mode === 'debug-on-pruned') {
+ artifactMode = 'debug';
+ }
+ const exporter = makeSwingStoreExporter(dbDir, { artifactMode });
+
+ // exportData
+ {
+ const exportData = new Map();
+ for await (const [key, value] of exporter.getExportData()) {
+ exportData.set(key, value);
+ }
+ // console.log('exportData:', exportData);
+
+ const check = (key, expected) => {
+ t.true(exportData.has(key));
+ let value = exportData.get(key);
+ exportData.delete(key);
+ if (typeof expected === 'object') {
+ value = JSON.parse(value);
+ }
+ t.deepEqual(value, expected);
+ };
+
+ check('kv.key1', 'value1');
+ check('snapshot.v1.4', {
+ vatID: 'v1',
+ snapPos: 4,
+ inUse: 0,
+ hash: snapHash,
+ });
+ check('snapshot.v1.7', {
+ vatID: 'v1',
+ snapPos: 7,
+ inUse: 1,
+ hash: snapHash,
+ });
+ check('snapshot.v1.current', 'snapshot.v1.7');
+ const base = { vatID: 'v1', isCurrent: 0 };
+ check('transcript.v1.0', {
+ ...base,
+ incarnation: 0,
+ startPos: 0,
+ endPos: 2,
+ hash: spanHash0,
+ });
+ check('transcript.v1.2', {
+ ...base,
+ incarnation: 1,
+ startPos: 2,
+ endPos: 5,
+ hash: spanHash1,
+ });
+ check('transcript.v1.5', {
+ ...base,
+ incarnation: 1,
+ startPos: 5,
+ endPos: 8,
+ hash: spanHash2,
+ });
+ check('transcript.v1.current', {
+ ...base,
+ incarnation: 1,
+ startPos: 8,
+ endPos: 10,
+ isCurrent: 1,
+ hash: spanHash3,
+ });
+ check(`bundle.${bundle0ID}`, bundle0ID);
+
+ // the above list is supposed to be exhaustive
+ if (exportData.size) {
+ console.log('unexpected exportData keys');
+ console.log(exportData);
+ t.fail('unexpected exportData keys');
+ }
+ }
+
+ // artifacts
+ {
+ const names = new Set();
+ const contents = new Map();
+ for await (const name of exporter.getArtifactNames()) {
+ names.add(name);
+ contents.set(name, (await buffer(exporter.getArtifact(name))).toString());
+ }
+ // console.log('artifacts:', contents);
+
+ const check = async (name, expected) => {
+ t.true(names.has(name));
+ names.delete(name);
+ let data = contents.get(name);
+ if (typeof expected === 'object') {
+ data = JSON.parse(data);
+ }
+ t.deepEqual(data, expected);
+ };
+
+ // export mode 'operational' means we omit historical snapshots and
+ // transcript spans
+
+ await check('snapshot.v1.7', 'snapshot data');
+ await check('transcript.v1.8.10', 'load-snapshot\ndelivery3\n');
+ await check(`bundle.${bundle0ID}`, bundle0);
+
+ t.true(rank[mode] > 0);
+ if (rank[mode] >= rank.replay) {
+ // add the old transcript spans of the current incarnation
+ await check(
+ 'transcript.v1.2.5',
+ 'start-worker\ndelivery1\nsave-snapshot\n',
+ );
+ await check(
+ 'transcript.v1.5.8',
+ 'load-snapshot\ndelivery2\nsave-snapshot\n',
+ );
+ }
+
+ if (rank[mode] >= rank.archival) {
+ // add the spans of the old incarnation
+ await check('transcript.v1.0.2', 'start-worker\nshutdown-worker\n');
+ }
+
+ if (mode === 'debug') {
+ // adds the old snapshots, which are only present if
+ // initSwingStore() was given {keepSnapshots: true}
+ await check('snapshot.v1.4', 'snapshot data');
+ // mode='debug-on-pruned' exercises the keepSnapshots:false case
+ }
+
+ if (names.size) {
+ console.log(`unexpected artifacts:`);
+ console.log(names);
+ t.fail('unexpected artifacts');
+ }
+ }
+});
+
+test('export operational', exportTest, 'operational');
+test('export replay', exportTest, 'replay');
+test('export archival', exportTest, 'archival');
+test('export debug', exportTest, 'debug');
+test('export debug-on-pruned', exportTest, 'debug-on-pruned');
+
+test('export omits pruned span artifacts', async t => {
+ const [dbDir, cleanup] = await tmpDir('testdb');
+ t.teardown(cleanup);
+ // const dbDir = 't-db';
+
+ // use keepTranscripts=false to simulate an explicit prune of the
+ // old span
+ const options = { keepTranscripts: false };
+ const ss1 = initSwingStore(dbDir, options);
+ const ks = ss1.kernelStorage;
+
+ // build a DB with two spans, one is inUse, other is pruned
+
+ ks.transcriptStore.initTranscript('v1');
+ ks.transcriptStore.addItem('v1', 'start-worker'); // 0
+ ks.transcriptStore.addItem('v1', 'delivery1'); // 1
+ await ks.snapStore.saveSnapshot('v1', 2, getSnapshotStream(snapshotData));
+ ks.transcriptStore.addItem('v1', 'save-snapshot'); // 2
+ ks.transcriptStore.rolloverSpan('v1'); // range= 0..3
+ const spanHash1 =
+ '57152efdd7fdf75c03371d2b4f1088d5bf3eae7fe643babce527ff81df38998c';
+ // rolloverSpan prunes the contents of the old span
+
+ ks.transcriptStore.addItem('v1', 'load-snapshot'); // 3
+ ks.transcriptStore.addItem('v1', 'delivery2'); // 4
+ const spanHash2 =
+ 'b26c8faf425c3c2738e0c5a5e9a7cd71075c68f0c9f2d6cdfd83c68204801dbb';
+
+ await ss1.hostStorage.commit();
+
+ const artifactMode = 'debug';
+ const exporter = makeSwingStoreExporter(dbDir, { artifactMode });
+
+ // exportData
+ {
+ const exportData = new Map();
+ for await (const [key, value] of exporter.getExportData()) {
+ exportData.set(key, value);
+ }
+ // console.log('exportData:', exportData);
+
+ const check = (key, expected) => {
+ t.true(exportData.has(key));
+ let value = exportData.get(key);
+ exportData.delete(key);
+ if (typeof expected === 'object') {
+ value = JSON.parse(value);
+ }
+ t.deepEqual(value, expected);
+ };
+
+ check('snapshot.v1.2', {
+ vatID: 'v1',
+ snapPos: 2,
+ inUse: 1,
+ hash: snapHash,
+ });
+ check('snapshot.v1.current', 'snapshot.v1.2');
+ const base = { vatID: 'v1', incarnation: 0, isCurrent: 0 };
+ check('transcript.v1.0', {
+ ...base,
+ startPos: 0,
+ endPos: 3,
+ hash: spanHash1,
+ });
+ check('transcript.v1.current', {
+ ...base,
+ startPos: 3,
+ endPos: 5,
+ isCurrent: 1,
+ hash: spanHash2,
+ });
+
+ // the above list is supposed to be exhaustive
+ if (exportData.size) {
+ console.log('unexpected exportData keys');
+ console.log(exportData);
+ t.fail('unexpected exportData keys');
+ }
+ }
+
+ // artifacts
+ {
+ const names = new Set();
+ const contents = new Map();
+ for await (const name of exporter.getArtifactNames()) {
+ names.add(name);
+ contents.set(name, (await buffer(exporter.getArtifact(name))).toString());
+ }
+ // console.log('artifacts:', contents);
+
+ const check = async (name, expected) => {
+ t.true(names.has(name));
+ names.delete(name);
+ let data = contents.get(name);
+ if (typeof expected === 'object') {
+ data = JSON.parse(data);
+ }
+ t.deepEqual(data, expected);
+ };
+
+ // export mode 'debug' means we include all available
+ // historical snapshots and transcript spans
+
+ await check('snapshot.v1.2', 'snapshot data');
+ // no transcript.v1.0.3 because the contents were pruned
+ await check('transcript.v1.3.5', 'load-snapshot\ndelivery2\n');
+
+ if (names.size) {
+ console.log(`unexpected artifacts:`);
+ console.log(names);
+ t.fail('unexpected artifacts');
+ }
+ }
+});
diff --git a/packages/swing-store/test/test-exportImport.js b/packages/swing-store/test/test-exportImport.js
index 852526582f7..20fb5d0dc7b 100644
--- a/packages/swing-store/test/test-exportImport.js
+++ b/packages/swing-store/test/test-exportImport.js
@@ -9,11 +9,9 @@ import test from 'ava';
import tmp from 'tmp';
import bundleSource from '@endo/bundle-source';
-import {
- initSwingStore,
- makeSwingStoreExporter,
- importSwingStore,
-} from '../src/swingStore.js';
+import { initSwingStore } from '../src/swingStore.js';
+import { makeSwingStoreExporter } from '../src/exporter.js';
+import { importSwingStore } from '../src/importer.js';
function makeExportLog() {
const exportLog = [];
@@ -128,7 +126,8 @@ test('crank abort leaves no debris in export log', async t => {
await ssOut.hostStorage.commit();
}
- const exporter = makeSwingStoreExporter(dbDir, 'current');
+ const artifactMode = 'operational';
+ const exporter = makeSwingStoreExporter(dbDir, { artifactMode });
const exportData = [];
for await (const elem of exporter.getExportData()) {
@@ -176,14 +175,14 @@ async function testExportImport(
runMode,
exportMode,
importMode,
- failureMode,
expectedArtifactNames,
+ failureMode = 'none',
) {
const exportLog = makeExportLog();
const [dbDir, cleanup] = await tmpDir('testdb');
t.teardown(cleanup);
- const keepTranscripts = runMode !== 'current';
+ const keepTranscripts = runMode !== 'operational';
const keepSnapshots = runMode === 'debug';
const ssOut = initSwingStore(dbDir, {
exportCallback: exportLog.callback,
@@ -226,7 +225,15 @@ async function testExportImport(
await ssOut.hostStorage.commit();
}
- const exporter = makeSwingStoreExporter(dbDir, exportMode);
+ const incomplete = 'incomplete archival transcript: 3 vs 12';
+ function doExport() {
+ return makeSwingStoreExporter(dbDir, { artifactMode: exportMode });
+ }
+ if (failureMode === 'export') {
+ t.throws(doExport, { message: incomplete });
+ return;
+ }
+ const exporter = doExport();
const exportData = [];
for await (const elem of exporter.getExportData()) {
@@ -300,38 +307,46 @@ async function testExportImport(
],
]);
- expectedArtifactNames = Array.from(expectedArtifactNames);
- expectedArtifactNames.push(`bundle.${bundleIDA}`);
- expectedArtifactNames.push(`bundle.${bundleIDB}`);
+ expectedArtifactNames = new Set(expectedArtifactNames);
+ expectedArtifactNames.add(`bundle.${bundleIDA}`);
+ expectedArtifactNames.add(`bundle.${bundleIDB}`);
- const artifactNames = [];
+ const artifactNames = new Set();
for await (const name of exporter.getArtifactNames()) {
- artifactNames.push(name);
+ artifactNames.add(name);
}
t.deepEqual(artifactNames, expectedArtifactNames);
- const includeHistorical = importMode !== 'current';
-
const beforeDump = debug.dump(keepSnapshots);
- let ssIn;
- try {
- ssIn = await importSwingStore(exporter, null, {
- includeHistorical,
- });
- } catch (e) {
- if (failureMode === 'transcript') {
- t.is(e.message, 'artifact "transcript.vatA.0.3" is not available');
- return;
- } else if (failureMode === 'snapshot') {
- t.is(e.message, 'artifact "snapshot.vatA.2" is not available');
- return;
- }
- throw e;
+ function doImport() {
+ return importSwingStore(exporter, null, { artifactMode: importMode });
+ }
+
+ if (failureMode === 'import') {
+ await t.throwsAsync(doImport, { message: incomplete });
+ return;
}
t.is(failureMode, 'none');
+ const ssIn = await doImport();
await ssIn.hostStorage.commit();
- const dumpsShouldMatch =
- runMode !== 'debug' || (exportMode === 'debug' && importMode !== 'current');
+ let dumpsShouldMatch = true;
+ if (runMode === 'operational') {
+ dumpsShouldMatch = true; // there's no data to lose
+ } else if (runMode === 'archival') {
+ if (exportMode === 'operational') {
+ dumpsShouldMatch = false; // export omits some data
+ }
+ if (importMode === 'operational') {
+ dumpsShouldMatch = false; // import ignores some data
+ }
+ } else if (runMode === 'debug') {
+ if (exportMode !== 'debug') {
+ dumpsShouldMatch = false; // export omits some data
+ }
+ if (importMode !== 'debug') {
+ dumpsShouldMatch = false; // import ignores some data
+ }
+ }
if (dumpsShouldMatch) {
const afterDump = ssIn.debug.dump(keepSnapshots);
t.deepEqual(beforeDump, afterDump);
@@ -366,100 +381,108 @@ const expectedDebugArtifacts = [
'transcript.vatB.5.10',
];
-const C = 'current';
+const C = 'operational'; // nee 'current'
+// we don't try to test 'replay' here: see test-import.js and test-export.js
const A = 'archival';
const D = 'debug';
+// importMode='archival' requires a non-pruned DB
+// (runMode!=='operational'), with exportMode as 'archival' or 'debug'
+
+// the expected artifacts are a function of the runMode and exportMode, not importMode
+
test('export and import data for state sync - current->current->current', async t => {
- await testExportImport(t, C, C, C, 'none', expectedCurrentArtifacts);
+ await testExportImport(t, C, C, C, expectedCurrentArtifacts);
});
+// so this one fails during import
test('export and import data for state sync - current->current->archival', async t => {
- await testExportImport(t, C, C, A, 'none', expectedCurrentArtifacts);
+ await testExportImport(t, C, C, A, expectedCurrentArtifacts, 'import');
});
test('export and import data for state sync - current->current->debug', async t => {
- await testExportImport(t, C, C, D, 'none', expectedCurrentArtifacts);
+ await testExportImport(t, C, C, D, expectedCurrentArtifacts);
});
+// these all throw an error during export, because 'archival' requires a non-pruned DB
test('export and import data for state sync - current->archival->current', async t => {
- await testExportImport(t, C, A, C, 'none', expectedArchivalArtifacts);
+ await testExportImport(t, C, A, C, [], 'export');
});
test('export and import data for state sync - current->archival->archival', async t => {
- await testExportImport(t, C, A, A, 'transcript', expectedArchivalArtifacts);
+ await testExportImport(t, C, A, A, [], 'export');
});
test('export and import data for state sync - current->archival->debug', async t => {
- await testExportImport(t, C, A, D, 'transcript', expectedArchivalArtifacts);
+ await testExportImport(t, C, A, D, [], 'export');
});
test('export and import data for state sync - current->debug->current', async t => {
- await testExportImport(t, C, D, C, 'none', expectedDebugArtifacts);
+ await testExportImport(t, C, D, C, expectedCurrentArtifacts);
});
test('export and import data for state sync - current->debug->archival', async t => {
- await testExportImport(t, C, D, A, 'snapshot', expectedDebugArtifacts);
+ await testExportImport(t, C, D, A, expectedCurrentArtifacts, 'import');
});
test('export and import data for state sync - current->debug->debug', async t => {
- await testExportImport(t, C, D, D, 'snapshot', expectedDebugArtifacts);
+ await testExportImport(t, C, D, D, expectedCurrentArtifacts);
});
// ------------------------------------------------------------
test('export and import data for state sync - archival->current->current', async t => {
- await testExportImport(t, A, C, C, 'none', expectedCurrentArtifacts);
+ await testExportImport(t, A, C, C, expectedCurrentArtifacts);
});
test('export and import data for state sync - archival->current->archival', async t => {
- await testExportImport(t, A, C, A, 'none', expectedCurrentArtifacts);
+ await testExportImport(t, A, C, A, expectedCurrentArtifacts, 'import');
});
test('export and import data for state sync - archival->current->debug', async t => {
- await testExportImport(t, A, C, D, 'none', expectedCurrentArtifacts);
+ await testExportImport(t, A, C, D, expectedCurrentArtifacts);
});
test('export and import data for state sync - archival->archival->current', async t => {
- await testExportImport(t, A, A, C, 'none', expectedArchivalArtifacts);
+ await testExportImport(t, A, A, C, expectedArchivalArtifacts);
});
test('export and import data for state sync - archival->archival->archival', async t => {
- await testExportImport(t, A, A, A, 'none', expectedArchivalArtifacts);
+ await testExportImport(t, A, A, A, expectedArchivalArtifacts);
});
test('export and import data for state sync - archival->archival->debug', async t => {
- await testExportImport(t, A, A, D, 'none', expectedArchivalArtifacts);
+ await testExportImport(t, A, A, D, expectedArchivalArtifacts);
});
test('export and import data for state sync - archival->debug->current', async t => {
- await testExportImport(t, A, D, C, 'none', expectedDebugArtifacts);
+ await testExportImport(t, A, D, C, expectedArchivalArtifacts);
});
test('export and import data for state sync - archival->debug->archival', async t => {
- await testExportImport(t, A, D, A, 'snapshot', expectedDebugArtifacts);
+ await testExportImport(t, A, D, A, expectedArchivalArtifacts);
});
test('export and import data for state sync - archival->debug->debug', async t => {
- await testExportImport(t, A, D, D, 'snapshot', expectedDebugArtifacts);
+ await testExportImport(t, A, D, D, expectedArchivalArtifacts);
});
// ------------------------------------------------------------
test('export and import data for state sync - debug->current->current', async t => {
- await testExportImport(t, D, C, C, 'none', expectedCurrentArtifacts);
+ await testExportImport(t, D, C, C, expectedCurrentArtifacts);
});
test('export and import data for state sync - debug->current->archival', async t => {
- await testExportImport(t, D, C, A, 'none', expectedCurrentArtifacts);
+ await testExportImport(t, D, C, A, expectedCurrentArtifacts, 'import');
});
test('export and import data for state sync - debug->current->debug', async t => {
- await testExportImport(t, D, C, D, 'none', expectedCurrentArtifacts);
+ await testExportImport(t, D, C, D, expectedCurrentArtifacts);
});
test('export and import data for state sync - debug->archival->current', async t => {
- await testExportImport(t, D, A, C, 'none', expectedArchivalArtifacts);
+ await testExportImport(t, D, A, C, expectedArchivalArtifacts);
});
test('export and import data for state sync - debug->archival->archival', async t => {
- await testExportImport(t, D, A, A, 'none', expectedArchivalArtifacts);
+ await testExportImport(t, D, A, A, expectedArchivalArtifacts);
});
test('export and import data for state sync - debug->archival->debug', async t => {
- await testExportImport(t, D, A, D, 'none', expectedArchivalArtifacts);
+ await testExportImport(t, D, A, D, expectedArchivalArtifacts);
});
test('export and import data for state sync - debug->debug->current', async t => {
- await testExportImport(t, D, D, C, 'none', expectedDebugArtifacts);
+ await testExportImport(t, D, D, C, expectedDebugArtifacts);
});
test('export and import data for state sync - debug->debug->archival', async t => {
- await testExportImport(t, D, D, A, 'none', expectedDebugArtifacts);
+ await testExportImport(t, D, D, A, expectedDebugArtifacts);
});
test('export and import data for state sync - debug->debug->debug', async t => {
- await testExportImport(t, D, D, D, 'none', expectedDebugArtifacts);
+ await testExportImport(t, D, D, D, expectedDebugArtifacts);
});
diff --git a/packages/swing-store/test/test-import.js b/packages/swing-store/test/test-import.js
new file mode 100644
index 00000000000..3711b8beb60
--- /dev/null
+++ b/packages/swing-store/test/test-import.js
@@ -0,0 +1,505 @@
+// @ts-check
+
+import '@endo/init/debug.js';
+
+import path from 'path';
+import { createGunzip } from 'zlib';
+import { Readable } from 'stream';
+import { Buffer } from 'buffer';
+
+import sqlite3 from 'better-sqlite3';
+import test from 'ava';
+import { decodeBase64 } from '@endo/base64';
+
+import { buffer } from '../src/util.js';
+import { importSwingStore, makeSwingStoreExporter } from '../src/index.js';
+
+import { tmpDir, makeB0ID } from './util.js';
+
+const rank = {
+ operational: 1,
+ replay: 2,
+ archival: 3,
+ debug: 4,
+};
+
+const snapshotData = 'snapshot data';
+// this snapHash was computed manually
+const snapHash =
+ 'e7dee7266896538616b630a5da40a90e007726a383e005a9c9c5dd0c2daf9329';
+
+/** @type {import('../src/bundleStore.js').Bundle} */
+const bundle0 = { moduleFormat: 'nestedEvaluate', source: '1+1' };
+const bundle0ID = makeB0ID(bundle0);
+
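+// debug.dump() returns bundles as base64-encoded JSON; decode them back
+// into objects so they can be compared against the originals below.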
+function convert(orig) {
+ const bundles = Object.fromEntries(
+ Object.entries(orig.bundles).map(([bundleID, encBundle]) => {
+ const s = new TextDecoder().decode(decodeBase64(encBundle));
+ assert(bundleID.startsWith('b0-'), bundleID);
+ const bundle = JSON.parse(s);
+ return [bundleID, bundle];
+ }),
+ );
+ return { ...orig, bundles };
+}
+
+/**
+ * @typedef { import('../src/exporter').KVPair } KVPair
+ */
+
+/**
+ * @param { Map<string, string | null> } exportData
+ * @param { Map<string, string> } artifacts
+ */
+export function makeExporter(exportData, artifacts) {
+ return {
+ async *getExportData() {
+ for (const [key, value] of exportData.entries()) {
+ /** @type { KVPair } */
+ const pair = [key, value];
+ yield pair;
+ }
+ },
+ async *getArtifactNames() {
+ for (const name of artifacts.keys()) {
+ yield name;
+ }
+ },
+ async *getArtifact(name) {
+ const data = artifacts.get(name);
+ assert(data, `missing artifact ${name}`);
+ yield Buffer.from(data);
+ },
+ // eslint-disable-next-line no-empty-function
+ async close() {},
+ };
+}
+
+test('import empty', async t => {
+ const [dbDir, cleanup] = await tmpDir('testdb');
+ t.teardown(cleanup);
+ const exporter = makeExporter(new Map(), new Map());
+ const ss = await importSwingStore(exporter, dbDir);
+ await ss.hostStorage.commit();
+ const data = convert(ss.debug.dump());
+ t.deepEqual(data, {
+ kvEntries: {},
+ transcripts: {},
+ snapshots: {},
+ bundles: {},
+ });
+});
+
+export function buildData() {
+ // build an export manually
+ const exportData = new Map();
+ const artifacts = new Map();
+
+ // shadow kvStore
+ exportData.set('kv.key1', 'value1');
+
+ // now add artifacts and metadata in pairs
+
+ artifacts.set(`bundle.${bundle0ID}`, JSON.stringify(bundle0));
+ exportData.set(`bundle.${bundle0ID}`, bundle0ID);
+
+ const sbase = { vatID: 'v1', hash: snapHash, inUse: 0 };
+ const tbase = { vatID: 'v1', startPos: 0, isCurrent: 0, incarnation: 1 };
+ const addTS = (key, obj) =>
+ exportData.set(key, JSON.stringify({ ...tbase, ...obj }));
+ const t0hash =
+ '5bee0f44eca02f23eab03703e84ed2647d5d117fed99e1c30a3b424b7f082ab9';
+ const t2hash =
+ '57152efdd7fdf75c03371d2b4f1088d5bf3eae7fe643babce527ff81df38998c';
+ const t5hash =
+ '1947001e78e01bd1e773feb22b4ffc530447373b9de9274d5d5fbda3f23dbf2b';
+ const t8hash =
+ 'e6b42c6a3fb94285a93162f25a9fc0145fd4c5bb144917dc572c50ae2d02ee69';
+
+ addTS(`transcript.v1.0`, { incarnation: 0, endPos: 2, hash: t0hash });
+ artifacts.set(`transcript.v1.0.2`, 'start-worker\nshutdown-worker\n');
+
+ addTS(`transcript.v1.2`, { startPos: 2, endPos: 5, hash: t2hash });
+ artifacts.set(
+ `transcript.v1.2.5`,
+ 'start-worker\ndelivery1\nsave-snapshot\n',
+ );
+ exportData.set(`snapshot.v1.4`, JSON.stringify({ ...sbase, snapPos: 4 }));
+ artifacts.set(`snapshot.v1.4`, snapshotData);
+
+ addTS(`transcript.v1.5`, { startPos: 5, endPos: 8, hash: t5hash });
+ artifacts.set(
+ 'transcript.v1.5.8',
+ 'load-snapshot\ndelivery2\nsave-snapshot\n',
+ );
+ exportData.set(
+ `snapshot.v1.7`,
+ JSON.stringify({ ...sbase, snapPos: 7, inUse: 1 }),
+ );
+ artifacts.set(`snapshot.v1.7`, snapshotData);
+
+ artifacts.set('transcript.v1.8.10', 'load-snapshot\ndelivery3\n');
+ exportData.set(`snapshot.v1.current`, 'snapshot.v1.7');
+ addTS(`transcript.v1.current`, {
+ startPos: 8,
+ endPos: 10,
+ isCurrent: 1,
+ hash: t8hash,
+ });
+
+ return { exportData, artifacts, t0hash, t2hash, t5hash, t8hash };
+}
+
+const importTest = test.macro(async (t, mode) => {
+ /** @typedef {import('../src/internal.js').ArtifactMode} ArtifactMode */
+ const artifactMode = /** @type {ArtifactMode} */ (mode);
+
+ const [dbDir, cleanup] = await tmpDir('testdb');
+ t.teardown(cleanup);
+
+ const { exportData, artifacts, t0hash, t2hash, t5hash, t8hash } = buildData();
+
+ const exporter = makeExporter(exportData, artifacts);
+
+ // now import
+ const ss = await importSwingStore(exporter, dbDir, { artifactMode });
+ await ss.hostStorage.commit();
+ const data = convert(ss.debug.dump());
+
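+ // debug.dump() keys transcript items by absolute position, so rebuild
+ // the expected shape from an item list plus the first retained
+ // position.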
+ const convertTranscript = (items, startPos = 0) => {
+ const out = {};
+ let pos = startPos;
+ for (const item of items) {
+ out[pos] = item;
+ pos += 1;
+ }
+ return out;
+ };
+
+ const convertSnapshots = async allVatSnapshots => {
+ const out = {};
+ for await (const [vatID, snapshots] of Object.entries(allVatSnapshots)) {
+ const convertedSnapshots = [];
+ for await (const snapshot of snapshots) {
+ if (!snapshot.compressedSnapshot) {
+ continue;
+ }
+ const gzReader = Readable.from(snapshot.compressedSnapshot);
+ const unzipper = createGunzip();
+ const snapshotReader = gzReader.pipe(unzipper);
+ const uncompressedSnapshot = await buffer(snapshotReader);
+ const converted = { ...snapshot, uncompressedSnapshot };
+ delete converted.compressedSnapshot;
+ convertedSnapshots.push(converted);
+ }
+ out[vatID] = convertedSnapshots;
+ }
+ return out;
+ };
+
+ t.deepEqual(data.kvEntries, { key1: 'value1' });
+ let ts = [];
+ if (rank[artifactMode] >= rank.archival) {
+ // only 'archival' and 'debug' get the old incarnation's span
+ ts = ts.concat(['start-worker', 'shutdown-worker']); // 0,1
+ }
+ if (rank[artifactMode] >= rank.replay) {
+    // those modes, plus 'replay', also get the current incarnation's older spans
+ ts = ts.concat(['start-worker', 'delivery1', 'save-snapshot']); // 2,3,4
+ ts = ts.concat(['load-snapshot', 'delivery2', 'save-snapshot']); // 5,6,7
+ }
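+  // every mode gets the current incarnation's current span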
+ ts = ts.concat(['load-snapshot', 'delivery3']); // 8,9
+
+ let tsStart;
+ if (artifactMode === 'archival' || artifactMode === 'debug') {
+ tsStart = 0;
+ } else if (artifactMode === 'replay') {
+ tsStart = 2;
+ } else {
+ tsStart = 8;
+ }
+
+ const expectedTranscript = convertTranscript(ts, tsStart);
+ t.deepEqual(data.transcripts, { v1: expectedTranscript });
+ const uncompressedSnapshot = Buffer.from(snapshotData);
+ const expectedSnapshots = [];
+ if (artifactMode === 'debug') {
+ expectedSnapshots.push({
+ uncompressedSnapshot,
+ hash: snapHash,
+ inUse: 0,
+ snapPos: 4,
+ });
+ }
+ expectedSnapshots.push({
+ uncompressedSnapshot,
+ hash: snapHash,
+ inUse: 1,
+ snapPos: 7,
+ });
+ t.deepEqual(await convertSnapshots(data.snapshots), {
+ v1: expectedSnapshots,
+ });
+ t.deepEqual(data.bundles, { [bundle0ID]: bundle0 });
+
+ // look directly at the DB to confirm presence of metadata rows
+ const db = sqlite3(path.join(dbDir, 'swingstore.sqlite'));
+ const spanRows = [
+ ...db.prepare('SELECT * FROM transcriptSpans ORDER BY startPos').iterate(),
+ ];
+ t.deepEqual(
+ spanRows.map(sr => sr.startPos),
+ [0, 2, 5, 8],
+ );
+
+ // and a new export should include all metadata, regardless of import mode
+
+ const reExporter = makeSwingStoreExporter(dbDir);
+ const reExportData = new Map();
+ for await (const [key, value] of reExporter.getExportData()) {
+ reExportData.set(key, value);
+ }
+ // console.log(reExportData);
+
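+  // assert that the re-export contains `key` with the expected value, and
+  // delete it so leftover keys can be detected at the end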
+ const check = (key, expected) => {
+ t.true(reExportData.has(key), `missing exportData ${key}`);
+ let value = reExportData.get(key);
+ reExportData.delete(key);
+ if (typeof expected === 'object') {
+ value = JSON.parse(value);
+ }
+ t.deepEqual(value, expected);
+ };
+
+ check('kv.key1', 'value1');
+ check('snapshot.v1.4', { vatID: 'v1', snapPos: 4, inUse: 0, hash: snapHash });
+ check('snapshot.v1.7', { vatID: 'v1', snapPos: 7, inUse: 1, hash: snapHash });
+ check('snapshot.v1.current', 'snapshot.v1.7');
+ const base0 = { vatID: 'v1', incarnation: 0, isCurrent: 0 };
+ const base1 = { vatID: 'v1', incarnation: 1, isCurrent: 0 };
+ check('transcript.v1.0', { ...base0, startPos: 0, endPos: 2, hash: t0hash });
+ check('transcript.v1.2', { ...base1, startPos: 2, endPos: 5, hash: t2hash });
+ check('transcript.v1.5', { ...base1, startPos: 5, endPos: 8, hash: t5hash });
+ check('transcript.v1.current', {
+ ...base1,
+ startPos: 8,
+ endPos: 10,
+ isCurrent: 1,
+ hash: t8hash,
+ });
+ check(`bundle.${bundle0ID}`, bundle0ID);
+
+ // the above list is supposed to be exhaustive
+ if (reExportData.size) {
+ console.log(reExportData);
+ t.fail('unexpected exportData keys');
+ }
+});
+
+test('import operational', importTest, 'operational');
+test('import replay', importTest, 'replay');
+test('import archival', importTest, 'archival');
+test('import debug', importTest, 'debug');
+
+test('import is missing bundle', async t => {
+ const [dbDir, cleanup] = await tmpDir('testdb');
+ t.teardown(cleanup);
+
+ const exportData = new Map();
+ exportData.set(`bundle.${bundle0ID}`, bundle0ID);
+ // but there is no artifact to match
+ const exporter = makeExporter(exportData, new Map());
+ await t.throwsAsync(async () => importSwingStore(exporter, dbDir), {
+ message: /missing bundles for:/,
+ });
+});
+
+test('import is missing snapshot', async t => {
+ const [dbDir, cleanup] = await tmpDir('testdb');
+ t.teardown(cleanup);
+
+ const exportData = new Map();
+ exportData.set(
+ `snapshot.v1.2`,
+ JSON.stringify({ vatID: 'v1', hash: snapHash, inUse: 1, snapPos: 2 }),
+ );
+ // but there is no artifact to match
+ const exporter = makeExporter(exportData, new Map());
+ await t.throwsAsync(async () => importSwingStore(exporter, dbDir), {
+ message: /current snapshots are pruned for vats/,
+ });
+});
+
+test('import is missing transcript span', async t => {
+ const [dbDir, cleanup] = await tmpDir('testdb');
+ t.teardown(cleanup);
+
+ const exportData = new Map();
+ const t0hash =
+ '57152efdd7fdf75c03371d2b4f1088d5bf3eae7fe643babce527ff81df38998c';
+ exportData.set(
+ `transcript.v1.current`,
+ JSON.stringify({
+ vatID: 'v1',
+ startPos: 0,
+ endPos: 3,
+ hash: t0hash,
+ isCurrent: 1,
+ incarnation: 0,
+ }),
+ );
+ // but there is no artifact to match
+ const exporter = makeExporter(exportData, new Map());
+ await t.throwsAsync(async () => importSwingStore(exporter, dbDir), {
+ message: /incomplete current transcript/,
+ });
+});
+
+test('import has mismatched transcript span', async t => {
+ const [dbDir, cleanup] = await tmpDir('testdb');
+ t.teardown(cleanup);
+
+ const exportData = new Map();
+ const t0hash =
+ '57152efdd7fdf75c03371d2b4f1088d5bf3eae7fe643babce527ff81df38998c';
+ exportData.set(
+ `transcript.v1.current`,
+ JSON.stringify({
+ vatID: 'v1',
+ startPos: 0,
+ endPos: 3,
+ hash: t0hash,
+ isCurrent: 0, // mismatch
+ incarnation: 0,
+ }),
+ );
+ const exporter = makeExporter(exportData, new Map());
+ await t.throwsAsync(async () => importSwingStore(exporter, dbDir), {
+ message: /transcript key "transcript.v1.current" mismatches metadata/,
+ });
+});
+
+test('import has incomplete transcript span', async t => {
+ const [dbDir, cleanup] = await tmpDir('testdb');
+ t.teardown(cleanup);
+
+ const exportData = new Map();
+ const artifacts = new Map();
+ const t0hash =
+ '57152efdd7fdf75c03371d2b4f1088d5bf3eae7fe643babce527ff81df38998c';
+ exportData.set(
+ `transcript.v1.current`,
+ JSON.stringify({
+ vatID: 'v1',
+ startPos: 0,
+ endPos: 4, // expect 4 items
+ hash: t0hash,
+ isCurrent: 1,
+ incarnation: 0,
+ }),
+ );
+ // but artifact only contains 3
+ artifacts.set(
+ `transcript.v1.0.4`,
+ 'start-worker\ndelivery1\nsave-snapshot\n',
+ );
+
+ const exporter = makeExporter(exportData, artifacts);
+ await t.throwsAsync(async () => importSwingStore(exporter, dbDir), {
+ message: /artifact "transcript.v1.0.4" is not complete/,
+ });
+});
+
+test('import has corrupt transcript span', async t => {
+ const [dbDir, cleanup] = await tmpDir('testdb');
+ t.teardown(cleanup);
+
+ const exportData = new Map();
+ const artifacts = new Map();
+ const t0hash =
+ '57152efdd7fdf75c03371d2b4f1088d5bf3eae7fe643babce527ff81df38998c';
+ exportData.set(
+ `transcript.v1.current`,
+ JSON.stringify({
+ vatID: 'v1',
+ startPos: 0,
+ endPos: 3,
+ hash: t0hash,
+ isCurrent: 1,
+ incarnation: 0,
+ }),
+ );
+ artifacts.set(
+ `transcript.v1.0.3`,
+ 'start-worker\nBAD-DELIVERY1\nsave-snapshot\n',
+ );
+
+ const exporter = makeExporter(exportData, artifacts);
+ await t.throwsAsync(async () => importSwingStore(exporter, dbDir), {
+ message: /artifact "transcript.v1.0.3" hash is.*metadata says/,
+ });
+});
+
+test('import has corrupt snapshot', async t => {
+ const [dbDir, cleanup] = await tmpDir('testdb');
+ t.teardown(cleanup);
+
+ const exportData = new Map();
+ const artifacts = new Map();
+ exportData.set(
+ `snapshot.v1.2`,
+ JSON.stringify({
+ vatID: 'v1',
+ snapPos: 2,
+ hash: snapHash,
+ inUse: 1,
+ }),
+ );
+ artifacts.set('snapshot.v1.2', `${snapshotData}WRONG`);
+
+ const exporter = makeExporter(exportData, artifacts);
+ await t.throwsAsync(async () => importSwingStore(exporter, dbDir), {
+ message: /snapshot "snapshot.v1.2" hash is.*metadata says/,
+ });
+});
+
+test('import has corrupt bundle', async t => {
+ const [dbDir, cleanup] = await tmpDir('testdb');
+ t.teardown(cleanup);
+
+ const exportData = new Map();
+ const artifacts = new Map();
+ exportData.set(`bundle.${bundle0ID}`, bundle0ID);
+ const badBundle = { ...bundle0, source: 'WRONG' };
+ artifacts.set(`bundle.${bundle0ID}`, JSON.stringify(badBundle));
+
+ const exporter = makeExporter(exportData, artifacts);
+ await t.throwsAsync(async () => importSwingStore(exporter, dbDir), {
+ message: /bundleID ".*" does not match bundle artifact/,
+ });
+});
+
+test('import has unknown metadata tag', async t => {
+ const [dbDir, cleanup] = await tmpDir('testdb');
+ t.teardown(cleanup);
+
+ const exportData = new Map();
+ exportData.set(`unknown.v1.current`, 'value');
+ const exporter = makeExporter(exportData, new Map());
+ await t.throwsAsync(async () => importSwingStore(exporter, dbDir), {
+ message: /unknown export-data type "unknown" on import/,
+ });
+});
+
+test('import has unknown artifact tag', async t => {
+ const [dbDir, cleanup] = await tmpDir('testdb');
+ t.teardown(cleanup);
+
+ const artifacts = new Map();
+ artifacts.set('unknown.v1.current', 'value');
+ const exporter = makeExporter(new Map(), artifacts);
+ await t.throwsAsync(async () => importSwingStore(exporter, dbDir), {
+ message: /unknown artifact type "unknown" on import/,
+ });
+});
diff --git a/packages/swing-store/test/test-repair-metadata.js b/packages/swing-store/test/test-repair-metadata.js
new file mode 100644
index 00000000000..38f2f9972a7
--- /dev/null
+++ b/packages/swing-store/test/test-repair-metadata.js
@@ -0,0 +1,131 @@
+// @ts-check
+
+import '@endo/init/debug.js';
+
+import path from 'path';
+import test from 'ava';
+import sqlite3 from 'better-sqlite3';
+
+import { importSwingStore } from '../src/index.js';
+
+import { makeExporter, buildData } from './test-import.js';
+import { tmpDir } from './util.js';
+
+test('repair metadata', async t => {
+ const [dbDir, cleanup] = await tmpDir('testdb');
+ t.teardown(cleanup);
+
+ const { exportData, artifacts } = buildData();
+
+ // simulate a swingstore broken by #8025 by importing everything,
+ // then manually deleting the historical metadata entries from the
+ // DB
+ const exporter = makeExporter(exportData, artifacts);
+ const ss = await importSwingStore(exporter, dbDir);
+ await ss.hostStorage.commit();
+
+ const filePath = path.join(dbDir, 'swingstore.sqlite');
+ const db = sqlite3(filePath);
+
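+  // pluck() makes each prepared statement return bare column values
+  // instead of row objects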
+ const getTS = db.prepare(
+ 'SELECT startPos FROM transcriptSpans WHERE vatID = ? ORDER BY startPos',
+ );
+ getTS.pluck();
+ const getSS = db.prepare(
+ 'SELECT snapPos FROM snapshots WHERE vatID = ? ORDER BY snapPos',
+ );
+ getSS.pluck();
+
+ // assert that all the metadata is there at first
+ const ts1 = getTS.all('v1');
+ t.deepEqual(ts1, [0, 2, 5, 8]); // four spans
+ const ss1 = getSS.all('v1');
+ t.deepEqual(ss1, [4, 7]); // two snapshots
+
+ // now clobber them to simulate #8025 (note: these auto-commit)
+ db.prepare('DELETE FROM transcriptSpans WHERE isCurrent IS NULL').run();
+  db.prepare('DELETE FROM snapshots WHERE inUse IS NULL').run();
+
+ // confirm that we clobbered them
+ const ts2 = getTS.all('v1');
+ t.deepEqual(ts2, [8]); // only the latest
+ const ss2 = getSS.all('v1');
+ t.deepEqual(ss2, [7]);
+
+ // now fix it
+ await ss.hostStorage.repairMetadata(exporter);
+ await ss.hostStorage.commit();
+
+ // and check that the metadata is back
+ const ts3 = getTS.all('v1');
+ t.deepEqual(ts3, [0, 2, 5, 8]); // all four again
+ const ss3 = getSS.all('v1');
+ t.deepEqual(ss3, [4, 7]);
+
+ // repair should be idempotent
+ await ss.hostStorage.repairMetadata(exporter);
+
+ const ts4 = getTS.all('v1');
+ t.deepEqual(ts4, [0, 2, 5, 8]); // still there
+ const ss4 = getSS.all('v1');
+ t.deepEqual(ss4, [4, 7]);
+});
+
+test('repair metadata ignores kvStore entries', async t => {
+ const [dbDir, cleanup] = await tmpDir('testdb');
+ t.teardown(cleanup);
+
+ const { exportData, artifacts } = buildData();
+
+ const exporter = makeExporter(exportData, artifacts);
+ const ss = await importSwingStore(exporter, dbDir);
+ await ss.hostStorage.commit();
+
+ // perform the repair with spurious kv entries
+ exportData.set('kv.key2', 'value2');
+ await ss.hostStorage.repairMetadata(exporter);
+ await ss.hostStorage.commit();
+
+ // the spurious kv entry should be ignored
+ t.deepEqual(ss.debug.dump().kvEntries, { key1: 'value1' });
+});
+
+test('repair metadata rejects mismatched snapshot entries', async t => {
+ const [dbDir, cleanup] = await tmpDir('testdb');
+ t.teardown(cleanup);
+
+ const { exportData, artifacts } = buildData();
+
+ const exporter = makeExporter(exportData, artifacts);
+ const ss = await importSwingStore(exporter, dbDir);
+ await ss.hostStorage.commit();
+
+ // perform the repair with mismatched snapshot entry
+ const old = JSON.parse(exportData.get('snapshot.v1.4'));
+ const wrong = { ...old, hash: 'wrong' };
+ exportData.set('snapshot.v1.4', JSON.stringify(wrong));
+
+ await t.throwsAsync(async () => ss.hostStorage.repairMetadata(exporter), {
+ message: /repairSnapshotRecord metadata mismatch/,
+ });
+});
+
+test('repair metadata rejects mismatched transcript span', async t => {
+ const [dbDir, cleanup] = await tmpDir('testdb');
+ t.teardown(cleanup);
+
+ const { exportData, artifacts } = buildData();
+
+ const exporter = makeExporter(exportData, artifacts);
+ const ss = await importSwingStore(exporter, dbDir);
+ await ss.hostStorage.commit();
+
+ // perform the repair with mismatched transcript span entry
+ const old = JSON.parse(exportData.get('transcript.v1.0'));
+ const wrong = { ...old, hash: 'wrong' };
+ exportData.set('transcript.v1.0', JSON.stringify(wrong));
+
+ await t.throwsAsync(async () => ss.hostStorage.repairMetadata(exporter), {
+ message: /repairTranscriptSpanRecord metadata mismatch/,
+ });
+});
diff --git a/packages/swing-store/test/util.js b/packages/swing-store/test/util.js
new file mode 100644
index 00000000000..615cd3af567
--- /dev/null
+++ b/packages/swing-store/test/util.js
@@ -0,0 +1,26 @@
+import { Buffer } from 'node:buffer';
+import tmp from 'tmp';
+import { createSHA256 } from '../src/hasher.js';
+
+/**
+ * @param {string} [prefix]
+ * @returns {Promise<[string, () => void]>}
+ */
+export const tmpDir = prefix =>
+ new Promise((resolve, reject) => {
+ tmp.dir({ unsafeCleanup: true, prefix }, (err, name, removeCallback) => {
+ if (err) {
+ reject(err);
+ } else {
+ resolve([name, removeCallback]);
+ }
+ });
+ });
+
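+/** Yield `contents` as a single-chunk async snapshot stream. */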
+export async function* getSnapshotStream(contents) {
+ yield Buffer.from(contents);
+}
+
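+/** Derive a `b0-` bundle ID from the SHA-256 hash of a bundle's JSON text. */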
+export function makeB0ID(bundle) {
+ return `b0-${createSHA256(JSON.stringify(bundle)).finish()}`;
+}
diff --git a/packages/telemetry/src/slog-to-otel.js b/packages/telemetry/src/slog-to-otel.js
index 5e42756a8d0..45591d35035 100644
--- a/packages/telemetry/src/slog-to-otel.js
+++ b/packages/telemetry/src/slog-to-otel.js
@@ -908,6 +908,17 @@ export const makeSlogToOtelKit = (tracer, overrideAttrs = {}) => {
dbTransactionManager.end();
break;
}
+ case 'cosmic-swingset-upgrade-start': {
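+        // upgrade handling runs outside of any block, so no span should be open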
+ dbTransactionManager.begin();
+ assert(!spans.top());
+ spans.push(['upgrade', slogAttrs.blockHeight]);
+ break;
+ }
+ case 'cosmic-swingset-upgrade-finish': {
+        spans.pop(['upgrade', slogAttrs.blockHeight]);
+ dbTransactionManager.end();
+ break;
+ }
case 'cosmic-swingset-begin-block': {
if (spans.topKind() === 'intra-block') {
spans.pop('intra-block');
diff --git a/packages/vats/src/core/basic-behaviors.js b/packages/vats/src/core/basic-behaviors.js
index b79d79f2948..4ad810a9073 100644
--- a/packages/vats/src/core/basic-behaviors.js
+++ b/packages/vats/src/core/basic-behaviors.js
@@ -40,6 +40,7 @@ const bootMsgEx = {
{ denom: 'ubld', amount: '1000000000000000' },
{ denom: 'uist', amount: '50000000000' },
],
+ swingsetPort: 4,
vbankPort: 3,
vibcPort: 2,
};
diff --git a/packages/vats/test/test-vat-bank-integration.js b/packages/vats/test/test-vat-bank-integration.js
index ed83b39a561..1a06e34c7fd 100644
--- a/packages/vats/test/test-vat-bank-integration.js
+++ b/packages/vats/test/test-vat-bank-integration.js
@@ -49,6 +49,7 @@ test('mintInitialSupply, addBankAssets bootstrap actions', async t => {
chainID: 'ag',
storagePort: 1,
supplyCoins: [{ amount: '50000000', denom: 'uist' }],
+ swingsetPort: 4,
vbankPort: 2,
vibcPort: 3,
};
diff --git a/packages/xsnap/package.json b/packages/xsnap/package.json
index 4cc651c1395..9bd066e8339 100644
--- a/packages/xsnap/package.json
+++ b/packages/xsnap/package.json
@@ -16,6 +16,7 @@
"build:env": "if git status >/dev/null 2>&1; then node src/build.js --show-env > build.env; fi",
"build:from-env": "{ cat build.env; echo node src/build.js; } | xargs env",
"build": "yarn build:bin && yarn build:env",
+ "postinstall": "yarn build:from-env",
"clean": "rm -rf xsnap-native/xsnap/build",
"lint": "run-s --continue-on-error lint:*",
"lint:js": "eslint 'src/**/*.js' 'test/**/*.js' api.js",
diff --git a/packages/xsnap/src/build.js b/packages/xsnap/src/build.js
index fbb4bbb671b..2fcba467a21 100644
--- a/packages/xsnap/src/build.js
+++ b/packages/xsnap/src/build.js
@@ -235,6 +235,7 @@ async function main(args, { env, stdout, spawn, fs, os }) {
`MODDABLE=${ModdableSDK.MODDABLE}`,
`GOAL=${goal}`,
`XSNAP_VERSION=${pkg.version}`,
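+      // presumably forces `__has_builtin` probes to succeed on toolchains
+      // that do not define it (assumption; see xsnap-worker.mk)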
+ `CC=cc "-D__has_builtin(x)=1"`,
'-f',
'xsnap-worker.mk',
],
diff --git a/packages/xsnap/test/snapshots/test-xsnap.js.md b/packages/xsnap/test/snapshots/test-xsnap.js.md
new file mode 100644
index 00000000000..eec90719b35
--- /dev/null
+++ b/packages/xsnap/test/snapshots/test-xsnap.js.md
@@ -0,0 +1,15 @@
+# Snapshot report for `test/test-xsnap.js`
+
+The actual snapshot is saved in `test-xsnap.js.snap`.
+
+Generated by [AVA](https://avajs.dev).
+
+## produce golden snapshot hashes
+
+> no evaluations
+
+ '91d30e59c1a087d58bb6d8eefcf1262e99e59cfc249222ab25f881ac642437e5'
+
+> smallish safeInteger multiplication doesn't spill to XS_NUMBER_KIND
+
+ '4b48f6c58c08bb757efd3b8fb21891a386bdc5bfbae6803c8cb7df108e553ace'
diff --git a/packages/xsnap/test/snapshots/test-xsnap.js.snap b/packages/xsnap/test/snapshots/test-xsnap.js.snap
new file mode 100644
index 00000000000..c4e93b1b54a
Binary files /dev/null and b/packages/xsnap/test/snapshots/test-xsnap.js.snap differ
diff --git a/packages/xsnap/test/test-xsnap.js b/packages/xsnap/test/test-xsnap.js
index 8b827bbca9b..07754b88b4d 100644
--- a/packages/xsnap/test/test-xsnap.js
+++ b/packages/xsnap/test/test-xsnap.js
@@ -4,6 +4,7 @@ import '@endo/init/debug.js';
// eslint-disable-next-line import/no-extraneous-dependencies
import test from 'ava';
+import { createHash } from 'crypto';
import * as proc from 'child_process';
import * as os from 'os';
import fs from 'fs';
@@ -302,6 +303,55 @@ const writeAndReadSnapshot = async (t, snapshotUseFs) => {
test('write and read snapshot (use FS)', writeAndReadSnapshot, true);
test('write and read snapshot (use stream)', writeAndReadSnapshot, false);
+test('produce golden snapshot hashes', async t => {
+ t.log(`\
+The snapshot hashes produced by this test were created from this package's
+version of xsnap compiled for and run on Agoric's supported (within-consensus)
+platforms.
+
+The snapshot hashes will change (and this test will fail) if xsnap or this
+platform deviates from that consensus behavior. This is likely to happen
+when xsnap is upgraded, in which case the new version will need special
+accommodation, not just regenerated golden hashes.
+`);
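+  // pairs of [description, code to evaluate before taking the snapshot]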
+ const toEvals = [
+ [`no evaluations`, ''],
+ [
+ `smallish safeInteger multiplication doesn't spill to XS_NUMBER_KIND`,
+ `globalThis.bazinga = 100; globalThis.bazinga *= 1_000_000;`,
+ ],
+ ];
+ for await (const [description, toEval] of toEvals) {
+ t.log(description);
+ const messages = [];
+ async function handleCommand(message) {
+ messages.push(decode(message));
+ return new Uint8Array();
+ }
+
+ const vat0 = await xsnap({
+ ...options(io),
+ handleCommand,
+ snapshotUseFs: false,
+ });
+ if (toEval) {
+ await vat0.evaluate(toEval);
+ }
+
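+    // stream the snapshot through SHA-256; the hex digest is the golden value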
+ const hash = createHash('sha256');
+ for await (const buf of vat0.makeSnapshotStream()) {
+ hash.update(buf);
+ }
+ await vat0.close();
+
+ const hexHash = hash.digest('hex');
+ t.log(`${description} produces golden hash ${hexHash}`);
+ t.snapshot(hexHash, description);
+ t.deepEqual(messages, [], `${description} messages`);
+ }
+});
+
test('execute immediately after makeSnapshotStream', async t => {
const messages = [];
async function handleCommand(message) {
diff --git a/patches/@lerna+conventional-commits+3.22.0.patch b/patches/@lerna+conventional-commits+3.22.0.patch
new file mode 100644
index 00000000000..49a69ff814a
--- /dev/null
+++ b/patches/@lerna+conventional-commits+3.22.0.patch
@@ -0,0 +1,20 @@
+diff --git a/node_modules/@lerna/conventional-commits/lib/recommend-version.js b/node_modules/@lerna/conventional-commits/lib/recommend-version.js
+index f524f9d..a9b5427 100644
+--- a/node_modules/@lerna/conventional-commits/lib/recommend-version.js
++++ b/node_modules/@lerna/conventional-commits/lib/recommend-version.js
+@@ -53,6 +53,15 @@ function recommendVersion(pkg, type, { changelogPreset, rootPath, tagPrefix, pre
+ // we still need to bump _something_ because lerna saw a change here
+ let releaseType = data.releaseType || "patch";
+
++ // Don't gratuitously break compatibility with clients using `^0.x.y`.
++ if (semver.major(pkg.version) === 0) {
++ if (releaseType === "major") {
++ releaseType = "minor";
++ } else if (releaseType === "minor") {
++ releaseType = "patch";
++ }
++ }
++
+ if (prereleaseId) {
+ const shouldBump = shouldBumpPrerelease(releaseType, pkg.version);
+ const prereleaseType = shouldBump ? `pre${releaseType}` : "prerelease";
diff --git a/repoconfig.sh b/repoconfig.sh
index ab34b7243fc..755ca3d1b1b 100644
--- a/repoconfig.sh
+++ b/repoconfig.sh
@@ -4,6 +4,7 @@ NODEJS_VERSION=v16
GOLANG_VERSION=1.20.3
GOLANG_DIR=golang/cosmos
GOLANG_DAEMON=$GOLANG_DIR/build/agd
+XSNAP_VERSION=agoric-upgrade-10
# Args are major, minor and patch version numbers
function golang_version_check() {
diff --git a/scripts/run-deployment-integration.sh b/scripts/run-deployment-integration.sh
index d5511a02039..eae1ab555c2 100644
--- a/scripts/run-deployment-integration.sh
+++ b/scripts/run-deployment-integration.sh
@@ -1,36 +1,45 @@
#!/bin/sh
set -xueo pipefail
-SDK_REAL_DIR="$(cd "$(dirname "$(readlink -f -- "$0")")/.." > /dev/null && pwd -P)"
+SDK_SRC="$(cd "$(dirname "$(readlink -f -- "$0")")/.." > /dev/null && pwd -P)"
+export SDK_SRC
-# For some reason something in the integration script
-# relies on the SDK being at that location
# Set AGORIC_SDK_PATH to the SDK path on the host if this
# script is running inside a docker environment (and make sure to
# bind mount /var/run/docker.sock)
-if [ "$SDK_REAL_DIR" != "/usr/src/agoric-sdk" ]; then
- echo 'Agoric SDK must be mounted in "/usr/src/agoric-sdk"'
- exit 1
-fi
+export AGORIC_SDK_PATH="${AGORIC_SDK_PATH-$SDK_SRC}"
export NETWORK_NAME=chaintest
-sudo ln -sf /usr/src/agoric-sdk/packages/deployment/bin/ag-setup-cosmos /usr/local/bin/ag-setup-cosmos
-rm -rf /usr/src/agoric-sdk/chaintest ~/.ag-chain-cosmos/ /usr/src/testnet-load-generator/_agstate/agoric-servers/testnet-8000
+# Note: the deployment test and the loadgen test in testnet mode modify some
+# directories in $HOME so provide an empty $HOME for them.
+export HOME="$(mktemp -d -t deployment-integration-home.XXXXX)"
-cd /usr/src/agoric-sdk/
+# While it'd be great if these [tests were more hermetic](https://github.com/Agoric/agoric-sdk/issues/8059),
+# this manual runner must currently reset paths relative to the SDK to ensure
+# reproducible tests.
+rm -rf "$SDK_SRC/../testnet-load-generator/_agstate/agoric-servers/testnet-8000"
+
+export OUTPUT_PATH="$SDK_SRC/../deployment-test-results/networks-$(date +%s)"
+mkdir -p "$OUTPUT_PATH"
+
+cd "$SDK_SRC"
sudo ./packages/deployment/scripts/install-deps.sh
yarn install && XSNAP_RANDOM_INIT=1 yarn build && make -C packages/cosmic-swingset/
+
+cd "$OUTPUT_PATH"
# change to "false" to skip extraction on success like in CI
testfailure="unknown"
-/usr/src/agoric-sdk/packages/deployment/scripts/integration-test.sh || {
+DOCKER_VOLUMES="$AGORIC_SDK_PATH:/usr/src/agoric-sdk" \
+LOADGEN=1 \
+$SDK_SRC/packages/deployment/scripts/integration-test.sh || {
echo "Test failed!!!"
testfailure="true"
}
-packages/deployment/scripts/setup.sh play stop || true
-packages/deployment/scripts/capture-integration-results.sh $testfailure
-echo yes | packages/deployment/scripts/setup.sh destroy || true
+$SDK_SRC/packages/deployment/scripts/setup.sh play stop || true
+$SDK_SRC/packages/deployment/scripts/capture-integration-results.sh $testfailure
+echo yes | $SDK_SRC/packages/deployment/scripts/setup.sh destroy || true
# Not part of CI
-/usr/src/agoric-sdk/scripts/process-integration-results.sh $NETWORK_NAME/results
+$SDK_SRC/scripts/process-integration-results.sh $NETWORK_NAME/results
diff --git a/packages/deployment/scripts/smoketest-binaries.sh b/scripts/smoketest-binaries.sh
similarity index 100%
rename from packages/deployment/scripts/smoketest-binaries.sh
rename to scripts/smoketest-binaries.sh