diff --git a/.cirrus.yml b/.cirrus.yml index 1a6badbe09..2531b46283 100644 --- a/.cirrus.yml +++ b/.cirrus.yml @@ -29,14 +29,8 @@ env: # test changes made there, in a PR in this repository. SKOPEO_PR: - #### - #### Cache-image names to test with (double-quotes around names are critical) - #### - FEDORA_NAME: "fedora-38" - DEBIAN_NAME: "debian-13" - # Google-cloud VM Images - IMAGE_SUFFIX: "c20230807t144831z-f38f37d13" + IMAGE_SUFFIX: "c20231208t193858z-f39f38d13" FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}" DEBIAN_CACHE_IMAGE_NAME: "debian-${IMAGE_SUFFIX}" diff --git a/.codespellrc b/.codespellrc new file mode 100644 index 0000000000..2d68648595 --- /dev/null +++ b/.codespellrc @@ -0,0 +1,6 @@ +# See https://github.com/codespell-project/codespell#using-a-config-file +[codespell] +skip = .git,*.pdf,*.svg,.codespellrc,go.sum,system_registries_v2_test.go,Makefile,build,buildah,buildah.spec,imgtype,copy,AUTHORS,bin,vendor,CHANGELOG.md,changelog.txt,seccomp.json,.cirrus.yml,*.xz,*.gz,*.tar,*.tgz,*ico,*.png,*.1,*.5,*.orig,*.rej,*.gpg +check-hidden = true +ignore-regex = \b(isT|BU|this/doesnt:match)\b +ignore-words-list = te,pathc diff --git a/.github/renovate.json5 b/.github/renovate.json5 index 22f80d3921..7c3c0607cc 100644 --- a/.github/renovate.json5 +++ b/.github/renovate.json5 @@ -36,7 +36,7 @@ ****** Global/general configuration options ***** *************************************************/ - // Re-use predefined sets of configuration options to DRY + // Reuse predefined sets of configuration options to DRY "extends": [ // https://github.com/containers/automation/blob/main/renovate/defaults.json5 "github>containers/automation//renovate/defaults.json5" diff --git a/.github/workflows/codespell.yml b/.github/workflows/codespell.yml new file mode 100644 index 0000000000..4a42998d85 --- /dev/null +++ b/.github/workflows/codespell.yml @@ -0,0 +1,23 @@ +--- +name: Codespell + +on: + push: + branches: [main] + pull_request: + branches: [main] + +permissions: + 
contents: read + +jobs: + codespell: + name: Check for spelling errors + runs-on: ubuntu-latest + + steps: + - name: Checkout + uses: actions/checkout@v3 + - name: Codespell + # uses configuration within .codespellrc file + uses: codespell-project/actions-codespell@v2 diff --git a/Makefile b/Makefile index fe88f1d108..93f33aa3df 100644 --- a/Makefile +++ b/Makefile @@ -24,6 +24,18 @@ GOMD2MAN ?= $(shell command -v go-md2man || echo '$(GOBIN)/go-md2man') MANPAGES_MD = $(wildcard docs/*.5.md) MANPAGES ?= $(MANPAGES_MD:%.md=%) +ifeq ($(shell uname -s),FreeBSD) +CONTAINERSCONFDIR ?= /usr/local/etc/containers +else +CONTAINERSCONFDIR ?= /etc/containers +endif +REGISTRIESDDIR ?= ${CONTAINERSCONFDIR}/registries.d + +# N/B: This value is managed by Renovate, manual changes are +# possible, as long as they don't disturb the formatting +# (i.e. DO NOT ADD A 'v' prefix!) +GOLANGCI_LINT_VERSION := 1.55.2 + export PATH := $(PATH):${GOBIN} all: tools test validate .gitvalidation @@ -41,12 +53,16 @@ install-docs: docs install -m 644 docs/*.5 ${MANINSTALLDIR}/man5/ install: install-docs + install -d -m 755 ${DESTDIR}${CONTAINERSCONFDIR} + install -m 644 default-policy.json ${DESTDIR}${CONTAINERSCONFDIR}/policy.json + install -d -m 755 ${DESTDIR}${REGISTRIESDDIR} + install -m 644 default.yaml ${DESTDIR}${REGISTRIESDDIR}/default.yaml cross: GOOS=windows $(MAKE) build BUILDTAGS="$(BUILDTAGS) $(BUILD_TAGS_WINDOWS_CROSS)" GOOS=darwin $(MAKE) build BUILDTAGS="$(BUILDTAGS) $(BUILD_TAGS_DARWIN_CROSS)" -tools: .install.gitvalidation .install.golangci-lint .install.golint +tools: .install.gitvalidation .install.golangci-lint .install.gitvalidation: if [ ! -x "$(GOBIN)/git-validation" ]; then \ @@ -55,13 +71,7 @@ tools: .install.gitvalidation .install.golangci-lint .install.golint .install.golangci-lint: if [ ! 
-x "$(GOBIN)/golangci-lint" ]; then \ - curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh| sh -s -- -b $(GOBIN) v1.51.0; \ - fi - -.install.golint: - # Note, golint is only needed for Skopeo's tests. - if [ ! -x "$(GOBIN)/golint" ]; then \ - GO111MODULE="off" go get -u $(BUILDFLAGS) golang.org/x/lint/golint; \ + curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh| sh -s -- -b $(GOBIN) v$(GOLANGCI_LINT_VERSION) ; \ fi clean: @@ -90,4 +100,4 @@ vendor-in-container: podman run --privileged --rm --env HOME=/root -v `pwd`:/src -w /src golang go mod tidy codespell: - codespell -S Makefile,build,buildah,buildah.spec,imgtype,copy,AUTHORS,bin,vendor,.git,go.sum,CHANGELOG.md,changelog.txt,seccomp.json,.cirrus.yml,"*.xz,*.gz,*.tar,*.tgz,*ico,*.png,*.1,*.5,*.orig,*.rej" -L keypair,flate,uint,iff,od,ERRO -w + codespell -w diff --git a/README.md b/README.md index 034665bf14..7628ef5290 100644 --- a/README.md +++ b/README.md @@ -63,6 +63,8 @@ the primary downside is that creating new signatures with the Golang-only implem - `containers_image_ostree`: Import `ostree:` transport in `github.com/containers/image/transports/alltransports`. This builds the library requiring the `libostree` development libraries. Otherwise a stub which reports that the transport is not supported gets used. The `github.com/containers/image/ostree` package is completely disabled and impossible to import when this build tag is not in use. - `containers_image_storage_stub`: Don’t import the `containers-storage:` transport in `github.com/containers/image/transports/alltransports`, to decrease the amount of required dependencies. Use a stub which reports that the transport is not supported instead. 
+- `containers_image_fulcio_stub`: Don't import sigstore/fulcio code, all fulcio operations will return an error code +- `containers_image_rekor_stub`: Don't import sigstore/rekor code, all rekor operations will return an error code ## [Contributing](CONTRIBUTING.md) diff --git a/copy/compression.go b/copy/compression.go index eb4da5092f..a42e3b67ab 100644 --- a/copy/compression.go +++ b/copy/compression.go @@ -284,10 +284,24 @@ func (d *bpCompressionStepData) recordValidatedDigestData(c *copier, uploadedInf } } if d.uploadedCompressorName != "" && d.uploadedCompressorName != internalblobinfocache.UnknownCompression { - c.blobInfoCache.RecordDigestCompressorName(uploadedInfo.Digest, d.uploadedCompressorName) + if d.uploadedCompressorName != compressiontypes.ZstdChunkedAlgorithmName { + // HACK: Don’t record zstd:chunked algorithms. + // There is already a similar hack in internal/imagedestination/impl/helpers.BlobMatchesRequiredCompression, + // and that one prevents reusing zstd:chunked blobs, so recording the algorithm here would be mostly harmless. + // + // We skip that here anyway to work around the inability of blobPipelineDetectCompressionStep to differentiate + // between zstd and zstd:chunked; so we could, in varying situations over time, call RecordDigestCompressorName + // with the same digest and both ZstdAlgorithmName and ZstdChunkedAlgorithmName , which causes warnings about + // inconsistent data to be logged. 
+ c.blobInfoCache.RecordDigestCompressorName(uploadedInfo.Digest, d.uploadedCompressorName) + } } - if srcInfo.Digest != "" && d.srcCompressorName != "" && d.srcCompressorName != internalblobinfocache.UnknownCompression { - c.blobInfoCache.RecordDigestCompressorName(srcInfo.Digest, d.srcCompressorName) + if srcInfo.Digest != "" && srcInfo.Digest != uploadedInfo.Digest && + d.srcCompressorName != "" && d.srcCompressorName != internalblobinfocache.UnknownCompression { + if d.srcCompressorName != compressiontypes.ZstdChunkedAlgorithmName { + // HACK: Don’t record zstd:chunked algorithms, see above. + c.blobInfoCache.RecordDigestCompressorName(srcInfo.Digest, d.srcCompressorName) + } } return nil } diff --git a/copy/copy.go b/copy/copy.go index 11b2dbfd52..ad1453fcbc 100644 --- a/copy/copy.go +++ b/copy/copy.go @@ -242,11 +242,13 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef, unparsedToplevel: image.UnparsedInstance(rawSource, nil), // FIXME? The cache is used for sources and destinations equally, but we only have a SourceCtx and DestinationCtx. - // For now, use DestinationCtx (because blob reuse changes the behavior of the destination side more); eventually - // we might want to add a separate CommonCtx — or would that be too confusing? + // For now, use DestinationCtx (because blob reuse changes the behavior of the destination side more). + // Conceptually the cache settings should be in copy.Options instead. blobInfoCache: internalblobinfocache.FromBlobInfoCache(blobinfocache.DefaultCache(options.DestinationCtx)), } defer c.close() + c.blobInfoCache.Open() + defer c.blobInfoCache.Close() // Set the concurrentBlobCopiesSemaphore if we can copy layers in parallel. 
if dest.HasThreadSafePutBlob() && rawSource.HasThreadSafeGetBlob() { diff --git a/copy/encryption.go b/copy/encryption.go index b406b0c316..1305676d7a 100644 --- a/copy/encryption.go +++ b/copy/encryption.go @@ -70,7 +70,7 @@ func (d *bpDecryptionStepData) updateCryptoOperation(operation *types.LayerCrypt } } -// bpdData contains data that the copy pipeline needs about the encryption step. +// bpEncryptionStepData contains data that the copy pipeline needs about the encryption step. type bpEncryptionStepData struct { encrypting bool // We are actually encrypting the stream finalizer ocicrypt.EncryptLayerFinalizer diff --git a/copy/manifest.go b/copy/manifest.go index 6f01cf5cc3..8844ac8e7e 100644 --- a/copy/manifest.go +++ b/copy/manifest.go @@ -6,8 +6,10 @@ import ( "fmt" "strings" + internalManifest "github.com/containers/image/v5/internal/manifest" "github.com/containers/image/v5/internal/set" "github.com/containers/image/v5/manifest" + compressiontypes "github.com/containers/image/v5/pkg/compression/types" "github.com/containers/image/v5/types" v1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" @@ -19,8 +21,8 @@ import ( // Include v2s1 signed but not v2s1 unsigned, because docker/distribution requires a signature even if the unsigned MIME type is used. var preferredManifestMIMETypes = []string{manifest.DockerV2Schema2MediaType, manifest.DockerV2Schema1SignedMediaType} -// ociEncryptionMIMETypes lists manifest MIME types that are known to support OCI encryption. -var ociEncryptionMIMETypes = []string{v1.MediaTypeImageManifest} +// allManifestMIMETypes lists all possible manifest MIME types. +var allManifestMIMETypes = []string{v1.MediaTypeImageManifest, manifest.DockerV2Schema2MediaType, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema1MediaType} // orderedSet is a list of strings (MIME types or platform descriptors in our case), with each string appearing at most once. 
type orderedSet struct { @@ -51,9 +53,10 @@ type determineManifestConversionInputs struct { destSupportedManifestMIMETypes []string // MIME types supported by the destination, per types.ImageDestination.SupportedManifestMIMETypes() - forceManifestMIMEType string // User’s choice of forced manifest MIME type - requiresOCIEncryption bool // Restrict to manifest formats that can support OCI encryption - cannotModifyManifestReason string // The reason the manifest cannot be modified, or an empty string if it can + forceManifestMIMEType string // User’s choice of forced manifest MIME type + requestedCompressionFormat *compressiontypes.Algorithm // Compression algorithm to use, if the user _explicitly_ requested one. + requiresOCIEncryption bool // Restrict to manifest formats that can support OCI encryption + cannotModifyManifestReason string // The reason the manifest cannot be modified, or an empty string if it can } // manifestConversionPlan contains the decisions made by determineManifestConversion. @@ -80,41 +83,74 @@ func determineManifestConversion(in determineManifestConversionInputs) (manifest destSupportedManifestMIMETypes = []string{in.forceManifestMIMEType} } + restrictiveCompressionRequired := in.requestedCompressionFormat != nil && !internalManifest.CompressionAlgorithmIsUniversallySupported(*in.requestedCompressionFormat) if len(destSupportedManifestMIMETypes) == 0 { - if !in.requiresOCIEncryption || manifest.MIMETypeSupportsEncryption(srcType) { + if (!in.requiresOCIEncryption || manifest.MIMETypeSupportsEncryption(srcType)) && + (!restrictiveCompressionRequired || internalManifest.MIMETypeSupportsCompressionAlgorithm(srcType, *in.requestedCompressionFormat)) { return manifestConversionPlan{ // Anything goes; just use the original as is, do not try any conversions. 
preferredMIMEType: srcType, otherMIMETypeCandidates: []string{}, }, nil } - destSupportedManifestMIMETypes = ociEncryptionMIMETypes + destSupportedManifestMIMETypes = allManifestMIMETypes } supportedByDest := set.New[string]() for _, t := range destSupportedManifestMIMETypes { - if !in.requiresOCIEncryption || manifest.MIMETypeSupportsEncryption(t) { - supportedByDest.Add(t) + if in.requiresOCIEncryption && !manifest.MIMETypeSupportsEncryption(t) { + continue } + if restrictiveCompressionRequired && !internalManifest.MIMETypeSupportsCompressionAlgorithm(t, *in.requestedCompressionFormat) { + continue + } + supportedByDest.Add(t) } if supportedByDest.Empty() { - if len(destSupportedManifestMIMETypes) == 0 { // Coverage: This should never happen, empty values were replaced by ociEncryptionMIMETypes + if len(destSupportedManifestMIMETypes) == 0 { // Coverage: This should never happen, empty values were replaced by allManifestMIMETypes return manifestConversionPlan{}, errors.New("internal error: destSupportedManifestMIMETypes is empty") } - // We know, and have verified, that destSupportedManifestMIMETypes is not empty, so encryption must have been involved. - if !in.requiresOCIEncryption { // Coverage: This should never happen, destSupportedManifestMIMETypes was not empty, so we should have filtered for encryption. - return manifestConversionPlan{}, errors.New("internal error: supportedByDest is empty but destSupportedManifestMIMETypes is not, and not encrypting") - } + // We know, and have verified, that destSupportedManifestMIMETypes is not empty, so some filtering of supported MIME types must have been involved. + // destSupportedManifestMIMETypes has three possible origins: if in.forceManifestMIMEType != "" { // 1. 
forceManifestType specified - return manifestConversionPlan{}, fmt.Errorf("encryption required together with format %s, which does not support encryption", - in.forceManifestMIMEType) + switch { + case in.requiresOCIEncryption && restrictiveCompressionRequired: + return manifestConversionPlan{}, fmt.Errorf("compression using %s, and encryption, required together with format %s, which does not support both", + in.requestedCompressionFormat.Name(), in.forceManifestMIMEType) + case in.requiresOCIEncryption: + return manifestConversionPlan{}, fmt.Errorf("encryption required together with format %s, which does not support encryption", + in.forceManifestMIMEType) + case restrictiveCompressionRequired: + return manifestConversionPlan{}, fmt.Errorf("compression using %s required together with format %s, which does not support it", + in.requestedCompressionFormat.Name(), in.forceManifestMIMEType) + default: + return manifestConversionPlan{}, errors.New("internal error: forceManifestMIMEType was rejected for an unknown reason") + } + } + if len(in.destSupportedManifestMIMETypes) == 0 { // 2. destination accepts anything and we have chosen allManifestTypes + if !restrictiveCompressionRequired { + // Coverage: This should never happen. + // If we have not rejected for compression reasons, we must have rejected due to encryption, but + // allManifestTypes includes OCI, which supports encryption. + return manifestConversionPlan{}, errors.New("internal error: in.destSupportedManifestMIMETypes is empty but supportedByDest is empty as well") + } + // This can legitimately happen when the user asks for completely unsupported formats like Bzip2 or Xz. + return manifestConversionPlan{}, fmt.Errorf("compression using %s required, but none of the known manifest formats support it", in.requestedCompressionFormat.Name()) } - if len(in.destSupportedManifestMIMETypes) == 0 { // 2. 
destination accepts anything and we have chosen ociEncryptionMIMETypes - // Coverage: This should never happen, ociEncryptionMIMETypes all support encryption - return manifestConversionPlan{}, errors.New("internal error: in.destSupportedManifestMIMETypes is empty but supportedByDest is empty as well") + // 3. destination accepts a restricted list of mime types + destMIMEList := strings.Join(destSupportedManifestMIMETypes, ", ") + switch { + case in.requiresOCIEncryption && restrictiveCompressionRequired: + return manifestConversionPlan{}, fmt.Errorf("compression using %s, and encryption, required but the destination only supports MIME types [%s], none of which support both", + in.requestedCompressionFormat.Name(), destMIMEList) + case in.requiresOCIEncryption: + return manifestConversionPlan{}, fmt.Errorf("encryption required but the destination only supports MIME types [%s], none of which support encryption", + destMIMEList) + case restrictiveCompressionRequired: + return manifestConversionPlan{}, fmt.Errorf("compression using %s required but the destination only supports MIME types [%s], none of which support it", + in.requestedCompressionFormat.Name(), destMIMEList) + default: // Coverage: This should never happen, we only filter for in.requiresOCIEncryption || restrictiveCompressionRequired + return manifestConversionPlan{}, errors.New("internal error: supportedByDest is empty but destSupportedManifestMIMETypes is not, and we are neither encrypting nor requiring a restrictive compression algorithm") } - // 3. destination does not support encryption. - return manifestConversionPlan{}, fmt.Errorf("encryption required but the destination only supports MIME types [%s], none of which support encryption", - strings.Join(destSupportedManifestMIMETypes, ", ")) } // destSupportedManifestMIMETypes is a static guess; a particular registry may still only support a subset of the types. 
@@ -156,7 +192,7 @@ func determineManifestConversion(in determineManifestConversionInputs) (manifest } logrus.Debugf("Manifest has MIME type %s, ordered candidate list [%s]", srcType, strings.Join(prioritizedTypes.list, ", ")) - if len(prioritizedTypes.list) == 0 { // Coverage: destSupportedManifestMIMETypes and supportedByDest, which is a subset, is not empty (or we would have exited above), so this should never happen. + if len(prioritizedTypes.list) == 0 { // Coverage: destSupportedManifestMIMETypes and supportedByDest, which is a subset, is not empty (or we would have exited above), so this should never happen. return manifestConversionPlan{}, errors.New("Internal error: no candidate MIME types") } res := manifestConversionPlan{ diff --git a/copy/manifest_test.go b/copy/manifest_test.go index 0a7d6540af..7f22c0fe31 100644 --- a/copy/manifest_test.go +++ b/copy/manifest_test.go @@ -8,6 +8,7 @@ import ( "github.com/containers/image/v5/internal/testing/mocks" "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/pkg/compression" v1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -216,10 +217,11 @@ func TestDetermineManifestConversion(t *testing.T) { }, res, c.description) } - // When encryption is required: + // When encryption or zstd is required: + // In both of these cases, we are restricted to OCI for _, c := range []struct { description string - in determineManifestConversionInputs // with requiresOCIEncryption implied + in determineManifestConversionInputs // with requiresOCIEncryption or requestedCompressionFormat: zstd implied expected manifestConversionPlan // Or {} to expect a failure }{ { // Destination accepts anything - no conversion necessary @@ -234,7 +236,7 @@ func TestDetermineManifestConversion(t *testing.T) { otherMIMETypeCandidates: []string{}, }, }, - { // Destination accepts anything - need to convert for encryption + { // Destination 
accepts anything - need to convert to OCI "s2→anything", determineManifestConversionInputs{ srcMIMEType: manifest.DockerV2Schema2MediaType, @@ -246,7 +248,7 @@ func TestDetermineManifestConversion(t *testing.T) { otherMIMETypeCandidates: []string{}, }, }, - // Destination accepts an encrypted format + // Destination accepts OCI { "OCI→OCI", determineManifestConversionInputs{ @@ -271,7 +273,7 @@ func TestDetermineManifestConversion(t *testing.T) { otherMIMETypeCandidates: []string{}, }, }, - // Destination does not accept an encrypted format + // Destination does not accept OCI { "OCI→s2", determineManifestConversionInputs{ @@ -289,9 +291,9 @@ func TestDetermineManifestConversion(t *testing.T) { manifestConversionPlan{}, }, // Whatever the input is, with cannotModifyManifestReason we return "keep the original as is". - // Still, encryption is necessarily going to fail… + // Still, encryption/compression is necessarily going to fail… { - "OCI→OCI cannotModifyManifestReason", + "OCI cannotModifyManifestReason", determineManifestConversionInputs{ srcMIMEType: v1.MediaTypeImageManifest, destSupportedManifestMIMETypes: supportS1S2OCI, @@ -304,7 +306,7 @@ func TestDetermineManifestConversion(t *testing.T) { }, }, { - "s2→OCI cannotModifyManifestReason", + "s2 cannotModifyManifestReason", determineManifestConversionInputs{ srcMIMEType: manifest.DockerV2Schema2MediaType, destSupportedManifestMIMETypes: supportS1S2OCI, @@ -316,7 +318,7 @@ func TestDetermineManifestConversion(t *testing.T) { otherMIMETypeCandidates: []string{}, }, }, - // forceManifestMIMEType to a type that supports encryption + // forceManifestMIMEType to a type that supports OCI features { "OCI→OCI forced", determineManifestConversionInputs{ @@ -343,7 +345,7 @@ func TestDetermineManifestConversion(t *testing.T) { otherMIMETypeCandidates: []string{}, }, }, - // forceManifestMIMEType to a type that does not support encryption + // forceManifestMIMEType to a type that does not support OCI features { "OCI→s2 
forced", determineManifestConversionInputs{ @@ -363,16 +365,154 @@ func TestDetermineManifestConversion(t *testing.T) { manifestConversionPlan{}, }, } { - in := c.in - in.requiresOCIEncryption = true - res, err := determineManifestConversion(in) - if c.expected.preferredMIMEType != "" { - require.NoError(t, err, c.description) - assert.Equal(t, c.expected, res, c.description) - } else { - assert.Error(t, err, c.description) + for _, restriction := range []struct { + description string + edit func(in *determineManifestConversionInputs) + }{ + { + description: "encrypted", + edit: func(in *determineManifestConversionInputs) { + in.requiresOCIEncryption = true + }, + }, + { + description: "zstd", + edit: func(in *determineManifestConversionInputs) { + in.requestedCompressionFormat = &compression.Zstd + }, + }, + { + description: "zstd:chunked", + edit: func(in *determineManifestConversionInputs) { + in.requestedCompressionFormat = &compression.ZstdChunked + }, + }, + { + description: "encrypted+zstd", + edit: func(in *determineManifestConversionInputs) { + in.requiresOCIEncryption = true + in.requestedCompressionFormat = &compression.Zstd + }, + }, + } { + desc := c.description + " / " + restriction.description + + in := c.in + restriction.edit(&in) + res, err := determineManifestConversion(in) + if c.expected.preferredMIMEType != "" { + require.NoError(t, err, desc) + assert.Equal(t, c.expected, res, desc) + } else { + assert.Error(t, err, desc) + } } } + + // When encryption using a completely unsupported algorithm is required: + for _, c := range []struct { + description string + in determineManifestConversionInputs // with requiresOCIEncryption or requestedCompressionFormat: zstd implied + }{ + { // Destination accepts anything + "OCI→anything", + determineManifestConversionInputs{ + srcMIMEType: v1.MediaTypeImageManifest, + destSupportedManifestMIMETypes: nil, + }, + }, + { // Destination accepts anything - need to convert to OCI + "s2→anything", + 
determineManifestConversionInputs{ + srcMIMEType: manifest.DockerV2Schema2MediaType, + destSupportedManifestMIMETypes: nil, + }, + }, + // Destination only supports some formats + { + "OCI→OCI", + determineManifestConversionInputs{ + srcMIMEType: v1.MediaTypeImageManifest, + destSupportedManifestMIMETypes: supportS1S2OCI, + }, + }, + { + "s2→OCI", + determineManifestConversionInputs{ + srcMIMEType: manifest.DockerV2Schema2MediaType, + destSupportedManifestMIMETypes: supportS1S2OCI, + }, + }, + { + "OCI→s2", + determineManifestConversionInputs{ + srcMIMEType: v1.MediaTypeImageManifest, + destSupportedManifestMIMETypes: supportS1S2, + }, + }, + { + "s2→s2", + determineManifestConversionInputs{ + srcMIMEType: manifest.DockerV2Schema2MediaType, + destSupportedManifestMIMETypes: supportS1S2, + }, + }, + // cannotModifyManifestReason + { + "OCI cannotModifyManifestReason", + determineManifestConversionInputs{ + srcMIMEType: v1.MediaTypeImageManifest, + destSupportedManifestMIMETypes: supportS1S2OCI, + cannotModifyManifestReason: "Preserving digests", + }, + }, + { + "s2 cannotModifyManifestReason", + determineManifestConversionInputs{ + srcMIMEType: manifest.DockerV2Schema2MediaType, + destSupportedManifestMIMETypes: supportS1S2OCI, + cannotModifyManifestReason: "Preserving digests", + }, + }, + // forceManifestMIMEType + { + "OCI→OCI forced", + determineManifestConversionInputs{ + srcMIMEType: v1.MediaTypeImageManifest, + destSupportedManifestMIMETypes: supportS1S2OCI, + forceManifestMIMEType: v1.MediaTypeImageManifest, + }, + }, + { + "s2→OCI forced", + determineManifestConversionInputs{ + srcMIMEType: manifest.DockerV2Schema2MediaType, + destSupportedManifestMIMETypes: supportS1S2OCI, + forceManifestMIMEType: v1.MediaTypeImageManifest, + }, + }, + { + "OCI→s2 forced", + determineManifestConversionInputs{ + srcMIMEType: v1.MediaTypeImageManifest, + destSupportedManifestMIMETypes: supportS1S2OCI, + forceManifestMIMEType: manifest.DockerV2Schema2MediaType, + }, + }, + { 
+ "s2→s2 forced", + determineManifestConversionInputs{ + srcMIMEType: manifest.DockerV2Schema2MediaType, + destSupportedManifestMIMETypes: supportS1S2OCI, + forceManifestMIMEType: manifest.DockerV2Schema2MediaType, + }, + }, + } { + in := c.in + in.requestedCompressionFormat = &compression.Xz + _, err := determineManifestConversion(in) + assert.Error(t, err, c.description) + } } // fakeUnparsedImage is an implementation of types.UnparsedImage which only returns itself as a MIME type in Manifest, diff --git a/copy/multiple.go b/copy/multiple.go index 30f6da2511..f252e3476f 100644 --- a/copy/multiple.go +++ b/copy/multiple.go @@ -340,7 +340,7 @@ func (c *copier) copyMultipleImages(ctx context.Context) (copiedManifest []byte, if err != nil { return nil, err } - sigs = append(sigs, newSigs...) + sigs = append(slices.Clone(sigs), newSigs...) c.Printf("Storing list signatures\n") if err := c.dest.PutSignaturesWithFormat(ctx, sigs, nil); err != nil { diff --git a/copy/single.go b/copy/single.go index 5297f019f7..9003965c95 100644 --- a/copy/single.go +++ b/copy/single.go @@ -20,6 +20,7 @@ import ( compressiontypes "github.com/containers/image/v5/pkg/compression/types" "github.com/containers/image/v5/transports" "github.com/containers/image/v5/types" + chunkedToc "github.com/containers/storage/pkg/chunked/toc" digest "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" @@ -161,12 +162,13 @@ func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.Unpar return copySingleImageResult{}, err } - destRequiresOciEncryption := (isEncrypted(src) && ic.c.options.OciDecryptConfig != nil) || c.options.OciEncryptLayers != nil + destRequiresOciEncryption := (isEncrypted(src) && ic.c.options.OciDecryptConfig == nil) || c.options.OciEncryptLayers != nil manifestConversionPlan, err := determineManifestConversion(determineManifestConversionInputs{ srcMIMEType: ic.src.ManifestMIMEType, 
destSupportedManifestMIMETypes: ic.c.dest.SupportedManifestMIMETypes(), forceManifestMIMEType: c.options.ForceManifestMIMEType, + requestedCompressionFormat: ic.compressionFormat, requiresOCIEncryption: destRequiresOciEncryption, cannotModifyManifestReason: ic.cannotModifyManifestReason, }) @@ -277,7 +279,7 @@ func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.Unpar if err != nil { return copySingleImageResult{}, err } - sigs = append(sigs, newSigs...) + sigs = append(slices.Clone(sigs), newSigs...) if len(sigs) > 0 { c.Printf("Storing signatures\n") @@ -305,18 +307,18 @@ func checkImageDestinationForCurrentRuntime(ctx context.Context, sys *types.Syst options := newOrderedSet() match := false for _, wantedPlatform := range wantedPlatforms { - // Waiting for https://github.com/opencontainers/image-spec/pull/777 : - // This currently can’t use image.MatchesPlatform because we don’t know what to use - // for image.Variant. - if wantedPlatform.OS == c.OS && wantedPlatform.Architecture == c.Architecture { + // For a transitional period, this might trigger warnings because the Variant + // field was added to OCI config only recently. If this turns out to be too noisy, + // revert this check to only look for (OS, Architecture). 
+ if platform.MatchesPlatform(c.Platform, wantedPlatform) { match = true break } - options.append(fmt.Sprintf("%s+%s", wantedPlatform.OS, wantedPlatform.Architecture)) + options.append(fmt.Sprintf("%s+%s+%q", wantedPlatform.OS, wantedPlatform.Architecture, wantedPlatform.Variant)) } if !match { - logrus.Infof("Image operating system mismatch: image uses OS %q+architecture %q, expecting one of %q", - c.OS, c.Architecture, strings.Join(options.list, ", ")) + logrus.Infof("Image operating system mismatch: image uses OS %q+architecture %q+%q, expecting one of %q", + c.OS, c.Architecture, c.Variant, strings.Join(options.list, ", ")) } } return nil @@ -380,8 +382,9 @@ func (ic *imageCopier) compareImageDestinationManifestEqual(ctx context.Context, compressionAlgos := set.New[string]() for _, srcInfo := range ic.src.LayerInfos() { - compression := compressionAlgorithmFromMIMEType(srcInfo) - compressionAlgos.Add(compression.Name()) + if c := compressionAlgorithmFromMIMEType(srcInfo); c != nil { + compressionAlgos.Add(c.Name()) + } } algos, err := algorithmsByNames(compressionAlgos.Values()) @@ -460,8 +463,14 @@ func (ic *imageCopier) copyLayers(ctx context.Context) ([]compressiontypes.Algor encryptAll = len(*ic.c.options.OciEncryptLayers) == 0 totalLayers := len(srcInfos) for _, l := range *ic.c.options.OciEncryptLayers { - // if layer is negative, it is reverse indexed. - layersToEncrypt.Add((totalLayers + l) % totalLayers) + switch { + case l >= 0 && l < totalLayers: + layersToEncrypt.Add(l) + case l < 0 && l+totalLayers >= 0: // Implies (l + totalLayers) < totalLayers + layersToEncrypt.Add(l + totalLayers) // If l is negative, it is reverse indexed. 
+ default: + return nil, fmt.Errorf("when choosing layers to encrypt, layer index %d out of range (%d layers exist)", l, totalLayers) + } } if encryptAll { @@ -656,8 +665,12 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to ic.c.printCopyInfo("blob", srcInfo) - cachedDiffID := ic.c.blobInfoCache.UncompressedDigest(srcInfo.Digest) // May be "" - diffIDIsNeeded := ic.diffIDsAreNeeded && cachedDiffID == "" + diffIDIsNeeded := false + var cachedDiffID digest.Digest = "" + if ic.diffIDsAreNeeded { + cachedDiffID = ic.c.blobInfoCache.UncompressedDigest(srcInfo.Digest) // May be "" + diffIDIsNeeded = cachedDiffID == "" + } // When encrypting to decrypting, only use the simple code path. We might be able to optimize more // (e.g. if we know the DiffID of an encrypted compressed layer, it might not be necessary to pull, decrypt and decompress again), // but it’s not trivially safe to do such things, so until someone takes the effort to make a comprehensive argument, let’s not. @@ -682,6 +695,13 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to requiredCompression = ic.compressionFormat originalCompression = srcInfo.CompressionAlgorithm } + + // Check if we have a chunked layer in storage that's based on that blob. These layers are stored by their TOC digest. 
+ tocDigest, err := chunkedToc.GetTOCDigest(srcInfo.Annotations) + if err != nil { + return types.BlobInfo{}, "", err + } + reused, reusedBlob, err := ic.c.dest.TryReusingBlobWithOptions(ctx, srcInfo, private.TryReusingBlobOptions{ Cache: ic.c.blobInfoCache, CanSubstitute: canSubstitute, @@ -690,6 +710,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to SrcRef: srcRef, RequiredCompression: requiredCompression, OriginalCompression: originalCompression, + TOCDigest: tocDigest, }) if err != nil { return types.BlobInfo{}, "", fmt.Errorf("trying to reuse blob %s at destination: %w", srcInfo.Digest, err) @@ -733,7 +754,9 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to uploadedBlob, err := ic.c.dest.PutBlobPartial(ctx, &proxy, srcInfo, ic.c.blobInfoCache) if err == nil { if srcInfo.Size != -1 { - bar.SetRefill(srcInfo.Size - bar.Current()) + refill := srcInfo.Size - bar.Current() + bar.SetCurrent(srcInfo.Size) + bar.SetRefill(refill) } bar.mark100PercentComplete() hideProgressBar = false diff --git a/default-policy.json b/default-policy.json new file mode 100644 index 0000000000..dffc54a626 --- /dev/null +++ b/default-policy.json @@ -0,0 +1,14 @@ +{ + "default": [ + { + "type": "insecureAcceptAnything" + } + ], + "transports": + { + "docker-daemon": + { + "": [{"type":"insecureAcceptAnything"}] + } + } +} diff --git a/default.yaml b/default.yaml new file mode 100644 index 0000000000..9e892d760b --- /dev/null +++ b/default.yaml @@ -0,0 +1,27 @@ +# This is a default registries.d configuration file. You may +# add to this file or create additional files in registries.d/. +# +# lookaside: for reading/writing simple signing signatures +# lookaside-staging: for writing simple signing signatures, preferred over lookaside +# +# lookaside and lookaside-staging take a value of the following: +# lookaside: {schema}://location +# +# For reading signatures, schema may be http, https, or file. 
+# For writing signatures, schema may only be file. + +# The default locations are built-in, for both reading and writing: +# /var/lib/containers/sigstore for root, or +# ~/.local/share/containers/sigstore for non-root users. +default-docker: +# lookaside: https://… +# lookaside-staging: file:///… + +# The 'docker' indicator here is the start of the configuration +# for docker registries. +# +# docker: +# +# privateregistry.com: +# lookaside: https://privateregistry.com/sigstore/ +# lookaside-staging: /mnt/nfs/privateregistry/sigstore diff --git a/docker/daemon/daemon_dest.go b/docker/daemon/daemon_dest.go index 59e02462f0..55431db13a 100644 --- a/docker/daemon/daemon_dest.go +++ b/docker/daemon/daemon_dest.go @@ -2,6 +2,7 @@ package daemon import ( "context" + "encoding/json" "errors" "fmt" "io" @@ -85,12 +86,40 @@ func imageLoadGoroutine(ctx context.Context, c *client.Client, reader *io.PipeRe } }() + err = imageLoad(ctx, c, reader) +} + +// imageLoad accepts tar stream on reader and sends it to c +func imageLoad(ctx context.Context, c *client.Client, reader *io.PipeReader) error { resp, err := c.ImageLoad(ctx, reader, true) if err != nil { - err = fmt.Errorf("saving image to docker engine: %w", err) - return + return fmt.Errorf("starting a load operation in docker engine: %w", err) } defer resp.Body.Close() + + // jsonError and jsonMessage are small subsets of docker/docker/pkg/jsonmessage.JSONError and JSONMessage, + // copied here to minimize dependencies. 
+ type jsonError struct { + Message string `json:"message,omitempty"` + } + type jsonMessage struct { + Error *jsonError `json:"errorDetail,omitempty"` + } + + dec := json.NewDecoder(resp.Body) + for { + var msg jsonMessage + if err := dec.Decode(&msg); err != nil { + if err == io.EOF { + break + } + return fmt.Errorf("parsing docker load progress: %w", err) + } + if msg.Error != nil { + return fmt.Errorf("docker engine reported: %s", msg.Error.Message) + } + } + return nil // No error reported = success } // DesiredLayerCompression indicates if layers must be compressed, decompressed or preserved diff --git a/docker/distribution_error.go b/docker/distribution_error.go index 0fe915249b..11b42c6e00 100644 --- a/docker/distribution_error.go +++ b/docker/distribution_error.go @@ -24,6 +24,7 @@ import ( "github.com/docker/distribution/registry/api/errcode" dockerChallenge "github.com/docker/distribution/registry/client/auth/challenge" + "golang.org/x/exp/slices" ) // errNoErrorsInBody is returned when an HTTP response body parses to an empty @@ -105,7 +106,7 @@ func makeErrorList(err error) []error { } func mergeErrors(err1, err2 error) error { - return errcode.Errors(append(makeErrorList(err1), makeErrorList(err2)...)) + return errcode.Errors(append(slices.Clone(makeErrorList(err1)), makeErrorList(err2)...)) } // handleErrorResponse returns error parsed from HTTP response for an diff --git a/docker/docker_client.go b/docker/docker_client.go index dd9127c5ac..6ce8f70083 100644 --- a/docker/docker_client.go +++ b/docker/docker_client.go @@ -1,7 +1,6 @@ package docker import ( - "bytes" "context" "crypto/tls" "encoding/json" @@ -19,6 +18,7 @@ import ( "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/internal/iolimits" + "github.com/containers/image/v5/internal/set" "github.com/containers/image/v5/internal/useragent" "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/pkg/docker/config" @@ -121,6 +121,9 @@ type 
dockerClient struct { // Private state for detectProperties: detectPropertiesOnce sync.Once // detectPropertiesOnce is used to execute detectProperties() at most once. detectPropertiesError error // detectPropertiesError caches the initial error. + // Private state for logResponseWarnings + reportedWarningsLock sync.Mutex + reportedWarnings *set.Set[string] } type authScope struct { @@ -281,10 +284,11 @@ func newDockerClient(sys *types.SystemContext, registry, reference string) (*doc } return &dockerClient{ - sys: sys, - registry: registry, - userAgent: userAgent, - tlsClientConfig: tlsClientConfig, + sys: sys, + registry: registry, + userAgent: userAgent, + tlsClientConfig: tlsClientConfig, + reportedWarnings: set.New[string](), }, nil } @@ -359,6 +363,11 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima hostname := registry if registry == dockerHostname { hostname = dockerV1Hostname + // A search term of library/foo does not find the library/foo image on the docker.io servers, + // which is surprising - and that Docker is modifying the search term client-side this same way, + // and it seems convenient to do the same thing. + // Read more here: https://github.com/containers/image/pull/2133#issue-1928524334 + image = strings.TrimPrefix(image, "library/") } client, err := newDockerClient(sys, hostname, registry) @@ -624,9 +633,76 @@ func (c *dockerClient) makeRequestToResolvedURLOnce(ctx context.Context, method if err != nil { return nil, err } + if warnings := res.Header.Values("Warning"); len(warnings) != 0 { + c.logResponseWarnings(res, warnings) + } return res, nil } +// logResponseWarnings logs warningHeaders from res, if any. 
+func (c *dockerClient) logResponseWarnings(res *http.Response, warningHeaders []string) { + c.reportedWarningsLock.Lock() + defer c.reportedWarningsLock.Unlock() + + for _, header := range warningHeaders { + warningString := parseRegistryWarningHeader(header) + if warningString == "" { + logrus.Debugf("Ignored Warning: header from registry: %q", header) + } else { + if !c.reportedWarnings.Contains(warningString) { + c.reportedWarnings.Add(warningString) + // Note that reportedWarnings is based only on warningString, so that we don’t + // repeat the same warning for every request - but the warning includes the URL; + // so it may not be specific to that URL. + logrus.Warnf("Warning from registry (first encountered at %q): %q", res.Request.URL.Redacted(), warningString) + } else { + logrus.Debugf("Repeated warning from registry at %q: %q", res.Request.URL.Redacted(), warningString) + } + } + } +} + +// parseRegistryWarningHeader parses a Warning: header per RFC 7234, limited to the warning +// values allowed by opencontainers/distribution-spec. +// It returns the warning string if the header has the expected format, or "" otherwise. 
+func parseRegistryWarningHeader(header string) string { + const expectedPrefix = `299 - "` + const expectedSuffix = `"` + + // warning-value = warn-code SP warn-agent SP warn-text [ SP warn-date ] + // distribution-spec requires warn-code=299, warn-agent="-", warn-date missing + if !strings.HasPrefix(header, expectedPrefix) || !strings.HasSuffix(header, expectedSuffix) { + return "" + } + header = header[len(expectedPrefix) : len(header)-len(expectedSuffix)] + + // ”Recipients that process the value of a quoted-string MUST handle a quoted-pair + // as if it were replaced by the octet following the backslash.”, so let’s do that… + res := strings.Builder{} + afterBackslash := false + for _, c := range []byte(header) { // []byte because escaping is defined in terms of bytes, not Unicode code points + switch { + case c == 0x7F || (c < ' ' && c != '\t'): + return "" // Control characters are forbidden + case afterBackslash: + res.WriteByte(c) + afterBackslash = false + case c == '"': + // This terminates the warn-text and warn-date, forbidden by distribution-spec, follows, + // or completely invalid input. + return "" + case c == '\\': + afterBackslash = true + default: + res.WriteByte(c) + } + } + if afterBackslash { + return "" + } + return res.String() +} + // we're using the challenges from the /v2/ ping response and not the one from the destination // URL in this request because: // @@ -1008,9 +1084,10 @@ func isManifestUnknownError(err error) bool { if errors.As(err, &e) && e.ErrorCode() == errcode.ErrorCodeUnknown && e.Message == "Not Found" { return true } - // ALSO registry.redhat.io as of October 2022 + // opencontainers/distribution-spec does not require the errcode.Error payloads to be used, + // but specifies that the HTTP status must be 404. 
var unexpected *unexpectedHTTPResponseError - if errors.As(err, &unexpected) && unexpected.StatusCode == http.StatusNotFound && bytes.Contains(unexpected.Response, []byte("Not found")) { + if errors.As(err, &unexpected) && unexpected.StatusCode == http.StatusNotFound { return true } return false diff --git a/docker/docker_client_test.go b/docker/docker_client_test.go index 086bc132a6..67e38764b4 100644 --- a/docker/docker_client_test.go +++ b/docker/docker_client_test.go @@ -332,6 +332,28 @@ func TestNeedsNoRetry(t *testing.T) { } } +func TestParseRegistryWarningHeader(t *testing.T) { + for _, c := range []struct{ header, expected string }{ + {"completely invalid", ""}, + {`299 - "trivial"`, "trivial"}, + {`100 - "not-299"`, ""}, + {`299 localhost "warn-agent set"`, ""}, + {`299 - "no-terminating-quote`, ""}, + {"299 - \"\x01 control\"", ""}, + {"299 - \"\\\x01 escaped control\"", ""}, + {"299 - \"e\\scaped\"", "escaped"}, + {"299 - \"non-UTF8 \xA1\xA2\"", "non-UTF8 \xA1\xA2"}, + {"299 - \"non-UTF8 escaped \\\xA1\\\xA2\"", "non-UTF8 escaped \xA1\xA2"}, + {"299 - \"UTF8 žluťoučký\"", "UTF8 žluťoučký"}, + {"299 - \"UTF8 \\\xC5\\\xBEluťoučký\"", "UTF8 žluťoučký"}, + {`299 - "unterminated`, ""}, + {`299 - "warning" "some-date"`, ""}, + } { + res := parseRegistryWarningHeader(c.header) + assert.Equal(t, c.expected, res, c.header) + } +} + func TestIsManifestUnknownError(t *testing.T) { // Mostly a smoke test; we can add more registries here if they need special handling. 
diff --git a/docker/docker_image.go b/docker/docker_image.go index 42bbfd95ee..93160480ea 100644 --- a/docker/docker_image.go +++ b/docker/docker_image.go @@ -123,6 +123,9 @@ func GetDigest(ctx context.Context, sys *types.SystemContext, ref types.ImageRef if !ok { return "", errors.New("ref must be a dockerReference") } + if dr.isUnknownDigest { + return "", fmt.Errorf("docker: reference %q is for unknown digest case; cannot get digest", dr.StringWithinTransport()) + } tagOrDigest, err := dr.tagOrDigest() if err != nil { diff --git a/docker/docker_image_dest.go b/docker/docker_image_dest.go index 63e372d677..a9a36f0a34 100644 --- a/docker/docker_image_dest.go +++ b/docker/docker_image_dest.go @@ -137,7 +137,7 @@ func (d *dockerImageDestination) PutBlobWithOptions(ctx context.Context, stream // If requested, precompute the blob digest to prevent uploading layers that already exist on the registry. // This functionality is particularly useful when BlobInfoCache has not been populated with compressed digests, // the source blob is uncompressed, and the destination blob is being compressed "on the fly". - if inputInfo.Digest == "" && d.c.sys.DockerRegistryPushPrecomputeDigests { + if inputInfo.Digest == "" && d.c.sys != nil && d.c.sys.DockerRegistryPushPrecomputeDigests { logrus.Debugf("Precomputing digest layer for %s", reference.Path(d.ref.ref)) streamCopy, cleanup, err := streamdigest.ComputeBlobInfo(d.c.sys, stream, &inputInfo) if err != nil { @@ -341,34 +341,58 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context, // Then try reusing blobs from other locations. 
candidates := options.Cache.CandidateLocations2(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, options.CanSubstitute) for _, candidate := range candidates { - candidateRepo, err := parseBICLocationReference(candidate.Location) - if err != nil { - logrus.Debugf("Error parsing BlobInfoCache location reference: %s", err) - continue - } + var err error compressionOperation, compressionAlgorithm, err := blobinfocache.OperationAndAlgorithmForCompressor(candidate.CompressorName) if err != nil { logrus.Debugf("OperationAndAlgorithmForCompressor Failed: %v", err) continue } + var candidateRepo reference.Named + if !candidate.UnknownLocation { + candidateRepo, err = parseBICLocationReference(candidate.Location) + if err != nil { + logrus.Debugf("Error parsing BlobInfoCache location reference: %s", err) + continue + } + } if !impl.BlobMatchesRequiredCompression(options, compressionAlgorithm) { requiredCompression := "nil" if compressionAlgorithm != nil { requiredCompression = compressionAlgorithm.Name() } - logrus.Debugf("Ignoring candidate blob %s as reuse candidate due to compression mismatch ( %s vs %s ) in %s", candidate.Digest.String(), options.RequiredCompression.Name(), requiredCompression, candidateRepo.Name()) + if !candidate.UnknownLocation { + logrus.Debugf("Ignoring candidate blob %s as reuse candidate due to compression mismatch ( %s vs %s ) in %s", candidate.Digest.String(), options.RequiredCompression.Name(), requiredCompression, candidateRepo.Name()) + } else { + logrus.Debugf("Ignoring candidate blob %s as reuse candidate due to compression mismatch ( %s vs %s ) with no location match, checking current repo", candidate.Digest.String(), options.RequiredCompression.Name(), requiredCompression) + } continue } - if candidate.CompressorName != blobinfocache.Uncompressed { - logrus.Debugf("Trying to reuse cached location %s compressed with %s in %s", candidate.Digest.String(), candidate.CompressorName, candidateRepo.Name()) + if 
!candidate.UnknownLocation { + if candidate.CompressorName != blobinfocache.Uncompressed { + logrus.Debugf("Trying to reuse blob with cached digest %s compressed with %s in destination repo %s", candidate.Digest.String(), candidate.CompressorName, candidateRepo.Name()) + } else { + logrus.Debugf("Trying to reuse blob with cached digest %s in destination repo %s", candidate.Digest.String(), candidateRepo.Name()) + } + // Sanity checks: + if reference.Domain(candidateRepo) != reference.Domain(d.ref.ref) { + // OCI distribution spec 1.1 allows mounting blobs without specifying the source repo + // (the "from" parameter); in that case we might try to use these candidates as well. + // + // OTOH that would mean we can’t do the “blobExists” check, and if there is no match + // we could get an upload request that we would have to cancel. + logrus.Debugf("... Internal error: domain %s does not match destination %s", reference.Domain(candidateRepo), reference.Domain(d.ref.ref)) + continue + } } else { - logrus.Debugf("Trying to reuse cached location %s with no compression in %s", candidate.Digest.String(), candidateRepo.Name()) - } - - // Sanity checks: - if reference.Domain(candidateRepo) != reference.Domain(d.ref.ref) { - logrus.Debugf("... Internal error: domain %s does not match destination %s", reference.Domain(candidateRepo), reference.Domain(d.ref.ref)) - continue + if candidate.CompressorName != blobinfocache.Uncompressed { + logrus.Debugf("Trying to reuse blob with cached digest %s compressed with %s with no location match, checking current repo", candidate.Digest.String(), candidate.CompressorName) + } else { + logrus.Debugf("Trying to reuse blob with cached digest %s in destination repo with no location match, checking current repo", candidate.Digest.String()) + } + // This digest is a known variant of this blob but we don’t + // have a recorded location in this registry, let’s try looking + // for it in the current repo. 
+ candidateRepo = reference.TrimNamed(d.ref.ref) } if candidateRepo.Name() == d.ref.ref.Name() && candidate.Digest == info.Digest { logrus.Debug("... Already tried the primary destination") @@ -428,7 +452,15 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context, // but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError. func (d *dockerImageDestination) PutManifest(ctx context.Context, m []byte, instanceDigest *digest.Digest) error { var refTail string - if instanceDigest != nil { + // If d.ref.isUnknownDigest=true, then we push without a tag, so get the + // digest that will be used + if d.ref.isUnknownDigest { + digest, err := manifest.Digest(m) + if err != nil { + return err + } + refTail = digest.String() + } else if instanceDigest != nil { // If the instanceDigest is provided, then use it as the refTail, because the reference, // whether it includes a tag or a digest, refers to the list as a whole, and not this // particular instance. @@ -683,6 +715,10 @@ func (d *dockerImageDestination) putSignaturesToSigstoreAttachments(ctx context. } } + // To make sure we can safely append to the slices of ociManifest, without adding a remote dependency on the code that creates it. + ociManifest.Layers = slices.Clone(ociManifest.Layers) + // We don’t need to ^^^ for ociConfig.RootFS.DiffIDs because we have created it empty ourselves, and json.Unmarshal is documented to append() to + // the slice in the original object (or in a newly allocated object). for _, sig := range signatures { mimeType := sig.UntrustedMIMEType() payloadBlob := sig.UntrustedPayload() diff --git a/docker/docker_image_src.go b/docker/docker_image_src.go index 231d5d2124..f9d4d6030f 100644 --- a/docker/docker_image_src.go +++ b/docker/docker_image_src.go @@ -38,8 +38,8 @@ type dockerImageSource struct { impl.DoesNotAffectLayerInfosForCopy stubs.ImplementsGetBlobAt - logicalRef dockerReference // The reference the user requested. 
- physicalRef dockerReference // The actual reference we are accessing (possibly a mirror) + logicalRef dockerReference // The reference the user requested. This must satisfy !isUnknownDigest + physicalRef dockerReference // The actual reference we are accessing (possibly a mirror). This must satisfy !isUnknownDigest c *dockerClient // State cachedManifest []byte // nil if not loaded yet @@ -48,7 +48,12 @@ type dockerImageSource struct { // newImageSource creates a new ImageSource for the specified image reference. // The caller must call .Close() on the returned ImageSource. +// The caller must ensure !ref.isUnknownDigest. func newImageSource(ctx context.Context, sys *types.SystemContext, ref dockerReference) (*dockerImageSource, error) { + if ref.isUnknownDigest { + return nil, fmt.Errorf("reading images from docker: reference %q without a tag or digest is not supported", ref.StringWithinTransport()) + } + registryConfig, err := loadRegistryConfiguration(sys) if err != nil { return nil, err @@ -121,7 +126,7 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref dockerRef // The caller must call .Close() on the returned ImageSource. func newImageSourceAttempt(ctx context.Context, sys *types.SystemContext, logicalRef dockerReference, pullSource sysregistriesv2.PullSource, registryConfig *registryConfiguration) (*dockerImageSource, error) { - physicalRef, err := newReference(pullSource.Reference) + physicalRef, err := newReference(pullSource.Reference, false) if err != nil { return nil, err } @@ -591,6 +596,10 @@ func (s *dockerImageSource) getSignaturesFromSigstoreAttachments(ctx context.Con // deleteImage deletes the named image from the registry, if supported. 
func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerReference) error { + if ref.isUnknownDigest { + return fmt.Errorf("Docker reference without a tag or digest cannot be deleted") + } + registryConfig, err := loadRegistryConfiguration(sys) if err != nil { return err diff --git a/docker/docker_transport.go b/docker/docker_transport.go index 6ae8491594..1c89302f46 100644 --- a/docker/docker_transport.go +++ b/docker/docker_transport.go @@ -12,6 +12,11 @@ import ( "github.com/containers/image/v5/types" ) +// UnknownDigestSuffix can be appended to a reference when the caller +// wants to push an image without a tag or digest. +// NewReferenceUnknownDigest() is called when this const is detected. +const UnknownDigestSuffix = "@@unknown-digest@@" + func init() { transports.Register(Transport) } @@ -43,7 +48,8 @@ func (t dockerTransport) ValidatePolicyConfigurationScope(scope string) error { // dockerReference is an ImageReference for Docker images. type dockerReference struct { - ref reference.Named // By construction we know that !reference.IsNameOnly(ref) + ref reference.Named // By construction we know that !reference.IsNameOnly(ref) unless isUnknownDigest=true + isUnknownDigest bool } // ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an Docker ImageReference. 
@@ -51,23 +57,46 @@ func ParseReference(refString string) (types.ImageReference, error) { if !strings.HasPrefix(refString, "//") { return nil, fmt.Errorf("docker: image reference %s does not start with //", refString) } + // Check if ref has UnknownDigestSuffix suffixed to it + unknownDigest := false + if strings.HasSuffix(refString, UnknownDigestSuffix) { + unknownDigest = true + refString = strings.TrimSuffix(refString, UnknownDigestSuffix) + } ref, err := reference.ParseNormalizedNamed(strings.TrimPrefix(refString, "//")) if err != nil { return nil, err } + + if unknownDigest { + if !reference.IsNameOnly(ref) { + return nil, fmt.Errorf("docker: image reference %q has unknown digest set but it contains either a tag or digest", ref.String()+UnknownDigestSuffix) + } + return NewReferenceUnknownDigest(ref) + } + ref = reference.TagNameOnly(ref) return NewReference(ref) } // NewReference returns a Docker reference for a named reference. The reference must satisfy !reference.IsNameOnly(). func NewReference(ref reference.Named) (types.ImageReference, error) { - return newReference(ref) + return newReference(ref, false) +} + +// NewReferenceUnknownDigest returns a Docker reference for a named reference, which can be used to write images without setting +// a tag on the registry. The reference must satisfy reference.IsNameOnly() +func NewReferenceUnknownDigest(ref reference.Named) (types.ImageReference, error) { + return newReference(ref, true) } // newReference returns a dockerReference for a named reference. 
-func newReference(ref reference.Named) (dockerReference, error) { - if reference.IsNameOnly(ref) { - return dockerReference{}, fmt.Errorf("Docker reference %s has neither a tag nor a digest", reference.FamiliarString(ref)) +func newReference(ref reference.Named, unknownDigest bool) (dockerReference, error) { + if reference.IsNameOnly(ref) && !unknownDigest { + return dockerReference{}, fmt.Errorf("Docker reference %s is not for an unknown digest case; tag or digest is needed", reference.FamiliarString(ref)) + } + if !reference.IsNameOnly(ref) && unknownDigest { + return dockerReference{}, fmt.Errorf("Docker reference %s is for an unknown digest case but reference has a tag or digest", reference.FamiliarString(ref)) } // A github.com/distribution/reference value can have a tag and a digest at the same time! // The docker/distribution API does not really support that (we can’t ask for an image with a specific @@ -81,7 +110,8 @@ func newReference(ref reference.Named) (dockerReference, error) { } return dockerReference{ - ref: ref, + ref: ref, + isUnknownDigest: unknownDigest, }, nil } @@ -95,7 +125,11 @@ func (ref dockerReference) Transport() types.ImageTransport { // e.g. default attribute values omitted by the user may be filled in the return value, or vice versa. // WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix. func (ref dockerReference) StringWithinTransport() string { - return "//" + reference.FamiliarString(ref.ref) + famString := "//" + reference.FamiliarString(ref.ref) + if ref.isUnknownDigest { + return famString + UnknownDigestSuffix + } + return famString } // DockerReference returns a Docker reference associated with this reference @@ -113,6 +147,9 @@ func (ref dockerReference) DockerReference() reference.Named { // not required/guaranteed that it will be a valid input to Transport().ParseReference(). // Returns "" if configuration identities for these references are not supported. 
func (ref dockerReference) PolicyConfigurationIdentity() string { + if ref.isUnknownDigest { + return ref.ref.Name() + } res, err := policyconfiguration.DockerReferenceIdentity(ref.ref) if res == "" || err != nil { // Coverage: Should never happen, NewReference above should refuse values which could cause a failure. panic(fmt.Sprintf("Internal inconsistency: policyconfiguration.DockerReferenceIdentity returned %#v, %v", res, err)) @@ -126,7 +163,13 @@ func (ref dockerReference) PolicyConfigurationIdentity() string { // It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), // and each following element to be a prefix of the element preceding it. func (ref dockerReference) PolicyConfigurationNamespaces() []string { - return policyconfiguration.DockerReferenceNamespaces(ref.ref) + namespaces := policyconfiguration.DockerReferenceNamespaces(ref.ref) + if ref.isUnknownDigest { + if len(namespaces) != 0 && namespaces[0] == ref.ref.Name() { + namespaces = namespaces[1:] + } + } + return namespaces } // NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. @@ -163,6 +206,10 @@ func (ref dockerReference) tagOrDigest() (string, error) { if ref, ok := ref.ref.(reference.NamedTagged); ok { return ref.Tag(), nil } + + if ref.isUnknownDigest { + return "", fmt.Errorf("Docker reference %q is for an unknown digest case, has neither a digest nor a tag", reference.FamiliarString(ref.ref)) + } // This should not happen, NewReference above refuses reference.IsNameOnly values. 
return "", fmt.Errorf("Internal inconsistency: Reference %s unexpectedly has neither a digest nor a tag", reference.FamiliarString(ref.ref)) } diff --git a/docker/docker_transport_test.go b/docker/docker_transport_test.go index 1cb3bfcff1..c7bc1917c9 100644 --- a/docker/docker_transport_test.go +++ b/docker/docker_transport_test.go @@ -2,6 +2,7 @@ package docker import ( "context" + "strings" "testing" "github.com/containers/image/v5/docker/reference" @@ -11,8 +12,9 @@ import ( ) const ( - sha256digestHex = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" - sha256digest = "@sha256:" + sha256digestHex + sha256digestHex = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" + sha256digest = "@sha256:" + sha256digestHex + unknownDigestSuffixTest = "@@unknown-digest@@" ) func TestTransportName(t *testing.T) { @@ -43,17 +45,24 @@ func TestParseReference(t *testing.T) { // testParseReference is a test shared for Transport.ParseReference and ParseReference. func testParseReference(t *testing.T, fn func(string) (types.ImageReference, error)) { - for _, c := range []struct{ input, expected string }{ - {"busybox", ""}, // Missing // prefix - {"//busybox:notlatest", "docker.io/library/busybox:notlatest"}, // Explicit tag - {"//busybox" + sha256digest, "docker.io/library/busybox" + sha256digest}, // Explicit digest - {"//busybox", "docker.io/library/busybox:latest"}, // Default tag + for _, c := range []struct { + input, expected string + expectedUnknownDigest bool + }{ + {"busybox", "", false}, // Missing // prefix + {"//busybox:notlatest", "docker.io/library/busybox:notlatest", false}, // Explicit tag + {"//busybox" + sha256digest, "docker.io/library/busybox" + sha256digest, false}, // Explicit digest + {"//busybox", "docker.io/library/busybox:latest", false}, // Default tag // A github.com/distribution/reference value can have a tag and a digest at the same time! 
// The docker/distribution API does not really support that (we can’t ask for an image with a specific // tag and digest), so fail. This MAY be accepted in the future. - {"//busybox:latest" + sha256digest, ""}, // Both tag and digest - {"//docker.io/library/busybox:latest", "docker.io/library/busybox:latest"}, // All implied values explicitly specified - {"//UPPERCASEISINVALID", ""}, // Invalid input + {"//busybox:latest" + sha256digest, "", false}, // Both tag and digest + {"//docker.io/library/busybox:latest", "docker.io/library/busybox:latest", false}, // All implied values explicitly specified + {"//UPPERCASEISINVALID", "", false}, // Invalid input + {"//busybox" + unknownDigestSuffixTest, "docker.io/library/busybox", true}, // UnknownDigest suffix + {"//example.com/ns/busybox" + unknownDigestSuffixTest, "example.com/ns/busybox", true}, // UnknownDigest with registry/repo + {"//example.com/ns/busybox:tag1" + unknownDigestSuffixTest, "", false}, // UnknownDigest with tag should fail + {"//example.com/ns/busybox" + sha256digest + unknownDigestSuffixTest, "", false}, // UnknownDigest with digest should fail } { ref, err := fn(c.input) if c.expected == "" { @@ -63,20 +72,29 @@ func testParseReference(t *testing.T, fn func(string) (types.ImageReference, err dockerRef, ok := ref.(dockerReference) require.True(t, ok, c.input) assert.Equal(t, c.expected, dockerRef.ref.String(), c.input) + assert.Equal(t, c.expectedUnknownDigest, dockerRef.isUnknownDigest) } } } // A common list of reference formats to test for the various ImageReference methods. 
-var validReferenceTestCases = []struct{ input, dockerRef, stringWithinTransport string }{ - {"busybox:notlatest", "docker.io/library/busybox:notlatest", "//busybox:notlatest"}, // Explicit tag - {"busybox" + sha256digest, "docker.io/library/busybox" + sha256digest, "//busybox" + sha256digest}, // Explicit digest - {"docker.io/library/busybox:latest", "docker.io/library/busybox:latest", "//busybox:latest"}, // All implied values explicitly specified - {"example.com/ns/foo:bar", "example.com/ns/foo:bar", "//example.com/ns/foo:bar"}, // All values explicitly specified +var validReferenceTestCases = []struct { + input, dockerRef, stringWithinTransport string + expectedUnknownDigest bool +}{ + {"busybox:notlatest", "docker.io/library/busybox:notlatest", "//busybox:notlatest", false}, // Explicit tag + {"busybox" + sha256digest, "docker.io/library/busybox" + sha256digest, "//busybox" + sha256digest, false}, // Explicit digest + {"docker.io/library/busybox:latest", "docker.io/library/busybox:latest", "//busybox:latest", false}, // All implied values explicitly specified + {"example.com/ns/foo:bar", "example.com/ns/foo:bar", "//example.com/ns/foo:bar", false}, // All values explicitly specified + {"example.com/ns/busybox" + unknownDigestSuffixTest, "example.com/ns/busybox", "//example.com/ns/busybox" + unknownDigestSuffixTest, true}, // UnknownDigest Suffix full name + {"busybox" + unknownDigestSuffixTest, "docker.io/library/busybox", "//busybox" + unknownDigestSuffixTest, true}, // UnknownDigest short name } func TestNewReference(t *testing.T) { for _, c := range validReferenceTestCases { + if strings.HasSuffix(c.input, unknownDigestSuffixTest) { + continue + } parsed, err := reference.ParseNormalizedNamed(c.input) require.NoError(t, err) ref, err := NewReference(parsed) @@ -84,6 +102,7 @@ func TestNewReference(t *testing.T) { dockerRef, ok := ref.(dockerReference) require.True(t, ok, c.input) assert.Equal(t, c.dockerRef, dockerRef.ref.String(), c.input) + 
assert.Equal(t, false, dockerRef.isUnknownDigest) } // Neither a tag nor digest @@ -103,6 +122,28 @@ func TestNewReference(t *testing.T) { assert.Error(t, err) } +func TestNewReferenceUnknownDigest(t *testing.T) { + // References with tags and digests should be rejected + for _, c := range validReferenceTestCases { + if !strings.Contains(c.input, unknownDigestSuffixTest) { + parsed, err := reference.ParseNormalizedNamed(c.input) + require.NoError(t, err) + _, err = NewReferenceUnknownDigest(parsed) + assert.Error(t, err) + continue + } + in := strings.TrimSuffix(c.input, unknownDigestSuffixTest) + parsed, err := reference.ParseNormalizedNamed(in) + require.NoError(t, err) + ref, err := NewReferenceUnknownDigest(parsed) + require.NoError(t, err, c.input) + dockerRef, ok := ref.(dockerReference) + require.True(t, ok, c.input) + assert.Equal(t, c.dockerRef, dockerRef.ref.String(), c.input) + assert.Equal(t, true, dockerRef.isUnknownDigest) + } +} + func TestReferenceTransport(t *testing.T) { ref, err := ParseReference("//busybox") require.NoError(t, err) @@ -138,6 +179,10 @@ func TestReferencePolicyConfigurationIdentity(t *testing.T) { ref, err := ParseReference("//busybox") require.NoError(t, err) assert.Equal(t, "docker.io/library/busybox:latest", ref.PolicyConfigurationIdentity()) + + ref, err = ParseReference("//busybox" + unknownDigestSuffixTest) + require.NoError(t, err) + assert.Equal(t, "docker.io/library/busybox", ref.PolicyConfigurationIdentity()) } func TestReferencePolicyConfigurationNamespaces(t *testing.T) { @@ -150,28 +195,52 @@ func TestReferencePolicyConfigurationNamespaces(t *testing.T) { "docker.io", "*.io", }, ref.PolicyConfigurationNamespaces()) + + ref, err = ParseReference("//busybox" + unknownDigestSuffixTest) + require.NoError(t, err) + assert.Equal(t, []string{ + "docker.io/library", + "docker.io", + "*.io", + }, ref.PolicyConfigurationNamespaces()) } func TestReferenceNewImage(t *testing.T) { - ref, err := 
ParseReference("//quay.io/libpod/busybox") - require.NoError(t, err) - img, err := ref.NewImage(context.Background(), &types.SystemContext{ + sysCtx := &types.SystemContext{ RegistriesDirPath: "/this/does/not/exist", DockerPerHostCertDirPath: "/this/does/not/exist", ArchitectureChoice: "amd64", OSChoice: "linux", - }) + } + ref, err := ParseReference("//quay.io/libpod/busybox") + require.NoError(t, err) + img, err := ref.NewImage(context.Background(), sysCtx) require.NoError(t, err) defer img.Close() + + // unknownDigest case should return error + ref, err = ParseReference("//quay.io/libpod/busybox" + unknownDigestSuffixTest) + require.NoError(t, err) + _, err = ref.NewImage(context.Background(), sysCtx) + assert.Error(t, err) } func TestReferenceNewImageSource(t *testing.T) { + sysCtx := &types.SystemContext{ + RegistriesDirPath: "/this/does/not/exist", + DockerPerHostCertDirPath: "/this/does/not/exist", + } ref, err := ParseReference("//quay.io/libpod/busybox") require.NoError(t, err) - src, err := ref.NewImageSource(context.Background(), - &types.SystemContext{RegistriesDirPath: "/this/does/not/exist", DockerPerHostCertDirPath: "/this/does/not/exist"}) - assert.NoError(t, err) + src, err := ref.NewImageSource(context.Background(), sysCtx) + require.NoError(t, err) defer src.Close() + + // unknownDigest case should return error + ref, err = ParseReference("//quay.io/libpod/busybox" + unknownDigestSuffixTest) + require.NoError(t, err) + _, err = ref.NewImageSource(context.Background(), sysCtx) + assert.Error(t, err) } func TestReferenceNewImageDestination(t *testing.T) { @@ -181,6 +250,13 @@ func TestReferenceNewImageDestination(t *testing.T) { &types.SystemContext{RegistriesDirPath: "/this/does/not/exist", DockerPerHostCertDirPath: "/this/does/not/exist"}) require.NoError(t, err) defer dest.Close() + + ref, err = ParseReference("//quay.io/libpod/busybox" + unknownDigestSuffixTest) + require.NoError(t, err) + dest2, err := 
ref.NewImageDestination(context.Background(), + &types.SystemContext{RegistriesDirPath: "/this/does/not/exist", DockerPerHostCertDirPath: "/this/does/not/exist"}) + require.NoError(t, err) + defer dest2.Close() } func TestReferenceTagOrDigest(t *testing.T) { @@ -203,4 +279,11 @@ func TestReferenceTagOrDigest(t *testing.T) { dockerRef := dockerReference{ref: ref} _, err = dockerRef.tagOrDigest() assert.Error(t, err) + + // Invalid input, unknownDigest case + ref, err = reference.ParseNormalizedNamed("busybox") + require.NoError(t, err) + dockerRef = dockerReference{ref: ref, isUnknownDigest: true} + _, err = dockerRef.tagOrDigest() + assert.Error(t, err) } diff --git a/docker/errors.go b/docker/errors.go index 2caa10d7d3..4392f9d182 100644 --- a/docker/errors.go +++ b/docker/errors.go @@ -47,7 +47,12 @@ func httpResponseToError(res *http.Response, context string) error { } // registryHTTPResponseToError creates a Go error from an HTTP error response of a docker/distribution -// registry +// registry. +// +// WARNING: The OCI distribution spec says +// “A `4XX` response code from the registry MAY return a body in any format.”; but if it is +// JSON, it MUST use the errcode.Error structure. +// So, callers should primarily decide based on HTTP StatusCode, not based on error type here. func registryHTTPResponseToError(res *http.Response) error { err := handleErrorResponse(res) // len(errs) == 0 should never be returned by handleErrorResponse; if it does, we don't modify it and let the caller report it as is. @@ -83,7 +88,7 @@ func registryHTTPResponseToError(res *http.Response) error { response = response[:50] + "..." } // %.0w makes e visible to error.Unwrap() without including any text - err = fmt.Errorf("StatusCode: %d, %s%.0w", e.StatusCode, response, e) + err = fmt.Errorf("StatusCode: %d, %q%.0w", e.StatusCode, response, e) case errcode.Error: // e.Error() is fmt.Sprintf("%s: %s", e.Code.Error(), e.Message, which is usually // rather redundant. 
So reword it without using e.Code.Error() if e.Message is the default. diff --git a/docker/errors_test.go b/docker/errors_test.go index 4149067fec..2463d275f4 100644 --- a/docker/errors_test.go +++ b/docker/errors_test.go @@ -43,7 +43,7 @@ func TestRegistryHTTPResponseToError(t *testing.T) { "Header1: Value1\r\n" + "\r\n" + "JSON? What JSON?\r\n", - errorString: "StatusCode: 400, JSON? What JSON?\r\n", + errorString: `StatusCode: 400, "JSON? What JSON?\r\n"`, errorType: nil, unwrappedErrorPtr: &unwrappedUnexpectedHTTPResponseError, }, @@ -161,7 +161,7 @@ func TestRegistryHTTPResponseToError(t *testing.T) { "X-Docker-Size: -1\r\n" + "\r\n" + "Not found\r\n", - errorString: "StatusCode: 404, Not found\r", + errorString: `StatusCode: 404, "Not found\r"`, errorType: nil, unwrappedErrorPtr: &unwrappedUnexpectedHTTPResponseError, fn: func(t *testing.T, err error) { diff --git a/docs/containers-policy.json.5.md b/docs/containers-policy.json.5.md index 71d66c5945..909d04afd0 100644 --- a/docs/containers-policy.json.5.md +++ b/docs/containers-policy.json.5.md @@ -94,7 +94,7 @@ Finally, two full-store specifiers matching all images in the store are valid sc - `[`_graph-root_`]` Note that some tools like Podman and Buildah hard-code overrides of the signature verification policy for “push” operations, -allowing these oprations regardless of configuration in `policy.json`. +allowing these operations regardless of configuration in `policy.json`. ### `dir:` @@ -165,7 +165,7 @@ The _reference_ annotation value, if any, is not used. Supported scopes have the form _repo-path_`:`_image-scope_; _repo_path_ is the path to the OSTree repository. _image-scope_ is the _docker_reference_ part of the reference, with with a `:latest` tag implied if no tag is present, -and parent namespaces of the _docker_reference_ value (by omitting the tag, or a prefix speciyfing a higher-level namespace). 
+and parent namespaces of the _docker_reference_ value (by omitting the tag, or a prefix specifying a higher-level namespace). *Note:* - The _repo_path_ must be absolute and contain no symlinks. Paths violating these requirements may be silently ignored. diff --git a/docs/containers-transports.5.md b/docs/containers-transports.5.md index 8ec42fe87a..481bdb73c7 100644 --- a/docs/containers-transports.5.md +++ b/docs/containers-transports.5.md @@ -40,10 +40,13 @@ By default, uses the authorization state in `$XDG_RUNTIME_DIR/containers/auth.js If the authorization state is not found there, `$HOME/.docker/config.json` is checked, which is set using docker-login(1). The containers-registries.conf(5) further allows for configuring various settings of a registry. -Note that a _docker-reference_ has the following format: `name[:tag|@digest]`. +Note that a _docker-reference_ has the following format: _name_[**:**_tag_ | **@**_digest_]. While the docker transport does not support both a tag and a digest at the same time some formats like containers-storage do. Digests can also be used in an image destination as long as the manifest matches the provided digest. + +The docker transport supports pushing images without a tag or digest to a registry when the image name is suffixed with **@@unknown-digest@@**. The _name_**@@unknown-digest@@** reference format cannot be used with a reference that has a tag or digest. The digest of images can be explored with skopeo-inspect(1). + If `name` does not contain a slash, it is treated as `docker.io/library/name`. Otherwise, the component before the first slash is checked if it is recognized as a `hostname[:port]` (i.e., it contains either a . or a :, or the component is exactly localhost). If the first component of name is not recognized as a `hostname[:port]`, `name` is treated as `docker.io/name`. 
diff --git a/go.mod b/go.mod index f88c9b759d..84bb4d1bc0 100644 --- a/go.mod +++ b/go.mod @@ -6,61 +6,70 @@ require ( dario.cat/mergo v1.0.0 github.com/BurntSushi/toml v1.3.2 github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 - github.com/containers/ocicrypt v1.1.7 - github.com/containers/storage v1.48.1-0.20230728131509-c3da76fa3f63 - github.com/cyberphone/json-canonicalization v0.0.0-20230710064741-aa7fe85c7dbd - github.com/docker/distribution v2.8.2+incompatible - github.com/docker/docker v24.0.5+incompatible + github.com/containers/ocicrypt v1.1.9 + github.com/containers/storage v1.51.1-0.20231205203947-fe005407c7d5 + github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46 + github.com/distribution/reference v0.5.0 + github.com/docker/cli v24.0.7+incompatible + github.com/docker/distribution v2.8.3+incompatible + github.com/docker/docker v24.0.7+incompatible github.com/docker/docker-credential-helpers v0.8.0 github.com/docker/go-connections v0.4.0 - github.com/go-openapi/strfmt v0.21.7 + github.com/go-openapi/strfmt v0.21.9 github.com/go-openapi/swag v0.22.4 github.com/hashicorp/go-multierror v1.1.1 - github.com/hashicorp/go-retryablehttp v0.7.4 - github.com/klauspost/compress v1.16.7 + github.com/hashicorp/go-retryablehttp v0.7.5 + github.com/klauspost/compress v1.17.4 github.com/klauspost/pgzip v1.2.6 github.com/manifoldco/promptui v0.9.0 + github.com/mattn/go-sqlite3 v1.14.18 github.com/opencontainers/go-digest v1.0.0 - github.com/opencontainers/image-spec v1.1.0-rc4 + github.com/opencontainers/image-spec v1.1.0-rc5 github.com/opencontainers/selinux v1.11.0 github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f + github.com/otiai10/copy v1.14.0 github.com/proglottis/gpgme v0.1.3 - github.com/sigstore/fulcio v1.4.0 + github.com/secure-systems-lab/go-securesystemslib v0.7.0 + github.com/sigstore/fulcio v1.4.3 github.com/sigstore/rekor v1.2.2 - github.com/sigstore/sigstore v1.7.1 + github.com/sigstore/sigstore 
v1.7.6 github.com/sirupsen/logrus v1.9.3 github.com/stretchr/testify v1.8.4 - github.com/sylabs/sif/v2 v2.11.5 - github.com/theupdateframework/go-tuf v0.5.2 + github.com/sylabs/sif/v2 v2.15.0 github.com/ulikunitz/xz v0.5.11 github.com/vbatts/tar-split v0.11.5 - github.com/vbauerster/mpb/v8 v8.5.2 + github.com/vbauerster/mpb/v8 v8.7.0 github.com/xeipuuv/gojsonschema v1.2.0 - go.etcd.io/bbolt v1.3.7 - golang.org/x/crypto v0.12.0 - golang.org/x/exp v0.0.0-20230801115018-d63ba01acd4b - golang.org/x/oauth2 v0.11.0 - golang.org/x/sync v0.3.0 - golang.org/x/term v0.11.0 + go.etcd.io/bbolt v1.3.8 + golang.org/x/crypto v0.16.0 + golang.org/x/exp v0.0.0-20231006140011-7918f672742d + golang.org/x/oauth2 v0.15.0 + golang.org/x/sync v0.5.0 + golang.org/x/term v0.15.0 gopkg.in/yaml.v3 v3.0.1 ) require ( github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774 // indirect + github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect - github.com/Microsoft/hcsshim v0.10.0 // indirect + github.com/Microsoft/hcsshim v0.12.0-rc.1 // indirect github.com/VividCortex/ewma v1.2.0 // indirect github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/chzyer/readline v1.5.1 // indirect github.com/containerd/cgroups/v3 v3.0.2 // indirect github.com/containerd/containerd v1.7.0 // indirect - github.com/containerd/stargz-snapshotter/estargz v0.14.3 // indirect - github.com/coreos/go-oidc/v3 v3.6.0 // indirect - github.com/cyphar/filepath-securejoin v0.2.3 // indirect + github.com/containerd/stargz-snapshotter/estargz v0.15.1 // indirect + github.com/coreos/go-oidc/v3 v3.7.0 // indirect + github.com/cyphar/filepath-securejoin v0.2.4 // indirect github.com/davecgh/go-spew v1.1.1 // indirect + github.com/docker/go-metrics 
v0.0.1 // indirect github.com/docker/go-units v0.5.0 // indirect - github.com/go-jose/go-jose/v3 v3.0.0 // indirect + github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 // indirect + github.com/go-jose/go-jose/v3 v3.0.1 // indirect github.com/go-logr/logr v1.2.4 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-openapi/analysis v0.21.4 // indirect @@ -74,9 +83,9 @@ require ( github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.3 // indirect - github.com/google/go-containerregistry v0.15.2 // indirect + github.com/google/go-containerregistry v0.16.1 // indirect github.com/google/go-intervals v0.0.2 // indirect - github.com/google/uuid v1.3.0 // indirect + github.com/google/uuid v1.4.0 // indirect github.com/gorilla/mux v1.8.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect @@ -84,23 +93,28 @@ require ( github.com/json-iterator/go v1.1.12 // indirect github.com/letsencrypt/boulder v0.0.0-20230213213521-fdfea0d469b6 // indirect github.com/mailru/easyjson v0.7.7 // indirect - github.com/mattn/go-runewidth v0.0.14 // indirect + github.com/mattn/go-runewidth v0.0.15 // indirect github.com/mattn/go-shellwords v1.0.12 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/miekg/pkcs11 v1.1.1 // indirect github.com/mistifyio/go-zfs/v3 v3.0.1 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect - github.com/moby/sys/mountinfo v0.6.2 // indirect + github.com/moby/sys/mountinfo v0.7.1 // indirect github.com/moby/term v0.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/morikuni/aec v1.0.0 // indirect github.com/oklog/ulid v1.3.1 // indirect - github.com/opencontainers/runc v1.1.8 // indirect + github.com/opencontainers/runc v1.1.10 
// indirect github.com/opencontainers/runtime-spec v1.1.0 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/prometheus/client_golang v1.17.0 // indirect + github.com/prometheus/client_model v0.5.0 // indirect + github.com/prometheus/common v0.44.0 // indirect + github.com/prometheus/procfs v0.11.1 // indirect github.com/rivo/uniseg v0.4.4 // indirect - github.com/rogpeppe/go-internal v1.6.1 // indirect github.com/segmentio/ksuid v1.0.4 // indirect github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 // indirect @@ -109,23 +123,22 @@ require ( github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect - go.mongodb.org/mongo-driver v1.11.3 // indirect + go.mongodb.org/mongo-driver v1.13.0 // indirect go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/otel v1.16.0 // indirect go.opentelemetry.io/otel/metric v1.16.0 // indirect go.opentelemetry.io/otel/trace v1.16.0 // indirect - golang.org/x/mod v0.11.0 // indirect - golang.org/x/net v0.14.0 // indirect - golang.org/x/sys v0.11.0 // indirect - golang.org/x/text v0.12.0 // indirect - golang.org/x/tools v0.8.0 // indirect - google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 // indirect - google.golang.org/grpc v1.56.2 // indirect + golang.org/x/mod v0.13.0 // indirect + golang.org/x/net v0.19.0 // indirect + golang.org/x/sys v0.15.0 // indirect + golang.org/x/text v0.14.0 // indirect + golang.org/x/tools v0.14.0 // indirect + google.golang.org/appengine v1.6.8 // indirect + 
google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 // indirect + google.golang.org/grpc v1.58.3 // indirect google.golang.org/protobuf v1.31.0 // indirect gopkg.in/go-jose/go-jose.v2 v2.6.1 // indirect - gopkg.in/square/go-jose.v2 v2.6.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect - gotest.tools/v3 v3.4.0 // indirect + gotest.tools/v3 v3.5.0 // indirect ) diff --git a/go.sum b/go.sum index 5db44e134c..bfa2f5bcfe 100644 --- a/go.sum +++ b/go.sum @@ -4,25 +4,32 @@ dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774 h1:SCbEWT58NSt7d2mcFdvxC9uyrdcTfvBbPLThhkDmXzg= github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774/go.mod h1:6/0dYRLLXyJjbkIPeeGyoJ/eKOSI0eU6eTlCBYibgd0= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8= github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= -github.com/Microsoft/hcsshim v0.10.0 h1:PbvoxdUGgXxyirmN5Oncp3POLkxEG5LbWCEBfWmHTGA= -github.com/Microsoft/hcsshim v0.10.0/go.mod h1:3j1trOamcUdi86J5Tr5+1BpqMjSv/QeRWkX2whBF6dY= +github.com/Microsoft/hcsshim v0.12.0-rc.1 h1:Hy+xzYujv7urO5wrgcG58SPMOXNLrj4WCJbySs2XX/A= +github.com/Microsoft/hcsshim v0.12.0-rc.1/go.mod h1:Y1a1S0QlYp1mBpyvGiuEdOfZqnao+0uX5AWHXQ5NhZU= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc 
v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow= github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4= github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8= github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/logex v1.2.1 h1:XHDu3E6q+gdHgsdTPH6ImJMIp436vR6MPtH8gP05QzM= github.com/chzyer/logex v1.2.1/go.mod 
h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwysCBrQ= @@ -38,34 +45,43 @@ github.com/containerd/cgroups/v3 v3.0.2 h1:f5WFqIVSgo5IZmtTT3qVBo6TzI1ON6sycSBKk github.com/containerd/cgroups/v3 v3.0.2/go.mod h1:JUgITrzdFqp42uI2ryGA+ge0ap/nxzYgkGmIcetmErE= github.com/containerd/containerd v1.7.0 h1:G/ZQr3gMZs6ZT0qPUZ15znx5QSdQdASW11nXTLTM2Pg= github.com/containerd/containerd v1.7.0/go.mod h1:QfR7Efgb/6X2BDpTPJRvPTYDE9rsF0FsXX9J8sIs/sc= -github.com/containerd/stargz-snapshotter/estargz v0.14.3 h1:OqlDCK3ZVUO6C3B/5FSkDwbkEETK84kQgEeFwDC+62k= -github.com/containerd/stargz-snapshotter/estargz v0.14.3/go.mod h1:KY//uOCIkSuNAHhJogcZtrNHdKrA99/FCCRjE3HD36o= +github.com/containerd/stargz-snapshotter/estargz v0.15.1 h1:eXJjw9RbkLFgioVaTG+G/ZW/0kEe2oEKCdS/ZxIyoCU= +github.com/containerd/stargz-snapshotter/estargz v0.15.1/go.mod h1:gr2RNwukQ/S9Nv33Lt6UC7xEx58C+LHRdoqbEKjz1Kk= github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA= github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY= -github.com/containers/ocicrypt v1.1.7 h1:thhNr4fu2ltyGz8aMx8u48Ae0Pnbip3ePP9/mzkZ/3U= -github.com/containers/ocicrypt v1.1.7/go.mod h1:7CAhjcj2H8AYp5YvEie7oVSK2AhBY8NscCYRawuDNtw= -github.com/containers/storage v1.48.1-0.20230728131509-c3da76fa3f63 h1:oHER814v4p86QDV9EKZRSaMmEeC8yWD2wXlFAige1kc= -github.com/containers/storage v1.48.1-0.20230728131509-c3da76fa3f63/go.mod h1:m9LC8fEm9FcuJ4wOJHYmCqdQUb0f66850wXyen+hh78= -github.com/coreos/go-oidc/v3 v3.6.0 h1:AKVxfYw1Gmkn/w96z0DbT/B/xFnzTd3MkZvWLjF4n/o= -github.com/coreos/go-oidc/v3 v3.6.0/go.mod h1:ZpHUsHBucTUj6WOkrP4E20UPynbLZzhTQ1XKCXkxyPc= +github.com/containers/ocicrypt v1.1.9 h1:2Csfba4jse85Raxk5HIyEk8OwZNjRvfkhEGijOjIdEM= +github.com/containers/ocicrypt v1.1.9/go.mod h1:dTKx1918d8TDkxXvarscpNVY+lyPakPNFN4jwA9GBys= +github.com/containers/storage v1.51.1-0.20231205203947-fe005407c7d5 h1:eiCkAt+i9BYRjR7KEKPI3iORCSABhY+spM/w8BkI2lo= 
+github.com/containers/storage v1.51.1-0.20231205203947-fe005407c7d5/go.mod h1:pMhG1O3eMGlQKpuEuv7ves+K3BsK8/UJs8ctV5fEaoI= +github.com/coreos/go-oidc/v3 v3.7.0 h1:FTdj0uexT4diYIPlF4yoFVI5MRO1r5+SEcIpEw9vC0o= +github.com/coreos/go-oidc/v3 v3.7.0/go.mod h1:yQzSCqBnK3e6Fs5l+f5i0F8Kwf0zpH9bPEsbY00KanM= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/cyberphone/json-canonicalization v0.0.0-20230710064741-aa7fe85c7dbd h1:0av0vtcjA8Hqv5gyWj79CLCFVwOOyBNWPjrfUWceMNg= -github.com/cyberphone/json-canonicalization v0.0.0-20230710064741-aa7fe85c7dbd/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw= -github.com/cyphar/filepath-securejoin v0.2.3 h1:YX6ebbZCZP7VkM3scTTokDgBL2TY741X51MTk3ycuNI= -github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= +github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= +github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46 h1:2Dx4IHfC1yHWI12AxQDJM1QbRCDfk6M+blLzlZCXdrc= +github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw= +github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= +github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= -github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v24.0.5+incompatible h1:WmgcE4fxyI6EEXxBRxsHnZXrO1pQ3smi0k/jho4HLeY= -github.com/docker/docker v24.0.5+incompatible/go.mod 
h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= +github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/docker/cli v24.0.7+incompatible h1:wa/nIwYFW7BVTGa7SWPVyyXU9lgORqUb1xfI36MSkFg= +github.com/docker/cli v24.0.7+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= +github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v24.0.7+incompatible h1:Wo6l37AuwP3JaMnZa226lzVXGA3F9Ig1seQen0cKYlM= +github.com/docker/docker v24.0.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.8.0 h1:YQFtbBQb4VrpoPxhFuzEBPQ9E16qz5SpHLS+uswaCp8= github.com/docker/docker-credential-helpers v0.8.0/go.mod h1:UGFXcuoQ5TxPiB54nHOZ32AWRqQdECoh/Mg0AlEYb40= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8= +github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 h1:UhxFibDNY/bfvqU5CAUmr9zpesgbU6SWc8/B4mflAE4= +github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod 
h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= @@ -73,8 +89,11 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7 github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a h1:yDWHCSQ40h88yih2JAcL6Ls/kVkSE8GFACTGVnMPruw= github.com/facebookgo/limitgroup v0.0.0-20150612190941-6abd8d71ec01 h1:IeaD1VDVBPlx3viJT9Md8if8IxxJnO+x0JCGb054heg= github.com/facebookgo/muster v0.0.0-20150708232844-fd3d7953fd52 h1:a4DFiKFJiDRGFD1qIcqGLX/WlUMD9dyLSLDt+9QZgt8= -github.com/go-jose/go-jose/v3 v3.0.0 h1:s6rrhirfEP/CGIoc6p+PZAeogN2SxKav6Wp7+dyMWVo= -github.com/go-jose/go-jose/v3 v3.0.0/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8= +github.com/go-jose/go-jose/v3 v3.0.1 h1:pWmKFVtt+Jl0vBZTIpz/eAKwsm6LkIxDVVbFHKkchhA= +github.com/go-jose/go-jose/v3 v3.0.1/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -108,8 +127,8 @@ github.com/go-openapi/spec v0.20.9/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6 github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg= github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k= github.com/go-openapi/strfmt v0.21.3/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg= -github.com/go-openapi/strfmt v0.21.7 h1:rspiXgNWgeUzhjo1YU01do6qsahtJNByjLVbPLNHb8k= -github.com/go-openapi/strfmt v0.21.7/go.mod 
h1:adeGTkxE44sPyLk0JV235VQAO/ZXUr8KAzYjclFs3ew= +github.com/go-openapi/strfmt v0.21.9 h1:LnEGOO9qyEC1v22Bzr323M98G13paIUGPU7yeJtG9Xs= +github.com/go-openapi/strfmt v0.21.9/go.mod h1:0k3v301mglEaZRJdDDGSlN6Npq4VMVU69DE0LUyf7uA= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= @@ -118,7 +137,7 @@ github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogB github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/go-openapi/validate v0.22.1 h1:G+c2ub6q47kfX1sOBLwIQwzBVt8qmOAARyo/9Fqs9NU= github.com/go-openapi/validate v0.22.1/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg= -github.com/go-rod/rod v0.113.3 h1:oLiKZW721CCMwA5g7977cWfcAKQ+FuosP47Zf1QiDrA= +github.com/go-rod/rod v0.114.5 h1:1x6oqnslwFVuXJbJifgxspJUd3O4ntaGhRLHt+4Er9c= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-test/deep v1.1.0 h1:WOcxcdHcvdgThNXjw0t76K42FXTU7HpNQWHpA2HHNlg= github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= @@ -145,6 +164,7 @@ github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWe github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ= github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0= github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog 
v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= @@ -163,6 +183,7 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= @@ -174,17 +195,17 @@ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= -github.com/google/go-containerregistry v0.15.2 h1:MMkSh+tjSdnmJZO7ljvEqV1DjfekB6VUEAZgy3a+TQE= -github.com/google/go-containerregistry v0.15.2/go.mod h1:wWK+LnOv4jXMM23IT/F1wdYftGWGr47Is8CG+pmHK1Q= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-containerregistry v0.16.1 h1:rUEt426sR6nyrL3gt+18ibRcvYpKYdpsa5ZW7MA08dQ= +github.com/google/go-containerregistry v0.16.1/go.mod h1:u0qB2l7mvtWVR5kNcbFIhFY1hLbf8eeGapA+vbFDCtQ= github.com/google/go-intervals v0.0.2 h1:FGrVEiUnTRKR8yE04qzXYaJMtnIYqobR5QbblK3ixcM= github.com/google/go-intervals v0.0.2/go.mod h1:MkaR3LNRfeKLPmqgJYs4E66z5InYjmCjbbr4TQlcT6Y= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= 
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= +github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -196,8 +217,8 @@ github.com/hashicorp/go-hclog v0.9.2 h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxC github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-retryablehttp v0.7.4 h1:ZQgVdpTdAL7WpMIwLzCfbalOcSUdkDZnpUv3/+BxzFA= -github.com/hashicorp/go-retryablehttp v0.7.4/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8= +github.com/hashicorp/go-retryablehttp v0.7.5 h1:bJj+Pj19UZMIweq/iie+1u5YCdGrnxCT9yvm0e+Nd5M= +github.com/hashicorp/go-retryablehttp v0.7.5/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8= github.com/honeycombio/beeline-go v1.10.0 h1:cUDe555oqvw8oD76BQJ8alk7FP0JZ/M/zXpNvOEDLDc= github.com/honeycombio/libhoney-go v1.16.0 h1:kPpqoz6vbOzgp7jC6SR7SkNj7rua7rgxvznI6M3KdHc= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= @@ -205,22 +226,26 @@ github.com/jmhodges/clock v0.0.0-20160418191101-880ee4c33548 h1:dYTbLf4m0a5u0KLm 
github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I= -github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4= +github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU= github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod 
h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -236,11 +261,15 @@ github.com/manifoldco/promptui v0.9.0 h1:3V4HzJk1TtXW1MTZMP7mdlwbBpIinw3HztaIlYt github.com/manifoldco/promptui v0.9.0/go.mod h1:ka04sppxSGFAtxX0qhlYQjISsg9mR4GWtQEhdbn6Pgg= github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= -github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU= -github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= +github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk= github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= +github.com/mattn/go-sqlite3 v1.14.18 h1:JL0eqdCOq6DJVNPSvArO/bIV9/P7fbGrV00LZHc+5aI= +github.com/mattn/go-sqlite3 v1.14.18/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= +github.com/matttproud/golang_protobuf_extensions 
v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU= github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/mistifyio/go-zfs/v3 v3.0.1 h1:YaoXgBePoMA12+S1u/ddkv+QqxcfiZK4prI6HPnkFiU= @@ -249,27 +278,30 @@ github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RR github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78= -github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= +github.com/moby/sys/mountinfo v0.7.1 h1:/tTvQaSJRr2FshkhXiIpux6fQ2Zvc4j7tAhMTStAG2g= +github.com/moby/sys/mountinfo v0.7.1/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/montanaflynn/stats 
v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/image-spec v1.1.0-rc4 h1:oOxKUJWnFC4YGHCCMNql1x4YaDfYBTS5Y4x/Cgeo1E0= -github.com/opencontainers/image-spec v1.1.0-rc4/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8= -github.com/opencontainers/runc v1.1.8 h1:zICRlc+C1XzivLc3nzE+cbJV4LIi8tib6YG0MqC6OqA= -github.com/opencontainers/runc v1.1.8/go.mod h1:CbUumNnWCuTGFukNXahoo/RFBZvDAgRh/smNYNOhA50= +github.com/opencontainers/image-spec v1.1.0-rc5 h1:Ygwkfw9bpDvs+c9E34SdgGOj41dX/cbdlwvlWt0pnFI= +github.com/opencontainers/image-spec v1.1.0-rc5/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8= +github.com/opencontainers/runc v1.1.10 h1:EaL5WeO9lv9wmS6SASjszOeQdSctvpbu0DdBQBizE40= +github.com/opencontainers/runc v1.1.10/go.mod h1:+/R6+KmDlh+hOO8NkjmgkG9Qzvypzk0yXxAPYYR65+M= github.com/opencontainers/runtime-spec v1.1.0 h1:HHUyrt9mwHUjtasSbXSMvs4cyFxh+Bll4AjJ9odEGpg= github.com/opencontainers/runtime-spec v1.1.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU= @@ 
-278,6 +310,9 @@ github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+ github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f h1:/UDgs8FGMqwnHagNDPGOlts35QkhAZ8by3DR7nMih7M= github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc= +github.com/otiai10/copy v1.14.0 h1:dCI/t1iTdYGtkvCuBG2BgR6KZa83PTclw4U5n2wAllU= +github.com/otiai10/copy v1.14.0/go.mod h1:ECfuL02W+/FkTWZWgQqXPWZgW9oeKCSQ5qVfSc4qc4w= +github.com/otiai10/mint v1.5.1 h1:XaPLeE+9vGbuyEHem1JNk3bYc7KKqyI/na0/mLd/Kks= github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -287,33 +322,48 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/proglottis/gpgme v0.1.3 h1:Crxx0oz4LKB3QXc5Ea0J19K/3ICfy3ftr5exgUK1AU0= github.com/proglottis/gpgme v0.1.3/go.mod h1:fPbW/EZ0LvwQtH8Hy7eixhp1eF3G39dtx7GUN+0Gmy0= -github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= +github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= +github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod 
h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= +github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= +github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= -github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= +github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI= +github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis= github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod 
h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= -github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/sebdah/goldie/v2 v2.5.3 h1:9ES/mNN+HNUbNWpVAlrzuZ7jE+Nrczbj8uFRjM7624Y= +github.com/secure-systems-lab/go-securesystemslib v0.7.0 h1:OwvJ5jQf9LnIAS83waAjPbcMsODrTQUpJ02eNLUoxBg= +github.com/secure-systems-lab/go-securesystemslib v0.7.0/go.mod h1:/2gYnlnHVQ6xeGtfIqFy7Do03K4cdCY0A/GlJLDKLHI= github.com/segmentio/ksuid v1.0.4 h1:sBo2BdShXjmcugAMwjugoGUdUV0pcxY5mW4xKRn3v4c= github.com/segmentio/ksuid v1.0.4/go.mod h1:/XUiZBD3kVx5SmUOl55voK5yeAbBNNIed+2O73XgrPE= github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= -github.com/sigstore/fulcio v1.4.0 h1:05+k8BFvwTQzfCkVxESWzCN4b70KIRliGYz0Upmdrs8= -github.com/sigstore/fulcio v1.4.0/go.mod h1:wcjlktbhoy6+ZTxO3yXpvqUxsLV+JEH4FF3a5Jz4VPI= +github.com/sigstore/fulcio v1.4.3 h1:9JcUCZjjVhRF9fmhVuz6i1RyhCc/EGCD7MOl+iqCJLQ= +github.com/sigstore/fulcio v1.4.3/go.mod h1:BQPWo7cfxmJwgaHlphUHUpFkp5+YxeJes82oo39m5og= github.com/sigstore/rekor v1.2.2 h1:5JK/zKZvcQpL/jBmHvmFj3YbpDMBQnJQ6ygp8xdF3bY= github.com/sigstore/rekor v1.2.2/go.mod h1:FGnWBGWzeNceJnp0x9eDFd41mI8aQqCjj+Zp0IEs0Qg= -github.com/sigstore/sigstore v1.7.1 h1:fCATemikcBK0cG4+NcM940MfoIgmioY1vC6E66hXxks= -github.com/sigstore/sigstore v1.7.1/go.mod h1:0PmMzfJP2Y9+lugD0wer4e7TihR5tM7NcIs3bQNk5xg= +github.com/sigstore/sigstore v1.7.6 h1:zB0woXx+3Bp7dk7AjklHF1VhXBdCs84VXkZbp0IHLv8= +github.com/sigstore/sigstore v1.7.6/go.mod h1:FJE+NpEZIs4QKqZl4B2RtaVLVDcDtocAwTiNlexeBkY= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod 
h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 h1:JIAuq3EEf9cgbU6AtGPK4CTG3Zf6CKMNqf0MHTggAUA= @@ -335,31 +385,30 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/sylabs/sif/v2 v2.11.5 h1:7ssPH3epSonsTrzbS1YxeJ9KuqAN7ISlSM61a7j/mQM= -github.com/sylabs/sif/v2 v2.11.5/go.mod h1:GBoZs9LU3e4yJH1dcZ3Akf/jsqYgy5SeguJQC+zd75Y= +github.com/sylabs/sif/v2 v2.15.0 h1:Nv0tzksFnoQiQ2eUwpAis9nVqEu4c3RcNSxX8P3Cecw= +github.com/sylabs/sif/v2 v2.15.0/go.mod h1:X1H7eaPz6BAxA84POMESXoXfTqgAnLQkujyF/CQFWTc= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/tchap/go-patricia/v2 v2.3.1 h1:6rQp39lgIYZ+MHmdEq4xzuk1t7OdC35z/xm0BGhTkes= github.com/tchap/go-patricia/v2 v2.3.1/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k= -github.com/theupdateframework/go-tuf v0.5.2 h1:habfDzTmpbzBLIFGWa2ZpVhYvFBoK0C1onC3a4zuPRA= -github.com/theupdateframework/go-tuf v0.5.2/go.mod h1:SyMV5kg5n4uEclsyxXJZI2UxPFJNDc4Y+r7wv+MlvTA= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/tidwall/pretty v1.2.0 
h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0= github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs= github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8= github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/vbatts/tar-split v0.11.5 h1:3bHCTIheBm1qFTcgh9oPu+nNBtX+XJIupG/vacinCts= github.com/vbatts/tar-split v0.11.5/go.mod h1:yZbwRsSeGjusneWgA781EKej9HF8vme8okylkAeNKLk= -github.com/vbauerster/mpb/v8 v8.5.2 h1:zanzt1cZpSEG5uGNYKcv43+97f0IgEnXpuBFaMxKbM0= -github.com/vbauerster/mpb/v8 v8.5.2/go.mod h1:YqKyR4ZR6Gd34yD3cDHPMmQxc+uUQMwjgO/LkxiJQ6I= +github.com/vbauerster/mpb/v8 v8.7.0 h1:n2LTGyol7qqNBcLQn8FL5Bga2O8CGF75OOYsJVFsfMg= +github.com/vbauerster/mpb/v8 v8.7.0/go.mod h1:0RgdqeTpu6cDbdWeSaDvEvfgm9O598rBnRZ09HKaV0k= github.com/vmihailenco/msgpack/v5 v5.3.5 h1:5gO0H1iULLWGhs2H5tbAHIZTV8/cYafcFOr9znI5mJU= github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g= github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs= github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g= +github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4= github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM= github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8= +github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb 
h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= @@ -375,14 +424,14 @@ github.com/ysmood/gson v0.7.3 h1:QFkWbTH8MxyUTKPkVWAENJhxqdBa4lYTQWqZCiLG6kE= github.com/ysmood/leakless v0.8.0 h1:BzLrVoiwxikpgEQR0Lk8NyBN5Cit2b1z+u0mgL4ZJak= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ= -go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +go.etcd.io/bbolt v1.3.8 h1:xs88BrvEv273UsB79e0hcVrlUWmS0a8upikMFhSyAtA= +go.etcd.io/bbolt v1.3.8/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg= go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng= go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8= -go.mongodb.org/mongo-driver v1.11.3 h1:Ql6K6qYHEzB6xvu4+AU0BoRoqf9vFPcc4o7MUIdPW8Y= -go.mongodb.org/mongo-driver v1.11.3/go.mod h1:PTSz5yu21bkT/wXpkS7WR5f0ddqw5quethTUn9WM+2g= -go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= +go.mongodb.org/mongo-driver v1.13.0 h1:67DgFFjYOCMWdtTEmKFpV3ffWlFnh+CYZ8ZS/tXWUfY= +go.mongodb.org/mongo-driver v1.13.0/go.mod h1:/rGBTebI3XYboVmgz+Wv3Bcbl3aD0QF9zl6kDDw18rQ= go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 h1:CCriYyAfq1Br1aIYettdHZTy8mBTIPo7We18TuO/bak= go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= @@ -402,26 +451,28 @@ golang.org/x/crypto 
v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk= -golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= +golang.org/x/crypto v0.16.0 h1:mMMrFzRSCF0GvB7Ne27XVtVAaXLrPmgPC7/v0tkwHaY= +golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20230801115018-d63ba01acd4b h1:r+vk0EmXNmekl0S0BascoeeoHk/L7wmaW2QF90K+kYI= -golang.org/x/exp v0.0.0-20230801115018-d63ba01acd4b/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= +golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI= +golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod 
v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.11.0 h1:bUO06HqtnRcc/7l71XBe4WcqTZ+3AH1J59zWDDwLKgU= -golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.13.0 h1:I/DsJXRlw/8l/0c24sM9yb0T4z9liZTduXvdAWYiysY= +golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= @@ -429,23 +480,27 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM= golang.org/x/net 
v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14= -golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= +golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= -golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= +golang.org/x/oauth2 v0.15.0 h1:s8pnnxNVzjWyrvYdFUQq5llS1PX2zhPXmccZv99h7uQ= +golang.org/x/oauth2 v0.15.0/go.mod h1:q48ptWNTY5XWf+JNten23lcvHpLJ0ZSxF5ttTHKVCAM= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= 
-golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= +golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -453,29 +508,32 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM= -golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.11.0 h1:F9tnn/DA/Im8nCwm+fX+1/eBwi4qFjRT++MhtVC4ZX0= -golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4= +golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod 
h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc= -golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -489,29 +547,29 @@ golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y= -golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc= +golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= +google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 h1:bVf09lpb+OJbByTj913DRJioFFAjf/ZGxEz7MajTp2U= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 h1:N3bU/SQDCDyD6R528GJ/PwW9KjYcJA3dgyH+MovAkIM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13/go.mod h1:KSqppvjFjtoCI+KGd4PELB0qLNxdJHRGqRI09mB6pQA= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= 
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.56.2 h1:fVRFRnXvU+x6C4IlHZewvJOVHoOv1TUuQyoRsYnB4bI= -google.golang.org/grpc v1.56.2/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= +google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ= +google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -525,6 +583,7 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/alexcesaro/statsd.v2 v2.0.0 h1:FXkZSCZIH17vLCO5sO2UucTHsH9pc+17F6pl3JVCwMc= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -534,9 +593,7 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EV gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/go-jose/go-jose.v2 v2.6.1 h1:qEzJlIDmG9q5VO0M/o8tGS65QMHMS1w01TQJB1VPJ4U= gopkg.in/go-jose/go-jose.v2 v2.6.1/go.mod h1:zzZDPkNNw/c9IE7Z9jr11mBZQhKQTMzoEEIoEdZlFBI= -gopkg.in/square/go-jose.v2 
v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/square/go-jose.v2 v2.6.0 h1:NGk74WTnPKBNUhNzQX7PYcTLUjoq7mzKk2OKbvwk2iI= -gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= @@ -545,11 +602,10 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= -gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o= -gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g= +gotest.tools/v3 v3.5.0 h1:Ljk6PdHdOhAb5aDMWXjDLMMhph+BpztA4v1QdqEW2eY= +gotest.tools/v3 v3.5.0/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/image/unparsed.go b/image/unparsed.go index 123f6ce6f1..f2ebb929a2 100644 --- a/image/unparsed.go +++ b/image/unparsed.go @@ -2,6 +2,8 @@ package image import ( 
"github.com/containers/image/v5/internal/image" + "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/internal/unparsedimage" "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" ) @@ -17,3 +19,23 @@ type UnparsedImage = image.UnparsedImage func UnparsedInstance(src types.ImageSource, instanceDigest *digest.Digest) *UnparsedImage { return image.UnparsedInstance(src, instanceDigest) } + +// unparsedWithRef wraps a private.UnparsedImage, claiming another replacementRef +type unparsedWithRef struct { + private.UnparsedImage + ref types.ImageReference +} + +func (uwr *unparsedWithRef) Reference() types.ImageReference { + return uwr.ref +} + +// UnparsedInstanceWithReference returns a types.UnparsedImage for wrappedInstance which claims to be a replacementRef. +// This is useful for combining image data with other reference values, e.g. to check signatures on a locally-pulled image +// based on a remote-registry policy. +func UnparsedInstanceWithReference(wrappedInstance types.UnparsedImage, replacementRef types.ImageReference) types.UnparsedImage { + return &unparsedWithRef{ + UnparsedImage: unparsedimage.FromPublic(wrappedInstance), + ref: replacementRef, + } +} diff --git a/internal/blobinfocache/blobinfocache.go b/internal/blobinfocache/blobinfocache.go index b86e8b1ac3..2767c39507 100644 --- a/internal/blobinfocache/blobinfocache.go +++ b/internal/blobinfocache/blobinfocache.go @@ -23,6 +23,12 @@ type v1OnlyBlobInfoCache struct { types.BlobInfoCache } +func (bic *v1OnlyBlobInfoCache) Open() { +} + +func (bic *v1OnlyBlobInfoCache) Close() { +} + func (bic *v1OnlyBlobInfoCache) RecordDigestCompressorName(anyDigest digest.Digest, compressorName string) { } diff --git a/internal/blobinfocache/types.go b/internal/blobinfocache/types.go index 3c2be57f32..4d3858ab8d 100644 --- a/internal/blobinfocache/types.go +++ b/internal/blobinfocache/types.go @@ -18,6 +18,13 @@ const ( // of compression was applied to the blobs 
it keeps information about. type BlobInfoCache2 interface { types.BlobInfoCache + + // Open() sets up the cache for future accesses, potentially acquiring costly state. Each Open() must be paired with a Close(). + // Note that public callers may call the types.BlobInfoCache operations without Open()/Close(). + Open() + // Close destroys state created by Open(). + Close() + // RecordDigestCompressorName records a compressor for the blob with the specified digest, // or Uncompressed or UnknownCompression. // WARNING: Only call this with LOCALLY VERIFIED data; don’t record a compressor for a @@ -25,11 +32,11 @@ type BlobInfoCache2 interface { // otherwise the cache could be poisoned and cause us to make incorrect edits to type // information in a manifest. RecordDigestCompressorName(anyDigest digest.Digest, compressorName string) - // CandidateLocations2 returns a prioritized, limited, number of blobs and their locations + // CandidateLocations2 returns a prioritized, limited, number of blobs and their locations (if known) // that could possibly be reused within the specified (transport scope) (if they still // exist, which is not guaranteed). // - // If !canSubstitute, the returned cadidates will match the submitted digest exactly; if + // If !canSubstitute, the returned candidates will match the submitted digest exactly; if // canSubstitute, data from previous RecordDigestUncompressedPair calls is used to also look // up variants of the blob which have the same uncompressed digest. // @@ -39,7 +46,8 @@ type BlobInfoCache2 interface { // BICReplacementCandidate2 is an item returned by BlobInfoCache2.CandidateLocations2. 
type BICReplacementCandidate2 struct { - Digest digest.Digest - CompressorName string // either the Name() of a known pkg/compression.Algorithm, or Uncompressed or UnknownCompression - Location types.BICLocationReference + Digest digest.Digest + CompressorName string // either the Name() of a known pkg/compression.Algorithm, or Uncompressed or UnknownCompression + UnknownLocation bool // is true when `Location` for this blob is not set + Location types.BICLocationReference // not set if UnknownLocation is set to `true` } diff --git a/internal/image/common_test.go b/internal/image/common_test.go index d903cfc469..d66fb4130f 100644 --- a/internal/image/common_test.go +++ b/internal/image/common_test.go @@ -6,8 +6,11 @@ import ( "path/filepath" "testing" + compressiontypes "github.com/containers/image/v5/pkg/compression/types" + "github.com/containers/image/v5/types" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "golang.org/x/exp/slices" ) // assertJSONEqualsFixture tests that jsonBytes is structurally equal to fixture, @@ -29,3 +32,22 @@ func assertJSONEqualsFixture(t *testing.T, jsonBytes []byte, fixture string, ign } assert.Equal(t, fixtureContents, contents) } + +// layerInfosWithCryptoOperation returns a copy of input where CryptoOperation is set to op +func layerInfosWithCryptoOperation(input []types.BlobInfo, op types.LayerCrypto) []types.BlobInfo { + res := slices.Clone(input) + for i := range res { + res[i].CryptoOperation = op + } + return res +} + +// layerInfosWithCompressionEdits returns a copy of input where CompressionOperation and CompressionAlgorithm is set to op and algo +func layerInfosWithCompressionEdits(input []types.BlobInfo, op types.LayerCompression, algo *compressiontypes.Algorithm) []types.BlobInfo { + res := slices.Clone(input) + for i := range res { + res[i].CompressionOperation = op + res[i].CompressionAlgorithm = algo + } + return res +} diff --git a/internal/image/docker_schema1_test.go 
b/internal/image/docker_schema1_test.go index e536d567bc..ee3f31187e 100644 --- a/internal/image/docker_schema1_test.go +++ b/internal/image/docker_schema1_test.go @@ -14,6 +14,7 @@ import ( imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "golang.org/x/exp/slices" ) var schema1FixtureLayerInfos = []types.BlobInfo{ @@ -372,7 +373,7 @@ func TestManifestSchema1UpdatedImage(t *testing.T) { original := manifestSchema1FromFixture(t, "schema1.json") // LayerInfos: - layerInfos := append(original.LayerInfos()[1:], original.LayerInfos()[0]) + layerInfos := append(slices.Clone(original.LayerInfos()[1:]), original.LayerInfos()[0]) res, err := original.UpdatedImage(context.Background(), types.ManifestUpdateOptions{ LayerInfos: layerInfos, }) @@ -507,6 +508,18 @@ func TestManifestSchema1ConvertToSchema2(t *testing.T) { }, }, s2Manifest.LayerInfos()) + // Conversion to schema2 with encryption fails + encryptedLayers := layerInfosWithCryptoOperation(original.LayerInfos(), types.Encrypt) + _, err = original.UpdatedImage(context.Background(), types.ManifestUpdateOptions{ + LayerInfos: encryptedLayers, + ManifestMIMEType: manifest.DockerV2Schema2MediaType, + InformationOnly: types.ManifestUpdateInformation{ + LayerInfos: updatedLayers, + LayerDiffIDs: schema1WithThrowawaysFixtureLayerDiffIDs, + }, + }) + assert.Error(t, err) + // FIXME? Test also the various failure cases, if only to see that we don't crash? } @@ -582,6 +595,51 @@ func TestManifestSchema1ConvertToManifestOCI1(t *testing.T) { }, }, ociManifest.LayerInfos()) + // Conversion to OCI with encryption is possible. 
+ encryptedLayers := layerInfosWithCryptoOperation(schema1WithThrowawaysFixtureLayerInfos, types.Encrypt) + res, err = original.UpdatedImage(context.Background(), types.ManifestUpdateOptions{ + LayerInfos: encryptedLayers, + ManifestMIMEType: imgspecv1.MediaTypeImageManifest, + InformationOnly: types.ManifestUpdateInformation{ + LayerInfos: encryptedLayers, + LayerDiffIDs: schema1WithThrowawaysFixtureLayerDiffIDs, + }, + }) + require.NoError(t, err) + convertedJSON, mt, err = res.Manifest(context.Background()) + require.NoError(t, err) + assert.Equal(t, imgspecv1.MediaTypeImageManifest, mt) + // Layers have been updated as expected + ociManifest, err = manifestOCI1FromManifest(originalSrc, convertedJSON) + require.NoError(t, err) + assert.Equal(t, []types.BlobInfo{ + { + Digest: "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb", + Size: 51354364, + MediaType: "application/vnd.oci.image.layer.v1.tar+gzip+encrypted", + }, + { + Digest: "sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c", + Size: 150, + MediaType: "application/vnd.oci.image.layer.v1.tar+gzip+encrypted", + }, + { + Digest: "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9", + Size: 11739507, + MediaType: "application/vnd.oci.image.layer.v1.tar+gzip+encrypted", + }, + { + Digest: "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909", + Size: 8841833, + MediaType: "application/vnd.oci.image.layer.v1.tar+gzip+encrypted", + }, + { + Digest: "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa", + Size: 291, + MediaType: "application/vnd.oci.image.layer.v1.tar+gzip+encrypted", + }, + }, ociManifest.LayerInfos()) + // FIXME? Test also the various failure cases, if only to see that we don't crash? 
} diff --git a/internal/image/docker_schema2_test.go b/internal/image/docker_schema2_test.go index f2f8a3f1d0..cf3b7f4ea6 100644 --- a/internal/image/docker_schema2_test.go +++ b/internal/image/docker_schema2_test.go @@ -22,6 +22,8 @@ import ( "golang.org/x/exp/slices" ) +const commonFixtureConfigDigest = "sha256:9ca4bda0a6b3727a6ffcc43e981cad0f24e2ec79d338f6ba325b4dfd0756fb8f" + func manifestSchema2FromFixture(t *testing.T, src types.ImageSource, fixture string, mustFail bool) genericManifest { manifest, err := os.ReadFile(filepath.Join("fixtures", fixture)) require.NoError(t, err) @@ -39,7 +41,7 @@ func manifestSchema2FromComponentsLikeFixture(configBlob []byte) genericManifest return manifestSchema2FromComponents(manifest.Schema2Descriptor{ MediaType: "application/octet-stream", Size: 5940, - Digest: "sha256:9ca4bda0a6b3727a6ffcc43e981cad0f24e2ec79d338f6ba325b4dfd0756fb8f", + Digest: commonFixtureConfigDigest, }, nil, configBlob, []manifest.Schema2Descriptor{ { MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", @@ -114,7 +116,7 @@ func TestManifestSchema2ConfigInfo(t *testing.T) { } { assert.Equal(t, types.BlobInfo{ Size: 5940, - Digest: "sha256:9ca4bda0a6b3727a6ffcc43e981cad0f24e2ec79d338f6ba325b4dfd0756fb8f", + Digest: commonFixtureConfigDigest, MediaType: "application/octet-stream", }, m.ConfigInfo()) } @@ -123,11 +125,12 @@ func TestManifestSchema2ConfigInfo(t *testing.T) { // configBlobImageSource allows testing various GetBlob behaviors in .ConfigBlob() type configBlobImageSource struct { mocks.ForbiddenImageSource // We inherit almost all of the methods, which just panic() + expectedDigest digest.Digest f func() (io.ReadCloser, int64, error) } func (f configBlobImageSource) GetBlob(ctx context.Context, info types.BlobInfo, _ types.BlobInfoCache) (io.ReadCloser, int64, error) { - if info.Digest.String() != "sha256:9ca4bda0a6b3727a6ffcc43e981cad0f24e2ec79d338f6ba325b4dfd0756fb8f" { + if info.Digest != f.expectedDigest { panic("Unexpected digest 
in GetBlob") } return f.f() @@ -163,7 +166,10 @@ func TestManifestSchema2ConfigBlob(t *testing.T) { } { var src types.ImageSource if c.cbISfn != nil { - src = configBlobImageSource{f: c.cbISfn} + src = configBlobImageSource{ + expectedDigest: commonFixtureConfigDigest, + f: c.cbISfn, + } } else { src = nil } @@ -350,6 +356,7 @@ func newSchema2ImageSource(t *testing.T, dockerRef string) *schema2ImageSource { return &schema2ImageSource{ configBlobImageSource: configBlobImageSource{ + expectedDigest: commonFixtureConfigDigest, f: func() (io.ReadCloser, int64, error) { return io.NopCloser(bytes.NewReader(realConfigJSON)), int64(len(realConfigJSON)), nil }, @@ -441,7 +448,7 @@ func TestManifestSchema2UpdatedImage(t *testing.T) { original := manifestSchema2FromFixture(t, originalSrc, "schema2.json", false) // LayerInfos: - layerInfos := append(original.LayerInfos()[1:], original.LayerInfos()[0]) + layerInfos := append(slices.Clone(original.LayerInfos()[1:]), original.LayerInfos()[0]) res, err := original.UpdatedImage(context.Background(), types.ManifestUpdateOptions{ LayerInfos: layerInfos, }) @@ -514,6 +521,46 @@ func TestConvertToManifestOCI(t *testing.T) { convertedConfig, err := res.ConfigBlob(context.Background()) require.NoError(t, err) assertJSONEqualsFixture(t, convertedConfig, "schema2-to-oci1-config.json") + + // Conversion to OCI with encryption is possible. 
+ res, err = original.UpdatedImage(context.Background(), types.ManifestUpdateOptions{ + LayerInfos: layerInfosWithCryptoOperation(original.LayerInfos(), types.Encrypt), + ManifestMIMEType: imgspecv1.MediaTypeImageManifest, + }) + require.NoError(t, err) + convertedJSON, mt, err = res.Manifest(context.Background()) + require.NoError(t, err) + assert.Equal(t, imgspecv1.MediaTypeImageManifest, mt) + // Layers have been updated as expected + ociManifest, err := manifestOCI1FromManifest(originalSrc, convertedJSON) + require.NoError(t, err) + assert.Equal(t, []types.BlobInfo{ + { + Digest: "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb", + Size: 51354364, + MediaType: "application/vnd.oci.image.layer.v1.tar+gzip+encrypted", + }, + { + Digest: "sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c", + Size: 150, + MediaType: "application/vnd.oci.image.layer.v1.tar+gzip+encrypted", + }, + { + Digest: "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9", + Size: 11739507, + MediaType: "application/vnd.oci.image.layer.v1.tar+gzip+encrypted", + }, + { + Digest: "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909", + Size: 8841833, + MediaType: "application/vnd.oci.image.layer.v1.tar+gzip+encrypted", + }, + { + Digest: "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa", + Size: 291, + MediaType: "application/vnd.oci.image.layer.v1.tar+gzip+encrypted", + }, + }, ociManifest.LayerInfos()) } func TestConvertToManifestOCIAllMediaTypes(t *testing.T) { @@ -597,6 +644,16 @@ func TestConvertToManifestSchema1(t *testing.T) { {Digest: GzippedEmptyLayerDigest, Size: -1}, }, s1Manifest.LayerInfos()) + // Conversion to schema1 with encryption fails + _, err = original.UpdatedImage(context.Background(), types.ManifestUpdateOptions{ + LayerInfos: layerInfosWithCryptoOperation(original.LayerInfos(), types.Encrypt), + ManifestMIMEType: manifest.DockerV2Schema1SignedMediaType, + 
InformationOnly: types.ManifestUpdateInformation{ + Destination: memoryDest, + }, + }) + assert.Error(t, err) + // FIXME? Test also the various failure cases, if only to see that we don't crash? } diff --git a/internal/image/fixtures/oci1-all-media-types-config.json b/internal/image/fixtures/oci1-all-media-types-config.json new file mode 100644 index 0000000000..cd17d264cb --- /dev/null +++ b/internal/image/fixtures/oci1-all-media-types-config.json @@ -0,0 +1,161 @@ +{ + "architecture": "amd64", + "config": { + "Hostname": "383850eeb47b", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": false, + "AttachStderr": false, + "ExposedPorts": { + "80/tcp": {} + }, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": [ + "PATH=/usr/local/apache2/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "HTTPD_PREFIX=/usr/local/apache2", + "HTTPD_VERSION=2.4.23", + "HTTPD_SHA1=5101be34ac4a509b245adb70a56690a84fcc4e7f", + "HTTPD_BZ2_URL=https://www.apache.org/dyn/closer.cgi?action=download\u0026filename=httpd/httpd-2.4.23.tar.bz2", + "HTTPD_ASC_URL=https://www.apache.org/dist/httpd/httpd-2.4.23.tar.bz2.asc" + ], + "Cmd": [ + "httpd-foreground" + ], + "ArgsEscaped": true, + "Image": "sha256:4f83530449c67c1ed8fca72583c5b92fdf446010990028c362a381e55dd84afd", + "Volumes": null, + "WorkingDir": "/usr/local/apache2", + "Entrypoint": null, + "OnBuild": [], + "Labels": {} + }, + "container": "8825acde1b009729807e4b70a65a89399dd8da8e53be9216b9aaabaff4339f69", + "container_config": { + "Hostname": "383850eeb47b", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": false, + "AttachStderr": false, + "ExposedPorts": { + "80/tcp": {} + }, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": [ + "PATH=/usr/local/apache2/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "HTTPD_PREFIX=/usr/local/apache2", + "HTTPD_VERSION=2.4.23", + 
"HTTPD_SHA1=5101be34ac4a509b245adb70a56690a84fcc4e7f", + "HTTPD_BZ2_URL=https://www.apache.org/dyn/closer.cgi?action=download\u0026filename=httpd/httpd-2.4.23.tar.bz2", + "HTTPD_ASC_URL=https://www.apache.org/dist/httpd/httpd-2.4.23.tar.bz2.asc" + ], + "Cmd": [ + "/bin/sh", + "-c", + "#(nop) ", + "CMD [\"httpd-foreground\"]" + ], + "ArgsEscaped": true, + "Image": "sha256:4f83530449c67c1ed8fca72583c5b92fdf446010990028c362a381e55dd84afd", + "Volumes": null, + "WorkingDir": "/usr/local/apache2", + "Entrypoint": null, + "OnBuild": [], + "Labels": {} + }, + "created": "2016-09-23T23:20:45.78976459Z", + "docker_version": "1.12.1", + "history": [ + { + "created": "2016-09-23T18:08:50.537223822Z", + "created_by": "/bin/sh -c #(nop) ADD file:c6c23585ab140b0b320d4e99bc1b0eb544c9e96c24d90fec5e069a6d57d335ca in / " + }, + { + "created": "2016-09-23T18:08:51.133779867Z", + "created_by": "/bin/sh -c #(nop) CMD [\"/bin/bash\"]", + "empty_layer": true + }, + { + "created": "2016-09-23T19:16:40.725768956Z", + "created_by": "/bin/sh -c #(nop) ENV HTTPD_PREFIX=/usr/local/apache2", + "empty_layer": true + }, + { + "created": "2016-09-23T19:16:41.037788416Z", + "created_by": "/bin/sh -c #(nop) ENV PATH=/usr/local/apache2/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "empty_layer": true + }, + { + "created": "2016-09-23T19:16:41.990121202Z", + "created_by": "/bin/sh -c mkdir -p \"$HTTPD_PREFIX\" \t\u0026\u0026 chown www-data:www-data \"$HTTPD_PREFIX\"" + }, + { + "created": "2016-09-23T19:16:42.339911155Z", + "created_by": "/bin/sh -c #(nop) WORKDIR /usr/local/apache2", + "empty_layer": true + }, + { + "created": "2016-09-23T19:16:54.948461741Z", + "created_by": "/bin/sh -c apt-get update \t\u0026\u0026 apt-get install -y --no-install-recommends \t\tlibapr1 \t\tlibaprutil1 \t\tlibaprutil1-ldap \t\tlibapr1-dev \t\tlibaprutil1-dev \t\tlibpcre++0 \t\tlibssl1.0.0 \t\u0026\u0026 rm -r /var/lib/apt/lists/*" + }, + { + "created": "2016-09-23T19:16:55.321573403Z", + 
"created_by": "/bin/sh -c #(nop) ENV HTTPD_VERSION=2.4.23", + "empty_layer": true + }, + { + "created": "2016-09-23T19:16:55.629947307Z", + "created_by": "/bin/sh -c #(nop) ENV HTTPD_SHA1=5101be34ac4a509b245adb70a56690a84fcc4e7f", + "empty_layer": true + }, + { + "created": "2016-09-23T23:19:03.705796801Z", + "created_by": "/bin/sh -c #(nop) ENV HTTPD_BZ2_URL=https://www.apache.org/dyn/closer.cgi?action=download\u0026filename=httpd/httpd-2.4.23.tar.bz2", + "empty_layer": true + }, + { + "created": "2016-09-23T23:19:04.009782822Z", + "created_by": "/bin/sh -c #(nop) ENV HTTPD_ASC_URL=https://www.apache.org/dist/httpd/httpd-2.4.23.tar.bz2.asc", + "empty_layer": true + }, + { + "created": "2016-09-23T23:20:44.585743332Z", + "created_by": "/bin/sh -c set -x \t\u0026\u0026 buildDeps=' \t\tbzip2 \t\tca-certificates \t\tgcc \t\tlibpcre++-dev \t\tlibssl-dev \t\tmake \t\twget \t' \t\u0026\u0026 apt-get update \t\u0026\u0026 apt-get install -y --no-install-recommends $buildDeps \t\u0026\u0026 rm -r /var/lib/apt/lists/* \t\t\u0026\u0026 wget -O httpd.tar.bz2 \"$HTTPD_BZ2_URL\" \t\u0026\u0026 echo \"$HTTPD_SHA1 *httpd.tar.bz2\" | sha1sum -c - \t\u0026\u0026 wget -O httpd.tar.bz2.asc \"$HTTPD_ASC_URL\" \t\u0026\u0026 export GNUPGHOME=\"$(mktemp -d)\" \t\u0026\u0026 gpg --keyserver ha.pool.sks-keyservers.net --recv-keys A93D62ECC3C8EA12DB220EC934EA76E6791485A8 \t\u0026\u0026 gpg --batch --verify httpd.tar.bz2.asc httpd.tar.bz2 \t\u0026\u0026 rm -r \"$GNUPGHOME\" httpd.tar.bz2.asc \t\t\u0026\u0026 mkdir -p src \t\u0026\u0026 tar -xvf httpd.tar.bz2 -C src --strip-components=1 \t\u0026\u0026 rm httpd.tar.bz2 \t\u0026\u0026 cd src \t\t\u0026\u0026 ./configure \t\t--prefix=\"$HTTPD_PREFIX\" \t\t--enable-mods-shared=reallyall \t\u0026\u0026 make -j\"$(nproc)\" \t\u0026\u0026 make install \t\t\u0026\u0026 cd .. 
\t\u0026\u0026 rm -r src \t\t\u0026\u0026 sed -ri \t\t-e 's!^(\\s*CustomLog)\\s+\\S+!\\1 /proc/self/fd/1!g' \t\t-e 's!^(\\s*ErrorLog)\\s+\\S+!\\1 /proc/self/fd/2!g' \t\t\"$HTTPD_PREFIX/conf/httpd.conf\" \t\t\u0026\u0026 apt-get purge -y --auto-remove $buildDeps" + }, + { + "created": "2016-09-23T23:20:45.127455562Z", + "created_by": "/bin/sh -c #(nop) COPY file:761e313354b918b6cd7ea99975a4f6b53ff5381ba689bab2984aec4dab597215 in /usr/local/bin/ " + }, + { + "created": "2016-09-23T23:20:45.453934921Z", + "created_by": "/bin/sh -c #(nop) EXPOSE 80/tcp", + "empty_layer": true + }, + { + "created": "2016-09-23T23:20:45.78976459Z", + "created_by": "/bin/sh -c #(nop) CMD [\"httpd-foreground\"]", + "empty_layer": true + }, + { + "created": "2023-10-01T02:03:04.56789764Z", + "created_by": "/bin/sh echo something > last" + } + ], + "os": "linux", + "rootfs": { + "type": "layers", + "diff_ids": [ + "sha256:142a601d97936307e75220c35dde0348971a9584c21e7cb42e1f7004005432ab", + "sha256:90fcc66ad3be9f1757f954b750deb37032f208428aa12599fcb02182b9065a9c", + "sha256:5a8624bb7e76d1e6829f9c64c43185e02bc07f97a2189eb048609a8914e72c56", + "sha256:d349ff6b3afc6a2800054768c82bfbf4289c9aa5da55c1290f802943dcd4d1e9", + "sha256:8c064bb1f60e84fa8cc6079b6d2e76e0423389fd6aeb7e497dfdae5e05b2b25b", + "sha256:1111111111111111111111111111111111111111111111111111111111111111" + ] + } +} \ No newline at end of file diff --git a/internal/image/fixtures/oci1-all-media-types-to-schema2-config.json b/internal/image/fixtures/oci1-all-media-types-to-schema2-config.json new file mode 100644 index 0000000000..cd17d264cb --- /dev/null +++ b/internal/image/fixtures/oci1-all-media-types-to-schema2-config.json @@ -0,0 +1,161 @@ +{ + "architecture": "amd64", + "config": { + "Hostname": "383850eeb47b", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": false, + "AttachStderr": false, + "ExposedPorts": { + "80/tcp": {} + }, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": [ 
+ "PATH=/usr/local/apache2/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "HTTPD_PREFIX=/usr/local/apache2", + "HTTPD_VERSION=2.4.23", + "HTTPD_SHA1=5101be34ac4a509b245adb70a56690a84fcc4e7f", + "HTTPD_BZ2_URL=https://www.apache.org/dyn/closer.cgi?action=download\u0026filename=httpd/httpd-2.4.23.tar.bz2", + "HTTPD_ASC_URL=https://www.apache.org/dist/httpd/httpd-2.4.23.tar.bz2.asc" + ], + "Cmd": [ + "httpd-foreground" + ], + "ArgsEscaped": true, + "Image": "sha256:4f83530449c67c1ed8fca72583c5b92fdf446010990028c362a381e55dd84afd", + "Volumes": null, + "WorkingDir": "/usr/local/apache2", + "Entrypoint": null, + "OnBuild": [], + "Labels": {} + }, + "container": "8825acde1b009729807e4b70a65a89399dd8da8e53be9216b9aaabaff4339f69", + "container_config": { + "Hostname": "383850eeb47b", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": false, + "AttachStderr": false, + "ExposedPorts": { + "80/tcp": {} + }, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": [ + "PATH=/usr/local/apache2/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "HTTPD_PREFIX=/usr/local/apache2", + "HTTPD_VERSION=2.4.23", + "HTTPD_SHA1=5101be34ac4a509b245adb70a56690a84fcc4e7f", + "HTTPD_BZ2_URL=https://www.apache.org/dyn/closer.cgi?action=download\u0026filename=httpd/httpd-2.4.23.tar.bz2", + "HTTPD_ASC_URL=https://www.apache.org/dist/httpd/httpd-2.4.23.tar.bz2.asc" + ], + "Cmd": [ + "/bin/sh", + "-c", + "#(nop) ", + "CMD [\"httpd-foreground\"]" + ], + "ArgsEscaped": true, + "Image": "sha256:4f83530449c67c1ed8fca72583c5b92fdf446010990028c362a381e55dd84afd", + "Volumes": null, + "WorkingDir": "/usr/local/apache2", + "Entrypoint": null, + "OnBuild": [], + "Labels": {} + }, + "created": "2016-09-23T23:20:45.78976459Z", + "docker_version": "1.12.1", + "history": [ + { + "created": "2016-09-23T18:08:50.537223822Z", + "created_by": "/bin/sh -c #(nop) ADD file:c6c23585ab140b0b320d4e99bc1b0eb544c9e96c24d90fec5e069a6d57d335ca in / 
" + }, + { + "created": "2016-09-23T18:08:51.133779867Z", + "created_by": "/bin/sh -c #(nop) CMD [\"/bin/bash\"]", + "empty_layer": true + }, + { + "created": "2016-09-23T19:16:40.725768956Z", + "created_by": "/bin/sh -c #(nop) ENV HTTPD_PREFIX=/usr/local/apache2", + "empty_layer": true + }, + { + "created": "2016-09-23T19:16:41.037788416Z", + "created_by": "/bin/sh -c #(nop) ENV PATH=/usr/local/apache2/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "empty_layer": true + }, + { + "created": "2016-09-23T19:16:41.990121202Z", + "created_by": "/bin/sh -c mkdir -p \"$HTTPD_PREFIX\" \t\u0026\u0026 chown www-data:www-data \"$HTTPD_PREFIX\"" + }, + { + "created": "2016-09-23T19:16:42.339911155Z", + "created_by": "/bin/sh -c #(nop) WORKDIR /usr/local/apache2", + "empty_layer": true + }, + { + "created": "2016-09-23T19:16:54.948461741Z", + "created_by": "/bin/sh -c apt-get update \t\u0026\u0026 apt-get install -y --no-install-recommends \t\tlibapr1 \t\tlibaprutil1 \t\tlibaprutil1-ldap \t\tlibapr1-dev \t\tlibaprutil1-dev \t\tlibpcre++0 \t\tlibssl1.0.0 \t\u0026\u0026 rm -r /var/lib/apt/lists/*" + }, + { + "created": "2016-09-23T19:16:55.321573403Z", + "created_by": "/bin/sh -c #(nop) ENV HTTPD_VERSION=2.4.23", + "empty_layer": true + }, + { + "created": "2016-09-23T19:16:55.629947307Z", + "created_by": "/bin/sh -c #(nop) ENV HTTPD_SHA1=5101be34ac4a509b245adb70a56690a84fcc4e7f", + "empty_layer": true + }, + { + "created": "2016-09-23T23:19:03.705796801Z", + "created_by": "/bin/sh -c #(nop) ENV HTTPD_BZ2_URL=https://www.apache.org/dyn/closer.cgi?action=download\u0026filename=httpd/httpd-2.4.23.tar.bz2", + "empty_layer": true + }, + { + "created": "2016-09-23T23:19:04.009782822Z", + "created_by": "/bin/sh -c #(nop) ENV HTTPD_ASC_URL=https://www.apache.org/dist/httpd/httpd-2.4.23.tar.bz2.asc", + "empty_layer": true + }, + { + "created": "2016-09-23T23:20:44.585743332Z", + "created_by": "/bin/sh -c set -x \t\u0026\u0026 buildDeps=' \t\tbzip2 
\t\tca-certificates \t\tgcc \t\tlibpcre++-dev \t\tlibssl-dev \t\tmake \t\twget \t' \t\u0026\u0026 apt-get update \t\u0026\u0026 apt-get install -y --no-install-recommends $buildDeps \t\u0026\u0026 rm -r /var/lib/apt/lists/* \t\t\u0026\u0026 wget -O httpd.tar.bz2 \"$HTTPD_BZ2_URL\" \t\u0026\u0026 echo \"$HTTPD_SHA1 *httpd.tar.bz2\" | sha1sum -c - \t\u0026\u0026 wget -O httpd.tar.bz2.asc \"$HTTPD_ASC_URL\" \t\u0026\u0026 export GNUPGHOME=\"$(mktemp -d)\" \t\u0026\u0026 gpg --keyserver ha.pool.sks-keyservers.net --recv-keys A93D62ECC3C8EA12DB220EC934EA76E6791485A8 \t\u0026\u0026 gpg --batch --verify httpd.tar.bz2.asc httpd.tar.bz2 \t\u0026\u0026 rm -r \"$GNUPGHOME\" httpd.tar.bz2.asc \t\t\u0026\u0026 mkdir -p src \t\u0026\u0026 tar -xvf httpd.tar.bz2 -C src --strip-components=1 \t\u0026\u0026 rm httpd.tar.bz2 \t\u0026\u0026 cd src \t\t\u0026\u0026 ./configure \t\t--prefix=\"$HTTPD_PREFIX\" \t\t--enable-mods-shared=reallyall \t\u0026\u0026 make -j\"$(nproc)\" \t\u0026\u0026 make install \t\t\u0026\u0026 cd .. 
\t\u0026\u0026 rm -r src \t\t\u0026\u0026 sed -ri \t\t-e 's!^(\\s*CustomLog)\\s+\\S+!\\1 /proc/self/fd/1!g' \t\t-e 's!^(\\s*ErrorLog)\\s+\\S+!\\1 /proc/self/fd/2!g' \t\t\"$HTTPD_PREFIX/conf/httpd.conf\" \t\t\u0026\u0026 apt-get purge -y --auto-remove $buildDeps" + }, + { + "created": "2016-09-23T23:20:45.127455562Z", + "created_by": "/bin/sh -c #(nop) COPY file:761e313354b918b6cd7ea99975a4f6b53ff5381ba689bab2984aec4dab597215 in /usr/local/bin/ " + }, + { + "created": "2016-09-23T23:20:45.453934921Z", + "created_by": "/bin/sh -c #(nop) EXPOSE 80/tcp", + "empty_layer": true + }, + { + "created": "2016-09-23T23:20:45.78976459Z", + "created_by": "/bin/sh -c #(nop) CMD [\"httpd-foreground\"]", + "empty_layer": true + }, + { + "created": "2023-10-01T02:03:04.56789764Z", + "created_by": "/bin/sh echo something > last" + } + ], + "os": "linux", + "rootfs": { + "type": "layers", + "diff_ids": [ + "sha256:142a601d97936307e75220c35dde0348971a9584c21e7cb42e1f7004005432ab", + "sha256:90fcc66ad3be9f1757f954b750deb37032f208428aa12599fcb02182b9065a9c", + "sha256:5a8624bb7e76d1e6829f9c64c43185e02bc07f97a2189eb048609a8914e72c56", + "sha256:d349ff6b3afc6a2800054768c82bfbf4289c9aa5da55c1290f802943dcd4d1e9", + "sha256:8c064bb1f60e84fa8cc6079b6d2e76e0423389fd6aeb7e497dfdae5e05b2b25b", + "sha256:1111111111111111111111111111111111111111111111111111111111111111" + ] + } +} \ No newline at end of file diff --git a/internal/image/fixtures/oci1-all-media-types.json b/internal/image/fixtures/oci1-all-media-types.json index 9655392828..e92fe2cce8 100644 --- a/internal/image/fixtures/oci1-all-media-types.json +++ b/internal/image/fixtures/oci1-all-media-types.json @@ -4,7 +4,7 @@ "config": { "mediaType": "application/vnd.oci.image.config.v1+json", "size": 4651, - "digest": "sha256:a13a0762ab7bed51a1b49adec0a702b1cd99294fd460a025b465bcfb7b152745" + "digest": "sha256:94ac69e4413476d061116c9d05757e46a0afc744e8b9886f75cf7f6f14c78fb3" }, "layers": [ { @@ -28,7 +28,7 @@ "digest": 
"sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9" }, { - "mediaType": "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip", + "mediaType": "application/vnd.oci.image.layer.nondistributable.v1.tar+zstd", "size": 8841833, "digest": "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909" }, diff --git a/internal/image/fixtures/oci1-config-extra-fields.json b/internal/image/fixtures/oci1-config-extra-fields.json new file mode 100644 index 0000000000..1d670d590b --- /dev/null +++ b/internal/image/fixtures/oci1-config-extra-fields.json @@ -0,0 +1,158 @@ +{ + "extra-string-field": "string", + "extra-object": {"foo":"bar"}, + "architecture": "amd64", + "config": { + "Hostname": "383850eeb47b", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": false, + "AttachStderr": false, + "ExposedPorts": { + "80/tcp": {} + }, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": [ + "PATH=/usr/local/apache2/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "HTTPD_PREFIX=/usr/local/apache2", + "HTTPD_VERSION=2.4.23", + "HTTPD_SHA1=5101be34ac4a509b245adb70a56690a84fcc4e7f", + "HTTPD_BZ2_URL=https://www.apache.org/dyn/closer.cgi?action=download\u0026filename=httpd/httpd-2.4.23.tar.bz2", + "HTTPD_ASC_URL=https://www.apache.org/dist/httpd/httpd-2.4.23.tar.bz2.asc" + ], + "Cmd": [ + "httpd-foreground" + ], + "ArgsEscaped": true, + "Image": "sha256:4f83530449c67c1ed8fca72583c5b92fdf446010990028c362a381e55dd84afd", + "Volumes": null, + "WorkingDir": "/usr/local/apache2", + "Entrypoint": null, + "OnBuild": [], + "Labels": {} + }, + "container": "8825acde1b009729807e4b70a65a89399dd8da8e53be9216b9aaabaff4339f69", + "container_config": { + "Hostname": "383850eeb47b", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": false, + "AttachStderr": false, + "ExposedPorts": { + "80/tcp": {} + }, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": [ + 
"PATH=/usr/local/apache2/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "HTTPD_PREFIX=/usr/local/apache2", + "HTTPD_VERSION=2.4.23", + "HTTPD_SHA1=5101be34ac4a509b245adb70a56690a84fcc4e7f", + "HTTPD_BZ2_URL=https://www.apache.org/dyn/closer.cgi?action=download\u0026filename=httpd/httpd-2.4.23.tar.bz2", + "HTTPD_ASC_URL=https://www.apache.org/dist/httpd/httpd-2.4.23.tar.bz2.asc" + ], + "Cmd": [ + "/bin/sh", + "-c", + "#(nop) ", + "CMD [\"httpd-foreground\"]" + ], + "ArgsEscaped": true, + "Image": "sha256:4f83530449c67c1ed8fca72583c5b92fdf446010990028c362a381e55dd84afd", + "Volumes": null, + "WorkingDir": "/usr/local/apache2", + "Entrypoint": null, + "OnBuild": [], + "Labels": {} + }, + "created": "2016-09-23T23:20:45.78976459Z", + "docker_version": "1.12.1", + "history": [ + { + "created": "2016-09-23T18:08:50.537223822Z", + "created_by": "/bin/sh -c #(nop) ADD file:c6c23585ab140b0b320d4e99bc1b0eb544c9e96c24d90fec5e069a6d57d335ca in / " + }, + { + "created": "2016-09-23T18:08:51.133779867Z", + "created_by": "/bin/sh -c #(nop) CMD [\"/bin/bash\"]", + "empty_layer": true + }, + { + "created": "2016-09-23T19:16:40.725768956Z", + "created_by": "/bin/sh -c #(nop) ENV HTTPD_PREFIX=/usr/local/apache2", + "empty_layer": true + }, + { + "created": "2016-09-23T19:16:41.037788416Z", + "created_by": "/bin/sh -c #(nop) ENV PATH=/usr/local/apache2/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "empty_layer": true + }, + { + "created": "2016-09-23T19:16:41.990121202Z", + "created_by": "/bin/sh -c mkdir -p \"$HTTPD_PREFIX\" \t\u0026\u0026 chown www-data:www-data \"$HTTPD_PREFIX\"" + }, + { + "created": "2016-09-23T19:16:42.339911155Z", + "created_by": "/bin/sh -c #(nop) WORKDIR /usr/local/apache2", + "empty_layer": true + }, + { + "created": "2016-09-23T19:16:54.948461741Z", + "created_by": "/bin/sh -c apt-get update \t\u0026\u0026 apt-get install -y --no-install-recommends \t\tlibapr1 \t\tlibaprutil1 \t\tlibaprutil1-ldap \t\tlibapr1-dev 
\t\tlibaprutil1-dev \t\tlibpcre++0 \t\tlibssl1.0.0 \t\u0026\u0026 rm -r /var/lib/apt/lists/*" + }, + { + "created": "2016-09-23T19:16:55.321573403Z", + "created_by": "/bin/sh -c #(nop) ENV HTTPD_VERSION=2.4.23", + "empty_layer": true + }, + { + "created": "2016-09-23T19:16:55.629947307Z", + "created_by": "/bin/sh -c #(nop) ENV HTTPD_SHA1=5101be34ac4a509b245adb70a56690a84fcc4e7f", + "empty_layer": true + }, + { + "created": "2016-09-23T23:19:03.705796801Z", + "created_by": "/bin/sh -c #(nop) ENV HTTPD_BZ2_URL=https://www.apache.org/dyn/closer.cgi?action=download\u0026filename=httpd/httpd-2.4.23.tar.bz2", + "empty_layer": true + }, + { + "created": "2016-09-23T23:19:04.009782822Z", + "created_by": "/bin/sh -c #(nop) ENV HTTPD_ASC_URL=https://www.apache.org/dist/httpd/httpd-2.4.23.tar.bz2.asc", + "empty_layer": true + }, + { + "created": "2016-09-23T23:20:44.585743332Z", + "created_by": "/bin/sh -c set -x \t\u0026\u0026 buildDeps=' \t\tbzip2 \t\tca-certificates \t\tgcc \t\tlibpcre++-dev \t\tlibssl-dev \t\tmake \t\twget \t' \t\u0026\u0026 apt-get update \t\u0026\u0026 apt-get install -y --no-install-recommends $buildDeps \t\u0026\u0026 rm -r /var/lib/apt/lists/* \t\t\u0026\u0026 wget -O httpd.tar.bz2 \"$HTTPD_BZ2_URL\" \t\u0026\u0026 echo \"$HTTPD_SHA1 *httpd.tar.bz2\" | sha1sum -c - \t\u0026\u0026 wget -O httpd.tar.bz2.asc \"$HTTPD_ASC_URL\" \t\u0026\u0026 export GNUPGHOME=\"$(mktemp -d)\" \t\u0026\u0026 gpg --keyserver ha.pool.sks-keyservers.net --recv-keys A93D62ECC3C8EA12DB220EC934EA76E6791485A8 \t\u0026\u0026 gpg --batch --verify httpd.tar.bz2.asc httpd.tar.bz2 \t\u0026\u0026 rm -r \"$GNUPGHOME\" httpd.tar.bz2.asc \t\t\u0026\u0026 mkdir -p src \t\u0026\u0026 tar -xvf httpd.tar.bz2 -C src --strip-components=1 \t\u0026\u0026 rm httpd.tar.bz2 \t\u0026\u0026 cd src \t\t\u0026\u0026 ./configure \t\t--prefix=\"$HTTPD_PREFIX\" \t\t--enable-mods-shared=reallyall \t\u0026\u0026 make -j\"$(nproc)\" \t\u0026\u0026 make install \t\t\u0026\u0026 cd .. 
\t\u0026\u0026 rm -r src \t\t\u0026\u0026 sed -ri \t\t-e 's!^(\\s*CustomLog)\\s+\\S+!\\1 /proc/self/fd/1!g' \t\t-e 's!^(\\s*ErrorLog)\\s+\\S+!\\1 /proc/self/fd/2!g' \t\t\"$HTTPD_PREFIX/conf/httpd.conf\" \t\t\u0026\u0026 apt-get purge -y --auto-remove $buildDeps" + }, + { + "created": "2016-09-23T23:20:45.127455562Z", + "created_by": "/bin/sh -c #(nop) COPY file:761e313354b918b6cd7ea99975a4f6b53ff5381ba689bab2984aec4dab597215 in /usr/local/bin/ " + }, + { + "created": "2016-09-23T23:20:45.453934921Z", + "created_by": "/bin/sh -c #(nop) EXPOSE 80/tcp", + "empty_layer": true + }, + { + "created": "2016-09-23T23:20:45.78976459Z", + "created_by": "/bin/sh -c #(nop) CMD [\"httpd-foreground\"]", + "empty_layer": true + } + ], + "os": "linux", + "rootfs": { + "type": "layers", + "diff_ids": [ + "sha256:142a601d97936307e75220c35dde0348971a9584c21e7cb42e1f7004005432ab", + "sha256:90fcc66ad3be9f1757f954b750deb37032f208428aa12599fcb02182b9065a9c", + "sha256:5a8624bb7e76d1e6829f9c64c43185e02bc07f97a2189eb048609a8914e72c56", + "sha256:d349ff6b3afc6a2800054768c82bfbf4289c9aa5da55c1290f802943dcd4d1e9", + "sha256:8c064bb1f60e84fa8cc6079b6d2e76e0423389fd6aeb7e497dfdae5e05b2b25b" + ] + } +} \ No newline at end of file diff --git a/internal/image/fixtures/oci1-extra-config-fields.json b/internal/image/fixtures/oci1-extra-config-fields.json new file mode 100644 index 0000000000..b297f4abcc --- /dev/null +++ b/internal/image/fixtures/oci1-extra-config-fields.json @@ -0,0 +1,43 @@ +{ + "schemaVersion": 2, + "mediaType": "application/vnd.oci.image.manifest.v1+json", + "config": { + "mediaType": "application/vnd.oci.image.config.v1+json", + "size": 7693, + "digest": "sha256:7f2a783ee2f07826b1856e68a40c930cd0430d6e7d4a88c29c2c8b7718706e74", + "annotations": { + "test-annotation-1": "one" + } + }, + "layers": [ + { + "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip", + "size": 51354364, + "digest": "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb" + }, + { + 
"mediaType": "application/vnd.oci.image.layer.v1.tar+gzip", + "size": 150, + "digest": "sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c" + }, + { + "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip", + "size": 11739507, + "digest": "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9", + "urls": ["https://layer.url"] + }, + { + "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip", + "size": 8841833, + "digest": "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909", + "annotations": { + "test-annotation-2": "two" + } + }, + { + "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip", + "size": 291, + "digest": "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa" + } + ] +} diff --git a/internal/image/fixtures/oci1.encrypted.json b/internal/image/fixtures/oci1.encrypted.json new file mode 100644 index 0000000000..c6c523e70b --- /dev/null +++ b/internal/image/fixtures/oci1.encrypted.json @@ -0,0 +1,43 @@ +{ + "schemaVersion": 2, + "mediaType": "application/vnd.oci.image.manifest.v1+json", + "config": { + "mediaType": "application/vnd.oci.image.config.v1+json", + "size": 5940, + "digest": "sha256:9ca4bda0a6b3727a6ffcc43e981cad0f24e2ec79d338f6ba325b4dfd0756fb8f", + "annotations": { + "test-annotation-1": "one" + } + }, + "layers": [ + { + "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip+encrypted", + "size": 51354364, + "digest": "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + }, + { + "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip+encrypted", + "size": 150, + "digest": "sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + }, + { + "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip+encrypted", + "size": 11739507, + "digest": "sha256:cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc", + "urls": ["https://layer.url"] + }, + { + "mediaType": 
"application/vnd.oci.image.layer.v1.tar+gzip+encrypted", + "size": 8841833, + "digest": "sha256:dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd", + "annotations": { + "test-annotation-2": "two" + } + }, + { + "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip+encrypted", + "size": 291, + "digest": "sha256:eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee" + } + ] +} diff --git a/internal/image/oci.go b/internal/image/oci.go index 166daa0e87..df0e8e4171 100644 --- a/internal/image/oci.go +++ b/internal/image/oci.go @@ -12,8 +12,10 @@ import ( "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/pkg/blobinfocache/none" "github.com/containers/image/v5/types" + ociencspec "github.com/containers/ocicrypt/spec" "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "golang.org/x/exp/slices" ) type manifestOCI1 struct { @@ -86,7 +88,7 @@ func (m *manifestOCI1) ConfigBlob(ctx context.Context) ([]byte, error) { // old image manifests work (docker v2s1 especially). func (m *manifestOCI1) OCIConfig(ctx context.Context) (*imgspecv1.Image, error) { if m.m.Config.MediaType != imgspecv1.MediaTypeImageConfig { - return nil, internalManifest.NewNonImageArtifactError(m.m.Config.MediaType) + return nil, internalManifest.NewNonImageArtifactError(&m.m.Manifest) } cb, err := m.ConfigBlob(ctx) @@ -194,26 +196,86 @@ func (m *manifestOCI1) convertToManifestSchema2Generic(ctx context.Context, opti return m.convertToManifestSchema2(ctx, options) } +// layerEditsOfOCIOnlyFeatures checks if options requires some layer edits to be done before converting to a Docker format. +// If not, it returns (nil, nil). +// If decryption is required, it returns a set of edits to provide to OCI1.UpdateLayerInfos, +// and edits *options to not try decryption again. 
+func (m *manifestOCI1) layerEditsOfOCIOnlyFeatures(options *types.ManifestUpdateOptions) ([]types.BlobInfo, error) { + if options == nil || options.LayerInfos == nil { + return nil, nil + } + + originalInfos := m.LayerInfos() + if len(originalInfos) != len(options.LayerInfos) { + return nil, fmt.Errorf("preparing to decrypt before conversion: %d layers vs. %d layer edits", len(originalInfos), len(options.LayerInfos)) + } + + ociOnlyEdits := slices.Clone(originalInfos) // Start with a full copy so that we don't forget to copy anything: use the current data in full unless we intentionally deviate. + laterEdits := slices.Clone(options.LayerInfos) + needsOCIOnlyEdits := false + for i, edit := range options.LayerInfos { + // Unless determined otherwise, don't do any compression-related MIME type conversions. m.LayerInfos() should not set these edit instructions, but be explicit. + ociOnlyEdits[i].CompressionOperation = types.PreserveOriginal + ociOnlyEdits[i].CompressionAlgorithm = nil + + if edit.CryptoOperation == types.Decrypt { + needsOCIOnlyEdits = true // Encrypted types must be removed before conversion because they can’t be represented in Docker schemas + ociOnlyEdits[i].CryptoOperation = types.Decrypt + laterEdits[i].CryptoOperation = types.PreserveOriginalCrypto // Don't try to decrypt in a schema[12] manifest later, that would fail. + } + + if originalInfos[i].MediaType == imgspecv1.MediaTypeImageLayerZstd || + originalInfos[i].MediaType == imgspecv1.MediaTypeImageLayerNonDistributableZstd { //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images. + needsOCIOnlyEdits = true // Zstd MIME types must be removed before conversion because they can’t be represented in Docker schemas. 
+ ociOnlyEdits[i].CompressionOperation = edit.CompressionOperation + ociOnlyEdits[i].CompressionAlgorithm = edit.CompressionAlgorithm + laterEdits[i].CompressionOperation = types.PreserveOriginal + laterEdits[i].CompressionAlgorithm = nil + } + } + if !needsOCIOnlyEdits { + return nil, nil + } + + options.LayerInfos = laterEdits + return ociOnlyEdits, nil +} + // convertToManifestSchema2 returns a genericManifest implementation converted to manifest.DockerV2Schema2MediaType. // It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned // value. // This does not change the state of the original manifestOCI1 object. -func (m *manifestOCI1) convertToManifestSchema2(_ context.Context, _ *types.ManifestUpdateOptions) (*manifestSchema2, error) { +func (m *manifestOCI1) convertToManifestSchema2(_ context.Context, options *types.ManifestUpdateOptions) (*manifestSchema2, error) { if m.m.Config.MediaType != imgspecv1.MediaTypeImageConfig { - return nil, internalManifest.NewNonImageArtifactError(m.m.Config.MediaType) + return nil, internalManifest.NewNonImageArtifactError(&m.m.Manifest) + } + + // Mostly we first make a format conversion, and _afterwards_ do layer edits. But first we need to do the layer edits + // which remove OCI-specific features, because trying to convert those layers would fail. + // So, do the layer updates for decryption, and for conversions from Zstd. + ociManifest := m.m + ociOnlyEdits, err := m.layerEditsOfOCIOnlyFeatures(options) + if err != nil { + return nil, err + } + if ociOnlyEdits != nil { + ociManifest = manifest.OCI1Clone(ociManifest) + if err := ociManifest.UpdateLayerInfos(ociOnlyEdits); err != nil { + return nil, err + } } // Create a copy of the descriptor. 
- config := schema2DescriptorFromOCI1Descriptor(m.m.Config) + config := schema2DescriptorFromOCI1Descriptor(ociManifest.Config) // Above, we have already checked that this manifest refers to an image, not an OCI artifact, // so the only difference between OCI and DockerSchema2 is the mediatypes. The // media type of the manifest is handled by manifestSchema2FromComponents. config.MediaType = manifest.DockerV2Schema2ConfigMediaType - layers := make([]manifest.Schema2Descriptor, len(m.m.Layers)) + layers := make([]manifest.Schema2Descriptor, len(ociManifest.Layers)) for idx := range layers { - layers[idx] = schema2DescriptorFromOCI1Descriptor(m.m.Layers[idx]) + layers[idx] = schema2DescriptorFromOCI1Descriptor(ociManifest.Layers[idx]) switch layers[idx].MediaType { case imgspecv1.MediaTypeImageLayerNonDistributable: //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images. layers[idx].MediaType = manifest.DockerV2Schema2ForeignLayerMediaType @@ -227,6 +289,9 @@ func (m *manifestOCI1) convertToManifestSchema2(_ context.Context, _ *types.Mani layers[idx].MediaType = manifest.DockerV2Schema2LayerMediaType case imgspecv1.MediaTypeImageLayerZstd: return nil, fmt.Errorf("Error during manifest conversion: %q: zstd compression is not supported for docker images", layers[idx].MediaType) + case ociencspec.MediaTypeLayerEnc, ociencspec.MediaTypeLayerGzipEnc, ociencspec.MediaTypeLayerZstdEnc, + ociencspec.MediaTypeLayerNonDistributableEnc, ociencspec.MediaTypeLayerNonDistributableGzipEnc, ociencspec.MediaTypeLayerNonDistributableZstdEnc: + return nil, fmt.Errorf("during manifest conversion: encrypted layers (%q) are not supported in docker images", layers[idx].MediaType) default: return nil, fmt.Errorf("Unknown media type during manifest conversion: %q", layers[idx].MediaType) } @@ -244,7 +309,7 @@ func (m *manifestOCI1) convertToManifestSchema2(_ context.Context, _ *types.Mani // This does not change the 
state of the original manifestOCI1 object. func (m *manifestOCI1) convertToManifestSchema1(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) { if m.m.Config.MediaType != imgspecv1.MediaTypeImageConfig { - return nil, internalManifest.NewNonImageArtifactError(m.m.Config.MediaType) + return nil, internalManifest.NewNonImageArtifactError(&m.m.Manifest) } // We can't directly convert images to V1, but we can transitively convert via a V2 image diff --git a/internal/image/oci_test.go b/internal/image/oci_test.go index 84398dcf34..0ac22f34bf 100644 --- a/internal/image/oci_test.go +++ b/internal/image/oci_test.go @@ -14,10 +14,14 @@ import ( "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/internal/testing/mocks" "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/pkg/compression" + compressiontypes "github.com/containers/image/v5/pkg/compression/types" "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "golang.org/x/exp/slices" ) func manifestOCI1FromFixture(t *testing.T, src types.ImageSource, fixture string) genericManifest { @@ -29,47 +33,62 @@ func manifestOCI1FromFixture(t *testing.T, src types.ImageSource, fixture string return m } +var layerDescriptorsLikeFixture = []imgspecv1.Descriptor{ + { + MediaType: imgspecv1.MediaTypeImageLayerGzip, + Digest: "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb", + Size: 51354364, + }, + { + MediaType: imgspecv1.MediaTypeImageLayerGzip, + Digest: "sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c", + Size: 150, + }, + { + MediaType: imgspecv1.MediaTypeImageLayerGzip, + Digest: "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9", + Size: 11739507, + URLs: []string{ + "https://layer.url", + }, + }, + { + 
MediaType: imgspecv1.MediaTypeImageLayerGzip, + Digest: "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909", + Size: 8841833, + Annotations: map[string]string{ + "test-annotation-2": "two", + }, + }, + { + MediaType: imgspecv1.MediaTypeImageLayerGzip, + Digest: "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa", + Size: 291, + }, +} + func manifestOCI1FromComponentsLikeFixture(configBlob []byte) genericManifest { return manifestOCI1FromComponents(imgspecv1.Descriptor{ MediaType: imgspecv1.MediaTypeImageConfig, Size: 5940, - Digest: "sha256:9ca4bda0a6b3727a6ffcc43e981cad0f24e2ec79d338f6ba325b4dfd0756fb8f", + Digest: commonFixtureConfigDigest, Annotations: map[string]string{ "test-annotation-1": "one", }, - }, nil, configBlob, []imgspecv1.Descriptor{ - { - MediaType: imgspecv1.MediaTypeImageLayerGzip, - Digest: "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb", - Size: 51354364, - }, - { - MediaType: imgspecv1.MediaTypeImageLayerGzip, - Digest: "sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c", - Size: 150, - }, - { - MediaType: imgspecv1.MediaTypeImageLayerGzip, - Digest: "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9", - Size: 11739507, - URLs: []string{ - "https://layer.url", - }, - }, - { - MediaType: imgspecv1.MediaTypeImageLayerGzip, - Digest: "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909", - Size: 8841833, - Annotations: map[string]string{ - "test-annotation-2": "two", - }, - }, - { - MediaType: imgspecv1.MediaTypeImageLayerGzip, - Digest: "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa", - Size: 291, + }, nil, configBlob, layerDescriptorsLikeFixture) +} + +func manifestOCI1FromComponentsWithExtraConfigFields(t *testing.T, src types.ImageSource) genericManifest { + configJSON, err := os.ReadFile("fixtures/oci1-config-extra-fields.json") + require.NoError(t, err) + return 
manifestOCI1FromComponents(imgspecv1.Descriptor{ + MediaType: imgspecv1.MediaTypeImageConfig, + Size: 7693, + Digest: "sha256:7f2a783ee2f07826b1856e68a40c930cd0430d6e7d4a88c29c2c8b7718706e74", + Annotations: map[string]string{ + "test-annotation-1": "one", }, - }) + }, src, configJSON, layerDescriptorsLikeFixture) } func TestManifestOCI1FromManifest(t *testing.T) { @@ -117,7 +136,7 @@ func TestManifestOCI1ConfigInfo(t *testing.T) { } { assert.Equal(t, types.BlobInfo{ Size: 5940, - Digest: "sha256:9ca4bda0a6b3727a6ffcc43e981cad0f24e2ec79d338f6ba325b4dfd0756fb8f", + Digest: commonFixtureConfigDigest, Annotations: map[string]string{ "test-annotation-1": "one", }, @@ -156,7 +175,10 @@ func TestManifestOCI1ConfigBlob(t *testing.T) { } { var src types.ImageSource if c.cbISfn != nil { - src = configBlobImageSource{f: c.cbISfn} + src = configBlobImageSource{ + expectedDigest: commonFixtureConfigDigest, + f: c.cbISfn, + } } else { src = nil } @@ -190,7 +212,7 @@ func TestManifestOCI1OCIConfig(t *testing.T) { err = json.Unmarshal(configJSON, &expectedConfig) require.NoError(t, err) - originalSrc := newOCI1ImageSource(t, "httpd:latest") + originalSrc := newOCI1ImageSource(t, "oci1-config.json", "httpd:latest") for _, m := range []genericManifest{ manifestOCI1FromFixture(t, originalSrc, "oci1.json"), manifestOCI1FromComponentsLikeFixture(configJSON), @@ -200,6 +222,19 @@ func TestManifestOCI1OCIConfig(t *testing.T) { assert.Equal(t, &expectedConfig, config) } + // “Any extra fields in the Image JSON struct are considered implementation specific + // and MUST NOT generate an error by any implementations which are unable to interpret them.” + // oci1-config-extra-fields.json is the same as oci1-config.json, apart from a few added fields. 
+ srcWithExtraFields := newOCI1ImageSource(t, "oci1-config-extra-fields.json", "httpd:latest") + for _, m := range []genericManifest{ + manifestOCI1FromFixture(t, srcWithExtraFields, "oci1-extra-config-fields.json"), + manifestOCI1FromComponentsWithExtraConfigFields(t, srcWithExtraFields), + } { + config, err := m.OCIConfig(context.Background()) + require.NoError(t, err) + assert.Equal(t, &expectedConfig, config) + } + // This can share originalSrc because the config digest is the same between oci1-artifact.json and oci1.json artifact := manifestOCI1FromFixture(t, originalSrc, "oci1-artifact.json") _, err = artifact.OCIConfig(context.Background()) @@ -263,67 +298,75 @@ func TestManifestOCI1EmbeddedDockerReferenceConflicts(t *testing.T) { } func TestManifestOCI1Inspect(t *testing.T) { - configJSON, err := os.ReadFile("fixtures/oci1-config.json") - require.NoError(t, err) var emptyAnnotations map[string]string - m := manifestOCI1FromComponentsLikeFixture(configJSON) - ii, err := m.Inspect(context.Background()) - require.NoError(t, err) created := time.Date(2016, 9, 23, 23, 20, 45, 789764590, time.UTC) - assert.Equal(t, types.ImageInspectInfo{ - Tag: "", - Created: &created, - DockerVersion: "1.12.1", - Labels: map[string]string{}, - Architecture: "amd64", - Os: "linux", - Layers: []string{ - "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb", - "sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c", - "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9", - "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909", - "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa", - }, - LayersData: []types.ImageInspectLayer{{ - MIMEType: "application/vnd.oci.image.layer.v1.tar+gzip", - Digest: "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb", - Size: 51354364, - Annotations: emptyAnnotations, - }, { - MIMEType: 
"application/vnd.oci.image.layer.v1.tar+gzip", - Digest: "sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c", - Size: 150, - Annotations: emptyAnnotations, - }, { - MIMEType: "application/vnd.oci.image.layer.v1.tar+gzip", - Digest: "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9", - Size: 11739507, - Annotations: emptyAnnotations, - }, { - MIMEType: "application/vnd.oci.image.layer.v1.tar+gzip", - Digest: "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909", - Size: 8841833, - Annotations: map[string]string{"test-annotation-2": "two"}, - }, { - MIMEType: "application/vnd.oci.image.layer.v1.tar+gzip", - Digest: "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa", - Size: 291, - Annotations: emptyAnnotations, - }, - }, - Author: "", - Env: []string{ - "PATH=/usr/local/apache2/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", - "HTTPD_PREFIX=/usr/local/apache2", - "HTTPD_VERSION=2.4.23", - "HTTPD_SHA1=5101be34ac4a509b245adb70a56690a84fcc4e7f", - "HTTPD_BZ2_URL=https://www.apache.org/dyn/closer.cgi?action=download&filename=httpd/httpd-2.4.23.tar.bz2", - "HTTPD_ASC_URL=https://www.apache.org/dist/httpd/httpd-2.4.23.tar.bz2.asc", - }, - }, *ii) + + configJSON, err := os.ReadFile("fixtures/oci1-config.json") + require.NoError(t, err) + for _, m := range []genericManifest{ + manifestOCI1FromComponentsLikeFixture(configJSON), + // “Any extra fields in the Image JSON struct are considered implementation specific + // and MUST NOT generate an error by any implementations which are unable to interpret them.” + // oci1-config-extra-fields.json is the same as oci1-config.json, apart from a few added fields. 
+ manifestOCI1FromComponentsWithExtraConfigFields(t, nil), + } { + ii, err := m.Inspect(context.Background()) + require.NoError(t, err) + assert.Equal(t, types.ImageInspectInfo{ + Tag: "", + Created: &created, + DockerVersion: "1.12.1", + Labels: map[string]string{}, + Architecture: "amd64", + Os: "linux", + Layers: []string{ + "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb", + "sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c", + "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9", + "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909", + "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa", + }, + LayersData: []types.ImageInspectLayer{{ + MIMEType: "application/vnd.oci.image.layer.v1.tar+gzip", + Digest: "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb", + Size: 51354364, + Annotations: emptyAnnotations, + }, { + MIMEType: "application/vnd.oci.image.layer.v1.tar+gzip", + Digest: "sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c", + Size: 150, + Annotations: emptyAnnotations, + }, { + MIMEType: "application/vnd.oci.image.layer.v1.tar+gzip", + Digest: "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9", + Size: 11739507, + Annotations: emptyAnnotations, + }, { + MIMEType: "application/vnd.oci.image.layer.v1.tar+gzip", + Digest: "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909", + Size: 8841833, + Annotations: map[string]string{"test-annotation-2": "two"}, + }, { + MIMEType: "application/vnd.oci.image.layer.v1.tar+gzip", + Digest: "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa", + Size: 291, + Annotations: emptyAnnotations, + }, + }, + Author: "", + Env: []string{ + "PATH=/usr/local/apache2/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "HTTPD_PREFIX=/usr/local/apache2", + "HTTPD_VERSION=2.4.23", + 
"HTTPD_SHA1=5101be34ac4a509b245adb70a56690a84fcc4e7f", + "HTTPD_BZ2_URL=https://www.apache.org/dyn/closer.cgi?action=download&filename=httpd/httpd-2.4.23.tar.bz2", + "HTTPD_ASC_URL=https://www.apache.org/dist/httpd/httpd-2.4.23.tar.bz2.asc", + }, + }, *ii) + } // nil configBlob will trigger an error in m.ConfigBlob() - m = manifestOCI1FromComponentsLikeFixture(nil) + m := manifestOCI1FromComponentsLikeFixture(nil) _, err = m.Inspect(context.Background()) assert.Error(t, err) @@ -353,8 +396,8 @@ func (OCIis *oci1ImageSource) Reference() types.ImageReference { return refImageReferenceMock{ref: OCIis.ref} } -func newOCI1ImageSource(t *testing.T, dockerRef string) *oci1ImageSource { - realConfigJSON, err := os.ReadFile("fixtures/oci1-config.json") +func newOCI1ImageSource(t *testing.T, configFixture string, dockerRef string) *oci1ImageSource { + realConfigJSON, err := os.ReadFile(filepath.Join("fixtures", configFixture)) require.NoError(t, err) ref, err := reference.ParseNormalizedNamed(dockerRef) @@ -362,6 +405,7 @@ func newOCI1ImageSource(t *testing.T, dockerRef string) *oci1ImageSource { return &oci1ImageSource{ configBlobImageSource: configBlobImageSource{ + expectedDigest: digest.FromBytes(realConfigJSON), f: func() (io.ReadCloser, int64, error) { return io.NopCloser(bytes.NewReader(realConfigJSON)), int64(len(realConfigJSON)), nil }, @@ -371,11 +415,11 @@ func newOCI1ImageSource(t *testing.T, dockerRef string) *oci1ImageSource { } func TestManifestOCI1UpdatedImage(t *testing.T) { - originalSrc := newOCI1ImageSource(t, "httpd:latest") + originalSrc := newOCI1ImageSource(t, "oci1-config.json", "httpd:latest") original := manifestOCI1FromFixture(t, originalSrc, "oci1.json") // LayerInfos: - layerInfos := append(original.LayerInfos()[1:], original.LayerInfos()[0]) + layerInfos := append(slices.Clone(original.LayerInfos()[1:]), original.LayerInfos()[0]) res, err := original.UpdatedImage(context.Background(), types.ManifestUpdateOptions{ LayerInfos: layerInfos, }) @@ 
-422,7 +466,7 @@ func TestManifestOCI1UpdatedImage(t *testing.T) { assert.Error(t, err, mime) } - // m hasn’t been changed: + // original hasn’t been changed: m2 := manifestOCI1FromFixture(t, originalSrc, "oci1.json") typedOriginal, ok := original.(*manifestOCI1) require.True(t, ok) @@ -431,17 +475,34 @@ assert.Equal(t, *typedM2, *typedOriginal) } +// successfulOCI1Conversion verifies that an edit of original with edits succeeds, and original continues to match originalClone. +// It returns the resulting image, for more checks +func successfulOCI1Conversion(t *testing.T, original genericManifest, originalClone genericManifest, + edits types.ManifestUpdateOptions) types.Image { + res, err := original.UpdatedImage(context.Background(), edits) + require.NoError(t, err) + + // original = the source Image implementation hasn’t been changed by the edits + typedOriginal, ok := original.(*manifestOCI1) + require.True(t, ok) + typedOriginalClone, ok := originalClone.(*manifestOCI1) + require.True(t, ok) + assert.Equal(t, *typedOriginalClone, *typedOriginal) + + return res +} + func TestManifestOCI1ConvertToManifestSchema1(t *testing.T) { - originalSrc := newOCI1ImageSource(t, "httpd-copy:latest") + originalSrc := newOCI1ImageSource(t, "oci1-config.json", "httpd-copy:latest") original := manifestOCI1FromFixture(t, originalSrc, "oci1.json") + original2 := manifestOCI1FromFixture(t, originalSrc, "oci1.json") memoryDest := &memoryImageDest{ref: originalSrc.ref} - res, err := original.UpdatedImage(context.Background(), types.ManifestUpdateOptions{ + res := successfulOCI1Conversion(t, original, original2, types.ManifestUpdateOptions{ ManifestMIMEType: manifest.DockerV2Schema1SignedMediaType, InformationOnly: types.ManifestUpdateInformation{ Destination: memoryDest, }, }) - require.NoError(t, err) convertedJSON, mt, err := res.Manifest(context.Background()) require.NoError(t, err) @@ -453,14 +514,13 @@ func 
TestManifestOCI1ConvertToManifestSchema1(t *testing.T) { // Conversion to schema1 together with changing LayerInfos works as expected (which requires // handling schema1 empty layers): updatedLayers, updatedLayersCopy := modifiedLayerInfos(t, original.LayerInfos()) - res, err = original.UpdatedImage(context.Background(), types.ManifestUpdateOptions{ + res = successfulOCI1Conversion(t, original, original2, types.ManifestUpdateOptions{ LayerInfos: updatedLayers, ManifestMIMEType: manifest.DockerV2Schema1SignedMediaType, InformationOnly: types.ManifestUpdateInformation{ Destination: memoryDest, }, }) - require.NoError(t, err) assert.Equal(t, updatedLayersCopy, updatedLayers) // updatedLayers have not been modified in place convertedJSON, mt, err = res.Manifest(context.Background()) require.NoError(t, err) @@ -497,16 +557,128 @@ func TestManifestOCI1ConvertToManifestSchema1(t *testing.T) { var expected manifest.NonImageArtifactError assert.ErrorAs(t, err, &expected) + // Conversion of an encrypted image fails + encrypted := manifestOCI1FromFixture(t, originalSrc, "oci1.encrypted.json") + encrypted2 := manifestOCI1FromFixture(t, originalSrc, "oci1.encrypted.json") + _, err = encrypted.UpdatedImage(context.Background(), types.ManifestUpdateOptions{ + ManifestMIMEType: manifest.DockerV2Schema1SignedMediaType, + InformationOnly: types.ManifestUpdateInformation{ + Destination: memoryDest, + }, + }) + assert.Error(t, err) + + // Conversion to schema1 with encryption fails + _, err = original.UpdatedImage(context.Background(), types.ManifestUpdateOptions{ + LayerInfos: layerInfosWithCryptoOperation(original.LayerInfos(), types.Encrypt), + ManifestMIMEType: manifest.DockerV2Schema1SignedMediaType, + InformationOnly: types.ManifestUpdateInformation{ + Destination: memoryDest, + }, + }) + assert.Error(t, err) + + // Conversion to schema1 with simultaneous decryption is possible + updatedLayers = layerInfosWithCryptoOperation(encrypted.LayerInfos(), types.Decrypt) + 
updatedLayersCopy = slices.Clone(updatedLayers) + res = successfulOCI1Conversion(t, encrypted, encrypted2, types.ManifestUpdateOptions{ + LayerInfos: updatedLayers, + ManifestMIMEType: manifest.DockerV2Schema1SignedMediaType, + InformationOnly: types.ManifestUpdateInformation{ + Destination: memoryDest, + }, + }) + assert.Equal(t, updatedLayersCopy, updatedLayers) // updatedLayers have not been modified in place + convertedJSON, mt, err = res.Manifest(context.Background()) + require.NoError(t, err) + assert.Equal(t, manifest.DockerV2Schema1SignedMediaType, mt) + // Layers have been updated as expected + s1Manifest, err = manifestSchema1FromManifest(convertedJSON) + require.NoError(t, err) + assert.Equal(t, []types.BlobInfo{ + {Digest: "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", Size: -1}, + {Digest: GzippedEmptyLayerDigest, Size: -1}, + {Digest: GzippedEmptyLayerDigest, Size: -1}, + {Digest: GzippedEmptyLayerDigest, Size: -1}, + {Digest: "sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", Size: -1}, + {Digest: GzippedEmptyLayerDigest, Size: -1}, + {Digest: "sha256:cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc", Size: -1}, + {Digest: GzippedEmptyLayerDigest, Size: -1}, + {Digest: GzippedEmptyLayerDigest, Size: -1}, + {Digest: GzippedEmptyLayerDigest, Size: -1}, + {Digest: GzippedEmptyLayerDigest, Size: -1}, + {Digest: "sha256:dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd", Size: -1}, + {Digest: "sha256:eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee", Size: -1}, + {Digest: GzippedEmptyLayerDigest, Size: -1}, + {Digest: GzippedEmptyLayerDigest, Size: -1}, + }, s1Manifest.LayerInfos()) + + // Conversion to schema1 of an image with Zstd layers fails + mixedSrc := newOCI1ImageSource(t, "oci1-all-media-types-config.json", "httpd-copy:latest") + mixedImage := manifestOCI1FromFixture(t, mixedSrc, "oci1-all-media-types.json") + mixedImage2 := 
manifestOCI1FromFixture(t, mixedSrc, "oci1-all-media-types.json") + _, err = mixedImage.UpdatedImage(context.Background(), types.ManifestUpdateOptions{ + ManifestMIMEType: manifest.DockerV2Schema1SignedMediaType, + InformationOnly: types.ManifestUpdateInformation{ + Destination: memoryDest, + }, + }) + assert.Error(t, err) // zstd compression is not supported for docker images + + // Conversion to schema1 of an image with Zstd layers, while editing layers to be uncompressed, or gzip-compressed, is possible. + for _, c := range []struct { + op types.LayerCompression + algo *compressiontypes.Algorithm + }{ + {types.Decompress, nil}, + {types.PreserveOriginal, &compression.Gzip}, + } { + updatedLayers = layerInfosWithCompressionEdits(mixedImage.LayerInfos(), c.op, c.algo) + updatedLayersCopy = slices.Clone(updatedLayers) + res = successfulOCI1Conversion(t, mixedImage, mixedImage2, types.ManifestUpdateOptions{ + LayerInfos: updatedLayers, + ManifestMIMEType: manifest.DockerV2Schema1SignedMediaType, + InformationOnly: types.ManifestUpdateInformation{ + Destination: memoryDest, + }, + }) + assert.Equal(t, updatedLayersCopy, updatedLayers) // updatedLayers have not been modified in place + convertedJSON, mt, err = res.Manifest(context.Background()) + require.NoError(t, err) + assert.Equal(t, manifest.DockerV2Schema1SignedMediaType, mt) + s1Manifest, err = manifestSchema1FromManifest(convertedJSON) + require.NoError(t, err) + // The schema1 data does not contain a MIME type (and we don’t update the digests), so both loop iterations look the same here + assert.Equal(t, []types.BlobInfo{ + {Digest: "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb", Size: -1}, + {Digest: GzippedEmptyLayerDigest, Size: -1}, + {Digest: GzippedEmptyLayerDigest, Size: -1}, + {Digest: GzippedEmptyLayerDigest, Size: -1}, + {Digest: "sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c", Size: -1}, + {Digest: GzippedEmptyLayerDigest, Size: -1}, + {Digest: 
"sha256:2bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c", Size: -1}, + {Digest: GzippedEmptyLayerDigest, Size: -1}, + {Digest: GzippedEmptyLayerDigest, Size: -1}, + {Digest: GzippedEmptyLayerDigest, Size: -1}, + {Digest: GzippedEmptyLayerDigest, Size: -1}, + {Digest: "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9", Size: -1}, + {Digest: "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909", Size: -1}, + {Digest: GzippedEmptyLayerDigest, Size: -1}, + {Digest: GzippedEmptyLayerDigest, Size: -1}, + {Digest: "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa", Size: -1}, + }, s1Manifest.LayerInfos()) + } + // FIXME? Test also the other failure cases, if only to see that we don't crash? } func TestConvertToManifestSchema2(t *testing.T) { - originalSrc := newOCI1ImageSource(t, "httpd-copy:latest") + originalSrc := newOCI1ImageSource(t, "oci1-config.json", "httpd-copy:latest") original := manifestOCI1FromFixture(t, originalSrc, "oci1.json") - res, err := original.UpdatedImage(context.Background(), types.ManifestUpdateOptions{ + original2 := manifestOCI1FromFixture(t, originalSrc, "oci1.json") + res := successfulOCI1Conversion(t, original, original2, types.ManifestUpdateOptions{ ManifestMIMEType: manifest.DockerV2Schema2MediaType, }) - require.NoError(t, err) convertedJSON, mt, err := res.Manifest(context.Background()) require.NoError(t, err) @@ -525,20 +697,178 @@ func TestConvertToManifestSchema2(t *testing.T) { var expected manifest.NonImageArtifactError assert.ErrorAs(t, err, &expected) - // FIXME? Test also the other failure cases, if only to see that we don't crash? 
-} + // Conversion of an encrypted image fails + encrypted := manifestOCI1FromFixture(t, originalSrc, "oci1.encrypted.json") + encrypted2 := manifestOCI1FromFixture(t, originalSrc, "oci1.encrypted.json") + _, err = encrypted.UpdatedImage(context.Background(), types.ManifestUpdateOptions{ + ManifestMIMEType: manifest.DockerV2Schema2MediaType, + }) + assert.Error(t, err) + + // Conversion to schema2 with encryption fails + _, err = original.UpdatedImage(context.Background(), types.ManifestUpdateOptions{ + LayerInfos: layerInfosWithCryptoOperation(original.LayerInfos(), types.Encrypt), + ManifestMIMEType: manifest.DockerV2Schema2MediaType, + }) + assert.Error(t, err) + + // Conversion to schema2 with simultaneous decryption is possible + updatedLayers := layerInfosWithCryptoOperation(encrypted.LayerInfos(), types.Decrypt) + updatedLayersCopy := slices.Clone(updatedLayers) + res = successfulOCI1Conversion(t, encrypted, encrypted2, types.ManifestUpdateOptions{ + LayerInfos: updatedLayers, + ManifestMIMEType: manifest.DockerV2Schema2MediaType, + }) + assert.Equal(t, updatedLayersCopy, updatedLayers) // updatedLayers have not been modified in place + convertedJSON, mt, err = res.Manifest(context.Background()) + require.NoError(t, err) + assert.Equal(t, manifest.DockerV2Schema2MediaType, mt) + s2Manifest, err := manifestSchema2FromManifest(originalSrc, convertedJSON) + require.NoError(t, err) + assert.Equal(t, []types.BlobInfo{ + { + Digest: "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + Size: 51354364, + MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", + }, + { + Digest: "sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", + Size: 150, + MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", + }, + { + Digest: "sha256:cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc", + Size: 11739507, + MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", + URLs: 
[]string{"https://layer.url"}, + }, + { + Digest: "sha256:dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd", + Size: 8841833, + MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", + }, + { + Digest: "sha256:eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee", + Size: 291, + MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", + }, + }, s2Manifest.LayerInfos()) + convertedConfig, err = res.ConfigBlob(context.Background()) + require.NoError(t, err) + assertJSONEqualsFixture(t, convertedConfig, "oci1-to-schema2-config.json") -func TestConvertToManifestSchema2AllMediaTypes(t *testing.T) { - originalSrc := newOCI1ImageSource(t, "httpd-copy:latest") - original := manifestOCI1FromFixture(t, originalSrc, "oci1-all-media-types.json") - _, err := original.UpdatedImage(context.Background(), types.ManifestUpdateOptions{ + // Conversion to schema2 of an image with Zstd layers fails + mixedSrc := newOCI1ImageSource(t, "oci1-all-media-types-config.json", "httpd-copy:latest") + mixedImage := manifestOCI1FromFixture(t, mixedSrc, "oci1-all-media-types.json") + mixedImage2 := manifestOCI1FromFixture(t, mixedSrc, "oci1-all-media-types.json") + _, err = mixedImage.UpdatedImage(context.Background(), types.ManifestUpdateOptions{ ManifestMIMEType: manifest.DockerV2Schema2MediaType, }) - require.Error(t, err) // zstd compression is not supported for docker images + assert.Error(t, err) // zstd compression is not supported for docker images + + // Conversion to schema2 of an image with Zstd layers, while editing layers to be uncompressed, is possible. 
+ updatedLayers = layerInfosWithCompressionEdits(mixedImage.LayerInfos(), types.Decompress, nil) + updatedLayersCopy = slices.Clone(updatedLayers) + res = successfulOCI1Conversion(t, mixedImage, mixedImage2, types.ManifestUpdateOptions{ + LayerInfos: updatedLayers, + ManifestMIMEType: manifest.DockerV2Schema2MediaType, + }) + assert.Equal(t, updatedLayersCopy, updatedLayers) // updatedLayers have not been modified in place + convertedJSON, mt, err = res.Manifest(context.Background()) + require.NoError(t, err) + assert.Equal(t, manifest.DockerV2Schema2MediaType, mt) + s2Manifest, err = manifestSchema2FromManifest(mixedSrc, convertedJSON) + require.NoError(t, err) + assert.Equal(t, []types.BlobInfo{ + { + Digest: "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb", + Size: 51354364, + MediaType: "application/vnd.docker.image.rootfs.diff.tar", + }, + { + Digest: "sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c", + Size: 150, + MediaType: "application/vnd.docker.image.rootfs.diff.tar", + }, + { + Digest: "sha256:2bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c", + Size: 152, + MediaType: "application/vnd.docker.image.rootfs.diff.tar", + }, + { + Digest: "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9", + Size: 11739507, + MediaType: "application/vnd.docker.image.rootfs.foreign.diff.tar", + }, + { + Digest: "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909", + Size: 8841833, + MediaType: "application/vnd.docker.image.rootfs.foreign.diff.tar", + }, + { + Digest: "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa", + Size: 291, + MediaType: "application/vnd.docker.image.rootfs.foreign.diff.tar", + }, + }, s2Manifest.LayerInfos()) + convertedConfig, err = res.ConfigBlob(context.Background()) + require.NoError(t, err) + assertJSONEqualsFixture(t, convertedConfig, "oci1-all-media-types-to-schema2-config.json") + + // Conversion to schema2 of an 
image with Zstd layers, while editing layers to be gzip-compressed, is possible. + updatedLayers = layerInfosWithCompressionEdits(mixedImage.LayerInfos(), types.PreserveOriginal, &compression.Gzip) + updatedLayersCopy = slices.Clone(updatedLayers) + res = successfulOCI1Conversion(t, mixedImage, mixedImage2, types.ManifestUpdateOptions{ + LayerInfos: updatedLayers, + ManifestMIMEType: manifest.DockerV2Schema2MediaType, + }) + assert.Equal(t, updatedLayersCopy, updatedLayers) // updatedLayers have not been modified in place + convertedJSON, mt, err = res.Manifest(context.Background()) + require.NoError(t, err) + assert.Equal(t, manifest.DockerV2Schema2MediaType, mt) + s2Manifest, err = manifestSchema2FromManifest(mixedSrc, convertedJSON) + require.NoError(t, err) + assert.Equal(t, []types.BlobInfo{ + { + Digest: "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb", + Size: 51354364, + MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", + }, + { + Digest: "sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c", + Size: 150, + MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", + }, + { + Digest: "sha256:2bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c", + Size: 152, + MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", + }, + { + Digest: "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9", + Size: 11739507, + MediaType: "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip", + }, + { + Digest: "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909", + Size: 8841833, + MediaType: "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip", + }, + { + Digest: "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa", + Size: 291, + MediaType: "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip", + }, + }, s2Manifest.LayerInfos()) + convertedConfig, err = res.ConfigBlob(context.Background()) + require.NoError(t, 
err) + assertJSONEqualsFixture(t, convertedConfig, "oci1-all-media-types-to-schema2-config.json") + + // FIXME? Test also the other failure cases, if only to see that we don't crash? } func TestConvertToV2S2WithInvalidMIMEType(t *testing.T) { - originalSrc := newOCI1ImageSource(t, "httpd-copy:latest") + originalSrc := newOCI1ImageSource(t, "oci1-config.json", "httpd-copy:latest") manifest, err := os.ReadFile(filepath.Join("fixtures", "oci1-invalid-media-type.json")) require.NoError(t, err) diff --git a/internal/imagedestination/impl/helpers.go b/internal/imagedestination/impl/helpers.go index d5de81a613..5d28b3e73a 100644 --- a/internal/imagedestination/impl/helpers.go +++ b/internal/imagedestination/impl/helpers.go @@ -12,6 +12,11 @@ func BlobMatchesRequiredCompression(options private.TryReusingBlobOptions, candi if options.RequiredCompression == nil { return true // no requirement imposed } + if options.RequiredCompression.Name() == compression.ZstdChunkedAlgorithmName { + // HACK: Never match when the caller asks for zstd:chunked, because we don’t record the annotations required to use the chunked blobs. + // The caller must re-compress to build those annotations. + return false + } return candidateCompression != nil && (options.RequiredCompression.Name() == candidateCompression.Name()) } diff --git a/internal/imagedestination/wrapper.go b/internal/imagedestination/wrapper.go index 17e1870c19..cdd3c5e5d0 100644 --- a/internal/imagedestination/wrapper.go +++ b/internal/imagedestination/wrapper.go @@ -28,7 +28,7 @@ type wrapped struct { // // NOTE: The returned API MUST NOT be a public interface (it can be either just a struct // with public methods, or perhaps a private interface), so that we can add methods -// without breaking any external implementors of a public interface. +// without breaking any external implementers of a public interface. 
func FromPublic(dest types.ImageDestination) private.ImageDestination { if dest2, ok := dest.(private.ImageDestination); ok { return dest2 diff --git a/internal/imagesource/wrapper.go b/internal/imagesource/wrapper.go index 886b4e833b..f0d1d042bf 100644 --- a/internal/imagesource/wrapper.go +++ b/internal/imagesource/wrapper.go @@ -27,7 +27,7 @@ type wrapped struct { // // NOTE: The returned API MUST NOT be a public interface (it can be either just a struct // with public methods, or perhaps a private interface), so that we can add methods -// without breaking any external implementors of a public interface. +// without breaking any external implementers of a public interface. func FromPublic(src types.ImageSource) private.ImageSource { if src2, ok := src.(private.ImageSource); ok { return src2 diff --git a/internal/manifest/docker_schema2_list.go b/internal/manifest/docker_schema2_list.go index 14a476642e..7ce5bb0696 100644 --- a/internal/manifest/docker_schema2_list.go +++ b/internal/manifest/docker_schema2_list.go @@ -64,13 +64,8 @@ func (list *Schema2ListPublic) Instance(instanceDigest digest.Digest) (ListUpdat MediaType: manifest.MediaType, } ret.ReadOnly.CompressionAlgorithmNames = []string{compression.GzipAlgorithmName} - ret.ReadOnly.Platform = &imgspecv1.Platform{ - OS: manifest.Platform.OS, - Architecture: manifest.Platform.Architecture, - OSVersion: manifest.Platform.OSVersion, - OSFeatures: manifest.Platform.OSFeatures, - Variant: manifest.Platform.Variant, - } + platform := ociPlatformFromSchema2PlatformSpec(manifest.Platform) + ret.ReadOnly.Platform = &platform return ret, nil } } @@ -119,23 +114,28 @@ func (index *Schema2ListPublic) editInstances(editInstances []ListEdit) error { } index.Manifests[targetIndex].MediaType = editInstance.UpdateMediaType case ListOpAdd: - addInstance := Schema2ManifestDescriptor{ - Schema2Descriptor{Digest: editInstance.AddDigest, Size: editInstance.AddSize, MediaType: editInstance.AddMediaType}, - Schema2PlatformSpec{ - 
OS: editInstance.AddPlatform.OS, - Architecture: editInstance.AddPlatform.Architecture, - OSVersion: editInstance.AddPlatform.OSVersion, - OSFeatures: editInstance.AddPlatform.OSFeatures, - Variant: editInstance.AddPlatform.Variant, - }, + if editInstance.AddPlatform == nil { + // Should we create a struct with empty fields instead? + // Right now ListOpAdd is only called when an instance with the same platform value + // already exists in the manifest, so this should not be reached in practice. + return fmt.Errorf("adding a schema2 list instance with no platform specified is not supported") } - addedEntries = append(addedEntries, addInstance) + addedEntries = append(addedEntries, Schema2ManifestDescriptor{ + Schema2Descriptor{ + Digest: editInstance.AddDigest, + Size: editInstance.AddSize, + MediaType: editInstance.AddMediaType, + }, + schema2PlatformSpecFromOCIPlatform(*editInstance.AddPlatform), + }) default: return fmt.Errorf("internal error: invalid operation: %d", editInstance.ListOperation) } } if len(addedEntries) != 0 { - index.Manifests = append(index.Manifests, addedEntries...) + // slices.Clone() here to ensure a private backing array; + // an external caller could have manually created Schema2ListPublic with a slice with extra capacity. + index.Manifests = append(slices.Clone(index.Manifests), addedEntries...) } return nil } @@ -158,13 +158,7 @@ func (list *Schema2ListPublic) ChooseInstance(ctx *types.SystemContext) (digest. 
} for _, wantedPlatform := range wantedPlatforms { for _, d := range list.Manifests { - imagePlatform := imgspecv1.Platform{ - Architecture: d.Platform.Architecture, - OS: d.Platform.OS, - OSVersion: d.Platform.OSVersion, - OSFeatures: slices.Clone(d.Platform.OSFeatures), - Variant: d.Platform.Variant, - } + imagePlatform := ociPlatformFromSchema2PlatformSpec(d.Platform) if platform.MatchesPlatform(imagePlatform, wantedPlatform) { return d.Digest, nil } @@ -224,20 +218,14 @@ func Schema2ListPublicClone(list *Schema2ListPublic) *Schema2ListPublic { func (list *Schema2ListPublic) ToOCI1Index() (*OCI1IndexPublic, error) { components := make([]imgspecv1.Descriptor, 0, len(list.Manifests)) for _, manifest := range list.Manifests { - converted := imgspecv1.Descriptor{ + platform := ociPlatformFromSchema2PlatformSpec(manifest.Platform) + components = append(components, imgspecv1.Descriptor{ MediaType: manifest.MediaType, Size: manifest.Size, Digest: manifest.Digest, URLs: slices.Clone(manifest.URLs), - Platform: &imgspecv1.Platform{ - OS: manifest.Platform.OS, - Architecture: manifest.Platform.Architecture, - OSFeatures: slices.Clone(manifest.Platform.OSFeatures), - OSVersion: manifest.Platform.OSVersion, - Variant: manifest.Platform.Variant, - }, - } - components = append(components, converted) + Platform: &platform, + }) } oci := OCI1IndexPublicFromComponents(components, nil) return oci, nil @@ -312,3 +300,15 @@ func Schema2ListFromManifest(manifest []byte) (*Schema2List, error) { } return schema2ListFromPublic(public), nil } + +// ociPlatformFromSchema2PlatformSpec converts a schema2 platform p to the OCI structure. +func ociPlatformFromSchema2PlatformSpec(p Schema2PlatformSpec) imgspecv1.Platform { + return imgspecv1.Platform{ + Architecture: p.Architecture, + OS: p.OS, + OSVersion: p.OSVersion, + OSFeatures: slices.Clone(p.OSFeatures), + Variant: p.Variant, + // Features is not supported in OCI, and discarded. 
+ } +} diff --git a/internal/manifest/docker_schema2_list_test.go b/internal/manifest/docker_schema2_list_test.go index 97cdb4361f..2824cf01a2 100644 --- a/internal/manifest/docker_schema2_list_test.go +++ b/internal/manifest/docker_schema2_list_test.go @@ -10,6 +10,7 @@ import ( imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "golang.org/x/exp/slices" ) func TestSchema2ListPublicFromManifest(t *testing.T) { @@ -81,11 +82,11 @@ func TestSchema2ListEditInstances(t *testing.T) { err = list.EditInstances(editInstances) require.NoError(t, err) - // Add new elements to the end of old list to maintain order - originalListOrder = append(originalListOrder, digest.Digest("sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")) - originalListOrder = append(originalListOrder, digest.Digest("sha256:cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc")) - // Verify order - assert.Equal(t, list.Instances(), originalListOrder) + // Verify new elements are added to the end of old list + assert.Equal(t, append(slices.Clone(originalListOrder), + digest.Digest("sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), + digest.Digest("sha256:cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc"), + ), list.Instances()) } func TestSchema2ListFromManifest(t *testing.T) { diff --git a/internal/manifest/errors.go b/internal/manifest/errors.go index 6ebe4b24cf..6c8e233d97 100644 --- a/internal/manifest/errors.go +++ b/internal/manifest/errors.go @@ -1,6 +1,10 @@ package manifest -import "fmt" +import ( + "fmt" + + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" +) // FIXME: This is a duplicate of c/image/manifestDockerV2Schema2ConfigMediaType. // Deduplicate that, depending on outcome of https://github.com/containers/image/pull/1791 . 
@@ -26,8 +30,20 @@ type NonImageArtifactError struct { mimeType string } -// NewNonImageArtifactError returns a NonImageArtifactError about an artifact with mimeType. -func NewNonImageArtifactError(mimeType string) error { +// NewNonImageArtifactError returns a NonImageArtifactError about an artifact manifest. +// +// This is typically called if manifest.Config.MediaType != imgspecv1.MediaTypeImageConfig . +func NewNonImageArtifactError(manifest *imgspecv1.Manifest) error { + // Callers decide based on manifest.Config.MediaType that this is not an image; + // in that case manifest.ArtifactType can be optionally defined, and if it is, it is typically + // more relevant because config may be ~absent with imgspecv1.MediaTypeEmptyJSON. + // + // If ArtifactType and Config.MediaType are both defined and non-trivial, presumably + // ArtifactType is the “top-level” one, although that’s not defined by the spec. + mimeType := manifest.ArtifactType + if mimeType == "" { + mimeType = manifest.Config.MediaType + } return NonImageArtifactError{mimeType: mimeType} } diff --git a/internal/manifest/manifest.go b/internal/manifest/manifest.go index 1dbcc14182..6f7bc8bbe6 100644 --- a/internal/manifest/manifest.go +++ b/internal/manifest/manifest.go @@ -3,6 +3,7 @@ package manifest import ( "encoding/json" + compressiontypes "github.com/containers/image/v5/pkg/compression/types" "github.com/containers/libtrust" digest "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" @@ -14,7 +15,7 @@ import ( const ( // DockerV2Schema1MediaType MIME type represents Docker manifest schema 1 DockerV2Schema1MediaType = "application/vnd.docker.distribution.manifest.v1+json" - // DockerV2Schema1MediaType MIME type represents Docker manifest schema 1 with a JWS signature + // DockerV2Schema1SignedMediaType MIME type represents Docker manifest schema 1 with a JWS signature DockerV2Schema1SignedMediaType = 
"application/vnd.docker.distribution.manifest.v1+prettyjws" // DockerV2Schema2MediaType MIME type represents Docker manifest schema 2 DockerV2Schema2MediaType = "application/vnd.docker.distribution.manifest.v2+json" @@ -165,3 +166,26 @@ func NormalizedMIMEType(input string) string { return DockerV2Schema1SignedMediaType } } + +// CompressionAlgorithmIsUniversallySupported returns true if MIMETypeSupportsCompressionAlgorithm(mimeType, algo) returns true for all mimeType values. +func CompressionAlgorithmIsUniversallySupported(algo compressiontypes.Algorithm) bool { + switch algo.Name() { // Should this use InternalUnstableUndocumentedMIMEQuestionMark() ? + case compressiontypes.GzipAlgorithmName: + return true + default: + return false + } +} + +// MIMETypeSupportsCompressionAlgorithm returns true if mimeType can represent algo. +func MIMETypeSupportsCompressionAlgorithm(mimeType string, algo compressiontypes.Algorithm) bool { + if CompressionAlgorithmIsUniversallySupported(algo) { + return true + } + switch algo.Name() { // Should this use InternalUnstableUndocumentedMIMEQuestionMark() ? 
+ case compressiontypes.ZstdAlgorithmName, compressiontypes.ZstdChunkedAlgorithmName: + return mimeType == imgspecv1.MediaTypeImageManifest + default: // Includes Bzip2AlgorithmName and XzAlgorithmName, which are defined names but are not supported anywhere + return false + } +} diff --git a/internal/manifest/manifest_test.go b/internal/manifest/manifest_test.go index 8dc9879192..0b01549670 100644 --- a/internal/manifest/manifest_test.go +++ b/internal/manifest/manifest_test.go @@ -1,10 +1,12 @@ package manifest import ( + "fmt" "os" "path/filepath" "testing" + "github.com/containers/image/v5/pkg/compression" digest "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/stretchr/testify/assert" @@ -132,3 +134,51 @@ func TestNormalizedMIMEType(t *testing.T) { assert.Equal(t, DockerV2Schema1SignedMediaType, res, c) } } + +func TestCompressionAlgorithmIsUniversallySupported(t *testing.T) { + for _, algo := range []compression.Algorithm{compression.Gzip} { + res := CompressionAlgorithmIsUniversallySupported(algo) + assert.True(t, res, algo.Name()) + } + + for _, algo := range []compression.Algorithm{ + compression.Bzip2, + compression.Xz, + compression.Zstd, + compression.ZstdChunked, + } { + res := CompressionAlgorithmIsUniversallySupported(algo) + assert.False(t, res, algo.Name()) + } +} + +func TestMIMETypeSupportsCompressionAlgorithm(t *testing.T) { + allMIMETypes := []string{imgspecv1.MediaTypeImageManifest, DockerV2Schema2MediaType, DockerV2Schema1SignedMediaType, DockerV2Schema1MediaType} + + for _, algo := range []compression.Algorithm{compression.Gzip} { + for _, mt := range allMIMETypes { + res := MIMETypeSupportsCompressionAlgorithm(mt, algo) + assert.True(t, res, fmt.Sprintf("%s, %s", mt, algo.Name())) + } + } + + for _, algo := range []compression.Algorithm{ + compression.Bzip2, + compression.Xz, + } { + for _, mt := range allMIMETypes { + res := MIMETypeSupportsCompressionAlgorithm(mt, algo) + 
assert.False(t, res, fmt.Sprintf("%s, %s", mt, algo.Name())) + } + } + + for _, algo := range []compression.Algorithm{ + compression.Zstd, + compression.ZstdChunked, + } { + for _, mt := range allMIMETypes { + res := MIMETypeSupportsCompressionAlgorithm(mt, algo) + assert.Equal(t, mt == imgspecv1.MediaTypeImageManifest, res, fmt.Sprintf("%s, %s", mt, algo.Name())) + } + } +} diff --git a/internal/manifest/oci_index.go b/internal/manifest/oci_index.go index 8832caa3ee..d8d06513b5 100644 --- a/internal/manifest/oci_index.go +++ b/internal/manifest/oci_index.go @@ -167,7 +167,9 @@ func (index *OCI1IndexPublic) editInstances(editInstances []ListEdit) error { } } if len(addedEntries) != 0 { - index.Manifests = append(index.Manifests, addedEntries...) + // slices.Clone() here to ensure the slice uses a private backing array; + // an external caller could have manually created OCI1IndexPublic with a slice with extra capacity. + index.Manifests = append(slices.Clone(index.Manifests), addedEntries...) } if len(addedEntries) != 0 || updatedAnnotations { slices.SortStableFunc(index.Manifests, func(a, b imgspecv1.Descriptor) int { @@ -220,7 +222,7 @@ func (ic instanceCandidate) isPreferredOver(other *instanceCandidate, preferGzip case ic.manifestPosition != other.manifestPosition: return ic.manifestPosition < other.manifestPosition } - panic("internal error: invalid comparision between two candidates") // This should not be reachable because in all calls we make, the two candidates differ at least in manifestPosition. + panic("internal error: invalid comparison between two candidates") // This should not be reachable because in all calls we make, the two candidates differ at least in manifestPosition. 
} // chooseInstance is a private equivalent to ChooseInstanceByCompression, @@ -239,13 +241,7 @@ func (index *OCI1IndexPublic) chooseInstance(ctx *types.SystemContext, preferGzi for manifestIndex, d := range index.Manifests { candidate := instanceCandidate{platformIndex: math.MaxInt, manifestPosition: manifestIndex, isZstd: instanceIsZstd(d), digest: d.Digest} if d.Platform != nil { - imagePlatform := imgspecv1.Platform{ - Architecture: d.Platform.Architecture, - OS: d.Platform.OS, - OSVersion: d.Platform.OSVersion, - OSFeatures: slices.Clone(d.Platform.OSFeatures), - Variant: d.Platform.Variant, - } + imagePlatform := ociPlatformClone(*d.Platform) platformIndex := slices.IndexFunc(wantedPlatforms, func(wantedPlatform imgspecv1.Platform) bool { return platform.MatchesPlatform(imagePlatform, wantedPlatform) }) @@ -299,13 +295,8 @@ func OCI1IndexPublicFromComponents(components []imgspecv1.Descriptor, annotation for i, component := range components { var platform *imgspecv1.Platform if component.Platform != nil { - platform = &imgspecv1.Platform{ - Architecture: component.Platform.Architecture, - OS: component.Platform.OS, - OSVersion: component.Platform.OSVersion, - OSFeatures: slices.Clone(component.Platform.OSFeatures), - Variant: component.Platform.Variant, - } + platformCopy := ociPlatformClone(*component.Platform) + platform = &platformCopy } m := imgspecv1.Descriptor{ MediaType: component.MediaType, @@ -342,22 +333,15 @@ func (index *OCI1IndexPublic) ToSchema2List() (*Schema2ListPublic, error) { Architecture: runtime.GOARCH, } } - converted := Schema2ManifestDescriptor{ + components = append(components, Schema2ManifestDescriptor{ Schema2Descriptor{ MediaType: manifest.MediaType, Size: manifest.Size, Digest: manifest.Digest, URLs: slices.Clone(manifest.URLs), }, - Schema2PlatformSpec{ - OS: platform.OS, - Architecture: platform.Architecture, - OSFeatures: slices.Clone(platform.OSFeatures), - OSVersion: platform.OSVersion, - Variant: platform.Variant, - }, - } - 
components = append(components, converted) + schema2PlatformSpecFromOCIPlatform(*platform), + }) } s2 := Schema2ListPublicFromComponents(components) return s2, nil @@ -431,3 +415,32 @@ func OCI1IndexFromManifest(manifest []byte) (*OCI1Index, error) { } return oci1IndexFromPublic(public), nil } + +// ociPlatformClone returns an independent copy of p. +func ociPlatformClone(p imgspecv1.Platform) imgspecv1.Platform { + // The only practical way in Go to give read-only access to an array is to copy it. + // The only practical way in Go to copy a deep structure is to either do it manually field by field, + // or to use reflection (incl. a round-trip through JSON, which uses reflection). + // + // The combination of the two is just sad, and leads to code like this, which will + // need to be updated with every new Platform field. + return imgspecv1.Platform{ + Architecture: p.Architecture, + OS: p.OS, + OSVersion: p.OSVersion, + OSFeatures: slices.Clone(p.OSFeatures), + Variant: p.Variant, + } +} + +// schema2PlatformSpecFromOCIPlatform converts an OCI platform p to the schema2 structure. 
+func schema2PlatformSpecFromOCIPlatform(p imgspecv1.Platform) Schema2PlatformSpec { + return Schema2PlatformSpec{ + Architecture: p.Architecture, + OS: p.OS, + OSVersion: p.OSVersion, + OSFeatures: slices.Clone(p.OSFeatures), + Variant: p.Variant, + Features: nil, + } +} diff --git a/internal/manifest/oci_index_test.go b/internal/manifest/oci_index_test.go index a856290cf1..204af46330 100644 --- a/internal/manifest/oci_index_test.go +++ b/internal/manifest/oci_index_test.go @@ -86,7 +86,7 @@ func TestOCI1EditInstances(t *testing.T) { list, err = ListFromBlob(validManifest, GuessMIMEType(validManifest)) require.NoError(t, err) - // Verfiy correct zstd sorting + // Verify correct zstd sorting editInstances = []ListEdit{} annotations := map[string]string{"io.github.containers.compression.zstd": "true"} // without zstd @@ -177,13 +177,13 @@ func TestOCI1IndexChooseInstanceByCompression(t *testing.T) { {"amd64", "", "sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", false}, // out of multiple gzip in arm64 select the first one to ensure original logic is prevented {"arm64", "", "sha256:cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc", false}, - // select a signle gzip s390x image + // select a single gzip s390x image {"s390x", "", "sha256:eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee", false}, // out of gzip and zstd in amd64 select the first gzip image {"amd64", "", "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", true}, // out of multiple gzip in arm64 select the first one to ensure original logic is prevented {"arm64", "", "sha256:cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc", true}, - // select a signle gzip s390x image + // select a single gzip s390x image {"s390x", "", "sha256:eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee", true}, }, unmatchedInstances: []string{ @@ -215,7 +215,7 @@ func TestOCI1IndexChooseInstanceByCompression(t 
*testing.T) { }, { listFile: "oci1.index.zstd-selection2.json", - // out of list where first instance is gzip , select the first occurance of zstd out of many + // out of list where first instance is gzip , select the first occurrence of zstd out of many matchedInstances: []expectedMatch{ {"amd64", "", "sha256:cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc", false}, {"amd64", "", "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", true}, diff --git a/internal/pkg/platform/platform_matcher.go b/internal/pkg/platform/platform_matcher.go index 7a4efb73e2..30da87f97a 100644 --- a/internal/pkg/platform/platform_matcher.go +++ b/internal/pkg/platform/platform_matcher.go @@ -154,6 +154,10 @@ var compatibility = map[string][]string{ // the most compatible platform is first. // If some option (arch, os, variant) is not present, a value from current platform is detected. func WantedPlatforms(ctx *types.SystemContext) ([]imgspecv1.Platform, error) { + // Note that this does not use Platform.OSFeatures and Platform.OSVersion at all. + // The fields are not specified by the OCI specification, as of version 1.1, usefully enough + // to be interoperable, anyway. + wantedArch := runtime.GOARCH wantedVariant := "" if ctx != nil && ctx.ArchitectureChoice != "" { diff --git a/internal/private/private.go b/internal/private/private.go index 95d561fcdd..72b574a5bd 100644 --- a/internal/private/private.go +++ b/internal/private/private.go @@ -117,6 +117,7 @@ type TryReusingBlobOptions struct { EmptyLayer bool // True if the blob is an "empty"/"throwaway" layer, and may not necessarily be physically represented. LayerIndex *int // If the blob is a layer, a zero-based index of the layer within the image; nil otherwise. SrcRef reference.Named // A reference to the source image that contains the input blob. + TOCDigest *digest.Digest // If specified, the blob can be looked up in the destination also by its TOC digest. 
} // ReusedBlob is information about a blob reused in a destination. diff --git a/internal/testing/gpgagent/gpg_agent.go b/internal/testing/gpgagent/gpg_agent.go index 3de34eb662..148b455988 100644 --- a/internal/testing/gpgagent/gpg_agent.go +++ b/internal/testing/gpgagent/gpg_agent.go @@ -3,12 +3,14 @@ package gpgagent import ( "os" "os/exec" + + "golang.org/x/exp/slices" ) // Kill the running gpg-agent to drop unlocked keys. // This is useful to ensure tests don’t leave processes around (in TestMain), or for testing handling of invalid passphrases. func KillGPGAgent(gpgHomeDir string) error { cmd := exec.Command("gpgconf", "--kill", "gpg-agent") - cmd.Env = append(os.Environ(), "GNUPGHOME="+gpgHomeDir) + cmd.Env = append(slices.Clone(os.Environ()), "GNUPGHOME="+gpgHomeDir) return cmd.Run() } diff --git a/manifest/docker_schema1.go b/manifest/docker_schema1.go index 7b9c4b58fb..762815570c 100644 --- a/manifest/docker_schema1.go +++ b/manifest/docker_schema1.go @@ -10,6 +10,7 @@ import ( "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/internal/manifest" "github.com/containers/image/v5/internal/set" + compressiontypes "github.com/containers/image/v5/pkg/compression/types" "github.com/containers/image/v5/types" "github.com/containers/storage/pkg/regexp" "github.com/docker/docker/api/types/versions" @@ -142,6 +143,15 @@ func (m *Schema1) LayerInfos() []LayerInfo { return layers } +const fakeSchema1MIMEType = DockerV2Schema2LayerMediaType // Used only in schema1CompressionMIMETypeSets +var schema1CompressionMIMETypeSets = []compressionMIMETypeSet{ + { + mtsUncompressed: fakeSchema1MIMEType, + compressiontypes.GzipAlgorithmName: fakeSchema1MIMEType, + compressiontypes.ZstdAlgorithmName: mtsUnsupportedMIMEType, + }, +} + // UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers) func (m *Schema1) UpdateLayerInfos(layerInfos 
[]types.BlobInfo) error { // Our LayerInfos includes empty layers (where m.ExtractedV1Compatibility[].ThrowAway), so expect them to be included here as well. @@ -150,10 +160,18 @@ func (m *Schema1) UpdateLayerInfos(layerInfos []types.BlobInfo) error { } m.FSLayers = make([]Schema1FSLayers, len(layerInfos)) for i, info := range layerInfos { + // There are no MIME types in schema1, but we do a “conversion” here to reject unsupported compression algorithms, + // in a way that is consistent with the other schema implementations. + if _, err := updatedMIMEType(schema1CompressionMIMETypeSets, fakeSchema1MIMEType, info); err != nil { + return fmt.Errorf("preparing updated manifest, layer %q: %w", info.Digest, err) + } // (docker push) sets up m.ExtractedV1Compatibility[].{Id,Parent} based on values of info.Digest, // but (docker pull) ignores them in favor of computing DiffIDs from uncompressed data, except verifying the child->parent links and uniqueness. // So, we don't bother recomputing the IDs in m.History.V1Compatibility. 
m.FSLayers[(len(layerInfos)-1)-i].BlobSum = info.Digest + if info.CryptoOperation != types.PreserveOriginalCrypto { + return fmt.Errorf("encryption change (for layer %q) is not supported in schema1 manifests", info.Digest) + } } return nil } diff --git a/manifest/docker_schema1_test.go b/manifest/docker_schema1_test.go index 390c8816a0..0c86036456 100644 --- a/manifest/docker_schema1_test.go +++ b/manifest/docker_schema1_test.go @@ -1,11 +1,13 @@ package manifest import ( + "encoding/json" "os" "path/filepath" "testing" "time" + "github.com/containers/image/v5/pkg/compression" "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" "github.com/stretchr/testify/assert" @@ -21,6 +23,26 @@ var schema1FixtureLayerDiffIDs = []digest.Digest{ "sha256:8c064bb1f60e84fa8cc6079b6d2e76e0423389fd6aeb7e497dfdae5e05b2b25b", } +// assertJSONEqualsFixture tests that jsonBytes is structurally equal to fixture, +// possibly ignoring ignoreFields +func assertJSONEqualsFixture(t *testing.T, jsonBytes []byte, fixture string, ignoreFields ...string) { + var contents map[string]any + err := json.Unmarshal(jsonBytes, &contents) + require.NoError(t, err) + + fixtureBytes, err := os.ReadFile(filepath.Join("fixtures", fixture)) + require.NoError(t, err) + var fixtureContents map[string]any + + err = json.Unmarshal(fixtureBytes, &fixtureContents) + require.NoError(t, err) + for _, f := range ignoreFields { + delete(contents, f) + delete(fixtureContents, f) + } + assert.Equal(t, fixtureContents, contents) +} + func manifestSchema1FromFixture(t *testing.T, fixture string) *Schema1 { manifest, err := os.ReadFile(filepath.Join("fixtures", fixture)) require.NoError(t, err) @@ -177,6 +199,159 @@ func TestSchema1LayerInfos(t *testing.T) { }, m.LayerInfos()) } +func TestSchema1UpdateLayerInfos(t *testing.T) { + for _, c := range []struct { + name string + sourceFixture string + updates []types.BlobInfo + expectedFixture string // or "" to indicate an expected failure + }{ + { + 
name: "gzip → uncompressed", + sourceFixture: "v2s1.manifest.json", + updates: []types.BlobInfo{ + { + Digest: "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef", + Size: 32654, + CompressionOperation: types.Decompress, + }, + { + Digest: "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef", + Size: 16724, + CompressionOperation: types.Decompress, + }, + { + Digest: "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef", + Size: 73109, + CompressionOperation: types.Decompress, + }, + }, + expectedFixture: "v2s1.manifest.json", // MIME type is not stored, and we didn’t change the digests in this test, so we should not see any changes. + }, + { + name: "uncompressed → gzip", + sourceFixture: "v2s1.manifest.json", + updates: []types.BlobInfo{ + { + Digest: "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef", + Size: 32654, + CompressionOperation: types.Compress, + CompressionAlgorithm: &compression.Gzip, + }, + { + Digest: "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef", + Size: 16724, + CompressionOperation: types.Compress, + CompressionAlgorithm: &compression.Gzip, + }, + { + Digest: "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef", + Size: 73109, + CompressionOperation: types.Compress, + CompressionAlgorithm: &compression.Gzip, + }, + }, + expectedFixture: "v2s1.manifest.json", // MIME type is not stored, and we didn’t change the digests in this test, so we should not see any changes. 
+ }, + { + name: "gzip → zstd", + sourceFixture: "v2s1.manifest.json", + updates: []types.BlobInfo{ + { + Digest: "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f", + Size: 32654, + CompressionOperation: types.Compress, + CompressionAlgorithm: &compression.Zstd, + }, + { + Digest: "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b", + Size: 16724, + CompressionOperation: types.Compress, + CompressionAlgorithm: &compression.Zstd, + }, + { + Digest: "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736", + Size: 73109, + CompressionOperation: types.Compress, + CompressionAlgorithm: &compression.Zstd, + }, + }, + expectedFixture: "", // zstd is not supported for docker images + }, + { + name: "uncompressed → gzip encrypted", + sourceFixture: "v2s1.manifest.json", + updates: []types.BlobInfo{ + { + Digest: "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + Size: 32654, + Annotations: map[string]string{"org.opencontainers.image.enc.…": "layer1"}, + CompressionOperation: types.Compress, + CompressionAlgorithm: &compression.Gzip, + CryptoOperation: types.Encrypt, + }, + { + Digest: "sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", + Size: 16724, + Annotations: map[string]string{"org.opencontainers.image.enc.…": "layer2"}, + CompressionOperation: types.Compress, + CompressionAlgorithm: &compression.Gzip, + CryptoOperation: types.Encrypt, + }, + { + Digest: "sha256:cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc", + Size: 73109, + Annotations: map[string]string{"org.opencontainers.image.enc.…": "layer2"}, + CompressionOperation: types.Compress, + CompressionAlgorithm: &compression.Gzip, + CryptoOperation: types.Encrypt, + }, + }, + expectedFixture: "", // Encryption is not supported + }, + { + name: "gzip → uncompressed decrypted", // We can’t represent encrypted images anyway, but verify that we reject decryption attempts. 
+ sourceFixture: "v2s1.manifest.json", + updates: []types.BlobInfo{ + { + Digest: "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f", + Size: 32654, + CompressionOperation: types.Decompress, + CryptoOperation: types.Decrypt, + }, + { + Digest: "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b", + Size: 16724, + CompressionOperation: types.Decompress, + CryptoOperation: types.Decrypt, + }, + { + Digest: "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736", + Size: 73109, + CompressionOperation: types.Decompress, + CryptoOperation: types.Decrypt, + }, + }, + expectedFixture: "", // Decryption is not supported + }, + } { + manifest := manifestSchema1FromFixture(t, c.sourceFixture) + + err := manifest.UpdateLayerInfos(c.updates) + if c.expectedFixture == "" { + assert.Error(t, err, c.name) + } else { + require.NoError(t, err, c.name) + + updatedManifestBytes, err := manifest.Serialize() + require.NoError(t, err, c.name) + + // Drop "signatures" which is generated by AddDummyV2S1Signature + assertJSONEqualsFixture(t, updatedManifestBytes, c.expectedFixture, "signatures") + } + } +} + func TestSchema1ImageID(t *testing.T) { m := manifestSchema1FromFixture(t, "schema2-to-schema1-by-docker.json") id, err := m.ImageID(schema1FixtureLayerDiffIDs) diff --git a/manifest/docker_schema2.go b/manifest/docker_schema2.go index 3c9745dde5..20b721f4ca 100644 --- a/manifest/docker_schema2.go +++ b/manifest/docker_schema2.go @@ -247,6 +247,9 @@ func (m *Schema2) UpdateLayerInfos(layerInfos []types.BlobInfo) error { m.LayersDescriptors[i].Digest = info.Digest m.LayersDescriptors[i].Size = info.Size m.LayersDescriptors[i].URLs = info.URLs + if info.CryptoOperation != types.PreserveOriginalCrypto { + return fmt.Errorf("encryption change (for layer %q) is not supported in schema2 manifests", info.Digest) + } } return nil } diff --git a/manifest/docker_schema2_test.go b/manifest/docker_schema2_test.go index 
bdd10d0987..6a3aa3be15 100644 --- a/manifest/docker_schema2_test.go +++ b/manifest/docker_schema2_test.go @@ -64,132 +64,202 @@ func TestSchema2FromManifest(t *testing.T) { testValidManifestWithExtraFieldsIsRejected(t, parser, validManifest, []string{"fsLayers", "history", "manifests"}) } -func TestUpdateLayerInfosV2S2GzipToZstd(t *testing.T) { - origManifest := manifestSchema2FromFixture(t, "v2s2.manifest.json") - err := origManifest.UpdateLayerInfos([]types.BlobInfo{ +func TestSchema2UpdateLayerInfos(t *testing.T) { + for _, c := range []struct { + name string + sourceFixture string + updates []types.BlobInfo + expectedFixture string // or "" to indicate an expected failure + }{ { - Digest: "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f", - Size: 32654, - MediaType: DockerV2Schema2LayerMediaType, - CompressionOperation: types.Compress, - CompressionAlgorithm: &compression.Zstd, + name: "gzip → zstd", + sourceFixture: "v2s2.manifest.json", + updates: []types.BlobInfo{ + { + Digest: "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f", + Size: 32654, + MediaType: DockerV2Schema2LayerMediaType, + CompressionOperation: types.Compress, + CompressionAlgorithm: &compression.Zstd, + }, + { + Digest: "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b", + Size: 16724, + MediaType: DockerV2Schema2LayerMediaType, + CompressionOperation: types.Compress, + CompressionAlgorithm: &compression.Zstd, + }, + { + Digest: "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736", + Size: 73109, + MediaType: DockerV2Schema2LayerMediaType, + CompressionOperation: types.Compress, + CompressionAlgorithm: &compression.Zstd, + }, + }, + expectedFixture: "", // zstd is not supported for docker images }, { - Digest: "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b", - Size: 16724, - MediaType: DockerV2Schema2LayerMediaType, - CompressionOperation: types.Compress, - 
CompressionAlgorithm: &compression.Zstd, + name: "invalid compression operation", + sourceFixture: "v2s2.manifest.json", + updates: []types.BlobInfo{ + { + Digest: "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f", + Size: 32654, + MediaType: DockerV2Schema2LayerMediaType, + CompressionOperation: types.Decompress, + }, + { + Digest: "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b", + Size: 16724, + MediaType: DockerV2Schema2LayerMediaType, + CompressionOperation: types.Decompress, + }, + { + Digest: "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736", + Size: 73109, + MediaType: DockerV2Schema2LayerMediaType, + CompressionOperation: 42, // MUST fail here + }, + }, + expectedFixture: "", }, { - Digest: "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736", - Size: 73109, - MediaType: DockerV2Schema2LayerMediaType, - CompressionOperation: types.Compress, - CompressionAlgorithm: &compression.Zstd, - }, - }) - assert.NotNil(t, err) // zstd is not supported for docker images -} - -func TestUpdateLayerInfosV2S2InvalidCompressionOperation(t *testing.T) { - origManifest := manifestSchema2FromFixture(t, "v2s2.manifest.json") - err := origManifest.UpdateLayerInfos([]types.BlobInfo{ - { - Digest: "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f", - Size: 32654, - MediaType: DockerV2Schema2LayerMediaType, - CompressionOperation: types.Decompress, - }, - { - Digest: "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b", - Size: 16724, - MediaType: DockerV2Schema2LayerMediaType, - CompressionOperation: types.Decompress, + name: "invalid compression algorithm", + sourceFixture: "v2s2.manifest.json", + updates: []types.BlobInfo{ + { + Digest: "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f", + Size: 32654, + MediaType: DockerV2Schema2LayerMediaType, + CompressionOperation: types.Compress, + CompressionAlgorithm: 
&compression.Gzip, + }, + { + Digest: "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b", + Size: 16724, + MediaType: DockerV2Schema2LayerMediaType, + CompressionOperation: types.Compress, + CompressionAlgorithm: &compression.Gzip, + }, + { + Digest: "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736", + Size: 73109, + MediaType: DockerV2Schema2LayerMediaType, + CompressionOperation: types.Compress, + CompressionAlgorithm: &compression.Zstd, // MUST fail here + }, + }, + expectedFixture: "", }, { - Digest: "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736", - Size: 73109, - MediaType: DockerV2Schema2LayerMediaType, - CompressionOperation: 42, // MUST fail here + name: "nondistributable → gzip", + sourceFixture: "v2s2.nondistributable.manifest.json", + updates: []types.BlobInfo{ + { + Digest: "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f", + Size: 32654, + MediaType: DockerV2Schema2ForeignLayerMediaType, + CompressionOperation: types.Compress, + CompressionAlgorithm: &compression.Gzip, + }, + }, + expectedFixture: "v2s2.nondistributable.gzip.manifest.json", }, - }) - assert.NotNil(t, err) -} - -func TestUpdateLayerInfosV2S2InvalidCompressionAlgorithm(t *testing.T) { - origManifest := manifestSchema2FromFixture(t, "v2s2.manifest.json") - err := origManifest.UpdateLayerInfos([]types.BlobInfo{ { - Digest: "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f", - Size: 32654, - MediaType: DockerV2Schema2LayerMediaType, - CompressionOperation: types.Compress, - CompressionAlgorithm: &compression.Gzip, + name: "nondistributable gzip → uncompressed", + sourceFixture: "v2s2.nondistributable.gzip.manifest.json", + updates: []types.BlobInfo{ + { + Digest: "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f", + Size: 32654, + MediaType: DockerV2Schema2ForeignLayerMediaType, + CompressionOperation: types.Decompress, + }, + }, + expectedFixture: 
"v2s2.nondistributable.manifest.json", }, { - Digest: "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b", - Size: 16724, - MediaType: DockerV2Schema2LayerMediaType, - CompressionOperation: types.Compress, - CompressionAlgorithm: &compression.Gzip, + name: "uncompressed → gzip encrypted", + sourceFixture: "v2s2.uncompressed.manifest.json", + updates: []types.BlobInfo{ + { + Digest: "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + Size: 32654, + Annotations: map[string]string{"org.opencontainers.image.enc.…": "layer1"}, + MediaType: DockerV2SchemaLayerMediaTypeUncompressed, + CompressionOperation: types.Compress, + CompressionAlgorithm: &compression.Gzip, + CryptoOperation: types.Encrypt, + }, + { + Digest: "sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", + Size: 16724, + Annotations: map[string]string{"org.opencontainers.image.enc.…": "layer2"}, + MediaType: DockerV2SchemaLayerMediaTypeUncompressed, + CompressionOperation: types.Compress, + CompressionAlgorithm: &compression.Gzip, + CryptoOperation: types.Encrypt, + }, + { + Digest: "sha256:cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc", + Size: 73109, + Annotations: map[string]string{"org.opencontainers.image.enc.…": "layer2"}, + MediaType: DockerV2SchemaLayerMediaTypeUncompressed, + CompressionOperation: types.Compress, + CompressionAlgorithm: &compression.Gzip, + CryptoOperation: types.Encrypt, + }, + }, + expectedFixture: "", // Encryption is not supported }, { - Digest: "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736", - Size: 73109, - MediaType: DockerV2Schema2LayerMediaType, - CompressionOperation: types.Compress, - CompressionAlgorithm: &compression.Zstd, // MUST fail here - }, - }) - assert.NotNil(t, err) -} - -func TestUpdateLayerInfosV2S2NondistributableToGzip(t *testing.T) { - origManifest := manifestSchema2FromFixture(t, "v2s2.nondistributable.manifest.json") - - err := 
origManifest.UpdateLayerInfos([]types.BlobInfo{ - { - Digest: "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f", - Size: 32654, - MediaType: DockerV2Schema2ForeignLayerMediaType, - CompressionOperation: types.Compress, - CompressionAlgorithm: &compression.Gzip, + name: "gzip → uncompressed decrypted", // We can’t represent encrypted images anyway, but verify that we reject decryption attempts. + sourceFixture: "v2s2.manifest.json", + updates: []types.BlobInfo{ + { + Digest: "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f", + Size: 32654, + MediaType: DockerV2Schema2LayerMediaType, + CompressionOperation: types.Decompress, + CryptoOperation: types.Decrypt, + }, + { + Digest: "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b", + Size: 16724, + MediaType: DockerV2Schema2LayerMediaType, + CompressionOperation: types.Decompress, + CryptoOperation: types.Decrypt, + }, + { + Digest: "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736", + Size: 73109, + MediaType: DockerV2Schema2LayerMediaType, + CompressionOperation: types.Decompress, + CryptoOperation: types.Decrypt, + }, + }, + expectedFixture: "", // Decryption is not supported }, - }) - assert.Nil(t, err) - - updatedManifestBytes, err := origManifest.Serialize() - assert.Nil(t, err) - - expectedManifest := manifestSchema2FromFixture(t, "v2s2.nondistributable.gzip.manifest.json") - expectedManifestBytes, err := expectedManifest.Serialize() - assert.Nil(t, err) - - assert.Equal(t, string(expectedManifestBytes), string(updatedManifestBytes)) -} + } { + manifest := manifestSchema2FromFixture(t, c.sourceFixture) -func TestUpdateLayerInfosV2S2NondistributableGzipToUncompressed(t *testing.T) { - origManifest := manifestSchema2FromFixture(t, "v2s2.nondistributable.gzip.manifest.json") - - err := origManifest.UpdateLayerInfos([]types.BlobInfo{ - { - Digest: "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f", - 
Size: 32654, - MediaType: DockerV2Schema2ForeignLayerMediaType, - CompressionOperation: types.Decompress, - }, - }) - assert.Nil(t, err) + err := manifest.UpdateLayerInfos(c.updates) + if c.expectedFixture == "" { + assert.Error(t, err, c.name) + } else { + require.NoError(t, err, c.name) - updatedManifestBytes, err := origManifest.Serialize() - assert.Nil(t, err) + updatedManifestBytes, err := manifest.Serialize() + require.NoError(t, err, c.name) - expectedManifest := manifestSchema2FromFixture(t, "v2s2.nondistributable.manifest.json") - expectedManifestBytes, err := expectedManifest.Serialize() - assert.Nil(t, err) + expectedManifest := manifestSchema2FromFixture(t, c.expectedFixture) + expectedManifestBytes, err := expectedManifest.Serialize() + require.NoError(t, err, c.name) - assert.Equal(t, string(expectedManifestBytes), string(updatedManifestBytes)) + assert.Equal(t, string(expectedManifestBytes), string(updatedManifestBytes), c.name) + } + } } func TestSchema2ImageID(t *testing.T) { diff --git a/manifest/fixtures/ociv1.encrypted.manifest.json b/manifest/fixtures/ociv1.encrypted.manifest.json new file mode 100644 index 0000000000..a4fa87f6d9 --- /dev/null +++ b/manifest/fixtures/ociv1.encrypted.manifest.json @@ -0,0 +1,39 @@ +{ + "schemaVersion": 2, + "mediaType": "application/vnd.oci.image.manifest.v1+json", + "config": { + "mediaType": "application/vnd.oci.image.config.v1+json", + "digest": "sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7", + "size": 7023 + }, + "layers": [ + { + "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip+encrypted", + "digest": "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + "size": 32654, + "annotations": { + "org.opencontainers.image.enc.…": "layer1" + } + }, + { + "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip+encrypted", + "digest": "sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", + "size": 16724, + "annotations": { + 
"org.opencontainers.image.enc.…": "layer2" + } + }, + { + "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip+encrypted", + "digest": "sha256:cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc", + "size": 73109, + "annotations": { + "org.opencontainers.image.enc.…": "layer2" + } + } + ], + "annotations": { + "com.example.key1": "value1", + "com.example.key2": "value2" + } +} \ No newline at end of file diff --git a/manifest/manifest.go b/manifest/manifest.go index 959aac935e..828b8da0b7 100644 --- a/manifest/manifest.go +++ b/manifest/manifest.go @@ -16,7 +16,7 @@ import ( const ( // DockerV2Schema1MediaType MIME type represents Docker manifest schema 1 DockerV2Schema1MediaType = manifest.DockerV2Schema1MediaType - // DockerV2Schema1MediaType MIME type represents Docker manifest schema 1 with a JWS signature + // DockerV2Schema1SignedMediaType MIME type represents Docker manifest schema 1 with a JWS signature DockerV2Schema1SignedMediaType = manifest.DockerV2Schema1SignedMediaType // DockerV2Schema2MediaType MIME type represents Docker manifest schema 2 DockerV2Schema2MediaType = manifest.DockerV2Schema2MediaType diff --git a/manifest/oci.go b/manifest/oci.go index a70470d99a..6d5acb45d8 100644 --- a/manifest/oci.go +++ b/manifest/oci.go @@ -9,6 +9,7 @@ import ( compressiontypes "github.com/containers/image/v5/pkg/compression/types" "github.com/containers/image/v5/types" ociencspec "github.com/containers/ocicrypt/spec" + chunkedToc "github.com/containers/storage/pkg/chunked/toc" "github.com/opencontainers/go-digest" "github.com/opencontainers/image-spec/specs-go" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" @@ -202,7 +203,7 @@ func (m *OCI1) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*type // Most software calling this without human intervention is going to expect the values to be realistic and relevant, // and is probably better served by failing; we can always re-visit that later if we fail now, but // if we 
started returning some data for OCI artifacts now, we couldn’t start failing in this function later. - return nil, manifest.NewNonImageArtifactError(m.Config.MediaType) + return nil, manifest.NewNonImageArtifactError(&m.Manifest) } config, err := configGetter(m.ConfigInfo()) @@ -235,7 +236,7 @@ func (m *OCI1) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*type } // ImageID computes an ID which can uniquely identify this image by its contents. -func (m *OCI1) ImageID([]digest.Digest) (string, error) { +func (m *OCI1) ImageID(diffIDs []digest.Digest) (string, error) { // The way m.Config.Digest “uniquely identifies” an image is // by containing RootFS.DiffIDs, which identify the layers of the image. // For non-image artifacts, the we can’t expect the config to change @@ -253,15 +254,50 @@ func (m *OCI1) ImageID([]digest.Digest) (string, error) { // (The only known caller of ImageID is storage/storageImageDestination.computeID, // which can’t work with non-image artifacts.) if m.Config.MediaType != imgspecv1.MediaTypeImageConfig { - return "", manifest.NewNonImageArtifactError(m.Config.MediaType) + return "", manifest.NewNonImageArtifactError(&m.Manifest) } if err := m.Config.Digest.Validate(); err != nil { return "", err } + + // If there is any layer that is using partial content, we calculate the image ID + // in a different way since the diffID cannot be validated as for regular pulled images. 
+ for _, layer := range m.Layers { + toc, err := chunkedToc.GetTOCDigest(layer.Annotations) + if err != nil { + return "", fmt.Errorf("error looking up annotation for layer %q: %w", layer.Digest, err) + } + if toc != nil { + return m.calculateImageIDForPartialImage(diffIDs) + } + } + return m.Config.Digest.Hex(), nil } +func (m *OCI1) calculateImageIDForPartialImage(diffIDs []digest.Digest) (string, error) { + newID := digest.Canonical.Digester() + for i, layer := range m.Layers { + diffID := diffIDs[i] + _, err := newID.Hash().Write([]byte(diffID.Hex())) + if err != nil { + return "", fmt.Errorf("error writing diffID %q: %w", diffID, err) + } + toc, err := chunkedToc.GetTOCDigest(layer.Annotations) + if err != nil { + return "", fmt.Errorf("error looking up annotation for layer %q: %w", layer.Digest, err) + } + if toc != nil { + _, err = newID.Hash().Write([]byte(toc.Hex())) + if err != nil { + return "", fmt.Errorf("error writing TOC %q: %w", toc, err) + } + } + } + return newID.Digest().Hex(), nil +} + // CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image // (and the code can handle that). 
// NOTE: Even if this returns true, the relevant format might not accept all compression algorithms; the set of accepted diff --git a/manifest/oci_test.go b/manifest/oci_test.go index 470724226b..e27275b7f3 100644 --- a/manifest/oci_test.go +++ b/manifest/oci_test.go @@ -69,279 +69,299 @@ func TestOCI1FromManifest(t *testing.T) { testValidManifestWithExtraFieldsIsRejected(t, parser, validManifest, []string{"fsLayers", "history", "manifests"}) } -func TestUpdateLayerInfosOCIGzipToZstd(t *testing.T) { - manifest := manifestOCI1FromFixture(t, "ociv1.manifest.json") - - err := manifest.UpdateLayerInfos([]types.BlobInfo{ - { - Digest: "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f", - Size: 32654, - MediaType: imgspecv1.MediaTypeImageLayerGzip, - CompressionOperation: types.Compress, - CompressionAlgorithm: &compression.Zstd, - }, - { - Digest: "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b", - Size: 16724, - MediaType: imgspecv1.MediaTypeImageLayerGzip, - CompressionOperation: types.Compress, - CompressionAlgorithm: &compression.Zstd, - }, - { - Digest: "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736", - Size: 73109, - MediaType: imgspecv1.MediaTypeImageLayerGzip, - CompressionOperation: types.Compress, - CompressionAlgorithm: &compression.Zstd, - }, - }) - assert.Nil(t, err) - - updatedManifestBytes, err := manifest.Serialize() - assert.Nil(t, err) - - expectedManifest := manifestOCI1FromFixture(t, "ociv1.zstd.manifest.json") - expectedManifestBytes, err := expectedManifest.Serialize() - assert.Nil(t, err) - - assert.Equal(t, string(expectedManifestBytes), string(updatedManifestBytes)) -} - -func TestUpdateLayerInfosOCIZstdToGzip(t *testing.T) { - manifest := manifestOCI1FromFixture(t, "ociv1.zstd.manifest.json") - - err := manifest.UpdateLayerInfos([]types.BlobInfo{ - { - Digest: "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f", - Size: 32654, - MediaType: 
imgspecv1.MediaTypeImageLayerZstd, - CompressionOperation: types.Compress, - CompressionAlgorithm: &compression.Gzip, - }, - { - Digest: "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b", - Size: 16724, - MediaType: imgspecv1.MediaTypeImageLayerZstd, - CompressionOperation: types.Compress, - CompressionAlgorithm: &compression.Gzip, - }, - { - Digest: "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736", - Size: 73109, - MediaType: imgspecv1.MediaTypeImageLayerZstd, - CompressionOperation: types.Compress, - CompressionAlgorithm: &compression.Gzip, - }, - }) - assert.Nil(t, err) - - updatedManifestBytes, err := manifest.Serialize() - assert.Nil(t, err) - - expectedManifest := manifestOCI1FromFixture(t, "ociv1.manifest.json") - expectedManifestBytes, err := expectedManifest.Serialize() - assert.Nil(t, err) - - assert.Equal(t, string(expectedManifestBytes), string(updatedManifestBytes)) -} - -func TestUpdateLayerInfosOCIZstdToUncompressed(t *testing.T) { - manifest := manifestOCI1FromFixture(t, "ociv1.zstd.manifest.json") - - err := manifest.UpdateLayerInfos([]types.BlobInfo{ - { - Digest: "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f", - Size: 32654, - MediaType: imgspecv1.MediaTypeImageLayerZstd, - CompressionOperation: types.Decompress, - }, - { - Digest: "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b", - Size: 16724, - MediaType: imgspecv1.MediaTypeImageLayerZstd, - CompressionOperation: types.Decompress, - }, - { - Digest: "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736", - Size: 73109, - MediaType: imgspecv1.MediaTypeImageLayerZstd, - CompressionOperation: types.Decompress, - }, - }) - assert.Nil(t, err) - - updatedManifestBytes, err := manifest.Serialize() - assert.Nil(t, err) - - expectedManifest := manifestOCI1FromFixture(t, "ociv1.uncompressed.manifest.json") - expectedManifestBytes, err := expectedManifest.Serialize() - assert.Nil(t, 
err) - - assert.Equal(t, string(expectedManifestBytes), string(updatedManifestBytes)) -} +func TestOCI1UpdateLayerInfos(t *testing.T) { + customCompression := compression.Algorithm{} -func TestUpdateLayerInfosInvalidCompressionOperation(t *testing.T) { - manifest := manifestOCI1FromFixture(t, "ociv1.zstd.manifest.json") - err := manifest.UpdateLayerInfos([]types.BlobInfo{ + for _, c := range []struct { + name string + sourceFixture string + updates []types.BlobInfo + expectedFixture string // or "" to indicate an expected failure + }{ { - Digest: "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f", - Size: 32654, - MediaType: imgspecv1.MediaTypeImageLayerZstd, - CompressionOperation: types.Compress, - CompressionAlgorithm: &compression.Gzip, + name: "gzip → zstd", + sourceFixture: "ociv1.manifest.json", + updates: []types.BlobInfo{ + { + Digest: "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f", + Size: 32654, + MediaType: imgspecv1.MediaTypeImageLayerGzip, + CompressionOperation: types.Compress, + CompressionAlgorithm: &compression.Zstd, + }, + { + Digest: "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b", + Size: 16724, + MediaType: imgspecv1.MediaTypeImageLayerGzip, + CompressionOperation: types.Compress, + CompressionAlgorithm: &compression.Zstd, + }, + { + Digest: "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736", + Size: 73109, + MediaType: imgspecv1.MediaTypeImageLayerGzip, + CompressionOperation: types.Compress, + CompressionAlgorithm: &compression.Zstd, + }, + }, + expectedFixture: "ociv1.zstd.manifest.json", }, { - Digest: "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b", - Size: 16724, - MediaType: imgspecv1.MediaTypeImageLayerZstd, - CompressionOperation: 42, // MUST fail here - CompressionAlgorithm: &compression.Gzip, + name: "zstd → gzip", + sourceFixture: "ociv1.zstd.manifest.json", + updates: []types.BlobInfo{ + { + Digest: 
"sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f", + Size: 32654, + MediaType: imgspecv1.MediaTypeImageLayerZstd, + CompressionOperation: types.Compress, + CompressionAlgorithm: &compression.Gzip, + }, + { + Digest: "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b", + Size: 16724, + MediaType: imgspecv1.MediaTypeImageLayerZstd, + CompressionOperation: types.Compress, + CompressionAlgorithm: &compression.Gzip, + }, + { + Digest: "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736", + Size: 73109, + MediaType: imgspecv1.MediaTypeImageLayerZstd, + CompressionOperation: types.Compress, + CompressionAlgorithm: &compression.Gzip, + }, + }, + expectedFixture: "ociv1.manifest.json", }, { - Digest: "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736", - Size: 73109, - MediaType: imgspecv1.MediaTypeImageLayerZstd, - CompressionOperation: types.Compress, - CompressionAlgorithm: &compression.Gzip, + name: "zstd → uncompressed", + sourceFixture: "ociv1.zstd.manifest.json", + updates: []types.BlobInfo{ + { + Digest: "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f", + Size: 32654, + MediaType: imgspecv1.MediaTypeImageLayerZstd, + CompressionOperation: types.Decompress, + }, + { + Digest: "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b", + Size: 16724, + MediaType: imgspecv1.MediaTypeImageLayerZstd, + CompressionOperation: types.Decompress, + }, + { + Digest: "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736", + Size: 73109, + MediaType: imgspecv1.MediaTypeImageLayerZstd, + CompressionOperation: types.Decompress, + }, + }, + expectedFixture: "ociv1.uncompressed.manifest.json", }, - }) - assert.NotNil(t, err) -} - -func TestUpdateLayerInfosInvalidCompressionAlgorithm(t *testing.T) { - manifest := manifestOCI1FromFixture(t, "ociv1.zstd.manifest.json") - - customCompression := compression.Algorithm{} - err := 
manifest.UpdateLayerInfos([]types.BlobInfo{ { - Digest: "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f", - Size: 32654, - MediaType: imgspecv1.MediaTypeImageLayerZstd, - CompressionOperation: types.Compress, - CompressionAlgorithm: &compression.Gzip, + name: "invalid compression operation", + sourceFixture: "ociv1.zstd.manifest.json", + updates: []types.BlobInfo{ + { + Digest: "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f", + Size: 32654, + MediaType: imgspecv1.MediaTypeImageLayerZstd, + CompressionOperation: types.Compress, + CompressionAlgorithm: &compression.Gzip, + }, + { + Digest: "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b", + Size: 16724, + MediaType: imgspecv1.MediaTypeImageLayerZstd, + CompressionOperation: 42, // MUST fail here + CompressionAlgorithm: &compression.Gzip, + }, + { + Digest: "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736", + Size: 73109, + MediaType: imgspecv1.MediaTypeImageLayerZstd, + CompressionOperation: types.Compress, + CompressionAlgorithm: &compression.Gzip, + }, + }, + expectedFixture: "", }, { - Digest: "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b", - Size: 16724, - MediaType: imgspecv1.MediaTypeImageLayerZstd, - CompressionOperation: 42, - CompressionAlgorithm: &compression.Gzip, + name: "invalid compression algorithm", + sourceFixture: "ociv1.zstd.manifest.json", + updates: []types.BlobInfo{ + { + Digest: "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f", + Size: 32654, + MediaType: imgspecv1.MediaTypeImageLayerZstd, + CompressionOperation: types.Compress, + CompressionAlgorithm: &compression.Gzip, + }, + { + Digest: "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b", + Size: 16724, + MediaType: imgspecv1.MediaTypeImageLayerZstd, + CompressionOperation: 42, + CompressionAlgorithm: &compression.Gzip, + }, + { + Digest: 
"sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736", + Size: 73109, + MediaType: imgspecv1.MediaTypeImageLayerZstd, + CompressionOperation: types.Compress, + CompressionAlgorithm: &customCompression, // MUST fail here + }, + }, + expectedFixture: "", }, { - Digest: "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736", - Size: 73109, - MediaType: imgspecv1.MediaTypeImageLayerZstd, - CompressionOperation: types.Compress, - CompressionAlgorithm: &customCompression, // MUST fail here + name: "gzip → uncompressed", + sourceFixture: "ociv1.manifest.json", + updates: []types.BlobInfo{ + { + Digest: "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f", + Size: 32654, + MediaType: imgspecv1.MediaTypeImageLayerGzip, + CompressionOperation: types.Decompress, + }, + { + Digest: "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b", + Size: 16724, + MediaType: imgspecv1.MediaTypeImageLayerGzip, + CompressionOperation: types.Decompress, + }, + { + Digest: "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736", + Size: 73109, + MediaType: imgspecv1.MediaTypeImageLayerGzip, + CompressionOperation: types.Decompress, + }, + }, + expectedFixture: "ociv1.uncompressed.manifest.json", }, - }) - assert.NotNil(t, err) -} - -func TestUpdateLayerInfosOCIGzipToUncompressed(t *testing.T) { - manifest := manifestOCI1FromFixture(t, "ociv1.manifest.json") - - err := manifest.UpdateLayerInfos([]types.BlobInfo{ { - Digest: "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f", - Size: 32654, - MediaType: imgspecv1.MediaTypeImageLayerGzip, - CompressionOperation: types.Decompress, + name: "nondistributable → gzip", + sourceFixture: "ociv1.nondistributable.manifest.json", + updates: []types.BlobInfo{ + { + Digest: "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f", + Size: 32654, + MediaType: imgspecv1.MediaTypeImageLayerGzip, + CompressionOperation: 
types.Compress, + CompressionAlgorithm: &compression.Gzip, + }, + }, + expectedFixture: "ociv1.nondistributable.gzip.manifest.json", }, { - Digest: "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b", - Size: 16724, - MediaType: imgspecv1.MediaTypeImageLayerGzip, - CompressionOperation: types.Decompress, + name: "nondistributable → zstd", + sourceFixture: "ociv1.nondistributable.manifest.json", + updates: []types.BlobInfo{ + { + Digest: "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f", + Size: 32654, + MediaType: imgspecv1.MediaTypeImageLayerGzip, + CompressionOperation: types.Compress, + CompressionAlgorithm: &compression.Zstd, + }, + }, + expectedFixture: "ociv1.nondistributable.zstd.manifest.json", }, { - Digest: "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736", - Size: 73109, - MediaType: imgspecv1.MediaTypeImageLayerGzip, - CompressionOperation: types.Decompress, + name: "nondistributable gzip → uncompressed", + sourceFixture: "ociv1.nondistributable.gzip.manifest.json", + updates: []types.BlobInfo{ + { + Digest: "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f", + Size: 32654, + MediaType: imgspecv1.MediaTypeImageLayerGzip, + CompressionOperation: types.Decompress, + }, + }, + expectedFixture: "ociv1.nondistributable.manifest.json", }, - }) - assert.Nil(t, err) - - updatedManifestBytes, err := manifest.Serialize() - assert.Nil(t, err) - - expectedManifest := manifestOCI1FromFixture(t, "ociv1.uncompressed.manifest.json") - expectedManifestBytes, err := expectedManifest.Serialize() - assert.Nil(t, err) - - assert.Equal(t, string(expectedManifestBytes), string(updatedManifestBytes)) -} - -func TestUpdateLayerInfosOCINondistributableToGzip(t *testing.T) { - manifest := manifestOCI1FromFixture(t, "ociv1.nondistributable.manifest.json") - - err := manifest.UpdateLayerInfos([]types.BlobInfo{ { - Digest: 
"sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f", - Size: 32654, - MediaType: imgspecv1.MediaTypeImageLayerGzip, - CompressionOperation: types.Compress, - CompressionAlgorithm: &compression.Gzip, + name: "uncompressed → gzip encrypted", + sourceFixture: "ociv1.uncompressed.manifest.json", + updates: []types.BlobInfo{ + { + Digest: "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + Size: 32654, + Annotations: map[string]string{"org.opencontainers.image.enc.…": "layer1"}, + MediaType: imgspecv1.MediaTypeImageLayer, + CompressionOperation: types.Compress, + CompressionAlgorithm: &compression.Gzip, + CryptoOperation: types.Encrypt, + }, + { + Digest: "sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", + Size: 16724, + Annotations: map[string]string{"org.opencontainers.image.enc.…": "layer2"}, + MediaType: imgspecv1.MediaTypeImageLayer, + CompressionOperation: types.Compress, + CompressionAlgorithm: &compression.Gzip, + CryptoOperation: types.Encrypt, + }, + { + Digest: "sha256:cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc", + Size: 73109, + Annotations: map[string]string{"org.opencontainers.image.enc.…": "layer2"}, + MediaType: imgspecv1.MediaTypeImageLayer, + CompressionOperation: types.Compress, + CompressionAlgorithm: &compression.Gzip, + CryptoOperation: types.Encrypt, + }, + }, + expectedFixture: "ociv1.encrypted.manifest.json", }, - }) - assert.Nil(t, err) - - updatedManifestBytes, err := manifest.Serialize() - assert.Nil(t, err) - - expectedManifest := manifestOCI1FromFixture(t, "ociv1.nondistributable.gzip.manifest.json") - expectedManifestBytes, err := expectedManifest.Serialize() - assert.Nil(t, err) - - assert.Equal(t, string(expectedManifestBytes), string(updatedManifestBytes)) -} - -func TestUpdateLayerInfosOCINondistributableToZstd(t *testing.T) { - manifest := manifestOCI1FromFixture(t, "ociv1.nondistributable.manifest.json") - - err := 
manifest.UpdateLayerInfos([]types.BlobInfo{ { - Digest: "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f", - Size: 32654, - MediaType: imgspecv1.MediaTypeImageLayerGzip, - CompressionOperation: types.Compress, - CompressionAlgorithm: &compression.Zstd, + name: "gzip encrypted → uncompressed decrypted", + sourceFixture: "ociv1.encrypted.manifest.json", + updates: []types.BlobInfo{ + { + Digest: "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f", + Size: 32654, + MediaType: "application/vnd.oci.image.layer.v1.tar+gzip+encrypted", + CompressionOperation: types.Decompress, + CryptoOperation: types.Decrypt, + }, + { + Digest: "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b", + Size: 16724, + MediaType: "application/vnd.oci.image.layer.v1.tar+gzip+encrypted", + CompressionOperation: types.Decompress, + CryptoOperation: types.Decrypt, + }, + { + Digest: "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736", + Size: 73109, + MediaType: "application/vnd.oci.image.layer.v1.tar+gzip+encrypted", + CompressionOperation: types.Decompress, + CryptoOperation: types.Decrypt, + }, + }, + expectedFixture: "ociv1.uncompressed.manifest.json", }, - }) - assert.Nil(t, err) - - updatedManifestBytes, err := manifest.Serialize() - assert.Nil(t, err) - - expectedManifest := manifestOCI1FromFixture(t, "ociv1.nondistributable.zstd.manifest.json") - expectedManifestBytes, err := expectedManifest.Serialize() - assert.Nil(t, err) + } { + manifest := manifestOCI1FromFixture(t, c.sourceFixture) - assert.Equal(t, string(expectedManifestBytes), string(updatedManifestBytes)) -} - -func TestUpdateLayerInfosOCINondistributableGzipToUncompressed(t *testing.T) { - manifest := manifestOCI1FromFixture(t, "ociv1.nondistributable.gzip.manifest.json") - - err := manifest.UpdateLayerInfos([]types.BlobInfo{ - { - Digest: "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f", - Size: 32654, - MediaType: 
imgspecv1.MediaTypeImageLayerGzip, - CompressionOperation: types.Decompress, - }, - }) - assert.Nil(t, err) + err := manifest.UpdateLayerInfos(c.updates) + if c.expectedFixture == "" { + assert.Error(t, err, c.name) + } else { + require.NoError(t, err, c.name) - updatedManifestBytes, err := manifest.Serialize() - assert.Nil(t, err) + updatedManifestBytes, err := manifest.Serialize() + require.NoError(t, err, c.name) - expectedManifest := manifestOCI1FromFixture(t, "ociv1.nondistributable.manifest.json") - expectedManifestBytes, err := expectedManifest.Serialize() - assert.Nil(t, err) + expectedManifest := manifestOCI1FromFixture(t, c.expectedFixture) + expectedManifestBytes, err := expectedManifest.Serialize() + require.NoError(t, err, c.name) - assert.Equal(t, string(expectedManifestBytes), string(updatedManifestBytes)) + assert.Equal(t, string(expectedManifestBytes), string(updatedManifestBytes), c.name) + } + } } func TestOCI1Inspect(t *testing.T) { diff --git a/oci/archive/oci_dest.go b/oci/archive/oci_dest.go index 8386c47a3f..6ca618e351 100644 --- a/oci/archive/oci_dest.go +++ b/oci/archive/oci_dest.go @@ -13,6 +13,7 @@ import ( "github.com/containers/image/v5/internal/signature" "github.com/containers/image/v5/types" "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/idtools" digest "github.com/opencontainers/go-digest" "github.com/sirupsen/logrus" ) @@ -169,10 +170,15 @@ func (d *ociArchiveImageDestination) Commit(ctx context.Context, unparsedTopleve // tar converts the directory at src and saves it to dst func tarDirectory(src, dst string) error { // input is a stream of bytes from the archive of the directory at path - input, err := archive.Tar(src, archive.Uncompressed) + input, err := archive.TarWithOptions(src, &archive.TarOptions{ + Compression: archive.Uncompressed, + // Don’t include the data about the user account this code is running under. 
+ ChownOpts: &idtools.IDPair{UID: 0, GID: 0}, + }) if err != nil { return fmt.Errorf("retrieving stream of bytes from %q: %w", src, err) } + defer input.Close() // creates the tar file outFile, err := os.Create(dst) diff --git a/oci/archive/oci_dest_test.go b/oci/archive/oci_dest_test.go index a67112cf30..6284e4834f 100644 --- a/oci/archive/oci_dest_test.go +++ b/oci/archive/oci_dest_test.go @@ -1,5 +1,45 @@ package archive -import "github.com/containers/image/v5/internal/private" +import ( + "archive/tar" + "io" + "os" + "path/filepath" + "testing" + + "github.com/containers/image/v5/internal/private" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) var _ private.ImageDestination = (*ociArchiveImageDestination)(nil) + +func TestTarDirectory(t *testing.T) { + srcDir := t.TempDir() + err := os.WriteFile(filepath.Join(srcDir, "regular"), []byte("contents"), 0o600) + require.NoError(t, err) + + dest := filepath.Join(t.TempDir(), "file.tar") + err = tarDirectory(srcDir, dest) + require.NoError(t, err) + + f, err := os.Open(dest) + require.NoError(t, err) + defer f.Close() + reader := tar.NewReader(f) + numItems := 0 + for { + hdr, err := reader.Next() + if err == io.EOF { + break + } + require.NoError(t, err) + // Test that the header does not expose data about the local account + assert.Equal(t, 0, hdr.Uid) + assert.Equal(t, 0, hdr.Gid) + assert.Empty(t, hdr.Uname) + assert.Empty(t, hdr.Gname) + numItems++ + } + assert.Equal(t, 1, numItems) +} diff --git a/oci/archive/oci_src.go b/oci/archive/oci_src.go index 6c9ee33402..ee8409896c 100644 --- a/oci/archive/oci_src.go +++ b/oci/archive/oci_src.go @@ -28,6 +28,18 @@ func (e ImageNotFoundError) Error() string { return fmt.Sprintf("no descriptor found for reference %q", e.ref.image) } +// ArchiveFileNotFoundError occurs when the archive file does not exist. 
+type ArchiveFileNotFoundError struct { + // ref is the image reference + ref ociArchiveReference + // path is the file path that was not present + path string +} + +func (e ArchiveFileNotFoundError) Error() string { + return fmt.Sprintf("archive file not found: %q", e.path) +} + type ociArchiveImageSource struct { impl.Compat diff --git a/oci/archive/oci_src_test.go b/oci/archive/oci_src_test.go index 6f00afc0cc..a78246ce97 100644 --- a/oci/archive/oci_src_test.go +++ b/oci/archive/oci_src_test.go @@ -1,5 +1,26 @@ package archive -import "github.com/containers/image/v5/internal/private" +import ( + "path/filepath" + "testing" + + "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) var _ private.ImageSource = (*ociArchiveImageSource)(nil) + +func TestNewImageSourceNotFound(t *testing.T) { + sysctx := types.SystemContext{} + emptyDir := t.TempDir() + archivePath := filepath.Join(emptyDir, "foo.ociarchive") + imgref, err := ParseReference(archivePath) + require.NoError(t, err) + _, err = LoadManifestDescriptorWithContext(&sysctx, imgref) + assert.NotNil(t, err) + var aerr ArchiveFileNotFoundError + assert.ErrorAs(t, err, &aerr) + assert.Equal(t, aerr.path, archivePath) +} diff --git a/oci/archive/oci_transport.go b/oci/archive/oci_transport.go index 2a03feeeac..d5fee36310 100644 --- a/oci/archive/oci_transport.go +++ b/oci/archive/oci_transport.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "io/fs" "os" "strings" @@ -171,18 +172,24 @@ func createOCIRef(sys *types.SystemContext, image string) (tempDirOCIRef, error) // creates the temporary directory and copies the tarred content to it func createUntarTempDir(sys *types.SystemContext, ref ociArchiveReference) (tempDirOCIRef, error) { + src := ref.resolvedFile + arch, err := os.Open(src) + if err != nil { + if errors.Is(err, fs.ErrNotExist) { + return tempDirOCIRef{}, 
ArchiveFileNotFoundError{ref: ref, path: src} + } else { + return tempDirOCIRef{}, err + } + } + defer arch.Close() + tempDirRef, err := createOCIRef(sys, ref.image) if err != nil { return tempDirOCIRef{}, fmt.Errorf("creating oci reference: %w", err) } - src := ref.resolvedFile dst := tempDirRef.tempDirectory + // TODO: This can take quite some time, and should ideally be cancellable using a context.Context. - arch, err := os.Open(src) - if err != nil { - return tempDirOCIRef{}, err - } - defer arch.Close() if err := archive.NewDefaultArchiver().Untar(arch, dst, &archive.TarOptions{NoLchown: true}); err != nil { if err := tempDirRef.deleteTempDir(); err != nil { return tempDirOCIRef{}, fmt.Errorf("deleting temp directory %q: %w", tempDirRef.tempDirectory, err) diff --git a/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/02ea786cb1ff44d997661886a4186cbd8a1dc466938712bf7281379209476022 b/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/02ea786cb1ff44d997661886a4186cbd8a1dc466938712bf7281379209476022 new file mode 100644 index 0000000000..add1797963 --- /dev/null +++ b/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/02ea786cb1ff44d997661886a4186cbd8a1dc466938712bf7281379209476022 @@ -0,0 +1 @@ +insert binary content here #26559 diff --git a/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/0dc27f36a618c110ae851662c13283e9fbc1b5a5de003befc4bcefa5a05d2eef b/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/0dc27f36a618c110ae851662c13283e9fbc1b5a5de003befc4bcefa5a05d2eef new file mode 100644 index 0000000000..b3a7a96cd0 --- /dev/null +++ b/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/0dc27f36a618c110ae851662c13283e9fbc1b5a5de003befc4bcefa5a05d2eef @@ -0,0 +1,16 @@ +{ + "schemaVersion": 2, + "mediaType": "application/vnd.oci.image.manifest.v1+json", + "config": { + "mediaType": "application/vnd.oci.image.config.v1+json", + "digest": 
"sha256:913cf3a39d377faf89ed388ad913a318a390488c9f34c46e43424795cdabffe8", + "size": 585 + }, + "layers": [ + { + "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip", + "digest": "sha256:a6f737ac2b84bc463f2ff721af39588c69646c82f79f3808236178e02e35b922", + "size": 33 + } + ] +} \ No newline at end of file diff --git a/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/39c524417bb4228f9fcb0aef43a680b5fd6b9f3a1df2fd50509d047e47dad8be b/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/39c524417bb4228f9fcb0aef43a680b5fd6b9f3a1df2fd50509d047e47dad8be new file mode 100644 index 0000000000..8b7c8e3682 --- /dev/null +++ b/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/39c524417bb4228f9fcb0aef43a680b5fd6b9f3a1df2fd50509d047e47dad8be @@ -0,0 +1,16 @@ +{ + "schemaVersion": 2, + "mediaType": "application/vnd.oci.image.manifest.v1+json", + "config": { + "mediaType": "application/vnd.oci.image.config.v1+json", + "digest": "sha256:f732172ad8d2a666550fa3ec37a5153d59acc95744562ae64cf62ded46de101a", + "size": 583 + }, + "layers": [ + { + "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip", + "digest": "sha256:02ea786cb1ff44d997661886a4186cbd8a1dc466938712bf7281379209476022", + "size": 34 + } + ] +} \ No newline at end of file diff --git a/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/557ac7d133b7770216a8101268640edf4e88beab1b4e1e1bfc9b1891a1cab861 b/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/557ac7d133b7770216a8101268640edf4e88beab1b4e1e1bfc9b1891a1cab861 new file mode 100644 index 0000000000..19c1c1276f --- /dev/null +++ b/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/557ac7d133b7770216a8101268640edf4e88beab1b4e1e1bfc9b1891a1cab861 @@ -0,0 +1 @@ +insert binary content here #9811 diff --git a/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/5b2aba4d3c27bc6493633d0ec446b25c8d0a5c9cfe99894bcdff0aee80813805 
b/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/5b2aba4d3c27bc6493633d0ec446b25c8d0a5c9cfe99894bcdff0aee80813805 new file mode 100644 index 0000000000..aba2333ead --- /dev/null +++ b/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/5b2aba4d3c27bc6493633d0ec446b25c8d0a5c9cfe99894bcdff0aee80813805 @@ -0,0 +1,16 @@ +{ + "schemaVersion": 2, + "mediaType": "application/vnd.oci.image.manifest.v1+json", + "config": { + "mediaType": "application/vnd.oci.image.config.v1+json", + "digest": "sha256:df11bc189adeb50dadb3291a3a7f2c34b36e0efdba0df70f2c8a2d761b215cde", + "size": 585 + }, + "layers": [ + { + "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip", + "digest": "sha256:986315a0e599fac2b80eb31db2124dab8d3de04d7ca98b254999bd913c1f73fe", + "size": 33 + } + ] +} \ No newline at end of file diff --git a/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/7ffdfe7d276286b39a203dcc247949cf47c91d2d5e10a53a675c0962ed9e4402 b/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/7ffdfe7d276286b39a203dcc247949cf47c91d2d5e10a53a675c0962ed9e4402 new file mode 100644 index 0000000000..f21c274635 --- /dev/null +++ b/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/7ffdfe7d276286b39a203dcc247949cf47c91d2d5e10a53a675c0962ed9e4402 @@ -0,0 +1 @@ +{"created":"2023-08-07T19:38:34.915445772Z","architecture":"386","os":"linux","config":{"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":["/bin/sh"]},"rootfs":{"type":"layers","diff_ids":["sha256:53bfdd548f8566a059cd188348b202a50fb9d39ce80eb5b8f0c670dfa9bc6569"]},"history":[{"created":"2023-08-07T19:38:34.803529816Z","created_by":"/bin/sh -c #(nop) ADD file:c06b4f6991638e506d4d0a4d70c4a78ba30b971767802af4c6b837cdf59d4303 in / "},{"created":"2023-08-07T19:38:34.915445772Z","created_by":"/bin/sh -c #(nop) CMD [\"/bin/sh\"]","empty_layer":true}]} \ No newline at end of file diff --git 
a/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/861d3c014b0e3edcf80e6221247d6b2921a4f892feb9bafe9515b9975b78c44f b/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/861d3c014b0e3edcf80e6221247d6b2921a4f892feb9bafe9515b9975b78c44f new file mode 100644 index 0000000000..85617bd7ac --- /dev/null +++ b/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/861d3c014b0e3edcf80e6221247d6b2921a4f892feb9bafe9515b9975b78c44f @@ -0,0 +1,24 @@ +{ + "schemaVersion": 2, + "mediaType": "application/vnd.oci.image.index.v1+json", + "manifests": [ + { + "mediaType": "application/vnd.oci.image.manifest.v1+json", + "digest": "sha256:39c524417bb4228f9fcb0aef43a680b5fd6b9f3a1df2fd50509d047e47dad8be", + "size": 525, + "platform": { + "architecture": "amd64", + "os": "linux" + } + }, + { + "mediaType": "application/vnd.oci.image.manifest.v1+json", + "digest": "sha256:be6036f9b6a4e120a04868c47f1b8674f58b2fe5e410cba9f585a13ca8946cf0", + "size": 525, + "platform": { + "architecture": "386", + "os": "linux" + } + } + ] +} \ No newline at end of file diff --git a/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/913cf3a39d377faf89ed388ad913a318a390488c9f34c46e43424795cdabffe8 b/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/913cf3a39d377faf89ed388ad913a318a390488c9f34c46e43424795cdabffe8 new file mode 100644 index 0000000000..ebe323d4df --- /dev/null +++ b/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/913cf3a39d377faf89ed388ad913a318a390488c9f34c46e43424795cdabffe8 @@ -0,0 +1 @@ +{"created":"2023-08-07T19:20:20.894140623Z","architecture":"amd64","os":"linux","config":{"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":["/bin/sh"]},"rootfs":{"type":"layers","diff_ids":["sha256:4693057ce2364720d39e57e85a5b8e0bd9ac3573716237736d6470ec5b7b7230"]},"history":[{"created":"2023-08-07T19:20:20.71894984Z","created_by":"/bin/sh -c #(nop) ADD 
file:32ff5e7a78b890996ee4681cc0a26185d3e9acdb4eb1e2aaccb2411f922fed6b in / "},{"created":"2023-08-07T19:20:20.894140623Z","created_by":"/bin/sh -c #(nop) CMD [\"/bin/sh\"]","empty_layer":true}]} \ No newline at end of file diff --git a/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/93cbd11a4f41467a0409b975499ae711bc6f8222de38d9f1b5a4097583195ad5 b/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/93cbd11a4f41467a0409b975499ae711bc6f8222de38d9f1b5a4097583195ad5 new file mode 100644 index 0000000000..ccf025c98f --- /dev/null +++ b/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/93cbd11a4f41467a0409b975499ae711bc6f8222de38d9f1b5a4097583195ad5 @@ -0,0 +1,16 @@ +{ + "schemaVersion": 2, + "mediaType": "application/vnd.oci.image.manifest.v1+json", + "config": { + "mediaType": "application/vnd.oci.image.config.v1+json", + "digest": "sha256:913cf3a39d377faf89ed388ad913a318a390488c9f34c46e43424795cdabffe8", + "size": 584 + }, + "layers": [ + { + "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip", + "digest": "sha256:557ac7d133b7770216a8101268640edf4e88beab1b4e1e1bfc9b1891a1cab861", + "size": 33 + } + ] +} \ No newline at end of file diff --git a/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/986315a0e599fac2b80eb31db2124dab8d3de04d7ca98b254999bd913c1f73fe b/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/986315a0e599fac2b80eb31db2124dab8d3de04d7ca98b254999bd913c1f73fe new file mode 100644 index 0000000000..a0cd5aab0e --- /dev/null +++ b/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/986315a0e599fac2b80eb31db2124dab8d3de04d7ca98b254999bd913c1f73fe @@ -0,0 +1 @@ +insert binary content here #7959 diff --git a/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/a2f798327b3f25e3eff54badcb769953de235e62e3e32051d57a5e66246de4a1 b/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/a2f798327b3f25e3eff54badcb769953de235e62e3e32051d57a5e66246de4a1 new file mode 100644 index 
0000000000..aeecdfac4e --- /dev/null +++ b/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/a2f798327b3f25e3eff54badcb769953de235e62e3e32051d57a5e66246de4a1 @@ -0,0 +1,24 @@ +{ + "schemaVersion": 2, + "mediaType": "application/vnd.oci.image.index.v1+json", + "manifests": [ + { + "mediaType": "application/vnd.oci.image.manifest.v1+json", + "digest": "sha256:93cbd11a4f41467a0409b975499ae711bc6f8222de38d9f1b5a4097583195ad5", + "size": 525, + "platform": { + "architecture": "amd64", + "os": "linux" + } + }, + { + "mediaType": "application/vnd.oci.image.manifest.v1+json", + "digest": "sha256:f6d60fd529b234d3e28837e15294d935f55da58ce57c4f9218cad38d0be82ce3", + "size": 525, + "platform": { + "architecture": "386", + "os": "linux" + } + } + ] +} \ No newline at end of file diff --git a/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/a6f737ac2b84bc463f2ff721af39588c69646c82f79f3808236178e02e35b922 b/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/a6f737ac2b84bc463f2ff721af39588c69646c82f79f3808236178e02e35b922 new file mode 100644 index 0000000000..f26e504a8e --- /dev/null +++ b/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/a6f737ac2b84bc463f2ff721af39588c69646c82f79f3808236178e02e35b922 @@ -0,0 +1 @@ +insert binary content here #1234 diff --git a/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/aab808b283c3f654d84358a40ce8766ecd552249305141de88f0ca61f3d1368f b/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/aab808b283c3f654d84358a40ce8766ecd552249305141de88f0ca61f3d1368f new file mode 100644 index 0000000000..e1d45d3569 --- /dev/null +++ b/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/aab808b283c3f654d84358a40ce8766ecd552249305141de88f0ca61f3d1368f @@ -0,0 +1 @@ 
+{"created":"2023-08-07T19:38:27.007952531Z","architecture":"386","os":"linux","config":{"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":["/bin/sh"]},"rootfs":{"type":"layers","diff_ids":["sha256:f05b0759429ba12d5fda46c196f253cc1cab8f56cd874e9e7be674fc1b8337de"]},"history":[{"created":"2023-08-07T19:38:26.69689892Z","created_by":"/bin/sh -c #(nop) ADD file:4b33c52e11b19fde30197c62ead0b77bde28d34edaa08346a5302cd892d3cebe in / "},{"created":"2023-08-07T19:38:27.007952531Z","created_by":"/bin/sh -c #(nop) CMD [\"/bin/sh\"]","empty_layer":true}]} \ No newline at end of file diff --git a/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/be6036f9b6a4e120a04868c47f1b8674f58b2fe5e410cba9f585a13ca8946cf0 b/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/be6036f9b6a4e120a04868c47f1b8674f58b2fe5e410cba9f585a13ca8946cf0 new file mode 100644 index 0000000000..c2d027aa76 --- /dev/null +++ b/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/be6036f9b6a4e120a04868c47f1b8674f58b2fe5e410cba9f585a13ca8946cf0 @@ -0,0 +1,16 @@ +{ + "schemaVersion": 2, + "mediaType": "application/vnd.oci.image.manifest.v1+json", + "config": { + "mediaType": "application/vnd.oci.image.config.v1+json", + "digest": "sha256:7ffdfe7d276286b39a203dcc247949cf47c91d2d5e10a53a675c0962ed9e4402", + "size": 583 + }, + "layers": [ + { + "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip", + "digest": "sha256:e2f7e0374fd6a03d9c373f4d9a0c7802045cc3ddcc1433e89d83b81fa7007242", + "size": 33 + } + ] +} \ No newline at end of file diff --git a/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/df11bc189adeb50dadb3291a3a7f2c34b36e0efdba0df70f2c8a2d761b215cde b/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/df11bc189adeb50dadb3291a3a7f2c34b36e0efdba0df70f2c8a2d761b215cde new file mode 100644 index 0000000000..1ff4ad5415 --- /dev/null +++ 
b/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/df11bc189adeb50dadb3291a3a7f2c34b36e0efdba0df70f2c8a2d761b215cde @@ -0,0 +1 @@ +{"created":"2023-08-07T19:20:26.426857961Z","architecture":"amd64","os":"linux","config":{"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":["/bin/sh"]},"rootfs":{"type":"layers","diff_ids":["sha256:36b50b131297b8860da51b2d2b24bb4c08dfbdf2789b08e3cc0f187c98637a19"]},"history":[{"created":"2023-08-07T19:20:26.326707843Z","created_by":"/bin/sh -c #(nop) ADD file:6dd87346b8be240b21b4f4d9296253bf0d28b6579aa52d2118872e3936963b6b in / "},{"created":"2023-08-07T19:20:26.426857961Z","created_by":"/bin/sh -c #(nop) CMD [\"/bin/sh\"]","empty_layer":true}]} \ No newline at end of file diff --git a/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/e19729d5a968c71b4b691d60f4a6f85f93c303bb88635dcfef36e23b76cb7b3a b/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/e19729d5a968c71b4b691d60f4a6f85f93c303bb88635dcfef36e23b76cb7b3a new file mode 100644 index 0000000000..832c1185d8 --- /dev/null +++ b/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/e19729d5a968c71b4b691d60f4a6f85f93c303bb88635dcfef36e23b76cb7b3a @@ -0,0 +1 @@ +insert binary content here #28017 diff --git a/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/e2f7e0374fd6a03d9c373f4d9a0c7802045cc3ddcc1433e89d83b81fa7007242 b/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/e2f7e0374fd6a03d9c373f4d9a0c7802045cc3ddcc1433e89d83b81fa7007242 new file mode 100644 index 0000000000..a18eab8965 --- /dev/null +++ b/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/e2f7e0374fd6a03d9c373f4d9a0c7802045cc3ddcc1433e89d83b81fa7007242 @@ -0,0 +1 @@ +insert binary content here #4794 diff --git a/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/f6d60fd529b234d3e28837e15294d935f55da58ce57c4f9218cad38d0be82ce3 
b/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/f6d60fd529b234d3e28837e15294d935f55da58ce57c4f9218cad38d0be82ce3 new file mode 100644 index 0000000000..fb85ad20ac --- /dev/null +++ b/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/f6d60fd529b234d3e28837e15294d935f55da58ce57c4f9218cad38d0be82ce3 @@ -0,0 +1,16 @@ +{ + "schemaVersion": 2, + "mediaType": "application/vnd.oci.image.manifest.v1+json", + "config": { + "mediaType": "application/vnd.oci.image.config.v1+json", + "digest": "sha256:aab808b283c3f654d84358a40ce8766ecd552249305141de88f0ca61f3d1368f", + "size": 582 + }, + "layers": [ + { + "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip", + "digest": "sha256:e19729d5a968c71b4b691d60f4a6f85f93c303bb88635dcfef36e23b76cb7b3a", + "size": 34 + } + ] +} \ No newline at end of file diff --git a/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/f732172ad8d2a666550fa3ec37a5153d59acc95744562ae64cf62ded46de101a b/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/f732172ad8d2a666550fa3ec37a5153d59acc95744562ae64cf62ded46de101a new file mode 100644 index 0000000000..016b01bc3d --- /dev/null +++ b/oci/layout/fixtures/delete_image_multiple_images/blobs/sha256/f732172ad8d2a666550fa3ec37a5153d59acc95744562ae64cf62ded46de101a @@ -0,0 +1 @@ +{"created":"2023-08-07T19:20:31.99661329Z","architecture":"amd64","os":"linux","config":{"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":["/bin/sh"]},"rootfs":{"type":"layers","diff_ids":["sha256:0e182002b05f2ab123995821ef14f1cda765a0c31f7a6d260221558f6466535e"]},"history":[{"created":"2023-08-07T19:20:31.893185238Z","created_by":"/bin/sh -c #(nop) ADD file:76d829bbce3dd420a8419919b0916c0fda917011d1e6752ca5b9e53d5ca890a6 in / "},{"created":"2023-08-07T19:20:31.99661329Z","created_by":"/bin/sh -c #(nop) CMD [\"/bin/sh\"]","empty_layer":true}]} \ No newline at end of file diff --git a/oci/layout/fixtures/delete_image_multiple_images/index.json 
b/oci/layout/fixtures/delete_image_multiple_images/index.json new file mode 100644 index 0000000000..d781143f54 --- /dev/null +++ b/oci/layout/fixtures/delete_image_multiple_images/index.json @@ -0,0 +1,61 @@ +{ + "schemaVersion": 2, + "manifests": [ + { + "mediaType": "application/vnd.oci.image.index.v1+json", + "digest": "sha256:a2f798327b3f25e3eff54badcb769953de235e62e3e32051d57a5e66246de4a1", + "size": 759, + "annotations": { + "org.opencontainers.image.ref.name": "latest" + } + }, + { + "mediaType": "application/vnd.oci.image.index.v1+json", + "digest": "sha256:a2f798327b3f25e3eff54badcb769953de235e62e3e32051d57a5e66246de4a1", + "size": 759, + "annotations": { + "org.opencontainers.image.ref.name": "3.18.3" + } + }, + { + "mediaType": "application/vnd.oci.image.manifest.v1+json", + "digest": "sha256:93cbd11a4f41467a0409b975499ae711bc6f8222de38d9f1b5a4097583195ad5", + "size": 525, + "annotations": { + "org.opencontainers.image.ref.name": "3" + } + }, + { + "mediaType": "application/vnd.oci.image.manifest.v1+json", + "digest": "sha256:93cbd11a4f41467a0409b975499ae711bc6f8222de38d9f1b5a4097583195ad5", + "size": 525, + "annotations": { + "org.opencontainers.image.ref.name": "3.18" + } + }, + { + "mediaType": "application/vnd.oci.image.manifest.v1+json", + "digest": "sha256:5b2aba4d3c27bc6493633d0ec446b25c8d0a5c9cfe99894bcdff0aee80813805", + "size": 525, + "annotations": { + "org.opencontainers.image.ref.name": "3.17.5" + } + }, + { + "mediaType": "application/vnd.oci.image.index.v1+json", + "digest": "sha256:861d3c014b0e3edcf80e6221247d6b2921a4f892feb9bafe9515b9975b78c44f", + "size": 759, + "annotations": { + "org.opencontainers.image.ref.name": "3.16.7" + } + }, + { + "mediaType": "application/vnd.oci.image.manifest.v1+json", + "digest": "sha256:0dc27f36a618c110ae851662c13283e9fbc1b5a5de003befc4bcefa5a05d2eef", + "size": 525, + "annotations": { + "org.opencontainers.image.ref.name": "1.0.0" + } + } + ] +} \ No newline at end of file diff --git 
a/oci/layout/fixtures/delete_image_multiple_images/info.txt b/oci/layout/fixtures/delete_image_multiple_images/info.txt new file mode 100644 index 0000000000..4ec2b148c9 --- /dev/null +++ b/oci/layout/fixtures/delete_image_multiple_images/info.txt @@ -0,0 +1,61 @@ +This is a tree representation of the fixture to help write the tests: + +7 references in the index, 10 descriptors and 19 blobs in the blob directory + +index.json +│ +├── 3.17.5 +│ └── manifest: 5b2aba4d3c27bc6493633d0ec446b25c8d0a5c9cfe99894bcdff0aee80813805 +│ config: df11bc189adeb50dadb3291a3a7f2c34b36e0efdba0df70f2c8a2d761b215cde +│ layers: 986315a0e599fac2b80eb31db2124dab8d3de04d7ca98b254999bd913c1f73fe +│ +├── 3.18 +│ └── manifest: 93cbd11a4f41467a0409b975499ae711bc6f8222de38d9f1b5a4097583195ad5 +│ config: 913cf3a39d377faf89ed388ad913a318a390488c9f34c46e43424795cdabffe8 +│ layers: 557ac7d133b7770216a8101268640edf4e88beab1b4e1e1bfc9b1891a1cab861 +│ +├── 3 +│ └── manifest: 93cbd11a4f41467a0409b975499ae711bc6f8222de38d9f1b5a4097583195ad5 +│ config: 913cf3a39d377faf89ed388ad913a318a390488c9f34c46e43424795cdabffe8 +│ layers: 557ac7d133b7770216a8101268640edf4e88beab1b4e1e1bfc9b1891a1cab861 +│ +├── 1.0.0 +│ └── manifest: 0dc27f36a618c110ae851662c13283e9fbc1b5a5de003befc4bcefa5a05d2eef +│ config: 913cf3a39d377faf89ed388ad913a318a390488c9f34c46e43424795cdabffe8 +│ layers: a6f737ac2b84bc463f2ff721af39588c69646c82f79f3808236178e02e35b922 +│ +├── latest +│ └── index: a2f798327b3f25e3eff54badcb769953de235e62e3e32051d57a5e66246de4a1 +│ ├── linux/amd64 +│ │ └── manifest: 93cbd11a4f41467a0409b975499ae711bc6f8222de38d9f1b5a4097583195ad5 +│ │ config: 913cf3a39d377faf89ed388ad913a318a390488c9f34c46e43424795cdabffe8 +│ │ layers: 557ac7d133b7770216a8101268640edf4e88beab1b4e1e1bfc9b1891a1cab861 +│ │ +│ └── linux/386 +│ └── manifest: f6d60fd529b234d3e28837e15294d935f55da58ce57c4f9218cad38d0be82ce3 +│ config: aab808b283c3f654d84358a40ce8766ecd552249305141de88f0ca61f3d1368f +│ layers: 
e19729d5a968c71b4b691d60f4a6f85f93c303bb88635dcfef36e23b76cb7b3a +│ +├── 3.18.3 +│ └── index: a2f798327b3f25e3eff54badcb769953de235e62e3e32051d57a5e66246de4a1 +│ ├── linux/amd64 +│ │ └── manifest: 93cbd11a4f41467a0409b975499ae711bc6f8222de38d9f1b5a4097583195ad5 +│ │ config: 913cf3a39d377faf89ed388ad913a318a390488c9f34c46e43424795cdabffe8 +│ │ layers: 557ac7d133b7770216a8101268640edf4e88beab1b4e1e1bfc9b1891a1cab861 +│ │ +│ └── linux/386 +│ └── manifest: f6d60fd529b234d3e28837e15294d935f55da58ce57c4f9218cad38d0be82ce3 +│ config: aab808b283c3f654d84358a40ce8766ecd552249305141de88f0ca61f3d1368f +│ layers: e19729d5a968c71b4b691d60f4a6f85f93c303bb88635dcfef36e23b76cb7b3a +│ +├── 3.16.7 +│ └── index: 861d3c014b0e3edcf80e6221247d6b2921a4f892feb9bafe9515b9975b78c44f +│ ├── linux/amd64 +│ │ └── manifest: 39c524417bb4228f9fcb0aef43a680b5fd6b9f3a1df2fd50509d047e47dad8be +│ │ config: f732172ad8d2a666550fa3ec37a5153d59acc95744562ae64cf62ded46de101a +│ │ layers: 02ea786cb1ff44d997661886a4186cbd8a1dc466938712bf7281379209476022 +│ │ +│ └── linux/386 +│ └── manifest: be6036f9b6a4e120a04868c47f1b8674f58b2fe5e410cba9f585a13ca8946cf0 +│ config: 7ffdfe7d276286b39a203dcc247949cf47c91d2d5e10a53a675c0962ed9e4402 +│ layers: e2f7e0374fd6a03d9c373f4d9a0c7802045cc3ddcc1433e89d83b81fa7007242 diff --git a/oci/layout/fixtures/delete_image_multiple_images/oci-layout b/oci/layout/fixtures/delete_image_multiple_images/oci-layout new file mode 100644 index 0000000000..21b1439d1c --- /dev/null +++ b/oci/layout/fixtures/delete_image_multiple_images/oci-layout @@ -0,0 +1 @@ +{"imageLayoutVersion": "1.0.0"} \ No newline at end of file diff --git a/oci/layout/fixtures/delete_image_only_one_image/blobs/sha256/0c8b263642b51b5c1dc40fe402ae2e97119c6007b6e52146419985ec1f0092dc b/oci/layout/fixtures/delete_image_only_one_image/blobs/sha256/0c8b263642b51b5c1dc40fe402ae2e97119c6007b6e52146419985ec1f0092dc new file mode 100644 index 0000000000..e7e64ba41b --- /dev/null +++ 
b/oci/layout/fixtures/delete_image_only_one_image/blobs/sha256/0c8b263642b51b5c1dc40fe402ae2e97119c6007b6e52146419985ec1f0092dc @@ -0,0 +1 @@ +insert binary content here #9671 diff --git a/oci/layout/fixtures/delete_image_only_one_image/blobs/sha256/a527179158cd5cebc11c152b8637b47ce96c838ba2aa0de66d14f45cedc11423 b/oci/layout/fixtures/delete_image_only_one_image/blobs/sha256/a527179158cd5cebc11c152b8637b47ce96c838ba2aa0de66d14f45cedc11423 new file mode 100644 index 0000000000..f0f06201be --- /dev/null +++ b/oci/layout/fixtures/delete_image_only_one_image/blobs/sha256/a527179158cd5cebc11c152b8637b47ce96c838ba2aa0de66d14f45cedc11423 @@ -0,0 +1,30 @@ +{ + "created": "2019-08-20T20:19:55.211423266Z", + "architecture": "amd64", + "os": "linux", + "config": { + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "Cmd": [ + "/bin/sh" + ] + }, + "rootfs": { + "type": "layers", + "diff_ids": [ + "sha256:03901b4a2ea88eeaad62dbe59b072b28b6efa00491962b8741081c5df50c65e0" + ] + }, + "history": [ + { + "created": "2019-08-20T20:19:55.062606894Z", + "created_by": "/bin/sh -c #(nop) ADD file:fe64057fbb83dccb960efabbf1cd8777920ef279a7fa8dbca0a8801c651bdf7c in / " + }, + { + "created": "2019-08-20T20:19:55.211423266Z", + "created_by": "/bin/sh -c #(nop) CMD [\"/bin/sh\"]", + "empty_layer": true + } + ] +} diff --git a/oci/layout/fixtures/delete_image_only_one_image/blobs/sha256/eaa95f3cfaac07c8a5153eb77c933269586ad0226c83405776be08547e4d2a18 b/oci/layout/fixtures/delete_image_only_one_image/blobs/sha256/eaa95f3cfaac07c8a5153eb77c933269586ad0226c83405776be08547e4d2a18 new file mode 100644 index 0000000000..1ff195d0f3 --- /dev/null +++ b/oci/layout/fixtures/delete_image_only_one_image/blobs/sha256/eaa95f3cfaac07c8a5153eb77c933269586ad0226c83405776be08547e4d2a18 @@ -0,0 +1,16 @@ +{ + "schemaVersion": 2, + "mediaType": "application/vnd.oci.image.manifest.v1+json", + "config": { + "mediaType": "application/vnd.oci.image.config.v1+json", + "digest": 
"sha256:a527179158cd5cebc11c152b8637b47ce96c838ba2aa0de66d14f45cedc11423", + "size": 585 + }, + "layers": [ + { + "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip", + "digest": "sha256:0c8b263642b51b5c1dc40fe402ae2e97119c6007b6e52146419985ec1f0092dc", + "size": 33 + } + ] +} diff --git a/oci/layout/fixtures/delete_image_only_one_image/index.json b/oci/layout/fixtures/delete_image_only_one_image/index.json new file mode 100644 index 0000000000..b0a0c98478 --- /dev/null +++ b/oci/layout/fixtures/delete_image_only_one_image/index.json @@ -0,0 +1,13 @@ +{ + "schemaVersion": 2, + "manifests": [ + { + "mediaType": "application/vnd.oci.image.manifest.v1+json", + "digest": "sha256:eaa95f3cfaac07c8a5153eb77c933269586ad0226c83405776be08547e4d2a18", + "size": 476, + "annotations": { + "org.opencontainers.image.ref.name": "latest" + } + } + ] +} diff --git a/oci/layout/fixtures/delete_image_only_one_image/oci-layout b/oci/layout/fixtures/delete_image_only_one_image/oci-layout new file mode 100644 index 0000000000..21b1439d1c --- /dev/null +++ b/oci/layout/fixtures/delete_image_only_one_image/oci-layout @@ -0,0 +1 @@ +{"imageLayoutVersion": "1.0.0"} \ No newline at end of file diff --git a/oci/layout/fixtures/delete_image_shared_blobs_dir/blobs/sha256/a527179158cd5cebc11c152b8637b47ce96c838ba2aa0de66d14f45cedc11423 b/oci/layout/fixtures/delete_image_shared_blobs_dir/blobs/sha256/a527179158cd5cebc11c152b8637b47ce96c838ba2aa0de66d14f45cedc11423 new file mode 100644 index 0000000000..f0f06201be --- /dev/null +++ b/oci/layout/fixtures/delete_image_shared_blobs_dir/blobs/sha256/a527179158cd5cebc11c152b8637b47ce96c838ba2aa0de66d14f45cedc11423 @@ -0,0 +1,30 @@ +{ + "created": "2019-08-20T20:19:55.211423266Z", + "architecture": "amd64", + "os": "linux", + "config": { + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "Cmd": [ + "/bin/sh" + ] + }, + "rootfs": { + "type": "layers", + "diff_ids": [ + 
"sha256:03901b4a2ea88eeaad62dbe59b072b28b6efa00491962b8741081c5df50c65e0" + ] + }, + "history": [ + { + "created": "2019-08-20T20:19:55.062606894Z", + "created_by": "/bin/sh -c #(nop) ADD file:fe64057fbb83dccb960efabbf1cd8777920ef279a7fa8dbca0a8801c651bdf7c in / " + }, + { + "created": "2019-08-20T20:19:55.211423266Z", + "created_by": "/bin/sh -c #(nop) CMD [\"/bin/sh\"]", + "empty_layer": true + } + ] +} diff --git a/oci/layout/fixtures/delete_image_shared_blobs_dir/index.json b/oci/layout/fixtures/delete_image_shared_blobs_dir/index.json new file mode 100644 index 0000000000..49925c19ae --- /dev/null +++ b/oci/layout/fixtures/delete_image_shared_blobs_dir/index.json @@ -0,0 +1,13 @@ +{ + "schemaVersion": 2, + "manifests": [ + { + "mediaType": "application/vnd.oci.image.manifest.v1+json", + "digest": "sha256:eaa95f3cfaac07c8a5153eb77c933269586ad0226c83405776be08547e4d2a18", + "size": 405, + "annotations": { + "org.opencontainers.image.ref.name": "latest" + } + } + ] +} diff --git a/oci/layout/fixtures/delete_image_shared_blobs_dir/oci-layout b/oci/layout/fixtures/delete_image_shared_blobs_dir/oci-layout new file mode 100644 index 0000000000..21b1439d1c --- /dev/null +++ b/oci/layout/fixtures/delete_image_shared_blobs_dir/oci-layout @@ -0,0 +1 @@ +{"imageLayoutVersion": "1.0.0"} \ No newline at end of file diff --git a/oci/layout/fixtures/delete_image_shared_blobs_dir/shared_blobs/sha256/0c8b263642b51b5c1dc40fe402ae2e97119c6007b6e52146419985ec1f0092dc b/oci/layout/fixtures/delete_image_shared_blobs_dir/shared_blobs/sha256/0c8b263642b51b5c1dc40fe402ae2e97119c6007b6e52146419985ec1f0092dc new file mode 100644 index 0000000000..e7e64ba41b --- /dev/null +++ b/oci/layout/fixtures/delete_image_shared_blobs_dir/shared_blobs/sha256/0c8b263642b51b5c1dc40fe402ae2e97119c6007b6e52146419985ec1f0092dc @@ -0,0 +1 @@ +insert binary content here #9671 diff --git 
a/oci/layout/fixtures/delete_image_shared_blobs_dir/shared_blobs/sha256/eaa95f3cfaac07c8a5153eb77c933269586ad0226c83405776be08547e4d2a18 b/oci/layout/fixtures/delete_image_shared_blobs_dir/shared_blobs/sha256/eaa95f3cfaac07c8a5153eb77c933269586ad0226c83405776be08547e4d2a18 new file mode 100644 index 0000000000..1ff195d0f3 --- /dev/null +++ b/oci/layout/fixtures/delete_image_shared_blobs_dir/shared_blobs/sha256/eaa95f3cfaac07c8a5153eb77c933269586ad0226c83405776be08547e4d2a18 @@ -0,0 +1,16 @@ +{ + "schemaVersion": 2, + "mediaType": "application/vnd.oci.image.manifest.v1+json", + "config": { + "mediaType": "application/vnd.oci.image.config.v1+json", + "digest": "sha256:a527179158cd5cebc11c152b8637b47ce96c838ba2aa0de66d14f45cedc11423", + "size": 585 + }, + "layers": [ + { + "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip", + "digest": "sha256:0c8b263642b51b5c1dc40fe402ae2e97119c6007b6e52146419985ec1f0092dc", + "size": 33 + } + ] +} diff --git a/oci/layout/fixtures/delete_image_two_identical_references/blobs/sha256/0c8b263642b51b5c1dc40fe402ae2e97119c6007b6e52146419985ec1f0092dc b/oci/layout/fixtures/delete_image_two_identical_references/blobs/sha256/0c8b263642b51b5c1dc40fe402ae2e97119c6007b6e52146419985ec1f0092dc new file mode 100644 index 0000000000..e7e64ba41b --- /dev/null +++ b/oci/layout/fixtures/delete_image_two_identical_references/blobs/sha256/0c8b263642b51b5c1dc40fe402ae2e97119c6007b6e52146419985ec1f0092dc @@ -0,0 +1 @@ +insert binary content here #9671 diff --git a/oci/layout/fixtures/delete_image_two_identical_references/blobs/sha256/49d1584496c6e196f512c4a9f52b17b187642269d84c044538523c5b69a660b3 b/oci/layout/fixtures/delete_image_two_identical_references/blobs/sha256/49d1584496c6e196f512c4a9f52b17b187642269d84c044538523c5b69a660b3 new file mode 100644 index 0000000000..e59a5f804a --- /dev/null +++ b/oci/layout/fixtures/delete_image_two_identical_references/blobs/sha256/49d1584496c6e196f512c4a9f52b17b187642269d84c044538523c5b69a660b3 @@ -0,0 +1,16 
@@ +{ + "schemaVersion": 2, + "mediaType": "application/vnd.oci.image.manifest.v1+json", + "config": { + "mediaType": "application/vnd.oci.image.config.v1+json", + "digest": "sha256:a527179158cd5cebc11c152b8637b47ce96c838ba2aa0de66d14f45cedc11423", + "size": 740 + }, + "layers": [ + { + "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip", + "digest": "sha256:0c8b263642b51b5c1dc40fe402ae2e97119c6007b6e52146419985ec1f0092dc", + "size": 33 + } + ] +} diff --git a/oci/layout/fixtures/delete_image_two_identical_references/blobs/sha256/a527179158cd5cebc11c152b8637b47ce96c838ba2aa0de66d14f45cedc11423 b/oci/layout/fixtures/delete_image_two_identical_references/blobs/sha256/a527179158cd5cebc11c152b8637b47ce96c838ba2aa0de66d14f45cedc11423 new file mode 100644 index 0000000000..f0f06201be --- /dev/null +++ b/oci/layout/fixtures/delete_image_two_identical_references/blobs/sha256/a527179158cd5cebc11c152b8637b47ce96c838ba2aa0de66d14f45cedc11423 @@ -0,0 +1,30 @@ +{ + "created": "2019-08-20T20:19:55.211423266Z", + "architecture": "amd64", + "os": "linux", + "config": { + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "Cmd": [ + "/bin/sh" + ] + }, + "rootfs": { + "type": "layers", + "diff_ids": [ + "sha256:03901b4a2ea88eeaad62dbe59b072b28b6efa00491962b8741081c5df50c65e0" + ] + }, + "history": [ + { + "created": "2019-08-20T20:19:55.062606894Z", + "created_by": "/bin/sh -c #(nop) ADD file:fe64057fbb83dccb960efabbf1cd8777920ef279a7fa8dbca0a8801c651bdf7c in / " + }, + { + "created": "2019-08-20T20:19:55.211423266Z", + "created_by": "/bin/sh -c #(nop) CMD [\"/bin/sh\"]", + "empty_layer": true + } + ] +} diff --git a/oci/layout/fixtures/delete_image_two_identical_references/blobs/sha256/ce229a4eb5797ecd3a3a1846613b6b49811f79e38b5b0ce666268ba4b6c68e41 b/oci/layout/fixtures/delete_image_two_identical_references/blobs/sha256/ce229a4eb5797ecd3a3a1846613b6b49811f79e38b5b0ce666268ba4b6c68e41 new file mode 100644 index 0000000000..9578f7d9ec --- 
/dev/null +++ b/oci/layout/fixtures/delete_image_two_identical_references/blobs/sha256/ce229a4eb5797ecd3a3a1846613b6b49811f79e38b5b0ce666268ba4b6c68e41 @@ -0,0 +1,30 @@ +{ + "created": "2019-08-20T20:20:55.211423266Z", + "architecture": "amd64", + "os": "linux", + "config": { + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "Cmd": [ + "/bin/sh" + ] + }, + "rootfs": { + "type": "layers", + "diff_ids": [ + "sha256:03901b4a2ea88eeaad62dbe59b072b28b6efa00491962b8741081c5df50c65e0" + ] + }, + "history": [ + { + "created": "2019-08-20T20:19:55.062606894Z", + "created_by": "/bin/sh -c #(nop) ADD file:fe64057fbb83dccb960efabbf1cd8777920ef279a7fa8dbca0a8801c651bdf7c in / " + }, + { + "created": "2019-08-20T20:19:55.211423266Z", + "created_by": "/bin/sh -c #(nop) CMD [\"/bin/sh\"]", + "empty_layer": true + } + ] +} diff --git a/oci/layout/fixtures/delete_image_two_identical_references/blobs/sha256/ecfa463536cb5472e238aadc4df81d4785d5d6373027c488a2db8a6e76fe88ed b/oci/layout/fixtures/delete_image_two_identical_references/blobs/sha256/ecfa463536cb5472e238aadc4df81d4785d5d6373027c488a2db8a6e76fe88ed new file mode 100644 index 0000000000..16b7c27bd4 --- /dev/null +++ b/oci/layout/fixtures/delete_image_two_identical_references/blobs/sha256/ecfa463536cb5472e238aadc4df81d4785d5d6373027c488a2db8a6e76fe88ed @@ -0,0 +1,16 @@ +{ + "schemaVersion": 2, + "mediaType": "application/vnd.oci.image.manifest.v1+json", + "config": { + "mediaType": "application/vnd.oci.image.config.v1+json", + "digest": "sha256:ce229a4eb5797ecd3a3a1846613b6b49811f79e38b5b0ce666268ba4b6c68e41", + "size": 740 + }, + "layers": [ + { + "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip", + "digest": "sha256:fa00bb4e2adbc73a5da1fd54d2a840020592530a8d4e8de9888b9e9a533419d8", + "size": 33 + } + ] +} diff --git a/oci/layout/fixtures/delete_image_two_identical_references/blobs/sha256/fa00bb4e2adbc73a5da1fd54d2a840020592530a8d4e8de9888b9e9a533419d8 
b/oci/layout/fixtures/delete_image_two_identical_references/blobs/sha256/fa00bb4e2adbc73a5da1fd54d2a840020592530a8d4e8de9888b9e9a533419d8 new file mode 100644 index 0000000000..3badd42d29 --- /dev/null +++ b/oci/layout/fixtures/delete_image_two_identical_references/blobs/sha256/fa00bb4e2adbc73a5da1fd54d2a840020592530a8d4e8de9888b9e9a533419d8 @@ -0,0 +1 @@ +insert binary content here 32515 diff --git a/oci/layout/fixtures/delete_image_two_identical_references/index.json b/oci/layout/fixtures/delete_image_two_identical_references/index.json new file mode 100644 index 0000000000..80850d4b91 --- /dev/null +++ b/oci/layout/fixtures/delete_image_two_identical_references/index.json @@ -0,0 +1,21 @@ +{ + "schemaVersion": 2, + "manifests": [ + { + "mediaType": "application/vnd.oci.image.manifest.v1+json", + "digest": "sha256:49d1584496c6e196f512c4a9f52b17b187642269d84c044538523c5b69a660b3", + "size": 476, + "annotations": { + "org.opencontainers.image.ref.name": "1.0.0" + } + }, + { + "mediaType": "application/vnd.oci.image.manifest.v1+json", + "digest": "sha256:ecfa463536cb5472e238aadc4df81d4785d5d6373027c488a2db8a6e76fe88ed", + "size": 476, + "annotations": { + "org.opencontainers.image.ref.name": "1.0.0" + } + } + ] +} diff --git a/oci/layout/fixtures/delete_image_two_identical_references/oci-layout b/oci/layout/fixtures/delete_image_two_identical_references/oci-layout new file mode 100644 index 0000000000..21b1439d1c --- /dev/null +++ b/oci/layout/fixtures/delete_image_two_identical_references/oci-layout @@ -0,0 +1 @@ +{"imageLayoutVersion": "1.0.0"} \ No newline at end of file diff --git a/oci/layout/fixtures/manifest/index.json b/oci/layout/fixtures/manifest/index.json index fd6930cf1c..7e779082ce 100644 --- a/oci/layout/fixtures/manifest/index.json +++ b/oci/layout/fixtures/manifest/index.json @@ -1 +1,17 @@ 
-{"schemaVersion":2,"manifests":[{"mediaType":"application/vnd.oci.image.manifest.v1+json","digest":"sha256:84afb6189c4d69f2d040c5f1dc4e0a16fed9b539ce9cfb4ac2526ae4e0576cc0","size":496,"annotations":{"org.opencontainers.image.ref.name":"v0.1.1"},"platform":{"architecture":"amd64","os":"linux"}}]} \ No newline at end of file +{ + "schemaVersion": 2, + "manifests": [ + { + "mediaType": "application/vnd.oci.image.manifest.v1+json", + "digest": "sha256:84afb6189c4d69f2d040c5f1dc4e0a16fed9b539ce9cfb4ac2526ae4e0576cc0", + "size": 496, + "annotations": { + "org.opencontainers.image.ref.name": "v0.1.1" + }, + "platform": { + "architecture": "amd64", + "os": "linux" + } + } + ] +} \ No newline at end of file diff --git a/oci/layout/oci_delete.go b/oci/layout/oci_delete.go new file mode 100644 index 0000000000..8dd54f255a --- /dev/null +++ b/oci/layout/oci_delete.go @@ -0,0 +1,240 @@ +package layout + +import ( + "context" + "encoding/json" + "fmt" + "io/fs" + "os" + + "github.com/containers/image/v5/internal/set" + "github.com/containers/image/v5/types" + digest "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/sirupsen/logrus" + "golang.org/x/exp/slices" +) + +// DeleteImage deletes the named image from the directory, if supported. 
+func (ref ociReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { + sharedBlobsDir := "" + if sys != nil && sys.OCISharedBlobDirPath != "" { + sharedBlobsDir = sys.OCISharedBlobDirPath + } + + descriptor, descriptorIndex, err := ref.getManifestDescriptor() + if err != nil { + return err + } + + var blobsUsedByImage map[digest.Digest]int + + switch descriptor.MediaType { + case imgspecv1.MediaTypeImageManifest: + blobsUsedByImage, err = ref.getBlobsUsedInSingleImage(&descriptor, sharedBlobsDir) + case imgspecv1.MediaTypeImageIndex: + blobsUsedByImage, err = ref.getBlobsUsedInImageIndex(&descriptor, sharedBlobsDir) + default: + return fmt.Errorf("unsupported mediaType in index: %q", descriptor.MediaType) + } + if err != nil { + return err + } + + blobsToDelete, err := ref.getBlobsToDelete(blobsUsedByImage, sharedBlobsDir) + if err != nil { + return err + } + + err = ref.deleteBlobs(blobsToDelete) + if err != nil { + return err + } + + return ref.deleteReferenceFromIndex(descriptorIndex) +} + +func (ref ociReference) getBlobsUsedInSingleImage(descriptor *imgspecv1.Descriptor, sharedBlobsDir string) (map[digest.Digest]int, error) { + manifest, err := ref.getManifest(descriptor, sharedBlobsDir) + if err != nil { + return nil, err + } + blobsUsedInManifest := ref.getBlobsUsedInManifest(manifest) + blobsUsedInManifest[descriptor.Digest]++ // Add the current manifest to the list of blobs used by this reference + + return blobsUsedInManifest, nil +} + +func (ref ociReference) getBlobsUsedInImageIndex(descriptor *imgspecv1.Descriptor, sharedBlobsDir string) (map[digest.Digest]int, error) { + blobPath, err := ref.blobPath(descriptor.Digest, sharedBlobsDir) + if err != nil { + return nil, err + } + index, err := parseIndex(blobPath) + if err != nil { + return nil, err + } + + blobsUsedInImageRefIndex := make(map[digest.Digest]int) + err = ref.addBlobsUsedInIndex(blobsUsedInImageRefIndex, index, sharedBlobsDir) + if err != nil { + return nil, err + } + 
blobsUsedInImageRefIndex[descriptor.Digest]++ // Add the nested index in the list of blobs used by this reference + + return blobsUsedInImageRefIndex, nil +} + +// Updates a map of digest with the usage count, so a blob that is referenced three times will have 3 in the map +func (ref ociReference) addBlobsUsedInIndex(destination map[digest.Digest]int, index *imgspecv1.Index, sharedBlobsDir string) error { + for _, descriptor := range index.Manifests { + destination[descriptor.Digest]++ + switch descriptor.MediaType { + case imgspecv1.MediaTypeImageManifest: + manifest, err := ref.getManifest(&descriptor, sharedBlobsDir) + if err != nil { + return err + } + for digest, count := range ref.getBlobsUsedInManifest(manifest) { + destination[digest] += count + } + case imgspecv1.MediaTypeImageIndex: + blobPath, err := ref.blobPath(descriptor.Digest, sharedBlobsDir) + if err != nil { + return err + } + index, err := parseIndex(blobPath) + if err != nil { + return err + } + err = ref.addBlobsUsedInIndex(destination, index, sharedBlobsDir) + if err != nil { + return err + } + default: + return fmt.Errorf("unsupported mediaType in index: %q", descriptor.MediaType) + } + } + + return nil +} + +func (ref ociReference) getBlobsUsedInManifest(manifest *imgspecv1.Manifest) map[digest.Digest]int { + blobsUsedInManifest := make(map[digest.Digest]int, 0) + + blobsUsedInManifest[manifest.Config.Digest]++ + for _, layer := range manifest.Layers { + blobsUsedInManifest[layer.Digest]++ + } + + return blobsUsedInManifest +} + +// This takes in a map of the digest and their usage count in the manifest to be deleted +// It will compare it to the digest usage in the root index, and return a set of the blobs that can be safely deleted +func (ref ociReference) getBlobsToDelete(blobsUsedByDescriptorToDelete map[digest.Digest]int, sharedBlobsDir string) (*set.Set[digest.Digest], error) { + rootIndex, err := ref.getIndex() + if err != nil { + return nil, err + } + blobsUsedInRootIndex := 
make(map[digest.Digest]int) + err = ref.addBlobsUsedInIndex(blobsUsedInRootIndex, rootIndex, sharedBlobsDir) + if err != nil { + return nil, err + } + + blobsToDelete := set.New[digest.Digest]() + + for digest, count := range blobsUsedInRootIndex { + if count-blobsUsedByDescriptorToDelete[digest] == 0 { + blobsToDelete.Add(digest) + } + } + + return blobsToDelete, nil +} + +// This transport never generates layouts where blobs for an image are both in the local blobs directory +// and the shared one; it’s either one or the other, depending on how OCISharedBlobDirPath is set. +// +// But we can’t correctly compute use counts for OCISharedBlobDirPath (because we don't know what +// the other layouts sharing that directory are, and we might not even have permission to read them), +// so we can’t really delete any blobs in that case. +// Checking the _local_ blobs directory, and deleting blobs from there, doesn't really hurt, +// in case the layout was created using some other tool or without OCISharedBlobDirPath set, so let's silently +// check for local blobs (but we should make no noise if the blobs are actually in the shared directory). 
+// +// So, NOTE: the blobPath() call below hard-codes "" even in calls where OCISharedBlobDirPath is set +func (ref ociReference) deleteBlobs(blobsToDelete *set.Set[digest.Digest]) error { + for _, digest := range blobsToDelete.Values() { + blobPath, err := ref.blobPath(digest, "") //Only delete in the local directory, see comment above + if err != nil { + return err + } + err = deleteBlob(blobPath) + if err != nil { + return err + } + } + + return nil +} + +func deleteBlob(blobPath string) error { + logrus.Debug(fmt.Sprintf("Deleting blob at %q", blobPath)) + + err := os.Remove(blobPath) + if err != nil && !os.IsNotExist(err) { + return err + } else { + return nil + } +} + +func (ref ociReference) deleteReferenceFromIndex(referenceIndex int) error { + index, err := ref.getIndex() + if err != nil { + return err + } + + index.Manifests = slices.Delete(index.Manifests, referenceIndex, referenceIndex+1) + + return saveJSON(ref.indexPath(), index) +} + +func saveJSON(path string, content any) error { + // If the file already exists, get its mode to preserve it + var mode fs.FileMode + existingfi, err := os.Stat(path) + if err != nil { + if !os.IsNotExist(err) { + return err + } else { // File does not exist, use default mode + mode = 0644 + } + } else { + mode = existingfi.Mode() + } + + file, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, mode) + if err != nil { + return err + } + defer file.Close() + + return json.NewEncoder(file).Encode(content) +} + +func (ref ociReference) getManifest(descriptor *imgspecv1.Descriptor, sharedBlobsDir string) (*imgspecv1.Manifest, error) { + manifestPath, err := ref.blobPath(descriptor.Digest, sharedBlobsDir) + if err != nil { + return nil, err + } + + manifest, err := parseJSON[imgspecv1.Manifest](manifestPath) + if err != nil { + return nil, err + } + + return manifest, nil +} diff --git a/oci/layout/oci_delete_test.go b/oci/layout/oci_delete_test.go new file mode 100644 index 0000000000..7e06456ffd --- /dev/null 
+++ b/oci/layout/oci_delete_test.go @@ -0,0 +1,298 @@ +package layout + +import ( + "context" + "fmt" + "os" + "path/filepath" + "testing" + + "github.com/containers/image/v5/types" + digest "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + cp "github.com/otiai10/copy" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestReferenceDeleteImage_onlyOneImage(t *testing.T) { + tmpDir := loadFixture(t, "delete_image_only_one_image") + + ref, err := NewReference(tmpDir, "latest") + require.NoError(t, err) + + err = ref.DeleteImage(context.Background(), nil) + require.NoError(t, err) + + // Check that all blobs were deleted + blobsDir := filepath.Join(tmpDir, "blobs") + files, err := os.ReadDir(filepath.Join(blobsDir, "sha256")) + require.NoError(t, err) + require.Empty(t, files) + + // Check that the index is empty as there is only one image in the fixture + ociRef, ok := ref.(ociReference) + require.True(t, ok) + index, err := ociRef.getIndex() + require.NoError(t, err) + require.Equal(t, 0, len(index.Manifests)) +} + +func TestReferenceDeleteImage_onlyOneImage_emptyImageName(t *testing.T) { + tmpDir := loadFixture(t, "delete_image_only_one_image") + + ref, err := NewReference(tmpDir, "") + require.NoError(t, err) + + err = ref.DeleteImage(context.Background(), nil) + require.NoError(t, err) + + // Check that all blobs were deleted + blobsDir := filepath.Join(tmpDir, "blobs") + files, err := os.ReadDir(filepath.Join(blobsDir, "sha256")) + require.NoError(t, err) + require.Empty(t, files) + + // Check that the index is empty as there is only one image in the fixture + ociRef, ok := ref.(ociReference) + require.True(t, ok) + index, err := ociRef.getIndex() + require.NoError(t, err) + require.Equal(t, 0, len(index.Manifests)) +} + +func TestReferenceDeleteImage_sharedBlobDir(t *testing.T) { + tmpDir := loadFixture(t, "delete_image_shared_blobs_dir") + + ref, err := 
NewReference(tmpDir, "latest") + require.NoError(t, err) + + sys := &types.SystemContext{OCISharedBlobDirPath: filepath.Join(tmpDir, "shared_blobs")} + err = ref.DeleteImage(context.Background(), sys) + require.NoError(t, err) + + // Check that the only blob in the local directory was deleted + blobsDir := filepath.Join(tmpDir, "blobs") + files, err := os.ReadDir(filepath.Join(blobsDir, "sha256")) + require.NoError(t, err) + require.Empty(t, files) + + // Check that the blobs in the shared blob directory are still present + sharedBlobsDir := filepath.Join(tmpDir, "shared_blobs") + files, err = os.ReadDir(filepath.Join(sharedBlobsDir, "sha256")) + require.NoError(t, err) + require.Equal(t, 2, len(files)) + + // Check that the index is empty as there is only one image in the fixture + ociRef, ok := ref.(ociReference) + require.True(t, ok) + index, err := ociRef.getIndex() + require.NoError(t, err) + require.Equal(t, 0, len(index.Manifests)) +} + +func TestReferenceDeleteImage_multipleImages(t *testing.T) { + tmpDir := loadFixture(t, "delete_image_multiple_images") + + ref, err := NewReference(tmpDir, "3.17.5") + require.NoError(t, err) + + err = ref.DeleteImage(context.Background(), nil) + require.NoError(t, err) + + // Check that the relevant blobs were deleted/preserved + blobsDir := filepath.Join(tmpDir, "blobs") + files, err := os.ReadDir(filepath.Join(blobsDir, "sha256")) + require.NoError(t, err) + require.Equal(t, 16, len(files)) + assertBlobDoesNotExist(t, blobsDir, "sha256:5b2aba4d3c27bc6493633d0ec446b25c8d0a5c9cfe99894bcdff0aee80813805") + assertBlobDoesNotExist(t, blobsDir, "sha256:df11bc189adeb50dadb3291a3a7f2c34b36e0efdba0df70f2c8a2d761b215cde") + assertBlobDoesNotExist(t, blobsDir, "sha256:986315a0e599fac2b80eb31db2124dab8d3de04d7ca98b254999bd913c1f73fe") + + // Check the index + ociRef, ok := ref.(ociReference) + require.True(t, ok) + // .. 
Check that the index has been reduced to the correct size + index, err := ociRef.getIndex() + require.NoError(t, err) + require.Equal(t, 6, len(index.Manifests)) + // .. Check that the image is not in the index anymore + for _, descriptor := range index.Manifests { + switch descriptor.Annotations[imgspecv1.AnnotationRefName] { + case "3.17.5": + assert.Fail(t, "image still present in the index after deletion") + default: + continue + } + } +} + +func TestReferenceDeleteImage_multipleImages_blobsUsedByOtherImages(t *testing.T) { + tmpDir := loadFixture(t, "delete_image_multiple_images") + + ref, err := NewReference(tmpDir, "1.0.0") + require.NoError(t, err) + + err = ref.DeleteImage(context.Background(), nil) + require.NoError(t, err) + + // Check that the relevant blobs were deleted/preserved + blobsDir := filepath.Join(tmpDir, "blobs") + files, err := os.ReadDir(filepath.Join(blobsDir, "sha256")) + require.NoError(t, err) + require.Equal(t, 17, len(files)) + assertBlobExists(t, blobsDir, "sha256:df11bc189adeb50dadb3291a3a7f2c34b36e0efdba0df70f2c8a2d761b215cde") + assertBlobDoesNotExist(t, blobsDir, "sha256:0dc27f36a618c110ae851662c13283e9fbc1b5a5de003befc4bcefa5a05d2eef") + assertBlobDoesNotExist(t, blobsDir, "sha256:a6f737ac2b84bc463f2ff721af39588c69646c82f79f3808236178e02e35b922") + + // Check the index + ociRef, ok := ref.(ociReference) + require.True(t, ok) + // .. Check that the index has been reduced to the correct size + index, err := ociRef.getIndex() + require.NoError(t, err) + require.Equal(t, 6, len(index.Manifests)) + // .. 
Check that the image is not in the index anymore + for _, descriptor := range index.Manifests { + switch descriptor.Annotations[imgspecv1.AnnotationRefName] { + case "1.0.0": + assert.Fail(t, "image still present in the index after deletion") + default: + continue + } + } +} + +func TestReferenceDeleteImage_multipleImages_imageDoesNotExist(t *testing.T) { + tmpDir := loadFixture(t, "delete_image_multiple_images") + + ref, err := NewReference(tmpDir, "does-not-exist") + assert.NoError(t, err) + + err = ref.DeleteImage(context.Background(), nil) + assert.Error(t, err) +} + +func TestReferenceDeleteImage_multipleImages_emptyImageName(t *testing.T) { + tmpDir := loadFixture(t, "delete_image_multiple_images") + + ref, err := NewReference(tmpDir, "") + require.NoError(t, err) + + err = ref.DeleteImage(context.Background(), nil) + require.Error(t, err) +} + +func TestReferenceDeleteImage_multipleImages_nestedIndexImage(t *testing.T) { + tmpDir := loadFixture(t, "delete_image_multiple_images") + + ref, err := NewReference(tmpDir, "3.16.7") + require.NoError(t, err) + + err = ref.DeleteImage(context.Background(), nil) + require.NoError(t, err) + + // Check that the relevant blobs were deleted/preserved + blobsDir := filepath.Join(tmpDir, "blobs") + files, err := os.ReadDir(filepath.Join(blobsDir, "sha256")) + require.NoError(t, err) + require.Equal(t, 12, len(files)) + assertBlobDoesNotExist(t, blobsDir, "sha256:861d3c014b0e3edcf80e6221247d6b2921a4f892feb9bafe9515b9975b78c44f") + assertBlobDoesNotExist(t, blobsDir, "sha256:39c524417bb4228f9fcb0aef43a680b5fd6b9f3a1df2fd50509d047e47dad8be") + assertBlobDoesNotExist(t, blobsDir, "sha256:f732172ad8d2a666550fa3ec37a5153d59acc95744562ae64cf62ded46de101a") + assertBlobDoesNotExist(t, blobsDir, "sha256:02ea786cb1ff44d997661886a4186cbd8a1dc466938712bf7281379209476022") + assertBlobDoesNotExist(t, blobsDir, "sha256:be6036f9b6a4e120a04868c47f1b8674f58b2fe5e410cba9f585a13ca8946cf0") + assertBlobDoesNotExist(t, blobsDir, 
"sha256:7ffdfe7d276286b39a203dcc247949cf47c91d2d5e10a53a675c0962ed9e4402") + assertBlobDoesNotExist(t, blobsDir, "sha256:e2f7e0374fd6a03d9c373f4d9a0c7802045cc3ddcc1433e89d83b81fa7007242") + + // Check the index + ociRef, ok := ref.(ociReference) + require.True(t, ok) + // .. Check that the index has been reduced to the correct size + index, err := ociRef.getIndex() + require.NoError(t, err) + require.Equal(t, 6, len(index.Manifests)) + // .. Check that the image is not in the index anymore + for _, descriptor := range index.Manifests { + switch descriptor.Annotations[imgspecv1.AnnotationRefName] { + case "3.16.7": + assert.Fail(t, "image still present in the index after deletion") + default: + continue + } + } +} + +func TestReferenceDeleteImage_multipleImages_nestedIndexImage_refWithSameContent(t *testing.T) { + tmpDir := loadFixture(t, "delete_image_multiple_images") + + ref, err := NewReference(tmpDir, "3.18.3") + require.NoError(t, err) + + err = ref.DeleteImage(context.Background(), nil) + require.NoError(t, err) + + // Check that the relevant blobs were deleted/preserved + blobsDir := filepath.Join(tmpDir, "blobs") + files, err := os.ReadDir(filepath.Join(blobsDir, "sha256")) + require.NoError(t, err) + require.Equal(t, 19, len(files)) + + // Check the index + ociRef, ok := ref.(ociReference) + require.True(t, ok) + // .. 
Check that the index has been reduced to the correct size + index, err := ociRef.getIndex() + require.NoError(t, err) + require.Equal(t, 6, len(index.Manifests)) +} + +func TestReferenceDeleteImage_multipleImages_twoIdenticalReferences(t *testing.T) { + tmpDir := loadFixture(t, "delete_image_two_identical_references") + + ref, err := NewReference(tmpDir, "1.0.0") + require.NoError(t, err) + + err = ref.DeleteImage(context.Background(), nil) + require.NoError(t, err) + + // Check that the relevant blobs were deleted/preserved - in this case only the first reference should be deleted + blobsDir := filepath.Join(tmpDir, "blobs") + files, err := os.ReadDir(filepath.Join(blobsDir, "sha256")) + require.NoError(t, err) + require.Equal(t, 3, len(files)) + assertBlobExists(t, blobsDir, "sha256:ecfa463536cb5472e238aadc4df81d4785d5d6373027c488a2db8a6e76fe88ed") + assertBlobExists(t, blobsDir, "sha256:ce229a4eb5797ecd3a3a1846613b6b49811f79e38b5b0ce666268ba4b6c68e41") + assertBlobExists(t, blobsDir, "sha256:fa00bb4e2adbc73a5da1fd54d2a840020592530a8d4e8de9888b9e9a533419d8") + + // Check the index + ociRef, ok := ref.(ociReference) + require.True(t, ok) + // .. 
Check that the index has been reduced to the correct size + index, err := ociRef.getIndex() + require.NoError(t, err) + require.Equal(t, 1, len(index.Manifests)) +} + +func loadFixture(t *testing.T, fixtureName string) string { + tmpDir := t.TempDir() + err := cp.Copy(fmt.Sprintf("fixtures/%v/", fixtureName), tmpDir) + require.NoError(t, err) + return tmpDir +} + +func assertBlobExists(t *testing.T, blobsDir string, blobDigest string) { + digest, err := digest.Parse(blobDigest) + require.NoError(t, err) + blobPath := filepath.Join(blobsDir, digest.Algorithm().String(), digest.Hex()) + _, err = os.Stat(blobPath) + require.NoError(t, err) +} + +func assertBlobDoesNotExist(t *testing.T, blobsDir string, blobDigest string) { + digest, err := digest.Parse(blobDigest) + require.NoError(t, err) + blobPath := filepath.Join(blobsDir, digest.Algorithm().String(), digest.Hex()) + _, err = os.Stat(blobPath) + require.True(t, os.IsNotExist(err)) +} diff --git a/oci/layout/oci_dest.go b/oci/layout/oci_dest.go index 8ff43d4480..100d16763f 100644 --- a/oci/layout/oci_dest.go +++ b/oci/layout/oci_dest.go @@ -19,6 +19,7 @@ import ( digest "github.com/opencontainers/go-digest" imgspec "github.com/opencontainers/image-spec/specs-go" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "golang.org/x/exp/slices" ) type ociImageDestination struct { @@ -84,7 +85,7 @@ func newImageDestination(sys *types.SystemContext, ref ociReference) (private.Im // Per the OCI image specification, layouts MUST have a "blobs" subdirectory, // but it MAY be empty (e.g. 
if we never end up calling PutBlob) // https://github.com/opencontainers/image-spec/blame/7c889fafd04a893f5c5f50b7ab9963d5d64e5242/image-layout.md#L19 - if err := ensureDirectoryExists(filepath.Join(d.ref.dir, "blobs")); err != nil { + if err := ensureDirectoryExists(filepath.Join(d.ref.dir, imgspecv1.ImageBlobsDir)); err != nil { return nil, err } return d, nil @@ -271,8 +272,8 @@ func (d *ociImageDestination) addManifest(desc *imgspecv1.Descriptor) { return } } - // It's a new entry to be added to the index. - d.index.Manifests = append(d.index.Manifests, *desc) + // It's a new entry to be added to the index. Use slices.Clone() to avoid a remote dependency on how d.index was created. + d.index.Manifests = append(slices.Clone(d.index.Manifests), *desc) } // Commit marks the process of storing the image as successful and asks for the image to be persisted. @@ -283,7 +284,13 @@ func (d *ociImageDestination) addManifest(desc *imgspecv1.Descriptor) { // - Uploaded data MAY be visible to others before Commit() is called // - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. 
rollback is allowed but not guaranteed) func (d *ociImageDestination) Commit(context.Context, types.UnparsedImage) error { - if err := os.WriteFile(d.ref.ociLayoutPath(), []byte(`{"imageLayoutVersion": "1.0.0"}`), 0644); err != nil { + layoutBytes, err := json.Marshal(imgspecv1.ImageLayout{ + Version: imgspecv1.ImageLayoutVersion, + }) + if err != nil { + return err + } + if err := os.WriteFile(d.ref.ociLayoutPath(), layoutBytes, 0644); err != nil { return err } indexJSON, err := json.Marshal(d.index) diff --git a/oci/layout/oci_src.go b/oci/layout/oci_src.go index 6b423f3b05..f5f1debc9f 100644 --- a/oci/layout/oci_src.go +++ b/oci/layout/oci_src.go @@ -60,7 +60,7 @@ func newImageSource(sys *types.SystemContext, ref ociReference) (private.ImageSo client := &http.Client{} client.Transport = tr - descriptor, err := ref.getManifestDescriptor() + descriptor, _, err := ref.getManifestDescriptor() if err != nil { return nil, err } diff --git a/oci/layout/oci_transport.go b/oci/layout/oci_transport.go index 6586b84402..1e26dc5244 100644 --- a/oci/layout/oci_transport.go +++ b/oci/layout/oci_transport.go @@ -160,48 +160,56 @@ func (ref ociReference) NewImage(ctx context.Context, sys *types.SystemContext) // getIndex returns a pointer to the index references by this ociReference. If an error occurs opening an index nil is returned together // with an error. 
func (ref ociReference) getIndex() (*imgspecv1.Index, error) { - indexJSON, err := os.Open(ref.indexPath()) + return parseIndex(ref.indexPath()) +} + +func parseIndex(path string) (*imgspecv1.Index, error) { + return parseJSON[imgspecv1.Index](path) +} + +func parseJSON[T any](path string) (*T, error) { + content, err := os.Open(path) if err != nil { return nil, err } - defer indexJSON.Close() + defer content.Close() - index := &imgspecv1.Index{} - if err := json.NewDecoder(indexJSON).Decode(index); err != nil { + obj := new(T) + if err := json.NewDecoder(content).Decode(obj); err != nil { return nil, err } - return index, nil + return obj, nil } -func (ref ociReference) getManifestDescriptor() (imgspecv1.Descriptor, error) { +func (ref ociReference) getManifestDescriptor() (imgspecv1.Descriptor, int, error) { index, err := ref.getIndex() if err != nil { - return imgspecv1.Descriptor{}, err + return imgspecv1.Descriptor{}, -1, err } if ref.image == "" { // return manifest if only one image is in the oci directory if len(index.Manifests) != 1 { // ask user to choose image when more than one image in the oci directory - return imgspecv1.Descriptor{}, ErrMoreThanOneImage + return imgspecv1.Descriptor{}, -1, ErrMoreThanOneImage } - return index.Manifests[0], nil + return index.Manifests[0], 0, nil } else { // if image specified, look through all manifests for a match var unsupportedMIMETypes []string - for _, md := range index.Manifests { + for i, md := range index.Manifests { if refName, ok := md.Annotations[imgspecv1.AnnotationRefName]; ok && refName == ref.image { if md.MediaType == imgspecv1.MediaTypeImageManifest || md.MediaType == imgspecv1.MediaTypeImageIndex { - return md, nil + return md, i, nil } unsupportedMIMETypes = append(unsupportedMIMETypes, md.MediaType) } } if len(unsupportedMIMETypes) != 0 { - return imgspecv1.Descriptor{}, fmt.Errorf("reference %q matches unsupported manifest MIME types %q", ref.image, unsupportedMIMETypes) + return 
imgspecv1.Descriptor{}, -1, fmt.Errorf("reference %q matches unsupported manifest MIME types %q", ref.image, unsupportedMIMETypes) } } - return imgspecv1.Descriptor{}, ImageNotFoundError{ref} + return imgspecv1.Descriptor{}, -1, ImageNotFoundError{ref} } // LoadManifestDescriptor loads the manifest descriptor to be used to retrieve the image name @@ -211,7 +219,8 @@ func LoadManifestDescriptor(imgRef types.ImageReference) (imgspecv1.Descriptor, if !ok { return imgspecv1.Descriptor{}, errors.New("error typecasting, need type ociRef") } - return ociRef.getManifestDescriptor() + md, _, err := ociRef.getManifestDescriptor() + return md, err } // NewImageSource returns a types.ImageSource for this reference. @@ -226,19 +235,14 @@ func (ref ociReference) NewImageDestination(ctx context.Context, sys *types.Syst return newImageDestination(sys, ref) } -// DeleteImage deletes the named image from the registry, if supported. -func (ref ociReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { - return errors.New("Deleting images not implemented for oci: images") -} - // ociLayoutPath returns a path for the oci-layout within a directory using OCI conventions. func (ref ociReference) ociLayoutPath() string { - return filepath.Join(ref.dir, "oci-layout") + return filepath.Join(ref.dir, imgspecv1.ImageLayoutFile) } // indexPath returns a path for the index.json within a directory using OCI conventions. func (ref ociReference) indexPath() string { - return filepath.Join(ref.dir, "index.json") + return filepath.Join(ref.dir, imgspecv1.ImageIndexFile) } // blobPath returns a path for a blob within a directory using OCI image-layout conventions. 
@@ -246,9 +250,11 @@ func (ref ociReference) blobPath(digest digest.Digest, sharedBlobDir string) (st if err := digest.Validate(); err != nil { return "", fmt.Errorf("unexpected digest reference %s: %w", digest, err) } - blobDir := filepath.Join(ref.dir, "blobs") + var blobDir string if sharedBlobDir != "" { blobDir = sharedBlobDir + } else { + blobDir = filepath.Join(ref.dir, imgspecv1.ImageBlobsDir) } return filepath.Join(blobDir, digest.Algorithm().String(), digest.Hex()), nil } diff --git a/oci/layout/oci_transport_test.go b/oci/layout/oci_transport_test.go index fd348deb69..8beb52dd19 100644 --- a/oci/layout/oci_transport_test.go +++ b/oci/layout/oci_transport_test.go @@ -17,20 +17,21 @@ func TestGetManifestDescriptor(t *testing.T) { emptyDir := t.TempDir() for _, c := range []struct { - dir, image string - expected *imgspecv1.Descriptor // nil if a failure ie expected. errorIs / errorAs allows more specific checks. - errorIs error - errorAs any + dir, image string + expectedDescriptor *imgspecv1.Descriptor // nil if a failure ie expected. errorIs / errorAs allows more specific checks. 
+ expectedIndex int + errorIs error + errorAs any }{ { // Index is missing - dir: emptyDir, - image: "", - expected: nil, + dir: emptyDir, + image: "", + expectedDescriptor: nil, }, { // A valid reference to the only manifest dir: "fixtures/manifest", image: "", - expected: &imgspecv1.Descriptor{ + expectedDescriptor: &imgspecv1.Descriptor{ MediaType: "application/vnd.oci.image.manifest.v1+json", Digest: "sha256:84afb6189c4d69f2d040c5f1dc4e0a16fed9b539ce9cfb4ac2526ae4e0576cc0", Size: 496, @@ -40,52 +41,56 @@ func TestGetManifestDescriptor(t *testing.T) { OS: "linux", }, }, + expectedIndex: 0, }, { // An ambiguous reference to a multi-manifest directory - dir: "fixtures/two_images_manifest", - image: "", - expected: nil, - errorIs: ErrMoreThanOneImage, + dir: "fixtures/two_images_manifest", + image: "", + expectedDescriptor: nil, + errorIs: ErrMoreThanOneImage, }, { // A valid reference in a multi-manifest directory dir: "fixtures/name_lookups", image: "a", - expected: &imgspecv1.Descriptor{ + expectedDescriptor: &imgspecv1.Descriptor{ MediaType: "application/vnd.oci.image.manifest.v1+json", Digest: "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", Size: 1, Annotations: map[string]string{"org.opencontainers.image.ref.name": "a"}, }, + expectedIndex: 0, }, { // A valid reference in a multi-manifest directory dir: "fixtures/name_lookups", image: "b", - expected: &imgspecv1.Descriptor{ + expectedDescriptor: &imgspecv1.Descriptor{ MediaType: "application/vnd.oci.image.manifest.v1+json", Digest: "sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", Size: 2, Annotations: map[string]string{"org.opencontainers.image.ref.name": "b"}, }, + expectedIndex: 1, }, { // No entry found - dir: "fixtures/name_lookups", - image: "this-does-not-exist", - expected: nil, - errorAs: &ImageNotFoundError{}, + dir: "fixtures/name_lookups", + image: "this-does-not-exist", + expectedDescriptor: nil, + errorAs: &ImageNotFoundError{}, }, { // Entries 
with invalid MIME types found - dir: "fixtures/name_lookups", - image: "invalid-mime", - expected: nil, + dir: "fixtures/name_lookups", + image: "invalid-mime", + expectedDescriptor: nil, }, } { ref, err := NewReference(c.dir, c.image) require.NoError(t, err) - res, err := ref.(ociReference).getManifestDescriptor() - if c.expected != nil { + res, i, err := ref.(ociReference).getManifestDescriptor() + if c.expectedDescriptor != nil { require.NoError(t, err) - assert.Equal(t, *c.expected, res) + assert.Equal(t, c.expectedIndex, i) + assert.Equal(t, *c.expectedDescriptor, res) } else { require.Error(t, err) if c.errorIs != nil { @@ -319,12 +324,6 @@ func TestReferenceNewImageDestination(t *testing.T) { defer dest.Close() } -func TestReferenceDeleteImage(t *testing.T) { - ref, _ := refToTempOCI(t) - err := ref.DeleteImage(context.Background(), nil) - assert.Error(t, err) -} - func TestReferenceOCILayoutPath(t *testing.T) { ref, tmpDir := refToTempOCI(t) ociRef, ok := ref.(ociReference) diff --git a/pkg/blobinfocache/boltdb/boltdb.go b/pkg/blobinfocache/boltdb/boltdb.go index a472efd95b..9a8fa22ab9 100644 --- a/pkg/blobinfocache/boltdb/boltdb.go +++ b/pkg/blobinfocache/boltdb/boltdb.go @@ -23,7 +23,7 @@ var ( // uncompressedDigestBucket stores a mapping from any digest to an uncompressed digest. uncompressedDigestBucket = []byte("uncompressedDigest") - // digestCompressorBucket stores a mapping from any digest to a compressor, or blobinfocache.Uncompressed + // digestCompressorBucket stores a mapping from any digest to a compressor, or blobinfocache.Uncompressed (not blobinfocache.UnknownCompression). // It may not exist in caches created by older versions, even if uncompressedDigestBucket is present. 
digestCompressorBucket = []byte("digestCompressor") // digestByUncompressedBucket stores a bucket per uncompressed digest, with the bucket containing a set of digests for that uncompressed digest @@ -98,6 +98,11 @@ type cache struct { // New returns a BlobInfoCache implementation which uses a BoltDB file at path. // // Most users should call blobinfocache.DefaultCache instead. +// +// Deprecated: The BoltDB implementation triggers a panic() on some database format errors; that does not allow +// practical error recovery / fallback. +// +// Use blobinfocache.DefaultCache if at all possible; if not, the pkg/blobinfocache/sqlite implementation. func New(path string) types.BlobInfoCache { return new2(path) } @@ -105,6 +110,15 @@ func new2(path string) *cache { return &cache{path: path} } +// Open() sets up the cache for future accesses, potentially acquiring costly state. Each Open() must be paired with a Close(). +// Note that public callers may call the types.BlobInfoCache operations without Open()/Close(). +func (bdc *cache) Open() { +} + +// Close destroys state created by Open(). +func (bdc *cache) Close() { +} + // view returns runs the specified fn within a read-only transaction on the database. func (bdc *cache) view(fn func(tx *bolt.Tx) error) (retErr error) { // bolt.Open(bdc.path, 0600, &bolt.Options{ReadOnly: true}) will, if the file does not exist, @@ -282,13 +296,14 @@ func (bdc *cache) RecordKnownLocation(transport types.ImageTransport, scope type }) // FIXME? Log error (but throttle the log volume on repeated accesses)? } -// appendReplacementCandidates creates prioritize.CandidateWithTime values for digest in scopeBucket with corresponding compression info from compressionBucket (if compressionBucket is not nil), and returns the result of appending them to candidates. 
-func (bdc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, scopeBucket, compressionBucket *bolt.Bucket, digest digest.Digest, requireCompressionInfo bool) []prioritize.CandidateWithTime { +// appendReplacementCandidates creates prioritize.CandidateWithTime values for digest in scopeBucket +// (which might be nil) with corresponding compression +// info from compressionBucket (which might be nil), and returns the result of appending them +// to candidates. +// v2Output allows including candidates with unknown location, and filters out candidates +// with unknown compression. +func (bdc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, scopeBucket, compressionBucket *bolt.Bucket, digest digest.Digest, v2Output bool) []prioritize.CandidateWithTime { digestKey := []byte(digest.String()) - b := scopeBucket.Bucket(digestKey) - if b == nil { - return candidates - } compressorName := blobinfocache.UnknownCompression if compressionBucket != nil { // the bucket won't exist if the cache was created by a v1 implementation and @@ -297,28 +312,44 @@ func (bdc *cache) appendReplacementCandidates(candidates []prioritize.CandidateW compressorName = string(compressorNameValue) } } - if compressorName == blobinfocache.UnknownCompression && requireCompressionInfo { + if compressorName == blobinfocache.UnknownCompression && v2Output { return candidates } - _ = b.ForEach(func(k, v []byte) error { - t := time.Time{} - if err := t.UnmarshalBinary(v); err != nil { - return err - } + var b *bolt.Bucket + if scopeBucket != nil { + b = scopeBucket.Bucket(digestKey) + } + if b != nil { + _ = b.ForEach(func(k, v []byte) error { + t := time.Time{} + if err := t.UnmarshalBinary(v); err != nil { + return err + } + candidates = append(candidates, prioritize.CandidateWithTime{ + Candidate: blobinfocache.BICReplacementCandidate2{ + Digest: digest, + CompressorName: compressorName, + Location: types.BICLocationReference{Opaque: string(k)}, + 
}, + LastSeen: t, + }) + return nil + }) // FIXME? Log error (but throttle the log volume on repeated accesses)? + } else if v2Output { candidates = append(candidates, prioritize.CandidateWithTime{ Candidate: blobinfocache.BICReplacementCandidate2{ - Digest: digest, - CompressorName: compressorName, - Location: types.BICLocationReference{Opaque: string(k)}, + Digest: digest, + CompressorName: compressorName, + UnknownLocation: true, + Location: types.BICLocationReference{Opaque: ""}, }, - LastSeen: t, + LastSeen: time.Time{}, }) - return nil - }) // FIXME? Log error (but throttle the log volume on repeated accesses)? + } return candidates } -// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations that could possibly be reused +// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations (if known) that could possibly be reused // within the specified (transport scope) (if they still exist, which is not guaranteed). // // If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute, @@ -328,27 +359,22 @@ func (bdc *cache) CandidateLocations2(transport types.ImageTransport, scope type return bdc.candidateLocations(transport, scope, primaryDigest, canSubstitute, true) } -func (bdc *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute, requireCompressionInfo bool) []blobinfocache.BICReplacementCandidate2 { +func (bdc *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute, v2Output bool) []blobinfocache.BICReplacementCandidate2 { res := []prioritize.CandidateWithTime{} var uncompressedDigestValue digest.Digest // = "" if err := bdc.view(func(tx *bolt.Tx) error { scopeBucket := tx.Bucket(knownLocationsBucket) - if scopeBucket == nil { - return nil + if scopeBucket != nil { + scopeBucket = 
scopeBucket.Bucket([]byte(transport.Name())) } - scopeBucket = scopeBucket.Bucket([]byte(transport.Name())) - if scopeBucket == nil { - return nil - } - scopeBucket = scopeBucket.Bucket([]byte(scope.Opaque)) - if scopeBucket == nil { - return nil + if scopeBucket != nil { + scopeBucket = scopeBucket.Bucket([]byte(scope.Opaque)) } // compressionBucket won't have been created if previous writers never recorded info about compression, // and we don't want to fail just because of that compressionBucket := tx.Bucket(digestCompressorBucket) - res = bdc.appendReplacementCandidates(res, scopeBucket, compressionBucket, primaryDigest, requireCompressionInfo) + res = bdc.appendReplacementCandidates(res, scopeBucket, compressionBucket, primaryDigest, v2Output) if canSubstitute { if uncompressedDigestValue = bdc.uncompressedDigest(tx, primaryDigest); uncompressedDigestValue != "" { b := tx.Bucket(digestByUncompressedBucket) @@ -361,7 +387,7 @@ func (bdc *cache) candidateLocations(transport types.ImageTransport, scope types return err } if d != primaryDigest && d != uncompressedDigestValue { - res = bdc.appendReplacementCandidates(res, scopeBucket, compressionBucket, d, requireCompressionInfo) + res = bdc.appendReplacementCandidates(res, scopeBucket, compressionBucket, d, v2Output) } return nil }); err != nil { @@ -370,7 +396,7 @@ func (bdc *cache) candidateLocations(transport types.ImageTransport, scope types } } if uncompressedDigestValue != primaryDigest { - res = bdc.appendReplacementCandidates(res, scopeBucket, compressionBucket, uncompressedDigestValue, requireCompressionInfo) + res = bdc.appendReplacementCandidates(res, scopeBucket, compressionBucket, uncompressedDigestValue, v2Output) } } } @@ -385,7 +411,7 @@ func (bdc *cache) candidateLocations(transport types.ImageTransport, scope types // CandidateLocations returns a prioritized, limited, number of blobs and their locations that could possibly be reused // within the specified (transport scope) (if they still exist, 
which is not guaranteed). // -// If !canSubstitute, the returned cadidates will match the submitted digest exactly; if canSubstitute, +// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute, // data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same // uncompressed digest. func (bdc *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate { diff --git a/pkg/blobinfocache/default.go b/pkg/blobinfocache/default.go index 83034b618d..037572b0ee 100644 --- a/pkg/blobinfocache/default.go +++ b/pkg/blobinfocache/default.go @@ -6,8 +6,8 @@ import ( "path/filepath" "github.com/containers/image/v5/internal/rootless" - "github.com/containers/image/v5/pkg/blobinfocache/boltdb" "github.com/containers/image/v5/pkg/blobinfocache/memory" + "github.com/containers/image/v5/pkg/blobinfocache/sqlite" "github.com/containers/image/v5/types" "github.com/sirupsen/logrus" ) @@ -15,7 +15,7 @@ import ( const ( // blobInfoCacheFilename is the file name used for blob info caches. // If the format changes in an incompatible way, increase the version number. - blobInfoCacheFilename = "blob-info-cache-v1.boltdb" + blobInfoCacheFilename = "blob-info-cache-v1.sqlite" // systemBlobInfoCacheDir is the directory containing the blob info cache (in blobInfocacheFilename) for root-running processes. 
systemBlobInfoCacheDir = "/var/lib/containers/cache" ) @@ -57,10 +57,20 @@ func DefaultCache(sys *types.SystemContext) types.BlobInfoCache { } path := filepath.Join(dir, blobInfoCacheFilename) if err := os.MkdirAll(dir, 0700); err != nil { - logrus.Debugf("Error creating parent directories for %s, using a memory-only cache: %v", blobInfoCacheFilename, err) + logrus.Debugf("Error creating parent directories for %s, using a memory-only cache: %v", path, err) return memory.New() } - logrus.Debugf("Using blob info cache at %s", path) - return boltdb.New(path) + // It might make sense to keep a single sqlite cache object, and a single initialized sqlite connection, open + // as global singleton, for the vast majority of callers who don’t override thde cache location. + // OTOH that would keep a file descriptor open forever, even for long-term callers who copy images rarely, + // and the performance benefit to this over using an Open()/Close() pair for a single image copy is < 10%. + + cache, err := sqlite.New(path) + if err != nil { + logrus.Debugf("Error creating a SQLite blob info cache at %s, using a memory-only cache: %v", path, err) + return memory.New() + } + logrus.Debugf("Using SQLite blob info cache at %s", path) + return cache } diff --git a/pkg/blobinfocache/default_test.go b/pkg/blobinfocache/default_test.go index 83a3fea98a..41c32fe363 100644 --- a/pkg/blobinfocache/default_test.go +++ b/pkg/blobinfocache/default_test.go @@ -9,8 +9,8 @@ import ( "github.com/sirupsen/logrus" "github.com/stretchr/testify/require" - "github.com/containers/image/v5/pkg/blobinfocache/boltdb" "github.com/containers/image/v5/pkg/blobinfocache/memory" + "github.com/containers/image/v5/pkg/blobinfocache/sqlite" "github.com/containers/image/v5/types" "github.com/stretchr/testify/assert" ) @@ -103,8 +103,10 @@ func TestDefaultCache(t *testing.T) { // Success normalDir := filepath.Join(tmpDir, "normal") c := DefaultCache(&types.SystemContext{BlobInfoCacheDir: normalDir}) - // This is 
ugly hard-coding internals of boltDBCache: - assert.Equal(t, boltdb.New(filepath.Join(normalDir, blobInfoCacheFilename)), c) + // This is ugly hard-coding internals of sqlite.cache + sqliteCache, err := sqlite.New(filepath.Join(normalDir, blobInfoCacheFilename)) + require.NoError(t, err) + assert.Equal(t, sqliteCache, c) // Error running blobInfoCacheDir: // Use t.Setenv() just as a way to set up cleanup to original values; then os.Unsetenv() to test a situation where the values are not set. @@ -117,7 +119,7 @@ func TestDefaultCache(t *testing.T) { // Error creating the parent directory: unwritableDir := filepath.Join(tmpDir, "unwritable") - err := os.Mkdir(unwritableDir, 0700) + err = os.Mkdir(unwritableDir, 0700) require.NoError(t, err) defer func() { err = os.Chmod(unwritableDir, 0700) // To make it possible to remove it again diff --git a/pkg/blobinfocache/internal/prioritize/prioritize.go b/pkg/blobinfocache/internal/prioritize/prioritize.go index bc9315f6ef..470fca0c18 100644 --- a/pkg/blobinfocache/internal/prioritize/prioritize.go +++ b/pkg/blobinfocache/internal/prioritize/prioritize.go @@ -10,15 +10,20 @@ import ( "github.com/opencontainers/go-digest" ) -// replacementAttempts is the number of blob replacement candidates returned by destructivelyPrioritizeReplacementCandidates, +// replacementAttempts is the number of blob replacement candidates with known location returned by destructivelyPrioritizeReplacementCandidates, // and therefore ultimately by types.BlobInfoCache.CandidateLocations. // This is a heuristic/guess, and could well use a different value. const replacementAttempts = 5 +// replacementUnknownLocationAttempts is the number of blob replacement candidates with unknown Location returned by destructivelyPrioritizeReplacementCandidates, +// and therefore ultimately by blobinfocache.BlobInfoCache2.CandidateLocations2. +// This is a heuristic/guess, and could well use a different value. 
+const replacementUnknownLocationAttempts = 2 + // CandidateWithTime is the input to types.BICReplacementCandidate prioritization. type CandidateWithTime struct { Candidate blobinfocache.BICReplacementCandidate2 // The replacement candidate - LastSeen time.Time // Time the candidate was last known to exist (either read or written) + LastSeen time.Time // Time the candidate was last known to exist (either read or written) (not set for Candidate.UnknownLocation) } // candidateSortState is a local state implementing sort.Interface on candidates to prioritize, @@ -77,9 +82,22 @@ func (css *candidateSortState) Swap(i, j int) { css.cs[i], css.cs[j] = css.cs[j], css.cs[i] } -// destructivelyPrioritizeReplacementCandidatesWithMax is destructivelyPrioritizeReplacementCandidates with a parameter for the -// number of entries to limit, only to make testing simpler. -func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest, maxCandidates int) []blobinfocache.BICReplacementCandidate2 { +func min(a, b int) int { + if a < b { + return a + } + return b +} + +// destructivelyPrioritizeReplacementCandidatesWithMax is destructivelyPrioritizeReplacementCandidates with parameters for the +// number of entries to limit for known and unknown location separately, only to make testing simpler. +// TODO: following function is not destructive any more in the nature instead prioritized result is actually copies of the original +// candidate set, so In future we might wanna re-name this public API and remove the destructive prefix. +func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest, totalLimit int, noLocationLimit int) []blobinfocache.BICReplacementCandidate2 { + // split unknown candidates and known candidates + // and limit them separately. 
+ var knownLocationCandidates []CandidateWithTime + var unknownLocationCandidates []CandidateWithTime // We don't need to use sort.Stable() because nanosecond timestamps are (presumably?) unique, so no two elements should // compare equal. // FIXME: Use slices.SortFunc after we update to Go 1.20 (Go 1.21?) and Time.Compare and cmp.Compare are available. @@ -88,24 +106,34 @@ func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime, primaryDigest: primaryDigest, uncompressedDigest: uncompressedDigest, }) + for _, candidate := range cs { + if candidate.Candidate.UnknownLocation { + unknownLocationCandidates = append(unknownLocationCandidates, candidate) + } else { + knownLocationCandidates = append(knownLocationCandidates, candidate) + } + } - resLength := len(cs) - if resLength > maxCandidates { - resLength = maxCandidates + knownLocationCandidatesUsed := min(len(knownLocationCandidates), totalLimit) + remainingCapacity := totalLimit - knownLocationCandidatesUsed + unknownLocationCandidatesUsed := min(noLocationLimit, min(remainingCapacity, len(unknownLocationCandidates))) + res := make([]blobinfocache.BICReplacementCandidate2, knownLocationCandidatesUsed) + for i := 0; i < knownLocationCandidatesUsed; i++ { + res[i] = knownLocationCandidates[i].Candidate } - res := make([]blobinfocache.BICReplacementCandidate2, resLength) - for i := range res { - res[i] = cs[i].Candidate + // If candidates with unknown location are found, lets add them to final list + for i := 0; i < unknownLocationCandidatesUsed; i++ { + res = append(res, unknownLocationCandidates[i].Candidate) } return res } // DestructivelyPrioritizeReplacementCandidates consumes AND DESTROYS an array of possible replacement candidates with their last known existence times, -// the primary digest the user actually asked for, and the corresponding uncompressed digest (if known, possibly equal to the primary digest), -// and returns an appropriately prioritized and/or trimmed result suitable 
for a return value from types.BlobInfoCache.CandidateLocations. +// the primary digest the user actually asked for, the corresponding uncompressed digest (if known, possibly equal to the primary digest) returns an +// appropriately prioritized and/or trimmed result suitable for a return value from types.BlobInfoCache.CandidateLocations. // // WARNING: The array of candidates is destructively modified. (The implementation of this function could of course // make a copy, but all CandidateLocations implementations build the slice of candidates only for the single purpose of calling this function anyway.) func DestructivelyPrioritizeReplacementCandidates(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest) []blobinfocache.BICReplacementCandidate2 { - return destructivelyPrioritizeReplacementCandidatesWithMax(cs, primaryDigest, uncompressedDigest, replacementAttempts) + return destructivelyPrioritizeReplacementCandidatesWithMax(cs, primaryDigest, uncompressedDigest, replacementAttempts, replacementUnknownLocationAttempts) } diff --git a/pkg/blobinfocache/internal/prioritize/prioritize_test.go b/pkg/blobinfocache/internal/prioritize/prioritize_test.go index 77167e904c..f66d842061 100644 --- a/pkg/blobinfocache/internal/prioritize/prioritize_test.go +++ b/pkg/blobinfocache/internal/prioritize/prioritize_test.go @@ -32,6 +32,10 @@ var ( {blobinfocache.BICReplacementCandidate2{Digest: digestCompressedPrimary, Location: types.BICLocationReference{Opaque: "P2"}, CompressorName: compressiontypes.GzipAlgorithmName}, time.Unix(1, 1)}, {blobinfocache.BICReplacementCandidate2{Digest: digestCompressedB, Location: types.BICLocationReference{Opaque: "B2"}, CompressorName: blobinfocache.Uncompressed}, time.Unix(2, 0)}, {blobinfocache.BICReplacementCandidate2{Digest: digestUncompressed, Location: types.BICLocationReference{Opaque: "U1"}, CompressorName: blobinfocache.UnknownCompression}, time.Unix(1, 0)}, + {blobinfocache.BICReplacementCandidate2{Digest: 
digestUncompressed, UnknownLocation: true, Location: types.BICLocationReference{Opaque: ""}, CompressorName: blobinfocache.UnknownCompression}, time.Time{}}, + {blobinfocache.BICReplacementCandidate2{Digest: digestCompressedA, UnknownLocation: true, Location: types.BICLocationReference{Opaque: ""}, CompressorName: blobinfocache.UnknownCompression}, time.Time{}}, + {blobinfocache.BICReplacementCandidate2{Digest: digestCompressedB, UnknownLocation: true, Location: types.BICLocationReference{Opaque: ""}, CompressorName: blobinfocache.UnknownCompression}, time.Time{}}, + {blobinfocache.BICReplacementCandidate2{Digest: digestCompressedPrimary, UnknownLocation: true, Location: types.BICLocationReference{Opaque: ""}, CompressorName: blobinfocache.UnknownCompression}, time.Time{}}, }, primaryDigest: digestCompressedPrimary, uncompressedDigest: digestUncompressed, @@ -46,12 +50,16 @@ var ( {Digest: digestCompressedA, Location: types.BICLocationReference{Opaque: "A1"}, CompressorName: compressiontypes.XzAlgorithmName}, {Digest: digestUncompressed, Location: types.BICLocationReference{Opaque: "U2"}, CompressorName: compressiontypes.GzipAlgorithmName}, {Digest: digestUncompressed, Location: types.BICLocationReference{Opaque: "U1"}, CompressorName: blobinfocache.UnknownCompression}, + {Digest: digestCompressedPrimary, UnknownLocation: true, Location: types.BICLocationReference{Opaque: ""}, CompressorName: blobinfocache.UnknownCompression}, + {Digest: digestCompressedA, UnknownLocation: true, Location: types.BICLocationReference{Opaque: ""}, CompressorName: blobinfocache.UnknownCompression}, + {Digest: digestCompressedB, UnknownLocation: true, Location: types.BICLocationReference{Opaque: ""}, CompressorName: blobinfocache.UnknownCompression}, + {Digest: digestUncompressed, UnknownLocation: true, Location: types.BICLocationReference{Opaque: ""}, CompressorName: blobinfocache.UnknownCompression}, } ) func TestCandidateSortStateLen(t *testing.T) { css := cssLiteral - 
assert.Equal(t, 8, css.Len()) + assert.Equal(t, 12, css.Len()) css.cs = []CandidateWithTime{} assert.Equal(t, 0, css.Len()) @@ -156,13 +164,15 @@ func TestCandidateSortStateSwap(t *testing.T) { } func TestDestructivelyPrioritizeReplacementCandidatesWithMax(t *testing.T) { - for _, max := range []int{0, 1, replacementAttempts, 100} { - // Just a smoke test; we mostly rely on test coverage in TestCandidateSortStateLess - res := destructivelyPrioritizeReplacementCandidatesWithMax(slices.Clone(cssLiteral.cs), digestCompressedPrimary, digestUncompressed, max) - if max > len(cssExpectedReplacementCandidates) { - max = len(cssExpectedReplacementCandidates) + totalUnknownLocationCandidates := 4 + for _, totalLimit := range []int{0, 1, replacementAttempts, 100, replacementUnknownLocationAttempts} { + for _, noLocationLimit := range []int{0, 1, replacementAttempts, 100, replacementUnknownLocationAttempts} { + totalKnownLocationCandidates := len(cssExpectedReplacementCandidates) - totalUnknownLocationCandidates + allowedUnknown := min(noLocationLimit, totalUnknownLocationCandidates) + expectedLen := min(totalKnownLocationCandidates+allowedUnknown, totalLimit) + res := destructivelyPrioritizeReplacementCandidatesWithMax(slices.Clone(cssLiteral.cs), digestCompressedPrimary, digestUncompressed, totalLimit, noLocationLimit) + assert.Equal(t, cssExpectedReplacementCandidates[:expectedLen], res) } - assert.Equal(t, cssExpectedReplacementCandidates[:max], res) } } diff --git a/pkg/blobinfocache/internal/test/test.go b/pkg/blobinfocache/internal/test/test.go index ce4ce5e302..c310bb6ae5 100644 --- a/pkg/blobinfocache/internal/test/test.go +++ b/pkg/blobinfocache/internal/test/test.go @@ -16,6 +16,7 @@ const ( digestUncompressed = digest.Digest("sha256:2222222222222222222222222222222222222222222222222222222222222222") digestCompressedA = digest.Digest("sha256:3333333333333333333333333333333333333333333333333333333333333333") digestCompressedB = 
digest.Digest("sha256:4444444444444444444444444444444444444444444444444444444444444444") + digestUncompressedC = digest.Digest("sha256:7777777777777777777777777777777777777777777777777777777777777777") digestCompressedUnrelated = digest.Digest("sha256:5555555555555555555555555555555555555555555555555555555555555555") compressorNameU = "compressorName/U" compressorNameA = "compressorName/A" @@ -26,7 +27,7 @@ const ( // GenericCache runs an implementation-independent set of tests, given a // newTestCache, which can be called repeatedly and always returns a fresh cache instance func GenericCache(t *testing.T, newTestCache func(t *testing.T) blobinfocache.BlobInfoCache2) { - for _, s := range []struct { + subs := []struct { name string fn func(t *testing.T, cache blobinfocache.BlobInfoCache2) }{ @@ -35,9 +36,22 @@ func GenericCache(t *testing.T, newTestCache func(t *testing.T) blobinfocache.Bl {"RecordKnownLocations", testGenericRecordKnownLocations}, {"CandidateLocations", testGenericCandidateLocations}, {"CandidateLocations2", testGenericCandidateLocations2}, - } { - t.Run(s.name, func(t *testing.T) { + } + + // Without Open()/Close() + for _, s := range subs { + t.Run("no Open: "+s.name, func(t *testing.T) { + cache := newTestCache(t) + s.fn(t, cache) + }) + } + + // With Open()/Close() + for _, s := range subs { + t.Run("with Open: "+s.name, func(t *testing.T) { cache := newTestCache(t) + cache.Open() + defer cache.Close() s.fn(t, cache) }) } @@ -205,6 +219,27 @@ func testGenericCandidateLocations2(t *testing.T, cache blobinfocache.BlobInfoCa assert.Equal(t, []blobinfocache.BICReplacementCandidate2{}, cache.CandidateLocations2(transport, scope, digestUnknown, false)) assert.Equal(t, []blobinfocache.BICReplacementCandidate2{}, cache.CandidateLocations2(transport, scope, digestUnknown, true)) + // If a record exists with compression without Location then + // then return a record without location and with `UnknownLocation: true` + 
cache.RecordDigestCompressorName(digestUncompressedC, "somecompression") + assert.Equal(t, []blobinfocache.BICReplacementCandidate2{ + { + Digest: digestUncompressedC, + CompressorName: "somecompression", + UnknownLocation: true, + Location: types.BICLocationReference{Opaque: ""}, + }}, cache.CandidateLocations2(transport, scope, digestUncompressedC, true)) + // When another entry with scope and Location is set then it should be returned as it has higher + // priority. + cache.RecordKnownLocation(transport, scope, digestUncompressedC, types.BICLocationReference{Opaque: "somelocation"}) + assert.Equal(t, []blobinfocache.BICReplacementCandidate2{ + { + Digest: digestUncompressedC, + CompressorName: "somecompression", + UnknownLocation: false, + Location: types.BICLocationReference{Opaque: "somelocation"}, + }}, cache.CandidateLocations2(transport, scope, digestUncompressedC, true)) + // Record "2" entries before "1" entries; then results should sort "1" (more recent) before "2" (older) for _, suffix := range []string{"2", "1"} { for _, e := range digestNameSet { @@ -212,7 +247,9 @@ func testGenericCandidateLocations2(t *testing.T, cache blobinfocache.BlobInfoCa } } - // Clear any "known" compression values, except on the first loop where they've never been set + // Clear any "known" compression values, except on the first loop where they've never been set. + // This probably triggers “Compressor for blob with digest … previously recorded as …, now unknown” warnings here, for test purposes; + // that shouldn’t happen in real-world usage. 
if scopeIndex != 0 { for _, e := range digestNameSet { cache.RecordDigestCompressorName(e.d, blobinfocache.UnknownCompression) diff --git a/pkg/blobinfocache/memory/memory.go b/pkg/blobinfocache/memory/memory.go index 427610fab0..16193db952 100644 --- a/pkg/blobinfocache/memory/memory.go +++ b/pkg/blobinfocache/memory/memory.go @@ -27,7 +27,7 @@ type cache struct { uncompressedDigests map[digest.Digest]digest.Digest digestsByUncompressed map[digest.Digest]*set.Set[digest.Digest] // stores a set of digests for each uncompressed digest knownLocations map[locationKey]map[types.BICLocationReference]time.Time // stores last known existence time for each location reference - compressors map[digest.Digest]string // stores a compressor name, or blobinfocache.Unknown, for each digest + compressors map[digest.Digest]string // stores a compressor name, or blobinfocache.Unknown (not blobinfocache.UnknownCompression), for each digest } // New returns a BlobInfoCache implementation which is in-memory only. @@ -51,6 +51,15 @@ func new2() *cache { } } +// Open() sets up the cache for future accesses, potentially acquiring costly state. Each Open() must be paired with a Close(). +// Note that public callers may call the types.BlobInfoCache operations without Open()/Close(). +func (mem *cache) Open() { +} + +// Close destroys state created by Open(). +func (mem *cache) Close() { +} + // UncompressedDigest returns an uncompressed digest corresponding to anyDigest. // May return anyDigest if it is known to be uncompressed. // Returns "" if nothing is known about the digest (it may be compressed or uncompressed). 
@@ -114,6 +123,9 @@ func (mem *cache) RecordKnownLocation(transport types.ImageTransport, scope type func (mem *cache) RecordDigestCompressorName(blobDigest digest.Digest, compressorName string) { mem.mutex.Lock() defer mem.mutex.Unlock() + if previous, ok := mem.compressors[blobDigest]; ok && previous != compressorName { + logrus.Warnf("Compressor for blob with digest %s previously recorded as %s, now %s", blobDigest, previous, compressorName) + } if compressorName == blobinfocache.UnknownCompression { delete(mem.compressors, blobDigest) return @@ -121,24 +133,39 @@ func (mem *cache) RecordDigestCompressorName(blobDigest digest.Digest, compresso mem.compressors[blobDigest] = compressorName } -// appendReplacementCandidates creates prioritize.CandidateWithTime values for (transport, scope, digest), and returns the result of appending them to candidates. -func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, requireCompressionInfo bool) []prioritize.CandidateWithTime { +// appendReplacementCandidates creates prioritize.CandidateWithTime values for digest in memory +// with corresponding compression info from mem.compressors, and returns the result of appending +// them to candidates. v2Output allows including candidates with unknown location, and filters out +// candidates with unknown compression. 
+func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, v2Output bool) []prioritize.CandidateWithTime { + compressorName := blobinfocache.UnknownCompression + if v, ok := mem.compressors[digest]; ok { + compressorName = v + } + if compressorName == blobinfocache.UnknownCompression && v2Output { + return candidates + } locations := mem.knownLocations[locationKey{transport: transport.Name(), scope: scope, blobDigest: digest}] // nil if not present - for l, t := range locations { - compressorName, compressorKnown := mem.compressors[digest] - if !compressorKnown { - if requireCompressionInfo { - continue - } - compressorName = blobinfocache.UnknownCompression + if len(locations) > 0 { + for l, t := range locations { + candidates = append(candidates, prioritize.CandidateWithTime{ + Candidate: blobinfocache.BICReplacementCandidate2{ + Digest: digest, + CompressorName: compressorName, + Location: l, + }, + LastSeen: t, + }) } + } else if v2Output { candidates = append(candidates, prioritize.CandidateWithTime{ Candidate: blobinfocache.BICReplacementCandidate2{ - Digest: digest, - CompressorName: compressorName, - Location: l, + Digest: digest, + CompressorName: compressorName, + UnknownLocation: true, + Location: types.BICLocationReference{Opaque: ""}, }, - LastSeen: t, + LastSeen: time.Time{}, }) } return candidates @@ -154,33 +181,34 @@ func (mem *cache) CandidateLocations(transport types.ImageTransport, scope types return blobinfocache.CandidateLocationsFromV2(mem.candidateLocations(transport, scope, primaryDigest, canSubstitute, false)) } -// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations that could possibly be reused +// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations (if known) that could possibly be reused // within the specified (transport scope) (if they still exist, 
which is not guaranteed). // -// If !canSubstitute, the returned cadidates will match the submitted digest exactly; if canSubstitute, +// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute, // data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same // uncompressed digest. func (mem *cache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []blobinfocache.BICReplacementCandidate2 { return mem.candidateLocations(transport, scope, primaryDigest, canSubstitute, true) } -func (mem *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute, requireCompressionInfo bool) []blobinfocache.BICReplacementCandidate2 { +func (mem *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute, v2Output bool) []blobinfocache.BICReplacementCandidate2 { mem.mutex.Lock() defer mem.mutex.Unlock() res := []prioritize.CandidateWithTime{} - res = mem.appendReplacementCandidates(res, transport, scope, primaryDigest, requireCompressionInfo) + res = mem.appendReplacementCandidates(res, transport, scope, primaryDigest, v2Output) var uncompressedDigest digest.Digest // = "" if canSubstitute { if uncompressedDigest = mem.uncompressedDigestLocked(primaryDigest); uncompressedDigest != "" { - if otherDigests, ok := mem.digestsByUncompressed[uncompressedDigest]; ok { + otherDigests := mem.digestsByUncompressed[uncompressedDigest] // nil if not present in the map + if otherDigests != nil { for _, d := range otherDigests.Values() { if d != primaryDigest && d != uncompressedDigest { - res = mem.appendReplacementCandidates(res, transport, scope, d, requireCompressionInfo) + res = mem.appendReplacementCandidates(res, transport, scope, d, v2Output) } } } if 
uncompressedDigest != primaryDigest { - res = mem.appendReplacementCandidates(res, transport, scope, uncompressedDigest, requireCompressionInfo) + res = mem.appendReplacementCandidates(res, transport, scope, uncompressedDigest, v2Output) } } } diff --git a/pkg/blobinfocache/sqlite/sqlite.go b/pkg/blobinfocache/sqlite/sqlite.go new file mode 100644 index 0000000000..d8bde2fa0e --- /dev/null +++ b/pkg/blobinfocache/sqlite/sqlite.go @@ -0,0 +1,575 @@ +// Package sqlite implements a BlobInfoCache backed by SQLite. +package sqlite + +import ( + "database/sql" + "errors" + "fmt" + "sync" + "time" + + "github.com/containers/image/v5/internal/blobinfocache" + "github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize" + "github.com/containers/image/v5/types" + _ "github.com/mattn/go-sqlite3" // Registers the "sqlite3" backend for database/sql + "github.com/opencontainers/go-digest" + "github.com/sirupsen/logrus" +) + +const ( + // NOTE: There is no versioning data inside the file; this is a “cache”, so on an incompatible format upgrade + // we can simply start over with a different filename; update blobInfoCacheFilename. + // That also means we don’t have to worry about co-existing readers/writers which know different versions of the schema + // (which would require compatibility in both directions). + + // Assembled sqlite options used when opening the database. + sqliteOptions = "?" + + // Deal with timezone automatically. + // go-sqlite3 always _records_ timestamps as a text: time in local time + a time zone offset. + // _loc affects how the values are _parsed_: (which timezone is assumed for numeric timestamps or for text which does not specify an offset, or) + // if the time zone offset matches the specified time zone, the timestamp is assumed to be in that time zone / location; + // (otherwise an unnamed time zone carrying just a hard-coded offset, but no location / DST rules is used). 
+ "_loc=auto" + + // Force an fsync after each transaction (https://www.sqlite.org/pragma.html#pragma_synchronous). + "&_sync=FULL" + + // Allow foreign keys (https://www.sqlite.org/pragma.html#pragma_foreign_keys). + // We don’t currently use any foreign keys, but this is a good choice long-term (not default in SQLite only for historical reasons). + "&_foreign_keys=1" + + // Use BEGIN EXCLUSIVE (https://www.sqlite.org/lang_transaction.html); + // i.e. obtain a write lock for _all_ transactions at the transaction start (never use a read lock, + // never upgrade from a read to a write lock - that can fail if multiple read lock owners try to do that simultaneously). + // + // This, together with go-sqlite3’s default for _busy_timeout=5000, means that we should never see a “database is locked” error, + // the database should block on the exclusive lock when starting a transaction, and the problematic case of two simultaneous + // holders of a read lock trying to upgrade to a write lock (and one necessarily failing) is prevented. + // Compare https://github.com/mattn/go-sqlite3/issues/274 . + // + // Ideally the BEGIN / BEGIN EXCLUSIVE decision could be made per-transaction, compare https://github.com/mattn/go-sqlite3/pull/1167 + // or https://github.com/mattn/go-sqlite3/issues/400 . + // The currently-proposed workaround is to create two different SQL “databases” (= connection pools) with different _txlock settings, + // which seems rather wasteful. + "&_txlock=exclusive" +) + +// cache is a BlobInfoCache implementation which uses a SQLite file at the specified path. +type cache struct { + path string + + // The database/sql package says “It is rarely necessary to close a DB.”, and steers towards a long-term *sql.DB connection pool. + // That’s probably very applicable for database-backed services, where the database is the primary data store. 
That’s not necessarily + // the case for callers of c/image, where image operations might be a small proportion of the total runtime, and the cache is fairly + // incidental even to the image operations. It’s also hard for us to use that model, because the public BlobInfoCache object doesn’t have + // a Close method, so creating a lot of single-use caches could leak data. + // + // Instead, the private BlobInfoCache2 interface provides Open/Close methods, and they are called by c/image/copy.Image. + // This amortizes the cost of opening/closing the SQLite state over a single image copy, while keeping no long-term resources open. + // Some rough benchmarks in https://github.com/containers/image/pull/2092 suggest relative costs on the order of "25" for a single + // *sql.DB left open long-term, "27" for a *sql.DB open for a single image copy, and "40" for opening/closing a *sql.DB for every + // single transaction; so the Open/Close per image copy seems a reasonable compromise (especially compared to the previous implementation, + // somewhere around "700"). + + lock sync.Mutex + // The following fields can only be accessed with lock held. + refCount int // number of outstanding Open() calls + db *sql.DB // nil if not set (may happen even if refCount > 0 on errors) +} + +// New returns BlobInfoCache implementation which uses a SQLite file at path. +// +// Most users should call blobinfocache.DefaultCache instead. +func New(path string) (types.BlobInfoCache, error) { + return new2(path) +} + +func new2(path string) (*cache, error) { + db, err := rawOpen(path) + if err != nil { + return nil, fmt.Errorf("initializing blob info cache at %q: %w", path, err) + } + defer db.Close() + + // We don’t check the schema before every operation, because that would be costly + // and because we assume schema changes will be handled by using a different path. 
+ if err := ensureDBHasCurrentSchema(db); err != nil { + return nil, err + } + + return &cache{ + path: path, + refCount: 0, + db: nil, + }, nil +} + +// rawOpen returns a new *sql.DB for path. +// The caller should arrange for it to be .Close()d. +func rawOpen(path string) (*sql.DB, error) { + // This exists to centralize the use of sqliteOptions. + return sql.Open("sqlite3", path+sqliteOptions) +} + +// Open() sets up the cache for future accesses, potentially acquiring costly state. Each Open() must be paired with a Close(). +// Note that public callers may call the types.BlobInfoCache operations without Open()/Close(). +func (sqc *cache) Open() { + sqc.lock.Lock() + defer sqc.lock.Unlock() + + if sqc.refCount == 0 { + db, err := rawOpen(sqc.path) + if err != nil { + logrus.Warnf("Error opening (previously-successfully-opened) blob info cache at %q: %v", sqc.path, err) + db = nil // But still increase sqc.refCount, because a .Close() will happen + } + sqc.db = db + } + sqc.refCount++ +} + +// Close destroys state created by Open(). +func (sqc *cache) Close() { + sqc.lock.Lock() + defer sqc.lock.Unlock() + + switch sqc.refCount { + case 0: + logrus.Errorf("internal error using pkg/blobinfocache/sqlite.cache: Close() without a matching Open()") + return + case 1: + if sqc.db != nil { + sqc.db.Close() + sqc.db = nil + } + } + sqc.refCount-- +} + +type void struct{} // So that we don’t have to write struct{}{} all over the place + +// transaction calls fn within a read-write transaction in sqc. 
+func transaction[T any](sqc *cache, fn func(tx *sql.Tx) (T, error)) (T, error) { + db, closeDB, err := func() (*sql.DB, func(), error) { // A scope for defer + sqc.lock.Lock() + defer sqc.lock.Unlock() + + if sqc.db != nil { + return sqc.db, func() {}, nil + } + db, err := rawOpen(sqc.path) + if err != nil { + return nil, nil, fmt.Errorf("opening blob info cache at %q: %w", sqc.path, err) + } + return db, func() { db.Close() }, nil + }() + if err != nil { + var zeroRes T // A zero value of T + return zeroRes, err + } + defer closeDB() + + return dbTransaction(db, fn) +} + +// dbTransaction calls fn within a read-write transaction in db. +func dbTransaction[T any](db *sql.DB, fn func(tx *sql.Tx) (T, error)) (T, error) { + // Ideally we should be able to distinguish between read-only and read-write transactions, see the _txlock=exclusive discussion. + + var zeroRes T // A zero value of T + + tx, err := db.Begin() + if err != nil { + return zeroRes, fmt.Errorf("beginning transaction: %w", err) + } + succeeded := false + defer func() { + if !succeeded { + if err := tx.Rollback(); err != nil { + logrus.Errorf("Rolling back transaction: %v", err) + } + } + }() + + res, err := fn(tx) + if err != nil { + return zeroRes, err + } + if err := tx.Commit(); err != nil { + return zeroRes, fmt.Errorf("committing transaction: %w", err) + } + + succeeded = true + return res, nil +} + +// querySingleValue executes a SELECT which is expected to return at most one row with a single column. +// It returns (value, true, nil) on success, or (value, false, nil) if no row was returned. 
+func querySingleValue[T any](tx *sql.Tx, query string, params ...any) (T, bool, error) { + var value T + if err := tx.QueryRow(query, params...).Scan(&value); err != nil { + var zeroValue T // A zero value of T + if errors.Is(err, sql.ErrNoRows) { + return zeroValue, false, nil + } + return zeroValue, false, err + } + return value, true, nil +} + +// ensureDBHasCurrentSchema adds the necessary tables and indices to a database. +// This is typically used when creating a previously-nonexistent database. +// We don’t really anticipate schema migrations; with c/image usually vendored, not using +// shared libraries, migrating a schema on an existing database would affect old-version users. +// Instead, schema changes are likely to be implemented by using a different cache file name, +// and leaving existing caches around for old users. +func ensureDBHasCurrentSchema(db *sql.DB) error { + // Considered schema design alternatives: + // + // (Overall, considering the overall network latency and disk I/O costs of many-megabyte layer pulls which are happening while referring + // to the blob info cache, it seems reasonable to prioritize readability over microoptimization of this database.) + // + // * This schema uses the text representation of digests. + // + // We use the fairly wasteful text with hexadecimal digits because digest.Digest does not define a binary representation; + // and the way digest.Digest.Hex() is deprecated in favor of digest.Digest.Encoded(), and the way digest.Algorithm + // is documented to “define the string encoding” suggests that assuming a hexadecimal representation and turning that + // into binary ourselves is not a good idea in general; we would have to special-case the currently-known algorithm + // — and that would require us to implement two code paths, one of them basically never exercised / never tested. + // + // * There are two separate items for recording the uncompressed digest and digest compressors. 
+ // Alternatively, we could have a single "digest facts" table with NULLable columns. + // + // The way the BlobInfoCache API works, we are only going to write one value at a time, so + // sharing a table would not be any more efficient for writes (same number of lookups, larger row tuples). + // Reads in candidateLocations would not be more efficient either, the searches in DigestCompressors and DigestUncompressedPairs + // do not coincide (we want a compressor for every candidate, but the uncompressed digest only for the primary digest; and then + // we search in DigestUncompressedPairs by uncompressed digest, not by the primary key). + // + // Also, using separate items allows the single-item writes to be done using a simple INSERT OR REPLACE, instead of having to + // do a more verbose ON CONFLICT(…) DO UPDATE SET … = …. + // + // * Joins (the two that exist in appendReplacementCandidates) are based on the text representation of digests. + // + // Using integer primary keys might make the joins themselves a bit more efficient, but then we would need to involve an extra + // join to translate from/to the user-provided digests anyway. If anything, that extra join (potentially more btree lookups) + // is probably costlier than comparing a few more bytes of data. + // + // Perhaps more importantly, storing digest texts directly makes the database dumps much easier to read for humans without + // having to do extra steps to decode the integers into digest values (either by running sqlite commands with joins, or mentally). 
+ // + items := []struct{ itemName, command string }{ + { + "DigestUncompressedPairs", + `CREATE TABLE IF NOT EXISTS DigestUncompressedPairs(` + + // index implied by PRIMARY KEY + `anyDigest TEXT PRIMARY KEY NOT NULL,` + + // DigestUncompressedPairs_index_uncompressedDigest + `uncompressedDigest TEXT NOT NULL + )`, + }, + { + "DigestUncompressedPairs_index_uncompressedDigest", + `CREATE INDEX IF NOT EXISTS DigestUncompressedPairs_index_uncompressedDigest ON DigestUncompressedPairs(uncompressedDigest)`, + }, + { + "DigestCompressors", + `CREATE TABLE IF NOT EXISTS DigestCompressors(` + + // index implied by PRIMARY KEY + `digest TEXT PRIMARY KEY NOT NULL,` + + // May include blobinfocache.Uncompressed (not blobinfocache.UnknownCompression). + `compressor TEXT NOT NULL + )`, + }, + { + "KnownLocations", + `CREATE TABLE IF NOT EXISTS KnownLocations( + transport TEXT NOT NULL, + scope TEXT NOT NULL, + digest TEXT NOT NULL, + location TEXT NOT NULL,` + + // TIMESTAMP is parsed by SQLITE as a NUMERIC affinity, but go-sqlite3 stores text in the (Go formatting semantics) + // format "2006-01-02 15:04:05.999999999-07:00". + // See also the _loc option in the sql.Open data source name. + `time TIMESTAMP NOT NULL,` + + // Implies an index. + // We also search by (transport, scope, digest), that doesn’t need an extra index + // because it is a prefix of the implied primary-key index. + `PRIMARY KEY (transport, scope, digest, location) + )`, + }, + } + + _, err := dbTransaction(db, func(tx *sql.Tx) (void, error) { + // If the last-created item exists, assume nothing needs to be done. + lastItemName := items[len(items)-1].itemName + _, found, err := querySingleValue[int](tx, "SELECT 1 FROM sqlite_schema WHERE name=?", lastItemName) + if err != nil { + return void{}, fmt.Errorf("checking if SQLite schema item %q exists: %w", lastItemName, err) + } + if !found { + // Item does not exist, assuming a fresh database. 
+ for _, i := range items { + if _, err := tx.Exec(i.command); err != nil { + return void{}, fmt.Errorf("creating item %s: %w", i.itemName, err) + } + } + } + return void{}, nil + }) + return err +} + +// uncompressedDigest implements types.BlobInfoCache.UncompressedDigest within a transaction. +func (sqc *cache) uncompressedDigest(tx *sql.Tx, anyDigest digest.Digest) (digest.Digest, error) { + uncompressedString, found, err := querySingleValue[string](tx, "SELECT uncompressedDigest FROM DigestUncompressedPairs WHERE anyDigest = ?", anyDigest.String()) + if err != nil { + return "", err + } + if found { + d, err := digest.Parse(uncompressedString) + if err != nil { + return "", err + } + return d, nil + + } + // A record as uncompressedDigest implies that anyDigest must already refer to an uncompressed digest. + // This way we don't have to waste storage space with trivial (uncompressed, uncompressed) mappings + // when we already record a (compressed, uncompressed) pair. + _, found, err = querySingleValue[int](tx, "SELECT 1 FROM DigestUncompressedPairs WHERE uncompressedDigest = ?", anyDigest.String()) + if err != nil { + return "", err + } + if found { + return anyDigest, nil + } + return "", nil +} + +// UncompressedDigest returns an uncompressed digest corresponding to anyDigest. +// May return anyDigest if it is known to be uncompressed. +// Returns "" if nothing is known about the digest (it may be compressed or uncompressed). +func (sqc *cache) UncompressedDigest(anyDigest digest.Digest) digest.Digest { + res, err := transaction(sqc, func(tx *sql.Tx) (digest.Digest, error) { + return sqc.uncompressedDigest(tx, anyDigest) + }) + if err != nil { + return "" // FIXME? Log err (but throttle the log volume on repeated accesses)? + } + return res +} + +// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed. +// It’s allowed for anyDigest == uncompressed. 
+// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g. +// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs. +// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.) +func (sqc *cache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) { + _, _ = transaction(sqc, func(tx *sql.Tx) (void, error) { + previousString, gotPrevious, err := querySingleValue[string](tx, "SELECT uncompressedDigest FROM DigestUncompressedPairs WHERE anyDigest = ?", anyDigest.String()) + if err != nil { + return void{}, fmt.Errorf("looking for uncompressed digest for %q", anyDigest) + } + if gotPrevious { + previous, err := digest.Parse(previousString) + if err != nil { + return void{}, err + } + if previous != uncompressed { + logrus.Warnf("Uncompressed digest for blob %s previously recorded as %s, now %s", anyDigest, previous, uncompressed) + } + } + if _, err := tx.Exec("INSERT OR REPLACE INTO DigestUncompressedPairs(anyDigest, uncompressedDigest) VALUES (?, ?)", + anyDigest.String(), uncompressed.String()); err != nil { + return void{}, fmt.Errorf("recording uncompressed digest %q for %q: %w", uncompressed, anyDigest, err) + } + return void{}, nil + }) // FIXME? Log error (but throttle the log volume on repeated accesses)? +} + +// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope, +// and can be reused given the opaque location data. 
+func (sqc *cache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, location types.BICLocationReference) { + _, _ = transaction(sqc, func(tx *sql.Tx) (void, error) { + if _, err := tx.Exec("INSERT OR REPLACE INTO KnownLocations(transport, scope, digest, location, time) VALUES (?, ?, ?, ?, ?)", + transport.Name(), scope.Opaque, digest.String(), location.Opaque, time.Now()); err != nil { // Possibly overwriting an older entry. + return void{}, fmt.Errorf("recording known location %q for (%q, %q, %q): %w", + location.Opaque, transport.Name(), scope.Opaque, digest.String(), err) + } + return void{}, nil + }) // FIXME? Log error (but throttle the log volume on repeated accesses)? +} + +// RecordDigestCompressorName records a compressor for the blob with the specified digest, +// or Uncompressed or UnknownCompression. +// WARNING: Only call this with LOCALLY VERIFIED data; don’t record a compressor for a +// digest just because some remote author claims so (e.g. because a manifest says so); +// otherwise the cache could be poisoned and cause us to make incorrect edits to type +// information in a manifest. 
+func (sqc *cache) RecordDigestCompressorName(anyDigest digest.Digest, compressorName string) { + _, _ = transaction(sqc, func(tx *sql.Tx) (void, error) { + previous, gotPrevious, err := querySingleValue[string](tx, "SELECT compressor FROM DigestCompressors WHERE digest = ?", anyDigest.String()) + if err != nil { + return void{}, fmt.Errorf("looking for compressor of for %q", anyDigest) + } + if gotPrevious && previous != compressorName { + logrus.Warnf("Compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, previous, compressorName) + } + if compressorName == blobinfocache.UnknownCompression { + if _, err := tx.Exec("DELETE FROM DigestCompressors WHERE digest = ?", anyDigest.String()); err != nil { + return void{}, fmt.Errorf("deleting compressor for digest %q: %w", anyDigest, err) + } + } else { + if _, err := tx.Exec("INSERT OR REPLACE INTO DigestCompressors(digest, compressor) VALUES (?, ?)", + anyDigest.String(), compressorName); err != nil { + return void{}, fmt.Errorf("recording compressor %q for %q: %w", compressorName, anyDigest, err) + } + } + return void{}, nil + }) // FIXME? Log error (but throttle the log volume on repeated accesses)? +} + +// appendReplacementCandidates creates prioritize.CandidateWithTime values for (transport, scope, digest), +// and returns the result of appending them to candidates. v2Output allows including candidates with unknown +// location, and filters out candidates with unknown compression. +func (sqc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, tx *sql.Tx, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, v2Output bool) ([]prioritize.CandidateWithTime, error) { + var rows *sql.Rows + var err error + if v2Output { + rows, err = tx.Query("SELECT location, time, compressor FROM KnownLocations JOIN DigestCompressors "+ + "ON KnownLocations.digest = DigestCompressors.digest "+ + "WHERE transport = ? AND scope = ? 
AND KnownLocations.digest = ?", + transport.Name(), scope.Opaque, digest.String()) + } else { + rows, err = tx.Query("SELECT location, time, IFNULL(compressor, ?) FROM KnownLocations "+ + "LEFT JOIN DigestCompressors ON KnownLocations.digest = DigestCompressors.digest "+ + "WHERE transport = ? AND scope = ? AND KnownLocations.digest = ?", + blobinfocache.UnknownCompression, + transport.Name(), scope.Opaque, digest.String()) + } + if err != nil { + return nil, fmt.Errorf("looking up candidate locations: %w", err) + } + defer rows.Close() + + res := []prioritize.CandidateWithTime{} + for rows.Next() { + var location string + var time time.Time + var compressorName string + if err := rows.Scan(&location, &time, &compressorName); err != nil { + return nil, fmt.Errorf("scanning candidate: %w", err) + } + res = append(res, prioritize.CandidateWithTime{ + Candidate: blobinfocache.BICReplacementCandidate2{ + Digest: digest, + CompressorName: compressorName, + Location: types.BICLocationReference{Opaque: location}, + }, + LastSeen: time, + }) + } + if err := rows.Err(); err != nil { + return nil, fmt.Errorf("iterating through locations: %w", err) + } + + if len(res) == 0 && v2Output { + compressor, found, err := querySingleValue[string](tx, "SELECT compressor FROM DigestCompressors WHERE digest = ?", digest.String()) + if err != nil { + return nil, fmt.Errorf("scanning compressorName: %w", err) + } + if found { + res = append(res, prioritize.CandidateWithTime{ + Candidate: blobinfocache.BICReplacementCandidate2{ + Digest: digest, + CompressorName: compressor, + UnknownLocation: true, + Location: types.BICLocationReference{Opaque: ""}, + }, + LastSeen: time.Time{}, + }) + } + } + candidates = append(candidates, res...) + return candidates, nil +} + +// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations (if known) +// that could possibly be reused within the specified (transport scope) (if they still +// exist, which is not guaranteed). 
+// +// If !canSubstitute, the returned candidates will match the submitted digest exactly; if +// canSubstitute, data from previous RecordDigestUncompressedPair calls is used to also look +// up variants of the blob which have the same uncompressed digest. +// +// The CompressorName fields in returned data must never be UnknownCompression. +func (sqc *cache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []blobinfocache.BICReplacementCandidate2 { + return sqc.candidateLocations(transport, scope, digest, canSubstitute, true) +} + +func (sqc *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute, v2Output bool) []blobinfocache.BICReplacementCandidate2 { + var uncompressedDigest digest.Digest // = "" + res, err := transaction(sqc, func(tx *sql.Tx) ([]prioritize.CandidateWithTime, error) { + res := []prioritize.CandidateWithTime{} + res, err := sqc.appendReplacementCandidates(res, tx, transport, scope, primaryDigest, v2Output) + if err != nil { + return nil, err + } + if canSubstitute { + uncompressedDigest, err = sqc.uncompressedDigest(tx, primaryDigest) + if err != nil { + return nil, err + } + + // FIXME? We could integrate this with appendReplacementCandidates into a single join instead of N+1 queries. + // (In the extreme, we could turn _everything_ this function does into a single query. + // And going even further, even DestructivelyPrioritizeReplacementCandidates could be turned into SQL.) + // For now, we prioritize simplicity, and sharing both code and implementation structure with the other cache implementations. 
+ rows, err := tx.Query("SELECT anyDigest FROM DigestUncompressedPairs WHERE uncompressedDigest = ?", uncompressedDigest.String()) + if err != nil { + return nil, fmt.Errorf("querying for other digests: %w", err) + } + defer rows.Close() + for rows.Next() { + var otherDigestString string + if err := rows.Scan(&otherDigestString); err != nil { + return nil, fmt.Errorf("scanning other digest: %w", err) + } + otherDigest, err := digest.Parse(otherDigestString) + if err != nil { + return nil, err + } + if otherDigest != primaryDigest && otherDigest != uncompressedDigest { + res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, otherDigest, v2Output) + if err != nil { + return nil, err + } + } + } + if err := rows.Err(); err != nil { + return nil, fmt.Errorf("iterating through other digests: %w", err) + } + + if uncompressedDigest != primaryDigest { + res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, uncompressedDigest, v2Output) + if err != nil { + return nil, err + } + } + } + return res, nil + }) + if err != nil { + return []blobinfocache.BICReplacementCandidate2{} // FIXME? Log err (but throttle the log volume on repeated accesses)? + } + return prioritize.DestructivelyPrioritizeReplacementCandidates(res, primaryDigest, uncompressedDigest) + +} + +// CandidateLocations returns a prioritized, limited, number of blobs and their locations that could possibly be reused +// within the specified (transport scope) (if they still exist, which is not guaranteed). +// +// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute, +// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same +// uncompressed digest. 
+func (sqc *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate { + return blobinfocache.CandidateLocationsFromV2(sqc.candidateLocations(transport, scope, digest, canSubstitute, false)) +} diff --git a/pkg/blobinfocache/sqlite/sqlite_test.go b/pkg/blobinfocache/sqlite/sqlite_test.go new file mode 100644 index 0000000000..76f6bc67c6 --- /dev/null +++ b/pkg/blobinfocache/sqlite/sqlite_test.go @@ -0,0 +1,25 @@ +package sqlite + +import ( + "path/filepath" + "testing" + + "github.com/containers/image/v5/internal/blobinfocache" + "github.com/containers/image/v5/pkg/blobinfocache/internal/test" + "github.com/stretchr/testify/require" +) + +var _ blobinfocache.BlobInfoCache2 = &cache{} + +func newTestCache(t *testing.T) blobinfocache.BlobInfoCache2 { + dir := t.TempDir() + cache, err := new2(filepath.Join(dir, "db.sqlite")) + require.NoError(t, err) + return cache +} + +func TestNew(t *testing.T) { + test.GenericCache(t, newTestCache) +} + +// FIXME: Tests for the various corner cases / failure cases of sqlite.cache should be added here. diff --git a/pkg/docker/config/config.go b/pkg/docker/config/config.go index b987c58060..c61065cb01 100644 --- a/pkg/docker/config/config.go +++ b/pkg/docker/config/config.go @@ -5,6 +5,7 @@ import ( "encoding/json" "errors" "fmt" + "io/fs" "os" "os/exec" "path/filepath" @@ -61,78 +62,6 @@ func newAuthPathDefault(path string) authPath { return authPath{path: path, legacyFormat: false} } -// SetCredentials stores the username and password in a location -// appropriate for sys and the users’ configuration. -// A valid key is a repository, a namespace within a registry, or a registry hostname; -// using forms other than just a registry may fail depending on configuration. -// Returns a human-readable description of the location that was updated. 
-// NOTE: The return value is only intended to be read by humans; its form is not an API, -// it may change (or new forms can be added) any time. -func SetCredentials(sys *types.SystemContext, key, username, password string) (string, error) { - isNamespaced, err := validateKey(key) - if err != nil { - return "", err - } - - helpers, err := sysregistriesv2.CredentialHelpers(sys) - if err != nil { - return "", err - } - - // Make sure to collect all errors. - var multiErr error - for _, helper := range helpers { - var desc string - var err error - switch helper { - // Special-case the built-in helpers for auth files. - case sysregistriesv2.AuthenticationFileHelper: - desc, err = modifyJSON(sys, func(fileContents *dockerConfigFile) (bool, string, error) { - if ch, exists := fileContents.CredHelpers[key]; exists { - if isNamespaced { - return false, "", unsupportedNamespaceErr(ch) - } - desc, err := setCredsInCredHelper(ch, key, username, password) - if err != nil { - return false, "", err - } - return false, desc, nil - } - creds := base64.StdEncoding.EncodeToString([]byte(username + ":" + password)) - newCreds := dockerAuthConfig{Auth: creds} - fileContents.AuthConfigs[key] = newCreds - return true, "", nil - }) - // External helpers. 
- default: - if isNamespaced { - err = unsupportedNamespaceErr(helper) - } else { - desc, err = setCredsInCredHelper(helper, key, username, password) - } - } - if err != nil { - multiErr = multierror.Append(multiErr, err) - logrus.Debugf("Error storing credentials for %s in credential helper %s: %v", key, helper, err) - continue - } - logrus.Debugf("Stored credentials for %s in credential helper %s", key, helper) - return desc, nil - } - return "", multiErr -} - -func unsupportedNamespaceErr(helper string) error { - return fmt.Errorf("namespaced key is not supported for credential helper %s", helper) -} - -// SetAuthentication stores the username and password in the credential helper or file -// See the documentation of SetCredentials for format of "key" -func SetAuthentication(sys *types.SystemContext, key, username, password string) error { - _, err := SetCredentials(sys, key, username, password) - return err -} - // GetAllCredentials returns the registry credentials for all registries stored // in any of the configured credential helpers. func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthConfig, error) { @@ -370,17 +299,79 @@ func getAuthenticationWithHomeDir(sys *types.SystemContext, key, homeDir string) return creds.Username, creds.Password, nil } -// RemoveAuthentication removes credentials for `key` from all possible -// sources such as credential helpers and auth files. +// SetCredentials stores the username and password in a location +// appropriate for sys and the users’ configuration. // A valid key is a repository, a namespace within a registry, or a registry hostname; // using forms other than just a registry may fail depending on configuration. -func RemoveAuthentication(sys *types.SystemContext, key string) error { - isNamespaced, err := validateKey(key) +// Returns a human-readable description of the location that was updated. 
+// NOTE: The return value is only intended to be read by humans; its form is not an API, +// it may change (or new forms can be added) any time. +func SetCredentials(sys *types.SystemContext, key, username, password string) (string, error) { + helpers, jsonEditor, key, isNamespaced, err := prepareForEdit(sys, key, true) if err != nil { - return err + return "", err } - helpers, err := sysregistriesv2.CredentialHelpers(sys) + // Make sure to collect all errors. + var multiErr error + for _, helper := range helpers { + var desc string + var err error + switch helper { + // Special-case the built-in helpers for auth files. + case sysregistriesv2.AuthenticationFileHelper: + desc, err = jsonEditor(sys, func(fileContents *dockerConfigFile) (bool, string, error) { + if ch, exists := fileContents.CredHelpers[key]; exists { + if isNamespaced { + return false, "", unsupportedNamespaceErr(ch) + } + desc, err := setCredsInCredHelper(ch, key, username, password) + if err != nil { + return false, "", err + } + return false, desc, nil + } + creds := base64.StdEncoding.EncodeToString([]byte(username + ":" + password)) + newCreds := dockerAuthConfig{Auth: creds} + fileContents.AuthConfigs[key] = newCreds + return true, "", nil + }) + // External helpers. 
+ default: + if isNamespaced { + err = unsupportedNamespaceErr(helper) + } else { + desc, err = setCredsInCredHelper(helper, key, username, password) + } + } + if err != nil { + multiErr = multierror.Append(multiErr, err) + logrus.Debugf("Error storing credentials for %s in credential helper %s: %v", key, helper, err) + continue + } + logrus.Debugf("Stored credentials for %s in credential helper %s", key, helper) + return desc, nil + } + return "", multiErr +} + +func unsupportedNamespaceErr(helper string) error { + return fmt.Errorf("namespaced key is not supported for credential helper %s", helper) +} + +// SetAuthentication stores the username and password in the credential helper or file +// See the documentation of SetCredentials for format of "key" +func SetAuthentication(sys *types.SystemContext, key, username, password string) error { + _, err := SetCredentials(sys, key, username, password) + return err +} + +// RemoveAuthentication removes credentials for `key` from all possible +// sources such as credential helpers and auth files. +// A valid key is a repository, a namespace within a registry, or a registry hostname; +// using forms other than just a registry may fail depending on configuration. +func RemoveAuthentication(sys *types.SystemContext, key string) error { + helpers, jsonEditor, key, isNamespaced, err := prepareForEdit(sys, key, true) if err != nil { return err } @@ -411,7 +402,7 @@ func RemoveAuthentication(sys *types.SystemContext, key string) error { switch helper { // Special-case the built-in helper for auth files. 
case sysregistriesv2.AuthenticationFileHelper: - _, err = modifyJSON(sys, func(fileContents *dockerConfigFile) (bool, string, error) { + _, err = jsonEditor(sys, func(fileContents *dockerConfigFile) (bool, string, error) { if innerHelper, exists := fileContents.CredHelpers[key]; exists { removeFromCredHelper(innerHelper) } @@ -443,7 +434,7 @@ func RemoveAuthentication(sys *types.SystemContext, key string) error { // RemoveAllAuthentication deletes all the credentials stored in credential // helpers and auth files. func RemoveAllAuthentication(sys *types.SystemContext) error { - helpers, err := sysregistriesv2.CredentialHelpers(sys) + helpers, jsonEditor, _, _, err := prepareForEdit(sys, "", false) if err != nil { return err } @@ -454,7 +445,7 @@ func RemoveAllAuthentication(sys *types.SystemContext) error { switch helper { // Special-case the built-in helper for auth files. case sysregistriesv2.AuthenticationFileHelper: - _, err = modifyJSON(sys, func(fileContents *dockerConfigFile) (bool, string, error) { + _, err = jsonEditor(sys, func(fileContents *dockerConfigFile) (bool, string, error) { for registry, helper := range fileContents.CredHelpers { // Helpers in auth files are expected // to exist, so no special treatment @@ -497,6 +488,46 @@ func RemoveAllAuthentication(sys *types.SystemContext) error { return multiErr } +// prepareForEdit processes sys and key (if keyRelevant) to return: +// - a list of credential helpers +// - a function which can be used to edit the JSON file +// - the key value to actually use in credential helpers / JSON +// - a boolean which is true if key is namespaced (and should not be used with credential helpers). 
+func prepareForEdit(sys *types.SystemContext, key string, keyRelevant bool) ([]string, func(*types.SystemContext, func(*dockerConfigFile) (bool, string, error)) (string, error), string, bool, error) { + var isNamespaced bool + if keyRelevant { + ns, err := validateKey(key) + if err != nil { + return nil, nil, "", false, err + } + isNamespaced = ns + } + + if sys != nil && sys.DockerCompatAuthFilePath != "" { + if sys.AuthFilePath != "" { + return nil, nil, "", false, errors.New("AuthFilePath and DockerCompatAuthFilePath can not be set simultaneously") + } + if keyRelevant { + if isNamespaced { + return nil, nil, "", false, fmt.Errorf("Credentials cannot be recorded in Docker-compatible format with namespaced key %q", key) + } + if key == "docker.io" { + key = "https://index.docker.io/v1/" + } + } + + // Do not use helpers defined in sysregistriesv2 because Docker isn’t aware of them. + return []string{sysregistriesv2.AuthenticationFileHelper}, modifyDockerConfigJSON, key, false, nil + } + + helpers, err := sysregistriesv2.CredentialHelpers(sys) + if err != nil { + return nil, nil, "", false, err + } + + return helpers, modifyJSON, key, isNamespaced, nil +} + func listCredsInCredHelper(credHelper string) (map[string]string, error) { helperName := fmt.Sprintf("docker-credential-%s", credHelper) p := helperclient.NewShellProgramFunc(helperName) @@ -513,9 +544,17 @@ func getPathToAuth(sys *types.SystemContext) (authPath, bool, error) { // it exists only to allow testing it with an artificial runtime.GOOS. func getPathToAuthWithOS(sys *types.SystemContext, goOS string) (authPath, bool, error) { if sys != nil { + if sys.AuthFilePath != "" && sys.DockerCompatAuthFilePath != "" { + return authPath{}, false, errors.New("AuthFilePath and DockerCompatAuthFilePath can not be set simultaneously") + } if sys.AuthFilePath != "" { return newAuthPathDefault(sys.AuthFilePath), true, nil } + // When reading, we can process auth.json and Docker’s config.json with the same code. 
+ // When writing, prepareForEdit chooses an appropriate jsonEditor implementation. + if sys.DockerCompatAuthFilePath != "" { + return newAuthPathDefault(sys.DockerCompatAuthFilePath), true, nil + } if sys.LegacyFormatAuthFilePath != "" { return authPath{path: sys.LegacyFormatAuthFilePath, legacyFormat: true}, true, nil } @@ -626,6 +665,86 @@ func modifyJSON(sys *types.SystemContext, editor func(fileContents *dockerConfig return description, nil } +// modifyDockerConfigJSON finds a docker config.json file, calls editor on the contents, and +// writes it back if editor returns true. +// Returns a human-readable description of the file, to be returned by SetCredentials. +// +// The editor may also return a human-readable description of the updated location; if it is "", +// the file itself is used. +func modifyDockerConfigJSON(sys *types.SystemContext, editor func(fileContents *dockerConfigFile) (bool, string, error)) (string, error) { + if sys == nil || sys.DockerCompatAuthFilePath == "" { + return "", errors.New("internal error: modifyDockerConfigJSON called with DockerCompatAuthFilePath not set") + } + path := sys.DockerCompatAuthFilePath + + dir := filepath.Dir(path) + if err := os.MkdirAll(dir, 0700); err != nil { + return "", err + } + + // Try hard not to clobber fields we don’t understand, even fields which may be added in future Docker versions. + var rawContents map[string]json.RawMessage + originalBytes, err := os.ReadFile(path) + switch { + case err == nil: + if err := json.Unmarshal(originalBytes, &rawContents); err != nil { + return "", fmt.Errorf("unmarshaling JSON at %q: %w", path, err) + } + case errors.Is(err, fs.ErrNotExist): + rawContents = map[string]json.RawMessage{} + default: // err != nil + return "", err + } + + syntheticContents := dockerConfigFile{ + AuthConfigs: map[string]dockerAuthConfig{}, + CredHelpers: map[string]string{}, + } + // json.Unmarshal also falls back to case-insensitive field matching; this code does not do that. 
Presumably + // config.json is mostly maintained by machines doing `docker login`, so the files should, hopefully, not contain field names with + // unexpected case. + if rawAuths, ok := rawContents["auths"]; ok { + // This conversion will lose fields we don’t know about; when updating an entry, we can’t tell whether an unknown field + // should be preserved or discarded (because it is made obsolete/unwanted with the new credentials). + // It might make sense to track which entries of "auths" we actually modified, and to not touch any others. + if err := json.Unmarshal(rawAuths, &syntheticContents.AuthConfigs); err != nil { + return "", fmt.Errorf(`unmarshaling "auths" in JSON at %q: %w`, path, err) + } + } + if rawCH, ok := rawContents["credHelpers"]; ok { + if err := json.Unmarshal(rawCH, &syntheticContents.CredHelpers); err != nil { + return "", fmt.Errorf(`unmarshaling "credHelpers" in JSON at %q: %w`, path, err) + + } + } + + updated, description, err := editor(&syntheticContents) + if err != nil { + return "", fmt.Errorf("updating %q: %w", path, err) + } + if updated { + rawAuths, err := json.MarshalIndent(syntheticContents.AuthConfigs, "", "\t") + if err != nil { + return "", fmt.Errorf("marshaling JSON %q: %w", path, err) + } + rawContents["auths"] = rawAuths + // We never modify syntheticContents.CredHelpers, so we don’t need to update it. 
+ newData, err := json.MarshalIndent(rawContents, "", "\t") + if err != nil { + return "", fmt.Errorf("marshaling JSON %q: %w", path, err) + } + + if err = ioutils.AtomicWriteFile(path, newData, 0600); err != nil { + return "", fmt.Errorf("writing to file %q: %w", path, err) + } + } + + if description == "" { + description = path + } + return description, nil +} + func getCredsFromCredHelper(credHelper, registry string) (types.DockerAuthConfig, error) { helperName := fmt.Sprintf("docker-credential-%s", credHelper) p := helperclient.NewShellProgramFunc(helperName) diff --git a/pkg/docker/config/config_test.go b/pkg/docker/config/config_test.go index 12975b7428..5b5402ed6f 100644 --- a/pkg/docker/config/config_test.go +++ b/pkg/docker/config/config_test.go @@ -10,6 +10,10 @@ import ( "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/types" + dockerReference "github.com/distribution/reference" + "github.com/docker/cli/cli/config" + configtypes "github.com/docker/cli/cli/config/types" + "github.com/docker/docker/registry" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -447,6 +451,74 @@ func TestGetAuthFailsOnBadInput(t *testing.T) { assert.ErrorContains(t, err, "unmarshaling JSON") } +// TestGetCredentialsInteroperability verifies that Docker-created config files can be consumed by GetCredentials. +func TestGetCredentialsInteroperability(t *testing.T) { + const testUser = "some-user" + const testPassword = "some-password" + + for _, c := range []struct { + loginKey string // or "" for Docker's default. We must special-case that because (docker login docker.io) works, but (docker logout docker.io) doesn't! 
+ queryKey string + }{ + {"example.com", "example.com"}, + {"example.com", "example.com/ns/repo"}, + {"example.com:8000", "example.com:8000"}, + {"example.com:8000", "example.com:8000/ns/repo"}, + {"", "docker.io"}, + {"", "docker.io/library/busybox"}, + {"", "docker.io/notlibrary/busybox"}, + } { + configDir := t.TempDir() + configPath := filepath.Join(configDir, config.ConfigFileName) + + // Initially, there are no credentials + creds, err := GetCredentials(&types.SystemContext{DockerCompatAuthFilePath: configPath}, c.queryKey) + require.NoError(t, err) + assert.Equal(t, types.DockerAuthConfig{}, creds) + + // Log in. This is intended to match github.com/docker/cli/command/registry.runLogin + serverAddress := c.loginKey + if serverAddress == "" { + serverAddress = registry.IndexServer + } + configFile, err := config.Load(configDir) + require.NoError(t, err) + err = configFile.GetCredentialsStore(serverAddress).Store(configtypes.AuthConfig{ + ServerAddress: serverAddress, + Username: testUser, + Password: testPassword, + }) + require.NoError(t, err) + // We can find the credentials. + creds, err = GetCredentials(&types.SystemContext{DockerCompatAuthFilePath: configPath}, c.queryKey) + require.NoError(t, err) + assert.Equal(t, types.DockerAuthConfig{ + Username: testUser, + Password: testPassword, + }, creds) + + // Log out. This is intended to match github.com/docker/cli/command/registry.runLogout + var regsToLogout []string + if c.loginKey == "" { + regsToLogout = []string{registry.IndexServer} + } else { + hostnameAddress := registry.ConvertToHostname(c.loginKey) + regsToLogout = []string{c.loginKey, hostnameAddress, "http://" + hostnameAddress, "https://" + hostnameAddress} + } + succeeded := false + for _, r := range regsToLogout { + if err := configFile.GetCredentialsStore(r).Erase(r); err == nil { + succeeded = true + } + } + require.True(t, succeeded) + // We can’t find the credentials any more. 
+ creds, err = GetCredentials(&types.SystemContext{DockerCompatAuthFilePath: configPath}, c.queryKey) + require.NoError(t, err) + assert.Equal(t, types.DockerAuthConfig{}, creds) + } +} + func TestGetAllCredentials(t *testing.T) { // Create a temporary authentication file. tmpFile, err := os.CreateTemp("", "auth.json.") @@ -787,6 +859,95 @@ func TestRemoveAuthentication(t *testing.T) { } } +// TestSetCredentialsInteroperability verifies that our config files can be consumed by Docker. +func TestSetCredentialsInteroperability(t *testing.T) { + const testUser = "some-user" + const testPassword = "some-password" + + for _, c := range []struct { + loginKey string // or "" for Docker's default. We must special-case that because (docker login docker.io) works, but (docker logout docker.io) doesn't! + queryRepo string + otherContents bool + loginKeyError bool + }{ + {loginKey: "example.com", queryRepo: "example.com/ns/repo"}, + {loginKey: "example.com:8000", queryRepo: "example.com:8000/ns/repo"}, + {loginKey: "docker.io", queryRepo: "docker.io/library/busybox"}, + {loginKey: "docker.io", queryRepo: "docker.io/notlibrary/busybox"}, + {loginKey: "example.com", queryRepo: "example.com/ns/repo", otherContents: true}, + {loginKey: "example.com/ns", queryRepo: "example.com/ns/repo", loginKeyError: true}, + {loginKey: "example.com:8000/ns", queryRepo: "example.com:8000/ns/repo", loginKeyError: true}, + } { + configDir := t.TempDir() + configPath := filepath.Join(configDir, config.ConfigFileName) + + // The credential lookups are intended to match github.com/docker/cli/command/image.RunPull . 
+ dockerRef, err := dockerReference.ParseNormalizedNamed(c.queryRepo) + require.NoError(t, err) + dockerRef = dockerReference.TagNameOnly(dockerRef) + repoInfo, err := registry.ParseRepositoryInfo(dockerRef) + require.NoError(t, err) + configKey := repoInfo.Index.Name + if repoInfo.Index.Official { + configKey = registry.IndexServer + } + + if c.otherContents { + err := os.WriteFile(configPath, []byte(`{"auths":{"unmodified-domain.example":{"identitytoken":"identity"}},`+ + `"psFormat":"psFormatValue",`+ + `"credHelpers":{"helper-domain.example":"helper-name"}`+ + `}`), 0o700) + require.NoError(t, err) + } + + // Initially, there are no credentials + configFile, err := config.Load(configDir) + require.NoError(t, err) + creds, err := configFile.GetCredentialsStore(configKey).Get(configKey) + require.NoError(t, err) + assert.Equal(t, configtypes.AuthConfig{}, creds) + + // Log in. + _, err = SetCredentials(&types.SystemContext{DockerCompatAuthFilePath: configPath}, c.loginKey, testUser, testPassword) + if c.loginKeyError { + assert.Error(t, err) + continue + } + require.NoError(t, err) + // We can find the credentials. + configFile, err = config.Load(configDir) + require.NoError(t, err) + creds, err = configFile.GetCredentialsStore(configKey).Get(configKey) + require.NoError(t, err) + assert.Equal(t, configtypes.AuthConfig{ + ServerAddress: configKey, + Username: testUser, + Password: testPassword, + }, creds) + + // Log out. + err = RemoveAuthentication(&types.SystemContext{DockerCompatAuthFilePath: configPath}, c.loginKey) + require.NoError(t, err) + // We can’t find the credentials any more. 
+ configFile, err = config.Load(configDir) + require.NoError(t, err) + creds, err = configFile.GetCredentialsStore(configKey).Get(configKey) + require.NoError(t, err) + assert.Equal(t, configtypes.AuthConfig{}, creds) + + if c.otherContents { + creds, err = configFile.GetCredentialsStore("unmodified-domain.example").Get("unmodified-domain.example") + require.NoError(t, err) + assert.Equal(t, configtypes.AuthConfig{ + ServerAddress: "unmodified-domain.example", + IdentityToken: "identity", + }, creds) + assert.Equal(t, "psFormatValue", configFile.PsFormat) + assert.Equal(t, map[string]string{"helper-domain.example": "helper-name"}, configFile.CredentialHelpers) + } + } +} + func TestValidateKey(t *testing.T) { // Invalid keys for _, key := range []string{ diff --git a/pkg/shortnames/shortnames.go b/pkg/shortnames/shortnames.go index eeb7c1effd..a15b2b56e1 100644 --- a/pkg/shortnames/shortnames.go +++ b/pkg/shortnames/shortnames.go @@ -11,6 +11,7 @@ import ( "github.com/containers/image/v5/types" "github.com/manifoldco/promptui" "github.com/opencontainers/go-digest" + "golang.org/x/exp/slices" "golang.org/x/term" ) @@ -169,7 +170,7 @@ func (r *Resolved) Description() string { // pull errors must equal the amount of pull candidates. 
func (r *Resolved) FormatPullErrors(pullErrors []error) error { if len(pullErrors) > 0 && len(pullErrors) != len(r.PullCandidates) { - pullErrors = append(pullErrors, + pullErrors = append(slices.Clone(pullErrors), fmt.Errorf("internal error: expected %d instead of %d errors for %d pull candidates", len(r.PullCandidates), len(pullErrors), len(r.PullCandidates))) } diff --git a/pkg/tlsclientconfig/tlsclientconfig.go b/pkg/tlsclientconfig/tlsclientconfig.go index 56b0d49390..c6ec84bd5a 100644 --- a/pkg/tlsclientconfig/tlsclientconfig.go +++ b/pkg/tlsclientconfig/tlsclientconfig.go @@ -66,7 +66,7 @@ func SetupCertificates(dir string, tlsc *tls.Config) error { if err != nil { return err } - tlsc.Certificates = append(tlsc.Certificates, cert) + tlsc.Certificates = append(slices.Clone(tlsc.Certificates), cert) } if strings.HasSuffix(f.Name(), ".key") { keyName := f.Name() diff --git a/signature/fulcio_cert.go b/signature/fulcio_cert.go index ef5d3df6f0..c11fa46a9d 100644 --- a/signature/fulcio_cert.go +++ b/signature/fulcio_cert.go @@ -1,3 +1,6 @@ +//go:build !containers_image_fulcio_stub +// +build !containers_image_fulcio_stub + package signature import ( diff --git a/signature/fulcio_cert_stub.go b/signature/fulcio_cert_stub.go new file mode 100644 index 0000000000..c0b48dafa7 --- /dev/null +++ b/signature/fulcio_cert_stub.go @@ -0,0 +1,28 @@ +//go:build containers_image_fulcio_stub +// +build containers_image_fulcio_stub + +package signature + +import ( + "crypto" + "crypto/ecdsa" + "crypto/x509" + "errors" +) + +type fulcioTrustRoot struct { + caCertificates *x509.CertPool + oidcIssuer string + subjectEmail string +} + +func (f *fulcioTrustRoot) validate() error { + return errors.New("fulcio disabled at compile-time") +} + +func verifyRekorFulcio(rekorPublicKey *ecdsa.PublicKey, fulcioTrustRoot *fulcioTrustRoot, untrustedRekorSET []byte, + untrustedCertificateBytes []byte, untrustedIntermediateChainBytes []byte, untrustedBase64Signature string, + 
untrustedPayloadBytes []byte) (crypto.PublicKey, error) { + return nil, errors.New("fulcio disabled at compile-time") + +} diff --git a/signature/fulcio_cert_test.go b/signature/fulcio_cert_test.go index d30731ccfd..ccf619f4d0 100644 --- a/signature/fulcio_cert_test.go +++ b/signature/fulcio_cert_test.go @@ -1,3 +1,6 @@ +//go:build !containers_image_fulcio_stub +// +build !containers_image_fulcio_stub + package signature import ( @@ -18,6 +21,7 @@ import ( "github.com/sigstore/sigstore/pkg/cryptoutils" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "golang.org/x/exp/slices" ) // assert that crypto.PublicKey matches the on in certPEM. @@ -132,7 +136,7 @@ func TestFulcioIssuerInCertificate(t *testing.T) { extensions: []pkix.Extension{ { Id: certificate.OIDIssuerV2, - Value: append(asn1MarshalTest(t, "https://", "utf8"), asn1MarshalTest(t, "example.com", "utf8")...), + Value: append(slices.Clone(asn1MarshalTest(t, "https://", "utf8")), asn1MarshalTest(t, "example.com", "utf8")...), }, }, errorFragment: "invalid ASN.1 in OIDC issuer v2 extension, trailing data", diff --git a/signature/internal/rekor_set.go b/signature/internal/rekor_set.go index d439b5f7a7..d86e98a45b 100644 --- a/signature/internal/rekor_set.go +++ b/signature/internal/rekor_set.go @@ -1,3 +1,6 @@ +//go:build !containers_image_rekor_stub +// +build !containers_image_rekor_stub + package internal import ( diff --git a/signature/internal/rekor_set_stub.go b/signature/internal/rekor_set_stub.go new file mode 100644 index 0000000000..7c121cc2ee --- /dev/null +++ b/signature/internal/rekor_set_stub.go @@ -0,0 +1,15 @@ +//go:build containers_image_rekor_stub +// +build containers_image_rekor_stub + +package internal + +import ( + "crypto/ecdsa" + "time" +) + +// VerifyRekorSET verifies that unverifiedRekorSET is correctly signed by publicKey and matches the rest of the data. +// Returns bundle upload time on success. 
+func VerifyRekorSET(publicKey *ecdsa.PublicKey, unverifiedRekorSET []byte, unverifiedKeyOrCertBytes []byte, unverifiedBase64Signature string, unverifiedPayloadBytes []byte) (time.Time, error) { + return time.Time{}, NewInvalidSignatureError("rekor disabled at compile-time") +} diff --git a/signature/internal/rekor_set_test.go b/signature/internal/rekor_set_test.go index 0cc8483d4a..0040b7b4c2 100644 --- a/signature/internal/rekor_set_test.go +++ b/signature/internal/rekor_set_test.go @@ -1,3 +1,6 @@ +//go:build !containers_image_rekor_stub +// +build !containers_image_rekor_stub + package internal import ( diff --git a/signature/internal/sigstore_payload_test.go b/signature/internal/sigstore_payload_test.go index eaee202a34..9c4db40140 100644 --- a/signature/internal/sigstore_payload_test.go +++ b/signature/internal/sigstore_payload_test.go @@ -14,6 +14,7 @@ import ( "github.com/sigstore/sigstore/pkg/cryptoutils" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "golang.org/x/exp/slices" ) // A short-hand way to get a JSON object field value or panic. No error handling done, we know @@ -283,7 +284,7 @@ func TestVerifySigstorePayload(t *testing.T) { for _, invalidSig := range [][]byte{ {}, // Empty signature []byte("invalid signature"), - append(validSignatureBytes, validSignatureBytes...), + append(slices.Clone(validSignatureBytes), validSignatureBytes...), } { recorded = acceptanceData{} res, err = VerifySigstorePayload(publicKey, sigstoreSig.UntrustedPayload(), base64.StdEncoding.EncodeToString(invalidSig), recordingRules) diff --git a/signature/policy_eval_sigstore_test.go b/signature/policy_eval_sigstore_test.go index f4dd11368e..b460071237 100644 --- a/signature/policy_eval_sigstore_test.go +++ b/signature/policy_eval_sigstore_test.go @@ -1,3 +1,6 @@ +//go:build !containers_image_fulcio_stub +// +build !containers_image_fulcio_stub + // Policy evaluation for prCosignSigned. 
package signature diff --git a/signature/sigstore/copied.go b/signature/sigstore/copied.go index f9c7f6a5ed..2e510f60e3 100644 --- a/signature/sigstore/copied.go +++ b/signature/sigstore/copied.go @@ -10,9 +10,9 @@ import ( "errors" "fmt" + "github.com/secure-systems-lab/go-securesystemslib/encrypted" "github.com/sigstore/sigstore/pkg/cryptoutils" "github.com/sigstore/sigstore/pkg/signature" - "github.com/theupdateframework/go-tuf/encrypted" ) // The following code was copied from github.com/sigstore. diff --git a/signature/sigstore/fulcio/fulcio.go b/signature/sigstore/fulcio/fulcio.go index 0e6746abb3..4ba98b9865 100644 --- a/signature/sigstore/fulcio/fulcio.go +++ b/signature/sigstore/fulcio/fulcio.go @@ -1,3 +1,6 @@ +//go:build !containers_image_fulcio_stub +// +build !containers_image_fulcio_stub + package fulcio import ( diff --git a/signature/sigstore/fulcio/fulcio_stub.go b/signature/sigstore/fulcio/fulcio_stub.go new file mode 100644 index 0000000000..4f4d435c1d --- /dev/null +++ b/signature/sigstore/fulcio/fulcio_stub.go @@ -0,0 +1,45 @@ +//go:build containers_image_fulcio_stub +// +build containers_image_fulcio_stub + +package fulcio + +import ( + "fmt" + "io" + "net/url" + + "github.com/containers/image/v5/signature/sigstore/internal" +) + +func WithFulcioAndPreexistingOIDCIDToken(fulcioURL *url.URL, oidcIDToken string) internal.Option { + return func(s *internal.SigstoreSigner) error { + return fmt.Errorf("fulcio disabled at compile time") + } +} + +// WithFulcioAndDeviceAuthorizationGrantOIDC sets up signing to use a short-lived key and a Fulcio-issued certificate +// based on an OIDC ID token obtained using a device authorization grant (RFC 8628). +// +// interactiveOutput must be directly accessible to a human user in real time (i.e. not be just a log file). 
+func WithFulcioAndDeviceAuthorizationGrantOIDC(fulcioURL *url.URL, oidcIssuerURL *url.URL, oidcClientID, oidcClientSecret string, + interactiveOutput io.Writer) internal.Option { + return func(s *internal.SigstoreSigner) error { + return fmt.Errorf("fulcio disabled at compile time") + } + } + + // WithFulcioAndInteractiveOIDC sets up signing to use a short-lived key and a Fulcio-issued certificate + // based on an interactively-obtained OIDC ID token. + // The token is obtained + // - directly using a browser, listening on localhost, automatically opening a browser to the OIDC issuer, + // to be redirected on localhost. (I.e. the current environment must allow launching a browser that connects back to the current process; + // either or both may be impossible in a container or a remote VM). + // - or by instructing the user to manually open a browser, obtain the OIDC code, and interactively input it as text. + // + // interactiveInput and interactiveOutput must both be directly operable by a human user in real time (i.e. not be just a log file). 
+func WithFulcioAndInteractiveOIDC(fulcioURL *url.URL, oidcIssuerURL *url.URL, oidcClientID, oidcClientSecret string, + interactiveInput io.Reader, interactiveOutput io.Writer) internal.Option { + return func(s *internal.SigstoreSigner) error { + return fmt.Errorf("fulcio disabled at compile time") + } +} diff --git a/signature/sigstore/rekor/rekor.go b/signature/sigstore/rekor/rekor.go index 0236f0aabb..f8ba6dc3fa 100644 --- a/signature/sigstore/rekor/rekor.go +++ b/signature/sigstore/rekor/rekor.go @@ -1,3 +1,6 @@ +//go:build !containers_image_rekor_stub +// +build !containers_image_rekor_stub + package rekor import ( diff --git a/signature/sigstore/rekor/rekor_stub.go b/signature/sigstore/rekor/rekor_stub.go new file mode 100644 index 0000000000..d61926530f --- /dev/null +++ b/signature/sigstore/rekor/rekor_stub.go @@ -0,0 +1,17 @@ +//go:build containers_image_rekor_stub +// +build containers_image_rekor_stub + +package rekor + +import ( + "fmt" + "net/url" + + signerInternal "github.com/containers/image/v5/signature/sigstore/internal" +) + +func WithRekor(rekorURL *url.URL) signerInternal.Option { + return func(s *signerInternal.SigstoreSigner) error { + return fmt.Errorf("rekor disabled at build time") + } +} diff --git a/signature/simplesigning/signer_test.go b/signature/simplesigning/signer_test.go index 0246c13c5a..e572eeb927 100644 --- a/signature/simplesigning/signer_test.go +++ b/signature/simplesigning/signer_test.go @@ -103,7 +103,7 @@ func TestSimpleSignerSignImageManifest(t *testing.T) { // Failures to sign need to be tested in two parts: First the failures that involve the wrong passphrase, then failures that // should manifest even with a valid passphrase or unlocked key (because the GPG agent is caching unlocked keys). - // Alternatively, we could be caling gpgagent.KillGPGAgent() all the time... + // Alternatively, we could be calling gpgagent.KillGPGAgent() all the time... 
type failingCase struct { name string opts []Option diff --git a/storage/storage_dest.go b/storage/storage_dest.go index 07e1d5e1f9..bbbff6cf98 100644 --- a/storage/storage_dest.go +++ b/storage/storage_dest.go @@ -77,13 +77,13 @@ type storageImageDestination struct { indexToStorageID map[int]*string // All accesses to below data are protected by `lock` which is made // *explicit* in the code. - blobDiffIDs map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs - fileSizes map[digest.Digest]int64 // Mapping from layer blobsums to their sizes - filenames map[digest.Digest]string // Mapping from layer blobsums to names of files we used to hold them - currentIndex int // The index of the layer to be committed (i.e., lower indices have already been committed) - indexToAddedLayerInfo map[int]addedLayerInfo // Mapping from layer (by index) to blob to add to the image - blobAdditionalLayer map[digest.Digest]storage.AdditionalLayer // Mapping from layer blobsums to their corresponding additional layer - diffOutputs map[digest.Digest]*graphdriver.DriverWithDifferOutput // Mapping from digest to differ output + uncompressedOrTocDigest map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs or TOC IDs. 
+ fileSizes map[digest.Digest]int64 // Mapping from layer blobsums to their sizes + filenames map[digest.Digest]string // Mapping from layer blobsums to names of files we used to hold them + currentIndex int // The index of the layer to be committed (i.e., lower indices have already been committed) + indexToAddedLayerInfo map[int]addedLayerInfo // Mapping from layer (by index) to blob to add to the image + blobAdditionalLayer map[digest.Digest]storage.AdditionalLayer // Mapping from layer blobsums to their corresponding additional layer + diffOutputs map[digest.Digest]*graphdriver.DriverWithDifferOutput // Mapping from digest to differ output } // addedLayerInfo records data about a layer to use in this image. @@ -117,18 +117,18 @@ func newImageDestination(sys *types.SystemContext, imageRef storageReference) (* HasThreadSafePutBlob: true, }), - imageRef: imageRef, - directory: directory, - signatureses: make(map[digest.Digest][]byte), - blobDiffIDs: make(map[digest.Digest]digest.Digest), - blobAdditionalLayer: make(map[digest.Digest]storage.AdditionalLayer), - fileSizes: make(map[digest.Digest]int64), - filenames: make(map[digest.Digest]string), - SignatureSizes: []int{}, - SignaturesSizes: make(map[digest.Digest][]int), - indexToStorageID: make(map[int]*string), - indexToAddedLayerInfo: make(map[int]addedLayerInfo), - diffOutputs: make(map[digest.Digest]*graphdriver.DriverWithDifferOutput), + imageRef: imageRef, + directory: directory, + signatureses: make(map[digest.Digest][]byte), + uncompressedOrTocDigest: make(map[digest.Digest]digest.Digest), + blobAdditionalLayer: make(map[digest.Digest]storage.AdditionalLayer), + fileSizes: make(map[digest.Digest]int64), + filenames: make(map[digest.Digest]string), + SignatureSizes: []int{}, + SignaturesSizes: make(map[digest.Digest][]int), + indexToStorageID: make(map[int]*string), + indexToAddedLayerInfo: make(map[int]addedLayerInfo), + diffOutputs: make(map[digest.Digest]*graphdriver.DriverWithDifferOutput), } 
dest.Compat = impl.AddCompat(dest) return dest, nil @@ -227,7 +227,7 @@ func (s *storageImageDestination) putBlobToPendingFile(stream io.Reader, blobinf // Record information about the blob. s.lock.Lock() - s.blobDiffIDs[blobDigest] = diffID.Digest() + s.uncompressedOrTocDigest[blobDigest] = diffID.Digest() s.fileSizes[blobDigest] = counter.Count s.filenames[blobDigest] = filename s.lock.Unlock() @@ -289,7 +289,7 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAcces blobDigest := srcInfo.Digest s.lock.Lock() - s.blobDiffIDs[blobDigest] = blobDigest + s.uncompressedOrTocDigest[blobDigest] = blobDigest s.fileSizes[blobDigest] = 0 s.filenames[blobDigest] = "" s.diffOutputs[blobDigest] = out @@ -321,7 +321,7 @@ func (s *storageImageDestination) TryReusingBlobWithOptions(ctx context.Context, }) } -// tryReusingBlobAsPending implements TryReusingBlobWithOptions for (digest, size or -1), filling s.blobDiffIDs and other metadata. +// tryReusingBlobAsPending implements TryReusingBlobWithOptions for (digest, size or -1), filling s.uncompressedOrTocDigest and other metadata. // The caller must arrange the blob to be eventually committed using s.commitLayer(). func (s *storageImageDestination) tryReusingBlobAsPending(digest digest.Digest, size int64, options *private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) { // lock the entire method as it executes fairly quickly @@ -335,7 +335,7 @@ func (s *storageImageDestination) tryReusingBlobAsPending(digest digest.Digest, return false, private.ReusedBlob{}, fmt.Errorf(`looking for compressed layers with digest %q and labels: %w`, digest, err) } else if err == nil { // Record the uncompressed value so that we can use it to calculate layer IDs. 
- s.blobDiffIDs[digest] = aLayer.UncompressedDigest() + s.uncompressedOrTocDigest[digest] = aLayer.UncompressedDigest() s.blobAdditionalLayer[digest] = aLayer return true, private.ReusedBlob{ Digest: digest, @@ -366,7 +366,7 @@ func (s *storageImageDestination) tryReusingBlobAsPending(digest digest.Digest, } if len(layers) > 0 { // Save this for completeness. - s.blobDiffIDs[digest] = layers[0].UncompressedDigest + s.uncompressedOrTocDigest[digest] = layers[0].UncompressedDigest return true, private.ReusedBlob{ Digest: digest, Size: layers[0].UncompressedSize, @@ -380,7 +380,7 @@ func (s *storageImageDestination) tryReusingBlobAsPending(digest digest.Digest, } if len(layers) > 0 { // Record the uncompressed value so that we can use it to calculate layer IDs. - s.blobDiffIDs[digest] = layers[0].UncompressedDigest + s.uncompressedOrTocDigest[digest] = layers[0].UncompressedDigest return true, private.ReusedBlob{ Digest: digest, Size: layers[0].CompressedSize, @@ -398,7 +398,7 @@ func (s *storageImageDestination) tryReusingBlobAsPending(digest digest.Digest, } if len(layers) > 0 { if size != -1 { - s.blobDiffIDs[digest] = layers[0].UncompressedDigest + s.uncompressedOrTocDigest[digest] = layers[0].UncompressedDigest return true, private.ReusedBlob{ Digest: digest, Size: size, @@ -407,7 +407,7 @@ func (s *storageImageDestination) tryReusingBlobAsPending(digest digest.Digest, if !options.CanSubstitute { return false, private.ReusedBlob{}, fmt.Errorf("Internal error: options.CanSubstitute was expected to be true for blob with digest %s", digest) } - s.blobDiffIDs[uncompressedDigest] = layers[0].UncompressedDigest + s.uncompressedOrTocDigest[uncompressedDigest] = layers[0].UncompressedDigest return true, private.ReusedBlob{ Digest: uncompressedDigest, Size: layers[0].UncompressedSize, @@ -416,6 +416,25 @@ func (s *storageImageDestination) tryReusingBlobAsPending(digest digest.Digest, } } + tocDigest := digest + if options.TOCDigest != nil { + tocDigest = 
*options.TOCDigest + } + + // Check if we have a chunked layer in storage with the same TOC digest. + layers, err = s.imageRef.transport.store.LayersByTOCDigest(tocDigest) + if err != nil && !errors.Is(err, storage.ErrLayerUnknown) { + return false, private.ReusedBlob{}, fmt.Errorf(`looking for layers with TOC digest %q: %w`, tocDigest, err) + } + if len(layers) > 0 { + // Save this for completeness. + s.uncompressedOrTocDigest[digest] = layers[0].TOCDigest + return true, private.ReusedBlob{ + Digest: layers[0].TOCDigest, + Size: layers[0].UncompressedSize, + }, nil + } + // Nope, we don't have it. return false, private.ReusedBlob{}, nil } @@ -438,16 +457,20 @@ func (s *storageImageDestination) computeID(m manifest.Manifest) string { continue } blobSum := m.FSLayers[i].BlobSum - diffID, ok := s.blobDiffIDs[blobSum] + diffID, ok := s.uncompressedOrTocDigest[blobSum] if !ok { logrus.Infof("error looking up diffID for layer %q", blobSum.String()) return "" } diffIDs = append([]digest.Digest{diffID}, diffIDs...) } - case *manifest.Schema2, *manifest.OCI1: - // We know the ID calculation for these formats doesn't actually use the diffIDs, - // so we don't need to populate the diffID list. + case *manifest.Schema2: + // We know the ID calculation doesn't actually use the diffIDs, so we don't need to populate + // the diffID list. + case *manifest.OCI1: + for _, l := range m.Layers { + diffIDs = append(diffIDs, l.Digest) + } default: return "" } @@ -518,7 +541,7 @@ func (s *storageImageDestination) queueOrCommit(index int, info addedLayerInfo) } s.lock.Unlock() // Note: commitLayer locks on-demand. 
- if err := s.commitLayer(index, info, -1); err != nil { + if stopQueue, err := s.commitLayer(index, info, -1); stopQueue || err != nil { return err } s.lock.Lock() @@ -532,18 +555,32 @@ func (s *storageImageDestination) queueOrCommit(index int, info addedLayerInfo) return nil } +// getDiffIDOrTOCDigest returns the diffID for the specified digest or the digest for the TOC, if known. +func (s *storageImageDestination) getDiffIDOrTOCDigest(uncompressedDigest digest.Digest) (digest.Digest, bool) { + s.lock.Lock() + defer s.lock.Unlock() + + if d, found := s.diffOutputs[uncompressedDigest]; found { + return d.TOCDigest, found + } + d, found := s.uncompressedOrTocDigest[uncompressedDigest] + return d, found +} + // commitLayer commits the specified layer with the given index to the storage. -// size can usually be -1; it can be provided if the layer is not known to be already present in blobDiffIDs. +// size can usually be -1; it can be provided if the layer is not known to be already present in uncompressedOrTocDigest. +// +// If the layer cannot be committed yet, the function returns (true, nil). // // Note that the previous layer is expected to already be committed. // // Caution: this function must be called without holding `s.lock`. Callers // must guarantee that, at any given time, at most one goroutine may execute // `commitLayer()`. -func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, size int64) error { +func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, size int64) (bool, error) { // Already committed? Return early. if _, alreadyCommitted := s.indexToStorageID[index]; alreadyCommitted { - return nil + return false, nil } // Start with an empty string or the previous layer ID. Note that @@ -557,68 +594,96 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si // Carry over the previous ID for empty non-base layers. 
if info.emptyLayer { s.indexToStorageID[index] = &lastLayer - return nil + return false, nil } // Check if there's already a layer with the ID that we'd give to the result of applying // this layer blob to its parent, if it has one, or the blob's hex value otherwise. - s.lock.Lock() - diffID, haveDiffID := s.blobDiffIDs[info.digest] - s.lock.Unlock() - if !haveDiffID { + // The diffIDOrTOCDigest refers either to the DiffID or the digest of the TOC. + diffIDOrTOCDigest, haveDiffIDOrTOCDigest := s.getDiffIDOrTOCDigest(info.digest) + if !haveDiffIDOrTOCDigest { // Check if it's elsewhere and the caller just forgot to pass it to us in a PutBlob(), // or to even check if we had it. // Use none.NoCache to avoid a repeated DiffID lookup in the BlobInfoCache; a caller // that relies on using a blob digest that has never been seen by the store had better call // TryReusingBlob; not calling PutBlob already violates the documented API, so there’s only // so far we are going to accommodate that (if we should be doing that at all). - logrus.Debugf("looking for diffID for blob %+v", info.digest) + logrus.Debugf("looking for diffID or TOC digest for blob %+v", info.digest) // Use tryReusingBlobAsPending, not the top-level TryReusingBlobWithOptions, to prevent recursion via queueOrCommit. 
has, _, err := s.tryReusingBlobAsPending(info.digest, size, &private.TryReusingBlobOptions{ Cache: none.NoCache, CanSubstitute: false, }) if err != nil { - return fmt.Errorf("checking for a layer based on blob %q: %w", info.digest.String(), err) + return false, fmt.Errorf("checking for a layer based on blob %q: %w", info.digest.String(), err) } if !has { - return fmt.Errorf("error determining uncompressed digest for blob %q", info.digest.String()) + return false, fmt.Errorf("error determining uncompressed digest or TOC digest for blob %q", info.digest.String()) } - diffID, haveDiffID = s.blobDiffIDs[info.digest] - if !haveDiffID { - return fmt.Errorf("we have blob %q, but don't know its uncompressed digest", info.digest.String()) + diffIDOrTOCDigest, haveDiffIDOrTOCDigest = s.getDiffIDOrTOCDigest(info.digest) + if !haveDiffIDOrTOCDigest { + return false, fmt.Errorf("we have blob %q, but don't know its uncompressed or TOC digest", info.digest.String()) } } - id := diffID.Hex() + id := diffIDOrTOCDigest.Hex() if lastLayer != "" { - id = digest.Canonical.FromBytes([]byte(lastLayer + "+" + diffID.Hex())).Hex() + id = digest.Canonical.FromBytes([]byte(lastLayer + "+" + diffIDOrTOCDigest.Hex())).Hex() } if layer, err2 := s.imageRef.transport.store.Layer(id); layer != nil && err2 == nil { // There's already a layer that should have the right contents, just reuse it. 
lastLayer = layer.ID s.indexToStorageID[index] = &lastLayer - return nil + return false, nil } s.lock.Lock() diffOutput, ok := s.diffOutputs[info.digest] s.lock.Unlock() if ok { + if s.manifest == nil { + logrus.Debugf("Skipping commit for TOC=%q, manifest not yet available", id) + return true, nil + } + + man, err := manifest.FromBlob(s.manifest, manifest.GuessMIMEType(s.manifest)) + if err != nil { + return false, fmt.Errorf("parsing manifest: %w", err) + } + + cb, err := s.getConfigBlob(man.ConfigInfo()) + if err != nil { + return false, err + } + + // retrieve the expected uncompressed digest from the config blob. + configOCI := &imgspecv1.Image{} + if err := json.Unmarshal(cb, configOCI); err != nil { + return false, err + } + if index >= len(configOCI.RootFS.DiffIDs) { + return false, fmt.Errorf("index %d out of range for configOCI.RootFS.DiffIDs", index) + } + layer, err := s.imageRef.transport.store.CreateLayer(id, lastLayer, nil, "", false, nil) if err != nil { - return err + return false, err } - // FIXME: what to do with the uncompressed digest? - diffOutput.UncompressedDigest = info.digest + // let the storage layer know what was the original uncompressed layer. 
+ flags := make(map[string]interface{}) + flags[expectedLayerDiffIDFlag] = configOCI.RootFS.DiffIDs[index] + logrus.Debugf("Setting uncompressed digest to %q for layer %q", configOCI.RootFS.DiffIDs[index], id) + options := &graphdriver.ApplyDiffWithDifferOpts{ + Flags: flags, + } - if err := s.imageRef.transport.store.ApplyDiffFromStagingDirectory(layer.ID, diffOutput.Target, diffOutput, nil); err != nil { + if err := s.imageRef.transport.store.ApplyDiffFromStagingDirectory(layer.ID, diffOutput.Target, diffOutput, options); err != nil { _ = s.imageRef.transport.store.Delete(layer.ID) - return err + return false, err } s.indexToStorageID[index] = &layer.ID - return nil + return false, nil } s.lock.Lock() @@ -627,11 +692,11 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si if ok { layer, err := al.PutAs(id, lastLayer, nil) if err != nil && !errors.Is(err, storage.ErrDuplicateID) { - return fmt.Errorf("failed to put layer from digest and labels: %w", err) + return false, fmt.Errorf("failed to put layer from digest and labels: %w", err) } lastLayer = layer.ID s.indexToStorageID[index] = &lastLayer - return nil + return false, nil } // Check if we previously cached a file with that blob's contents. If we didn't, @@ -642,7 +707,7 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si if !ok { // Try to find the layer with contents matching that blobsum. layer := "" - layers, err2 := s.imageRef.transport.store.LayersByUncompressedDigest(diffID) + layers, err2 := s.imageRef.transport.store.LayersByUncompressedDigest(diffIDOrTOCDigest) if err2 == nil && len(layers) > 0 { layer = layers[0].ID } else { @@ -652,7 +717,7 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si } } if layer == "" { - return fmt.Errorf("locating layer for blob %q: %w", info.digest, err2) + return false, fmt.Errorf("locating layer for blob %q: %w", info.digest, err2) } // Read the layer's contents. 
noCompression := archive.Uncompressed @@ -661,17 +726,17 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si } diff, err2 := s.imageRef.transport.store.Diff("", layer, diffOptions) if err2 != nil { - return fmt.Errorf("reading layer %q for blob %q: %w", layer, info.digest, err2) + return false, fmt.Errorf("reading layer %q for blob %q: %w", layer, info.digest, err2) } // Copy the layer diff to a file. Diff() takes a lock that it holds // until the ReadCloser that it returns is closed, and PutLayer() wants // the same lock, so the diff can't just be directly streamed from one // to the other. filename = s.computeNextBlobCacheFile() - file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0600) + file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0o600) if err != nil { diff.Close() - return fmt.Errorf("creating temporary file %q: %w", filename, err) + return false, fmt.Errorf("creating temporary file %q: %w", filename, err) } // Copy the data to the file. // TODO: This can take quite some time, and should ideally be cancellable using @@ -680,7 +745,7 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si diff.Close() file.Close() if err != nil { - return fmt.Errorf("storing blob to file %q: %w", filename, err) + return false, fmt.Errorf("storing blob to file %q: %w", filename, err) } // Make sure that we can find this file later, should we need the layer's // contents again. @@ -691,21 +756,21 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si // Read the cached blob and use it as a diff. file, err := os.Open(filename) if err != nil { - return fmt.Errorf("opening file %q: %w", filename, err) + return false, fmt.Errorf("opening file %q: %w", filename, err) } defer file.Close() // Build the new layer using the diff, regardless of where it came from. 
// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). layer, _, err := s.imageRef.transport.store.PutLayer(id, lastLayer, nil, "", false, &storage.LayerOptions{ OriginalDigest: info.digest, - UncompressedDigest: diffID, + UncompressedDigest: diffIDOrTOCDigest, }, file) if err != nil && !errors.Is(err, storage.ErrDuplicateID) { - return fmt.Errorf("adding layer with blob %q: %w", info.digest, err) + return false, fmt.Errorf("adding layer with blob %q: %w", info.digest, err) } s.indexToStorageID[index] = &layer.ID - return nil + return false, nil } // Commit marks the process of storing the image as successful and asks for the image to be persisted. @@ -752,11 +817,13 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t // Extract, commit, or find the layers. for i, blob := range layerBlobs { - if err := s.commitLayer(i, addedLayerInfo{ + if stopQueue, err := s.commitLayer(i, addedLayerInfo{ digest: blob.Digest, emptyLayer: blob.EmptyLayer, }, blob.Size); err != nil { return err + } else if stopQueue { + return fmt.Errorf("Internal error: storageImageDestination.Commit(): commitLayer() not ready to commit for layer %q", blob.Digest) } } var lastLayer string diff --git a/storage/storage_reference.go b/storage/storage_reference.go index 49f7d03c85..a55e34054a 100644 --- a/storage/storage_reference.go +++ b/storage/storage_reference.go @@ -10,6 +10,7 @@ import ( "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/transports" "github.com/containers/image/v5/types" "github.com/containers/storage" digest "github.com/opencontainers/go-digest" @@ -101,6 +102,8 @@ func multiArchImageMatchesSystemContext(store storage.Store, img *storage.Image, // Resolve the reference's name to an image ID in the store, if there's already // one present with the same name or ID, and return the image. 
+// +// Returns an error matching ErrNoSuchImage if an image matching ref was not found. func (s *storageReference) resolveImage(sys *types.SystemContext) (*storage.Image, error) { var loadedImage *storage.Image if s.id == "" && s.named != nil { @@ -283,3 +286,31 @@ func (s storageReference) NewImageSource(ctx context.Context, sys *types.SystemC func (s storageReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { return newImageDestination(sys, s) } + +// ResolveReference finds the underlying storage image for a storage.Transport reference. +// It returns that image, and an updated reference which can be used to refer back to the _same_ +// image again. +// +// This matters if the input reference contains a tagged name; the destination of the tag can +// move in local storage. The updated reference returned by this function contains the resolved +// image ID, so later uses of that updated reference will either continue to refer to the same +// image, or fail. +// +// Note that it _is_ possible for the later uses to fail, either because the image was removed +// completely, or because the name used in the reference was untagged (even if the underlying image +// ID still exists in local storage). +// +// Returns an error matching ErrNoSuchImage if an image matching ref was not found. 
+func ResolveReference(ref types.ImageReference) (types.ImageReference, *storage.Image, error) { + sref, ok := ref.(*storageReference) + if !ok { + return nil, nil, fmt.Errorf("trying to resolve a non-%s: reference %q", Transport.Name(), + transports.ImageName(ref)) + } + clone := *sref // A shallow copy we can update + img, err := clone.resolveImage(nil) + if err != nil { + return nil, nil, err + } + return clone, img, nil +} diff --git a/storage/storage_reference_test.go b/storage/storage_reference_test.go index 6fa7d0e713..32590a06da 100644 --- a/storage/storage_reference_test.go +++ b/storage/storage_reference_test.go @@ -9,6 +9,9 @@ import ( "testing" "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/pkg/blobinfocache/memory" + "github.com/containers/storage" + "github.com/containers/storage/pkg/archive" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -97,14 +100,19 @@ func TestStorageReferenceDockerReference(t *testing.T) { } } -func TestStorageReferenceStringWithinTransport(t *testing.T) { - store := newStore(t) +// The […] part of references created for store +func storeSpecForStringWithinTransport(store storage.Store) string { optionsList := "" options := store.GraphOptions() if len(options) > 0 { optionsList = ":" + strings.Join(options, ",") } - storeSpec := fmt.Sprintf("[%s@%s+%s%s]", store.GraphDriverName(), store.GraphRoot(), store.RunRoot(), optionsList) + return fmt.Sprintf("[%s@%s+%s%s]", store.GraphDriverName(), store.GraphRoot(), store.RunRoot(), optionsList) +} + +func TestStorageReferenceStringWithinTransport(t *testing.T) { + store := newStore(t) + storeSpec := storeSpecForStringWithinTransport(store) for _, c := range validReferenceTestCases { ref, err := Transport.ParseReference(c.input) @@ -142,3 +150,47 @@ func TestStorageReferencePolicyConfigurationNamespaces(t *testing.T) { } // NewImage, NewImageSource, NewImageDestination, DeleteImage tested in storage_test.go + +func 
TestResolveReference(t *testing.T) { + // This is, so far, only a minimal smoke test + + ensureTestCanCreateImages(t) + + store := newStore(t) + storeSpec := storeSpecForStringWithinTransport(store) + cache := memory.New() + + id := "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + // Create an image with a known name and ID + ref, err := Transport.ParseStoreReference(store, "test@"+id) + require.NoError(t, err) + createImage(t, ref, cache, []testBlob{makeLayer(t, archive.Gzip)}, nil) + + for _, c := range []struct { + input string + expected string // "" on error + }{ + { // No ID match + "@bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", "", + }, + {"@" + id, "@" + id}, // ID-only lookup + {"test", "docker.io/library/test:latest@" + id}, // Name is resolved to include ID + {"nottest", ""}, // No name match + {"test@" + id, "docker.io/library/test:latest@" + id}, // Name+ID works, and is unchanged + {"nottest@" + id, ""}, // Name mismatch is rejected even with an ID + } { + input, err := Transport.ParseStoreReference(store, c.input) + require.NoError(t, err, c.input) + inputClone := *input + resolved, img, err := ResolveReference(input) + if c.expected == "" { + assert.Error(t, err, c.input) + } else { + require.NoError(t, err, c.input) + require.Equal(t, &inputClone, input) // input was not modified in-place + assert.Equal(t, id, img.ID, c.input) + assert.Equal(t, storeSpec+c.expected, resolved.StringWithinTransport(), c.input) + } + } +} diff --git a/storage/storage_src.go b/storage/storage_src.go index 66d04da315..7022d322ea 100644 --- a/storage/storage_src.go +++ b/storage/storage_src.go @@ -23,26 +23,39 @@ import ( "github.com/containers/image/v5/types" "github.com/containers/storage" "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/ioutils" digest "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" ) +// 
getBlobMutexProtected is a struct to hold the state of the getBlobMutex mutex. +type getBlobMutexProtected struct { + // digestToLayerID is a lookup map from the layer digest (either the uncompressed digest or the TOC digest) to the + // layer ID in the store. + digestToLayerID map[digest.Digest]string + + // layerPosition stores where we are in reading a blob's layers + layerPosition map[digest.Digest]int +} + type storageImageSource struct { impl.Compat impl.PropertyMethodsInitialize stubs.NoGetBlobAtInitialize - imageRef storageReference - image *storage.Image - systemContext *types.SystemContext // SystemContext used in GetBlob() to create temporary files - layerPosition map[digest.Digest]int // Where we are in reading a blob's layers - cachedManifest []byte // A cached copy of the manifest, if already known, or nil - getBlobMutex sync.Mutex // Mutex to sync state for parallel GetBlob executions - SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice - SignaturesSizes map[digest.Digest][]int `json:"signatures-sizes,omitempty"` // List of sizes of each signature slice + imageRef storageReference + image *storage.Image + systemContext *types.SystemContext // SystemContext used in GetBlob() to create temporary files + cachedManifest []byte // A cached copy of the manifest, if already known, or nil + getBlobMutex sync.Mutex // Mutex to sync state for parallel GetBlob executions (it guards layerPosition and digestToLayerID) + getBlobMutexProtected getBlobMutexProtected + SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice + SignaturesSizes map[digest.Digest][]int `json:"signatures-sizes,omitempty"` // List of sizes of each signature slice } +const expectedLayerDiffIDFlag = "expected-layer-diffid" + // newImageSource sets up an image for reading. func newImageSource(sys *types.SystemContext, imageRef storageReference) (*storageImageSource, error) { // First, locate the image. 
@@ -61,9 +74,12 @@ func newImageSource(sys *types.SystemContext, imageRef storageReference) (*stora imageRef: imageRef, systemContext: sys, image: img, - layerPosition: make(map[digest.Digest]int), SignatureSizes: []int{}, SignaturesSizes: make(map[digest.Digest][]int), + getBlobMutexProtected: getBlobMutexProtected{ + digestToLayerID: make(map[digest.Digest]string), + layerPosition: make(map[digest.Digest]int), + }, } image.Compat = impl.AddCompat(image) if img.Metadata != "" { @@ -90,6 +106,7 @@ func (s *storageImageSource) Close() error { func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (rc io.ReadCloser, n int64, err error) { // We need a valid digest value. digest := info.Digest + err = digest.Validate() if err != nil { return nil, 0, err @@ -99,10 +116,24 @@ func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, c return io.NopCloser(bytes.NewReader(image.GzippedEmptyLayer)), int64(len(image.GzippedEmptyLayer)), nil } - // Check if the blob corresponds to a diff that was used to initialize any layers. Our - // callers should try to retrieve layers using their uncompressed digests, so no need to - // check if they're using one of the compressed digests, which we can't reproduce anyway. - layers, _ := s.imageRef.transport.store.LayersByUncompressedDigest(digest) + var layers []storage.Layer + + // If the digest was overridden by LayerInfosForCopy, then we need to use the TOC digest + // to retrieve it from the storage. + s.getBlobMutex.Lock() + layerID, found := s.getBlobMutexProtected.digestToLayerID[digest] + s.getBlobMutex.Unlock() + + if found { + if layer, err := s.imageRef.transport.store.Layer(layerID); err == nil { + layers = []storage.Layer{*layer} + } + } else { + // Check if the blob corresponds to a diff that was used to initialize any layers. 
Our + // callers should try to retrieve layers using their uncompressed digests, so no need to + // check if they're using one of the compressed digests, which we can't reproduce anyway. + layers, _ = s.imageRef.transport.store.LayersByUncompressedDigest(digest) + } // If it's not a layer, then it must be a data item. if len(layers) == 0 { @@ -129,15 +160,20 @@ func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, c return nil, 0, err } success := false + tmpFileRemovePending := true defer func() { if !success { tmpFile.Close() + if tmpFileRemovePending { + os.Remove(tmpFile.Name()) + } } }() // On Unix and modern Windows (2022 at least) we can eagerly unlink the file to ensure it's automatically // cleaned up on process termination (or if the caller forgets to invoke Close()) + // On older versions of Windows we will have to fallback to relying on the caller to invoke Close() if err := os.Remove(tmpFile.Name()); err != nil { - return nil, 0, err + tmpFileRemovePending = false } if _, err := io.Copy(tmpFile, rc); err != nil { @@ -148,6 +184,14 @@ func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, c } success = true + + if tmpFileRemovePending { + return ioutils.NewReadCloserWrapper(tmpFile, func() error { + tmpFile.Close() + return os.Remove(tmpFile.Name()) + }), n, nil + } + return tmpFile, n, nil } @@ -160,8 +204,8 @@ func (s *storageImageSource) getBlobAndLayerID(digest digest.Digest, layers []st // which claim to have the same contents, that we actually do have multiple layers, otherwise we could // just go ahead and use the first one every time. 
s.getBlobMutex.Lock() - i := s.layerPosition[digest] - s.layerPosition[digest] = i + 1 + i := s.getBlobMutexProtected.layerPosition[digest] + s.getBlobMutexProtected.layerPosition[digest] = i + 1 s.getBlobMutex.Unlock() if len(layers) > 0 { layer = layers[i%len(layers)] @@ -253,14 +297,35 @@ func (s *storageImageSource) LayerInfosForCopy(ctx context.Context, instanceDige if err != nil { return nil, fmt.Errorf("reading layer %q in image %q: %w", layerID, s.image.ID, err) } - if layer.UncompressedDigest == "" { - return nil, fmt.Errorf("uncompressed digest for layer %q is unknown", layerID) + if layer.UncompressedDigest == "" && layer.TOCDigest == "" { + return nil, fmt.Errorf("uncompressed digest and TOC digest for layer %q is unknown", layerID) } if layer.UncompressedSize < 0 { return nil, fmt.Errorf("uncompressed size for layer %q is unknown", layerID) } + + blobDigest := layer.UncompressedDigest + + if layer.TOCDigest != "" { + if layer.Flags == nil || layer.Flags[expectedLayerDiffIDFlag] == nil { + return nil, fmt.Errorf("TOC digest %q for layer %q is present but %q flag is not set", layer.TOCDigest, layerID, expectedLayerDiffIDFlag) + } + if expectedDigest, ok := layer.Flags[expectedLayerDiffIDFlag].(string); ok { + // if the layer is stored by its TOC, report the expected diffID as the layer Digest + // but store the TOC digest so we can later retrieve it from the storage. 
+ blobDigest, err = digest.Parse(expectedDigest) + if err != nil { + return nil, fmt.Errorf("parsing expected diffID %q for layer %q: %w", expectedDigest, layerID, err) + } + } else { + return nil, fmt.Errorf("TOC digest %q for layer %q is present but %q flag is not a string", layer.TOCDigest, layerID, expectedLayerDiffIDFlag) + } + } + s.getBlobMutex.Lock() + s.getBlobMutexProtected.digestToLayerID[blobDigest] = layer.ID + s.getBlobMutex.Unlock() blobInfo := types.BlobInfo{ - Digest: layer.UncompressedDigest, + Digest: blobDigest, Size: layer.UncompressedSize, MediaType: uncompressedLayerType, } @@ -370,7 +435,7 @@ func (s *storageImageSource) getSize() (int64, error) { if err != nil { return -1, err } - if layer.UncompressedDigest == "" || layer.UncompressedSize < 0 { + if (layer.TOCDigest == "" && layer.UncompressedDigest == "") || layer.UncompressedSize < 0 { return -1, fmt.Errorf("size for layer %q is unknown, failing getSize()", layerID) } sum += layer.UncompressedSize diff --git a/storage/storage_test.go b/storage/storage_test.go index 247f0d4469..b0a07e507a 100644 --- a/storage/storage_test.go +++ b/storage/storage_test.go @@ -15,14 +15,14 @@ import ( "io" "os" "path/filepath" - "reflect" + "runtime" "strings" - "sync" "testing" "time" imanifest "github.com/containers/image/v5/internal/manifest" "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/pkg/blobinfocache/memory" "github.com/containers/image/v5/types" "github.com/containers/storage" @@ -30,10 +30,10 @@ import ( "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/ioutils" "github.com/containers/storage/pkg/reexec" - ddigest "github.com/opencontainers/go-digest" + "github.com/opencontainers/go-digest" "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "golang.org/x/exp/slices" ) var ( @@ -174,126 +174,186 @@ func 
TestParseWithGraphDriverOptions(t *testing.T) { for _, optionList := range optionLists { store := newStoreWithGraphDriverOptions(t, optionList) ref, err := Transport.ParseStoreReference(store, "test") - if err != nil { - t.Fatalf("ParseStoreReference(%q, graph driver options %v) returned error %v", "test", optionList, err) - } - if ref == nil { - t.Fatalf("ParseStoreReference returned nil reference") - } + require.NoError(t, err, optionList) + require.NotNil(t, ref) spec := ref.StringWithinTransport() ref2, err := Transport.ParseReference(spec) - if err != nil { - t.Fatalf("ParseReference(%q) returned error %v", "test", err) - } - if ref == nil { - t.Fatalf("ParseReference returned nil reference") - } + require.NoError(t, err) + require.NotNil(t, ref) sref, ok := ref2.(*storageReference) - if !ok { - t.Fatalf("ParseReference returned a reference from transport %s, not one of ours", ref2.Transport().Name()) - } + require.True(t, ok, "transport %s", ref2.Transport().Name()) parsedOptions := sref.transport.store.GraphOptions() - if !slices.Equal(parsedOptions, optionList) { - t.Fatalf("Mismatched options: %#v and %#v", optionList, parsedOptions) - } + assert.Equal(t, optionList, parsedOptions) } } -func systemContext() *types.SystemContext { - return &types.SystemContext{} -} - -func makeLayer(t *testing.T, compression archive.Compression) (ddigest.Digest, int64, int64, []byte) { - var cwriter io.WriteCloser +// makeLayerGoroutine writes to pwriter, and on success, updates uncompressedCount +// before it terminates. 
+func makeLayerGoroutine(pwriter io.Writer, uncompressedCount *int64, compression archive.Compression) error { var uncompressed *ioutils.WriteCounter - var twriter *tar.Writer - preader, pwriter := io.Pipe() - tbuffer := bytes.Buffer{} if compression != archive.Uncompressed { compressor, err := archive.CompressStream(pwriter, compression) if err != nil { - t.Fatalf("Error compressing layer: %v", err) + return fmt.Errorf("compressing layer: %w", err) } - cwriter = compressor - uncompressed = ioutils.NewWriteCounter(cwriter) + defer compressor.Close() + uncompressed = ioutils.NewWriteCounter(compressor) } else { uncompressed = ioutils.NewWriteCounter(pwriter) } - twriter = tar.NewWriter(uncompressed) + twriter := tar.NewWriter(uncompressed) + // defer twriter.Close() + // should be called here to correctly terminate the archive. + // We do not do that, to work around https://github.com/containers/storage/issues/1729 : + // tar-split runs a goroutine that consumes/forwards tar content and might access + // concurrently-freed objects if it sees a valid EOF marker. + // Instead, rely on raw EOF to terminate the goroutine. + // This depends on implementation details of tar.Writer (that it does not do any + // internal buffering). 
+ buf := make([]byte, layerSize) n, err := rand.Read(buf) if err != nil { - t.Fatalf("Error reading tar data: %v", err) + return fmt.Errorf("reading tar data: %w", err) } if n != len(buf) { - t.Fatalf("Short read reading tar data: %d < %d", n, len(buf)) + return fmt.Errorf("short read reading tar data: %d < %d", n, len(buf)) } for i := 1024; i < 2048; i++ { buf[i] = 0 } - wg := sync.WaitGroup{} - errs := make(chan error) - wg.Add(1) - go func() { - defer pwriter.Close() - if cwriter != nil { - defer cwriter.Close() - } - defer twriter.Close() - err := twriter.WriteHeader(&tar.Header{ - Name: "/random-single-file", - Mode: 0600, - Size: int64(len(buf)), - ModTime: time.Now(), - AccessTime: time.Now(), - ChangeTime: time.Now(), - Typeflag: tar.TypeReg, - }) - if err != nil { - errs <- fmt.Errorf("Error writing tar header: %v", err) - } - n, err := twriter.Write(buf) - if err != nil { - errs <- fmt.Errorf("Error writing tar header: %v", err) - } - if n != len(buf) { - errs <- fmt.Errorf("Short write writing tar header: %d < %d", n, len(buf)) - } - err = twriter.Flush() - if err != nil { - errs <- fmt.Errorf("Error flushing output to tar archive: %v", err) - } - }() + if err := twriter.WriteHeader(&tar.Header{ + Name: "/random-single-file", + Mode: 0600, + Size: int64(len(buf)), + ModTime: time.Now(), + AccessTime: time.Now(), + ChangeTime: time.Now(), + Typeflag: tar.TypeReg, + }); err != nil { + return fmt.Errorf("Error writing tar header: %w", err) + } + n, err = twriter.Write(buf) + if err != nil { + return fmt.Errorf("Error writing tar header: %w", err) + } + if n != len(buf) { + return fmt.Errorf("Short write writing tar header: %d < %d", n, len(buf)) + } + if err := twriter.Flush(); err != nil { + return fmt.Errorf("Error flushing output to tar archive: %w", err) + } + *uncompressedCount = uncompressed.Count + return nil +} + +type testBlob struct { + compressedDigest digest.Digest + uncompressedSize int64 + compressedSize int64 + data []byte +} + +func 
makeLayer(t *testing.T, compression archive.Compression) testBlob { + preader, pwriter := io.Pipe() + var uncompressedCount int64 go func() { - wg.Wait() - close(errs) + err := errors.New("Internal error: unexpected panic in makeLayer") + defer func() { // Note that this is not the same as {defer pipeWriter.CloseWithError(err)}; we need err to be evaluated lazily. + _ = pwriter.CloseWithError(err) + }() + err = makeLayerGoroutine(pwriter, &uncompressedCount, compression) }() - for err := range errs { - if err != nil { - t.Fatal(err) + + tbuffer := bytes.Buffer{} + _, err := io.Copy(&tbuffer, preader) + require.NoError(t, err) + return testBlob{ + compressedDigest: digest.SHA256.FromBytes(tbuffer.Bytes()), + uncompressedSize: uncompressedCount, + compressedSize: int64(tbuffer.Len()), + data: tbuffer.Bytes(), + } +} + +func (l testBlob) storeBlob(t *testing.T, dest types.ImageDestination, cache types.BlobInfoCache, mimeType string) manifest.Schema2Descriptor { + _, err := dest.PutBlob(context.Background(), bytes.NewReader(l.data), types.BlobInfo{ + Size: l.compressedSize, + Digest: l.compressedDigest, + }, cache, false) + require.NoError(t, err) + return manifest.Schema2Descriptor{ + MediaType: mimeType, + Size: l.compressedSize, + Digest: l.compressedDigest, + } +} + +// ensureTestCanCreateImages skips the current test if it is not possible to create layers and images in a private store. +func ensureTestCanCreateImages(t *testing.T) { + t.Helper() + switch runtime.GOOS { + case "darwin": + return // Due to https://github.com/containers/storage/pull/811 , c/storage can be used on macOS unprivileged. + case "linux": + if os.Geteuid() != 0 { + t.Skip("test requires root privileges on Linux") } + default: + // Unknown, let’s leave the tests enabled so that this can be investigated when working on that architecture. 
} +} - _, err = io.Copy(&tbuffer, preader) - if err != nil { - t.Fatalf("Error reading layer tar: %v", err) +func createUncommittedImageDest(t *testing.T, ref types.ImageReference, cache types.BlobInfoCache, + layers []testBlob, config *testBlob) (types.ImageDestination, types.UnparsedImage) { + dest, err := ref.NewImageDestination(context.Background(), nil) + require.NoError(t, err) + + layerDescriptors := []manifest.Schema2Descriptor{} + for _, layer := range layers { + desc := layer.storeBlob(t, dest, cache, manifest.DockerV2Schema2LayerMediaType) + layerDescriptors = append(layerDescriptors, desc) + } + configDescriptor := manifest.Schema2Descriptor{} // might be good enough + if config != nil { + configDescriptor = config.storeBlob(t, dest, cache, manifest.DockerV2Schema2ConfigMediaType) + } + + manifest := manifest.Schema2FromComponents(configDescriptor, layerDescriptors) + manifestBytes, err := manifest.Serialize() + require.NoError(t, err) + err = dest.PutManifest(context.Background(), manifestBytes, nil) + require.NoError(t, err) + unparsedToplevel := unparsedImage{ + imageReference: nil, + manifestBytes: manifestBytes, + manifestType: manifest.MediaType, + signatures: nil, } - sum := ddigest.SHA256.FromBytes(tbuffer.Bytes()) - return sum, uncompressed.Count, int64(tbuffer.Len()), tbuffer.Bytes() + return dest, &unparsedToplevel +} + +func createImage(t *testing.T, ref types.ImageReference, cache types.BlobInfoCache, + layers []testBlob, config *testBlob) { + dest, unparsedToplevel := createUncommittedImageDest(t, ref, cache, layers, config) + err := dest.Commit(context.Background(), unparsedToplevel) + require.NoError(t, err) + err = dest.Close() + require.NoError(t, err) } func TestWriteRead(t *testing.T) { - if os.Geteuid() != 0 { - t.Skip("TestWriteRead requires root privileges") - } + ensureTestCanCreateImages(t) - config := `{"config":{"labels":{}},"created":"2006-01-02T15:04:05Z"}` - sum := ddigest.SHA256.FromBytes([]byte(config)) - configInfo := 
types.BlobInfo{ - Digest: sum, - Size: int64(len(config)), + configBytes := []byte(`{"config":{"labels":{}},"created":"2006-01-02T15:04:05Z"}`) + config := testBlob{ + compressedDigest: digest.SHA256.FromBytes(configBytes), + uncompressedSize: int64(len(configBytes)), + compressedSize: int64(len(configBytes)), + data: configBytes, } + manifests := []string{ //`{ // "schemaVersion": 2, @@ -344,496 +404,176 @@ func TestWriteRead(t *testing.T) { ] }`, } + // Start signatures with 0xA0 to fool internal/signature.FromBlob into thinking it is valid GPG signatures := [][]byte{ - []byte("Signature A"), - []byte("Signature B"), + []byte("\xA0Signature A"), + []byte("\xA0Signature B"), } + newStore(t) - ref, err := Transport.ParseReference("test") - if err != nil { - t.Fatalf("ParseReference(%q) returned error %v", "test", err) - } - if ref == nil { - t.Fatalf("ParseReference returned nil reference") - } cache := memory.New() + ref, err := Transport.ParseReference("test") + require.NoError(t, err) + for _, manifestFmt := range manifests { - dest, err := ref.NewImageDestination(context.Background(), systemContext()) - if err != nil { - t.Fatalf("NewImageDestination(%q) returned error %v", ref.StringWithinTransport(), err) - } - if dest == nil { - t.Fatalf("NewImageDestination(%q) returned no destination", ref.StringWithinTransport()) - } - if dest.Reference().StringWithinTransport() != ref.StringWithinTransport() { - t.Fatalf("NewImageDestination(%q) changed the reference to %q", ref.StringWithinTransport(), dest.Reference().StringWithinTransport()) - } + dest, err := ref.NewImageDestination(context.Background(), nil) + require.NoError(t, err) + require.Equal(t, ref.StringWithinTransport(), dest.Reference().StringWithinTransport()) t.Logf("supported manifest MIME types: %v", dest.SupportedManifestMIMETypes()) - if err := dest.SupportsSignatures(context.Background()); err != nil { - t.Fatalf("Destination image doesn't support signatures: %v", err) - } + err = 
dest.SupportsSignatures(context.Background()) + require.NoError(t, err) t.Logf("compress layers: %v", dest.DesiredLayerCompression()) compression := archive.Uncompressed if dest.DesiredLayerCompression() == types.Compress { compression = archive.Gzip } - digest, decompressedSize, size, blob := makeLayer(t, compression) - if _, err := dest.PutBlob(context.Background(), bytes.NewReader(blob), types.BlobInfo{ - Size: size, - Digest: digest, - }, cache, false); err != nil { - t.Fatalf("Error saving randomly-generated layer to destination: %v", err) - } - t.Logf("Wrote randomly-generated layer %q (%d/%d bytes) to destination", digest, size, decompressedSize) - if _, err := dest.PutBlob(context.Background(), strings.NewReader(config), configInfo, cache, false); err != nil { - t.Fatalf("Error saving config to destination: %v", err) - } - manifest := strings.ReplaceAll(manifestFmt, "%lh", digest.String()) - manifest = strings.ReplaceAll(manifest, "%ch", configInfo.Digest.String()) - manifest = strings.ReplaceAll(manifest, "%ls", fmt.Sprintf("%d", size)) - manifest = strings.ReplaceAll(manifest, "%cs", fmt.Sprintf("%d", configInfo.Size)) - li := digest.Hex() - manifest = strings.ReplaceAll(manifest, "%li", li) - manifest = strings.ReplaceAll(manifest, "%ci", sum.Hex()) + layer := makeLayer(t, compression) + _ = layer.storeBlob(t, dest, cache, manifest.DockerV2Schema2LayerMediaType) + t.Logf("Wrote randomly-generated layer %q (%d/%d bytes) to destination", layer.compressedDigest, layer.compressedSize, layer.uncompressedSize) + _ = config.storeBlob(t, dest, cache, manifest.DockerV2Schema2ConfigMediaType) + + manifest := strings.ReplaceAll(manifestFmt, "%lh", layer.compressedDigest.String()) + manifest = strings.ReplaceAll(manifest, "%ch", config.compressedDigest.String()) + manifest = strings.ReplaceAll(manifest, "%ls", fmt.Sprintf("%d", layer.compressedSize)) + manifest = strings.ReplaceAll(manifest, "%cs", fmt.Sprintf("%d", config.compressedSize)) + manifest = 
strings.ReplaceAll(manifest, "%li", layer.compressedDigest.Hex()) + manifest = strings.ReplaceAll(manifest, "%ci", config.compressedDigest.Hex()) t.Logf("this manifest is %q", manifest) - if err := dest.PutManifest(context.Background(), []byte(manifest), nil); err != nil { - t.Fatalf("Error saving manifest to destination: %v", err) - } - if err := dest.PutSignatures(context.Background(), signatures, nil); err != nil { - t.Fatalf("Error saving signatures to destination: %v", err) - } + err = dest.PutManifest(context.Background(), []byte(manifest), nil) + require.NoError(t, err) + err = dest.PutSignatures(context.Background(), signatures, nil) + require.NoError(t, err) unparsedToplevel := unparsedImage{ imageReference: nil, manifestBytes: []byte(manifest), manifestType: imanifest.GuessMIMEType([]byte(manifest)), signatures: signatures, } - if err := dest.Commit(context.Background(), &unparsedToplevel); err != nil { - t.Fatalf("Error committing changes to destination: %v", err) - } - dest.Close() + err = dest.Commit(context.Background(), &unparsedToplevel) + require.NoError(t, err) + err = dest.Close() + require.NoError(t, err) - img, err := ref.NewImage(context.Background(), systemContext()) - if err != nil { - t.Fatalf("NewImage(%q) returned error %v", ref.StringWithinTransport(), err) - } + img, err := ref.NewImage(context.Background(), nil) + require.NoError(t, err) imageConfigInfo := img.ConfigInfo() if imageConfigInfo.Digest != "" { blob, err := img.ConfigBlob(context.Background()) - if err != nil { - t.Fatalf("image %q claimed there was a config blob, but couldn't produce it: %v", ref.StringWithinTransport(), err) - } - sum := ddigest.SHA256.FromBytes(blob) - if sum != configInfo.Digest { - t.Fatalf("image config blob digest for %q doesn't match", ref.StringWithinTransport()) - } - if int64(len(blob)) != configInfo.Size { - t.Fatalf("image config size for %q changed from %d to %d", ref.StringWithinTransport(), configInfo.Size, len(blob)) - } + 
require.NoError(t, err) + sum := digest.SHA256.FromBytes(blob) + assert.Equal(t, config.compressedDigest, sum) + assert.Len(t, blob, int(config.compressedSize)) } layerInfos := img.LayerInfos() - if layerInfos == nil { - t.Fatalf("image for %q returned empty layer list", ref.StringWithinTransport()) - } + assert.NotNil(t, layerInfos) imageInfo, err := img.Inspect(context.Background()) - if err != nil { - t.Fatalf("Inspect(%q) returned error %v", ref.StringWithinTransport(), err) - } - if imageInfo.Created.IsZero() { - t.Fatalf("Image %q claims to have been created at time 0", ref.StringWithinTransport()) - } + require.NoError(t, err) + assert.False(t, imageInfo.Created.IsZero()) - src, err := ref.NewImageSource(context.Background(), systemContext()) - if err != nil { - t.Fatalf("NewImageSource(%q) returned error %v", ref.StringWithinTransport(), err) - } - if src == nil { - t.Fatalf("NewImageSource(%q) returned no source", ref.StringWithinTransport()) - } - // Note that we would strip a digest here, but not a tag. + src, err := ref.NewImageSource(context.Background(), nil) + require.NoError(t, err) if src.Reference().StringWithinTransport() != ref.StringWithinTransport() { // As long as it's only the addition of an ID suffix, that's okay. 
- if !strings.HasPrefix(src.Reference().StringWithinTransport(), ref.StringWithinTransport()+"@") { - t.Fatalf("NewImageSource(%q) changed the reference to %q", ref.StringWithinTransport(), src.Reference().StringWithinTransport()) - } + assert.True(t, strings.HasPrefix(src.Reference().StringWithinTransport(), ref.StringWithinTransport()+"@")) } _, manifestType, err := src.GetManifest(context.Background(), nil) - if err != nil { - t.Fatalf("GetManifest(%q) returned error %v", ref.StringWithinTransport(), err) - } + require.NoError(t, err) t.Logf("this manifest's type appears to be %q", manifestType) - sum, err = imanifest.Digest([]byte(manifest)) - if err != nil { - t.Fatalf("manifest.Digest() returned error %v", err) - } - retrieved, _, err := src.GetManifest(context.Background(), &sum) - if err != nil { - t.Fatalf("GetManifest(%q) with an instanceDigest is supposed to succeed", ref.StringWithinTransport()) - } - if string(retrieved) != manifest { - t.Fatalf("GetManifest(%q) with an instanceDigest retrieved a different manifest", ref.StringWithinTransport()) - } + instanceDigest, err := imanifest.Digest([]byte(manifest)) + require.NoError(t, err) + retrieved, _, err := src.GetManifest(context.Background(), &instanceDigest) + require.NoError(t, err) + assert.Equal(t, manifest, string(retrieved)) sigs, err := src.GetSignatures(context.Background(), nil) - if err != nil { - t.Fatalf("GetSignatures(%q) returned error %v", ref.StringWithinTransport(), err) - } - if len(sigs) < len(signatures) { - t.Fatalf("Lost %d signatures", len(signatures)-len(sigs)) - } - if len(sigs) > len(signatures) { - t.Fatalf("Gained %d signatures", len(sigs)-len(signatures)) - } - for i := range sigs { - if !bytes.Equal(sigs[i], signatures[i]) { - t.Fatalf("Signature %d was corrupted", i) - } - } - sigs2, err := src.GetSignatures(context.Background(), &sum) - if err != nil { - t.Fatalf("GetSignatures(%q) with instance %s returned error %v", ref.StringWithinTransport(), sum.String(), err) - } 
- if !reflect.DeepEqual(sigs, sigs2) { - t.Fatalf("GetSignatures(%q) with instance %s returned a different result", ref.StringWithinTransport(), sum.String()) - } + require.NoError(t, err) + assert.Equal(t, signatures, sigs) + sigs2, err := src.GetSignatures(context.Background(), &instanceDigest) + require.NoError(t, err) + assert.Equal(t, sigs, sigs2) for _, layerInfo := range layerInfos { buf := bytes.Buffer{} layer, size, err := src.GetBlob(context.Background(), layerInfo, cache) - if err != nil { - t.Fatalf("Error reading layer %q from %q", layerInfo.Digest, ref.StringWithinTransport()) - } + require.NoError(t, err) t.Logf("Decompressing blob %q, blob size = %d, layerInfo.Size = %d bytes", layerInfo.Digest, size, layerInfo.Size) hasher := sha256.New() compressed := ioutils.NewWriteCounter(hasher) countedLayer := io.TeeReader(layer, compressed) decompressed, err := archive.DecompressStream(countedLayer) - if err != nil { - t.Fatalf("Error decompressing layer %q from %q", layerInfo.Digest, ref.StringWithinTransport()) - } + require.NoError(t, err) n, err := io.Copy(&buf, decompressed) require.NoError(t, err) layer.Close() - if layerInfo.Size >= 0 && compressed.Count != layerInfo.Size { - t.Fatalf("Blob size is different than expected: %d != %d, read %d", compressed.Count, layerInfo.Size, n) + if layerInfo.Size >= 0 { + assert.Equal(t, layerInfo.Size, compressed.Count) + assert.Equal(t, layerInfo.Size, n) } - if size >= 0 && compressed.Count != size { - t.Fatalf("Blob size mismatch: %d != %d, read %d", compressed.Count, size, n) + if size >= 0 { + assert.Equal(t, size, compressed.Count) } sum := hasher.Sum(nil) - if ddigest.NewDigestFromBytes(ddigest.SHA256, sum) != layerInfo.Digest { - t.Fatalf("Layer blob digest for %q doesn't match", ref.StringWithinTransport()) - } - } - src.Close() - img.Close() - err = ref.DeleteImage(context.Background(), systemContext()) - if err != nil { - t.Fatalf("DeleteImage(%q) returned error %v", ref.StringWithinTransport(), err) + 
assert.Equal(t, layerInfo.Digest, digest.NewDigestFromBytes(digest.SHA256, sum)) } + err = src.Close() + require.NoError(t, err) + err = img.Close() + require.NoError(t, err) + err = ref.DeleteImage(context.Background(), nil) + require.NoError(t, err) } } func TestDuplicateName(t *testing.T) { - if os.Geteuid() != 0 { - t.Skip("TestDuplicateName requires root privileges") - } + ensureTestCanCreateImages(t) newStore(t) cache := memory.New() ref, err := Transport.ParseReference("test") - if err != nil { - t.Fatalf("ParseReference(%q) returned error %v", "test", err) - } - if ref == nil { - t.Fatalf("ParseReference returned nil reference") - } - - dest, err := ref.NewImageDestination(context.Background(), systemContext()) - if err != nil { - t.Fatalf("NewImageDestination(%q, first pass) returned error %v", ref.StringWithinTransport(), err) - } - if dest == nil { - t.Fatalf("NewImageDestination(%q, first pass) returned no destination", ref.StringWithinTransport()) - } - digest, _, size, blob := makeLayer(t, archive.Uncompressed) - if _, err := dest.PutBlob(context.Background(), bytes.NewReader(blob), types.BlobInfo{ - Size: size, - Digest: digest, - }, cache, false); err != nil { - t.Fatalf("Error saving randomly-generated layer to destination, first pass: %v", err) - } - manifest := fmt.Sprintf(` - { - "schemaVersion": 2, - "mediaType": "application/vnd.docker.distribution.manifest.v2+json", - "layers": [ - { - "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", - "digest": "%s", - "size": %d - } - ] - } - `, digest, size) - if err := dest.PutManifest(context.Background(), []byte(manifest), nil); err != nil { - t.Fatalf("Error storing manifest to destination: %v", err) - } - unparsedToplevel := unparsedImage{ - imageReference: nil, - manifestBytes: []byte(manifest), - manifestType: imanifest.GuessMIMEType([]byte(manifest)), - signatures: nil, - } - if err := dest.Commit(context.Background(), &unparsedToplevel); err != nil { - t.Fatalf("Error committing 
changes to destination, first pass: %v", err) - } - dest.Close() + require.NoError(t, err) - dest, err = ref.NewImageDestination(context.Background(), systemContext()) - if err != nil { - t.Fatalf("NewImageDestination(%q, second pass) returned error %v", ref.StringWithinTransport(), err) - } - if dest == nil { - t.Fatalf("NewImageDestination(%q, second pass) returned no destination", ref.StringWithinTransport()) - } - digest, _, size, blob = makeLayer(t, archive.Gzip) - if _, err := dest.PutBlob(context.Background(), bytes.NewReader(blob), types.BlobInfo{ - Size: size, - Digest: digest, - }, cache, false); err != nil { - t.Fatalf("Error saving randomly-generated layer to destination, second pass: %v", err) - } - manifest = fmt.Sprintf(` - { - "schemaVersion": 2, - "mediaType": "application/vnd.docker.distribution.manifest.v2+json", - "layers": [ - { - "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", - "digest": "%s", - "size": %d - } - ] - } - `, digest, size) - if err := dest.PutManifest(context.Background(), []byte(manifest), nil); err != nil { - t.Fatalf("Error storing manifest to destination: %v", err) - } - unparsedToplevel = unparsedImage{ - imageReference: nil, - manifestBytes: []byte(manifest), - manifestType: imanifest.GuessMIMEType([]byte(manifest)), - signatures: nil, - } - if err := dest.Commit(context.Background(), &unparsedToplevel); err != nil { - t.Fatalf("Error committing changes to destination, second pass: %v", err) - } - dest.Close() + createImage(t, ref, cache, []testBlob{makeLayer(t, archive.Uncompressed)}, nil) + createImage(t, ref, cache, []testBlob{makeLayer(t, archive.Gzip)}, nil) } func TestDuplicateID(t *testing.T) { - if os.Geteuid() != 0 { - t.Skip("TestDuplicateID requires root privileges") - } + ensureTestCanCreateImages(t) newStore(t) cache := memory.New() ref, err := Transport.ParseReference("@aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") - if err != nil { - t.Fatalf("ParseReference(%q) 
returned error %v", "test", err) - } - if ref == nil { - t.Fatalf("ParseReference returned nil reference") - } + require.NoError(t, err) - dest, err := ref.NewImageDestination(context.Background(), systemContext()) - if err != nil { - t.Fatalf("NewImageDestination(%q, first pass) returned error %v", ref.StringWithinTransport(), err) - } - if dest == nil { - t.Fatalf("NewImageDestination(%q, first pass) returned no destination", ref.StringWithinTransport()) - } - digest, _, size, blob := makeLayer(t, archive.Gzip) - if _, err := dest.PutBlob(context.Background(), bytes.NewReader(blob), types.BlobInfo{ - Size: size, - Digest: digest, - }, cache, false); err != nil { - t.Fatalf("Error saving randomly-generated layer to destination, first pass: %v", err) - } - manifest := fmt.Sprintf(` - { - "schemaVersion": 2, - "mediaType": "application/vnd.docker.distribution.manifest.v2+json", - "layers": [ - { - "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", - "digest": "%s", - "size": %d - } - ] - } - `, digest, size) - if err := dest.PutManifest(context.Background(), []byte(manifest), nil); err != nil { - t.Fatalf("Error storing manifest to destination: %v", err) - } - unparsedToplevel := unparsedImage{ - imageReference: nil, - manifestBytes: []byte(manifest), - manifestType: imanifest.GuessMIMEType([]byte(manifest)), - signatures: nil, - } - if err := dest.Commit(context.Background(), &unparsedToplevel); err != nil { - t.Fatalf("Error committing changes to destination, first pass: %v", err) - } - dest.Close() + createImage(t, ref, cache, []testBlob{makeLayer(t, archive.Gzip)}, nil) - dest, err = ref.NewImageDestination(context.Background(), systemContext()) - if err != nil { - t.Fatalf("NewImageDestination(%q, second pass) returned error %v", ref.StringWithinTransport(), err) - } - if dest == nil { - t.Fatalf("NewImageDestination(%q, second pass) returned no destination", ref.StringWithinTransport()) - } - digest, _, size, blob = makeLayer(t, archive.Gzip) - 
if _, err := dest.PutBlob(context.Background(), bytes.NewReader(blob), types.BlobInfo{ - Size: size, - Digest: digest, - }, cache, false); err != nil { - t.Fatalf("Error saving randomly-generated layer to destination, second pass: %v", err) - } - manifest = fmt.Sprintf(` - { - "schemaVersion": 2, - "mediaType": "application/vnd.docker.distribution.manifest.v2+json", - "layers": [ - { - "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", - "digest": "%s", - "size": %d - } - ] - } - `, digest, size) - if err := dest.PutManifest(context.Background(), []byte(manifest), nil); err != nil { - t.Fatalf("Error storing manifest to destination: %v", err) - } - unparsedToplevel = unparsedImage{ - imageReference: nil, - manifestBytes: []byte(manifest), - manifestType: imanifest.GuessMIMEType([]byte(manifest)), - signatures: nil, - } - if err := dest.Commit(context.Background(), &unparsedToplevel); !errors.Is(err, storage.ErrDuplicateID) { - if err != nil { - t.Fatalf("Wrong error committing changes to destination, second pass: %v", err) - } - t.Fatal("Incorrectly succeeded committing changes to destination, second pass: no error") - } - dest.Close() + dest, unparsedToplevel := createUncommittedImageDest(t, ref, cache, + []testBlob{makeLayer(t, archive.Gzip)}, nil) + err = dest.Commit(context.Background(), unparsedToplevel) + require.Error(t, err) + assert.ErrorIs(t, err, storage.ErrDuplicateID) + err = dest.Close() + require.NoError(t, err) } func TestDuplicateNameID(t *testing.T) { - if os.Geteuid() != 0 { - t.Skip("TestDuplicateNameID requires root privileges") - } + ensureTestCanCreateImages(t) newStore(t) cache := memory.New() ref, err := Transport.ParseReference("test@aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") - if err != nil { - t.Fatalf("ParseReference(%q) returned error %v", "test", err) - } - if ref == nil { - t.Fatalf("ParseReference returned nil reference") - } + require.NoError(t, err) - dest, err := 
ref.NewImageDestination(context.Background(), systemContext()) - if err != nil { - t.Fatalf("NewImageDestination(%q, first pass) returned error %v", ref.StringWithinTransport(), err) - } - if dest == nil { - t.Fatalf("NewImageDestination(%q, first pass) returned no destination", ref.StringWithinTransport()) - } - digest, _, size, blob := makeLayer(t, archive.Gzip) - if _, err := dest.PutBlob(context.Background(), bytes.NewReader(blob), types.BlobInfo{ - Size: size, - Digest: digest, - }, cache, false); err != nil { - t.Fatalf("Error saving randomly-generated layer to destination, first pass: %v", err) - } - manifest := fmt.Sprintf(` - { - "schemaVersion": 2, - "mediaType": "application/vnd.docker.distribution.manifest.v2+json", - "layers": [ - { - "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", - "digest": "%s", - "size": %d - } - ] - } - `, digest, size) - if err := dest.PutManifest(context.Background(), []byte(manifest), nil); err != nil { - t.Fatalf("Error storing manifest to destination: %v", err) - } - unparsedToplevel := unparsedImage{ - imageReference: nil, - manifestBytes: []byte(manifest), - manifestType: imanifest.GuessMIMEType([]byte(manifest)), - signatures: nil, - } - if err := dest.Commit(context.Background(), &unparsedToplevel); err != nil { - t.Fatalf("Error committing changes to destination, first pass: %v", err) - } - dest.Close() + createImage(t, ref, cache, []testBlob{makeLayer(t, archive.Gzip)}, nil) - dest, err = ref.NewImageDestination(context.Background(), systemContext()) - if err != nil { - t.Fatalf("NewImageDestination(%q, second pass) returned error %v", ref.StringWithinTransport(), err) - } - if dest == nil { - t.Fatalf("NewImageDestination(%q, second pass) returned no destination", ref.StringWithinTransport()) - } - digest, _, size, blob = makeLayer(t, archive.Gzip) - if _, err := dest.PutBlob(context.Background(), bytes.NewReader(blob), types.BlobInfo{ - Size: size, - Digest: digest, - }, cache, false); err != nil { 
- t.Fatalf("Error saving randomly-generated layer to destination, second pass: %v", err) - } - manifest = fmt.Sprintf(` - { - "schemaVersion": 2, - "mediaType": "application/vnd.docker.distribution.manifest.v2+json", - "layers": [ - { - "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", - "digest": "%s", - "size": %d - } - ] - } - `, digest, size) - if err := dest.PutManifest(context.Background(), []byte(manifest), nil); err != nil { - t.Fatalf("Error storing manifest to destination: %v", err) - } - unparsedToplevel = unparsedImage{ - imageReference: nil, - manifestBytes: []byte(manifest), - manifestType: imanifest.GuessMIMEType([]byte(manifest)), - signatures: nil, - } - if err := dest.Commit(context.Background(), &unparsedToplevel); !errors.Is(err, storage.ErrDuplicateID) { - if err != nil { - t.Fatalf("Wrong error committing changes to destination, second pass: %v", err) - } - t.Fatal("Incorrectly succeeded committing changes to destination, second pass: no error") - } - dest.Close() + dest, unparsedToplevel := createUncommittedImageDest(t, ref, cache, + []testBlob{makeLayer(t, archive.Gzip)}, nil) + err = dest.Commit(context.Background(), unparsedToplevel) + require.Error(t, err) + assert.ErrorIs(t, err, storage.ErrDuplicateID) + err = dest.Close() + require.NoError(t, err) } func TestNamespaces(t *testing.T) { @@ -876,237 +616,81 @@ func TestNamespaces(t *testing.T) { } func TestSize(t *testing.T) { - if os.Geteuid() != 0 { - t.Skip("TestSize requires root privileges") - } - - config := `{"config":{"labels":{}},"created":"2006-01-02T15:04:05Z"}` - sum := ddigest.SHA256.FromBytes([]byte(config)) - configInfo := types.BlobInfo{ - Digest: sum, - Size: int64(len(config)), - } + ensureTestCanCreateImages(t) newStore(t) cache := memory.New() - ref, err := Transport.ParseReference("test") - if err != nil { - t.Fatalf("ParseReference(%q) returned error %v", "test", err) - } - if ref == nil { - t.Fatalf("ParseReference returned nil reference") + layer1 
:= makeLayer(t, archive.Gzip) + layer2 := makeLayer(t, archive.Gzip) + configBytes := []byte(`{"config":{"labels":{}},"created":"2006-01-02T15:04:05Z"}`) + config := testBlob{ + compressedDigest: digest.SHA256.FromBytes(configBytes), + uncompressedSize: int64(len(configBytes)), + compressedSize: int64(len(configBytes)), + data: configBytes, } - dest, err := ref.NewImageDestination(context.Background(), systemContext()) - if err != nil { - t.Fatalf("NewImageDestination(%q) returned error %v", ref.StringWithinTransport(), err) - } - if dest == nil { - t.Fatalf("NewImageDestination(%q) returned no destination", ref.StringWithinTransport()) - } - if _, err := dest.PutBlob(context.Background(), strings.NewReader(config), configInfo, cache, false); err != nil { - t.Fatalf("Error saving config to destination: %v", err) - } - digest1, usize1, size1, blob := makeLayer(t, archive.Gzip) - if _, err := dest.PutBlob(context.Background(), bytes.NewReader(blob), types.BlobInfo{ - Size: size1, - Digest: digest1, - }, cache, false); err != nil { - t.Fatalf("Error saving randomly-generated layer 1 to destination: %v", err) - } - digest2, usize2, size2, blob := makeLayer(t, archive.Gzip) - if _, err := dest.PutBlob(context.Background(), bytes.NewReader(blob), types.BlobInfo{ - Size: size2, - Digest: digest2, - }, cache, false); err != nil { - t.Fatalf("Error saving randomly-generated layer 2 to destination: %v", err) - } - manifest := fmt.Sprintf(` - { - "schemaVersion": 2, - "mediaType": "application/vnd.docker.distribution.manifest.v2+json", - "config": { - "mediaType": "application/vnd.docker.container.image.v1+json", - "size": %d, - "digest": "%s" - }, - "layers": [ - { - "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", - "digest": "%s", - "size": %d - }, - { - "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", - "digest": "%s", - "size": %d - } - ] - } - `, configInfo.Size, configInfo.Digest, digest1, size1, digest2, size2) - if err := 
dest.PutManifest(context.Background(), []byte(manifest), nil); err != nil { - t.Fatalf("Error storing manifest to destination: %v", err) - } - unparsedToplevel := unparsedImage{ - imageReference: nil, - manifestBytes: []byte(manifest), - manifestType: imanifest.GuessMIMEType([]byte(manifest)), - signatures: nil, - } - if err := dest.Commit(context.Background(), &unparsedToplevel); err != nil { - t.Fatalf("Error committing changes to destination: %v", err) - } - dest.Close() + ref, err := Transport.ParseReference("test") + require.NoError(t, err) + + createImage(t, ref, cache, []testBlob{layer1, layer2}, &config) + + img, err := ref.NewImage(context.Background(), nil) + require.NoError(t, err) + manifest, _, err := img.Manifest(context.Background()) + require.NoError(t, err) - img, err := ref.NewImage(context.Background(), systemContext()) - if err != nil { - t.Fatalf("NewImage(%q) returned error %v", ref.StringWithinTransport(), err) - } usize, err := img.Size() - if usize == -1 || err != nil { - t.Fatalf("Error calculating image size: %v", err) - } - if int(usize) != len(config)+int(usize1)+int(usize2)+2*len(manifest) { - t.Fatalf("Unexpected image size: %d != %d + %d + %d + %d (%d)", usize, len(config), usize1, usize2, len(manifest), len(config)+int(usize1)+int(usize2)+2*len(manifest)) - } - img.Close() + require.NoError(t, err) + require.NotEqual(t, -1, usize) + + assert.Equal(t, config.compressedSize+layer1.uncompressedSize+layer2.uncompressedSize+2*int64(len(manifest)), usize) + err = img.Close() + require.NoError(t, err) } func TestDuplicateBlob(t *testing.T) { - if os.Geteuid() != 0 { - t.Skip("TestDuplicateBlob requires root privileges") - } - - config := `{"config":{"labels":{}},"created":"2006-01-02T15:04:05Z"}` - sum := ddigest.SHA256.FromBytes([]byte(config)) - configInfo := types.BlobInfo{ - Digest: sum, - Size: int64(len(config)), - } + ensureTestCanCreateImages(t) newStore(t) cache := memory.New() ref, err := Transport.ParseReference("test") - if err 
!= nil { - t.Fatalf("ParseReference(%q) returned error %v", "test", err) - } - if ref == nil { - t.Fatalf("ParseReference returned nil reference") - } + require.NoError(t, err) - dest, err := ref.NewImageDestination(context.Background(), systemContext()) - if err != nil { - t.Fatalf("NewImageDestination(%q) returned error %v", ref.StringWithinTransport(), err) - } - if dest == nil { - t.Fatalf("NewImageDestination(%q) returned no destination", ref.StringWithinTransport()) - } - digest1, _, size1, blob1 := makeLayer(t, archive.Gzip) - if _, err := dest.PutBlob(context.Background(), bytes.NewReader(blob1), types.BlobInfo{ - Size: size1, - Digest: digest1, - }, cache, false); err != nil { - t.Fatalf("Error saving randomly-generated layer 1 to destination (first copy): %v", err) - } - digest2, _, size2, blob2 := makeLayer(t, archive.Gzip) - if _, err := dest.PutBlob(context.Background(), bytes.NewReader(blob2), types.BlobInfo{ - Size: size2, - Digest: digest2, - }, cache, false); err != nil { - t.Fatalf("Error saving randomly-generated layer 2 to destination (first copy): %v", err) - } - if _, err := dest.PutBlob(context.Background(), bytes.NewReader(blob1), types.BlobInfo{ - Size: size1, - Digest: digest1, - }, cache, false); err != nil { - t.Fatalf("Error saving randomly-generated layer 1 to destination (second copy): %v", err) - } - if _, err := dest.PutBlob(context.Background(), bytes.NewReader(blob2), types.BlobInfo{ - Size: size2, - Digest: digest2, - }, cache, false); err != nil { - t.Fatalf("Error saving randomly-generated layer 2 to destination (second copy): %v", err) - } - manifest := fmt.Sprintf(` - { - "schemaVersion": 2, - "mediaType": "application/vnd.docker.distribution.manifest.v2+json", - "config": { - "mediaType": "application/vnd.docker.container.image.v1+json", - "size": %d, - "digest": "%s" - }, - "layers": [ - { - "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", - "digest": "%s", - "size": %d - }, - { - "mediaType": 
"application/vnd.docker.image.rootfs.diff.tar.gzip", - "digest": "%s", - "size": %d - }, - { - "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", - "digest": "%s", - "size": %d - }, - { - "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", - "digest": "%s", - "size": %d - } - ] - } - `, configInfo.Size, configInfo.Digest, digest1, size1, digest2, size2, digest1, size1, digest2, size2) - if err := dest.PutManifest(context.Background(), []byte(manifest), nil); err != nil { - t.Fatalf("Error storing manifest to destination: %v", err) + layer1 := makeLayer(t, archive.Gzip) + layer2 := makeLayer(t, archive.Gzip) + configBytes := []byte(`{"config":{"labels":{}},"created":"2006-01-02T15:04:05Z"}`) + config := testBlob{ + compressedDigest: digest.SHA256.FromBytes(configBytes), + uncompressedSize: int64(len(configBytes)), + compressedSize: int64(len(configBytes)), + data: configBytes, } - unparsedToplevel := unparsedImage{ - imageReference: nil, - manifestBytes: []byte(manifest), - manifestType: imanifest.GuessMIMEType([]byte(manifest)), - signatures: nil, - } - if err := dest.Commit(context.Background(), &unparsedToplevel); err != nil { - t.Fatalf("Error committing changes to destination: %v", err) - } - dest.Close() - img, err := ref.NewImage(context.Background(), systemContext()) - if err != nil { - t.Fatalf("NewImage(%q) returned error %v", ref.StringWithinTransport(), err) - } - src, err := ref.NewImageSource(context.Background(), systemContext()) - if err != nil { - t.Fatalf("NewImageSource(%q) returned error %v", ref.StringWithinTransport(), err) - } + createImage(t, ref, cache, []testBlob{layer1, layer2, layer1, layer2}, &config) + + img, err := ref.NewImage(context.Background(), nil) + require.NoError(t, err) + src, err := ref.NewImageSource(context.Background(), nil) + require.NoError(t, err) source, ok := src.(*storageImageSource) - if !ok { - t.Fatalf("ImageSource is not a storage image") - } + require.True(t, ok) + layers := 
[]string{} layersInfo, err := img.LayerInfosForCopy(context.Background()) - if err != nil { - t.Fatalf("LayerInfosForCopy() returned error %v", err) - } + require.NoError(t, err) for _, layerInfo := range layersInfo { digestLayers, _ := source.imageRef.transport.store.LayersByUncompressedDigest(layerInfo.Digest) rc, _, layerID, err := source.getBlobAndLayerID(layerInfo.Digest, digestLayers) - if err != nil { - t.Fatalf("getBlobAndLayerID(%q) returned error %v", layerInfo.Digest, err) - } + require.NoError(t, err) _, err = io.Copy(io.Discard, rc) require.NoError(t, err) rc.Close() layers = append(layers, layerID) } - if len(layers) != 4 { - t.Fatalf("Incorrect number of layers: %d", len(layers)) - } + assert.Len(t, layers, 4) for i, layerID := range layers { for j, otherID := range layers { if i != j && layerID == otherID { @@ -1114,8 +698,10 @@ func TestDuplicateBlob(t *testing.T) { } } } - src.Close() - img.Close() + err = src.Close() + require.NoError(t, err) + err = img.Close() + require.NoError(t, err) } type unparsedImage struct { diff --git a/storage/storage_transport.go b/storage/storage_transport.go index 58ba3ee651..b981953ad4 100644 --- a/storage/storage_transport.go +++ b/storage/storage_transport.go @@ -48,9 +48,26 @@ type StoreTransport interface { GetStoreIfSet() storage.Store // GetImage retrieves the image from the transport's store that's named // by the reference. + // Deprecated: Surprisingly, with a StoreTransport reference which contains an ID, + // this ignores that ID; and repeated calls of GetStoreImage with the same named reference + // can return different images, with no way for the caller to "freeze" the storage.Image identity + // without discarding the name entirely. + // + // Use storage.ResolveReference instead; note that if the image is not found, ResolveReference returns + // c/image/v5/storage.ErrNoSuchImage, not c/storage.ErrImageUnknown. 
GetImage(types.ImageReference) (*storage.Image, error) // GetStoreImage retrieves the image from a specified store that's named // by the reference. + // + // Deprecated: Surprisingly, with a StoreTransport reference which contains an ID, + // this ignores that ID; and repeated calls of GetStoreImage with the same named reference + // can return different images, with no way for the caller to "freeze" the storage.Image identity + // without discarding the name entirely. + // + // Also, a StoreTransport reference already contains a store, so providing another one is redundant. + // + // Use storage.ResolveReference instead; note that if the image is not found, ResolveReference returns + // c/image/v5/storage.ErrNoSuchImage, not c/storage.ErrImageUnknown. GetStoreImage(storage.Store, types.ImageReference) (*storage.Image, error) // ParseStoreReference parses a reference, overriding any store // specification that it may contain. @@ -196,7 +213,7 @@ func (s *storageTransport) GetStore() (storage.Store, error) { // Return the transport's previously-set store. If we don't have one // of those, initialize one now. if s.store == nil { - options, err := storage.DefaultStoreOptionsAutoDetectUID() + options, err := storage.DefaultStoreOptions() if err != nil { return nil, err } @@ -290,6 +307,15 @@ func (s *storageTransport) ParseReference(reference string) (types.ImageReferenc return s.ParseStoreReference(store, reference) } +// Deprecated: Surprisingly, with a StoreTransport reference which contains an ID, +// this ignores that ID; and repeated calls of GetStoreImage with the same named reference +// can return different images, with no way for the caller to "freeze" the storage.Image identity +// without discarding the name entirely. +// +// Also, a StoreTransport reference already contains a store, so providing another one is redundant. 
+// +// Use storage.ResolveReference instead; note that if the image is not found, ResolveReference returns +// c/image/v5/storage.ErrNoSuchImage, not c/storage.ErrImageUnknown. func (s storageTransport) GetStoreImage(store storage.Store, ref types.ImageReference) (*storage.Image, error) { dref := ref.DockerReference() if dref != nil { @@ -306,6 +332,13 @@ func (s storageTransport) GetStoreImage(store storage.Store, ref types.ImageRefe return nil, storage.ErrImageUnknown } +// Deprecated: Surprisingly, with a StoreTransport reference which contains an ID, +// this ignores that ID; and repeated calls of GetStoreImage with the same named reference +// can return different images, with no way for the caller to "freeze" the storage.Image identity +// without discarding the name entirely. +// +// Use storage.ResolveReference instead; note that if the image is not found, ResolveReference returns +// c/image/v5/storage.ErrNoSuchImage, not c/storage.ErrImageUnknown. func (s *storageTransport) GetImage(ref types.ImageReference) (*storage.Image, error) { store, err := s.GetStore() if err != nil { diff --git a/types/types.go b/types/types.go index 33adb5f1df..180a98c5ba 100644 --- a/types/types.go +++ b/types/types.go @@ -445,7 +445,7 @@ type ImageCloser interface { Close() error } -// ManifestUpdateOptions is a way to pass named optional arguments to Image.UpdatedManifest +// ManifestUpdateOptions is a way to pass named optional arguments to Image.UpdatedImage type ManifestUpdateOptions struct { LayerInfos []BlobInfo // Complete BlobInfos (size+digest+urls+annotations) which should replace the originals, in order (the root layer first, and then successive layered layers). BlobInfos' MediaType fields are ignored. EmbeddedDockerReference reference.Named @@ -457,7 +457,7 @@ type ManifestUpdateOptions struct { // ManifestUpdateInformation is a component of ManifestUpdateOptions, named here // only to make writing struct literals possible. 
type ManifestUpdateInformation struct { - Destination ImageDestination // and yes, UpdatedManifest may write to Destination (see the schema2 → schema1 conversion logic in image/docker_schema2.go) + Destination ImageDestination // and yes, UpdatedImage may write to Destination (see the schema2 → schema1 conversion logic in image/docker_schema2.go) LayerInfos []BlobInfo // Complete BlobInfos (size+digest) which have been uploaded, in order (the root layer first, and then successive layered layers) LayerDiffIDs []digest.Digest // Digest values for the _uncompressed_ contents of the blobs which have been uploaded, in the same order. } @@ -594,6 +594,10 @@ type SystemContext struct { // this field is ignored if `AuthFilePath` is set (we favor the newer format); // only reading of this data is supported; LegacyFormatAuthFilePath string + // If set, a path to a Docker-compatible "config.json" file containing credentials; and no other files are processed. + // This must not be set if AuthFilePath is set. + // Only credentials and credential helpers in this file are processed, not any other configuration in this file. + DockerCompatAuthFilePath string // If not "", overrides the use of platform.GOARCH when choosing an image or verifying architecture match. ArchitectureChoice string // If not "", overrides the use of platform.GOOS when choosing an image or verifying OS match. diff --git a/version/version.go b/version/version.go index c270910b0f..0a057ddf0c 100644 --- a/version/version.go +++ b/version/version.go @@ -6,9 +6,9 @@ const ( // VersionMajor is for an API incompatible changes VersionMajor = 5 // VersionMinor is for functionality in a backwards-compatible manner - VersionMinor = 27 + VersionMinor = 29 // VersionPatch is for backwards-compatible bug fixes - VersionPatch = 0 + VersionPatch = 1 // VersionDev indicates development branch. Releases will be empty string. VersionDev = "-dev"