Skip pulling layers which can be provided from the backing store
Signed-off-by: Kohei Tokunaga <[email protected]>
ktock committed Jun 17, 2020
1 parent 4158eb2 commit 2d16660
Showing 2 changed files with 97 additions and 11 deletions.
45 changes: 45 additions & 0 deletions copy/copy.go
@@ -3,6 +3,7 @@ package copy
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
@@ -148,6 +149,24 @@ const (
// only accept one image (i.e., it cannot accept lists), an error
// should be returned.
CopySpecificImages

// Labels that carry information about the target image and layers.
// They can be included in the BlobInfo as annotations and passed to the
// destination. These labels help the destination discover the target
// contents on its side and avoid duplicate downloads.
//
// layerTargetDiffID is a label containing the digest of the uncompressed
// contents of the target layer.
layerTargetDiffID = "containers/image/target.diffID"
// layerTargetDigest is a label containing the digest of the (possibly
// compressed) target layer.
layerTargetDigest = "containers/image/target.layerdigest"
// layerTargetReference is a label containing the reference string of the
// target image which contains the target layer.
layerTargetReference = "containers/image/target.reference"
// layerTargetImageLayers is a label containing the comma-separated digests
// of the layers contained in the target image.
layerTargetImageLayers = "containers/image/target.layers"
)

// ImageListSelection is one of CopySystemImage, CopyAllImages, or
@@ -847,6 +866,17 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error {
}
}

// Get DiffIDs from config if possible
var chain []digest.Digest
if cfgInfo := ic.src.ConfigInfo(); cfgInfo.Digest != "" {
if configBlob, err := ic.src.ConfigBlob(ctx); err == nil {
imageConfig := &manifest.Schema2Image{}
if err := json.Unmarshal(configBlob, imageConfig); err == nil && len(srcInfos) == len(imageConfig.RootFS.DiffIDs) {
chain = imageConfig.RootFS.DiffIDs
}
}
}

if err := func() error { // A scope for defer
progressPool, progressCleanup := ic.c.newProgressPool(ctx)
defer func() {
@@ -855,11 +885,26 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error {
progressCleanup()
}()

// Build a comma-separated list of every layer digest in the image; it is attached
// to each layer below as the layerTargetImageLayers annotation.
var layerDigests string
for _, srcLayer := range srcInfos {
layerDigests += fmt.Sprintf("%s,", srcLayer.Digest.String())
}
for i, srcLayer := range srcInfos {
err = copySemaphore.Acquire(ctx, 1)
if err != nil {
return errors.Wrapf(err, "Can't acquire semaphore")
}
// Add labels carrying information about the target image and layers.
// These help the destination discover the target layer on its side.
if srcLayer.Annotations == nil {
srcLayer.Annotations = make(map[string]string)
}
if len(chain) > i {
srcLayer.Annotations[layerTargetDiffID] = chain[i].String()
srcLayer.Annotations[layerTargetDigest] = srcLayer.Digest.String()
srcLayer.Annotations[layerTargetReference] = ic.c.rawSource.Reference().DockerReference().String()
srcLayer.Annotations[layerTargetImageLayers] = layerDigests
}
copyGroup.Add(1)
go copyLayerHelper(i, srcLayer, encLayerBitmap[i], progressPool)
}
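For illustration only, not part of this commit: a minimal sketch of what the annotations attached above end up looking like on each layer's BlobInfo. All concrete reference and digest values are made up; the label keys and the trailing comma in the layers list mirror the copy.go changes above.

// Illustrative sketch (not part of the commit): the annotations that copyLayers
// attaches to each srcLayer when the DiffID chain could be read from the config.
// All concrete values below are hypothetical.
package main

import (
    "fmt"
    "strings"
)

func main() {
    imageRef := "docker.io/library/example:latest" // hypothetical source reference
    diffIDs := []string{ // uncompressed digests, from the config's RootFS.DiffIDs
        "sha256:1111111111111111111111111111111111111111111111111111111111111111",
        "sha256:2222222222222222222222222222222222222222222222222222222222222222",
    }
    layerDigests := []string{ // (possibly compressed) blob digests from the manifest
        "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
        "sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
    }

    // copy.go concatenates every layer digest followed by a comma, so the
    // layerTargetImageLayers value ends with a trailing ",".
    allLayers := strings.Join(layerDigests, ",") + ","

    for i := range layerDigests {
        annotations := map[string]string{
            "containers/image/target.diffID":      diffIDs[i],
            "containers/image/target.layerdigest": layerDigests[i],
            "containers/image/target.reference":   imageRef,
            "containers/image/target.layers":      allLayers,
        }
        fmt.Printf("layer %d annotations: %v\n", i, annotations)
    }
}
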
63 changes: 52 additions & 11 deletions storage/storage_image.go
@@ -24,6 +24,7 @@ import (
"github.com/containers/storage"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/ioutils"
"github.com/containers/storage/pkg/stringid"
digest "github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
@@ -54,17 +55,18 @@ type storageImageSource struct {

type storageImageDestination struct {
imageRef storageReference
directory string // Temporary directory where we store blobs until Commit() time
nextTempFileID int32 // A counter that we use for computing filenames to assign to blobs
manifest []byte // Manifest contents, temporary
signatures []byte // Signature contents, temporary
signatureses map[digest.Digest][]byte // Instance signature contents, temporary
putBlobMutex sync.Mutex // Mutex to sync state for parallel PutBlob executions
blobDiffIDs map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs
fileSizes map[digest.Digest]int64 // Mapping from layer blobsums to their sizes
filenames map[digest.Digest]string // Mapping from layer blobsums to names of files we used to hold them
SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice
SignaturesSizes map[digest.Digest][]int `json:"signatures-sizes,omitempty"` // Sizes of each manifest's signature slice
directory string // Temporary directory where we store blobs until Commit() time
nextTempFileID int32 // A counter that we use for computing filenames to assign to blobs
manifest []byte // Manifest contents, temporary
signatures []byte // Signature contents, temporary
signatureses map[digest.Digest][]byte // Instance signature contents, temporary
putBlobMutex sync.Mutex // Mutex to sync state for parallel PutBlob executions
blobDiffIDs map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs
blobInfos map[digest.Digest]types.BlobInfo // Mapping from layer blobsums to their corresponding information
fileSizes map[digest.Digest]int64 // Mapping from layer blobsums to their sizes
filenames map[digest.Digest]string // Mapping from layer blobsums to names of files we used to hold them
SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice
SignaturesSizes map[digest.Digest][]int `json:"signatures-sizes,omitempty"` // Sizes of each manifest's signature slice
}

type storageImageCloser struct {
@@ -352,6 +354,7 @@ func newImageDestination(sys *types.SystemContext, imageRef storageReference) (*
directory: directory,
signatureses: make(map[digest.Digest][]byte),
blobDiffIDs: make(map[digest.Digest]digest.Digest),
blobInfos: make(map[digest.Digest]types.BlobInfo),
fileSizes: make(map[digest.Digest]int64),
filenames: make(map[digest.Digest]string),
SignatureSizes: []int{},
@@ -486,6 +489,25 @@ func (s *storageImageDestination) TryReusingBlob(ctx context.Context, blobinfo t
}, nil
}

// Check if we can get the blob from the underlying store. Here we use the CreateLayer
// API to ask whether the store has the target layer blob. The store can discover the
// target contents of the blob using the image and layer information passed through
// labels.
// The store can return a storage.ErrTargetLayerAlreadyExists error to indicate that the
// target contents already exist on its side. We can then get the layer information
// with the normal APIs (LayersByUncompressedDigest, LayersByCompressedDigest, etc.).
if blobinfo.Annotations != nil {
tmpID := stringid.GenerateRandomID()
_, err := s.imageRef.transport.store.CreateLayer(tmpID, "", nil, "", false, &storage.LayerOptions{Labels: blobinfo.Annotations})
if err != nil && errors.Cause(err) == storage.ErrTargetLayerAlreadyExists {
s.blobInfos[blobinfo.Digest] = blobinfo // will be used during commit
} else if err == nil { // needs cleanup
if err := s.imageRef.transport.store.Delete(tmpID); err != nil {
return false, types.BlobInfo{}, err
}
}
}

// Check if we have a wasn't-compressed layer in storage that's based on that blob.
layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(blobinfo.Digest)
if err != nil && errors.Cause(err) != storage.ErrLayerUnknown {
@@ -682,6 +704,25 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t
lastLayer = layer.ID
continue
}

// If we have a blobInfos entry for this blob, the underlying store already has the
// layer blob it describes, and we want to apply it on top of `lastLayer`. Here we
// expect the underlying store to also have the chained view of this layer (on top of
// `lastLayer`). The store can return storage.ErrTargetLayerAlreadyExists to indicate
// that the chain exists.
if blobinfo, ok := s.blobInfos[blob.Digest]; ok {
if _, err := s.imageRef.transport.store.CreateLayer(id, lastLayer, nil, "", false, &storage.LayerOptions{Labels: blobinfo.Annotations}); err != nil && errors.Cause(err) == storage.ErrTargetLayerAlreadyExists {
if layer, err := s.imageRef.transport.store.Layer(id); layer != nil && err == nil {
lastLayer = layer.ID
continue
}
} else if err == nil { // needs cleanup
if err := s.imageRef.transport.store.Delete(id); err != nil {
return err
}
}
}

// Check if we previously cached a file with that blob's contents. If we didn't,
// then we need to read the desired contents from a layer.
filename, ok := s.filenames[blob.Digest]
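A condensed sketch of the probe pattern used in both TryReusingBlob and Commit above, for illustration only. It assumes the extended containers/storage API this commit relies on (LayerOptions.Labels and storage.ErrTargetLayerAlreadyExists), which is not in upstream containers/storage; the package and helper names are hypothetical.

// storeprobe is an illustrative package, not part of the commit.
package storeprobe

import (
    "github.com/containers/storage"
    "github.com/pkg/errors"
)

// storeHasLayer asks the backing store whether it can already provide the
// contents described by labels, by creating a throwaway layer that only
// carries those labels. id is the layer ID to probe with; parent is the layer
// it would sit on top of ("" when probing a single blob, as TryReusingBlob does).
func storeHasLayer(store storage.Store, id, parent string, labels map[string]string) (bool, error) {
    _, err := store.CreateLayer(id, parent, nil, "", false, &storage.LayerOptions{Labels: labels})
    if err != nil && errors.Cause(err) == storage.ErrTargetLayerAlreadyExists {
        // The backing store already has these contents; no pull is needed.
        return true, nil
    }
    if err == nil {
        // The probe created a real (empty) layer; remove it again.
        if err := store.Delete(id); err != nil {
            return false, err
        }
    }
    return false, nil
}

As the diff shows, TryReusingBlob probes with a random temporary ID and an empty parent, while Commit probes with the real layer ID on top of lastLayer so the store can confirm the chained view before the layer is skipped.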
