Skip pulling layers reusable from additional layer store
Signed-off-by: Kohei Tokunaga <[email protected]>
ktock committed Mar 24, 2021
1 parent 224346a commit 3378aec
Showing 3 changed files with 73 additions and 21 deletions.
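
For orientation, here is a minimal sketch of how the pieces introduced by this commit fit together: the copy loop forwards the source image reference as a SrcRef hint, and the storage destination uses that hint to look the blob up in an additional layer store before falling back to the usual reuse check. The snippet is illustrative only and is based solely on the code in this diff; surrounding names such as dest, store, srcInfo, srcRef and layerIndex are assumed placeholders, not part of the change.

// Copy side (copy/copy.go): pass the source reference down with the reuse options.
options := internalTypes.TryReusingBlobOptions{
	Cache:         blobInfoCache,
	CanSubstitute: canSubstituteBlobs,
	LayerIndex:    &layerIndex,
	SrcRef:        srcRef, // e.g. ic.c.rawSource.Reference().DockerReference(); may be nil
}
reused, blobInfo, err := dest.TryReusingBlobWithOptions(ctx, srcInfo, options)

// Storage side (storage/storage_image.go): consult the additional layer store first.
aLayer, err := store.LookupAdditionalLayer(srcInfo.Digest, srcRef.String())
if err == nil {
	// The layer is already available: record aLayer.UncompressedDigest() and report
	// aLayer.CompressedSize() instead of pulling the blob. It is applied later with
	// aLayer.PutAs(id, parentLayerID, nil) and released via aLayer.Release() on Close().
}
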
10 changes: 6 additions & 4 deletions copy/copy.go
@@ -906,7 +906,7 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error {
}

data := make([]copyLayerData, numLayers)
copyLayerHelper := func(index int, srcLayer types.BlobInfo, toEncrypt bool, pool *mpb.Progress) {
copyLayerHelper := func(index int, srcLayer types.BlobInfo, toEncrypt bool, pool *mpb.Progress, srcRef reference.Named) {
defer copySemaphore.Release(1)
defer copyGroup.Done()
cld := copyLayerData{}
@@ -921,7 +921,7 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error {
logrus.Debugf("Skipping foreign layer %q copy to %s", cld.destInfo.Digest, ic.c.dest.Reference().Transport().Name())
}
} else {
cld.destInfo, cld.diffID, cld.err = ic.copyLayer(ctx, srcLayer, toEncrypt, pool, index)
cld.destInfo, cld.diffID, cld.err = ic.copyLayer(ctx, srcLayer, toEncrypt, pool, index, srcRef)
}
data[index] = cld
}
@@ -958,7 +958,7 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error {
return errors.Wrapf(err, "Can't acquire semaphore")
}
copyGroup.Add(1)
go copyLayerHelper(i, srcLayer, encLayerBitmap[i], progressPool)
go copyLayerHelper(i, srcLayer, encLayerBitmap[i], progressPool, ic.c.rawSource.Reference().DockerReference())
}

// A call to copyGroup.Wait() is done at this point by the defer above.
@@ -1143,7 +1143,8 @@ type diffIDResult struct {

// copyLayer copies a layer with srcInfo (with known Digest and Annotations and possibly known Size) in src to dest, perhaps (de/re/)compressing it,
// and returns a complete blobInfo of the copied layer, and a value for LayerDiffIDs if diffIDIsNeeded
func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, toEncrypt bool, pool *mpb.Progress, layerIndex int) (types.BlobInfo, digest.Digest, error) {
// srcRef can be used as an additional hint to the destination when checking whether a layer can be reused; srcRef can be nil.
func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, toEncrypt bool, pool *mpb.Progress, layerIndex int, srcRef reference.Named) (types.BlobInfo, digest.Digest, error) {
// If the srcInfo doesn't contain compression information, try to compute it from the
// MediaType, which was either read from a manifest by way of LayerInfos() or constructed
// by LayerInfosForCopy(), if it was supplied at all. If we succeed in copying the blob,
@@ -1190,6 +1191,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
Cache: ic.c.blobInfoCache,
CanSubstitute: ic.canSubstituteBlobs,
LayerIndex: &layerIndex,
SrcRef: srcRef,
}
reused, blobInfo, err = dest.TryReusingBlobWithOptions(ctx, srcInfo, options)
} else {
3 changes: 3 additions & 0 deletions internal/types/types.go
@@ -4,6 +4,7 @@ import (
"context"
"io"

"github.com/containers/image/v5/docker/reference"
publicTypes "github.com/containers/image/v5/types"
)

@@ -50,4 +51,6 @@ type TryReusingBlobOptions struct {
CanSubstitute bool
// The corresponding index in the layer slice.
LayerIndex *int
// The reference of the image that contains the target blob.
SrcRef reference.Named
}
81 changes: 64 additions & 17 deletions storage/storage_image.go
@@ -76,11 +76,12 @@ type storageImageDestination struct {
indexToStorageID map[int]*string
// All accesses to the data below are protected by `lock`, which is made
// *explicit* in the code.
blobDiffIDs map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs
fileSizes map[digest.Digest]int64 // Mapping from layer blobsums to their sizes
filenames map[digest.Digest]string // Mapping from layer blobsums to names of files we used to hold them
currentIndex int // The index of the layer to be committed (i.e., lower indices have already been committed)
indexToPulledBlob map[int]*types.BlobInfo // Mapping from layer (by index) to pulled down blob
blobDiffIDs map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs
fileSizes map[digest.Digest]int64 // Mapping from layer blobsums to their sizes
filenames map[digest.Digest]string // Mapping from layer blobsums to names of files we used to hold them
currentIndex int // The index of the layer to be committed (i.e., lower indices have already been committed)
indexToPulledBlob map[int]*types.BlobInfo // Mapping from layer (by index) to pulled down blob
blobAdditionalLayer map[digest.Digest]storage.AdditionalLayer // Mapping from layer blobsums to their corresponding additional layer
}

type storageImageCloser struct {
@@ -391,16 +392,17 @@ func newImageDestination(sys *types.SystemContext, imageRef storageReference) (*
return nil, errors.Wrapf(err, "error creating a temporary directory")
}
image := &storageImageDestination{
imageRef: imageRef,
directory: directory,
signatureses: make(map[digest.Digest][]byte),
blobDiffIDs: make(map[digest.Digest]digest.Digest),
fileSizes: make(map[digest.Digest]int64),
filenames: make(map[digest.Digest]string),
SignatureSizes: []int{},
SignaturesSizes: make(map[digest.Digest][]int),
indexToStorageID: make(map[int]*string),
indexToPulledBlob: make(map[int]*types.BlobInfo),
imageRef: imageRef,
directory: directory,
signatureses: make(map[digest.Digest][]byte),
blobDiffIDs: make(map[digest.Digest]digest.Digest),
blobAdditionalLayer: make(map[digest.Digest]storage.AdditionalLayer),
fileSizes: make(map[digest.Digest]int64),
filenames: make(map[digest.Digest]string),
SignatureSizes: []int{},
SignaturesSizes: make(map[digest.Digest][]int),
indexToStorageID: make(map[int]*string),
indexToPulledBlob: make(map[int]*types.BlobInfo),
}
return image, nil
}
@@ -411,8 +413,11 @@ func (s *storageImageDestination) Reference() types.ImageReference {
return s.imageRef
}

// Close cleans up the temporary directory.
// Close cleans up the temporary directory and additional layer store handlers.
func (s *storageImageDestination) Close() error {
for _, al := range s.blobAdditionalLayer {
al.Release()
}
return os.RemoveAll(s.directory)
}

@@ -532,14 +537,37 @@ func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader,
used together. Mixing the two with the non "WithOptions" functions
// is not supported.
func (s *storageImageDestination) TryReusingBlobWithOptions(ctx context.Context, blobinfo types.BlobInfo, options internalTypes.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
reused, info, err := s.TryReusingBlob(ctx, blobinfo, options.Cache, options.CanSubstitute)
reused, info, err := s.tryReusingBlobWithSrcRef(ctx, blobinfo, options.Cache, options.CanSubstitute, options.SrcRef)
if err != nil || !reused || options.LayerIndex == nil {
return reused, info, err
}

return reused, info, s.queueOrCommit(ctx, info, *options.LayerIndex)
}

func (s *storageImageDestination) tryReusingBlobWithSrcRef(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool, ref reference.Named) (bool, types.BlobInfo, error) {
// lock the entire method as it executes fairly quickly
s.lock.Lock()
defer s.lock.Unlock()

// Check if we have the layer in the underlying additional layer store.
aLayer, err := s.imageRef.transport.store.LookupAdditionalLayer(blobinfo.Digest, ref.String())
if err != nil && errors.Cause(err) != storage.ErrLayerUnknown {
return false, types.BlobInfo{}, errors.Wrapf(err, `Error looking for compressed layers with digest %q and labels`, blobinfo.Digest)
} else if err == nil {
// Record the uncompressed value so that we can use it to calculate layer IDs.
s.blobDiffIDs[blobinfo.Digest] = aLayer.UncompressedDigest()
s.blobAdditionalLayer[blobinfo.Digest] = aLayer
return true, types.BlobInfo{
Digest: blobinfo.Digest,
Size: aLayer.CompressedSize(),
MediaType: blobinfo.MediaType,
}, nil
}

return s.tryReusingBlobLocked(ctx, blobinfo, cache, canSubstitute)
}

// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
@@ -553,6 +581,11 @@ func (s *storageImageDestination) TryReusingBlob(ctx context.Context, blobinfo t
// lock the entire method as it executes fairly quickly
s.lock.Lock()
defer s.lock.Unlock()

return s.tryReusingBlobLocked(ctx, blobinfo, cache, canSubstitute)
}

func (s *storageImageDestination) tryReusingBlobLocked(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
if blobinfo.Digest == "" {
return false, types.BlobInfo{}, errors.Errorf(`Can not check for a blob with unknown digest`)
}
@@ -804,6 +837,20 @@ func (s *storageImageDestination) commitLayer(ctx context.Context, blob manifest
s.indexToStorageID[index] = &lastLayer
return nil
}

s.lock.Lock()
al, ok := s.blobAdditionalLayer[blob.Digest]
s.lock.Unlock()
if ok {
layer, err := al.PutAs(id, lastLayer, nil)
if err != nil {
return errors.Wrapf(err, "failed to put layer from digest and labels")
}
lastLayer = layer.ID
s.indexToStorageID[index] = &lastLayer
return nil
}

// Check if we previously cached a file with that blob's contents. If we didn't,
// then we need to read the desired contents from a layer.
s.lock.Lock()
