diff --git a/vendor/github.com/containers/image/v5/copy/copy.go b/vendor/github.com/containers/image/v5/copy/copy.go index 4d5b076891..5c534ad892 100644 --- a/vendor/github.com/containers/image/v5/copy/copy.go +++ b/vendor/github.com/containers/image/v5/copy/copy.go @@ -111,6 +111,7 @@ type copier struct { compressionLevel *int ociDecryptConfig *encconfig.DecryptConfig ociEncryptConfig *encconfig.EncryptConfig + fetchPartialBlobs bool } // imageCopier tracks state specific to a single image (possibly an item of a manifest list) @@ -182,6 +183,8 @@ type Options struct { // OciDecryptConfig contains the config that can be used to decrypt an image if it is // encrypted if non-nil. If nil, it does not attempt to decrypt an image. OciDecryptConfig *encconfig.DecryptConfig + + FetchPartialBlobs bool // attempt to fetch the blob partially. Experimental. } // validateImageListSelection returns an error if the passed-in value is not one that we recognize as a valid ImageListSelection value @@ -257,9 +260,10 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef, // FIXME? The cache is used for sources and destinations equally, but we only have a SourceCtx and DestinationCtx. // For now, use DestinationCtx (because blob reuse changes the behavior of the destination side more); eventually // we might want to add a separate CommonCtx — or would that be too confusing? - blobInfoCache: blobinfocache.DefaultCache(options.DestinationCtx), - ociDecryptConfig: options.OciDecryptConfig, - ociEncryptConfig: options.OciEncryptConfig, + blobInfoCache: blobinfocache.DefaultCache(options.DestinationCtx), + ociDecryptConfig: options.OciDecryptConfig, + ociEncryptConfig: options.OciEncryptConfig, + fetchPartialBlobs: options.FetchPartialBlobs, } // Default to using gzip compression unless specified otherwise. if options.DestinationCtx == nil || options.DestinationCtx.CompressionFormat == nil { @@ -959,9 +963,35 @@ func (c *copier) newProgressPool(ctx context.Context) (*mpb.Progress, func()) { } } +// customPartialBlobCounter provides a decorator function for the partial blobs retrieval progress bar +func customPartialBlobCounter(filler interface{}, wcc ...decor.WC) decor.Decorator { + producer := func(filler interface{}) decor.DecorFunc { + return func(s decor.Statistics) string { + // if the Refill value is set, reverse the bar + // so that it looks like ====+++++. + if s.Refill > 0 { + type revSetter interface { + SetReverse(bool) + } + if t, ok := filler.(revSetter); ok { + t.SetReverse(true) + } + } + if s.Total == 0 { + pairFmt := "%.1f / %.1f (skipped: %.1f)" + return fmt.Sprintf(pairFmt, decor.SizeB1024(s.Current), decor.SizeB1024(s.Total), decor.SizeB1024(s.Refill)) + } + pairFmt := "%.1f / %.1f (skipped: %.1f = %.2f%%)" + percentage := 100.0 * float64(s.Refill) / float64(s.Total) + return fmt.Sprintf(pairFmt, decor.SizeB1024(s.Current), decor.SizeB1024(s.Total), decor.SizeB1024(s.Refill), percentage) + } + } + return decor.Any(producer(filler), wcc...) +} + // createProgressBar creates a mpb.Bar in pool. Note that if the copier's reportWriter // is ioutil.Discard, the progress bar's output will be discarded -func (c *copier) createProgressBar(pool *mpb.Progress, info types.BlobInfo, kind string, onComplete string) *mpb.Bar { +func (c *copier) createProgressBar(pool *mpb.Progress, partial bool, info types.BlobInfo, kind string, onComplete string) *mpb.Bar { // shortDigestLen is the length of the digest used for blobs. 
const shortDigestLen = 12 @@ -979,15 +1009,28 @@ func (c *copier) createProgressBar(pool *mpb.Progress, info types.BlobInfo, kind // Otherwise, use a spinner to indicate that something's happening. var bar *mpb.Bar if info.Size > 0 { - bar = pool.AddBar(info.Size, - mpb.BarFillerClearOnComplete(), - mpb.PrependDecorators( - decor.OnComplete(decor.Name(prefix), onComplete), - ), - mpb.AppendDecorators( - decor.OnComplete(decor.CountersKibiByte("%.1f / %.1f"), ""), - ), - ) + if partial { + filler := mpb.NewBarFiller(mpb.DefaultBarStyle, false) + bar = pool.Add(info.Size, + filler, + mpb.PrependDecorators( + decor.OnComplete(decor.Name(prefix), onComplete), + ), + mpb.AppendDecorators( + customPartialBlobCounter(filler), + ), + ) + } else { + bar = pool.AddBar(info.Size, + mpb.BarFillerClearOnComplete(), + mpb.PrependDecorators( + decor.OnComplete(decor.Name(prefix), onComplete), + ), + mpb.AppendDecorators( + decor.OnComplete(decor.CountersKibiByte("%.1f / %.1f"), ""), + ), + ) + } } else { bar = pool.AddSpinner(info.Size, mpb.SpinnerOnLeft, @@ -1016,7 +1059,7 @@ func (c *copier) copyConfig(ctx context.Context, src types.Image) error { destInfo, err := func() (types.BlobInfo, error) { // A scope for defer progressPool, progressCleanup := c.newProgressPool(ctx) defer progressCleanup() - bar := c.createProgressBar(progressPool, srcInfo, "config", "done") + bar := c.createProgressBar(progressPool, false, srcInfo, "config", "done") destInfo, err := c.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, false, true, false, bar) if err != nil { return types.BlobInfo{}, err @@ -1041,6 +1084,23 @@ type diffIDResult struct { err error } +type imageSourceSeekableProxy struct { + source types.ImageSourceSeekable + progress chan int64 +} + +func (s imageSourceSeekableProxy) GetBlobAt(ctx context.Context, bInfo types.BlobInfo, chunks []types.ImageSourceChunk) (io.ReadCloser, string, error) { + rc, contentType, err := s.source.GetBlobAt(ctx, bInfo, chunks) + if err == nil { + total := int64(0) + for _, c := range chunks { + total += int64(c.Length) + } + s.progress <- total + } + return rc, contentType, err +} + // copyLayer copies a layer with srcInfo (with known Digest and Annotations and possibly known Size) in src to dest, perhaps compressing it if canCompress, // and returns a complete blobInfo of the copied layer, and a value for LayerDiffIDs if diffIDIsNeeded func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, toEncrypt bool, pool *mpb.Progress) (types.BlobInfo, digest.Digest, error) { @@ -1048,15 +1108,21 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to // Diffs are needed if we are encrypting an image or trying to decrypt an image diffIDIsNeeded := ic.diffIDsAreNeeded && cachedDiffID == "" || toEncrypt || (isOciEncrypted(srcInfo.MediaType) && ic.c.ociDecryptConfig != nil) + srcAlgo := "" + if srcInfo.CompressionAlgorithm != nil { + srcAlgo = srcInfo.CompressionAlgorithm.Name() + } // If we already have the blob, and we don't need to compute the diffID, then we don't need to read it from the source. 
- if !diffIDIsNeeded { + mustRecompress := srcInfo.CompressionOperation != types.PreserveOriginal || srcAlgo != ic.c.compressionFormat.Name() + if !diffIDIsNeeded && !mustRecompress { reused, blobInfo, err := ic.c.dest.TryReusingBlob(ctx, srcInfo, ic.c.blobInfoCache, ic.canSubstituteBlobs) if err != nil { return types.BlobInfo{}, "", errors.Wrapf(err, "Error trying to reuse blob %s at destination", srcInfo.Digest) } + if reused { logrus.Debugf("Skipping blob %s (already present):", srcInfo.Digest) - bar := ic.c.createProgressBar(pool, srcInfo, "blob", "skipped: already exists") + bar := ic.c.createProgressBar(pool, false, srcInfo, "blob", "skipped: already exists") bar.SetTotal(0, true) // Throw an event that the layer has been skipped @@ -1070,6 +1136,44 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to } } + imgSource, okSource := ic.c.rawSource.(types.ImageSourceSeekable) + imgDest, okDest := ic.c.dest.(types.ImageDestinationPartial) + if ic.c.fetchPartialBlobs && okSource && okDest && !diffIDIsNeeded { + bar := ic.c.createProgressBar(pool, true, srcInfo, "blob", "done") + + progress := make(chan int64) + terminate := make(chan interface{}) + + defer close(terminate) + defer close(progress) + + proxy := imageSourceSeekableProxy{ + source: imgSource, + progress: progress, + } + go func() { + for { + select { + case written := <-progress: + bar.IncrInt64(written) + case <-terminate: + return + } + } + + }() + + bar.SetTotal(srcInfo.Size, false) + info, err := imgDest.PutBlobPartial(ctx, proxy, srcInfo, ic.c.blobInfoCache) + if err == nil { + bar.SetRefill(srcInfo.Size - bar.Current()) + bar.SetTotal(srcInfo.Size, true) + logrus.Debugf("Retrieved partial blob %v", srcInfo.Digest) + return info, cachedDiffID, nil + } + logrus.Errorf("Failed to retrieve partial blob: %v", err) + } + // Fallback: copy the layer, computing the diffID if we need to do so srcStream, srcBlobSize, err := ic.c.rawSource.GetBlob(ctx, srcInfo, ic.c.blobInfoCache) if err != nil { @@ -1077,7 +1181,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to } defer srcStream.Close() - bar := ic.c.createProgressBar(pool, srcInfo, "blob", "done") + bar := ic.c.createProgressBar(pool, false, srcInfo, "blob", "done") blobInfo, diffIDChan, err := ic.copyLayerFromStream(ctx, srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize, MediaType: srcInfo.MediaType, Annotations: srcInfo.Annotations}, diffIDIsNeeded, toEncrypt, bar) if err != nil { @@ -1426,7 +1530,9 @@ func (c *copier) compressGoroutine(dest *io.PipeWriter, src io.Reader, compressi if err != nil { return } - defer compressor.Close() + defer func() { + err = compressor.Close() + }() buf := make([]byte, compressionBufferSize) diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_src.go b/vendor/github.com/containers/image/v5/docker/docker_image_src.go index 70ca7661e2..18d7aa1b4c 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_image_src.go +++ b/vendor/github.com/containers/image/v5/docker/docker_image_src.go @@ -270,6 +270,41 @@ func (s *dockerImageSource) HasThreadSafeGetBlob() bool { return true } +// GetBlobAt returns a stream for the specified blob. 
+func (s *dockerImageSource) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []types.ImageSourceChunk) (io.ReadCloser, string, error) { + headers := make(map[string][]string) + + var rangeVals []string + for _, c := range chunks { + rangeVals = append(rangeVals, fmt.Sprintf("%d-%d", c.Offset, c.Offset+c.Length-1)) + } + + headers["Range"] = []string{fmt.Sprintf("bytes=%s", strings.Join(rangeVals, ","))} + + if len(info.URLs) != 0 { + return nil, "", fmt.Errorf("external URLs not supported with GetBlobAt") + } + + path := fmt.Sprintf(blobsPath, reference.Path(s.physicalRef.ref), info.Digest.String()) + logrus.Debugf("Downloading %s", path) + res, err := s.c.makeRequest(ctx, "GET", path, headers, nil, v2Auth, nil) + if err != nil { + return nil, "", err + } + if err := httpResponseToError(res, "Error fetching partial blob"); err != nil { + if res.Body != nil { + res.Body.Close() + } + return nil, "", err + } + if res.StatusCode != http.StatusPartialContent { + res.Body.Close() + return nil, "", errors.Errorf("Invalid status code returned when fetching blob %d (%s)", res.StatusCode, http.StatusText(res.StatusCode)) + } + + return res.Body, res.Header.Get("Content-Type"), nil +} + // GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). // The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. // May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. diff --git a/vendor/github.com/containers/image/v5/docker/errors.go b/vendor/github.com/containers/image/v5/docker/errors.go index 5b5008af77..6f8d9b7ab1 100644 --- a/vendor/github.com/containers/image/v5/docker/errors.go +++ b/vendor/github.com/containers/image/v5/docker/errors.go @@ -17,6 +17,14 @@ var ( ErrTooManyRequests = errors.New("too many requests to registry") ) +// ErrBadRequest is returned when the status code returned is 400 +type ErrBadRequest struct { +} + +func (e ErrBadRequest) Error() string { + return fmt.Sprintf("http bad request") +} + // ErrUnauthorizedForCredentials is returned when the status code returned is 401 type ErrUnauthorizedForCredentials struct { // We only use a struct to allow a type assertion, without limiting the contents of the error otherwise. 
Err error @@ -32,11 +40,15 @@ func httpResponseToError(res *http.Response, context string) error { switch res.StatusCode { case http.StatusOK: return nil + case http.StatusPartialContent: + return nil case http.StatusTooManyRequests: return ErrTooManyRequests case http.StatusUnauthorized: err := client.HandleErrorResponse(res) return ErrUnauthorizedForCredentials{Err: err} + case http.StatusBadRequest: + return ErrBadRequest{} default: if context != "" { context = context + ": " diff --git a/vendor/github.com/containers/image/v5/manifest/common.go b/vendor/github.com/containers/image/v5/manifest/common.go index fa2b39e0ea..f6838163b1 100644 --- a/vendor/github.com/containers/image/v5/manifest/common.go +++ b/vendor/github.com/containers/image/v5/manifest/common.go @@ -2,6 +2,7 @@ package manifest import ( "fmt" + "strings" "github.com/containers/image/v5/pkg/compression" "github.com/containers/image/v5/types" @@ -63,7 +64,7 @@ func compressionVariantMIMEType(variantTable []compressionMIMETypeSet, mimeType if mt == mimeType { // Found the variant name := mtsUncompressed if algorithm != nil { - name = algorithm.Name() + name = strings.Split(algorithm.Name(), ":")[0] } if res, ok := variants[name]; ok { if res != mtsUnsupportedMIMEType { diff --git a/vendor/github.com/containers/image/v5/pkg/compression/chunked_zstd.go b/vendor/github.com/containers/image/v5/pkg/compression/chunked_zstd.go new file mode 100644 index 0000000000..e8be034f97 --- /dev/null +++ b/vendor/github.com/containers/image/v5/pkg/compression/chunked_zstd.go @@ -0,0 +1,392 @@ +package compression + +import ( + "bytes" + "crypto/sha256" + "encoding/base64" + "encoding/binary" + "encoding/json" + "fmt" + "io" + "time" + + "github.com/klauspost/compress/zstd" + "github.com/vbatts/tar-split/archive/tar" +) + +type chunkedZstdWriter struct { + tarSplitOut *io.PipeWriter + tarSplitErr chan error +} + +func (w chunkedZstdWriter) Close() error { + err := <-w.tarSplitErr + if err != nil { + w.tarSplitOut.Close() + return err + } + return w.tarSplitOut.Close() +} + +func (w chunkedZstdWriter) Write(p []byte) (int, error) { + select { + case err := <-w.tarSplitErr: + w.tarSplitOut.Close() + return 0, err + default: + return w.tarSplitOut.Write(p) + } +} + +type FileMetadata struct { + Type string `json:"type"` + Name string `json:"name"` + Linkname string `json:"linkName,omitempty"` + Mode int64 `json:"mode,omitempty"` + Size int64 `json:"size"` + Uid int `json:"uid"` + Gid int `json:"gid"` + ModTime time.Time `json:"modTime"` + AccessTime time.Time `json:"accessTime"` + ChangeTime time.Time `json:"changeTime"` + Devmajor int64 `json:"devMajor"` + Devminor int64 `json:"devMinor"` + Xattrs map[string]string `json:"xattrs,omitempty"` + Checksum string `json:"checksum,omitempty"` + StartOffset int64 `json:"startOffset,omitempty"` + EndOffset int64 `json:"endOffset,omitempty"` +} + +func getType(t byte) (string, error) { + switch t { + case tar.TypeReg, tar.TypeRegA: + return "REG", nil + case tar.TypeLink: + return "LINK", nil + case tar.TypeChar: + return "CHAR", nil + case tar.TypeBlock: + return "BLOCK", nil + case tar.TypeDir: + return "DIR", nil + case tar.TypeFifo: + return "FIFO", nil + case tar.TypeSymlink: + return "SYMLINK", nil + } + return "", fmt.Errorf("unknown tarball type: %v", t) +} + +func TypeToTarType(t string) (byte, error) { + switch t { + case "REG": + return tar.TypeReg, nil + case "LINK": + return tar.TypeLink, nil + case "CHAR": + return tar.TypeChar, nil + case "BLOCK": + return tar.TypeBlock, nil + case "DIR": 
+ return tar.TypeDir, nil + case "FIFO": + return tar.TypeFifo, nil + case "SYMLINK": + return tar.TypeSymlink, nil + } + return 0, fmt.Errorf("unknown type: %v", t) +} + +// sizeCounter is an io.Writer which only counts the total size of its input. +type sizeCounter struct{ size int64 } + +func (c *sizeCounter) Write(p []byte) (int, error) { + c.size += int64(len(p)) + return len(p), nil +} + +var ( + // when the zstd decoder encounters a skippable frame + 1 byte for the size, it + // will ignore it. + // https://tools.ietf.org/html/rfc8478#section-3.1.2 + skippableFrameMagic = []byte{0x50, 0x2a, 0x4d, 0x18} +) + +func isZstdSkippableFrameMagic(data []byte) bool { + if len(data) < 4 { + return false + } + return bytes.Equal(skippableFrameMagic, data[:4]) +} + +func ReadChunkedZstdManifest(blobStream io.ReaderAt, blobSize int64) ([]byte, error) { + footerSize := int64(32) + if blobSize <= footerSize { + return nil, fmt.Errorf("blob too small") + } + + footer := make([]byte, footerSize) + _, err := blobStream.ReadAt(footer, blobSize-footerSize) + if err != nil && err != io.EOF { + return nil, err + } + + frameLen := binary.LittleEndian.Uint32(footer[4:8]) + offset := binary.LittleEndian.Uint64(footer[8:16]) + length := binary.LittleEndian.Uint64(footer[16:24]) + lengthUncompressed := binary.LittleEndian.Uint64(footer[24:32]) + + if !isZstdSkippableFrameMagic(footer) || frameLen != 24 { + return nil, fmt.Errorf("unknown chunked zstd footer") + } + + // set a reasonable limit + if length > (1<<20)*50 { + return nil, fmt.Errorf("manifest too big") + } + if lengthUncompressed > (1<<20)*50 { + return nil, fmt.Errorf("manifest too big") + } + + manifest := make([]byte, length) + _, err = blobStream.ReadAt(manifest, int64(offset)) + if err != nil && err != io.EOF { + return nil, err + } + + decoder, err := zstd.NewReader(nil) + if err != nil { + return nil, err + } + defer decoder.Close() + + b := make([]byte, 0, lengthUncompressed) + if decoded, err := decoder.DecodeAll(manifest, b); err == nil { + return decoded, nil + } + + return manifest, nil +} + +func appendZstdSkippableFrame(dest io.Writer, data []byte) error { + if _, err := dest.Write(skippableFrameMagic); err != nil { + return err + } + + var size []byte = make([]byte, 4) + binary.LittleEndian.PutUint32(size, uint32(len(data))) + if _, err := dest.Write(size); err != nil { + return err + } + if _, err := dest.Write(data); err != nil { + return err + } + + return nil +} + +func writeZstdChunkedManifest(dest io.Writer, offset *sizeCounter, metadata []FileMetadata, level int) error { + // 8 is the size of the zstd skippable frame header + the frame size + manifestOffset := uint64(offset.size) + 8 + + // Generate the manifest + manifest, err := json.Marshal(metadata) + if err != nil { + return err + } + + var compressedBuffer bytes.Buffer + zstdWriter, err := zstdWriterWithLevel(&compressedBuffer, level) + if err != nil { + return err + } + if _, err := zstdWriter.Write(manifest); err != nil { + zstdWriter.Close() + return err + } + if err := zstdWriter.Close(); err != nil { + return err + } + + compressedManifest := compressedBuffer.Bytes() + + if err := appendZstdSkippableFrame(dest, compressedManifest); err != nil { + return err + } + + // Store the offset to the manifest and its size in LE order + var manifestDataLE []byte = make([]byte, 24) + binary.LittleEndian.PutUint64(manifestDataLE, manifestOffset) + binary.LittleEndian.PutUint64(manifestDataLE[8:], uint64(len(compressedManifest))) + 
binary.LittleEndian.PutUint64(manifestDataLE[16:], uint64(len(manifest))) + + return appendZstdSkippableFrame(dest, manifestDataLE) +} + +func writeZstdChunkedStream(destFile io.Writer, r *io.PipeReader, level int) error { + // total written so far. Used to retrieve partial offsets in the file + sizeCounter := &sizeCounter{} + + dest := io.MultiWriter(destFile, sizeCounter) + + tr := tar.NewReader(r) + tr.RawAccounting = true + + buf := make([]byte, 4096) + + zstdWriter, err := zstdWriterWithLevel(dest, level) + if err != nil { + return err + } + defer func() { + if zstdWriter != nil { + zstdWriter.Close() + zstdWriter.Flush() + } + }() + + restartCompression := func() (int64, error) { + var offset int64 + if zstdWriter != nil { + if err := zstdWriter.Close(); err != nil { + return 0, err + } + if err := zstdWriter.Flush(); err != nil { + return 0, err + } + offset = sizeCounter.size + zstdWriter.Reset(dest) + } + return offset, nil + } + + var metadata []FileMetadata + for { + hdr, err := tr.Next() + if err != nil { + if err == io.EOF { + break + } + return err + } + + if _, err := zstdWriter.Write(tr.RawBytes()); err != nil { + return err + } + + payloadChecksum := sha256.New() + payloadDest := io.MultiWriter(payloadChecksum, zstdWriter) + + // Now handle the payload, if any + var startOffset, endOffset int64 + var checksum []byte + for { + read, errRead := tr.Read(buf) + if errRead != nil && errRead != io.EOF { + return err + } + + // restart the compression only if there is + // a payload. + if read > 0 { + if startOffset == 0 { + startOffset, err = restartCompression() + if err != nil { + return err + } + } + _, err := payloadDest.Write(buf[:read]) + if err != nil { + return err + } + } + if errRead == io.EOF { + if startOffset > 0 { + endOffset, err = restartCompression() + if err != nil { + return err + } + checksum = payloadChecksum.Sum(nil) + } + break + } + } + + typ, err := getType(hdr.Typeflag) + if err != nil { + return err + } + xattrs := make(map[string]string) + for k, v := range hdr.Xattrs { + xattrs[k] = base64.StdEncoding.EncodeToString([]byte(v)) + } + m := FileMetadata{ + Type: typ, + Name: hdr.Name, + Linkname: hdr.Linkname, + Mode: hdr.Mode, + Size: hdr.Size, + Uid: hdr.Uid, + Gid: hdr.Gid, + ModTime: hdr.ModTime, + AccessTime: hdr.AccessTime, + ChangeTime: hdr.ChangeTime, + Devmajor: hdr.Devmajor, + Devminor: hdr.Devminor, + Xattrs: xattrs, + Checksum: fmt.Sprintf("%x", checksum), + StartOffset: startOffset, + EndOffset: endOffset, + } + metadata = append(metadata, m) + } + + if err := zstdWriter.Close(); err != nil { + return err + } + if err := zstdWriter.Flush(); err != nil { + return err + } + zstdWriter = nil + + return writeZstdChunkedManifest(dest, sizeCounter, metadata, level) +} + +// chunkedZstdWriterWithLevel writes a zstd compressed tarball where each file is +// compressed separately so it can be addressed separately. Idea based on CRFS: +// https://github.com/google/crfs +// The difference with CRFS is that the zstd compression is used instead of gzip. +// The reason for it is that zstd supports embedding metadata ignored by the decoder +// as part of the compressed stream. +// A manifest json file with all the metadata is appended at the end of the tarball +// stream, using zstd skippable frames. 
+// The final file will look like: +// [FILE_1][FILE_2]..[FILE_N][SKIPPABLE FRAME 1][SKIPPABLE FRAME 2] +// Where: +// [FILE_N]: [ZSTD HEADER][TAR HEADER][PAYLOAD FILE_N][ZSTD FOOTER] +// [SKIPPABLE FRAME 1]: [ZSTD SKIPPABLE FRAME, SIZE=MANIFEST LENGTH][MANIFEST] +// [SKIPPABLE FRAME 2]: [ZSTD SKIPPABLE FRAME, SIZE=16][MANIFEST_OFFSET][MANIFEST_LENGTH][MANIFEST_LENGTH_UNCOMPRESSED] +// MANIFEST_OFFSET, MANIFEST_LENGTH and MANIFEST_LENGTH_UNCOMPRESSED are 64 bits unsigned in little endian format. +func chunkedZstdWriterWithLevel(out io.Writer, level int) (io.WriteCloser, error) { + ch := make(chan error, 1) + r, w := io.Pipe() + + go func() { + defer close(ch) + ch <- writeZstdChunkedStream(out, r, level) + }() + + return chunkedZstdWriter{ + tarSplitOut: w, + tarSplitErr: ch, + }, nil +} + +// chunkedZstdCompressor is a CompressorFunc for the zstd compression algorithm. +func chunkedZstdCompressor(r io.Writer, level *int) (io.WriteCloser, error) { + if level == nil { + l := 3 + level = &l + } + return chunkedZstdWriterWithLevel(r, *level) +} diff --git a/vendor/github.com/containers/image/v5/pkg/compression/compression.go b/vendor/github.com/containers/image/v5/pkg/compression/compression.go index 04d231c6d6..d28ae46fff 100644 --- a/vendor/github.com/containers/image/v5/pkg/compression/compression.go +++ b/vendor/github.com/containers/image/v5/pkg/compression/compression.go @@ -27,12 +27,15 @@ var ( Xz = internal.NewAlgorithm("Xz", []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, XzDecompressor, xzCompressor) // Zstd compression. Zstd = internal.NewAlgorithm("zstd", []byte{0x28, 0xb5, 0x2f, 0xfd}, ZstdDecompressor, zstdCompressor) + // Zstd compression. + ChunkedZstd = internal.NewAlgorithm("zstd:chunked", []byte{0x28, 0xb5, 0x2f, 0xfd}, ZstdDecompressor, chunkedZstdCompressor) compressionAlgorithms = map[string]Algorithm{ - Gzip.Name(): Gzip, - Bzip2.Name(): Bzip2, - Xz.Name(): Xz, - Zstd.Name(): Zstd, + Gzip.Name(): Gzip, + Bzip2.Name(): Bzip2, + Xz.Name(): Xz, + Zstd.Name(): Zstd, + ChunkedZstd.Name(): ChunkedZstd, } ) diff --git a/vendor/github.com/containers/image/v5/pkg/compression/zstd.go b/vendor/github.com/containers/image/v5/pkg/compression/zstd.go index 962fe96764..867d3ddbc0 100644 --- a/vendor/github.com/containers/image/v5/pkg/compression/zstd.go +++ b/vendor/github.com/containers/image/v5/pkg/compression/zstd.go @@ -40,7 +40,7 @@ func zstdWriter(dest io.Writer) (io.WriteCloser, error) { return zstd.NewWriter(dest) } -func zstdWriterWithLevel(dest io.Writer, level int) (io.WriteCloser, error) { +func zstdWriterWithLevel(dest io.Writer, level int) (*zstd.Encoder, error) { el := zstd.EncoderLevelFromZstd(level) return zstd.NewWriter(dest, zstd.WithEncoderLevel(el)) } diff --git a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go index 4001b65b61..fadfe1a356 100644 --- a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go +++ b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go @@ -8,8 +8,8 @@ import ( "github.com/BurntSushi/toml" "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/types" - "github.com/containers/storage/pkg/homedir" "github.com/containers/storage/pkg/lockfile" + "github.com/docker/docker/pkg/homedir" "github.com/pkg/errors" ) diff --git a/vendor/github.com/containers/image/v5/storage/chunked_zstd.go b/vendor/github.com/containers/image/v5/storage/chunked_zstd.go new file mode 100644 index 
0000000000..19a8231715 --- /dev/null +++ b/vendor/github.com/containers/image/v5/storage/chunked_zstd.go @@ -0,0 +1,835 @@ +// +build !containers_image_storage_stub + +package storage + +import ( + archivetar "archive/tar" + "context" + "crypto/sha256" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "mime" + "mime/multipart" + "os" + "path/filepath" + "sort" + "strings" + "sync" + "syscall" + "time" + + "github.com/containers/image/v5/docker" + "github.com/containers/image/v5/pkg/compression" + "github.com/containers/image/v5/types" + "github.com/containers/storage/drivers" + driversCopy "github.com/containers/storage/drivers/copy" + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/system" + securejoin "github.com/cyphar/filepath-securejoin" + "github.com/klauspost/compress/zstd" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/vbatts/tar-split/archive/tar" + "golang.org/x/sys/unix" +) + +const ( + maxNumberMissingChunks = 1024 + newFileFlags = (unix.O_CREAT | unix.O_TRUNC | unix.O_WRONLY | unix.O_EXCL) +) + +type DirectDiffInput struct { + stream types.ImageSourceSeekable + store *storageImageDestination + manifest []byte + ctx context.Context + srcInfo types.BlobInfo + layersMetadata map[string][]compression.FileMetadata + layersTarget map[string]string +} + +func timeToTimespec(time time.Time) (ts syscall.Timespec) { + if time.IsZero() { + // Return UTIME_OMIT special value + ts.Sec = 0 + ts.Nsec = ((1 << 30) - 2) + return + } + return syscall.NsecToTimespec(time.UnixNano()) +} + +func copyFileContent(src, destFile, root string, dirfd int, missingDirsMode, mode os.FileMode) (*os.File, int64, error) { + st, err := os.Stat(src) + if err != nil { + return nil, -1, err + } + + copyWithFileRange, copyWithFileClone := true, true + + // If the destination file already exists, we shouldn't blow it away + dstFile, err := openFileUnderRoot(destFile, root, dirfd, newFileFlags, mode) + if err != nil { + return nil, -1, err + } + + err = driversCopy.CopyRegularToFile(src, dstFile, st, ©WithFileRange, ©WithFileClone) + if err != nil { + dstFile.Close() + return nil, -1, err + } + return dstFile, st.Size(), err +} + +func prepareOtherLayersCache(layersMetadata map[string][]compression.FileMetadata) map[string]map[string]*compression.FileMetadata { + maps := make(map[string]map[string]*compression.FileMetadata) + + for layerID, v := range layersMetadata { + r := make(map[string]*compression.FileMetadata) + for i := range v { + r[v[i].Checksum] = &v[i] + } + maps[layerID] = r + } + return maps +} + +func findFileInOtherLayers(file compression.FileMetadata, root string, dirfd int, layersMetadata map[string]map[string]*compression.FileMetadata, layersTarget map[string]string, missingDirsMode, mode os.FileMode) (*os.File, int64, error) { + // this is ugly, needs to be indexed + for layerID, checksums := range layersMetadata { + m, found := checksums[file.Checksum] + if !found { + continue + } + + source, ok := layersTarget[layerID] + if !ok { + continue + } + + sourceFile, err := securejoin.SecureJoin(source, m.Name) + if err != nil { + continue + } + + dstFile, written, err := copyFileContent(sourceFile, file.Name, root, dirfd, missingDirsMode, mode) + if err != nil { + continue + } + return dstFile, written, nil + } + return nil, 0, nil +} + +func findHostFile(file compression.FileMetadata, root string, dirfd int, missingDirsMode, mode os.FileMode) (*os.File, int64, error) { + 
hostContentChecksum := sha256.New() + + sourceFile := filepath.Join("/", file.Name) + + f, err := os.Open(sourceFile) + if err != nil { + return nil, 0, nil + } + defer f.Close() + + if _, err := io.Copy(hostContentChecksum, f); err != nil { + return nil, 0, nil + } + + checksum := fmt.Sprintf("%x", hostContentChecksum.Sum(nil)) + if checksum != file.Checksum { + return nil, 0, nil + } + + dstFile, written, err := copyFileContent(sourceFile, file.Name, root, dirfd, missingDirsMode, mode) + if err != nil { + return nil, 0, nil + } + return dstFile, written, nil +} + +func maybeDoIDRemap(manifest []compression.FileMetadata, options *archive.TarOptions) error { + if options.ChownOpts == nil && len(options.UIDMaps) == 0 || len(options.GIDMaps) == 0 { + return nil + } + + idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) + + for i := range manifest { + if options.ChownOpts != nil { + manifest[i].Uid = options.ChownOpts.UID + manifest[i].Gid = options.ChownOpts.GID + } else { + pair := idtools.IDPair{ + UID: manifest[i].Uid, + GID: manifest[i].Gid, + } + var err error + manifest[i].Uid, manifest[i].Gid, err = idMappings.ToContainer(pair) + if err != nil { + return err + } + } + } + return nil +} + +type bufferedNetworkReaderBuffer struct { + data []byte + len int + consumed int + err error +} + +type bufferedNetworkReader struct { + stream io.Reader + emptyBuffer chan *bufferedNetworkReaderBuffer + readyBuffer chan *bufferedNetworkReaderBuffer + terminate chan bool + current *bufferedNetworkReaderBuffer + mutex sync.Mutex + gotEOF bool +} + +// handleBufferedNetworkReader runs in a goroutine +func handleBufferedNetworkReader(br *bufferedNetworkReader) { + defer close(br.readyBuffer) + for { + select { + case b := <-br.emptyBuffer: + b.len, b.err = br.stream.Read(b.data) + br.readyBuffer <- b + if b.err != nil { + return + } + case <-br.terminate: + return + } + } +} + +func (n *bufferedNetworkReader) Close() { + close(n.terminate) + close(n.emptyBuffer) +} + +func (n *bufferedNetworkReader) read(p []byte) (int, error) { + if n.current != nil { + copied := copy(p, n.current.data[n.current.consumed:n.current.len]) + n.current.consumed += copied + if n.current.consumed == n.current.len { + n.emptyBuffer <- n.current + n.current = nil + } + if copied > 0 { + return copied, nil + } + } + if n.gotEOF { + return 0, io.EOF + } + + var b *bufferedNetworkReaderBuffer + + select { + case b = <-n.readyBuffer: + if b.err != nil { + if b.err != io.EOF { + return b.len, b.err + } + n.gotEOF = true + } + b.consumed = 0 + n.current = b + return n.read(p) + case <-n.terminate: + return 0, io.EOF + } +} + +func (n *bufferedNetworkReader) Read(p []byte) (int, error) { + n.mutex.Lock() + defer n.mutex.Unlock() + + return n.read(p) +} + +func makeBufferedNetworkReader(stream io.Reader, nBuffers, bufferSize uint) *bufferedNetworkReader { + br := bufferedNetworkReader{ + stream: stream, + emptyBuffer: make(chan *bufferedNetworkReaderBuffer, nBuffers), + readyBuffer: make(chan *bufferedNetworkReaderBuffer, nBuffers), + terminate: make(chan bool), + } + + go func() { + handleBufferedNetworkReader(&br) + }() + + for i := uint(0); i < nBuffers; i++ { + b := bufferedNetworkReaderBuffer{ + data: make([]byte, bufferSize), + } + br.emptyBuffer <- &b + } + + return &br +} + +type missingFile struct { + File *compression.FileMetadata + Gap int64 +} + +func (m missingFile) Length() int64 { + return m.File.EndOffset - m.File.StartOffset +} + +type missingChunk struct { + RawChunk types.ImageSourceChunk 
+ Files []missingFile +} + +func setPathAttrs(dest string, mode os.FileMode, metadata *compression.FileMetadata, options *archive.TarOptions) error { + return nil + t, err := compression.TypeToTarType(metadata.Type) + if err != nil { + return err + } + if t == tar.TypeSymlink { + return nil + } + + if err := os.Lchown(dest, metadata.Uid, metadata.Gid); err != nil { + if !options.IgnoreChownErrors { + return err + } + } + + for k, v := range metadata.Xattrs { + data, err := base64.StdEncoding.DecodeString(v) + if err != nil { + return err + } + if err := system.Lsetxattr(dest, k, data, 0); err != nil { + return err + } + } + + ts := []syscall.Timespec{timeToTimespec(metadata.AccessTime), timeToTimespec(metadata.ModTime)} + if err := system.LUtimesNano(dest, ts); err != nil && err != system.ErrNotSupportedPlatform { + return err + } + + // Files must be already created with the correct mode, but we must make sure the setuid/setgid + // is set. + if t == tar.TypeDir || mode&(os.ModeSetuid|os.ModeSetgid) != 0 { + if err := os.Chmod(dest, mode); err != nil { + return err + } + } + return nil +} + +func setFileAttrs(file *os.File, mode os.FileMode, metadata *compression.FileMetadata, options *archive.TarOptions) error { + if file == nil || file.Fd() < 0 { + return errors.Errorf("invalid file") + } + path := fmt.Sprintf("/proc/self/fd/%d", file.Fd()) + return setPathAttrs(path, mode, metadata, options) +} + +func openFileUnderRoot(name, root string, dirfd int, flags uint64, mode os.FileMode) (*os.File, error) { + how := unix.OpenHow{ + Flags: flags, + Mode: uint64(mode & 07777), + Resolve: unix.RESOLVE_IN_ROOT, + } + + fd, err := unix.Openat2(dirfd, name, &how) + if err == nil { + return os.NewFile(uintptr(fd), name), nil + } + path, err := securejoin.SecureJoin(root, name) + if err != nil { + return nil, err + } + + return os.OpenFile(path, int(flags), mode) +} + +func createFileFromZstdStream(dest string, dirfd int, reader io.Reader, missingDirsMode, mode os.FileMode, metadata *compression.FileMetadata, options *archive.TarOptions) (err error) { + file, err := openFileUnderRoot(metadata.Name, dest, dirfd, newFileFlags, mode) + if err != nil { + return err + } + defer func() { + err2 := file.Close() + if err == nil { + err = err2 + } + }() + + z, err := zstd.NewReader(reader) + if err != nil { + return err + } + defer z.Close() + + _, err = z.WriteTo(file) + if err != nil { + return err + } + return setFileAttrs(file, mode, metadata, options) +} + +func storeMissingFiles(stream io.Reader, contentType string, dest string, dirfd int, missingChunks []missingChunk, missingDirsMode os.FileMode, options *archive.TarOptions) error { + mediaType, params, err := mime.ParseMediaType(contentType) + if err != nil { + return err + } + if !strings.HasPrefix(mediaType, "multipart/") { + if len(missingChunks) != 1 { + return errors.Errorf("invalid response, no multipart for multiple chunks requested") + } + + for _, mf := range missingChunks[0].Files { + if mf.Gap > 0 { + limitReader := io.LimitReader(stream, mf.Gap) + _, err := io.Copy(ioutil.Discard, limitReader) + if err != nil { + return err + } + continue + } + limitReader := io.LimitReader(stream, mf.Length()) + + if err := createFileFromZstdStream(dest, dirfd, limitReader, missingDirsMode, os.FileMode(mf.File.Mode), mf.File, options); err != nil { + return err + } + } + return nil + } + + // multipart response + boundary, found := params["boundary"] + if !found { + return errors.Errorf("boundary param not found") + } + + buffered := 
makeBufferedNetworkReader(stream, 64, 16384) + defer buffered.Close() + mr := multipart.NewReader(buffered, boundary) + + for mc := 0; ; mc++ { + p, err := mr.NextPart() + if err == io.EOF { + break + } + if err != nil { + return err + } + + if mc == len(missingChunks) { + return errors.Errorf("too many chunks returned") + } + + for _, mf := range missingChunks[mc].Files { + if mf.Gap > 0 { + limitReader := io.LimitReader(p, mf.Gap) + _, err := io.Copy(ioutil.Discard, limitReader) + if err != nil { + return err + } + continue + } + + limitReader := io.LimitReader(p, mf.Length()) + + if err := createFileFromZstdStream(dest, dirfd, limitReader, missingDirsMode, os.FileMode(mf.File.Mode), mf.File, options); err != nil { + p.Close() + return err + } + } + p.Close() + } + return nil +} + +func mergeMissingChunks(missingChunks []missingChunk, target int) []missingChunk { + if len(missingChunks) <= target { + return missingChunks + } + + getGap := func(missingChunks []missingChunk, i int) int { + prev := missingChunks[i-1].RawChunk.Offset + missingChunks[i-1].RawChunk.Length + return int(missingChunks[i].RawChunk.Offset - prev) + } + + // this implementation doesn't account for duplicates, so it could merge + // more than necessary to reach the specified target. Since target itself + // is a heuristic value, it doesn't matter. + var gaps []int + for i := 1; i < len(missingChunks); i++ { + gaps = append(gaps, getGap(missingChunks, i)) + } + sort.Ints(gaps) + + toShrink := len(missingChunks) - target + targetValue := gaps[toShrink-1] + + newMissingChunks := missingChunks[0:1] + for i := 1; i < len(missingChunks); i++ { + gap := getGap(missingChunks, i) + if gap > targetValue { + newMissingChunks = append(newMissingChunks, missingChunks[i]) + } else { + prev := &newMissingChunks[len(newMissingChunks)-1] + gapFile := missingFile{ + Gap: int64(gap), + } + prev.RawChunk.Length += uint64(gap) + missingChunks[i].RawChunk.Length + prev.Files = append(append(prev.Files, gapFile), missingChunks[i].Files...) + } + } + + return newMissingChunks +} + +func retrieveMissingFiles(input *DirectDiffInput, dest string, dirfd int, missingChunks []missingChunk, missingDirsMode os.FileMode, options *archive.TarOptions) error { + var chunksToRequest []types.ImageSourceChunk + for _, c := range missingChunks { + chunksToRequest = append(chunksToRequest, c.RawChunk) + } + + // There are some missing files. Prepare a multirange request for the missing chunks. + var stream io.ReadCloser + var contentType string + var err error + + for stream == nil { + stream, contentType, err = input.stream.GetBlobAt(input.ctx, input.srcInfo, chunksToRequest) + if err != nil { + if _, ok := err.(docker.ErrBadRequest); ok { + requested := len(missingChunks) + // If the server cannot handle at least 64 chunks in a single request, just give up. + if requested < 64 { + return err + } + + // Merge more chunks to request + missingChunks = mergeMissingChunks(missingChunks, requested/2) + continue + } + return err + } + } + + if err := storeMissingFiles(stream, contentType, dest, dirfd, missingChunks, missingDirsMode, options); err != nil { + stream.Close() + return err + } + if err := stream.Close(); err != nil { + return err + } + + return nil +} + +func safeMkdir(target string, dirfd int, mode os.FileMode, metadata *compression.FileMetadata, options *archive.TarOptions) error { + parent := filepath.Dir(metadata.Name) + base := filepath.Base(metadata.Name) + + parentFd := dirfd + if parent != "." 
{ + parentFile, err := openFileUnderRoot(parent, target, dirfd, unix.O_DIRECTORY|unix.O_RDONLY, 0) + if err != nil { + return err + } + defer parentFile.Close() + parentFd = int(parentFile.Fd()) + } + + if err := unix.Mkdirat(parentFd, base, uint32(mode)); err != nil { + return err + } + + file, err := openFileUnderRoot(metadata.Name, target, dirfd, unix.O_RDONLY, 0) + if err != nil { + return err + } + defer file.Close() + + return setFileAttrs(file, mode, metadata, options) +} + +func safeLink(target string, dirfd int, mode os.FileMode, metadata *compression.FileMetadata, options *archive.TarOptions) error { + sourceFile, err := openFileUnderRoot(metadata.Linkname, target, dirfd, unix.O_RDONLY, 0) + if err != nil { + return err + } + defer sourceFile.Close() + + destDir, destBase := filepath.Dir(metadata.Name), filepath.Base(metadata.Name) + destDirFd := dirfd + if destDir != "." { + f, err := openFileUnderRoot(destDir, target, dirfd, unix.O_RDONLY, 0) + if err != nil { + return err + } + defer f.Close() + destDirFd = int(f.Fd()) + } + + err = unix.Linkat(int(sourceFile.Fd()), "", destDirFd, destBase, unix.AT_EMPTY_PATH) + if err != nil { + return err + } + + newFile, err := openFileUnderRoot(metadata.Name, target, dirfd, unix.O_WRONLY, 0) + if err != nil { + return err + } + defer newFile.Close() + + return setFileAttrs(newFile, mode, metadata, options) +} + +func safeSymlink(target string, dirfd int, mode os.FileMode, metadata *compression.FileMetadata, options *archive.TarOptions) error { + destDir, destBase := filepath.Dir(metadata.Name), filepath.Base(metadata.Name) + destDirFd := dirfd + if destDir != "." { + f, err := openFileUnderRoot(destDir, target, dirfd, unix.O_RDONLY, 0) + if err != nil { + return err + } + defer f.Close() + destDirFd = int(f.Fd()) + } + + return unix.Symlinkat(metadata.Linkname, destDirFd, destBase) +} + +type whiteoutHandler struct { + Dirfd int + Root string +} + +func (d whiteoutHandler) Setxattr(path, name string, value []byte) error { + file, err := openFileUnderRoot(name, d.Root, d.Dirfd, unix.O_WRONLY, 0) + if err != nil { + return err + } + defer file.Close() + + return unix.Fsetxattr(int(file.Fd()), name, value, 0) +} + +func (d whiteoutHandler) Mknod(path string, mode uint32, dev int) error { + dir := filepath.Dir(path) + base := filepath.Base(path) + + dirfd := d.Dirfd + if dir != "" { + dir, err := openFileUnderRoot(dir, d.Root, d.Dirfd, unix.O_RDONLY, 0) + if err != nil { + return err + } + defer dir.Close() + + dirfd = int(dir.Fd()) + } + + return unix.Mknodat(dirfd, base, mode, dev) +} + +func checkChownErr(err error, name string, uid, gid int) error { + if e, ok := err.(*os.PathError); ok && e.Err == syscall.EINVAL { + return errors.Wrapf(err, "potentially insufficient UIDs or GIDs available in user namespace (requested %d:%d for %s): Check /etc/subuid and /etc/subgid", uid, gid, name) + } + return err +} + +func (d whiteoutHandler) Chown(path string, uid, gid int) error { + file, err := openFileUnderRoot(path, d.Root, d.Dirfd, unix.O_RDONLY, 0) + if err != nil { + return err + } + defer file.Close() + + if err := unix.Fchownat(int(file.Fd()), "", uid, gid, unix.AT_EMPTY_PATH); err != nil { + var stat unix.Stat_t + if unix.Fstat(int(file.Fd()), &stat) == nil { + if stat.Uid == uint32(uid) && stat.Gid == uint32(gid) { + return nil + } + } + return checkChownErr(err, path, uid, gid) + } + return nil +} + +func chunkedZstdDiffer(data interface{}, dest string, options *archive.TarOptions) (graphdriver.DriverWithDifferOutput, error) { + output := 
graphdriver.DriverWithDifferOutput{} + + input, ok := data.(*DirectDiffInput) + if !ok { + return output, errors.Errorf("internal error") + } + + // Generate the manifest + var manifest []compression.FileMetadata + if err := json.Unmarshal(input.manifest, &manifest); err != nil { + return output, err + } + + whiteoutConverter := archive.GetWhiteoutConverter(options.WhiteoutFormat, options.WhiteoutData) + + var missingChunks []missingChunk + + if err := maybeDoIDRemap(manifest, options); err != nil { + return output, err + } + + dirfd, err := unix.Open(dest, unix.O_RDONLY, 0) + if err != nil { + return output, err + } + defer unix.Close(dirfd) + + otherLayersCache := prepareOtherLayersCache(input.layersMetadata) + + missingDirsMode := os.FileMode(0700) + + missingChunksSize, totalChunksSize := int64(0), int64(0) + for i, r := range manifest { + mode := os.FileMode(r.Mode) + + r.Name = filepath.Clean(r.Name) + r.Linkname = filepath.Clean(r.Linkname) + + t, err := compression.TypeToTarType(r.Type) + if err != nil { + return output, err + } + if whiteoutConverter != nil { + hdr := archivetar.Header{ + Typeflag: t, + Name: r.Name, + Linkname: r.Linkname, + Size: r.Size, + Mode: r.Mode, + Uid: r.Uid, + Gid: r.Gid, + } + handler := whiteoutHandler{ + Dirfd: dirfd, + Root: dest, + } + writeFile, err := whiteoutConverter.ConvertReadWithHandler(&hdr, r.Name, &handler) + if err != nil { + return output, err + } + if !writeFile { + continue + } + } + switch t { + case tar.TypeReg: + // Create directly empty files. + if r.Size == 0 { + // Used to have a scope for cleanup. + createEmptyFile := func() error { + file, err := openFileUnderRoot(r.Name, dest, dirfd, newFileFlags, mode) + if err != nil { + return err + } + defer file.Close() + if err := setFileAttrs(file, mode, &r, options); err != nil { + return err + } + return nil + } + if err := createEmptyFile(); err != nil { + return output, err + } + continue + } + case tar.TypeDir: + if err := safeMkdir(dest, dirfd, mode, &r, options); err != nil { + return output, err + } + continue + case tar.TypeLink: + if err := safeLink(dest, dirfd, mode, &r, options); err != nil { + return output, err + } + continue + case tar.TypeSymlink: + if err := safeSymlink(dest, dirfd, mode, &r, options); err != nil { + return output, err + } + continue + } + + totalChunksSize += r.Size + + dstFile, _, err := findFileInOtherLayers(r, dest, dirfd, otherLayersCache, input.layersTarget, missingDirsMode, mode) + if err != nil { + return output, err + } + if dstFile != nil { + if err := setFileAttrs(dstFile, mode, &r, options); err != nil { + dstFile.Close() + return output, err + } + dstFile.Close() + continue + } + + dstFile, _, err = findHostFile(r, dest, dirfd, missingDirsMode, mode) + if err != nil { + return output, err + } + if dstFile != nil { + if err := setFileAttrs(dstFile, mode, &r, options); err != nil { + dstFile.Close() + return output, err + } + dstFile.Close() + continue + } + + missingChunksSize += r.Size + if t == tar.TypeReg { + rawChunk := types.ImageSourceChunk{ + Offset: uint64(r.StartOffset), + Length: uint64(r.EndOffset - r.StartOffset), + } + file := missingFile{ + File: &manifest[i], + } + missingChunks = append(missingChunks, missingChunk{ + RawChunk: rawChunk, + Files: []missingFile{ + file, + }, + }) + } + } + // There are some missing files. Prepare a multirange request for the missing chunks. 
+ if len(missingChunks) > 0 { + missingChunks = mergeMissingChunks(missingChunks, maxNumberMissingChunks) + if err := retrieveMissingFiles(input, dest, dirfd, missingChunks, missingDirsMode, options); err != nil { + return output, err + } + } + + if totalChunksSize > 0 { + logrus.Debugf("Missing %d bytes out of %d (%.2f %%)", missingChunksSize, totalChunksSize, float32(missingChunksSize*100.0)/float32(totalChunksSize)) + } + return output, nil +} diff --git a/vendor/github.com/containers/image/v5/storage/storage_image.go b/vendor/github.com/containers/image/v5/storage/storage_image.go index d24f8bbee5..bffc73ed63 100644 --- a/vendor/github.com/containers/image/v5/storage/storage_image.go +++ b/vendor/github.com/containers/image/v5/storage/storage_image.go @@ -20,8 +20,10 @@ import ( "github.com/containers/image/v5/internal/tmpdir" "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/pkg/blobinfocache/none" + "github.com/containers/image/v5/pkg/compression" "github.com/containers/image/v5/types" "github.com/containers/storage" + "github.com/containers/storage/drivers" "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/ioutils" digest "github.com/opencontainers/go-digest" @@ -65,6 +67,12 @@ type storageImageDestination struct { filenames map[digest.Digest]string // Mapping from layer blobsums to names of files we used to hold them SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice SignaturesSizes map[digest.Digest][]int `json:"signatures-sizes,omitempty"` // Sizes of each manifest's signature slice + + LayersMetadata map[string][]compression.FileMetadata `json:"-"` + LayersTarget map[string]string `json:"-"` + + diffOutputs map[digest.Digest]*graphdriver.DriverWithDifferOutput `json:"-"` + diffManifests map[digest.Digest]string `json:"-"` } type storageImageCloser struct { @@ -356,6 +364,8 @@ func newImageDestination(sys *types.SystemContext, imageRef storageReference) (* filenames: make(map[digest.Digest]string), SignatureSizes: []int{}, SignaturesSizes: make(map[digest.Digest][]int), + diffOutputs: make(map[digest.Digest]*graphdriver.DriverWithDifferOutput), + diffManifests: make(map[digest.Digest]string), } return image, nil } @@ -368,6 +378,12 @@ func (s *storageImageDestination) Reference() types.ImageReference { // Close cleans up the temporary directory. 
func (s *storageImageDestination) Close() error { + for _, v := range s.diffOutputs { + if v.Target != "" { + os.RemoveAll(v.Target) + } + } + return os.RemoveAll(s.directory) } @@ -459,6 +475,118 @@ func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader, }, nil } +type seekableToReaderAt struct { + stream types.ImageSourceSeekable + srcInfo types.BlobInfo + ctx context.Context +} + +func (s *seekableToReaderAt) ReadAt(p []byte, off int64) (n int, err error) { + chunk := types.ImageSourceChunk{ + Offset: uint64(off), + Length: uint64(len(p)), + } + reader, _, err := s.stream.GetBlobAt(s.ctx, s.srcInfo, []types.ImageSourceChunk{chunk}) + if err != nil { + return 0, err + } + defer reader.Close() + + total := 0 + for { + n, err := reader.Read(p[total:]) + if err != nil && err != io.EOF { + return 0, err + } + total += n + if err == io.EOF || total == len(p) { + break + } + } + return total, nil +} + +func (s *storageImageDestination) getLayersCache() (map[string][]compression.FileMetadata, map[string]string, error) { + s.putBlobMutex.Lock() + defer s.putBlobMutex.Unlock() + + if s.LayersMetadata != nil { + return s.LayersMetadata, s.LayersTarget, nil + } + + allLayers, err := s.imageRef.transport.store.Layers() + if err != nil { + return nil, nil, err + } + + layersMetadata := make(map[string][]compression.FileMetadata) + layersTarget := make(map[string]string) + for _, r := range allLayers { + if r.Metadata == "" { + continue + } + var metadataLayer []compression.FileMetadata + if err := json.Unmarshal([]byte(r.Metadata), &metadataLayer); err != nil { + continue + } + layersMetadata[r.ID] = metadataLayer + target, err := s.imageRef.transport.store.DifferTarget(r.ID) + if err != nil { + return nil, nil, err + } + layersTarget[r.ID] = target + } + s.LayersMetadata = layersMetadata + s.LayersTarget = layersTarget + + return layersMetadata, layersTarget, nil +} + +func (s *storageImageDestination) PutBlobPartial(ctx context.Context, stream types.ImageSourceSeekable, srcInfo types.BlobInfo, cache types.BlobInfoCache) (types.BlobInfo, error) { + blobDigest := srcInfo.Digest + + readerAt := seekableToReaderAt{ + stream: stream, + srcInfo: srcInfo, + ctx: ctx, + } + + data, err := compression.ReadChunkedZstdManifest(&readerAt, srcInfo.Size) + if err != nil { + return srcInfo, err + } + + layersMetadata, layersTarget, err := s.getLayersCache() + if err != nil { + return srcInfo, err + } + + input := DirectDiffInput{ + stream: stream, + store: s, + manifest: data, + ctx: ctx, + srcInfo: srcInfo, + layersMetadata: layersMetadata, + layersTarget: layersTarget, + } + + out, err := s.imageRef.transport.store.ApplyDiffWithDiffer("", nil, &input, chunkedZstdDiffer) + if err != nil { + return srcInfo, err + } + + s.putBlobMutex.Lock() + s.blobDiffIDs[blobDigest] = blobDigest + s.fileSizes[blobDigest] = 0 + s.filenames[blobDigest] = "" + s.diffOutputs[blobDigest] = out + s.diffManifests[blobDigest] = string(data) + s.putBlobMutex.Unlock() + + return srcInfo, nil +} + // TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). // info.Digest must not be empty. 
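PutBlobPartial above locates the per-file manifest by reading the fixed 32-byte footer that writeZstdChunkedManifest appends as a zstd skippable frame. A minimal standalone sketch of that footer layout, built and parsed with encoding/binary to mirror ReadChunkedZstdManifest (illustrative only, not part of the patch):

package main

import (
	"encoding/binary"
	"fmt"
)

// zstd skippable frame magic, as in chunked_zstd.go (0x184D2A50, little endian).
var skippableFrameMagic = []byte{0x50, 0x2a, 0x4d, 0x18}

// buildFooter lays out the last 32 bytes of a chunked blob:
// magic (4) | frame length = 24 (4) | manifest offset (8) |
// compressed manifest length (8) | uncompressed manifest length (8),
// all little endian, matching writeZstdChunkedManifest.
func buildFooter(offset, length, lengthUncompressed uint64) []byte {
	footer := make([]byte, 32)
	copy(footer[0:4], skippableFrameMagic)
	binary.LittleEndian.PutUint32(footer[4:8], 24)
	binary.LittleEndian.PutUint64(footer[8:16], offset)
	binary.LittleEndian.PutUint64(footer[16:24], length)
	binary.LittleEndian.PutUint64(footer[24:32], lengthUncompressed)
	return footer
}

func main() {
	footer := buildFooter(4096, 512, 2048)
	// The same fields ReadChunkedZstdManifest recovers before fetching the manifest chunk.
	fmt.Println("offset:", binary.LittleEndian.Uint64(footer[8:16]))
	fmt.Println("compressed length:", binary.LittleEndian.Uint64(footer[16:24]))
	fmt.Println("uncompressed length:", binary.LittleEndian.Uint64(footer[24:32]))
}

Because the footer has a fixed size and sits at the very end of the blob, a single ranged read of the last 32 bytes is enough to locate the compressed manifest before any file data is fetched.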
@@ -682,6 +810,31 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t lastLayer = layer.ID continue } + + if diffOutput := s.diffOutputs[blob.Digest]; diffOutput != nil { + layer, err := s.imageRef.transport.store.CreateLayer(id, lastLayer, nil, "", false, nil) + if err != nil { + return err + } + + // FIXME: what to do with the uncompressed digest? + diffOutput.UncompressedDigest = blob.Digest + + if err := s.imageRef.transport.store.ApplyDiffFromStagingDirectory(layer.ID, diffOutput.Target, diffOutput, nil); err != nil { + s.imageRef.transport.store.Delete(layer.ID) + return err + } + + manifest := s.diffManifests[blob.Digest] + + if err := s.imageRef.transport.store.SetMetadata(layer.ID, string(manifest)); err != nil { + s.imageRef.transport.store.Delete(layer.ID) + return errors.Wrapf(err, "error applying diff for layer with blob %q", blob.Digest) + } + lastLayer = layer.ID + continue + } + // Check if we previously cached a file with that blob's contents. If we didn't, // then we need to read the desired contents from a layer. filename, ok := s.filenames[blob.Digest] diff --git a/vendor/github.com/containers/image/v5/types/types.go b/vendor/github.com/containers/image/v5/types/types.go index 3c5126b4e5..c39e7789e8 100644 --- a/vendor/github.com/containers/image/v5/types/types.go +++ b/vendor/github.com/containers/image/v5/types/types.go @@ -331,6 +331,24 @@ type ImageDestination interface { Commit(ctx context.Context, unparsedToplevel UnparsedImage) error } +// ImageSourceChunk is a portion of a blob. +type ImageSourceChunk struct { + Offset uint64 + Length uint64 +} + +// ImageSourceSeekable is an image source that permits to fetch chunks of the entire blob. +type ImageSourceSeekable interface { + // GetBlobAt returns a stream for the specified blob. + GetBlobAt(context.Context, BlobInfo, []ImageSourceChunk) (io.ReadCloser, string, error) +} + +// ImageDestinationPartial is a service to store a blob by requesting the missing chunks to a ImageSourceSeekable. +type ImageDestinationPartial interface { + // PutBlobPartial writes contents of stream and returns data representing the result. + PutBlobPartial(ctx context.Context, stream ImageSourceSeekable, srcInfo BlobInfo, cache BlobInfoCache) (BlobInfo, error) +} + // ManifestTypeRejectedError is returned by ImageDestination.PutManifest if the destination is in principle available, // refuses specifically this manifest type, but may accept a different manifest type. type ManifestTypeRejectedError struct { // We only use a struct to allow a type assertion, without limiting the contents of the error otherwise. diff --git a/vendor/github.com/containers/image/v5/version/version.go b/vendor/github.com/containers/image/v5/version/version.go index 14e553c9f1..28b5c9bdd7 100644 --- a/vendor/github.com/containers/image/v5/version/version.go +++ b/vendor/github.com/containers/image/v5/version/version.go @@ -11,7 +11,7 @@ const ( VersionPatch = 1 // VersionDev indicates development branch. Releases will be empty string. - VersionDev = "" + VersionDev = "-dev" ) // Version is the specification version that the package types support. 
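The ImageSourceChunk and ImageSourceSeekable types above describe a blob as a set of (offset, length) ranges; on the docker transport, GetBlobAt turns such a set into one multi-range HTTP request. A self-contained sketch of that header construction (the chunk type here is a local stand-in for types.ImageSourceChunk):

package main

import (
	"fmt"
	"strings"
)

// chunk stands in for types.ImageSourceChunk from this patch: a byte range
// inside a blob, expressed as offset + length.
type chunk struct {
	Offset uint64
	Length uint64
}

// rangeHeader renders the chunks as an HTTP Range header value, e.g.
// "bytes=0-99,4096-8191"; the end offset is inclusive, hence Offset+Length-1.
func rangeHeader(chunks []chunk) string {
	vals := make([]string, 0, len(chunks))
	for _, c := range chunks {
		vals = append(vals, fmt.Sprintf("%d-%d", c.Offset, c.Offset+c.Length-1))
	}
	return "bytes=" + strings.Join(vals, ",")
}

func main() {
	fmt.Println(rangeHeader([]chunk{{Offset: 0, Length: 100}, {Offset: 4096, Length: 4096}}))
	// prints: bytes=0-99,4096-8191
}

A 206 Partial Content reply can carry either a single body or a multipart/byteranges body, which is why storeMissingFiles in the storage differ branches on the Content-Type returned alongside the stream.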
diff --git a/vendor/github.com/containers/storage/VERSION b/vendor/github.com/containers/storage/VERSION
index f9e8384bb6..4c37a61caf 100644
--- a/vendor/github.com/containers/storage/VERSION
+++ b/vendor/github.com/containers/storage/VERSION
@@ -1 +1 @@
-1.24.1
+1.24.1-dev
diff --git a/vendor/github.com/containers/storage/drivers/copy/copy_linux.go b/vendor/github.com/containers/storage/drivers/copy/copy_linux.go
index 5147b01d6b..e371078962 100644
--- a/vendor/github.com/containers/storage/drivers/copy/copy_linux.go
+++ b/vendor/github.com/containers/storage/drivers/copy/copy_linux.go
@@ -38,20 +38,14 @@ const (
 	Hardlink
 )
 
-func copyRegular(srcPath, dstPath string, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error {
+// CopyRegularToFile copies the content of a file to another
+func CopyRegularToFile(srcPath string, dstFile *os.File, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error { // nolint: golint
 	srcFile, err := os.Open(srcPath)
 	if err != nil {
 		return err
 	}
 	defer srcFile.Close()
 
-	// If the destination file already exists, we shouldn't blow it away
-	dstFile, err := os.OpenFile(dstPath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, fileinfo.Mode())
-	if err != nil {
-		return err
-	}
-	defer dstFile.Close()
-
 	if *copyWithFileClone {
 		_, _, err = unix.Syscall(unix.SYS_IOCTL, dstFile.Fd(), C.FICLONE, srcFile.Fd())
 		if err == nil {
@@ -76,6 +70,18 @@
 	return legacyCopy(srcFile, dstFile)
 }
 
+// CopyRegular copies the content of a file to another
+func CopyRegular(srcPath, dstPath string, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error { // nolint: golint
+	// If the destination file already exists, we shouldn't blow it away
+	dstFile, err := os.OpenFile(dstPath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, fileinfo.Mode())
+	if err != nil {
+		return err
+	}
+	defer dstFile.Close()
+
+	return CopyRegularToFile(srcPath, dstFile, fileinfo, copyWithFileRange, copyWithFileClone)
+}
+
 func doCopyWithFileRange(srcFile, dstFile *os.File, fileinfo os.FileInfo) error {
 	amountLeftToCopy := fileinfo.Size()
 
@@ -164,7 +170,7 @@ func DirCopy(srcDir, dstDir string, copyMode Mode, copyXattrs bool) error {
 				return err2
 			}
 		} else {
-			if err2 := copyRegular(srcPath, dstPath, f, &copyWithFileRange, &copyWithFileClone); err2 != nil {
+			if err2 := CopyRegular(srcPath, dstPath, f, &copyWithFileRange, &copyWithFileClone); err2 != nil {
 				return err2
 			}
 			copiedFiles[id] = dstPath
diff --git a/vendor/github.com/containers/storage/drivers/copy/copy_unsupported.go b/vendor/github.com/containers/storage/drivers/copy/copy_unsupported.go
index 4d44f2f355..e97523c357 100644
--- a/vendor/github.com/containers/storage/drivers/copy/copy_unsupported.go
+++ b/vendor/github.com/containers/storage/drivers/copy/copy_unsupported.go
@@ -2,7 +2,12 @@
 
 package copy
 
-import "github.com/containers/storage/pkg/chrootarchive"
+import (
+	"io"
+	"os"
+
+	"github.com/containers/storage/pkg/chrootarchive"
+)
 
 // Mode indicates whether to use hardlink or copy content
 type Mode int
@@ -17,3 +22,19 @@ const (
 func DirCopy(srcDir, dstDir string, _ Mode, _ bool) error {
 	return chrootarchive.NewArchiver(nil).CopyWithTar(srcDir, dstDir)
 }
+
+// CopyRegularToFile copies the content of a file to another
+func CopyRegularToFile(srcPath string, dstFile *os.File, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error {
+	f, err := os.Open(srcPath)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+	_, err = io.Copy(dstFile, f)
+	return err
+}
+
+// CopyRegular copies the content of a file to another
+func CopyRegular(srcPath, dstPath string, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error {
+	return chrootarchive.NewArchiver(nil).CopyWithTar(srcPath, dstPath)
+}
diff --git a/vendor/github.com/containers/storage/drivers/driver.go b/vendor/github.com/containers/storage/drivers/driver.go
index 2d6485e808..cb169f76d5 100644
--- a/vendor/github.com/containers/storage/drivers/driver.go
+++ b/vendor/github.com/containers/storage/drivers/driver.go
@@ -7,6 +7,7 @@ import (
 	"path/filepath"
 	"strings"
 
+	digest "github.com/opencontainers/go-digest"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 	"github.com/vbatts/tar-split/tar/storage"
@@ -156,6 +157,29 @@ type Driver interface {
 	LayerIDMapUpdater
 }
 
+// DriverWithDifferOutput is the result of ApplyDiffWithDiffer
+type DriverWithDifferOutput struct {
+	Target             string
+	Size               int64
+	UIDs               []uint32
+	GIDs               []uint32
+	UncompressedDigest digest.Digest
+}
+
+type DifferFunction func(data interface{}, dest string, options *archive.TarOptions) (DriverWithDifferOutput, error)
+
+// DriverWithDiffer is the interface for direct diff access.
+type DriverWithDiffer interface {
+	Driver
+	// ApplyDiffWithDiffer applies the changes using the callback function.
+	// If id is empty, then a staging directory is created. The staging directory is guaranteed to be usable with ApplyDiffFromStagingDirectory.
+	ApplyDiffWithDiffer(id, parent string, options *ApplyDiffOpts, data interface{}, differ DifferFunction) (output DriverWithDifferOutput, err error)
+	// ApplyDiffFromStagingDirectory applies the changes using the specified staging directory.
+	ApplyDiffFromStagingDirectory(id, parent, stagingDirectory string, diffOutput *DriverWithDifferOutput, options *ApplyDiffOpts) error
+	// DifferTarget gets the location where files are stored for the layer.
+	DifferTarget(id string) (string, error)
+}
+
 // Capabilities defines a list of capabilities a driver may implement.
 // These capabilities are not required; however, they do determine how a
 // graphdriver can be used.
diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
index c1895c364a..cfa58d9171 100644
--- a/vendor/github.com/containers/storage/drivers/overlay/overlay.go
+++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
@@ -1155,6 +1155,10 @@ func (f fileGetNilCloser) Close() error {
 	return nil
 }
 
+func (d *Driver) getStagingDir() string {
+	return filepath.Join(d.home, "staging")
+}
+
 // DiffGetter returns a FileGetCloser that can read files from the directory that
 // contains files for the layer differences. Used for direct access for tar-split.
 func (d *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) {
@@ -1162,6 +1166,63 @@ func (d *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) {
 	return fileGetNilCloser{storage.NewPathFileGetter(p)}, nil
 }
 
+// ApplyDiff applies the changes in the new layer using the specified function
+func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.ApplyDiffOpts, data interface{}, differ graphdriver.DifferFunction) (output graphdriver.DriverWithDifferOutput, err error) {
+	var idMappings *idtools.IDMappings
+	if options != nil {
+		idMappings = options.Mappings
+	}
+	if idMappings == nil {
+		idMappings = &idtools.IDMappings{}
+	}
+
+	applyDir := ""
+
+	if id == "" {
+		err := os.MkdirAll(d.getStagingDir(), 0700)
+		if err != nil && !os.IsExist(err) {
+			return graphdriver.DriverWithDifferOutput{}, err
+		}
+		applyDir, err = ioutil.TempDir(d.getStagingDir(), "")
+		if err != nil {
+			return graphdriver.DriverWithDifferOutput{}, err
+		}
+
+	} else {
+		applyDir = d.getDiffPath(id)
+	}
+
+	logrus.Debugf("Applying differ in %s", applyDir)
+
+	out, err := differ(data, applyDir, &archive.TarOptions{
+		UIDMaps:           idMappings.UIDs(),
+		GIDMaps:           idMappings.GIDs(),
+		IgnoreChownErrors: d.options.ignoreChownErrors,
+		WhiteoutFormat:    d.getWhiteoutFormat(),
+		InUserNS:          rsystem.RunningInUserNS(),
+	})
+	out.Target = applyDir
+	return out, err
+}
+
+// ApplyDiffFromStagingDirectory applies the changes using the specified staging directory.
+func (d *Driver) ApplyDiffFromStagingDirectory(id, parent, stagingDirectory string, diffOutput *graphdriver.DriverWithDifferOutput, options *graphdriver.ApplyDiffOpts) error {
+	if filepath.Dir(stagingDirectory) != d.getStagingDir() {
+		return fmt.Errorf("%q is not a staging directory", stagingDirectory)
+	}
+
+	diff := d.getDiffPath(id)
+	if err := os.RemoveAll(diff); err != nil && !os.IsNotExist(err) {
+		return err
+	}
+	return os.Rename(stagingDirectory, diff)
+}
+
+// DifferTarget gets the location where files are stored for the layer.
+func (d *Driver) DifferTarget(id string) (string, error) {
+	return d.getDiffPath(id), nil
+}
+
 // ApplyDiff applies the new layer into a root
 func (d *Driver) ApplyDiff(id, parent string, options graphdriver.ApplyDiffOpts) (size int64, err error) {
 
diff --git a/vendor/github.com/containers/storage/errors.go b/vendor/github.com/containers/storage/errors.go
index 63cb9ab74d..0232f5b835 100644
--- a/vendor/github.com/containers/storage/errors.go
+++ b/vendor/github.com/containers/storage/errors.go
@@ -53,4 +53,6 @@ var (
 	ErrDigestUnknown = errors.New("could not compute digest of item")
 	// ErrLayerNotMounted is returned when the requested information can only be computed for a mounted layer, and the layer is not mounted.
 	ErrLayerNotMounted = errors.New("layer is not mounted")
+	// ErrNotSupported is returned when the requested functionality is not supported.
+	ErrNotSupported = errors.New("not supported")
 )
diff --git a/vendor/github.com/containers/storage/layers.go b/vendor/github.com/containers/storage/layers.go
index 72f00b8d65..a5338daa38 100644
--- a/vendor/github.com/containers/storage/layers.go
+++ b/vendor/github.com/containers/storage/layers.go
@@ -240,6 +240,16 @@ type LayerStore interface {
 	// applies its changes to a specified layer.
 	ApplyDiff(to string, diff io.Reader) (int64, error)
 
+	// ApplyDiffWithDiffer applies the changes through the differ callback function.
+	// If to is the empty string, then a staging directory is created by the driver.
+	ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffOpts, data interface{}, differ drivers.DifferFunction) (*drivers.DriverWithDifferOutput, error)
+
+	// ApplyDiffFromStagingDirectory uses stagingDirectory to create the diff.
+	ApplyDiffFromStagingDirectory(id, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffOpts) error
+
+	// DifferTarget gets the location where files are stored for the layer.
+	DifferTarget(id string) (string, error)
+
 	// LoadLocked wraps Load in a locked state. This means it loads the store
 	// and cleans-up invalid layers if needed.
 	LoadLocked() error
@@ -1412,6 +1422,76 @@ func (r *layerStore) ApplyDiff(to string, diff io.Reader) (size int64, err error
 	return size, err
 }
 
+func (r *layerStore) DifferTarget(id string) (string, error) {
+	ddriver, ok := r.driver.(drivers.DriverWithDiffer)
+	if !ok {
+		return "", ErrNotSupported
+	}
+	layer, ok := r.lookup(id)
+	if !ok {
+		return "", ErrLayerUnknown
+	}
+	return ddriver.DifferTarget(layer.ID)
+}
+
+func (r *layerStore) ApplyDiffFromStagingDirectory(id, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffOpts) error {
+	ddriver, ok := r.driver.(drivers.DriverWithDiffer)
+	if !ok {
+		return ErrNotSupported
+	}
+	layer, ok := r.lookup(id)
+	if !ok {
+		return ErrLayerUnknown
+	}
+	if options == nil {
+		options = &drivers.ApplyDiffOpts{
+			Mappings:   r.layerMappings(layer),
+			MountLabel: layer.MountLabel,
+		}
+	}
+	err := ddriver.ApplyDiffFromStagingDirectory(layer.ID, layer.Parent, stagingDirectory, diffOutput, options)
+	if err != nil {
+		return err
+	}
+	layer.UIDs = diffOutput.UIDs
+	layer.GIDs = diffOutput.GIDs
+	layer.UncompressedDigest = diffOutput.UncompressedDigest
+	layer.UncompressedSize = diffOutput.Size
+	err = r.Save()
+	return err
+}
+
+func (r *layerStore) ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffOpts, data interface{}, differ drivers.DifferFunction) (*drivers.DriverWithDifferOutput, error) {
+	ddriver, ok := r.driver.(drivers.DriverWithDiffer)
+	if !ok {
+		return nil, ErrNotSupported
+	}
+
+	if to == "" {
+		output, err := ddriver.ApplyDiffWithDiffer("", "", options, data, differ)
+		return &output, err
+	}
+
+	layer, ok := r.lookup(to)
+	if !ok {
+		return nil, ErrLayerUnknown
+	}
+	if options == nil {
+		options = &drivers.ApplyDiffOpts{
+			Mappings:   r.layerMappings(layer),
+			MountLabel: layer.MountLabel,
+		}
+	}
+	output, err := ddriver.ApplyDiffWithDiffer(layer.ID, layer.Parent, options, data, differ)
+	if err != nil {
+		return nil, err
+	}
+	layer.UIDs = output.UIDs
+	layer.GIDs = output.GIDs
+	err = r.Save()
+	return &output, err
+}
+
 func (r *layerStore) layersByDigestMap(m map[digest.Digest][]string, d digest.Digest) ([]Layer, error) {
 	var layers []Layer
 	for _, layerID := range m[d] {
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive.go b/vendor/github.com/containers/storage/pkg/archive/archive.go
index 2f917344a2..7ebb4905c6 100644
--- a/vendor/github.com/containers/storage/pkg/archive/archive.go
+++ b/vendor/github.com/containers/storage/pkg/archive/archive.go
@@ -437,9 +437,16 @@ func ReadUserXattrToTarHeader(path string, hdr *tar.Header) error {
 	return nil
 }
 
-type tarWhiteoutConverter interface {
+type TarWhiteoutHandler interface {
+	Setxattr(path, name string, value []byte) error
+	Mknod(path string, mode uint32, dev int) error
+	Chown(path string, uid, gid int) error
+}
+
+type TarWhiteoutConverter interface {
 	ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error)
 	ConvertRead(*tar.Header, string) (bool, error)
+	ConvertReadWithHandler(*tar.Header, string, TarWhiteoutHandler) (bool, error)
 }
 
 type tarAppender struct {
@@ -455,7 +462,7 @@ type tarAppender struct {
 	// non standard format. The whiteout files defined
 	// by the AUFS standard are used as the tar whiteout
 	// standard.
-	WhiteoutConverter tarWhiteoutConverter
+	WhiteoutConverter TarWhiteoutConverter
 	// CopyPass indicates that the contents of any archive we're creating
 	// will instantly be extracted and written to disk, so we can deviate
 	// from the traditional behavior/format to get features like subsecond
@@ -798,7 +805,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
 			compressWriter,
 			options.ChownOpts,
 		)
-		ta.WhiteoutConverter = getWhiteoutConverter(options.WhiteoutFormat, options.WhiteoutData)
+		ta.WhiteoutConverter = GetWhiteoutConverter(options.WhiteoutFormat, options.WhiteoutData)
 		ta.CopyPass = options.CopyPass
 
 		defer func() {
@@ -958,7 +965,7 @@ func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) err
 	var dirs []*tar.Header
 	idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps)
 	rootIDs := idMappings.RootPair()
-	whiteoutConverter := getWhiteoutConverter(options.WhiteoutFormat, options.WhiteoutData)
+	whiteoutConverter := GetWhiteoutConverter(options.WhiteoutFormat, options.WhiteoutData)
 	buffer := make([]byte, 1<<20)
 
 	if options.ForceMask != nil {
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_ffjson.go b/vendor/github.com/containers/storage/pkg/archive/archive_ffjson.go
index 6a5a867c79..c420ca3587 100644
--- a/vendor/github.com/containers/storage/pkg/archive/archive_ffjson.go
+++ b/vendor/github.com/containers/storage/pkg/archive/archive_ffjson.go
@@ -1945,7 +1945,7 @@ func (j *tarAppender) MarshalJSONBuf(buf fflib.EncodingBuffer) error {
 		buf.WriteString(`,"ChownOpts":null`)
 	}
 	buf.WriteString(`,"WhiteoutConverter":`)
-	/* Interface types must use runtime reflection. type=archive.tarWhiteoutConverter kind=interface */
+	/* Interface types must use runtime reflection. type=archive.TarWhiteoutConverter kind=interface */
 	err = buf.Encode(j.WhiteoutConverter)
 	if err != nil {
 		return err
@@ -2393,10 +2393,10 @@ handle_ChownOpts:
 
 handle_WhiteoutConverter:
 
-	/* handler: j.WhiteoutConverter type=archive.tarWhiteoutConverter kind=interface quoted=false*/
+	/* handler: j.WhiteoutConverter type=archive.TarWhiteoutConverter kind=interface quoted=false*/
 
 	{
-		/* Falling back. type=archive.tarWhiteoutConverter kind=interface */
+		/* Falling back. type=archive.TarWhiteoutConverter kind=interface */
 		tbuf, err := fs.CaptureField(tok)
 		if err != nil {
 			return fs.WrapErr(err)
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_linux.go b/vendor/github.com/containers/storage/pkg/archive/archive_linux.go
index 3faa238899..3a9c60a05b 100644
--- a/vendor/github.com/containers/storage/pkg/archive/archive_linux.go
+++ b/vendor/github.com/containers/storage/pkg/archive/archive_linux.go
@@ -12,7 +12,7 @@ import (
 	"golang.org/x/sys/unix"
 )
 
-func getWhiteoutConverter(format WhiteoutFormat, data interface{}) tarWhiteoutConverter {
+func GetWhiteoutConverter(format WhiteoutFormat, data interface{}) TarWhiteoutConverter {
 	if format == OverlayWhiteoutFormat {
 		if rolayers, ok := data.([]string); ok && len(rolayers) > 0 {
 			return overlayWhiteoutConverter{rolayers: rolayers}
@@ -108,13 +108,13 @@ func (o overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi
 	return
 }
 
-func (overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) {
+func (overlayWhiteoutConverter) ConvertReadWithHandler(hdr *tar.Header, path string, handler TarWhiteoutHandler) (bool, error) {
 	base := filepath.Base(path)
 	dir := filepath.Dir(path)
 
 	// if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay
 	if base == WhiteoutOpaqueDir {
-		err := unix.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0)
+		err := handler.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'})
 		// don't write the file itself
 		return false, err
 	}
@@ -124,10 +124,10 @@ func (overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool,
 		originalBase := base[len(WhiteoutPrefix):]
 		originalPath := filepath.Join(dir, originalBase)
 
-		if err := unix.Mknod(originalPath, unix.S_IFCHR, 0); err != nil {
+		if err := handler.Mknod(originalPath, unix.S_IFCHR, 0); err != nil {
 			return false, err
 		}
-		if err := idtools.SafeChown(originalPath, hdr.Uid, hdr.Gid); err != nil {
+		if err := handler.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil {
 			return false, err
 		}
 
@@ -138,6 +138,26 @@ func (overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool,
 	return true, nil
 }
 
+type directHandler struct {
+}
+
+func (d directHandler) Setxattr(path, name string, value []byte) error {
+	return unix.Setxattr(path, name, value, 0)
+}
+
+func (d directHandler) Mknod(path string, mode uint32, dev int) error {
+	return unix.Mknod(path, mode, dev)
+}
+
+func (d directHandler) Chown(path string, uid, gid int) error {
+	return idtools.SafeChown(path, uid, gid)
+}
+
+func (o overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) {
+	var handler directHandler
+	return o.ConvertReadWithHandler(hdr, path, handler)
+}
+
 func isWhiteOut(stat os.FileInfo) bool {
 	s := stat.Sys().(*syscall.Stat_t)
 	return major(uint64(s.Rdev)) == 0 && minor(uint64(s.Rdev)) == 0
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_other.go b/vendor/github.com/containers/storage/pkg/archive/archive_other.go
index 08e3bc889f..62f42fac9b 100644
--- a/vendor/github.com/containers/storage/pkg/archive/archive_other.go
+++ b/vendor/github.com/containers/storage/pkg/archive/archive_other.go
@@ -2,7 +2,7 @@
 
 package archive
 
-func getWhiteoutConverter(format WhiteoutFormat, data interface{}) tarWhiteoutConverter {
+func GetWhiteoutConverter(format WhiteoutFormat, data interface{}) TarWhiteoutConverter {
 	return nil
 }
diff --git a/vendor/github.com/containers/storage/pkg/ioutils/fswriters.go b/vendor/github.com/containers/storage/pkg/ioutils/fswriters.go
index a55937b49c..cd12470f9d 100644
--- a/vendor/github.com/containers/storage/pkg/ioutils/fswriters.go
+++ b/vendor/github.com/containers/storage/pkg/ioutils/fswriters.go
@@ -156,6 +156,9 @@ type syncFileCloser struct {
 }
 
 func (w syncFileCloser) Close() error {
+	if !defaultWriterOptions.NoSync {
+		return w.File.Close()
+	}
 	err := fdatasync(w.File)
 	if err1 := w.File.Close(); err == nil {
 		err = err1
diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare.go b/vendor/github.com/containers/storage/pkg/unshare/unshare.go
index a9210b0bf1..a08fb674da 100644
--- a/vendor/github.com/containers/storage/pkg/unshare/unshare.go
+++ b/vendor/github.com/containers/storage/pkg/unshare/unshare.go
@@ -26,7 +26,6 @@ func HomeDir() (string, error) {
 				return
 			}
 			homeDir, homeDirErr = usr.HomeDir, nil
-			return
 		}
 		homeDir, homeDirErr = home, nil
 	})
diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go
index b9115f1958..7bb1aa225c 100644
--- a/vendor/github.com/containers/storage/store.go
+++ b/vendor/github.com/containers/storage/store.go
@@ -326,6 +326,17 @@ type Store interface {
 	// }
 	ApplyDiff(to string, diff io.Reader) (int64, error)
 
+	// ApplyDiffer applies a diff to a layer.
+	// It is the caller responsibility to clean the staging directory if it is not
+	// successfully applied with ApplyDiffFromStagingDirectory.
+	ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffOpts, data interface{}, differ drivers.DifferFunction) (*drivers.DriverWithDifferOutput, error)
+
+	// ApplyDiffFromStagingDirectory uses stagingDirectory to create the diff.
+	ApplyDiffFromStagingDirectory(to, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffOpts) error
+
+	// DifferTarget gets the path to the differ target.
+	DifferTarget(id string) (string, error)
+
 	// LayersByCompressedDigest returns a slice of the layers with the
 	// specified compressed digest value recorded for them.
 	LayersByCompressedDigest(d digest.Digest) ([]Layer, error)
@@ -2824,6 +2835,60 @@ func (s *store) Diff(from, to string, options *DiffOptions) (io.ReadCloser, erro
 	return nil, ErrLayerUnknown
 }
 
+func (s *store) ApplyDiffFromStagingDirectory(to, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffOpts) error {
+	rlstore, err := s.LayerStore()
+	if err != nil {
+		return err
+	}
+	rlstore.Lock()
+	defer rlstore.Unlock()
+	if modified, err := rlstore.Modified(); modified || err != nil {
+		if err = rlstore.Load(); err != nil {
+			return err
+		}
+	}
+	if !rlstore.Exists(to) {
+		return ErrLayerUnknown
+	}
+	return rlstore.ApplyDiffFromStagingDirectory(to, stagingDirectory, diffOutput, options)
+}
+
+func (s *store) ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffOpts, data interface{}, differ drivers.DifferFunction) (*drivers.DriverWithDifferOutput, error) {
+	rlstore, err := s.LayerStore()
+	if err != nil {
+		return nil, err
+	}
+	rlstore.Lock()
+	defer rlstore.Unlock()
+	if modified, err := rlstore.Modified(); modified || err != nil {
+		if err = rlstore.Load(); err != nil {
+			return nil, err
+		}
+	}
+	if to != "" && !rlstore.Exists(to) {
+		return nil, ErrLayerUnknown
+	}
+	return rlstore.ApplyDiffWithDiffer(to, options, data, differ)
+}
+
+func (s *store) DifferTarget(id string) (string, error) {
+	rlstore, err := s.LayerStore()
+	if err != nil {
+		return "", err
+	}
+	rlstore.Lock()
+	defer rlstore.Unlock()
+	if modified, err := rlstore.Modified(); modified || err != nil {
+		if err = rlstore.Load(); err != nil {
+			return "", err
+		}
+	}
+	if rlstore.Exists(id) {
+		return rlstore.DifferTarget(id)
+	}
+	return "", ErrLayerUnknown
+}
+
 func (s *store) ApplyDiff(to string, diff io.Reader) (int64, error) {
 	rlstore, err := s.LayerStore()
 	if err != nil {