pax_global_header00006660000000000000000000000064151262500360014512gustar00rootroot0000000000000052 comment=7253ccd011ea84e222808c5909c63c3f2abd8a85 golang-github-moby-go-archive-0.2.0/000077500000000000000000000000001512625003600172265ustar00rootroot00000000000000golang-github-moby-go-archive-0.2.0/.gitattributes000066400000000000000000000000501512625003600221140ustar00rootroot00000000000000*.go -text diff=golang *.go text eol=lf golang-github-moby-go-archive-0.2.0/.github/000077500000000000000000000000001512625003600205665ustar00rootroot00000000000000golang-github-moby-go-archive-0.2.0/.github/workflows/000077500000000000000000000000001512625003600226235ustar00rootroot00000000000000golang-github-moby-go-archive-0.2.0/.github/workflows/codeql.yml000066400000000000000000000035541512625003600246240ustar00rootroot00000000000000name: codeql # Default to 'contents: read', which grants actions to read commits. # # If any permission is set, any permission not included in the list is # implicitly set to "none". 
# # see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions permissions: contents: read concurrency: group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: true on: push: branches: - 'main' - 'release/*' tags: - 'v*' pull_request: # The branches below must be a subset of the branches above branches: - 'main' - 'release/*' schedule: # ┌───────────── minute (0 - 59) # │ ┌───────────── hour (0 - 23) # │ │ ┌───────────── day of the month (1 - 31) # │ │ │ ┌───────────── month (1 - 12) # │ │ │ │ ┌───────────── day of the week (0 - 6) (Sunday to Saturday) # │ │ │ │ │ # │ │ │ │ │ # │ │ │ │ │ # * * * * * - cron: '0 9 * * 4' jobs: codeql: runs-on: ubuntu-24.04 timeout-minutes: 10 permissions: actions: read contents: read security-events: write steps: - name: Checkout uses: actions/checkout@v6 with: fetch-depth: 2 - name: Update Go uses: actions/setup-go@v6 with: go-version: stable - name: Initialize CodeQL uses: github/codeql-action/init@v4 with: languages: go - name: Autobuild uses: github/codeql-action/autobuild@v4 - name: Perform CodeQL Analysis uses: github/codeql-action/analyze@v4 with: category: "/language:go" golang-github-moby-go-archive-0.2.0/.github/workflows/test.yml000066400000000000000000000023121512625003600243230ustar00rootroot00000000000000name: test on: push: branches: [ "main" ] pull_request: branches: [ "main" ] permissions: contents: read jobs: build: strategy: matrix: go-version: - 1.23.x # oldest supported (see go.mod) - oldstable - stable os: [ubuntu-latest, windows-latest, macos-latest] runs-on: ${{ matrix.os }} timeout-minutes: 10 steps: - uses: actions/checkout@v6 - name: Set up Go uses: actions/setup-go@v6 with: go-version: ${{ matrix.go-version }} - name: lint uses: golangci/golangci-lint-action@v9 with: version: v2.7 args: --print-resources-usage --timeout=10m --verbose - name: Test run: go test -v -cover "-coverprofile=coverage.txt" -covermode=atomic ./... 
shell: bash env: MSYS_NO_PATHCONV: '1' - name: Test (root) if: runner.os == 'Linux' run: go test -exec "sudo -E -n" -v -cover "-coverprofile=coverage.txt" -covermode=atomic ./... shell: bash env: MSYS_NO_PATHCONV: '1' - name: Codecov uses: codecov/codecov-action@v5 with: directory: ./ golang-github-moby-go-archive-0.2.0/.gitignore000066400000000000000000000000161512625003600212130ustar00rootroot00000000000000/coverage.txt golang-github-moby-go-archive-0.2.0/.golangci.yml000066400000000000000000000014641512625003600216170ustar00rootroot00000000000000version: "2" issues: # Disable maximum issues count per one linter. max-issues-per-linter: 0 # Disable maximum count of issues with the same text. max-same-issues: 0 linters: enable: - errorlint - unconvert - unparam exclusions: generated: disable presets: - comments - std-error-handling settings: staticcheck: # Enable all options, with some exceptions. # For defaults, see https://golangci-lint.run/usage/linters/#staticcheck checks: - all - -QF1008 # Omit embedded fields from selector expression; https://staticcheck.dev/docs/checks/#QF1008 - -ST1003 # Poorly chosen identifier; https://staticcheck.dev/docs/checks/#ST1003 formatters: enable: - gofumpt - goimports exclusions: generated: disable golang-github-moby-go-archive-0.2.0/LICENSE000066400000000000000000000261361512625003600202430ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. 
For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. 
For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. 
If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. 
You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. 
Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. golang-github-moby-go-archive-0.2.0/archive.go000066400000000000000000001047731512625003600212120ustar00rootroot00000000000000// Package archive provides helper functions for dealing with archive files. package archive import ( "archive/tar" "context" "errors" "fmt" "io" "os" "path/filepath" "runtime" "strings" "syscall" "time" "github.com/containerd/log" "github.com/moby/patternmatcher" "github.com/moby/sys/sequential" "github.com/moby/sys/user" "github.com/moby/go-archive/compression" "github.com/moby/go-archive/tarheader" ) // ImpliedDirectoryMode represents the mode (Unix permissions) applied to directories that are implied by files in a // tar, but that do not have their own header entry. // // The permissions mask is stored in a constant instead of locally to ensure that magic numbers do not // proliferate in the codebase. The default value 0755 has been selected based on the default umask of 0022, and // a convention of mkdir(1) calling mkdir(2) with permissions of 0777, resulting in a final value of 0755. // // This value is currently implementation-defined, and not captured in any cross-runtime specification. Thus, it is // subject to change in Moby at any time -- image authors who require consistent or known directory permissions // should explicitly control them by ensuring that header entries exist for any applicable path. 
const ImpliedDirectoryMode = 0o755 type ( // WhiteoutFormat is the format of whiteouts unpacked WhiteoutFormat int ChownOpts struct { UID int GID int } // TarOptions wraps the tar options. TarOptions struct { IncludeFiles []string ExcludePatterns []string Compression compression.Compression NoLchown bool IDMap user.IdentityMapping ChownOpts *ChownOpts IncludeSourceDir bool // WhiteoutFormat is the expected on disk format for whiteout files. // This format will be converted to the standard format on pack // and from the standard format on unpack. WhiteoutFormat WhiteoutFormat // When unpacking, specifies whether overwriting a directory with a // non-directory is allowed and vice versa. NoOverwriteDirNonDir bool // For each include when creating an archive, the included name will be // replaced with the matching name from this map. RebaseNames map[string]string InUserNS bool // Allow unpacking to succeed in spite of failures to set extended // attributes on the unpacked files due to the destination filesystem // not supporting them or a lack of permissions. Extended attributes // were probably in the archive for a reason, so set this option at // your own peril. BestEffortXattrs bool } ) // Archiver implements the Archiver interface and allows the reuse of most utility functions of // this package with a pluggable Untar function. Also, to facilitate the passing of specific id // mappings for untar, an Archiver can be created with maps which will then be passed to Untar operations. type Archiver struct { Untar func(io.Reader, string, *TarOptions) error IDMapping user.IdentityMapping } // NewDefaultArchiver returns a new Archiver without any IdentityMapping func NewDefaultArchiver() *Archiver { return &Archiver{Untar: Untar} } // breakoutError is used to differentiate errors related to breaking out // When testing archive breakout in the unit tests, this error is expected // in order for the test to pass. 
type breakoutError error const ( AUFSWhiteoutFormat WhiteoutFormat = 0 // AUFSWhiteoutFormat is the default format for whiteouts OverlayWhiteoutFormat WhiteoutFormat = 1 // OverlayWhiteoutFormat formats whiteout according to the overlay standard. ) // IsArchivePath checks if the (possibly compressed) file at the given path // starts with a tar file header. func IsArchivePath(path string) bool { file, err := os.Open(path) if err != nil { return false } defer file.Close() rdr, err := compression.DecompressStream(file) if err != nil { return false } defer rdr.Close() r := tar.NewReader(rdr) _, err = r.Next() return err == nil } // TarModifierFunc is a function that can be passed to ReplaceFileTarWrapper to // modify the contents or header of an entry in the archive. If the file already // exists in the archive the TarModifierFunc will be called with the Header and // a reader which will return the files content. If the file does not exist both // header and content will be nil. type TarModifierFunc func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) // ReplaceFileTarWrapper converts inputTarStream to a new tar stream. Files in the // tar stream are modified if they match any of the keys in mods. 
func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModifierFunc) io.ReadCloser { pipeReader, pipeWriter := io.Pipe() go func() { tarReader := tar.NewReader(inputTarStream) tarWriter := tar.NewWriter(pipeWriter) defer inputTarStream.Close() defer tarWriter.Close() modify := func(name string, original *tar.Header, modifier TarModifierFunc, tarReader io.Reader) error { header, data, err := modifier(name, original, tarReader) switch { case err != nil: return err case header == nil: return nil } if header.Name == "" { header.Name = name } header.Size = int64(len(data)) if err := tarWriter.WriteHeader(header); err != nil { return err } if len(data) != 0 { if _, err := tarWriter.Write(data); err != nil { return err } } return nil } var err error var originalHeader *tar.Header for { originalHeader, err = tarReader.Next() if errors.Is(err, io.EOF) { break } if err != nil { pipeWriter.CloseWithError(err) return } modifier, ok := mods[originalHeader.Name] if !ok { // No modifiers for this file, copy the header and data if err := tarWriter.WriteHeader(originalHeader); err != nil { pipeWriter.CloseWithError(err) return } if err := copyWithBuffer(tarWriter, tarReader); err != nil { pipeWriter.CloseWithError(err) return } continue } delete(mods, originalHeader.Name) if err := modify(originalHeader.Name, originalHeader, modifier, tarReader); err != nil { pipeWriter.CloseWithError(err) return } } // Apply the modifiers that haven't matched any files in the archive for name, modifier := range mods { if err := modify(name, nil, modifier, nil); err != nil { pipeWriter.CloseWithError(err) return } } pipeWriter.Close() }() return pipeReader } // FileInfoHeader creates a populated Header from fi. // // Compared to the archive/tar package, this function fills in less information // but is safe to call from a chrooted process. 
The AccessTime and ChangeTime // fields are not set in the returned header, ModTime is truncated to one-second // precision, and the Uname and Gname fields are only set when fi is a FileInfo // value returned from tar.Header.FileInfo(). func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) { hdr, err := tarheader.FileInfoHeaderNoLookups(fi, link) if err != nil { return nil, err } hdr.Format = tar.FormatPAX hdr.ModTime = hdr.ModTime.Truncate(time.Second) hdr.AccessTime = time.Time{} hdr.ChangeTime = time.Time{} hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) hdr.Name = canonicalTarName(name, fi.IsDir()) return hdr, nil } const paxSchilyXattr = "SCHILY.xattr." // ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem // to a tar header func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error { const ( // Values based on linux/include/uapi/linux/capability.h xattrCapsSz2 = 20 versionOffset = 3 vfsCapRevision2 = 2 vfsCapRevision3 = 3 ) capability, _ := lgetxattr(path, "security.capability") if capability != nil { if capability[versionOffset] == vfsCapRevision3 { // Convert VFS_CAP_REVISION_3 to VFS_CAP_REVISION_2 as root UID makes no // sense outside the user namespace the archive is built in. capability[versionOffset] = vfsCapRevision2 capability = capability[:xattrCapsSz2] } if hdr.PAXRecords == nil { hdr.PAXRecords = make(map[string]string) } hdr.PAXRecords[paxSchilyXattr+"security.capability"] = string(capability) } return nil } type tarWhiteoutConverter interface { ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error) ConvertRead(*tar.Header, string) (bool, error) } type tarAppender struct { TarWriter *tar.Writer // for hardlink mapping SeenFiles map[uint64]string IdentityMapping user.IdentityMapping ChownOpts *ChownOpts // For packing and unpacking whiteout files in the // non standard format. 
The whiteout files defined // by the AUFS standard are used as the tar whiteout // standard. WhiteoutConverter tarWhiteoutConverter } func newTarAppender(idMapping user.IdentityMapping, writer io.Writer, chownOpts *ChownOpts) *tarAppender { return &tarAppender{ SeenFiles: make(map[uint64]string), TarWriter: tar.NewWriter(writer), IdentityMapping: idMapping, ChownOpts: chownOpts, } } // canonicalTarName provides a platform-independent and consistent POSIX-style // path for files and directories to be archived regardless of the platform. func canonicalTarName(name string, isDir bool) string { name = filepath.ToSlash(name) // suffix with '/' for directories if isDir && !strings.HasSuffix(name, "/") { name += "/" } return name } // addTarFile adds to the tar archive a file from `path` as `name` func (ta *tarAppender) addTarFile(path, name string) error { fi, err := os.Lstat(path) if err != nil { return err } var link string if fi.Mode()&os.ModeSymlink != 0 { var err error link, err = os.Readlink(path) if err != nil { return err } } hdr, err := FileInfoHeader(name, fi, link) if err != nil { return err } if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil { return err } // if it's not a directory and has more than 1 link, // it's hard linked, so set the type flag accordingly if !fi.IsDir() && hasHardlinks(fi) { inode, err := getInodeFromStat(fi.Sys()) if err != nil { return err } // a link should have a name that it links too // and that linked name should be first in the tar archive if oldpath, ok := ta.SeenFiles[inode]; ok { hdr.Typeflag = tar.TypeLink hdr.Linkname = oldpath hdr.Size = 0 // This Must be here for the writer math to add up! } else { ta.SeenFiles[inode] = name } } // check whether the file is overlayfs whiteout // if yes, skip re-mapping container ID mappings. 
isOverlayWhiteout := fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 // handle re-mapping container ID mappings back to host ID mappings before // writing tar headers/files. We skip whiteout files because they were written // by the kernel and already have proper ownership relative to the host if !isOverlayWhiteout && !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && !ta.IdentityMapping.Empty() { uid, gid, err := getFileUIDGID(fi.Sys()) if err != nil { return err } hdr.Uid, hdr.Gid, err = ta.IdentityMapping.ToContainer(uid, gid) if err != nil { return err } } // explicitly override with ChownOpts if ta.ChownOpts != nil { hdr.Uid = ta.ChownOpts.UID hdr.Gid = ta.ChownOpts.GID } if ta.WhiteoutConverter != nil { wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi) if err != nil { return err } // If a new whiteout file exists, write original hdr, then // replace hdr with wo to be written after. Whiteouts should // always be written after the original. Note the original // hdr may have been updated to be a whiteout with returning // a whiteout header if wo != nil { if err := ta.TarWriter.WriteHeader(hdr); err != nil { return err } if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { return fmt.Errorf("tar: cannot use whiteout for non-empty file") } hdr = wo } } if err := ta.TarWriter.WriteHeader(hdr); err != nil { return err } if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { // We use sequential file access to avoid depleting the standby list on // Windows. On Linux, this equates to a regular os.Open. file, err := sequential.Open(path) if err != nil { return err } err = copyWithBuffer(ta.TarWriter, file) file.Close() if err != nil { return err } } return nil } func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, opts *TarOptions) error { var ( Lchown = true inUserns, bestEffortXattrs bool chownOpts *ChownOpts ) // TODO(thaJeztah): make opts a required argument. 
if opts != nil { Lchown = !opts.NoLchown inUserns = opts.InUserNS // TODO(thaJeztah): consider deprecating opts.InUserNS and detect locally. chownOpts = opts.ChownOpts bestEffortXattrs = opts.BestEffortXattrs } // hdr.Mode is in linux format, which we can use for sycalls, // but for os.Foo() calls we need the mode converted to os.FileMode, // so use hdrInfo.Mode() (they differ for e.g. setuid bits) hdrInfo := hdr.FileInfo() switch hdr.Typeflag { case tar.TypeDir: // Create directory unless it exists as a directory already. // In that case we just want to merge the two if fi, err := os.Lstat(path); err != nil || !fi.IsDir() { if err := os.Mkdir(path, hdrInfo.Mode()); err != nil { return err } } case tar.TypeReg: // Source is regular file. We use sequential file access to avoid depleting // the standby list on Windows. On Linux, this equates to a regular os.OpenFile. file, err := sequential.OpenFile(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode()) if err != nil { return err } if err := copyWithBuffer(file, reader); err != nil { _ = file.Close() return err } _ = file.Close() case tar.TypeBlock, tar.TypeChar: if inUserns { // cannot create devices in a userns log.G(context.TODO()).WithFields(log.Fields{"path": path, "type": hdr.Typeflag}).Debug("skipping device nodes in a userns") return nil } // Handle this is an OS-specific way if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { return err } case tar.TypeFifo: // Handle this is an OS-specific way if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { if inUserns && errors.Is(err, syscall.EPERM) { // In most cases, cannot create a fifo if running in user namespace log.G(context.TODO()).WithFields(log.Fields{"error": err, "path": path, "type": hdr.Typeflag}).Debug("creating fifo node in a userns") return nil } return err } case tar.TypeLink: // #nosec G305 -- The target path is checked for path traversal. 
targetPath := filepath.Join(extractDir, hdr.Linkname) // check for hardlink breakout if !strings.HasPrefix(targetPath, extractDir) { return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname)) } if err := os.Link(targetPath, path); err != nil { return err } case tar.TypeSymlink: // path -> hdr.Linkname = targetPath // e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) // #nosec G305 -- The target path is checked for path traversal. // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because // that symlink would first have to be created, which would be caught earlier, at this very check: if !strings.HasPrefix(targetPath, extractDir) { return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname)) } if err := os.Symlink(hdr.Linkname, path); err != nil { return err } case tar.TypeXGlobalHeader: log.G(context.TODO()).Debug("PAX Global Extended Headers found and ignored") return nil default: return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag) } // Lchown is not supported on Windows. if Lchown && runtime.GOOS != "windows" { if chownOpts == nil { chownOpts = &ChownOpts{UID: hdr.Uid, GID: hdr.Gid} } if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil { var msg string if inUserns && errors.Is(err, syscall.EINVAL) { msg = " (try increasing the number of subordinate IDs in /etc/subuid and /etc/subgid)" } return fmt.Errorf("failed to Lchown %q for UID %d, GID %d%s: %w", path, hdr.Uid, hdr.Gid, msg, err) } } var xattrErrs []string for key, value := range hdr.PAXRecords { xattr, ok := strings.CutPrefix(key, paxSchilyXattr) if !ok { continue } if err := lsetxattr(path, xattr, []byte(value), 0); err != nil { if bestEffortXattrs && errors.Is(err, syscall.ENOTSUP) || errors.Is(err, syscall.EPERM) { // EPERM occurs if modifying xattrs is not allowed. 
This can // happen when running in userns with restrictions (ChromeOS). xattrErrs = append(xattrErrs, err.Error()) continue } return err } } if len(xattrErrs) > 0 { log.G(context.TODO()).WithFields(log.Fields{ "errors": xattrErrs, }).Warn("ignored xattrs in archive: underlying filesystem doesn't support them") } // There is no LChmod, so ignore mode for symlink. Also, this // must happen after chown, as that can modify the file mode if err := handleLChmod(hdr, path, hdrInfo); err != nil { return err } aTime := boundTime(latestTime(hdr.AccessTime, hdr.ModTime)) mTime := boundTime(hdr.ModTime) // chtimes doesn't support a NOFOLLOW flag atm if hdr.Typeflag == tar.TypeLink { if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { if err := chtimes(path, aTime, mTime); err != nil { return err } } } else if hdr.Typeflag != tar.TypeSymlink { if err := chtimes(path, aTime, mTime); err != nil { return err } } else { if err := lchtimes(path, aTime, mTime); err != nil { return err } } return nil } // Tar creates an archive from the directory at `path`, and returns it as a // stream of bytes. func Tar(path string, comp compression.Compression) (io.ReadCloser, error) { return TarWithOptions(path, &TarOptions{Compression: comp}) } // TarWithOptions creates an archive from the directory at `path`, only including files whose relative // paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { tb, err := NewTarballer(srcPath, options) if err != nil { return nil, err } go tb.Do() return tb.Reader(), nil } // Tarballer is a lower-level interface to TarWithOptions which gives the caller // control over which goroutine the archiving operation executes on. 
type Tarballer struct { srcPath string options *TarOptions pm *patternmatcher.PatternMatcher pipeReader *io.PipeReader pipeWriter *io.PipeWriter compressWriter io.WriteCloser whiteoutConverter tarWhiteoutConverter } // NewTarballer constructs a new tarballer. The arguments are the same as for // TarWithOptions. func NewTarballer(srcPath string, options *TarOptions) (*Tarballer, error) { pm, err := patternmatcher.New(options.ExcludePatterns) if err != nil { return nil, err } pipeReader, pipeWriter := io.Pipe() compressWriter, err := compression.CompressStream(pipeWriter, options.Compression) if err != nil { return nil, err } return &Tarballer{ // Fix the source path to work with long path names. This is a no-op // on platforms other than Windows. srcPath: addLongPathPrefix(srcPath), options: options, pm: pm, pipeReader: pipeReader, pipeWriter: pipeWriter, compressWriter: compressWriter, whiteoutConverter: getWhiteoutConverter(options.WhiteoutFormat), }, nil } // Reader returns the reader for the created archive. func (t *Tarballer) Reader() io.ReadCloser { return t.pipeReader } // Do performs the archiving operation in the background. The resulting archive // can be read from t.Reader(). Do should only be called once on each Tarballer // instance. func (t *Tarballer) Do() { ta := newTarAppender( t.options.IDMap, t.compressWriter, t.options.ChownOpts, ) ta.WhiteoutConverter = t.whiteoutConverter defer func() { // Make sure to check the error on Close. if err := ta.TarWriter.Close(); err != nil { log.G(context.TODO()).Errorf("Can't close tar writer: %s", err) } if err := t.compressWriter.Close(); err != nil { log.G(context.TODO()).Errorf("Can't close compress writer: %s", err) } if err := t.pipeWriter.Close(); err != nil { log.G(context.TODO()).Errorf("Can't close pipe writer: %s", err) } }() // In general we log errors here but ignore them because // during e.g. 
a diff operation the container can continue // mutating the filesystem and we can see transient errors // from this stat, err := os.Lstat(t.srcPath) if err != nil { return } if !stat.IsDir() { // We can't later join a non-dir with any includes because the // 'walk' will error if "file/." is stat-ed and "file" is not a // directory. So, we must split the source path and use the // basename as the include. if len(t.options.IncludeFiles) > 0 { log.G(context.TODO()).Warn("Tar: Can't archive a file with includes") } dir, base := SplitPathDirEntry(t.srcPath) t.srcPath = dir t.options.IncludeFiles = []string{base} } if len(t.options.IncludeFiles) == 0 { t.options.IncludeFiles = []string{"."} } seen := make(map[string]bool) for _, include := range t.options.IncludeFiles { rebaseName := t.options.RebaseNames[include] var ( parentMatchInfo []patternmatcher.MatchInfo parentDirs []string ) walkRoot := getWalkRoot(t.srcPath, include) // TODO(thaJeztah): should this error be handled? _ = filepath.WalkDir(walkRoot, func(filePath string, f os.DirEntry, err error) error { if err != nil { log.G(context.TODO()).Errorf("Tar: Can't stat file %s to tar: %s", t.srcPath, err) return nil } relFilePath, err := filepath.Rel(t.srcPath, filePath) if err != nil || (!t.options.IncludeSourceDir && relFilePath == "." && f.IsDir()) { // Error getting relative path OR we are looking // at the source directory path. Skip in both situations. return nil } if t.options.IncludeSourceDir && include == "." && relFilePath != "." { relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator)) } skip := false // If "include" is an exact match for the current file // then even if there's an "excludePatterns" pattern that // matches it, don't skip it. 
IOW, assume an explicit 'include' // is asking for that file no matter what - which is true // for some files, like .dockerignore and Dockerfile (sometimes) if include != relFilePath { for len(parentDirs) != 0 { lastParentDir := parentDirs[len(parentDirs)-1] if strings.HasPrefix(relFilePath, lastParentDir+string(os.PathSeparator)) { break } parentDirs = parentDirs[:len(parentDirs)-1] parentMatchInfo = parentMatchInfo[:len(parentMatchInfo)-1] } var matchInfo patternmatcher.MatchInfo if len(parentMatchInfo) != 0 { skip, matchInfo, err = t.pm.MatchesUsingParentResults(relFilePath, parentMatchInfo[len(parentMatchInfo)-1]) } else { skip, matchInfo, err = t.pm.MatchesUsingParentResults(relFilePath, patternmatcher.MatchInfo{}) } if err != nil { log.G(context.TODO()).Errorf("Error matching %s: %v", relFilePath, err) return err } if f.IsDir() { parentDirs = append(parentDirs, relFilePath) parentMatchInfo = append(parentMatchInfo, matchInfo) } } if skip { // If we want to skip this file and its a directory // then we should first check to see if there's an // excludes pattern (e.g. !dir/file) that starts with this // dir. If so then we can't skip this dir. // Its not a dir then so we can just return/skip. if !f.IsDir() { return nil } // No exceptions (!...) in patterns so just skip dir if !t.pm.Exclusions() { return filepath.SkipDir } dirSlash := relFilePath + string(filepath.Separator) for _, pat := range t.pm.Patterns() { if !pat.Exclusion() { continue } if strings.HasPrefix(pat.String()+string(filepath.Separator), dirSlash) { // found a match - so can't skip this dir return nil } } // No matching exclusion dir so just skip dir return filepath.SkipDir } if seen[relFilePath] { return nil } seen[relFilePath] = true // Rename the base resource. if rebaseName != "" { var replacement string if rebaseName != string(filepath.Separator) { // Special case the root directory to replace with an // empty string instead so that we don't end up with // double slashes in the paths. 
replacement = rebaseName } relFilePath = strings.Replace(relFilePath, include, replacement, 1) } if err := ta.addTarFile(filePath, relFilePath); err != nil { log.G(context.TODO()).Errorf("Can't add file %s to tar: %s", filePath, err) // if pipe is broken, stop writing tar stream to it if errors.Is(err, io.ErrClosedPipe) { return err } } return nil }) } } // Unpack unpacks the decompressedArchive to dest with options. func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error { tr := tar.NewReader(decompressedArchive) var dirs []*tar.Header whiteoutConverter := getWhiteoutConverter(options.WhiteoutFormat) // Iterate through the files in the archive. loop: for { hdr, err := tr.Next() if errors.Is(err, io.EOF) { // end of tar archive break } if err != nil { return err } // ignore XGlobalHeader early to avoid creating parent directories for them if hdr.Typeflag == tar.TypeXGlobalHeader { log.G(context.TODO()).Debugf("PAX Global Extended Headers found for %s and ignored", hdr.Name) continue } // Normalize name, for safety and for a simple is-root check // This keeps "../" as-is, but normalizes "/../" to "/". Or Windows: // This keeps "..\" as-is, but normalizes "\..\" to "\". hdr.Name = filepath.Clean(hdr.Name) for _, exclude := range options.ExcludePatterns { if strings.HasPrefix(hdr.Name, exclude) { continue loop } } // Ensure that the parent directory exists. err = createImpliedDirectories(dest, hdr, options) if err != nil { return err } // #nosec G305 -- The joined path is checked for path traversal. path := filepath.Join(dest, hdr.Name) rel, err := filepath.Rel(dest, path) if err != nil { return err } if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) } // If path exits we almost always just want to remove and replace it // The only exception is when it is a directory *and* the file from // the layer is also a directory. Then we want to merge them (i.e. 
// just apply the metadata from the layer). if fi, err := os.Lstat(path); err == nil { if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir { // If NoOverwriteDirNonDir is true then we cannot replace // an existing directory with a non-directory from the archive. return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest) } if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir { // If NoOverwriteDirNonDir is true then we cannot replace // an existing non-directory with a directory from the archive. return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest) } if fi.IsDir() && hdr.Name == "." { continue } if !fi.IsDir() || hdr.Typeflag != tar.TypeDir { if err := os.RemoveAll(path); err != nil { return err } } } if err := remapIDs(options.IDMap, hdr); err != nil { return err } if whiteoutConverter != nil { writeFile, err := whiteoutConverter.ConvertRead(hdr, path) if err != nil { return err } if !writeFile { continue } } if err := createTarFile(path, dest, hdr, tr, options); err != nil { return err } // Directory mtimes must be handled at the end to avoid further // file creation in them to modify the directory mtime if hdr.Typeflag == tar.TypeDir { dirs = append(dirs, hdr) } } for _, hdr := range dirs { // #nosec G305 -- The header was checked for path traversal before it was appended to the dirs slice. path := filepath.Join(dest, hdr.Name) if err := chtimes(path, boundTime(latestTime(hdr.AccessTime, hdr.ModTime)), boundTime(hdr.ModTime)); err != nil { return err } } return nil } // createImpliedDirectories will create all parent directories of the current path with default permissions, if they do // not already exist. 
This is possible as the tar format supports 'implicit' directories, where their existence is // defined by the paths of files in the tar, but there are no header entries for the directories themselves, and thus // we most both create them and choose metadata like permissions. // // The caller should have performed filepath.Clean(hdr.Name), so hdr.Name will now be in the filepath format for the OS // on which the daemon is running. This precondition is required because this function assumes a OS-specific path // separator when checking that a path is not the root. func createImpliedDirectories(dest string, hdr *tar.Header, options *TarOptions) error { // Not the root directory, ensure that the parent directory exists if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { parent := filepath.Dir(hdr.Name) parentPath := filepath.Join(dest, parent) if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { // RootPair() is confined inside this loop as most cases will not require a call, so we can spend some // unneeded function calls in the uncommon case to encapsulate logic -- implied directories are a niche // usage that reduces the portability of an image. uid, gid := options.IDMap.RootPair() err = user.MkdirAllAndChown(parentPath, ImpliedDirectoryMode, uid, gid, user.WithOnlyNew) if err != nil { return err } } } return nil } // Untar reads a stream of bytes from `archive`, parses it as a tar archive, // and unpacks it into the directory at `dest`. // The archive may be compressed with one of the following algorithms: // identity (uncompressed), gzip, bzip2, xz. // // FIXME: specify behavior when target path exists vs. doesn't exist. func Untar(tarArchive io.Reader, dest string, options *TarOptions) error { return untarHandler(tarArchive, dest, options, true) } // UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, // and unpacks it into the directory at `dest`. // The archive must be an uncompressed stream. 
func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error { return untarHandler(tarArchive, dest, options, false) } // Handler for teasing out the automatic decompression func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error { if tarArchive == nil { return errors.New("empty archive") } dest = filepath.Clean(dest) if options == nil { options = &TarOptions{} } if options.ExcludePatterns == nil { options.ExcludePatterns = []string{} } r := tarArchive if decompress { decompressedArchive, err := compression.DecompressStream(tarArchive) if err != nil { return err } defer decompressedArchive.Close() r = decompressedArchive } return Unpack(r, dest, options) } // TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. // If either Tar or Untar fails, TarUntar aborts and returns the error. func (archiver *Archiver) TarUntar(src, dst string) error { archive, err := Tar(src, compression.None) if err != nil { return err } defer archive.Close() return archiver.Untar(archive, dst, &TarOptions{ IDMap: archiver.IDMapping, }) } // UntarPath untar a file from path to a destination, src is the source tar file path. func (archiver *Archiver) UntarPath(src, dst string) error { archive, err := os.Open(src) if err != nil { return err } defer archive.Close() return archiver.Untar(archive, dst, &TarOptions{ IDMap: archiver.IDMapping, }) } // CopyWithTar creates a tar archive of filesystem path `src`, and // unpacks it at filesystem path `dst`. // The archive is streamed directly with fixed buffering and no // intermediary disk IO. 
func (archiver *Archiver) CopyWithTar(src, dst string) error { srcSt, err := os.Stat(src) if err != nil { return err } if !srcSt.IsDir() { return archiver.CopyFileWithTar(src, dst) } // if this Archiver is set up with ID mapping we need to create // the new destination directory with the remapped root UID/GID pair // as owner uid, gid := archiver.IDMapping.RootPair() // Create dst, copy src's content into it if err := user.MkdirAllAndChown(dst, 0o755, uid, gid, user.WithOnlyNew); err != nil { return err } return archiver.TarUntar(src, dst) } // CopyFileWithTar emulates the behavior of the 'cp' command-line // for a single file. It copies a regular file from path `src` to // path `dst`, and preserves all its metadata. func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { srcSt, err := os.Stat(src) if err != nil { return err } if srcSt.IsDir() { return errors.New("can't copy a directory") } // Clean up the trailing slash. This must be done in an operating // system specific manner. 
if dst[len(dst)-1] == os.PathSeparator { dst = filepath.Join(dst, filepath.Base(src)) } // Create the holding directory if necessary if err := os.MkdirAll(filepath.Dir(dst), 0o700); err != nil { return err } r, w := io.Pipe() errC := make(chan error, 1) go func() { defer close(errC) errC <- func() error { defer w.Close() srcF, err := os.Open(src) if err != nil { return err } defer srcF.Close() hdr, err := tarheader.FileInfoHeaderNoLookups(srcSt, "") if err != nil { return err } hdr.Format = tar.FormatPAX hdr.ModTime = hdr.ModTime.Truncate(time.Second) hdr.AccessTime = time.Time{} hdr.ChangeTime = time.Time{} hdr.Name = filepath.Base(dst) hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) if err := remapIDs(archiver.IDMapping, hdr); err != nil { return err } tw := tar.NewWriter(w) defer tw.Close() if err := tw.WriteHeader(hdr); err != nil { return err } if err := copyWithBuffer(tw, srcF); err != nil { return err } return nil }() }() defer func() { if er := <-errC; err == nil && er != nil { err = er } }() err = archiver.Untar(r, filepath.Dir(dst), nil) if err != nil { r.CloseWithError(err) } return err } // IdentityMapping returns the IdentityMapping of the archiver. 
func (archiver *Archiver) IdentityMapping() user.IdentityMapping { return archiver.IDMapping } func remapIDs(idMapping user.IdentityMapping, hdr *tar.Header) error { uid, gid, err := idMapping.ToHost(hdr.Uid, hdr.Gid) hdr.Uid, hdr.Gid = uid, gid return err } golang-github-moby-go-archive-0.2.0/archive_linux.go000066400000000000000000000060421512625003600224170ustar00rootroot00000000000000package archive import ( "archive/tar" "fmt" "os" "path/filepath" "strings" "github.com/moby/sys/userns" "golang.org/x/sys/unix" ) func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter { if format == OverlayWhiteoutFormat { return overlayWhiteoutConverter{} } return nil } type overlayWhiteoutConverter struct{} func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) (wo *tar.Header, _ error) { // convert whiteouts to AUFS format if fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 { // we just rename the file and make it normal dir, filename := filepath.Split(hdr.Name) hdr.Name = filepath.Join(dir, WhiteoutPrefix+filename) hdr.Mode = 0o600 hdr.Typeflag = tar.TypeReg hdr.Size = 0 } if fi.Mode()&os.ModeDir == 0 { // FIXME(thaJeztah): return a sentinel error instead of nil, nil return nil, nil } opaqueXattrName := "trusted.overlay.opaque" if userns.RunningInUserNS() { opaqueXattrName = "user.overlay.opaque" } // convert opaque dirs to AUFS format by writing an empty file with the prefix opaque, err := lgetxattr(path, opaqueXattrName) if err != nil { return nil, err } if len(opaque) != 1 || opaque[0] != 'y' { // FIXME(thaJeztah): return a sentinel error instead of nil, nil return nil, nil } delete(hdr.PAXRecords, paxSchilyXattr+opaqueXattrName) // create a header for the whiteout file // it should inherit some properties from the parent, but be a regular file return &tar.Header{ Typeflag: tar.TypeReg, Mode: hdr.Mode & int64(os.ModePerm), Name: filepath.Join(hdr.Name, WhiteoutOpaqueDir), // #nosec G305 -- An 
archive is being created, not extracted. Size: 0, Uid: hdr.Uid, Uname: hdr.Uname, Gid: hdr.Gid, Gname: hdr.Gname, AccessTime: hdr.AccessTime, ChangeTime: hdr.ChangeTime, }, nil } func (c overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) { base := filepath.Base(path) dir := filepath.Dir(path) // if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay if base == WhiteoutOpaqueDir { opaqueXattrName := "trusted.overlay.opaque" if userns.RunningInUserNS() { opaqueXattrName = "user.overlay.opaque" } err := unix.Setxattr(dir, opaqueXattrName, []byte{'y'}, 0) if err != nil { return false, fmt.Errorf("setxattr('%s', %s=y): %w", dir, opaqueXattrName, err) } // don't write the file itself return false, err } // if a file was deleted and we are using overlay, we need to create a character device if strings.HasPrefix(base, WhiteoutPrefix) { originalBase := base[len(WhiteoutPrefix):] originalPath := filepath.Join(dir, originalBase) if err := unix.Mknod(originalPath, unix.S_IFCHR, 0); err != nil { return false, fmt.Errorf("failed to mknod('%s', S_IFCHR, 0): %w", originalPath, err) } if err := os.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil { return false, err } // don't write the file itself return false, nil } return true, nil } golang-github-moby-go-archive-0.2.0/archive_linux_test.go000066400000000000000000000132351512625003600234600ustar00rootroot00000000000000package archive import ( "archive/tar" "bytes" "errors" "io" "os" "path/filepath" "syscall" "testing" "github.com/moby/sys/userns" "golang.org/x/sys/unix" "gotest.tools/v3/assert" is "gotest.tools/v3/assert/cmp" "gotest.tools/v3/skip" ) // setupOverlayTestDir creates files in a directory with overlay whiteouts // Tree layout // // . 
// ├── d1 # opaque, 0700 // │ └── f1 # empty file, 0600 // ├── d2 # opaque, 0750 // │ └── f1 # empty file, 0660 // └── d3 # 0700 // └── f1 # whiteout, 0644 func setupOverlayTestDir(t *testing.T, src string) { skip.If(t, os.Getuid() != 0, "skipping test that requires root") skip.If(t, userns.RunningInUserNS(), "skipping test that requires initial userns (trusted.overlay.opaque xattr cannot be set in userns, even with Ubuntu kernel)") // Create opaque directory containing single file and permission 0700 err := os.Mkdir(filepath.Join(src, "d1"), 0o700) assert.NilError(t, err) err = unix.Lsetxattr(filepath.Join(src, "d1"), "trusted.overlay.opaque", []byte("y"), 0) assert.NilError(t, err) err = os.WriteFile(filepath.Join(src, "d1", "f1"), []byte{}, 0o600) assert.NilError(t, err) // Create another opaque directory containing single file but with permission 0750 err = os.Mkdir(filepath.Join(src, "d2"), 0o750) assert.NilError(t, err) err = unix.Lsetxattr(filepath.Join(src, "d2"), "trusted.overlay.opaque", []byte("y"), 0) assert.NilError(t, err) err = os.WriteFile(filepath.Join(src, "d2", "f1"), []byte{}, 0o660) assert.NilError(t, err) // Create regular directory with deleted file err = os.Mkdir(filepath.Join(src, "d3"), 0o700) assert.NilError(t, err) err = unix.Mknod(filepath.Join(src, "d3", "f1"), unix.S_IFCHR, 0) assert.NilError(t, err) } func checkOpaqueness(t *testing.T, path string, opaque string) { xattrOpaque, err := lgetxattr(path, "trusted.overlay.opaque") assert.NilError(t, err) if string(xattrOpaque) != opaque { t.Fatalf("Unexpected opaque value: %q, expected %q", string(xattrOpaque), opaque) } } func checkOverlayWhiteout(t *testing.T, path string) { stat, err := os.Stat(path) assert.NilError(t, err) statT, ok := stat.Sys().(*syscall.Stat_t) if !ok { t.Fatalf("Unexpected type: %t, expected *syscall.Stat_t", stat.Sys()) } if statT.Rdev != 0 { t.Fatalf("Non-zero device number for whiteout") } } func checkFileMode(t *testing.T, path string, perm os.FileMode) { 
stat, err := os.Stat(path) assert.NilError(t, err) if stat.Mode() != perm { t.Fatalf("Unexpected file mode for %s: %o, expected %o", path, stat.Mode(), perm) } } func TestOverlayTarUntar(t *testing.T) { restore := overrideUmask(0) defer restore() src, err := os.MkdirTemp("", "docker-test-overlay-tar-src") assert.NilError(t, err) defer os.RemoveAll(src) setupOverlayTestDir(t, src) dst, err := os.MkdirTemp("", "docker-test-overlay-tar-dst") assert.NilError(t, err) defer os.RemoveAll(dst) options := &TarOptions{ WhiteoutFormat: OverlayWhiteoutFormat, } reader, err := TarWithOptions(src, options) assert.NilError(t, err) archive, err := io.ReadAll(reader) reader.Close() assert.NilError(t, err) // The archive should encode opaque directories and file whiteouts // in AUFS format. entries := make(map[string]struct{}) rdr := tar.NewReader(bytes.NewReader(archive)) for { h, err := rdr.Next() if errors.Is(err, io.EOF) { break } assert.NilError(t, err) assert.Check(t, is.Equal(h.Devmajor, int64(0)), "unexpected device file in archive") assert.Check(t, is.DeepEqual(h.PAXRecords, map[string]string(nil))) entries[h.Name] = struct{}{} } assert.DeepEqual(t, entries, map[string]struct{}{ "d1/": {}, "d1/" + WhiteoutOpaqueDir: {}, "d1/f1": {}, "d2/": {}, "d2/" + WhiteoutOpaqueDir: {}, "d2/f1": {}, "d3/": {}, "d3/" + WhiteoutPrefix + "f1": {}, }) err = Untar(bytes.NewReader(archive), dst, options) assert.NilError(t, err) checkFileMode(t, filepath.Join(dst, "d1"), 0o700|os.ModeDir) checkFileMode(t, filepath.Join(dst, "d2"), 0o750|os.ModeDir) checkFileMode(t, filepath.Join(dst, "d3"), 0o700|os.ModeDir) checkFileMode(t, filepath.Join(dst, "d1", "f1"), 0o600) checkFileMode(t, filepath.Join(dst, "d2", "f1"), 0o660) checkFileMode(t, filepath.Join(dst, "d3", "f1"), os.ModeCharDevice|os.ModeDevice) checkOpaqueness(t, filepath.Join(dst, "d1"), "y") checkOpaqueness(t, filepath.Join(dst, "d2"), "y") checkOpaqueness(t, filepath.Join(dst, "d3"), "") checkOverlayWhiteout(t, filepath.Join(dst, "d3", 
"f1")) } func TestOverlayTarAUFSUntar(t *testing.T) { restore := overrideUmask(0) defer restore() src, err := os.MkdirTemp("", "docker-test-overlay-tar-src") assert.NilError(t, err) defer os.RemoveAll(src) setupOverlayTestDir(t, src) dst, err := os.MkdirTemp("", "docker-test-overlay-tar-dst") assert.NilError(t, err) defer os.RemoveAll(dst) archive, err := TarWithOptions(src, &TarOptions{ WhiteoutFormat: OverlayWhiteoutFormat, }) assert.NilError(t, err) defer archive.Close() err = Untar(archive, dst, &TarOptions{ WhiteoutFormat: AUFSWhiteoutFormat, }) assert.NilError(t, err) checkFileMode(t, filepath.Join(dst, "d1"), 0o700|os.ModeDir) checkFileMode(t, filepath.Join(dst, "d1", WhiteoutOpaqueDir), 0o700) checkFileMode(t, filepath.Join(dst, "d2"), 0o750|os.ModeDir) checkFileMode(t, filepath.Join(dst, "d2", WhiteoutOpaqueDir), 0o750) checkFileMode(t, filepath.Join(dst, "d3"), 0o700|os.ModeDir) checkFileMode(t, filepath.Join(dst, "d1", "f1"), 0o600) checkFileMode(t, filepath.Join(dst, "d2", "f1"), 0o660) checkFileMode(t, filepath.Join(dst, "d3", WhiteoutPrefix+"f1"), 0o600) } golang-github-moby-go-archive-0.2.0/archive_other.go000066400000000000000000000001721512625003600223770ustar00rootroot00000000000000//go:build !linux package archive func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter { return nil } golang-github-moby-go-archive-0.2.0/archive_test.go000066400000000000000000000763241512625003600222510ustar00rootroot00000000000000package archive import ( "archive/tar" "bytes" "errors" "fmt" "io" "io/fs" "os" "os/exec" "path/filepath" "runtime" "strings" "testing" "github.com/moby/sys/user" "github.com/moby/sys/userns" "gotest.tools/v3/assert" is "gotest.tools/v3/assert/cmp" "gotest.tools/v3/skip" "github.com/moby/go-archive/compression" ) var defaultArchiver = NewDefaultArchiver() func defaultTarUntar(src, dst string) error { return defaultArchiver.TarUntar(src, dst) } func defaultUntarPath(src, dst string) error { return 
defaultArchiver.UntarPath(src, dst) } func defaultCopyFileWithTar(src, dst string) (err error) { return defaultArchiver.CopyFileWithTar(src, dst) } func defaultCopyWithTar(src, dst string) error { return defaultArchiver.CopyWithTar(src, dst) } // toUnixPath converts the given path to a unix-path, using forward-slashes, and // with the drive-letter replaced (e.g. "C:\temp\file.txt" becomes "/c/temp/file.txt"). // It is a no-op on non-Windows platforms. func toUnixPath(p string) string { if runtime.GOOS != "windows" { return p } p = filepath.ToSlash(p) // This should probably be more generic, but this suits our needs for now. if pth, ok := strings.CutPrefix(p, "C:/"); ok { return "/c/" + pth } if pth, ok := strings.CutPrefix(p, "D:/"); ok { return "/d/" + pth } return p } func TestIsArchivePathDir(t *testing.T) { tmp := t.TempDir() cmd := exec.Command("sh", "-c", fmt.Sprintf("mkdir -p %s/archivedir", toUnixPath(tmp))) output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Failed to create directory (%v): %s", err, output) } if IsArchivePath(filepath.Join(tmp, "archivedir")) { t.Fatalf("Incorrectly recognised directory as an archive") } } func TestIsArchivePathInvalidFile(t *testing.T) { tmp := t.TempDir() cmd := exec.Command("sh", "-c", fmt.Sprintf("dd if=/dev/zero bs=1024 count=1 of=%[1]s/archive && gzip --stdout %[1]s/archive > %[1]s/archive.gz", toUnixPath(tmp))) output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Failed to create archive file (%v): %s", err, output) } if IsArchivePath(filepath.Join(tmp, "archive")) { t.Fatalf("Incorrectly recognised invalid tar path as archive") } if IsArchivePath(filepath.Join(tmp, "archive.gz")) { t.Fatalf("Incorrectly recognised invalid compressed tar path as archive") } } func TestIsArchivePathTar(t *testing.T) { tmp := t.TempDir() cmdStr := fmt.Sprintf("touch %[1]s/archivedata && tar -cf %[1]s/archive %[1]s/archivedata && gzip --stdout %[1]s/archive > %[1]s/archive.gz", toUnixPath(tmp)) cmd := 
exec.Command("sh", "-c", cmdStr) output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Failed to create archive file (%v):\ncommand: %s\noutput: %s", err, cmd.String(), output) } if !IsArchivePath(filepath.Join(tmp, "/archive")) { t.Fatalf("Did not recognise valid tar path as archive") } if !IsArchivePath(filepath.Join(tmp, "archive.gz")) { t.Fatalf("Did not recognise valid compressed tar path as archive") } } func TestUntarPathWithInvalidDest(t *testing.T) { tempFolder := t.TempDir() invalidDestFolder := filepath.Join(tempFolder, "invalidDest") // Create a src file srcFile := filepath.Join(tempFolder, "src") tarFile := filepath.Join(tempFolder, "src.tar") f, err := os.Create(srcFile) if assert.Check(t, err) { _ = f.Close() } d, err := os.Create(invalidDestFolder) // being a file (not dir) should cause an error if assert.Check(t, err) { _ = d.Close() } // Translate back to Unix semantics as next exec.Command is run under sh srcFileU := toUnixPath(srcFile) tarFileU := toUnixPath(tarFile) cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) output, err := cmd.CombinedOutput() assert.NilError(t, err, "command: %s\noutput: %s", cmd.String(), output) err = defaultUntarPath(tarFile, invalidDestFolder) if err == nil { t.Fatalf("UntarPath with invalid destination path should throw an error.") } } func TestUntarPathWithInvalidSrc(t *testing.T) { err := defaultUntarPath("/invalid/path", t.TempDir()) if err == nil { t.Fatalf("UntarPath with invalid src path should throw an error.") } } func TestUntarPath(t *testing.T) { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") tmpFolder := t.TempDir() srcFile := filepath.Join(tmpFolder, "src") tarFile := filepath.Join(tmpFolder, "src.tar") f, err := os.Create(filepath.Join(tmpFolder, "src")) if assert.Check(t, err) { _ = f.Close() } destFolder := filepath.Join(tmpFolder, "dest") err = os.MkdirAll(destFolder, 0o740) if err != nil { t.Fatalf("Fail to create the destination 
file") } // Translate back to Unix semantics as next exec.Command is run under sh srcFileU := toUnixPath(srcFile) tarFileU := toUnixPath(tarFile) cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) output, err := cmd.CombinedOutput() assert.NilError(t, err, "command: %s\noutput: %s", cmd.String(), output) err = defaultUntarPath(tarFile, destFolder) if err != nil { t.Fatalf("UntarPath shouldn't throw an error, %s.", err) } expectedFile := filepath.Join(destFolder, srcFileU) _, err = os.Stat(expectedFile) if err != nil { t.Fatalf("Destination folder should contain the source file but did not.") } } // Do the same test as above but with the destination as file, it should fail func TestUntarPathWithDestinationFile(t *testing.T) { tmpFolder := t.TempDir() srcFile := filepath.Join(tmpFolder, "src") tarFile := filepath.Join(tmpFolder, "src.tar") f, err := os.Create(filepath.Join(tmpFolder, "src")) if assert.Check(t, err) { _ = f.Close() } // Translate back to Unix semantics as next exec.Command is run under sh srcFileU := toUnixPath(srcFile) tarFileU := toUnixPath(tarFile) cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Failed to create archive file (%v):\ncommand: %s\noutput: %s", err, cmd.String(), output) } destFile := filepath.Join(tmpFolder, "dest") f, err = os.Create(destFile) if assert.Check(t, err) { _ = f.Close() } err = defaultUntarPath(tarFile, destFile) if err == nil { t.Fatalf("UntarPath should throw an error if the destination if a file") } } // Do the same test as above but with the destination folder already exists // and the destination file is a directory // It's working, see https://github.com/docker/docker/issues/10040 func TestUntarPathWithDestinationSrcFileAsFolder(t *testing.T) { tmpFolder := t.TempDir() srcFile := filepath.Join(tmpFolder, "src") tarFile := filepath.Join(tmpFolder, "src.tar") f, err := os.Create(srcFile) if assert.Check(t, err) { _ = 
f.Close() } // Translate back to Unix semantics as next exec.Command is run under sh srcFileU := toUnixPath(srcFile) tarFileU := toUnixPath(tarFile) cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Failed to create archive file (%v):\ncommand: %s\noutput: %s", err, cmd.String(), output) } destFolder := filepath.Join(tmpFolder, "dest") err = os.MkdirAll(destFolder, 0o740) if err != nil { t.Fatalf("Fail to create the destination folder") } // Let's create a folder that will has the same path as the extracted file (from tar) destSrcFileAsFolder := filepath.Join(destFolder, srcFileU) err = os.MkdirAll(destSrcFileAsFolder, 0o740) if err != nil { t.Fatal(err) } err = defaultUntarPath(tarFile, destFolder) if err != nil { t.Fatalf("UntarPath should throw not throw an error if the extracted file already exists and is a folder") } } func TestCopyWithTarInvalidSrc(t *testing.T) { tempFolder := t.TempDir() destFolder := filepath.Join(tempFolder, "dest") invalidSrc := filepath.Join(tempFolder, "doesnotexists") err := os.MkdirAll(destFolder, 0o740) if err != nil { t.Fatal(err) } err = defaultCopyWithTar(invalidSrc, destFolder) if err == nil { t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.") } } func TestCopyWithTarInexistentDestWillCreateIt(t *testing.T) { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") tempFolder := t.TempDir() srcFolder := filepath.Join(tempFolder, "src") inexistentDestFolder := filepath.Join(tempFolder, "doesnotexists") err := os.MkdirAll(srcFolder, 0o740) if err != nil { t.Fatal(err) } err = defaultCopyWithTar(srcFolder, inexistentDestFolder) if err != nil { t.Fatalf("CopyWithTar with an inexistent folder shouldn't fail.") } _, err = os.Stat(inexistentDestFolder) if err != nil { t.Fatalf("CopyWithTar with an inexistent folder should create it.") } } // Test CopyWithTar with a file as src func 
TestCopyWithTarSrcFile(t *testing.T) { folder := t.TempDir() dest := filepath.Join(folder, "dest") srcFolder := filepath.Join(folder, "src") src := filepath.Join(folder, filepath.Join("src", "src")) if err := os.MkdirAll(srcFolder, 0o740); err != nil { t.Fatal(err) } if err := os.MkdirAll(dest, 0o740); err != nil { t.Fatal(err) } if err := os.WriteFile(src, []byte("content"), 0o777); err != nil { t.Fatal(err) } if err := defaultCopyWithTar(src, dest); err != nil { t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err) } // FIXME Check the content if _, err := os.Stat(dest); err != nil { t.Fatalf("Destination file should be the same as the source.") } } // Test CopyWithTar with a folder as src func TestCopyWithTarSrcFolder(t *testing.T) { folder := t.TempDir() dest := filepath.Join(folder, "dest") src := filepath.Join(folder, filepath.Join("src", "folder")) if err := os.MkdirAll(src, 0o740); err != nil { t.Fatal(err) } if err := os.MkdirAll(dest, 0o740); err != nil { t.Fatal(err) } if err := os.WriteFile(filepath.Join(src, "file"), []byte("content"), 0o777); err != nil { t.Fatal(err) } if err := defaultCopyWithTar(src, dest); err != nil { t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err) } // FIXME Check the content (the file inside) if _, err := os.Stat(dest); err != nil { t.Fatalf("Destination folder should contain the source file but did not.") } } func TestCopyFileWithTarInvalidSrc(t *testing.T) { tempFolder := t.TempDir() destFolder := filepath.Join(tempFolder, "dest") err := os.MkdirAll(destFolder, 0o740) if err != nil { t.Fatal(err) } invalidFile := filepath.Join(tempFolder, "doesnotexists") err = defaultCopyFileWithTar(invalidFile, destFolder) if err == nil { t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.") } } func TestCopyFileWithTarInexistentDestWillCreateIt(t *testing.T) { tempFolder := t.TempDir() srcFile := filepath.Join(tempFolder, "src") inexistentDestFolder := filepath.Join(tempFolder, 
"doesnotexists") f, err := os.Create(srcFile) if assert.Check(t, err) { _ = f.Close() } err = defaultCopyFileWithTar(srcFile, inexistentDestFolder) if err != nil { t.Fatalf("CopyWithTar with an inexistent folder shouldn't fail.") } _, err = os.Stat(inexistentDestFolder) if err != nil { t.Fatalf("CopyWithTar with an inexistent folder should create it.") } // FIXME Test the src file and content } func TestCopyFileWithTarSrcFolder(t *testing.T) { folder := t.TempDir() dest := filepath.Join(folder, "dest") src := filepath.Join(folder, "srcfolder") err := os.MkdirAll(src, 0o740) if err != nil { t.Fatal(err) } err = os.MkdirAll(dest, 0o740) if err != nil { t.Fatal(err) } err = defaultCopyFileWithTar(src, dest) if err == nil { t.Fatalf("CopyFileWithTar should throw an error with a folder.") } } func TestCopyFileWithTarSrcFile(t *testing.T) { folder := t.TempDir() dest := filepath.Join(folder, "dest") srcFolder := filepath.Join(folder, "src") src := filepath.Join(folder, filepath.Join("src", "src")) if err := os.MkdirAll(srcFolder, 0o740); err != nil { t.Fatal(err) } if err := os.MkdirAll(dest, 0o740); err != nil { t.Fatal(err) } if err := os.WriteFile(src, []byte("content"), 0o777); err != nil { t.Fatal(err) } if err := defaultCopyWithTar(src, dest+"/"); err != nil { t.Fatalf("archiver.CopyFileWithTar shouldn't throw an error, %s.", err) } if _, err := os.Stat(dest); err != nil { t.Fatalf("Destination folder should contain the source file but did not.") } } func TestTarFiles(t *testing.T) { // try without hardlinks if err := checkNoChanges(t, 1000, false); err != nil { t.Fatal(err) } // try with hardlinks if err := checkNoChanges(t, 1000, true); err != nil { t.Fatal(err) } } func checkNoChanges(t *testing.T, fileNum int, hardlinks bool) error { srcDir, err := os.MkdirTemp(t.TempDir(), "srcDir") if err != nil { return err } destDir, err := os.MkdirTemp(t.TempDir(), "destDir") if err != nil { return err } _, err = prepareUntarSourceDirectory(fileNum, srcDir, hardlinks) if 
err != nil { return err } err = defaultTarUntar(srcDir, destDir) if err != nil { return err } changes, err := ChangesDirs(destDir, srcDir) if err != nil { return err } if len(changes) > 0 { return fmt.Errorf("with %d files and %v hardlinks: expected 0 changes, got %d", fileNum, hardlinks, len(changes)) } return nil } func tarUntar(t *testing.T, origin string, options *TarOptions) ([]Change, error) { archive, err := TarWithOptions(origin, options) if err != nil { t.Fatal(err) } defer archive.Close() buf := make([]byte, 10) if _, err := io.ReadFull(archive, buf); err != nil { return nil, err } wrap := io.MultiReader(bytes.NewReader(buf), archive) detectedCompression := compression.Detect(buf) expected := options.Compression if detectedCompression.Extension() != expected.Extension() { return nil, fmt.Errorf("wrong compression detected; expected: %s, got: %s", expected.Extension(), detectedCompression.Extension()) } tmp := t.TempDir() if err := Untar(wrap, tmp, nil); err != nil { return nil, err } if _, err := os.Stat(tmp); err != nil { return nil, err } return ChangesDirs(origin, tmp) } func TestTarUntar(t *testing.T) { origin := t.TempDir() if err := os.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0o700); err != nil { t.Fatal(err) } if err := os.WriteFile(filepath.Join(origin, "2"), []byte("welcome!"), 0o700); err != nil { t.Fatal(err) } if err := os.WriteFile(filepath.Join(origin, "3"), []byte("will be ignored"), 0o700); err != nil { t.Fatal(err) } for _, c := range []compression.Compression{ compression.None, compression.Gzip, } { changes, err := tarUntar(t, origin, &TarOptions{ Compression: c, ExcludePatterns: []string{"3"}, }) if err != nil { t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err) } if len(changes) != 1 || changes[0].Path != string(filepath.Separator)+"3" { t.Fatalf("Unexpected differences after tarUntar: %v", changes) } } } func TestTarWithOptionsChownOptsAlwaysOverridesIdPair(t *testing.T) { filePath := 
filepath.Join(t.TempDir(), "1") err := os.WriteFile(filePath, []byte("hello world"), 0o700) assert.NilError(t, err) idMaps := []user.IDMap{ 0: { ID: 0, ParentID: 0, Count: 65536, }, 1: { ID: 0, ParentID: 100000, Count: 65536, }, } tests := []struct { opts *TarOptions expectedUID int expectedGID int }{ {&TarOptions{ChownOpts: &ChownOpts{UID: 1337, GID: 42}}, 1337, 42}, {&TarOptions{ChownOpts: &ChownOpts{UID: 100001, GID: 100001}, IDMap: user.IdentityMapping{UIDMaps: idMaps, GIDMaps: idMaps}}, 100001, 100001}, {&TarOptions{ChownOpts: &ChownOpts{UID: 0, GID: 0}, NoLchown: false}, 0, 0}, {&TarOptions{ChownOpts: &ChownOpts{UID: 1, GID: 1}, NoLchown: true}, 1, 1}, {&TarOptions{ChownOpts: &ChownOpts{UID: 1000, GID: 1000}, NoLchown: true}, 1000, 1000}, } for _, tc := range tests { t.Run("", func(t *testing.T) { reader, err := TarWithOptions(filePath, tc.opts) assert.NilError(t, err) tr := tar.NewReader(reader) defer reader.Close() for { hdr, err := tr.Next() if errors.Is(err, io.EOF) { // end of tar archive break } assert.NilError(t, err) assert.Check(t, is.Equal(hdr.Uid, tc.expectedUID), "Uid equals expected value") assert.Check(t, is.Equal(hdr.Gid, tc.expectedGID), "Gid equals expected value") } }) } } func TestTarWithOptions(t *testing.T) { origin := t.TempDir() if _, err := os.MkdirTemp(origin, "folder"); err != nil { t.Fatal(err) } if err := os.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0o700); err != nil { t.Fatal(err) } if err := os.WriteFile(filepath.Join(origin, "2"), []byte("welcome!"), 0o700); err != nil { t.Fatal(err) } tests := []struct { opts *TarOptions numChanges int }{ {&TarOptions{IncludeFiles: []string{"1"}}, 2}, {&TarOptions{ExcludePatterns: []string{"2"}}, 1}, {&TarOptions{ExcludePatterns: []string{"1", "folder*"}}, 2}, {&TarOptions{IncludeFiles: []string{"1", "1"}}, 2}, {&TarOptions{IncludeFiles: []string{"1"}, RebaseNames: map[string]string{"1": "test"}}, 4}, } for _, tc := range tests { changes, err := tarUntar(t, origin, tc.opts) 
if err != nil { t.Fatalf("Error tar/untar when testing inclusion/exclusion: %s", err) } if len(changes) != tc.numChanges { t.Errorf("Expected %d changes, got %d for %+v:", tc.numChanges, len(changes), tc.opts) } } } // Some tar archives such as http://haproxy.1wt.eu/download/1.5/src/devel/haproxy-1.5-dev21.tar.gz // use PAX Global Extended Headers. // Failing prevents the archives from being uncompressed during ADD func TestTypeXGlobalHeaderDoesNotFail(t *testing.T) { hdr := tar.Header{Typeflag: tar.TypeXGlobalHeader} tmpDir := t.TempDir() err := createTarFile(filepath.Join(tmpDir, "pax_global_header"), tmpDir, &hdr, nil, nil) if err != nil { t.Fatal(err) } } // Some tar have both GNU specific (huge uid) and Ustar specific (long name) things. // Not supposed to happen (should use PAX instead of Ustar for long name) but it does and it should still work. func TestUntarUstarGnuConflict(t *testing.T) { f, err := os.Open("testdata/broken.tar") if err != nil { t.Fatal(err) } defer f.Close() found := false tr := tar.NewReader(f) // Iterate through the files in the archive. 
for { hdr, err := tr.Next() if errors.Is(err, io.EOF) { // end of tar archive break } if err != nil { t.Fatal(err) } if hdr.Name == "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm" { found = true break } } if !found { t.Fatalf("%s not found in the archive", "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm") } } func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) { fileData := []byte("fooo") for n := 0; n < numberOfFiles; n++ { fileName := fmt.Sprintf("file-%d", n) if err := os.WriteFile(filepath.Join(targetPath, fileName), fileData, 0o700); err != nil { return 0, err } if makeLinks { if err := os.Link(filepath.Join(targetPath, fileName), filepath.Join(targetPath, fileName+"-link")); err != nil { return 0, err } } } totalSize := numberOfFiles * len(fileData) return totalSize, nil } func BenchmarkTarUntar(b *testing.B) { origin, err := os.MkdirTemp(b.TempDir(), "docker-test-untar-origin") if err != nil { b.Fatal(err) } tempDir, err := os.MkdirTemp(b.TempDir(), "docker-test-untar-destination") if err != nil { b.Fatal(err) } target := filepath.Join(tempDir, "dest") n, err := prepareUntarSourceDirectory(100, origin, false) if err != nil { b.Fatal(err) } b.ResetTimer() b.SetBytes(int64(n)) for n := 0; n < b.N; n++ { err := defaultTarUntar(origin, target) if err != nil { b.Fatal(err) } os.RemoveAll(target) } } func BenchmarkTarUntarWithLinks(b *testing.B) { origin, err := os.MkdirTemp(b.TempDir(), "docker-test-untar-origin") if err != nil { b.Fatal(err) } tempDir, err := os.MkdirTemp(b.TempDir(), "docker-test-untar-destination") if err != nil { b.Fatal(err) } target := filepath.Join(tempDir, "dest") n, err := prepareUntarSourceDirectory(100, origin, true) if err != nil { b.Fatal(err) } b.ResetTimer() b.SetBytes(int64(n)) for n := 0; n < b.N; n++ { err := defaultTarUntar(origin, target) if err != nil { b.Fatal(err) } 
os.RemoveAll(target) } } func TestUntarInvalidFilenames(t *testing.T) { for i, headers := range [][]*tar.Header{ { { Name: "../victim/dotdot", Typeflag: tar.TypeReg, Mode: 0o644, }, }, { { // Note the leading slash Name: "/../victim/slash-dotdot", Typeflag: tar.TypeReg, Mode: 0o644, }, }, } { if err := testBreakout("untar", "docker-TestUntarInvalidFilenames", headers); err != nil { t.Fatalf("i=%d. %v", i, err) } } } func TestUntarHardlinkToSymlink(t *testing.T) { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") for i, headers := range [][]*tar.Header{ { { Name: "symlink1", Typeflag: tar.TypeSymlink, Linkname: "regfile", Mode: 0o644, }, { Name: "symlink2", Typeflag: tar.TypeLink, Linkname: "symlink1", Mode: 0o644, }, { Name: "regfile", Typeflag: tar.TypeReg, Mode: 0o644, }, }, } { if err := testBreakout("untar", "docker-TestUntarHardlinkToSymlink", headers); err != nil { t.Fatalf("i=%d. %v", i, err) } } } func TestUntarInvalidHardlink(t *testing.T) { for i, headers := range [][]*tar.Header{ { // try reading victim/hello (../) { Name: "dotdot", Typeflag: tar.TypeLink, Linkname: "../victim/hello", Mode: 0o644, }, }, { // try reading victim/hello (/../) { Name: "slash-dotdot", Typeflag: tar.TypeLink, // Note the leading slash Linkname: "/../victim/hello", Mode: 0o644, }, }, { // try writing victim/file { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0o755, }, { Name: "loophole-victim/file", Typeflag: tar.TypeReg, Mode: 0o644, }, }, { // try reading victim/hello (hardlink, symlink) { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0o755, }, { Name: "symlink", Typeflag: tar.TypeSymlink, Linkname: "loophole-victim/hello", Mode: 0o644, }, }, { // Try reading victim/hello (hardlink, hardlink) { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0o755, }, { Name: "hardlink", Typeflag: tar.TypeLink, Linkname: "loophole-victim/hello", Mode: 
0o644, }, }, { // Try removing victim directory (hardlink) { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0o755, }, { Name: "loophole-victim", Typeflag: tar.TypeReg, Mode: 0o644, }, }, } { if err := testBreakout("untar", "docker-TestUntarInvalidHardlink", headers); err != nil { t.Fatalf("i=%d. %v", i, err) } } } func TestUntarInvalidSymlink(t *testing.T) { for i, headers := range [][]*tar.Header{ { // try reading victim/hello (../) { Name: "dotdot", Typeflag: tar.TypeSymlink, Linkname: "../victim/hello", Mode: 0o644, }, }, { // try reading victim/hello (/../) { Name: "slash-dotdot", Typeflag: tar.TypeSymlink, // Note the leading slash Linkname: "/../victim/hello", Mode: 0o644, }, }, { // try writing victim/file { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0o755, }, { Name: "loophole-victim/file", Typeflag: tar.TypeReg, Mode: 0o644, }, }, { // try reading victim/hello (symlink, symlink) { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0o755, }, { Name: "symlink", Typeflag: tar.TypeSymlink, Linkname: "loophole-victim/hello", Mode: 0o644, }, }, { // try reading victim/hello (symlink, hardlink) { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0o755, }, { Name: "hardlink", Typeflag: tar.TypeLink, Linkname: "loophole-victim/hello", Mode: 0o644, }, }, { // try removing victim directory (symlink) { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0o755, }, { Name: "loophole-victim", Typeflag: tar.TypeReg, Mode: 0o644, }, }, { // try writing to victim/newdir/newfile with a symlink in the path { // this header needs to be before the next one, or else there is an error Name: "dir/loophole", Typeflag: tar.TypeSymlink, Linkname: "../../victim", Mode: 0o755, }, { Name: "dir/loophole/newdir/newfile", Typeflag: tar.TypeReg, Mode: 0o644, }, }, } { if err := testBreakout("untar", "docker-TestUntarInvalidSymlink", 
headers); err != nil { t.Fatalf("i=%d. %v", i, err) } } } func TestTempArchiveCloseMultipleTimes(t *testing.T) { reader := io.NopCloser(strings.NewReader("hello")) tmpArchive, err := newTempArchive(reader, "") assert.NilError(t, err) buf := make([]byte, 10) n, err := tmpArchive.Read(buf) assert.NilError(t, err) if n != 5 { t.Fatalf("Expected to read 5 bytes. Read %d instead", n) } for i := 0; i < 3; i++ { if err = tmpArchive.Close(); err != nil { t.Fatalf("i=%d. Unexpected error closing temp archive: %v", i, err) } } } // TestXGlobalNoParent is a regression test to check parent directories are not created for PAX headers func TestXGlobalNoParent(t *testing.T) { buf := &bytes.Buffer{} w := tar.NewWriter(buf) err := w.WriteHeader(&tar.Header{ Name: "foo/bar", Typeflag: tar.TypeXGlobalHeader, }) assert.NilError(t, err) tmpDir := t.TempDir() err = Untar(buf, tmpDir, nil) assert.NilError(t, err) _, err = os.Lstat(filepath.Join(tmpDir, "foo")) assert.Check(t, err != nil) assert.Check(t, is.ErrorIs(err, os.ErrNotExist)) } // TestImpliedDirectoryPermissions ensures that directories implied by paths in the tar file, but without their own // header entries are created recursively with the default mode (permissions) stored in ImpliedDirectoryMode. This test // also verifies that the permissions of explicit directories are respected. 
func TestImpliedDirectoryPermissions(t *testing.T) { skip.If(t, os.Getuid() != 0, "skipping test that requires root") skip.If(t, runtime.GOOS == "windows", "skipping test that requires Unix permissions") buf := &bytes.Buffer{} headers := []tar.Header{{ Name: "deeply/nested/and/implied", }, { Name: "explicit/", Mode: 0o644, }, { Name: "explicit/permissions/", Mode: 0o600, }, { Name: "explicit/permissions/specified", Mode: 0o400, }} w := tar.NewWriter(buf) for _, header := range headers { err := w.WriteHeader(&header) assert.NilError(t, err) } tmpDir := t.TempDir() err := Untar(buf, tmpDir, nil) assert.NilError(t, err) assertMode := func(path string, expected uint32) { t.Helper() stat, err := os.Lstat(filepath.Join(tmpDir, path)) assert.Check(t, err) assert.Check(t, is.Equal(stat.Mode().Perm(), fs.FileMode(expected))) } assertMode("deeply", ImpliedDirectoryMode) assertMode("deeply/nested", ImpliedDirectoryMode) assertMode("deeply/nested/and", ImpliedDirectoryMode) assertMode("explicit", 0o644) assertMode("explicit/permissions", 0o600) assertMode("explicit/permissions/specified", 0o400) } func TestReplaceFileTarWrapper(t *testing.T) { filesInArchive := 20 tests := []struct { doc string filename string modifier TarModifierFunc expected string fileCount int }{ { doc: "Modifier creates a new file", filename: "newfile", modifier: createModifier(t), expected: "the new content", fileCount: filesInArchive + 1, }, { doc: "Modifier replaces a file", filename: "file-2", modifier: createOrReplaceModifier, expected: "the new content", fileCount: filesInArchive, }, { doc: "Modifier replaces the last file", filename: fmt.Sprintf("file-%d", filesInArchive-1), modifier: createOrReplaceModifier, expected: "the new content", fileCount: filesInArchive, }, { doc: "Modifier appends to a file", filename: "file-3", modifier: appendModifier, expected: "fooo\nnext line", fileCount: filesInArchive, }, } for _, tc := range tests { sourceArchive := buildSourceArchive(t, filesInArchive) defer 
sourceArchive.Close() resultArchive := ReplaceFileTarWrapper( sourceArchive, map[string]TarModifierFunc{tc.filename: tc.modifier}) actual := readFileFromArchive(t, resultArchive, tc.filename, tc.fileCount, tc.doc) assert.Check(t, is.Equal(tc.expected, actual), tc.doc) } } // TestPrefixHeaderReadable tests that files that could be created with the // version of this package that was built with <=go17 are still readable. func TestPrefixHeaderReadable(t *testing.T) { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") skip.If(t, userns.RunningInUserNS(), "skipping test that requires more than 010000000 UIDs, which is unlikely to be satisfied when running in userns") // https://gist.github.com/stevvooe/e2a790ad4e97425896206c0816e1a882#file-out-go testFile := []byte("\x1f\x8b\x08\x08\x44\x21\x68\x59\x00\x03\x74\x2e\x74\x61\x72\x00\x4b\xcb\xcf\x67\xa0\x35\x30\x80\x00\x86\x06\x10\x47\x01\xc1\x37\x40\x00\x54\xb6\xb1\xa1\xa9\x99\x09\x48\x25\x1d\x40\x69\x71\x49\x62\x91\x02\xe5\x76\xa1\x79\x84\x21\x91\xd6\x80\x72\xaf\x8f\x82\x51\x30\x0a\x46\x36\x00\x00\xf0\x1c\x1e\x95\x00\x06\x00\x00") tmpDir := t.TempDir() err := Untar(bytes.NewReader(testFile), tmpDir, nil) assert.NilError(t, err) baseName := "foo" pth := strings.Repeat("a", 100-len(baseName)) + "/" + baseName _, err = os.Lstat(filepath.Join(tmpDir, pth)) assert.NilError(t, err) } func buildSourceArchive(t *testing.T, numberOfFiles int) io.ReadCloser { srcDir, err := os.MkdirTemp(t.TempDir(), "docker-test-srcDir") assert.NilError(t, err) _, err = prepareUntarSourceDirectory(numberOfFiles, srcDir, false) assert.NilError(t, err) sourceArchive, err := TarWithOptions(srcDir, &TarOptions{}) assert.NilError(t, err) return sourceArchive } func createOrReplaceModifier(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) { return &tar.Header{ Mode: 0o600, Typeflag: tar.TypeReg, }, []byte("the new content"), nil } func createModifier(t *testing.T) TarModifierFunc { 
return func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) { assert.Check(t, is.Nil(content)) return createOrReplaceModifier(path, header, content) } } func appendModifier(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) { buffer := bytes.Buffer{} if content != nil { if _, err := buffer.ReadFrom(content); err != nil { return nil, nil, err } } buffer.WriteString("\nnext line") return &tar.Header{Mode: 0o600, Typeflag: tar.TypeReg}, buffer.Bytes(), nil } func readFileFromArchive(t *testing.T, archive io.ReadCloser, name string, expectedCount int, doc string) string { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") destDir := t.TempDir() err := Untar(archive, destDir, nil) assert.NilError(t, err) files, _ := os.ReadDir(destDir) assert.Check(t, is.Len(files, expectedCount), doc) content, err := os.ReadFile(filepath.Join(destDir, name)) assert.Check(t, err) return string(content) } golang-github-moby-go-archive-0.2.0/archive_unix.go000066400000000000000000000050201512625003600222360ustar00rootroot00000000000000//go:build !windows package archive import ( "archive/tar" "errors" "os" "path/filepath" "strings" "syscall" "golang.org/x/sys/unix" ) // addLongPathPrefix adds the Windows long path prefix to the path provided if // it does not already have it. It is a no-op on platforms other than Windows. func addLongPathPrefix(srcPath string) string { return srcPath } // getWalkRoot calculates the root path when performing a TarWithOptions. // We use a separate function as this is platform specific. On Linux, we // can't use filepath.Join(srcPath,include) because this will clean away // a trailing "." or "/" which may be important. 
func getWalkRoot(srcPath string, include string) string { return strings.TrimSuffix(srcPath, string(filepath.Separator)) + string(filepath.Separator) + include } // chmodTarEntry is used to adjust the file permissions used in tar header based // on the platform the archival is done. func chmodTarEntry(perm os.FileMode) os.FileMode { return perm // noop for unix as golang APIs provide perm bits correctly } func getInodeFromStat(stat interface{}) (uint64, error) { s, ok := stat.(*syscall.Stat_t) if !ok { // FIXME(thaJeztah): this should likely return an error; see https://github.com/moby/moby/pull/49493#discussion_r1979152897 return 0, nil } return s.Ino, nil } func getFileUIDGID(stat interface{}) (int, int, error) { s, ok := stat.(*syscall.Stat_t) if !ok { return 0, 0, errors.New("cannot convert stat value to syscall.Stat_t") } return int(s.Uid), int(s.Gid), nil } // handleTarTypeBlockCharFifo is an OS-specific helper function used by // createTarFile to handle the following types of header: Block; Char; Fifo. // // Creating device nodes is not supported when running in a user namespace, // produces a [syscall.EPERM] in most cases. 
func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { mode := uint32(hdr.Mode & 0o7777) switch hdr.Typeflag { case tar.TypeBlock: mode |= unix.S_IFBLK case tar.TypeChar: mode |= unix.S_IFCHR case tar.TypeFifo: mode |= unix.S_IFIFO } return mknod(path, mode, unix.Mkdev(uint32(hdr.Devmajor), uint32(hdr.Devminor))) } func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { if hdr.Typeflag == tar.TypeLink { if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { if err := os.Chmod(path, hdrInfo.Mode()); err != nil { return err } } } else if hdr.Typeflag != tar.TypeSymlink { if err := os.Chmod(path, hdrInfo.Mode()); err != nil { return err } } return nil } golang-github-moby-go-archive-0.2.0/archive_unix_test.go000066400000000000000000000260741512625003600233110ustar00rootroot00000000000000//go:build !windows package archive import ( "archive/tar" "bytes" "errors" "fmt" "io" "os" "os/exec" "path/filepath" "strings" "syscall" "testing" "github.com/moby/sys/userns" "golang.org/x/sys/unix" "gotest.tools/v3/assert" is "gotest.tools/v3/assert/cmp" "gotest.tools/v3/skip" "github.com/moby/go-archive/compression" ) func TestCanonicalTarName(t *testing.T) { cases := []struct { in string isDir bool expected string }{ {"foo", false, "foo"}, {"foo", true, "foo/"}, {"foo/bar", false, "foo/bar"}, {"foo/bar", true, "foo/bar/"}, } for _, v := range cases { if canonicalTarName(v.in, v.isDir) != v.expected { t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, canonicalTarName(v.in, v.isDir)) } } } func TestChmodTarEntry(t *testing.T) { cases := []struct { in, expected os.FileMode }{ {0o000, 0o000}, {0o777, 0o777}, {0o644, 0o644}, {0o755, 0o755}, {0o444, 0o444}, } for _, v := range cases { if out := chmodTarEntry(v.in); out != v.expected { t.Fatalf("wrong chmod. 
expected:%v got:%v", v.expected, out) } } } func TestTarWithHardLink(t *testing.T) { origin, err := os.MkdirTemp("", "docker-test-tar-hardlink") assert.NilError(t, err) defer os.RemoveAll(origin) err = os.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0o700) assert.NilError(t, err) err = os.Link(filepath.Join(origin, "1"), filepath.Join(origin, "2")) assert.NilError(t, err) var i1, i2 uint64 i1, err = getNlink(filepath.Join(origin, "1")) assert.NilError(t, err) // sanity check that we can hardlink if i1 != 2 { t.Skipf("skipping since hardlinks don't work here; expected 2 links, got %d", i1) } dest, err := os.MkdirTemp("", "docker-test-tar-hardlink-dest") assert.NilError(t, err) defer os.RemoveAll(dest) // we'll do this in two steps to separate failure fh, err := Tar(origin, compression.None) assert.NilError(t, err) // ensure we can read the whole thing with no error, before writing back out buf, err := io.ReadAll(fh) assert.NilError(t, err) bRdr := bytes.NewReader(buf) err = Untar(bRdr, dest, nil) assert.NilError(t, err) i1, err = getInode(filepath.Join(dest, "1")) assert.NilError(t, err) i2, err = getInode(filepath.Join(dest, "2")) assert.NilError(t, err) assert.Check(t, is.Equal(i1, i2)) } func TestTarWithHardLinkAndRebase(t *testing.T) { tmpDir, err := os.MkdirTemp("", "docker-test-tar-hardlink-rebase") assert.NilError(t, err) defer os.RemoveAll(tmpDir) origin := filepath.Join(tmpDir, "origin") err = os.Mkdir(origin, 0o700) assert.NilError(t, err) err = os.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0o700) assert.NilError(t, err) err = os.Link(filepath.Join(origin, "1"), filepath.Join(origin, "2")) assert.NilError(t, err) var i1, i2 uint64 i1, err = getNlink(filepath.Join(origin, "1")) assert.NilError(t, err) // sanity check that we can hardlink if i1 != 2 { t.Skipf("skipping since hardlinks don't work here; expected 2 links, got %d", i1) } dest := filepath.Join(tmpDir, "dest") bRdr, err := TarResourceRebase(origin, "origin") 
assert.NilError(t, err) dstDir, srcBase := SplitPathDirEntry(origin) _, dstBase := SplitPathDirEntry(dest) content := RebaseArchiveEntries(bRdr, srcBase, dstBase) err = Untar(content, dstDir, &TarOptions{NoLchown: true, NoOverwriteDirNonDir: true}) assert.NilError(t, err) i1, err = getInode(filepath.Join(dest, "1")) assert.NilError(t, err) i2, err = getInode(filepath.Join(dest, "2")) assert.NilError(t, err) assert.Check(t, is.Equal(i1, i2)) } // TestUntarParentPathPermissions is a regression test to check that missing // parent directories are created with the expected permissions func TestUntarParentPathPermissions(t *testing.T) { skip.If(t, os.Getuid() != 0, "skipping test that requires root") buf := &bytes.Buffer{} w := tar.NewWriter(buf) err := w.WriteHeader(&tar.Header{Name: "foo/bar"}) assert.NilError(t, err) tmpDir, err := os.MkdirTemp("", t.Name()) assert.NilError(t, err) defer os.RemoveAll(tmpDir) err = Untar(buf, tmpDir, nil) assert.NilError(t, err) fi, err := os.Lstat(filepath.Join(tmpDir, "foo")) assert.NilError(t, err) assert.Equal(t, fi.Mode(), 0o755|os.ModeDir) } func getNlink(path string) (uint64, error) { stat, err := os.Stat(path) if err != nil { return 0, err } statT, ok := stat.Sys().(*syscall.Stat_t) if !ok { return 0, fmt.Errorf("expected type *syscall.Stat_t, got %t", stat.Sys()) } // We need this conversion on ARM64 //nolint: unconvert return uint64(statT.Nlink), nil } func getInode(path string) (uint64, error) { stat, err := os.Stat(path) if err != nil { return 0, err } statT, ok := stat.Sys().(*syscall.Stat_t) if !ok { return 0, fmt.Errorf("expected type *syscall.Stat_t, got %t", stat.Sys()) } return statT.Ino, nil } func TestTarWithBlockCharFifo(t *testing.T) { skip.If(t, os.Getuid() != 0, "skipping test that requires root") skip.If(t, userns.RunningInUserNS(), "skipping test that requires initial userns") origin, err := os.MkdirTemp("", "docker-test-tar-hardlink") assert.NilError(t, err) defer os.RemoveAll(origin) err = 
os.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0o700) assert.NilError(t, err) err = mknod(filepath.Join(origin, "2"), unix.S_IFBLK, unix.Mkdev(uint32(12), uint32(5))) assert.NilError(t, err) err = mknod(filepath.Join(origin, "3"), unix.S_IFCHR, unix.Mkdev(uint32(12), uint32(5))) assert.NilError(t, err) err = mknod(filepath.Join(origin, "4"), unix.S_IFIFO, unix.Mkdev(uint32(12), uint32(5))) assert.NilError(t, err) dest, err := os.MkdirTemp("", "docker-test-tar-hardlink-dest") assert.NilError(t, err) defer os.RemoveAll(dest) // we'll do this in two steps to separate failure fh, err := Tar(origin, compression.None) assert.NilError(t, err) // ensure we can read the whole thing with no error, before writing back out buf, err := io.ReadAll(fh) assert.NilError(t, err) bRdr := bytes.NewReader(buf) err = Untar(bRdr, dest, nil) assert.NilError(t, err) changes, err := ChangesDirs(origin, dest) assert.NilError(t, err) if len(changes) > 0 { t.Fatalf("Tar with special device (block, char, fifo) should keep them (recreate them when untar) : %v", changes) } } // TestTarUntarWithXattr is Unix as Lsetxattr is not supported on Windows func TestTarUntarWithXattr(t *testing.T) { skip.If(t, os.Getuid() != 0, "skipping test that requires root") if _, err := exec.LookPath("setcap"); err != nil { t.Skip("setcap not installed") } if _, err := exec.LookPath("getcap"); err != nil { t.Skip("getcap not installed") } origin, err := os.MkdirTemp("", "docker-test-untar-origin") assert.NilError(t, err) defer os.RemoveAll(origin) err = os.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0o700) assert.NilError(t, err) err = os.WriteFile(filepath.Join(origin, "2"), []byte("welcome!"), 0o700) assert.NilError(t, err) err = os.WriteFile(filepath.Join(origin, "3"), []byte("will be ignored"), 0o700) assert.NilError(t, err) // there is no known Go implementation of setcap/getcap with support for v3 file capability out, err := exec.Command("setcap", "cap_block_suspend+ep", 
filepath.Join(origin, "2")).CombinedOutput() assert.NilError(t, err, string(out)) tarball, err := Tar(origin, compression.None) assert.NilError(t, err) defer tarball.Close() rdr := tar.NewReader(tarball) for { h, err := rdr.Next() if errors.Is(err, io.EOF) { break } assert.NilError(t, err) capability, hasxattr := h.PAXRecords["SCHILY.xattr.security.capability"] switch h.Name { case "2": if assert.Check(t, hasxattr, "tar entry %q should have the 'security.capability' xattr", h.Name) { assert.Check(t, len(capability) > 0, "tar entry %q has a blank 'security.capability' xattr value") } default: assert.Check(t, !hasxattr, "tar entry %q should not have the 'security.capability' xattr", h.Name) } } for _, c := range []compression.Compression{ compression.None, compression.Gzip, } { changes, err := tarUntar(t, origin, &TarOptions{ Compression: c, ExcludePatterns: []string{"3"}, }) if err != nil { t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err) } if len(changes) != 1 || changes[0].Path != "/3" { t.Fatalf("Unexpected differences after tarUntar: %v", changes) } out, err := exec.Command("getcap", filepath.Join(origin, "2")).CombinedOutput() assert.NilError(t, err, string(out)) assert.Check(t, is.Contains(string(out), "cap_block_suspend=ep"), "untar should have kept the 'security.capability' xattr") } } func TestCopyInfoDestinationPathSymlink(t *testing.T) { tmpDir, _ := getTestTempDirs(t) defer removeAllPaths(tmpDir) root := strings.TrimRight(tmpDir, "/") + "/" type FileTestData struct { resource FileData file string expected CopyInfo } testData := []FileTestData{ // Create a directory: /tmp/archive-copy-test*/dir1 // Test will "copy" file1 to dir1 {resource: FileData{filetype: Dir, path: "dir1", permissions: 0o740}, file: "file1", expected: CopyInfo{Path: root + "dir1/file1", Exists: false, IsDir: false}}, // Create a symlink directory to dir1: /tmp/archive-copy-test*/dirSymlink -> dir1 // Test will "copy" file2 to dirSymlink {resource: 
FileData{filetype: Symlink, path: "dirSymlink", contents: root + "dir1", permissions: 0o600}, file: "file2", expected: CopyInfo{Path: root + "dirSymlink/file2", Exists: false, IsDir: false}}, // Create a file in tmp directory: /tmp/archive-copy-test*/file1 // Test to cover when the full file path already exists. {resource: FileData{filetype: Regular, path: "file1", permissions: 0o600}, file: "", expected: CopyInfo{Path: root + "file1", Exists: true}}, // Create a directory: /tmp/archive-copy*/dir2 // Test to cover when the full directory path already exists {resource: FileData{filetype: Dir, path: "dir2", permissions: 0o740}, file: "", expected: CopyInfo{Path: root + "dir2", Exists: true, IsDir: true}}, // Create a symlink to a non-existent target: /tmp/archive-copy*/symlink1 -> noSuchTarget // Negative test to cover symlinking to a target that does not exit {resource: FileData{filetype: Symlink, path: "symlink1", contents: "noSuchTarget", permissions: 0o600}, file: "", expected: CopyInfo{Path: root + "noSuchTarget", Exists: false}}, // Create a file in tmp directory for next test: /tmp/existingfile {resource: FileData{filetype: Regular, path: "existingfile", permissions: 0o600}, file: "", expected: CopyInfo{Path: root + "existingfile", Exists: true}}, // Create a symlink to an existing file: /tmp/archive-copy*/symlink2 -> /tmp/existingfile // Test to cover when the parent directory of a new file is a symlink {resource: FileData{filetype: Symlink, path: "symlink2", contents: "existingfile", permissions: 0o600}, file: "", expected: CopyInfo{Path: root + "existingfile", Exists: true}}, } var dirs []FileData for _, data := range testData { dirs = append(dirs, data.resource) } provisionSampleDir(t, tmpDir, dirs) for _, info := range testData { p := filepath.Join(tmpDir, info.resource.path, info.file) ci, err := CopyInfoDestinationPath(p) assert.Check(t, err) assert.Check(t, is.DeepEqual(info.expected, ci)) } } 
golang-github-moby-go-archive-0.2.0/archive_windows.go000066400000000000000000000035021512625003600227500ustar00rootroot00000000000000package archive import ( "archive/tar" "os" "path/filepath" "strings" ) // longPathPrefix is the longpath prefix for Windows file paths. const longPathPrefix = `\\?\` // addLongPathPrefix adds the Windows long path prefix to the path provided if // it does not already have it. It is a no-op on platforms other than Windows. // // addLongPathPrefix is a copy of [github.com/docker/docker/pkg/longpath.AddPrefix]. func addLongPathPrefix(srcPath string) string { if strings.HasPrefix(srcPath, longPathPrefix) { return srcPath } if strings.HasPrefix(srcPath, `\\`) { // This is a UNC path, so we need to add 'UNC' to the path as well. return longPathPrefix + `UNC` + srcPath[1:] } return longPathPrefix + srcPath } // getWalkRoot calculates the root path when performing a TarWithOptions. // We use a separate function as this is platform specific. func getWalkRoot(srcPath string, include string) string { return filepath.Join(srcPath, include) } // chmodTarEntry is used to adjust the file permissions used in tar header based // on the platform the archival is done. func chmodTarEntry(perm os.FileMode) os.FileMode { // Remove group- and world-writable bits. perm &= 0o755 // Add the x bit: make everything +x on Windows return perm | 0o111 } func getInodeFromStat(stat interface{}) (uint64, error) { // do nothing. 
no notion of Inode in stat on Windows return 0, nil } // handleTarTypeBlockCharFifo is an OS-specific helper function used by // createTarFile to handle the following types of header: Block; Char; Fifo func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { return nil } func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { return nil } func getFileUIDGID(stat interface{}) (int, int, error) { // no notion of file ownership mapping yet on Windows return 0, 0, nil } golang-github-moby-go-archive-0.2.0/archive_windows_test.go000066400000000000000000000031021512625003600240030ustar00rootroot00000000000000//go:build windows package archive import ( "os" "path/filepath" "testing" ) func TestCopyFileWithInvalidDest(t *testing.T) { // TODO Windows: This is currently failing. Not sure what has // recently changed in CopyWithTar as used to pass. Further investigation // is required. t.Skip("Currently fails") folder, err := os.MkdirTemp("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(folder) dest := "c:dest" srcFolder := filepath.Join(folder, "src") src := filepath.Join(folder, "src", "src") if err := os.MkdirAll(srcFolder, 0o740); err != nil { t.Fatal(err) } if err := os.WriteFile(src, []byte("content"), 0o777); err != nil { t.Fatal(err) } err = defaultCopyWithTar(src, dest) if err == nil { t.Fatalf("archiver.CopyWithTar should throw an error on invalid dest.") } } func TestCanonicalTarName(t *testing.T) { cases := []struct { in string isDir bool expected string }{ {"foo", false, "foo"}, {"foo", true, "foo/"}, {`foo\bar`, false, "foo/bar"}, {`foo\bar`, true, "foo/bar/"}, } for _, v := range cases { if canonicalTarName(v.in, v.isDir) != v.expected { t.Fatalf("wrong canonical tar name. 
expected:%s got:%s", v.expected, canonicalTarName(v.in, v.isDir)) } } } func TestChmodTarEntry(t *testing.T) { cases := []struct { in, expected os.FileMode }{ {0o000, 0o111}, {0o777, 0o755}, {0o644, 0o755}, {0o755, 0o755}, {0o444, 0o555}, } for _, v := range cases { if out := chmodTarEntry(v.in); out != v.expected { t.Fatalf("wrong chmod. expected:%v got:%v", v.expected, out) } } } golang-github-moby-go-archive-0.2.0/changes.go000066400000000000000000000274441512625003600212000ustar00rootroot00000000000000package archive import ( "archive/tar" "bytes" "context" "fmt" "io" "io/fs" "os" "path/filepath" "sort" "strings" "time" "github.com/containerd/log" "github.com/moby/sys/user" ) // ChangeType represents the change type. type ChangeType int const ( ChangeModify = 0 // ChangeModify represents the modify operation. ChangeAdd = 1 // ChangeAdd represents the add operation. ChangeDelete = 2 // ChangeDelete represents the delete operation. ) func (c ChangeType) String() string { switch c { case ChangeModify: return "C" case ChangeAdd: return "A" case ChangeDelete: return "D" } return "" } // Change represents a change, it wraps the change type and path. // It describes changes of the files in the path respect to the // parent layers. The change could be modify, add, delete. // This is used for layer diff. type Change struct { Path string Kind ChangeType } func (change *Change) String() string { return fmt.Sprintf("%s %s", change.Kind, change.Path) } // for sort.Sort type changesByPath []Change func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path } func (c changesByPath) Len() int { return len(c) } func (c changesByPath) Swap(i, j int) { c[j], c[i] = c[i], c[j] } // Gnu tar doesn't have sub-second mtime precision. The go tar // writer (1.10+) does when using PAX format, but we round times to seconds // to ensure archives have the same hashes for backwards compatibility. 
// See https://github.com/moby/moby/pull/35739/commits/fb170206ba12752214630b269a40ac7be6115ed4. // // Non-sub-second is problematic when we apply changes via tar // files. We handle this by comparing for exact times, *or* same // second count and either a or b having exactly 0 nanoseconds func sameFsTime(a, b time.Time) bool { return a.Equal(b) || (a.Unix() == b.Unix() && (a.Nanosecond() == 0 || b.Nanosecond() == 0)) } // Changes walks the path rw and determines changes for the files in the path, // with respect to the parent layers func Changes(layers []string, rw string) ([]Change, error) { return collectChanges(layers, rw, aufsDeletedFile, aufsMetadataSkip) } func aufsMetadataSkip(path string) (skip bool, err error) { skip, err = filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path) if err != nil { skip = true } return skip, err } func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) { f := filepath.Base(path) // If there is a whiteout, then the file was removed if strings.HasPrefix(f, WhiteoutPrefix) { originalFile := f[len(WhiteoutPrefix):] return filepath.Join(filepath.Dir(path), originalFile), nil } return "", nil } type ( skipChange func(string) (bool, error) deleteChange func(string, string, os.FileInfo) (string, error) ) func collectChanges(layers []string, rw string, dc deleteChange, sc skipChange) ([]Change, error) { var ( changes []Change changedDirs = make(map[string]struct{}) ) err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error { if err != nil { return err } // Rebase path path, err = filepath.Rel(rw, path) if err != nil { return err } // As this runs on the daemon side, file paths are OS specific. 
path = filepath.Join(string(os.PathSeparator), path) // Skip root if path == string(os.PathSeparator) { return nil } if sc != nil { if skip, err := sc(path); skip { return err } } change := Change{ Path: path, } deletedFile, err := dc(rw, path, f) if err != nil { return err } // Find out what kind of modification happened if deletedFile != "" { change.Path = deletedFile change.Kind = ChangeDelete } else { // Otherwise, the file was added change.Kind = ChangeAdd // ...Unless it already existed in a top layer, in which case, it's a modification for _, layer := range layers { stat, err := os.Stat(filepath.Join(layer, path)) if err != nil && !os.IsNotExist(err) { return err } if err == nil { // The file existed in the top layer, so that's a modification // However, if it's a directory, maybe it wasn't actually modified. // If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar if stat.IsDir() && f.IsDir() { if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) { // Both directories are the same, don't record the change return nil } } change.Kind = ChangeModify break } } } // If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files. // This block is here to ensure the change is recorded even if the // modify time, mode and size of the parent directory in the rw and ro layers are all equal. // Check https://github.com/docker/docker/pull/13590 for details. if f.IsDir() { changedDirs[path] = struct{}{} } if change.Kind == ChangeAdd || change.Kind == ChangeDelete { parent := filepath.Dir(path) if _, ok := changedDirs[parent]; !ok && parent != "/" { changes = append(changes, Change{Path: parent, Kind: ChangeModify}) changedDirs[parent] = struct{}{} } } // Record change changes = append(changes, change) return nil }) if err != nil && !os.IsNotExist(err) { return nil, err } return changes, nil } // FileInfo describes the information of a file. 
type FileInfo struct { parent *FileInfo name string stat fs.FileInfo children map[string]*FileInfo capability []byte added bool } // LookUp looks up the file information of a file. func (info *FileInfo) LookUp(path string) *FileInfo { // As this runs on the daemon side, file paths are OS specific. parent := info if path == string(os.PathSeparator) { return info } pathElements := strings.Split(path, string(os.PathSeparator)) for _, elem := range pathElements { if elem != "" { child := parent.children[elem] if child == nil { return nil } parent = child } } return parent } func (info *FileInfo) path() string { if info.parent == nil { // As this runs on the daemon side, file paths are OS specific. return string(os.PathSeparator) } return filepath.Join(info.parent.path(), info.name) } func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { sizeAtEntry := len(*changes) if oldInfo == nil { // add change := Change{ Path: info.path(), Kind: ChangeAdd, } *changes = append(*changes, change) info.added = true } // We make a copy so we can modify it to detect additions // also, we only recurse on the old dir if the new info is a directory // otherwise any previous delete/change is considered recursive oldChildren := make(map[string]*FileInfo) if oldInfo != nil && info.isDir() { for k, v := range oldInfo.children { oldChildren[k] = v } } for name, newChild := range info.children { oldChild := oldChildren[name] if oldChild != nil { // change? oldStat := oldChild.stat newStat := newChild.stat // Note: We can't compare inode or ctime or blocksize here, because these change // when copying a file into a container. However, that is not generally a problem // because any content change will change mtime, and any status change should // be visible when actually comparing the stat fields. 
The only time this // breaks down is if some code intentionally hides a change by setting // back mtime if statDifferent(oldStat, newStat) || !bytes.Equal(oldChild.capability, newChild.capability) { change := Change{ Path: newChild.path(), Kind: ChangeModify, } *changes = append(*changes, change) newChild.added = true } // Remove from copy so we can detect deletions delete(oldChildren, name) } newChild.addChanges(oldChild, changes) } for _, oldChild := range oldChildren { // delete change := Change{ Path: oldChild.path(), Kind: ChangeDelete, } *changes = append(*changes, change) } // If there were changes inside this directory, we need to add it, even if the directory // itself wasn't changed. This is needed to properly save and restore filesystem permissions. // As this runs on the daemon side, file paths are OS specific. if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) { change := Change{ Path: info.path(), Kind: ChangeModify, } // Let's insert the directory entry before the recently added entries located inside this dir *changes = append(*changes, change) // just to resize the slice, will be overwritten copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:]) (*changes)[sizeAtEntry] = change } } // Changes add changes to file information. func (info *FileInfo) Changes(oldInfo *FileInfo) []Change { var changes []Change info.addChanges(oldInfo, &changes) return changes } func newRootFileInfo() *FileInfo { // As this runs on the daemon side, file paths are OS specific. root := &FileInfo{ name: string(os.PathSeparator), children: make(map[string]*FileInfo), } return root } // ChangesDirs compares two directories and generates an array of Change objects describing the changes. // If oldDir is "", then all files in newDir will be Add-Changes. 
func ChangesDirs(newDir, oldDir string) ([]Change, error) { var oldRoot, newRoot *FileInfo if oldDir == "" { emptyDir, err := os.MkdirTemp("", "empty") if err != nil { return nil, err } defer os.Remove(emptyDir) oldDir = emptyDir } oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir) if err != nil { return nil, err } return newRoot.Changes(oldRoot), nil } // ChangesSize calculates the size in bytes of the provided changes, based on newDir. func ChangesSize(newDir string, changes []Change) int64 { var ( size int64 sf = make(map[uint64]struct{}) ) for _, change := range changes { if change.Kind == ChangeModify || change.Kind == ChangeAdd { file := filepath.Join(newDir, change.Path) fileInfo, err := os.Lstat(file) if err != nil { log.G(context.TODO()).Errorf("Can not stat %q: %s", file, err) continue } if fileInfo != nil && !fileInfo.IsDir() { if hasHardlinks(fileInfo) { inode := getIno(fileInfo) if _, ok := sf[inode]; !ok { size += fileInfo.Size() sf[inode] = struct{}{} } } else { size += fileInfo.Size() } } } } return size } // ExportChanges produces an Archive from the provided changes, relative to dir. func ExportChanges(dir string, changes []Change, idMap user.IdentityMapping) (io.ReadCloser, error) { reader, writer := io.Pipe() go func() { ta := newTarAppender(idMap, writer, nil) sort.Sort(changesByPath(changes)) // In general we log errors here but ignore them because // during e.g. 
a diff operation the container can continue // mutating the filesystem and we can see transient errors // from this for _, change := range changes { if change.Kind == ChangeDelete { whiteOutDir := filepath.Dir(change.Path) whiteOutBase := filepath.Base(change.Path) whiteOut := filepath.Join(whiteOutDir, WhiteoutPrefix+whiteOutBase) timestamp := time.Now() hdr := &tar.Header{ Name: whiteOut[1:], Size: 0, ModTime: timestamp, AccessTime: timestamp, ChangeTime: timestamp, } if err := ta.TarWriter.WriteHeader(hdr); err != nil { log.G(context.TODO()).Debugf("Can't write whiteout header: %s", err) } } else { path := filepath.Join(dir, change.Path) if err := ta.addTarFile(path, change.Path[1:]); err != nil { log.G(context.TODO()).Debugf("Can't add file %s to tar: %s", path, err) } } } // Make sure to check the error on Close. if err := ta.TarWriter.Close(); err != nil { log.G(context.TODO()).Debugf("Can't close layer: %s", err) } if err := writer.Close(); err != nil { log.G(context.TODO()).Debugf("failed close Changes writer: %s", err) } }() return reader, nil } golang-github-moby-go-archive-0.2.0/changes_linux.go000066400000000000000000000164131512625003600224110ustar00rootroot00000000000000package archive import ( "fmt" "os" "path/filepath" "sort" "strings" "syscall" "unsafe" "golang.org/x/sys/unix" ) // walker is used to implement collectFileInfoForChanges on linux. Where this // method in general returns the entire contents of two directory trees, we // optimize some FS calls out on linux. In particular, we take advantage of the // fact that getdents(2) returns the inode of each file in the directory being // walked, which, when walking two trees in parallel to generate a list of // changes, can be used to prune subtrees without ever having to lstat(2) them // directly. Eliminating stat calls in this way can save up to seconds on large // images. 
type walker struct { dir1 string dir2 string root1 *FileInfo root2 *FileInfo } // collectFileInfoForChanges returns a complete representation of the trees // rooted at dir1 and dir2, with one important exception: any subtree or // leaf where the inode and device numbers are an exact match between dir1 // and dir2 will be pruned from the results. This method is *only* to be used // to generating a list of changes between the two directories, as it does not // reflect the full contents. func collectFileInfoForChanges(dir1, dir2 string) (*FileInfo, *FileInfo, error) { w := &walker{ dir1: dir1, dir2: dir2, root1: newRootFileInfo(), root2: newRootFileInfo(), } i1, err := os.Lstat(w.dir1) if err != nil { return nil, nil, err } i2, err := os.Lstat(w.dir2) if err != nil { return nil, nil, err } if err := w.walk("/", i1, i2); err != nil { return nil, nil, err } return w.root1, w.root2, nil } // Given a FileInfo, its path info, and a reference to the root of the tree // being constructed, register this file with the tree. func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error { if fi == nil { return nil } parent := root.LookUp(filepath.Dir(path)) if parent == nil { return fmt.Errorf("walkchunk: Unexpectedly no parent for %s", path) } info := &FileInfo{ name: filepath.Base(path), children: make(map[string]*FileInfo), parent: parent, } cpath := filepath.Join(dir, path) info.stat = fi info.capability, _ = lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access parent.children[info.name] = info return nil } // Walk a subtree rooted at the same path in both trees being iterated. 
For // example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) { // Register these nodes with the return trees, unless we're still at the // (already-created) roots: if path != "/" { if err := walkchunk(path, i1, w.dir1, w.root1); err != nil { return err } if err := walkchunk(path, i2, w.dir2, w.root2); err != nil { return err } } is1Dir := i1 != nil && i1.IsDir() is2Dir := i2 != nil && i2.IsDir() sameDevice := false if i1 != nil && i2 != nil { si1 := i1.Sys().(*syscall.Stat_t) si2 := i2.Sys().(*syscall.Stat_t) if si1.Dev == si2.Dev { sameDevice = true } } // If these files are both non-existent, or leaves (non-dirs), we are done. if !is1Dir && !is2Dir { return nil } // Fetch the names of all the files contained in both directories being walked: var names1, names2 []nameIno if is1Dir { names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access if err != nil { return err } } if is2Dir { names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access if err != nil { return err } } // We have lists of the files contained in both parallel directories, sorted // in the same order. Walk them in parallel, generating a unique merged list // of all items present in either or both directories. 
var names []string ix1 := 0 ix2 := 0 for ix1 < len(names1) && ix2 < len(names2) { ni1 := names1[ix1] ni2 := names2[ix2] switch strings.Compare(ni1.name, ni2.name) { case -1: // ni1 < ni2 -- advance ni1 // we will not encounter ni1 in names2 names = append(names, ni1.name) ix1++ case 0: // ni1 == ni2 if ni1.ino != ni2.ino || !sameDevice { names = append(names, ni1.name) } ix1++ ix2++ case 1: // ni1 > ni2 -- advance ni2 // we will not encounter ni2 in names1 names = append(names, ni2.name) ix2++ } } for ix1 < len(names1) { names = append(names, names1[ix1].name) ix1++ } for ix2 < len(names2) { names = append(names, names2[ix2].name) ix2++ } // For each of the names present in either or both of the directories being // iterated, stat the name under each root, and recurse the pair of them: for _, name := range names { fname := filepath.Join(path, name) var cInfo1, cInfo2 os.FileInfo if is1Dir { cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access if err != nil && !os.IsNotExist(err) { return err } } if is2Dir { cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access if err != nil && !os.IsNotExist(err) { return err } } if err = w.walk(fname, cInfo1, cInfo2); err != nil { return err } } return nil } // {name,inode} pairs used to support the early-pruning logic of the walker type type nameIno struct { name string ino uint64 } type nameInoSlice []nameIno func (s nameInoSlice) Len() int { return len(s) } func (s nameInoSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name } // readdirnames is a hacked-apart version of the Go stdlib code, exposing inode // numbers further up the stack when reading directory contents. Unlike // os.Readdirnames, which returns a list of filenames, this function returns a // list of {filename,inode} pairs. 
func readdirnames(dirname string) (names []nameIno, err error) { var ( size = 100 buf = make([]byte, 4096) nbuf int bufp int nb int ) f, err := os.Open(dirname) if err != nil { return nil, err } defer f.Close() names = make([]nameIno, 0, size) // Empty with room to grow. for { // Refill the buffer if necessary if bufp >= nbuf { bufp = 0 nbuf, err = unix.ReadDirent(int(f.Fd()), buf) // getdents on linux if nbuf < 0 { nbuf = 0 } if err != nil { return nil, os.NewSyscallError("readdirent", err) } if nbuf <= 0 { break // EOF } } // Drain the buffer nb, names = parseDirent(buf[bufp:nbuf], names) bufp += nb } sl := nameInoSlice(names) sort.Sort(sl) return sl, nil } // parseDirent is a minor modification of unix.ParseDirent (linux version) // which returns {name,inode} pairs instead of just names. func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) { origlen := len(buf) for len(buf) > 0 { dirent := (*unix.Dirent)(unsafe.Pointer(&buf[0])) // #nosec G103 -- Ignore "G103: Use of unsafe calls should be audited" buf = buf[dirent.Reclen:] if dirent.Ino == 0 { // File absent in directory. continue } b := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0])) // #nosec G103 -- Ignore "G103: Use of unsafe calls should be audited" name := string(b[0:clen(b[:])]) if name == "." || name == ".." 
{ // Useless names continue } names = append(names, nameIno{name, dirent.Ino}) } return origlen - len(buf), names } func clen(n []byte) int { for i := 0; i < len(n); i++ { if n[i] == 0 { return i } } return len(n) } golang-github-moby-go-archive-0.2.0/changes_other.go000066400000000000000000000036701512625003600223740ustar00rootroot00000000000000//go:build !linux package archive import ( "fmt" "os" "path/filepath" "runtime" "strings" ) func collectFileInfoForChanges(oldDir, newDir string) (*FileInfo, *FileInfo, error) { var ( oldRoot, newRoot *FileInfo err1, err2 error errs = make(chan error, 2) ) go func() { oldRoot, err1 = collectFileInfo(oldDir) errs <- err1 }() go func() { newRoot, err2 = collectFileInfo(newDir) errs <- err2 }() // block until both routines have returned for i := 0; i < 2; i++ { if err := <-errs; err != nil { return nil, nil, err } } return oldRoot, newRoot, nil } func collectFileInfo(sourceDir string) (*FileInfo, error) { root := newRootFileInfo() err := filepath.WalkDir(sourceDir, func(path string, _ os.DirEntry, err error) error { if err != nil { return err } // Rebase path relPath, err := filepath.Rel(sourceDir, path) if err != nil { return err } // As this runs on the daemon side, file paths are OS specific. relPath = filepath.Join(string(os.PathSeparator), relPath) // See https://github.com/golang/go/issues/9168 - bug in filepath.Join. // Temporary workaround. If the returned path starts with two backslashes, // trim it down to a single backslash. Only relevant on Windows. 
if runtime.GOOS == "windows" { if strings.HasPrefix(relPath, `\\`) { relPath = relPath[1:] } } if relPath == string(os.PathSeparator) { return nil } parent := root.LookUp(filepath.Dir(relPath)) if parent == nil { return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath) } s, err := os.Lstat(path) if err != nil { return err } info := &FileInfo{ name: filepath.Base(relPath), children: make(map[string]*FileInfo), parent: parent, stat: s, } info.capability, _ = lgetxattr(path, "security.capability") parent.children[info.name] = info return nil }) if err != nil { return nil, err } return root, nil } golang-github-moby-go-archive-0.2.0/changes_posix_test.go000066400000000000000000000054741512625003600234600ustar00rootroot00000000000000package archive import ( "archive/tar" "errors" "fmt" "io" "os" "path" "sort" "testing" "github.com/moby/sys/user" ) func TestHardLinkOrder(t *testing.T) { names := []string{"file1.txt", "file2.txt", "file3.txt"} msg := []byte("Hey y'all") // Create dir src, err := os.MkdirTemp("", "docker-hardlink-test-src-") if err != nil { t.Fatal(err) } defer os.RemoveAll(src) for _, name := range names { func() { err := os.WriteFile(path.Join(src, name), msg, 0o666) if err != nil { t.Fatal(err) } }() } // Create dest, with changes that includes hardlinks dest, err := os.MkdirTemp("", "docker-hardlink-test-dest-") if err != nil { t.Fatal(err) } os.RemoveAll(dest) // we just want the name, at first if err := copyDir(src, dest); err != nil { t.Fatal(err) } defer os.RemoveAll(dest) for _, name := range names { for i := 0; i < 5; i++ { if err := os.Link(path.Join(dest, name), path.Join(dest, fmt.Sprintf("%s.link%d", name, i))); err != nil { t.Fatal(err) } } } // get changes changes, err := ChangesDirs(dest, src) if err != nil { t.Fatal(err) } // sort sort.Sort(changesByPath(changes)) // ExportChanges ar, err := ExportChanges(dest, changes, user.IdentityMapping{}) if err != nil { t.Fatal(err) } hdrs, err := walkHeaders(ar) if err != nil { 
t.Fatal(err) } // reverse sort sort.Sort(sort.Reverse(changesByPath(changes))) // ExportChanges arRev, err := ExportChanges(dest, changes, user.IdentityMapping{}) if err != nil { t.Fatal(err) } hdrsRev, err := walkHeaders(arRev) if err != nil { t.Fatal(err) } // line up the two sets sort.Sort(tarHeaders(hdrs)) sort.Sort(tarHeaders(hdrsRev)) // compare Size and LinkName for i := range hdrs { if hdrs[i].Name != hdrsRev[i].Name { t.Errorf("headers - expected name %q; but got %q", hdrs[i].Name, hdrsRev[i].Name) } if hdrs[i].Size != hdrsRev[i].Size { t.Errorf("headers - %q expected size %d; but got %d", hdrs[i].Name, hdrs[i].Size, hdrsRev[i].Size) } if hdrs[i].Typeflag != hdrsRev[i].Typeflag { t.Errorf("headers - %q expected type %d; but got %d", hdrs[i].Name, hdrs[i].Typeflag, hdrsRev[i].Typeflag) } if hdrs[i].Linkname != hdrsRev[i].Linkname { t.Errorf("headers - %q expected linkname %q; but got %q", hdrs[i].Name, hdrs[i].Linkname, hdrsRev[i].Linkname) } } } type tarHeaders []tar.Header func (th tarHeaders) Len() int { return len(th) } func (th tarHeaders) Swap(i, j int) { th[j], th[i] = th[i], th[j] } func (th tarHeaders) Less(i, j int) bool { return th[i].Name < th[j].Name } func walkHeaders(r io.Reader) ([]tar.Header, error) { t := tar.NewReader(r) var headers []tar.Header for { hdr, err := t.Next() if err != nil { if errors.Is(err, io.EOF) { break } return headers, err } headers = append(headers, *hdr) } return headers, nil } golang-github-moby-go-archive-0.2.0/changes_test.go000066400000000000000000000433521512625003600222330ustar00rootroot00000000000000package archive import ( "errors" "os" "os/exec" "path" "path/filepath" "runtime" "sort" "syscall" "testing" "time" "github.com/moby/sys/user" "gotest.tools/v3/assert" "gotest.tools/v3/skip" ) func maxInt(x, y int) int { if x >= y { return x } return y } func copyDir(src, dst string) error { if runtime.GOOS != "windows" { return exec.Command("cp", "-a", src, dst).Run() } // Could have used xcopy src dst /E /I /H /Y 
/B. However, xcopy has the // unfortunate side effect of not preserving timestamps of newly created // directories in the target directory, so we don't get accurate changes. // Use robocopy instead. Note this isn't available in microsoft/nanoserver. // But it has gotchas. See https://weblogs.sqlteam.com/robv/archive/2010/02/17/61106.aspx err := exec.Command("robocopy", filepath.FromSlash(src), filepath.FromSlash(dst), "/SL", "/COPYALL", "/MIR").Run() var exitError *exec.ExitError if errors.As(err, &exitError) { if status, ok := exitError.Sys().(syscall.WaitStatus); ok { if status.ExitStatus()&24 == 0 { return nil } } } return err } type FileType uint32 const ( Regular FileType = 0 Dir FileType = 1 Symlink FileType = 2 ) type FileData struct { filetype FileType path string contents string permissions os.FileMode } func createSampleDir(t *testing.T, root string) { files := []FileData{ {filetype: Regular, path: "file1", contents: "file1\n", permissions: 0o600}, {filetype: Regular, path: "file2", contents: "file2\n", permissions: 0o666}, {filetype: Regular, path: "file3", contents: "file3\n", permissions: 0o404}, {filetype: Regular, path: "file4", contents: "file4\n", permissions: 0o600}, {filetype: Regular, path: "file5", contents: "file5\n", permissions: 0o600}, {filetype: Regular, path: "file6", contents: "file6\n", permissions: 0o600}, {filetype: Regular, path: "file7", contents: "file7\n", permissions: 0o600}, {filetype: Dir, path: "dir1", contents: "", permissions: 0o740}, {filetype: Regular, path: "dir1/file1-1", contents: "file1-1\n", permissions: 0o1444}, {filetype: Regular, path: "dir1/file1-2", contents: "file1-2\n", permissions: 0o666}, {filetype: Dir, path: "dir2", contents: "", permissions: 0o700}, {filetype: Regular, path: "dir2/file2-1", contents: "file2-1\n", permissions: 0o666}, {filetype: Regular, path: "dir2/file2-2", contents: "file2-2\n", permissions: 0o666}, {filetype: Dir, path: "dir3", contents: "", permissions: 0o700}, {filetype: Regular, 
path: "dir3/file3-1", contents: "file3-1\n", permissions: 0o666}, {filetype: Regular, path: "dir3/file3-2", contents: "file3-2\n", permissions: 0o666}, {filetype: Dir, path: "dir4", contents: "", permissions: 0o700}, {filetype: Regular, path: "dir4/file3-1", contents: "file4-1\n", permissions: 0o666}, {filetype: Regular, path: "dir4/file3-2", contents: "file4-2\n", permissions: 0o666}, {filetype: Symlink, path: "symlink1", contents: "target1", permissions: 0o666}, {filetype: Symlink, path: "symlink2", contents: "target2", permissions: 0o666}, {filetype: Symlink, path: "symlink3", contents: root + "/file1", permissions: 0o666}, {filetype: Symlink, path: "symlink4", contents: root + "/symlink3", permissions: 0o666}, {filetype: Symlink, path: "dirSymlink", contents: root + "/dir1", permissions: 0o740}, } provisionSampleDir(t, root, files) } func provisionSampleDir(t *testing.T, root string, files []FileData) { now := time.Now() for _, info := range files { p := path.Join(root, info.path) switch info.filetype { case Dir: err := os.MkdirAll(p, info.permissions) assert.NilError(t, err) case Regular: err := os.WriteFile(p, []byte(info.contents), info.permissions) assert.NilError(t, err) case Symlink: err := os.Symlink(info.contents, p) assert.NilError(t, err) } if info.filetype != Symlink { // Set a consistent ctime, atime for all files and dirs err := chtimes(p, now, now) assert.NilError(t, err) } } } func TestChangeString(t *testing.T) { actual := (&Change{Path: "change", Kind: ChangeModify}).String() if actual != "C change" { t.Fatalf("String() of a change with ChangeModify Kind should have been %s but was %s", "C change", actual) } actual = (&Change{Path: "change", Kind: ChangeAdd}).String() if actual != "A change" { t.Fatalf("String() of a change with ChangeAdd Kind should have been %s but was %s", "A change", actual) } actual = (&Change{Path: "change", Kind: ChangeDelete}).String() if actual != "D change" { t.Fatalf("String() of a change with ChangeDelete Kind 
should have been %s but was %s", "D change", actual) } } func TestChangesWithNoChanges(t *testing.T) { rwLayer, err := os.MkdirTemp("", "docker-changes-test") assert.NilError(t, err) defer os.RemoveAll(rwLayer) layer, err := os.MkdirTemp("", "docker-changes-test-layer") assert.NilError(t, err) defer os.RemoveAll(layer) createSampleDir(t, layer) changes, err := Changes([]string{layer}, rwLayer) assert.NilError(t, err) if len(changes) != 0 { t.Fatalf("Changes with no difference should have detect no changes, but detected %d", len(changes)) } } func TestChangesWithChanges(t *testing.T) { // Mock the readonly layer layer, err := os.MkdirTemp("", "docker-changes-test-layer") assert.NilError(t, err) defer os.RemoveAll(layer) createSampleDir(t, layer) assert.NilError(t, os.MkdirAll(path.Join(layer, "dir1/subfolder"), 0o740)) // Mock the RW layer rwLayer, err := os.MkdirTemp("", "docker-changes-test") assert.NilError(t, err) defer os.RemoveAll(rwLayer) // Create a folder in RW layer dir1 := path.Join(rwLayer, "dir1") assert.NilError(t, os.MkdirAll(dir1, 0o740)) deletedFile := path.Join(dir1, ".wh.file1-2") assert.NilError(t, os.WriteFile(deletedFile, []byte{}, 0o600)) modifiedFile := path.Join(dir1, "file1-1") assert.NilError(t, os.WriteFile(modifiedFile, []byte{0x00}, 0o1444)) // Let's add a subfolder for a newFile subfolder := path.Join(dir1, "subfolder") assert.NilError(t, os.MkdirAll(subfolder, 0o740)) newFile := path.Join(subfolder, "newFile") assert.NilError(t, os.WriteFile(newFile, []byte{}, 0o740)) changes, err := Changes([]string{layer}, rwLayer) assert.NilError(t, err) expectedChanges := []Change{ {Path: filepath.FromSlash("/dir1"), Kind: ChangeModify}, {Path: filepath.FromSlash("/dir1/file1-1"), Kind: ChangeModify}, {Path: filepath.FromSlash("/dir1/file1-2"), Kind: ChangeDelete}, {Path: filepath.FromSlash("/dir1/subfolder"), Kind: ChangeModify}, {Path: filepath.FromSlash("/dir1/subfolder/newFile"), Kind: ChangeAdd}, } checkChanges(expectedChanges, changes, t) } 
// See https://github.com/docker/docker/pull/13590 func TestChangesWithChangesGH13590(t *testing.T) { // TODO Windows. Needs further investigation to identify the failure if runtime.GOOS == "windows" { t.Skip("needs more investigation") } baseLayer, err := os.MkdirTemp("", "docker-changes-test.") assert.NilError(t, err) defer os.RemoveAll(baseLayer) dir3 := path.Join(baseLayer, "dir1/dir2/dir3") assert.NilError(t, os.MkdirAll(dir3, 0o740)) file := path.Join(dir3, "file.txt") assert.NilError(t, os.WriteFile(file, []byte("hello"), 0o666)) layer, err := os.MkdirTemp("", "docker-changes-test2.") assert.NilError(t, err) defer os.RemoveAll(layer) // Test creating a new file if err := copyDir(baseLayer+"/dir1", layer+"/"); err != nil { t.Fatalf("Cmd failed: %q", err) } assert.NilError(t, os.Remove(path.Join(layer, "dir1/dir2/dir3/file.txt"))) file = path.Join(layer, "dir1/dir2/dir3/file1.txt") assert.NilError(t, os.WriteFile(file, []byte("bye"), 0o666)) changes, err := Changes([]string{baseLayer}, layer) assert.NilError(t, err) expectedChanges := []Change{ {Path: "/dir1/dir2/dir3", Kind: ChangeModify}, {Path: "/dir1/dir2/dir3/file1.txt", Kind: ChangeAdd}, } checkChanges(expectedChanges, changes, t) // Now test changing a file layer, err = os.MkdirTemp("", "docker-changes-test3.") assert.NilError(t, err) defer os.RemoveAll(layer) if err := copyDir(baseLayer+"/dir1", layer+"/"); err != nil { t.Fatalf("Cmd failed: %q", err) } file = path.Join(layer, "dir1/dir2/dir3/file.txt") assert.NilError(t, os.WriteFile(file, []byte("bye"), 0o666)) changes, err = Changes([]string{baseLayer}, layer) assert.NilError(t, err) expectedChanges = []Change{ {Path: "/dir1/dir2/dir3/file.txt", Kind: ChangeModify}, } checkChanges(expectedChanges, changes, t) } // Create a directory, copy it, make sure we report no changes between the two func TestChangesDirsEmpty(t *testing.T) { if runtime.GOOS == "windows" { t.Skip("FIXME: broken on Windows 1903 and up; see 
https://github.com/moby/moby/pull/39846") } src, err := os.MkdirTemp("", "docker-changes-test") assert.NilError(t, err) defer os.RemoveAll(src) createSampleDir(t, src) dst := src + "-copy" err = copyDir(src, dst) assert.NilError(t, err) defer os.RemoveAll(dst) changes, err := ChangesDirs(dst, src) assert.NilError(t, err) if len(changes) != 0 { t.Fatalf("Reported changes for identical dirs: %v", changes) } assert.NilError(t, os.RemoveAll(src)) assert.NilError(t, os.RemoveAll(dst)) } func mutateSampleDir(t *testing.T, root string) { // Remove a regular file err := os.RemoveAll(path.Join(root, "file1")) assert.NilError(t, err) // Remove a directory err = os.RemoveAll(path.Join(root, "dir1")) assert.NilError(t, err) // Remove a symlink err = os.RemoveAll(path.Join(root, "symlink1")) assert.NilError(t, err) // Rewrite a file err = os.WriteFile(path.Join(root, "file2"), []byte("fileNN\n"), 0o777) assert.NilError(t, err) // Replace a file err = os.RemoveAll(path.Join(root, "file3")) assert.NilError(t, err) err = os.WriteFile(path.Join(root, "file3"), []byte("fileMM\n"), 0o404) assert.NilError(t, err) // Touch file err = chtimes(path.Join(root, "file4"), time.Now().Add(time.Second), time.Now().Add(time.Second)) assert.NilError(t, err) // Replace file with dir err = os.RemoveAll(path.Join(root, "file5")) assert.NilError(t, err) err = os.MkdirAll(path.Join(root, "file5"), 0o666) assert.NilError(t, err) // Create new file err = os.WriteFile(path.Join(root, "filenew"), []byte("filenew\n"), 0o777) assert.NilError(t, err) // Create new dir err = os.MkdirAll(path.Join(root, "dirnew"), 0o766) assert.NilError(t, err) // Create a new symlink err = os.Symlink("targetnew", path.Join(root, "symlinknew")) assert.NilError(t, err) // Change a symlink err = os.RemoveAll(path.Join(root, "symlink2")) assert.NilError(t, err) err = os.Symlink("target2change", path.Join(root, "symlink2")) assert.NilError(t, err) // Replace dir with file err = os.RemoveAll(path.Join(root, "dir2")) 
assert.NilError(t, err) err = os.WriteFile(path.Join(root, "dir2"), []byte("dir2\n"), 0o777) assert.NilError(t, err) // Touch dir err = chtimes(path.Join(root, "dir3"), time.Now().Add(time.Second), time.Now().Add(time.Second)) assert.NilError(t, err) } func TestChangesDirsMutated(t *testing.T) { if runtime.GOOS == "windows" { t.Skip("FIXME: broken on Windows 1903 and up; see https://github.com/moby/moby/pull/39846") } src, err := os.MkdirTemp("", "docker-changes-test") assert.NilError(t, err) createSampleDir(t, src) dst := src + "-copy" err = copyDir(src, dst) assert.NilError(t, err) defer func() { _ = os.RemoveAll(dst) _ = os.RemoveAll(src) }() mutateSampleDir(t, dst) changes, err := ChangesDirs(dst, src) assert.NilError(t, err) sort.Sort(changesByPath(changes)) expectedChanges := []Change{ {Path: filepath.FromSlash("/dir1"), Kind: ChangeDelete}, {Path: filepath.FromSlash("/dir2"), Kind: ChangeModify}, } // Note there is slight difference between the Linux and Windows // implementations here. Due to https://github.com/moby/moby/issues/9874, // and the fix at https://github.com/moby/moby/pull/11422, Linux does not // consider a change to the directory time as a change. Windows on NTFS // does. See https://github.com/moby/moby/pull/37982 for more information. // // Note also: https://github.com/moby/moby/pull/37982#discussion_r223523114 // that differences are ordered in the way the test is currently written, hence // this is in the middle of the list of changes rather than at the start or // end. Potentially can be addressed later. 
if runtime.GOOS == "windows" { expectedChanges = append(expectedChanges, Change{Path: filepath.FromSlash("/dir3"), Kind: ChangeModify}) } expectedChanges = append(expectedChanges, []Change{ {Path: filepath.FromSlash("/dirnew"), Kind: ChangeAdd}, {Path: filepath.FromSlash("/file1"), Kind: ChangeDelete}, {Path: filepath.FromSlash("/file2"), Kind: ChangeModify}, {Path: filepath.FromSlash("/file3"), Kind: ChangeModify}, {Path: filepath.FromSlash("/file4"), Kind: ChangeModify}, {Path: filepath.FromSlash("/file5"), Kind: ChangeModify}, {Path: filepath.FromSlash("/filenew"), Kind: ChangeAdd}, {Path: filepath.FromSlash("/symlink1"), Kind: ChangeDelete}, {Path: filepath.FromSlash("/symlink2"), Kind: ChangeModify}, {Path: filepath.FromSlash("/symlinknew"), Kind: ChangeAdd}, }...) for i := 0; i < maxInt(len(changes), len(expectedChanges)); i++ { if i >= len(expectedChanges) { t.Fatalf("unexpected change %s\n", changes[i].String()) } if i >= len(changes) { t.Fatalf("no change for expected change %s\n", expectedChanges[i].String()) } if changes[i].Path == expectedChanges[i].Path { if changes[i] != expectedChanges[i] { t.Fatalf("Wrong change for %s, expected %s, got %s\n", changes[i].Path, changes[i].String(), expectedChanges[i].String()) } } else if changes[i].Path < expectedChanges[i].Path { t.Fatalf("unexpected change %q %q\n", changes[i].String(), expectedChanges[i].Path) } else { t.Fatalf("no change for expected change %s != %s\n", expectedChanges[i].String(), changes[i].String()) } } } func TestApplyLayer(t *testing.T) { // TODO Windows. This is very close to working, but it fails with changes // to \symlinknew and \symlink2. The destination has an updated // Access/Modify/Change/Birth date to the source (~3/100th sec different). // Needs further investigation as to why, but I currently believe this is // just the way NTFS works. I don't think it's a bug in this test or archive. 
if runtime.GOOS == "windows" { t.Skip("needs further investigation") } // TODO macOS: Test is failing: changes_test.go:440: Unexpected differences after reapplying mutation: [{/symlinknew C} {/symlink2 C}] if runtime.GOOS == "darwin" { t.Skip("needs further investigation") } src, err := os.MkdirTemp("", "docker-changes-test") assert.NilError(t, err) createSampleDir(t, src) defer os.RemoveAll(src) dst := src + "-copy" err = copyDir(src, dst) assert.NilError(t, err) mutateSampleDir(t, dst) defer os.RemoveAll(dst) changes, err := ChangesDirs(dst, src) assert.NilError(t, err) layer, err := ExportChanges(dst, changes, user.IdentityMapping{}) assert.NilError(t, err) layerCopy, err := newTempArchive(layer, "") assert.NilError(t, err) _, err = ApplyLayer(src, layerCopy) assert.NilError(t, err) changes2, err := ChangesDirs(src, dst) assert.NilError(t, err) if len(changes2) != 0 { t.Fatalf("Unexpected differences after reapplying mutation: %v", changes2) } } func TestChangesSizeWithHardlinks(t *testing.T) { // TODO Windows. Needs further investigation. Likely in ChangeSizes not // coping correctly with hardlinks on Windows. 
if runtime.GOOS == "windows" { t.Skip("needs further investigation") } srcDir, err := os.MkdirTemp("", "docker-test-srcDir") assert.NilError(t, err) defer os.RemoveAll(srcDir) destDir, err := os.MkdirTemp("", "docker-test-destDir") assert.NilError(t, err) defer os.RemoveAll(destDir) creationSize, err := prepareUntarSourceDirectory(100, destDir, true) assert.NilError(t, err) changes, err := ChangesDirs(destDir, srcDir) assert.NilError(t, err) got := ChangesSize(destDir, changes) if got != int64(creationSize) { t.Errorf("Expected %d bytes of changes, got %d", creationSize, got) } } func TestChangesSizeWithNoChanges(t *testing.T) { size := ChangesSize("/tmp", nil) if size != 0 { t.Fatalf("ChangesSizes with no changes should be 0, was %d", size) } } func TestChangesSizeWithOnlyDeleteChanges(t *testing.T) { size := ChangesSize("/tmp", []Change{ {Path: "deletedPath", Kind: ChangeDelete}, }) if size != 0 { t.Fatalf("ChangesSizes with only delete changes should be 0, was %d", size) } } func TestChangesSize(t *testing.T) { parentPath, err := os.MkdirTemp("", "docker-changes-test") assert.NilError(t, err) defer os.RemoveAll(parentPath) addition := path.Join(parentPath, "addition") err = os.WriteFile(addition, []byte{0x01, 0x01, 0x01}, 0o744) assert.NilError(t, err) modification := path.Join(parentPath, "modification") err = os.WriteFile(modification, []byte{0x01, 0x01, 0x01}, 0o744) assert.NilError(t, err) size := ChangesSize(parentPath, []Change{ {Path: "addition", Kind: ChangeAdd}, {Path: "modification", Kind: ChangeModify}, }) if size != 6 { t.Fatalf("Expected 6 bytes of changes, got %d", size) } } func checkChanges(expectedChanges, changes []Change, t *testing.T) { skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root") sort.Sort(changesByPath(expectedChanges)) sort.Sort(changesByPath(changes)) for i := 0; i < maxInt(len(changes), len(expectedChanges)); i++ { if i >= len(expectedChanges) { t.Fatalf("unexpected change %s\n", 
changes[i].String()) } if i >= len(changes) { t.Fatalf("no change for expected change %s\n", expectedChanges[i].String()) } if changes[i].Path == expectedChanges[i].Path { if changes[i] != expectedChanges[i] { t.Fatalf("Wrong change for %s, expected %s, got %s\n", changes[i].Path, changes[i].String(), expectedChanges[i].String()) } } else if changes[i].Path < expectedChanges[i].Path { t.Fatalf("unexpected change %s\n", changes[i].String()) } else { t.Fatalf("no change for expected change %s != %s\n", expectedChanges[i].String(), changes[i].String()) } } } golang-github-moby-go-archive-0.2.0/changes_unix.go000066400000000000000000000024171512625003600222340ustar00rootroot00000000000000//go:build !windows package archive import ( "io/fs" "os" "syscall" ) func statDifferent(oldStat fs.FileInfo, newStat fs.FileInfo) bool { oldSys := oldStat.Sys().(*syscall.Stat_t) newSys := newStat.Sys().(*syscall.Stat_t) // Don't look at size for dirs, its not a good measure of change if oldStat.Mode() != newStat.Mode() || oldSys.Uid != newSys.Uid || oldSys.Gid != newSys.Gid || oldSys.Rdev != newSys.Rdev || // Don't look at size or modification time for dirs, its not a good // measure of change. See https://github.com/moby/moby/issues/9874 // for a description of the issue with modification time, and // https://github.com/moby/moby/pull/11422 for the change. // (Note that in the Windows implementation of this function, // modification time IS taken as a change). See // https://github.com/moby/moby/pull/37982 for more information. 
(!oldStat.Mode().IsDir() && (!sameFsTime(oldStat.ModTime(), newStat.ModTime()) || (oldStat.Size() != newStat.Size()))) { return true } return false } func (info *FileInfo) isDir() bool { return info.parent == nil || info.stat.Mode().IsDir() } func getIno(fi os.FileInfo) uint64 { return fi.Sys().(*syscall.Stat_t).Ino } func hasHardlinks(fi os.FileInfo) bool { return fi.Sys().(*syscall.Stat_t).Nlink > 1 } golang-github-moby-go-archive-0.2.0/changes_windows.go000066400000000000000000000015571512625003600227470ustar00rootroot00000000000000package archive import ( "io/fs" "os" ) func statDifferent(oldStat fs.FileInfo, newStat fs.FileInfo) bool { // Note there is slight difference between the Linux and Windows // implementations here. Due to https://github.com/moby/moby/issues/9874, // and the fix at https://github.com/moby/moby/pull/11422, Linux does not // consider a change to the directory time as a change. Windows on NTFS // does. See https://github.com/moby/moby/pull/37982 for more information. 
if !sameFsTime(oldStat.ModTime(), newStat.ModTime()) || oldStat.Mode() != newStat.Mode() || oldStat.Size() != newStat.Size() && !oldStat.Mode().IsDir() { return true } return false } func (info *FileInfo) isDir() bool { return info.parent == nil || info.stat.Mode().IsDir() } func getIno(fi os.FileInfo) (inode uint64) { return } func hasHardlinks(fi os.FileInfo) bool { return false } golang-github-moby-go-archive-0.2.0/chrootarchive/000077500000000000000000000000001512625003600220665ustar00rootroot00000000000000golang-github-moby-go-archive-0.2.0/chrootarchive/archive.go000066400000000000000000000062471512625003600240470ustar00rootroot00000000000000package chrootarchive import ( "errors" "io" "os" "path/filepath" "github.com/moby/sys/user" "github.com/moby/go-archive" "github.com/moby/go-archive/compression" ) // NewArchiver returns a new Archiver which uses chrootarchive.Untar func NewArchiver(idMapping user.IdentityMapping) *archive.Archiver { return &archive.Archiver{ Untar: Untar, IDMapping: idMapping, } } // Untar reads a stream of bytes from `archive`, parses it as a tar archive, // and unpacks it into the directory at `dest`. // The archive may be compressed with one of the following algorithms: // identity (uncompressed), gzip, bzip2, xz. func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error { return untarHandler(tarArchive, dest, options, true, dest) } // UntarWithRoot is the same as `Untar`, but allows you to pass in a root directory // The root directory is the directory that will be chrooted to. // `dest` must be a path within `root`, if it is not an error will be returned. // // `root` should set to a directory which is not controlled by any potentially // malicious process. // // This should be used to prevent a potential attacker from manipulating `dest` // such that it would provide access to files outside of `dest` through things // like symlinks. 
Normally `ResolveSymlinksInScope` would handle this, however // sanitizing symlinks in this manner is inherently racey: // ref: CVE-2018-15664 func UntarWithRoot(tarArchive io.Reader, dest string, options *archive.TarOptions, root string) error { return untarHandler(tarArchive, dest, options, true, root) } // UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, // and unpacks it into the directory at `dest`. // The archive must be an uncompressed stream. func UntarUncompressed(tarArchive io.Reader, dest string, options *archive.TarOptions) error { return untarHandler(tarArchive, dest, options, false, dest) } // Handler for teasing out the automatic decompression func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions, decompress bool, root string) error { if tarArchive == nil { return errors.New("empty archive") } if options == nil { options = &archive.TarOptions{} } if options.ExcludePatterns == nil { options.ExcludePatterns = []string{} } // If dest is inside a root then directory is created within chroot by extractor. // This case is only currently used by cp. if dest == root { uid, gid := options.IDMap.RootPair() dest = filepath.Clean(dest) if _, err := os.Stat(dest); os.IsNotExist(err) { if err := user.MkdirAllAndChown(dest, 0o755, uid, gid, user.WithOnlyNew); err != nil { return err } } } r := io.NopCloser(tarArchive) if decompress { decompressedArchive, err := compression.DecompressStream(tarArchive) if err != nil { return err } defer decompressedArchive.Close() r = decompressedArchive } return invokeUnpack(r, dest, options, root) } // Tar tars the requested path while chrooted to the specified root. 
func Tar(srcPath string, options *archive.TarOptions, root string) (io.ReadCloser, error) { if options == nil { options = &archive.TarOptions{} } return invokePack(srcPath, options, root) } golang-github-moby-go-archive-0.2.0/chrootarchive/archive_linux.go000066400000000000000000000022701512625003600252560ustar00rootroot00000000000000package chrootarchive import ( "fmt" "io" "golang.org/x/sys/unix" "github.com/moby/go-archive" ) func doUnpack(decompressedArchive io.Reader, relDest, root string, options *archive.TarOptions) error { done := make(chan error) err := goInChroot(root, func() { done <- archive.Unpack(decompressedArchive, relDest, options) }) if err != nil { return err } return <-done } func doPack(relSrc, root string, options *archive.TarOptions) (io.ReadCloser, error) { tb, err := archive.NewTarballer(relSrc, options) if err != nil { return nil, fmt.Errorf("error processing tar file: %w", err) } err = goInChroot(root, tb.Do) if err != nil { return nil, fmt.Errorf("could not chroot: %w", err) } return tb.Reader(), nil } func doUnpackLayer(root string, layer io.Reader, options *archive.TarOptions) (int64, error) { type result struct { layerSize int64 err error } done := make(chan result) err := goInChroot(root, func() { // We need to be able to set any perms _ = unix.Umask(0) size, err := archive.UnpackLayer("/", layer, options) done <- result{layerSize: size, err: err} }) if err != nil { return 0, err } res := <-done return res.layerSize, res.err } golang-github-moby-go-archive-0.2.0/chrootarchive/archive_test.go000066400000000000000000000230411512625003600250750ustar00rootroot00000000000000package chrootarchive import ( "bytes" "fmt" "hash/crc32" "io" "os" "path/filepath" "runtime" "strings" "testing" "time" "github.com/moby/sys/user" "gotest.tools/v3/skip" "github.com/moby/go-archive" "github.com/moby/go-archive/compression" ) var chrootArchiver = NewArchiver(user.IdentityMapping{}) func TarUntar(src, dst string) error { return 
chrootArchiver.TarUntar(src, dst) } func CopyFileWithTar(src, dst string) (err error) { return chrootArchiver.CopyFileWithTar(src, dst) } func UntarPath(src, dst string) error { return chrootArchiver.UntarPath(src, dst) } func CopyWithTar(src, dst string) error { return chrootArchiver.CopyWithTar(src, dst) } func TestChrootTarUntar(t *testing.T) { skip.If(t, os.Getuid() != 0, "skipping test that requires root") tmpdir := t.TempDir() src := filepath.Join(tmpdir, "src") if err := os.Mkdir(src, 0o700); err != nil { t.Fatal(err) } if err := os.WriteFile(filepath.Join(src, "toto"), []byte("hello toto"), 0o644); err != nil { t.Fatal(err) } if err := os.WriteFile(filepath.Join(src, "lolo"), []byte("hello lolo"), 0o644); err != nil { t.Fatal(err) } stream, err := archive.Tar(src, compression.None) if err != nil { t.Fatal(err) } dest := filepath.Join(tmpdir, "dest") if err := os.Mkdir(dest, 0o700); err != nil { t.Fatal(err) } if err := Untar(stream, dest, &archive.TarOptions{ExcludePatterns: []string{"lolo"}}); err != nil { t.Fatal(err) } } // gh#10426: Verify the fix for having a huge excludes list (like on `docker load` with large # of // local images) func TestChrootUntarWithHugeExcludesList(t *testing.T) { skip.If(t, os.Getuid() != 0, "skipping test that requires root") tmpdir := t.TempDir() src := filepath.Join(tmpdir, "src") if err := os.Mkdir(src, 0o700); err != nil { t.Fatal(err) } if err := os.WriteFile(filepath.Join(src, "toto"), []byte("hello toto"), 0o644); err != nil { t.Fatal(err) } stream, err := archive.Tar(src, compression.None) if err != nil { t.Fatal(err) } dest := filepath.Join(tmpdir, "dest") if err := os.Mkdir(dest, 0o700); err != nil { t.Fatal(err) } options := &archive.TarOptions{} // 65534 entries of 64-byte strings ~= 4MB of environment space which should overflow // on most systems when passed via environment or command line arguments excludes := make([]string, 65534) var i rune for i = 0; i < 65534; i++ { excludes[i] = strings.Repeat(string(i), 
64) } options.ExcludePatterns = excludes if err := Untar(stream, dest, options); err != nil { t.Fatal(err) } } func TestChrootUntarEmptyArchive(t *testing.T) { if err := Untar(nil, t.TempDir(), nil); err == nil { t.Fatal("expected error on empty archive") } } func prepareSourceDirectory(targetPath string, makeSymLinks bool) error { fileData := []byte("fooo") numberOfFiles := 10 for n := 0; n < numberOfFiles; n++ { fileName := fmt.Sprintf("file-%d", n) if err := os.WriteFile(filepath.Join(targetPath, fileName), fileData, 0o700); err != nil { return err } if makeSymLinks { if err := os.Symlink(filepath.Join(targetPath, fileName), filepath.Join(targetPath, fileName+"-link")); err != nil { return err } } } return nil } func getHash(filename string) (uint32, error) { stream, err := os.ReadFile(filename) if err != nil { return 0, err } hash := crc32.NewIEEE() if _, err := hash.Write(stream); err != nil { return 0, err } return hash.Sum32(), nil } func compareDirectories(src string, dest string) error { changes, err := archive.ChangesDirs(dest, src) if err != nil { return err } if len(changes) > 0 { return fmt.Errorf("unexpected differences after untar: %v", changes) } return nil } func compareFiles(src string, dest string) error { srcHash, err := getHash(src) if err != nil { return err } destHash, err := getHash(dest) if err != nil { return err } if srcHash != destHash { return fmt.Errorf("%s is different from %s", src, dest) } return nil } func TestChrootTarUntarWithSymlink(t *testing.T) { skip.If(t, runtime.GOOS == "windows", "FIXME: figure out why this is failing") skip.If(t, os.Getuid() != 0, "skipping test that requires root") tmpdir := t.TempDir() src := filepath.Join(tmpdir, "src") if err := os.Mkdir(src, 0o700); err != nil { t.Fatal(err) } if err := prepareSourceDirectory(src, false); err != nil { t.Fatal(err) } dest := filepath.Join(tmpdir, "dest") if err := TarUntar(src, dest); err != nil { t.Fatal(err) } if err := compareDirectories(src, dest); err != nil { 
t.Fatal(err) } } func TestChrootCopyWithTar(t *testing.T) { skip.If(t, runtime.GOOS == "windows", "FIXME: figure out why this is failing") skip.If(t, os.Getuid() != 0, "skipping test that requires root") tmpdir := t.TempDir() src := filepath.Join(tmpdir, "src") if err := os.Mkdir(src, 0o700); err != nil { t.Fatal(err) } if err := prepareSourceDirectory(src, true); err != nil { t.Fatal(err) } // Copy directory dest := filepath.Join(tmpdir, "dest") if err := CopyWithTar(src, dest); err != nil { t.Fatal(err) } if err := compareDirectories(src, dest); err != nil { t.Fatal(err) } // Copy file srcfile := filepath.Join(src, "file-1") dest = filepath.Join(tmpdir, "destFile") destfile := filepath.Join(dest, "file-1") if err := CopyWithTar(srcfile, destfile); err != nil { t.Fatal(err) } if err := compareFiles(srcfile, destfile); err != nil { t.Fatal(err) } // Copy symbolic link srcLinkfile := filepath.Join(src, "file-1-link") dest = filepath.Join(tmpdir, "destSymlink") destLinkfile := filepath.Join(dest, "file-1-link") if err := CopyWithTar(srcLinkfile, destLinkfile); err != nil { t.Fatal(err) } if err := compareFiles(srcLinkfile, destLinkfile); err != nil { t.Fatal(err) } } func TestChrootCopyFileWithTar(t *testing.T) { skip.If(t, os.Getuid() != 0, "skipping test that requires root") tmpdir := t.TempDir() src := filepath.Join(tmpdir, "src") if err := os.Mkdir(src, 0o700); err != nil { t.Fatal(err) } if err := prepareSourceDirectory(src, true); err != nil { t.Fatal(err) } // Copy directory dest := filepath.Join(tmpdir, "dest") if err := CopyFileWithTar(src, dest); err == nil { t.Fatal("Expected error on copying directory") } // Copy file srcfile := filepath.Join(src, "file-1") dest = filepath.Join(tmpdir, "destFile") destfile := filepath.Join(dest, "file-1") if err := CopyFileWithTar(srcfile, destfile); err != nil { t.Fatal(err) } if err := compareFiles(srcfile, destfile); err != nil { t.Fatal(err) } // Copy symbolic link srcLinkfile := filepath.Join(src, "file-1-link") dest 
= filepath.Join(tmpdir, "destSymlink") destLinkfile := filepath.Join(dest, "file-1-link") if err := CopyFileWithTar(srcLinkfile, destLinkfile); err != nil { t.Fatal(err) } if err := compareFiles(srcLinkfile, destLinkfile); err != nil { t.Fatal(err) } } func TestChrootUntarPath(t *testing.T) { skip.If(t, runtime.GOOS == "windows", "FIXME: figure out why this is failing") skip.If(t, os.Getuid() != 0, "skipping test that requires root") tmpdir := t.TempDir() src := filepath.Join(tmpdir, "src") if err := os.Mkdir(src, 0o700); err != nil { t.Fatal(err) } if err := prepareSourceDirectory(src, false); err != nil { t.Fatal(err) } dest := filepath.Join(tmpdir, "dest") // Untar a directory if err := UntarPath(src, dest); err == nil { t.Fatal("Expected error on untaring a directory") } // Untar a tar file stream, err := archive.Tar(src, compression.None) if err != nil { t.Fatal(err) } buf := new(bytes.Buffer) if _, err := buf.ReadFrom(stream); err != nil { t.Fatal(err) } tarfile := filepath.Join(tmpdir, "src.tar") if err := os.WriteFile(tarfile, buf.Bytes(), 0o644); err != nil { t.Fatal(err) } if err := UntarPath(tarfile, dest); err != nil { t.Fatal(err) } if err := compareDirectories(src, dest); err != nil { t.Fatal(err) } } type slowEmptyTarReader struct { size int offset int chunkSize int } // Read is a slow reader of an empty tar (like the output of "tar c --files-from /dev/null") func (s *slowEmptyTarReader) Read(p []byte) (int, error) { time.Sleep(100 * time.Millisecond) count := s.chunkSize if len(p) < s.chunkSize { count = len(p) } for i := 0; i < count; i++ { p[i] = 0 } s.offset += count if s.offset > s.size { return count, io.EOF } return count, nil } func TestChrootUntarEmptyArchiveFromSlowReader(t *testing.T) { skip.If(t, os.Getuid() != 0, "skipping test that requires root") tmpdir := t.TempDir() dest := filepath.Join(tmpdir, "dest") if err := os.Mkdir(dest, 0o700); err != nil { t.Fatal(err) } stream := &slowEmptyTarReader{size: 10240, chunkSize: 1024} if err := 
Untar(stream, dest, nil); err != nil { t.Fatal(err) } } func TestChrootApplyEmptyArchiveFromSlowReader(t *testing.T) { skip.If(t, os.Getuid() != 0, "skipping test that requires root") tmpdir := t.TempDir() dest := filepath.Join(tmpdir, "dest") if err := os.Mkdir(dest, 0o700); err != nil { t.Fatal(err) } stream := &slowEmptyTarReader{size: 10240, chunkSize: 1024} if _, err := ApplyLayer(dest, stream); err != nil { t.Fatal(err) } } func TestChrootApplyDotDotFile(t *testing.T) { skip.If(t, os.Getuid() != 0, "skipping test that requires root") tmpdir := t.TempDir() src := filepath.Join(tmpdir, "src") if err := os.Mkdir(src, 0o700); err != nil { t.Fatal(err) } if err := os.WriteFile(filepath.Join(src, "..gitme"), []byte(""), 0o644); err != nil { t.Fatal(err) } stream, err := archive.Tar(src, compression.None) if err != nil { t.Fatal(err) } dest := filepath.Join(tmpdir, "dest") if err := os.Mkdir(dest, 0o700); err != nil { t.Fatal(err) } if _, err := ApplyLayer(dest, stream); err != nil { t.Fatal(err) } } golang-github-moby-go-archive-0.2.0/chrootarchive/archive_unix.go000066400000000000000000000026361512625003600251100ustar00rootroot00000000000000//go:build !windows package chrootarchive import ( "errors" "io" "path/filepath" "strings" "github.com/moby/go-archive" ) func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.TarOptions, root string) error { relDest, err := resolvePathInChroot(root, dest) if err != nil { return err } return doUnpack(decompressedArchive, relDest, root, options) } func invokePack(srcPath string, options *archive.TarOptions, root string) (io.ReadCloser, error) { relSrc, err := resolvePathInChroot(root, srcPath) if err != nil { return nil, err } // make sure we didn't trim a trailing slash with the call to `resolvePathInChroot` if strings.HasSuffix(srcPath, "/") && !strings.HasSuffix(relSrc, "/") { relSrc += "/" } return doPack(relSrc, root, options) } // resolvePathInChroot returns the equivalent to path inside a chroot 
rooted at root. // The returned path always begins with '/'. // // - resolvePathInChroot("/a/b", "/a/b/c/d") -> "/c/d" // - resolvePathInChroot("/a/b", "/a/b") -> "/" // // The implementation is buggy, and some bugs may be load-bearing. // Here be dragons. func resolvePathInChroot(root, path string) (string, error) { if root == "" { return "", errors.New("root path must not be empty") } rel, err := filepath.Rel(root, path) if err != nil { return "", err } if rel == "." { rel = "/" } if rel[0] != '/' { rel = "/" + rel } return rel, nil } golang-github-moby-go-archive-0.2.0/chrootarchive/archive_unix_nolinux.go000066400000000000000000000114161512625003600266600ustar00rootroot00000000000000//go:build unix && !linux package chrootarchive import ( "bytes" "encoding/json" "fmt" "io" "os" "syscall" "github.com/moby/sys/reexec" "golang.org/x/sys/unix" "github.com/moby/go-archive" ) const ( packCmd = "chrootarchive-pack-in-chroot" unpackCmd = "chrootarchive-unpack-in-chroot" unpackLayerCmd = "chrootarchive-unpack-layer-in-chroot" ) func init() { reexec.Register(packCmd, reexecMain(packInChroot)) reexec.Register(unpackCmd, reexecMain(unpackInChroot)) reexec.Register(unpackLayerCmd, reexecMain(unpackLayerInChroot)) } func reexecMain(f func(options archive.TarOptions, args ...string) error) func() { return func() { if len(os.Args) < 2 { fmt.Fprintln(os.Stderr, "root parameter is required") os.Exit(1) } options, err := recvOptions() root := os.Args[1] if err != nil { fmt.Fprintln(os.Stderr, err) os.Exit(1) } if err := syscall.Chroot(root); err != nil { fmt.Fprintln( os.Stderr, os.PathError{Op: "chroot", Path: root, Err: err}, ) os.Exit(2) } if err := f(*options, os.Args[2:]...); err != nil { fmt.Fprintln(os.Stderr, err) os.Exit(3) } } } func doUnpack(decompressedArchive io.Reader, relDest, root string, options *archive.TarOptions) error { optionsR, optionsW, err := os.Pipe() if err != nil { return err } defer optionsW.Close() defer optionsR.Close() stderr := 
bytes.NewBuffer(nil) cmd := reexec.Command(unpackCmd, root, relDest) cmd.Stdin = decompressedArchive cmd.Stderr = stderr cmd.ExtraFiles = []*os.File{ optionsR, } if err = cmd.Start(); err != nil { return fmt.Errorf("re-exec error: %w", err) } if err = json.NewEncoder(optionsW).Encode(options); err != nil { return fmt.Errorf("tar options encoding failed: %w", err) } if err = cmd.Wait(); err != nil { return fmt.Errorf("%s: %w", stderr.String(), err) } return nil } func doPack(relSrc, root string, options *archive.TarOptions) (io.ReadCloser, error) { optionsR, optionsW, err := os.Pipe() if err != nil { return nil, err } defer optionsW.Close() defer optionsR.Close() stderr := bytes.NewBuffer(nil) cmd := reexec.Command(packCmd, root, relSrc) cmd.ExtraFiles = []*os.File{ optionsR, } cmd.Stderr = stderr stdout, err := cmd.StdoutPipe() if err != nil { return nil, err } r, w := io.Pipe() if err = cmd.Start(); err != nil { return nil, fmt.Errorf("re-exec error: %w", err) } go func() { _, _ = io.Copy(w, stdout) // Cleanup once stdout pipe is closed. 
if err = cmd.Wait(); err != nil { r.CloseWithError(fmt.Errorf("%s: %w", stderr.String(), err)) } else { r.Close() } }() if err = json.NewEncoder(optionsW).Encode(options); err != nil { return nil, fmt.Errorf("tar options encoding failed: %w", err) } return r, nil } func doUnpackLayer(root string, layer io.Reader, options *archive.TarOptions) (int64, error) { var result int64 optionsR, optionsW, err := os.Pipe() if err != nil { return 0, err } defer optionsW.Close() defer optionsR.Close() buffer := bytes.NewBuffer(nil) cmd := reexec.Command(unpackLayerCmd, root) cmd.Stdin = layer cmd.Stdout = buffer cmd.Stderr = buffer cmd.ExtraFiles = []*os.File{ optionsR, } if err = cmd.Start(); err != nil { return 0, fmt.Errorf("re-exec error: %w", err) } if err = json.NewEncoder(optionsW).Encode(options); err != nil { return 0, fmt.Errorf("tar options encoding failed: %w", err) } if err = cmd.Wait(); err != nil { return 0, fmt.Errorf("%s: %w", buffer.String(), err) } if err = json.NewDecoder(buffer).Decode(&result); err != nil { return 0, fmt.Errorf("json decoding error: %w", err) } return result, nil } func unpackInChroot(options archive.TarOptions, args ...string) error { if len(args) < 1 { return fmt.Errorf("destination parameter is required") } relDest := args[0] return archive.Unpack(os.Stdin, relDest, &options) } func packInChroot(options archive.TarOptions, args ...string) error { if len(args) < 1 { return fmt.Errorf("source parameter is required") } relSrc := args[0] tb, err := archive.NewTarballer(relSrc, &options) if err != nil { return err } go tb.Do() _, err = io.Copy(os.Stdout, tb.Reader()) return err } func unpackLayerInChroot(options archive.TarOptions, _args ...string) error { // We need to be able to set any perms _ = unix.Umask(0) size, err := archive.UnpackLayer("/", os.Stdin, &options) if err != nil { return err } return json.NewEncoder(os.Stdout).Encode(size) } func recvOptions() (*archive.TarOptions, error) { var options archive.TarOptions optionsPipe := 
os.NewFile(3, "tar-options") if optionsPipe == nil { return nil, fmt.Errorf("could not read tar options from the pipe") } defer optionsPipe.Close() err := json.NewDecoder(optionsPipe).Decode(&options) if err != nil { return &options, err } return &options, nil } golang-github-moby-go-archive-0.2.0/chrootarchive/archive_unix_nolinux_test.go000066400000000000000000000002611512625003600277130ustar00rootroot00000000000000//go:build unix && !linux package chrootarchive import ( "testing" "github.com/moby/sys/reexec" ) func TestMain(m *testing.M) { if reexec.Init() { return } m.Run() } golang-github-moby-go-archive-0.2.0/chrootarchive/archive_unix_test.go000066400000000000000000000120111512625003600261330ustar00rootroot00000000000000//go:build !windows package chrootarchive import ( gotar "archive/tar" "bytes" "errors" "io" "os" "path" "path/filepath" "strings" "testing" "golang.org/x/sys/unix" "gotest.tools/v3/assert" "gotest.tools/v3/skip" "github.com/moby/go-archive" ) // Test for CVE-2018-15664 // Assures that in the case where an "attacker" controlled path is a symlink to // some path outside of a container's rootfs that we do not copy data to a // container path that will actually overwrite data on the host func TestUntarWithMaliciousSymlinks(t *testing.T) { skip.If(t, os.Getuid() != 0, "skipping test that requires root") dir := t.TempDir() root := filepath.Join(dir, "root") err := os.Mkdir(root, 0o755) assert.NilError(t, err) // Add a file into a directory above root // Ensure that we can't access this file while tarring. err = os.WriteFile(filepath.Join(dir, "host-file"), []byte("I am a host file"), 0o644) assert.NilError(t, err) // Create some data to copy into the "container" root into // the symlinked path. // Before this change, the copy would overwrite the "host" content. // With this change it should not. 
data := filepath.Join(dir, "data") err = os.Mkdir(data, 0o755) assert.NilError(t, err) err = os.WriteFile(filepath.Join(data, "local-file"), []byte("pwn3d"), 0o644) assert.NilError(t, err) safe := filepath.Join(root, "safe") err = unix.Symlink(dir, safe) assert.NilError(t, err) rdr, err := archive.TarWithOptions(data, &archive.TarOptions{IncludeFiles: []string{"local-file"}, RebaseNames: map[string]string{"local-file": "host-file"}}) assert.NilError(t, err) // Use tee to test both the good case and the bad case w/o recreating the archive bufRdr := bytes.NewBuffer(nil) tee := io.TeeReader(rdr, bufRdr) err = UntarWithRoot(tee, safe, nil, root) assert.Assert(t, err != nil) assert.ErrorContains(t, err, "open /safe/host-file: no such file or directory") // Make sure the "host" file is still in tact // Before the fix the host file would be overwritten hostData, err := os.ReadFile(filepath.Join(dir, "host-file")) assert.NilError(t, err) assert.Equal(t, string(hostData), "I am a host file") _, err = io.Copy(io.Discard, tee) assert.NilError(t, err) // Now test by chrooting to an attacker controlled path // This should succeed as is and overwrite a "host" file // Note that this would be a mis-use of this function. err = UntarWithRoot(bufRdr, safe, nil, safe) assert.NilError(t, err) hostData, err = os.ReadFile(filepath.Join(dir, "host-file")) assert.NilError(t, err) assert.Equal(t, string(hostData), "pwn3d") } // Test for CVE-2018-15664 // Assures that in the case where an "attacker" controlled path is a symlink to // some path outside of a container's rootfs that we do not unwittingly leak // host data into the archive. 
func TestTarWithMaliciousSymlinks(t *testing.T) { skip.If(t, os.Getuid() != 0, "skipping test that requires root") dir, err := os.MkdirTemp("", t.Name()) assert.NilError(t, err) // defer os.RemoveAll(dir) t.Log(dir) root := filepath.Join(dir, "root") err = os.Mkdir(root, 0o755) assert.NilError(t, err) hostFileData := []byte("I am a host file") // Add a file into a directory above root // Ensure that we can't access this file while tarring. err = os.WriteFile(filepath.Join(dir, "host-file"), hostFileData, 0o644) assert.NilError(t, err) safe := filepath.Join(root, "safe") err = unix.Symlink(dir, safe) assert.NilError(t, err) data := filepath.Join(dir, "data") err = os.Mkdir(data, 0o755) assert.NilError(t, err) type testCase struct { p string includes []string } cases := []testCase{ {p: safe, includes: []string{"host-file"}}, {p: safe + "/", includes: []string{"host-file"}}, {p: safe, includes: nil}, {p: safe + "/", includes: nil}, {p: root, includes: []string{"safe/host-file"}}, {p: root, includes: []string{"/safe/host-file"}}, {p: root, includes: nil}, } maxBytes := len(hostFileData) for _, tc := range cases { t.Run(path.Join(tc.p+"_"+strings.Join(tc.includes, "_")), func(t *testing.T) { // Here if we use archive.TarWithOptions directly or change the "root" parameter // to be the same as "safe", data from the host will be leaked into the archive var opts *archive.TarOptions if tc.includes != nil { opts = &archive.TarOptions{ IncludeFiles: tc.includes, } } rdr, err := Tar(tc.p, opts, root) assert.NilError(t, err) defer rdr.Close() tr := gotar.NewReader(rdr) assert.Assert(t, !isDataInTar(t, tr, hostFileData, int64(maxBytes)), "host data leaked to archive") }) } } func isDataInTar(t *testing.T, tr *gotar.Reader, compare []byte, maxBytes int64) bool { for { h, err := tr.Next() if errors.Is(err, io.EOF) { break } assert.NilError(t, err) if h.Size == 0 { continue } assert.Assert(t, h.Size <= maxBytes, "%s: file size exceeds max expected size %d: %d", h.Name, maxBytes, 
h.Size) data := make([]byte, int(h.Size)) _, err = io.ReadFull(tr, data) assert.NilError(t, err) if bytes.Contains(data, compare) { return true } } return false } golang-github-moby-go-archive-0.2.0/chrootarchive/archive_windows.go000066400000000000000000000027351512625003600256170ustar00rootroot00000000000000package chrootarchive import ( "io" "strings" "github.com/moby/go-archive" ) // longPathPrefix is the longpath prefix for Windows file paths. const longPathPrefix = `\\?\` // addLongPathPrefix adds the Windows long path prefix to the path provided if // it does not already have it. It is a no-op on platforms other than Windows. // // addLongPathPrefix is a copy of [github.com/docker/docker/pkg/longpath.AddPrefix]. func addLongPathPrefix(srcPath string) string { if strings.HasPrefix(srcPath, longPathPrefix) { return srcPath } if strings.HasPrefix(srcPath, `\\`) { // This is a UNC path, so we need to add 'UNC' to the path as well. return longPathPrefix + `UNC` + srcPath[1:] } return longPathPrefix + srcPath } func invokeUnpack(decompressedArchive io.ReadCloser, dest string, options *archive.TarOptions, root string) error { // Windows is different to Linux here because Windows does not support // chroot. Hence there is no point sandboxing a chrooted process to // do the unpack. We call inline instead within the daemon process. return archive.Unpack(decompressedArchive, addLongPathPrefix(dest), options) } func invokePack(srcPath string, options *archive.TarOptions, root string) (io.ReadCloser, error) { // Windows is different to Linux here because Windows does not support // chroot. Hence there is no point sandboxing a chrooted process to // do the pack. We call inline instead within the daemon process. 
return archive.TarWithOptions(srcPath, options) } golang-github-moby-go-archive-0.2.0/chrootarchive/chroot_linux.go000066400000000000000000000023071512625003600251340ustar00rootroot00000000000000package chrootarchive import ( "github.com/moby/sys/mount" "golang.org/x/sys/unix" "github.com/moby/go-archive/internal/mounttree" "github.com/moby/go-archive/internal/unshare" ) // goInChroot starts fn in a goroutine where the root directory, current working // directory and umask are unshared from other goroutines and the root directory // has been changed to path. These changes are only visible to the goroutine in // which fn is executed. Any other goroutines, including ones started from fn, // will see the same root directory and file system attributes as the rest of // the process. func goInChroot(path string, fn func()) error { return unshare.Go( unix.CLONE_FS|unix.CLONE_NEWNS, func() error { // Make everything in new ns slave. // Don't use `private` here as this could race where the mountns gets a // reference to a mount and an unmount from the host does not propagate, // which could potentially cause transient errors for other operations, // even though this should be relatively small window here `slave` should // not cause any problems. if err := mount.MakeRSlave("/"); err != nil { return err } return mounttree.SwitchRoot(path) }, fn, ) } golang-github-moby-go-archive-0.2.0/chrootarchive/diff.go000066400000000000000000000014761512625003600233350ustar00rootroot00000000000000package chrootarchive import ( "io" "github.com/moby/go-archive" ) // ApplyLayer parses a diff in the standard layer format from `layer`, // and applies it to the directory `dest`. The stream `layer` can only be // uncompressed. // Returns the size in bytes of the contents of the layer. 
func ApplyLayer(dest string, layer io.Reader) (size int64, err error) { return applyLayerHandler(dest, layer, &archive.TarOptions{}, true) } // ApplyUncompressedLayer parses a diff in the standard layer format from // `layer`, and applies it to the directory `dest`. The stream `layer` // can only be uncompressed. // Returns the size in bytes of the contents of the layer. func ApplyUncompressedLayer(dest string, layer io.Reader, options *archive.TarOptions) (int64, error) { return applyLayerHandler(dest, layer, options, false) } golang-github-moby-go-archive-0.2.0/chrootarchive/diff_unix.go000066400000000000000000000016411512625003600243720ustar00rootroot00000000000000//go:build !windows package chrootarchive import ( "io" "path/filepath" "github.com/moby/sys/userns" "github.com/moby/go-archive" "github.com/moby/go-archive/compression" ) // applyLayerHandler parses a diff in the standard layer format from `layer`, and // applies it to the directory `dest`. Returns the size in bytes of the // contents of the layer. func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions, decompress bool) (size int64, _ error) { if decompress { decompressed, err := compression.DecompressStream(layer) if err != nil { return 0, err } defer decompressed.Close() layer = decompressed } if options == nil { options = &archive.TarOptions{} } if userns.RunningInUserNS() { options.InUserNS = true } if options.ExcludePatterns == nil { options.ExcludePatterns = []string{} } dest = filepath.Clean(dest) return doUnpackLayer(dest, layer, options) } golang-github-moby-go-archive-0.2.0/chrootarchive/diff_windows.go000066400000000000000000000015621512625003600251030ustar00rootroot00000000000000package chrootarchive import ( "fmt" "io" "path/filepath" "github.com/moby/go-archive" "github.com/moby/go-archive/compression" ) // applyLayerHandler parses a diff in the standard layer format from `layer`, and // applies it to the directory `dest`. 
Returns the size in bytes of the // contents of the layer. func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) { if decompress { decompressed, err := compression.DecompressStream(layer) if err != nil { return 0, err } defer decompressed.Close() layer = decompressed } // Ensure it is a Windows-style volume path dest = addLongPathPrefix(filepath.Clean(dest)) s, err := archive.UnpackLayer(dest, layer, nil) if err != nil { return 0, fmt.Errorf("ApplyLayer %s failed UnpackLayer to %s: %w", layer, dest, err) } return s, nil } golang-github-moby-go-archive-0.2.0/compression/000077500000000000000000000000001512625003600215675ustar00rootroot00000000000000golang-github-moby-go-archive-0.2.0/compression/compression.go000066400000000000000000000143761512625003600244720ustar00rootroot00000000000000package compression import ( "bufio" "bytes" "compress/bzip2" "compress/gzip" "context" "errors" "fmt" "io" "os" "os/exec" "strconv" "sync" "github.com/containerd/log" "github.com/klauspost/compress/zstd" ) // Compression is the state represents if compressed or not. type Compression int const ( None Compression = 0 // None represents the uncompressed. Bzip2 Compression = 1 // Bzip2 is bzip2 compression algorithm. Gzip Compression = 2 // Gzip is gzip compression algorithm. Xz Compression = 3 // Xz is xz compression algorithm. Zstd Compression = 4 // Zstd is zstd compression algorithm. ) // Extension returns the extension of a file that uses the specified compression algorithm. 
func (c *Compression) Extension() string { switch *c { case None: return "tar" case Bzip2: return "tar.bz2" case Gzip: return "tar.gz" case Xz: return "tar.xz" case Zstd: return "tar.zst" default: return "" } } type readCloserWrapper struct { io.Reader closer func() error } func (r *readCloserWrapper) Close() error { if r.closer != nil { return r.closer() } return nil } type nopWriteCloser struct { io.Writer } func (nopWriteCloser) Close() error { return nil } var bufioReader32KPool = &sync.Pool{ New: func() interface{} { return bufio.NewReaderSize(nil, 32*1024) }, } type bufferedReader struct { buf *bufio.Reader } func newBufferedReader(r io.Reader) *bufferedReader { buf := bufioReader32KPool.Get().(*bufio.Reader) buf.Reset(r) return &bufferedReader{buf} } func (r *bufferedReader) Read(p []byte) (int, error) { if r.buf == nil { return 0, io.EOF } n, err := r.buf.Read(p) if errors.Is(err, io.EOF) { r.buf.Reset(nil) bufioReader32KPool.Put(r.buf) r.buf = nil } return n, err } func (r *bufferedReader) Peek(n int) ([]byte, error) { if r.buf == nil { return nil, io.EOF } return r.buf.Peek(n) } // DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive. func DecompressStream(archive io.Reader) (io.ReadCloser, error) { buf := newBufferedReader(archive) bs, err := buf.Peek(10) if err != nil && !errors.Is(err, io.EOF) { // Note: we'll ignore any io.EOF error because there are some odd // cases where the layer.tar file will be empty (zero bytes) and // that results in an io.EOF from the Peek() call. So, in those // cases we'll just treat it as a non-compressed stream and // that means just create an empty layer. 
// See Issue 18170 return nil, err } switch compression := Detect(bs); compression { case None: return &readCloserWrapper{ Reader: buf, }, nil case Gzip: ctx, cancel := context.WithCancel(context.Background()) gzReader, err := gzipDecompress(ctx, buf) if err != nil { cancel() return nil, err } return &readCloserWrapper{ Reader: gzReader, closer: func() error { cancel() return gzReader.Close() }, }, nil case Bzip2: bz2Reader := bzip2.NewReader(buf) return &readCloserWrapper{ Reader: bz2Reader, }, nil case Xz: ctx, cancel := context.WithCancel(context.Background()) xzReader, err := xzDecompress(ctx, buf) if err != nil { cancel() return nil, err } return &readCloserWrapper{ Reader: xzReader, closer: func() error { cancel() return xzReader.Close() }, }, nil case Zstd: zstdReader, err := zstd.NewReader(buf) if err != nil { return nil, err } return &readCloserWrapper{ Reader: zstdReader, closer: func() error { zstdReader.Close() return nil }, }, nil default: return nil, fmt.Errorf("unsupported compression format (%d)", compression) } } // CompressStream compresses the dest with specified compression algorithm. func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) { switch compression { case None: return nopWriteCloser{dest}, nil case Gzip: return gzip.NewWriter(dest), nil case Bzip2: // archive/bzip2 does not support writing. 
return nil, errors.New("unsupported compression format: tar.bz2") case Xz: // there is no xz support at all // However, this is not a problem as docker only currently generates gzipped tars return nil, errors.New("unsupported compression format: tar.xz") default: return nil, fmt.Errorf("unsupported compression format (%d)", compression) } } func xzDecompress(ctx context.Context, archive io.Reader) (io.ReadCloser, error) { args := []string{"xz", "-d", "-c", "-q"} return cmdStream(exec.CommandContext(ctx, args[0], args[1:]...), archive) } func gzipDecompress(ctx context.Context, buf io.Reader) (io.ReadCloser, error) { if noPigzEnv := os.Getenv("MOBY_DISABLE_PIGZ"); noPigzEnv != "" { noPigz, err := strconv.ParseBool(noPigzEnv) if err != nil { log.G(ctx).WithError(err).Warn("invalid value in MOBY_DISABLE_PIGZ env var") } if noPigz { log.G(ctx).Debugf("Use of pigz is disabled due to MOBY_DISABLE_PIGZ=%s", noPigzEnv) return gzip.NewReader(buf) } } unpigzPath, err := exec.LookPath("unpigz") if err != nil { log.G(ctx).Debugf("unpigz binary not found, falling back to go gzip library") return gzip.NewReader(buf) } log.G(ctx).Debugf("Using %s to decompress", unpigzPath) return cmdStream(exec.CommandContext(ctx, unpigzPath, "-d", "-c"), buf) } // cmdStream executes a command, and returns its stdout as a stream. // If the command fails to run or doesn't complete successfully, an error // will be returned, including anything written on stderr. 
func cmdStream(cmd *exec.Cmd, in io.Reader) (io.ReadCloser, error) { reader, writer := io.Pipe() cmd.Stdin = in cmd.Stdout = writer var errBuf bytes.Buffer cmd.Stderr = &errBuf // Run the command and return the pipe if err := cmd.Start(); err != nil { return nil, err } // Ensure the command has exited before we clean anything up done := make(chan struct{}) // Copy stdout to the returned pipe go func() { if err := cmd.Wait(); err != nil { _ = writer.CloseWithError(fmt.Errorf("%w: %s", err, errBuf.String())) } else { _ = writer.Close() } close(done) }() return &readCloserWrapper{ Reader: reader, closer: func() error { // Close pipeR, and then wait for the command to complete before returning. We have to close pipeR first, as // cmd.Wait waits for any non-file stdout/stderr/stdin to close. err := reader.Close() <-done return err }, }, nil } golang-github-moby-go-archive-0.2.0/compression/compression_detect.go000066400000000000000000000031341512625003600260100ustar00rootroot00000000000000package compression import ( "bytes" "encoding/binary" ) const ( zstdMagicSkippableStart = 0x184D2A50 zstdMagicSkippableMask = 0xFFFFFFF0 ) var ( bzip2Magic = []byte{0x42, 0x5A, 0x68} gzipMagic = []byte{0x1F, 0x8B, 0x08} xzMagic = []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00} zstdMagic = []byte{0x28, 0xb5, 0x2f, 0xfd} ) type matcher = func([]byte) bool // Detect detects the compression algorithm of the source. func Detect(source []byte) Compression { compressionMap := map[Compression]matcher{ Bzip2: magicNumberMatcher(bzip2Magic), Gzip: magicNumberMatcher(gzipMagic), Xz: magicNumberMatcher(xzMagic), Zstd: zstdMatcher(), } for _, compression := range []Compression{Bzip2, Gzip, Xz, Zstd} { fn := compressionMap[compression] if fn(source) { return compression } } return None } func magicNumberMatcher(m []byte) matcher { return func(source []byte) bool { return bytes.HasPrefix(source, m) } } // zstdMatcher detects zstd compression algorithm. 
// Zstandard compressed data is made of one or more frames. // There are two frame formats defined by Zstandard: Zstandard frames and Skippable frames. // See https://datatracker.ietf.org/doc/html/rfc8878#section-3 for more details. func zstdMatcher() matcher { return func(source []byte) bool { if bytes.HasPrefix(source, zstdMagic) { // Zstandard frame return true } // skippable frame if len(source) < 8 { return false } // magic number from 0x184D2A50 to 0x184D2A5F. if binary.LittleEndian.Uint32(source[:4])&zstdMagicSkippableMask == zstdMagicSkippableStart { return true } return false } } golang-github-moby-go-archive-0.2.0/compression/compression_fuzzer_test.go000066400000000000000000000003001512625003600271140ustar00rootroot00000000000000package compression import ( "bytes" "testing" ) func FuzzDecompressStream(f *testing.F) { f.Fuzz(func(t *testing.T, data []byte) { _, _ = DecompressStream(bytes.NewReader(data)) }) } golang-github-moby-go-archive-0.2.0/compression/compression_test.go000066400000000000000000000173541512625003600255300ustar00rootroot00000000000000package compression import ( "compress/gzip" "fmt" "io" "os" "os/exec" "path/filepath" "reflect" "runtime" "strings" "testing" "time" "gotest.tools/v3/assert" ) func TestExtension(t *testing.T) { tests := []struct { compression Compression extension string }{ {compression: -1, extension: ""}, {compression: None, extension: "tar"}, {compression: Bzip2, extension: "tar.bz2"}, {compression: Gzip, extension: "tar.gz"}, {compression: Xz, extension: "tar.xz"}, {compression: Zstd, extension: "tar.zst"}, } for _, tc := range tests { if actual := tc.compression.Extension(); actual != tc.extension { t.Errorf("expected %s extension got %s", tc.extension, actual) } } } func TestDetectCompressionZstd(t *testing.T) { // test zstd compression without skippable frames. 
compressedData := []byte{ 0x28, 0xb5, 0x2f, 0xfd, // magic number of Zstandard frame: 0xFD2FB528 0x04, 0x00, 0x31, 0x00, 0x00, // frame header 0x64, 0x6f, 0x63, 0x6b, 0x65, 0x72, // data block "docker" 0x16, 0x0e, 0x21, 0xc3, // content checksum } compression := Detect(compressedData) if compression != Zstd { t.Fatal("Unexpected compression") } // test zstd compression with skippable frames. hex := []byte{ 0x50, 0x2a, 0x4d, 0x18, // magic number of skippable frame: 0x184D2A50 to 0x184D2A5F 0x04, 0x00, 0x00, 0x00, // frame size 0x5d, 0x00, 0x00, 0x00, // user data 0x28, 0xb5, 0x2f, 0xfd, // magic number of Zstandard frame: 0xFD2FB528 0x04, 0x00, 0x31, 0x00, 0x00, // frame header 0x64, 0x6f, 0x63, 0x6b, 0x65, 0x72, // data block "docker" 0x16, 0x0e, 0x21, 0xc3, // content checksum } compression = Detect(hex) if compression != Zstd { t.Fatal("Unexpected compression") } } // toUnixPath converts the given path to a unix-path, using forward-slashes, and // with the drive-letter replaced (e.g. "C:\temp\file.txt" becomes "/c/temp/file.txt"). // It is a no-op on non-Windows platforms. func toUnixPath(p string) string { if runtime.GOOS != "windows" { return p } p = filepath.ToSlash(p) // This should probably be more generic, but this suits our needs for now. if pth, ok := strings.CutPrefix(p, "C:/"); ok { return "/c/" + pth } if pth, ok := strings.CutPrefix(p, "D:/"); ok { return "/d/" + pth } return p } func testDecompressStream(t *testing.T, ext, compressCommand string) io.Reader { tmp := t.TempDir() archivePath := toUnixPath(filepath.Join(tmp, "archive")) cmd := exec.Command("sh", "-c", fmt.Sprintf("touch %[1]s && %[2]s %[1]s", archivePath, compressCommand)) output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Failed to create archive file (%v):\ncommand: %s\noutput: %s", err, cmd.String(), output) } filename := "archive." 
+ ext archive, err := os.Open(filepath.Join(tmp, filename)) if err != nil { t.Fatalf("Failed to open file %s: %v", filename, err) } defer archive.Close() r, err := DecompressStream(archive) if err != nil { t.Fatalf("Failed to decompress %s: %v", filename, err) } if _, err = io.ReadAll(r); err != nil { t.Fatalf("Failed to read the decompressed stream: %v ", err) } if err = r.Close(); err != nil { t.Fatalf("Failed to close the decompressed stream: %v ", err) } return r } func TestDecompressStreamGzip(t *testing.T) { testDecompressStream(t, "gz", "gzip -f") } func TestDecompressStreamBzip2(t *testing.T) { // TODO Windows: Failing with "bzip2.exe: Can't open input file (...)/archive: No such file or directory." if runtime.GOOS == "windows" { t.Skip("Failing on Windows CI machines") } testDecompressStream(t, "bz2", "bzip2 -f") } func TestDecompressStreamXz(t *testing.T) { if runtime.GOOS == "windows" { t.Skip("Xz not present in msys2") } testDecompressStream(t, "xz", "xz -f") } func TestDecompressStreamZstd(t *testing.T) { // TODO Windows: Failing with "zstd: can't stat (...)/archive : No such file or directory -- ignored" if runtime.GOOS == "windows" { t.Skip("Failing on Windows CI machines") } if _, err := exec.LookPath("zstd"); err != nil { t.Skip("zstd not installed") } testDecompressStream(t, "zst", "zstd -f") } func TestCompressStreamXzUnsupported(t *testing.T) { dest, err := os.Create(filepath.Join(t.TempDir(), "dest")) if err != nil { t.Fatalf("Fail to create the destination file") } defer dest.Close() _, err = CompressStream(dest, Xz) if err == nil { t.Fatalf("Should fail as xz is unsupported for compression format.") } } func TestCompressStreamBzip2Unsupported(t *testing.T) { dest, err := os.Create(filepath.Join(t.TempDir(), "dest")) if err != nil { t.Fatalf("Fail to create the destination file") } defer dest.Close() _, err = CompressStream(dest, Bzip2) if err == nil { t.Fatalf("Should fail as bzip2 is unsupported for compression format.") } } func 
TestCompressStreamInvalid(t *testing.T) { dest, err := os.Create(filepath.Join(t.TempDir(), "dest")) if err != nil { t.Fatalf("Fail to create the destination file") } defer dest.Close() _, err = CompressStream(dest, -1) if err == nil { t.Fatalf("Should fail as xz is unsupported for compression format.") } } func TestCmdStreamLargeStderr(t *testing.T) { cmd := exec.Command("sh", "-c", "dd if=/dev/zero bs=1k count=1000 of=/dev/stderr; echo hello") out, err := cmdStream(cmd, nil) if err != nil { t.Fatalf("Failed to start command: %s, output: %s", err, out) } errCh := make(chan error, 1) go func() { _, err := io.Copy(io.Discard, out) errCh <- err }() select { case err := <-errCh: if err != nil { t.Fatalf("Command should not have failed (err=%.100s...)", err) } case <-time.After(5 * time.Second): t.Fatalf("Command did not complete in 5 seconds; probable deadlock") } } func TestCmdStreamBad(t *testing.T) { // TODO Windows: Figure out why this is failing in CI but not locally if runtime.GOOS == "windows" { t.Skip("Failing on Windows CI machines") } badCmd := exec.Command("sh", "-c", "echo hello; echo >&2 error couldn\\'t reverse the phase pulser; exit 1") out, err := cmdStream(badCmd, nil) if err != nil { t.Fatalf("Failed to start command: %s", err) } if output, err := io.ReadAll(out); err == nil { t.Fatalf("Command should have failed") } else if err.Error() != "exit status 1: error couldn't reverse the phase pulser\n" { t.Fatalf("Wrong error value (%s)", err) } else if s := string(output); s != "hello\n" { t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output) } } func TestCmdStreamGood(t *testing.T) { cmd := exec.Command("sh", "-c", "echo hello; exit 0") out, err := cmdStream(cmd, nil) if err != nil { t.Fatal(err) } if output, err := io.ReadAll(out); err != nil { t.Fatalf("Command should not have failed (err=%s)", err) } else if s := string(output); s != "hello\n" { t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output) } } func 
TestDisablePigz(t *testing.T) { _, err := exec.LookPath("unpigz") if err != nil { t.Log("Test will not check full path when Pigz not installed") } t.Setenv("MOBY_DISABLE_PIGZ", "true") r := testDecompressStream(t, "gz", "gzip -f") // wrapped in closer to cancel contex and release buffer to pool wrapper := r.(*readCloserWrapper) assert.Equal(t, reflect.TypeOf(wrapper.Reader), reflect.TypeOf(&gzip.Reader{})) } func TestPigz(t *testing.T) { r := testDecompressStream(t, "gz", "gzip -f") // wrapper for buffered reader and context cancel wrapper := r.(*readCloserWrapper) _, err := exec.LookPath("unpigz") if err == nil { t.Log("Tested whether Pigz is used, as it installed") // For the command wait wrapper cmdWaitCloserWrapper := wrapper.Reader.(*readCloserWrapper) assert.Equal(t, reflect.TypeOf(cmdWaitCloserWrapper.Reader), reflect.TypeOf(&io.PipeReader{})) } else { t.Log("Tested whether Pigz is not used, as it not installed") assert.Equal(t, reflect.TypeOf(wrapper.Reader), reflect.TypeOf(&gzip.Reader{})) } } golang-github-moby-go-archive-0.2.0/copy.go000066400000000000000000000421621512625003600205340ustar00rootroot00000000000000package archive import ( "archive/tar" "context" "errors" "io" "os" "path/filepath" "strings" "sync" "github.com/containerd/log" ) // Errors used or returned by this file. 
var ( ErrNotDirectory = errors.New("not a directory") ErrDirNotExists = errors.New("no such directory") ErrCannotCopyDir = errors.New("cannot copy directory") ErrInvalidCopySource = errors.New("invalid copy source content") ) var copyPool = sync.Pool{ New: func() interface{} { s := make([]byte, 32*1024); return &s }, } func copyWithBuffer(dst io.Writer, src io.Reader) error { buf := copyPool.Get().(*[]byte) _, err := io.CopyBuffer(dst, src, *buf) copyPool.Put(buf) return err } // PreserveTrailingDotOrSeparator returns the given cleaned path (after // processing using any utility functions from the path or filepath stdlib // packages) and appends a trailing `/.` or `/` if its corresponding original // path (from before being processed by utility functions from the path or // filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned // path already ends in a `.` path segment, then another is not added. If the // clean path already ends in a path separator, then another is not added. func PreserveTrailingDotOrSeparator(cleanedPath string, originalPath string) string { // Ensure paths are in platform semantics cleanedPath = normalizePath(cleanedPath) originalPath = normalizePath(originalPath) if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) { if !hasTrailingPathSeparator(cleanedPath) { // Add a separator if it doesn't already end with one (a cleaned // path would only end in a separator if it is the root). cleanedPath += string(filepath.Separator) } cleanedPath += "." } if !hasTrailingPathSeparator(cleanedPath) && hasTrailingPathSeparator(originalPath) { cleanedPath += string(filepath.Separator) } return cleanedPath } // assertsDirectory returns whether the given path is // asserted to be a directory, i.e., the path ends with // a trailing '/' or `/.`, assuming a path separator of `/`. 
func assertsDirectory(path string) bool { return hasTrailingPathSeparator(path) || specifiesCurrentDir(path) } // hasTrailingPathSeparator returns whether the given // path ends with the system's path separator character. func hasTrailingPathSeparator(path string) bool { return len(path) > 0 && path[len(path)-1] == filepath.Separator } // specifiesCurrentDir returns whether the given path specifies // a "current directory", i.e., the last path segment is `.`. func specifiesCurrentDir(path string) bool { return filepath.Base(path) == "." } // SplitPathDirEntry splits the given path between its directory name and its // basename by first cleaning the path but preserves a trailing "." if the // original path specified the current directory. func SplitPathDirEntry(path string) (dir, base string) { cleanedPath := filepath.Clean(filepath.FromSlash(path)) if specifiesCurrentDir(path) { cleanedPath += string(os.PathSeparator) + "." } return filepath.Dir(cleanedPath), filepath.Base(cleanedPath) } // TarResource archives the resource described by the given CopyInfo to a Tar // archive. A non-nil error is returned if sourcePath does not exist or is // asserted to be a directory but exists as another type of file. // // This function acts as a convenient wrapper around TarWithOptions, which // requires a directory as the source path. TarResource accepts either a // directory or a file path and correctly sets the Tar options. func TarResource(sourceInfo CopyInfo) (content io.ReadCloser, err error) { return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName) } // TarResourceRebase is like TarResource but renames the first path element of // items in the resulting tar archive to match the given rebaseName if not "". 
func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, _ error) { sourcePath = normalizePath(sourcePath) if _, err := os.Lstat(sourcePath); err != nil { // Catches the case where the source does not exist or is not a // directory if asserted to be a directory, as this also causes an // error. return nil, err } // Separate the source path between its directory and // the entry in that directory which we are archiving. sourceDir, sourceBase := SplitPathDirEntry(sourcePath) opts := TarResourceRebaseOpts(sourceBase, rebaseName) log.G(context.TODO()).Debugf("copying %q from %q", sourceBase, sourceDir) return TarWithOptions(sourceDir, opts) } // TarResourceRebaseOpts does not preform the Tar, but instead just creates the rebase // parameters to be sent to TarWithOptions (the TarOptions struct) func TarResourceRebaseOpts(sourceBase string, rebaseName string) *TarOptions { filter := []string{sourceBase} return &TarOptions{ IncludeFiles: filter, IncludeSourceDir: true, RebaseNames: map[string]string{ sourceBase: rebaseName, }, } } // CopyInfo holds basic info about the source // or destination path of a copy operation. type CopyInfo struct { Path string Exists bool IsDir bool RebaseName string } // CopyInfoSourcePath stats the given path to create a CopyInfo // struct representing that resource for the source of an archive copy // operation. The given path should be an absolute local path. A source path // has all symlinks evaluated that appear before the last path separator ("/" // on Unix). As it is to be a copy source, the path must exist. 
func CopyInfoSourcePath(path string, followLink bool) (CopyInfo, error) {
	// normalize the file path and then evaluate the symbol link
	// we will use the target file instead of the symbol link if
	// followLink is set
	path = normalizePath(path)

	resolvedPath, rebaseName, err := ResolveHostSourcePath(path, followLink)
	if err != nil {
		return CopyInfo{}, err
	}

	// Lstat (not Stat) so a symlink as the final element is described as
	// itself when followLink is false.
	stat, err := os.Lstat(resolvedPath)
	if err != nil {
		return CopyInfo{}, err
	}

	return CopyInfo{
		Path:       resolvedPath,
		Exists:     true,
		IsDir:      stat.IsDir(),
		RebaseName: rebaseName,
	}, nil
}

// CopyInfoDestinationPath stats the given path to create a CopyInfo
// struct representing that resource for the destination of an archive copy
// operation. The given path should be an absolute local path.
func CopyInfoDestinationPath(path string) (info CopyInfo, err error) {
	maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot.
	path = normalizePath(path)
	originalPath := path

	stat, err := os.Lstat(path)

	if err == nil && stat.Mode()&os.ModeSymlink == 0 {
		// The path exists and is not a symlink.
		return CopyInfo{
			Path:   path,
			Exists: true,
			IsDir:  stat.IsDir(),
		}, nil
	}

	// While the path is a symlink.
	for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ {
		if n > maxSymlinkIter {
			// Don't follow symlinks more than this arbitrary number of times.
			return CopyInfo{}, errors.New("too many symlinks in " + originalPath)
		}

		// The path is a symbolic link. We need to evaluate it so that the
		// destination of the copy operation is the link target and not the
		// link itself. This is notably different than CopyInfoSourcePath which
		// only evaluates symlinks before the last appearing path separator.
		// Also note that it is okay if the last path element is a broken
		// symlink as the copy operation should create the target.
		var linkTarget string

		linkTarget, err = os.Readlink(path)
		if err != nil {
			return CopyInfo{}, err
		}

		if !filepath.IsAbs(linkTarget) {
			// Join with the parent directory.
			dstParent, _ := SplitPathDirEntry(path)
			linkTarget = filepath.Join(dstParent, linkTarget)
		}

		path = linkTarget
		stat, err = os.Lstat(path)
	}

	if err != nil {
		// It's okay if the destination path doesn't exist. We can still
		// continue the copy operation if the parent directory exists.
		if !os.IsNotExist(err) {
			return CopyInfo{}, err
		}

		// Ensure destination parent dir exists.
		dstParent, _ := SplitPathDirEntry(path)

		parentDirStat, err := os.Stat(dstParent)
		if err != nil {
			return CopyInfo{}, err
		}
		if !parentDirStat.IsDir() {
			return CopyInfo{}, ErrNotDirectory
		}

		// Destination does not exist: Exists and IsDir stay false.
		return CopyInfo{Path: path}, nil
	}

	// The path exists after resolving symlinks.
	return CopyInfo{
		Path:   path,
		Exists: true,
		IsDir:  stat.IsDir(),
	}, nil
}

// PrepareArchiveCopy prepares the given srcContent archive, which should
// contain the archived resource described by srcInfo, to the destination
// described by dstInfo. Returns the possibly modified content archive along
// with the path to the destination directory which it should be extracted to.
func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content io.ReadCloser, err error) {
	// Ensure in platform semantics
	srcInfo.Path = normalizePath(srcInfo.Path)
	dstInfo.Path = normalizePath(dstInfo.Path)

	// Separate the destination path between its directory and base
	// components in case the source archive contents need to be rebased.
	dstDir, dstBase := SplitPathDirEntry(dstInfo.Path)
	_, srcBase := SplitPathDirEntry(srcInfo.Path)

	switch {
	case dstInfo.Exists && dstInfo.IsDir:
		// The destination exists as a directory. No alteration
		// to srcContent is needed as its contents can be
		// simply extracted to the destination directory.
		return dstInfo.Path, io.NopCloser(srcContent), nil
	case dstInfo.Exists && srcInfo.IsDir:
		// The destination exists as some type of file and the source
		// content is a directory. This is an error condition since
		// you cannot copy a directory to an existing file location.
		return "", nil, ErrCannotCopyDir
	case dstInfo.Exists:
		// The destination exists as some type of file and the source content
		// is also a file. The source content entry will have to be renamed to
		// have a basename which matches the destination path's basename.
		if len(srcInfo.RebaseName) != 0 {
			srcBase = srcInfo.RebaseName
		}
		return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
	case srcInfo.IsDir:
		// The destination does not exist and the source content is an archive
		// of a directory. The archive should be extracted to the parent of
		// the destination path instead, and when it is, the directory that is
		// created as a result should take the name of the destination path.
		// The source content entries will have to be renamed to have a
		// basename which matches the destination path's basename.
		if len(srcInfo.RebaseName) != 0 {
			srcBase = srcInfo.RebaseName
		}
		return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
	case assertsDirectory(dstInfo.Path):
		// The destination does not exist and is asserted to be created as a
		// directory, but the source content is not a directory. This is an
		// error condition since you cannot create a directory from a file
		// source.
		return "", nil, ErrDirNotExists
	default:
		// The last remaining case is when the destination does not exist, is
		// not asserted to be a directory, and the source content is not an
		// archive of a directory. In this case, the destination file will need
		// to be created when the archive is extracted and the source content
		// entry will have to be renamed to have a basename which matches the
		// destination path's basename.
		if len(srcInfo.RebaseName) != 0 {
			srcBase = srcInfo.RebaseName
		}
		return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
	}
}

// RebaseArchiveEntries rewrites the given srcContent archive replacing
// an occurrence of oldBase with newBase at the beginning of entry names.
func RebaseArchiveEntries(srcContent io.Reader, oldBase, newBase string) io.ReadCloser {
	if oldBase == string(os.PathSeparator) {
		// If oldBase specifies the root directory, use an empty string as
		// oldBase instead so that newBase doesn't replace the path separator
		// that all paths will start with.
		oldBase = ""
	}

	rebased, w := io.Pipe()

	// The rewrite happens in a background goroutine; errors are propagated
	// to the reader side of the pipe via CloseWithError.
	go func() {
		srcTar := tar.NewReader(srcContent)
		rebasedTar := tar.NewWriter(w)

		for {
			hdr, err := srcTar.Next()
			if errors.Is(err, io.EOF) {
				// Signals end of archive.
				rebasedTar.Close()
				w.Close()
				return
			}
			if err != nil {
				w.CloseWithError(err)
				return
			}

			// srcContent tar stream, as served by TarWithOptions(), is
			// definitely in PAX format, but tar.Next() mistakenly guesses it
			// as USTAR, which creates a problem: if the newBase is >100
			// characters long, WriteHeader() returns an error like
			// "archive/tar: cannot encode header: Format specifies USTAR; and USTAR cannot encode Name=...".
			//
			// To fix, set the format to PAX here. See docker/for-linux issue #484.
			hdr.Format = tar.FormatPAX

			hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1)
			if hdr.Typeflag == tar.TypeLink {
				// Hard-link targets are archive-relative names, so they must
				// be rebased as well.
				hdr.Linkname = strings.Replace(hdr.Linkname, oldBase, newBase, 1)
			}

			if err = rebasedTar.WriteHeader(hdr); err != nil {
				w.CloseWithError(err)
				return
			}

			// Ignoring GoSec G110. See https://github.com/securego/gosec/pull/433
			// and https://cure53.de/pentest-report_opa.pdf, which recommends to
			// replace io.Copy with io.CopyN. The latter allows to specify the
			// maximum number of bytes that should be read. By properly defining
			// the limit, it can be assured that a GZip compression bomb cannot
			// easily cause a Denial-of-Service.
			// After reviewing with @tonistiigi and @cpuguy83, this should not
			// affect us, because here we do not read into memory, hence should
			// not be vulnerable to this code consuming memory.
			//nolint:gosec // G110: Potential DoS vulnerability via decompression bomb (gosec)
			if _, err = io.Copy(rebasedTar, srcTar); err != nil {
				w.CloseWithError(err)
				return
			}
		}
	}()

	return rebased
}

// CopyResource performs an archive copy from the given source path to the
// given destination path. The source path MUST exist and the destination
// path's parent directory must exist.
func CopyResource(srcPath, dstPath string, followLink bool) error {
	var (
		srcInfo CopyInfo
		err     error
	)

	// Ensure in platform semantics
	srcPath = normalizePath(srcPath)
	dstPath = normalizePath(dstPath)

	// Clean the source and destination paths.
	srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath)
	dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath)

	if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil {
		return err
	}

	content, err := TarResource(srcInfo)
	if err != nil {
		return err
	}
	defer content.Close()

	return CopyTo(content, srcInfo, dstPath)
}

// CopyTo handles extracting the given content whose
// entries should be sourced from srcInfo to dstPath.
func CopyTo(content io.Reader, srcInfo CopyInfo, dstPath string) error {
	// The destination path need not exist, but CopyInfoDestinationPath will
	// ensure that at least the parent directory exists.
	dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath))
	if err != nil {
		return err
	}

	dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo)
	if err != nil {
		return err
	}
	defer copyArchive.Close()

	options := &TarOptions{
		NoLchown:             true,
		NoOverwriteDirNonDir: true,
	}

	return Untar(copyArchive, dstDir, options)
}

// ResolveHostSourcePath decides the real path that needs to be copied, based
// on whether to follow a symbolic link or not: if followLink is true,
// resolvedPath is the link target of any symbolic link file; otherwise only
// symlinks in the parent directory are resolved, and a symbolic link as the
// final path element is returned as-is without resolving.
func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, _ error) {
	if followLink {
		var err error
		// Fully resolve the path, including its final element.
		resolvedPath, err = filepath.EvalSymlinks(path)
		if err != nil {
			return "", "", err
		}

		resolvedPath, rebaseName = GetRebaseName(path, resolvedPath)
	} else {
		dirPath, basePath := filepath.Split(path)

		// if not follow symbol link, then resolve symbol link of parent dir
		resolvedDirPath, err := filepath.EvalSymlinks(dirPath)
		if err != nil {
			return "", "", err
		}
		// resolvedDirPath will have been cleaned (no trailing path separators) so
		// we can manually join it with the base path element.
		resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath
		if hasTrailingPathSeparator(path) &&
			filepath.Base(path) != filepath.Base(resolvedPath) {
			// The resolution changed the final element; remember the original
			// basename so archive entries can be renamed back to it.
			rebaseName = filepath.Base(path)
		}
	}
	return resolvedPath, rebaseName, nil
}

// GetRebaseName normalizes and compares path and resolvedPath,
// return completed resolved path and rebased file name
func GetRebaseName(path, resolvedPath string) (string, string) {
	// linkTarget will have been cleaned (no trailing path separators and dot) so
	// we can manually join it with them
	var rebaseName string
	if specifiesCurrentDir(path) &&
		!specifiesCurrentDir(resolvedPath) {
		resolvedPath += string(filepath.Separator) + "."
	}

	if hasTrailingPathSeparator(path) &&
		!hasTrailingPathSeparator(resolvedPath) {
		resolvedPath += string(filepath.Separator)
	}

	if filepath.Base(path) != filepath.Base(resolvedPath) {
		// In the case where the path had a trailing separator and a symlink
		// evaluation has changed the last path component, we will need to
		// rebase the name in the archive that is being copied to match the
		// originally requested name.
rebaseName = filepath.Base(path) } return resolvedPath, rebaseName } golang-github-moby-go-archive-0.2.0/copy_unix.go000066400000000000000000000002151512625003600215700ustar00rootroot00000000000000//go:build !windows package archive import ( "path/filepath" ) func normalizePath(path string) string { return filepath.ToSlash(path) } golang-github-moby-go-archive-0.2.0/copy_unix_test.go000066400000000000000000000705321512625003600226400ustar00rootroot00000000000000//go:build !windows // TODO Windows: Some of these tests may be salvageable and portable to Windows. package archive import ( "bytes" "crypto/sha256" "encoding/hex" "errors" "fmt" "io" "os" "path/filepath" "strings" "testing" "gotest.tools/v3/assert" ) func removeAllPaths(paths ...string) { for _, path := range paths { os.RemoveAll(path) } } func getTestTempDirs(t *testing.T) (tmpDirA, tmpDirB string) { var err error tmpDirA, err = os.MkdirTemp("", "archive-copy-test") assert.NilError(t, err) tmpDirB, err = os.MkdirTemp("", "archive-copy-test") assert.NilError(t, err) return tmpDirA, tmpDirB } func isNotDir(err error) bool { return strings.Contains(err.Error(), "not a directory") } func joinTrailingSep(pathElements ...string) string { joined := filepath.Join(pathElements...) 
	return fmt.Sprintf("%s%c", joined, filepath.Separator)
}

// fileContentsEqual compares the two files by SHA-256 digest and returns a
// non-nil error if they differ.
func fileContentsEqual(t *testing.T, filenameA, filenameB string) error {
	t.Logf("checking for equal file contents: %q and %q\n", filenameA, filenameB)

	fileA, err := os.Open(filenameA)
	if err != nil {
		return err
	}
	defer fileA.Close()

	fileB, err := os.Open(filenameB)
	if err != nil {
		return err
	}
	defer fileB.Close()

	hasher := sha256.New()

	if _, err := io.Copy(hasher, fileA); err != nil {
		return err
	}

	hashA := hasher.Sum(nil)
	hasher.Reset()

	if _, err := io.Copy(hasher, fileB); err != nil {
		return err
	}

	hashB := hasher.Sum(nil)

	if !bytes.Equal(hashA, hashB) {
		return fmt.Errorf("file content hashes not equal - expected %s, got %s", hex.EncodeToString(hashA), hex.EncodeToString(hashB))
	}

	return nil
}

// dirContentsEqual returns a non-nil error if ChangesDirs detects any
// difference between the two directory trees.
func dirContentsEqual(t *testing.T, newDir, oldDir string) error {
	t.Logf("checking for equal directory contents: %q and %q", newDir, oldDir)

	c, err := ChangesDirs(newDir, oldDir)
	if err != nil {
		return err
	}

	if len(c) != 0 {
		return fmt.Errorf("expected no changes between directories, but got: %v", c)
	}

	return nil
}

// logDirContents walks dirPath and logs every entry, for test diagnostics.
func logDirContents(t *testing.T, dirPath string) {
	t.Logf("logging directory contents: %q", dirPath)

	err := filepath.WalkDir(dirPath, func(path string, info os.DirEntry, err error) error {
		if err != nil {
			t.Errorf("stat error for path %q: %s", path, err)
			return nil
		}

		if info.IsDir() {
			path = joinTrailingSep(path)
		}

		t.Logf("\t%s", path)

		return nil
	})
	assert.NilError(t, err)
}

func testCopyHelper(t *testing.T, srcPath, dstPath string) (err error) {
	t.Logf("copying from %q to %q (not follow symbol link)", srcPath, dstPath)

	return CopyResource(srcPath, dstPath, false)
}

func testCopyHelperFSym(t *testing.T, srcPath, dstPath string) (err error) {
	t.Logf("copying from %q to %q (follow symbol link)", srcPath, dstPath)

	return CopyResource(srcPath, dstPath, true)
}

// Basic assumptions about SRC and DST:
// 1. SRC must exist.
// 2. If SRC ends with a trailing separator, it must be a directory.
// 3. DST parent directory must exist.
// 4. If DST exists as a file, it must not end with a trailing separator.

// First get these easy error cases out of the way.

// Test for error when SRC does not exist.
func TestCopyErrSrcNotExists(t *testing.T) {
	tmpDirA, tmpDirB := getTestTempDirs(t)
	defer removeAllPaths(tmpDirA, tmpDirB)

	if _, err := CopyInfoSourcePath(filepath.Join(tmpDirA, "file1"), false); !os.IsNotExist(err) {
		t.Fatalf("expected IsNotExist error, but got %T: %s", err, err)
	}
}

// Test for error when SRC ends in a trailing
// path separator but it exists as a file.
func TestCopyErrSrcNotDir(t *testing.T) {
	tmpDirA, tmpDirB := getTestTempDirs(t)
	defer removeAllPaths(tmpDirA, tmpDirB)

	// Load A with some sample files and directories.
	createSampleDir(t, tmpDirA)

	if _, err := CopyInfoSourcePath(joinTrailingSep(tmpDirA, "file1"), false); !isNotDir(err) {
		t.Fatalf("expected IsNotDir error, but got %T: %s", err, err)
	}
}

// Test for error when SRC is a valid file or directory,
// but the DST parent directory does not exist.
func TestCopyErrDstParentNotExists(t *testing.T) {
	tmpDirA, tmpDirB := getTestTempDirs(t)
	defer removeAllPaths(tmpDirA, tmpDirB)

	// Load A with some sample files and directories.
	createSampleDir(t, tmpDirA)

	srcInfo := CopyInfo{Path: filepath.Join(tmpDirA, "file1"), Exists: true, IsDir: false}

	// Try with a file source.
	content, err := TarResource(srcInfo)
	if err != nil {
		t.Fatalf("unexpected error %T: %s", err, err)
	}
	defer content.Close()

	// Copy to a file whose parent does not exist.
	if err = CopyTo(content, srcInfo, filepath.Join(tmpDirB, "fakeParentDir", "file1")); err == nil {
		t.Fatal("expected IsNotExist error, but got nil instead")
	}

	if !os.IsNotExist(err) {
		t.Fatalf("expected IsNotExist error, but got %T: %s", err, err)
	}

	// Try with a directory source.
	srcInfo = CopyInfo{Path: filepath.Join(tmpDirA, "dir1"), Exists: true, IsDir: true}

	content, err = TarResource(srcInfo)
	if err != nil {
		t.Fatalf("unexpected error %T: %s", err, err)
	}
	defer content.Close()

	// Copy to a directory whose parent does not exist.
	if err = CopyTo(content, srcInfo, joinTrailingSep(tmpDirB, "fakeParentDir", "fakeDstDir")); err == nil {
		t.Fatal("expected IsNotExist error, but got nil instead")
	}

	if !os.IsNotExist(err) {
		t.Fatalf("expected IsNotExist error, but got %T: %s", err, err)
	}
}

// Test for error when DST ends in a trailing
// path separator but exists as a file.
func TestCopyErrDstNotDir(t *testing.T) {
	tmpDirA, tmpDirB := getTestTempDirs(t)
	defer removeAllPaths(tmpDirA, tmpDirB)

	// Load A and B with some sample files and directories.
	createSampleDir(t, tmpDirA)
	createSampleDir(t, tmpDirB)

	// Try with a file source.
	srcInfo := CopyInfo{Path: filepath.Join(tmpDirA, "file1"), Exists: true, IsDir: false}

	content, err := TarResource(srcInfo)
	if err != nil {
		t.Fatalf("unexpected error %T: %s", err, err)
	}
	defer content.Close()

	if err = CopyTo(content, srcInfo, joinTrailingSep(tmpDirB, "file1")); err == nil {
		t.Fatal("expected IsNotDir error, but got nil instead")
	}
	if !isNotDir(err) {
		t.Fatalf("expected IsNotDir error, but got %T: %s", err, err)
	}

	// Try with a directory source.
	srcInfo = CopyInfo{Path: filepath.Join(tmpDirA, "dir1"), Exists: true, IsDir: true}

	content, err = TarResource(srcInfo)
	if err != nil {
		t.Fatalf("unexpected error %T: %s", err, err)
	}
	defer content.Close()

	if err = CopyTo(content, srcInfo, joinTrailingSep(tmpDirB, "file1")); err == nil {
		t.Fatal("expected IsNotDir error, but got nil instead")
	}
	if !isNotDir(err) {
		t.Fatalf("expected IsNotDir error, but got %T: %s", err, err)
	}
}

// Test to check if CopyTo works with a long (>100 characters) destination file name.
// This is a regression (see https://github.com/docker/for-linux/issues/484).
func TestCopyLongDstFilename(t *testing.T) {
	// 101 characters: one more than the USTAR name-field limit of 100.
	const longName = "a_very_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx_long_filename_that_is_101_characters"
	tmpDirA, tmpDirB := getTestTempDirs(t)
	defer removeAllPaths(tmpDirA, tmpDirB)

	// Load A with some sample files and directories.
	createSampleDir(t, tmpDirA)

	srcInfo := CopyInfo{Path: filepath.Join(tmpDirA, "file1"), Exists: true, IsDir: false}

	content, err := TarResource(srcInfo)
	if err != nil {
		t.Fatalf("unexpected error %T: %s", err, err)
	}
	defer content.Close()

	err = CopyTo(content, srcInfo, filepath.Join(tmpDirB, longName))
	if err != nil {
		t.Fatalf("unexpected error %T: %s", err, err)
	}
}

// Possibilities are reduced to the remaining 10 cases:
//
//	case | srcIsDir | onlyDirContents | dstExists | dstIsDir | dstTrSep | action
//	===================================================================================================
//	 A   |  no      |  -              |  no       |  -       |  no      |  create file
//	 B   |  no      |  -              |  no       |  -       |  yes     |  error
//	 C   |  no      |  -              |  yes      |  no      |  -       |  overwrite file
//	 D   |  no      |  -              |  yes      |  yes     |  -       |  create file in dst dir
//	 E   |  yes     |  no             |  no       |  -       |  -       |  create dir, copy contents
//	 F   |  yes     |  no             |  yes      |  no      |  -       |  error
//	 G   |  yes     |  no             |  yes      |  yes     |  -       |  copy dir and contents
//	 H   |  yes     |  yes            |  no       |  -       |  -       |  create dir, copy contents
//	 I   |  yes     |  yes            |  yes      |  no      |  -       |  error
//	 J   |  yes     |  yes            |  yes      |  yes     |  -       |  copy dir contents
//
// A. SRC specifies a file and DST (no trailing path separator) doesn't exist.
//
// This should create a file with the name DST and copy the contents of the source
// file into it.
func TestCopyCaseA(t *testing.T) {
	tmpDirA, tmpDirB := getTestTempDirs(t)
	defer removeAllPaths(tmpDirA, tmpDirB)

	// Load A with some sample files and directories.
	createSampleDir(t, tmpDirA)

	srcPath := filepath.Join(tmpDirA, "file1")
	dstPath := filepath.Join(tmpDirB, "itWorks.txt")

	var err error

	if err = testCopyHelper(t, srcPath, dstPath); err != nil {
		t.Fatalf("unexpected error %T: %s", err, err)
	}

	err = fileContentsEqual(t, srcPath, dstPath)
	assert.NilError(t, err)
	os.Remove(dstPath)

	// Repeat for symlink sources (followLink=true); content must match the
	// link target, not the link itself.
	symlinkPath := filepath.Join(tmpDirA, "symlink3")
	symlinkPath1 := filepath.Join(tmpDirA, "symlink4")
	linkTarget := filepath.Join(tmpDirA, "file1")

	if err = testCopyHelperFSym(t, symlinkPath, dstPath); err != nil {
		t.Fatalf("unexpected error %T: %s", err, err)
	}

	err = fileContentsEqual(t, linkTarget, dstPath)
	assert.NilError(t, err)
	os.Remove(dstPath)
	if err = testCopyHelperFSym(t, symlinkPath1, dstPath); err != nil {
		t.Fatalf("unexpected error %T: %s", err, err)
	}

	err = fileContentsEqual(t, linkTarget, dstPath)
	assert.NilError(t, err)
}

// B. SRC specifies a file and DST (with trailing path separator) doesn't exist.
//
// This should cause an error because the copy operation cannot create a directory
// when copying a single file.
func TestCopyCaseB(t *testing.T) {
	tmpDirA, tmpDirB := getTestTempDirs(t)
	defer removeAllPaths(tmpDirA, tmpDirB)

	// Load A with some sample files and directories.
	createSampleDir(t, tmpDirA)

	srcPath := filepath.Join(tmpDirA, "file1")
	dstDir := joinTrailingSep(tmpDirB, "testDir")

	var err error

	if err = testCopyHelper(t, srcPath, dstDir); err == nil {
		t.Fatal("expected ErrDirNotExists error, but got nil instead")
	}

	if !errors.Is(err, ErrDirNotExists) {
		t.Fatalf("expected ErrDirNotExists error, but got %T: %s", err, err)
	}

	symlinkPath := filepath.Join(tmpDirA, "symlink3")

	if err = testCopyHelperFSym(t, symlinkPath, dstDir); err == nil {
		t.Fatal("expected ErrDirNotExists error, but got nil instead")
	}
	if !errors.Is(err, ErrDirNotExists) {
		t.Fatalf("expected ErrDirNotExists error, but got %T: %s", err, err)
	}
}

// C. SRC specifies a file and DST exists as a file.
//
// This should overwrite the file at DST with the contents of the source file.
func TestCopyCaseC(t *testing.T) {
	tmpDirA, tmpDirB := getTestTempDirs(t)
	defer removeAllPaths(tmpDirA, tmpDirB)

	// Load A and B with some sample files and directories.
	createSampleDir(t, tmpDirA)
	createSampleDir(t, tmpDirB)

	srcPath := filepath.Join(tmpDirA, "file1")
	dstPath := filepath.Join(tmpDirB, "file2")

	var err error

	// Ensure they start out different.
	if err = fileContentsEqual(t, srcPath, dstPath); err == nil {
		t.Fatal("expected different file contents")
	}

	if err = testCopyHelper(t, srcPath, dstPath); err != nil {
		t.Fatalf("unexpected error %T: %s", err, err)
	}

	err = fileContentsEqual(t, srcPath, dstPath)
	assert.NilError(t, err)
}

// C. Symbol link following version: SRC specifies a file and DST exists as a file.
//
// This should overwrite the file at DST with the contents of the source file.
func TestCopyCaseCFSym(t *testing.T) {
	tmpDirA, tmpDirB := getTestTempDirs(t)
	defer removeAllPaths(tmpDirA, tmpDirB)

	// Load A and B with some sample files and directories.
	createSampleDir(t, tmpDirA)
	createSampleDir(t, tmpDirB)

	symlinkPathBad := filepath.Join(tmpDirA, "symlink1")
	symlinkPath := filepath.Join(tmpDirA, "symlink3")
	linkTarget := filepath.Join(tmpDirA, "file1")
	dstPath := filepath.Join(tmpDirB, "file2")

	var err error

	// first to test broken link
	if err = testCopyHelperFSym(t, symlinkPathBad, dstPath); err == nil {
		t.Fatalf("unexpected error %T: %s", err, err)
	}

	// test symbol link -> symbol link -> target
	// Ensure they start out different.
	if err = fileContentsEqual(t, linkTarget, dstPath); err == nil {
		t.Fatal("expected different file contents")
	}

	if err = testCopyHelperFSym(t, symlinkPath, dstPath); err != nil {
		t.Fatalf("unexpected error %T: %s", err, err)
	}

	err = fileContentsEqual(t, linkTarget, dstPath)
	assert.NilError(t, err)
}

// D. SRC specifies a file and DST exists as a directory.
//
// This should place a copy of the source file inside it using the basename from
// SRC. Ensure this works whether DST has a trailing path separator or not.
func TestCopyCaseD(t *testing.T) {
	tmpDirA, tmpDirB := getTestTempDirs(t)
	defer removeAllPaths(tmpDirA, tmpDirB)

	// Load A and B with some sample files and directories.
	createSampleDir(t, tmpDirA)
	createSampleDir(t, tmpDirB)

	srcPath := filepath.Join(tmpDirA, "file1")
	dstDir := filepath.Join(tmpDirB, "dir1")
	dstPath := filepath.Join(dstDir, "file1")

	var err error

	// Ensure that dstPath doesn't exist.
	if _, err = os.Stat(dstPath); !os.IsNotExist(err) {
		t.Fatalf("did not expect dstPath %q to exist", dstPath)
	}

	if err = testCopyHelper(t, srcPath, dstDir); err != nil {
		t.Fatalf("unexpected error %T: %s", err, err)
	}

	err = fileContentsEqual(t, srcPath, dstPath)
	assert.NilError(t, err)

	// Now try again but using a trailing path separator for dstDir.

	if err = os.RemoveAll(dstDir); err != nil {
		t.Fatalf("unable to remove dstDir: %s", err)
	}

	if err = os.MkdirAll(dstDir, os.FileMode(0o755)); err != nil {
		t.Fatalf("unable to make dstDir: %s", err)
	}

	dstDir = joinTrailingSep(tmpDirB, "dir1")

	if err = testCopyHelper(t, srcPath, dstDir); err != nil {
		t.Fatalf("unexpected error %T: %s", err, err)
	}

	err = fileContentsEqual(t, srcPath, dstPath)
	assert.NilError(t, err)
}

// D. Symbol link following version: SRC specifies a file and DST exists as a directory.
//
// This should place a copy of the source file inside it using the basename from
// SRC. Ensure this works whether DST has a trailing path separator or not.
func TestCopyCaseDFSym(t *testing.T) {
	tmpDirA, tmpDirB := getTestTempDirs(t)
	defer removeAllPaths(tmpDirA, tmpDirB)

	// Load A and B with some sample files and directories.
	createSampleDir(t, tmpDirA)
	createSampleDir(t, tmpDirB)

	srcPath := filepath.Join(tmpDirA, "symlink4")
	linkTarget := filepath.Join(tmpDirA, "file1")
	dstDir := filepath.Join(tmpDirB, "dir1")
	dstPath := filepath.Join(dstDir, "symlink4")

	var err error

	// Ensure that dstPath doesn't exist.
	if _, err = os.Stat(dstPath); !os.IsNotExist(err) {
		t.Fatalf("did not expect dstPath %q to exist", dstPath)
	}

	if err = testCopyHelperFSym(t, srcPath, dstDir); err != nil {
		t.Fatalf("unexpected error %T: %s", err, err)
	}

	err = fileContentsEqual(t, linkTarget, dstPath)
	assert.NilError(t, err)

	// Now try again but using a trailing path separator for dstDir.

	if err = os.RemoveAll(dstDir); err != nil {
		t.Fatalf("unable to remove dstDir: %s", err)
	}

	if err = os.MkdirAll(dstDir, os.FileMode(0o755)); err != nil {
		t.Fatalf("unable to make dstDir: %s", err)
	}

	dstDir = joinTrailingSep(tmpDirB, "dir1")

	if err = testCopyHelperFSym(t, srcPath, dstDir); err != nil {
		t.Fatalf("unexpected error %T: %s", err, err)
	}

	err = fileContentsEqual(t, linkTarget, dstPath)
	assert.NilError(t, err)
}

// E. SRC specifies a directory and DST does not exist.
//
// This should create a directory at DST and copy the contents of the SRC directory
// into the DST directory. Ensure this works whether DST has a trailing path
// separator or not.
func TestCopyCaseE(t *testing.T) {
	tmpDirA, tmpDirB := getTestTempDirs(t)
	defer removeAllPaths(tmpDirA, tmpDirB)

	// Load A with some sample files and directories.
	createSampleDir(t, tmpDirA)

	srcDir := filepath.Join(tmpDirA, "dir1")
	dstDir := filepath.Join(tmpDirB, "testDir")

	var err error

	if err = testCopyHelper(t, srcDir, dstDir); err != nil {
		t.Fatalf("unexpected error %T: %s", err, err)
	}

	if err = dirContentsEqual(t, dstDir, srcDir); err != nil {
		t.Log("dir contents not equal")
		logDirContents(t, tmpDirA)
		logDirContents(t, tmpDirB)
		t.Fatal(err)
	}

	// Now try again but using a trailing path separator for dstDir.

	if err = os.RemoveAll(dstDir); err != nil {
		t.Fatalf("unable to remove dstDir: %s", err)
	}

	dstDir = joinTrailingSep(tmpDirB, "testDir")

	if err = testCopyHelper(t, srcDir, dstDir); err != nil {
		t.Fatalf("unexpected error %T: %s", err, err)
	}

	err = dirContentsEqual(t, dstDir, srcDir)
	assert.NilError(t, err)
}

// E. Symbol link following version: SRC specifies a directory and DST does not exist.
//
// This should create a directory at DST and copy the contents of the SRC directory
// into the DST directory. Ensure this works whether DST has a trailing path
// separator or not.
func TestCopyCaseEFSym(t *testing.T) {
	tmpDirA, tmpDirB := getTestTempDirs(t)
	defer removeAllPaths(tmpDirA, tmpDirB)

	// Load A with some sample files and directories.
	createSampleDir(t, tmpDirA)

	srcDir := filepath.Join(tmpDirA, "dirSymlink")
	linkTarget := filepath.Join(tmpDirA, "dir1")
	dstDir := filepath.Join(tmpDirB, "testDir")

	var err error

	if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil {
		t.Fatalf("unexpected error %T: %s", err, err)
	}

	if err = dirContentsEqual(t, dstDir, linkTarget); err != nil {
		t.Log("dir contents not equal")
		logDirContents(t, tmpDirA)
		logDirContents(t, tmpDirB)
		t.Fatal(err)
	}

	// Now try again but using a trailing path separator for dstDir.

	if err = os.RemoveAll(dstDir); err != nil {
		t.Fatalf("unable to remove dstDir: %s", err)
	}

	dstDir = joinTrailingSep(tmpDirB, "testDir")

	if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil {
		t.Fatalf("unexpected error %T: %s", err, err)
	}

	err = dirContentsEqual(t, dstDir, linkTarget)
	assert.NilError(t, err)
}

// F. SRC specifies a directory and DST exists as a file.
//
// This should cause an error as it is not possible to overwrite a file with a
// directory.
func TestCopyCaseF(t *testing.T) {
	tmpDirA, tmpDirB := getTestTempDirs(t)
	defer removeAllPaths(tmpDirA, tmpDirB)

	// Load A and B with some sample files and directories.
	createSampleDir(t, tmpDirA)
	createSampleDir(t, tmpDirB)

	srcDir := filepath.Join(tmpDirA, "dir1")
	symSrcDir := filepath.Join(tmpDirA, "dirSymlink")
	dstFile := filepath.Join(tmpDirB, "file1")

	var err error

	if err = testCopyHelper(t, srcDir, dstFile); err == nil {
		t.Fatal("expected ErrCannotCopyDir error, but got nil instead")
	}

	if !errors.Is(err, ErrCannotCopyDir) {
		t.Fatalf("expected ErrCannotCopyDir error, but got %T: %s", err, err)
	}

	// now test with symbol link
	if err = testCopyHelperFSym(t, symSrcDir, dstFile); err == nil {
		t.Fatal("expected ErrCannotCopyDir error, but got nil instead")
	}

	if !errors.Is(err, ErrCannotCopyDir) {
		t.Fatalf("expected ErrCannotCopyDir error, but got %T: %s", err, err)
	}
}

// G. SRC specifies a directory and DST exists as a directory.
//
// This should copy the SRC directory and all its contents to the DST directory.
// Ensure this works whether DST has a trailing path separator or not.
func TestCopyCaseG(t *testing.T) {
	tmpDirA, tmpDirB := getTestTempDirs(t)
	defer removeAllPaths(tmpDirA, tmpDirB)

	// Load A and B with some sample files and directories.
	createSampleDir(t, tmpDirA)
	createSampleDir(t, tmpDirB)

	srcDir := filepath.Join(tmpDirA, "dir1")
	dstDir := filepath.Join(tmpDirB, "dir2")
	resultDir := filepath.Join(dstDir, "dir1")

	var err error

	if err = testCopyHelper(t, srcDir, dstDir); err != nil {
		t.Fatalf("unexpected error %T: %s", err, err)
	}

	err = dirContentsEqual(t, resultDir, srcDir)
	assert.NilError(t, err)

	// Now try again but using a trailing path separator for dstDir.

	if err = os.RemoveAll(dstDir); err != nil {
		t.Fatalf("unable to remove dstDir: %s", err)
	}

	if err = os.MkdirAll(dstDir, os.FileMode(0o755)); err != nil {
		t.Fatalf("unable to make dstDir: %s", err)
	}

	dstDir = joinTrailingSep(tmpDirB, "dir2")

	if err = testCopyHelper(t, srcDir, dstDir); err != nil {
		t.Fatalf("unexpected error %T: %s", err, err)
	}

	err = dirContentsEqual(t, resultDir, srcDir)
	assert.NilError(t, err)
}

// G. Symbol link version: SRC specifies a directory and DST exists as a directory.
//
// This should copy the SRC directory and all its contents to the DST directory.
// Ensure this works whether DST has a trailing path separator or not.
func TestCopyCaseGFSym(t *testing.T) {
	tmpDirA, tmpDirB := getTestTempDirs(t)
	defer removeAllPaths(tmpDirA, tmpDirB)

	// Load A and B with some sample files and directories.
	createSampleDir(t, tmpDirA)
	createSampleDir(t, tmpDirB)

	srcDir := filepath.Join(tmpDirA, "dirSymlink")
	linkTarget := filepath.Join(tmpDirA, "dir1")
	dstDir := filepath.Join(tmpDirB, "dir2")
	resultDir := filepath.Join(dstDir, "dirSymlink")

	var err error

	if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil {
		t.Fatalf("unexpected error %T: %s", err, err)
	}

	err = dirContentsEqual(t, resultDir, linkTarget)
	assert.NilError(t, err)

	// Now try again but using a trailing path separator for dstDir.

	if err = os.RemoveAll(dstDir); err != nil {
		t.Fatalf("unable to remove dstDir: %s", err)
	}

	if err = os.MkdirAll(dstDir, os.FileMode(0o755)); err != nil {
		t.Fatalf("unable to make dstDir: %s", err)
	}

	dstDir = joinTrailingSep(tmpDirB, "dir2")

	if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil {
		t.Fatalf("unexpected error %T: %s", err, err)
	}

	err = dirContentsEqual(t, resultDir, linkTarget)
	assert.NilError(t, err)
}

// H. SRC specifies a directory's contents only and DST does not exist.
//
// This should create a directory at DST and copy the contents of the SRC
// directory (but not the directory itself) into the DST directory. Ensure
// this works whether DST has a trailing path separator or not.
func TestCopyCaseH(t *testing.T) {
	tmpDirA, tmpDirB := getTestTempDirs(t)
	defer removeAllPaths(tmpDirA, tmpDirB)

	// Load A with some sample files and directories.
	createSampleDir(t, tmpDirA)

	// The trailing "/." means "contents of dir1", not dir1 itself.
	srcDir := joinTrailingSep(tmpDirA, "dir1") + "."
	dstDir := filepath.Join(tmpDirB, "testDir")

	var err error

	if err = testCopyHelper(t, srcDir, dstDir); err != nil {
		t.Fatalf("unexpected error %T: %s", err, err)
	}

	if err = dirContentsEqual(t, dstDir, srcDir); err != nil {
		t.Log("dir contents not equal")
		logDirContents(t, tmpDirA)
		logDirContents(t, tmpDirB)
		t.Fatal(err)
	}

	// Now try again but using a trailing path separator for dstDir.

	if err = os.RemoveAll(dstDir); err != nil {
		t.Fatalf("unable to remove dstDir: %s", err)
	}

	dstDir = joinTrailingSep(tmpDirB, "testDir")

	if err = testCopyHelper(t, srcDir, dstDir); err != nil {
		t.Fatalf("unexpected error %T: %s", err, err)
	}

	if err = dirContentsEqual(t, dstDir, srcDir); err != nil {
		t.Log("dir contents not equal")
		logDirContents(t, tmpDirA)
		logDirContents(t, tmpDirB)
		t.Fatal(err)
	}
}

// H. Symbol link following version: SRC specifies a directory's contents only and DST does not exist.
//
// This should create a directory at DST and copy the contents of the SRC
// directory (but not the directory itself) into the DST directory. Ensure
// this works whether DST has a trailing path separator or not.
func TestCopyCaseHFSym(t *testing.T) {
	tmpDirA, tmpDirB := getTestTempDirs(t)
	defer removeAllPaths(tmpDirA, tmpDirB)

	// Load A with some sample files and directories.
	createSampleDir(t, tmpDirA)

	srcDir := joinTrailingSep(tmpDirA, "dirSymlink") + "."
	linkTarget := filepath.Join(tmpDirA, "dir1")
	dstDir := filepath.Join(tmpDirB, "testDir")

	var err error

	if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil {
		t.Fatalf("unexpected error %T: %s", err, err)
	}

	if err = dirContentsEqual(t, dstDir, linkTarget); err != nil {
		t.Log("dir contents not equal")
		logDirContents(t, tmpDirA)
		logDirContents(t, tmpDirB)
		t.Fatal(err)
	}

	// Now try again but using a trailing path separator for dstDir.
if err = os.RemoveAll(dstDir); err != nil { t.Fatalf("unable to remove dstDir: %s", err) } dstDir = joinTrailingSep(tmpDirB, "testDir") if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil { t.Fatalf("unexpected error %T: %s", err, err) } if err = dirContentsEqual(t, dstDir, linkTarget); err != nil { t.Log("dir contents not equal") logDirContents(t, tmpDirA) logDirContents(t, tmpDirB) t.Fatal(err) } } // I. SRC specifies a directory's contents only and DST exists as a file. // // This should cause an error as it is not possible to overwrite a file with a // directory. func TestCopyCaseI(t *testing.T) { tmpDirA, tmpDirB := getTestTempDirs(t) defer removeAllPaths(tmpDirA, tmpDirB) // Load A and B with some sample files and directories. createSampleDir(t, tmpDirA) createSampleDir(t, tmpDirB) srcDir := joinTrailingSep(tmpDirA, "dir1") + "." symSrcDir := filepath.Join(tmpDirB, "dirSymlink") dstFile := filepath.Join(tmpDirB, "file1") var err error if err = testCopyHelper(t, srcDir, dstFile); err == nil { t.Fatal("expected ErrCannotCopyDir error, but got nil instead") } if !errors.Is(err, ErrCannotCopyDir) { t.Fatalf("expected ErrCannotCopyDir error, but got %T: %s", err, err) } // now try with symbol link of dir if err = testCopyHelperFSym(t, symSrcDir, dstFile); err == nil { t.Fatal("expected ErrCannotCopyDir error, but got nil instead") } if !errors.Is(err, ErrCannotCopyDir) { t.Fatalf("expected ErrCannotCopyDir error, but got %T: %s", err, err) } } // J. SRC specifies a directory's contents only and DST exists as a directory. // // This should copy the contents of the SRC directory (but not the directory // itself) into the DST directory. Ensure this works whether DST has a // trailing path separator or not. func TestCopyCaseJ(t *testing.T) { tmpDirA, tmpDirB := getTestTempDirs(t) defer removeAllPaths(tmpDirA, tmpDirB) // Load A and B with some sample files and directories. 
createSampleDir(t, tmpDirA) createSampleDir(t, tmpDirB) srcDir := joinTrailingSep(tmpDirA, "dir1") + "." dstDir := filepath.Join(tmpDirB, "dir5") var err error // first to create an empty dir if err = os.MkdirAll(dstDir, os.FileMode(0o755)); err != nil { t.Fatalf("unable to make dstDir: %s", err) } if err = testCopyHelper(t, srcDir, dstDir); err != nil { t.Fatalf("unexpected error %T: %s", err, err) } err = dirContentsEqual(t, dstDir, srcDir) assert.NilError(t, err) // Now try again but using a trailing path separator for dstDir. if err = os.RemoveAll(dstDir); err != nil { t.Fatalf("unable to remove dstDir: %s", err) } if err = os.MkdirAll(dstDir, os.FileMode(0o755)); err != nil { t.Fatalf("unable to make dstDir: %s", err) } dstDir = joinTrailingSep(tmpDirB, "dir5") if err = testCopyHelper(t, srcDir, dstDir); err != nil { t.Fatalf("unexpected error %T: %s", err, err) } err = dirContentsEqual(t, dstDir, srcDir) assert.NilError(t, err) } // J. Symbol link following version: SRC specifies a directory's contents only and DST exists as a directory. // // This should copy the contents of the SRC directory (but not the directory // itself) into the DST directory. Ensure this works whether DST has a // trailing path separator or not. func TestCopyCaseJFSym(t *testing.T) { tmpDirA, tmpDirB := getTestTempDirs(t) defer removeAllPaths(tmpDirA, tmpDirB) // Load A and B with some sample files and directories. createSampleDir(t, tmpDirA) createSampleDir(t, tmpDirB) srcDir := joinTrailingSep(tmpDirA, "dirSymlink") + "." 
linkTarget := filepath.Join(tmpDirA, "dir1") dstDir := filepath.Join(tmpDirB, "dir5") var err error // first to create an empty dir if err = os.MkdirAll(dstDir, os.FileMode(0o755)); err != nil { t.Fatalf("unable to make dstDir: %s", err) } if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil { t.Fatalf("unexpected error %T: %s", err, err) } err = dirContentsEqual(t, dstDir, linkTarget) assert.NilError(t, err) // Now try again but using a trailing path separator for dstDir. if err = os.RemoveAll(dstDir); err != nil { t.Fatalf("unable to remove dstDir: %s", err) } if err = os.MkdirAll(dstDir, os.FileMode(0o755)); err != nil { t.Fatalf("unable to make dstDir: %s", err) } dstDir = joinTrailingSep(tmpDirB, "dir5") if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil { t.Fatalf("unexpected error %T: %s", err, err) } err = dirContentsEqual(t, dstDir, linkTarget) assert.NilError(t, err) } golang-github-moby-go-archive-0.2.0/copy_windows.go000066400000000000000000000001721512625003600223010ustar00rootroot00000000000000package archive import ( "path/filepath" ) func normalizePath(path string) string { return filepath.FromSlash(path) } golang-github-moby-go-archive-0.2.0/dev_freebsd.go000066400000000000000000000002441512625003600220250ustar00rootroot00000000000000//go:build freebsd package archive import "golang.org/x/sys/unix" func mknod(path string, mode uint32, dev uint64) error { return unix.Mknod(path, mode, dev) } golang-github-moby-go-archive-0.2.0/dev_unix.go000066400000000000000000000002661512625003600214020ustar00rootroot00000000000000//go:build !windows && !freebsd package archive import "golang.org/x/sys/unix" func mknod(path string, mode uint32, dev uint64) error { return unix.Mknod(path, mode, int(dev)) } golang-github-moby-go-archive-0.2.0/diff.go000066400000000000000000000173471512625003600205010ustar00rootroot00000000000000package archive import ( "archive/tar" "context" "errors" "fmt" "io" "os" "path/filepath" "runtime" "strings" 
"github.com/containerd/log" "github.com/moby/go-archive/compression" ) // UnpackLayer unpack `layer` to a `dest`. The stream `layer` can be // compressed or uncompressed. // Returns the size in bytes of the contents of the layer. func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, err error) { tr := tar.NewReader(layer) var dirs []*tar.Header unpackedPaths := make(map[string]struct{}) if options == nil { options = &TarOptions{} } if options.ExcludePatterns == nil { options.ExcludePatterns = []string{} } aufsTempdir := "" aufsHardlinks := make(map[string]*tar.Header) // Iterate through the files in the archive. for { hdr, err := tr.Next() if errors.Is(err, io.EOF) { // end of tar archive break } if err != nil { return 0, err } size += hdr.Size // Normalize name, for safety and for a simple is-root check hdr.Name = filepath.Clean(hdr.Name) // Windows does not support filenames with colons in them. Ignore // these files. This is not a problem though (although it might // appear that it is). Let's suppose a client is running docker pull. // The daemon it points to is Windows. Would it make sense for the // client to be doing a docker pull Ubuntu for example (which has files // with colons in the name under /usr/share/man/man3)? No, absolutely // not as it would really only make sense that they were pulling a // Windows image. However, for development, it is necessary to be able // to pull Linux images which are in the repository. // // TODO Windows. Once the registry is aware of what images are Windows- // specific or Linux-specific, this warning should be changed to an error // to cater for the situation where someone does manage to upload a Linux // image but have it tagged as Windows inadvertently. if runtime.GOOS == "windows" { if strings.Contains(hdr.Name, ":") { log.G(context.TODO()).Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name) continue } } // Ensure that the parent directory exists. 
err = createImpliedDirectories(dest, hdr, options) if err != nil { return 0, err } // Skip AUFS metadata dirs if strings.HasPrefix(hdr.Name, WhiteoutMetaPrefix) { // Regular files inside /.wh..wh.plnk can be used as hardlink targets // We don't want this directory, but we need the files in them so that // such hardlinks can be resolved. if strings.HasPrefix(hdr.Name, WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg { basename := filepath.Base(hdr.Name) aufsHardlinks[basename] = hdr if aufsTempdir == "" { if aufsTempdir, err = os.MkdirTemp(dest, "dockerplnk"); err != nil { return 0, err } defer os.RemoveAll(aufsTempdir) } if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, options); err != nil { return 0, err } } if hdr.Name != WhiteoutOpaqueDir { continue } } // #nosec G305 -- The joined path is guarded against path traversal. path := filepath.Join(dest, hdr.Name) rel, err := filepath.Rel(dest, path) if err != nil { return 0, err } // Note as these operations are platform specific, so must the slash be. if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) } base := filepath.Base(path) if strings.HasPrefix(base, WhiteoutPrefix) { dir := filepath.Dir(path) if base == WhiteoutOpaqueDir { _, err := os.Lstat(dir) if err != nil { return 0, err } err = filepath.WalkDir(dir, func(path string, info os.DirEntry, err error) error { if err != nil { if os.IsNotExist(err) { err = nil // parent was deleted } return err } if path == dir { return nil } if _, exists := unpackedPaths[path]; !exists { return os.RemoveAll(path) } return nil }) if err != nil { return 0, err } } else { originalBase := base[len(WhiteoutPrefix):] originalPath := filepath.Join(dir, originalBase) if err := os.RemoveAll(originalPath); err != nil { return 0, err } } } else { // If path exits we almost always just want to remove and replace it. 
// The only exception is when it is a directory *and* the file from // the layer is also a directory. Then we want to merge them (i.e. // just apply the metadata from the layer). if fi, err := os.Lstat(path); err == nil { if !fi.IsDir() || hdr.Typeflag != tar.TypeDir { if err := os.RemoveAll(path); err != nil { return 0, err } } } srcData := io.Reader(tr) srcHdr := hdr // Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so // we manually retarget these into the temporary files we extracted them into if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), WhiteoutLinkDir) { linkBasename := filepath.Base(hdr.Linkname) srcHdr = aufsHardlinks[linkBasename] if srcHdr == nil { return 0, errors.New("invalid aufs hardlink") } tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename)) if err != nil { return 0, err } defer tmpFile.Close() srcData = tmpFile } if err := remapIDs(options.IDMap, srcHdr); err != nil { return 0, err } if err := createTarFile(path, dest, srcHdr, srcData, options); err != nil { return 0, err } // Directory mtimes must be handled at the end to avoid further // file creation in them to modify the directory mtime if hdr.Typeflag == tar.TypeDir { dirs = append(dirs, hdr) } unpackedPaths[path] = struct{}{} } } for _, hdr := range dirs { // #nosec G305 -- The header was checked for path traversal before it was appended to the dirs slice. path := filepath.Join(dest, hdr.Name) if err := chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { return 0, err } } return size, nil } // ApplyLayer parses a diff in the standard layer format from `layer`, // and applies it to the directory `dest`. The stream `layer` can be // compressed or uncompressed. // Returns the size in bytes of the contents of the layer. 
func ApplyLayer(dest string, layer io.Reader) (int64, error) { return applyLayerHandler(dest, layer, &TarOptions{}, true) } // ApplyUncompressedLayer parses a diff in the standard layer format from // `layer`, and applies it to the directory `dest`. The stream `layer` // can only be uncompressed. // Returns the size in bytes of the contents of the layer. func ApplyUncompressedLayer(dest string, layer io.Reader, options *TarOptions) (int64, error) { return applyLayerHandler(dest, layer, options, false) } // IsEmpty checks if the tar archive is empty (doesn't contain any entries). func IsEmpty(rd io.Reader) (bool, error) { decompRd, err := compression.DecompressStream(rd) if err != nil { return true, fmt.Errorf("failed to decompress archive: %w", err) } defer decompRd.Close() tarReader := tar.NewReader(decompRd) if _, err := tarReader.Next(); err != nil { if errors.Is(err, io.EOF) { return true, nil } return false, fmt.Errorf("failed to read next archive header: %w", err) } return false, nil } // do the bulk load of ApplyLayer, but allow for not calling DecompressStream func applyLayerHandler(dest string, layer io.Reader, options *TarOptions, decompress bool) (int64, error) { dest = filepath.Clean(dest) // We need to be able to set any perms restore := overrideUmask(0) defer restore() if decompress { decompLayer, err := compression.DecompressStream(layer) if err != nil { return 0, err } defer decompLayer.Close() layer = decompLayer } return UnpackLayer(dest, layer, options) } golang-github-moby-go-archive-0.2.0/diff_test.go000066400000000000000000000162071512625003600215320ustar00rootroot00000000000000package archive import ( "archive/tar" "io" "os" "path/filepath" "reflect" "testing" "github.com/moby/go-archive/compression" ) func TestApplyLayerInvalidFilenames(t *testing.T) { for i, headers := range [][]*tar.Header{ { { Name: "../victim/dotdot", Typeflag: tar.TypeReg, Mode: 0o644, }, }, { { // Note the leading slash Name: "/../victim/slash-dotdot", Typeflag: 
tar.TypeReg, Mode: 0o644, }, }, } { if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidFilenames", headers); err != nil { t.Fatalf("i=%d. %v", i, err) } } } func TestApplyLayerInvalidHardlink(t *testing.T) { for i, headers := range [][]*tar.Header{ { // try reading victim/hello (../) { Name: "dotdot", Typeflag: tar.TypeLink, Linkname: "../victim/hello", Mode: 0o644, }, }, { // try reading victim/hello (/../) { Name: "slash-dotdot", Typeflag: tar.TypeLink, // Note the leading slash Linkname: "/../victim/hello", Mode: 0o644, }, }, { // try writing victim/file { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0o755, }, { Name: "loophole-victim/file", Typeflag: tar.TypeReg, Mode: 0o644, }, }, { // try reading victim/hello (hardlink, symlink) { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0o755, }, { Name: "symlink", Typeflag: tar.TypeSymlink, Linkname: "loophole-victim/hello", Mode: 0o644, }, }, { // Try reading victim/hello (hardlink, hardlink) { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0o755, }, { Name: "hardlink", Typeflag: tar.TypeLink, Linkname: "loophole-victim/hello", Mode: 0o644, }, }, { // Try removing victim directory (hardlink) { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0o755, }, { Name: "loophole-victim", Typeflag: tar.TypeReg, Mode: 0o644, }, }, } { if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidHardlink", headers); err != nil { t.Fatalf("i=%d. 
%v", i, err) } } } func TestApplyLayerInvalidSymlink(t *testing.T) { for i, headers := range [][]*tar.Header{ { // try reading victim/hello (../) { Name: "dotdot", Typeflag: tar.TypeSymlink, Linkname: "../victim/hello", Mode: 0o644, }, }, { // try reading victim/hello (/../) { Name: "slash-dotdot", Typeflag: tar.TypeSymlink, // Note the leading slash Linkname: "/../victim/hello", Mode: 0o644, }, }, { // try writing victim/file { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0o755, }, { Name: "loophole-victim/file", Typeflag: tar.TypeReg, Mode: 0o644, }, }, { // try reading victim/hello (symlink, symlink) { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0o755, }, { Name: "symlink", Typeflag: tar.TypeSymlink, Linkname: "loophole-victim/hello", Mode: 0o644, }, }, { // try reading victim/hello (symlink, hardlink) { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0o755, }, { Name: "hardlink", Typeflag: tar.TypeLink, Linkname: "loophole-victim/hello", Mode: 0o644, }, }, { // try removing victim directory (symlink) { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0o755, }, { Name: "loophole-victim", Typeflag: tar.TypeReg, Mode: 0o644, }, }, } { if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidSymlink", headers); err != nil { t.Fatalf("i=%d. 
%v", i, err) } } } func TestApplyLayerWhiteouts(t *testing.T) { wd, err := os.MkdirTemp("", "graphdriver-test-whiteouts") if err != nil { return } defer os.RemoveAll(wd) base := []string{ ".baz", "bar/", "bar/bax", "bar/bay/", "baz", "foo/", "foo/.abc", "foo/.bcd/", "foo/.bcd/a", "foo/cde/", "foo/cde/def", "foo/cde/efg", "foo/fgh", "foobar", } type tcase struct { change, expected []string } tcases := []tcase{ { change: base, expected: base, }, { change: []string{ ".bay", ".wh.baz", "foo/", "foo/.bce", "foo/.wh..wh..opq", "foo/cde/", "foo/cde/efg", }, expected: []string{ ".bay", ".baz", "bar/", "bar/bax", "bar/bay/", "foo/", "foo/.bce", "foo/cde/", "foo/cde/efg", "foobar", }, }, { change: []string{ ".bay", ".wh..baz", ".wh.foobar", "foo/", "foo/.abc", "foo/.wh.cde", "bar/", }, expected: []string{ ".bay", "bar/", "bar/bax", "bar/bay/", "foo/", "foo/.abc", "foo/.bce", }, }, { change: []string{ ".abc", ".wh..wh..opq", "foobar", }, expected: []string{ ".abc", "foobar", }, }, } for i, tc := range tcases { l, err := makeTestLayer(tc.change) if err != nil { t.Fatal(err) } _, err = UnpackLayer(wd, l, nil) if err != nil { t.Fatal(err) } err = l.Close() if err != nil { t.Fatal(err) } paths, err := readDirContents(wd) if err != nil { t.Fatal(err) } if !reflect.DeepEqual(tc.expected, paths) { t.Fatalf("invalid files for layer %d: expected %q, got %q", i, tc.expected, paths) } } } type readCloserWrapper struct { io.Reader closer func() error } func (r *readCloserWrapper) Close() error { if r.closer != nil { return r.closer() } return nil } func makeTestLayer(paths []string) (_ io.ReadCloser, retErr error) { tmpDir, err := os.MkdirTemp("", "graphdriver-test-mklayer") if err != nil { return nil, err } defer func() { if retErr != nil { os.RemoveAll(tmpDir) } }() for _, p := range paths { // Source files are always in Unix format. But we use filepath on // creation to be platform agnostic. 
if p[len(p)-1] == '/' { if err = os.MkdirAll(filepath.Join(tmpDir, p), 0o700); err != nil { return nil, err } } else { if err = os.WriteFile(filepath.Join(tmpDir, p), nil, 0o600); err != nil { return nil, err } } } archive, err := Tar(tmpDir, compression.None) if err != nil { return nil, err } return &readCloserWrapper{ Reader: archive, closer: func() error { err := archive.Close() os.RemoveAll(tmpDir) return err }, }, nil } func readDirContents(root string) ([]string, error) { var files []string err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error { if err != nil { return err } if path == root { return nil } rel, err := filepath.Rel(root, path) if err != nil { return err } if info.IsDir() { rel = rel + string(filepath.Separator) } // Append in Unix semantics files = append(files, filepath.ToSlash(rel)) return nil }) if err != nil { return nil, err } return files, nil } golang-github-moby-go-archive-0.2.0/diff_unix.go000066400000000000000000000011721512625003600215310ustar00rootroot00000000000000//go:build !windows package archive import "golang.org/x/sys/unix" // overrideUmask sets current process's file mode creation mask to newmask // and returns a function to restore it. // // WARNING for readers stumbling upon this code. Changing umask in a multi- // threaded environment isn't safe. Don't use this without understanding the // risks, and don't export this function for others to use (we shouldn't even // be using this ourself). // // FIXME(thaJeztah): we should get rid of these hacks if possible. func overrideUmask(newMask int) func() { oldMask := unix.Umask(newMask) return func() { unix.Umask(oldMask) } } golang-github-moby-go-archive-0.2.0/diff_windows.go000066400000000000000000000001661512625003600222420ustar00rootroot00000000000000package archive // overrideUmask is a no-op on windows. 
func overrideUmask(newmask int) func() { return func() {} } golang-github-moby-go-archive-0.2.0/example_changes.go000066400000000000000000000043321512625003600227020ustar00rootroot00000000000000//go:build ignore // Simple tool to create an archive stream from an old and new directory // // By default it will stream the comparison of two temporary directories with junk files package main import ( "flag" "fmt" "io" "os" "path" "github.com/sirupsen/logrus" "github.com/moby/go-archive" ) var ( flDebug = flag.Bool("D", false, "debugging output") flNewDir = flag.String("newdir", "", "") flOldDir = flag.String("olddir", "", "") log = log.G(ctx).New() ) func main() { flag.Usage = func() { fmt.Println("Produce a tar from comparing two directory paths. By default a demo tar is created of around 200 files (including hardlinks)") fmt.Printf("%s [OPTIONS]\n", os.Args[0]) flag.PrintDefaults() } flag.Parse() log.Out = os.Stderr if (len(os.Getenv("DEBUG")) > 0) || *flDebug { log.G(ctx).SetLevel(logrus.DebugLevel) } var newDir, oldDir string if len(*flNewDir) == 0 { var err error newDir, err = os.MkdirTemp("", "docker-test-newDir") if err != nil { log.Fatal(err) } defer os.RemoveAll(newDir) if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil { log.Fatal(err) } } else { newDir = *flNewDir } if len(*flOldDir) == 0 { oldDir, err := os.MkdirTemp("", "docker-test-oldDir") if err != nil { log.Fatal(err) } defer os.RemoveAll(oldDir) } else { oldDir = *flOldDir } changes, err := archive.ChangesDirs(newDir, oldDir) if err != nil { log.Fatal(err) } a, err := archive.ExportChanges(newDir, changes) if err != nil { log.Fatal(err) } defer a.Close() i, err := io.Copy(os.Stdout, a) if err != nil && !errors.Is(err, io.EOF) { log.Fatal(err) } fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i) } func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) { fileData := []byte("fooo") for n := 0; n < numberOfFiles; n++ { fileName := 
fmt.Sprintf("file-%d", n) if err := os.WriteFile(path.Join(targetPath, fileName), fileData, 0o700); err != nil { return 0, err } if makeLinks { if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil { return 0, err } } } totalSize := numberOfFiles * len(fileData) return totalSize, nil } golang-github-moby-go-archive-0.2.0/fuzz_test.go000066400000000000000000000011341512625003600216110ustar00rootroot00000000000000package archive import ( "bytes" "testing" fuzz "github.com/AdaLogics/go-fuzz-headers" ) func FuzzUntar(f *testing.F) { f.Fuzz(func(t *testing.T, data []byte) { ff := fuzz.NewConsumer(data) tarBytes, err := ff.TarBytes() if err != nil { return } options := &TarOptions{} err = ff.GenerateStruct(options) if err != nil { return } tmpDir := t.TempDir() _ = Untar(bytes.NewReader(tarBytes), tmpDir, options) }) } func FuzzApplyLayer(f *testing.F) { f.Fuzz(func(t *testing.T, data []byte) { tmpDir := t.TempDir() _, _ = ApplyLayer(tmpDir, bytes.NewReader(data)) }) } golang-github-moby-go-archive-0.2.0/go.mod000066400000000000000000000011441512625003600203340ustar00rootroot00000000000000module github.com/moby/go-archive go 1.23.0 require ( github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 github.com/containerd/log v0.1.0 github.com/klauspost/compress v1.18.2 github.com/moby/patternmatcher v0.6.0 github.com/moby/sys/mount v0.3.4 github.com/moby/sys/mountinfo v0.7.2 github.com/moby/sys/reexec v0.1.0 github.com/moby/sys/sequential v0.6.0 github.com/moby/sys/user v0.4.0 github.com/moby/sys/userns v0.1.0 golang.org/x/sys v0.31.0 gotest.tools/v3 v3.5.2 ) require ( github.com/google/go-cmp v0.7.0 // indirect github.com/sirupsen/logrus v1.9.3 // indirect ) golang-github-moby-go-archive-0.2.0/go.sum000066400000000000000000000071111512625003600203610ustar00rootroot00000000000000github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 h1:He8afgbRMd7mFxO99hRNu+6tazq8nFF9lIwo9JFroBk= 
github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/klauspost/compress v1.18.2 h1:iiPHWW0YrcFgpBYhsA6D1+fqHssJscY/Tm/y2Uqnapk= github.com/klauspost/compress v1.18.2/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= github.com/moby/sys/mount v0.3.4 h1:yn5jq4STPztkkzSKpZkLcmjue+bZJ0u2AuQY1iNI1Ww= github.com/moby/sys/mount v0.3.4/go.mod h1:KcQJMbQdJHPlq5lcYT+/CjatWM4PuxKe+XLSVS4J6Os= github.com/moby/sys/mountinfo v0.7.2 h1:1shs6aH5s4o5H2zQLn796ADW1wMrIwHsyJ2v9KouLrg= github.com/moby/sys/mountinfo v0.7.2/go.mod h1:1YOa8w8Ih7uW0wALDUgT1dTTSBrZ+HiBLGws92L2RU4= github.com/moby/sys/reexec v0.1.0 h1:RrBi8e0EBTLEgfruBOFcxtElzRGTEUkeIFaVXgU7wok= github.com/moby/sys/reexec v0.1.0/go.mod h1:EqjBg8F3X7iZe5pU6nRZnYCMUTXoxsjiIfHup5wYIN8= github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU= github.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko= github.com/moby/sys/user v0.4.0 h1:jhcMKit7SA80hivmFJcbB1vqmw//wU61Zdui2eQXuMs= github.com/moby/sys/user v0.4.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs= github.com/moby/sys/userns v0.1.0 
h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g= github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= golang-github-moby-go-archive-0.2.0/internal/000077500000000000000000000000001512625003600210425ustar00rootroot00000000000000golang-github-moby-go-archive-0.2.0/internal/mounttree/000077500000000000000000000000001512625003600230645ustar00rootroot00000000000000golang-github-moby-go-archive-0.2.0/internal/mounttree/switchroot_linux.go000066400000000000000000000055751512625003600270530ustar00rootroot00000000000000package mounttree import ( 
"fmt" "os" "path/filepath" "github.com/moby/sys/mount" "github.com/moby/sys/mountinfo" "golang.org/x/sys/unix" ) // SwitchRoot changes path to be the root of the mount tree and changes the // current working directory to the new root. // // This function bind-mounts onto path; it is the caller's responsibility to set // the desired propagation mode of path's parent mount beforehand to prevent // unwanted propagation into different mount namespaces. func SwitchRoot(path string) error { if mounted, _ := mountinfo.Mounted(path); !mounted { if err := mount.Mount(path, path, "bind", "rbind,rw"); err != nil { return realChroot(path) } } // setup oldRoot for pivot_root pivotDir, err := os.MkdirTemp(path, ".pivot_root") if err != nil { return fmt.Errorf("error setting up pivot dir: %w", err) } var mounted bool defer func() { if mounted { // make sure pivotDir is not mounted before we try to remove it if errCleanup := unix.Unmount(pivotDir, unix.MNT_DETACH); errCleanup != nil { if err == nil { err = errCleanup } return } } errCleanup := os.Remove(pivotDir) // pivotDir doesn't exist if pivot_root failed and chroot+chdir was successful // because we already cleaned it up on failed pivot_root if errCleanup != nil && !os.IsNotExist(errCleanup) { errCleanup = fmt.Errorf("error cleaning up after pivot: %w", errCleanup) if err == nil { err = errCleanup } } }() if err := unix.PivotRoot(path, pivotDir); err != nil { // If pivot fails, fall back to the normal chroot after cleaning up temp dir if err := os.Remove(pivotDir); err != nil { return fmt.Errorf("error cleaning up after failed pivot: %w", err) } return realChroot(path) } mounted = true // This is the new path for where the old root (prior to the pivot) has been moved to // This dir contains the rootfs of the caller, which we need to remove so it is not visible during extraction pivotDir = filepath.Join("/", filepath.Base(pivotDir)) if err := unix.Chdir("/"); err != nil { return fmt.Errorf("error changing to new root: %w", 
err) } // Make the pivotDir (where the old root lives) private so it can be unmounted without propagating to the host if err := unix.Mount("", pivotDir, "", unix.MS_PRIVATE|unix.MS_REC, ""); err != nil { return fmt.Errorf("error making old root private after pivot: %w", err) } // Now unmount the old root so it's no longer visible from the new root if err := unix.Unmount(pivotDir, unix.MNT_DETACH); err != nil { return fmt.Errorf("error while unmounting old root after pivot: %w", err) } mounted = false return nil } func realChroot(path string) error { if err := unix.Chroot(path); err != nil { return fmt.Errorf("error after fallback to chroot: %w", err) } if err := unix.Chdir("/"); err != nil { return fmt.Errorf("error changing to new root after chroot: %w", err) } return nil } golang-github-moby-go-archive-0.2.0/internal/unshare/000077500000000000000000000000001512625003600225075ustar00rootroot00000000000000golang-github-moby-go-archive-0.2.0/internal/unshare/unshare_linux.go000066400000000000000000000153041512625003600257250ustar00rootroot00000000000000package unshare import ( "fmt" "os" "runtime" "golang.org/x/sys/unix" ) func init() { // The startup thread of a process is special in a few different ways. // Most pertinent to the discussion at hand, any per-thread kernel state // reflected in the /proc/[pid]/ directory for a process is taken from // the state of the startup thread. Same goes for /proc/self/; it shows // the state of the current process' startup thread, no matter which // thread the files are being opened from. For most programs this is a // distinction without a difference as the kernel state, such as the // mount namespace and current working directory, is shared among (and // kept synchronized across) all threads of a process. But things start // to break down once threads start unsharing and modifying parts of // their kernel state. // // The Go runtime schedules goroutines to execute on the startup thread, // same as any other. 
How this could be problematic is best illustrated // with a concrete example. Consider what happens if a call to // Go(unix.CLONE_NEWNS, ...) spawned a goroutine which gets scheduled // onto the startup thread. The thread's mount namespace will be // unshared and modified. The contents of the /proc/[pid]/mountinfo file // will then describe the mount tree of the unshared namespace, not the // namespace of any other thread. It will remain this way until the // process exits. (The startup thread is special in another way: exiting // it puts the process into a "non-waitable zombie" state. To avoid this // fate, the Go runtime parks the thread instead of exiting if a // goroutine returns while locked to the startup thread. More // information can be found in the Go runtime sources: // `go doc -u -src runtime.mexit`.) The github.com/moby/sys/mountinfo // package reads from /proc/self/mountinfo, so will read the mount tree // for the wrong namespace if the startup thread has had its mount // namespace unshared! The /proc/thread-self/ directory, introduced in // Linux 3.17, is one potential solution to this problem, but every // package which opens files in /proc/self/ would need to be updated, // and fallbacks to /proc/self/task/[tid]/ would be required to support // older kernels. Overlooking any reference to /proc/self/ would // manifest as stochastically-reproducible bugs, so this is far from an // ideal solution. // // Reading from /proc/self/ would not be a problem if we could prevent // the per-thread state of the startup thread from being modified // nondeterministically in the first place. We can accomplish this // simply by locking the main() function to the startup thread! Doing so // excludes any other goroutine from being scheduled on the thread. runtime.LockOSThread() } // reversibleSetnsFlags maps the unshare(2) flags whose effects can be fully // reversed using setns(2). 
The values are the basenames of the corresponding // /proc/self/task/[tid]/ns/ magic symlinks to use to save and restore the // state. var reversibleSetnsFlags = map[int]string{ unix.CLONE_NEWCGROUP: "cgroup", unix.CLONE_NEWNET: "net", unix.CLONE_NEWUTS: "uts", unix.CLONE_NEWPID: "pid", unix.CLONE_NEWTIME: "time", // The following CLONE_NEW* flags are not included because they imply // another, irreversible flag when used with unshare(2). // - unix.CLONE_NEWIPC: implies CLONE_SYSVMEM // - unix.CLONE_NEWNS: implies CLONE_FS // - unix.CLONE_NEWUSER: implies CLONE_FS since Linux 3.9 } // Go calls the given functions in a new goroutine, locked to an OS thread, // which has had the parts of its execution state disassociated from the rest of // the current process using [unshare(2)]. It blocks until the new goroutine has // started and setupfn has returned. fn is only called if setupfn returns nil. A // nil setupfn or fn is equivalent to passing a no-op function. // // The disassociated execution state and any changes made to it are only visible // to the goroutine which the functions are called in. Any other goroutines, // including ones started from the function, will see the same execution state // as the rest of the process. // // The acceptable flags are documented in the [unshare(2)] Linux man-page. // The corresponding CLONE_* constants are defined in package [unix]. // // # Warning // // This function may terminate the thread which the new goroutine executed on // after fn returns, which could cause subprocesses started with the // [syscall.SysProcAttr] Pdeathsig field set to be signaled before process // termination. Any subprocess started before this function is called may be // affected, in addition to any subprocesses started inside setupfn or fn. // There are more details at https://go.dev/issue/27505. 
// // [unshare(2)]: https://man7.org/linux/man-pages/man2/unshare.2.html func Go(flags int, setupfn func() error, fn func()) error { started := make(chan error) maskedFlags := flags for f := range reversibleSetnsFlags { maskedFlags &^= f } isReversible := maskedFlags == 0 go func() { // Prepare to manipulate per-thread kernel state. runtime.LockOSThread() // Not all changes to the execution state can be reverted. // If an irreversible change to the execution state is made, our // only recourse is to have the tampered thread terminated by // returning from this function while the goroutine remains // wired to the thread. The Go runtime will terminate the thread // and replace it with a fresh one as needed. if isReversible { defer func() { if isReversible { // All execution state has been restored without error. // The thread is once again fungible. runtime.UnlockOSThread() } }() tid := unix.Gettid() for f, ns := range reversibleSetnsFlags { if flags&f != f { continue } // The /proc/thread-self directory was added in Linux 3.17. // We are not using it to maximize compatibility. pth := fmt.Sprintf("/proc/self/task/%d/ns/%s", tid, ns) fd, err := unix.Open(pth, unix.O_RDONLY|unix.O_CLOEXEC, 0) if err != nil { started <- &os.PathError{Op: "open", Path: pth, Err: err} return } defer func() { if isReversible { if err := unix.Setns(fd, 0); err != nil { isReversible = false } } _ = unix.Close(fd) }() } } // Threads are implemented under Linux as processes which share // a virtual memory space. Therefore in a multithreaded process // unshare(2) disassociates parts of the calling thread's // context from the thread it was clone(2)'d from. 
if err := unix.Unshare(flags); err != nil { started <- os.NewSyscallError("unshare", err) return } if setupfn != nil { if err := setupfn(); err != nil { started <- err return } } close(started) if fn != nil { fn() } }() return <-started } golang-github-moby-go-archive-0.2.0/path.go000066400000000000000000000015551512625003600205170ustar00rootroot00000000000000package archive // CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter, // is the system drive. // On Linux: this is a no-op. // On Windows: this does the following> // CheckSystemDriveAndRemoveDriveLetter verifies and manipulates a Windows path. // This is used, for example, when validating a user provided path in docker cp. // If a drive letter is supplied, it must be the system drive. The drive letter // is always removed. Also, it translates it to OS semantics (IOW / to \). We // need the path in this syntax so that it can ultimately be concatenated with // a Windows long-path which doesn't support drive-letters. 
Examples: // C: --> Fail // C:\ --> \ // a --> a // /a --> \a // d:\ --> Fail func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) { return checkSystemDriveAndRemoveDriveLetter(path) } golang-github-moby-go-archive-0.2.0/path_unix.go000066400000000000000000000003701512625003600215540ustar00rootroot00000000000000//go:build !windows package archive // checkSystemDriveAndRemoveDriveLetter is the non-Windows implementation // of CheckSystemDriveAndRemoveDriveLetter func checkSystemDriveAndRemoveDriveLetter(path string) (string, error) { return path, nil } golang-github-moby-go-archive-0.2.0/path_windows.go000066400000000000000000000012131512625003600222600ustar00rootroot00000000000000package archive import ( "fmt" "path/filepath" "strings" ) // checkSystemDriveAndRemoveDriveLetter is the Windows implementation // of CheckSystemDriveAndRemoveDriveLetter func checkSystemDriveAndRemoveDriveLetter(path string) (string, error) { if len(path) == 2 && string(path[1]) == ":" { return "", fmt.Errorf("no relative path specified in %q", path) } if !filepath.IsAbs(path) || len(path) < 2 { return filepath.FromSlash(path), nil } if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") { return "", fmt.Errorf("the specified path is not on the system drive (C:)") } return filepath.FromSlash(path[2:]), nil } golang-github-moby-go-archive-0.2.0/path_windows_test.go000066400000000000000000000044521512625003600233270ustar00rootroot00000000000000package archive import ( "testing" ) // TestCheckSystemDriveAndRemoveDriveLetter tests CheckSystemDriveAndRemoveDriveLetter func TestCheckSystemDriveAndRemoveDriveLetter(t *testing.T) { // Fails if not C drive. 
_, err := CheckSystemDriveAndRemoveDriveLetter(`d:\`) if err == nil || err.Error() != "the specified path is not on the system drive (C:)" { t.Fatalf("Expected error for d:") } // Single character is unchanged var path string if path, err = CheckSystemDriveAndRemoveDriveLetter("z"); err != nil { t.Fatalf("Single character should pass") } if path != "z" { t.Fatalf("Single character should be unchanged") } // Two characters without colon is unchanged if path, err = CheckSystemDriveAndRemoveDriveLetter("AB"); err != nil { t.Fatalf("2 characters without colon should pass") } if path != "AB" { t.Fatalf("2 characters without colon should be unchanged") } // Abs path without drive letter if path, err = CheckSystemDriveAndRemoveDriveLetter(`\l`); err != nil { t.Fatalf("abs path no drive letter should pass") } if path != `\l` { t.Fatalf("abs path without drive letter should be unchanged") } // Abs path without drive letter, linux style if path, err = CheckSystemDriveAndRemoveDriveLetter(`/l`); err != nil { t.Fatalf("abs path no drive letter linux style should pass") } if path != `\l` { t.Fatalf("abs path without drive letter linux failed %s", path) } // Drive-colon should be stripped if path, err = CheckSystemDriveAndRemoveDriveLetter(`c:\`); err != nil { t.Fatalf("An absolute path should pass") } if path != `\` { t.Fatalf(`An absolute path should have been shortened to \ %s`, path) } // Verify with a linux-style path if path, err = CheckSystemDriveAndRemoveDriveLetter(`c:/`); err != nil { t.Fatalf("An absolute path should pass") } if path != `\` { t.Fatalf(`A linux style absolute path should have been shortened to \ %s`, path) } // Failure on c: if path, err = CheckSystemDriveAndRemoveDriveLetter(`c:`); err == nil { t.Fatalf("c: should fail") } if err.Error() != `no relative path specified in "c:"` { t.Fatalf(path, err) } // Failure on d: if path, err = CheckSystemDriveAndRemoveDriveLetter(`d:`); err == nil { t.Fatalf("c: should fail") } if err.Error() != `no relative path 
specified in "d:"` { t.Fatalf(path, err) } } golang-github-moby-go-archive-0.2.0/tarheader/000077500000000000000000000000001512625003600211655ustar00rootroot00000000000000golang-github-moby-go-archive-0.2.0/tarheader/tarheader.go000066400000000000000000000047131512625003600234600ustar00rootroot00000000000000package tarheader import ( "archive/tar" "os" ) // assert that we implement [tar.FileInfoNames]. var _ tar.FileInfoNames = (*nosysFileInfo)(nil) // nosysFileInfo hides the system-dependent info of the wrapped FileInfo to // prevent tar.FileInfoHeader from introspecting it and potentially calling into // glibc. // // It implements [tar.FileInfoNames] to further prevent [tar.FileInfoHeader] // from performing any lookups on go1.23 and up. see https://go.dev/issue/50102 type nosysFileInfo struct { os.FileInfo } // Uname stubs out looking up username. It implements [tar.FileInfoNames] // to prevent [tar.FileInfoHeader] from loading libraries to perform // username lookups. func (fi nosysFileInfo) Uname() (string, error) { return "", nil } // Gname stubs out looking up group-name. It implements [tar.FileInfoNames] // to prevent [tar.FileInfoHeader] from loading libraries to perform // username lookups. func (fi nosysFileInfo) Gname() (string, error) { return "", nil } func (fi nosysFileInfo) Sys() interface{} { // A Sys value of type *tar.Header is safe as it is system-independent. // The tar.FileInfoHeader function copies the fields into the returned // header without performing any OS lookups. if sys, ok := fi.FileInfo.Sys().(*tar.Header); ok { return sys } return nil } // FileInfoHeaderNoLookups creates a partially-populated tar.Header from fi. // // Compared to the archive/tar.FileInfoHeader function, this function is safe to // call from a chrooted process as it does not populate fields which would // require operating system lookups. It behaves identically to // tar.FileInfoHeader when fi is a FileInfo value returned from // tar.Header.FileInfo(). 
// // When fi is a FileInfo for a native file, such as returned from os.Stat() and // os.Lstat(), the returned Header value differs from one returned from // tar.FileInfoHeader in the following ways. The Uname and Gname fields are not // set as OS lookups would be required to populate them. The AccessTime and // ChangeTime fields are not currently set (not yet implemented) although that // is subject to change. Callers which require the AccessTime or ChangeTime // fields to be zeroed should explicitly zero them out in the returned Header // value to avoid any compatibility issues in the future. func FileInfoHeaderNoLookups(fi os.FileInfo, link string) (*tar.Header, error) { hdr, err := tar.FileInfoHeader(nosysFileInfo{fi}, link) if err != nil { return nil, err } return hdr, sysStat(fi, hdr) } golang-github-moby-go-archive-0.2.0/tarheader/tarheader_test.go000066400000000000000000000006441512625003600245160ustar00rootroot00000000000000package tarheader import ( "archive/tar" "os" "testing" ) func TestNosysFileInfo(t *testing.T) { st, err := os.Stat("tarheader_test.go") if err != nil { t.Fatal(err) } h, err := tar.FileInfoHeader(nosysFileInfo{st}, "") if err != nil { t.Fatal(err) } if h.Uname != "" { t.Errorf("uname should be empty; got %v", h.Uname) } if h.Gname != "" { t.Errorf("gname should be empty; got %v", h.Uname) } } golang-github-moby-go-archive-0.2.0/tarheader/tarheader_unix.go000066400000000000000000000024411512625003600245170ustar00rootroot00000000000000//go:build !windows package tarheader import ( "archive/tar" "os" "runtime" "syscall" "golang.org/x/sys/unix" ) // sysStat populates hdr from system-dependent fields of fi without performing // any OS lookups. func sysStat(fi os.FileInfo, hdr *tar.Header) error { // Devmajor and Devminor are only needed for special devices. 
// In FreeBSD, RDev for regular files is -1 (unless overridden by FS): // https://cgit.freebsd.org/src/tree/sys/kern/vfs_default.c?h=stable/13#n1531 // (NODEV is -1: https://cgit.freebsd.org/src/tree/sys/sys/param.h?h=stable/13#n241). // ZFS in particular does not override the default: // https://cgit.freebsd.org/src/tree/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops_os.c?h=stable/13#n2027 // Since `Stat_t.Rdev` is uint64, the cast turns -1 into (2^64 - 1). // Such large values cannot be encoded in a tar header. if runtime.GOOS == "freebsd" && hdr.Typeflag != tar.TypeBlock && hdr.Typeflag != tar.TypeChar { return nil } s, ok := fi.Sys().(*syscall.Stat_t) if !ok { return nil } hdr.Uid = int(s.Uid) hdr.Gid = int(s.Gid) if s.Mode&unix.S_IFBLK != 0 || s.Mode&unix.S_IFCHR != 0 { hdr.Devmajor = int64(unix.Major(uint64(s.Rdev))) //nolint: unconvert hdr.Devminor = int64(unix.Minor(uint64(s.Rdev))) //nolint: unconvert } return nil } golang-github-moby-go-archive-0.2.0/tarheader/tarheader_windows.go000066400000000000000000000003551512625003600252300ustar00rootroot00000000000000package tarheader import ( "archive/tar" "os" ) // sysStat populates hdr from system-dependent fields of fi without performing // any OS lookups. It is a no-op on Windows. 
func sysStat(os.FileInfo, *tar.Header) error { return nil } golang-github-moby-go-archive-0.2.0/testdata/000077500000000000000000000000001512625003600210375ustar00rootroot00000000000000golang-github-moby-go-archive-0.2.0/testdata/broken.tar000066400000000000000000000330001512625003600230230ustar00rootroot00000000000000root/0040700000000000000000000000000012332704605010223 5ustar0000000000000000root/.cpanm/0040755000000000000000000000000012332704605011411 5ustar0000000000000000root/.cpanm/work/0040755000000000000000000000000012332704605012373 5ustar0000000000000000root/.cpanm/work/1395823785.24209/0040755000000000000000000000000012332704605014154 5ustar0000000000000000root/.cpanm/work/1395823785.24209/File-Find-Rule-0.33/0040755000000000000000000000000012332704605017177 5ustar0000000000000000root/.cpanm/work/1395823785.24209/File-Find-Rule-0.33/META.yml01006441Ӏ-0000000112211635626623021652 0ustar 00000000000000--- #YAML:1.0 name: File-Find-Rule version: 0.33 abstract: ~ author: [] license: unknown distribution_type: module configure_requires: ExtUtils::MakeMaker: 0 build_requires: ExtUtils::MakeMaker: 0 requires: File::Find: 0 File::Spec: 0 Number::Compare: 0 Test::More: 0 Text::Glob: 0.07 no_index: directory: - t - inc generated_by: ExtUtils::MakeMaker version 6.57_05 meta-spec: url: http://module-build.sourceforge.net/META-spec-v1.4.html version: 1.4 root/.cpanm/work/1395823785.24209/Plack-1.0030/0040755000000000000000000000000012332704605015665 5ustar 00000000000000root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/0040755000000000000000000000000012332704605016575 5ustar 00000000000000root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/0040755000000000000000000000000012332704605017433 5ustar 00000000000000Plack::Middleware::LighttpdScriptNameFix.3pm0100644000000000000000000001400512314512430027460 0ustar 00000000000000root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3.\" Automatically generated by Pod::Man 2.27 (Pod::Simple 3.28) .\" .\" Standard 
preamble: .\" ======================================================================== .de Sp \" Vertical space (when we can't use .PP) .if t .sp .5v .if n .sp .. .de Vb \" Begin verbatim text .ft CW .nf .ne \\$1 .. .de Ve \" End verbatim text .ft R .fi .. .\" Set up some character translations and predefined strings. \*(-- will .\" give an unbreakable dash, \*(PI will give pi, \*(L" will give a left .\" double quote, and \*(R" will give a right double quote. \*(C+ will .\" give a nicer C++. Capital omega is used to do unbreakable dashes and .\" therefore won't be available. \*(C` and \*(C' expand to `' in nroff, .\" nothing in troff, for use with C<>. .tr \(*W- .ds C+ C\v'-.1v'\h'-1p'\s-2+\h'-1p'+\s0\v'.1v'\h'-1p' .ie n \{\ . ds -- \(*W- . ds PI pi . if (\n(.H=4u)&(1m=24u) .ds -- \(*W\h'-12u'\(*W\h'-12u'-\" diablo 10 pitch . if (\n(.H=4u)&(1m=20u) .ds -- \(*W\h'-12u'\(*W\h'-8u'-\" diablo 12 pitch . ds L" "" . ds R" "" . ds C` "" . ds C' "" 'br\} .el\{\ . ds -- \|\(em\| . ds PI \(*p . ds L" `` . ds R" '' . ds C` . ds C' 'br\} .\" .\" Escape single quotes in literal strings from groff's Unicode transform. .ie \n(.g .ds Aq \(aq .el .ds Aq ' .\" .\" If the F register is turned on, we'll generate index entries on stderr for .\" titles (.TH), headers (.SH), subsections (.SS), items (.Ip), and index .\" entries marked with X<> in POD. Of course, you'll have to process the .\" output yourself in some meaningful fashion. .\" .\" Avoid warning from groff about undefined register 'F'. .de IX .. .nr rF 0 .if \n(.g .if rF .nr rF 1 .if (\n(rF:(\n(.g==0)) \{ . if \nF \{ . de IX . tm Index:\\$1\t\\n%\t"\\$2" .. . if !\nF==2 \{ . nr % 0 . nr F 2 . \} . \} .\} .rr rF .\" .\" Accent mark definitions (@(#)ms.acc 1.5 88/02/08 SMI; from UCB 4.2). .\" Fear. Run. Save yourself. No user-serviceable parts. . \" fudge factors for nroff and troff .if n \{\ . ds #H 0 . ds #V .8m . ds #F .3m . ds #[ \f1 . ds #] \fP .\} .if t \{\ . ds #H ((1u-(\\\\n(.fu%2u))*.13m) . ds #V .6m . ds #F 0 . 
ds #[ \& . ds #] \& .\} . \" simple accents for nroff and troff .if n \{\ . ds ' \& . ds ` \& . ds ^ \& . ds , \& . ds ~ ~ . ds / .\} .if t \{\ . ds ' \\k:\h'-(\\n(.wu*8/10-\*(#H)'\'\h"|\\n:u" . ds ` \\k:\h'-(\\n(.wu*8/10-\*(#H)'\`\h'|\\n:u' . ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'^\h'|\\n:u' . ds , \\k:\h'-(\\n(.wu*8/10)',\h'|\\n:u' . ds ~ \\k:\h'-(\\n(.wu-\*(#H-.1m)'~\h'|\\n:u' . ds / \\k:\h'-(\\n(.wu*8/10-\*(#H)'\z\(sl\h'|\\n:u' .\} . \" troff and (daisy-wheel) nroff accents .ds : \\k:\h'-(\\n(.wu*8/10-\*(#H+.1m+\*(#F)'\v'-\*(#V'\z.\h'.2m+\*(#F'.\h'|\\n:u'\v'\*(#V' .ds 8 \h'\*(#H'\(*b\h'-\*(#H' .ds o \\k:\h'-(\\n(.wu+\w'\(de'u-\*(#H)/2u'\v'-.3n'\*(#[\z\(de\v'.3n'\h'|\\n:u'\*(#] .ds d- \h'\*(#H'\(pd\h'-\w'~'u'\v'-.25m'\f2\(hy\fP\v'.25m'\h'-\*(#H' .ds D- D\\k:\h'-\w'D'u'\v'-.11m'\z\(hy\v'.11m'\h'|\\n:u' .ds th \*(#[\v'.3m'\s+1I\s-1\v'-.3m'\h'-(\w'I'u*2/3)'\s-1o\s+1\*(#] .ds Th \*(#[\s+2I\s-2\h'-\w'I'u*3/5'\v'-.3m'o\v'.3m'\*(#] .ds ae a\h'-(\w'a'u*4/10)'e .ds Ae A\h'-(\w'A'u*4/10)'E . \" corrections for vroff .if v .ds ~ \\k:\h'-(\\n(.wu*9/10-\*(#H)'\s-2\u~\d\s+2\h'|\\n:u' .if v .ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'\v'-.4m'^\v'.4m'\h'|\\n:u' . \" for low resolution devices (crt and lpr) .if \n(.H>23 .if \n(.V>19 \ \{\ . ds : e . ds 8 ss . ds o a . ds d- d\h'-1'\(ga . ds D- D\h'-1'\(hy . ds th \o'bp' . ds Th \o'LP' . ds ae ae . ds Ae AE .\} .rm #[ #] #H #V #F C .\" ======================================================================== .\" .IX Title "Plack::Middleware::LighttpdScriptNameFix 3pm" .TH Plack::Middleware::LighttpdScriptNameFix 3pm "2013-11-23" "perl v5.18.2" "User Contributed Perl Documentation" .\" For nroff, turn off justification. Always turn off hyphenation; it makes .\" way too many mistakes in technical documents. 
.if n .ad l .nh .SH "NAME" Plack::Middleware::LighttpdScriptNameFix \- fixes wrong SCRIPT_NAME and PATH_INFO that lighttpd sets .SH "SYNOPSIS" .IX Header "SYNOPSIS" .Vb 2 \& # in your app.psgi \& use Plack::Builder; \& \& builder { \& enable "LighttpdScriptNameFix"; \& $app; \& }; \& \& # Or from the command line \& plackup \-s FCGI \-e \*(Aqenable "LighttpdScriptNameFix"\*(Aq /path/to/app.psgi .Ve .SH "DESCRIPTION" .IX Header "DESCRIPTION" This middleware fixes wrong \f(CW\*(C`SCRIPT_NAME\*(C'\fR and \f(CW\*(C`PATH_INFO\*(C'\fR set by lighttpd when you mount your app under the root path (\*(L"/\*(R"). If you use lighttpd 1.4.23 or later you can instead enable \f(CW\*(C`fix\-root\-scriptname\*(C'\fR flag inside \f(CW\*(C`fastcgi.server\*(C'\fR instead of using this middleware. .SH "CONFIGURATION" .IX Header "CONFIGURATION" .IP "script_name" 4 .IX Item "script_name" Even with \f(CW\*(C`fix\-root\-scriptname\*(C'\fR, lighttpd \fIstill\fR sets weird \&\f(CW\*(C`SCRIPT_NAME\*(C'\fR and \f(CW\*(C`PATH_INFO\*(C'\fR if you mount your application at \f(CW""\fR or something that ends with \f(CW\*(C`/\*(C'\fR. Setting \f(CW\*(C`script_name\*(C'\fR option tells the middleware how to reconstruct the new correct \f(CW\*(C`SCRIPT_NAME\*(C'\fR and \&\f(CW\*(C`PATH_INFO\*(C'\fR. .Sp If you mount the app under \f(CW\*(C`/something/\*(C'\fR, you should set: .Sp .Vb 1 \& enable "LighttpdScriptNameFix", script_name => "/something"; .Ve .Sp and when a request for \f(CW\*(C`/something/a/b?param=1\*(C'\fR comes, \f(CW\*(C`SCRIPT_NAME\*(C'\fR becomes \f(CW\*(C`/something\*(C'\fR and \f(CW\*(C`PATH_INFO\*(C'\fR becomes \f(CW\*(C`/a/b\*(C'\fR. .Sp \&\f(CW\*(C`script_name\*(C'\fR option is set to empty by default, which means all the request path is set to \f(CW\*(C`PATH_INFO\*(C'\fR and it behaves like your fastcgi application is mounted in the root path. 
.SH "AUTHORS" .IX Header "AUTHORS" Yury Zavarin .PP Tatsuhiko Miyagawa .SH "SEE ALSO" .IX Header "SEE ALSO" Plack::Handler::FCGI golang-github-moby-go-archive-0.2.0/time.go000066400000000000000000000011041512625003600205070ustar00rootroot00000000000000package archive import ( "syscall" "time" "unsafe" ) var ( minTime = time.Unix(0, 0) maxTime time.Time ) func init() { if unsafe.Sizeof(syscall.Timespec{}.Nsec) == 8 { // This is a 64 bit timespec // os.Chtimes limits time to the following maxTime = time.Unix(0, 1<<63-1) } else { // This is a 32 bit timespec maxTime = time.Unix(1<<31-1, 0) } } func boundTime(t time.Time) time.Time { if t.Before(minTime) || t.After(maxTime) { return minTime } return t } func latestTime(t1, t2 time.Time) time.Time { if t1.Before(t2) { return t2 } return t1 } golang-github-moby-go-archive-0.2.0/time_nonwindows.go000066400000000000000000000020011512625003600227710ustar00rootroot00000000000000//go:build !windows package archive import ( "os" "time" "golang.org/x/sys/unix" ) // chtimes changes the access time and modified time of a file at the given path. // If the modified time is prior to the Unix Epoch (unixMinTime), or after the // end of Unix Time (unixEpochTime), os.Chtimes has undefined behavior. In this // case, Chtimes defaults to Unix Epoch, just in case. 
func chtimes(name string, atime time.Time, mtime time.Time) error { return os.Chtimes(name, atime, mtime) } func timeToTimespec(time time.Time) unix.Timespec { if time.IsZero() { // Return UTIME_OMIT special value return unix.Timespec{ Sec: 0, Nsec: (1 << 30) - 2, } } return unix.NsecToTimespec(time.UnixNano()) } func lchtimes(name string, atime time.Time, mtime time.Time) error { utimes := [2]unix.Timespec{ timeToTimespec(atime), timeToTimespec(mtime), } err := unix.UtimesNanoAt(unix.AT_FDCWD, name, utimes[0:], unix.AT_SYMLINK_NOFOLLOW) if err != nil && err != unix.ENOSYS { return err } return err } golang-github-moby-go-archive-0.2.0/time_windows.go000066400000000000000000000012661512625003600222720ustar00rootroot00000000000000package archive import ( "os" "time" "golang.org/x/sys/windows" ) func chtimes(name string, atime time.Time, mtime time.Time) error { if err := os.Chtimes(name, atime, mtime); err != nil { return err } pathp, err := windows.UTF16PtrFromString(name) if err != nil { return err } h, err := windows.CreateFile(pathp, windows.FILE_WRITE_ATTRIBUTES, windows.FILE_SHARE_WRITE, nil, windows.OPEN_EXISTING, windows.FILE_FLAG_BACKUP_SEMANTICS, 0) if err != nil { return err } defer windows.Close(h) c := windows.NsecToFiletime(mtime.UnixNano()) return windows.SetFileTime(h, &c, nil, nil) } func lchtimes(name string, atime time.Time, mtime time.Time) error { return nil } golang-github-moby-go-archive-0.2.0/utils_test.go000066400000000000000000000140621512625003600217570ustar00rootroot00000000000000package archive import ( "archive/tar" "bytes" "errors" "fmt" "io" "os" "path/filepath" "time" ) var testUntarFns = map[string]func(string, io.Reader) error{ "untar": func(dest string, r io.Reader) error { return Untar(r, dest, nil) }, "applylayer": func(dest string, r io.Reader) error { _, err := ApplyLayer(dest, r) return err }, } // testBreakout is a helper function that, within the provided `tmpdir` directory, // creates a `victim` folder with a generated 
`hello` file in it. // `untar` extracts to a directory named `dest`, the tar file created from `headers`. // // Here are the tested scenarios: // - removed `victim` folder (write) // - removed files from `victim` folder (write) // - new files in `victim` folder (write) // - modified files in `victim` folder (write) // - file in `dest` with same content as `victim/hello` (read) // // When using testBreakout make sure you cover one of the scenarios listed above. func testBreakout(untarFn string, tmpdir string, headers []*tar.Header) error { tmpdir, err := os.MkdirTemp("", tmpdir) if err != nil { return err } defer os.RemoveAll(tmpdir) dest := filepath.Join(tmpdir, "dest") if err := os.Mkdir(dest, 0o755); err != nil { return err } victim := filepath.Join(tmpdir, "victim") if err := os.Mkdir(victim, 0o755); err != nil { return err } hello := filepath.Join(victim, "hello") helloData, err := time.Now().MarshalText() if err != nil { return err } if err := os.WriteFile(hello, helloData, 0o644); err != nil { return err } helloStat, err := os.Stat(hello) if err != nil { return err } reader, writer := io.Pipe() go func() { t := tar.NewWriter(writer) for _, hdr := range headers { _ = t.WriteHeader(hdr) } _ = t.Close() }() untar := testUntarFns[untarFn] if untar == nil { return fmt.Errorf("could not find untar function %q in testUntarFns", untarFn) } if err := untar(dest, reader); err != nil { if !errors.As(err, new(breakoutError)) { // If untar returns an error unrelated to an archive breakout, // then consider this an unexpected error and abort. return err } // Here, untar detected the breakout. // Let's move on verifying that indeed there was no breakout. 
fmt.Println("breakoutError:", err) } // Check victim folder f, err := os.Open(victim) if err != nil { // codepath taken if victim folder was removed return fmt.Errorf("archive breakout: error reading %q: %w", victim, err) } defer f.Close() // Check contents of victim folder // // We are only interested in getting 2 files from the victim folder, because if all is well // we expect only one result, the `hello` file. If there is a second result, it cannot // hold the same name `hello` and we assume that a new file got created in the victim folder. // That is enough to detect an archive breakout. names, err := f.Readdirnames(2) if err != nil { // codepath taken if victim is not a folder return fmt.Errorf("archive breakout: error reading directory content of %q: %w", victim, err) } for _, name := range names { if name != "hello" { // codepath taken if new file was created in victim folder return fmt.Errorf("archive breakout: new file %q", name) } } // Check victim/hello f, err = os.Open(hello) if err != nil { // codepath taken if read permissions were removed return fmt.Errorf("archive breakout: could not lstat %q: %w", hello, err) } defer f.Close() b, err := io.ReadAll(f) if err != nil { return err } fi, err := f.Stat() if err != nil { return err } if helloStat.IsDir() != fi.IsDir() || // TODO: cannot check for fi.ModTime() change helloStat.Mode() != fi.Mode() || helloStat.Size() != fi.Size() || !bytes.Equal(helloData, b) { // codepath taken if hello has been modified return fmt.Errorf("archive breakout: file %q has been modified. Contents: expected=%q, got=%q. FileInfo: expected=%#v, got=%#v", hello, helloData, b, helloStat, fi) } // Check that nothing in dest/ has the same content as victim/hello. // Since victim/hello was generated with time.Now(), it is safe to assume // that any file whose content matches exactly victim/hello, managed somehow // to access victim/hello. 
return filepath.WalkDir(dest, func(path string, info os.DirEntry, err error) error { if info.IsDir() { if err != nil { // skip directory if error return filepath.SkipDir } // enter directory return nil } if err != nil { // skip file if error return nil } b, err := os.ReadFile(path) if err != nil { // Houston, we have a problem. Aborting (space)walk. return err } if bytes.Equal(helloData, b) { return fmt.Errorf("archive breakout: file %q has been accessed via %q", hello, path) } return nil }) } // newTempArchive reads the content of src into a temporary file, and returns the contents // of that file as an archive. The archive can only be read once - as soon as reading completes, // the file will be deleted. func newTempArchive(src io.Reader, dir string) (*tempArchive, error) { f, err := os.CreateTemp(dir, "") if err != nil { return nil, err } if _, err := io.Copy(f, src); err != nil { return nil, err } if _, err := f.Seek(0, 0); err != nil { return nil, err } st, err := f.Stat() if err != nil { return nil, err } size := st.Size() return &tempArchive{File: f, Size: size}, nil } // tempArchive is a temporary archive. The archive can only be read once - as soon as reading completes, // the file will be deleted. type tempArchive struct { *os.File Size int64 // Pre-computed from Stat().Size() as a convenience read int64 closed bool } // Close closes the underlying file if it's still open, or does a no-op // to allow callers to try to close the tempArchive multiple times safely. 
func (archive *tempArchive) Close() error { if archive.closed { return nil } archive.closed = true return archive.File.Close() } func (archive *tempArchive) Read(data []byte) (int, error) { n, err := archive.File.Read(data) archive.read += int64(n) if err != nil || archive.read == archive.Size { _ = archive.Close() _ = os.Remove(archive.File.Name()) } return n, err } golang-github-moby-go-archive-0.2.0/whiteouts.go000066400000000000000000000021141512625003600216060ustar00rootroot00000000000000package archive // Whiteouts are files with a special meaning for the layered filesystem. // Docker uses AUFS whiteout files inside exported archives. In other // filesystems these files are generated/handled on tar creation/extraction. // WhiteoutPrefix prefix means file is a whiteout. If this is followed by a // filename this means that file has been removed from the base layer. const WhiteoutPrefix = ".wh." // WhiteoutMetaPrefix prefix means whiteout has a special meaning and is not // for removing an actual file. Normally these files are excluded from exported // archives. const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix // WhiteoutLinkDir is a directory AUFS uses for storing hardlink links to other // layers. Normally these should not go into exported archives and all changed // hardlinks should be copied to the top layer. const WhiteoutLinkDir = WhiteoutMetaPrefix + "plnk" // WhiteoutOpaqueDir file means directory has been made opaque - meaning // readdir calls to this directory do not follow to lower layers. const WhiteoutOpaqueDir = WhiteoutMetaPrefix + ".opq" golang-github-moby-go-archive-0.2.0/wrap.go000066400000000000000000000026121512625003600205270ustar00rootroot00000000000000package archive import ( "archive/tar" "bytes" "io" ) // Generate generates a new archive from the content provided // as input. // // `files` is a sequence of path/content pairs. A new file is // added to the archive for each pair. 
// If the last pair is incomplete, the file is created with an // empty content. For example: // // Generate("foo.txt", "hello world", "emptyfile") // // The above call will return an archive with 2 files: // - ./foo.txt with content "hello world" // - ./empty with empty content // // FIXME: stream content instead of buffering // FIXME: specify permissions and other archive metadata func Generate(input ...string) (io.Reader, error) { files := parseStringPairs(input...) buf := new(bytes.Buffer) tw := tar.NewWriter(buf) for _, file := range files { name, content := file[0], file[1] hdr := &tar.Header{ Name: name, Size: int64(len(content)), } if err := tw.WriteHeader(hdr); err != nil { return nil, err } if _, err := tw.Write([]byte(content)); err != nil { return nil, err } } if err := tw.Close(); err != nil { return nil, err } return buf, nil } func parseStringPairs(input ...string) [][2]string { output := make([][2]string, 0, len(input)/2+1) for i := 0; i < len(input); i += 2 { var pair [2]string pair[0] = input[i] if i+1 < len(input) { pair[1] = input[i+1] } output = append(output, pair) } return output } golang-github-moby-go-archive-0.2.0/wrap_test.go000066400000000000000000000043261512625003600215720ustar00rootroot00000000000000package archive import ( "archive/tar" "bytes" "errors" "io" "testing" "gotest.tools/v3/assert" ) func TestGenerateEmptyFile(t *testing.T) { archive, err := Generate("emptyFile") assert.NilError(t, err) if archive == nil { t.Fatal("The generated archive should not be nil.") } expectedFiles := [][]string{ {"emptyFile", ""}, } tr := tar.NewReader(archive) actualFiles := make([][]string, 0, 10) i := 0 for { hdr, err := tr.Next() if errors.Is(err, io.EOF) { break } assert.NilError(t, err) buf := new(bytes.Buffer) _, err = buf.ReadFrom(tr) assert.NilError(t, err) content := buf.String() actualFiles = append(actualFiles, []string{hdr.Name, content}) i++ } if len(actualFiles) != len(expectedFiles) { t.Fatalf("Number of expected file %d, got %d.", 
len(expectedFiles), len(actualFiles)) } for i := 0; i < len(expectedFiles); i++ { actual := actualFiles[i] expected := expectedFiles[i] if actual[0] != expected[0] { t.Fatalf("Expected name '%s', Actual name '%s'", expected[0], actual[0]) } if actual[1] != expected[1] { t.Fatalf("Expected content '%s', Actual content '%s'", expected[1], actual[1]) } } } func TestGenerateWithContent(t *testing.T) { archive, err := Generate("file", "content") assert.NilError(t, err) if archive == nil { t.Fatal("The generated archive should not be nil.") } expectedFiles := [][]string{ {"file", "content"}, } tr := tar.NewReader(archive) actualFiles := make([][]string, 0, 10) i := 0 for { hdr, err := tr.Next() if errors.Is(err, io.EOF) { break } assert.NilError(t, err) buf := new(bytes.Buffer) _, err = buf.ReadFrom(tr) assert.NilError(t, err) content := buf.String() actualFiles = append(actualFiles, []string{hdr.Name, content}) i++ } if len(actualFiles) != len(expectedFiles) { t.Fatalf("Number of expected file %d, got %d.", len(expectedFiles), len(actualFiles)) } for i := 0; i < len(expectedFiles); i++ { actual := actualFiles[i] expected := expectedFiles[i] if actual[0] != expected[0] { t.Fatalf("Expected name '%s', Actual name '%s'", expected[0], actual[0]) } if actual[1] != expected[1] { t.Fatalf("Expected content '%s', Actual content '%s'", expected[1], actual[1]) } } } golang-github-moby-go-archive-0.2.0/xattr_supported.go000066400000000000000000000026451512625003600230330ustar00rootroot00000000000000//go:build linux || darwin || freebsd || netbsd package archive import ( "errors" "fmt" "io/fs" "golang.org/x/sys/unix" ) // lgetxattr retrieves the value of the extended attribute identified by attr // and associated with the given path in the file system. // It returns a nil slice and nil error if the xattr is not set. 
func lgetxattr(path string, attr string) ([]byte, error) { // Start with a 128 length byte array dest := make([]byte, 128) sz, err := unix.Lgetxattr(path, attr, dest) for errors.Is(err, unix.ERANGE) { // Buffer too small, use zero-sized buffer to get the actual size sz, err = unix.Lgetxattr(path, attr, []byte{}) if err != nil { return nil, wrapPathError("lgetxattr", path, attr, err) } dest = make([]byte, sz) sz, err = unix.Lgetxattr(path, attr, dest) } if err != nil { if errors.Is(err, noattr) { return nil, nil } return nil, wrapPathError("lgetxattr", path, attr, err) } return dest[:sz], nil } // lsetxattr sets the value of the extended attribute identified by attr // and associated with the given path in the file system. func lsetxattr(path string, attr string, data []byte, flags int) error { return wrapPathError("lsetxattr", path, attr, unix.Lsetxattr(path, attr, data, flags)) } func wrapPathError(op, path, attr string, err error) error { if err == nil { return nil } return &fs.PathError{Op: op, Path: path, Err: fmt.Errorf("xattr %q: %w", attr, err)} } golang-github-moby-go-archive-0.2.0/xattr_supported_linux.go000066400000000000000000000001131512625003600242360ustar00rootroot00000000000000package archive import "golang.org/x/sys/unix" var noattr = unix.ENODATA golang-github-moby-go-archive-0.2.0/xattr_supported_unix.go000066400000000000000000000001631512625003600240670ustar00rootroot00000000000000//go:build darwin || freebsd || netbsd package archive import "golang.org/x/sys/unix" var noattr = unix.ENOATTR golang-github-moby-go-archive-0.2.0/xattr_unsupported.go000066400000000000000000000003541512625003600233710ustar00rootroot00000000000000//go:build !linux && !darwin && !freebsd && !netbsd package archive func lgetxattr(path string, attr string) ([]byte, error) { return nil, nil } func lsetxattr(path string, attr string, data []byte, flags int) error { return nil }