diff --git a/go.mod b/go.mod index f582d133840..0fb822ad39e 100644 --- a/go.mod +++ b/go.mod @@ -35,8 +35,8 @@ require ( github.com/stretchr/testify v1.11.1 go.etcd.io/bbolt v1.4.3 go.podman.io/common v0.65.1-0.20250916163606-92222dcd3da4 - go.podman.io/image/v5 v5.37.1-0.20250916163606-92222dcd3da4 - go.podman.io/storage v1.60.1-0.20250916163606-92222dcd3da4 + go.podman.io/image/v5 v5.38.0 + go.podman.io/storage v1.61.0 golang.org/x/crypto v0.43.0 golang.org/x/sync v0.17.0 golang.org/x/sys v0.37.0 @@ -56,7 +56,7 @@ require ( github.com/containerd/errdefs v1.0.0 // indirect github.com/containerd/errdefs/pkg v0.3.0 // indirect github.com/containerd/log v0.1.0 // indirect - github.com/containerd/stargz-snapshotter/estargz v0.17.0 // indirect + github.com/containerd/stargz-snapshotter/estargz v0.18.0 // indirect github.com/containerd/typeurl/v2 v2.2.3 // indirect github.com/containernetworking/plugins v1.8.0 // indirect github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect @@ -65,7 +65,7 @@ require ( github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/disiqueira/gotree/v3 v3.0.2 // indirect github.com/distribution/reference v0.6.0 // indirect - github.com/docker/docker-credential-helpers v0.9.3 // indirect + github.com/docker/docker-credential-helpers v0.9.4 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fsnotify/fsnotify v1.9.0 // indirect github.com/go-jose/go-jose/v4 v4.0.5 // indirect @@ -82,14 +82,14 @@ require ( github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jinzhu/copier v0.4.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.18.0 // indirect + github.com/klauspost/compress v1.18.1 // indirect github.com/klauspost/pgzip v1.2.6 // indirect github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec // indirect github.com/manifoldco/promptui v0.9.0 // indirect github.com/mattn/go-runewidth v0.0.16 // indirect github.com/mattn/go-sqlite3 v1.14.32 // indirect github.com/miekg/pkcs11 v1.1.1 // indirect - github.com/mistifyio/go-zfs/v3 v3.0.1 // indirect + github.com/mistifyio/go-zfs/v4 v4.0.0 // indirect github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/go-archive v0.1.0 // indirect github.com/moby/patternmatcher v0.6.0 // indirect @@ -115,7 +115,7 @@ require ( github.com/tchap/go-patricia/v2 v2.3.3 // indirect github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect github.com/ulikunitz/xz v0.5.15 // indirect - github.com/vbatts/tar-split v0.12.1 // indirect + github.com/vbatts/tar-split v0.12.2 // indirect github.com/vbauerster/mpb/v8 v8.10.2 // indirect github.com/vishvananda/netlink v1.3.1 // indirect github.com/vishvananda/netns v0.0.5 // indirect @@ -137,3 +137,9 @@ require ( sigs.k8s.io/yaml v1.6.0 // indirect tags.cncf.io/container-device-interface/specs-go v1.0.0 // indirect ) + +replace go.podman.io/common => github.com/ninja-quokka/container-libs/common v0.66.1-0.20251021200235-096ee6bbf8e8 + +replace go.podman.io/storage => github.com/ninja-quokka/container-libs/storage v1.61.1-0.20251021200235-096ee6bbf8e8 + +replace go.podman.io/image/v5 => github.com/ninja-quokka/container-libs/image/v5 v5.38.1-0.20251021200235-096ee6bbf8e8 diff --git a/go.sum b/go.sum index abe4e30c488..e0df8b58ecc 100644 --- a/go.sum +++ b/go.sum @@ -41,8 +41,8 @@ github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0/go.mod 
h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= github.com/containerd/platforms v1.0.0-rc.1 h1:83KIq4yy1erSRgOVHNk1HYdPvzdJ5CnsWaRoJX4C41E= github.com/containerd/platforms v1.0.0-rc.1/go.mod h1:J71L7B+aiM5SdIEqmd9wp6THLVRzJGXfNuWCZCllLA4= -github.com/containerd/stargz-snapshotter/estargz v0.17.0 h1:+TyQIsR/zSFI1Rm31EQBwpAA1ovYgIKHy7kctL3sLcE= -github.com/containerd/stargz-snapshotter/estargz v0.17.0/go.mod h1:s06tWAiJcXQo9/8AReBCIo/QxcXFZ2n4qfsRnpl71SM= +github.com/containerd/stargz-snapshotter/estargz v0.18.0 h1:Ny5yptQgEXSkDFKvlKJGTvf1YJ+4xD8V+hXqoRG0n74= +github.com/containerd/stargz-snapshotter/estargz v0.18.0/go.mod h1:7hfU1BO2KB3axZl0dRQCdnHrIWw7TRDdK6L44Rdeuo0= github.com/containerd/typeurl/v2 v2.2.3 h1:yNA/94zxWdvYACdYO8zofhrTVuQY73fFU1y++dYSw40= github.com/containerd/typeurl/v2 v2.2.3/go.mod h1:95ljDnPfD3bAbDJRugOiShd/DlAAsxGtUBhJxIn7SCk= github.com/containernetworking/cni v1.3.0 h1:v6EpN8RznAZj9765HhXQrtXgX+ECGebEYEmnuFjskwo= @@ -72,14 +72,14 @@ github.com/disiqueira/gotree/v3 v3.0.2 h1:ik5iuLQQoufZBNPY518dXhiO5056hyNBIK9lWh github.com/disiqueira/gotree/v3 v3.0.2/go.mod h1:ZuyjE4+mUQZlbpkI24AmruZKhg3VHEgPLDY8Qk+uUu8= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/cli v28.4.0+incompatible h1:RBcf3Kjw2pMtwui5V0DIMdyeab8glEw5QY0UUU4C9kY= -github.com/docker/cli v28.4.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v28.5.1+incompatible h1:ESutzBALAD6qyCLqbQSEf1a/U8Ybms5agw59yGVc+yY= +github.com/docker/cli v28.5.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v28.5.1+incompatible h1:Bm8DchhSD2J6PsFzxC35TZo4TLGR2PdW/E69rU45NhM= github.com/docker/docker v28.5.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker-credential-helpers v0.9.3 h1:gAm/VtF9wgqJMoxzT3Gj5p4AqIjCBS4wrsOh9yRqcz8= -github.com/docker/docker-credential-helpers v0.9.3/go.mod h1:x+4Gbw9aGmChi3qTLZj8Dfn0TD20M/fuWy0E5+WDeCo= +github.com/docker/docker-credential-helpers v0.9.4 h1:76ItO69/AP/V4yT9V4uuuItG0B1N8hvt0T0c0NN/DzI= +github.com/docker/docker-credential-helpers v0.9.4/go.mod h1:v1S+hepowrQXITkEfw6o4+BMbGot02wiKpzWhGUZK6c= github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94= github.com/docker/go-connections v0.6.0/go.mod h1:AahvXYshr6JgfUJGdDCs2b5EZG/vmaMAntpSFH5BFKE= github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8= @@ -142,8 +142,8 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= -github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= +github.com/klauspost/compress v1.18.1 h1:bcSGx7UbpBqMChDtsF28Lw6v/G94LPrrbMbdC3JH2co= +github.com/klauspost/compress v1.18.1/go.mod h1:ZQFFVG+MdnR0P+l6wpXgIL4NTtwiKIdBnrBd8Nrxr+0= github.com/klauspost/pgzip 
v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU= github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= @@ -162,8 +162,8 @@ github.com/mattn/go-sqlite3 v1.14.32 h1:JD12Ag3oLy1zQA+BNn74xRgaBbdhbNIDYvQUEuuE github.com/mattn/go-sqlite3 v1.14.32/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU= github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= -github.com/mistifyio/go-zfs/v3 v3.0.1 h1:YaoXgBePoMA12+S1u/ddkv+QqxcfiZK4prI6HPnkFiU= -github.com/mistifyio/go-zfs/v3 v3.0.1/go.mod h1:CzVgeB0RvF2EGzQnytKVvVSDwmKJXxkOTUGbNrTja/k= +github.com/mistifyio/go-zfs/v4 v4.0.0 h1:sU0+5dX45tdDK5xNZ3HBi95nxUc48FS92qbIZEvpAg4= +github.com/mistifyio/go-zfs/v4 v4.0.0/go.mod h1:weotFtXTHvBwhr9Mv96KYnDkTPBOHFUbm9cBmQpesL0= github.com/moby/buildkit v0.25.1 h1:j7IlVkeNbEo+ZLoxdudYCHpmTsbwKvhgc/6UJ/mY/o8= github.com/moby/buildkit v0.25.1/go.mod h1:phM8sdqnvgK2y1dPDnbwI6veUCXHOZ6KFSl6E164tkc= github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= @@ -195,8 +195,14 @@ github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/onsi/ginkgo/v2 v2.25.3 h1:Ty8+Yi/ayDAGtk4XxmmfUy4GabvM+MegeB4cDLRi6nw= -github.com/onsi/ginkgo/v2 v2.25.3/go.mod h1:43uiyQC4Ed2tkOzLsEYm7hnrb7UJTWHYNsuy3bG/snE= +github.com/ninja-quokka/container-libs/common v0.66.1-0.20251021200235-096ee6bbf8e8 h1:p6SlGS7qszJoTTd5mNk4044NpKxSmHpUI+a9orl5I78= +github.com/ninja-quokka/container-libs/common v0.66.1-0.20251021200235-096ee6bbf8e8/go.mod h1:aNd2a0S7pY+fx1X5kpQYuF4hbwLU8ZOccuVrhu7h1Xc= +github.com/ninja-quokka/container-libs/image/v5 v5.38.1-0.20251021200235-096ee6bbf8e8 h1:hQY/TMikluimOe6OaVPxQsciFJTuyAA+hV4qxQtoEHY= +github.com/ninja-quokka/container-libs/image/v5 v5.38.1-0.20251021200235-096ee6bbf8e8/go.mod h1:CybNTHgA9rynbPXK868nisDRwUS5T7TknZzPyEb8hbA= +github.com/ninja-quokka/container-libs/storage v1.61.1-0.20251021200235-096ee6bbf8e8 h1:WGpbzuNNZ4l7IXJcpQGbc7A61AnwDoQfD38nlKZvhFY= +github.com/ninja-quokka/container-libs/storage v1.61.1-0.20251021200235-096ee6bbf8e8/go.mod h1:oLjD+/c+LyFgYoKLxXjPd0Wr0q5eAqFEOM4kaujLVvw= +github.com/onsi/ginkgo/v2 v2.26.0 h1:1J4Wut1IlYZNEAWIV3ALrT9NfiaGW2cDCJQSFQMs/gE= +github.com/onsi/ginkgo/v2 v2.26.0/go.mod h1:qhEywmzWTBUY88kfO0BRvX4py7scov9yR+Az2oavUzw= github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A= github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k= github.com/opencontainers/cgroups v0.0.5 h1:DRITAqcOnY0uSBzIpt1RYWLjh5DPDiqUs4fY6Y0ktls= @@ -278,8 +284,8 @@ github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs= github.com/ulikunitz/xz v0.5.15 h1:9DNdB5s+SgV3bQ2ApL10xRc35ck0DuIX/isZvIk+ubY= github.com/ulikunitz/xz v0.5.15/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= -github.com/vbatts/tar-split v0.12.1 h1:CqKoORW7BUWBe7UL/iqTVvkTBOF8UvOMKOIZykxnnbo= -github.com/vbatts/tar-split 
v0.12.1/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA= +github.com/vbatts/tar-split v0.12.2 h1:w/Y6tjxpeiFMR47yzZPlPj/FcPLpXbTUi/9H7d3CPa4= +github.com/vbatts/tar-split v0.12.2/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA= github.com/vbauerster/mpb/v8 v8.10.2 h1:2uBykSHAYHekE11YvJhKxYmLATKHAGorZwFlyNw4hHM= github.com/vbauerster/mpb/v8 v8.10.2/go.mod h1:+Ja4P92E3/CorSZgfDtK46D7AVbDqmBQRTmyTqPElo0= github.com/vishvananda/netlink v1.3.1 h1:3AEMt62VKqz90r0tmNhog0r/PpWKmrEShJU0wJW6bV0= @@ -317,12 +323,6 @@ go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKr go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA= go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4= go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4= -go.podman.io/common v0.65.1-0.20250916163606-92222dcd3da4 h1:YjBqTOxz4cqfpifcd71VoBl1FTQL2U2La5NgMqmRRqU= -go.podman.io/common v0.65.1-0.20250916163606-92222dcd3da4/go.mod h1:DyOdwtkwzYA8lE0TueJnxRju4Lmsrx6ZAC/ATAkYYck= -go.podman.io/image/v5 v5.37.1-0.20250916163606-92222dcd3da4 h1:hfc3lZaxi6KGnWN3IusIaCkcMPR4rTR+vWZzakeD1EA= -go.podman.io/image/v5 v5.37.1-0.20250916163606-92222dcd3da4/go.mod h1:cGWb3IyBziJGxhFikTOlt9Ap+zo6s3rz9Qd1rbzqs4s= -go.podman.io/storage v1.60.1-0.20250916163606-92222dcd3da4 h1:jo0PSKh6muU7rmhXXqOV9aK+HrA8koqs47KhBsZf6LY= -go.podman.io/storage v1.60.1-0.20250916163606-92222dcd3da4/go.mod h1:AeZXAN8Qu1gTlAEHIc6mVhxk+61oMSM3K3iLx5UAQWE= go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go index 8b804b7dde4..16bdc90b202 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go @@ -42,6 +42,8 @@ import ( "golang.org/x/sync/errgroup" ) +type GzipHelperFunc func(io.Reader) (io.ReadCloser, error) + type options struct { chunkSize int compressionLevel int @@ -50,6 +52,7 @@ type options struct { compression Compression ctx context.Context minChunkSize int + gzipHelperFunc GzipHelperFunc } type Option func(o *options) error @@ -127,6 +130,18 @@ func WithMinChunkSize(minChunkSize int) Option { } } +// WithGzipHelperFunc option specifies a custom function to decompress gzip-compressed layers. +// When a gzip-compressed layer is detected, this function will be used instead of the +// Go standard library gzip decompression for better performance. +// The function should take an io.Reader as input and return an io.ReadCloser. +// If nil, the Go standard library gzip.NewReader will be used. +func WithGzipHelperFunc(gzipHelperFunc GzipHelperFunc) Option { + return func(o *options) error { + o.gzipHelperFunc = gzipHelperFunc + return nil + } +} + // Blob is an eStargz blob. 
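The WithGzipHelperFunc hunk above only changes how Build inflates an input blob that is itself gzip-compressed; nothing else in the build pipeline is touched. A minimal sketch of plugging in a faster decompressor, using klauspost/pgzip purely as an illustration (the file name and the choice of pgzip are assumptions; any func(io.Reader) (io.ReadCloser, error) works):

```go
package main

import (
	"io"
	"os"

	"github.com/containerd/stargz-snapshotter/estargz"
	"github.com/klauspost/pgzip"
)

func main() {
	f, err := os.Open("layer.tar.gz") // hypothetical gzip-compressed layer
	if err != nil {
		panic(err)
	}
	defer f.Close()
	fi, err := f.Stat()
	if err != nil {
		panic(err)
	}

	// The helper is consulted only when decompressBlob detects the
	// gzip magic bytes; otherwise Build behaves exactly as before.
	blob, err := estargz.Build(
		io.NewSectionReader(f, 0, fi.Size()),
		estargz.WithGzipHelperFunc(func(r io.Reader) (io.ReadCloser, error) {
			return pgzip.NewReader(r) // parallel gzip instead of stdlib gzip
		}),
	)
	if err != nil {
		panic(err)
	}
	defer blob.Close()
	if _, err := io.Copy(io.Discard, blob); err != nil { // consume the eStargz blob
		panic(err)
	}
}
```

Returning *pgzip.Reader satisfies io.ReadCloser directly, so no wrapper type is needed.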
type Blob struct { io.ReadCloser @@ -186,7 +201,7 @@ func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) { rErr = fmt.Errorf("error from context %q: %w", cErr, rErr) } }() - tarBlob, err := decompressBlob(tarBlob, layerFiles) + tarBlob, err := decompressBlob(tarBlob, layerFiles, opts.gzipHelperFunc) if err != nil { return nil, err } @@ -649,7 +664,7 @@ func (cr *countReadSeeker) currentPos() int64 { return *cr.cPos } -func decompressBlob(org *io.SectionReader, tmp *tempFiles) (*io.SectionReader, error) { +func decompressBlob(org *io.SectionReader, tmp *tempFiles, gzipHelperFunc GzipHelperFunc) (*io.SectionReader, error) { if org.Size() < 4 { return org, nil } @@ -660,7 +675,13 @@ func decompressBlob(org *io.SectionReader, tmp *tempFiles) (*io.SectionReader, e var dR io.Reader if bytes.Equal([]byte{0x1F, 0x8B, 0x08}, src[:3]) { // gzip - dgR, err := gzip.NewReader(io.NewSectionReader(org, 0, org.Size())) + var dgR io.ReadCloser + var err error + if gzipHelperFunc != nil { + dgR, err = gzipHelperFunc(io.NewSectionReader(org, 0, org.Size())) + } else { + dgR, err = gzip.NewReader(io.NewSectionReader(org, 0, org.Size())) + } if err != nil { return nil, err } diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go index f4d55465584..ff91a37add5 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go @@ -307,6 +307,15 @@ func (r *Reader) initFields() error { } } + if len(r.m) == 0 { + r.m[""] = &TOCEntry{ + Name: "", + Type: "dir", + Mode: 0755, + NumLink: 1, + } + } + return nil } diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go index a8dcdb868e6..ff165e090e3 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go @@ -38,7 +38,6 @@ import ( "reflect" "sort" "strings" - "testing" "time" "github.com/containerd/stargz-snapshotter/estargz/errorutil" @@ -49,16 +48,48 @@ import ( // TestingController is Compression with some helper methods necessary for testing. type TestingController interface { Compression - TestStreams(t *testing.T, b []byte, streams []int64) - DiffIDOf(*testing.T, []byte) string + TestStreams(t TestingT, b []byte, streams []int64) + DiffIDOf(TestingT, []byte) string String() string } +// TestingT is the minimal set of testing.T required to run the +// tests defined in CompressionTestSuite. This interface exists to prevent +// leaking the testing package from being exposed outside tests. +type TestingT interface { + Errorf(format string, args ...any) + FailNow() + Failed() bool + Fatal(args ...any) + Fatalf(format string, args ...any) + Logf(format string, args ...any) + Parallel() +} + +// Runner allows running subtests of TestingT. This exists instead of adding +// a Run method to TestingT interface because the Run implementation of +// testing.T would not satisfy the interface. +type Runner func(t TestingT, name string, fn func(t TestingT)) + +type TestRunner struct { + TestingT + Runner Runner +} + +func (r *TestRunner) Run(name string, run func(*TestRunner)) { + r.Runner(r.TestingT, name, func(t TestingT) { + run(&TestRunner{TestingT: t, Runner: r.Runner}) + }) +} + // CompressionTestSuite tests this pkg with controllers can build valid eStargz blobs and parse them. 
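Context for the signature churn that follows: the Runner indirection exists because (*testing.T).Run passes a *testing.T to its callback, so *testing.T can satisfy TestingT but not an interface that declares Run(name, func(TestingT)). A sketch of how a caller might bridge the two; newGzipController is a hypothetical TestingControllerFactory:

```go
package estargz_test

import (
	"testing"

	"github.com/containerd/stargz-snapshotter/estargz"
)

// newGzipController is hypothetical: any TestingControllerFactory the
// caller already has for its compression implementation.
var newGzipController estargz.TestingControllerFactory

func TestCompression(t *testing.T) {
	runner := &estargz.TestRunner{
		TestingT: t,
		Runner: func(t estargz.TestingT, name string, fn func(t estargz.TestingT)) {
			// Recover the concrete *testing.T to reach its Run method;
			// the subtest's *testing.T is handed back as a TestingT.
			t.(*testing.T).Run(name, func(t *testing.T) { fn(t) })
		},
	}
	estargz.CompressionTestSuite(runner, newGzipController)
}
```

TestRunner.Run re-wraps each subtest in a new TestRunner, so the adapter composes through arbitrarily nested t.Run calls.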
-func CompressionTestSuite(t *testing.T, controllers ...TestingControllerFactory) { - t.Run("testBuild", func(t *testing.T) { t.Parallel(); testBuild(t, controllers...) }) - t.Run("testDigestAndVerify", func(t *testing.T) { t.Parallel(); testDigestAndVerify(t, controllers...) }) - t.Run("testWriteAndOpen", func(t *testing.T) { t.Parallel(); testWriteAndOpen(t, controllers...) }) +func CompressionTestSuite(t *TestRunner, controllers ...TestingControllerFactory) { + t.Run("testBuild", func(t *TestRunner) { t.Parallel(); testBuild(t, controllers...) }) + t.Run("testDigestAndVerify", func(t *TestRunner) { + t.Parallel() + testDigestAndVerify(t, controllers...) + }) + t.Run("testWriteAndOpen", func(t *TestRunner) { t.Parallel(); testWriteAndOpen(t, controllers...) }) } type TestingControllerFactory func() TestingController @@ -79,7 +110,7 @@ var allowedPrefix = [4]string{"", "./", "/", "../"} // testBuild tests the resulting stargz blob built by this pkg has the same // contents as the normal stargz blob. -func testBuild(t *testing.T, controllers ...TestingControllerFactory) { +func testBuild(t *TestRunner, controllers ...TestingControllerFactory) { tests := []struct { name string chunkSize int @@ -165,7 +196,7 @@ func testBuild(t *testing.T, controllers ...TestingControllerFactory) { prefix := prefix for _, minChunkSize := range tt.minChunkSize { minChunkSize := minChunkSize - t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,src=%d,format=%s,minChunkSize=%d", newCL(), prefix, srcCompression, srcTarFormat, minChunkSize), func(t *testing.T) { + t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,src=%d,format=%s,minChunkSize=%d", newCL(), prefix, srcCompression, srcTarFormat, minChunkSize), func(t *TestRunner) { tarBlob := buildTar(t, tt.in, prefix, srcTarFormat) // Test divideEntries() entries, err := sortEntries(tarBlob, nil, nil) // identical order @@ -265,7 +296,7 @@ func testBuild(t *testing.T, controllers ...TestingControllerFactory) { } } -func isSameTarGz(t *testing.T, cla TestingController, a []byte, clb TestingController, b []byte) bool { +func isSameTarGz(t TestingT, cla TestingController, a []byte, clb TestingController, b []byte) bool { aGz, err := cla.Reader(bytes.NewReader(a)) if err != nil { t.Fatalf("failed to read A") @@ -325,7 +356,7 @@ func isSameTarGz(t *testing.T, cla TestingController, a []byte, clb TestingContr return true } -func isSameVersion(t *testing.T, cla TestingController, a []byte, clb TestingController, b []byte) bool { +func isSameVersion(t TestingT, cla TestingController, a []byte, clb TestingController, b []byte) bool { aJTOC, _, err := parseStargz(io.NewSectionReader(bytes.NewReader(a), 0, int64(len(a))), cla) if err != nil { t.Fatalf("failed to parse A: %v", err) @@ -339,7 +370,7 @@ func isSameVersion(t *testing.T, cla TestingController, a []byte, clb TestingCon return aJTOC.Version == bJTOC.Version } -func isSameEntries(t *testing.T, a, b *Reader) bool { +func isSameEntries(t TestingT, a, b *Reader) bool { aroot, ok := a.Lookup("") if !ok { t.Fatalf("failed to get root of A") @@ -353,7 +384,7 @@ func isSameEntries(t *testing.T, a, b *Reader) bool { return contains(t, aEntry, bEntry) && contains(t, bEntry, aEntry) } -func compressBlob(t *testing.T, src *io.SectionReader, srcCompression int) *io.SectionReader { +func compressBlob(t TestingT, src *io.SectionReader, srcCompression int) *io.SectionReader { buf := new(bytes.Buffer) var w io.WriteCloser var err error @@ -387,7 +418,7 @@ type stargzEntry struct { // contains checks if all child entries 
in "b" are also contained in "a". // This function also checks if the files/chunks contain the same contents among "a" and "b". -func contains(t *testing.T, a, b stargzEntry) bool { +func contains(t TestingT, a, b stargzEntry) bool { ae, ar := a.e, a.r be, br := b.e, b.r t.Logf("Comparing: %q vs %q", ae.Name, be.Name) @@ -498,7 +529,7 @@ func equalEntry(a, b *TOCEntry) bool { a.Digest == b.Digest } -func readOffset(t *testing.T, r *io.SectionReader, offset int64, e stargzEntry) ([]byte, int64, bool) { +func readOffset(t TestingT, r *io.SectionReader, offset int64, e stargzEntry) ([]byte, int64, bool) { ce, ok := e.r.ChunkEntryForOffset(e.e.Name, offset) if !ok { return nil, 0, false @@ -517,7 +548,7 @@ func readOffset(t *testing.T, r *io.SectionReader, offset int64, e stargzEntry) return data[:n], offset + ce.ChunkSize, true } -func dumpTOCJSON(t *testing.T, tocJSON *JTOC) string { +func dumpTOCJSON(t TestingT, tocJSON *JTOC) string { jtocData, err := json.Marshal(*tocJSON) if err != nil { t.Fatalf("failed to marshal TOC JSON: %v", err) @@ -531,20 +562,19 @@ func dumpTOCJSON(t *testing.T, tocJSON *JTOC) string { const chunkSize = 3 -// type check func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, compressionLevel int) -type check func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) +type check func(t *TestRunner, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) // testDigestAndVerify runs specified checks against sample stargz blobs. -func testDigestAndVerify(t *testing.T, controllers ...TestingControllerFactory) { +func testDigestAndVerify(t *TestRunner, controllers ...TestingControllerFactory) { tests := []struct { name string - tarInit func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) + tarInit func(t TestingT, dgstMap map[string]digest.Digest) (blob []tarEntry) checks []check minChunkSize []int }{ { name: "no-regfile", - tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) { + tarInit: func(t TestingT, dgstMap map[string]digest.Digest) (blob []tarEntry) { return tarOf( dir("test/"), ) @@ -559,7 +589,7 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingControllerFactory) }, { name: "small-files", - tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) { + tarInit: func(t TestingT, dgstMap map[string]digest.Digest) (blob []tarEntry) { return tarOf( regDigest(t, "baz.txt", "", dgstMap), regDigest(t, "foo.txt", "a", dgstMap), @@ -583,7 +613,7 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingControllerFactory) }, { name: "big-files", - tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) { + tarInit: func(t TestingT, dgstMap map[string]digest.Digest) (blob []tarEntry) { return tarOf( regDigest(t, "baz.txt", "bazbazbazbazbazbazbaz", dgstMap), regDigest(t, "foo.txt", "a", dgstMap), @@ -607,7 +637,7 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingControllerFactory) { name: "with-non-regfiles", minChunkSize: []int{0, 64000}, - tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) { + tarInit: func(t TestingT, dgstMap map[string]digest.Digest) (blob []tarEntry) { return tarOf( regDigest(t, "baz.txt", "bazbazbazbazbazbazbaz", dgstMap), regDigest(t, "foo.txt", "a", dgstMap), @@ -654,7 
+684,7 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingControllerFactory) srcTarFormat := srcTarFormat for _, minChunkSize := range tt.minChunkSize { minChunkSize := minChunkSize - t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,format=%s,minChunkSize=%d", newCL(), prefix, srcTarFormat, minChunkSize), func(t *testing.T) { + t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,format=%s,minChunkSize=%d", newCL(), prefix, srcTarFormat, minChunkSize), func(t *TestRunner) { // Get original tar file and chunk digests dgstMap := make(map[string]digest.Digest) tarBlob := buildTar(t, tt.tarInit(t, dgstMap), prefix, srcTarFormat) @@ -690,7 +720,7 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingControllerFactory) // checkStargzTOC checks the TOC JSON of the passed stargz has the expected // digest and contains valid chunks. It walks all entries in the stargz and // checks all chunk digests stored to the TOC JSON match the actual contents. -func checkStargzTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) { +func checkStargzTOC(t *TestRunner, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) { sgz, err := Open( io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))), WithDecompressors(controller), @@ -801,7 +831,7 @@ func checkStargzTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstM // checkVerifyTOC checks the verification works for the TOC JSON of the passed // stargz. It walks all entries in the stargz and checks the verifications for // all chunks work. -func checkVerifyTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) { +func checkVerifyTOC(t *TestRunner, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) { sgz, err := Open( io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))), WithDecompressors(controller), @@ -882,9 +912,9 @@ func checkVerifyTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstM // checkVerifyInvalidTOCEntryFail checks if misconfigured TOC JSON can be // detected during the verification and the verification returns an error. 
func checkVerifyInvalidTOCEntryFail(filename string) check { - return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) { + return func(t *TestRunner, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) { funcs := map[string]rewriteFunc{ - "lost digest in a entry": func(t *testing.T, toc *JTOC, sgz *io.SectionReader) { + "lost digest in a entry": func(t TestingT, toc *JTOC, sgz *io.SectionReader) { var found bool for _, e := range toc.Entries { if cleanEntryName(e.Name) == filename { @@ -902,7 +932,7 @@ func checkVerifyInvalidTOCEntryFail(filename string) check { t.Fatalf("rewrite target not found") } }, - "duplicated entry offset": func(t *testing.T, toc *JTOC, sgz *io.SectionReader) { + "duplicated entry offset": func(t TestingT, toc *JTOC, sgz *io.SectionReader) { var ( sampleEntry *TOCEntry targetEntry *TOCEntry @@ -929,7 +959,7 @@ func checkVerifyInvalidTOCEntryFail(filename string) check { } for name, rFunc := range funcs { - t.Run(name, func(t *testing.T) { + t.Run(name, func(t *TestRunner) { newSgz, newTocDigest := rewriteTOCJSON(t, io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))), rFunc, controller) buf := new(bytes.Buffer) if _, err := io.Copy(buf, newSgz); err != nil { @@ -958,7 +988,7 @@ func checkVerifyInvalidTOCEntryFail(filename string) check { // checkVerifyInvalidStargzFail checks if the verification detects that the // given stargz file doesn't match to the expected digest and returns error. func checkVerifyInvalidStargzFail(invalid *io.SectionReader) check { - return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) { + return func(t *TestRunner, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) { cl := newController() rc, err := Build(invalid, WithChunkSize(chunkSize), WithCompression(cl)) if err != nil { @@ -990,7 +1020,7 @@ func checkVerifyInvalidStargzFail(invalid *io.SectionReader) check { // checkVerifyBrokenContentFail checks if the verifier detects broken contents // that doesn't match to the expected digest and returns error. 
func checkVerifyBrokenContentFail(filename string) check { - return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) { + return func(t *TestRunner, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) { // Parse stargz file sgz, err := Open( io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))), @@ -1047,9 +1077,9 @@ func chunkID(name string, offset, size int64) string { return fmt.Sprintf("%s-%d-%d", cleanEntryName(name), offset, size) } -type rewriteFunc func(t *testing.T, toc *JTOC, sgz *io.SectionReader) +type rewriteFunc func(t TestingT, toc *JTOC, sgz *io.SectionReader) -func rewriteTOCJSON(t *testing.T, sgz *io.SectionReader, rewrite rewriteFunc, controller TestingController) (newSgz io.Reader, tocDigest digest.Digest) { +func rewriteTOCJSON(t TestingT, sgz *io.SectionReader, rewrite rewriteFunc, controller TestingController) (newSgz io.Reader, tocDigest digest.Digest) { decodedJTOC, jtocOffset, err := parseStargz(sgz, controller) if err != nil { t.Fatalf("failed to extract TOC JSON: %v", err) @@ -1120,7 +1150,7 @@ func parseStargz(sgz *io.SectionReader, controller TestingController) (decodedJT return decodedJTOC, tocOffset, nil } -func testWriteAndOpen(t *testing.T, controllers ...TestingControllerFactory) { +func testWriteAndOpen(t *TestRunner, controllers ...TestingControllerFactory) { const content = "Some contents" invalidUtf8 := "\xff\xfe\xfd" @@ -1464,7 +1494,7 @@ func testWriteAndOpen(t *testing.T, controllers ...TestingControllerFactory) { for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} { srcTarFormat := srcTarFormat for _, lossless := range []bool{true, false} { - t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,lossless=%v,format=%s", newCL(), prefix, lossless, srcTarFormat), func(t *testing.T) { + t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,lossless=%v,format=%s", newCL(), prefix, lossless, srcTarFormat), func(t *TestRunner) { var tr io.Reader = buildTar(t, tt.in, prefix, srcTarFormat) origTarDgstr := digest.Canonical.Digester() tr = io.TeeReader(tr, origTarDgstr.Hash()) @@ -1530,6 +1560,9 @@ func testWriteAndOpen(t *testing.T, controllers ...TestingControllerFactory) { if err != nil { t.Fatalf("stargz.Open: %v", err) } + if _, ok := r.Lookup(""); !ok { + t.Fatalf("failed to lookup rootdir: %v", err) + } wantTOCVersion := 1 if tt.wantTOCVersion > 0 { wantTOCVersion = tt.wantTOCVersion @@ -1628,7 +1661,7 @@ func digestFor(content string) string { type numTOCEntries int -func (n numTOCEntries) check(t *testing.T, r *Reader) { +func (n numTOCEntries) check(t TestingT, r *Reader) { if r.toc == nil { t.Fatal("nil TOC") } @@ -1648,15 +1681,15 @@ func (n numTOCEntries) check(t *testing.T, r *Reader) { func checks(s ...stargzCheck) []stargzCheck { return s } type stargzCheck interface { - check(t *testing.T, r *Reader) + check(t TestingT, r *Reader) } -type stargzCheckFn func(*testing.T, *Reader) +type stargzCheckFn func(TestingT, *Reader) -func (f stargzCheckFn) check(t *testing.T, r *Reader) { f(t, r) } +func (f stargzCheckFn) check(t TestingT, r *Reader) { f(t, r) } func maxDepth(max int) stargzCheck { - return stargzCheckFn(func(t *testing.T, r *Reader) { + return stargzCheckFn(func(t TestingT, r *Reader) { e, ok := r.Lookup("") if !ok { t.Fatal("root directory not found") @@ -1673,7 +1706,7 @@ 
func maxDepth(max int) stargzCheck { }) } -func getMaxDepth(t *testing.T, e *TOCEntry, current, limit int) (max int, rErr error) { +func getMaxDepth(t TestingT, e *TOCEntry, current, limit int) (max int, rErr error) { if current > limit { return -1, fmt.Errorf("walkMaxDepth: exceeds limit: current:%d > limit:%d", current, limit) @@ -1695,7 +1728,7 @@ func getMaxDepth(t *testing.T, e *TOCEntry, current, limit int) (max int, rErr e } func hasFileLen(file string, wantLen int) stargzCheck { - return stargzCheckFn(func(t *testing.T, r *Reader) { + return stargzCheckFn(func(t TestingT, r *Reader) { for _, ent := range r.toc.Entries { if ent.Name == file { if ent.Type != "reg" { @@ -1711,7 +1744,7 @@ func hasFileLen(file string, wantLen int) stargzCheck { } func hasFileXattrs(file, name, value string) stargzCheck { - return stargzCheckFn(func(t *testing.T, r *Reader) { + return stargzCheckFn(func(t TestingT, r *Reader) { for _, ent := range r.toc.Entries { if ent.Name == file { if ent.Type != "reg" { @@ -1738,7 +1771,7 @@ func hasFileXattrs(file, name, value string) stargzCheck { } func hasFileDigest(file string, digest string) stargzCheck { - return stargzCheckFn(func(t *testing.T, r *Reader) { + return stargzCheckFn(func(t TestingT, r *Reader) { ent, ok := r.Lookup(file) if !ok { t.Fatalf("didn't find TOCEntry for file %q", file) @@ -1750,7 +1783,7 @@ func hasFileDigest(file string, digest string) stargzCheck { } func hasFileContentsWithPreRead(file string, offset int, want string, extra ...chunkInfo) stargzCheck { - return stargzCheckFn(func(t *testing.T, r *Reader) { + return stargzCheckFn(func(t TestingT, r *Reader) { extraMap := make(map[string]chunkInfo) for _, e := range extra { extraMap[e.name] = e @@ -1797,7 +1830,7 @@ func hasFileContentsWithPreRead(file string, offset int, want string, extra ...c } func hasFileContentsRange(file string, offset int, want string) stargzCheck { - return stargzCheckFn(func(t *testing.T, r *Reader) { + return stargzCheckFn(func(t TestingT, r *Reader) { f, err := r.OpenFile(file) if err != nil { t.Fatal(err) @@ -1814,7 +1847,7 @@ func hasFileContentsRange(file string, offset int, want string) stargzCheck { } func hasChunkEntries(file string, wantChunks int) stargzCheck { - return stargzCheckFn(func(t *testing.T, r *Reader) { + return stargzCheckFn(func(t TestingT, r *Reader) { ent, ok := r.Lookup(file) if !ok { t.Fatalf("no file for %q", file) @@ -1858,7 +1891,7 @@ func hasChunkEntries(file string, wantChunks int) stargzCheck { } func entryHasChildren(dir string, want ...string) stargzCheck { - return stargzCheckFn(func(t *testing.T, r *Reader) { + return stargzCheckFn(func(t TestingT, r *Reader) { want := append([]string(nil), want...) 
var got []string ent, ok := r.Lookup(dir) @@ -1877,7 +1910,7 @@ func entryHasChildren(dir string, want ...string) stargzCheck { } func hasDir(file string) stargzCheck { - return stargzCheckFn(func(t *testing.T, r *Reader) { + return stargzCheckFn(func(t TestingT, r *Reader) { for _, ent := range r.toc.Entries { if ent.Name == cleanEntryName(file) { if ent.Type != "dir" { @@ -1891,7 +1924,7 @@ func hasDir(file string) stargzCheck { } func hasDirLinkCount(file string, count int) stargzCheck { - return stargzCheckFn(func(t *testing.T, r *Reader) { + return stargzCheckFn(func(t TestingT, r *Reader) { for _, ent := range r.toc.Entries { if ent.Name == cleanEntryName(file) { if ent.Type != "dir" { @@ -1909,7 +1942,7 @@ func hasDirLinkCount(file string, count int) stargzCheck { } func hasMode(file string, mode os.FileMode) stargzCheck { - return stargzCheckFn(func(t *testing.T, r *Reader) { + return stargzCheckFn(func(t TestingT, r *Reader) { for _, ent := range r.toc.Entries { if ent.Name == cleanEntryName(file) { if ent.Stat().Mode() != mode { @@ -1924,7 +1957,7 @@ func hasMode(file string, mode os.FileMode) stargzCheck { } func hasSymlink(file, target string) stargzCheck { - return stargzCheckFn(func(t *testing.T, r *Reader) { + return stargzCheckFn(func(t TestingT, r *Reader) { for _, ent := range r.toc.Entries { if ent.Name == file { if ent.Type != "symlink" { @@ -1940,7 +1973,7 @@ func hasSymlink(file, target string) stargzCheck { } func lookupMatch(name string, want *TOCEntry) stargzCheck { - return stargzCheckFn(func(t *testing.T, r *Reader) { + return stargzCheckFn(func(t TestingT, r *Reader) { e, ok := r.Lookup(name) if !ok { t.Fatalf("failed to Lookup entry %q", name) @@ -1953,7 +1986,7 @@ func lookupMatch(name string, want *TOCEntry) stargzCheck { } func hasEntryOwner(entry string, owner owner) stargzCheck { - return stargzCheckFn(func(t *testing.T, r *Reader) { + return stargzCheckFn(func(t TestingT, r *Reader) { ent, ok := r.Lookup(strings.TrimSuffix(entry, "/")) if !ok { t.Errorf("entry %q not found", entry) @@ -1967,7 +2000,7 @@ func hasEntryOwner(entry string, owner owner) stargzCheck { } func mustSameEntry(files ...string) stargzCheck { - return stargzCheckFn(func(t *testing.T, r *Reader) { + return stargzCheckFn(func(t TestingT, r *Reader) { var first *TOCEntry for _, f := range files { if first == nil { @@ -2039,7 +2072,7 @@ func (f tarEntryFunc) appendTar(tw *tar.Writer, prefix string, format tar.Format return f(tw, prefix, format) } -func buildTar(t *testing.T, ents []tarEntry, prefix string, opts ...interface{}) *io.SectionReader { +func buildTar(t TestingT, ents []tarEntry, prefix string, opts ...interface{}) *io.SectionReader { format := tar.FormatUnknown for _, opt := range opts { switch v := opt.(type) { @@ -2248,7 +2281,7 @@ func noPrefetchLandmark() tarEntry { }) } -func regDigest(t *testing.T, name string, contentStr string, digestMap map[string]digest.Digest) tarEntry { +func regDigest(t TestingT, name string, contentStr string, digestMap map[string]digest.Digest) tarEntry { if digestMap == nil { t.Fatalf("digest map mustn't be nil") } @@ -2318,7 +2351,7 @@ func (f fileInfoOnlyMode) ModTime() time.Time { return time.Now() } func (f fileInfoOnlyMode) IsDir() bool { return os.FileMode(f).IsDir() } func (f fileInfoOnlyMode) Sys() interface{} { return nil } -func CheckGzipHasStreams(t *testing.T, b []byte, streams []int64) { +func CheckGzipHasStreams(t TestingT, b []byte, streams []int64) { if len(streams) == 0 { return // nop } @@ -2356,7 +2389,7 @@ func 
CheckGzipHasStreams(t *testing.T, b []byte, streams []int64) { } } -func GzipDiffIDOf(t *testing.T, b []byte) string { +func GzipDiffIDOf(t TestingT, b []byte) string { h := sha256.New() zr, err := gzip.NewReader(bytes.NewReader(b)) if err != nil { diff --git a/vendor/github.com/klauspost/compress/flate/deflate.go b/vendor/github.com/klauspost/compress/flate/deflate.go index af53fb860cc..4e92f5998a8 100644 --- a/vendor/github.com/klauspost/compress/flate/deflate.go +++ b/vendor/github.com/klauspost/compress/flate/deflate.go @@ -6,11 +6,12 @@ package flate import ( - "encoding/binary" "errors" "fmt" "io" "math" + + "github.com/klauspost/compress/internal/le" ) const ( @@ -234,12 +235,9 @@ func (d *compressor) fillWindow(b []byte) { // Calculate 256 hashes at the time (more L1 cache hits) loops := (n + 256 - minMatchLength) / 256 - for j := 0; j < loops; j++ { + for j := range loops { startindex := j * 256 - end := startindex + 256 + minMatchLength - 1 - if end > n { - end = n - } + end := min(startindex+256+minMatchLength-1, n) tocheck := d.window[startindex:end] dstSize := len(tocheck) - minMatchLength + 1 @@ -269,18 +267,12 @@ func (d *compressor) fillWindow(b []byte) { // We only look at chainCount possibilities before giving up. // pos = s.index, prevHead = s.chainHead-s.hashOffset, prevLength=minMatchLength-1, lookahead func (d *compressor) findMatch(pos int, prevHead int, lookahead int) (length, offset int, ok bool) { - minMatchLook := maxMatchLength - if lookahead < minMatchLook { - minMatchLook = lookahead - } + minMatchLook := min(lookahead, maxMatchLength) win := d.window[0 : pos+minMatchLook] // We quit when we get a match that's at least nice long - nice := len(win) - pos - if d.nice < nice { - nice = d.nice - } + nice := min(d.nice, len(win)-pos) // If we've got a match that's good enough, only look in 1/4 the chain. tries := d.chain @@ -288,10 +280,7 @@ func (d *compressor) findMatch(pos int, prevHead int, lookahead int) (length, of wEnd := win[pos+length] wPos := win[pos:] - minIndex := pos - windowSize - if minIndex < 0 { - minIndex = 0 - } + minIndex := max(pos-windowSize, 0) offset = 0 if d.chain < 100 { @@ -374,7 +363,7 @@ func (d *compressor) writeStoredBlock(buf []byte) error { // of the supplied slice. // The caller must ensure that len(b) >= 4. func hash4(b []byte) uint32 { - return hash4u(binary.LittleEndian.Uint32(b), hashBits) + return hash4u(le.Load32(b, 0), hashBits) } // hash4 returns the hash of u to fit in a hash table with h bits. @@ -389,7 +378,7 @@ func bulkHash4(b []byte, dst []uint32) { if len(b) < 4 { return } - hb := binary.LittleEndian.Uint32(b) + hb := le.Load32(b, 0) dst[0] = hash4u(hb, hashBits) end := len(b) - 4 + 1 @@ -480,10 +469,7 @@ func (d *compressor) deflateLazy() { prevOffset := s.offset s.length = minMatchLength - 1 s.offset = 0 - minIndex := s.index - windowSize - if minIndex < 0 { - minIndex = 0 - } + minIndex := max(s.index-windowSize, 0) if s.chainHead-s.hashOffset >= minIndex && lookahead > prevLength && prevLength < d.lazy { if newLength, newOffset, ok := d.findMatch(s.index, s.chainHead-s.hashOffset, lookahead); ok { @@ -503,10 +489,7 @@ func (d *compressor) deflateLazy() { if prevLength < maxMatchLength-checkOff { prevIndex := s.index - 1 if prevIndex+prevLength < s.maxInsertIndex { - end := lookahead - if lookahead > maxMatchLength+checkOff { - end = maxMatchLength + checkOff - } + end := min(lookahead, maxMatchLength+checkOff) end += prevIndex // Hash at match end. 
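Most of the klauspost/compress churn from here on is mechanical modernization rather than behavior change: Go 1.21's min/max builtins replace the clamp-with-if pattern, and Go 1.22's range-over-int replaces counted loops. A small standalone sketch of the equivalences:

```go
package main

import "fmt"

func main() {
	n, startindex := 100, 95

	// Before: end := startindex + 20; if end > n { end = n }
	end := min(startindex+20, n) // Go 1.21 builtin
	fmt.Println(end)             // 100

	// Before: for j := 0; j < 3; j++ { ... }
	for j := range 3 { // Go 1.22 range over int
		fmt.Println(j) // 0, 1, 2
	}
}
```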
@@ -603,15 +586,9 @@ func (d *compressor) deflateLazy() { // table. newIndex := s.index + prevLength - 1 // Calculate missing hashes - end := newIndex - if end > s.maxInsertIndex { - end = s.maxInsertIndex - } + end := min(newIndex, s.maxInsertIndex) end += minMatchLength - 1 - startindex := s.index + 1 - if startindex > s.maxInsertIndex { - startindex = s.maxInsertIndex - } + startindex := min(s.index+1, s.maxInsertIndex) tocheck := d.window[startindex:end] dstSize := len(tocheck) - minMatchLength + 1 if dstSize > 0 { diff --git a/vendor/github.com/klauspost/compress/flate/dict_decoder.go b/vendor/github.com/klauspost/compress/flate/dict_decoder.go index bb36351a5af..cb855abc4ba 100644 --- a/vendor/github.com/klauspost/compress/flate/dict_decoder.go +++ b/vendor/github.com/klauspost/compress/flate/dict_decoder.go @@ -104,10 +104,7 @@ func (dd *dictDecoder) writeCopy(dist, length int) int { dstBase := dd.wrPos dstPos := dstBase srcPos := dstPos - dist - endPos := dstPos + length - if endPos > len(dd.hist) { - endPos = len(dd.hist) - } + endPos := min(dstPos+length, len(dd.hist)) // Copy non-overlapping section after destination position. // diff --git a/vendor/github.com/klauspost/compress/flate/fast_encoder.go b/vendor/github.com/klauspost/compress/flate/fast_encoder.go index 0e8b1630c04..791c9dcbfd9 100644 --- a/vendor/github.com/klauspost/compress/flate/fast_encoder.go +++ b/vendor/github.com/klauspost/compress/flate/fast_encoder.go @@ -7,7 +7,6 @@ package flate import ( "fmt" - "math/bits" "github.com/klauspost/compress/internal/le" ) @@ -151,29 +150,9 @@ func (e *fastGen) matchlen(s, t int, src []byte) int32 { panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")")) } } - s1 := min(s+maxMatchLength-4, len(src)) - left := s1 - s - n := int32(0) - for left >= 8 { - diff := le.Load64(src, s) ^ le.Load64(src, t) - if diff != 0 { - return n + int32(bits.TrailingZeros64(diff)>>3) - } - s += 8 - t += 8 - n += 8 - left -= 8 - } - - a := src[s:s1] + a := src[s:min(s+maxMatchLength-4, len(src))] b := src[t:] - for i := range a { - if a[i] != b[i] { - break - } - n++ - } - return n + return int32(matchLen(a, b)) } // matchlenLong will return the match length between offsets and t in src. @@ -193,29 +172,7 @@ func (e *fastGen) matchlenLong(s, t int, src []byte) int32 { panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")")) } } - // Extend the match to be as long as possible. - left := len(src) - s - n := int32(0) - for left >= 8 { - diff := le.Load64(src, s) ^ le.Load64(src, t) - if diff != 0 { - return n + int32(bits.TrailingZeros64(diff)>>3) - } - s += 8 - t += 8 - n += 8 - left -= 8 - } - - a := src[s:] - b := src[t:] - for i := range a { - if a[i] != b[i] { - break - } - n++ - } - return n + return int32(matchLen(src[s:], src[t:])) } // Reset the encoding table. 
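matchlen and matchlenLong above now delegate to the package's shared matchLen helper instead of open-coding the scan. A sketch of the technique the deleted loops implemented, assuming len(b) >= len(a) as the call sites guarantee: XOR eight bytes at a time, and let TrailingZeros64 locate the first differing byte.

```go
package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

// matchLenSketch returns the number of leading bytes a and b share.
// A zero XOR means all eight bytes match; otherwise the lowest set bit
// of diff marks the first mismatch, so TrailingZeros64(diff)/8 is its
// byte index.
func matchLenSketch(a, b []byte) (n int) {
	for len(a) >= 8 {
		diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b)
		if diff != 0 {
			return n + bits.TrailingZeros64(diff)>>3
		}
		a, b, n = a[8:], b[8:], n+8
	}
	for i := range a { // tail: fewer than 8 bytes left
		if a[i] != b[i] {
			return n + i
		}
	}
	return n + len(a)
}

func main() {
	fmt.Println(matchLenSketch([]byte("gopher!!"), []byte("gopherXX"))) // 6
}
```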
diff --git a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go index afdc8c053a0..03a1796979b 100644 --- a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go +++ b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go @@ -211,7 +211,9 @@ func (w *huffmanBitWriter) flush() { n++ } w.bits = 0 - w.write(w.bytes[:n]) + if n > 0 { + w.write(w.bytes[:n]) + } w.nbytes = 0 } @@ -303,10 +305,7 @@ func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int, litE w.codegenFreq[size]++ count-- for count >= 3 { - n := 6 - if n > count { - n = count - } + n := min(6, count) codegen[outIndex] = 16 outIndex++ codegen[outIndex] = uint8(n - 3) @@ -316,10 +315,7 @@ func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int, litE } } else { for count >= 11 { - n := 138 - if n > count { - n = count - } + n := min(138, count) codegen[outIndex] = 18 outIndex++ codegen[outIndex] = uint8(n - 11) @@ -438,8 +434,8 @@ func (w *huffmanBitWriter) writeOutBits() { w.nbits -= 48 n := w.nbytes - // We over-write, but faster... - le.Store64(w.bytes[n:], bits) + // We overwrite, but faster... + le.Store64(w.bytes[:], n, bits) n += 6 if n >= bufferFlushSize { @@ -472,7 +468,7 @@ func (w *huffmanBitWriter) writeDynamicHeader(numLiterals int, numOffsets int, n w.writeBits(int32(numOffsets-1), 5) w.writeBits(int32(numCodegens-4), 4) - for i := 0; i < numCodegens; i++ { + for i := range numCodegens { value := uint(w.codegenEncoding.codes[codegenOrder[i]].len()) w.writeBits(int32(value), 3) } @@ -650,7 +646,7 @@ func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []b w.lastHeader = 0 } - numLiterals, numOffsets := w.indexTokens(tokens, !sync) + numLiterals, numOffsets := w.indexTokens(tokens, fillReuse && !sync) extraBits := 0 ssize, storable := w.storedSize(input) @@ -855,8 +851,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) bits |= c.code64() << (nbits & 63) nbits += c.len() if nbits >= 48 { - le.Store64(w.bytes[nbytes:], bits) - //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits + le.Store64(w.bytes[:], nbytes, bits) bits >>= 48 nbits -= 48 nbytes += 6 @@ -883,8 +878,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) bits |= c.code64() << (nbits & 63) nbits += c.len() if nbits >= 48 { - le.Store64(w.bytes[nbytes:], bits) - //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits + le.Store64(w.bytes[:], nbytes, bits) bits >>= 48 nbits -= 48 nbytes += 6 @@ -906,8 +900,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) bits |= uint64(extraLength) << (nbits & 63) nbits += extraLengthBits if nbits >= 48 { - le.Store64(w.bytes[nbytes:], bits) - //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits + le.Store64(w.bytes[:], nbytes, bits) bits >>= 48 nbits -= 48 nbytes += 6 @@ -932,8 +925,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) bits |= c.code64() << (nbits & 63) nbits += c.len() if nbits >= 48 { - le.Store64(w.bytes[nbytes:], bits) - //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits + le.Store64(w.bytes[:], nbytes, bits) bits >>= 48 nbits -= 48 nbytes += 6 @@ -954,8 +946,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) bits |= uint64((offset-(offsetComb>>8))&matchOffsetOnlyMask) << (nbits & 63) nbits += uint8(offsetComb) if nbits >= 48 { - le.Store64(w.bytes[nbytes:], bits) - 
//*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits + le.Store64(w.bytes[:], nbytes, bits) bits >>= 48 nbits -= 48 nbytes += 6 @@ -1108,7 +1099,7 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { // We must have at least 48 bits free. if nbits >= 8 { n := nbits >> 3 - le.Store64(w.bytes[nbytes:], bits) + le.Store64(w.bytes[:], nbytes, bits) bits >>= (n * 8) & 63 nbits -= n * 8 nbytes += n @@ -1137,8 +1128,7 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { // Remaining... for _, t := range input { if nbits >= 48 { - le.Store64(w.bytes[nbytes:], bits) - //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits + le.Store64(w.bytes[:], nbytes, bits) bits >>= 48 nbits -= 48 nbytes += 6 diff --git a/vendor/github.com/klauspost/compress/flate/huffman_code.go b/vendor/github.com/klauspost/compress/flate/huffman_code.go index be7b58b473f..5f901bd0fe8 100644 --- a/vendor/github.com/klauspost/compress/flate/huffman_code.go +++ b/vendor/github.com/klauspost/compress/flate/huffman_code.go @@ -91,7 +91,7 @@ func generateFixedLiteralEncoding() *huffmanEncoder { h := newHuffmanEncoder(literalCount) codes := h.codes var ch uint16 - for ch = 0; ch < literalCount; ch++ { + for ch = range uint16(literalCount) { var bits uint16 var size uint8 switch { diff --git a/vendor/github.com/klauspost/compress/flate/inflate.go b/vendor/github.com/klauspost/compress/flate/inflate.go index 0d7b437f1c6..6e90126db03 100644 --- a/vendor/github.com/klauspost/compress/flate/inflate.go +++ b/vendor/github.com/klauspost/compress/flate/inflate.go @@ -485,7 +485,7 @@ func (f *decompressor) readHuffman() error { f.nb -= 5 + 5 + 4 // (HCLEN+4)*3 bits: code lengths in the magic codeOrder order. - for i := 0; i < nclen; i++ { + for i := range nclen { for f.nb < 3 { if err := f.moreBits(); err != nil { return err @@ -776,7 +776,7 @@ func fixedHuffmanDecoderInit() { fixedOnce.Do(func() { // These come from the RFC section 3.2.6. var bits [288]int - for i := 0; i < 144; i++ { + for i := range 144 { bits[i] = 8 } for i := 144; i < 256; i++ { diff --git a/vendor/github.com/klauspost/compress/flate/level5.go b/vendor/github.com/klauspost/compress/flate/level5.go index 6e5c21502f7..a22ad7d1255 100644 --- a/vendor/github.com/klauspost/compress/flate/level5.go +++ b/vendor/github.com/klauspost/compress/flate/level5.go @@ -677,10 +677,7 @@ func (e *fastEncL5Window) matchlen(s, t int32, src []byte) int32 { panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")")) } } - s1 := int(s) + maxMatchLength - 4 - if s1 > len(src) { - s1 = len(src) - } + s1 := min(int(s)+maxMatchLength-4, len(src)) // Extend the match to be as long as possible. return int32(matchLen(src[s:s1], src[t:])) diff --git a/vendor/github.com/klauspost/compress/flate/stateless.go b/vendor/github.com/klauspost/compress/flate/stateless.go index 13b9b100dbc..90b74f7acdd 100644 --- a/vendor/github.com/klauspost/compress/flate/stateless.go +++ b/vendor/github.com/klauspost/compress/flate/stateless.go @@ -56,7 +56,7 @@ func NewStatelessWriter(dst io.Writer) io.WriteCloser { // bitWriterPool contains bit writers that can be reused. 
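On the le.Store64 call sites above: the function grows an offset parameter, so callers pass (buffer, index, value) instead of re-slicing with w.bytes[nbytes:]. On the unsafe fast path that becomes a single pointer add with no intermediate slice header. A sketch of the portable half with a plain int index (the vendored version is generic over an Indexer constraint, elided here):

```go
package le

import "encoding/binary"

// store64 sketches the new indexed signature; semantically it is the
// old le.Store64(b[i:], v) with the offset folded into the call.
func store64(b []byte, i int, v uint64) {
	binary.LittleEndian.PutUint64(b[i:], v)
}
```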
var bitWriterPool = sync.Pool{ - New: func() interface{} { + New: func() any { return newHuffmanBitWriter(nil) }, } @@ -184,7 +184,7 @@ func statelessEnc(dst *tokens, src []byte, startAt int16) { // Index until startAt if startAt > 0 { cv := load3232(src, 0) - for i := int16(0); i < startAt; i++ { + for i := range startAt { table[hashSL(cv)] = tableEntry{offset: i} cv = (cv >> 8) | (uint32(src[i+4]) << 24) } diff --git a/vendor/github.com/klauspost/compress/fse/bitwriter.go b/vendor/github.com/klauspost/compress/fse/bitwriter.go index e82fa3bb7b6..d58b3fe4237 100644 --- a/vendor/github.com/klauspost/compress/fse/bitwriter.go +++ b/vendor/github.com/klauspost/compress/fse/bitwriter.go @@ -143,7 +143,7 @@ func (b *bitWriter) flush32() { // flushAlign will flush remaining full bytes and align to next byte boundary. func (b *bitWriter) flushAlign() { nbBytes := (b.nBits + 7) >> 3 - for i := uint8(0); i < nbBytes; i++ { + for i := range nbBytes { b.out = append(b.out, byte(b.bitContainer>>(i*8))) } b.nBits = 0 diff --git a/vendor/github.com/klauspost/compress/fse/compress.go b/vendor/github.com/klauspost/compress/fse/compress.go index 074018d8f94..8c8baa4fc2c 100644 --- a/vendor/github.com/klauspost/compress/fse/compress.go +++ b/vendor/github.com/klauspost/compress/fse/compress.go @@ -396,7 +396,7 @@ func (s *Scratch) buildCTable() error { if v > largeLimit { s.zeroBits = true } - for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ { + for range v { tableSymbol[position] = symbol position = (position + step) & tableMask for position > highThreshold { diff --git a/vendor/github.com/klauspost/compress/huff0/bitwriter.go b/vendor/github.com/klauspost/compress/huff0/bitwriter.go index 0ebc9aaac76..41db94cded0 100644 --- a/vendor/github.com/klauspost/compress/huff0/bitwriter.go +++ b/vendor/github.com/klauspost/compress/huff0/bitwriter.go @@ -85,7 +85,7 @@ func (b *bitWriter) flush32() { // flushAlign will flush remaining full bytes and align to next byte boundary. func (b *bitWriter) flushAlign() { nbBytes := (b.nBits + 7) >> 3 - for i := uint8(0); i < nbBytes; i++ { + for i := range nbBytes { b.out = append(b.out, byte(b.bitContainer>>(i*8))) } b.nBits = 0 diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go index 84aa3d12f00..a97cf1b5d35 100644 --- a/vendor/github.com/klauspost/compress/huff0/compress.go +++ b/vendor/github.com/klauspost/compress/huff0/compress.go @@ -276,7 +276,7 @@ func (s *Scratch) compress4X(src []byte) ([]byte, error) { offsetIdx := len(s.Out) s.Out = append(s.Out, sixZeros[:]...) 
- for i := 0; i < 4; i++ { + for i := range 4 { toDo := src if len(toDo) > segmentSize { toDo = toDo[:segmentSize] @@ -312,7 +312,7 @@ func (s *Scratch) compress4Xp(src []byte) ([]byte, error) { segmentSize := (len(src) + 3) / 4 var wg sync.WaitGroup wg.Add(4) - for i := 0; i < 4; i++ { + for i := range 4 { toDo := src if len(toDo) > segmentSize { toDo = toDo[:segmentSize] @@ -326,7 +326,7 @@ func (s *Scratch) compress4Xp(src []byte) ([]byte, error) { }(i) } wg.Wait() - for i := 0; i < 4; i++ { + for i := range 4 { o := s.tmpOut[i] if len(o) > math.MaxUint16 { // We cannot store the size in the jump table diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go index 0f56b02d747..7d0efa8818a 100644 --- a/vendor/github.com/klauspost/compress/huff0/decompress.go +++ b/vendor/github.com/klauspost/compress/huff0/decompress.go @@ -626,7 +626,7 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { var br [4]bitReaderBytes start := 6 - for i := 0; i < 3; i++ { + for i := range 3 { length := int(src[i*2]) | (int(src[i*2+1]) << 8) if start+length >= len(src) { return nil, errors.New("truncated input (or invalid offset)") @@ -798,10 +798,7 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { remainBytes := dstEvery - (decoded / 4) for i := range br { offset := dstEvery * i - endsAt := offset + remainBytes - if endsAt > len(out) { - endsAt = len(out) - } + endsAt := min(offset+remainBytes, len(out)) br := &br[i] bitsLeft := br.remaining() for bitsLeft > 0 { @@ -864,7 +861,7 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { var br [4]bitReaderBytes start := 6 - for i := 0; i < 3; i++ { + for i := range 3 { length := int(src[i*2]) | (int(src[i*2+1]) << 8) if start+length >= len(src) { return nil, errors.New("truncated input (or invalid offset)") @@ -1035,10 +1032,7 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { remainBytes := dstEvery - (decoded / 4) for i := range br { offset := dstEvery * i - endsAt := offset + remainBytes - if endsAt > len(out) { - endsAt = len(out) - } + endsAt := min(offset+remainBytes, len(out)) br := &br[i] bitsLeft := br.remaining() for bitsLeft > 0 { diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go index ba7e8e6b027..99ddd4af97c 100644 --- a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go +++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go @@ -58,7 +58,7 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { var br [4]bitReaderShifted // Decode "jump table" start := 6 - for i := 0; i < 3; i++ { + for i := range 3 { length := int(src[i*2]) | (int(src[i*2+1]) << 8) if start+length >= len(src) { return nil, errors.New("truncated input (or invalid offset)") @@ -109,10 +109,7 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { remainBytes := dstEvery - (decoded / 4) for i := range br { offset := dstEvery * i - endsAt := offset + remainBytes - if endsAt > len(out) { - endsAt = len(out) - } + endsAt := min(offset+remainBytes, len(out)) br := &br[i] bitsLeft := br.remaining() for bitsLeft > 0 { diff --git a/vendor/github.com/klauspost/compress/huff0/huff0.go b/vendor/github.com/klauspost/compress/huff0/huff0.go index 77ecd68e0a7..67d9e05b6cc 100644 --- 
a/vendor/github.com/klauspost/compress/huff0/huff0.go +++ b/vendor/github.com/klauspost/compress/huff0/huff0.go @@ -201,7 +201,7 @@ func (c cTable) write(s *Scratch) error { for i := range hist[:16] { hist[i] = 0 } - for n := uint8(0); n < maxSymbolValue; n++ { + for n := range maxSymbolValue { v := bitsToWeight[c[n].nBits] & 15 huffWeight[n] = v hist[v]++ @@ -271,7 +271,7 @@ func (c cTable) estTableSize(s *Scratch) (sz int, err error) { for i := range hist[:16] { hist[i] = 0 } - for n := uint8(0); n < maxSymbolValue; n++ { + for n := range maxSymbolValue { v := bitsToWeight[c[n].nBits] & 15 huffWeight[n] = v hist[v]++ diff --git a/vendor/github.com/klauspost/compress/internal/le/unsafe_disabled.go b/vendor/github.com/klauspost/compress/internal/le/unsafe_disabled.go index 0cfb5c0e278..4f2a0d8c58d 100644 --- a/vendor/github.com/klauspost/compress/internal/le/unsafe_disabled.go +++ b/vendor/github.com/klauspost/compress/internal/le/unsafe_disabled.go @@ -37,6 +37,6 @@ func Store32(b []byte, v uint32) { } // Store64 will store v at b. -func Store64(b []byte, v uint64) { - binary.LittleEndian.PutUint64(b, v) +func Store64[I Indexer](b []byte, i I, v uint64) { + binary.LittleEndian.PutUint64(b[i:], v) } diff --git a/vendor/github.com/klauspost/compress/internal/le/unsafe_enabled.go b/vendor/github.com/klauspost/compress/internal/le/unsafe_enabled.go index ada45cd909e..218a38bc4a5 100644 --- a/vendor/github.com/klauspost/compress/internal/le/unsafe_enabled.go +++ b/vendor/github.com/klauspost/compress/internal/le/unsafe_enabled.go @@ -38,18 +38,15 @@ func Load64[I Indexer](b []byte, i I) uint64 { // Store16 will store v at b. func Store16(b []byte, v uint16) { - //binary.LittleEndian.PutUint16(b, v) *(*uint16)(unsafe.Pointer(unsafe.SliceData(b))) = v } // Store32 will store v at b. func Store32(b []byte, v uint32) { - //binary.LittleEndian.PutUint32(b, v) *(*uint32)(unsafe.Pointer(unsafe.SliceData(b))) = v } -// Store64 will store v at b. -func Store64(b []byte, v uint64) { - //binary.LittleEndian.PutUint64(b, v) - *(*uint64)(unsafe.Pointer(unsafe.SliceData(b))) = v +// Store64 will store v at b[i:]. +func Store64[I Indexer](b []byte, i I, v uint64) { + *(*uint64)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i)) = v } diff --git a/vendor/github.com/klauspost/compress/internal/snapref/decode.go b/vendor/github.com/klauspost/compress/internal/snapref/decode.go index 40796a49d65..a2c82fcd226 100644 --- a/vendor/github.com/klauspost/compress/internal/snapref/decode.go +++ b/vendor/github.com/klauspost/compress/internal/snapref/decode.go @@ -209,7 +209,7 @@ func (r *Reader) fill() error { if !r.readFull(r.buf[:len(magicBody)], false) { return r.err } - for i := 0; i < len(magicBody); i++ { + for i := range len(magicBody) { if r.buf[i] != magicBody[i] { r.err = ErrCorrupt return r.err diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode.go b/vendor/github.com/klauspost/compress/internal/snapref/encode.go index 13c6040a5de..860a994167a 100644 --- a/vendor/github.com/klauspost/compress/internal/snapref/encode.go +++ b/vendor/github.com/klauspost/compress/internal/snapref/encode.go @@ -20,8 +20,10 @@ import ( func Encode(dst, src []byte) []byte { if n := MaxEncodedLen(len(src)); n < 0 { panic(ErrTooLarge) - } else if len(dst) < n { + } else if cap(dst) < n { dst = make([]byte, n) + } else { + dst = dst[:n] } // The block starts with the varint-encoded length of the decompressed bytes. 
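Most of the churn in the compress hunks above is mechanical modernization: `interface{}` becomes its alias `any`, and C-style counted loops become Go 1.22 range-over-int loops. One detail worth knowing is that the loop variable inherits the type of the range operand, which is why typed counters such as `for i := uint8(0); i < nbBytes; i++` collapse to `for i := range nbBytes` with no conversion. A minimal sketch of the equivalence, with illustrative names rather than the vendored ones:

```go
package main

import "fmt"

func main() {
	var nbBytes uint8 = 3
	bitContainer := uint64(0x0807060504030201)
	var out []byte

	// Go 1.22+: i takes the values 0..nbBytes-1 and is typed uint8,
	// exactly matching the old `for i := uint8(0); i < nbBytes; i++`.
	for i := range nbBytes {
		out = append(out, byte(bitContainer>>(i*8)))
	}
	fmt.Printf("%x\n", out) // 010203
}
```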
diff --git a/vendor/github.com/klauspost/compress/zstd/bitwriter.go b/vendor/github.com/klauspost/compress/zstd/bitwriter.go index 1952f175b0d..b22b297e62a 100644 --- a/vendor/github.com/klauspost/compress/zstd/bitwriter.go +++ b/vendor/github.com/klauspost/compress/zstd/bitwriter.go @@ -88,7 +88,7 @@ func (b *bitWriter) flush32() { // flushAlign will flush remaining full bytes and align to next byte boundary. func (b *bitWriter) flushAlign() { nbBytes := (b.nBits + 7) >> 3 - for i := uint8(0); i < nbBytes; i++ { + for i := range nbBytes { b.out = append(b.out, byte(b.bitContainer>>(i*8))) } b.nBits = 0 diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go index 0dd742fd2a6..2329e996f86 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockdec.go +++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -54,11 +54,11 @@ const ( ) var ( - huffDecoderPool = sync.Pool{New: func() interface{} { + huffDecoderPool = sync.Pool{New: func() any { return &huff0.Scratch{} }} - fseDecoderPool = sync.Pool{New: func() interface{} { + fseDecoderPool = sync.Pool{New: func() any { return &fseDecoder{} }} ) @@ -553,7 +553,7 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) { if compMode&3 != 0 { return errors.New("corrupt block: reserved bits not zero") } - for i := uint(0); i < 3; i++ { + for i := range uint(3) { mode := seqCompMode((compMode >> (6 - i*2)) & 3) if debugDecoder { println("Table", tableIndex(i), "is", mode) diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go index ea2a19376c1..30df5513d56 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder.go @@ -373,11 +373,9 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { if cap(dst) == 0 && !d.o.limitToCap { // Allocate len(input) * 2 by default if nothing is provided // and we didn't get frame content size. - size := len(input) * 2 - // Cap to 1 MB. - if size > 1<<20 { - size = 1 << 20 - } + size := min( + // Cap to 1 MB. + len(input)*2, 1<<20) if uint64(size) > d.o.maxDecodedSize { size = int(d.o.maxDecodedSize) } diff --git a/vendor/github.com/klauspost/compress/zstd/dict.go b/vendor/github.com/klauspost/compress/zstd/dict.go index b7b83164bc7..2ffbfdf379e 100644 --- a/vendor/github.com/klauspost/compress/zstd/dict.go +++ b/vendor/github.com/klauspost/compress/zstd/dict.go @@ -194,17 +194,17 @@ func BuildDict(o BuildDictOptions) ([]byte, error) { hist := o.History contents := o.Contents debug := o.DebugOut != nil - println := func(args ...interface{}) { + println := func(args ...any) { if o.DebugOut != nil { fmt.Fprintln(o.DebugOut, args...) } } - printf := func(s string, args ...interface{}) { + printf := func(s string, args ...any) { if o.DebugOut != nil { fmt.Fprintf(o.DebugOut, s, args...) } } - print := func(args ...interface{}) { + print := func(args ...any) { if o.DebugOut != nil { fmt.Fprint(o.DebugOut, args...) 
} @@ -424,16 +410,10 @@ func BuildDict(o BuildDictOptions) ([]byte, error) { } // Literal table - avgSize := litTotal - if avgSize > huff0.BlockSizeMax/2 { - avgSize = huff0.BlockSizeMax / 2 - } + avgSize := min(litTotal, huff0.BlockSizeMax/2) huffBuff := make([]byte, 0, avgSize) // Target size - div := litTotal / avgSize - if div < 1 { - div = 1 - } + div := max(litTotal/avgSize, 1) if debug { println("Huffman weights:") } @@ -454,7 +448,7 @@ func BuildDict(o BuildDictOptions) ([]byte, error) { huffBuff = append(huffBuff, 255) } scratch := &huff0.Scratch{TableLog: 11} - for tries := 0; tries < 255; tries++ { + for tries := range 255 { scratch = &huff0.Scratch{TableLog: 11} _, _, err = huff0.Compress1X(huffBuff, scratch) if err == nil { @@ -471,7 +465,7 @@ func BuildDict(o BuildDictOptions) ([]byte, error) { // Bail out.... Just generate something huffBuff = append(huffBuff, bytes.Repeat([]byte{255}, 10000)...) - for i := 0; i < 128; i++ { + for i := range 128 { huffBuff = append(huffBuff, byte(i)) } continue diff --git a/vendor/github.com/klauspost/compress/zstd/enc_base.go b/vendor/github.com/klauspost/compress/zstd/enc_base.go index 7d250c67f59..c1192ec38f4 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_base.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_base.go @@ -8,7 +8,7 @@ import ( ) const ( - dictShardBits = 6 + dictShardBits = 7 ) type fastBase struct { @@ -41,11 +41,9 @@ func (e *fastBase) AppendCRC(dst []byte) []byte { // or a window size small enough to contain the input size, if > 0. func (e *fastBase) WindowSize(size int64) int32 { if size > 0 && size < int64(e.maxMatchOff) { - b := int32(1) << uint(bits.Len(uint(size))) - // Keep minimum window. - if b < 1024 { - b = 1024 - } + b := max( + // Keep minimum window. + int32(1)<<uint(bits.Len(uint(size))), 1024) […] diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go […] for offset > tMin && s > nextEmit && src[offset-1] == src[s-1] && l < maxMatchLength { s-- offset-- @@ -382,10 +377,7 @@ encodeLoop: nextEmit = s // Index skipped... - end := s - if s > sLimit+4 { - end = sLimit + 4 - } + end := min(s, sLimit+4) off := index0 + e.cur for index0 < end { cv0 := load6432(src, index0) @@ -444,10 +436,7 @@ encodeLoop: nextEmit = s // Index old s + 1 -> s - 1 or sLimit - end := s - if s > sLimit-4 { - end = sLimit - 4 - } + end := min(s, sLimit-4) off := index0 + e.cur for index0 < end { diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go index 84a79fde767..85dcd28c32e 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_better.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go @@ -190,10 +187,7 @@ encodeLoop: // and have to do special offset treatment. startLimit := nextEmit + 1 - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { repIndex-- start-- @@ -252,10 +249,7 @@ encodeLoop: // and have to do special offset treatment.
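The clamp rewrites in these hunks (`avgSize`, `div`, `tMin`, `WindowSize`) all rely on the `min` and `max` builtins added in Go 1.21, which accept any ordered type and replace the three-line if-block idiom one for one. A hedged before/after sketch with placeholder values, not the encoder's real state:

```go
package main

import "fmt"

// clampOld is the pre-Go-1.21 idiom the diff removes.
func clampOld(s, maxMatchOff int32) int32 {
	tMin := s - maxMatchOff
	if tMin < 0 {
		tMin = 0
	}
	return tMin
}

// clampNew is the one-liner the diff substitutes; the semantics are identical.
func clampNew(s, maxMatchOff int32) int32 {
	return max(s-maxMatchOff, 0)
}

func main() {
	for _, s := range []int32{-5, 0, 100, 1 << 20} {
		fmt.Println(clampOld(s, 1<<16) == clampNew(s, 1<<16)) // always true
	}
}
```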
startLimit := nextEmit + 1 - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { repIndex-- start-- @@ -480,10 +474,7 @@ encodeLoop: l := matched // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { s-- t-- @@ -719,10 +710,7 @@ encodeLoop: // and have to do special offset treatment. startLimit := nextEmit + 1 - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { repIndex-- start-- @@ -783,10 +771,7 @@ encodeLoop: // and have to do special offset treatment. startLimit := nextEmit + 1 - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { repIndex-- start-- @@ -1005,10 +990,7 @@ encodeLoop: l := matched // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { s-- t-- diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go index d36be7bd8c2..cf8cad00dcf 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go @@ -13,7 +13,7 @@ const ( dFastLongLen = 8 // Bytes used for table hash dLongTableShardCnt = 1 << (dFastLongTableBits - dictShardBits) // Number of shards in the table - dLongTableShardSize = dFastLongTableSize / tableShardCnt // Size of an individual shard + dLongTableShardSize = dFastLongTableSize / dLongTableShardCnt // Size of an individual shard dFastShortTableBits = tableBits // Bits used in the short match table dFastShortTableSize = 1 << dFastShortTableBits // Size of the table @@ -149,10 +149,7 @@ encodeLoop: // and have to do special offset treatment. startLimit := nextEmit + 1 - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { repIndex-- start-- @@ -266,10 +263,7 @@ encodeLoop: l := e.matchlen(s+4, t+4, src) + 4 // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { s-- t-- @@ -462,10 +456,7 @@ encodeLoop: // and have to do special offset treatment. startLimit := nextEmit + 1 - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] { repIndex-- start-- @@ -576,10 +567,7 @@ encodeLoop: l := int32(matchLen(src[s+4:], src[t+4:])) + 4 // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for t > tMin && s > nextEmit && src[t-1] == src[s-1] { s-- t-- @@ -809,10 +797,7 @@ encodeLoop: // and have to do special offset treatment. 
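One enc_dfast.go change above is a real fix rather than cleanup: `dLongTableShardSize` was computed from `tableShardCnt`, the short table's shard count, so the long table's shards did not tile the long table. A quick constant-arithmetic check of the invariant; the bit widths here (`tableBits = 15`, `dFastLongTableBits = 17`) are assumed to mirror upstream and are only illustrative:

```go
package main

import "fmt"

const (
	tableBits          = 15 // short-table bits (assumed)
	dFastLongTableBits = 17 // long-table bits (assumed)
	dictShardBits      = 7  // value after this diff

	dFastLongTableSize = 1 << dFastLongTableBits
	tableShardCnt      = 1 << (tableBits - dictShardBits)
	dLongTableShardCnt = 1 << (dFastLongTableBits - dictShardBits)

	oldShardSize = dFastLongTableSize / tableShardCnt      // wrong divisor
	newShardSize = dFastLongTableSize / dLongTableShardCnt // fixed
)

func main() {
	// Shard size times shard count must cover the table exactly.
	fmt.Println(oldShardSize*dLongTableShardCnt == dFastLongTableSize) // false
	fmt.Println(newShardSize*dLongTableShardCnt == dFastLongTableSize) // true
}
```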
startLimit := nextEmit + 1 - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { repIndex-- start-- @@ -927,10 +912,7 @@ encodeLoop: l := e.matchlen(s+4, t+4, src) + 4 // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { s-- t-- diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go index f45a3da7dae..9180a3a5820 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_fast.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go @@ -143,10 +143,7 @@ encodeLoop: // and have to do special offset treatment. startLimit := nextEmit + 1 - sMin := s - e.maxMatchOff - if sMin < 0 { - sMin = 0 - } + sMin := max(s-e.maxMatchOff, 0) for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch { repIndex-- start-- @@ -223,10 +220,7 @@ encodeLoop: l := e.matchlen(s+4, t+4, src) + 4 // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { s-- t-- @@ -387,10 +381,7 @@ encodeLoop: // and have to do special offset treatment. startLimit := nextEmit + 1 - sMin := s - e.maxMatchOff - if sMin < 0 { - sMin = 0 - } + sMin := max(s-e.maxMatchOff, 0) for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] { repIndex-- start-- @@ -469,10 +460,7 @@ encodeLoop: l := e.matchlen(s+4, t+4, src) + 4 // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for t > tMin && s > nextEmit && src[t-1] == src[s-1] { s-- t-- @@ -655,10 +643,7 @@ encodeLoop: // and have to do special offset treatment. startLimit := nextEmit + 1 - sMin := s - e.maxMatchOff - if sMin < 0 { - sMin = 0 - } + sMin := max(s-e.maxMatchOff, 0) for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch { repIndex-- start-- @@ -735,10 +720,7 @@ encodeLoop: l := e.matchlen(s+4, t+4, src) + 4 // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { s-- t-- diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go index e47af66e7c9..d88f067e5c2 100644 --- a/vendor/github.com/klauspost/compress/zstd/framedec.go +++ b/vendor/github.com/klauspost/compress/zstd/framedec.go @@ -238,10 +238,7 @@ func (d *frameDec) reset(br byteBuffer) error { if d.WindowSize == 0 && d.SingleSegment { // We may not need window in this case. 
- d.WindowSize = d.FrameContentSize - if d.WindowSize < MinWindowSize { - d.WindowSize = MinWindowSize - } + d.WindowSize = max(d.FrameContentSize, MinWindowSize) if d.WindowSize > d.o.maxDecodedSize { if debugDecoder { printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize) diff --git a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go index ab26326a8ff..3a0f4e7fbe6 100644 --- a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go +++ b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go @@ -149,7 +149,7 @@ func (s *fseEncoder) buildCTable() error { if v > largeLimit { s.zeroBits = true } - for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ { + for range v { tableSymbol[position] = symbol position = (position + step) & tableMask for position > highThreshold { diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go index 9a7de82f9ef..0bfb0e43c7b 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec.go +++ b/vendor/github.com/klauspost/compress/zstd/seqdec.go @@ -231,10 +231,7 @@ func (s *sequenceDecs) decodeSync(hist []byte) error { llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize] llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state out := s.out - maxBlockSize := maxCompressedBlockSize - if s.windowSize < maxBlockSize { - maxBlockSize = s.windowSize - } + maxBlockSize := min(s.windowSize, maxCompressedBlockSize) if debugDecoder { println("decodeSync: decoding", seqs, "sequences", br.remain(), "bits remain on stream") diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go index c59f17e07ad..1f8c3cec28c 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go @@ -79,10 +79,7 @@ func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { br := s.br - maxBlockSize := maxCompressedBlockSize - if s.windowSize < maxBlockSize { - maxBlockSize = s.windowSize - } + maxBlockSize := min(s.windowSize, maxCompressedBlockSize) ctx := decodeSyncAsmContext{ llTable: s.litLengths.fse.dt[:maxTablesize], @@ -237,10 +234,7 @@ func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmC func (s *sequenceDecs) decode(seqs []seqVals) error { br := s.br - maxBlockSize := maxCompressedBlockSize - if s.windowSize < maxBlockSize { - maxBlockSize = s.windowSize - } + maxBlockSize := min(s.windowSize, maxCompressedBlockSize) ctx := decodeAsmContext{ llTable: s.litLengths.fse.dt[:maxTablesize], diff --git a/vendor/github.com/klauspost/compress/zstd/simple_go124.go b/vendor/github.com/klauspost/compress/zstd/simple_go124.go new file mode 100644 index 00000000000..2efc0497bf9 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/simple_go124.go @@ -0,0 +1,56 @@ +// Copyright 2025+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. + +//go:build go1.24 + +package zstd + +import ( + "errors" + "runtime" + "sync" + "weak" +) + +var weakMu sync.Mutex +var simpleEnc weak.Pointer[Encoder] +var simpleDec weak.Pointer[Decoder] + +// EncodeTo appends the encoded data from src to dst. 
+func EncodeTo(dst []byte, src []byte) []byte { + weakMu.Lock() + enc := simpleEnc.Value() + if enc == nil { + var err error + enc, err = NewWriter(nil, WithEncoderConcurrency(runtime.NumCPU()), WithWindowSize(1<<20), WithLowerEncoderMem(true), WithZeroFrames(true)) + if err != nil { + panic("failed to create simple encoder: " + err.Error()) + } + simpleEnc = weak.Make(enc) + } + weakMu.Unlock() + + return enc.EncodeAll(src, dst) +} + +// DecodeTo appends the decoded data from src to dst. +// The maximum decoded size is 1GiB, +// not including what may already be in dst. +func DecodeTo(dst []byte, src []byte) ([]byte, error) { + weakMu.Lock() + dec := simpleDec.Value() + if dec == nil { + var err error + dec, err = NewReader(nil, WithDecoderConcurrency(runtime.NumCPU()), WithDecoderLowmem(true), WithDecoderMaxMemory(1<<30)) + if err != nil { + weakMu.Unlock() + return nil, errors.New("failed to create simple decoder: " + err.Error()) + } + runtime.SetFinalizer(dec, func(d *Decoder) { + d.Close() + }) + simpleDec = weak.Make(dec) + } + weakMu.Unlock() + return dec.DecodeAll(src, dst) +} diff --git a/vendor/github.com/klauspost/compress/zstd/snappy.go b/vendor/github.com/klauspost/compress/zstd/snappy.go index a17381b8f89..336c2889304 100644 --- a/vendor/github.com/klauspost/compress/zstd/snappy.go +++ b/vendor/github.com/klauspost/compress/zstd/snappy.go @@ -257,7 +257,7 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) { if !r.readFull(r.buf[:len(snappyMagicBody)], false) { return written, r.err } - for i := 0; i < len(snappyMagicBody); i++ { + for i := range len(snappyMagicBody) { if r.buf[i] != snappyMagicBody[i] { println("r.buf[i] != snappyMagicBody[i]", r.buf[i], snappyMagicBody[i], i) r.err = ErrSnappyCorrupt diff --git a/vendor/github.com/klauspost/compress/zstd/zip.go b/vendor/github.com/klauspost/compress/zstd/zip.go index 29c15c8c4ef..3198d718926 100644 --- a/vendor/github.com/klauspost/compress/zstd/zip.go +++ b/vendor/github.com/klauspost/compress/zstd/zip.go @@ -19,7 +19,7 @@ const ZipMethodWinZip = 93 const ZipMethodPKWare = 20 // zipReaderPool is the default reader pool. -var zipReaderPool = sync.Pool{New: func() interface{} { +var zipReaderPool = sync.Pool{New: func() any { z, err := NewReader(nil, WithDecoderLowmem(true), WithDecoderMaxWindow(128<<20), WithDecoderConcurrency(1)) if err != nil { panic(err) diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go index 6252b46ae6f..1a869710d2c 100644 --- a/vendor/github.com/klauspost/compress/zstd/zstd.go +++ b/vendor/github.com/klauspost/compress/zstd/zstd.go @@ -98,13 +98,13 @@ var ( ErrDecoderNilInput = errors.New("nil input provided as reader") ) -func println(a ...interface{}) { +func println(a ...any) { if debug || debugDecoder || debugEncoder { log.Println(a...) } } -func printf(format string, a ...interface{}) { +func printf(format string, a ...any) { if debug || debugDecoder || debugEncoder { log.Printf(format, a...) 
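The new simple_go124.go file caches one process-wide Encoder and Decoder behind `weak.Pointer`s: `weak.Make` hands out a reference the garbage collector is free to reclaim, so repeated EncodeTo/DecodeTo calls reuse a live codec while an idle process eventually gives the memory back. A stripped-down sketch of that caching shape; `codec` and `builds` are stand-ins, not the zstd types (requires Go 1.24):

```go
//go:build go1.24

package main

import (
	"fmt"
	"sync"
	"weak"
)

type codec struct{ id int }

var (
	mu     sync.Mutex
	cached weak.Pointer[codec]
	builds int
)

// get returns the shared codec, rebuilding it only if the GC
// has already collected the previous instance.
func get() *codec {
	mu.Lock()
	defer mu.Unlock()
	if c := cached.Value(); c != nil {
		return c // still alive: reuse it
	}
	builds++
	c := &codec{id: builds} // stand-in for an expensive NewWriter/NewReader
	cached = weak.Make(c)
	return c
}

func main() {
	a, b := get(), get()
	fmt.Println(a == b, builds) // true 1: the second call hit the weak cache
}
```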
} diff --git a/vendor/github.com/mistifyio/go-zfs/v3/.envrc b/vendor/github.com/mistifyio/go-zfs/v4/.envrc similarity index 100% rename from vendor/github.com/mistifyio/go-zfs/v3/.envrc rename to vendor/github.com/mistifyio/go-zfs/v4/.envrc diff --git a/vendor/github.com/mistifyio/go-zfs/v3/.gitignore b/vendor/github.com/mistifyio/go-zfs/v4/.gitignore similarity index 100% rename from vendor/github.com/mistifyio/go-zfs/v3/.gitignore rename to vendor/github.com/mistifyio/go-zfs/v4/.gitignore diff --git a/vendor/github.com/mistifyio/go-zfs/v3/.golangci.yml b/vendor/github.com/mistifyio/go-zfs/v4/.golangci.yml similarity index 100% rename from vendor/github.com/mistifyio/go-zfs/v3/.golangci.yml rename to vendor/github.com/mistifyio/go-zfs/v4/.golangci.yml diff --git a/vendor/github.com/mistifyio/go-zfs/v3/.yamllint b/vendor/github.com/mistifyio/go-zfs/v4/.yamllint similarity index 100% rename from vendor/github.com/mistifyio/go-zfs/v3/.yamllint rename to vendor/github.com/mistifyio/go-zfs/v4/.yamllint diff --git a/vendor/github.com/mistifyio/go-zfs/v3/CHANGELOG.md b/vendor/github.com/mistifyio/go-zfs/v4/CHANGELOG.md similarity index 100% rename from vendor/github.com/mistifyio/go-zfs/v3/CHANGELOG.md rename to vendor/github.com/mistifyio/go-zfs/v4/CHANGELOG.md diff --git a/vendor/github.com/mistifyio/go-zfs/v3/CONTRIBUTING.md b/vendor/github.com/mistifyio/go-zfs/v4/CONTRIBUTING.md similarity index 100% rename from vendor/github.com/mistifyio/go-zfs/v3/CONTRIBUTING.md rename to vendor/github.com/mistifyio/go-zfs/v4/CONTRIBUTING.md diff --git a/vendor/github.com/mistifyio/go-zfs/v3/LICENSE b/vendor/github.com/mistifyio/go-zfs/v4/LICENSE similarity index 100% rename from vendor/github.com/mistifyio/go-zfs/v3/LICENSE rename to vendor/github.com/mistifyio/go-zfs/v4/LICENSE diff --git a/vendor/github.com/mistifyio/go-zfs/v3/Makefile b/vendor/github.com/mistifyio/go-zfs/v4/Makefile similarity index 100% rename from vendor/github.com/mistifyio/go-zfs/v3/Makefile rename to vendor/github.com/mistifyio/go-zfs/v4/Makefile diff --git a/vendor/github.com/mistifyio/go-zfs/v3/README.md b/vendor/github.com/mistifyio/go-zfs/v4/README.md similarity index 100% rename from vendor/github.com/mistifyio/go-zfs/v3/README.md rename to vendor/github.com/mistifyio/go-zfs/v4/README.md diff --git a/vendor/github.com/mistifyio/go-zfs/v3/Vagrantfile b/vendor/github.com/mistifyio/go-zfs/v4/Vagrantfile similarity index 100% rename from vendor/github.com/mistifyio/go-zfs/v3/Vagrantfile rename to vendor/github.com/mistifyio/go-zfs/v4/Vagrantfile diff --git a/vendor/github.com/mistifyio/go-zfs/v3/error.go b/vendor/github.com/mistifyio/go-zfs/v4/error.go similarity index 100% rename from vendor/github.com/mistifyio/go-zfs/v3/error.go rename to vendor/github.com/mistifyio/go-zfs/v4/error.go diff --git a/vendor/github.com/mistifyio/go-zfs/v3/lint.mk b/vendor/github.com/mistifyio/go-zfs/v4/lint.mk similarity index 100% rename from vendor/github.com/mistifyio/go-zfs/v3/lint.mk rename to vendor/github.com/mistifyio/go-zfs/v4/lint.mk diff --git a/vendor/github.com/mistifyio/go-zfs/v3/rules.mk b/vendor/github.com/mistifyio/go-zfs/v4/rules.mk similarity index 100% rename from vendor/github.com/mistifyio/go-zfs/v3/rules.mk rename to vendor/github.com/mistifyio/go-zfs/v4/rules.mk diff --git a/vendor/github.com/mistifyio/go-zfs/v3/shell.nix b/vendor/github.com/mistifyio/go-zfs/v4/shell.nix similarity index 100% rename from vendor/github.com/mistifyio/go-zfs/v3/shell.nix rename to vendor/github.com/mistifyio/go-zfs/v4/shell.nix diff --git 
a/vendor/github.com/mistifyio/go-zfs/v3/utils.go b/vendor/github.com/mistifyio/go-zfs/v4/utils.go similarity index 88% rename from vendor/github.com/mistifyio/go-zfs/v3/utils.go rename to vendor/github.com/mistifyio/go-zfs/v4/utils.go index b69942b530f..8e49be39ff6 100644 --- a/vendor/github.com/mistifyio/go-zfs/v3/utils.go +++ b/vendor/github.com/mistifyio/go-zfs/v4/utils.go @@ -2,6 +2,7 @@ package zfs import ( "bytes" + "context" "errors" "fmt" "io" @@ -10,10 +11,37 @@ import ( "runtime" "strconv" "strings" + "sync/atomic" + "syscall" + "time" "github.com/google/uuid" ) +// Runner specifies the parameters used when executing ZFS commands. +type Runner struct { + // Timeout specifies how long to wait before sending a SIGTERM signal to the running process. + Timeout time.Duration + + // Grace specifies the time waited after signaling the running process with SIGTERM before it is forcefully + // killed with SIGKILL. + Grace time.Duration +} + +var defaultRunner atomic.Value + +func init() { + defaultRunner.Store(&Runner{}) +} + +func Default() *Runner { + return defaultRunner.Load().(*Runner) //nolint: forcetypeassert // Impossible for it to be anything else. +} + +func SetRunner(runner *Runner) { + defaultRunner.Store(runner) +} + type command struct { Command string Stdin io.Reader @@ -21,7 +49,19 @@ type command struct { } func (c *command) Run(arg ...string) ([][]string, error) { - cmd := exec.Command(c.Command, arg...) + var cmd *exec.Cmd + if Default().Timeout == 0 { + cmd = exec.Command(c.Command, arg...) + } else { + ctx, cancel := context.WithTimeout(context.Background(), Default().Timeout) + defer cancel() + + cmd = exec.CommandContext(ctx, c.Command, arg...) + cmd.Cancel = func() error { + return cmd.Process.Signal(syscall.SIGTERM) + } + cmd.WaitDelay = Default().Grace + } var stdout, stderr bytes.Buffer diff --git a/vendor/github.com/mistifyio/go-zfs/v3/utils_notsolaris.go b/vendor/github.com/mistifyio/go-zfs/v4/utils_notsolaris.go similarity index 100% rename from vendor/github.com/mistifyio/go-zfs/v3/utils_notsolaris.go rename to vendor/github.com/mistifyio/go-zfs/v4/utils_notsolaris.go diff --git a/vendor/github.com/mistifyio/go-zfs/v3/utils_solaris.go b/vendor/github.com/mistifyio/go-zfs/v4/utils_solaris.go similarity index 100% rename from vendor/github.com/mistifyio/go-zfs/v3/utils_solaris.go rename to vendor/github.com/mistifyio/go-zfs/v4/utils_solaris.go diff --git a/vendor/github.com/mistifyio/go-zfs/v3/zfs.go b/vendor/github.com/mistifyio/go-zfs/v4/zfs.go similarity index 99% rename from vendor/github.com/mistifyio/go-zfs/v3/zfs.go rename to vendor/github.com/mistifyio/go-zfs/v4/zfs.go index 1166bdc212f..dbcbc8ae307 100644 --- a/vendor/github.com/mistifyio/go-zfs/v3/zfs.go +++ b/vendor/github.com/mistifyio/go-zfs/v4/zfs.go @@ -312,7 +312,7 @@ func (d *Dataset) SetProperty(key, val string) error { // A full list of available ZFS properties may be found in the ZFS manual: // https://openzfs.github.io/openzfs-docs/man/7/zfsprops.7.html. 
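The Runner plumbing added to go-zfs v4 above hangs on two os/exec fields: `cmd.Cancel` chooses what happens when the context deadline fires (here SIGTERM rather than the default SIGKILL), and `cmd.WaitDelay` is the grace period before the process is forcibly killed anyway. A self-contained sketch of the same timeout-then-grace sequencing against a throwaway `sleep`; the durations are arbitrary and this is Unix-only:

```go
package main

import (
	"context"
	"fmt"
	"os/exec"
	"syscall"
	"time"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
	defer cancel()

	cmd := exec.CommandContext(ctx, "sleep", "10")
	// On timeout, ask politely first...
	cmd.Cancel = func() error {
		return cmd.Process.Signal(syscall.SIGTERM)
	}
	// ...and SIGKILL if the process ignores SIGTERM for this long.
	cmd.WaitDelay = 2 * time.Second

	err := cmd.Run()
	fmt.Println(err, ctx.Err()) // signal: terminated, context.DeadlineExceeded
}
```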
func (d *Dataset) GetProperty(key string) (string, error) { - out, err := zfsOutput("get", "-H", key, d.Name) + out, err := zfsOutput("get", "-Hp", key, d.Name) if err != nil { return "", err } diff --git a/vendor/github.com/mistifyio/go-zfs/v3/zpool.go b/vendor/github.com/mistifyio/go-zfs/v4/zpool.go similarity index 100% rename from vendor/github.com/mistifyio/go-zfs/v3/zpool.go rename to vendor/github.com/mistifyio/go-zfs/v4/zpool.go diff --git a/vendor/github.com/vbatts/tar-split/archive/tar/common.go b/vendor/github.com/vbatts/tar-split/archive/tar/common.go index dee9e47e4ae..e687a08c966 100644 --- a/vendor/github.com/vbatts/tar-split/archive/tar/common.go +++ b/vendor/github.com/vbatts/tar-split/archive/tar/common.go @@ -34,6 +34,7 @@ var ( errMissData = errors.New("archive/tar: sparse file references non-existent data") errUnrefData = errors.New("archive/tar: sparse file contains unreferenced data") errWriteHole = errors.New("archive/tar: write non-NUL byte in sparse hole") + errSparseTooLong = errors.New("archive/tar: sparse map too long") ) type headerError []string diff --git a/vendor/github.com/vbatts/tar-split/archive/tar/reader.go b/vendor/github.com/vbatts/tar-split/archive/tar/reader.go index 248a7ccb15a..a645c41605f 100644 --- a/vendor/github.com/vbatts/tar-split/archive/tar/reader.go +++ b/vendor/github.com/vbatts/tar-split/archive/tar/reader.go @@ -581,12 +581,17 @@ func readGNUSparseMap1x0(r io.Reader) (sparseDatas, error) { cntNewline int64 buf bytes.Buffer blk block + totalSize int ) // feedTokens copies data in blocks from r into buf until there are // at least cnt newlines in buf. It will not read more blocks than needed. feedTokens := func(n int64) error { for cntNewline < n { + totalSize += len(blk) + if totalSize > maxSpecialFileSize { + return errSparseTooLong + } if _, err := mustReadFull(r, blk[:]); err != nil { return err } @@ -619,8 +624,8 @@ func readGNUSparseMap1x0(r io.Reader) (sparseDatas, error) { } // Parse for all member entries. - // numEntries is trusted after this since a potential attacker must have - // committed resources proportional to what this library used. + // numEntries is trusted after this since feedTokens limits the number of + // tokens based on maxSpecialFileSize. if err := feedTokens(2 * numEntries); err != nil { return nil, err } diff --git a/vendor/go.podman.io/common/pkg/config/config.go b/vendor/go.podman.io/common/pkg/config/config.go index 2a11ccb0054..8bc23deba1b 100644 --- a/vendor/go.podman.io/common/pkg/config/config.go +++ b/vendor/go.podman.io/common/pkg/config/config.go @@ -707,6 +707,13 @@ type Destination struct { // Identity file with ssh key, optional Identity string `json:",omitempty" toml:"identity,omitempty"` + // Path to TLS client certificate PEM file, optional + TLSCert string `json:",omitempty" toml:"tls_cert,omitempty"` + // Path to TLS client certificate private key PEM file, optional + TLSKey string `json:",omitempty" toml:"tls_key,omitempty"` + // Path to TLS certificate authority PEM file, optional + TLSCA string `json:",omitempty" toml:"tls_ca,omitempty"` + // isMachine describes if the remote destination is a machine. 
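The three TLS fields added to Destination map onto a standard mutual-TLS client setup: a certificate/key pair to present, plus a CA bundle to verify the server against. A sketch of how such paths are conventionally turned into a `tls.Config`; this is an assumption about how the fields would be consumed, not the actual podman remote-client wiring, and the paths are placeholders:

```go
package main

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"os"
)

// tlsConfigFromFiles builds a mutual-TLS client config from the kind of
// tls_cert / tls_key / tls_ca paths the new Destination fields carry.
func tlsConfigFromFiles(certFile, keyFile, caFile string) (*tls.Config, error) {
	cert, err := tls.LoadX509KeyPair(certFile, keyFile)
	if err != nil {
		return nil, fmt.Errorf("loading client cert/key: %w", err)
	}
	caPEM, err := os.ReadFile(caFile)
	if err != nil {
		return nil, fmt.Errorf("reading CA bundle: %w", err)
	}
	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(caPEM) {
		return nil, fmt.Errorf("no certificates found in %s", caFile)
	}
	return &tls.Config{Certificates: []tls.Certificate{cert}, RootCAs: pool}, nil
}

func main() {
	cfg, err := tlsConfigFromFiles(
		"/path/to/certs/podman/tls.crt", // placeholder paths
		"/path/to/certs/podman/tls.key",
		"/path/to/certs/podman/ca.crt",
	)
	fmt.Println(cfg != nil, err) // errors unless the files actually exist
}
```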
IsMachine bool `json:",omitempty" toml:"is_machine,omitempty"` } diff --git a/vendor/go.podman.io/common/pkg/config/containers.conf b/vendor/go.podman.io/common/pkg/config/containers.conf index 57a9fcfa4c4..2e392d048e1 100644 --- a/vendor/go.podman.io/common/pkg/config/containers.conf +++ b/vendor/go.podman.io/common/pkg/config/containers.conf @@ -779,10 +779,17 @@ default_sysctls = [ # rootful "unix:///run/podman/podman.sock (Default) # remote rootless ssh://engineering.lab.company.com/run/user/1000/podman/podman.sock # remote rootful ssh://root@10.10.1.136:22/run/podman/podman.sock +# tcp/tls remote tcp://10.10.1.136:9443 # # uri = "ssh://user@production.example.com/run/user/1001/podman/podman.sock" # Path to file containing ssh identity key # identity = "~/.ssh/id_rsa" +# Path to PEM file containing TLS client certificate +# tls_cert = "/path/to/certs/podman/tls.crt" +# Path to PEM file containing TLS client certificate private key +# tls_key = "/path/to/certs/podman/tls.key" +# Path to PEM file containing TLS certificate authority (CA) bundle +# tls_ca = "/path/to/certs/podman/ca.crt" # Directory for temporary files. Must be tmpfs (wiped after reboot) # diff --git a/vendor/go.podman.io/common/pkg/config/containers.conf-freebsd b/vendor/go.podman.io/common/pkg/config/containers.conf-freebsd index b57160b1097..bd999c339c9 100644 --- a/vendor/go.podman.io/common/pkg/config/containers.conf-freebsd +++ b/vendor/go.podman.io/common/pkg/config/containers.conf-freebsd @@ -598,10 +598,17 @@ default_sysctls = [ # rootful "unix:///run/podman/podman.sock (Default) # remote rootless ssh://engineering.lab.company.com/run/user/1000/podman/podman.sock # remote rootful ssh://root@10.10.1.136:22/run/podman/podman.sock +# tcp/tls remote tcp://10.10.1.136:9443 # # uri = "ssh://user@production.example.com/run/user/1001/podman/podman.sock" # Path to file containing ssh identity key # identity = "~/.ssh/id_rsa" +# Path to PEM file containing TLS client certificate +# tls_cert = "/path/to/certs/podman/tls.crt" +# Path to PEM file containing TLS client certificate private key +# tls_key = "/path/to/certs/podman/tls.key" +# Path to PEM file containing TLS certificate authority (CA) bundle +# tls_ca = "/path/to/certs/podman/ca.crt" # Directory for temporary files. Must be tmpfs (wiped after reboot) # diff --git a/vendor/go.podman.io/common/version/version.go b/vendor/go.podman.io/common/version/version.go index bdfd10c645a..4a2be1fe1e2 100644 --- a/vendor/go.podman.io/common/version/version.go +++ b/vendor/go.podman.io/common/version/version.go @@ -1,4 +1,4 @@ package version // Version is the version of the build. 
-const Version = "0.66.0-dev" +const Version = "0.67.0-dev" diff --git a/vendor/go.podman.io/image/v5/version/version.go b/vendor/go.podman.io/image/v5/version/version.go index ac62a17cec9..e2dd251d2fa 100644 --- a/vendor/go.podman.io/image/v5/version/version.go +++ b/vendor/go.podman.io/image/v5/version/version.go @@ -6,7 +6,7 @@ const ( // VersionMajor is for an API incompatible changes VersionMajor = 5 // VersionMinor is for functionality in a backwards-compatible manner - VersionMinor = 38 + VersionMinor = 39 // VersionPatch is for backwards-compatible bug fixes VersionPatch = 0 diff --git a/vendor/go.podman.io/storage/VERSION b/vendor/go.podman.io/storage/VERSION index 8a37c6cd9c5..bb55a51f64d 100644 --- a/vendor/go.podman.io/storage/VERSION +++ b/vendor/go.podman.io/storage/VERSION @@ -1 +1 @@ -1.61.0-dev +1.62.0-dev diff --git a/vendor/go.podman.io/storage/drivers/fsdiff.go b/vendor/go.podman.io/storage/drivers/fsdiff.go index 37684466d21..77f98d49da1 100644 --- a/vendor/go.podman.io/storage/drivers/fsdiff.go +++ b/vendor/go.podman.io/storage/drivers/fsdiff.go @@ -151,7 +151,7 @@ func (gdw *NaiveDiffDriver) Changes(id string, idMappings *idtools.IDMappings, p // ApplyDiff extracts the changeset from the given diff into the // layer with the specified id and parent, returning the size of the // new layer in bytes. -func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, options ApplyDiffOpts) (size int64, err error) { +func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, options ApplyDiffOpts) (int64, error) { driver := gdw.ProtoDriver if options.Mappings == nil { @@ -164,7 +164,7 @@ func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, options ApplyDiffOpts) } layerFs, err := driver.Get(id, mountOpts) if err != nil { - return + return -1, err } defer driverPut(driver, id, &err) @@ -185,19 +185,20 @@ func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, options ApplyDiffOpts) } start := time.Now().UTC() logrus.Debug("Start untar layer") - if size, err = ApplyUncompressedLayer(layerFs, options.Diff, tarOptions); err != nil { + size, err := ApplyUncompressedLayer(layerFs, options.Diff, tarOptions) + if err != nil { logrus.Errorf("While applying layer: %s", err) - return + return -1, err } logrus.Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds()) - return + return size, nil } // DiffSize calculates the changes between the specified layer // and its parent and returns the size in bytes of the changes // relative to its base filesystem directory. 
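A subtlety in the ApplyDiff rewrite above: `defer driverPut(driver, id, &err)` writes any cleanup failure through a pointer to `err`, and that write only reaches the caller on paths that return the same `err` variable; with the old named results it reached every return. The sketch below shows the difference with a hypothetical `put` cleanup, which is not the real driverPut:

```go
package main

import (
	"errors"
	"fmt"
)

// put mirrors the driverPut shape: it records a cleanup failure into *err,
// but never masks an error the caller already has.
func put(fail bool, err *error) {
	if fail && *err == nil {
		*err = errors.New("cleanup failed")
	}
}

// applyNamed uses named results: the deferred write lands in the value
// the caller actually sees.
func applyNamed(cleanupFails bool) (size int64, err error) {
	defer put(cleanupFails, &err)
	return 42, nil // err is set to nil, then the defer may overwrite it
}

// applyPlain uses plain results: the deferred write happens after the
// return values are fixed, so the caller never sees it on this path.
func applyPlain(cleanupFails bool) (int64, error) {
	var err error
	defer put(cleanupFails, &err)
	return 42, err // err is still nil here; the defer runs too late
}

func main() {
	fmt.Println(applyNamed(true)) // 42 cleanup failed
	fmt.Println(applyPlain(true)) // 42 <nil>
}
```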
-func (gdw *NaiveDiffDriver) DiffSize(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (size int64, err error) { +func (gdw *NaiveDiffDriver) DiffSize(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (int64, error) { driver := gdw.ProtoDriver if idMappings == nil { @@ -209,7 +210,7 @@ func (gdw *NaiveDiffDriver) DiffSize(id string, idMappings *idtools.IDMappings, changes, err := gdw.Changes(id, idMappings, parent, parentMappings, mountLabel) if err != nil { - return + return 0, err } options := MountOpts{ @@ -217,7 +218,7 @@ func (gdw *NaiveDiffDriver) DiffSize(id string, idMappings *idtools.IDMappings, } layerFs, err := driver.Get(id, options) if err != nil { - return + return 0, err } defer driverPut(driver, id, &err) diff --git a/vendor/go.podman.io/storage/drivers/zfs/zfs.go b/vendor/go.podman.io/storage/drivers/zfs/zfs.go index 8660dbaa3e8..b994278bb2f 100644 --- a/vendor/go.podman.io/storage/drivers/zfs/zfs.go +++ b/vendor/go.podman.io/storage/drivers/zfs/zfs.go @@ -12,7 +12,7 @@ import ( "sync" "time" - zfs "github.com/mistifyio/go-zfs/v3" + zfs "github.com/mistifyio/go-zfs/v4" "github.com/opencontainers/selinux/go-selinux/label" "github.com/sirupsen/logrus" graphdriver "go.podman.io/storage/drivers" diff --git a/vendor/go.podman.io/storage/internal/tempdir/tempdir.go b/vendor/go.podman.io/storage/internal/tempdir/tempdir.go index 666742c5a06..6522c45d18a 100644 --- a/vendor/go.podman.io/storage/internal/tempdir/tempdir.go +++ b/vendor/go.podman.io/storage/internal/tempdir/tempdir.go @@ -10,6 +10,7 @@ import ( "github.com/sirupsen/logrus" "go.podman.io/storage/internal/staging_lockfile" + "go.podman.io/storage/pkg/system" ) /* @@ -148,7 +149,7 @@ func RecoverStaleDirs(rootDir string) error { continue } - if rmErr := os.RemoveAll(tempDirPath); rmErr != nil { + if rmErr := system.EnsureRemoveAll(tempDirPath); rmErr != nil { recoveryErrors = append(recoveryErrors, fmt.Errorf("error removing stale temp dir: %w", rmErr)) } if unlockErr := instanceLock.UnlockAndDelete(); unlockErr != nil { @@ -218,7 +219,7 @@ func (td *TempDir) Cleanup() error { return nil } - if err := os.RemoveAll(td.tempDirPath); err != nil { + if err := system.EnsureRemoveAll(td.tempDirPath); err != nil { return fmt.Errorf("removing temp dir failed: %w", err) } diff --git a/vendor/go.podman.io/storage/layers.go b/vendor/go.podman.io/storage/layers.go index c6752927e3c..64d3f5c72cc 100644 --- a/vendor/go.podman.io/storage/layers.go +++ b/vendor/go.podman.io/storage/layers.go @@ -2694,5 +2694,5 @@ func closeAll(closes ...func() error) (rErr error) { rErr = fmt.Errorf("%v: %w", err, rErr) } } - return + return rErr } diff --git a/vendor/go.podman.io/storage/pkg/archive/archive.go b/vendor/go.podman.io/storage/pkg/archive/archive.go index 5f8647af7c2..5cdd7513075 100644 --- a/vendor/go.podman.io/storage/pkg/archive/archive.go +++ b/vendor/go.podman.io/storage/pkg/archive/archive.go @@ -417,9 +417,7 @@ func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, erro return nil, fmt.Errorf("tar: cannot canonicalize path: %w", err) } hdr.Name = name - if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil { - return nil, err - } + setHeaderForSpecialDevice(hdr, name, fi.Sys()) return hdr, nil } diff --git a/vendor/go.podman.io/storage/pkg/archive/archive_linux.go b/vendor/go.podman.io/storage/pkg/archive/archive_linux.go index fd7123babc9..4613ee32f72 100644 --- 
a/vendor/go.podman.io/storage/pkg/archive/archive_linux.go +++ b/vendor/go.podman.io/storage/pkg/archive/archive_linux.go @@ -30,7 +30,7 @@ type overlayWhiteoutConverter struct { rolayers []string } -func (o overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) (wo *tar.Header, err error) { +func (o overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) (*tar.Header, error) { // convert whiteouts to AUFS format if fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 { // we just rename the file and make it normal @@ -73,7 +73,7 @@ func (o overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi // add a whiteout for this item in this layer. // create a header for the whiteout file // it should inherit some properties from the parent, but be a regular file - wo = &tar.Header{ + wo := &tar.Header{ Typeflag: tar.TypeReg, Mode: hdr.Mode & int64(os.ModePerm), Name: filepath.Join(hdr.Name, WhiteoutOpaqueDir), @@ -85,7 +85,7 @@ func (o overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi AccessTime: hdr.AccessTime, ChangeTime: hdr.ChangeTime, } - break + return wo, nil } for dir := filepath.Dir(hdr.Name); dir != "" && dir != "." && dir != string(os.PathSeparator); dir = filepath.Dir(dir) { // Check for whiteout for a parent directory in a parent layer. @@ -109,7 +109,7 @@ func (o overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi } } - return + return nil, nil } func (overlayWhiteoutConverter) ConvertReadWithHandler(hdr *tar.Header, path string, handler TarWhiteoutHandler) (bool, error) { diff --git a/vendor/go.podman.io/storage/pkg/archive/archive_unix.go b/vendor/go.podman.io/storage/pkg/archive/archive_unix.go index 0ff57165846..b09b0644353 100644 --- a/vendor/go.podman.io/storage/pkg/archive/archive_unix.go +++ b/vendor/go.podman.io/storage/pkg/archive/archive_unix.go @@ -69,7 +69,7 @@ func chmodTarEntry(perm os.FileMode) os.FileMode { return perm // noop for unix as golang APIs provide perm bits correctly } -func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat any) (err error) { +func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat any) { s, ok := stat.(*syscall.Stat_t) if ok { @@ -82,8 +82,6 @@ func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat any) (err erro hdr.Devminor = int64(minor(uint64(s.Rdev))) //nolint: unconvert,nolintlint } } - - return } func getInodeFromStat(stat any) (inode uint64) { @@ -93,7 +91,7 @@ func getInodeFromStat(stat any) (inode uint64) { inode = s.Ino } - return + return inode } func getFileUIDGID(stat any) (idtools.IDPair, error) { diff --git a/vendor/go.podman.io/storage/pkg/archive/archive_windows.go b/vendor/go.podman.io/storage/pkg/archive/archive_windows.go index 1183f4a282e..2c84e9ea541 100644 --- a/vendor/go.podman.io/storage/pkg/archive/archive_windows.go +++ b/vendor/go.podman.io/storage/pkg/archive/archive_windows.go @@ -52,14 +52,13 @@ func chmodTarEntry(perm os.FileMode) os.FileMode { return noPermPart | permPart } -func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) { +func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) { // do nothing. no notion of Rdev, Nlink in stat on Windows - return } -func getInodeFromStat(stat interface{}) (inode uint64) { +func getInodeFromStat(stat interface{}) uint64 { // do nothing. 
no notion of Inode in stat on Windows - return + return 0 } // handleTarTypeBlockCharFifo is an OS-specific helper function used by diff --git a/vendor/go.podman.io/storage/pkg/archive/changes.go b/vendor/go.podman.io/storage/pkg/archive/changes.go index e9e35198198..a60c20dd361 100644 --- a/vendor/go.podman.io/storage/pkg/archive/changes.go +++ b/vendor/go.podman.io/storage/pkg/archive/changes.go @@ -86,12 +86,12 @@ func Changes(layers []string, rw string) ([]Change, error) { return changes(layers, rw, aufsDeletedFile, aufsMetadataSkip, aufsWhiteoutPresent) } -func aufsMetadataSkip(path string) (skip bool, err error) { - skip, err = filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path) +func aufsMetadataSkip(path string) (bool, error) { + skip, err := filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path) if err != nil { skip = true } - return + return skip, err } func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) { diff --git a/vendor/go.podman.io/storage/pkg/archive/changes_windows.go b/vendor/go.podman.io/storage/pkg/archive/changes_windows.go index 947ec2d224f..997ee574e2e 100644 --- a/vendor/go.podman.io/storage/pkg/archive/changes_windows.go +++ b/vendor/go.podman.io/storage/pkg/archive/changes_windows.go @@ -20,8 +20,8 @@ func (info *FileInfo) isDir() bool { return info.parent == nil || info.stat.Mode().IsDir() } -func getIno(fi os.FileInfo) (inode uint64) { - return +func getIno(fi os.FileInfo) uint64 { + return 0 } func hasHardlinks(fi os.FileInfo) bool { diff --git a/vendor/go.podman.io/storage/pkg/archive/copy.go b/vendor/go.podman.io/storage/pkg/archive/copy.go index 308f132d586..3f91c72b991 100644 --- a/vendor/go.podman.io/storage/pkg/archive/copy.go +++ b/vendor/go.podman.io/storage/pkg/archive/copy.go @@ -93,13 +93,13 @@ func TarResource(sourceInfo CopyInfo) (content io.ReadCloser, err error) { // TarResourceRebase is like TarResource but renames the first path element of // items in the resulting tar archive to match the given rebaseName if not "". -func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, err error) { +func TarResourceRebase(sourcePath, rebaseName string) (io.ReadCloser, error) { sourcePath = normalizePath(sourcePath) - if err = fileutils.Lexists(sourcePath); err != nil { + if err := fileutils.Lexists(sourcePath); err != nil { // Catches the case where the source does not exist or is not a // directory if asserted to be a directory, as this also causes an // error. - return + return nil, err } // Separate the source path between its directory and @@ -411,7 +411,7 @@ func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseNa if followLink { resolvedPath, err = filepath.EvalSymlinks(path) if err != nil { - return + return "", "", err } resolvedPath, rebaseName = GetRebaseName(path, resolvedPath) @@ -422,7 +422,7 @@ func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseNa var resolvedDirPath string resolvedDirPath, err = filepath.EvalSymlinks(dirPath) if err != nil { - return + return "", "", err } // resolvedDirPath will have been cleaned (no trailing path separators) so // we can manually join it with the base path element. 
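ResolveHostSourcePath above resolves symlinks two different ways: with followLink it resolves the full path, link included, and without it it resolves only the parent directory, so the final path element keeps its original (possibly symlinked) name for rebasing. A compact Unix-only sketch of that split using temporary fixtures:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	dir, _ := os.MkdirTemp("", "rebase-demo")
	defer os.RemoveAll(dir)
	target := filepath.Join(dir, "real")
	link := filepath.Join(dir, "link")
	_ = os.Mkdir(target, 0o755)
	_ = os.Symlink(target, link)

	// followLink == true: resolve the whole path, the link itself included.
	resolvedLink, _ := filepath.EvalSymlinks(link)
	resolvedTarget, _ := filepath.EvalSymlinks(target)
	fmt.Println(resolvedLink == resolvedTarget) // true

	// followLink == false: resolve only the parent directory and keep the
	// base element, so a copy of "link" is still named "link" afterwards.
	parent, _ := filepath.EvalSymlinks(filepath.Dir(link))
	fmt.Println(filepath.Base(filepath.Join(parent, filepath.Base(link)))) // link
}
```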
diff --git a/vendor/go.podman.io/storage/pkg/archive/time_linux.go b/vendor/go.podman.io/storage/pkg/archive/time_linux.go index 3448569b1eb..a5233c09f60 100644 --- a/vendor/go.podman.io/storage/pkg/archive/time_linux.go +++ b/vendor/go.podman.io/storage/pkg/archive/time_linux.go @@ -10,7 +10,7 @@ func timeToTimespec(time time.Time) (ts syscall.Timespec) { // Return UTIME_OMIT special value ts.Sec = 0 ts.Nsec = ((1 << 30) - 2) - return + return ts } return syscall.NsecToTimespec(time.UnixNano()) } diff --git a/vendor/go.podman.io/storage/pkg/archive/wrap.go b/vendor/go.podman.io/storage/pkg/archive/wrap.go index 903befd7630..f8a97254eed 100644 --- a/vendor/go.podman.io/storage/pkg/archive/wrap.go +++ b/vendor/go.podman.io/storage/pkg/archive/wrap.go @@ -45,8 +45,8 @@ func Generate(input ...string) (io.Reader, error) { return buf, nil } -func parseStringPairs(input ...string) (output [][2]string) { - output = make([][2]string, 0, len(input)/2+1) +func parseStringPairs(input ...string) [][2]string { + output := make([][2]string, 0, len(input)/2+1) for i := 0; i < len(input); i += 2 { var pair [2]string pair[0] = input[i] @@ -55,5 +55,5 @@ func parseStringPairs(input ...string) (output [][2]string) { } output = append(output, pair) } - return + return output } diff --git a/vendor/go.podman.io/storage/pkg/chunked/filesystem_linux.go b/vendor/go.podman.io/storage/pkg/chunked/filesystem_linux.go index 3f8311bffc0..ceba7d0f3d9 100644 --- a/vendor/go.podman.io/storage/pkg/chunked/filesystem_linux.go +++ b/vendor/go.podman.io/storage/pkg/chunked/filesystem_linux.go @@ -150,7 +150,7 @@ func timeToTimespec(time *time.Time) (ts unix.Timespec) { // Return UTIME_OMIT special value ts.Sec = 0 ts.Nsec = ((1 << 30) - 2) - return + return ts } return unix.NsecToTimespec(time.UnixNano()) } diff --git a/vendor/go.podman.io/storage/pkg/chunked/storage_linux.go b/vendor/go.podman.io/storage/pkg/chunked/storage_linux.go index e6e3c9c6d14..23baef9a458 100644 --- a/vendor/go.podman.io/storage/pkg/chunked/storage_linux.go +++ b/vendor/go.podman.io/storage/pkg/chunked/storage_linux.go @@ -1277,7 +1277,7 @@ func ensureAllBlobsDone(streamsOrErrors chan streamOrErr) (retErr error) { retErr = soe.err } } - return + return retErr } // getBlobAtConverterGoroutine reads from the streams and errs channels, then sends diff --git a/vendor/go.podman.io/storage/pkg/directory/directory_unix.go b/vendor/go.podman.io/storage/pkg/directory/directory_unix.go index 9855abd13e9..8f0a373911b 100644 --- a/vendor/go.podman.io/storage/pkg/directory/directory_unix.go +++ b/vendor/go.podman.io/storage/pkg/directory/directory_unix.go @@ -19,10 +19,10 @@ func Size(dir string) (size int64, err error) { } // Usage walks a directory tree and returns its total size in bytes and the number of inodes. -func Usage(dir string) (usage *DiskUsage, err error) { - usage = &DiskUsage{} +func Usage(dir string) (*DiskUsage, error) { + usage := &DiskUsage{} data := make(map[uint64]struct{}) - err = filepath.WalkDir(dir, func(d string, entry fs.DirEntry, err error) error { + err := filepath.WalkDir(dir, func(d string, entry fs.DirEntry, err error) error { if err != nil { // if dir does not exist, Usage() returns the error. // if dir/x disappeared while walking, Usage() ignores dir/x. 
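The two timeToTimespec helpers above (archive/time_linux.go and chunked/filesystem_linux.go) encode a zero time as the kernel's UTIME_OMIT sentinel, `(1<<30)-2` in the nanoseconds field, which tells utimensat(2) to leave that timestamp untouched. A Linux-only sketch confirming the magic number matches the constant golang.org/x/sys/unix exports; the zero-time condition is assumed from the surrounding context:

```go
//go:build linux

package main

import (
	"fmt"
	"time"

	"golang.org/x/sys/unix"
)

// timeToTimespec mirrors the vendored helpers: a zero time becomes the
// UTIME_OMIT sentinel, so utimensat(2) leaves that timestamp unchanged.
func timeToTimespec(t time.Time) unix.Timespec {
	if t.IsZero() {
		return unix.Timespec{Sec: 0, Nsec: (1 << 30) - 2}
	}
	return unix.NsecToTimespec(t.UnixNano())
}

func main() {
	ts := timeToTimespec(time.Time{})
	fmt.Println(ts.Nsec == unix.UTIME_OMIT) // true: (1<<30)-2 is UTIME_OMIT
}
```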
@@ -58,5 +58,5 @@ func Usage(dir string) (usage *DiskUsage, err error) { }) // inode count is the number of unique inode numbers we saw usage.InodeCount = int64(len(data)) - return + return usage, err } diff --git a/vendor/go.podman.io/storage/pkg/directory/directory_windows.go b/vendor/go.podman.io/storage/pkg/directory/directory_windows.go index c2145c26fc8..6acedcc8ced 100644 --- a/vendor/go.podman.io/storage/pkg/directory/directory_windows.go +++ b/vendor/go.podman.io/storage/pkg/directory/directory_windows.go @@ -18,9 +18,9 @@ func Size(dir string) (size int64, err error) { } // Usage walks a directory tree and returns its total size in bytes and the number of inodes. -func Usage(dir string) (usage *DiskUsage, err error) { - usage = &DiskUsage{} - err = filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error { +func Usage(dir string) (*DiskUsage, error) { + usage := &DiskUsage{} + err := filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error { if err != nil { // if dir does not exist, Size() returns the error. // if dir/x disappeared while walking, Size() ignores dir/x. @@ -48,5 +48,5 @@ func Usage(dir string) (usage *DiskUsage, err error) { return nil }) - return + return usage, err } diff --git a/vendor/go.podman.io/storage/pkg/ioutils/bytespipe.go b/vendor/go.podman.io/storage/pkg/ioutils/bytespipe.go index cf605803598..47ab3450728 100644 --- a/vendor/go.podman.io/storage/pkg/ioutils/bytespipe.go +++ b/vendor/go.podman.io/storage/pkg/ioutils/bytespipe.go @@ -121,7 +121,8 @@ func (bp *BytesPipe) Close() error { // Read reads bytes from BytesPipe. // Data could be read only once. -func (bp *BytesPipe) Read(p []byte) (n int, err error) { +func (bp *BytesPipe) Read(p []byte) (int, error) { + var n int bp.mu.Lock() if bp.bufLen == 0 { if bp.closeErr != nil { @@ -158,7 +159,7 @@ func (bp *BytesPipe) Read(p []byte) (n int, err error) { bp.wait.Broadcast() bp.mu.Unlock() - return + return n, nil } func returnBuffer(b *fixedBuffer) { diff --git a/vendor/go.podman.io/storage/pkg/ioutils/readers.go b/vendor/go.podman.io/storage/pkg/ioutils/readers.go index 146e1a5ff05..aed1cb0331f 100644 --- a/vendor/go.podman.io/storage/pkg/ioutils/readers.go +++ b/vendor/go.podman.io/storage/pkg/ioutils/readers.go @@ -83,7 +83,7 @@ func (r *OnEOFReader) Read(p []byte) (n int, err error) { if err == io.EOF { r.runFunc() } - return + return n, err } // Close closes the file and run the function. diff --git a/vendor/go.podman.io/storage/pkg/ioutils/writers.go b/vendor/go.podman.io/storage/pkg/ioutils/writers.go index 0b6d0a7a6de..2a8007e4461 100644 --- a/vendor/go.podman.io/storage/pkg/ioutils/writers.go +++ b/vendor/go.podman.io/storage/pkg/ioutils/writers.go @@ -59,8 +59,8 @@ func NewWriteCounter(w io.Writer) *WriteCounter { } } -func (wc *WriteCounter) Write(p []byte) (count int, err error) { - count, err = wc.Writer.Write(p) +func (wc *WriteCounter) Write(p []byte) (int, error) { + count, err := wc.Writer.Write(p) wc.Count += int64(count) - return + return count, err } diff --git a/vendor/go.podman.io/storage/pkg/pools/pools.go b/vendor/go.podman.io/storage/pkg/pools/pools.go index 78b729c2e37..1179d9b9336 100644 --- a/vendor/go.podman.io/storage/pkg/pools/pools.go +++ b/vendor/go.podman.io/storage/pkg/pools/pools.go @@ -59,11 +59,11 @@ func (bufPool *BufioReaderPool) Put(b *bufio.Reader) { } // Copy is a convenience wrapper which uses a buffer to avoid allocation in io.Copy. 
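The pools.Copy rewrite here is behavior-preserving: take a pooled 32KiB bufio.Reader, wrap it around src, io.Copy through it, and return it to the pool so the buffer is reused across calls instead of reallocated. A minimal pool of the same shape; the size and names are assumed rather than copied from the package:

```go
package main

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
	"strings"
	"sync"
)

var readerPool = sync.Pool{
	New: func() any { return bufio.NewReaderSize(nil, 32*1024) },
}

// copyPooled drains src into dst through a pooled 32KiB reader.
func copyPooled(dst io.Writer, src io.Reader) (int64, error) {
	buf := readerPool.Get().(*bufio.Reader)
	buf.Reset(src)
	written, err := io.Copy(dst, buf)
	buf.Reset(nil) // drop the reference to src before pooling the reader
	readerPool.Put(buf)
	return written, err
}

func main() {
	var out bytes.Buffer
	n, err := copyPooled(&out, strings.NewReader("hello pools"))
	fmt.Println(n, err, out.String()) // 11 <nil> hello pools
}
```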
-func Copy(dst io.Writer, src io.Reader) (written int64, err error) { +func Copy(dst io.Writer, src io.Reader) (int64, error) { buf := BufioReader32KPool.Get(src) - written, err = io.Copy(dst, buf) + written, err := io.Copy(dst, buf) BufioReader32KPool.Put(buf) - return + return written, err } // NewReadCloserWrapper returns a wrapper which puts the bufio.Reader back diff --git a/vendor/go.podman.io/storage/pkg/system/exitcode.go b/vendor/go.podman.io/storage/pkg/system/exitcode.go index 60f0514b1dd..4d7b5c88058 100644 --- a/vendor/go.podman.io/storage/pkg/system/exitcode.go +++ b/vendor/go.podman.io/storage/pkg/system/exitcode.go @@ -29,5 +29,5 @@ func ProcessExitCode(err error) (exitCode int) { exitCode = 127 } } - return + return exitCode } diff --git a/vendor/go.podman.io/storage/pkg/system/stat_netbsd.go b/vendor/go.podman.io/storage/pkg/system/stat_netbsd.go index 715f05b9387..57850a883fe 100644 --- a/vendor/go.podman.io/storage/pkg/system/stat_netbsd.go +++ b/vendor/go.podman.io/storage/pkg/system/stat_netbsd.go @@ -4,10 +4,12 @@ import "syscall" // fromStatT converts a syscall.Stat_t type to a system.Stat_t type func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{size: s.Size, + return &StatT{ + size: s.Size, mode: uint32(s.Mode), uid: s.Uid, gid: s.Gid, rdev: uint64(s.Rdev), - mtim: s.Mtimespec}, nil + mtim: s.Mtimespec, + }, nil } diff --git a/vendor/go.podman.io/storage/pkg/system/xattrs_freebsd.go b/vendor/go.podman.io/storage/pkg/system/xattrs_freebsd.go index 5d653976e54..f62f5f74549 100644 --- a/vendor/go.podman.io/storage/pkg/system/xattrs_freebsd.go +++ b/vendor/go.podman.io/storage/pkg/system/xattrs_freebsd.go @@ -17,12 +17,10 @@ const ( EOVERFLOW unix.Errno = unix.EOVERFLOW ) -var ( - namespaceMap = map[string]int{ - "user": EXTATTR_NAMESPACE_USER, - "system": EXTATTR_NAMESPACE_SYSTEM, - } -) +var namespaceMap = map[string]int{ + "user": EXTATTR_NAMESPACE_USER, + "system": EXTATTR_NAMESPACE_SYSTEM, +} func xattrToExtattr(xattr string) (namespace int, extattr string, err error) { namespaceName, extattr, found := strings.Cut(xattr, ".") diff --git a/vendor/go.podman.io/storage/pkg/truncindex/truncindex.go b/vendor/go.podman.io/storage/pkg/truncindex/truncindex.go index c14a5cc4d2a..9b60338aa90 100644 --- a/vendor/go.podman.io/storage/pkg/truncindex/truncindex.go +++ b/vendor/go.podman.io/storage/pkg/truncindex/truncindex.go @@ -43,8 +43,8 @@ type TruncIndex struct { // NewTruncIndex creates a new TruncIndex and initializes with a list of IDs. // Invalid IDs are _silently_ ignored. -func NewTruncIndex(ids []string) (idx *TruncIndex) { - idx = &TruncIndex{ +func NewTruncIndex(ids []string) *TruncIndex { + idx := &TruncIndex{ ids: make(map[string]struct{}), // Change patricia max prefix per node length, @@ -54,7 +54,7 @@ func NewTruncIndex(ids []string) (idx *TruncIndex) { for _, id := range ids { _ = idx.addID(id) // Ignore invalid IDs. Duplicate IDs are not a problem. 
} - return + return idx } func (idx *TruncIndex) addID(id string) error { diff --git a/vendor/modules.txt b/vendor/modules.txt index 1fc6089d29c..3c97621f776 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -41,8 +41,8 @@ github.com/containerd/log # github.com/containerd/platforms v1.0.0-rc.1 ## explicit; go 1.20 github.com/containerd/platforms -# github.com/containerd/stargz-snapshotter/estargz v0.17.0 -## explicit; go 1.23.0 +# github.com/containerd/stargz-snapshotter/estargz v0.18.0 +## explicit; go 1.24.0 github.com/containerd/stargz-snapshotter/estargz github.com/containerd/stargz-snapshotter/estargz/errorutil # github.com/containerd/typeurl/v2 v2.2.3 @@ -136,7 +136,7 @@ github.com/docker/docker/client github.com/docker/docker/pkg/homedir github.com/docker/docker/pkg/jsonmessage github.com/docker/docker/pkg/stdcopy -# github.com/docker/docker-credential-helpers v0.9.3 +# github.com/docker/docker-credential-helpers v0.9.4 ## explicit; go 1.21 github.com/docker/docker-credential-helpers/client github.com/docker/docker-credential-helpers/credentials @@ -208,8 +208,8 @@ github.com/jinzhu/copier # github.com/json-iterator/go v1.1.12 ## explicit; go 1.12 github.com/json-iterator/go -# github.com/klauspost/compress v1.18.0 -## explicit; go 1.22 +# github.com/klauspost/compress v1.18.1 +## explicit; go 1.23 github.com/klauspost/compress github.com/klauspost/compress/flate github.com/klauspost/compress/fse @@ -247,9 +247,9 @@ github.com/mattn/go-sqlite3 # github.com/miekg/pkcs11 v1.1.1 ## explicit; go 1.12 github.com/miekg/pkcs11 -# github.com/mistifyio/go-zfs/v3 v3.0.1 +# github.com/mistifyio/go-zfs/v4 v4.0.0 ## explicit; go 1.14 -github.com/mistifyio/go-zfs/v3 +github.com/mistifyio/go-zfs/v4 # github.com/moby/buildkit v0.25.1 ## explicit; go 1.24.0 github.com/moby/buildkit/frontend/dockerfile/command @@ -407,7 +407,7 @@ github.com/ulikunitz/xz github.com/ulikunitz/xz/internal/hash github.com/ulikunitz/xz/internal/xlog github.com/ulikunitz/xz/lzma -# github.com/vbatts/tar-split v0.12.1 +# github.com/vbatts/tar-split v0.12.2 ## explicit; go 1.17 github.com/vbatts/tar-split/archive/tar github.com/vbatts/tar-split/tar/asm @@ -464,7 +464,7 @@ go.opentelemetry.io/otel/trace go.opentelemetry.io/otel/trace/embedded go.opentelemetry.io/otel/trace/internal/telemetry go.opentelemetry.io/otel/trace/noop -# go.podman.io/common v0.65.1-0.20250916163606-92222dcd3da4 +# go.podman.io/common v0.65.1-0.20250916163606-92222dcd3da4 => github.com/ninja-quokka/container-libs/common v0.66.1-0.20251021200235-096ee6bbf8e8 ## explicit; go 1.24.2 go.podman.io/common/internal go.podman.io/common/internal/attributedstring @@ -518,7 +518,7 @@ go.podman.io/common/pkg/umask go.podman.io/common/pkg/util go.podman.io/common/pkg/version go.podman.io/common/version -# go.podman.io/image/v5 v5.37.1-0.20250916163606-92222dcd3da4 +# go.podman.io/image/v5 v5.38.0 => github.com/ninja-quokka/container-libs/image/v5 v5.38.1-0.20251021200235-096ee6bbf8e8 ## explicit; go 1.24.0 go.podman.io/image/v5/copy go.podman.io/image/v5/directory @@ -586,7 +586,7 @@ go.podman.io/image/v5/transports go.podman.io/image/v5/transports/alltransports go.podman.io/image/v5/types go.podman.io/image/v5/version -# go.podman.io/storage v1.60.1-0.20250916163606-92222dcd3da4 +# go.podman.io/storage v1.61.0 => github.com/ninja-quokka/container-libs/storage v1.61.1-0.20251021200235-096ee6bbf8e8 ## explicit; go 1.24.0 go.podman.io/storage go.podman.io/storage/drivers @@ -826,3 +826,6 @@ tags.cncf.io/container-device-interface/pkg/parser # 
tags.cncf.io/container-device-interface/specs-go v1.0.0 ## explicit; go 1.19 tags.cncf.io/container-device-interface/specs-go +# go.podman.io/common => github.com/ninja-quokka/container-libs/common v0.66.1-0.20251021200235-096ee6bbf8e8 +# go.podman.io/storage => github.com/ninja-quokka/container-libs/storage v1.61.1-0.20251021200235-096ee6bbf8e8 +# go.podman.io/image/v5 => github.com/ninja-quokka/container-libs/image/v5 v5.38.1-0.20251021200235-096ee6bbf8e8
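
Note for reviewers: every Go hunk in the vendored go.podman.io/storage packages above applies the same mechanical refactor, replacing named result parameters and naked `return` statements with explicit return values. A minimal sketch of the pattern, illustrative only and not taken from the vendored code (the `sumNamed`/`sum` names are invented for this example):

```go
package main

import "fmt"

// Before: named results with a naked return. The reader must scan the
// signature to learn what a bare `return` actually yields.
func sumNamed(xs []int) (total int, err error) {
	for _, x := range xs {
		total += x
	}
	return // implicitly returns (total, err)
}

// After: explicit return values, matching the style adopted in this diff.
// The data flow is visible at every return site.
func sum(xs []int) (int, error) {
	total := 0
	for _, x := range xs {
		total += x
	}
	return total, nil
}

func main() {
	got, err := sum([]int{1, 2, 3})
	fmt.Println(got, err) // prints: 6 <nil>
}
```

The `=>` markers added to vendor/modules.txt are not hand-written: `go mod vendor` regenerates them from the `replace` directives in go.mod, so the two files stay in sync.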