From a4c965a586b7016bb5d0840494c900bbd3738988 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 15 Aug 2023 02:54:28 +0000
Subject: [PATCH 1/2] build(deps): bump github.com/anchore/syft from 0.85.0 to 0.87.1

Bumps [github.com/anchore/syft](https://github.com/anchore/syft) from 0.85.0 to 0.87.1
- [Release notes](https://github.com/anchore/syft/releases)
- [Changelog](https://github.com/anchore/syft/blob/main/.goreleaser.yaml)
- [Commits](https://github.com/anchore/syft/compare/v0.85.0...v0.87.1)

---
updated-dependencies:
- dependency-name: github.com/anchore/syft
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot]
---
 go.mod | 46 +-
 go.sum | 108 +-
 vendor/dario.cat/mergo/.deepsource.toml | 12 +
 vendor/dario.cat/mergo/.gitignore | 33 +
 vendor/dario.cat/mergo/.travis.yml | 12 +
 vendor/dario.cat/mergo/CODE_OF_CONDUCT.md | 46 +
 vendor/dario.cat/mergo/CONTRIBUTING.md | 112 +
 vendor/dario.cat/mergo/LICENSE | 28 +
 vendor/dario.cat/mergo/README.md | 248 ++
 vendor/dario.cat/mergo/SECURITY.md | 14 +
 vendor/dario.cat/mergo/doc.go | 148 ++
 vendor/dario.cat/mergo/map.go | 178 ++
 vendor/dario.cat/mergo/merge.go | 409 +++
 vendor/dario.cat/mergo/mergo.go | 81 +
 .../go-crypto/openpgp/armor/armor.go | 11 +-
 .../ProtonMail/go-crypto/openpgp/keys.go | 4 +-
 .../go-crypto/openpgp/packet/public_key.go | 4 +
 .../go-crypto/openpgp/packet/signature.go | 2 +-
 .../openpgp/packet/symmetric_key_encrypted.go | 31 +-
 .../ProtonMail/go-crypto/openpgp/write.go | 2 +-
 .../github.com/anchore/stereoscope/client.go | 44 +-
 .../pkg/image/docker/daemon_provider.go | 28 +-
 .../anchore/stereoscope/pkg/image/layer.go | 1 +
 .../github.com/anchore/syft/CONTRIBUTORS.md | 4 +
 .../anchore/syft/internal/constants.go | 2 +-
 .../common/cyclonedxhelpers/decoder.go | 21 +-
 .../formats/common/cyclonedxhelpers/format.go | 12 +-
 .../common/spdxhelpers/document_name.go | 8 +-
 .../formats/common/spdxhelpers/license.go | 45 +-
 .../formats/common/spdxhelpers/source_info.go | 2 +
 .../common/spdxhelpers/to_format_model.go | 218 +-
 .../common/spdxhelpers/to_syft_model.go | 271 +-
 vendor/github.com/anchore/syft/syft/lib.go | 24 +-
 .../cataloger/binary/default_classifiers.go | 15 +
 .../syft/syft/pkg/cataloger/catalog.go | 9 +-
 .../syft/syft/pkg/cataloger/cataloger.go | 22 +-
 .../common/cpe/dictionary/data/cpe-index.json | 1298 ++++++++++
 .../common/cpe/dictionary/generate_index.go | 3 +
 .../cataloger/common/cpe/dictionary/types.go | 15 +
 .../syft/pkg/cataloger/common/cpe/generate.go | 76 +
 .../syft/pkg/cataloger/common/cpe/java.go | 12 +-
 .../cataloger/common/cpe/java_groupid_map.go | 69 +
 .../anchore/syft/syft/pkg/cataloger/config.go | 30 +-
 .../syft/syft/pkg/cataloger/deb/cataloger.go | 2 +-
 .../syft/pkg/cataloger/dotnet/cataloger.go | 9 +-
 .../parse_dotnet_portable_executable.go | 87 +
 .../syft/pkg/cataloger/java/package_url.go | 2 +-
 .../cataloger/java/parse_gradle_lockfile.go | 9 +
 .../syft/pkg/cataloger/java/parse_pom_xml.go | 94 +-
 .../syft/pkg/cataloger/package_exclusions.go | 51 +
 .../syft/pkg/cataloger/python/cataloger.go | 15 +-
 .../cataloger/python/parse_requirements.go | 272 +-
 .../syft/syft/pkg/cataloger/rpm/cataloger.go | 9 +-
 .../syft/pkg/cataloger/swift/cataloger.go | 7 +-
 .../syft/syft/pkg/cataloger/swift/package.go | 40 +-
 .../cataloger/swift/parse_package_resolved.go | 134 +
 .../pkg/cataloger/swift/parse_podfile_lock.go | 2 +-
 .../dotnet_portable_executable_metadata.go | 11 +
 .../anchore/syft/syft/pkg/language.go |
2 +- .../anchore/syft/syft/pkg/metadata.go | 128 +- .../syft/pkg/python_requirements_metadata.go | 10 +- .../anchore/syft/syft/pkg/rpm_metadata.go | 2 +- .../syft/pkg/swiftpackagemanager_metadata.go | 5 + .../github.com/anchore/syft/syft/pkg/type.go | 6 + .../github.com/anchore/syft/syft/sbom/sbom.go | 14 +- .../syft/syft/source/directory_source.go | 3 + .../syft/source/stereoscope_image_source.go | 42 +- .../aquasecurity/go-pep440-version/.gitignore | 15 + .../aquasecurity/go-pep440-version/LICENSE | 201 ++ .../aquasecurity/go-pep440-version/README.md | 51 + .../go-pep440-version/specifier.go | 385 +++ .../go-pep440-version/specifier_option.go | 15 + .../aquasecurity/go-pep440-version/version.go | 358 +++ .../aquasecurity/go-version/LICENSE | 201 ++ .../aquasecurity/go-version/pkg/part/any.go | 33 + .../aquasecurity/go-version/pkg/part/empty.go | 28 + .../go-version/pkg/part/infinity.go | 51 + .../aquasecurity/go-version/pkg/part/int.go | 58 + .../aquasecurity/go-version/pkg/part/list.go | 149 ++ .../aquasecurity/go-version/pkg/part/part.go | 21 + .../go-version/pkg/part/string.go | 94 + .../docker/cli/cli/config/configfile/file.go | 1 - .../cli/connhelper/commandconn/commandconn.go | 2 +- .../docker/cli/cli/connhelper/connhelper.go | 7 +- .../docker/cli/cli/connhelper/ssh/ssh.go | 5 +- .../github.com/docker/docker/api/swagger.yaml | 46 +- .../docker/docker/api/types/configs.go | 4 +- .../docker/docker/api/types/image/opts.go | 4 +- .../docker/api/types/registry/registry.go | 6 +- .../github.com/docker/docker/client/client.go | 30 + .../docker/docker/client/container_create.go | 6 +- .../github.com/docker/docker/client/hijack.go | 13 +- .../docker/docker/client/interface.go | 4 +- .../docker/docker/client/request.go | 10 +- vendor/github.com/edsrzf/mmap-go/.gitignore | 11 + vendor/github.com/edsrzf/mmap-go/LICENSE | 25 + vendor/github.com/edsrzf/mmap-go/README.md | 14 + vendor/github.com/edsrzf/mmap-go/mmap.go | 117 + .../github.com/edsrzf/mmap-go/mmap_plan9.go | 27 + vendor/github.com/edsrzf/mmap-go/mmap_unix.go | 51 + .../github.com/edsrzf/mmap-go/mmap_windows.go | 154 ++ .../go-git/go-git/v5/COMPATIBILITY.md | 344 ++- .../github.com/go-git/go-git/v5/EXTENDING.md | 78 + .../github.com/go-git/go-git/v5/SECURITY.md | 38 + vendor/github.com/go-git/go-git/v5/blame.go | 612 +++-- .../go-git/v5/internal/path_util/path_util.go | 29 + vendor/github.com/go-git/go-git/v5/options.go | 3 + .../v5/plumbing/format/gitignore/dir.go | 4 + .../v5/plumbing/format/gitignore/pattern.go | 2 + .../go-git/v5/plumbing/object/commit.go | 11 + .../plumbing/protocol/packp/advrefs_decode.go | 1 + .../v5/plumbing/protocol/packp/uppackreq.go | 6 +- .../go-git/go-git/v5/plumbing/reference.go | 7 +- .../v5/plumbing/transport/http/common.go | 11 + .../transport/internal/common/common.go | 2 +- .../github.com/go-git/go-git/v5/references.go | 264 -- vendor/github.com/go-git/go-git/v5/remote.go | 118 +- .../github.com/go-git/go-git/v5/repository.go | 60 +- .../v5/storage/filesystem/dotgit/dotgit.go | 4 +- .../github.com/go-git/go-git/v5/submodule.go | 14 +- .../go-git/go-git/v5/worktree_status.go | 26 +- .../pkg/v1/layout/write.go | 1 + .../pkg/v1/mutate/mutate.go | 4 +- .../pkg/v1/remote/descriptor.go | 14 +- .../pkg/v1/remote/fetcher.go | 8 +- .../pkg/v1/remote/options.go | 3 +- .../pkg/v1/remote/transport/bearer.go | 137 +- .../pkg/v1/remote/transport/ping.go | 60 +- .../pkg/v1/remote/transport/schemer.go | 2 +- .../pkg/v1/remote/transport/transport.go | 47 +- .../pkg/v1/remote/write.go | 12 +- 
.../in-toto-golang/in_toto/attestations.go | 99 + .../in-toto-golang/in_toto/envelope.go | 166 ++ .../in-toto/in-toto-golang/in_toto/keylib.go | 30 +- .../in-toto/in-toto-golang/in_toto/model.go | 264 +- .../in-toto/in-toto-golang/in_toto/runlib.go | 152 +- .../slsa_provenance/v0.2/provenance.go | 7 + .../in_toto/slsa_provenance/v1/provenance.go | 151 ++ .../in-toto/in-toto-golang/in_toto/util.go | 43 + .../in-toto-golang/in_toto/verifylib.go | 251 +- vendor/github.com/saferwall/pe/.editorconfig | 23 + vendor/github.com/saferwall/pe/.gitattributes | 16 + vendor/github.com/saferwall/pe/.gitignore | 29 + vendor/github.com/saferwall/pe/CHANGELOG.md | 150 ++ .../saferwall/pe/CODE_OF_CONDUCT.md | 46 + vendor/github.com/saferwall/pe/LICENSE | 21 + vendor/github.com/saferwall/pe/README.md | 265 ++ vendor/github.com/saferwall/pe/anomaly.go | 218 ++ vendor/github.com/saferwall/pe/arch.go | 11 + .../github.com/saferwall/pe/boundimports.go | 154 ++ vendor/github.com/saferwall/pe/debug.go | 772 ++++++ .../github.com/saferwall/pe/delayimports.go | 155 ++ vendor/github.com/saferwall/pe/dosheader.go | 108 + vendor/github.com/saferwall/pe/dotnet.go | 773 ++++++ vendor/github.com/saferwall/pe/exception.go | 598 +++++ vendor/github.com/saferwall/pe/exports.go | 329 +++ vendor/github.com/saferwall/pe/file.go | 385 +++ vendor/github.com/saferwall/pe/globalptr.go | 36 + vendor/github.com/saferwall/pe/helper.go | 697 +++++ vendor/github.com/saferwall/pe/iat.go | 67 + vendor/github.com/saferwall/pe/imports.go | 797 ++++++ vendor/github.com/saferwall/pe/loadconfig.go | 1521 +++++++++++ vendor/github.com/saferwall/pe/log/README.md | 42 + vendor/github.com/saferwall/pe/log/filter.go | 96 + vendor/github.com/saferwall/pe/log/global.go | 122 + vendor/github.com/saferwall/pe/log/helper.go | 130 + vendor/github.com/saferwall/pe/log/level.go | 56 + vendor/github.com/saferwall/pe/log/log.go | 71 + vendor/github.com/saferwall/pe/log/std.go | 47 + vendor/github.com/saferwall/pe/log/value.go | 71 + vendor/github.com/saferwall/pe/ntheader.go | 602 +++++ vendor/github.com/saferwall/pe/ordlookup.go | 554 ++++ vendor/github.com/saferwall/pe/overlay.go | 44 + vendor/github.com/saferwall/pe/pe.go | 229 ++ vendor/github.com/saferwall/pe/reloc.go | 257 ++ vendor/github.com/saferwall/pe/resource.go | 2233 +++++++++++++++++ vendor/github.com/saferwall/pe/richheader.go | 529 ++++ vendor/github.com/saferwall/pe/section.go | 568 +++++ vendor/github.com/saferwall/pe/security.go | 503 ++++ .../github.com/saferwall/pe/staticcheck.conf | 1 + vendor/github.com/saferwall/pe/symbol.go | 459 ++++ vendor/github.com/saferwall/pe/tls.go | 189 ++ vendor/github.com/saferwall/pe/version.go | 380 +++ .../cjson/canonicaljson.go | 46 +- .../go-securesystemslib/dsse/envelope.go | 64 + .../go-securesystemslib/dsse/sign.go | 137 +- .../dsse/signerverifier.go | 43 + .../go-securesystemslib/dsse/verify.go | 26 +- .../signerverifier/ecdsa.go | 111 + .../signerverifier/ed25519.go | 98 + .../go-securesystemslib/signerverifier/rsa.go | 141 ++ .../signerverifier/signerverifier.go | 34 + .../signerverifier/utils.go | 150 ++ vendor/github.com/skeema/knownhosts/NOTICE | 2 +- vendor/github.com/skeema/knownhosts/README.md | 7 +- .../skeema/knownhosts/knownhosts.go | 46 +- .../tools-golang/spdx/v2/common/package.go | 6 +- vendor/github.com/sylabs/sif/v2/LICENSE.md | 2 +- .../sylabs/sif/v2/pkg/sif/create.go | 100 +- .../sylabs/sif/v2/pkg/sif/descriptor.go | 84 +- .../sylabs/sif/v2/pkg/sif/descriptor_input.go | 26 +- .../github.com/sylabs/sif/v2/pkg/sif/sif.go | 8 +- 
vendor/github.com/vifraa/gopom/example_pom.go | 72 +- vendor/github.com/vifraa/gopom/gopom.go | 384 +-- vendor/go.mozilla.org/pkcs7/.gitignore | 24 + vendor/go.mozilla.org/pkcs7/LICENSE | 22 + vendor/go.mozilla.org/pkcs7/Makefile | 20 + vendor/go.mozilla.org/pkcs7/README.md | 69 + vendor/go.mozilla.org/pkcs7/ber.go | 271 ++ vendor/go.mozilla.org/pkcs7/decrypt.go | 177 ++ vendor/go.mozilla.org/pkcs7/encrypt.go | 399 +++ vendor/go.mozilla.org/pkcs7/pkcs7.go | 291 +++ vendor/go.mozilla.org/pkcs7/sign.go | 429 ++++ vendor/go.mozilla.org/pkcs7/verify.go | 343 +++ .../go.mozilla.org/pkcs7/verify_test_dsa.go | 182 ++ vendor/golang.org/x/crypto/ssh/common.go | 51 +- vendor/golang.org/x/crypto/ssh/kex.go | 12 + vendor/golang.org/x/crypto/ssh/server.go | 21 +- vendor/golang.org/x/net/html/render.go | 28 +- vendor/golang.org/x/sys/unix/mkerrors.sh | 2 +- vendor/golang.org/x/sys/unix/mmap_nomremap.go | 14 + vendor/golang.org/x/sys/unix/mremap.go | 21 +- vendor/golang.org/x/sys/unix/syscall_aix.go | 15 - vendor/golang.org/x/sys/unix/syscall_bsd.go | 14 - .../golang.org/x/sys/unix/syscall_darwin.go | 50 +- vendor/golang.org/x/sys/unix/syscall_linux.go | 63 +- .../x/sys/unix/syscall_linux_amd64.go | 2 +- .../x/sys/unix/syscall_linux_arm64.go | 2 +- .../x/sys/unix/syscall_linux_loong64.go | 2 +- .../x/sys/unix/syscall_linux_mips64x.go | 2 +- .../x/sys/unix/syscall_linux_riscv64.go | 13 +- .../golang.org/x/sys/unix/syscall_netbsd.go | 13 +- .../golang.org/x/sys/unix/syscall_solaris.go | 14 - vendor/golang.org/x/sys/unix/syscall_unix.go | 8 + .../x/sys/unix/syscall_zos_s390x.go | 14 - .../x/sys/unix/zerrors_linux_386.go | 9 + .../x/sys/unix/zerrors_linux_amd64.go | 9 + .../x/sys/unix/zerrors_linux_arm.go | 9 + .../x/sys/unix/zerrors_linux_arm64.go | 9 + .../x/sys/unix/zerrors_linux_loong64.go | 9 + .../x/sys/unix/zerrors_linux_mips.go | 9 + .../x/sys/unix/zerrors_linux_mips64.go | 9 + .../x/sys/unix/zerrors_linux_mips64le.go | 9 + .../x/sys/unix/zerrors_linux_mipsle.go | 9 + .../x/sys/unix/zerrors_linux_ppc.go | 9 + .../x/sys/unix/zerrors_linux_ppc64.go | 9 + .../x/sys/unix/zerrors_linux_ppc64le.go | 9 + .../x/sys/unix/zerrors_linux_riscv64.go | 9 + .../x/sys/unix/zerrors_linux_s390x.go | 9 + .../x/sys/unix/zerrors_linux_sparc64.go | 9 + .../golang.org/x/sys/unix/zsyscall_linux.go | 2 +- .../x/sys/unix/zsyscall_linux_riscv64.go | 16 + .../x/sys/unix/zsyscall_netbsd_386.go | 11 + .../x/sys/unix/zsyscall_netbsd_amd64.go | 11 + .../x/sys/unix/zsyscall_netbsd_arm.go | 11 + .../x/sys/unix/zsyscall_netbsd_arm64.go | 11 + .../x/sys/unix/zsysnum_linux_riscv64.go | 2 + vendor/golang.org/x/sys/unix/ztypes_linux.go | 5 + .../x/sys/unix/ztypes_linux_riscv64.go | 23 + .../x/sys/windows/syscall_windows.go | 4 +- vendor/golang.org/x/text/language/match.go | 2 +- .../x/tools/go/types/objectpath/objectpath.go | 74 +- .../x/tools/internal/gocommand/invoke.go | 128 +- .../x/tools/internal/gocommand/version.go | 18 +- vendor/modules.txt | 69 +- 265 files changed, 26714 insertions(+), 2333 deletions(-) create mode 100644 vendor/dario.cat/mergo/.deepsource.toml create mode 100644 vendor/dario.cat/mergo/.gitignore create mode 100644 vendor/dario.cat/mergo/.travis.yml create mode 100644 vendor/dario.cat/mergo/CODE_OF_CONDUCT.md create mode 100644 vendor/dario.cat/mergo/CONTRIBUTING.md create mode 100644 vendor/dario.cat/mergo/LICENSE create mode 100644 vendor/dario.cat/mergo/README.md create mode 100644 vendor/dario.cat/mergo/SECURITY.md create mode 100644 vendor/dario.cat/mergo/doc.go create mode 100644 
vendor/dario.cat/mergo/map.go create mode 100644 vendor/dario.cat/mergo/merge.go create mode 100644 vendor/dario.cat/mergo/mergo.go create mode 100644 vendor/github.com/anchore/syft/syft/pkg/cataloger/common/cpe/dictionary/data/cpe-index.json create mode 100644 vendor/github.com/anchore/syft/syft/pkg/cataloger/common/cpe/dictionary/generate_index.go create mode 100644 vendor/github.com/anchore/syft/syft/pkg/cataloger/common/cpe/dictionary/types.go create mode 100644 vendor/github.com/anchore/syft/syft/pkg/cataloger/common/cpe/java_groupid_map.go create mode 100644 vendor/github.com/anchore/syft/syft/pkg/cataloger/dotnet/parse_dotnet_portable_executable.go create mode 100644 vendor/github.com/anchore/syft/syft/pkg/cataloger/package_exclusions.go create mode 100644 vendor/github.com/anchore/syft/syft/pkg/cataloger/swift/parse_package_resolved.go create mode 100644 vendor/github.com/anchore/syft/syft/pkg/dotnet_portable_executable_metadata.go create mode 100644 vendor/github.com/anchore/syft/syft/pkg/swiftpackagemanager_metadata.go create mode 100644 vendor/github.com/aquasecurity/go-pep440-version/.gitignore create mode 100644 vendor/github.com/aquasecurity/go-pep440-version/LICENSE create mode 100644 vendor/github.com/aquasecurity/go-pep440-version/README.md create mode 100644 vendor/github.com/aquasecurity/go-pep440-version/specifier.go create mode 100644 vendor/github.com/aquasecurity/go-pep440-version/specifier_option.go create mode 100644 vendor/github.com/aquasecurity/go-pep440-version/version.go create mode 100644 vendor/github.com/aquasecurity/go-version/LICENSE create mode 100644 vendor/github.com/aquasecurity/go-version/pkg/part/any.go create mode 100644 vendor/github.com/aquasecurity/go-version/pkg/part/empty.go create mode 100644 vendor/github.com/aquasecurity/go-version/pkg/part/infinity.go create mode 100644 vendor/github.com/aquasecurity/go-version/pkg/part/int.go create mode 100644 vendor/github.com/aquasecurity/go-version/pkg/part/list.go create mode 100644 vendor/github.com/aquasecurity/go-version/pkg/part/part.go create mode 100644 vendor/github.com/aquasecurity/go-version/pkg/part/string.go create mode 100644 vendor/github.com/edsrzf/mmap-go/.gitignore create mode 100644 vendor/github.com/edsrzf/mmap-go/LICENSE create mode 100644 vendor/github.com/edsrzf/mmap-go/README.md create mode 100644 vendor/github.com/edsrzf/mmap-go/mmap.go create mode 100644 vendor/github.com/edsrzf/mmap-go/mmap_plan9.go create mode 100644 vendor/github.com/edsrzf/mmap-go/mmap_unix.go create mode 100644 vendor/github.com/edsrzf/mmap-go/mmap_windows.go create mode 100644 vendor/github.com/go-git/go-git/v5/EXTENDING.md create mode 100644 vendor/github.com/go-git/go-git/v5/SECURITY.md create mode 100644 vendor/github.com/go-git/go-git/v5/internal/path_util/path_util.go delete mode 100644 vendor/github.com/go-git/go-git/v5/references.go create mode 100644 vendor/github.com/in-toto/in-toto-golang/in_toto/attestations.go create mode 100644 vendor/github.com/in-toto/in-toto-golang/in_toto/envelope.go create mode 100644 vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v1/provenance.go create mode 100644 vendor/github.com/saferwall/pe/.editorconfig create mode 100644 vendor/github.com/saferwall/pe/.gitattributes create mode 100644 vendor/github.com/saferwall/pe/.gitignore create mode 100644 vendor/github.com/saferwall/pe/CHANGELOG.md create mode 100644 vendor/github.com/saferwall/pe/CODE_OF_CONDUCT.md create mode 100644 vendor/github.com/saferwall/pe/LICENSE create mode 100644 
vendor/github.com/saferwall/pe/README.md create mode 100644 vendor/github.com/saferwall/pe/anomaly.go create mode 100644 vendor/github.com/saferwall/pe/arch.go create mode 100644 vendor/github.com/saferwall/pe/boundimports.go create mode 100644 vendor/github.com/saferwall/pe/debug.go create mode 100644 vendor/github.com/saferwall/pe/delayimports.go create mode 100644 vendor/github.com/saferwall/pe/dosheader.go create mode 100644 vendor/github.com/saferwall/pe/dotnet.go create mode 100644 vendor/github.com/saferwall/pe/exception.go create mode 100644 vendor/github.com/saferwall/pe/exports.go create mode 100644 vendor/github.com/saferwall/pe/file.go create mode 100644 vendor/github.com/saferwall/pe/globalptr.go create mode 100644 vendor/github.com/saferwall/pe/helper.go create mode 100644 vendor/github.com/saferwall/pe/iat.go create mode 100644 vendor/github.com/saferwall/pe/imports.go create mode 100644 vendor/github.com/saferwall/pe/loadconfig.go create mode 100644 vendor/github.com/saferwall/pe/log/README.md create mode 100644 vendor/github.com/saferwall/pe/log/filter.go create mode 100644 vendor/github.com/saferwall/pe/log/global.go create mode 100644 vendor/github.com/saferwall/pe/log/helper.go create mode 100644 vendor/github.com/saferwall/pe/log/level.go create mode 100644 vendor/github.com/saferwall/pe/log/log.go create mode 100644 vendor/github.com/saferwall/pe/log/std.go create mode 100644 vendor/github.com/saferwall/pe/log/value.go create mode 100644 vendor/github.com/saferwall/pe/ntheader.go create mode 100644 vendor/github.com/saferwall/pe/ordlookup.go create mode 100644 vendor/github.com/saferwall/pe/overlay.go create mode 100644 vendor/github.com/saferwall/pe/pe.go create mode 100644 vendor/github.com/saferwall/pe/reloc.go create mode 100644 vendor/github.com/saferwall/pe/resource.go create mode 100644 vendor/github.com/saferwall/pe/richheader.go create mode 100644 vendor/github.com/saferwall/pe/section.go create mode 100644 vendor/github.com/saferwall/pe/security.go create mode 100644 vendor/github.com/saferwall/pe/staticcheck.conf create mode 100644 vendor/github.com/saferwall/pe/symbol.go create mode 100644 vendor/github.com/saferwall/pe/tls.go create mode 100644 vendor/github.com/saferwall/pe/version.go create mode 100644 vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/envelope.go create mode 100644 vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/signerverifier.go create mode 100644 vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/ecdsa.go create mode 100644 vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/ed25519.go create mode 100644 vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/rsa.go create mode 100644 vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/signerverifier.go create mode 100644 vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/utils.go create mode 100644 vendor/go.mozilla.org/pkcs7/.gitignore create mode 100644 vendor/go.mozilla.org/pkcs7/LICENSE create mode 100644 vendor/go.mozilla.org/pkcs7/Makefile create mode 100644 vendor/go.mozilla.org/pkcs7/README.md create mode 100644 vendor/go.mozilla.org/pkcs7/ber.go create mode 100644 vendor/go.mozilla.org/pkcs7/decrypt.go create mode 100644 vendor/go.mozilla.org/pkcs7/encrypt.go create mode 100644 vendor/go.mozilla.org/pkcs7/pkcs7.go create mode 100644 vendor/go.mozilla.org/pkcs7/sign.go create mode 100644 vendor/go.mozilla.org/pkcs7/verify.go create mode 100644 
vendor/go.mozilla.org/pkcs7/verify_test_dsa.go
 create mode 100644 vendor/golang.org/x/sys/unix/mmap_nomremap.go

diff --git a/go.mod b/go.mod
index 82b7334f..29c62833 100644
--- a/go.mod
+++ b/go.mod
@@ -4,14 +4,15 @@ go 1.20
 
 require (
 	github.com/anchore/go-logger v0.0.0-20230531193951-db5ae83e7dbe
-	github.com/anchore/stereoscope v0.0.0-20230627195312-cd49355d934e
-	github.com/anchore/syft v0.85.0
-	github.com/in-toto/in-toto-golang v0.4.1-0.20221018183522-731d0640b65f
+	github.com/anchore/stereoscope v0.0.0-20230727211946-d1f3d766295e
+	github.com/anchore/syft v0.87.1
+	github.com/in-toto/in-toto-golang v0.9.0
 	github.com/pkg/errors v0.9.1
 	github.com/sirupsen/logrus v1.9.3
 )
 
 require (
+	dario.cat/mergo v1.0.0 // indirect
 	github.com/CycloneDX/cyclonedx-go v0.7.1 // indirect
 	github.com/DataDog/zstd v1.4.5 // indirect
 	github.com/Masterminds/goutils v1.1.1 // indirect
@@ -19,41 +20,44 @@ require (
 	github.com/Masterminds/semver/v3 v3.2.0 // indirect
 	github.com/Masterminds/sprig/v3 v3.2.3 // indirect
 	github.com/Microsoft/go-winio v0.6.1 // indirect
-	github.com/ProtonMail/go-crypto v0.0.0-20230518184743-7afd39499903 // indirect
+	github.com/ProtonMail/go-crypto v0.0.0-20230717121422-5aa5874ade95 // indirect
 	github.com/acobaugh/osrelease v0.1.0 // indirect
 	github.com/acomagu/bufpipe v1.0.4 // indirect
 	github.com/anchore/go-macholibre v0.0.0-20220308212642-53e6d0aaf6fb // indirect
 	github.com/anchore/go-struct-converter v0.0.0-20221118182256-c68fdcfa2092 // indirect
 	github.com/anchore/packageurl-go v0.1.1-0.20230104203445-02e0a6721501 // indirect
 	github.com/andybalholm/brotli v1.0.4 // indirect
+	github.com/aquasecurity/go-pep440-version v0.0.0-20210121094942-22b2f8951d46 // indirect
+	github.com/aquasecurity/go-version v0.0.0-20210121072130-637058cfe492 // indirect
 	github.com/becheran/wildmatch-go v1.0.0 // indirect
 	github.com/bmatcuk/doublestar/v4 v4.6.0 // indirect
 	github.com/cloudflare/circl v1.3.3 // indirect
 	github.com/containerd/containerd v1.7.0 // indirect
 	github.com/containerd/stargz-snapshotter/estargz v0.14.3 // indirect
 	github.com/deitch/magic v0.0.0-20230404182410-1ff89d7342da // indirect
-	github.com/docker/cli v23.0.5+incompatible // indirect
+	github.com/docker/cli v24.0.0+incompatible // indirect
 	github.com/docker/distribution v2.8.2+incompatible // indirect
-	github.com/docker/docker v24.0.2+incompatible // indirect
+	github.com/docker/docker v24.0.5+incompatible // indirect
 	github.com/docker/docker-credential-helpers v0.7.0 // indirect
 	github.com/docker/go-connections v0.4.0 // indirect
 	github.com/docker/go-units v0.5.0 // indirect
 	github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 // indirect
 	github.com/dustin/go-humanize v1.0.1 // indirect
+	github.com/edsrzf/mmap-go v1.1.0 // indirect
 	github.com/emirpasic/gods v1.18.1 // indirect
 	github.com/facebookincubator/nvdtools v0.1.5 // indirect
 	github.com/gabriel-vasile/mimetype v1.4.0 // indirect
 	github.com/github/go-spdx/v2 v2.1.2 // indirect
 	github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect
 	github.com/go-git/go-billy/v5 v5.4.1 // indirect
-	github.com/go-git/go-git/v5 v5.7.0 // indirect
+	github.com/go-git/go-git/v5 v5.8.1 // indirect
 	github.com/go-restruct/restruct v1.2.0-alpha // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/golang/protobuf v1.5.3 // indirect
 	github.com/golang/snappy v0.0.4 // indirect
 	github.com/google/go-cmp v0.5.9 // indirect
-	github.com/google/go-containerregistry v0.15.2 // indirect
+	github.com/google/go-containerregistry v0.16.1 // indirect
 	github.com/google/licensecheck v0.3.1 // indirect
 	github.com/google/uuid v1.3.0 // indirect
 	github.com/hashicorp/errwrap v1.1.0 // indirect
@@ -85,37 +89,39 @@
 	github.com/pierrec/lz4/v4 v4.1.15 // indirect
 	github.com/pjbgf/sha1cd v0.3.0 // indirect
 	github.com/rivo/uniseg v0.2.0 // indirect
+	github.com/saferwall/pe v1.4.4 // indirect
 	github.com/sassoftware/go-rpmutils v0.2.0 // indirect
 	github.com/scylladb/go-set v1.0.3-0.20200225121959-cc7b2070d91e // indirect
-	github.com/secure-systems-lab/go-securesystemslib v0.4.0 // indirect
+	github.com/secure-systems-lab/go-securesystemslib v0.6.0 // indirect
 	github.com/sergi/go-diff v1.3.1 // indirect
 	github.com/shibumi/go-pathspec v1.3.0 // indirect
 	github.com/shopspring/decimal v1.2.0 // indirect
-	github.com/skeema/knownhosts v1.1.1 // indirect
-	github.com/spdx/tools-golang v0.5.2 // indirect
+	github.com/skeema/knownhosts v1.2.0 // indirect
+	github.com/spdx/tools-golang v0.5.3 // indirect
 	github.com/spf13/afero v1.9.5 // indirect
 	github.com/spf13/cast v1.5.1 // indirect
-	github.com/sylabs/sif/v2 v2.8.1 // indirect
+	github.com/sylabs/sif/v2 v2.11.5 // indirect
 	github.com/sylabs/squashfs v0.6.1 // indirect
 	github.com/therootcompany/xz v1.0.1 // indirect
 	github.com/ulikunitz/xz v0.5.10 // indirect
 	github.com/vbatts/go-mtree v0.5.3 // indirect
 	github.com/vbatts/tar-split v0.11.3 // indirect
-	github.com/vifraa/gopom v0.2.1 // indirect
+	github.com/vifraa/gopom v1.0.0 // indirect
 	github.com/wagoodman/go-partybus v0.0.0-20230516145632-8ccac152c651 // indirect
 	github.com/wagoodman/go-progress v0.0.0-20230301185719-21920a456ad5 // indirect
 	github.com/xanzy/ssh-agent v0.3.3 // indirect
 	github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 // indirect
+	go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 // indirect
 	go.uber.org/goleak v1.2.0 // indirect
-	golang.org/x/crypto v0.11.0 // indirect
+	golang.org/x/crypto v0.12.0 // indirect
 	golang.org/x/exp v0.0.0-20230202163644-54bba9f4231b // indirect
 	golang.org/x/mod v0.12.0 // indirect
-	golang.org/x/net v0.12.0 // indirect
-	golang.org/x/sync v0.1.0 // indirect
-	golang.org/x/sys v0.10.0 // indirect
-	golang.org/x/term v0.10.0 // indirect
-	golang.org/x/text v0.11.0 // indirect
-	golang.org/x/tools v0.8.0 // indirect
+	golang.org/x/net v0.14.0 // indirect
+	golang.org/x/sync v0.2.0 // indirect
+	golang.org/x/sys v0.11.0 // indirect
+	golang.org/x/term v0.11.0 // indirect
+	golang.org/x/text v0.12.0 // indirect
+	golang.org/x/tools v0.9.1 // indirect
 	golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
 	google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect
 	google.golang.org/grpc v1.55.0 // indirect
diff --git a/go.sum b/go.sum
index f54eb4b8..e04abab4 100644
--- a/go.sum
+++ b/go.sum
@@ -48,6 +48,8 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl
 cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
 cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
 cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
+dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
+dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
 github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1
h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= @@ -71,8 +73,8 @@ github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/ProtonMail/go-crypto v0.0.0-20230518184743-7afd39499903 h1:ZK3C5DtzV2nVAQTx5S5jQvMeDqWtD1By5mOoyY/xJek= -github.com/ProtonMail/go-crypto v0.0.0-20230518184743-7afd39499903/go.mod h1:8TI4H3IbrackdNgv+92dI+rhpCaLqM0IfpgCgenFvRE= +github.com/ProtonMail/go-crypto v0.0.0-20230717121422-5aa5874ade95 h1:KLq8BE0KwCL+mmXnjLWEAOYO+2l2AE4YMmqG1ZpZHBs= +github.com/ProtonMail/go-crypto v0.0.0-20230717121422-5aa5874ade95/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= github.com/acobaugh/osrelease v0.1.0 h1:Yb59HQDGGNhCj4suHaFQQfBps5wyoKLSSX/J/+UifRE= github.com/acobaugh/osrelease v0.1.0/go.mod h1:4bFEs0MtgHNHBrmHCt67gNisnabCRAlzdVasCEGHTWY= github.com/acomagu/bufpipe v1.0.4 h1:e3H4WUzM3npvo5uv95QuJM3cQspFNtFBzvJ2oNjKIDQ= @@ -90,16 +92,20 @@ github.com/anchore/go-struct-converter v0.0.0-20221118182256-c68fdcfa2092/go.mod github.com/anchore/go-testutils v0.0.0-20200925183923-d5f45b0d3c04 h1:VzprUTpc0vW0nnNKJfJieyH/TZ9UYAnTZs5/gHTdAe8= github.com/anchore/packageurl-go v0.1.1-0.20230104203445-02e0a6721501 h1:AV7qjwMcM4r8wFhJq3jLRztew3ywIyPTRapl2T1s9o8= github.com/anchore/packageurl-go v0.1.1-0.20230104203445-02e0a6721501/go.mod h1:Blo6OgJNiYF41ufcgHKkbCKF2MDOMlrqhXv/ij6ocR4= -github.com/anchore/stereoscope v0.0.0-20230627195312-cd49355d934e h1:zhk3ZLtomMJ750nNCE+c24PonMzoO/SeL/4uTr1L9kM= -github.com/anchore/stereoscope v0.0.0-20230627195312-cd49355d934e/go.mod h1:0LsgHgXO4QFnk2hsYwtqd3fR18PIZXlFLIl2qb9tu3g= -github.com/anchore/syft v0.85.0 h1:JShy/YIqffcIR3cvssABGr/yNDRCgZwpcQPcRLO2nHc= -github.com/anchore/syft v0.85.0/go.mod h1:nCMEh98C1BEfkH49HXKeJNPcUEfDM4B6xmptGT5Lv3Q= +github.com/anchore/stereoscope v0.0.0-20230727211946-d1f3d766295e h1:S6IhYpsBCpvphlHA1tN0glSG/kjVvFzC6OJuU2qW5Pc= +github.com/anchore/stereoscope v0.0.0-20230727211946-d1f3d766295e/go.mod h1:0LsgHgXO4QFnk2hsYwtqd3fR18PIZXlFLIl2qb9tu3g= +github.com/anchore/syft v0.87.1 h1:S9xAQzvSRjGTqnkHXxGNX7TQAU0vzr2HAbuD2Sklc1s= +github.com/anchore/syft v0.87.1/go.mod h1:FweiDRmNb6zq/+vD8rernOfCdkV6KBfppIKFX207eCY= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/andybalholm/brotli v1.0.1/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y= github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY3JY= github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/aquasecurity/go-pep440-version v0.0.0-20210121094942-22b2f8951d46 h1:vmXNl+HDfqqXgr0uY1UgK1GAhps8nbAAtqHNBcgyf+4= +github.com/aquasecurity/go-pep440-version v0.0.0-20210121094942-22b2f8951d46/go.mod h1:olhPNdiiAAMiSujemd1O/sc6GcyePr23f/6uGKtthNg= +github.com/aquasecurity/go-version v0.0.0-20210121072130-637058cfe492 h1:rcEG5HI490FF0a7zuvxOxen52ddygCfNVjP0XOCMl+M= +github.com/aquasecurity/go-version v0.0.0-20210121072130-637058cfe492/go.mod 
h1:9Beu8XsUNNfzml7WBf3QmyPToP1wm1Gj/Vc5UJKqTzU= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= @@ -115,7 +121,7 @@ github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kB github.com/bmatcuk/doublestar/v4 v4.6.0 h1:HTuxyug8GyFbRkrffIpzNCSK4luc0TY3wzXvzIZhEXc= github.com/bmatcuk/doublestar/v4 v4.6.0/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= github.com/bradleyjkemp/cupaloy/v2 v2.8.0 h1:any4BmKE+jGIaMpnU8YgH/I2LPiLBufr6oMMlVBbn9M= -github.com/bwesterb/go-ristretto v1.2.0/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= +github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= @@ -127,7 +133,6 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cloudflare/circl v1.1.0/go.mod h1:prBCrKB9DV4poKZY1l9zBXg2QJY7mvgRvtMxxK7fi4I= github.com/cloudflare/circl v1.3.3 h1:fE/Qz0QdIGqeWfnwq0RE0R7MI51s0M2E4Ga9kq5AEMs= github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= @@ -147,6 +152,7 @@ github.com/containerd/stargz-snapshotter/estargz v0.14.3 h1:OqlDCK3ZVUO6C3B/5FSk github.com/containerd/stargz-snapshotter/estargz v0.14.3/go.mod h1:KY//uOCIkSuNAHhJogcZtrNHdKrA99/FCCRjE3HD36o= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= @@ -156,12 +162,12 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/deitch/magic v0.0.0-20230404182410-1ff89d7342da h1:ZOjWpVsFZ06eIhnh4mkaceTiVoktdU67+M7KDHJ268M= github.com/deitch/magic v0.0.0-20230404182410-1ff89d7342da/go.mod h1:B3tI9iGHi4imdLi4Asdha1Sc6feLMTfPLXh9IUYmysk= github.com/dgrijalva/jwt-go/v4 v4.0.0-preview1/go.mod h1:+hnT3ywWDTAFrW5aE+u2Sa/wT555ZqwoCS+pk3p6ry4= -github.com/docker/cli v23.0.5+incompatible h1:ufWmAOuD3Vmr7JP2G5K3cyuNC4YZWiAsuDEvFVVDafE= -github.com/docker/cli v23.0.5+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v24.0.0+incompatible h1:0+1VshNwBQzQAx9lOl+OYCTCEAD8fKs/qeXMx3O0wqM= 
+github.com/docker/cli v24.0.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v24.0.2+incompatible h1:eATx+oLz9WdNVkQrr0qjQ8HvRJ4bOOxfzEo8R+dA3cg= -github.com/docker/docker v24.0.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v24.0.5+incompatible h1:WmgcE4fxyI6EEXxBRxsHnZXrO1pQ3smi0k/jho4HLeY= +github.com/docker/docker v24.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.7.0 h1:xtCHsjxogADNZcdv1pKUHXryefjlVRqWqIhk/uXJp0A= github.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= @@ -173,6 +179,8 @@ github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5/go.mod h1:qssHWj6 github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdfkVLjJ8T6VcRQv3SXugXy999NBtR9aFY= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ= +github.com/edsrzf/mmap-go v1.1.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q= github.com/elazarl/goproxy v0.0.0-20221015165544-a0805db90819 h1:RIB4cRk+lBqKK3Oy0r2gRX4ui7tuhiZq2SuTtTCi0/0= github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= @@ -209,8 +217,8 @@ github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmS github.com/go-git/go-billy/v5 v5.4.1 h1:Uwp5tDRkPr+l/TnbHOQzp+tmJfLceOlbVucgpTz8ix4= github.com/go-git/go-billy/v5 v5.4.1/go.mod h1:vjbugF6Fz7JIflbVpl1hJsGjSHNltrSw45YK/ukIvQg= github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20230305113008-0c11038e723f h1:Pz0DHeFij3XFhoBRGUDPzSJ+w2UcK5/0JvF8DRI58r8= -github.com/go-git/go-git/v5 v5.7.0 h1:t9AudWVLmqzlo+4bqdf7GY+46SUuRsx59SboFxkq2aE= -github.com/go-git/go-git/v5 v5.7.0/go.mod h1:coJHKEOk5kUClpsNlXrUvPrDxY3w3gjHvhcZd8Fodw8= +github.com/go-git/go-git/v5 v5.8.1 h1:Zo79E4p7TRk0xoRgMq0RShiTHGKcKI4+DI6BfJc/Q+A= +github.com/go-git/go-git/v5 v5.8.1/go.mod h1:FHFuoD6yGz5OSKEBK+aWN9Oah0q54Jxl0abmj6GnqAo= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -281,8 +289,8 @@ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-containerregistry v0.15.2 h1:MMkSh+tjSdnmJZO7ljvEqV1DjfekB6VUEAZgy3a+TQE= -github.com/google/go-containerregistry v0.15.2/go.mod h1:wWK+LnOv4jXMM23IT/F1wdYftGWGr47Is8CG+pmHK1Q= +github.com/google/go-containerregistry v0.16.1 
h1:rUEt426sR6nyrL3gt+18ibRcvYpKYdpsa5ZW7MA08dQ= +github.com/google/go-containerregistry v0.16.1/go.mod h1:u0qB2l7mvtWVR5kNcbFIhFY1hLbf8eeGapA+vbFDCtQ= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/licensecheck v0.3.1 h1:QoxgoDkaeC4nFrtGN1jV7IPmDCHFNIVh54e5hSt6sPs= github.com/google/licensecheck v0.3.1/go.mod h1:ORkR35t/JjW+emNKtfJDII0zlciG9JgbT7SmsohlHmY= @@ -316,7 +324,7 @@ github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pf github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/gookit/color v1.2.5/go.mod h1:AhIE+pS6D4Ql0SQWbBeXPHw7gY0/sjHoA4s/n1KB7xg= -github.com/gookit/color v1.5.3 h1:twfIhZs4QLCtimkP7MOxlF3A0U/5cDPseRT9M/+2SCE= +github.com/gookit/color v1.5.4 h1:FZmqs7XOyGgCAxmWyPslpiok1k05wmY3SJTytgvYFs0= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/consul/api v1.11.0/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M= github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= @@ -360,8 +368,8 @@ github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1: github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.15 h1:M8XP7IuFNsqUx6VPK2P9OSmsYsI/YFaGil0uD21V3dM= github.com/imdario/mergo v0.3.15/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= -github.com/in-toto/in-toto-golang v0.4.1-0.20221018183522-731d0640b65f h1:7giWxcSH1gUqLfQEB7XnTBc29+A0DvNxZY5XKoemhME= -github.com/in-toto/in-toto-golang v0.4.1-0.20221018183522-731d0640b65f/go.mod h1:/Rq0IZHLV7Ku5gielPT4wPHJfH1GdHMCq8+WPxw8/BE= +github.com/in-toto/in-toto-golang v0.9.0 h1:tHny7ac4KgtsfrG6ybU8gVOZux2H8jN05AXJ9EBM1XU= +github.com/in-toto/in-toto-golang v0.9.0/go.mod h1:xsBVrVsHNsB61++S6Dy2vWosKhuA3lUTQd+eF9HdeMo= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= @@ -505,8 +513,11 @@ github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/saferwall/pe v1.4.4 h1:Ml++7/2/Z1iKwV4zCsd1nIqTEAdUQKAetwbbcCarhOg= +github.com/saferwall/pe v1.4.4/go.mod h1:SNzv3cdgk8SBI0UwHfyTcdjawfdnN+nbydnEL7GZ25s= github.com/sagikazarmark/crypt v0.3.0/go.mod h1:uD/D+6UF4SrIR1uGEv7bBNkNqLGqUr43MRiaGWX1Nig= github.com/sassoftware/go-rpmutils v0.2.0 h1:pKW0HDYMFWQ5b4JQPiI3WI12hGsVoW0V8+GMoZiI/JE= github.com/sassoftware/go-rpmutils v0.2.0/go.mod h1:TJJQYtLe/BeEmEjelI3b7xNZjzAukEkeWKmoakvaOoI= @@ -514,8 
+525,8 @@ github.com/scylladb/go-set v1.0.3-0.20200225121959-cc7b2070d91e h1:7q6NSFZDeGfvv github.com/scylladb/go-set v1.0.3-0.20200225121959-cc7b2070d91e/go.mod h1:DkpGd78rljTxKAnTDPFqXSGxvETQnJyuSOQwsHycqfs= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/sebdah/goldie/v2 v2.5.3 h1:9ES/mNN+HNUbNWpVAlrzuZ7jE+Nrczbj8uFRjM7624Y= -github.com/secure-systems-lab/go-securesystemslib v0.4.0 h1:b23VGrQhTA8cN2CbBw7/FulN9fTtqYUdS5+Oxzt+DUE= -github.com/secure-systems-lab/go-securesystemslib v0.4.0/go.mod h1:FGBZgq2tXWICsxWQW1msNf49F0Pf2Op5Htayx335Qbs= +github.com/secure-systems-lab/go-securesystemslib v0.6.0 h1:T65atpAVCJQK14UA57LMdZGpHi4QYSH/9FZyNGqMYIA= +github.com/secure-systems-lab/go-securesystemslib v0.6.0/go.mod h1:8Mtpo9JKks/qhPG4HGZ2LGMvrPbzuxwfz/f/zLfEWkk= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8= @@ -524,18 +535,19 @@ github.com/shibumi/go-pathspec v1.3.0 h1:QUyMZhFo0Md5B8zV8x2tesohbb5kfbpTi9rBnKh github.com/shibumi/go-pathspec v1.3.0/go.mod h1:Xutfslp817l2I1cZvgcfeMQJG5QnU2lh5tVaaMCl3jE= github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/skeema/knownhosts v1.1.1 h1:MTk78x9FPgDFVFkDLTrsnnfCJl7g1C/nnKvePgrIngE= -github.com/skeema/knownhosts v1.1.1/go.mod h1:g4fPeYpque7P0xefxtGzV81ihjC8sX2IqpAoNkjxbMo= +github.com/skeema/knownhosts v1.2.0 h1:h9r9cf0+u7wSE+M183ZtMGgOJKiL96brpaz5ekfJCpM= +github.com/skeema/knownhosts v1.2.0/go.mod h1:g4fPeYpque7P0xefxtGzV81ihjC8sX2IqpAoNkjxbMo= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spdx/gordf v0.0.0-20201111095634-7098f93598fb/go.mod h1:uKWaldnbMnjsSAXRurWqqrdyZen1R7kxl8TkmWk2OyM= -github.com/spdx/tools-golang v0.5.2 h1:dtMNjJreWPe37584ajk7m/rQtfJaLpRMk7pUGgvekOg= -github.com/spdx/tools-golang v0.5.2/go.mod h1:/ETOahiAo96Ob0/RAIBmFZw6XN0yTnyr/uFZm2NTMhI= +github.com/spdx/tools-golang v0.5.3 h1:ialnHeEYUC4+hkm5vJm4qz2x+oEJbS0mAMFrNXdQraY= +github.com/spdx/tools-golang v0.5.3/go.mod h1:/ETOahiAo96Ob0/RAIBmFZw6XN0yTnyr/uFZm2NTMhI= github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM= @@ -565,8 +577,8 @@ github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod 
h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/sylabs/sif/v2 v2.8.1 h1:whr4Vz12RXfLnYyVGHoD/rD/hbF2g9OW7BJHa+WIqW8= -github.com/sylabs/sif/v2 v2.8.1/go.mod h1:LQOdYXC9a8i7BleTKRw9lohi0rTbXkJOeS9u0ebvgyM= +github.com/sylabs/sif/v2 v2.11.5 h1:7ssPH3epSonsTrzbS1YxeJ9KuqAN7ISlSM61a7j/mQM= +github.com/sylabs/sif/v2 v2.11.5/go.mod h1:GBoZs9LU3e4yJH1dcZ3Akf/jsqYgy5SeguJQC+zd75Y= github.com/sylabs/squashfs v0.6.1 h1:4hgvHnD9JGlYWwT0bPYNt9zaz23mAV3Js+VEgQoRGYQ= github.com/sylabs/squashfs v0.6.1/go.mod h1:ZwpbPCj0ocIvMy2br6KZmix6Gzh6fsGQcCnydMF+Kx8= github.com/therootcompany/xz v1.0.1 h1:CmOtsn1CbtmyYiusbfmhmkpAAETj0wBIH6kCYaX+xzw= @@ -577,12 +589,13 @@ github.com/ulikunitz/xz v0.5.9/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oW github.com/ulikunitz/xz v0.5.10 h1:t92gobL9l3HE202wg3rlk19F6X+JOxl9BBrCCMYEYd8= github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/urfave/cli v1.22.12/go.mod h1:sSBEIC79qR6OvcmsD4U3KABeOTxDqQtdDnaFuUN30b8= +github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI= github.com/vbatts/go-mtree v0.5.3 h1:S/jYlfG8rZ+a0bhZd+RANXejy7M4Js8fq9U+XoWTd5w= github.com/vbatts/go-mtree v0.5.3/go.mod h1:eXsdoPMdL2jcJx6HweWi9lYQxBsTp4lNhqqAjgkZUg8= github.com/vbatts/tar-split v0.11.3 h1:hLFqsOLQ1SsppQNTMpkpPXClLDfC2A3Zgy9OUU+RVck= github.com/vbatts/tar-split v0.11.3/go.mod h1:9QlHN18E+fEH7RdG+QAJJcuya3rqT7eXSTY7wGrAokY= -github.com/vifraa/gopom v0.2.1 h1:MYVMAMyiGzXPPy10EwojzKIL670kl5Zbae+o3fFvQEM= -github.com/vifraa/gopom v0.2.1/go.mod h1:oPa1dcrGrtlO37WPDBm5SqHAT+wTgF8An1Q71Z6Vv4o= +github.com/vifraa/gopom v1.0.0 h1:L9XlKbyvid8PAIK8nr0lihMApJQg/12OBvMA28BcWh0= +github.com/vifraa/gopom v1.0.0/go.mod h1:oPa1dcrGrtlO37WPDBm5SqHAT+wTgF8An1Q71Z6Vv4o= github.com/wagoodman/go-partybus v0.0.0-20230516145632-8ccac152c651 h1:jIVmlAFIqV3d+DOxazTR9v+zgj8+VYuQBzPgBZvWBHA= github.com/wagoodman/go-partybus v0.0.0-20230516145632-8ccac152c651/go.mod h1:b26F2tHLqaoRQf8DywqzVaV1MQ9yvjb0OMcNl7Nxu20= github.com/wagoodman/go-progress v0.0.0-20230301185719-21920a456ad5 h1:lwgTsTy18nYqASnH58qyfRW/ldj7Gt2zzBvgYPzdA4s= @@ -601,6 +614,8 @@ github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5t go.etcd.io/etcd/api/v3 v3.5.1/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= go.etcd.io/etcd/client/pkg/v3 v3.5.1/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= go.etcd.io/etcd/client/v2 v2.305.1/go.mod h1:pMEacxZW7o8pg4CrFE7pquyCJJzZvkvdD2RibOCCCGs= +go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 h1:CCriYyAfq1Br1aIYettdHZTy8mBTIPo7We18TuO/bak= +go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -631,9 +646,10 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= +golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= 
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= -golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA= -golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= +golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk= +golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -724,8 +740,8 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/net v0.12.0 h1:cfawfvKITfUsFCeJIHJrbSxpeu/E81khclypR0GVT50= -golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= +golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14= +golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -755,8 +771,9 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI= +golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -825,6 +842,7 @@ golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -834,16 +852,16 @@ golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= -golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= -golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c= -golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= +golang.org/x/term v0.11.0 h1:F9tnn/DA/Im8nCwm+fX+1/eBwi4qFjRT++MhtVC4ZX0= +golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -856,8 +874,8 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= -golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc= +golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -920,8 +938,8 @@ golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y= -golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= +golang.org/x/tools v0.9.1 h1:8WMNJAz3zrtPmnYC7ISf5dEn3MT0gY7jBJfw27yrrLo= +golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= 
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -1110,10 +1128,10 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
 honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
 honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
 honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-modernc.org/libc v1.22.5 h1:91BNch/e5B0uPbJFgqbxXuOnxBQjlS//icfQEGmvyjE=
+modernc.org/libc v1.24.1 h1:uvJSeCKL/AgzBo2yYIPPTy82v21KgGnizcGYfBHaNuM=
 modernc.org/mathutil v1.5.0 h1:rV0Ko/6SfM+8G+yKiyI830l3Wuz1zRutdslNoQ0kfiQ=
-modernc.org/memory v1.5.0 h1:N+/8c5rE6EqugZwHii4IFsaJ7MUhoWX07J5tC/iI5Ds=
-modernc.org/sqlite v1.24.0 h1:EsClRIWHGhLTCX44p+Ri/JLD+vFGo0QGjasg2/F9TlI=
+modernc.org/memory v1.6.0 h1:i6mzavxrE9a30whzMfwf7XWVODx2r5OYXvU46cirX7o=
+modernc.org/sqlite v1.25.0 h1:AFweiwPNd/b3BoKnBOfFm+Y260guGMF+0UFk0savqeA=
 rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
 rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
 rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
diff --git a/vendor/dario.cat/mergo/.deepsource.toml b/vendor/dario.cat/mergo/.deepsource.toml
new file mode 100644
index 00000000..a8bc979e
--- /dev/null
+++ b/vendor/dario.cat/mergo/.deepsource.toml
@@ -0,0 +1,12 @@
+version = 1
+
+test_patterns = [
+  "*_test.go"
+]
+
+[[analyzers]]
+name = "go"
+enabled = true
+
+  [analyzers.meta]
+  import_path = "dario.cat/mergo"
\ No newline at end of file
diff --git a/vendor/dario.cat/mergo/.gitignore b/vendor/dario.cat/mergo/.gitignore
new file mode 100644
index 00000000..529c3412
--- /dev/null
+++ b/vendor/dario.cat/mergo/.gitignore
@@ -0,0 +1,33 @@
+#### joe made this: http://goel.io/joe
+
+#### go ####
+# Binaries for programs and plugins
+*.exe
+*.dll
+*.so
+*.dylib
+
+# Test binary, build with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
+.glide/
+
+#### vim ####
+# Swap
+[._]*.s[a-v][a-z]
+[._]*.sw[a-p]
+[._]s[a-v][a-z]
+[._]sw[a-p]
+
+# Session
+Session.vim
+
+# Temporary
+.netrwhist
+*~
+# Auto-generated tag files
+tags
diff --git a/vendor/dario.cat/mergo/.travis.yml b/vendor/dario.cat/mergo/.travis.yml
new file mode 100644
index 00000000..d324c43b
--- /dev/null
+++ b/vendor/dario.cat/mergo/.travis.yml
@@ -0,0 +1,12 @@
+language: go
+arch:
+  - amd64
+  - ppc64le
+install:
+  - go get -t
+  - go get golang.org/x/tools/cmd/cover
+  - go get github.com/mattn/goveralls
+script:
+  - go test -race -v ./...
+after_script: + - $HOME/gopath/bin/goveralls -service=travis-ci -repotoken $COVERALLS_TOKEN diff --git a/vendor/dario.cat/mergo/CODE_OF_CONDUCT.md b/vendor/dario.cat/mergo/CODE_OF_CONDUCT.md new file mode 100644 index 00000000..469b4490 --- /dev/null +++ b/vendor/dario.cat/mergo/CODE_OF_CONDUCT.md @@ -0,0 +1,46 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at i@dario.im. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. 
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]
+
+[homepage]: http://contributor-covenant.org
+[version]: http://contributor-covenant.org/version/1/4/
diff --git a/vendor/dario.cat/mergo/CONTRIBUTING.md b/vendor/dario.cat/mergo/CONTRIBUTING.md
new file mode 100644
index 00000000..0a1ff9f9
--- /dev/null
+++ b/vendor/dario.cat/mergo/CONTRIBUTING.md
@@ -0,0 +1,112 @@
+
+# Contributing to mergo
+
+First off, thanks for taking the time to contribute! ❤️
+
+All types of contributions are encouraged and valued. See the [Table of Contents](#table-of-contents) for different ways to help and details about how this project handles them. Please make sure to read the relevant section before making your contribution. It will make it a lot easier for us maintainers and smooth out the experience for all involved. The community looks forward to your contributions. 🎉
+
+> And if you like the project, but just don't have time to contribute, that's fine. There are other easy ways to support the project and show your appreciation, which we would also be very happy about:
+> - Star the project
+> - Tweet about it
+> - Refer this project in your project's readme
+> - Mention the project at local meetups and tell your friends/colleagues
+
+
+## Table of Contents
+
+- [Code of Conduct](#code-of-conduct)
+- [I Have a Question](#i-have-a-question)
+- [I Want To Contribute](#i-want-to-contribute)
+- [Reporting Bugs](#reporting-bugs)
+- [Suggesting Enhancements](#suggesting-enhancements)
+
+## Code of Conduct
+
+This project and everyone participating in it are governed by the
+[mergo Code of Conduct](https://github.com/imdario/mergo/blob/master/CODE_OF_CONDUCT.md).
+By participating, you are expected to uphold this code. Please report unacceptable behavior
+to <>.
+
+
+## I Have a Question
+
+> If you want to ask a question, we assume that you have read the available [Documentation](https://pkg.go.dev/github.com/imdario/mergo).
+
+Before you ask a question, it is best to search for existing [Issues](https://github.com/imdario/mergo/issues) that might help you. In case you have found a suitable issue and still need clarification, you can write your question in this issue. It is also advisable to search the internet for answers first.
+
+If you then still feel the need to ask a question and need clarification, we recommend the following:
+
+- Open an [Issue](https://github.com/imdario/mergo/issues/new).
+- Provide as much context as you can about what you're running into.
+- Provide project and platform versions (nodejs, npm, etc), depending on what seems relevant.
+
+We will then take care of the issue as soon as possible.
+
+## I Want To Contribute
+
+> ### Legal Notice
+> When contributing to this project, you must agree that you have authored 100% of the content, that you have the necessary rights to the content and that the content you contribute may be provided under the project license.
+
+### Reporting Bugs
+
+
+#### Before Submitting a Bug Report
+
+A good bug report shouldn't leave others needing to chase you up for more information. Therefore, we ask you to investigate carefully, collect information and describe the issue in detail in your report. Please complete the following steps in advance to help us fix any potential bug as fast as possible.
+
+- Make sure that you are using the latest version.
+- Determine if your bug is really a bug and not an error on your side e.g.
using incompatible environment components/versions (Make sure that you have read the [documentation](). If you are looking for support, you might want to check [this section](#i-have-a-question)).
+- To see if other users have experienced (and potentially already solved) the same issue you are having, check whether a bug report already exists for your bug or error in the [bug tracker](https://github.com/imdario/mergo/issues?q=label%3Abug).
+- Also make sure to search the internet (including Stack Overflow) to see if users outside of the GitHub community have discussed the issue.
+- Collect information about the bug:
+  - Stack trace (Traceback)
+  - OS, Platform and Version (Windows, Linux, macOS, x86, ARM)
+  - Version of the interpreter, compiler, SDK, runtime environment, package manager, depending on what seems relevant.
+  - Possibly your input and the output
+  - Can you reliably reproduce the issue? And can you also reproduce it with older versions?
+
+
+#### How Do I Submit a Good Bug Report?
+
+> You must never report security related issues, vulnerabilities or bugs including sensitive information to the issue tracker, or elsewhere in public. Instead sensitive bugs must be sent by email to .
+
+
+We use GitHub issues to track bugs and errors. If you run into an issue with the project:
+
+- Open an [Issue](https://github.com/imdario/mergo/issues/new). (Since we can't be sure at this point whether it is a bug or not, we ask you not to talk about a bug yet and not to label the issue.)
+- Explain the behavior you would expect and the actual behavior.
+- Please provide as much context as possible and describe the *reproduction steps* that someone else can follow to recreate the issue on their own. This usually includes your code. For good bug reports you should isolate the problem and create a reduced test case.
+- Provide the information you collected in the previous section.
+
+Once it's filed:
+
+- The project team will label the issue accordingly.
+- A team member will try to reproduce the issue with your provided steps. If there are no reproduction steps or no obvious way to reproduce the issue, the team will ask you for those steps and mark the issue as `needs-repro`. Bugs with the `needs-repro` tag will not be addressed until they are reproduced.
+- If the team is able to reproduce the issue, it will be marked `needs-fix`, as well as possibly other tags (such as `critical`), and the issue will be left to be implemented by someone.
+
+### Suggesting Enhancements
+
+This section guides you through submitting an enhancement suggestion for mergo, **including completely new features and minor improvements to existing functionality**. Following these guidelines will help maintainers and the community to understand your suggestion and find related suggestions.
+
+
+#### Before Submitting an Enhancement
+
+- Make sure that you are using the latest version.
+- Read the [documentation]() carefully and find out if the functionality is already covered, maybe by an individual configuration.
+- Perform a [search](https://github.com/imdario/mergo/issues) to see if the enhancement has already been suggested. If it has, add a comment to the existing issue instead of opening a new one.
+- Find out whether your idea fits with the scope and aims of the project. It's up to you to make a strong case to convince the project's developers of the merits of this feature. Keep in mind that we want features that will be useful to the majority of our users and not just a small subset.
If you're just targeting a minority of users, consider writing an add-on/plugin library. + + +#### How Do I Submit a Good Enhancement Suggestion? + +Enhancement suggestions are tracked as [GitHub issues](https://github.com/imdario/mergo/issues). + +- Use a **clear and descriptive title** for the issue to identify the suggestion. +- Provide a **step-by-step description of the suggested enhancement** in as many details as possible. +- **Describe the current behavior** and **explain which behavior you expected to see instead** and why. At this point you can also tell which alternatives do not work for you. +- You may want to **include screenshots and animated GIFs** which help you demonstrate the steps or point out the part which the suggestion is related to. You can use [this tool](https://www.cockos.com/licecap/) to record GIFs on macOS and Windows, and [this tool](https://github.com/colinkeenan/silentcast) or [this tool](https://github.com/GNOME/byzanz) on Linux. +- **Explain why this enhancement would be useful** to most mergo users. You may also want to point out the other projects that solved it better and which could serve as inspiration. + + +## Attribution +This guide is based on the **contributing-gen**. [Make your own](https://github.com/bttger/contributing-gen)! diff --git a/vendor/dario.cat/mergo/LICENSE b/vendor/dario.cat/mergo/LICENSE new file mode 100644 index 00000000..68668029 --- /dev/null +++ b/vendor/dario.cat/mergo/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2013 Dario Castañé. All rights reserved. +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/dario.cat/mergo/README.md b/vendor/dario.cat/mergo/README.md
new file mode 100644
index 00000000..7d0cf9f3
--- /dev/null
+++ b/vendor/dario.cat/mergo/README.md
@@ -0,0 +1,248 @@
+# Mergo
+
+[![GitHub release][5]][6]
+[![GoCard][7]][8]
+[![Test status][1]][2]
+[![OpenSSF Scorecard][21]][22]
+[![OpenSSF Best Practices][19]][20]
+[![Coverage status][9]][10]
+[![Sourcegraph][11]][12]
+[![FOSSA status][13]][14]
+
+[![GoDoc][3]][4]
+[![Become my sponsor][15]][16]
+[![Tidelift][17]][18]
+
+[1]: https://github.com/imdario/mergo/workflows/tests/badge.svg?branch=master
+[2]: https://github.com/imdario/mergo/actions/workflows/tests.yml
+[3]: https://godoc.org/github.com/imdario/mergo?status.svg
+[4]: https://godoc.org/github.com/imdario/mergo
+[5]: https://img.shields.io/github/release/imdario/mergo.svg
+[6]: https://github.com/imdario/mergo/releases
+[7]: https://goreportcard.com/badge/imdario/mergo
+[8]: https://goreportcard.com/report/github.com/imdario/mergo
+[9]: https://coveralls.io/repos/github/imdario/mergo/badge.svg?branch=master
+[10]: https://coveralls.io/github/imdario/mergo?branch=master
+[11]: https://sourcegraph.com/github.com/imdario/mergo/-/badge.svg
+[12]: https://sourcegraph.com/github.com/imdario/mergo?badge
+[13]: https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=shield
+[14]: https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield
+[15]: https://img.shields.io/github/sponsors/imdario
+[16]: https://github.com/sponsors/imdario
+[17]: https://tidelift.com/badges/package/go/github.com%2Fimdario%2Fmergo
+[18]: https://tidelift.com/subscription/pkg/go-github.com-imdario-mergo
+[19]: https://bestpractices.coreinfrastructure.org/projects/7177/badge
+[20]: https://bestpractices.coreinfrastructure.org/projects/7177
+[21]: https://api.securityscorecards.dev/projects/github.com/imdario/mergo/badge
+[22]: https://api.securityscorecards.dev/projects/github.com/imdario/mergo
+
+A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements.
+
+Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields. It will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection).
+
+Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region of Marche.
+
+## Status
+
+It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, Microsoft, etc](https://github.com/imdario/mergo#mergo-in-the-wild).
+
+### Important notes
+
+#### 1.0.0
+
+In [1.0.0](//github.com/imdario/mergo/releases/tag/1.0.0) Mergo moves to a vanity URL `dario.cat/mergo`.
+
+#### 0.3.9
+
+Please keep in mind that a problematic PR broke [0.3.9](//github.com/imdario/mergo/releases/tag/0.3.9). I reverted it in [0.3.10](//github.com/imdario/mergo/releases/tag/0.3.10), and I consider it stable but not bug-free. Also, this version adds support for go modules.
+
+Keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2), Mergo changed `Merge()` and `Map()` signatures to support [transformers](#transformers). I added an optional/variadic argument so that it won't break the existing code.
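For illustration only (not part of the upstream README): a minimal sketch of what that backward-compatible variadic change means in practice. The `Conf` struct is a hypothetical stand-in; the two `Merge` call forms and the `WithOverride` option are the ones documented later in this README.

```go
package main

import (
	"fmt"

	"dario.cat/mergo"
)

// Conf is a hypothetical struct used only for this illustration.
type Conf struct {
	Host string
	Port int
}

func main() {
	dst := Conf{Host: "localhost"}
	src := Conf{Host: "example.com", Port: 8080}

	// Pre-0.3.2 style: two arguments, no options. Still compiles unchanged.
	if err := mergo.Merge(&dst, src); err != nil {
		fmt.Println(err)
	}
	fmt.Println(dst) // {localhost 8080}: only the zero-value Port field was filled

	// Same function, with a variadic option appended.
	if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil {
		fmt.Println(err)
	}
	fmt.Println(dst) // {example.com 8080}: non-empty fields are overridden too
}
```

Because the options are plain `func(*Config)` values appended at the end, callers written against the old two-argument signature need no changes.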
+
+If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with ```go get -u dario.cat/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0).
+
+### Donations
+
+If Mergo is useful to you, consider buying me a coffee, a beer, or making a monthly donation to allow me to keep building great free software. :heart_eyes:
+
+Buy Me a Coffee at ko-fi.com
+Donate using Liberapay
+Become my sponsor
+
+### Mergo in the wild
+
+- [moby/moby](https://github.com/moby/moby)
+- [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes)
+- [vmware/dispatch](https://github.com/vmware/dispatch)
+- [Shopify/themekit](https://github.com/Shopify/themekit)
+- [imdario/zas](https://github.com/imdario/zas)
+- [matcornic/hermes](https://github.com/matcornic/hermes)
+- [OpenBazaar/openbazaar-go](https://github.com/OpenBazaar/openbazaar-go)
+- [kataras/iris](https://github.com/kataras/iris)
+- [michaelsauter/crane](https://github.com/michaelsauter/crane)
+- [go-task/task](https://github.com/go-task/task)
+- [sensu/uchiwa](https://github.com/sensu/uchiwa)
+- [ory/hydra](https://github.com/ory/hydra)
+- [sisatech/vcli](https://github.com/sisatech/vcli)
+- [dairycart/dairycart](https://github.com/dairycart/dairycart)
+- [projectcalico/felix](https://github.com/projectcalico/felix)
+- [resin-os/balena](https://github.com/resin-os/balena)
+- [go-kivik/kivik](https://github.com/go-kivik/kivik)
+- [Telefonica/govice](https://github.com/Telefonica/govice)
+- [supergiant/supergiant](https://github.com/supergiant/supergiant)
+- [SergeyTsalkov/brooce](https://github.com/SergeyTsalkov/brooce)
+- [soniah/dnsmadeeasy](https://github.com/soniah/dnsmadeeasy)
+- [ohsu-comp-bio/funnel](https://github.com/ohsu-comp-bio/funnel)
+- [EagerIO/Stout](https://github.com/EagerIO/Stout)
+- [lynndylanhurley/defsynth-api](https://github.com/lynndylanhurley/defsynth-api)
+- [russross/canvasassignments](https://github.com/russross/canvasassignments)
+- [rdegges/cryptly-api](https://github.com/rdegges/cryptly-api)
+- [casualjim/exeggutor](https://github.com/casualjim/exeggutor)
+- [divshot/gitling](https://github.com/divshot/gitling)
+- [RWJMurphy/gorl](https://github.com/RWJMurphy/gorl)
+- [andrerocker/deploy42](https://github.com/andrerocker/deploy42)
+- [elwinar/rambler](https://github.com/elwinar/rambler)
+- [tmaiaroto/gopartman](https://github.com/tmaiaroto/gopartman)
+- [jfbus/impressionist](https://github.com/jfbus/impressionist)
+- [Jmeyering/zealot](https://github.com/Jmeyering/zealot)
+- [godep-migrator/rigger-host](https://github.com/godep-migrator/rigger-host)
+- [Dronevery/MultiwaySwitch-Go](https://github.com/Dronevery/MultiwaySwitch-Go)
+- [thoas/picfit](https://github.com/thoas/picfit)
+- [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server)
+- [jnuthong/item_search](https://github.com/jnuthong/item_search)
+- [bukalapak/snowboard](https://github.com/bukalapak/snowboard)
+- [containerssh/containerssh](https://github.com/containerssh/containerssh)
+- [goreleaser/goreleaser](https://github.com/goreleaser/goreleaser)
+- [tjpnz/structbot](https://github.com/tjpnz/structbot)
+
+## Install
+
+    go get dario.cat/mergo
+
+    // use in your .go code
+    import (
+        "dario.cat/mergo"
+    )
+
+## Usage
+
+You can only merge same-type structs with exported fields initialized as zero value of their type and same-type maps.
Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty struct values as [they are zero values](https://golang.org/ref/spec#The_zero_value) too. Also, maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection).
+
+```go
+if err := mergo.Merge(&dst, src); err != nil {
+    // ...
+}
+```
+
+Also, you can merge overwriting values using the transformer `WithOverride`.
+
+```go
+if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil {
+    // ...
+}
+```
+
+Additionally, you can map a `map[string]interface{}` to a struct (and vice versa, from struct to map), following the same restrictions as in `Merge()`. Keys are capitalized to find each corresponding exported field.
+
+```go
+if err := mergo.Map(&dst, srcMap); err != nil {
+    // ...
+}
+```
+
+Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as `map[string]interface{}`. They will be just assigned as values.
+
+Here is a nice example:
+
+```go
+package main
+
+import (
+    "fmt"
+    "dario.cat/mergo"
+)
+
+type Foo struct {
+    A string
+    B int64
+}
+
+func main() {
+    src := Foo{
+        A: "one",
+        B: 2,
+    }
+    dest := Foo{
+        A: "two",
+    }
+    mergo.Merge(&dest, src)
+    fmt.Println(dest)
+    // Will print
+    // {two 2}
+}
+```
+
+Note: if tests are failing due to a missing package, please execute:
+
+    go get gopkg.in/yaml.v3
+
+### Transformers
+
+Transformers allow to merge specific types differently than in the default behavior. In other words, now you can customize how some types are merged. For example, `time.Time` is a struct; it doesn't have zero value but IsZero can return true because it has fields with zero value. How can we merge a non-zero `time.Time`?
+
+```go
+package main
+
+import (
+    "fmt"
+    "dario.cat/mergo"
+    "reflect"
+    "time"
+)
+
+type timeTransformer struct {
+}
+
+func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error {
+    if typ == reflect.TypeOf(time.Time{}) {
+        return func(dst, src reflect.Value) error {
+            if dst.CanSet() {
+                isZero := dst.MethodByName("IsZero")
+                result := isZero.Call([]reflect.Value{})
+                if result[0].Bool() {
+                    dst.Set(src)
+                }
+            }
+            return nil
+        }
+    }
+    return nil
+}
+
+type Snapshot struct {
+    Time time.Time
+    // ...
+}
+
+func main() {
+    src := Snapshot{time.Now()}
+    dest := Snapshot{}
+    mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{}))
+    fmt.Println(dest)
+    // Will print
+    // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 }
+}
+```
+
+## Contact me
+
+If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): [@im_dario](https://twitter.com/im_dario)
+
+## About
+
+Written by [Dario Castañé](http://dario.im).
+
+## License
+
+[BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE).
+
+[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_large)
diff --git a/vendor/dario.cat/mergo/SECURITY.md b/vendor/dario.cat/mergo/SECURITY.md
new file mode 100644
index 00000000..a5de61f7
--- /dev/null
+++ b/vendor/dario.cat/mergo/SECURITY.md
@@ -0,0 +1,14 @@
+# Security Policy
+
+## Supported Versions
+
+| Version | Supported          |
+| ------- | ------------------ |
+| 0.3.x   | :white_check_mark: |
+| < 0.3   | :x:                |
+
+## Security contact information
+
+To report a security vulnerability, please use the
+[Tidelift security contact](https://tidelift.com/security).
+Tidelift will coordinate the fix and disclosure.
diff --git a/vendor/dario.cat/mergo/doc.go b/vendor/dario.cat/mergo/doc.go
new file mode 100644
index 00000000..7d96ec05
--- /dev/null
+++ b/vendor/dario.cat/mergo/doc.go
@@ -0,0 +1,148 @@
+// Copyright 2013 Dario Castañé. All rights reserved.
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements.
+
+Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields. It will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection).
+
+# Status
+
+It is ready for production use. It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc.
+
+# Important notes
+
+1.0.0
+
+In 1.0.0 Mergo moves to a vanity URL `dario.cat/mergo`.
+
+0.3.9
+
+Please keep in mind that a problematic PR broke 0.3.9. We reverted it in 0.3.10. We consider 0.3.10 stable but not bug-free. Also, this version adds support for go modules.
+
+Keep in mind that in 0.3.2, Mergo changed Merge() and Map() signatures to support transformers. We added an optional/variadic argument so that it won't break the existing code.
+
+If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with go get -u dario.cat/mergo. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0).
+
+# Install
+
+Do your usual installation procedure:
+
+	go get dario.cat/mergo
+
+	// use in your .go code
+	import (
+		"dario.cat/mergo"
+	)
+
+# Usage
+
+You can only merge same-type structs with exported fields initialized as zero value of their type and same-type maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty struct values as they are zero values too. Also, maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection).
+
+	if err := mergo.Merge(&dst, src); err != nil {
+		// ...
+	}
+
+Also, you can merge overwriting values using the transformer WithOverride.
+
+	if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil {
+		// ...
+	}
+
+Additionally, you can map a map[string]interface{} to a struct (and vice versa, from struct to map), following the same restrictions as in Merge(). Keys are capitalized to find each corresponding exported field.
+
+	if err := mergo.Map(&dst, srcMap); err != nil {
+		// ...
+ } + +Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as map[string]interface{}. They will be just assigned as values. + +Here is a nice example: + + package main + + import ( + "fmt" + "dario.cat/mergo" + ) + + type Foo struct { + A string + B int64 + } + + func main() { + src := Foo{ + A: "one", + B: 2, + } + dest := Foo{ + A: "two", + } + mergo.Merge(&dest, src) + fmt.Println(dest) + // Will print + // {two 2} + } + +# Transformers + +Transformers allow to merge specific types differently than in the default behavior. In other words, now you can customize how some types are merged. For example, time.Time is a struct; it doesn't have zero value but IsZero can return true because it has fields with zero value. How can we merge a non-zero time.Time? + + package main + + import ( + "fmt" + "dario.cat/mergo" + "reflect" + "time" + ) + + type timeTransformer struct { + } + + func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error { + if typ == reflect.TypeOf(time.Time{}) { + return func(dst, src reflect.Value) error { + if dst.CanSet() { + isZero := dst.MethodByName("IsZero") + result := isZero.Call([]reflect.Value{}) + if result[0].Bool() { + dst.Set(src) + } + } + return nil + } + } + return nil + } + + type Snapshot struct { + Time time.Time + // ... + } + + func main() { + src := Snapshot{time.Now()} + dest := Snapshot{} + mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{})) + fmt.Println(dest) + // Will print + // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 } + } + +# Contact me + +If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): https://twitter.com/im_dario + +# About + +Written by Dario Castañé: https://da.rio.hn + +# License + +BSD 3-Clause license, as Go language. +*/ +package mergo diff --git a/vendor/dario.cat/mergo/map.go b/vendor/dario.cat/mergo/map.go new file mode 100644 index 00000000..b50d5c2a --- /dev/null +++ b/vendor/dario.cat/mergo/map.go @@ -0,0 +1,178 @@ +// Copyright 2014 Dario Castañé. All rights reserved. +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Based on src/pkg/reflect/deepequal.go from official +// golang's stdlib. + +package mergo + +import ( + "fmt" + "reflect" + "unicode" + "unicode/utf8" +) + +func changeInitialCase(s string, mapper func(rune) rune) string { + if s == "" { + return s + } + r, n := utf8.DecodeRuneInString(s) + return string(mapper(r)) + s[n:] +} + +func isExported(field reflect.StructField) bool { + r, _ := utf8.DecodeRuneInString(field.Name) + return r >= 'A' && r <= 'Z' +} + +// Traverses recursively both values, assigning src's fields values to dst. +// The map argument tracks comparisons that have already been seen, which allows +// short circuiting on recursive types. +func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) { + overwrite := config.Overwrite + if dst.CanAddr() { + addr := dst.UnsafeAddr() + h := 17 * addr + seen := visited[h] + typ := dst.Type() + for p := seen; p != nil; p = p.next { + if p.ptr == addr && p.typ == typ { + return nil + } + } + // Remember, remember... 
+ visited[h] = &visit{typ, seen, addr} + } + zeroValue := reflect.Value{} + switch dst.Kind() { + case reflect.Map: + dstMap := dst.Interface().(map[string]interface{}) + for i, n := 0, src.NumField(); i < n; i++ { + srcType := src.Type() + field := srcType.Field(i) + if !isExported(field) { + continue + } + fieldName := field.Name + fieldName = changeInitialCase(fieldName, unicode.ToLower) + if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v), !config.ShouldNotDereference) || overwrite) { + dstMap[fieldName] = src.Field(i).Interface() + } + } + case reflect.Ptr: + if dst.IsNil() { + v := reflect.New(dst.Type().Elem()) + dst.Set(v) + } + dst = dst.Elem() + fallthrough + case reflect.Struct: + srcMap := src.Interface().(map[string]interface{}) + for key := range srcMap { + config.overwriteWithEmptyValue = true + srcValue := srcMap[key] + fieldName := changeInitialCase(key, unicode.ToUpper) + dstElement := dst.FieldByName(fieldName) + if dstElement == zeroValue { + // We discard it because the field doesn't exist. + continue + } + srcElement := reflect.ValueOf(srcValue) + dstKind := dstElement.Kind() + srcKind := srcElement.Kind() + if srcKind == reflect.Ptr && dstKind != reflect.Ptr { + srcElement = srcElement.Elem() + srcKind = reflect.TypeOf(srcElement.Interface()).Kind() + } else if dstKind == reflect.Ptr { + // Can this work? I guess it can't. + if srcKind != reflect.Ptr && srcElement.CanAddr() { + srcPtr := srcElement.Addr() + srcElement = reflect.ValueOf(srcPtr) + srcKind = reflect.Ptr + } + } + + if !srcElement.IsValid() { + continue + } + if srcKind == dstKind { + if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { + return + } + } else if dstKind == reflect.Interface && dstElement.Kind() == reflect.Interface { + if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { + return + } + } else if srcKind == reflect.Map { + if err = deepMap(dstElement, srcElement, visited, depth+1, config); err != nil { + return + } + } else { + return fmt.Errorf("type mismatch on %s field: found %v, expected %v", fieldName, srcKind, dstKind) + } + } + } + return +} + +// Map sets fields' values in dst from src. +// src can be a map with string keys or a struct. dst must be the opposite: +// if src is a map, dst must be a valid pointer to struct. If src is a struct, +// dst must be map[string]interface{}. +// It won't merge unexported (private) fields and will do recursively +// any exported field. +// If dst is a map, keys will be src fields' names in lower camel case. +// Missing key in src that doesn't match a field in dst will be skipped. This +// doesn't apply if dst is a map. +// This is separated method from Merge because it is cleaner and it keeps sane +// semantics: merging equal types, mapping different (restricted) types. +func Map(dst, src interface{}, opts ...func(*Config)) error { + return _map(dst, src, opts...) +} + +// MapWithOverwrite will do the same as Map except that non-empty dst attributes will be overridden by +// non-empty src attribute values. +// Deprecated: Use Map(…) with WithOverride +func MapWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { + return _map(dst, src, append(opts, WithOverride)...) 
+} + +func _map(dst, src interface{}, opts ...func(*Config)) error { + if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr { + return ErrNonPointerArgument + } + var ( + vDst, vSrc reflect.Value + err error + ) + config := &Config{} + + for _, opt := range opts { + opt(config) + } + + if vDst, vSrc, err = resolveValues(dst, src); err != nil { + return err + } + // To be friction-less, we redirect equal-type arguments + // to deepMerge. Only because arguments can be anything. + if vSrc.Kind() == vDst.Kind() { + return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config) + } + switch vSrc.Kind() { + case reflect.Struct: + if vDst.Kind() != reflect.Map { + return ErrExpectedMapAsDestination + } + case reflect.Map: + if vDst.Kind() != reflect.Struct { + return ErrExpectedStructAsDestination + } + default: + return ErrNotSupported + } + return deepMap(vDst, vSrc, make(map[uintptr]*visit), 0, config) +} diff --git a/vendor/dario.cat/mergo/merge.go b/vendor/dario.cat/mergo/merge.go new file mode 100644 index 00000000..0ef9b213 --- /dev/null +++ b/vendor/dario.cat/mergo/merge.go @@ -0,0 +1,409 @@ +// Copyright 2013 Dario Castañé. All rights reserved. +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Based on src/pkg/reflect/deepequal.go from official +// golang's stdlib. + +package mergo + +import ( + "fmt" + "reflect" +) + +func hasMergeableFields(dst reflect.Value) (exported bool) { + for i, n := 0, dst.NumField(); i < n; i++ { + field := dst.Type().Field(i) + if field.Anonymous && dst.Field(i).Kind() == reflect.Struct { + exported = exported || hasMergeableFields(dst.Field(i)) + } else if isExportedComponent(&field) { + exported = exported || len(field.PkgPath) == 0 + } + } + return +} + +func isExportedComponent(field *reflect.StructField) bool { + pkgPath := field.PkgPath + if len(pkgPath) > 0 { + return false + } + c := field.Name[0] + if 'a' <= c && c <= 'z' || c == '_' { + return false + } + return true +} + +type Config struct { + Transformers Transformers + Overwrite bool + ShouldNotDereference bool + AppendSlice bool + TypeCheck bool + overwriteWithEmptyValue bool + overwriteSliceWithEmptyValue bool + sliceDeepCopy bool + debug bool +} + +type Transformers interface { + Transformer(reflect.Type) func(dst, src reflect.Value) error +} + +// Traverses recursively both values, assigning src's fields values to dst. +// The map argument tracks comparisons that have already been seen, which allows +// short circuiting on recursive types. +func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) { + overwrite := config.Overwrite + typeCheck := config.TypeCheck + overwriteWithEmptySrc := config.overwriteWithEmptyValue + overwriteSliceWithEmptySrc := config.overwriteSliceWithEmptyValue + sliceDeepCopy := config.sliceDeepCopy + + if !src.IsValid() { + return + } + if dst.CanAddr() { + addr := dst.UnsafeAddr() + h := 17 * addr + seen := visited[h] + typ := dst.Type() + for p := seen; p != nil; p = p.next { + if p.ptr == addr && p.typ == typ { + return nil + } + } + // Remember, remember... 
+ visited[h] = &visit{typ, seen, addr} + } + + if config.Transformers != nil && !isReflectNil(dst) && dst.IsValid() { + if fn := config.Transformers.Transformer(dst.Type()); fn != nil { + err = fn(dst, src) + return + } + } + + switch dst.Kind() { + case reflect.Struct: + if hasMergeableFields(dst) { + for i, n := 0, dst.NumField(); i < n; i++ { + if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1, config); err != nil { + return + } + } + } else { + if dst.CanSet() && (isReflectNil(dst) || overwrite) && (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc) { + dst.Set(src) + } + } + case reflect.Map: + if dst.IsNil() && !src.IsNil() { + if dst.CanSet() { + dst.Set(reflect.MakeMap(dst.Type())) + } else { + dst = src + return + } + } + + if src.Kind() != reflect.Map { + if overwrite && dst.CanSet() { + dst.Set(src) + } + return + } + + for _, key := range src.MapKeys() { + srcElement := src.MapIndex(key) + if !srcElement.IsValid() { + continue + } + dstElement := dst.MapIndex(key) + switch srcElement.Kind() { + case reflect.Chan, reflect.Func, reflect.Map, reflect.Interface, reflect.Slice: + if srcElement.IsNil() { + if overwrite { + dst.SetMapIndex(key, srcElement) + } + continue + } + fallthrough + default: + if !srcElement.CanInterface() { + continue + } + switch reflect.TypeOf(srcElement.Interface()).Kind() { + case reflect.Struct: + fallthrough + case reflect.Ptr: + fallthrough + case reflect.Map: + srcMapElm := srcElement + dstMapElm := dstElement + if srcMapElm.CanInterface() { + srcMapElm = reflect.ValueOf(srcMapElm.Interface()) + if dstMapElm.IsValid() { + dstMapElm = reflect.ValueOf(dstMapElm.Interface()) + } + } + if err = deepMerge(dstMapElm, srcMapElm, visited, depth+1, config); err != nil { + return + } + case reflect.Slice: + srcSlice := reflect.ValueOf(srcElement.Interface()) + + var dstSlice reflect.Value + if !dstElement.IsValid() || dstElement.IsNil() { + dstSlice = reflect.MakeSlice(srcSlice.Type(), 0, srcSlice.Len()) + } else { + dstSlice = reflect.ValueOf(dstElement.Interface()) + } + + if (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) && !config.AppendSlice && !sliceDeepCopy { + if typeCheck && srcSlice.Type() != dstSlice.Type() { + return fmt.Errorf("cannot override two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type()) + } + dstSlice = srcSlice + } else if config.AppendSlice { + if srcSlice.Type() != dstSlice.Type() { + return fmt.Errorf("cannot append two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type()) + } + dstSlice = reflect.AppendSlice(dstSlice, srcSlice) + } else if sliceDeepCopy { + i := 0 + for ; i < srcSlice.Len() && i < dstSlice.Len(); i++ { + srcElement := srcSlice.Index(i) + dstElement := dstSlice.Index(i) + + if srcElement.CanInterface() { + srcElement = reflect.ValueOf(srcElement.Interface()) + } + if dstElement.CanInterface() { + dstElement = reflect.ValueOf(dstElement.Interface()) + } + + if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { + return + } + } + + } + dst.SetMapIndex(key, dstSlice) + } + } + + if dstElement.IsValid() && !isEmptyValue(dstElement, !config.ShouldNotDereference) { + if reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice { + continue + } + if reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map && reflect.TypeOf(dstElement.Interface()).Kind() == reflect.Map { + continue + } + } 
+ + if srcElement.IsValid() && ((srcElement.Kind() != reflect.Ptr && overwrite) || !dstElement.IsValid() || isEmptyValue(dstElement, !config.ShouldNotDereference)) { + if dst.IsNil() { + dst.Set(reflect.MakeMap(dst.Type())) + } + dst.SetMapIndex(key, srcElement) + } + } + + // Ensure that all keys in dst are deleted if they are not in src. + if overwriteWithEmptySrc { + for _, key := range dst.MapKeys() { + srcElement := src.MapIndex(key) + if !srcElement.IsValid() { + dst.SetMapIndex(key, reflect.Value{}) + } + } + } + case reflect.Slice: + if !dst.CanSet() { + break + } + if (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) && !config.AppendSlice && !sliceDeepCopy { + dst.Set(src) + } else if config.AppendSlice { + if src.Type() != dst.Type() { + return fmt.Errorf("cannot append two slice with different type (%s, %s)", src.Type(), dst.Type()) + } + dst.Set(reflect.AppendSlice(dst, src)) + } else if sliceDeepCopy { + for i := 0; i < src.Len() && i < dst.Len(); i++ { + srcElement := src.Index(i) + dstElement := dst.Index(i) + if srcElement.CanInterface() { + srcElement = reflect.ValueOf(srcElement.Interface()) + } + if dstElement.CanInterface() { + dstElement = reflect.ValueOf(dstElement.Interface()) + } + + if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { + return + } + } + } + case reflect.Ptr: + fallthrough + case reflect.Interface: + if isReflectNil(src) { + if overwriteWithEmptySrc && dst.CanSet() && src.Type().AssignableTo(dst.Type()) { + dst.Set(src) + } + break + } + + if src.Kind() != reflect.Interface { + if dst.IsNil() || (src.Kind() != reflect.Ptr && overwrite) { + if dst.CanSet() && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) { + dst.Set(src) + } + } else if src.Kind() == reflect.Ptr { + if !config.ShouldNotDereference { + if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { + return + } + } else { + if overwriteWithEmptySrc || (overwrite && !src.IsNil()) || dst.IsNil() { + dst.Set(src) + } + } + } else if dst.Elem().Type() == src.Type() { + if err = deepMerge(dst.Elem(), src, visited, depth+1, config); err != nil { + return + } + } else { + return ErrDifferentArgumentsTypes + } + break + } + + if dst.IsNil() || overwrite { + if dst.CanSet() && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) { + dst.Set(src) + } + break + } + + if dst.Elem().Kind() == src.Elem().Kind() { + if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { + return + } + break + } + default: + mustSet := (isEmptyValue(dst, !config.ShouldNotDereference) || overwrite) && (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc) + if mustSet { + if dst.CanSet() { + dst.Set(src) + } else { + dst = src + } + } + } + + return +} + +// Merge will fill any empty for value type attributes on the dst struct using corresponding +// src attributes if they themselves are not empty. dst and src must be valid same-type structs +// and dst must be a pointer to struct. +// It won't merge unexported (private) fields and will do recursively any exported field. +func Merge(dst, src interface{}, opts ...func(*Config)) error { + return merge(dst, src, opts...) +} + +// MergeWithOverwrite will do the same as Merge except that non-empty dst attributes will be overridden by +// non-empty src attribute values. 
+// Deprecated: use Merge(…) with WithOverride +func MergeWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { + return merge(dst, src, append(opts, WithOverride)...) +} + +// WithTransformers adds transformers to merge, allowing to customize the merging of some types. +func WithTransformers(transformers Transformers) func(*Config) { + return func(config *Config) { + config.Transformers = transformers + } +} + +// WithOverride will make merge override non-empty dst attributes with non-empty src attributes values. +func WithOverride(config *Config) { + config.Overwrite = true +} + +// WithOverwriteWithEmptyValue will make merge override non empty dst attributes with empty src attributes values. +func WithOverwriteWithEmptyValue(config *Config) { + config.Overwrite = true + config.overwriteWithEmptyValue = true +} + +// WithOverrideEmptySlice will make merge override empty dst slice with empty src slice. +func WithOverrideEmptySlice(config *Config) { + config.overwriteSliceWithEmptyValue = true +} + +// WithoutDereference prevents dereferencing pointers when evaluating whether they are empty +// (i.e. a non-nil pointer is never considered empty). +func WithoutDereference(config *Config) { + config.ShouldNotDereference = true +} + +// WithAppendSlice will make merge append slices instead of overwriting it. +func WithAppendSlice(config *Config) { + config.AppendSlice = true +} + +// WithTypeCheck will make merge check types while overwriting it (must be used with WithOverride). +func WithTypeCheck(config *Config) { + config.TypeCheck = true +} + +// WithSliceDeepCopy will merge slice element one by one with Overwrite flag. +func WithSliceDeepCopy(config *Config) { + config.sliceDeepCopy = true + config.Overwrite = true +} + +func merge(dst, src interface{}, opts ...func(*Config)) error { + if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr { + return ErrNonPointerArgument + } + var ( + vDst, vSrc reflect.Value + err error + ) + + config := &Config{} + + for _, opt := range opts { + opt(config) + } + + if vDst, vSrc, err = resolveValues(dst, src); err != nil { + return err + } + if vDst.Type() != vSrc.Type() { + return ErrDifferentArgumentsTypes + } + return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config) +} + +// IsReflectNil is the reflect value provided nil +func isReflectNil(v reflect.Value) bool { + k := v.Kind() + switch k { + case reflect.Interface, reflect.Slice, reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr: + // Both interface and slice are nil if first word is 0. + // Both are always bigger than a word; assume flagIndir. + return v.IsNil() + default: + return false + } +} diff --git a/vendor/dario.cat/mergo/mergo.go b/vendor/dario.cat/mergo/mergo.go new file mode 100644 index 00000000..0a721e2d --- /dev/null +++ b/vendor/dario.cat/mergo/mergo.go @@ -0,0 +1,81 @@ +// Copyright 2013 Dario Castañé. All rights reserved. +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Based on src/pkg/reflect/deepequal.go from official +// golang's stdlib. + +package mergo + +import ( + "errors" + "reflect" +) + +// Errors reported by Mergo when it finds invalid arguments. 
+var ( + ErrNilArguments = errors.New("src and dst must not be nil") + ErrDifferentArgumentsTypes = errors.New("src and dst must be of same type") + ErrNotSupported = errors.New("only structs, maps, and slices are supported") + ErrExpectedMapAsDestination = errors.New("dst was expected to be a map") + ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct") + ErrNonPointerArgument = errors.New("dst must be a pointer") +) + +// During deepMerge, must keep track of checks that are +// in progress. The comparison algorithm assumes that all +// checks in progress are true when it reencounters them. +// Visited are stored in a map indexed by 17 * a1 + a2; +type visit struct { + typ reflect.Type + next *visit + ptr uintptr +} + +// From src/pkg/encoding/json/encode.go. +func isEmptyValue(v reflect.Value, shouldDereference bool) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + if v.IsNil() { + return true + } + if shouldDereference { + return isEmptyValue(v.Elem(), shouldDereference) + } + return false + case reflect.Func: + return v.IsNil() + case reflect.Invalid: + return true + } + return false +} + +func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) { + if dst == nil || src == nil { + err = ErrNilArguments + return + } + vDst = reflect.ValueOf(dst).Elem() + if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map && vDst.Kind() != reflect.Slice { + err = ErrNotSupported + return + } + vSrc = reflect.ValueOf(src) + // We check if vSrc is a pointer to dereference it. + if vSrc.Kind() == reflect.Ptr { + vSrc = vSrc.Elem() + } + return +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/armor.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/armor.go index 0d6c5f3f..d7af9141 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/armor.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/armor.go @@ -10,8 +10,9 @@ import ( "bufio" "bytes" "encoding/base64" - "github.com/ProtonMail/go-crypto/openpgp/errors" "io" + + "github.com/ProtonMail/go-crypto/openpgp/errors" ) // A Block represents an OpenPGP armored structure. @@ -208,12 +209,16 @@ TryNextBlock: break } - i := bytes.Index(line, []byte(": ")) + i := bytes.Index(line, []byte(":")) if i == -1 { goto TryNextBlock } lastKey = string(line[:i]) - p.Header[lastKey] = string(line[i+2:]) + var value string + if len(line) > i+2 { + value = string(line[i+2:]) + } + p.Header[lastKey] = value } p.lReader.in = r diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go index 7283ca91..2d7b0cf3 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go @@ -504,7 +504,7 @@ EachPacket: // Else, ignoring the signature as it does not follow anything // we would know to attach it to. 
case *packet.PrivateKey: - if pkt.IsSubkey == false { + if !pkt.IsSubkey { packets.Unread(p) break EachPacket } @@ -513,7 +513,7 @@ EachPacket: return nil, err } case *packet.PublicKey: - if pkt.IsSubkey == false { + if !pkt.IsSubkey { packets.Unread(p) break EachPacket } diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go index ec903ee9..3402b8c1 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go @@ -415,6 +415,10 @@ func (pk *PublicKey) parseEdDSA(r io.Reader) (err error) { return } + if len(pk.p.Bytes()) == 0 { + return errors.StructuralError("empty EdDSA public key") + } + pub := eddsa.NewPublicKey(c) switch flag := pk.p.Bytes()[0]; flag { diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go index 6c58c86f..80d0bb98 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go @@ -904,7 +904,7 @@ func (sig *Signature) buildSubpackets(issuer PublicKey) (subpackets []outputSubp if sig.IssuerKeyId != nil && sig.Version == 4 { keyId := make([]byte, 8) binary.BigEndian.PutUint64(keyId, *sig.IssuerKeyId) - subpackets = append(subpackets, outputSubpacket{true, issuerSubpacket, true, keyId}) + subpackets = append(subpackets, outputSubpacket{true, issuerSubpacket, false, keyId}) } if sig.IssuerFingerprint != nil { contents := append([]uint8{uint8(issuer.Version)}, sig.IssuerFingerprint...) diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetric_key_encrypted.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetric_key_encrypted.go index a8abf2ff..bac2b132 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetric_key_encrypted.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetric_key_encrypted.go @@ -7,13 +7,11 @@ package packet import ( "bytes" "crypto/cipher" - "crypto/sha256" "io" "strconv" "github.com/ProtonMail/go-crypto/openpgp/errors" "github.com/ProtonMail/go-crypto/openpgp/s2k" - "golang.org/x/crypto/hkdf" ) // This is the largest session key that we'll support. 
Since at most 256-bit cipher @@ -45,13 +43,6 @@ func (ske *SymmetricKeyEncrypted) parse(r io.Reader) error { return errors.UnsupportedError("unknown SymmetricKeyEncrypted version") } - if ske.Version == 5 { - // Scalar octet count - if _, err := readFull(r, buf[:]); err != nil { - return err - } - } - // Cipher function if _, err := readFull(r, buf[:]); err != nil { return err @@ -67,11 +58,6 @@ func (ske *SymmetricKeyEncrypted) parse(r io.Reader) error { return errors.StructuralError("cannot read AEAD octet from packet") } ske.Mode = AEADMode(buf[0]) - - // Scalar octet count - if _, err := readFull(r, buf[:]); err != nil { - return err - } } var err error @@ -220,7 +206,7 @@ func SerializeSymmetricKeyEncryptedReuseKey(w io.Writer, sessionKey []byte, pass case 5: ivLen := config.AEAD().Mode().IvLength() tagLen := config.AEAD().Mode().TagLength() - packetLength = 5 + len(s2kBytes) + ivLen + keySize + tagLen + packetLength = 3 + len(s2kBytes) + ivLen + keySize + tagLen } err = serializeHeader(w, packetTypeSymmetricKeyEncrypted, packetLength) if err != nil { @@ -230,20 +216,12 @@ func SerializeSymmetricKeyEncryptedReuseKey(w io.Writer, sessionKey []byte, pass // Symmetric Key Encrypted Version buf := []byte{byte(version)} - if version == 5 { - // Scalar octet count - buf = append(buf, byte(3+len(s2kBytes)+config.AEAD().Mode().IvLength())) - } - // Cipher function buf = append(buf, byte(cipherFunc)) if version == 5 { // AEAD mode buf = append(buf, byte(config.AEAD().Mode())) - - // Scalar octet count - buf = append(buf, byte(len(s2kBytes))) } _, err = w.Write(buf) if err != nil { @@ -293,11 +271,6 @@ func SerializeSymmetricKeyEncryptedReuseKey(w io.Writer, sessionKey []byte, pass } func getEncryptedKeyAeadInstance(c CipherFunction, mode AEADMode, inputKey, associatedData []byte) (aead cipher.AEAD) { - hkdfReader := hkdf.New(sha256.New, inputKey, []byte{}, associatedData) - - encryptionKey := make([]byte, c.KeySize()) - _, _ = readFull(hkdfReader, encryptionKey) - - blockCipher := c.new(encryptionKey) + blockCipher := c.new(inputKey) return mode.new(blockCipher) } diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go index 864d8ca6..7fdd13a3 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go @@ -381,7 +381,7 @@ func encrypt(keyWriter io.Writer, dataWriter io.Writer, to []*Entity, signed *En } sig := to[i].PrimaryIdentity().SelfSignature - if sig.SEIPDv2 == false { + if !sig.SEIPDv2 { aeadSupported = false } diff --git a/vendor/github.com/anchore/stereoscope/client.go b/vendor/github.com/anchore/stereoscope/client.go index ea8ee736..8015a7c1 100644 --- a/vendor/github.com/anchore/stereoscope/client.go +++ b/vendor/github.com/anchore/stereoscope/client.go @@ -81,7 +81,10 @@ func GetImageFromSource(ctx context.Context, imgStr string, source image.Source, } } - provider, err := selectImageProvider(imgStr, source, cfg) + provider, cleanup, err := selectImageProvider(imgStr, source, cfg) + if cleanup != nil { + defer cleanup() + } if err != nil { return nil, err } @@ -99,44 +102,61 @@ func GetImageFromSource(ctx context.Context, imgStr string, source image.Source, return img, nil } -func selectImageProvider(imgStr string, source image.Source, cfg config) (image.Provider, error) { +// nolint:funlen +func selectImageProvider(imgStr string, source image.Source, cfg config) (image.Provider, func(), error) { var provider image.Provider tempDirGenerator 
:= rootTempDirGenerator.NewGenerator() platformSelectionUnsupported := fmt.Errorf("specified platform=%q however image source=%q does not support selecting platform", cfg.Platform.String(), source.String()) + cleanup := func() {} + switch source { case image.DockerTarballSource: if cfg.Platform != nil { - return nil, platformSelectionUnsupported + return nil, cleanup, platformSelectionUnsupported } // note: the imgStr is the path on disk to the tar file provider = docker.NewProviderFromTarball(imgStr, tempDirGenerator) case image.DockerDaemonSource: c, err := dockerClient.GetClient() if err != nil { - return nil, err + return nil, cleanup, err + } + + cleanup = func() { + if err := c.Close(); err != nil { + log.Errorf("unable to close docker client: %+v", err) + } } + provider, err = docker.NewProviderFromDaemon(imgStr, tempDirGenerator, c, cfg.Platform) if err != nil { - return nil, err + return nil, cleanup, err } case image.PodmanDaemonSource: c, err := podman.GetClient() if err != nil { - return nil, err + return nil, cleanup, err + } + + cleanup = func() { + if err := c.Close(); err != nil { + log.Errorf("unable to close docker client: %+v", err) + } } + provider, err = docker.NewProviderFromDaemon(imgStr, tempDirGenerator, c, cfg.Platform) if err != nil { - return nil, err + return nil, cleanup, err } case image.OciDirectorySource: if cfg.Platform != nil { - return nil, platformSelectionUnsupported + return nil, cleanup, platformSelectionUnsupported } provider = oci.NewProviderFromPath(imgStr, tempDirGenerator) case image.OciTarballSource: if cfg.Platform != nil { - return nil, platformSelectionUnsupported + return nil, cleanup, platformSelectionUnsupported } provider = oci.NewProviderFromTarball(imgStr, tempDirGenerator) case image.OciRegistrySource: @@ -144,13 +164,13 @@ func selectImageProvider(imgStr string, source image.Source, cfg config) (image. provider = oci.NewProviderFromRegistry(imgStr, tempDirGenerator, cfg.Registry, cfg.Platform) case image.SingularitySource: if cfg.Platform != nil { - return nil, platformSelectionUnsupported + return nil, cleanup, platformSelectionUnsupported } provider = sif.NewProviderFromPath(imgStr, tempDirGenerator) default: - return nil, fmt.Errorf("unable to determine image source") + return nil, cleanup, fmt.Errorf("unable to determine image source") } - return provider, nil + return provider, cleanup, nil } // defaultPlatformIfNil sets the platform to use the host's architecture diff --git a/vendor/github.com/anchore/stereoscope/pkg/image/docker/daemon_provider.go b/vendor/github.com/anchore/stereoscope/pkg/image/docker/daemon_provider.go index bb389ce7..ca12155e 100644 --- a/vendor/github.com/anchore/stereoscope/pkg/image/docker/daemon_provider.go +++ b/vendor/github.com/anchore/stereoscope/pkg/image/docker/daemon_provider.go @@ -31,14 +31,16 @@ import ( // DaemonImageProvider is a image.Provider capable of fetching and representing a docker image from the docker daemon API. type DaemonImageProvider struct { - imageStr string - tmpDirGen *file.TempDirGenerator - client client.APIClient - platform *image.Platform + imageStr string + originalImageRef string + tmpDirGen *file.TempDirGenerator + client client.APIClient + platform *image.Platform } // NewProviderFromDaemon creates a new provider instance for a specific image that will later be cached to the given directory. 
func NewProviderFromDaemon(imgStr string, tmpDirGen *file.TempDirGenerator, c client.APIClient, platform *image.Platform) (*DaemonImageProvider, error) { + var originalRef string ref, err := name.ParseReference(imgStr, name.WithDefaultRegistry("")) if err != nil { return nil, err @@ -46,12 +48,14 @@ func NewProviderFromDaemon(imgStr string, tmpDirGen *file.TempDirGenerator, c cl tag, ok := ref.(name.Tag) if ok { imgStr = tag.Name() + originalRef = tag.String() // blindly takes the original input passed into Tag } return &DaemonImageProvider{ - imageStr: imgStr, - tmpDirGen: tmpDirGen, - client: c, - platform: platform, + imageStr: imgStr, + originalImageRef: originalRef, + tmpDirGen: tmpDirGen, + client: c, + platform: platform, }, nil } @@ -314,6 +318,12 @@ func (p *DaemonImageProvider) saveImage(ctx context.Context) (string, error) { func (p *DaemonImageProvider) pullImageIfMissing(ctx context.Context) error { // check if the image exists locally inspectResult, _, err := p.client.ImageInspectWithRaw(ctx, p.imageStr) + if err != nil { + inspectResult, _, err = p.client.ImageInspectWithRaw(ctx, p.originalImageRef) + if err == nil { + p.imageStr = strings.TrimSuffix(p.imageStr, ":latest") + } + } if err != nil { if client.IsErrNotFound(err) { if err = p.pull(ctx); err != nil { @@ -324,7 +334,7 @@ func (p *DaemonImageProvider) pullImageIfMissing(ctx context.Context) error { } } else { // looks like the image exists, but if the platform doesn't match what the user specified, we may need to - // pull the image again with the correct platofmr specifier, which will override the local tag. + // pull the image again with the correct platform specifier, which will override the local tag. if err := p.validatePlatform(inspectResult); err != nil { if err = p.pull(ctx); err != nil { return err diff --git a/vendor/github.com/anchore/stereoscope/pkg/image/layer.go b/vendor/github.com/anchore/stereoscope/pkg/image/layer.go index f803bca6..6c630e04 100644 --- a/vendor/github.com/anchore/stereoscope/pkg/image/layer.go +++ b/vendor/github.com/anchore/stereoscope/pkg/image/layer.go @@ -102,6 +102,7 @@ func (l *Layer) Read(catalog *FileCatalog, imgMetadata Metadata, idx int, uncomp types.OCIUncompressedLayer, types.OCIRestrictedLayer, types.OCIUncompressedRestrictedLayer, + types.OCILayerZStd, types.DockerLayer, types.DockerForeignLayer, types.DockerUncompressedLayer: diff --git a/vendor/github.com/anchore/syft/CONTRIBUTORS.md b/vendor/github.com/anchore/syft/CONTRIBUTORS.md index 3d97f34a..3f773335 100644 --- a/vendor/github.com/anchore/syft/CONTRIBUTORS.md +++ b/vendor/github.com/anchore/syft/CONTRIBUTORS.md @@ -5,3 +5,7 @@ The following Syft components were contributed by external authors/organizations ## GraalVM Native Image A cataloger contributed by Oracle Corporation that extracts packages given within GraalVM Native Image SBOMs. + +## Swift Package Manager + +A cataloger contributed by Axis Communications that catalogs packages resolved by Swift Package Manager. \ No newline at end of file diff --git a/vendor/github.com/anchore/syft/internal/constants.go b/vendor/github.com/anchore/syft/internal/constants.go index bf520ab9..354a5427 100644 --- a/vendor/github.com/anchore/syft/internal/constants.go +++ b/vendor/github.com/anchore/syft/internal/constants.go @@ -6,5 +6,5 @@ const ( // JSONSchemaVersion is the current schema version output by the JSON encoder // This is roughly following the "SchemaVer" guidelines for versioning the JSON schema. 
Please see schema/json/README.md for details on how to increment. - JSONSchemaVersion = "9.0.0" + JSONSchemaVersion = "10.0.0" ) diff --git a/vendor/github.com/anchore/syft/syft/formats/common/cyclonedxhelpers/decoder.go b/vendor/github.com/anchore/syft/syft/formats/common/cyclonedxhelpers/decoder.go index ecfb9baf..741e51ea 100644 --- a/vendor/github.com/anchore/syft/syft/formats/common/cyclonedxhelpers/decoder.go +++ b/vendor/github.com/anchore/syft/syft/formats/common/cyclonedxhelpers/decoder.go @@ -210,24 +210,33 @@ func collectRelationships(bom *cyclonedx.BOM, s *sbom.SBOM, idMap map[string]int return } for _, d := range *bom.Dependencies { - to, fromExists := idMap[d.Ref].(artifact.Identifiable) - if !fromExists { + if d.Dependencies == nil { continue } - if d.Dependencies == nil { + toPtr, toExists := idMap[d.Ref] + if !toExists { + continue + } + to, ok := common.PtrToStruct(toPtr).(artifact.Identifiable) + if !ok { continue } for _, t := range *d.Dependencies { - from, toExists := idMap[t].(artifact.Identifiable) - if !toExists { + fromPtr, fromExists := idMap[t] + if !fromExists { + continue + } + from, ok := common.PtrToStruct(fromPtr).(artifact.Identifiable) + if !ok { continue } s.Relationships = append(s.Relationships, artifact.Relationship{ From: from, To: to, - Type: artifact.DependencyOfRelationship, // FIXME this information is lost + // match assumptions in encoding, that this is the only type of relationship captured: + Type: artifact.DependencyOfRelationship, }) } } diff --git a/vendor/github.com/anchore/syft/syft/formats/common/cyclonedxhelpers/format.go b/vendor/github.com/anchore/syft/syft/formats/common/cyclonedxhelpers/format.go index 34ca3509..9582ba85 100644 --- a/vendor/github.com/anchore/syft/syft/formats/common/cyclonedxhelpers/format.go +++ b/vendor/github.com/anchore/syft/syft/formats/common/cyclonedxhelpers/format.go @@ -143,27 +143,29 @@ func toDependencies(relationships []artifact.Relationship) []cyclonedx.Dependenc for _, r := range relationships { exists := isExpressiblePackageRelationship(r.Type) if !exists { - log.Debugf("unable to convert relationship from CycloneDX 1.4 JSON, dropping: %+v", r) + log.Debugf("unable to convert relationship type to CycloneDX JSON, dropping: %#v", r) continue } // we only capture package-to-package relationships for now - fromPkg, ok := r.From.(*pkg.Package) + fromPkg, ok := r.From.(pkg.Package) if !ok { + log.Tracef("unable to convert relationship fromPkg to CycloneDX JSON, dropping: %#v", r) continue } - toPkg, ok := r.To.(*pkg.Package) + toPkg, ok := r.To.(pkg.Package) if !ok { + log.Tracef("unable to convert relationship toPkg to CycloneDX JSON, dropping: %#v", r) continue } // ind dep innerDeps := []string{} - innerDeps = append(innerDeps, deriveBomRef(*fromPkg)) + innerDeps = append(innerDeps, deriveBomRef(fromPkg)) result = append(result, cyclonedx.Dependency{ - Ref: deriveBomRef(*toPkg), + Ref: deriveBomRef(toPkg), Dependencies: &innerDeps, }) } diff --git a/vendor/github.com/anchore/syft/syft/formats/common/spdxhelpers/document_name.go b/vendor/github.com/anchore/syft/syft/formats/common/spdxhelpers/document_name.go index 6932f2b4..10d2e2dc 100644 --- a/vendor/github.com/anchore/syft/syft/formats/common/spdxhelpers/document_name.go +++ b/vendor/github.com/anchore/syft/syft/formats/common/spdxhelpers/document_name.go @@ -4,12 +4,12 @@ import ( "github.com/anchore/syft/syft/source" ) -func DocumentName(srcMetadata source.Description) string { - if srcMetadata.Name != "" { - return srcMetadata.Name +func 
DocumentName(src source.Description) string { + if src.Name != "" { + return src.Name } - switch metadata := srcMetadata.Metadata.(type) { + switch metadata := src.Metadata.(type) { case source.StereoscopeImageSourceMetadata: return metadata.UserInput case source.DirectorySourceMetadata: diff --git a/vendor/github.com/anchore/syft/syft/formats/common/spdxhelpers/license.go b/vendor/github.com/anchore/syft/syft/formats/common/spdxhelpers/license.go index 4b0d896a..e3352f27 100644 --- a/vendor/github.com/anchore/syft/syft/formats/common/spdxhelpers/license.go +++ b/vendor/github.com/anchore/syft/syft/formats/common/spdxhelpers/license.go @@ -1,6 +1,8 @@ package spdxhelpers import ( + "crypto/sha256" + "fmt" "strings" "github.com/anchore/syft/internal/spdxlicense" @@ -27,29 +29,18 @@ func License(p pkg.Package) (concluded, declared string) { // https://spdx.github.io/spdx-spec/v2.3/SPDX-license-expressions/ pc, pd := parseLicenses(p.Licenses.ToSlice()) - for i, v := range pc { - if strings.HasPrefix(v, spdxlicense.LicenseRefPrefix) { - pc[i] = SanitizeElementID(v) - } - } - - for i, v := range pd { - if strings.HasPrefix(v, spdxlicense.LicenseRefPrefix) { - pd[i] = SanitizeElementID(v) - } - } - return joinLicenses(pc), joinLicenses(pd) } -func joinLicenses(licenses []string) string { +func joinLicenses(licenses []spdxLicense) string { if len(licenses) == 0 { return NOASSERTION } var newLicenses []string - for _, v := range licenses { + for _, l := range licenses { + v := l.id // check if license does not start or end with parens if !strings.HasPrefix(v, "(") && !strings.HasSuffix(v, ")") { // if license contains AND, OR, or WITH, then wrap in parens @@ -66,14 +57,31 @@ func joinLicenses(licenses []string) string { return strings.Join(newLicenses, " AND ") } -func parseLicenses(raw []pkg.License) (concluded, declared []string) { +type spdxLicense struct { + id string + value string +} + +func parseLicenses(raw []pkg.License) (concluded, declared []spdxLicense) { for _, l := range raw { - var candidate string + if l.Value == "" { + continue + } + + candidate := spdxLicense{} if l.SPDXExpression != "" { - candidate = l.SPDXExpression + candidate.id = l.SPDXExpression } else { // we did not find a valid SPDX license ID so treat as separate license - candidate = spdxlicense.LicenseRefPrefix + l.Value + if len(l.Value) <= 64 { + // if the license text is less than the size of the hash, + // just use it directly so the id is more readable + candidate.id = spdxlicense.LicenseRefPrefix + SanitizeElementID(l.Value) + } else { + hash := sha256.Sum256([]byte(l.Value)) + candidate.id = fmt.Sprintf("%s%x", spdxlicense.LicenseRefPrefix, hash) + } + candidate.value = l.Value } switch l.Type { @@ -83,5 +91,6 @@ func parseLicenses(raw []pkg.License) (concluded, declared []string) { declared = append(declared, candidate) } } + return concluded, declared } diff --git a/vendor/github.com/anchore/syft/syft/formats/common/spdxhelpers/source_info.go b/vendor/github.com/anchore/syft/syft/formats/common/spdxhelpers/source_info.go index 8aeb5b35..2ec80786 100644 --- a/vendor/github.com/anchore/syft/syft/formats/common/spdxhelpers/source_info.go +++ b/vendor/github.com/anchore/syft/syft/formats/common/spdxhelpers/source_info.go @@ -54,6 +54,8 @@ func SourceInfo(p pkg.Package) string { answer = "acquired package info from nix store path" case pkg.Rpkg: answer = "acquired package info from R-package DESCRIPTION file" + case pkg.SwiftPkg: + answer = "acquired package info from resolved Swift package manifest" default: 
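// Review note (editorial, not part of the vendored code): the license.go
// change above maps any license value without a valid SPDX expression to a
// deterministic LicenseRef ID: values of 64 characters or fewer are sanitized
// and used directly, while longer values are replaced by their SHA-256 digest
// so the ID stays short and stable. A minimal sketch of that mapping, using a
// hypothetical helper name:
//
//	func licenseRefID(value string) string {
//		if len(value) <= 64 {
//			return spdxlicense.LicenseRefPrefix + SanitizeElementID(value)
//		}
//		return fmt.Sprintf("%s%x", spdxlicense.LicenseRefPrefix, sha256.Sum256([]byte(value)))
//	}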
answer = "acquired package info from the following paths" } diff --git a/vendor/github.com/anchore/syft/syft/formats/common/spdxhelpers/to_format_model.go b/vendor/github.com/anchore/syft/syft/formats/common/spdxhelpers/to_format_model.go index 6b412a25..258c96f8 100644 --- a/vendor/github.com/anchore/syft/syft/formats/common/spdxhelpers/to_format_model.go +++ b/vendor/github.com/anchore/syft/syft/formats/common/spdxhelpers/to_format_model.go @@ -9,10 +9,12 @@ import ( "strings" "time" + "github.com/docker/distribution/reference" "github.com/spdx/tools-golang/spdx" "golang.org/x/exp/maps" "golang.org/x/exp/slices" + "github.com/anchore/packageurl-go" "github.com/anchore/syft/internal" "github.com/anchore/syft/internal/log" "github.com/anchore/syft/internal/spdxlicense" @@ -21,10 +23,20 @@ import ( "github.com/anchore/syft/syft/formats/common/util" "github.com/anchore/syft/syft/pkg" "github.com/anchore/syft/syft/sbom" + "github.com/anchore/syft/syft/source" ) const ( noAssertion = "NOASSERTION" + + spdxPrimaryPurposeContainer = "CONTAINER" + spdxPrimaryPurposeFile = "FILE" + spdxPrimaryPurposeOther = "OTHER" + + prefixImage = "Image" + prefixDirectory = "Directory" + prefixFile = "File" + prefixUnknown = "Unknown" ) // ToFormatModel creates and populates a new SPDX document struct that follows the SPDX 2.3 @@ -33,23 +45,37 @@ const ( //nolint:funlen func ToFormatModel(s sbom.SBOM) *spdx.Document { name, namespace := DocumentNameAndNamespace(s.Source) + + packages := toPackages(s.Artifacts.Packages, s) + relationships := toRelationships(s.RelationshipsSorted()) // for valid SPDX we need a document describes relationship - // TODO: remove this placeholder after deciding on correct behavior - // for the primary package purpose field: - // https://spdx.github.io/spdx-spec/v2.3/package-information/#724-primary-package-purpose-field + describesID := spdx.ElementID("DOCUMENT") + + rootPackage := toRootPackage(s.Source) + if rootPackage != nil { + describesID = rootPackage.PackageSPDXIdentifier + + // add all relationships from the document root to all other packages + relationships = append(relationships, toRootRelationships(rootPackage, packages)...) 
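// Review note (editorial, not part of the vendored code): with the root
// package in place, the document graph sketched out is:
//
//	DOCUMENT                      DESCRIBES  DocumentRoot-<prefix>-<name>
//	DocumentRoot-<prefix>-<name>  CONTAINS   Package-...  (one edge per package)
//
// i.e. DESCRIBES now points at a synthetic package standing in for the
// scanned source rather than back at the document itself.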
+ + // append the root package + packages = append(packages, rootPackage) + } + + // add a relationship for the package the document describes documentDescribesRelationship := &spdx.Relationship{ RefA: spdx.DocElementID{ ElementRefID: "DOCUMENT", }, Relationship: string(DescribesRelationship), RefB: spdx.DocElementID{ - ElementRefID: "DOCUMENT", + ElementRefID: describesID, }, - RelationshipComment: "", } + // add the root document relationship relationships = append(relationships, documentDescribesRelationship) return &spdx.Document{ @@ -123,19 +149,133 @@ func ToFormatModel(s sbom.SBOM) *spdx.Document { // Cardinality: optional, one CreatorComment: "", }, - Packages: toPackages(s.Artifacts.Packages, s), + Packages: packages, Files: toFiles(s), Relationships: relationships, OtherLicenses: toOtherLicenses(s.Artifacts.Packages), } } +func toRootRelationships(rootPackage *spdx.Package, packages []*spdx.Package) (out []*spdx.Relationship) { + for _, p := range packages { + out = append(out, &spdx.Relationship{ + RefA: spdx.DocElementID{ + ElementRefID: rootPackage.PackageSPDXIdentifier, + }, + Relationship: string(ContainsRelationship), + RefB: spdx.DocElementID{ + ElementRefID: p.PackageSPDXIdentifier, + }, + }) + } + return +} + +//nolint:funlen +func toRootPackage(s source.Description) *spdx.Package { + var prefix string + + name := s.Name + version := s.Version + + var purl *packageurl.PackageURL + purpose := "" + var checksums []spdx.Checksum + switch m := s.Metadata.(type) { + case source.StereoscopeImageSourceMetadata: + prefix = prefixImage + purpose = spdxPrimaryPurposeContainer + + qualifiers := packageurl.Qualifiers{ + { + Key: "arch", + Value: m.Architecture, + }, + } + + ref, _ := reference.Parse(m.UserInput) + if ref, ok := ref.(reference.NamedTagged); ok { + qualifiers = append(qualifiers, packageurl.Qualifier{ + Key: "tag", + Value: ref.Tag(), + }) + } + + c := toChecksum(m.ManifestDigest) + if c != nil { + checksums = append(checksums, *c) + purl = &packageurl.PackageURL{ + Type: "oci", + Name: s.Name, + Version: m.ManifestDigest, + Qualifiers: qualifiers, + } + } + + case source.DirectorySourceMetadata: + prefix = prefixDirectory + purpose = spdxPrimaryPurposeFile + + case source.FileSourceMetadata: + prefix = prefixFile + purpose = spdxPrimaryPurposeFile + + for _, d := range m.Digests { + checksums = append(checksums, spdx.Checksum{ + Algorithm: toChecksumAlgorithm(d.Algorithm), + Value: d.Value, + }) + } + default: + prefix = prefixUnknown + purpose = spdxPrimaryPurposeOther + + if name == "" { + name = s.ID + } + } + + p := &spdx.Package{ + PackageName: name, + PackageSPDXIdentifier: spdx.ElementID(SanitizeElementID(fmt.Sprintf("DocumentRoot-%s-%s", prefix, name))), + PackageVersion: version, + PackageChecksums: checksums, + PackageExternalReferences: nil, + PrimaryPackagePurpose: purpose, + PackageSupplier: &spdx.Supplier{ + Supplier: NOASSERTION, + }, + PackageDownloadLocation: NOASSERTION, + } + + if purl != nil { + p.PackageExternalReferences = []*spdx.PackageExternalReference{ + { + Category: string(PackageManagerReferenceCategory), + RefType: string(PurlExternalRefType), + Locator: purl.String(), + }, + } + } + + return p +} + func toSPDXID(identifiable artifact.Identifiable) spdx.ElementID { maxLen := 40 id := "" switch it := identifiable.(type) { case pkg.Package: - id = SanitizeElementID(fmt.Sprintf("Package-%s-%s-%s", it.Type, it.Name, it.ID())) + switch { + case it.Type != "" && it.Name != "": + id = fmt.Sprintf("Package-%s-%s-%s", it.Type, it.Name, it.ID()) + 
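// Review note (editorial, not part of the vendored code): the per-case
// SanitizeElementID calls are dropped deliberately; each branch builds the
// raw ID and sanitization now happens exactly once at the return statement
// below.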
case it.Name != "": + id = fmt.Sprintf("Package-%s-%s", it.Name, it.ID()) + case it.Type != "": + id = fmt.Sprintf("Package-%s-%s", it.Type, it.ID()) + default: + id = fmt.Sprintf("Package-%s", it.ID()) + } case file.Coordinates: p := "" parts := strings.Split(it.RealPath, "/") @@ -150,12 +290,12 @@ func toSPDXID(identifiable artifact.Identifiable) spdx.ElementID { } p = path.Join(part, p) } - id = SanitizeElementID(fmt.Sprintf("File-%s-%s", p, it.ID())) + id = fmt.Sprintf("File-%s-%s", p, it.ID()) default: id = string(identifiable.ID()) } // NOTE: the spdx library prepend SPDXRef-, so we don't do it here - return spdx.ElementID(id) + return spdx.ElementID(SanitizeElementID(id)) } // packages populates all Package Information from the package Collection (see https://spdx.github.io/spdx-spec/3-package-information/) @@ -220,7 +360,7 @@ func toPackages(catalog *pkg.Collection, sbom sbom.SBOM) (results []*spdx.Packag // 7.6: Package Originator: may have single result for either Person or Organization, // or NOASSERTION // Cardinality: optional, one - PackageSupplier: nil, + PackageSupplier: toPackageSupplier(p), PackageOriginator: toPackageOriginator(p), @@ -377,6 +517,21 @@ func toPackageOriginator(p pkg.Package) *spdx.Originator { } } +func toPackageSupplier(p pkg.Package) *spdx.Supplier { + // this uses the Originator function for now until + // a better distinction can be made for supplier + kind, supplier := Originator(p) + if kind == "" || supplier == "" { + return &spdx.Supplier{ + Supplier: NOASSERTION, + } + } + return &spdx.Supplier{ + Supplier: supplier, + SupplierType: kind, + } +} + func formatSPDXExternalRefs(p pkg.Package) (refs []*spdx.PackageExternalReference) { for _, ref := range ExternalRefs(p) { refs = append(refs, &spdx.PackageExternalReference{ @@ -494,6 +649,18 @@ func toFileChecksums(digests []file.Digest) (checksums []spdx.Checksum) { return checksums } +// toChecksum takes a checksum in the format <algorithm>:<hash> and returns an spdx.Checksum or nil if the string is invalid +func toChecksum(algorithmHash string) *spdx.Checksum { + parts := strings.Split(algorithmHash, ":") + if len(parts) < 2 { + return nil + } + return &spdx.Checksum{ + Algorithm: toChecksumAlgorithm(parts[0]), + Value: parts[1], + } +} + func toChecksumAlgorithm(algorithm string) spdx.ChecksumAlgorithm { // this needs to be an uppercase version of our algorithm return spdx.ChecksumAlgorithm(strings.ToUpper(algorithm)) @@ -537,32 +704,31 @@ func toFileTypes(metadata *file.Metadata) (ty []string) { // other licenses are for licenses from the pkg.Package that do not have an SPDXExpression // field. The spdxexpression field is only filled given a validated Value field.
func toOtherLicenses(catalog *pkg.Collection) []*spdx.OtherLicense { - licenses := map[string]bool{} - for _, p := range catalog.Sorted() { + licenses := map[string]spdxLicense{} + + for p := range catalog.Enumerate() { declaredLicenses, concludedLicenses := parseLicenses(p.Licenses.ToSlice()) - for _, license := range declaredLicenses { - if strings.HasPrefix(license, spdxlicense.LicenseRefPrefix) { - licenses[license] = true + for _, l := range declaredLicenses { + if l.value != "" { + licenses[l.id] = l } } - for _, license := range concludedLicenses { - if strings.HasPrefix(license, spdxlicense.LicenseRefPrefix) { - licenses[license] = true + for _, l := range concludedLicenses { + if l.value != "" { + licenses[l.id] = l } } } var result []*spdx.OtherLicense - sorted := maps.Keys(licenses) - slices.Sort(sorted) - for _, license := range sorted { - // separate the found value from the prefix - // this only contains licenses that are not found on the SPDX License List - name := strings.TrimPrefix(license, spdxlicense.LicenseRefPrefix) + ids := maps.Keys(licenses) + slices.Sort(ids) + for _, id := range ids { + license := licenses[id] result = append(result, &spdx.OtherLicense{ - LicenseIdentifier: SanitizeElementID(license), - ExtractedText: name, + LicenseIdentifier: license.id, + ExtractedText: license.value, }) } return result diff --git a/vendor/github.com/anchore/syft/syft/formats/common/spdxhelpers/to_syft_model.go b/vendor/github.com/anchore/syft/syft/formats/common/spdxhelpers/to_syft_model.go index 54ecd145..f61f723c 100644 --- a/vendor/github.com/anchore/syft/syft/formats/common/spdxhelpers/to_syft_model.go +++ b/vendor/github.com/anchore/syft/syft/formats/common/spdxhelpers/to_syft_model.go @@ -2,14 +2,19 @@ package spdxhelpers import ( "errors" + "fmt" "net/url" + "path" + "regexp" "strconv" "strings" "github.com/spdx/tools-golang/spdx" + "github.com/spdx/tools-golang/spdx/v2/common" "github.com/anchore/packageurl-go" "github.com/anchore/syft/internal/log" + "github.com/anchore/syft/internal/spdxlicense" "github.com/anchore/syft/syft/artifact" "github.com/anchore/syft/syft/cpe" "github.com/anchore/syft/syft/file" @@ -26,12 +31,10 @@ func ToSyftModel(doc *spdx.Document) (*sbom.SBOM, error) { return nil, errors.New("cannot convert SPDX document to Syft model because document is nil") } - spdxIDMap := make(map[string]interface{}) - - src := extractSourceFromNamespace(doc.DocumentNamespace) + spdxIDMap := make(map[string]any) s := &sbom.SBOM{ - Source: src, + Source: extractSource(spdxIDMap, doc), Artifacts: sbom.Artifacts{ Packages: pkg.NewCollection(), FileMetadata: map[file.Coordinates]file.Metadata{}, @@ -40,7 +43,7 @@ func ToSyftModel(doc *spdx.Document) (*sbom.SBOM, error) { }, } - collectSyftPackages(s, spdxIDMap, doc) + collectSyftPackages(s, spdxIDMap, doc.Packages) collectSyftFiles(s, spdxIDMap, doc) @@ -49,6 +52,166 @@ func ToSyftModel(doc *spdx.Document) (*sbom.SBOM, error) { return s, nil } +func isDirectory(name string) bool { + if name == "." || name == ".." 
|| strings.HasSuffix(name, "/") || !strings.Contains(path.Base(name), ".") { + return true + } + return false +} + +func removePackage(packages []*spdx.Package, remove *spdx.Package) (pkgs []*spdx.Package) { + for _, p := range packages { + if p == remove { + continue + } + pkgs = append(pkgs, p) + } + return +} + +func removeRelationships(relationships []*spdx.Relationship, spdxID spdx.ElementID) (relations []*spdx.Relationship) { + for _, r := range relationships { + if r.RefA.ElementRefID == spdxID || r.RefB.ElementRefID == spdxID { + continue + } + relations = append(relations, r) + } + return +} + +func findRootPackages(doc *spdx.Document) (out []*spdx.Package) { + for _, p := range doc.Packages { + for _, r := range doc.Relationships { + describes := r.RefA.ElementRefID == "DOCUMENT" && + r.Relationship == spdx.RelationshipDescribes && + r.RefB.ElementRefID == p.PackageSPDXIdentifier + + describedBy := r.RefB.ElementRefID == "DOCUMENT" && + r.Relationship == spdx.RelationshipDescribedBy && + r.RefA.ElementRefID == p.PackageSPDXIdentifier + + if !describes && !describedBy { + continue + } + + out = append(out, p) + } + } + return +} + +func extractSource(spdxIDMap map[string]any, doc *spdx.Document) source.Description { + src := extractSourceFromNamespace(doc.DocumentNamespace) + + rootPackages := findRootPackages(doc) + + if len(rootPackages) != 1 { + return src + } + + p := rootPackages[0] + + switch p.PrimaryPackagePurpose { + case spdxPrimaryPurposeContainer: + src = containerSource(p) + case spdxPrimaryPurposeFile: + src = fileSource(p) + default: + return src + } + + spdxIDMap[string(p.PackageSPDXIdentifier)] = src + + doc.Packages = removePackage(doc.Packages, p) + doc.Relationships = removeRelationships(doc.Relationships, p.PackageSPDXIdentifier) + + return src +} + +func containerSource(p *spdx.Package) source.Description { + id := string(p.PackageSPDXIdentifier) + + container := p.PackageName + v := p.PackageVersion + if v != "" { + container += ":" + v + } + + digest := "" + if len(p.PackageChecksums) > 0 { + c := p.PackageChecksums[0] + digest = fmt.Sprintf("%s:%s", fromChecksumAlgorithm(c.Algorithm), c.Value) + } + return source.Description{ + ID: id, + Name: p.PackageName, + Version: p.PackageVersion, + Metadata: source.StereoscopeImageSourceMetadata{ + UserInput: container, + ID: id, + Layers: nil, // TODO handle formats with nested layer packages like Tern and K8s BOM tool + ManifestDigest: digest, + }, + } +} + +func fileSource(p *spdx.Package) source.Description { + typeRegex := regexp.MustCompile("^DocumentRoot-([^-]+)-.*$") + typeName := typeRegex.ReplaceAllString(string(p.PackageSPDXIdentifier), "$1") + + var version string + var metadata any + switch { + case typeName == prefixDirectory: + // is a Syft SBOM, explicitly a directory source + metadata, version = directorySourceMetadata(p) + case typeName == prefixFile: + // is a Syft SBOM, explicitly a file source + metadata, version = fileSourceMetadata(p) + case isDirectory(p.PackageName): + // is a non-Syft SBOM, which looks like a directory + metadata, version = directorySourceMetadata(p) + default: + // is a non-Syft SBOM, which is probably a file + metadata, version = fileSourceMetadata(p) + } + + return source.Description{ + ID: string(p.PackageSPDXIdentifier), + Name: p.PackageName, + Version: version, + Metadata: metadata, + } +} + +func fileSourceMetadata(p *spdx.Package) (any, string) { + version := p.PackageVersion + + m := source.FileSourceMetadata{ + Path: p.PackageName, + } + // if this is a Syft 
SBOM, we might have output a digest as the version + checksum := toChecksum(p.PackageVersion) + for _, d := range p.PackageChecksums { + if checksum != nil && checksum.Value == d.Value { + version = "" + } + m.Digests = append(m.Digests, file.Digest{ + Algorithm: fromChecksumAlgorithm(d.Algorithm), + Value: d.Value, + }) + } + + return m, version +} + +func directorySourceMetadata(p *spdx.Package) (any, string) { + return source.DirectorySourceMetadata{ + Path: p.PackageName, + Base: "", + }, p.PackageVersion +} + // NOTE(jonas): SPDX doesn't inform what an SBOM is about, // image, directory, for example. This is our best effort to determine // the scheme. Syft-generated SBOMs have in the namespace @@ -114,15 +277,25 @@ func findLinuxReleaseByPURL(doc *spdx.Document) *linux.Release { return nil } -func collectSyftPackages(s *sbom.SBOM, spdxIDMap map[string]interface{}, doc *spdx.Document) { - for _, p := range doc.Packages { +func collectSyftPackages(s *sbom.SBOM, spdxIDMap map[string]any, packages []*spdx.Package) { + for _, p := range packages { syftPkg := toSyftPackage(p) spdxIDMap[string(p.PackageSPDXIdentifier)] = syftPkg - s.Artifacts.Packages.Add(*syftPkg) + s.Artifacts.Packages.Add(syftPkg) } } -func collectSyftFiles(s *sbom.SBOM, spdxIDMap map[string]interface{}, doc *spdx.Document) { +func collectSyftFiles(s *sbom.SBOM, spdxIDMap map[string]any, doc *spdx.Document) { + for _, p := range doc.Packages { + for _, f := range p.Files { + l := toSyftLocation(f) + spdxIDMap[string(f.FileSPDXIdentifier)] = l + + s.Artifacts.FileMetadata[l.Coordinates] = toFileMetadata(f) + s.Artifacts.FileDigests[l.Coordinates] = toFileDigests(f) + } + } + for _, f := range doc.Files { l := toSyftLocation(f) spdxIDMap[string(f.FileSPDXIdentifier)] = l @@ -135,13 +308,17 @@ func collectSyftFiles(s *sbom.SBOM, spdxIDMap map[string]interface{}, doc *spdx. func toFileDigests(f *spdx.File) (digests []file.Digest) { for _, digest := range f.Checksums { digests = append(digests, file.Digest{ - Algorithm: string(digest.Algorithm), + Algorithm: fromChecksumAlgorithm(digest.Algorithm), Value: digest.Value, }) } return digests } +func fromChecksumAlgorithm(algorithm common.ChecksumAlgorithm) string { + return strings.ToLower(string(algorithm)) +} + func toFileMetadata(f *spdx.File) (meta file.Metadata) { // FIXME Syft is currently lossy due to the SPDX 2.2.1 spec not supporting arbitrary mimetypes for _, typ := range f.FileTypes { @@ -164,21 +341,28 @@ func toFileMetadata(f *spdx.File) (meta file.Metadata) { return meta } -func toSyftRelationships(spdxIDMap map[string]interface{}, doc *spdx.Document) []artifact.Relationship { - var out []artifact.Relationship +func toSyftRelationships(spdxIDMap map[string]any, doc *spdx.Document) []artifact.Relationship { + out := collectDocRelationships(spdxIDMap, doc) + + out = append(out, collectPackageFileRelationships(spdxIDMap, doc)...) 
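// Review note (editorial, not part of the vendored code): relationship
// collection is now split into two passes: document-level relationships
// above, then one ContainsRelationship per file listed under each package
// (see collectPackageFileRelationships below). Both passes assert value types
// (pkg.Package, file.Location) rather than pointers, matching what the
// spdxIDMap stores after this change.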
+ + return out +} + +func collectDocRelationships(spdxIDMap map[string]any, doc *spdx.Document) (out []artifact.Relationship) { for _, r := range doc.Relationships { - // FIXME what to do with r.RefA.DocumentRefID and r.RefA.SpecialID + // FIXME what to do with r.RefA.DocumentRefID and r.RefA.SpecialID if r.RefA.DocumentRefID != "" && requireAndTrimPrefix(r.RefA.DocumentRefID, "DocumentRef-") != string(doc.SPDXIdentifier) { log.Debugf("ignoring relationship to external document: %+v", r) continue } a := spdxIDMap[string(r.RefA.ElementRefID)] b := spdxIDMap[string(r.RefB.ElementRefID)] - from, fromOk := a.(*pkg.Package) - toPackage, toPackageOk := b.(*pkg.Package) - toLocation, toLocationOk := b.(*file.Location) + from, fromOk := a.(pkg.Package) + toPackage, toPackageOk := b.(pkg.Package) + toLocation, toLocationOk := b.(file.Location) if !fromOk || !(toPackageOk || toLocationOk) { - log.Debugf("unable to find valid relationship mapping from SPDX 2.2 JSON, ignoring: (from: %+v) (to: %+v)", a, b) + log.Debugf("unable to find valid relationship mapping from SPDX, ignoring: (from: %+v) (to: %+v)", a, b) continue } var to artifact.Identifiable @@ -219,6 +403,30 @@ func toSyftRelationships(spdxIDMap map[string]interface{}, doc *spdx.Document) [ return out } +// collectPackageFileRelationships add relationships for direct files +func collectPackageFileRelationships(spdxIDMap map[string]any, doc *spdx.Document) (out []artifact.Relationship) { + for _, p := range doc.Packages { + a := spdxIDMap[string(p.PackageSPDXIdentifier)] + from, fromOk := a.(pkg.Package) + if !fromOk { + continue + } + for _, f := range p.Files { + b := spdxIDMap[string(f.FileSPDXIdentifier)] + to, toLocationOk := b.(file.Location) + if !toLocationOk { + continue + } + out = append(out, artifact.Relationship{ + From: from, + To: to, + Type: artifact.ContainsRelationship, + }) + } + } + return out +} + func toSyftCoordinates(f *spdx.File) file.Coordinates { const layerIDPrefix = "layerID: " var fileSystemID string @@ -234,9 +442,9 @@ func toSyftCoordinates(f *spdx.File) file.Coordinates { } } -func toSyftLocation(f *spdx.File) *file.Location { +func toSyftLocation(f *spdx.File) file.Location { l := file.NewVirtualLocationFromCoordinates(toSyftCoordinates(f), f.FileName) - return &l + return l } func requireAndTrimPrefix(val interface{}, prefix string) string { @@ -280,16 +488,16 @@ func extractPkgInfo(p *spdx.Package) pkgInfo { } } -func toSyftPackage(p *spdx.Package) *pkg.Package { +func toSyftPackage(p *spdx.Package) pkg.Package { info := extractPkgInfo(p) metadataType, metadata := extractMetadata(p, info) - sP := pkg.Package{ + sP := &pkg.Package{ Type: info.typ, Name: p.PackageName, Version: p.PackageVersion, Licenses: pkg.NewLicenseSet(parseSPDXLicenses(p)...), CPEs: extractCPEs(p), - PURL: info.purl.String(), + PURL: purlValue(info.purl), Language: info.lang, MetadataType: metadataType, Metadata: metadata, @@ -297,7 +505,15 @@ func toSyftPackage(p *spdx.Package) *pkg.Package { sP.SetID() - return &sP + return *sP +} + +func purlValue(purl packageurl.PackageURL) string { + val := purl.String() + if _, err := packageurl.FromString(val); err != nil { + return "" + } + return val } func parseSPDXLicenses(p *spdx.Package) []pkg.License { @@ -321,10 +537,7 @@ func parseSPDXLicenses(p *spdx.Package) []pkg.License { } func cleanSPDXID(id string) string { - if strings.HasPrefix(id, "LicenseRef-") { - return strings.TrimPrefix(id, "LicenseRef-") - } - return id + return strings.TrimPrefix(id, spdxlicense.LicenseRefPrefix) } 
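// Review sketch (editorial, not part of the vendored code) of how purlValue
// above behaves, with illustrative values only: the candidate purl is
// rendered and re-parsed, and anything that does not survive the round trip
// is dropped rather than emitted as a malformed PURL.
//
//	ok := packageurl.PackageURL{Type: "golang", Namespace: "github.com/x", Name: "y", Version: "v1.0.0"}
//	purlValue(ok)                      // "pkg:golang/github.com/x/y@v1.0.0"
//	purlValue(packageurl.PackageURL{}) // "" (the zero value fails to re-parse)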
//nolint:funlen @@ -384,7 +597,7 @@ func extractMetadata(p *spdx.Package, info pkgInfo) (pkg.MetadataType, interface case pkg.JavaPkg: var digests []file.Digest for _, value := range p.PackageChecksums { - digests = append(digests, file.Digest{Algorithm: string(value.Algorithm), Value: value.Value}) + digests = append(digests, file.Digest{Algorithm: fromChecksumAlgorithm(value.Algorithm), Value: value.Value}) } return pkg.JavaMetadataType, pkg.JavaMetadata{ ArchiveDigests: digests, @@ -392,7 +605,7 @@ func extractMetadata(p *spdx.Package, info pkgInfo) (pkg.MetadataType, interface case pkg.GoModulePkg: var h1Digest string for _, value := range p.PackageChecksums { - digest, err := util.HDigestFromSHA(string(value.Algorithm), value.Value) + digest, err := util.HDigestFromSHA(fromChecksumAlgorithm(value.Algorithm), value.Value) if err != nil { log.Debugf("invalid h1digest: %v %v", value, err) continue diff --git a/vendor/github.com/anchore/syft/syft/lib.go b/vendor/github.com/anchore/syft/syft/lib.go index b4530701..d8ef7617 100644 --- a/vendor/github.com/anchore/syft/syft/lib.go +++ b/vendor/github.com/anchore/syft/syft/lib.go @@ -74,11 +74,33 @@ func CatalogPackages(src source.Source, cfg cataloger.Config) (*pkg.Collection, catalog, relationships, err := cataloger.Catalog(resolver, release, cfg.Parallelism, catalogers...) - relationships = append(relationships, newSourceRelationshipsFromCatalog(src, catalog)...) + // apply exclusions to the package catalog + // default config value for this is true + // https://github.com/anchore/syft/issues/931 + if cfg.ExcludeBinaryOverlapByOwnership { + for _, r := range relationships { + if cataloger.ExcludeBinaryByFileOwnershipOverlap(r, catalog) { + catalog.Delete(r.To.ID()) + relationships = removeRelationshipsByID(relationships, r.To.ID()) + } + } + } + // no need to consider source relationships for os -> binary exclusions + relationships = append(relationships, newSourceRelationshipsFromCatalog(src, catalog)...) return catalog, relationships, release, err } +func removeRelationshipsByID(relationships []artifact.Relationship, id artifact.ID) []artifact.Relationship { + var filtered []artifact.Relationship + for _, r := range relationships { + if r.To.ID() != id && r.From.ID() != id { + filtered = append(filtered, r) + } + } + return filtered +} + func newSourceRelationshipsFromCatalog(src source.Source, c *pkg.Collection) []artifact.Relationship { relationships := make([]artifact.Relationship, 0) // Should we pre-allocate this by giving catalog a Len() method? 
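// Review note (editorial, not part of the vendored code): in CatalogPackages
// above, when ExcludeBinaryOverlapByOwnership is enabled (the default, per
// https://github.com/anchore/syft/issues/931), a package whose files overlap
// an OS-owned package by file ownership is deleted from the catalog and every
// relationship touching its ID is filtered out via removeRelationshipsByID
// before the source relationships here are built, so excluded binaries never
// receive a source edge.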
for p := range c.Enumerate() { diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/binary/default_classifiers.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/binary/default_classifiers.go index 5c2f2e17..93ed25d3 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/binary/default_classifiers.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/binary/default_classifiers.go @@ -254,6 +254,21 @@ var defaultClassifiers = []classifier{ PURL: mustPURL("pkg:golang/github.com/hashicorp/consul@version"), CPEs: singleCPE("cpe:2.3:a:hashicorp:consul:*:*:*:*:*:*:*:*"), }, + { + Class: "nginx-binary", + FileGlob: "**/nginx", + EvidenceMatcher: fileContentsVersionMatcher( + // [NUL]nginx version: nginx/1.25.1 - fetches '1.25.1' + // [NUL]nginx version: openresty/1.21.4.1 - fetches '1.21.4' as this is the nginx version part + `(?m)(\x00|\?)nginx version: [^\/]+\/(?P<version>[0-9]+\.[0-9]+\.[0-9]+(?:\+\d+)?(?:-\d+)?)`, + ), + Package: "nginx", + PURL: mustPURL("pkg:generic/nginx@version"), + CPEs: []cpe.CPE{ + cpe.Must("cpe:2.3:a:f5:nginx:*:*:*:*:*:*:*:*"), + cpe.Must("cpe:2.3:a:nginx:nginx:*:*:*:*:*:*:*:*"), + }, + }, } // in both binaries and shared libraries, the version pattern is [NUL]3.11.2[NUL] diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/catalog.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/catalog.go index f982223e..840ea72b 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/catalog.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/catalog.go @@ -76,7 +76,14 @@ func runCataloger(cataloger pkg.Cataloger, resolver file.Resolver) (catalogerRes for _, p := range packages { // generate CPEs (note: this is excluded from package ID, so is safe to mutate) // we might have binary classified CPE already with the package so we want to append here - p.CPEs = append(p.CPEs, cpe.Generate(p)...) + + dictionaryCPE, ok := cpe.DictionaryFind(p) + if ok { + log.Debugf("used CPE dictionary to find CPE for %s package %q: %s", p.Type, p.Name, dictionaryCPE.BindToFmtString()) + p.CPEs = append(p.CPEs, dictionaryCPE) + } else { + p.CPEs = append(p.CPEs, cpe.Generate(p)...) + } // if we were not able to identify the language we have an opportunity // to try and get this value from the PURL.
Worst case we assert that diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/cataloger.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/cataloger.go index 78a99584..2d358002 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/cataloger.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/cataloger.go @@ -45,8 +45,8 @@ func ImageCatalogers(cfg Config) []pkg.Cataloger { apkdb.NewApkdbCataloger(), binary.NewCataloger(), deb.NewDpkgdbCataloger(), - dotnet.NewDotnetDepsCataloger(), - golang.NewGoModuleBinaryCataloger(cfg.Go()), + dotnet.NewDotnetPortableExecutableCataloger(), + golang.NewGoModuleBinaryCataloger(cfg.Golang), java.NewJavaCataloger(cfg.Java()), java.NewNativeImageCataloger(), javascript.NewPackageCataloger(), @@ -71,10 +71,11 @@ func DirectoryCatalogers(cfg Config) []pkg.Cataloger { dart.NewPubspecLockCataloger(), deb.NewDpkgdbCataloger(), dotnet.NewDotnetDepsCataloger(), + dotnet.NewDotnetPortableExecutableCataloger(), elixir.NewMixLockCataloger(), erlang.NewRebarLockCataloger(), - golang.NewGoModFileCataloger(cfg.Go()), - golang.NewGoModuleBinaryCataloger(cfg.Go()), + golang.NewGoModFileCataloger(cfg.Golang), + golang.NewGoModuleBinaryCataloger(cfg.Golang), haskell.NewHackageCataloger(), java.NewJavaCataloger(cfg.Java()), java.NewJavaGradleLockfileCataloger(), @@ -84,7 +85,7 @@ func DirectoryCatalogers(cfg Config) []pkg.Cataloger { nix.NewStoreCataloger(), php.NewComposerLockCataloger(), portage.NewPortageCataloger(), - python.NewPythonIndexCataloger(), + python.NewPythonIndexCataloger(cfg.Python), python.NewPythonPackageCataloger(), rpm.NewFileCataloger(), rpm.NewRpmDBCataloger(), @@ -92,6 +93,7 @@ func DirectoryCatalogers(cfg Config) []pkg.Cataloger { rust.NewCargoLockCataloger(), sbom.NewSBOMCataloger(), swift.NewCocoapodsCataloger(), + swift.NewSwiftPackageManagerCataloger(), }, cfg.Catalogers) } @@ -105,10 +107,11 @@ func AllCatalogers(cfg Config) []pkg.Cataloger { dart.NewPubspecLockCataloger(), deb.NewDpkgdbCataloger(), dotnet.NewDotnetDepsCataloger(), + dotnet.NewDotnetPortableExecutableCataloger(), elixir.NewMixLockCataloger(), erlang.NewRebarLockCataloger(), - golang.NewGoModFileCataloger(cfg.Go()), - golang.NewGoModuleBinaryCataloger(cfg.Go()), + golang.NewGoModFileCataloger(cfg.Golang), + golang.NewGoModuleBinaryCataloger(cfg.Golang), haskell.NewHackageCataloger(), java.NewJavaCataloger(cfg.Java()), java.NewJavaGradleLockfileCataloger(), @@ -116,12 +119,12 @@ func AllCatalogers(cfg Config) []pkg.Cataloger { java.NewNativeImageCataloger(), javascript.NewLockCataloger(), javascript.NewPackageCataloger(), - kernel.NewLinuxKernelCataloger(cfg.Kernel()), + kernel.NewLinuxKernelCataloger(cfg.LinuxKernel), nix.NewStoreCataloger(), php.NewComposerInstalledCataloger(), php.NewComposerLockCataloger(), portage.NewPortageCataloger(), - python.NewPythonIndexCataloger(), + python.NewPythonIndexCataloger(cfg.Python), python.NewPythonPackageCataloger(), r.NewPackageCataloger(), rpm.NewFileCataloger(), @@ -132,6 +135,7 @@ func AllCatalogers(cfg Config) []pkg.Cataloger { rust.NewCargoLockCataloger(), sbom.NewSBOMCataloger(), swift.NewCocoapodsCataloger(), + swift.NewSwiftPackageManagerCataloger(), }, cfg.Catalogers) } diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/common/cpe/dictionary/data/cpe-index.json b/vendor/github.com/anchore/syft/syft/pkg/cataloger/common/cpe/dictionary/data/cpe-index.json new file mode 100644 index 00000000..5e357e2b --- /dev/null +++ 
b/vendor/github.com/anchore/syft/syft/pkg/cataloger/common/cpe/dictionary/data/cpe-index.json @@ -0,0 +1,1298 @@ +{ + "ecosystems": { + "jenkins_plugins": { + "accurev": "cpe:2.3:a:jenkins:accurev:*:*:*:*:*:jenkins:*:*", + "active-choices": "cpe:2.3:a:jenkins:active_choices:*:*:*:*:*:jenkins:*:*", + "active-directory": "cpe:2.3:a:jenkins:active_directory:*:*:*:*:*:jenkins:*:*", + "alauda-kubernetes-support": "cpe:2.3:a:jenkins:alauda_kubernetes_support:*:*:*:*:*:jenkins:*:*", + "analysis-core": "cpe:2.3:a:jenkins:static_analysis_utilities:*:*:*:*:*:jenkins:*:*", + "anchore-container-scanner": "cpe:2.3:a:jenkins:anchore_container_image_scanner:*:*:*:*:*:jenkins:*:*", + "android-lint": "cpe:2.3:a:jenkins:android_lint:*:*:*:*:*:jenkins:*:*", + "ansible": "cpe:2.3:a:jenkins:ansible:*:*:*:*:*:jenkins:*:*", + "ansible-tower": "cpe:2.3:a:jenkins:ansible_tower:*:*:*:*:*:jenkins:*:*", + "app-detector": "cpe:2.3:a:jenkins:application_detector:*:*:*:*:*:jenkins:*:*", + "appdynamics": "cpe:2.3:a:jenkins:appdynamics:*:*:*:*:*:jenkins:*:*", + "aqua-microscanner": "cpe:2.3:a:jenkins:aqua_microscanner:*:*:*:*:*:jenkins:*:*", + "aqua-serverless": "cpe:2.3:a:jenkins:aqua_security_severless_scanner:*:*:*:*:*:jenkins:*:*", + "artifact-repository-parameter": "cpe:2.3:a:jenkins:artifact_repository_parameter:*:*:*:*:*:jenkins:*:*", + "assembla": "cpe:2.3:a:jenkins:assembla:*:*:*:*:*:jenkins:*:*", + "atlassian-bitbucket-server-integration": "cpe:2.3:a:jenkins:bitbucket_server_integration:*:*:*:*:*:jenkins:*:*", + "autocomplete-parameter": "cpe:2.3:a:jenkins:autocomplete_parameter:*:*:*:*:*:jenkins:*:*", + "avatar": "cpe:2.3:a:jenkins:avatar:*:*:*:*:*:jenkins:*:*", + "aws-beanstalk-publisher": "cpe:2.3:a:jenkins:aws_elastic_beanstalk_publisher:*:*:*:*:*:jenkins:*:*", + "aws-cloudwatch-logs-publisher": "cpe:2.3:a:jenkins:aws_cloudwatch_logs_publisher:*:*:*:*:*:jenkins:*:*", + "aws-codebuild": "cpe:2.3:a:jenkins:aws_codebuild:*:*:*:*:*:jenkins:*:*", + "aws-codedeploy": "cpe:2.3:a:jenkins:aws_codedeploy:*:*:*:*:*:jenkins:*:*", + "aws-global-configuration": "cpe:2.3:a:jenkins:aws_global_configuration:*:*:*:*:*:jenkins:*:*", + "aws-sam": "cpe:2.3:a:jenkins:amazon_web_services_service_application_model:*:*:*:*:*:jenkins:*:*", + "awseb-deployment": "cpe:2.3:a:jenkins:awseb_deployment:*:*:*:*:*:jenkins:*:*", + "azure-ad": "cpe:2.3:a:jenkins:azure_ad:*:*:*:*:*:jenkins:*:*", + "azure-publishersettings-credentials": "cpe:2.3:a:jenkins:azure_publishersettings_credentials:*:*:*:*:*:jenkins:*:*", + "azure-vm-agents": "cpe:2.3:a:jenkins:azure_vm_agents:*:*:*:*:*:jenkins:*:*", + "backlog": "cpe:2.3:a:jenkins:backlog:*:*:*:*:*:jenkins:*:*", + "badge": "cpe:2.3:a:jenkins:badge:*:*:*:*:*:jenkins:*:*", + "batch-task": "cpe:2.3:a:jenkins:batch_task:*:*:*:*:*:jenkins:*:*", + "bitbucket-approve": "cpe:2.3:a:jenkins:bitbucket_approve:*:*:*:*:*:jenkins:*:*", + "bitbucket-branch-source": "cpe:2.3:a:jenkins:bitbucket_branch_source:*:*:*:*:*:jenkins:*:*", + "bitbucket-oauth": "cpe:2.3:a:jenkins:bitbucket_oauth:*:*:*:*:*:jenkins:*:*", + "blackduck-hub": "cpe:2.3:a:jenkins:black_duck_hub:*:*:*:*:*:jenkins:*:*", + "brakeman": "cpe:2.3:a:jenkins:brakeman:*:*:*:*:*:jenkins:*:*", + "build-failure-analyzer": "cpe:2.3:a:jenkins:build_failure_analyzer:*:*:*:*:*:jenkins:*:*", + "build-metrics": "cpe:2.3:a:jenkins:build-metrics:*:*:*:*:*:jenkins:*:*", + "build-pipeline": "cpe:2.3:a:jenkins:build_pipeline:*:*:*:*:*:jenkins:*:*", + "build-with-parameters": "cpe:2.3:a:jenkins:build_with_parameters:*:*:*:*:*:jenkins:*:*", + "buildgraph-view": 
"cpe:2.3:a:jenkins:buildgraph-view:*:*:*:*:*:jenkins:*:*", + "cas": "cpe:2.3:a:jenkins:cas:*:*:*:*:*:jenkins:*:*", + "catalogic-ecx": "cpe:2.3:a:jenkins:ecx_copy_data_management:*:*:*:*:*:jenkins:*:*", + "cccc": "cpe:2.3:a:jenkins:cccc:*:*:*:*:*:jenkins:*:*", + "ccm": "cpe:2.3:a:jenkins:ccm:*:*:*:*:*:jenkins:*:*", + "claim": "cpe:2.3:a:jenkins:claim:*:*:*:*:*:jenkins:*:*", + "clearcase-release": "cpe:2.3:a:jenkins:clearcase_release:*:*:*:*:*:jenkins:*:*", + "cloud-stats": "cpe:2.3:a:jenkins:cloud_statistics:*:*:*:*:*:jenkins:*:*", + "cloudbees-jenkins-advisor": "cpe:2.3:a:jenkins:health_advisor_by_cloudbees:*:*:*:*:*:jenkins:*:*", + "cloudfoundry": "cpe:2.3:a:jenkins:cloud_foundry:*:*:*:*:*:jenkins:*:*", + "cobertura": "cpe:2.3:a:jenkins:cobertura:*:*:*:*:*:jenkins:*:*", + "code-coverage-api": "cpe:2.3:a:jenkins:code_coverage_api:*:*:*:*:*:jenkins:*:*", + "codefresh": "cpe:2.3:a:jenkins:codefresh_integration:*:*:*:*:*:jenkins:*:*", + "codescan": "cpe:2.3:a:jenkins:codescan:*:*:*:*:*:jenkins:*:*", + "collabnet": "cpe:2.3:a:jenkins:collabnet:*:*:*:*:*:jenkins:*:*", + "compact-columns": "cpe:2.3:a:jenkins:compact_columns:*:*:*:*:*:jenkins:*:*", + "computer-queue": "cpe:2.3:a:jenkins:computer_queue:*:*:*:*:*:jenkins:*:*", + "configuration-as-code": "cpe:2.3:a:jenkins:configuration_as_code:*:*:*:*:*:jenkins:*:*", + "configurationslicing": "cpe:2.3:a:jenkins:configuration_slicing:*:*:*:*:*:jenkins:*:*", + "copy-to-slave": "cpe:2.3:a:jenkins:copy_to_slave:*:*:*:*:*:jenkins:*:*", + "copyartifact": "cpe:2.3:a:jenkins:copy_artifact:*:*:*:*:*:jenkins:*:*", + "couchdb-statistics": "cpe:2.3:a:jenkins:couchdb-statistics:*:*:*:*:*:jenkins:*:*", + "covcomplplot": "cpe:2.3:a:jenkins:coverage\\/complexity_scatter_plot:*:*:*:*:*:jenkins:*:*", + "coverity": "cpe:2.3:a:jenkins:coverity:*:*:*:*:*:jenkins:*:*", + "cppncss": "cpe:2.3:a:jenkins:cppncss:*:*:*:*:*:jenkins:*:*", + "crap4j": "cpe:2.3:a:jenkins:crap4j:*:*:*:*:*:jenkins:*:*", + "crx-content-package-deployer": "cpe:2.3:a:jenkins:crx_content_package_deployer:*:*:*:*:*:jenkins:*:*", + "cryptomove": "cpe:2.3:a:jenkins:cryptomove:*:*:*:*:*:jenkins:*:*", + "cucumber-living-documentation": "cpe:2.3:a:jenkins:cucumber_living_documentation:*:*:*:*:*:jenkins:*:*", + "custom-job-icon": "cpe:2.3:a:jenkins:custom_job_icon:*:*:*:*:*:jenkins:*:*", + "cvs": "cpe:2.3:a:jenkins:current_versions_systems:*:*:*:*:*:jenkins:*:*", + "database": "cpe:2.3:a:jenkins:database:*:*:*:*:*:jenkins:*:*", + "datadog": "cpe:2.3:a:jenkins:datadog:*:*:*:*:*:jenkins:*:*", + "debian-package-builder": "cpe:2.3:a:jenkins:debian_package_builder:*:*:*:*:*:jenkins:*:*", + "delphix": "cpe:2.3:a:jenkins:delphix:*:*:*:*:*:jenkins:*:*", + "dependency-check": "cpe:2.3:a:jenkins:owasp_dependency-check:*:*:*:*:*:jenkins:*:*", + "dependency-track": "cpe:2.3:a:jenkins:owasp_dependency-track:*:*:*:*:*:jenkins:*:*", + "deploy": "cpe:2.3:a:jenkins:deploy:*:*:*:*:*:jenkins:*:*", + "description-column": "cpe:2.3:a:jenkins:description_column:*:*:*:*:*:jenkins:*:*", + "dingding-notifications": "cpe:2.3:a:jenkins:dingding:*:*:*:*:*:jenkins:*:*", + "docker": "cpe:2.3:a:jenkins:docker:*:*:*:*:*:jenkins:*:*", + "dry": "cpe:2.3:a:jenkins:dry:*:*:*:*:*:jenkins:*:*", + "dynamic-extended-choice-parameter": "cpe:2.3:a:jenkins:dynamic_extended_choice_parameter:*:*:*:*:*:jenkins:*:*", + "dynatrace": "cpe:2.3:a:jenkins:dynatrace_application_monitoring:*:*:*:*:*:jenkins:*:*", + "ec2": "cpe:2.3:a:jenkins:ec2:*:*:*:*:*:jenkins:*:*", + "echarts-api": "cpe:2.3:a:jenkins:echarts_api:*:*:*:*:*:jenkins:*:*", + "ecs-publisher": 
"cpe:2.3:a:trustsource:ecs_publisher:*:*:*:*:*:jenkins:*:*", + "eggplant": "cpe:2.3:a:jenkins:eggplant:*:*:*:*:*:jenkins:*:*", + "elasticbox": "cpe:2.3:a:jenkins:elasticbox_ci:*:*:*:*:*:jenkins:*:*", + "electricflow": "cpe:2.3:a:jenkins:electricflow:*:*:*:*:*:jenkins:*:*", + "eloyente": "cpe:2.3:a:jenkins:eloyente:*:*:*:*:*:jenkins:*:*", + "email-ext": "cpe:2.3:a:jenkins:email_extension:*:*:*:*:*:jenkins:*:*", + "embeddable-build-status": "cpe:2.3:a:jenkins:embeddable_build_status:*:*:*:*:*:jenkins:*:*", + "environment-manager-tools": "cpe:2.3:a:jenkins:parasoft_environment_manager:*:*:*:*:*:jenkins:*:*", + "extensivetesting": "cpe:2.3:a:jenkins:extensive_testing:*:*:*:*:*:jenkins:*:*", + "favorite": "cpe:2.3:a:jenkins:favorite_plugin:*:*:*:*:*:jenkins:*:*", + "filesystem_scm": "cpe:2.3:a:jenkins:file_system_scm:*:*:*:*:*:jenkins:*:*", + "fireline": "cpe:2.3:a:jenkins:360_fireline:*:*:*:*:*:jenkins:*:*", + "flaky-test-handler": "cpe:2.3:a:jenkins:flaky_test_handler:*:*:*:*:*:*:*:*", + "fortify-on-demand-uploader": "cpe:2.3:a:jenkins:fortify_on_demand_uploader:*:*:*:*:*:jenkins:*:*", + "gatling": "cpe:2.3:a:jenkins:gatling:*:*:*:*:*:jenkins:*:*", + "gerrit-trigger": "cpe:2.3:a:jenkins:gerrit_trigger:*:*:*:*:*:jenkins:*:*", + "git": "cpe:2.3:a:jenkins:git:*:*:*:*:*:jenkins:*:*", + "git-changelog": "cpe:2.3:a:jenkins:git_changelog:*:*:*:*:*:jenkins:*:*", + "git-parameter": "cpe:2.3:a:jenkins:git_parameter:*:*:*:*:*:jenkins:*:*", + "github-branch-source": "cpe:2.3:a:jenkins:github_branch_source:*:*:*:*:*:jenkins:*:*", + "github-coverage-reporter": "cpe:2.3:a:jenkins:github_coverage_reporter:*:*:*:*:*:jenkins:*:*", + "github-oauth": "cpe:2.3:a:jenkins:github_oauth:*:*:*:*:*:jenkins:*:*", + "gitlab": "cpe:2.3:a:jenkins:gitlab:*:*:*:*:*:jenkins:*:*", + "gitlab-oauth": "cpe:2.3:a:jenkins:gitlab_oauth:*:*:*:*:*:jenkins:*:*", + "gogs-webhook": "cpe:2.3:a:jenkins:gogs:*:*:*:*:*:jenkins:*:*", + "google-kubernetes-engine": "cpe:2.3:a:jenkins:google_kubernetes_engine:*:*:*:*:*:jenkins:*:*", + "google-oauth": "cpe:2.3:a:jenkins:google_oauth_credentials:*:*:*:*:*:jenkins:*:*", + "google-play-android-publisher": "cpe:2.3:a:jenkins:google-play-android-publisher:*:*:*:*:*:jenkins:*:*", + "groovy": "cpe:2.3:a:jenkins:groovy:*:*:*:*:*:jenkins:*:*", + "harvest": "cpe:2.3:a:jenkins:harvest_scm:*:*:*:*:*:jenkins:*:*", + "hipchat": "cpe:2.3:a:atlassian:hipchat:*:*:*:*:*:jenkins:*:*", + "hockeyapp": "cpe:2.3:a:jenkins:hockeyapp:*:*:*:*:*:jenkins:*:*", + "hp-quality-center": "cpe:2.3:a:hp_application_lifecycle_management_quality_center_project:hp_application_lifecycle_management_quality_center:*:*:*:*:*:jenkins:*:*", + "htmlpublisher": "cpe:2.3:a:jenkins:html_publisher:*:*:*:*:*:jenkins:*:*", + "http-request": "cpe:2.3:a:jenkins:http_request:*:*:*:*:*:jenkins:*:*", + "ibm-asoc": "cpe:2.3:a:jenkins:ibm_application_security_on_cloud:*:*:*:*:*:jenkins:*:*", + "image-gallery": "cpe:2.3:a:jenkins:image_gallery:*:*:*:*:*:jenkins:*:*", + "implied-labels": "cpe:2.3:a:jenkins:implied_labels:*:*:*:*:*:jenkins:*:*", + "incapptic-connect-uploader": "cpe:2.3:a:jenkins:incapptic_connect_uploader:*:*:*:*:*:jenkins:*:*", + "influxdb": "cpe:2.3:a:eficode:influxdb:*:*:*:*:*:jenkins:*:*", + "instant-messaging": "cpe:2.3:a:jenkins:instant-messaging:*:*:*:*:*:jenkins:*:*", + "ircbot": "cpe:2.3:a:jenkins:irc:*:*:*:*:*:jenkins:*:*", + "jclouds": "cpe:2.3:a:jenkins:jclouds:*:*:*:*:*:jenkins:*:*", + "jenkins-cloudformation": "cpe:2.3:a:jenkins:jenkins-cloudformation-plugin:*:*:*:*:*:jenkins:*:*", + "jira": 
"cpe:2.3:a:jenkins:jira:*:*:*:*:*:jenkins:*:*", + "jira-issue-updater": "cpe:2.3:a:jenkins:jira_issue_updater:*:*:*:*:*:jenkins:*:*", + "job-dsl": "cpe:2.3:a:jenkins:job_dsl:*:*:*:*:*:jenkins:*:*", + "job-import": "cpe:2.3:a:jenkins:job_import:*:*:*:*:*:jenkins:*:*", + "jobConfigHistory": "cpe:2.3:a:jobconfighistory_project:jobconfighistory:*:*:*:*:*:jenkins:*:*", + "jobgenerator": "cpe:2.3:a:jenkins:job_generator:*:*:*:*:*:jenkins:*:*", + "jsgames": "cpe:2.3:a:jenkins:jsgames:*:*:*:*:*:jenkins:*:*", + "junit": "cpe:2.3:a:jenkins:junit:*:*:*:*:*:jenkins:*:*", + "jx-resources": "cpe:2.3:a:jenkins:jx_resources:*:*:*:*:*:jenkins:*:*", + "kanboard": "cpe:2.3:a:jenkins:kanboard:*:*:*:*:*:jenkins:*:*", + "klocwork": "cpe:2.3:a:jenkins:klocwork_analysis:*:*:*:*:*:jenkins:*:*", + "kubernetes": "cpe:2.3:a:jenkins:kubernetes:*:*:*:*:*:jenkins:*:*", + "kubernetes-ci": "cpe:2.3:a:jenkins:kubernetes_ci:*:*:*:*:*:jenkins:*:*", + "kubernetes-pipeline": "cpe:2.3:a:jenkins:kubernetes_pipeline:*:*:*:*:*:jenkins:*:*", + "libvirt-slave": "cpe:2.3:a:jenkins:libvirt_slaves:*:*:*:*:*:jenkins:*:*", + "link-column": "cpe:2.3:a:jenkins:link_column:*:*:*:*:*:jenkins:*:*", + "liquibase-runner": "cpe:2.3:a:jenkins:liquibase_runner:*:*:*:*:*:jenkins:*:*", + "literate": "cpe:2.3:a:jenkins:literate:*:*:*:*:*:jenkins:*:*", + "lockable-resources": "cpe:2.3:a:jenkins:lockable_resources:*:*:*:*:*:jenkins:*:*", + "log-parser": "cpe:2.3:a:jenkins:log_parser:*:*:*:*:*:jenkins:*:*", + "logstash": "cpe:2.3:a:jenkins:logstash:*:*:*:*:*:jenkins:*:*", + "m2release": "cpe:2.3:a:jenkins:m2release:*:*:*:*:*:jenkins:*:*", + "mac": "cpe:2.3:a:jenkins:mac:*:*:*:*:*:jenkins:*:*", + "mail-commander": "cpe:2.3:a:jenkins:mail_commander:*:*:*:*:*:jenkins:*:*", + "mailer": "cpe:2.3:a:jenkins:mailer:*:*:*:*:*:*:*:*", + "mantis": "cpe:2.3:a:jenkins:mantis:*:*:*:*:*:jenkins:*:*", + "mashup-portlets": "cpe:2.3:a:jenkins:mashup_portlets:*:*:*:*:*:jenkins:*:*", + "mask-passwords": "cpe:2.3:a:jenkins:mask_passwords:*:*:*:*:*:jenkins:*:*", + "matrix-auth": "cpe:2.3:a:jenkins:matrix_authorization_strategy:*:*:*:*:*:jenkins:*:*", + "matrix-project": "cpe:2.3:a:jenkins:matrix_project:*:*:*:*:*:jenkins:*:*", + "mattermost": "cpe:2.3:a:jenkins:mattermost_notification:*:*:*:*:*:jenkins:*:*", + "maven": "cpe:2.3:a:jenkins:maven:*:*:*:*:*:jenkins:*:*", + "mercurial": "cpe:2.3:a:jenkins:mercurial:*:*:*:*:*:jenkins:*:*", + "mesos": "cpe:2.3:a:apache:mesos:*:*:*:*:*:jenkins:*:*", + "metrics": "cpe:2.3:a:jenkins:metrics:*:*:*:*:*:jenkins:*:*", + "mongodb": "cpe:2.3:a:jenkins:mongodb:*:*:*:*:*:jenkins:*:*", + "monitoring": "cpe:2.3:a:jenkins:monitoring:*:*:*:*:*:jenkins:*:*", + "neoload": "cpe:2.3:a:jenkins:neoload:*:*:*:*:*:jenkins:*:*", + "nerrvana": "cpe:2.3:a:jenkins:nerrvana:*:*:*:*:*:jenkins:*:*", + "nested-view": "cpe:2.3:a:jenkins:nested_view:*:*:*:*:*:jenkins:*:*", + "netsparker-cloud-scan": "cpe:2.3:a:jenkins:netsparker_cloud_scan:*:*:*:*:*:jenkins:*:*", + "ontrack": "cpe:2.3:a:jenkins:ontrack:*:*:*:*:*:jenkins:*:*", + "openshift-pipeline": "cpe:2.3:a:jenkins:openshift_pipeline:*:*:*:*:*:jenkins:*:*", + "ownership": "cpe:2.3:a:jenkins:job_and_node_ownership:*:*:*:*:*:jenkins:*:*", + "p4": "cpe:2.3:a:jenkins:p4:*:*:*:*:*:jenkins:*:*", + "pam-auth": "cpe:2.3:a:jenkins:pluggable_authentication_module:*:*:*:*:*:jenkins:*:*", + "pangolin-testrail-connector": "cpe:2.3:a:agiletestware:pangolin_connector_for_testrail:*:*:*:*:*:jenkins:*:*", + "parameterized-remote-trigger": "cpe:2.3:a:jenkins:parameterized_remote_trigger:*:*:*:*:*:jenkins:*:*", + 
"parameterized-trigger": "cpe:2.3:a:jenkins:parameterized_trigger:*:*:*:*:*:jenkins:*:*", + "parasoft-findings": "cpe:2.3:a:jenkins:parasoft_findings:*:*:*:*:*:jenkins:*:*", + "pegdown-formatter": "cpe:2.3:a:jenkins:pegdown_formatter:*:*:*:*:*:jenkins:*:*", + "perfecto": "cpe:2.3:a:jenkins:perfecto:*:*:*:*:*:jenkins:*:*", + "periodicbackup": "cpe:2.3:a:jenkins:periodic_backup:*:*:*:*:*:jenkins:*:*", + "persona": "cpe:2.3:a:jenkins:persona:*:*:*:*:*:jenkins:*:*", + "phoenix-autotest": "cpe:2.3:a:jenkins:pipeline\\:_phoenix_autotest:*:*:*:*:*:jenkins:*:*", + "pipeline-build-step": "cpe:2.3:a:jenkins:pipeline\\:_build_step:*:*:*:*:*:jenkins:*:*", + "pipeline-githubnotify-step": "cpe:2.3:a:jenkins:pipeline_github_notify_step:*:*:*:*:*:jenkins:*:*", + "pipeline-input-step": "cpe:2.3:a:jenkins:pipeline\\:input_step:*:*:*:*:*:jenkins:*:*", + "pipeline-maven": "cpe:2.3:a:jenkins:pipeline_maven_integration:*:*:*:*:*:jenkins:*:*", + "play": "cpe:2.3:a:jenkins:play_framework:*:*:*:*:*:jenkins:*:*", + "pollscm": "cpe:2.3:a:jenkins:poll_scm:*:*:*:*:*:jenkins:*:*", + "port-allocator": "cpe:2.3:a:jenkins:port_allocator:*:*:*:*:*:jenkins:*:*", + "proxmox": "cpe:2.3:a:jenkins:proxmox:*:*:*:*:*:jenkins:*:*", + "publish-over-ftp": "cpe:2.3:a:jenkins:publish_over_ftp:*:*:*:*:*:jenkins:*:*", + "publish-over-ssh": "cpe:2.3:a:jenkins:publish_over_ssh:*:*:*:*:*:jenkins:*:*", + "puppet-enterprise-pipeline": "cpe:2.3:a:jenkins:puppet_enterprise_pipeline:*:*:*:*:*:jenkins:*:*", + "pwauth": "cpe:2.3:a:jenkins:pwauth_security_realm:*:*:*:*:*:jenkins:*:*", + "quality-gates": "cpe:2.3:a:jenkins:quality_gates:*:*:*:*:*:jenkins:*:*", + "queue-cleanup": "cpe:2.3:a:jenkins:queue_cleanup:*:*:*:*:*:jenkins:*:*", + "radargun": "cpe:2.3:a:jenkins:radargun:*:*:*:*:*:jenkins:*:*", + "radiatorview": "cpe:2.3:a:jenkins:radiator_view:*:*:*:*:*:jenkins:*:*", + "rapiddeploy": "cpe:2.3:a:jenkins:rapiddeploy:*:*:*:*:*:jenkins:*:*", + "rebuild": "cpe:2.3:a:rebuild_project:rebuild:*:*:*:*:*:jenkins:*:*", + "redgate-sql-ci": "cpe:2.3:a:jenkins:redgate_sql_change_automation:*:*:*:*:*:jenkins:*:*", + "remote-jobs-view": "cpe:2.3:a:jenkins:remote-jobs-view:*:*:*:*:*:jenkins:*:*", + "repository-connector": "cpe:2.3:a:jenkins:repository_connector:*:*:*:*:*:jenkins:*:*", + "resource-disposer": "cpe:2.3:a:jenkins:resource_disposer:*:*:*:*:*:jenkins:*:*", + "rocketchatnotifier": "cpe:2.3:a:jenkins:rocketchat_notifier:*:*:*:*:*:jenkins:*:*", + "role-strategy": "cpe:2.3:a:jenkins:role-based_authorization_strategy:*:*:*:*:*:jenkins:*:*", + "rpmsign": "cpe:2.3:a:jenkins:rpmsign-plugin:*:*:*:*:*:jenkins:*:*", + "rundeck": "cpe:2.3:a:jenkins:rundeck:*:*:*:*:*:jenkins:*:*", + "saltstack": "cpe:2.3:a:jenkins:saltstack:*:*:*:*:*:jenkins:*:*", + "scm-filter-jervis": "cpe:2.3:a:jenkins:source_code_management_filter_jervis:*:*:*:*:*:jenkins:*:*", + "selected-tests-executor": "cpe:2.3:a:jenkins:tests_selector:*:*:*:*:*:jenkins:*:*", + "selenium": "cpe:2.3:a:jenkins:selenium:*:*:*:*:*:jenkins:*:*", + "shelve-project": "cpe:2.3:a:jenkins:shelve_project:*:*:*:*:*:jenkins:*:*", + "sidebar-link": "cpe:2.3:a:jenkins:sidebar_link:*:*:*:*:*:jenkins:*:*", + "simple-travis-runner": "cpe:2.3:a:jenkins:simple_travis_pipeline_runner:*:*:*:*:*:jenkins:*:*", + "sitemonitor": "cpe:2.3:a:jenkins:sitemonitor:*:*:*:*:*:jenkins:*:*", + "skytap-cloud": "cpe:2.3:a:jenkins:skytap_cloud_ci:*:*:*:*:*:jenkins:*:*", + "slack": "cpe:2.3:a:jenkins:slack_notification:*:*:*:*:*:jenkins:*:*", + "sms": "cpe:2.3:a:jenkins:sms_notification:*:*:*:*:*:jenkins:*:*", + "snsnotify": 
"cpe:2.3:a:jenkins:amazon_sns_build_notifier:*:*:*:*:*:jenkins:*:*", + "soapui-pro-functional-testing": "cpe:2.3:a:jenkins:soapui_pro_functional_testing:*:*:*:*:*:jenkins:*:*", + "sofy-ai": "cpe:2.3:a:jenkins:sofy.ai:*:*:*:*:*:jenkins:*:*", + "sonar-quality-gates": "cpe:2.3:a:jenkins:sonar_quality_gates:*:*:*:*:*:jenkins:*:*", + "sonargraph-integration": "cpe:2.3:a:jenkins:sonargraph_integration:*:*:*:*:*:jenkins:*:*", + "sonarqube": "cpe:2.3:a:sonarsource:sonarqube_scanner:*:*:*:*:*:jenkins:*:*", + "sounds": "cpe:2.3:a:jenkins:sounds:*:*:*:*:*:jenkins:*:*", + "speaks": "cpe:2.3:a:jenkins:speaks\\!:*:*:*:*:*:jenkins:*:*", + "sqlplus-script-runner": "cpe:2.3:a:jenkins:sqlplus_script_runner:*:*:*:*:*:jenkins:*:*", + "ssh": "cpe:2.3:a:jenkins:ssh:*:*:*:*:*:jenkins:*:*", + "ssh-slaves": "cpe:2.3:a:jenkins:ssh_slaves:*:*:*:*:*:jenkins:*:*", + "subversion": "cpe:2.3:a:jenkins-ci:subversion-plugin:*:*:*:*:*:*:*:*", + "svn-partial-release-mgr": "cpe:2.3:a:jenkins:subversion_partial_release_manager:*:*:*:*:*:jenkins:*:*", + "svn-release-mgr": "cpe:2.3:a:jenkins:subversion_release_manager:*:*:*:*:*:jenkins:*:*", + "swamp": "cpe:2.3:a:jenkins:swamp:*:*:*:*:*:jenkins:*:*", + "tap": "cpe:2.3:a:jenkins:tap:*:*:*:*:*:jenkins:*:*", + "testlink": "cpe:2.3:a:jenkins:testlink:*:*:*:*:*:jenkins:*:*", + "tfs": "cpe:2.3:a:jenkins:team_foundation_server:*:*:*:*:*:jenkins:*:*", + "timestamper": "cpe:2.3:a:jenkins:timestamper:*:*:*:*:*:jenkins:*:*", + "tinfoil-scan": "cpe:2.3:a:jenkins:tinfoil_security:*:*:*:*:*:jenkins:*:*", + "token-macro": "cpe:2.3:a:jenkins:token_macro:*:*:*:*:*:jenkins:*:*", + "translation": "cpe:2.3:a:jenkins:translation_assistance:*:*:*:*:*:jenkins:*:*", + "twitter": "cpe:2.3:a:jenkins:twitter:*:*:*:*:*:jenkins:*:*", + "upload-pgyer": "cpe:2.3:a:jenkins:upload_to_pgyer:*:*:*:*:*:jenkins:*:*", + "urltrigger": "cpe:2.3:a:jenkins:urltrigger:*:*:*:*:*:jenkins:*:*", + "usemango-runner": "cpe:2.3:a:jenkins:usemango_runner:*:*:*:*:*:jenkins:*:*", + "valgrind": "cpe:2.3:a:jenkins:valgrind:*:*:*:*:*:jenkins:*:*", + "validating-string-parameter": "cpe:2.3:a:jenkins:validating_string_parameter:*:*:*:*:*:jenkins:*:*", + "view-cloner": "cpe:2.3:a:jenkins:view-cloner:*:*:*:*:*:jenkins:*:*", + "view26": "cpe:2.3:a:jenkins:view26_test-reporting:*:*:*:*:*:jenkins:*:*", + "violation-comments-to-gitlab": "cpe:2.3:a:jenkins:violation_comments_to_gitlab:*:*:*:*:*:jenkins:*:*", + "visualworks-store": "cpe:2.3:a:jenkins:visualworks_store:*:*:*:*:*:jenkins:*:*", + "vncrecorder": "cpe:2.3:a:jenkins:vncrecorder:*:*:*:*:*:jenkins:*:*", + "vncviewer": "cpe:2.3:a:jenkins:vncviewer:*:*:*:*:*:jenkins:*:*", + "vsphere-cloud": "cpe:2.3:a:jenkins:vsphere:*:*:*:*:*:jenkins:*:*", + "walldisplay": "cpe:2.3:a:jenkins:wall_display:*:*:*:*:*:jenkins:*:*", + "warnings": "cpe:2.3:a:jenkins:warnings:*:*:*:*:*:jenkins:*:*", + "warnings-ng": "cpe:2.3:a:jenkins:warnings_next_generation:*:*:*:*:*:jenkins:*:*", + "weblogic-deployer": "cpe:2.3:a:jenkins:deploy_weblogic:*:*:*:*:*:jenkins:*:*", + "websphere-deployer": "cpe:2.3:a:jenkins:websphere_deployer:*:*:*:*:*:jenkins:*:*", + "whitesource": "cpe:2.3:a:jenkins:white_source:*:*:*:*:*:jenkins:*:*", + "workflow-cps": "cpe:2.3:a:jenkins:pipeline\\:_groovy:*:*:*:*:*:*:*:*", + "workflow-cps-global-lib": "cpe:2.3:a:jenkins:pipeline\\:shared_groovy_libraries:*:*:*:*:*:jenkins:*:*", + "workflow-remote-loader": "cpe:2.3:a:jenkins:pipeline_remote_loader:*:*:*:*:*:jenkins:*:*", + "xcode": "cpe:2.3:a:jenkins:xcode_integration:*:*:*:*:*:jenkins:*:*", + "xunit": 
"cpe:2.3:a:jenkins:xunit:*:*:*:*:*:jenkins:*:*", + "yaml-axis": "cpe:2.3:a:jenkins:yaml_axis:*:*:*:*:*:jenkins:*:*", + "yet-another-build-visualizer": "cpe:2.3:a:jenkins:yet_another_build_visualizer:*:*:*:*:*:*:*:*", + "youtrack": "cpe:2.3:a:jenkins:youtrack-plugin:*:*:*:*:*:jenkins:*:*", + "zap": "cpe:2.3:a:jenkins:owasp_zap:*:*:*:*:*:jenkins:*:*", + "zap-pipeline": "cpe:2.3:a:jenkins:zap_pipeline:*:*:*:*:*:jenkins:*:*", + "zephyr-for-jira-test-management": "cpe:2.3:a:jenkins:zephyr_for_jira_test_management:*:*:*:*:*:jenkins:*:*", + "zulip": "cpe:2.3:a:jenkins:zulip:*:*:*:*:*:jenkins:*:*" + }, + "npm": { + "11xiaoli": "cpe:2.3:a:11xiaoli_project:11xiaoli:*:*:*:*:*:node.js:*:*", + "22lixian": "cpe:2.3:a:22lixian_project:22lixian:*:*:*:*:*:node.js:*:*", + "360class.jansenhm": "cpe:2.3:a:360class.jansenhm_project:360class.jansenhm:*:*:*:*:*:node.js:*:*", + "626": "cpe:2.3:a:626_project:626:*:*:*:*:*:node.js:*:*", + "@actions/core": "cpe:2.3:a:toolkit_project:toolkit:*:*:*:*:*:node.js:*:*", + "@awsui/components-react": "cpe:2.3:a:amazon:awsui\\/components-react:*:*:*:*:*:node.js:*:*", + "@azure/ms-rest-nodeauth": "cpe:2.3:a:microsoft:ms-rest-nodeauth:*:*:*:*:*:node.js:*:*", + "@backstage/plugin-auth-backend": "cpe:2.3:a:linuxfoundation:auth_backend:*:*:*:*:*:node.js:*:*", + "@fastly/js-compute": "cpe:2.3:a:fastly:js-compute:*:*:*:*:*:node.js:*:*", + "@firebase/util": "cpe:2.3:a:google:firebase\\/util:*:*:*:*:*:node.js:*:*", + "@github/paste-markdown": "cpe:2.3:a:paste-markdown_project:paste-markdown:*:*:*:*:*:node.js:*:*", + "@nubosoftware/node-static": "cpe:2.3:a:\\@nubosoftware\\/node-static_project:\\@nubosoftware\\/node-static:*:*:*:*:*:node.js:*:*", + "@nuxt/devalue": "cpe:2.3:a:nuxtjs:\\@nuxt\\/devalue:*:*:*:*:*:node.js:*:*", + "@progfay/scrapbox-parser": "cpe:2.3:a:scrapbox-parser_project:scrapbox-parser:*:*:*:*:*:node.js:*:*", + "@rkesters/gnuplot": "cpe:2.3:a:gnuplot_project:gnuplot:*:*:*:*:*:node.js:*:*", + "@solana/pay": "cpe:2.3:a:solanalabs:pay:*:*:*:*:*:*:*:*", + "@strikeentco/set": "cpe:2.3:a:set_project:set:*:*:*:*:*:node.js:*:*", + "@thi.ng/egf": "cpe:2.3:a:\\@thi.ng\\/egf_project:\\@thi.ng\\/egf:*:*:*:*:*:node.js:*:*", + "@zxcvbn-ts/core": "cpe:2.3:a:zxcvbn-ts_project:zxcvbn-ts:*:*:*:*:*:node.js:*:*", + "Proto": "cpe:2.3:a:proto_project:proto:*:*:*:*:*:node.js:*:*", + "Templ8": "cpe:2.3:a:templ8_project:templ8:*:*:*:*:*:node.js:*:*", + "aaptjs": "cpe:2.3:a:aaptjs_project:aaptjs:*:*:*:*:*:node.js:*:*", + "abacus-ext-cmdline": "cpe:2.3:a:abacus-ext-cmdline_project:abacus-ext-cmdline:*:*:*:*:*:node.js:*:*", + "access-policy": "cpe:2.3:a:access-policy_project:access-policy:*:*:*:*:*:node.js:*:*", + "adamvr-geoip-lite": "cpe:2.3:a:adamvr-geoip-lite_project:adamvr-geoip-lite:*:*:*:*:*:node.js:*:*", + "adb-driver": "cpe:2.3:a:adb-driver_project:adb-driver:*:*:*:*:*:node.js:*:*", + "adm-zip": "cpe:2.3:a:adm-zip_project:adm-zip:*:*:*:*:*:node.js:*:*", + "air-sdk": "cpe:2.3:a:air-sdk_project:air-sdk:*:*:*:*:*:node.js:*:*", + "algoliasearch-helper": "cpe:2.3:a:algolia:algoliasearch-helper:*:*:*:*:*:node.js:*:*", + "alto-saxophone": "cpe:2.3:a:alto-saxophone_project:alto-saxophone:*:*:*:*:*:node.js:*:*", + "angular-expressions": "cpe:2.3:a:peerigon:angular-expressions:*:*:*:*:*:node.js:*:*", + "angular-http-server": "cpe:2.3:a:angular-http-server_project:angular-http-server:*:*:*:*:*:node.js:*:*", + "ansi-html": "cpe:2.3:a:ansi-html_project:ansi-html:*:*:*:*:*:node.js:*:*", + "ansi-regex": "cpe:2.3:a:ansi-regex_project:ansi-regex:*:*:*:*:*:node.js:*:*", + "ansi2html": 
"cpe:2.3:a:ansi2html_project:ansi2html:*:*:*:*:*:node.js:*:*", + "ansi_up": "cpe:2.3:a:ansi_up_project:ansi_up:*:*:*:*:*:node.js:*:*", + "apiconnect-cli-plugins": "cpe:2.3:a:apiconnect-cli-plugins_project:apiconnect-cli-plugins:*:*:*:*:*:node.js:*:*", + "apk-parser": "cpe:2.3:a:apk-parser_project:apk-parser:*:*:*:*:*:node.js:*:*", + "apk-parser2": "cpe:2.3:a:apk-parser2_project:apk-parser2:*:*:*:*:*:node.js:*:*", + "apk-parser3": "cpe:2.3:a:apk-parser3_project:apk-parser3:*:*:*:*:*:node.js:*:*", + "argencoders-notevil": "cpe:2.3:a:argencoders-notevil_project:argencoders-notevil:*:*:*:*:*:node.js:*:*", + "arr-flatten-unflatten": "cpe:2.3:a:arr-flatten-unflatten_project:arr-flatten-unflatten:*:*:*:*:*:node.js:*:*", + "arrayfire-js": "cpe:2.3:a:arrayfire-js_project:arrayfire-js:*:*:*:*:*:node.js:*:*", + "asciitable.js": "cpe:2.3:a:asciitable.js_project:asciitable.js:*:*:*:*:*:node.js:*:*", + "assign-deep": "cpe:2.3:a:assign-deep_project:assign-deep:*:*:*:*:*:node.js:*:*", + "async-git": "cpe:2.3:a:async-git_project:async-git:*:*:*:*:*:node.js:*:*", + "atlassian-connect-express": "cpe:2.3:a:atlassian:connect_express:*:*:*:*:*:node.js:*:*", + "atob": "cpe:2.3:a:atob_project:atob:*:*:*:*:*:node.js:*:*", + "atom-node-module-installer": "cpe:2.3:a:atom-node-module-installer_project:atom-node-module-installer:*:*:*:*:*:node.js:*:*", + "augustine": "cpe:2.3:a:augustine_project:augustine:*:*:*:*:*:node.js:*:*", + "aws-lambda-multipart-parser": "cpe:2.3:a:aws-lambda-multipart-parser_project:aws-lambda-multipart-parser:*:*:*:*:*:node.js:*:*", + "babelcli": "cpe:2.3:a:babelcli_project:babelcli:*:*:*:*:*:node.js:*:*", + "backbone": "cpe:2.3:a:backbone_project:backbone:*:*:*:*:*:node.js:*:*", + "badjs-sourcemap-server": "cpe:2.3:a:badjs-sourcemap-server_project:badjs-sourcemap-server:*:*:*:*:*:node.js:*:*", + "baryton-saxophone": "cpe:2.3:a:baryton-saxophone_project:baryton-saxophone:*:*:*:*:*:node.js:*:*", + "bassmaster": "cpe:2.3:a:bassmaster_project:bassmaster:*:*:*:*:*:*:*:*", + "bestzip": "cpe:2.3:a:bestzip_project:bestzip:*:*:*:*:*:node.js:*:*", + "bionode-sra": "cpe:2.3:a:bionode:bionode-sra:*:*:*:*:*:node.js:*:*", + "bitty": "cpe:2.3:a:bitty_project:bitty:*:*:*:*:*:node.js:*:*", + "bkjs-wand": "cpe:2.3:a:bkjs-wand_project:bkjs-wand:*:*:*:*:*:node.js:*:*", + "bodymen": "cpe:2.3:a:bodymen_project:bodymen:*:*:*:*:*:node.js:*:*", + "bootstrap-select": "cpe:2.3:a:snapappointments:bootstrap-select:*:*:*:*:*:node.js:*:*", + "botbait": "cpe:2.3:a:botbait_project:botbait:*:*:*:*:*:node.js:*:*", + "box2d-native": "cpe:2.3:a:box2d-native_project:box2d-native:*:*:*:*:*:node.js:*:*", + "braces": "cpe:2.3:a:braces_project:braces:*:*:*:*:*:node.js:*:*", + "bracket-template": "cpe:2.3:a:bracket-template_project:bracket-template:*:*:*:*:*:node.js:*:*", + "broccoli-closure": "cpe:2.3:a:broccoli-closure_project:broccoli-closure:*:*:*:*:*:node.js:*:*", + "broccoli-compass": "cpe:2.3:a:broccoli-compass_project:broccoli-compass:*:*:*:*:*:node.js:*:*", + "browserify-shim": "cpe:2.3:a:browserify-shim_project:browserify-shim:*:*:*:*:*:node.js:*:*", + "browserless-chrome": "cpe:2.3:a:browserless:chrome:*:*:*:*:*:node.js:*:*", + "browserslist": "cpe:2.3:a:browserslist_project:browserslist:*:*:*:*:*:node.js:*:*", + "bson-objectid": "cpe:2.3:a:bson-objectid_project:bson-objectid:*:*:*:*:*:node.js:*:*", + "buttle": "cpe:2.3:a:buttle_project:buttle:*:*:*:*:*:node.js:*:*", + "byucslabsix": "cpe:2.3:a:byucslabsix_project:byucslabsix:*:*:*:*:*:node.js:*:*", + "cache-base": 
"cpe:2.3:a:cache-base_project:cache-base:*:*:*:*:*:node.js:*:*", + "cached-path-relative": "cpe:2.3:a:cached-path-relative_project:cached-path-relative:*:*:*:*:*:node.js:*:*", + "call": "cpe:2.3:a:call_project:call:*:*:*:*:*:node.js:*:*", + "calmquist.static-server": "cpe:2.3:a:calmquist.static-server_project:calmquist.static-server:*:*:*:*:*:node.js:*:*", + "canvas": "cpe:2.3:a:automattic:canvas:*:*:*:*:*:node.js:*:*", + "caolilinode": "cpe:2.3:a:caolilinode_project:caolilinode:*:*:*:*:*:node.js:*:*", + "ced": "cpe:2.3:a:ced_project:ced:*:*:*:*:*:node.js:*:*", + "censorify.tanisjr": "cpe:2.3:a:censorify.tanisjr_project:censorify.tanisjr:*:*:*:*:*:node.js:*:*", + "changeset": "cpe:2.3:a:changeset_project:changeset:*:*:*:*:*:node.js:*:*", + "charset": "cpe:2.3:a:charset_project:charset:*:*:*:*:*:node.js:*:*", + "chatbyvista": "cpe:2.3:a:chatbyvista_project:chatbyvista:*:*:*:*:*:node.js:*:*", + "chromedriver": "cpe:2.3:a:chromedriver_project:chromedriver:*:*:*:*:*:node.js:*:*", + "chromedriver126": "cpe:2.3:a:chromedriver126_project:chromedriver126:*:*:*:*:*:node.js:*:*", + "citypredict.whauwiller": "cpe:2.3:a:citypredict.whauwiller_project:citypredict.whauwiller:*:*:*:*:*:node.js:*:*", + "clang-extra": "cpe:2.3:a:clang-extra_project:clang-extra:*:*:*:*:*:node.js:*:*", + "class-transformer": "cpe:2.3:a:class-transformer_project:class-transformer:*:*:*:*:*:node.js:*:*", + "cli": "cpe:2.3:a:cli_project:cli:*:*:*:*:*:node.js:*:*", + "closure-compiler-stream": "cpe:2.3:a:closure-compiler-stream_project:closure-compiler-stream:*:*:*:*:*:node.js:*:*", + "closure-util": "cpe:2.3:a:closure-util_project:closure-util:*:*:*:*:*:node.js:*:*", + "closurecompiler": "cpe:2.3:a:closurecompiler_project:closurecompiler:*:*:*:*:*:node.js:*:*", + "cloudpub-redis": "cpe:2.3:a:cloudpub-redis_project:cloudpub-redis:*:*:*:*:*:node.js:*:*", + "cmake": "cpe:2.3:a:cmake_project:cmake:*:*:*:*:*:node.js:*:*", + "co-cli-installer": "cpe:2.3:a:co-cli-installer_project:co-cli-installer:*:*:*:*:*:node.js:*:*", + "cobalt-cli": "cpe:2.3:a:cobalt-cli_project:cobalt-cli:*:*:*:*:*:node.js:*:*", + "codecov": "cpe:2.3:a:codecov:codecov:*:*:*:*:*:node.js:*:*", + "cofeescript": "cpe:2.3:a:cofeescript_project:cofeescript:*:*:*:*:*:node.js:*:*", + "collection.js": "cpe:2.3:a:collection.js_project:collection.js:*:*:*:*:*:node.js:*:*", + "color-string": "cpe:2.3:a:color-string_project:color-string:*:*:*:*:*:node.js:*:*", + "comb": "cpe:2.3:a:c2fo:comb:*:*:*:*:*:node.js:*:*", + "commentapp.stetsonwood": "cpe:2.3:a:commentapp.stetsonwood_project:commentapp.stetsonwood:*:*:*:*:*:node.js:*:*", + "compass-compile": "cpe:2.3:a:compass-compile_project:compass-compile:*:*:*:*:*:node.js:*:*", + "component-flatten": "cpe:2.3:a:component-flatten_project:component-flatten:*:*:*:*:*:node.js:*:*", + "conf-cfg-ini": "cpe:2.3:a:conf-cfg-ini_project:conf-cfg-ini:*:*:*:*:*:node.js:*:*", + "confinit": "cpe:2.3:a:confinit_project:confinit:*:*:*:*:*:node.js:*:*", + "confucious": "cpe:2.3:a:realseriousgames:confucious:*:*:*:*:*:node.js:*:*", + "connect-pg-simple": "cpe:2.3:a:connect-pg-simple_project:connect-pg-simple:*:*:*:*:*:node.js:*:*", + "connection-tester": "cpe:2.3:a:connection-tester_project:connection-tester:*:*:*:*:*:node.js:*:*", + "console-io": "cpe:2.3:a:console-io_project:console-io:*:*:*:*:*:node.js:*:*", + "content": "cpe:2.3:a:content_project:content:*:*:*:*:*:node.js:*:*", + "controlled-merge": "cpe:2.3:a:controlled-merge_project:controlled-merge:*:*:*:*:*:node.js:*:*", + "convert-svg-core": 
"cpe:2.3:a:convert-svg-core_project:convert-svg-core:*:*:*:*:*:node.js:*:*", + "cookie-signature": "cpe:2.3:a:cookie-signature_project:cookie-signature:*:*:*:*:*:node.js:*:*", + "copy-props": "cpe:2.3:a:gulpjs:copy-props:*:*:*:*:*:node.js:*:*", + "corenlp-js-prefab": "cpe:2.3:a:corenlp-js-prefab_project:corenlp-js-prefab:*:*:*:*:*:node.js:*:*", + "create-choo-app3": "cpe:2.3:a:create-choo-app3_project:create-choo-app3:*:*:*:*:*:node.js:*:*", + "create-choo-electron": "cpe:2.3:a:create-choo-electron_project:create-choo-electron:*:*:*:*:*:node.js:*:*", + "cross-env.js": "cpe:2.3:a:cross-env.js_project:cross-env.js:*:*:*:*:*:node.js:*:*", + "cross-fetch": "cpe:2.3:a:cross-fetch_project:cross-fetch:*:*:*:*:*:node.js:*:*", + "crossenv": "cpe:2.3:a:crossenv_project:crossenv:*:*:*:*:*:node.js:*:*", + "crud-file-server": "cpe:2.3:a:crud-file-server_project:crud-file-server:*:*:*:*:*:node.js:*:*", + "csrf-lite": "cpe:2.3:a:csrf-lite_project:csrf-lite:*:*:*:*:*:node.js:*:*", + "cuciuci": "cpe:2.3:a:cuciuci_project:cuciuci:*:*:*:*:*:node.js:*:*", + "cue-sdk-node": "cpe:2.3:a:cue-sdk-node_project:cue-sdk-node:*:*:*:*:*:node.js:*:*", + "cumulative-distribution-function": "cpe:2.3:a:cumulative-distribution-function_project:cumulative-distribution-function:*:*:*:*:*:node.js:*:*", + "curling": "cpe:2.3:a:curling_project:curling:*:*:*:*:*:node.js:*:*", + "curljs": "cpe:2.3:a:curljs_project:curljs:*:*:*:*:*:node.js:*:*", + "curlrequest": "cpe:2.3:a:curlrequest_project:curlrequest:*:*:*:*:*:node.js:*:*", + "curly-bracket-parser": "cpe:2.3:a:curly-bracket-parser_project:curly-bracket-parser:*:*:*:*:*:node.js:*:*", + "curses": "cpe:2.3:a:curses_project:curses:*:*:*:*:*:node.js:*:*", + "cyber-js": "cpe:2.3:a:cyber-js_project:cyber-js:*:*:*:*:*:node.js:*:*", + "cypserver": "cpe:2.3:a:cypserver_project:cypserver:*:*:*:*:*:node.js:*:*", + "d3.js": "cpe:2.3:a:d3.js_project:d3.js:*:*:*:*:*:node.js:*:*", + "dasafio": "cpe:2.3:a:dasafio_project:dasafio:*:*:*:*:*:node.js:*:*", + "datachannel-client": "cpe:2.3:a:datachannel-client_project:datachannel-client:*:*:*:*:*:node.js:*:*", + "datatables.net": "cpe:2.3:a:datatables:datatables.net:*:*:*:*:*:node.js:*:*", + "date-and-time": "cpe:2.3:a:date-and-time_project:date-and-time:*:*:*:*:*:node.js:*:*", + "dawnsparks-node-tesseract": "cpe:2.3:a:dawnsparks-node-tesseract_project:dawnsparks-node-tesseract:*:*:*:*:*:node.js:*:*", + "dcdcdcdcdc": "cpe:2.3:a:dcdcdcdcdc_project:dcdcdcdcdc:*:*:*:*:*:node.js:*:*", + "dcserver": "cpe:2.3:a:dcserver_project:dcserver:*:*:*:*:*:node.js:*:*", + "deap": "cpe:2.3:a:deap_project:deap:*:*:*:*:*:node.js:*:*", + "debug": "cpe:2.3:a:debug_project:debug:*:*:*:*:*:node.js:*:*", + "decal": "cpe:2.3:a:decal_project:decal:*:*:*:*:*:node.js:*:*", + "decompress": "cpe:2.3:a:decompress_project:decompress:*:*:*:*:*:node.js:*:*", + "deep-extend": "cpe:2.3:a:deep_extend_project:deep_extend:*:*:*:*:*:node.js:*:*", + "deep-get-set": "cpe:2.3:a:deep-get-set_project:deep-get-set:*:*:*:*:*:node.js:*:*", + "deep-set": "cpe:2.3:a:deep-set_project:deep-set:*:*:*:*:*:node.js:*:*", + "deep.assign": "cpe:2.3:a:deep.assign_project:deep.assign:*:*:*:*:*:node.js:*:*", + "deeply": "cpe:2.3:a:deeply_project:deeply:*:*:*:*:*:node.js:*:*", + "deepmergefn": "cpe:2.3:a:deepmergefn_project:deepmergefn:*:*:*:*:*:node.js:*:*", + "deepref": "cpe:2.3:a:deepref_project:deepref:*:*:*:*:*:node.js:*:*", + "deeps": "cpe:2.3:a:invertase:deeps:*:*:*:*:*:node.js:*:*", + "defaults-deep": "cpe:2.3:a:defaults-deep_project:defaults-deep:*:*:*:*:*:node.js:*:*", + "desafio": 
"cpe:2.3:a:desafio_project:desafio:*:*:*:*:*:node.js:*:*", + "devcert": "cpe:2.3:a:devcert_project:devcert:*:*:*:*:*:node.js:*:*", + "dgard8.lab6": "cpe:2.3:a:dgard8.lab6_project:dgard8.lab6:*:*:*:*:*:node.js:*:*", + "discordi.js": "cpe:2.3:a:discordi.js_project:discordi.js:*:*:*:*:*:node.js:*:*", + "diskusage-ng": "cpe:2.3:a:diskusage-ng_project:diskusage-ng:*:*:*:*:*:node.js:*:*", + "djv": "cpe:2.3:a:djv_project:djv:*:*:*:*:*:node.js:*:*", + "dmmcquay.lab6": "cpe:2.3:a:dmmcquay.lab6_project:dmmcquay.lab6:*:*:*:*:*:node.js:*:*", + "docker-cli-js": "cpe:2.3:a:quobject:docker-cli-js:*:*:*:*:*:node.js:*:*", + "docker-tester": "cpe:2.3:a:docker-tester_project:docker-tester:*:*:*:*:*:node.js:*:*", + "dot-notes": "cpe:2.3:a:dot-notes_project:dot-notes:*:*:*:*:*:node.js:*:*", + "dottie": "cpe:2.3:a:dottie_project:dottie:*:*:*:*:*:node.js:*:*", + "dwebp-bin": "cpe:2.3:a:dwebp-bin_project:dwebp-bin:*:*:*:*:*:node.js:*:*", + "dylmomo": "cpe:2.3:a:dylmomo_project:dylmomo:*:*:*:*:*:node.js:*:*", + "earlybird": "cpe:2.3:a:earlybird_project:earlybird:*:*:*:*:*:node.js:*:*", + "easy-static-server": "cpe:2.3:a:easy-static-server_project:easy-static-server:*:*:*:*:*:node.js:*:*", + "easyquick": "cpe:2.3:a:easyquick_project:easyquick:*:*:*:*:*:node.js:*:*", + "ecstatic": "cpe:2.3:a:ecstatic_project:ecstatic:*:*:*:*:*:node.js:*:*", + "effect": "cpe:2.3:a:effect_project:effect:*:*:*:*:*:node.js:*:*", + "elding": "cpe:2.3:a:elding_project:elding:*:*:*:*:*:node.js:*:*", + "electron": "cpe:2.3:a:electronjs:electron:*:*:*:*:*:node.js:*:*", + "electron-packager": "cpe:2.3:a:electron-packager_project:electron-packager:*:*:*:*:*:node.js:*:*", + "elliptic": "cpe:2.3:a:elliptic_project:elliptic:*:*:*:*:*:node.js:*:*", + "engine.io-client": "cpe:2.3:a:socket:engine.io-client:*:*:*:*:*:node.js:*:*", + "enserver": "cpe:2.3:a:enserver_project:enserver:*:*:*:*:*:node.js:*:*", + "es6-crawler-detect": "cpe:2.3:a:crawlerdetect_project:crawlerdetect:*:*:*:*:*:node.js:*:*", + "eslint-fixer": "cpe:2.3:a:eslint-fixer_project:eslint-fixer:*:*:*:*:*:node.js:*:*", + "eslint-utils": "cpe:2.3:a:eslint-utils_project:eslint-utils:*:*:*:*:*:node.js:*:*", + "ewgaddis.lab6": "cpe:2.3:a:ewgaddis.lab6_project:ewgaddis.lab6:*:*:*:*:*:node.js:*:*", + "exceljs": "cpe:2.3:a:exceljs_project:exceljs:*:*:*:*:*:node.js:*:*", + "express-cart": "cpe:2.3:a:express-cart_project:express-cart:*:*:*:*:*:node.js:*:*", + "express-jwt": "cpe:2.3:a:auth0:express-jwt:*:*:*:*:*:node.js:*:*", + "express-xss-sanitizer": "cpe:2.3:a:express_xss_sanitizer_project:express_xss_sanitizer:*:*:*:*:*:node.js:*:*", + "extend": "cpe:2.3:a:extend_project:extend:*:*:*:*:*:node.js:*:*", + "exxxxxxxxxxx": "cpe:2.3:a:exxxxxxxxxxx_project:exxxxxxxxxxx:*:*:*:*:*:node.js:*:*", + "f2e-server": "cpe:2.3:a:f2e-server_project:f2e-server:*:*:*:*:*:node.js:*:*", + "fabric-js": "cpe:2.3:a:fabric-js_project:fabric-js:*:*:*:*:*:node.js:*:*", + "fancy-server": "cpe:2.3:a:fancy-server_project:fancy-server:*:*:*:*:*:node.js:*:*", + "fast-csv": "cpe:2.3:a:c2fo:fast-csv:*:*:*:*:*:node.js:*:*", + "fast-http": "cpe:2.3:a:fast-http_project:fast-http:*:*:*:*:*:node.js:*:*", + "fast-http-cli": "cpe:2.3:a:fast-http-cli_project:fast-http-cli:*:*:*:*:*:node.js:*:*", + "ffmepg": "cpe:2.3:a:ffmepg_project:ffmepg:*:*:*:*:*:node.js:*:*", + "ffmpegdotjs": "cpe:2.3:a:ffmpegdotjs_project:ffmpegdotjs:*:*:*:*:*:node.js:*:*", + "fibjs": "cpe:2.3:a:fibjs_project:fibjs:*:*:*:*:*:node.js:*:*", + "file-upload-with-preview": "cpe:2.3:a:file-upload-with-preview_project:file-upload-with-preview:*:*:*:*:*:node.js:*:*", + 
"fis-kernel": "cpe:2.3:a:baidu:fis-kernel:*:*:*:*:*:node.js:*:*", + "fis-parser-sass-bin": "cpe:2.3:a:fis-parser-sass-bin_project:fis-parser-sass-bin:*:*:*:*:*:node.js:*:*", + "fis-sass-all": "cpe:2.3:a:fis-sass-all_project:fis-sass-all:*:*:*:*:*:node.js:*:*", + "flattenizer": "cpe:2.3:a:flattenizer_project:flattenizer:*:*:*:*:*:node.js:*:*", + "forwarded": "cpe:2.3:a:forwarded_project:forwarded:*:*:*:*:*:node.js:*:*", + "frames-compiler": "cpe:2.3:a:frames-compiler_project:frames-compiler:*:*:*:*:*:node.js:*:*", + "freediskspace": "cpe:2.3:a:freediskspace_project:freediskproject:*:*:*:*:*:node.js:*:*", + "fresh": "cpe:2.3:a:fresh_project:fresh:*:*:*:*:*:node.js:*:*", + "fs-path": "cpe:2.3:a:fs-path_project:fs-path:*:*:*:*:*:node.js:*:*", + "fsk-server": "cpe:2.3:a:fsk-server_project:fsk-server:*:*:*:*:*:node.js:*:*", + "fuseki": "cpe:2.3:a:fuseki_project:fuseki:*:*:*:*:*:node.js:*:*", + "galenframework-cli": "cpe:2.3:a:galenframework:galenframework-cli:*:*:*:*:*:node.js:*:*", + "gammautils": "cpe:2.3:a:gammautils_project:gammautils:*:*:*:*:*:node.js:*:*", + "gaoxiaotingtingting": "cpe:2.3:a:gaoxiaotingtingting_project:gaoxiaotingtingting:*:*:*:*:*:node.js:*:*", + "gaoxuyan": "cpe:2.3:a:gaoxuyan_project:gaoxuyan:*:*:*:*:*:*:*:*", + "gatsby-source-wordpress": "cpe:2.3:a:gatsbyjs:gatsby-source-wordpress:*:*:*:*:*:node.js:*:*", + "gedi": "cpe:2.3:a:gedi_project:gedi:*:*:*:*:*:node.js:*:*", + "general-file-server": "cpe:2.3:a:general-file-server_project:general-file-server:*:*:*:*:*:node.js:*:*", + "geojson2kml": "cpe:2.3:a:geojson2kml_project:geojson2kml:*:*:*:*:*:node.js:*:*", + "get-ip-range": "cpe:2.3:a:get-ip-range_project:get-ip-range:*:*:*:*:*:node.js:*:*", + "get-npm-package-version": "cpe:2.3:a:get-npm-package-version_project:get-npm-package-version:*:*:*:*:*:node.js:*:*", + "getcityapi.yoehoehne": "cpe:2.3:a:getcityapi.yoehoehne_project:getcityapi.yoehoehne:*:*:*:*:*:node.js:*:*", + "getobject": "cpe:2.3:a:getobject_project:getobject:*:*:*:*:*:node.js:*:*", + "gfe-sass": "cpe:2.3:a:gfe-sass_project:gfe-sass:*:*:*:*:*:node.js:*:*", + "git": "cpe:2.3:a:git_project:git:*:*:*:*:*:node.js:*:*", + "git-add-remote": "cpe:2.3:a:git-add-remote_project:git-add-remote:*:*:*:*:*:node.js:*:*", + "git-archive": "cpe:2.3:a:git-archive_project:git-archive:*:*:*:*:*:node.js:*:*", + "git-parse": "cpe:2.3:a:wayfair:git-parse:*:*:*:*:*:node.js:*:*", + "git-promise": "cpe:2.3:a:git-promise_project:git-promise:*:*:*:*:*:node.js:*:*", + "gitblame": "cpe:2.3:a:gitblame_project:gitblame:*:*:*:*:*:node.js:*:*", + "gitlabhook": "cpe:2.3:a:gitlabhook_project:gitlabhook:*:*:*:*:*:node.js:*:*", + "gitlogplus": "cpe:2.3:a:gitlogplus_project:gitlogplus:*:*:*:*:*:node.js:*:*", + "gitsome": "cpe:2.3:a:gitsome_project:gitsome:*:*:*:*:*:node.js:*:*", + "gomeplus-h5-proxy": "cpe:2.3:a:gomeplus-h5-proxy_project:gomeplus-h5-proxy:*:*:*:*:*:node.js:*:*", + "google-closure-tools-latest": "cpe:2.3:a:google-closure-tools-latest_project:google-closure-tools-latest:*:*:*:*:*:node.js:*:*", + "goserv": "cpe:2.3:a:goserv_project:goserv:*:*:*:*:*:node.js:*:*", + "graphql-playground-html": "cpe:2.3:a:prisma:graphql-playground-html:*:*:*:*:*:node.js:*:*", + "graphql-playground-middleware-express": "cpe:2.3:a:prisma:graphql-playground-middleware-express:*:*:*:*:*:node.js:*:*", + "graphql-playground-middleware-hapi": "cpe:2.3:a:prisma:graphql-playground-middleware-hapi:*:*:*:*:*:node.js:*:*", + "graphql-playground-middleware-koa": "cpe:2.3:a:prisma:graphql-playground-middleware-koa:*:*:*:*:*:node.js:*:*", + 
"graphql-playground-middleware-lambda": "cpe:2.3:a:prisma:graphql-playground-middleware-lambda:*:*:*:*:*:node.js:*:*", + "graphql-upload": "cpe:2.3:a:graphql-upload_project:graphql-upload:*:*:*:*:*:npmjs:*:*", + "growl": "cpe:2.3:a:growl_project:growl:*:*:*:*:*:node.js:*:*", + "grunt-ccompiler": "cpe:2.3:a:grunt-ccompiler_project:grunt-ccompiler:*:*:*:*:*:node.js:*:*", + "grunt-gh-pages": "cpe:2.3:a:grunt-gh-pages_project:grunt-gh-pages:*:*:*:*:*:node.js:*:*", + "grunt-karma": "cpe:2.3:a:grunt-karma_project:grunt-karma:*:*:*:*:*:node.js:*:*", + "grunt-webdriver-qunit": "cpe:2.3:a:grunt-webdriver-qunit_project:grunt-webdriver-qunit:*:*:*:*:*:node.js:*:*", + "gruntcli": "cpe:2.3:a:gruntcli_project:gruntcli:*:*:*:*:*:node.js:*:*", + "gry": "cpe:2.3:a:gry_project:gry:*:*:*:*:*:node.js:*:*", + "gulp-tape": "cpe:2.3:a:gulp-tape_project:gulp-tape:*:*:*:*:*:node.js:*:*", + "handlebars": "cpe:2.3:a:handlebarsjs:handlebars:*:*:*:*:*:node.js:*:*", + "hapi": "cpe:2.3:a:hapijs:hapi:*:*:*:*:*:node.js:*:*", + "hapi-auth-jwt2": "cpe:2.3:a:hapi-auth-jwt2_project:hapi-auth-jwt2:*:*:*:*:*:node.js:*:*", + "harp": "cpe:2.3:a:npmjs:harp:*:*:*:*:*:*:*:*", + "haxe": "cpe:2.3:a:haxe:haxe:*:*:*:*:*:node.js:*:*", + "haxeshim": "cpe:2.3:a:haxeshim_project:haxeshim:*:*:*:*:*:node.js:*:*", + "hcbserver": "cpe:2.3:a:hcbserver_project:hcbserver:*:*:*:*:*:node.js:*:*", + "headless-browser-lite": "cpe:2.3:a:headless-browser-lite_project:headless-browser-lite:*:*:*:*:*:node.js:*:*", + "healthcenter": "cpe:2.3:a:healthcenter_project:healthcenter:*:*:*:*:*:node.js:*:*", + "hekto": "cpe:2.3:a:hekto_project:hekto:*:*:*:*:*:node.js:*:*", + "herbivore": "cpe:2.3:a:herbivore_project:herbivore:*:*:*:*:*:node.js:*:*", + "heroku-addonpool": "cpe:2.3:a:heroku-addonpool_project:heroku-addonpool:*:*:*:*:*:node.js:*:*", + "heroku-env": "cpe:2.3:a:heroku-env_project:heroku-env:*:*:*:*:*:node.js:*:*", + "hftp": "cpe:2.3:a:hftp_project:hftp:*:*:*:*:*:node.js:*:*", + "highlight.js": "cpe:2.3:a:highlightjs:highlight.js:*:*:*:*:*:node.js:*:*", + "hostr": "cpe:2.3:a:hostr_project:hostr:*:*:*:*:*:node.js:*:*", + "hot-formula-parser": "cpe:2.3:a:hot-formula-parser_project:hot-formula-parser:*:*:*:*:*:node.js:*:*", + "html-pages": "cpe:2.3:a:html-pages_project:html-pages:*:*:*:*:*:node.js:*:*", + "html-pdf": "cpe:2.3:a:html-pdf_project:html-pdf:*:*:*:*:*:node.js:*:*", + "http-cache-semantics": "cpe:2.3:a:http-cache-semantics_project:http-cache-semantics:*:*:*:*:*:node.js:*:*", + "http-client": "cpe:2.3:a:http-client_project:http-client:*:*:*:*:*:node.js:*:*", + "http-file-server": "cpe:2.3:a:http-file-server_project:http-file-server:*:*:*:*:*:node.js:*:*", + "http-live-simulator": "cpe:2.3:a:http-live-simulator_project:http-live-simulator:*:*:*:*:*:node.js:*:*", + "http-proxy": "cpe:2.3:a:http-proxy_project:http-proxy:*:*:*:*:*:node.js:*:*", + "http-proxy.js": "cpe:2.3:a:http-proxy.js_project:http-proxy.js:*:*:*:*:*:node.js:*:*", + "http-server-node": "cpe:2.3:a:http-server-node_project:http-server-node:*:*:*:*:*:node.js:*:*", + "http_static_simple": "cpe:2.3:a:http_static_simple_project:http_static_simple:*:*:*:*:*:node.js:*:*", + "https-proxy-agent": "cpe:2.3:a:https-proxy-agent_project:https-proxy-agent:*:*:*:*:*:node.js:*:*", + "httpster": "cpe:2.3:a:httpster_project:httpster:*:*:*:*:*:node.js:*:*", + "httpsync": "cpe:2.3:a:httpsync_project:httpsync:*:*:*:*:*:node.js:*:*", + "hubl-server": "cpe:2.3:a:hubspot:hubl-server:*:*:*:*:*:node.js:*:*", + "hummus": "cpe:2.3:a:hummus_project:hummus:*:*:*:*:*:node.js:*:*", + "i18n-node-angular": 
"cpe:2.3:a:i18n-node-angular_project:i18n-node-angular:*:*:*:*:node.js:*:*:*", + "ibm_db": "cpe:2.3:a:ibm:ibm_db:*:*:*:*:*:node.js:*:*", + "iedriver": "cpe:2.3:a:iedriver_project:iedriver:*:*:*:*:*:node.js:*:*", + "igniteui": "cpe:2.3:a:infragistics:igniteui:*:*:*:*:*:node.js:*:*", + "ikst": "cpe:2.3:a:ikst_project:ikst:*:*:*:*:*:node.js:*:*", + "image-tiler": "cpe:2.3:a:image-tiler_project:image-tiler:*:*:*:*:*:node.js:*:*", + "imageoptim": "cpe:2.3:a:imageoptim_project:imageoptim:*:*:*:*:*:node.js:*:*", + "immer": "cpe:2.3:a:immer_project:immer:*:*:*:*:*:node.js:*:*", + "inert": "cpe:2.3:a:hapi:inert:*:*:*:*:*:node.js:*:*", + "infraserver": "cpe:2.3:a:infraserver_project:infraserver:*:*:*:*:*:node.js:*:*", + "ini": "cpe:2.3:a:ini_project:ini:*:*:*:*:*:node.js:*:*", + "ini-parser": "cpe:2.3:a:ini-parser_project:ini-parser:*:*:*:*:*:*:*:*", + "iniparserjs": "cpe:2.3:a:iniparserjs_project:iniparserjs:*:*:*:*:*:node.js:*:*", + "install-g-test": "cpe:2.3:a:install-g-test_project:install-g-test:*:*:*:*:*:node.js:*:*", + "install-package": "cpe:2.3:a:install-package_project:install-package:*:*:*:*:*:node.js:*:*", + "intsol-package": "cpe:2.3:a:intsol-package_project:intsol-package:*:*:*:*:*:node.js:*:*", + "iobroker.admin": "cpe:2.3:a:iobroker:iobroker.admin:*:*:*:*:*:node.js:*:*", + "iobroker.web": "cpe:2.3:a:iobroker:iobroker.web:*:*:*:*:*:node.js:*:*", + "ipip": "cpe:2.3:a:ipip_project:ipip:*:*:*:*:*:node.js:*:*", + "is-my-json-valid": "cpe:2.3:a:is-my-json-valid_project:is-my-json-valid:*:*:*:*:*:node.js:*:*", + "is-user-valid": "cpe:2.3:a:is-user-valid_project:is-user-valid:*:*:*:*:*:node.js:*:*", + "iter-http": "cpe:2.3:a:iter-http_project:iter-http:*:*:*:*:*:node.js:*:*", + "iter-server": "cpe:2.3:a:iter-server_project:iter-server:*:*:*:*:*:node.js:*:*", + "jadedown": "cpe:2.3:a:jadedown_project:jadedown:*:*:*:*:*:node.js:*:*", + "jailed": "cpe:2.3:a:jailed_project:jailed:*:*:*:*:*:node.js:*:*", + "jansenstuffpleasework": "cpe:2.3:a:jansenstuffpleasework_project:jansenstuffpleasework:*:*:*:*:*:node.js:*:*", + "jdf-sass": "cpe:2.3:a:jdf-sass_project:jdf-sass:*:*:*:*:*:node.js:*:*", + "jikes": "cpe:2.3:a:jikes_project:jikes:*:*:*:*:*:node.js:*:*", + "jison": "cpe:2.3:a:jison_project:jison:*:*:*:*:*:node.js:*:*", + "jn_jj_server": "cpe:2.3:a:jn_jj_server_project:jn_jj_server:*:*:*:*:*:node.js:*:*", + "jpv": "cpe:2.3:a:json_pattern_validator_project:json_pattern_validator:*:*:*:*:*:*:*:*", + "jquery": "cpe:2.3:a:jquery:jquery:*:*:*:*:*:node.js:*:*", + "jquery-file-upload": "cpe:2.3:a:jquery_file_upload_project:jquery_file_upload:*:*:*:*:*:*:*:*", + "jquery.js": "cpe:2.3:a:jquery.js_project:jquery.js:*:*:*:*:*:node.js:*:*", + "jquery.json-viewer": "cpe:2.3:a:jquery_json-viewer_project:jquery_json-viewer:*:*:*:*:*:node.js:*:*", + "jquey": "cpe:2.3:a:jquey_project:jquey:*:*:*:*:*:node.js:*:*", + "js-given": "cpe:2.3:a:js-given_project:js-given:*:*:*:*:*:node.js:*:*", + "jscover": "cpe:2.3:a:jscover_project:jscover:*:*:*:*:*:node.js:*:*", + "jsen": "cpe:2.3:a:jsen_project:jsen:*:*:*:*:*:node.js:*:*", + "jser-stat": "cpe:2.3:a:jser-stat_project:jser-stat:*:*:*:*:*:node.js:*:*", + "jshamcrest": "cpe:2.3:a:jshamcrest_project:jshamcrest:*:*:*:*:*:node.js:*:*", + "json": "cpe:2.3:a:json_project:json:*:*:*:*:*:node.js:*:*", + "json-pointer": "cpe:2.3:a:smallpdf:json-pointer:*:*:*:*:*:node.js:*:*", + "json8-merge-patch": "cpe:2.3:a:json8-merge-patch_project:json8-merge-patch:*:*:*:*:*:node.js:*:*", + "jsreport": "cpe:2.3:a:jsreport:jsreport:*:*:*:*:*:node.js:*:*", + "jstestdriver": 
"cpe:2.3:a:jstestdriver_project:jstestdriver:*:*:*:*:*:node.js:*:*", + "jsx-slack": "cpe:2.3:a:jsx-slack_project:jsx-slack:*:*:*:*:*:node.js:*:*", + "jszip": "cpe:2.3:a:jszip_project:jszip:*:*:*:*:*:node.js:*:*", + "just-extend": "cpe:2.3:a:just-extend_project:just-extend:*:*:*:*:*:node.js:*:*", + "just-safe-set": "cpe:2.3:a:just-safe-set_project:just-safe-set:*:*:*:*:*:node.js:*:*", + "jvminstall": "cpe:2.3:a:jvminstall_project:jvminstall:*:*:*:*:*:node.js:*:*", + "jwt-simple": "cpe:2.3:a:jwt-simple_project:jwt-simple:*:*:*:*:*:node.js:*:*", + "karma-mojo": "cpe:2.3:a:karma-mojo_project:karma-mojo:*:*:*:*:*:node.js:*:*", + "kerberos": "cpe:2.3:a:kerberos_project:kerberos:*:*:*:*:*:node.js:*:*", + "kill-by-port": "cpe:2.3:a:kill-by-port_project:kill-by-port:*:*:*:*:*:node.js:*:*", + "kill-port": "cpe:2.3:a:kill-port_project:kill-port:*:*:*:*:*:node.js:*:*", + "kill-port-process": "cpe:2.3:a:kill-port-process_project:kill-port-process:*:*:*:*:*:node.js:*:*", + "kill-process-by-name": "cpe:2.3:a:kill-process-by-name_project:kill-process-by-name:*:*:*:*:*:node.js:*:*", + "kill-process-on-port": "cpe:2.3:a:kill-process-on-port_project:kill-process-on-port:*:*:*:*:*:node.js:*:*", + "killing": "cpe:2.3:a:killing_project:killing:*:*:*:*:*:node.js:*:*", + "killport": "cpe:2.3:a:killport_project:killport:*:*:*:*:*:node.js:*:*", + "kindlegen": "cpe:2.3:a:hakatashi:kindlegen:*:*:*:*:*:node.js:*:*", + "klona": "cpe:2.3:a:klona_project:klona:*:*:*:*:*:node.js:*:*", + "knightjs": "cpe:2.3:a:knight_project:knight:*:*:*:*:*:node.js:*:*", + "lab6.brit95": "cpe:2.3:a:lab6.brit95_project:lab6.brit95:*:*:*:*:*:node.js:*:*", + "lab6drewfusbyu": "cpe:2.3:a:lab6drewfusbyu_project:lab6drewfusbyu:*:*:*:*:*:node.js:*:*", + "larvitbase-api": "cpe:2.3:a:larvit:larvitbase:*:*:*:*:*:node.js:*:*", + "lessindex": "cpe:2.3:a:lessindex_project:lessindex:*:*:*:*:*:node.js:*:*", + "lettersanitizer": "cpe:2.3:a:lettersanitizer_project:lettersanitizer:*:*:*:*:*:node.js:*:*", + "libnested": "cpe:2.3:a:libnested_project:libnested:*:*:*:*:*:node.js:*:*", + "libsbml": "cpe:2.3:a:libsbml_project:libsbml:*:*:*:*:*:node.js:*:*", + "libsbmlsim": "cpe:2.3:a:libsbmlsim_project:libsbmlsim:*:*:*:*:*:node.js:*:*", + "libxl": "cpe:2.3:a:libxl_project:libxl:*:*:*:*:*:node.js:*:*", + "limbus-buildgen": "cpe:2.3:a:limbus-buildgen_project:limbus-buildgen:*:*:*:*:*:node.js:*:*", + "list-n-stream": "cpe:2.3:a:list-n-stream_project:list-n-stream:*:*:*:*:*:node.js:*:*", + "lite-server": "cpe:2.3:a:lite-server_project:lite-server:*:*:*:*:*:node.js:*:*", + "lite-web-server": "cpe:2.3:a:lite-web-server_project:lite-web-server:*:*:*:*:*:node.js:*:*", + "liuyaserver": "cpe:2.3:a:liuyaserver_project:liuyaserver:*:*:*:*:*:node.js:*:*", + "liyujing": "cpe:2.3:a:liyujing_project:liyujing:*:*:*:*:*:node.js:*:*", + "localhost-now": "cpe:2.3:a:localhost-now_project:localhost-now:*:*:*:*:*:node.js:*:*", + "locutus": "cpe:2.3:a:locutus_project:locutus:*:*:*:*:*:node.js:*:*", + "lodash": "cpe:2.3:a:lodash:lodash:*:*:*:*:*:node.js:*:*", + "log4js": "cpe:2.3:a:log4js_project:log4js:*:*:*:*:*:node.js:*:*", + "looppake": "cpe:2.3:a:looppake_project:looppake:*:*:*:*:*:node.js:*:*", + "lsof": "cpe:2.3:a:isof_project:isof:*:*:*:*:*:node.js:*:*", + "ltt": "cpe:2.3:a:ltt_project:ltt:*:*:*:*:*:node.js:*:*", + "m-server": "cpe:2.3:a:npmjs:m-server:*:*:*:*:*:*:*:*", + "m.static": "cpe:2.3:a:m.static_project:m.static:*:*:*:*:*:node.js:*:*", + "macaca-chromedriver": "cpe:2.3:a:macacajs:macaca-chromedriver:*:*:*:*:*:node.js:*:*", + "macaca-chromedriver-zxa": 
"cpe:2.3:a:macaca-chromedriver-zxa_project:macaca-chromedriver-zxa:*:*:*:*:*:node.js:*:*", + "macfromip": "cpe:2.3:a:macfromip_project:macfromip:*:*:*:*:*:node.js:*:*", + "madge": "cpe:2.3:a:madge_project:madge:*:*:*:*:*:node.js:*:*", + "madlib-object-utils": "cpe:2.3:a:springtree:madlib-object-utils:*:*:*:*:*:node.js:*:*", + "mariadb": "cpe:2.3:a:mariadb_project:mariadb:*:*:*:*:*:node.js:*:*", + "marionette-socket-host": "cpe:2.3:a:marionette-socket-host_project:marionette-socket-host:*:*:*:*:*:node.js:*:*", + "markdown-it-highlightjs": "cpe:2.3:a:markdown-it-highlightjs_project:markdown-it-highlightjs:*:*:*:*:*:node.js:*:*", + "markdown-link-extractor": "cpe:2.3:a:markdown-link-extractor_project:markdown-link-extractor:*:*:*:*:*:node.js:*:*", + "markdown-pdf": "cpe:2.3:a:markdown-pdf_project:markdown-pdf:*:*:*:*:*:node.js:*:*", + "marked-tree": "cpe:2.3:a:marked-tree_project:marked-tree:*:*:*:*:*:node.js:*:*", + "massif": "cpe:2.3:a:massif_project:massif:*:*:*:*:*:node.js:*:*", + "mc-kill-port": "cpe:2.3:a:mc-kill-port_project:mc-kill-port:*:*:*:*:*:node.js:*:*", + "mcstatic": "cpe:2.3:a:mcstatic_project:mcstatic:*:*:*:*:*:node.js:*:*", + "mdx-mermaid": "cpe:2.3:a:mdx-mermaid_project:mdx-mermaid:*:*:*:*:*:*:*:*", + "memjs": "cpe:2.3:a:memcachier:memjs:*:*:*:*:*:node.js:*:*", + "merge-change": "cpe:2.3:a:merge-change_project:merge-change:*:*:*:*:*:node.js:*:*", + "merge-deep": "cpe:2.3:a:merge-deep_project:merge-deep:*:*:*:*:*:node.js:*:*", + "merge-object": "cpe:2.3:a:merge-object_project:merge-object:*:*:*:*:*:node.js:*:*", + "merge-options": "cpe:2.3:a:merge-options_project:merge-options:*:*:*:*:*:node.js:*:*", + "merge-recursive": "cpe:2.3:a:umbraengineering:merge-recursive:*:*:*:*:*:node.js:*:*", + "metascraper": "cpe:2.3:a:metascrape_project:metascrape:*:*:*:*:*:node.js:*:*", + "method-override": "cpe:2.3:a:expressjs:method-override:*:*:*:*:*:node.js:*:*", + "mfrserver": "cpe:2.3:a:mfrserver_project:mfrserver:*:*:*:*:*:node.js:*:*", + "mime": "cpe:2.3:a:mime_project:mime:*:*:*:*:*:node.js:*:*", + "min-http-server": "cpe:2.3:a:min-http-server_project:min-http-server:*:*:*:*:*:node.js:*:*", + "mock2easy": "cpe:2.3:a:mock2easy_project:mock2easy:*:*:*:*:*:node.js:*:*", + "mockserve": "cpe:2.3:a:mockserve_project:mockserve:*:*:*:*:*:node.js:*:*", + "moment": "cpe:2.3:a:momentjs:moment:*:*:*:*:*:node.js:*:*", + "mongodb-instance": "cpe:2.3:a:mongodb-instance_project:mongodb-instance:*:*:*:*:*:node.js:*:*", + "mongoose": "cpe:2.3:a:mongoosejs:mongoose:*:*:*:*:*:node.js:*:*", + "mongose": "cpe:2.3:a:mongose_project:mongose:*:*:*:*:*:node.js:*:*", + "monorepo-build": "cpe:2.3:a:monorepo-build_project:monorepo-build:*:*:*:*:*:node.js:*:*", + "mootools": "cpe:2.3:a:mootools_project:mootools:*:*:*:*:*:node.js:*:*", + "morgan-json": "cpe:2.3:a:morgan-json_project:morgan-json:*:*:*:*:*:node.js:*:*", + "morris.js": "cpe:2.3:a:morris.js_project:morris.js:*:*:*:*:*:node.js:*:*", + "mout": "cpe:2.3:a:moutjs:mout:*:*:*:*:*:node.js:*:*", + "mpath": "cpe:2.3:a:mpath_project:mpath:*:*:*:*:*:node.js:*:*", + "mqtt-packet": "cpe:2.3:a:mqtt-packet_project:mqtt-packet:*:*:*:*:*:node.js:*:*", + "mssql-node": "cpe:2.3:a:mssql-node_project:mssql-node:*:*:*:*:*:node.js:*:*", + "mssql.js": "cpe:2.3:a:mssql.js_project:mssql.js:*:*:*:*:*:node.js:*:*", + "myprolyz": "cpe:2.3:a:myprolyz_project:myprolyz:*:*:*:*:*:node.js:*:*", + "myserver.alexcthomas18": "cpe:2.3:a:myserver.alexcthomas18_project:myserver.alexcthomas18:*:*:*:*:*:node.js:*:*", + "mysql": "cpe:2.3:a:mysql_project:mysql:*:*:*:*:*:*:*:*", + "mysqljs": 
"cpe:2.3:a:mysqljs_project:mysqljs:*:*:*:*:*:node.js:*:*", + "mystem": "cpe:2.3:a:mystem_project:mystem:*:*:*:*:*:node.js:*:*", + "mystem-fix": "cpe:2.3:a:mystem-fix_project:mystem-fix:*:*:*:*:*:node.js:*:*", + "mystem-wrapper": "cpe:2.3:a:mystem-wrapper_project:mystem-wrapper:*:*:*:*:*:node.js:*:*", + "mystem3": "cpe:2.3:a:mystem3_project:mystem3:*:*:*:*:*:node.js:*:*", + "nanoid": "cpe:2.3:a:nanoid_project:nanoid:*:*:*:*:*:node.js:*:*", + "native-opencv": "cpe:2.3:a:native-opencv_project:native-opencv:*:*:*:*:*:node.js:*:*", + "nconf": "cpe:2.3:a:nconf_project:nconf:*:*:*:*:*:node.js:*:*", + "nconf-toml": "cpe:2.3:a:nconf-toml_project:nconf-toml:*:*:*:*:*:node.js:*:*", + "nested-object-assign": "cpe:2.3:a:getadigital:nested-object-assign:*:*:*:*:*:node.js:*:*", + "nestie": "cpe:2.3:a:nestie_project:nestie:*:*:*:*:*:node.js:*:*", + "netmask": "cpe:2.3:a:netmask_project:netmask:*:*:*:*:*:node.js:*:*", + "network-manager": "cpe:2.3:a:network-manager_project:network-manager:*:*:*:*:*:node.js:*:*", + "nis-utils": "cpe:2.3:a:nis-utils_project:nis-utils:*:*:*:*:*:node.js:*:*", + "node-air-sdk": "cpe:2.3:a:node-air-sdk_project:node-air-sdk:*:*:*:*:*:node.js:*:*", + "node-bluetooth": "cpe:2.3:a:node-bluetooth_project:node-bluetooth:*:*:*:*:*:node.js:*:*", + "node-bluetooth-serial-port": "cpe:2.3:a:node-bluetooth-serial-port_project:node-bluetooth-serial-port:*:*:*:*:*:node.js:*:*", + "node-browser": "cpe:2.3:a:node-browser_project:node-browser:*:*:*:*:*:node.js:*:*", + "node-bsdiff-android": "cpe:2.3:a:node-bsdiff-android_project:node-bsdiff-android:*:*:*:*:*:node.js:*:*", + "node-cli": "cpe:2.3:a:node-cli_project:node-cli:*:*:*:*:*:node.js:*:*", + "node-df": "cpe:2.3:a:node-df_project:node-df:*:*:*:*:*:node.js:*:*", + "node-extend": "cpe:2.3:a:node-extend_project:node-extend:*:*:*:*:*:node.js:*:*", + "node-key-sender": "cpe:2.3:a:node-key-sender_project:node-key-sender:*:*:*:*:*:node.js:*:*", + "node-latex-pdf": "cpe:2.3:a:node-latex-pdf_project:node-latex-pdf:*:*:*:*:*:node.js:*:*", + "node-macaddress": "cpe:2.3:a:node-macaddress_project:node-macaddress:*:*:*:*:*:node.js:*:*", + "node-mpv": "cpe:2.3:a:node-mpv_project:node-mpv:*:*:*:*:*:node.js:*:*", + "node-notifier": "cpe:2.3:a:node-notifier_project:node-notifier:*:*:*:*:*:node.js:*:*", + "node-oojs": "cpe:2.3:a:node-oojs_project:node-oojs:*:*:*:*:*:node.js:*:*", + "node-opcua": "cpe:2.3:a:node-opcua_project:node-opcua:*:*:*:*:*:node.js:*:*", + "node-opencv": "cpe:2.3:a:node-opencv_project:node-opencv:*:*:*:*:*:node.js:*:*", + "node-opensl": "cpe:2.3:a:node-opensl_project:node-opensl:*:*:*:*:*:node.js:*:*", + "node-openssl": "cpe:2.3:a:node-openssl_project:node-openssl:*:*:*:*:*:node.js:*:*", + "node-ps": "cpe:2.3:a:node-ps_project:node-ps:*:*:*:*:*:node.js:*:*", + "node-red-contrib-huemagic": "cpe:2.3:a:node-red-contrib-huemagic_project:node-red-contrib-huemagic:*:*:*:*:*:node.js:*:*", + "node-serialize": "cpe:2.3:a:node-serialize_project:node-serialize:*:*:*:*:*:node.js:*:*", + "node-server-forfront": "cpe:2.3:a:node-server-forfront_project:node-server-forfront:*:*:*:*:*:node.js:*:*", + "node-simple-router": "cpe:2.3:a:node-simple-router:node-simple-router:*:*:*:*:*:node.js:*:*", + "node-sqlite": "cpe:2.3:a:node-sqlite_project:node-sqlite:*:*:*:*:*:node.js:*:*", + "node-static": "cpe:2.3:a:node-static_project:node-static:*:*:*:*:*:node.js:*:*", + "node-thulac": "cpe:2.3:a:geohey:node-thulac:*:*:*:*:*:node.js:*:*", + "node-tkinter": "cpe:2.3:a:node-tkinter_project:node-tkinter:*:*:*:*:*:node.js:*:*", + "node.extend": 
"cpe:2.3:a:dreamerslab:node.extend:*:*:*:*:*:node.js:*:*", + "nodeaaaaa": "cpe:2.3:a:nodeaaaaa_project:nodeaaaaa:*:*:*:*:*:node.js:*:*", + "nodebb-plugin-blog-comments": "cpe:2.3:a:nodebb:blog_comments:*:*:*:*:*:node.js:*:*", + "nodecaffe": "cpe:2.3:a:nodecaffe_project:nodecaffe:*:*:*:*:*:node.js:*:*", + "nodee-utils": "cpe:2.3:a:nodee-utils_project:nodee-utils:*:*:*:*:*:node.js:*:*", + "nodefabric": "cpe:2.3:a:nodefabric_project:nodefabric:*:*:*:*:*:node.js:*:*", + "nodeffmpeg": "cpe:2.3:a:nodeffmpeg_project:nodeffmpeg:*:*:*:*:*:node.js:*:*", + "nodemailer-js": "cpe:2.3:a:nodemailer-js_project:nodemailer-js:*:*:*:*:*:node.js:*:*", + "nodemailer.js": "cpe:2.3:a:nodemailer.js_project:nodemailer.js:*:*:*:*:*:node.js:*:*", + "nodemssql": "cpe:2.3:a:nodemssql_project:nodemssql:*:*:*:*:*:node.js:*:*", + "nodepdf": "cpe:2.3:a:nodepdf_project:nodepdf:*:*:*:*:*:node.js:*:*", + "noderequest": "cpe:2.3:a:noderequest_project:noderequest:*:*:*:*:*:node.js:*:*", + "nodesass": "cpe:2.3:a:nodesass_project:nodesass:*:*:*:*:*:node.js:*:*", + "nodeschnaps": "cpe:2.3:a:nodeschnaps_project:nodeschnaps:*:*:*:*:*:node.js:*:*", + "nodesqlite": "cpe:2.3:a:nodesqlite_project:nodesqlite:*:*:*:*:*:node.js:*:*", + "nodewebkit": "cpe:2.3:a:nodewebkit_project:nodewebkit:*:*:*:*:*:node.js:*:*", + "normalize-url": "cpe:2.3:a:normalize-url_project:normalize-url:*:*:*:*:*:node.js:*:*", + "notevil": "cpe:2.3:a:notevil_project:notevil:*:*:*:*:*:node.js:*:*", + "npm": "cpe:2.3:a:node_packaged_modules_project:node_packaged_modules:*:*:*:*:*:node.js:*:*", + "npm-script-demo": "cpe:2.3:a:npm-script-demo_project:npm-script-demo:*:*:*:*:*:node.js:*:*", + "npos-tesseract": "cpe:2.3:a:npos-tesseract_project:npos-tesseract:*:*:*:*:*:node.js:*:*", + "nw-with-arm": "cpe:2.3:a:nw-with-arm_project:nw-with-arm:*:*:*:*:*:node.js:*:*", + "object-extend": "cpe:2.3:a:object-extend_project:object-extend:*:*:*:*:*:node.js:*:*", + "object-path": "cpe:2.3:a:object-path_project:object-path:*:*:*:*:*:node.js:*:*", + "octocat": "cpe:2.3:a:octocat_project:octocat:*:*:*:*:*:node.js:*:*", + "onion-oled-js": "cpe:2.3:a:onion-oled-js_project:onion-oled-js:*:*:*:*:*:node.js:*:*", + "op-browser": "cpe:2.3:a:op-browser_project:op-browser:*:*:*:*:*:node.js:*:*", + "open-device": "cpe:2.3:a:open-device_project:open-device:*:*:*:*:*:node.js:*:*", + "open-graph": "cpe:2.3:a:open-graph_project:open-graph:*:*:*:*:*:node.js:*:*", + "opencv.js": "cpe:2.3:a:opencv.js_project:opencv.js:*:*:*:*:*:node.js:*:*", + "openframe-ascii-image": "cpe:2.3:a:openframe-ascii-image_project:openframe-ascii-image:*:*:*:*:*:node.js:*:*", + "openframe-glslviewer": "cpe:2.3:a:openframe-glslviewer_project:openframe-glslviewer:*:*:*:*:*:node.js:*:*", + "openframe-image": "cpe:2.3:a:openframe-image_project:openframe-image:*:*:*:*:*:node.js:*:*", + "openssl.js": "cpe:2.3:a:openssl.js_project:openssl.js:*:*:*:*:*:node.js:*:*", + "operadriver": "cpe:2.3:a:cnpmjs:operadriver:*:*:*:*:*:node.js:*:*", + "osm-static-maps": "cpe:2.3:a:osm-static-maps_project:osm-static-maps:*:*:*:*:*:node.js:*:*", + "p4": "cpe:2.3:a:p4_project:p4:*:*:*:*:*:node.js:*:*", + "pandora-doomsday": "cpe:2.3:a:pandora-doomsday_project:pandora-doomsday:*:*:*:*:*:node.js:*:*", + "parse-server": "cpe:2.3:a:parseplatform:parse-server:*:*:*:*:*:node.js:*:*", + "parsejson": "cpe:2.3:a:parsejson_project:parsejson:*:*:*:*:*:node.js:*:*", + "passport-saml": "cpe:2.3:a:passport-saml_project:passport-saml:*:*:*:*:*:node.js:*:*", + "patchmerge": "cpe:2.3:a:patchmerge_project:patchmerge:*:*:*:*:*:node.js:*:*", + "path-parse": 
"cpe:2.3:a:path-parse_project:path-parse:*:*:*:*:*:node.js:*:*", + "payload": "cpe:2.3:a:payloadcms:payload:*:*:*:*:*:node.js:*:*", + "paypal-adaptive": "cpe:2.3:a:idea:paypal-adaptive:*:*:*:*:*:node.js:*:*", + "paypal-ipn": "cpe:2.3:a:paypal-ipn_project:paypal-ipn:*:*:*:*:*:node.js:*:*", + "pdf-image": "cpe:2.3:a:pdf-image_project:pdf-image:*:*:*:*:*:node.js:*:*", + "pdfinfojs": "cpe:2.3:a:pdfinfojs_project:pdfinfojs:*:*:*:*:*:node.js:*:*", + "peiserver": "cpe:2.3:a:peiserver_project:peiserver:*:*:*:*:*:node.js:*:*", + "pennyworth": "cpe:2.3:a:pennyworth_project:pennyworth:*:*:*:*:*:node.js:*:*", + "pg": "cpe:2.3:a:node-postgres:pg:*:*:*:*:*:node.js:*:*", + "phantomjs-cheniu": "cpe:2.3:a:phantomjs-cheniu_project:phantomjs-cheniu:*:*:*:*:*:node.js:*:*", + "phantomjs-seo": "cpe:2.3:a:phantomjs-seo_project:phantomjs-seo:*:*:*:*:*:node.js:*:*", + "picard": "cpe:2.3:a:picard_project:picard:*:*:*:*:*:node.js:*:*", + "picotts": "cpe:2.3:a:picotts_project:picotts:*:*:*:*:*:node.js:*:*", + "pixl-class": "cpe:2.3:a:pixlcore:pixl-class:*:*:*:*:*:node.js:*:*", + "pk-app-wonderbox": "cpe:2.3:a:pk-app-wonderbox_project:pk-app-wonderbox:*:*:*:*:*:node.js:*:*", + "plist": "cpe:2.3:a:plist_project:plist:*:*:*:*:*:node.js:*:*", + "pm2-kafka": "cpe:2.3:a:pm2-kafka_project:pm2-kafka:*:*:*:*:*:node.js:*:*", + "pngcrush-installer": "cpe:2.3:a:pngcrush-installer_project:pngcrush-installer:*:*:*:*:*:node.js:*:*", + "pomelo-monitor": "cpe:2.3:a:netease:pomelo-monitor:*:*:*:*:*:node.js:*:*", + "pooledwebsocket": "cpe:2.3:a:pooledwebsocket_project:pooledwebsocket:*:*:*:*:*:node.js:*:*", + "portkiller": "cpe:2.3:a:portkiller_project:portkiller:*:*:*:*:*:node.js:*:*", + "post-loader": "cpe:2.3:a:post-loader_project:post-loader:*:*:*:*:*:node.js:*:*", + "pouchdb": "cpe:2.3:a:pouchdb:pouchdb:*:*:*:*:*:node.js:*:*", + "prebuild-lwip": "cpe:2.3:a:prebuild-lwip_project:prebuild-lwip:*:*:*:*:*:node.js:*:*", + "printf": "cpe:2.3:a:adaltas:printf:*:*:*:*:*:node.js:*:*", + "private-ip": "cpe:2.3:a:private-ip_project:private-ip:*:*:*:*:*:node.js:*:*", + "proctree": "cpe:2.3:a:proctree_project:proctree:*:*:*:*:*:*:*:*", + "product-monitor": "cpe:2.3:a:product-monitor_project:product-monitor:*:*:*:*:*:node.js:*:*", + "progressbar.js": "cpe:2.3:a:progressbar.js_project:progressbar.js:*:*:*:*:*:node.js:*:*", + "promise-probe": "cpe:2.3:a:promise-probe_project:promise-probe:*:*:*:*:*:node.js:*:*", + "promisehelpers": "cpe:2.3:a:yola:promisehelpers:*:*:*:*:*:node.js:*:*", + "property-expr": "cpe:2.3:a:property-expr_project:property-expr:*:*:*:*:*:node.js:*:*", + "proxy": "cpe:2.3:a:proxy_project:proxy:*:*:*:*:*:node.js:*:*", + "proxy.js": "cpe:2.3:a:proxy.js_project:proxy.js:*:*:*:*:*:node.js:*:*", + "ps-kill": "cpe:2.3:a:ps-kill_project:ps-kill:*:*:*:*:*:node.js:*:*", + "ps-visitor": "cpe:2.3:a:ps-visitor_project:ps-visitor:*:*:*:*:*:node.js:*:*", + "public": "cpe:2.3:a:public_project:public:*:*:*:*:*:node.js:*:*", + "pug": "cpe:2.3:a:pugjs:pug:*:*:*:*:*:node.js:*:*", + "pullit": "cpe:2.3:a:pull_it_project:pull_it:*:*:*:*:*:node.js:*:*", + "pytservce": "cpe:2.3:a:pytservce_project:pytservce:*:*:*:*:*:node.js:*:*", + "qbs": "cpe:2.3:a:qbs_project:qbs:*:*:*:*:*:node.js:*:*", + "qinserve": "cpe:2.3:a:qinserve_project:qinserve:*:*:*:*:*:node.js:*:*", + "qs": "cpe:2.3:a:qs_project:qs:*:*:*:*:*:node.js:*:*", + "query-mysql": "cpe:2.3:a:query-mysql_project:query-mysql:*:*:*:*:*:node.js:*:*", + "quickserver": "cpe:2.3:a:quickserver_project:quickserver:*:*:*:*:*:node.js:*:*", + "randomatic": 
"cpe:2.3:a:randomatic_project:randomatic:*:*:*:*:*:node.js:*:*", + "rdf-graph-array": "cpe:2.3:a:rdf-graph-array_project:rdf-graph-array:*:*:*:*:*:node.js:*:*", + "react-adal": "cpe:2.3:a:react-adal_project:react-adal:*:*:*:*:*:node.js:*:*", + "react-draft-wysiwyg": "cpe:2.3:a:react_draft_wysiwyg_project:react_draft_wysiwyg:*:*:*:*:*:node.js:*:*", + "redis-srvr": "cpe:2.3:a:redis-srvr_project:redis-srvr:*:*:*:*:*:node.js:*:*", + "reduce-css-calc": "cpe:2.3:a:reduce-css-calc_project:reduce-css-calc:*:*:*:*:*:node.js:*:*", + "reecerver": "cpe:2.3:a:reecerver_project:reecerver:*:*:*:*:*:node.js:*:*", + "reg-keygen-git-hash-plugin": "cpe:2.3:a:reg-keygen-git-hash_project:reg-keygen-git-hash:*:*:*:*:*:reg-suit:*:*", + "regexfn": "cpe:2.3:a:regexfn_project:regexfn:*:*:*:*:*:node.js:*:*", + "remark-html": "cpe:2.3:a:remark:remark-html:*:*:*:*:*:node.js:*:*", + "remarkable": "cpe:2.3:a:remarkable_project:remarkable:*:*:*:*:*:node.js:*:*", + "resolve-path": "cpe:2.3:a:resolve-path_project:resolve-path:*:*:*:*:*:node.js:*:*", + "resourcehacker": "cpe:2.3:a:resourcehacker_project:resourcehacker:*:*:*:*:*:node.js:*:*", + "restafary": "cpe:2.3:a:restafary_project:restafary:*:*:*:*:*:node.js:*:*", + "restify-paginate": "cpe:2.3:a:restify-paginate_project:restify-paginate:*:*:*:*:*:node.js:*:*", + "riot-compiler": "cpe:2.3:a:riot.js:riot-compiler:*:*:*:*:*:node.js:*:*", + "ritp": "cpe:2.3:a:ritp_project:ritp:*:*:*:*:*:node.js:*:*", + "roar-pidusage": "cpe:2.3:a:roar-pidusage_project:roar-pidusage:*:*:*:*:*:node.js:*:*", + "rollup-plugin-dev-server": "cpe:2.3:a:rollup-plugin-dev-server_project:rollup-plugin-dev-server:*:*:*:*:*:node.js:*:*", + "rollup-plugin-serve": "cpe:2.3:a:rollup-plugin-serve_project:rollup-plugin-serve:*:*:*:*:*:node.js:*:*", + "rollup-plugin-server": "cpe:2.3:a:rollup-plugin-server_project:rollup-plugin-server:*:*:*:*:*:node.js:*:*", + "rpi-gpio": "cpe:2.3:a:rpi_project:rpi:*:*:*:*:*:node.js:*:*", + "rs-brightcove": "cpe:2.3:a:rs-brightcove_project:rs-brightcove:*:*:*:*:*:node.js:*:*", + "rsshub": "cpe:2.3:a:rsshub_project:rsshub:*:*:*:*:*:node.js:*:*", + "rtcmulticonnection-client": "cpe:2.3:a:rtcmulticonnection-client_project:rtcmulticonnection-client:*:*:*:*:*:node.js:*:*", + "safe-eval": "cpe:2.3:a:safe-eval_project:safe-eval:*:*:*:*:*:node.js:*:*", + "safe-flat": "cpe:2.3:a:safe-flat_project:safe-flat:*:*:*:*:*:*:*:*", + "safe-object2": "cpe:2.3:a:safe-object2_project:safe-object2:*:*:*:*:*:node.js:*:*", + "samba-client": "cpe:2.3:a:samba-client_project:samba-client:*:*:*:*:*:node.js:*:*", + "sanitize-html": "cpe:2.3:a:punkave:sanitize-html:*:*:*:*:*:node.js:*:*", + "save-server": "cpe:2.3:a:save-server_project:save-server:*:*:*:*:*:*:*:*", + "scaffold-helper": "cpe:2.3:a:scaffold-helper_project:scaffold-helper:*:*:*:*:*:node.js:*:*", + "scalajs-standalone-bin": "cpe:2.3:a:scalajs-standalone-bin_project:scalajs-standalone-bin:*:*:*:*:*:node.js:*:*", + "scniro-validator": "cpe:2.3:a:scniro-validator_project:scniro-validator:*:*:*:*:*:node.js:*:*", + "scott-blanch-weather-app": "cpe:2.3:a:scott-blanch-weather-app_project:scott-blanch-weather-app:*:*:*:*:*:node.js:*:*", + "script-manager": "cpe:2.3:a:script-manager_project:script-manager:*:*:*:*:*:node.js:*:*", + "scss-tokenizer": "cpe:2.3:a:scss-tokenizer_project:scss-tokenizer:*:*:*:*:*:node.js:*:*", + "sds": "cpe:2.3:a:sds_project:sds:*:*:*:*:*:node.js:*:*", + "section2.madisonjbrooks12": "cpe:2.3:a:section2.madisonjbrooks12_project:section2.madisonjbrooks12:*:*:*:*:*:node.js:*:*", + "seeftl": 
"cpe:2.3:a:seeftl_project:seeftl:*:*:*:*:*:node.js:*:*", + "selectize-plugin-a11y": "cpe:2.3:a:selectize-plugin-a11y_project:selectize-plugin-a11y:*:*:*:*:*:node.js:*:*", + "selenium-chromedriver": "cpe:2.3:a:selenium-chromedriver_project:selenium-chromedriver:*:*:*:*:*:node.js:*:*", + "selenium-standalone-painful": "cpe:2.3:a:selenium-standalone-painful_project:selenium-standalone-painful:*:*:*:*:*:node.js:*:*", + "selenium-wrapper": "cpe:2.3:a:selenium-wrapper_project:selenium-wrapper:*:*:*:*:*:node.js:*:*", + "semver": "cpe:2.3:a:npmjs:semver:*:*:*:*:*:node.js:*:*", + "semver-tags": "cpe:2.3:a:semver-tags_project:semver-tags:*:*:*:*:*:node.js:*:*", + "sencisho": "cpe:2.3:a:sencisho_project:sencisho:*:*:*:*:*:node.js:*:*", + "serc.js": "cpe:2.3:a:serc.js_project:serc.js:*:*:*:*:*:node.js:*:*", + "serve-here.js": "cpe:2.3:a:serve-here.js_project:serve-here.js:*:*:*:*:*:node.js:*:*", + "serve-lite": "cpe:2.3:a:serve-lite_project:serve-lite:*:*:*:*:*:node.js:*:*", + "serve46": "cpe:2.3:a:serve46_project:serve46:*:*:*:*:*:node.js:*:*", + "serverabc": "cpe:2.3:a:serverabc_project:serverabc:*:*:*:*:*:node.js:*:*", + "serverhuwenhui": "cpe:2.3:a:serverhuwenhui_project:serverhuwenhui:*:*:*:*:*:node.js:*:*", + "serverliujiayi1": "cpe:2.3:a:serverliujiayi1_project:serverliujiayi1:*:*:*:*:*:node.js:*:*", + "serverlyr": "cpe:2.3:a:serverlyr_project:serverlyr:*:*:*:*:*:node.js:*:*", + "serverwg": "cpe:2.3:a:serverwg_project:serverwg:*:*:*:*:*:node.js:*:*", + "serverwzl": "cpe:2.3:a:serverwzl_project:serverwzl:*:*:*:*:*:node.js:*:*", + "serverxxx": "cpe:2.3:a:serverxxx_project:serverxxx:*:*:*:*:*:node.js:*:*", + "serveryaozeyan": "cpe:2.3:a:serveryaozeyan_project:serveryaozeyan:*:*:*:*:*:node.js:*:*", + "serveryztyzt": "cpe:2.3:a:serveryztyzt_project:serveryztyzt:*:*:*:*:*:node.js:*:*", + "serverzyy": "cpe:2.3:a:serverzyy_project:serverzyy:*:*:*:*:*:node.js:*:*", + "servey": "cpe:2.3:a:servey_project:servey:*:*:*:*:*:node.js:*:*", + "set-object-value": "cpe:2.3:a:set-object-value_project:set-object-value:*:*:*:*:*:node.js:*:*", + "set-value": "cpe:2.3:a:set-value_project:set-value:*:*:*:*:*:node.js:*:*", + "sey": "cpe:2.3:a:sey_project:sey:*:*:*:*:*:node.js:*:*", + "sfml": "cpe:2.3:a:sfml_project:sfml:*:*:*:*:*:node.js:*:*", + "sgqserve": "cpe:2.3:a:sgqserve_project:sgqserve:*:*:*:*:*:node.js:*:*", + "shadowsock": "cpe:2.3:a:shadowsock_project:shadowsock:*:*:*:*:*:node.js:*:*", + "sharp": "cpe:2.3:a:sharp_project:sharp:*:*:*:*:*:node.js:*:*", + "shell-quote": "cpe:2.3:a:shell-quote_project:shell-quote:*:*:*:*:*:node.js:*:*", + "shenliru": "cpe:2.3:a:shenliru_project:shenliru:*:*:*:*:*:node.js:*:*", + "shescape": "cpe:2.3:a:shescape_project:shescape:*:*:*:*:*:node.js:*:*", + "shit-server": "cpe:2.3:a:shit-server_project:shit-server:*:*:*:*:*:node.js:*:*", + "shout": "cpe:2.3:a:shout_project:shout:*:*:*:*:*:node.js:*:*", + "simple-markdown": "cpe:2.3:a:khanacademy:simple-markdown:*:*:*:*:*:node.js:*:*", + "simple-npm-registry": "cpe:2.3:a:simple-npm-registry_project:simple-npm-registry:*:*:*:*:*:node.js:*:*", + "simplehttpserver": "cpe:2.3:a:simplehttpserver_project:simplehttpserver:*:*:*:*:*:node.js:*:*", + "slimerjs-edge": "cpe:2.3:a:slimerjs-edge_project:slimerjs-edge:*:*:*:*:*:node.js:*:*", + "slug": "cpe:2.3:a:slug_project:slug:*:*:*:*:*:node.js:*:*", + "sly07": "cpe:2.3:a:sly07_project:sly07:*:*:*:*:*:node.js:*:*", + "smb": "cpe:2.3:a:smb_project:smb:*:*:*:*:*:node.js:*:*", + "soci": "cpe:2.3:a:soci_project:soci:*:*:*:*:*:node.js:*:*", + "split-html-to-chars": 
"cpe:2.3:a:split-html-to-chars_project:split-html-to-chars:*:*:*:*:*:node.js:*:*", + "spritesheet-js": "cpe:2.3:a:spritesheet-js_project:spritesheet-js:*:*:*:*:*:node.js:*:*", + "sqlite.js": "cpe:2.3:a:sqlite.js_project:sqlite.js:*:*:*:*:*:node.js:*:*", + "sqliter": "cpe:2.3:a:sqliter_project:sqliter:*:*:*:*:*:node.js:*:*", + "sqlserver": "cpe:2.3:a:sqlserver_project:sqlserver:*:*:*:*:*:node.js:*:*", + "ssh2": "cpe:2.3:a:ssh2_project:ssh2:*:*:*:*:*:node.js:*:*", + "ssl-utils": "cpe:2.3:a:ssl-utils_project:ssl-utils:*:*:*:*:*:node.js:*:*", + "sspa": "cpe:2.3:a:sspa_project:sspa:*:*:*:*:*:node.js:*:*", + "static-dev-server": "cpe:2.3:a:static-dev-server_project:static-dev-server:*:*:*:*:*:node.js:*:*", + "static-eval": "cpe:2.3:a:static-eval_project:static-eval:*:*:*:*:*:node.js:*:*", + "static-html-server": "cpe:2.3:a:static-html-server_project:static-html-server:*:*:*:*:*:node.js:*:*", + "static-resource-server": "cpe:2.3:a:static-resource-server_project:static-resource-server:*:*:*:*:*:node.js:*:*", + "statichttpserver": "cpe:2.3:a:statichttpserver_project:statichttpserver:*:*:*:*:*:node.js:*:*", + "statics-server": "cpe:2.3:a:statics-server_project:statics-server:*:*:*:*:*:node.js:*:*", + "stattic": "cpe:2.3:a:stattic_project:stattic:*:*:*:*:*:node.js:*:*", + "strider-sauce": "cpe:2.3:a:strider-sauce_project:strider-sauce:*:*:*:*:*:node.js:*:*", + "string": "cpe:2.3:a:string_project:string:*:*:*:*:*:node.js:*:*", + "summit": "cpe:2.3:a:summit_project:summit:*:*:*:*:*:node.js:*:*", + "superagent": "cpe:2.3:a:superagent_project:superagent:*:*:*:*:*:node.js:*:*", + "superjson": "cpe:2.3:a:superjson_project:superjson:*:*:*:*:*:node.js:*:*", + "susu-sum": "cpe:2.3:a:susu-sum_project:susu-sum:*:*:*:*:*:node.js:*:*", + "swagger-ui-dist": "cpe:2.3:a:smartbear:swagger-ui-dist:*:*:*:*:*:node.js:*:*", + "sync-exec": "cpe:2.3:a:sync-exec_project:sync-exec:*:*:*:*:*:node.js:*:*", + "taffy": "cpe:2.3:a:taffydb:taffy:*:*:*:*:*:node.js:*:*", + "takeapeek": "cpe:2.3:a:takeapeek_project:takeapeek:*:*:*:*:*:*:*:*", + "tar": "cpe:2.3:a:tar_project:tar:*:*:*:*:*:node.js:*:*", + "teddy": "cpe:2.3:a:teddy_project:teddy:*:*:*:*:*:node.js:*:*", + "tencent-server": "cpe:2.3:a:tencent-server_project:tencent-server:*:*:*:*:*:node.js:*:*", + "that-value": "cpe:2.3:a:that-value_project:that-value:*:*:*:*:*:node.js:*:*", + "three": "cpe:2.3:a:three_project:three:*:*:*:*:*:node.js:*:*", + "tianma-static": "cpe:2.3:a:tianma-static_project:tianma-static:*:*:*:*:*:node.js:*:*", + "timespan": "cpe:2.3:a:timespan_project:timespan:*:*:*:*:*:node.js:*:*", + "tiny-conf": "cpe:2.3:a:tiny-conf_project:tiny-conf:*:*:*:*:*:node.js:*:*", + "tiny-csrf": "cpe:2.3:a:tiny-csrf_project:tiny-csrf:*:*:*:*:*:node.js:*:*", + "tiny-http": "cpe:2.3:a:tiny-http_project:tiny-http:*:*:*:*:*:node.js:*:*", + "tinyserver2": "cpe:2.3:a:tinyserver2_project:tinyserver2:*:*:*:*:*:node.js:*:*", + "tmock": "cpe:2.3:a:tmock_project:tmock:*:*:*:*:*:node.js:*:*", + "tmpl": "cpe:2.3:a:tmpl_project:tmpl:*:*:*:*:*:node.js:*:*", + "todo-regex": "cpe:2.3:a:todo-regex_project:todo-regex:*:*:*:*:*:node.js:*:*", + "tomita": "cpe:2.3:a:tomita_project:tomita:*:*:*:*:*:node.js:*:*", + "tomita-parser": "cpe:2.3:a:yandex:tomita-parser:*:*:*:*:*:node.js:*:*", + "tough-cookie": "cpe:2.3:a:salesforce:tough-cookie:*:*:*:*:*:node.js:*:*", + "traceroute": "cpe:2.3:a:traceroute_project:traceroute:*:*:*:*:*:node.js:*:*", + "trailing-slash": "cpe:2.3:a:trailing-slash_project:trailing-slash:*:*:*:*:*:node.js:*:*", + "tree-kill": 
"cpe:2.3:a:tree-kill_project:tree-kill:*:*:*:*:*:node.js:*:*", + "trim-newlines": "cpe:2.3:a:trim-newlines_project:trim-newlines:*:*:*:*:*:node.js:*:*", + "ts-nodash": "cpe:2.3:a:ts-nodash_project:ts-nodash:*:*:*:*:*:node.js:*:*", + "ts-process-promises": "cpe:2.3:a:ts-process-promises_project:ts-process-promises:*:*:*:*:*:node.js:*:*", + "ua-parser": "cpe:2.3:a:ua-parser_project:ua-parser:*:*:*:*:*:node.js:*:*", + "ua-parser-js": "cpe:2.3:a:ua-parser-js_project:ua-parser-js:*:*:*:*:*:node.js:*:*", + "uap-core": "cpe:2.3:a:uap-core_project:uap-core:*:*:*:*:*:node.js:*:*", + "uekw1511server": "cpe:2.3:a:uekw1511server_project:uekw1511server:*:*:*:*:*:node.js:*:*", + "umount": "cpe:2.3:a:umount_project:umount:*:*:*:*:*:node.js:*:*", + "undefsafe": "cpe:2.3:a:undefsafe_project:undefsafe:*:*:*:*:*:node.js:*:*", + "underscore": "cpe:2.3:a:underscorejs:underscore:*:*:*:*:*:node.js:*:*", + "underscore-99xp": "cpe:2.3:a:underscore-99xp_project:underscore-99xp:*:*:*:*:*:node.js:*:*", + "unicode": "cpe:2.3:a:unicode_project:unicode:*:*:*:*:*:node.js:*:*", + "unicorn-list": "cpe:2.3:a:unicorn-list_project:unicorn-list:*:*:*:*:*:node.js:*:*", + "unzipper": "cpe:2.3:a:unzipper_project:unzipper:*:*:*:*:*:node.js:*:*", + "uri-js": "cpe:2.3:a:uri-js_project:uri-js:*:*:*:*:*:node.js:*:*", + "url-js": "cpe:2.3:a:url-js_project:url-js:*:*:*:*:*:node.js:*:*", + "url-parse": "cpe:2.3:a:url-parse_project:url-parse:*:*:*:*:*:node.js:*:*", + "useragent": "cpe:2.3:a:useragent_project:useragent:*:*:*:*:*:node.js:*:*", + "utahcityfinder": "cpe:2.3:a:utahcityfinder_project:utahcityfinder:*:*:*:*:*:node.js:*:*", + "utilities": "cpe:2.3:a:utilities_project:utilities:*:*:*:*:*:node.js:*:*", + "utils-extend": "cpe:2.3:a:utils-extend_project:utils-extend:*:*:*:*:*:node.js:*:*", + "uv-tj-demo": "cpe:2.3:a:uv-tj-demo_project:uv-tj-demo:*:*:*:*:*:node.js:*:*", + "validate-color": "cpe:2.3:a:validate_color_project:validate_color:*:*:*:*:*:node.js:*:*", + "validate-data": "cpe:2.3:a:validate_data_project:validate_data:*:*:*:*:*:node.js:*:*", + "vega": "cpe:2.3:a:vega_project:vega:*:*:*:*:*:node.js:*:*", + "vega-functions": "cpe:2.3:a:vega-functions_project:vega-functions:*:*:*:*:*:node.js:*:*", + "vm2": "cpe:2.3:a:vm2_project:vm2:*:*:*:*:*:node.js:*:*", + "vmd": "cpe:2.3:a:vmd_project:vmd:*:*:*:*:*:node.js:*:*", + "vuelidate": "cpe:2.3:a:vuelidate_project:vuelidate:*:*:*:*:*:node.js:*:*", + "w-zip": "cpe:2.3:a:w-zip_project:w-zip:*:*:*:*:*:node.js:*:*", + "wangguojing123": "cpe:2.3:a:wanggoujing123_project:wanggoujing123:*:*:*:*:*:node.js:*:*", + "wasdk": "cpe:2.3:a:wasdk_project:wasdk:*:*:*:*:*:node.js:*:*", + "waterline-sequel": "cpe:2.3:a:balderdash:waterline-sequel:*:*:*:*:*:node.js:*:*", + "wc-cmd": "cpe:2.3:a:wc-cmd_project:wc-cmd:*:*:*:*:*:node.js:*:*", + "weather.swlyons": "cpe:2.3:a:weather.swlyons_project:weather.swlyons:*:*:*:*:*:node.js:*:*", + "webdriver-launcher": "cpe:2.3:a:webdriver-launcher_project:webdriver-launcher:*:*:*:*:*:node.js:*:*", + "webpack-subresource-integrity": "cpe:2.3:a:webpack-subresource-integrity_project:webpack-subresource-integrity:*:*:*:*:*:node.js:*:*", + "webrtc-native": "cpe:2.3:a:webrtc:webrtc-native:*:*:*:*:*:node.js:*:*", + "welcomyzt": "cpe:2.3:a:welcomyzt_project:welcomyzt:*:*:*:*:*:node.js:*:*", + "wffserve": "cpe:2.3:a:wffserve_project:wffserve:*:*:*:*:*:node.js:*:*", + "whereis": "cpe:2.3:a:whereis_project:whereis:*:*:*:*:*:node.js:*:*", + "whispercast": "cpe:2.3:a:whispercast_project:whispercast:*:*:*:*:*:node.js:*:*", + "wifey": 
"cpe:2.3:a:wifey_project:wifey:*:*:*:*:*:node.js:*:*", + "wifiscanner": "cpe:2.3:a:thingssdk:wifiscanner:*:*:*:*:*:node.js:*:*", + "wincred": "cpe:2.3:a:wincred_project:wincred:*:*:*:*:*:*:*:*", + "wind-mvc": "cpe:2.3:a:wind-mvc_project:wind-mvc:*:*:*:*:*:node.js:*:*", + "windows-build-tools": "cpe:2.3:a:windows-build-tools_project:windows-build-tools:*:*:*:*:*:node.js:*:*", + "windows-iedriver": "cpe:2.3:a:windows-iedriver_project:windows-iedriver:*:*:*:*:*:node.js:*:*", + "windows-latestchromedriver": "cpe:2.3:a:windows-latestchromedriver_project:windows-latestchromedriver:*:*:*:*:*:node.js:*:*", + "windows-selenium-chromedriver": "cpe:2.3:a:windows-selenium-chromedriver_project:windows-selenium-chromedriver:*:*:*:*:*:node.js:*:*", + "windows-seleniumjar": "cpe:2.3:a:windows-seleniumjar_project:windows-seleniumjar:*:*:*:*:*:node.js:*:*", + "windows-seleniumjar-mirror": "cpe:2.3:a:windows-seleniumjar-mirror_project:windows-seleniumjar-mirror:*:*:*:*:*:node.js:*:*", + "wintiwebdev": "cpe:2.3:a:wintiwebdev_project:wintiwebdev:*:*:*:*:*:node.js:*:*", + "wixtoolset": "cpe:2.3:a:wixtoolset_project:wixtoolset:*:*:*:*:*:node.js:*:*", + "word-wrap": "cpe:2.3:a:word-wrap_project:word-wrap:*:*:*:*:*:node.js:*:*", + "worksmith": "cpe:2.3:a:guidesmiths:worksmith:*:*:*:*:*:node.js:*:*", + "x-data-spreadsheet": "cpe:2.3:a:x-data-spreadsheet_project:x-data-spreadsheet:*:*:*:*:*:node.js:*:*", + "xd-testing": "cpe:2.3:a:xd-testing_project:xd-testing:*:*:*:*:*:node.js:*:*", + "xmldom": "cpe:2.3:a:xmldom_project:xmldom:*:*:*:*:*:node.js:*:*", + "xtalk": "cpe:2.3:a:xtalk_project:xtalk:*:*:*:*:*:node.js:*:*", + "y18n": "cpe:2.3:a:y18n_project:y18n:*:*:*:*:*:node.js:*:*", + "yargs-parser": "cpe:2.3:a:yargs:yargs-parser:*:*:*:*:*:node.js:*:*", + "yttivy": "cpe:2.3:a:yttivy_project:yttivy:*:*:*:*:*:node.js:*:*", + "yyooopack": "cpe:2.3:a:yyooopack_project:yyooopack:*:*:*:*:*:node.js:*:*", + "yzt": "cpe:2.3:a:yzt_project:yzt:*:*:*:*:*:node.js:*:*", + "zjjserver": "cpe:2.3:a:zjjserver_project:zjjserver:*:*:*:*:*:node.js:*:*", + "zwserver": "cpe:2.3:a:zwserver_project:zwserver:*:*:*:*:*:node.js:*:*" + }, + "pypi": { + "0.0.1": "cpe:2.3:a:pypi:pypi:*:*:*:*:*:*:*:*", + "AAmiles": "cpe:2.3:a:pypi:aamiles:*:*:*:*:*:pypi:*:*", + "Beaker": "cpe:2.3:a:beakerbrowser:beaker:*:*:*:*:*:python:*:*", + "Flask": "cpe:2.3:a:palletsprojects:flask:*:*:*:*:*:*:*:*", + "Flask-Security-Too": "cpe:2.3:a:flask-security-too_project:flask-security-too:*:*:*:*:*:*:*:*", + "Red-Dashboard": "cpe:2.3:a:cogboard:red-dashboard:*:*:*:*:*:*:*:*", + "XML2Dict": "cpe:2.3:a:xml2dict_project:xml2dict:*:*:*:*:*:python:*:*", + "aniso8601": "cpe:2.3:a:aniso8601_project:aniso8601:*:*:*:*:*:*:*:*", + "api-res-py": "cpe:2.3:a:api-res-py_project:api-res-py:*:*:*:*:*:python:*:*", + "cloudtoken": "cpe:2.3:a:atlassian:cloudtoken:*:*:*:*:*:*:*:*", + "conference-scheduler-cli": "cpe:2.3:a:pyconuk:conference-scheduler-cli:*:*:*:*:*:*:*:*", + "cryptography": "cpe:2.3:a:python-cryptography_project:python-cryptography:*:*:*:*:*:*:*:*", + "d8s-domains": "cpe:2.3:a:democritus_domains_project:democritus_domains:*:*:*:*:*:python:*:*", + "d8s-ip-addresses": "cpe:2.3:a:democritus_ip_addresses_project:democritus_ip_addresses:*:*:*:*:*:python:*:*", + "d8s-pdfs": "cpe:2.3:a:democritus_pdfs_project:democritus_pdfs:*:*:*:*:*:python:*:*", + "d8s-urls": "cpe:2.3:a:democritus_urls_project:democritus_urls:*:*:*:*:*:python:*:*", + "d8s-uuids": "cpe:2.3:a:democritus_uuids_project:democritus_uuids:*:*:*:*:*:python:*:*", + "decorator": "cpe:2.3:a:python:decorator:*:*:*:*:*:*:*:*", + 
"drf-jwt": "cpe:2.3:a:styria:django-rest-framework-json_web_tokens:*:*:*:*:*:*:*:*", + "easy-parse": "cpe:2.3:a:easy-parse_project:easy-parse:*:*:*:*:*:python:*:*", + "easy-xml": "cpe:2.3:a:easyxml_project:easyxml:*:*:*:*:*:python:*:*", + "enum34": "cpe:2.3:a:python:enum34:*:*:*:*:*:*:*:*", + "exotel": "cpe:2.3:a:exotel_project:exotel:*:*:*:*:*:python:*:*", + "flask-restx": "cpe:2.3:a:flask-restx_project:flask-restx:*:*:*:*:*:python:*:*", + "global-workqueue": "cpe:2.3:a:global-workqueue_project:global-workqueue:*:*:*:*:*:python:*:*", + "horus": "cpe:2.3:a:pylonsproject:horus:*:*:*:*:*:pyramid:*:*", + "html-to-csv": "cpe:2.3:a:html-to-csv_project:html-to-csv:*:*:*:*:*:python:*:*", + "jupyterhub-systemdspawner": "cpe:2.3:a:jupyterhub:systemdspawner:*:*:*:*:*:*:*:*", + "jw.util": "cpe:2.3:a:python:jw.util:*:*:*:*:*:python:*:*", + "keymaker": "cpe:2.3:a:keymaker_project:keymaker:*:*:*:*:*:*:*:*", + "ladon": "cpe:2.3:a:ladon_project:ladon:*:*:*:*:*:*:*:*", + "marshmallow": "cpe:2.3:a:marshmallow_project:marshmallow:*:*:*:*:*:python:*:*", + "mpxj": "cpe:2.3:a:mpxj:mpxj:*:*:*:*:*:python:*:*", + "networkx": "cpe:2.3:a:python:networkx:*:*:*:*:*:*:*:*", + "novajoin": "cpe:2.3:a:python:novajoin:*:*:*:*:*:*:*:*", + "oncall": "cpe:2.3:a:linkedin:oncall:*:*:*:*:*:*:*:*", + "openssh-key-parser": "cpe:2.3:a:openssh_key_parser_project:openssh_key_parser:*:*:*:*:*:python:*:*", + "ovirt-engine-sdk-python": "cpe:2.3:a:ovirt-engine-sdk-python_project:ovirt-engine-sdk-python:*:*:*:*:*:*:*:*", + "passeo": "cpe:2.3:a:passeo_project:passeo:*:*:*:*:*:python:*:*", + "pipreqs": "cpe:2.3:a:pipreqs_project:pipreqs:*:*:*:*:*:python:*:*", + "proxy.py": "cpe:2.3:a:proxy.py_project:proxy.py:*:*:*:*:*:*:*:*", + "py-bcrypt": "cpe:2.3:a:python:py-bcrypt:*:*:*:*:*:*:*:*", + "py7zr": "cpe:2.3:a:py7zr_project:py7zr:*:*:*:*:*:python:*:*", + "pybluemonday": "cpe:2.3:a:python:pybluemonday:*:*:*:*:*:*:*:*", + "pycryptodome": "cpe:2.3:a:python:pycryptodome:*:*:*:*:*:*:*:*", + "pyload-ng": "cpe:2.3:a:pyload-ng_project:pyload-ng:*:*:*:*:*:python:*:*", + "pymatgen": "cpe:2.3:a:pymatgen:pymatgen:*:*:*:*:*:*:*:*", + "pyo": "cpe:2.3:a:pyo_project:pyo:*:*:*:*:*:*:*:*", + "pypiserver": "cpe:2.3:a:python:pypiserver:*:*:*:*:*:*:*:*", + "pypolicyd-spf": "cpe:2.3:a:pypolicyd-spf_project:pypolicyd-spf:*:*:*:*:*:*:*:*", + "python-gnupg": "cpe:2.3:a:python:python-gnupg:*:*:*:*:*:*:*:*", + "python-libnmap": "cpe:2.3:a:python-libnmap_project:python-libnmap:*:*:*:*:*:python:*:*", + "reqmgr2": "cpe:2.3:a:reqmgr2_project:reqmgr2:*:*:*:*:*:python:*:*", + "reqmon": "cpe:2.3:a:reqmon_project:reqmon:*:*:*:*:*:python:*:*", + "requests-xml": "cpe:2.3:a:requests-xml_project:requests-xml:*:*:*:*:*:python:*:*", + "rope": "cpe:2.3:a:rope_project:rope:*:*:*:*:*:python:*:*", + "rply": "cpe:2.3:a:rply_project:rply:*:*:*:*:*:*:*:*", + "rsa": "cpe:2.3:a:python:rsa:*:*:*:*:*:python:*:*", + "ruamel.yaml": "cpe:2.3:a:ruamel.yaml_project:ruamel.yaml:*:*:*:*:*:*:*:*", + "simiki": "cpe:2.3:a:simiki_project:simiki:*:*:*:*:*:*:*:*", + "slashify": "cpe:2.3:a:google:slashify:*:*:*:*:*:node.js:*:*", + "sopel-plugins.channelmgnt": "cpe:2.3:a:mirahezebots:channelmgnt:*:*:*:*:*:sopel:*:*", + "spacy": "cpe:2.3:a:explosion:spacy:*:*:*:*:*:python:*:*", + "sqlparse": "cpe:2.3:a:sqlparse_project:sqlparse:*:*:*:*:*:python:*:*", + "tkvideoplayer": "cpe:2.3:a:python:tkvideoplayer:*:*:*:*:*:*:*:*", + "urllib3": "cpe:2.3:a:python:urllib3:*:*:*:*:*:*:*:*", + "validators": "cpe:2.3:a:validators_project:validators:*:*:*:*:*:python:*:*", + "vault-cli": 
"cpe:2.3:a:vault-cli_project:vault-cli:*:*:*:*:*:python:*:*", + "wmagent": "cpe:2.3:a:wmagent_project:wmagent:*:*:*:*:*:python:*:*", + "xmpp-http-upload": "cpe:2.3:a:xmpp-http-upload_project:xmpp-http-upload:*:*:*:*:*:*:*:*" + }, + "rubygems": { + "Arabic-Prawn": "cpe:2.3:a:dynamixsolutions:arabic_prawn:*:*:*:*:*:ruby:*:*", + "VladTheEnterprising": "cpe:2.3:a:vladtheenterprising_project:vladtheenterprising:*:*:*:*:*:ruby:*:*", + "actionview": "cpe:2.3:a:action_view_project:action_view:*:*:*:*:*:ruby:*:*", + "activesupport": "cpe:2.3:a:activesupport_project:activesupport:*:*:*:*:*:ruby:*:*", + "backup-agoddard": "cpe:2.3:a:backup-agoddard_project:backup-agoddard:*:*:*:*:*:ruby:*:*", + "backup_checksum": "cpe:2.3:a:backup_checksum_project:backup_checksum:*:*:*:*:*:ruby:*:*", + "better_errors": "cpe:2.3:a:better_errors_project:better_errors:*:*:*:*:*:ruby:*:*", + "bio-basespace-sdk": "cpe:2.3:a:basespace_ruby_sdk_project:basespace_ruby_sdk:*:*:*:*:*:ruby:*:*", + "brbackup": "cpe:2.3:a:brbackup_project:brbackup:*:*:*:*:*:ruby:*:*", + "bson": "cpe:2.3:a:bson_project:bson:*:*:*:*:*:ruby:*:*", + "cap-strap": "cpe:2.3:a:cap-strap_project:cap-strap:*:*:*:*:*:ruby:*:*", + "ciborg": "cpe:2.3:a:ciborg_project:ciborg:*:*:*:*:*:ruby:*:*", + "codders-dataset": "cpe:2.3:a:codders-dataset_project:codders-dataset:*:*:*:*:*:ruby:*:*", + "colorscore": "cpe:2.3:a:colorscore_project:colorscore:*:*:*:*:*:ruby:*:*", + "consul": "cpe:2.3:a:makandra:consul:*:*:*:*:*:ruby:*:*", + "cremefraiche": "cpe:2.3:a:uplawski:creme_fraiche:*:*:*:*:*:ruby:*:*", + "csv-safe": "cpe:2.3:a:csv-safe_project:csv-safe:*:*:*:*:*:ruby:*:*", + "csv_sniffer": "cpe:2.3:a:csv-sniffer_project:csv-sniffer:*:*:*:*:*:rust:*:*", + "curl": "cpe:2.3:a:curl_project:curl:*:*:*:*:*:ruby:*:*", + "datagrid": "cpe:2.3:a:datagrid_project:datagrid:*:*:*:*:*:ruby:*:*", + "echor": "cpe:2.3:a:echor_project:echor:*:*:*:*:*:ruby:*:*", + "field_test": "cpe:2.3:a:field_test_project:field_test:*:*:*:*:*:ruby:*:*", + "fileutils": "cpe:2.3:a:ruby:fileutils:*:*:*:*:*:*:*:*", + "fog-dragonfly": "cpe:2.3:a:mark_evans:fog-dragonfly:*:*:*:*:*:ruby:*:*", + "ftpd": "cpe:2.3:a:ftpd_project:ftpd:*:*:*:*:*:ruby:*:*", + "gemirro": "cpe:2.3:a:gemirro_project:gemirro:*:*:*:*:*:ruby:*:*", + "gibbon": "cpe:2.3:a:gibbon_project:gibbon:*:*:*:*:*:ruby:*:*", + "globalid": "cpe:2.3:a:rubyonrails:globalid:*:*:*:*:*:ruby:*:*", + "gollum-grit_adapter": "cpe:2.3:a:gollum_project:grit_adapter:*:*:*:*:*:*:*:*", + "gyazo": "cpe:2.3:a:gyazo_project:gyazo:*:*:*:*:*:ruby:*:*", + "haml": "cpe:2.3:a:haml:haml:*:*:*:*:*:ruby:*:*", + "json-jwt": "cpe:2.3:a:json-jwt_project:json-jwt:*:*:*:*:*:ruby:*:*", + "kafo": "cpe:2.3:a:theforeman:kafo:*:*:*:*:*:*:*:*", + "kajam": "cpe:2.3:a:kajam_project:kajam:*:*:*:*:*:ruby:*:*", + "karo": "cpe:2.3:a:karo_project:karo:*:*:*:*:*:ruby:*:*", + "kcapifony": "cpe:2.3:a:kcapifony_project:kcapifony:*:*:*:*:*:ruby:*:*", + "kitchen-terraform": "cpe:2.3:a:kitchen-terraform_project:kitchen-terraform:*:*:*:*:*:ruby:*:*", + "kramdown": "cpe:2.3:a:kramdown_project:kramdown:*:*:*:*:*:ruby:*:*", + "lawn-login": "cpe:2.3:a:lawn-login_project:lawn-login:*:*:*:*:*:ruby:*:*", + "lean-ruport": "cpe:2.3:a:lean-ruport_project:lean-ruport:*:*:*:*:*:ruby:*:*", + "loofah": "cpe:2.3:a:loofah_project:loofah:*:*:*:*:*:ruby:*:*", + "lynx": "cpe:2.3:a:lynx_project:lynx:*:*:*:*:*:ruby:*:*", + "moped": "cpe:2.3:a:moped_project:moped:*:*:*:*:*:ruby:*:*", + "net-ldap": "cpe:2.3:a:net-ldap_project:net-ldap:*:*:*:*:*:ruby:*:*", + "netaddr": "cpe:2.3:a:netaddr_project:netaddr:*:*:*:*:*:ruby:*:*", 
+ "octopoller": "cpe:2.3:a:octopoller_project:octopoller:*:*:*:*:*:ruby:*:*", + "omniauth": "cpe:2.3:a:omniauth:omniauth:*:*:*:*:*:ruby:*:*", + "omniauth-auth0": "cpe:2.3:a:auth0:omniauth-auth0:*:*:*:*:*:ruby:*:*", + "omniauth-facebook": "cpe:2.3:a:omniauth-facebook_project:omniauth-facebook:*:*:*:*:*:ruby:*:*", + "omniauth-oauth2": "cpe:2.3:a:omniauth-oauth2_project:omniauth-oauth2:*:*:*:*:*:ruby:*:*", + "open-uri-cached": "cpe:2.3:a:open-uri-cached_project:open-uri-cached:*:*:*:*:*:ruby:*:*", + "openshift-origin-controller": "cpe:2.3:a:openshift-origin-controller_project:openshift-origin-controller:*:*:*:*:*:ruby:*:*", + "openssl": "cpe:2.3:a:ruby-lang:openssl:*:*:*:*:*:*:*:*", + "ox": "cpe:2.3:a:ox_project:ox:*:*:*:*:*:ruby:*:*", + "papercrop": "cpe:2.3:a:papercrop_project:papercrop:*:*:*:*:*:ruby:*:*", + "paranoid2": "cpe:2.3:a:anjlab:paranoid2:*:*:*:*:*:ruby:*:*", + "paratrooper-pingdom": "cpe:2.3:a:tobias_maier:paratrooper-pingdom:*:*:-:*:-:ruby:*:*", + "pdf_info": "cpe:2.3:a:newspaperclub:pdf_info:*:*:*:*:*:ruby:*:*", + "pdfkit": "cpe:2.3:a:pdfkit_project:pdfkit:*:*:*:*:*:ruby:*:*", + "point-cli": "cpe:2.3:a:point-cli_project:point-cli:*:*:*:*:*:ruby:*:*", + "private_address_check": "cpe:2.3:a:private_address_check_project:private_address_check:*:*:*:*:*:ruby:*:*", + "rack-cors": "cpe:2.3:a:rack-cors_project:rack-cors:*:*:*:*:*:ruby:*:*", + "rack-ssl": "cpe:2.3:a:joshua_peek:rack-ssl:*:*:*:*:*:ruby:*:*", + "rake": "cpe:2.3:a:ruby-lang:rake:*:*:*:*:*:*:*:*", + "random_password_generator": "cpe:2.3:a:random_password_generator_project:random_password_generator:*:*:*:*:*:ruby:*:*", + "rbovirt": "cpe:2.3:a:amos_benari:rbovirt:*:*:*:*:*:ruby:*:*", + "rest-client": "cpe:2.3:a:rest-client_project:rest-client:*:*:*:*:*:ruby:*:*", + "rexml": "cpe:2.3:a:ruby-lang:rexml:*:*:*:*:*:ruby:*:*", + "rgpg": "cpe:2.3:a:richard_cook:rgpg:*:*:*:*:*:ruby:*:*", + "ruby-mysql": "cpe:2.3:a:ruby-mysql_project:ruby-mysql:*:*:*:*:*:ruby:*:*", + "safemode": "cpe:2.3:a:safemode_project:safemode:*:*:*:*:*:ruby:*:*", + "secure_headers": "cpe:2.3:a:twitter:secure_headers:*:*:*:*:*:ruby:*:*", + "sfpagent": "cpe:2.3:a:herry:sfpagent:*:*:*:*:*:ruby:*:*", + "show_in_browser": "cpe:2.3:a:jonathan_leung:show_in_browser:*:*:*:*:*:ruby:*:*", + "simple_captcha2": "cpe:2.3:a:simple_captcha2_project:simple_captcha2:*:*:*:*:*:ruby:*:*", + "sounder": "cpe:2.3:a:adam_zaninovich:sounder:*:*:*:*:*:*:*:*", + "sprockets": "cpe:2.3:a:sprockets_project:sprockets:*:*:*:*:*:*:*:*", + "strong_password": "cpe:2.3:a:strong_password_project:strong_password:*:*:*:*:*:ruby:*:*", + "time": "cpe:2.3:a:ruby-lang:time:*:*:*:*:*:ruby:*:*", + "trilogy": "cpe:2.3:a:trilogy_project:trilogy:*:*:*:*:*:ruby:*:*", + "update_by_case": "cpe:2.3:a:update_by_case_project:update_by_case:*:*:*:*:*:ruby:*:*", + "uri": "cpe:2.3:a:ruby-lang:uri:*:*:*:*:*:ruby:*:*", + "websocket-extensions": "cpe:2.3:a:websocket-extensions_project:websocket-extensions:*:*:*:*:*:ruby:*:*" + }, + "rust_crates": { + "abomonation": "cpe:2.3:a:abomonation_project:abomonation:*:*:*:*:*:rust:*:*", + "abox": "cpe:2.3:a:abox_project:abox:*:*:*:*:*:rust:*:*", + "actix-codec": "cpe:2.3:a:actix:actix-codec:*:*:*:*:*:rust:*:*", + "actix-http": "cpe:2.3:a:actix:actix-http:*:*:*:*:*:rust:*:*", + "actix-service": "cpe:2.3:a:actix:actix-service:*:*:*:*:*:rust:*:*", + "actix-utils": "cpe:2.3:a:actix:actix-utils:*:*:*:*:*:rust:*:*", + "adtensor": "cpe:2.3:a:adtensor_project:adtensor:*:*:*:*:*:rust:*:*", + "alpm-rs": "cpe:2.3:a:alpm-rs_project:alpm-rs:*:*:*:*:*:rust:*:*", + "aovec": 
"cpe:2.3:a:aovec_project:aovec:*:*:*:*:*:rust:*:*", + "arr": "cpe:2.3:a:arr_project:arr:*:*:*:*:*:rust:*:*", + "array-queue": "cpe:2.3:a:array-queue_project:array-queue:*:*:*:*:*:rust:*:*", + "array-tools": "cpe:2.3:a:array-tools_project:array-tools:*:*:*:*:*:rust:*:*", + "asn1_der": "cpe:2.3:a:asn1_der_project:asn1_der:*:*:*:*:*:*:*:*", + "async-h1": "cpe:2.3:a:async-h1_project:async-h1:*:*:*:*:*:rust:*:*", + "atom": "cpe:2.3:a:atom_project:atom:*:*:*:*:*:rust:*:*", + "atomic-option": "cpe:2.3:a:atomic-option_project:atomic-option:*:*:*:*:*:rust:*:*", + "autorand": "cpe:2.3:a:autorand_project:autorand:*:*:*:*:*:rust:*:*", + "axum-core": "cpe:2.3:a:axum-core_project:axum-core:*:*:*:*:*:rust:*:*", + "bam": "cpe:2.3:a:bam_project:bam:*:*:*:*:*:rust:*:*", + "basic_dsp_matrix": "cpe:2.3:a:basic_dsp_matrix_project:basic_dsp_matrix:*:*:*:*:*:rust:*:*", + "bat": "cpe:2.3:a:bat_project:bat:*:*:*:*:*:rust:*:*", + "beef": "cpe:2.3:a:beef_project:beef:*:*:*:*:*:rust:*:*", + "bigint": "cpe:2.3:a:bigint_project:bigint:*:*:*:*:*:rust:*:*", + "binjs_io": "cpe:2.3:a:binjs_io_project:binjs_io:*:*:*:*:*:rust:*:*", + "bitvec": "cpe:2.3:a:bitvec_project:bitvec:*:*:*:*:*:rust:*:*", + "bra": "cpe:2.3:a:bra_project:bra:*:*:*:*:*:rust:*:*", + "branca": "cpe:2.3:a:hakobaito:branca:*:*:*:*:*:rust:*:*", + "buffoon": "cpe:2.3:a:buffoon_project:buffoon:*:*:*:*:*:rust:*:*", + "bzip2": "cpe:2.3:a:bzip2_project:bzip2:*:*:*:*:*:rust:*:*", + "cache": "cpe:2.3:a:cache_project:cache:*:*:*:*:*:rust:*:*", + "cached": "cpe:2.3:a:cached_project:cached:*:*:*:*:*:rust:*:*", + "cbox": "cpe:2.3:a:cbox_project:cbox:*:*:*:*:*:rust:*:*", + "ckb": "cpe:2.3:a:nervos:ckb:*:*:*:*:*:rust:*:*", + "conduit-hyper": "cpe:2.3:a:conduit-hyper_project:conduit-hyper:*:*:*:*:*:rust:*:*", + "conqueue": "cpe:2.3:a:conqueue_project:conqueue:*:*:*:*:*:rust:*:*", + "containers": "cpe:2.3:a:containers_project:containers:*:*:*:*:*:rust:*:*", + "cranelift-codegen": "cpe:2.3:a:bytecodealliance:cranelift-codegen:*:*:*:*:*:rust:*:*", + "crossbeam-channel": "cpe:2.3:a:crossbeam-channel_project:crossbeam-channel:*:*:*:*:*:rust:*:*", + "crypto2": "cpe:2.3:a:crypto2_project:crypto2:*:*:*:*:*:rust:*:*", + "dync": "cpe:2.3:a:dync_project:dync:*:*:*:*:*:rust:*:*", + "eventio": "cpe:2.3:a:petabi:eventio:*:*:*:*:*:rust:*:*", + "failure": "cpe:2.3:a:failure_project:failure:*:*:*:*:*:rust:*:*", + "fil-ocl": "cpe:2.3:a:fil-ocl_project:fil-ocl:*:*:*:*:*:rust:*:*", + "flatbuffers": "cpe:2.3:a:google:flatbuffers:*:*:*:*:*:rust:*:*", + "futures-intrusive": "cpe:2.3:a:futures-intrusive_project:futures-intrusive:*:*:*:*:*:rust:*:*", + "futures-task": "cpe:2.3:a:rust-lang:futures-task:*:*:*:*:*:rust:*:*", + "generator": "cpe:2.3:a:generator_project:generator:*:*:*:*:*:rust:*:*", + "generic-array": "cpe:2.3:a:generic-array_project:generic-array:*:*:*:*:*:rust:*:*", + "getrandom": "cpe:2.3:a:getrandom_project:getrandom:*:*:*:*:*:rust:*:*", + "gfx-auxil": "cpe:2.3:a:gfx-auxil_project:gfx-auxil:*:*:*:*:*:rust:*:*", + "glsl-layout": "cpe:2.3:a:glsl-layout_project:glsl-layout:*:*:*:*:*:rust:*:*", + "hashconsing": "cpe:2.3:a:hashconsing_project:hashconsing:*:*:*:*:*:rust:*:*", + "heapless": "cpe:2.3:a:heapless_project:heapless:*:*:*:*:*:rust:*:*", + "iced-x86": "cpe:2.3:a:iced-x86_project:iced-x86:*:*:*:*:*:rust:*:*", + "id-map": "cpe:2.3:a:id-map_project:id-map:*:*:*:*:*:rust:*:*", + "insert_many": "cpe:2.3:a:insert_many_project:insert_many:*:*:*:*:*:rust:*:*", + "internment": "cpe:2.3:a:internment_project:internment:*:*:*:*:*:rust:*:*", + "kamadak-exif": 
"cpe:2.3:a:kamadak-exif_project:kamadak-exif:*:*:*:*:*:rust:*:*", + "kekbit": "cpe:2.3:a:kekbit_project:kekbit:*:*:*:*:*:rust:*:*", + "lazy-init": "cpe:2.3:a:lazy-init_project:lazy-init:*:*:*:*:*:rust:*:*", + "lever": "cpe:2.3:a:lever_project:lever:*:*:*:*:*:rust:*:*", + "libpulse-binding": "cpe:2.3:a:libpulse-binding_project:libpulse-binding:*:*:*:*:*:rust:*:*", + "libsbc": "cpe:2.3:a:libsbc_project:libsbc:*:*:*:*:*:rust:*:*", + "linux-loader": "cpe:2.3:a:linux-loader_project:linux-loader:*:*:*:*:*:rust:*:*", + "lock_api": "cpe:2.3:a:lock_api_project:lock_api:*:*:*:*:*:rust:*:*", + "lru": "cpe:2.3:a:lru_project:lru:*:*:*:*:*:rust:*:*", + "lru-cache": "cpe:2.3:a:lru-cache_project:lru-cache:*:*:*:*:*:rust:*:*", + "lucet-runtime-internals": "cpe:2.3:a:lucet-runtime-internals_project:lucet-runtime-internals:*:*:*:*:*:rust:*:*", + "magnetic": "cpe:2.3:a:magnetic_project:magnetic:*:*:*:*:*:rust:*:*", + "may": "cpe:2.3:a:may_project:may:*:*:*:*:*:rust:*:*", + "metrics-util": "cpe:2.3:a:metrics-util_project:metrics-util:*:*:*:*:*:rust:*:*", + "molecule": "cpe:2.3:a:nervos:molecule:*:*:*:*:*:rust:*:*", + "mopa": "cpe:2.3:a:mopa_project:mopa:*:*:*:*:*:rust:*:*", + "mozwire": "cpe:2.3:a:mozwire_project:mozwire:*:*:*:*:*:rust:*:*", + "multiqueue2": "cpe:2.3:a:multiqueue2_project:multiqueue2:*:*:*:*:*:rust:*:*", + "nano_arena": "cpe:2.3:a:nano_arena_project:nano_arena:*:*:*:*:*:rust:*:*", + "nanorand": "cpe:2.3:a:nanorand_project:nanorand:*:*:*:*:*:rust:*:*", + "nb-connect": "cpe:2.3:a:nb-connect_project:nb-connect:*:*:*:*:*:rust:*:*", + "ncurses": "cpe:2.3:a:ncurses_project:ncurses:*:*:*:*:*:rust:*:*", + "outer_cgi": "cpe:2.3:a:outer_cgi_project:outer_cgi:*:*:*:*:*:rust:*:*", + "ozone": "cpe:2.3:a:ozone_project:ozone:*:*:*:*:*:rust:*:*", + "pancurses": "cpe:2.3:a:pancurses_project:pancurses:*:*:*:*:*:rust:*:*", + "parc": "cpe:2.3:a:parc_project:parc:*:*:*:*:*:rust:*:*", + "parse_duration": "cpe:2.3:a:parse_duration_project:parse_duration:*:*:*:*:*:rust:*:*", + "pnet": "cpe:2.3:a:pnet_project:pnet:*:*:*:*:*:rust:*:*", + "postscript": "cpe:2.3:a:postscript_project:postscript:*:*:*:*:*:rust:*:*", + "quinn": "cpe:2.3:a:quinn_project:quinn:*:*:*:*:*:rust:*:*", + "qwutils": "cpe:2.3:a:qwutils_project:qwutils:*:*:*:*:*:rust:*:*", + "rand": "cpe:2.3:a:rand_project:rand:*:*:*:*:*:*:*:*", + "raw-cpuid": "cpe:2.3:a:raw-cpuid_project:raw-cpuid:*:*:*:*:*:rust:*:*", + "rdiff": "cpe:2.3:a:rdiff_project:rdiff:*:*:*:*:*:rust:*:*", + "rio": "cpe:2.3:a:rio_project:rio:*:*:*:*:*:rust:*:*", + "rulinalg": "cpe:2.3:a:rulinalg_project:rulinalg:*:*:*:*:*:rust:*:*", + "rust-embed": "cpe:2.3:a:rust-embed_project:rust-embed:*:*:*:*:*:rust:*:*", + "scratchpad": "cpe:2.3:a:scratchpad_project:scratchpad:*:*:*:*:*:rust:*:*", + "sgx_tstd": "cpe:2.3:a:sgx_tstd_project:sgx_tstd:*:*:*:*:*:rust:*:*", + "slack_morphism": "cpe:2.3:a:slack_morphism_project:slack_morphism:*:*:*:*:*:rust:*:*", + "stack_dst": "cpe:2.3:a:stack_dst_project:stack_dst:*:*:*:*:*:rust:*:*", + "syncpool": "cpe:2.3:a:syncpool_project:syncpool:*:*:*:*:*:rust:*:*", + "sys-info": "cpe:2.3:a:sys-info_project:sys-info:*:*:*:*:*:rust:*:*", + "tectonic_xdv": "cpe:2.3:a:tectonic_xdv_project:tectonic_xdv:*:*:*:*:*:rust:*:*", + "thex": "cpe:2.3:a:thex_project:thex:*:*:*:*:*:rust:*:*", + "ticketed_lock": "cpe:2.3:a:ticketed_lock_project:ticketed_lock:*:*:*:*:*:rust:*:*", + "tiny_future": "cpe:2.3:a:tiny_future_project:tiny_future:*:*:*:*:*:rust:*:*", + "tokio": "cpe:2.3:a:tokio_project:tokio:*:*:*:*:*:rust:*:*", + "toodee": "cpe:2.3:a:toodee_project:toodee:*:*:*:*:*:rust:*:*", 
+ "tough": "cpe:2.3:a:amazon:tough:*:*:*:*:*:*:*:*", + "traitobject": "cpe:2.3:a:traitobject_project:traitobject:*:*:*:*:*:rust:*:*", + "truetype": "cpe:2.3:a:truetype_project:truetype:*:*:*:*:*:rust:*:*", + "trust-dns-proto": "cpe:2.3:a:trust-dns-proto_project:trust-dns-proto:*:*:*:*:*:*:*:*", + "try-mutex": "cpe:2.3:a:try-mutex_project:try-mutex:*:*:*:*:*:rust:*:*", + "unicycle": "cpe:2.3:a:unicycle_project:unicycle:*:*:*:*:*:rust:*:*", + "uu_od": "cpe:2.3:a:uu_od_project:uu_od:*:*:*:*:*:rust:*:*", + "v9": "cpe:2.3:a:v9_project:v9:*:*:*:*:*:rust:*:*", + "va-ts": "cpe:2.3:a:va-ts_project:va-ts:*:*:*:*:*:rust:*:*", + "vec-const": "cpe:2.3:a:vec-const_project:vec-const:*:*:*:*:*:rust:*:*", + "versionize": "cpe:2.3:a:versionize_project:versionize:*:*:*:*:*:rust:*:*", + "ws": "cpe:2.3:a:ws-rs_project:ws-rs:*:*:*:*:*:rust:*:*", + "yottadb": "cpe:2.3:a:yottadb:yottadb:*:*:*:*:*:rust:*:*", + "zeroize_derive": "cpe:2.3:a:zeroize_derive_project:zeroize_derive:*:*:*:*:*:rust:*:*" + } + } +} \ No newline at end of file diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/common/cpe/dictionary/generate_index.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/common/cpe/dictionary/generate_index.go new file mode 100644 index 00000000..35a6e46d --- /dev/null +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/common/cpe/dictionary/generate_index.go @@ -0,0 +1,3 @@ +package dictionary + +//go:generate go run ./index-generator/ -o data/cpe-index.json diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/common/cpe/dictionary/types.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/common/cpe/dictionary/types.go new file mode 100644 index 00000000..a8a5f8f7 --- /dev/null +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/common/cpe/dictionary/types.go @@ -0,0 +1,15 @@ +package dictionary + +const ( + EcosystemNPM = "npm" + EcosystemRubyGems = "rubygems" + EcosystemPyPI = "pypi" + EcosystemJenkinsPlugins = "jenkins_plugins" + EcosystemRustCrates = "rust_crates" +) + +type Indexed struct { + EcosystemPackages map[string]Packages `json:"ecosystems"` +} + +type Packages map[string]string diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/common/cpe/generate.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/common/cpe/generate.go index ba17df0d..b2c8ffd3 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/common/cpe/generate.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/common/cpe/generate.go @@ -3,16 +3,21 @@ package cpe import ( "bufio" "bytes" + _ "embed" + "encoding/json" "fmt" "sort" "strings" + "sync" "github.com/facebookincubator/nvdtools/wfn" "github.com/scylladb/go-set/strset" "github.com/anchore/syft/internal" + "github.com/anchore/syft/internal/log" "github.com/anchore/syft/syft/cpe" "github.com/anchore/syft/syft/pkg" + "github.com/anchore/syft/syft/pkg/cataloger/common/cpe/dictionary" ) // knownVendors contains vendor strings that are known to exist in @@ -32,6 +37,77 @@ func newCPE(product, vendor, version, targetSW string) *wfn.Attributes { return &c } +//go:embed dictionary/data/cpe-index.json +var indexedCPEDictionaryData []byte + +var indexedCPEDictionary *dictionary.Indexed +var indexedCPEDictionaryOnce sync.Once + +func GetIndexedDictionary() (_ *dictionary.Indexed, err error) { + indexedCPEDictionaryOnce.Do(func() { + err = json.Unmarshal(indexedCPEDictionaryData, &indexedCPEDictionary) + }) + + if err != nil { + return + } + + if indexedCPEDictionary == nil { + err = fmt.Errorf("failed to unmarshal indexed CPE 
dictionary") + return + } + + return indexedCPEDictionary, err +} + +func DictionaryFind(p pkg.Package) (cpe.CPE, bool) { + dict, err := GetIndexedDictionary() + if err != nil { + log.Debugf("dictionary CPE lookup not available: %+v", err) + return cpe.CPE{}, false + } + + var ( + cpeString string + ok bool + ) + + switch p.Type { + case pkg.NpmPkg: + cpeString, ok = dict.EcosystemPackages[dictionary.EcosystemNPM][p.Name] + + case pkg.GemPkg: + cpeString, ok = dict.EcosystemPackages[dictionary.EcosystemRubyGems][p.Name] + + case pkg.PythonPkg: + cpeString, ok = dict.EcosystemPackages[dictionary.EcosystemPyPI][p.Name] + + case pkg.JenkinsPluginPkg: + cpeString, ok = dict.EcosystemPackages[dictionary.EcosystemJenkinsPlugins][p.Name] + + case pkg.RustPkg: + cpeString, ok = dict.EcosystemPackages[dictionary.EcosystemRustCrates][p.Name] + + default: + // The dictionary doesn't support this package type yet. + return cpe.CPE{}, false + } + + if !ok { + // The dictionary doesn't have a CPE for this package. + return cpe.CPE{}, false + } + + parsedCPE, err := cpe.New(cpeString) + if err != nil { + return cpe.CPE{}, false + } + + parsedCPE.Version = p.Version + + return parsedCPE, true +} + // Generate Create a list of CPEs for a given package, trying to guess the vendor, product tuple. We should be trying to // generate the minimal set of representative CPEs, which implies that optional fields should not be included // (such as target SW). diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/common/cpe/java.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/common/cpe/java.go index 6de454c0..2e838de3 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/common/cpe/java.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/common/cpe/java.go @@ -181,13 +181,13 @@ func GroupIDsFromJavaPackage(p pkg.Package) (groupIDs []string) { return nil } - return GroupIDsFromJavaMetadata(metadata) + return GroupIDsFromJavaMetadata(p.Name, metadata) } -func GroupIDsFromJavaMetadata(metadata pkg.JavaMetadata) (groupIDs []string) { +func GroupIDsFromJavaMetadata(pkgName string, metadata pkg.JavaMetadata) (groupIDs []string) { groupIDs = append(groupIDs, groupIDsFromPomProperties(metadata.PomProperties)...) groupIDs = append(groupIDs, groupIDsFromPomProject(metadata.PomProject)...) - groupIDs = append(groupIDs, groupIDsFromJavaManifest(metadata.Manifest)...) + groupIDs = append(groupIDs, groupIDsFromJavaManifest(pkgName, metadata.Manifest)...) 
return groupIDs } @@ -241,7 +241,11 @@ func addGroupIDsFromGroupIDsAndArtifactID(groupID, artifactID string) (groupIDs return groupIDs } -func groupIDsFromJavaManifest(manifest *pkg.JavaManifest) []string { +func groupIDsFromJavaManifest(pkgName string, manifest *pkg.JavaManifest) []string { + if groupID, ok := defaultArtifactIDToGroupID[pkgName]; ok { + return []string{groupID} + } + if manifest == nil { return nil } diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/common/cpe/java_groupid_map.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/common/cpe/java_groupid_map.go new file mode 100644 index 00000000..9e36db18 --- /dev/null +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/common/cpe/java_groupid_map.go @@ -0,0 +1,69 @@ +package cpe + +var defaultArtifactIDToGroupID = map[string]string{ + "ant": "org.apache.ant", + "ant-antlr": "org.apache.ant", + "ant-antunit": "org.apache.ant", + "ant-apache-bcel": "org.apache.ant", + "ant-apache-bsf": "org.apache.ant", + "ant-apache-log4j": "org.apache.ant", + "ant-apache-oro": "org.apache.ant", + "ant-apache-regexp": "org.apache.ant", + "ant-apache-resolver": "org.apache.ant", + "ant-apache-xalan2": "org.apache.ant", + "ant-commons-logging": "org.apache.ant", + "ant-commons-net": "org.apache.ant", + "ant-compress": "org.apache.ant", + "ant-dotnet": "org.apache.ant", + "ant-imageio": "org.apache.ant", + "ant-jai": "org.apache.ant", + "ant-jakartamail": "org.apache.ant", + "ant-javamail": "org.apache.ant", + "ant-jdepend": "org.apache.ant", + "ant-jmf": "org.apache.ant", + "ant-jsch": "org.apache.ant", + "ant-junit": "org.apache.ant", + "ant-junit4": "org.apache.ant", + "ant-junitlauncher": "org.apache.ant", + "ant-launcher": "org.apache.ant", + "ant-netrexx": "org.apache.ant", + "ant-nodeps": "org.apache.ant", + "ant-parent": "org.apache.ant", + "ant-starteam": "org.apache.ant", + "ant-stylebook": "org.apache.ant", + "ant-swing": "org.apache.ant", + "ant-testutil": "org.apache.ant", + "ant-trax": "org.apache.ant", + "ant-weblogic": "org.apache.ant", + "ant-xz": "org.apache.ant", + "spring": "org.springframework", + "spring-amqp": "org.springframework.amqp", + "spring-batch-core": "org.springframework.batch", + "spring-beans": "org.springframework", + "spring-boot": "org.springframework.boot", + "spring-boot-starter-web": "org.springframework.boot", + "spring-boot-starter-webflux": "org.springframework.boot", + "spring-cloud-function-context": "org.springframework.cloud", + "spring-cloud-function-parent": "org.springframework.cloud", + "spring-cloud-gateway": "org.springframework.cloud", + "spring-cloud-openfeign-core": "org.springframework.cloud", + "spring-cloud-task-dependencies": "org.springframework.cloud", + "spring-core": "org.springframework", + "spring-data-jpa": "org.springframework.data", + "spring-data-mongodb": "org.springframework.data", + "spring-data-rest-core": "org.springframework.data", + "spring-expression": "org.springframework", + "spring-integration-zip": "org.springframework.integration", + "spring-oxm": "org.springframework", + "spring-security-core": "org.springframework.security", + "spring-security-config": "org.springframework.security", + "spring-security-oauth": "org.springframework.security.oauth", + "spring-security-oauth-parent": "org.springframework.security.oauth", + "spring-security-oauth2-client": "org.springframework.security", + "spring-session-core": "org.springframework.session", + "spring-vault-core": "org.springframework.vault", + "spring-web": "org.springframework", + 
"spring-webflow": "org.springframework.webflow", + "spring-webflux": "org.springframework", + "spring-webmvc": "org.springframework", +} diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/config.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/config.go index 8a334a78..df3b6397 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/config.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/config.go @@ -4,23 +4,27 @@ import ( "github.com/anchore/syft/syft/pkg/cataloger/golang" "github.com/anchore/syft/syft/pkg/cataloger/java" "github.com/anchore/syft/syft/pkg/cataloger/kernel" + "github.com/anchore/syft/syft/pkg/cataloger/python" ) // TODO: these field naming vs helper function naming schemes are inconsistent. - type Config struct { - Search SearchConfig - Golang golang.GoCatalogerOpts - LinuxKernel kernel.LinuxCatalogerConfig - Catalogers []string - Parallelism int + Search SearchConfig + Golang golang.GoCatalogerOpts + LinuxKernel kernel.LinuxCatalogerConfig + Python python.CatalogerConfig + Catalogers []string + Parallelism int + ExcludeBinaryOverlapByOwnership bool } func DefaultConfig() Config { return Config{ - Search: DefaultSearchConfig(), - Parallelism: 1, - LinuxKernel: kernel.DefaultLinuxCatalogerConfig(), + Search: DefaultSearchConfig(), + Parallelism: 1, + LinuxKernel: kernel.DefaultLinuxCatalogerConfig(), + Python: python.DefaultCatalogerConfig(), + ExcludeBinaryOverlapByOwnership: true, } } @@ -30,11 +34,3 @@ func (c Config) Java() java.Config { SearchIndexedArchives: c.Search.IncludeIndexedArchives, } } - -func (c Config) Go() golang.GoCatalogerOpts { - return c.Golang -} - -func (c Config) Kernel() kernel.LinuxCatalogerConfig { - return c.LinuxKernel -} diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/deb/cataloger.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/deb/cataloger.go index 70bd8e42..946abae9 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/deb/cataloger.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/deb/cataloger.go @@ -14,5 +14,5 @@ func NewDpkgdbCataloger() *generic.Cataloger { return generic.NewCataloger(catalogerName). // note: these globs have been intentionally split up in order to improve search performance, // please do NOT combine into: "**/var/lib/dpkg/{status,status.d/*}" - WithParserByGlobs(parseDpkgDB, "**/var/lib/dpkg/status", "**/var/lib/dpkg/status.d/*") + WithParserByGlobs(parseDpkgDB, "**/var/lib/dpkg/status", "**/var/lib/dpkg/status.d/*", "**/lib/opkg/info/*.control", "**/lib/opkg/status") } diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/dotnet/cataloger.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/dotnet/cataloger.go index 159edcb2..938ccfba 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/dotnet/cataloger.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/dotnet/cataloger.go @@ -4,10 +4,13 @@ import ( "github.com/anchore/syft/syft/pkg/cataloger/generic" ) -const catalogerName = "dotnet-deps-cataloger" - // NewDotnetDepsCataloger returns a new Dotnet cataloger object base on deps json files. func NewDotnetDepsCataloger() *generic.Cataloger { - return generic.NewCataloger(catalogerName). + return generic.NewCataloger("dotnet-deps-cataloger"). WithParserByGlobs(parseDotnetDeps, "**/*.deps.json") } + +func NewDotnetPortableExecutableCataloger() *generic.Cataloger { + return generic.NewCataloger("dotnet-portable-executable-cataloger"). 
+ WithParserByGlobs(parseDotnetPortableExecutable, "**/*.dll", "**/*.exe") +} diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/dotnet/parse_dotnet_portable_executable.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/dotnet/parse_dotnet_portable_executable.go new file mode 100644 index 00000000..b57b2f06 --- /dev/null +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/dotnet/parse_dotnet_portable_executable.go @@ -0,0 +1,87 @@ +package dotnet + +import ( + "fmt" + "io" + + "github.com/saferwall/pe" + + "github.com/anchore/packageurl-go" + "github.com/anchore/syft/internal/log" + "github.com/anchore/syft/syft/artifact" + "github.com/anchore/syft/syft/file" + "github.com/anchore/syft/syft/pkg" + "github.com/anchore/syft/syft/pkg/cataloger/generic" +) + +var _ generic.Parser = parseDotnetPortableExecutable + +func parseDotnetPortableExecutable(_ file.Resolver, _ *generic.Environment, f file.LocationReadCloser) ([]pkg.Package, []artifact.Relationship, error) { + by, err := io.ReadAll(f) + if err != nil { + return nil, nil, fmt.Errorf("unable to read file: %w", err) + } + + peFile, err := pe.NewBytes(by, &pe.Options{}) + if err != nil { + return nil, nil, fmt.Errorf("unable to create PE file instance: %w", err) + } + + err = peFile.Parse() + if err != nil { + return nil, nil, fmt.Errorf("unable to parse PE file: %w", err) + } + + versionResources, err := peFile.ParseVersionResources() + if err != nil { + // this is not a fatal error, just log and continue + // TODO: consider this case for "known unknowns" (same goes for cases below) + log.Tracef("unable to parse version resources in PE file: %s", f.RealPath) + return nil, nil, nil + } + + name := versionResources["FileDescription"] + if name == "" { + log.Tracef("unable to find FileDescription in PE file: %s", f.RealPath) + return nil, nil, nil + } + + version := versionResources["FileVersion"] + if version == "" { + log.Tracef("unable to find FileVersion in PE file: %s", f.RealPath) + return nil, nil, nil + } + + purl := packageurl.NewPackageURL( + packageurl.TypeNuget, // See explanation in syft/pkg/cataloger/dotnet/package.go as to why this was chosen. 
+ "", + name, + version, + nil, + "", + ).ToString() + + metadata := pkg.DotnetPortableExecutableMetadata{ + AssemblyVersion: versionResources["Assembly Version"], + LegalCopyright: versionResources["LegalCopyright"], + Comments: versionResources["Comments"], + InternalName: versionResources["InternalName"], + CompanyName: versionResources["CompanyName"], + ProductName: versionResources["ProductName"], + ProductVersion: versionResources["ProductVersion"], + } + + p := pkg.Package{ + Name: name, + Version: version, + Locations: file.NewLocationSet(f.Location), + Type: pkg.DotnetPkg, + PURL: purl, + MetadataType: pkg.DotnetPortableExecutableMetadataType, + Metadata: metadata, + } + + p.SetID() + + return []pkg.Package{p}, nil, nil +} diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/java/package_url.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/java/package_url.go index b091ac38..adf05ff7 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/java/package_url.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/java/package_url.go @@ -9,7 +9,7 @@ import ( // PackageURL returns the PURL for the specific java package (see https://github.com/package-url/purl-spec) func packageURL(name, version string, metadata pkg.JavaMetadata) string { var groupID = name - groupIDs := cpe.GroupIDsFromJavaMetadata(metadata) + groupIDs := cpe.GroupIDsFromJavaMetadata(name, metadata) if len(groupIDs) > 0 { groupID = groupIDs[0] } diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/java/parse_gradle_lockfile.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/java/parse_gradle_lockfile.go index 65adf7ae..3506b44b 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/java/parse_gradle_lockfile.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/java/parse_gradle_lockfile.go @@ -57,7 +57,16 @@ func parseGradleLockfile(_ file.Resolver, _ *generic.Environment, reader file.Lo Language: pkg.Java, Type: pkg.JavaPkg, MetadataType: pkg.JavaMetadataType, + Metadata: pkg.JavaMetadata{ + PomProject: &pkg.PomProject{ + GroupID: dep.Group, + ArtifactID: dep.Name, + Version: dep.Version, + Name: dep.Name, + }, + }, } + mappedPkg.SetID() pkgs = append(pkgs, mappedPkg) } diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/java/parse_pom_xml.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/java/parse_pom_xml.go index ed2a3c9e..704cb559 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/java/parse_pom_xml.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/java/parse_pom_xml.go @@ -28,17 +28,19 @@ func parserPomXML(_ file.Resolver, _ *generic.Environment, reader file.LocationR } var pkgs []pkg.Package - for _, dep := range pom.Dependencies { - p := newPackageFromPom( - pom, - dep, - reader.Location.WithAnnotation(pkg.EvidenceAnnotationKey, pkg.PrimaryEvidenceAnnotation), - ) - if p.Name == "" { - continue - } + if pom.Dependencies != nil { + for _, dep := range *pom.Dependencies { + p := newPackageFromPom( + pom, + dep, + reader.Location.WithAnnotation(pkg.EvidenceAnnotationKey, pkg.PrimaryEvidenceAnnotation), + ) + if p.Name == "" { + continue + } - pkgs = append(pkgs, p) + pkgs = append(pkgs, p) + } } return pkgs, nil, nil @@ -53,15 +55,18 @@ func parsePomXMLProject(path string, reader io.Reader) (*pkg.PomProject, error) } func newPomProject(path string, p gopom.Project) *pkg.PomProject { + artifactID := safeString(p.ArtifactID) + name := safeString(p.Name) + projectURL := safeString(p.URL) return &pkg.PomProject{ Path: path, 
Parent: pomParent(p, p.Parent), GroupID: resolveProperty(p, p.GroupID), - ArtifactID: p.ArtifactID, + ArtifactID: artifactID, Version: resolveProperty(p, p.Version), - Name: p.Name, + Name: name, Description: cleanDescription(p.Description), - URL: p.URL, + URL: projectURL, } } @@ -74,7 +79,7 @@ func newPackageFromPom(pom gopom.Project, dep gopom.Dependency, locations ...fil }, } - name := dep.ArtifactID + name := safeString(dep.ArtifactID) version := resolveProperty(pom, dep.Version) p := pkg.Package{ @@ -104,19 +109,29 @@ func decodePomXML(content io.Reader) (project gopom.Project, err error) { return project, nil } -func pomParent(pom gopom.Project, parent gopom.Parent) (result *pkg.PomParent) { - if parent.ArtifactID != "" || parent.GroupID != "" || parent.Version != "" { - result = &pkg.PomParent{ - GroupID: resolveProperty(pom, parent.GroupID), - ArtifactID: parent.ArtifactID, - Version: resolveProperty(pom, parent.Version), - } +func pomParent(pom gopom.Project, parent *gopom.Parent) (result *pkg.PomParent) { + if parent == nil { + return nil + } + + artifactID := safeString(parent.ArtifactID) + result = &pkg.PomParent{ + GroupID: resolveProperty(pom, parent.GroupID), + ArtifactID: artifactID, + Version: resolveProperty(pom, parent.Version), + } + + if result.GroupID == "" && result.ArtifactID == "" && result.Version == "" { + return nil } return result } -func cleanDescription(original string) (cleaned string) { - descriptionLines := strings.Split(original, "\n") +func cleanDescription(original *string) (cleaned string) { + if original == nil { + return "" + } + descriptionLines := strings.Split(*original, "\n") for _, line := range descriptionLines { line = strings.TrimSpace(line) if len(line) == 0 { @@ -130,12 +145,17 @@ func cleanDescription(original string) (cleaned string) { // resolveProperty emulates some maven property resolution logic by looking in the project's variables // as well as supporting the project expressions like ${project.parent.groupId}. 
// If no match is found, the entire expression including ${} is returned -func resolveProperty(pom gopom.Project, property string) string { - return propertyMatcher.ReplaceAllStringFunc(property, func(match string) string { +// +//nolint:gocognit +func resolveProperty(pom gopom.Project, property *string) string { + propertyCase := safeString(property) + return propertyMatcher.ReplaceAllStringFunc(propertyCase, func(match string) string { propertyName := strings.TrimSpace(match[2 : len(match)-1]) - if value, ok := pom.Properties.Entries[propertyName]; ok { + entries := pomProperties(pom) + if value, ok := entries[propertyName]; ok { return value } + // if we don't find anything directly in the pom properties, // see if we have a project.x expression and process this based // on the xml tags in gopom @@ -151,9 +171,15 @@ func resolveProperty(pom gopom.Project, property string) string { part := parts[partNum] for fieldNum := 0; fieldNum < pomValueType.NumField(); fieldNum++ { f := pomValueType.Field(fieldNum) - if part == f.Tag.Get("xml") { + tag := f.Tag.Get("xml") + tag = strings.TrimSuffix(tag, ",omitempty") + if part == tag { pomValue = pomValue.Field(fieldNum) pomValueType = pomValue.Type() + if pomValueType.Kind() == reflect.Ptr { + pomValue = pomValue.Elem() + pomValueType = pomValue.Type() + } if partNum == numParts-1 { return fmt.Sprintf("%v", pomValue.Interface()) } @@ -165,3 +191,17 @@ func resolveProperty(pom gopom.Project, property string) string { return match }) } + +func pomProperties(p gopom.Project) map[string]string { + if p.Properties != nil { + return p.Properties.Entries + } + return map[string]string{} +} + +func safeString(s *string) string { + if s == nil { + return "" + } + return *s +} diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/package_exclusions.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/package_exclusions.go new file mode 100644 index 00000000..f99e088f --- /dev/null +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/package_exclusions.go @@ -0,0 +1,51 @@ +package cataloger + +import ( + "golang.org/x/exp/slices" + + "github.com/anchore/syft/syft/artifact" + "github.com/anchore/syft/syft/pkg" +) + +var ( + osCatalogerTypes = []pkg.Type{ + pkg.AlpmPkg, + pkg.ApkPkg, + pkg.DebPkg, + pkg.NixPkg, + pkg.PortagePkg, + pkg.RpmPkg, + } + binaryCatalogerTypes = []pkg.Type{ + pkg.BinaryPkg, + } +) + +// ExcludeBinaryByFileOwnershipOverlap will remove packages from a collection given the following properties are true +// 1) the relationship between packages is OwnershipByFileOverlap +// 2) the parent is an "os" package +// 3) the child is a synthetic package generated by the binary cataloger +// 4) the package names are identical +// This was implemented as a way to help resolve: https://github.com/anchore/syft/issues/931 +func ExcludeBinaryByFileOwnershipOverlap(r artifact.Relationship, c *pkg.Collection) bool { + if artifact.OwnershipByFileOverlapRelationship != r.Type { + return false + } + + parent := c.Package(r.From.ID()) + if parent == nil { + return false + } + + parentInExclusion := slices.Contains(osCatalogerTypes, parent.Type) + if !parentInExclusion { + return false + } + + child := c.Package(r.To.ID()) + if child == nil { + return false + } + + return slices.Contains(binaryCatalogerTypes, child.Type) +} diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/python/cataloger.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/python/cataloger.go index 1401c3a2..88868255 100644 --- 
a/vendor/github.com/anchore/syft/syft/pkg/cataloger/python/cataloger.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/python/cataloger.go @@ -6,10 +6,21 @@ import ( const eggInfoGlob = "**/*.egg-info" +type CatalogerConfig struct { + GuessUnpinnedRequirements bool +} + +func DefaultCatalogerConfig() CatalogerConfig { + return CatalogerConfig{ + GuessUnpinnedRequirements: false, + } +} + // NewPythonIndexCataloger returns a new cataloger for python packages referenced from poetry lock files, requirements.txt files, and setup.py files. -func NewPythonIndexCataloger() *generic.Cataloger { +func NewPythonIndexCataloger(cfg CatalogerConfig) *generic.Cataloger { + rqp := newRequirementsParser(cfg) return generic.NewCataloger("python-index-cataloger"). - WithParserByGlobs(parseRequirementsTxt, "**/*requirements*.txt"). + WithParserByGlobs(rqp.parseRequirementsTxt, "**/*requirements*.txt"). WithParserByGlobs(parsePoetryLock, "**/poetry.lock"). WithParserByGlobs(parsePipfileLock, "**/Pipfile.lock"). WithParserByGlobs(parseSetup, "**/setup.py") diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/python/parse_requirements.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/python/parse_requirements.go index 33e1371b..ac310b10 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/python/parse_requirements.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/python/parse_requirements.go @@ -7,6 +7,10 @@ import ( "strings" "unicode" + pep440 "github.com/aquasecurity/go-pep440-version" + "github.com/mitchellh/mapstructure" + + "github.com/anchore/syft/internal" "github.com/anchore/syft/internal/log" "github.com/anchore/syft/syft/artifact" "github.com/anchore/syft/syft/file" @@ -14,23 +18,99 @@ import ( "github.com/anchore/syft/syft/pkg/cataloger/generic" ) -var _ generic.Parser = parseRequirementsTxt +const ( + // given the example requirement: + // requests[security] == 2.8.* ; python_version < "2.7" and sys_platform == "linux" \ + // --hash=sha256:a9b3aaa1904eeb78e32394cd46c6f37ac0fb4af6dc488daa58971bdc7d7fcaf3 \ + // --hash=sha256:e9535b8c84dc9571a48999094fda7f33e63c3f1b74f3e5f3ac0105a58405bb65 # some comment + + // namePattern matches: requests[security] + namePattern = `(?P<name>\w[\w\[\],\s-_]+)` + + // versionConstraintPattern matches: == 2.8.* + versionConstraintPattern = `(?P<versionConstraint>([^\S\r\n]*[~=>!<]+\s*[0-9a-zA-Z.*]+[^\S\r\n]*,?)+)?(@[^\S\r\n]*(?P<url>[^;]*))?` -var ( - extrasRegex = regexp.MustCompile(`\[.*\]`) - urlRegex = regexp.MustCompile("@.*git.*") + // markersPattern matches: python_version < "2.7" and sys_platform == "linux" + markersPattern = `(;(?P<markers>.*))?` + + // hashesPattern matches: --hash=sha256:a9b3aaa1904eeb78e32394cd46c6f37ac0fb4af6dc488daa58971bdc7d7fcaf3 --hash=sha256:e9535b8c84dc9571a48999094fda7f33e63c3f1b74f3e5f3ac0105a58405bb65 + hashesPattern = `(?P<hashes>([^\S\r\n]*--hash=[a-zA-Z0-9:]+)+)?` + + // whiteSpaceNoNewlinePattern matches: (any whitespace character except for \r and \n) + whiteSpaceNoNewlinePattern = `[^\S\r\n]*` ) +var requirementPattern = regexp.MustCompile( + `^` + + whiteSpaceNoNewlinePattern + + namePattern + + whiteSpaceNoNewlinePattern + + versionConstraintPattern + + markersPattern + + hashesPattern, +) + +type unprocessedRequirement struct { + Name string `mapstructure:"name"` + VersionConstraint string `mapstructure:"versionConstraint"` + Markers string `mapstructure:"markers"` + URL string `mapstructure:"url"` + Hashes string `mapstructure:"hashes"` +} + +func newRequirement(raw string) *unprocessedRequirement { + var r 
unprocessedRequirement + + values := internal.MatchNamedCaptureGroups(requirementPattern, raw) + + if err := mapstructure.Decode(values, &r); err != nil { + return nil + } + + r.Name = strings.TrimSpace(r.Name) + r.VersionConstraint = strings.TrimSpace(r.VersionConstraint) + r.Markers = strings.TrimSpace(r.Markers) + r.URL = strings.TrimSpace(r.URL) + r.Hashes = strings.TrimSpace(r.Hashes) + + if r.Name == "" { + return nil + } + + return &r +} + +type requirementsParser struct { + guessUnpinnedRequirements bool +} + +func newRequirementsParser(cfg CatalogerConfig) requirementsParser { + return requirementsParser{ + guessUnpinnedRequirements: cfg.GuessUnpinnedRequirements, + } +} + // parseRequirementsTxt takes a Python requirements.txt file, returning all Python packages that are locked to a // specific version. -func parseRequirementsTxt(_ file.Resolver, _ *generic.Environment, reader file.LocationReadCloser) ([]pkg.Package, []artifact.Relationship, error) { +func (rp requirementsParser) parseRequirementsTxt(_ file.Resolver, _ *generic.Environment, reader file.LocationReadCloser) ([]pkg.Package, []artifact.Relationship, error) { var packages []pkg.Package scanner := bufio.NewScanner(reader) + var lastLine string for scanner.Scan() { - line := scanner.Text() - rawLineNoComments := removeTrailingComment(line) - line = trimRequirementsTxtLine(line) + line := trimRequirementsTxtLine(scanner.Text()) + + if lastLine != "" { + line = lastLine + line + lastLine = "" + } + + // remove line continuations... smashes the file into a single line + if strings.HasSuffix(line, "\\") { + // this line is a continuation of the previous line + lastLine += strings.TrimSuffix(line, "\\") + continue + } if line == "" { // nothing to parse on this line @@ -42,35 +122,20 @@ func parseRequirementsTxt(_ file.Resolver, _ *generic.Environment, reader file.L continue } - if !strings.Contains(line, "==") { - // a package without a version, or a range (unpinned) which does not tell us - // exactly what will be installed. 
- continue - } - - // parse a new requirement - parts := strings.Split(line, "==") - if len(parts) < 2 { - // this should never happen, but just in case + req := newRequirement(line) + if req == nil { log.WithFields("path", reader.RealPath).Warnf("unable to parse requirements.txt line: %q", line) continue } - // check if the version contains hash declarations on the same line - version, _ := parseVersionAndHashes(parts[1]) - - name := strings.TrimSpace(parts[0]) - version = strings.TrimFunc(version, func(r rune) bool { - return !unicode.IsLetter(r) && !unicode.IsNumber(r) - }) - - // TODO: Update to support more than only == - versionConstraint := fmt.Sprintf("== %s", version) + name := removeExtras(req.Name) + version := parseVersion(req.VersionConstraint, rp.guessUnpinnedRequirements) - if name == "" || version == "" { - log.WithFields("path", reader.RealPath).Debugf("found empty package in requirements.txt line: %q", line) + if version == "" { + log.WithFields("path", reader.RealPath).Tracef("unable to determine package version in requirements.txt line: %q", line) continue } + packages = append( packages, newPackageForRequirementsWithMetadata( @@ -78,10 +143,10 @@ func parseRequirementsTxt(_ file.Resolver, _ *generic.Environment, reader file.L version, pkg.PythonRequirementsMetadata{ Name: name, - Extras: parseExtras(rawLineNoComments), - VersionConstraint: versionConstraint, - URL: parseURL(rawLineNoComments), - Markers: parseMarkers(rawLineNoComments), + Extras: parseExtras(req.Name), + VersionConstraint: req.VersionConstraint, + URL: parseURL(req.URL), + Markers: req.Markers, }, reader.Location.WithAnnotation(pkg.EvidenceAnnotationKey, pkg.PrimaryEvidenceAnnotation), ), @@ -95,13 +160,68 @@ func parseRequirementsTxt(_ file.Resolver, _ *generic.Environment, reader file.L return packages, nil, nil } -func parseVersionAndHashes(version string) (string, []string) { - parts := strings.Split(version, "--hash=") - if len(parts) < 2 { - return version, nil +func parseVersion(version string, guessFromConstraint bool) string { + if isPinnedConstraint(version) { + return strings.TrimSpace(strings.ReplaceAll(version, "==", "")) + } + + if guessFromConstraint { + return guessVersion(version) + } + + return "" +} + +func isPinnedConstraint(version string) bool { + return strings.Contains(version, "==") && !strings.ContainsAny(version, "*,<>!") +} + +func guessVersion(constraint string) string { + // handle "2.8.*" -> "2.8.0" + constraint = strings.ReplaceAll(constraint, "*", "0") + if isPinnedConstraint(constraint) { + return strings.TrimSpace(strings.ReplaceAll(constraint, "==", "")) + } + + constraints := strings.Split(constraint, ",") + filteredVersions := map[string]struct{}{} + for _, part := range constraints { + if strings.Contains(part, "!=") { + parts := strings.Split(part, "!=") + filteredVersions[strings.TrimSpace(parts[1])] = struct{}{} + } + } + + var closestVersion *pep440.Version + for _, part := range constraints { + // ignore any parts that do not have '=' in them, >,<,~ are not valid semver + parts := strings.SplitAfter(part, "=") + if len(parts) < 2 { + continue + } + version, err := pep440.Parse(strings.TrimSpace(parts[1])) + if err != nil { + // ignore any parts that are not valid semver + continue + } + if _, ok := filteredVersions[version.String()]; ok { + continue + } + + if strings.Contains(part, "==") { + parts := strings.Split(part, "==") + return strings.TrimSpace(parts[1]) + } + + if closestVersion == nil || version.GreaterThan(*closestVersion) { + closestVersion = 
&version + } + } + if closestVersion == nil { + return "" } - return parts[0], parts[1:] + return closestVersion.String() } // trimRequirementsTxtLine removes content from the given requirements.txt line @@ -109,8 +229,6 @@ func parseVersionAndHashes(version string) (string, []string) { func trimRequirementsTxtLine(line string) string { line = strings.TrimSpace(line) line = removeTrailingComment(line) - line = removeEnvironmentMarkers(line) - line = checkForRegex(line) // remove extras and url from line if found return line } @@ -127,57 +245,29 @@ func removeTrailingComment(line string) string { return parts[0] } -// removeEnvironmentMarkers removes any instances of environment markers (delimited by ';') from the line. -// For more information, see https://www.python.org/dev/peps/pep-0508/#environment-markers. -func removeEnvironmentMarkers(line string) string { - parts := strings.SplitN(line, ";", 2) - if len(parts) < 2 { - // there aren't any environment markers - - return line +func removeExtras(packageName string) string { + start := strings.Index(packageName, "[") + if start == -1 { + return packageName } - return parts[0] + return strings.TrimSpace(packageName[:start]) } func parseExtras(packageName string) []string { - if extrasRegex.MatchString(packageName) { - // Remove square brackets - extras := strings.TrimFunc(extrasRegex.FindString(packageName), func(r rune) bool { - return !unicode.IsLetter(r) && !unicode.IsNumber(r) - }) - - // Remove any additional whitespace - extras = strings.ReplaceAll(extras, " ", "") + var extras []string - return strings.Split(extras, ",") + start := strings.Index(packageName, "[") + stop := strings.Index(packageName, "]") + if start == -1 || stop == -1 { + return extras } - return []string{} -} - -func parseMarkers(line string) map[string]string { - markers := map[string]string{} - parts := strings.SplitN(line, ";", 2) - - if len(parts) == 2 { - splittableMarkers := parts[1] - - for _, combineString := range []string{" or ", " and "} { - splittableMarkers = strings.TrimSpace( - strings.ReplaceAll(splittableMarkers, combineString, ","), - ) - } - - splittableMarkers = strings.TrimSpace(splittableMarkers) - - for _, mark := range strings.Split(splittableMarkers, ",") { - markparts := strings.Split(mark, " ") - markers[markparts[0]] = strings.Join(markparts[1:], " ") - } + extraString := packageName[start+1 : stop] + for _, extra := range strings.Split(extraString, ",") { + extras = append(extras, strings.TrimSpace(extra)) } - - return markers + return extras } func parseURL(line string) string { @@ -204,19 +294,3 @@ func parseURL(line string) string { return "" } - -// function to check a string for all possilbe regex expressions, replacing it if found -func checkForRegex(stringToCheck string) string { - stringToReturn := stringToCheck - - for _, r := range []*regexp.Regexp{ - urlRegex, - extrasRegex, - } { - if r.MatchString(stringToCheck) { - stringToReturn = r.ReplaceAllString(stringToCheck, "") - } - } - - return stringToReturn -} diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/rpm/cataloger.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/rpm/cataloger.go index 909a6e97..75a0c492 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/rpm/cataloger.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/rpm/cataloger.go @@ -11,6 +11,11 @@ import ( "github.com/anchore/syft/syft/pkg/cataloger/generic" ) +const ( + dbCatalogerName = "rpm-db-cataloger" + fileCatalogerName = "rpm-file-cataloger" +) + // NewRpmDBCataloger 
returns a new RPM DB cataloger object. func NewRpmDBCataloger() *generic.Cataloger { // check if a sqlite driver is available @@ -18,14 +23,14 @@ func NewRpmDBCataloger() *generic.Cataloger { log.Warnf("sqlite driver is not available, newer RPM databases might not be cataloged") } - return generic.NewCataloger("rpm-db-cataloger"). + return generic.NewCataloger(dbCatalogerName). WithParserByGlobs(parseRpmDB, pkg.RpmDBGlob). WithParserByGlobs(parseRpmManifest, pkg.RpmManifestGlob) } // NewFileCataloger returns a new RPM file cataloger object. func NewFileCataloger() *generic.Cataloger { - return generic.NewCataloger("rpm-file-cataloger"). + return generic.NewCataloger(fileCatalogerName). WithParserByGlobs(parseRpm, "**/*.rpm") } diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/swift/cataloger.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/swift/cataloger.go index 5ce504b7..a890b858 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/swift/cataloger.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/swift/cataloger.go @@ -1,5 +1,5 @@ /* -Package swift provides a concrete Cataloger implementation for Podfile.lock files. +Package swift provides a concrete Cataloger implementation for Podfile.lock and Package.resolved files. */ package swift @@ -7,6 +7,11 @@ import ( "github.com/anchore/syft/syft/pkg/cataloger/generic" ) +func NewSwiftPackageManagerCataloger() *generic.Cataloger { + return generic.NewCataloger("spm-cataloger"). + WithParserByGlobs(parsePackageResolved, "**/Package.resolved", "**/.package.resolved") +} + // NewCocoapodsCataloger returns a new Swift Cocoapods lock file cataloger object. func NewCocoapodsCataloger() *generic.Cataloger { return generic.NewCataloger("cocoapods-cataloger"). diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/swift/package.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/swift/package.go index ad6416e6..c5f606cb 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/swift/package.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/swift/package.go @@ -1,16 +1,37 @@ package swift import ( + "strings" + "github.com/anchore/packageurl-go" "github.com/anchore/syft/syft/file" "github.com/anchore/syft/syft/pkg" ) -func newPackage(name, version, hash string, locations ...file.Location) pkg.Package { +func newSwiftPackageManagerPackage(name, version, sourceURL, revision string, locations ...file.Location) pkg.Package { + p := pkg.Package{ + Name: name, + Version: version, + PURL: swiftPackageManagerPackageURL(name, version, sourceURL), + Locations: file.NewLocationSet(locations...), + Type: pkg.SwiftPkg, + Language: pkg.Swift, + MetadataType: pkg.SwiftPackageManagerMetadataType, + Metadata: pkg.SwiftPackageManagerMetadata{ + Revision: revision, + }, + } + + p.SetID() + + return p +} + +func newCocoaPodsPackage(name, version, hash string, locations ...file.Location) pkg.Package { p := pkg.Package{ Name: name, Version: version, - PURL: packageURL(name, version), + PURL: cocoaPodsPackageURL(name, version), Locations: file.NewLocationSet(locations...), Type: pkg.CocoapodsPkg, Language: pkg.Swift, @@ -25,7 +46,7 @@ func newPackage(name, version, hash string, locations ...file.Location) pkg.Pack return p } -func packageURL(name, version string) string { +func cocoaPodsPackageURL(name, version string) string { var qualifiers packageurl.Qualifiers return packageurl.NewPackageURL( @@ -37,3 +58,16 @@ func packageURL(name, version string) string { "", ).ToString() } + +func 
swiftPackageManagerPackageURL(name, version, sourceURL string) string {
+	var qualifiers packageurl.Qualifiers
+
+	return packageurl.NewPackageURL(
+		packageurl.TypeSwift,
+		strings.Replace(sourceURL, "https://", "", 1),
+		name,
+		version,
+		qualifiers,
+		"",
+	).ToString()
+}
diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/swift/parse_package_resolved.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/swift/parse_package_resolved.go
new file mode 100644
index 00000000..abead252
--- /dev/null
+++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/swift/parse_package_resolved.go
@@ -0,0 +1,134 @@
+package swift
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+
+	"github.com/anchore/syft/syft/artifact"
+	"github.com/anchore/syft/syft/file"
+	"github.com/anchore/syft/syft/pkg"
+	"github.com/anchore/syft/syft/pkg/cataloger/generic"
+)
+
+var _ generic.Parser = parsePackageResolved
+
+// swift package manager has two versions (1 and 2) of the resolved files; the types below describe the serialization strategy for each version,
+// with the suffix indicating which version each type is specific to.
+
+type packageResolvedV1 struct {
+	PackageObject packageObjectV1 `json:"object"`
+	Version       int             `json:"version"`
+}
+
+type packageObjectV1 struct {
+	Pins []packagePinsV1
+}
+
+type packagePinsV1 struct {
+	Name          string       `json:"package"`
+	RepositoryURL string       `json:"repositoryURL"`
+	State         packageState `json:"state"`
+}
+
+type packageResolvedV2 struct {
+	Pins []packagePinsV2
+}
+
+type packagePinsV2 struct {
+	Identity string       `json:"identity"`
+	Kind     string       `json:"kind"`
+	Location string       `json:"location"`
+	State    packageState `json:"state"`
+}
+
+type packagePin struct {
+	Identity string
+	Location string
+	Revision string
+	Version  string
+}
+
+type packageState struct {
+	Revision string `json:"revision"`
+	Version  string `json:"version"`
+}
+
+// parsePackageResolved is a parser for the contents of a Package.resolved file, which is generated by Xcode after it has resolved Swift Package Manager packages.
+func parsePackageResolved(_ file.Resolver, _ *generic.Environment, reader file.LocationReadCloser) ([]pkg.Package, []artifact.Relationship, error) { + dec := json.NewDecoder(reader) + var packageResolvedData map[string]interface{} + for { + if err := dec.Decode(&packageResolvedData); errors.Is(err, io.EOF) { + break + } else if err != nil { + return nil, nil, fmt.Errorf("failed to parse Package.resolved file: %w", err) + } + } + + var pins, err = pinsForVersion(packageResolvedData, packageResolvedData["version"].(float64)) + if err != nil { + return nil, nil, err + } + + var pkgs []pkg.Package + for _, packagePin := range pins { + pkgs = append( + pkgs, + newSwiftPackageManagerPackage( + packagePin.Identity, + packagePin.Version, + packagePin.Location, + packagePin.Revision, + reader.Location.WithAnnotation(pkg.EvidenceAnnotationKey, pkg.PrimaryEvidenceAnnotation), + ), + ) + } + return pkgs, nil, nil +} + +func pinsForVersion(data map[string]interface{}, version float64) ([]packagePin, error) { + var genericPins []packagePin + switch version { + case 1: + t := packageResolvedV1{} + jsonString, err := json.Marshal(data) + if err != nil { + return nil, err + } + parseErr := json.Unmarshal(jsonString, &t) + if parseErr != nil { + return nil, parseErr + } + for _, pin := range t.PackageObject.Pins { + genericPins = append(genericPins, packagePin{ + pin.Name, + pin.RepositoryURL, + pin.State.Revision, + pin.State.Version, + }) + } + case 2: + t := packageResolvedV2{} + jsonString, err := json.Marshal(data) + if err != nil { + return nil, err + } + parseErr := json.Unmarshal(jsonString, &t) + if parseErr != nil { + return nil, parseErr + } + for _, pin := range t.Pins { + genericPins = append(genericPins, packagePin{ + pin.Identity, + pin.Location, + pin.State.Revision, + pin.State.Version, + }) + } + default: + return nil, fmt.Errorf("unknown swift package manager version, %f", version) + } + return genericPins, nil +} diff --git a/vendor/github.com/anchore/syft/syft/pkg/cataloger/swift/parse_podfile_lock.go b/vendor/github.com/anchore/syft/syft/pkg/cataloger/swift/parse_podfile_lock.go index 58a58c46..3ba47956 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/cataloger/swift/parse_podfile_lock.go +++ b/vendor/github.com/anchore/syft/syft/pkg/cataloger/swift/parse_podfile_lock.go @@ -61,7 +61,7 @@ func parsePodfileLock(_ file.Resolver, _ *generic.Environment, reader file.Locat pkgs = append( pkgs, - newPackage( + newCocoaPodsPackage( podName, podVersion, pkgHash, diff --git a/vendor/github.com/anchore/syft/syft/pkg/dotnet_portable_executable_metadata.go b/vendor/github.com/anchore/syft/syft/pkg/dotnet_portable_executable_metadata.go new file mode 100644 index 00000000..7b42d133 --- /dev/null +++ b/vendor/github.com/anchore/syft/syft/pkg/dotnet_portable_executable_metadata.go @@ -0,0 +1,11 @@ +package pkg + +type DotnetPortableExecutableMetadata struct { + AssemblyVersion string `json:"assemblyVersion"` + LegalCopyright string `json:"legalCopyright"` + Comments string `json:"comments,omitempty"` + InternalName string `json:"internalName,omitempty"` + CompanyName string `json:"companyName"` + ProductName string `json:"productName"` + ProductVersion string `json:"productVersion"` +} diff --git a/vendor/github.com/anchore/syft/syft/pkg/language.go b/vendor/github.com/anchore/syft/syft/pkg/language.go index bb2902bb..38760180 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/language.go +++ b/vendor/github.com/anchore/syft/syft/pkg/language.go @@ -82,7 +82,7 @@ func LanguageByName(name 
string) Language { return Dart case packageurl.TypeDotnet: return Dotnet - case packageurl.TypeCocoapods, packageurl.TypeSwift, string(CocoapodsPkg): + case packageurl.TypeCocoapods, packageurl.TypeSwift, string(CocoapodsPkg), string(SwiftPkg): return Swift case packageurl.TypeConan, string(CPP): return CPP diff --git a/vendor/github.com/anchore/syft/syft/pkg/metadata.go b/vendor/github.com/anchore/syft/syft/pkg/metadata.go index 5d43e991..ce0594bd 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/metadata.go +++ b/vendor/github.com/anchore/syft/syft/pkg/metadata.go @@ -10,37 +10,39 @@ type MetadataType string const ( // this is the full set of data shapes that can be represented within the pkg.Package.Metadata field - UnknownMetadataType MetadataType = "UnknownMetadata" - AlpmMetadataType MetadataType = "AlpmMetadata" - ApkMetadataType MetadataType = "ApkMetadata" - BinaryMetadataType MetadataType = "BinaryMetadata" - CocoapodsMetadataType MetadataType = "CocoapodsMetadataType" - ConanLockMetadataType MetadataType = "ConanLockMetadataType" - ConanMetadataType MetadataType = "ConanMetadataType" - DartPubMetadataType MetadataType = "DartPubMetadata" - DotnetDepsMetadataType MetadataType = "DotnetDepsMetadata" - DpkgMetadataType MetadataType = "DpkgMetadata" - GemMetadataType MetadataType = "GemMetadata" - GolangBinMetadataType MetadataType = "GolangBinMetadata" - GolangModMetadataType MetadataType = "GolangModMetadata" - HackageMetadataType MetadataType = "HackageMetadataType" - JavaMetadataType MetadataType = "JavaMetadata" - KbPackageMetadataType MetadataType = "KbPackageMetadata" - LinuxKernelMetadataType MetadataType = "LinuxKernelMetadata" - LinuxKernelModuleMetadataType MetadataType = "LinuxKernelModuleMetadata" - MixLockMetadataType MetadataType = "MixLockMetadataType" - NixStoreMetadataType MetadataType = "NixStoreMetadata" - NpmPackageJSONMetadataType MetadataType = "NpmPackageJsonMetadata" - NpmPackageLockJSONMetadataType MetadataType = "NpmPackageLockJsonMetadata" - PhpComposerJSONMetadataType MetadataType = "PhpComposerJsonMetadata" - PortageMetadataType MetadataType = "PortageMetadata" - PythonPackageMetadataType MetadataType = "PythonPackageMetadata" - PythonPipfileLockMetadataType MetadataType = "PythonPipfileLockMetadata" - PythonRequirementsMetadataType MetadataType = "PythonRequirementsMetadata" - RebarLockMetadataType MetadataType = "RebarLockMetadataType" - RDescriptionFileMetadataType MetadataType = "RDescriptionFileMetadataType" - RpmMetadataType MetadataType = "RpmMetadata" - RustCargoPackageMetadataType MetadataType = "RustCargoPackageMetadata" + UnknownMetadataType MetadataType = "UnknownMetadata" + AlpmMetadataType MetadataType = "AlpmMetadata" + ApkMetadataType MetadataType = "ApkMetadata" + BinaryMetadataType MetadataType = "BinaryMetadata" + CocoapodsMetadataType MetadataType = "CocoapodsMetadataType" + ConanLockMetadataType MetadataType = "ConanLockMetadataType" + ConanMetadataType MetadataType = "ConanMetadataType" + DartPubMetadataType MetadataType = "DartPubMetadata" + DotnetDepsMetadataType MetadataType = "DotnetDepsMetadata" + DotnetPortableExecutableMetadataType MetadataType = "DotnetPortableExecutableMetadata" + DpkgMetadataType MetadataType = "DpkgMetadata" + GemMetadataType MetadataType = "GemMetadata" + GolangBinMetadataType MetadataType = "GolangBinMetadata" + GolangModMetadataType MetadataType = "GolangModMetadata" + HackageMetadataType MetadataType = "HackageMetadataType" + JavaMetadataType MetadataType = "JavaMetadata" + KbPackageMetadataType 
MetadataType = "KbPackageMetadata" + LinuxKernelMetadataType MetadataType = "LinuxKernelMetadata" + LinuxKernelModuleMetadataType MetadataType = "LinuxKernelModuleMetadata" + MixLockMetadataType MetadataType = "MixLockMetadataType" + NixStoreMetadataType MetadataType = "NixStoreMetadata" + NpmPackageJSONMetadataType MetadataType = "NpmPackageJsonMetadata" + NpmPackageLockJSONMetadataType MetadataType = "NpmPackageLockJsonMetadata" + PhpComposerJSONMetadataType MetadataType = "PhpComposerJsonMetadata" + PortageMetadataType MetadataType = "PortageMetadata" + PythonPackageMetadataType MetadataType = "PythonPackageMetadata" + PythonPipfileLockMetadataType MetadataType = "PythonPipfileLockMetadata" + PythonRequirementsMetadataType MetadataType = "PythonRequirementsMetadata" + RebarLockMetadataType MetadataType = "RebarLockMetadataType" + RDescriptionFileMetadataType MetadataType = "RDescriptionFileMetadataType" + RpmMetadataType MetadataType = "RpmMetadata" + RustCargoPackageMetadataType MetadataType = "RustCargoPackageMetadata" + SwiftPackageManagerMetadataType MetadataType = "SwiftPackageManagerMetadata" ) var AllMetadataTypes = []MetadataType{ @@ -52,6 +54,7 @@ var AllMetadataTypes = []MetadataType{ ConanMetadataType, DartPubMetadataType, DotnetDepsMetadataType, + DotnetPortableExecutableMetadataType, DpkgMetadataType, GemMetadataType, GolangBinMetadataType, @@ -74,39 +77,42 @@ var AllMetadataTypes = []MetadataType{ RebarLockMetadataType, RpmMetadataType, RustCargoPackageMetadataType, + SwiftPackageManagerMetadataType, } var MetadataTypeByName = map[MetadataType]reflect.Type{ - AlpmMetadataType: reflect.TypeOf(AlpmMetadata{}), - ApkMetadataType: reflect.TypeOf(ApkMetadata{}), - BinaryMetadataType: reflect.TypeOf(BinaryMetadata{}), - CocoapodsMetadataType: reflect.TypeOf(CocoapodsMetadata{}), - ConanLockMetadataType: reflect.TypeOf(ConanLockMetadata{}), - ConanMetadataType: reflect.TypeOf(ConanMetadata{}), - DartPubMetadataType: reflect.TypeOf(DartPubMetadata{}), - DotnetDepsMetadataType: reflect.TypeOf(DotnetDepsMetadata{}), - DpkgMetadataType: reflect.TypeOf(DpkgMetadata{}), - GemMetadataType: reflect.TypeOf(GemMetadata{}), - GolangBinMetadataType: reflect.TypeOf(GolangBinMetadata{}), - GolangModMetadataType: reflect.TypeOf(GolangModMetadata{}), - HackageMetadataType: reflect.TypeOf(HackageMetadata{}), - JavaMetadataType: reflect.TypeOf(JavaMetadata{}), - KbPackageMetadataType: reflect.TypeOf(KbPackageMetadata{}), - LinuxKernelMetadataType: reflect.TypeOf(LinuxKernelMetadata{}), - LinuxKernelModuleMetadataType: reflect.TypeOf(LinuxKernelModuleMetadata{}), - MixLockMetadataType: reflect.TypeOf(MixLockMetadata{}), - NixStoreMetadataType: reflect.TypeOf(NixStoreMetadata{}), - NpmPackageJSONMetadataType: reflect.TypeOf(NpmPackageJSONMetadata{}), - NpmPackageLockJSONMetadataType: reflect.TypeOf(NpmPackageLockJSONMetadata{}), - PhpComposerJSONMetadataType: reflect.TypeOf(PhpComposerJSONMetadata{}), - PortageMetadataType: reflect.TypeOf(PortageMetadata{}), - PythonPackageMetadataType: reflect.TypeOf(PythonPackageMetadata{}), - PythonPipfileLockMetadataType: reflect.TypeOf(PythonPipfileLockMetadata{}), - PythonRequirementsMetadataType: reflect.TypeOf(PythonRequirementsMetadata{}), - RDescriptionFileMetadataType: reflect.TypeOf(RDescriptionFileMetadata{}), - RebarLockMetadataType: reflect.TypeOf(RebarLockMetadata{}), - RpmMetadataType: reflect.TypeOf(RpmMetadata{}), - RustCargoPackageMetadataType: reflect.TypeOf(CargoPackageMetadata{}), + AlpmMetadataType: reflect.TypeOf(AlpmMetadata{}), + 
ApkMetadataType: reflect.TypeOf(ApkMetadata{}), + BinaryMetadataType: reflect.TypeOf(BinaryMetadata{}), + CocoapodsMetadataType: reflect.TypeOf(CocoapodsMetadata{}), + ConanLockMetadataType: reflect.TypeOf(ConanLockMetadata{}), + ConanMetadataType: reflect.TypeOf(ConanMetadata{}), + DartPubMetadataType: reflect.TypeOf(DartPubMetadata{}), + DotnetDepsMetadataType: reflect.TypeOf(DotnetDepsMetadata{}), + DotnetPortableExecutableMetadataType: reflect.TypeOf(DotnetPortableExecutableMetadata{}), + DpkgMetadataType: reflect.TypeOf(DpkgMetadata{}), + GemMetadataType: reflect.TypeOf(GemMetadata{}), + GolangBinMetadataType: reflect.TypeOf(GolangBinMetadata{}), + GolangModMetadataType: reflect.TypeOf(GolangModMetadata{}), + HackageMetadataType: reflect.TypeOf(HackageMetadata{}), + JavaMetadataType: reflect.TypeOf(JavaMetadata{}), + KbPackageMetadataType: reflect.TypeOf(KbPackageMetadata{}), + LinuxKernelMetadataType: reflect.TypeOf(LinuxKernelMetadata{}), + LinuxKernelModuleMetadataType: reflect.TypeOf(LinuxKernelModuleMetadata{}), + MixLockMetadataType: reflect.TypeOf(MixLockMetadata{}), + NixStoreMetadataType: reflect.TypeOf(NixStoreMetadata{}), + NpmPackageJSONMetadataType: reflect.TypeOf(NpmPackageJSONMetadata{}), + NpmPackageLockJSONMetadataType: reflect.TypeOf(NpmPackageLockJSONMetadata{}), + PhpComposerJSONMetadataType: reflect.TypeOf(PhpComposerJSONMetadata{}), + PortageMetadataType: reflect.TypeOf(PortageMetadata{}), + PythonPackageMetadataType: reflect.TypeOf(PythonPackageMetadata{}), + PythonPipfileLockMetadataType: reflect.TypeOf(PythonPipfileLockMetadata{}), + PythonRequirementsMetadataType: reflect.TypeOf(PythonRequirementsMetadata{}), + RDescriptionFileMetadataType: reflect.TypeOf(RDescriptionFileMetadata{}), + RebarLockMetadataType: reflect.TypeOf(RebarLockMetadata{}), + RpmMetadataType: reflect.TypeOf(RpmMetadata{}), + RustCargoPackageMetadataType: reflect.TypeOf(CargoPackageMetadata{}), + SwiftPackageManagerMetadataType: reflect.TypeOf(SwiftPackageManagerMetadata{}), } func CleanMetadataType(typ MetadataType) MetadataType { diff --git a/vendor/github.com/anchore/syft/syft/pkg/python_requirements_metadata.go b/vendor/github.com/anchore/syft/syft/pkg/python_requirements_metadata.go index da675e6a..161669a4 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/python_requirements_metadata.go +++ b/vendor/github.com/anchore/syft/syft/pkg/python_requirements_metadata.go @@ -1,9 +1,9 @@ package pkg type PythonRequirementsMetadata struct { - Name string `json:"name" mapstruct:"Name"` - Extras []string `json:"extras" mapstruct:"Extras"` - VersionConstraint string `json:"versionConstraint" mapstruct:"VersionConstraint"` - URL string `json:"url" mapstruct:"URL"` - Markers map[string]string `json:"markers" mapstruct:"Markers"` + Name string `json:"name" mapstruct:"Name"` + Extras []string `json:"extras,omitempty" mapstruct:"Extras"` + VersionConstraint string `json:"versionConstraint" mapstruct:"VersionConstraint"` + URL string `json:"url,omitempty" mapstruct:"URL"` + Markers string `json:"markers,omitempty" mapstruct:"Markers"` } diff --git a/vendor/github.com/anchore/syft/syft/pkg/rpm_metadata.go b/vendor/github.com/anchore/syft/syft/pkg/rpm_metadata.go index 41a825d9..e2442787 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/rpm_metadata.go +++ b/vendor/github.com/anchore/syft/syft/pkg/rpm_metadata.go @@ -13,7 +13,7 @@ import ( // Packages is the legacy Berkely db based format // Packages.db is the "ndb" format used in SUSE // rpmdb.sqlite is the sqlite format used in fedora + derivates 
-const RpmDBGlob = "**/{var/lib,usr/share}/rpm/{Packages,Packages.db,rpmdb.sqlite}" +const RpmDBGlob = "**/{var/lib,usr/share,usr/lib/sysimage}/rpm/{Packages,Packages.db,rpmdb.sqlite}" // Used in CBL-Mariner distroless images const RpmManifestGlob = "**/var/lib/rpmmanifest/container-manifest-2" diff --git a/vendor/github.com/anchore/syft/syft/pkg/swiftpackagemanager_metadata.go b/vendor/github.com/anchore/syft/syft/pkg/swiftpackagemanager_metadata.go new file mode 100644 index 00000000..fd33a8d6 --- /dev/null +++ b/vendor/github.com/anchore/syft/syft/pkg/swiftpackagemanager_metadata.go @@ -0,0 +1,5 @@ +package pkg + +type SwiftPackageManagerMetadata struct { + Revision string `mapstructure:"revision" json:"revision"` +} diff --git a/vendor/github.com/anchore/syft/syft/pkg/type.go b/vendor/github.com/anchore/syft/syft/pkg/type.go index 760b3232..e3ed3f4c 100644 --- a/vendor/github.com/anchore/syft/syft/pkg/type.go +++ b/vendor/github.com/anchore/syft/syft/pkg/type.go @@ -36,6 +36,7 @@ const ( Rpkg Type = "R-package" RpmPkg Type = "rpm" RustPkg Type = "rust-crate" + SwiftPkg Type = "swift" ) // AllPkgs represents all supported package types @@ -65,6 +66,7 @@ var AllPkgs = []Type{ Rpkg, RpmPkg, RustPkg, + SwiftPkg, } // PackageURLType returns the PURL package type for the current package. @@ -114,6 +116,8 @@ func (t Type) PackageURLType() string { return packageurl.TypeRPM case RustPkg: return "cargo" + case SwiftPkg: + return packageurl.TypeSwift default: // TODO: should this be a "generic" purl type instead? return "" @@ -179,6 +183,8 @@ func TypeByName(name string) Type { return NixPkg case packageurl.TypeCran: return Rpkg + case packageurl.TypeSwift: + return SwiftPkg default: return UnknownPkg } diff --git a/vendor/github.com/anchore/syft/syft/sbom/sbom.go b/vendor/github.com/anchore/syft/syft/sbom/sbom.go index 8592d844..f95df095 100644 --- a/vendor/github.com/anchore/syft/syft/sbom/sbom.go +++ b/vendor/github.com/anchore/syft/syft/sbom/sbom.go @@ -5,6 +5,7 @@ import ( "golang.org/x/exp/slices" + "github.com/anchore/syft/internal/log" "github.com/anchore/syft/syft/artifact" "github.com/anchore/syft/syft/file" "github.com/anchore/syft/syft/linux" @@ -77,11 +78,18 @@ func (s SBOM) RelationshipsForPackage(p pkg.Package, rt ...artifact.Relationship var relationships []artifact.Relationship for _, relationship := range s.Relationships { + if relationship.From == nil || relationship.To == nil { + log.Debugf("relationship has nil edge, skipping: %#v", relationship) + continue + } + if relationship.From.ID() != p.ID() { + continue + } // check if the relationship is one we're searching for; rt is inclusive - idx := slices.IndexFunc(rt, func(r artifact.RelationshipType) bool { return relationship.Type == r }) - if relationship.From.ID() == p.ID() && idx != -1 { - relationships = append(relationships, relationship) + if !slices.ContainsFunc(rt, func(r artifact.RelationshipType) bool { return relationship.Type == r }) { + continue } + relationships = append(relationships, relationship) } return relationships diff --git a/vendor/github.com/anchore/syft/syft/source/directory_source.go b/vendor/github.com/anchore/syft/syft/source/directory_source.go index ab7f3d46..e8e5e217 100644 --- a/vendor/github.com/anchore/syft/syft/source/directory_source.go +++ b/vendor/github.com/anchore/syft/syft/source/directory_source.go @@ -120,6 +120,9 @@ func (s DirectorySource) Describe() Description { if a.Name != "" { name = a.Name } + if a.Version != "" { + version = a.Version + } } return Description{ ID: 
string(s.id), diff --git a/vendor/github.com/anchore/syft/syft/source/stereoscope_image_source.go b/vendor/github.com/anchore/syft/syft/source/stereoscope_image_source.go index e9c39d17..2f45de7d 100644 --- a/vendor/github.com/anchore/syft/syft/source/stereoscope_image_source.go +++ b/vendor/github.com/anchore/syft/syft/source/stereoscope_image_source.go @@ -5,10 +5,12 @@ import ( "fmt" "github.com/bmatcuk/doublestar/v4" + "github.com/docker/distribution/reference" "github.com/opencontainers/go-digest" "github.com/anchore/stereoscope" "github.com/anchore/stereoscope/pkg/image" + "github.com/anchore/syft/internal/log" "github.com/anchore/syft/syft/artifact" "github.com/anchore/syft/syft/file" "github.com/anchore/syft/syft/internal/fileresolver" @@ -83,18 +85,44 @@ func (s StereoscopeImageSource) ID() artifact.ID { } func (s StereoscopeImageSource) Describe() Description { - name := s.metadata.UserInput - version := s.metadata.ManifestDigest - a := s.config.Alias - if a.Name != "" { - name = a.Name + + name := a.Name + nameIfUnset := func(n string) { + if name != "" { + return + } + name = n } - if a.Version != "" { - version = a.Version + version := a.Version + versionIfUnset := func(v string) { + if version != "" && version != "latest" { + return + } + version = v + } + + ref, err := reference.Parse(s.metadata.UserInput) + if err != nil { + log.Debugf("unable to parse image ref: %s", s.config.Reference) + } else { + if ref, ok := ref.(reference.Named); ok { + nameIfUnset(ref.Name()) + } + + if ref, ok := ref.(reference.NamedTagged); ok { + versionIfUnset(ref.Tag()) + } + + if ref, ok := ref.(reference.Digested); ok { + versionIfUnset(ref.Digest().String()) + } } + nameIfUnset(s.metadata.UserInput) + versionIfUnset(s.metadata.ManifestDigest) + return Description{ ID: string(s.id), Name: name, diff --git a/vendor/github.com/aquasecurity/go-pep440-version/.gitignore b/vendor/github.com/aquasecurity/go-pep440-version/.gitignore new file mode 100644 index 00000000..66fd13c9 --- /dev/null +++ b/vendor/github.com/aquasecurity/go-pep440-version/.gitignore @@ -0,0 +1,15 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ diff --git a/vendor/github.com/aquasecurity/go-pep440-version/LICENSE b/vendor/github.com/aquasecurity/go-pep440-version/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/vendor/github.com/aquasecurity/go-pep440-version/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/aquasecurity/go-pep440-version/README.md b/vendor/github.com/aquasecurity/go-pep440-version/README.md
new file mode 100644
index 00000000..6bc55bef
--- /dev/null
+++ b/vendor/github.com/aquasecurity/go-pep440-version/README.md
@@ -0,0 +1,51 @@
+# go-pep440-version
+![Test](https://github.com/aquasecurity/go-pep440-version/workflows/Test/badge.svg?branch=main)
+[![Go Report Card](https://goreportcard.com/badge/github.com/aquasecurity/go-pep440-version)](https://goreportcard.com/report/github.com/aquasecurity/go-pep440-version)
+![GitHub](https://img.shields.io/github/license/aquasecurity/go-pep440-version)
+
+A golang library for parsing PEP 440 compliant Python versions
+
+go-pep440-version is a library for parsing versions of Python software distributions and version specifiers, and verifying versions against a set of specifiers.
+
+Versions used with go-pep440-version must follow [PEP 440](https://www.python.org/dev/peps/pep-0440/).
+
+For more details, see [pypa/packaging](https://github.com/pypa/packaging)
+
+## Usage
+### Version Parsing and Comparison
+
+See [example](./examples/comparison/main.go)
+
+```
+v1, _ := version.Parse("1.2.a")
+v2, _ := version.Parse("1.2")
+
+// Comparison example. There is also GreaterThan, Equal, and just
+// a simple Compare that returns an int allowing easy >=, <=, etc.
+if v1.LessThan(v2) {
+	fmt.Printf("%s is less than %s", v1, v2)
+}
+```
+
+### Version Constraints
+See [example](./examples/constraint/main.go)
+
+```
+v, _ := version.Parse("2.1")
+c, _ := version.NewSpecifiers(">= 1.0, < 1.4 || > 2.0")
+
+if c.Check(v) {
+	fmt.Printf("%s satisfies specifiers '%s'", v, c)
+}
+```
+
+## Status
+
+- [x] `>`
+- [x] `>=`
+- [x] `<`
+- [x] `<=`
+- [x] `==`
+- [x] `!=`
+- [x] `~=`
+- [ ] `===`
\ No newline at end of file
diff --git a/vendor/github.com/aquasecurity/go-pep440-version/specifier.go b/vendor/github.com/aquasecurity/go-pep440-version/specifier.go
new file mode 100644
index 00000000..797b7008
--- /dev/null
+++ b/vendor/github.com/aquasecurity/go-pep440-version/specifier.go
@@ -0,0 +1,385 @@
+package version
+
+import (
+	"fmt"
+	"reflect"
+	"regexp"
+	"strconv"
+	"strings"
+
+	"golang.org/x/xerrors"
+)
+
+var (
+	specifierOperators = map[string]operatorFunc{
+		"":    specifierEqual, // not defined in PEP 440
+		"=":   specifierEqual, // not defined in PEP 440
+		"==":  specifierEqual,
+		"!=":  specifierNotEqual,
+		">":   specifierGreaterThan,
+		"<":   specifierLessThan,
+		">=":  specifierGreaterThanEqual,
+		"<=":  specifierLessThanEqual,
+		"~=":  specifierCompatible,
+		"===": specifierArbitrary,
+	}
+
+	specifierRegexp       *regexp.Regexp
+	validConstraintRegexp *regexp.Regexp
+	prefixRegexp          *regexp.Regexp
+)
+
+func init() {
+	ops := make([]string, 0, len(specifierOperators))
+	for k := range specifierOperators {
+		ops = append(ops, regexp.QuoteMeta(k))
+	}
+
+	specifierRegexp = regexp.MustCompile(fmt.Sprintf(
+		`(?i)(?P<operator>(%s))\s*(?P<version>%s(\.\*)?)`,
+		strings.Join(ops, "|"), regex))
+
+	validConstraintRegexp = regexp.MustCompile(fmt.Sprintf(
+		`^\s*(\s*(%s)\s*(%s(\.\*)?)\s*\,?)*\s*$`,
+		strings.Join(ops, "|"), regex))
+
+	prefixRegexp = regexp.MustCompile(`^([0-9]+)((?:a|b|c|rc)[0-9]+)$`)
+}
+
+type operatorFunc func(v Version, c string) bool
+
+type Specifiers struct {
+	specifiers [][]specifier
+	conf       conf
+}
+
+type specifier struct {
+	version  string
+	operator operatorFunc
+	original string
+}
+
+// NewSpecifiers parses a given specifier and returns a new instance of Specifiers
+func NewSpecifiers(v string, opts ...SpecifierOption) 
(Specifiers, error) { + c := new(conf) + + // Apply options + for _, o := range opts { + o.apply(c) + } + + var sss [][]specifier + for _, vv := range strings.Split(v, "||") { + if strings.TrimSpace(vv) == "*" { + vv = ">=0.0.0" + } + + // Validate the segment + if !validConstraintRegexp.MatchString(vv) { + return Specifiers{}, xerrors.Errorf("improper constraint: %s", vv) + } + + ss := specifierRegexp.FindAllString(vv, -1) + if ss == nil { + ss = append(ss, strings.TrimSpace(vv)) + } + + var specs []specifier + for _, single := range ss { + s, err := newSpecifier(single) + if err != nil { + return Specifiers{}, err + } + specs = append(specs, s) + } + sss = append(sss, specs) + } + + return Specifiers{ + specifiers: sss, + conf: *c, + }, nil + +} + +func newSpecifier(s string) (specifier, error) { + m := specifierRegexp.FindStringSubmatch(s) + if m == nil { + return specifier{}, xerrors.Errorf("improper specifier: %s", s) + } + + operator := m[specifierRegexp.SubexpIndex("operator")] + version := m[specifierRegexp.SubexpIndex("version")] + + if operator != "===" { + if err := validate(operator, version); err != nil { + return specifier{}, err + } + } + + return specifier{ + version: version, + operator: specifierOperators[operator], + original: s, + }, nil +} + +func validate(operator, version string) error { + hasWildcard := false + if strings.HasSuffix(version, ".*") { + hasWildcard = true + version = strings.TrimSuffix(version, ".*") + } + v, err := Parse(version) + if err != nil { + return xerrors.Errorf("version parse error (%s): %w", v, err) + } + + switch operator { + case "", "=", "==", "!=": + if hasWildcard && (!v.dev.isNull() || v.local != "") { + return xerrors.New("the (non)equality operators don't allow to use a wild card and a dev" + + " or local version together") + } + case "~=": + if hasWildcard { + return xerrors.New("a wild card is not allowed") + } else if len(v.release) < 2 { + return xerrors.New("the compatible operator requires at least two digits in the release segment") + } else if v.local != "" { + return xerrors.New("local versions cannot be specified") + } + default: + if hasWildcard { + return xerrors.New("a wild card is not allowed") + } else if v.local != "" { + return xerrors.New("local versions cannot be specified") + } + } + return nil +} + +// Check tests if a version satisfies all the specifiers. +func (ss Specifiers) Check(v Version) bool { + if ss.conf.includePreRelease { + v.preReleaseIncluded = true + } + + for _, s := range ss.specifiers { + if andCheck(v, s) { + return true + } + } + + return false +} + +func (s specifier) check(v Version) bool { + return s.operator(v, s.version) +} + +func (s specifier) String() string { + return s.original +} + +// String returns the string format of the specifiers +func (ss Specifiers) String() string { + var ssStr []string + for _, orS := range ss.specifiers { + var sstr []string + for _, andS := range orS { + sstr = append(sstr, andS.String()) + } + ssStr = append(ssStr, strings.Join(sstr, ",")) + } + + return strings.Join(ssStr, "||") +} + +func andCheck(v Version, specifiers []specifier) bool { + for _, c := range specifiers { + if !c.check(v) { + return false + } + } + return true +} + +func versionSplit(version string) []string { + var result []string + for _, v := range strings.Split(version, ".") { + m := prefixRegexp.FindStringSubmatch(v) + if m != nil { + result = append(result, m[1:]...) 
+		} else {
+			result = append(result, v)
+		}
+	}
+	return result
+}
+
+func isDigit(s string) bool {
+	if _, err := strconv.Atoi(s); err == nil {
+		return true
+	}
+	return false
+}
+
+func padVersion(left, right []string) ([]string, []string) {
+	var leftRelease, rightRelease []string
+	for _, l := range left {
+		if isDigit(l) {
+			leftRelease = append(leftRelease, l)
+		}
+	}
+
+	for _, r := range right {
+		if isDigit(r) {
+			rightRelease = append(rightRelease, r)
+		}
+	}
+
+	// Get the rest of our versions
+	leftRest := left[len(leftRelease):]
+	rightRest := right[len(rightRelease):]
+
+	for i := 0; i < len(leftRelease)-len(rightRelease); i++ {
+		rightRelease = append(rightRelease, "0")
+	}
+	for i := 0; i < len(rightRelease)-len(leftRelease); i++ {
+		leftRelease = append(leftRelease, "0")
+	}
+
+	return append(leftRelease, leftRest...), append(rightRelease, rightRest...)
+}
+
+//-------------------------------------------------------------------
+// Specifier functions
+//-------------------------------------------------------------------
+
+func specifierCompatible(prospective Version, spec string) bool {
+	// Compatible releases have an equivalent combination of >= and ==. That is that ~=2.2 is equivalent to >=2.2,==2.*.
+	// This allows us to implement this in terms of the other specifiers instead of implementing it ourselves.
+	// The only thing we need to do is construct the other specifiers.
+
+	var prefixElements []string
+	for _, s := range versionSplit(spec) {
+		if strings.HasPrefix(s, "post") || strings.HasPrefix(s, "dev") {
+			break
+		}
+		prefixElements = append(prefixElements, s)
+	}
+
+	// We want everything but the last item in the version, but we want to ignore post and dev releases and
+	// we want to treat the pre-release as its own separate segment.
+	prefix := strings.Join(prefixElements[:len(prefixElements)-1], ".")
+
+	// Add the prefix notation to the end of our string
+	prefix += ".*"
+
+	return specifierGreaterThanEqual(prospective, spec) && specifierEqual(prospective, prefix)
+}
+
+func specifierEqual(prospective Version, spec string) bool {
+	// https://github.com/pypa/packaging/blob/a6407e3a7e19bd979e93f58cfc7f6641a7378c46/packaging/specifiers.py#L476
+	// We need special logic to handle prefix matching
+	if strings.HasSuffix(spec, ".*") {
+		// In the case of prefix matching we want to ignore local segment.
+		prospective = MustParse(prospective.Public())
+
+		// Split the spec out by dots, and pretend that there is an implicit
+		// dot in between a release segment and a pre-release segment.
+		splitSpec := versionSplit(strings.TrimSuffix(spec, ".*"))
+
+		// Split the prospective version out by dots, and pretend that there is an implicit dot
+		// in between a release segment and a pre-release segment.
+		splitProspective := versionSplit(prospective.String())
+
+		// Shorten the prospective version to be the same length as the spec
+		// so that we can determine if the specifier is a prefix of the
+		// prospective version or not.
+		if len(splitProspective) > len(splitSpec) {
+			splitProspective = splitProspective[:len(splitSpec)]
+		}
+
+		paddedSpec, paddedProspective := padVersion(splitSpec, splitProspective)
+		return reflect.DeepEqual(paddedSpec, paddedProspective)
+	}
+
+	specVersion := MustParse(spec)
+	if specVersion.local == "" {
+		prospective = MustParse(prospective.Public())
+	}
+
+	return specVersion.Equal(prospective)
+}
+
+func specifierNotEqual(prospective Version, spec string) bool {
+	return !specifierEqual(prospective, spec)
+}
+
+func specifierLessThan(prospective Version, spec string) bool {
+	// Convert our spec to a Version instance, since we'll want to work with it as a version.
+	s := MustParse(spec)
+
+	// Check to see if the prospective version is less than the spec version.
+	// If it's not, we can short circuit and just return false now instead of doing extra unneeded work.
+	if !prospective.LessThan(s) {
+		return false
+	}
+
+	// This special case is here so that, unless the specifier itself includes a pre-release version,
+	// we do not accept pre-release versions for the version mentioned in the specifier
+	// (e.g. <3.1 should not match 3.1.dev0, but should match 3.0.dev0).
+	if !s.IsPreRelease() && prospective.IsPreRelease() {
+		if MustParse(prospective.BaseVersion()).Equal(MustParse(s.BaseVersion())) {
+			return false
+		}
+	}
+	return true
+}
+
+func specifierGreaterThan(prospective Version, spec string) bool {
+	// Convert our spec to a Version instance, since we'll want to work with it as a version.
+	s := MustParse(spec)
+
+	// Check to see if the prospective version is greater than the spec version.
+	// If it's not, we can short circuit and just return false now instead of doing extra unneeded work.
+	if !prospective.GreaterThan(s) {
+		return false
+	}
+
+	// This special case is here so that, unless the specifier itself includes a post-release version,
+	// we do not accept post-release versions for the version mentioned in the specifier
+	// (e.g. >3.1 should not match 3.0.post0, but should match 3.2.post0).
+	if !s.IsPostRelease() && prospective.IsPostRelease() {
+		if MustParse(prospective.BaseVersion()).Equal(MustParse(s.BaseVersion())) {
+			return false
+		}
+	}
+
+	// Ensure that we do not allow a local version of the version mentioned
+	// in the specifier, which is technically greater than, to match.
+	if prospective.local != "" {
+		if MustParse(prospective.BaseVersion()).Equal(MustParse(s.BaseVersion())) {
+			return false
+		}
+	}
+	return true
+}
+
+func specifierArbitrary(prospective Version, spec string) bool {
+	return strings.EqualFold(prospective.String(), spec)
+}
+
+func specifierLessThanEqual(prospective Version, spec string) bool {
+	p := MustParse(prospective.Public())
+	s := MustParse(spec)
+	return p.LessThanOrEqual(s)
+}
+
+func specifierGreaterThanEqual(prospective Version, spec string) bool {
+	p := MustParse(prospective.Public())
+	s := MustParse(spec)
+	return p.GreaterThanOrEqual(s)
+}
diff --git a/vendor/github.com/aquasecurity/go-pep440-version/specifier_option.go b/vendor/github.com/aquasecurity/go-pep440-version/specifier_option.go
new file mode 100644
index 00000000..71bdb96c
--- /dev/null
+++ b/vendor/github.com/aquasecurity/go-pep440-version/specifier_option.go
@@ -0,0 +1,15 @@
+package version
+
+type conf struct {
+	includePreRelease bool
+}
+
+type SpecifierOption interface {
+	apply(*conf)
+}
+
+type WithPreRelease bool
+
+func (o WithPreRelease) apply(c *conf) {
+	c.includePreRelease = bool(o)
+}
diff --git a/vendor/github.com/aquasecurity/go-pep440-version/version.go b/vendor/github.com/aquasecurity/go-pep440-version/version.go
new file mode 100644
index 00000000..8ddf2771
--- /dev/null
+++ b/vendor/github.com/aquasecurity/go-pep440-version/version.go
@@ -0,0 +1,358 @@
+package version
+
+import (
+	"bytes"
+	"fmt"
+	"regexp"
+	"strings"
+
+	"golang.org/x/xerrors"
+
+	"github.com/aquasecurity/go-version/pkg/part"
+)
+
+var (
+	// The compiled regular expression used to test the validity of a version.
+	versionRegex *regexp.Regexp
+
+	// https://github.com/pypa/packaging/blob/a6407e3a7e19bd979e93f58cfc7f6641a7378c46/packaging/version.py#L459-L464
+	preReleaseAliases = map[string]string{
+		"a":       "a",
+		"alpha":   "a",
+		"b":       "b",
+		"beta":    "b",
+		"rc":      "rc",
+		"c":       "rc",
+		"pre":     "rc",
+		"preview": "rc",
+	}
+
+	// https://github.com/pypa/packaging/blob/a6407e3a7e19bd979e93f58cfc7f6641a7378c46/packaging/version.py#L465-L466
+	postReleaseAliases = map[string]string{
+		"post": "post",
+		"rev":  "post",
+		"r":    "post",
+	}
+)
+
+const (
+	// The raw regular expression string used for testing the validity of a version.
+	regex = `v?` +
+		`(?:` +
+		`(?:(?P<epoch>[0-9]+)!)?` + // epoch
+		`(?P<release>[0-9]+(?:\.[0-9]+)*)` + // release segment
+		`(?P<pre>[-_\.]?(?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))[-_\.]?(?P<pre_n>[0-9]+)?)?` + // pre-release
+		`(?P<post>(?:-(?P<post_n1>[0-9]+))|(?:[-_\.]?(?P<post_l>post|rev|r)[-_\.]?(?P<post_n2>[0-9]+)?))?` + // post release
+		`(?P<dev>[-_\.]?(?P<dev_l>dev)[-_\.]?(?P<dev_n>[0-9]+)?)?)` + // dev release
+		`(?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))?` // local version
+)
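The named capture groups (`epoch`, `release`, `pre_l`, `post_n1`, and so on) are what `Parse` keys off of via `SubexpNames`. A quick, hypothetical probe shows what each group captures; since the `regex` constant is unexported, it would have to live in a test file inside this package, and the `(?i)^\s*…\s*$` anchoring mirrors the `init` function later in this file:

```
// hypothetical version_probe_test.go, in package version
package version

import (
	"fmt"
	"regexp"
	"testing"
)

func TestVersionRegexGroups(t *testing.T) {
	re := regexp.MustCompile(`(?i)^\s*` + regex + `\s*$`)
	m := re.FindStringSubmatch("1!2.0rc1.post2.dev3+ubuntu.1")
	if m == nil {
		t.Fatal("expected the version to match")
	}
	// print every non-empty named group for the sample version
	for i, name := range re.SubexpNames() {
		if name != "" && m[i] != "" {
			fmt.Printf("%s=%q\n", name, m[i])
		}
	}
	// among the wrapper pre/post/dev groups this prints:
	// epoch="1" release="2.0" pre_l="rc" pre_n="1"
	// post_l="post" post_n2="2" dev_l="dev" dev_n="3" local="ubuntu.1"
}
```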
+
+// Version represents a single version.
+type Version struct {
+	epoch              part.Uint64
+	release            []part.Uint64
+	pre                letterNumber
+	post               letterNumber
+	dev                letterNumber
+	local              string
+	key                key
+	preReleaseIncluded bool
+	original           string
+}
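The fields above map one-to-one onto PEP 440's segments (epoch, release, pre/post/dev markers, and the local label). A minimal sketch of how they surface through the exported accessors this package uses elsewhere (`Public`, `BaseVersion`, `IsPreRelease`); the commented output is my reading of the PEP 440 rules, not a verified fixture:

```
package main

import (
	"fmt"

	version "github.com/aquasecurity/go-pep440-version"
)

func main() {
	v := version.MustParse("1.2.3rc1+build.5")
	fmt.Println(v.Public())       // "1.2.3rc1": the version without its local segment
	fmt.Println(v.BaseVersion())  // "1.2.3": epoch and release only
	fmt.Println(v.IsPreRelease()) // true: the rc1 pre-release segment
}
```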
+
+type key struct {
+	epoch   part.Uint64
+	release part.Parts
+	pre     part.Part
+	post    part.Part
+	dev     part.Part
+	local   part.Part
+}
+
+func (k key) compare(o key) int {
+	p1 := part.Parts{k.epoch, k.release, k.pre, k.post, k.dev, k.local}
+	p2 := part.Parts{o.epoch, o.release, o.pre, o.post, o.dev, o.local}
+	return p1.Compare(p2)
+}
+
+type letterNumber struct {
+	letter part.String
+	number part.Uint64
+}
+
+func (ln letterNumber) isNull() bool {
+	return ln.letter.IsNull() && ln.number.IsNull()
+}
+
+func init() {
+	versionRegex = regexp.MustCompile(`(?i)^\s*` + regex + `\s*$`)
+}
+
+// MustParse is like Parse but panics if the version cannot be parsed.
+func MustParse(v string) Version {
+	ver, err := Parse(v)
+	if err != nil {
+		panic(err)
+	}
+	return ver
+}
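A minimal usage sketch of the two entry points, importing this vendored module by its path (github.com/aquasecurity/go-pep440-version; the package name is `version`):

```
package main

import (
	"fmt"

	version "github.com/aquasecurity/go-pep440-version"
)

func main() {
	// Parse reports malformed input as an error; "not-a-version" has no
	// numeric release segment, so the anchored regex rejects it.
	if _, err := version.Parse("not-a-version"); err != nil {
		fmt.Println("parse failed:", err)
	}

	// MustParse panics instead, which suits tests and package-level
	// variables built from known-good literals.
	v := version.MustParse("2.0.1")
	fmt.Println(v.String()) // 2.0.1
}
```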
+
+// Parse parses the given version and returns a new Version.
+func Parse(v string) (Version, error) {
+	matches := versionRegex.FindStringSubmatch(v)
+	if matches == nil {
+		return Version{}, xerrors.Errorf("malformed version: %s", v)
+	}
+
+	var epoch, preN, postN, devN part.Uint64
+	var preL, postL, devL part.String
+	var release []part.Uint64
+	var local string
+	var err error
+
+	for i, name := range versionRegex.SubexpNames() {
+		m := matches[i]
+		if m == "" {
+			continue
+		}
+
+		switch name {
+		case "epoch":
+			epoch, err = part.NewUint64(m)
+		case "release":
+			for _, str := range strings.Split(m, ".") {
+				val, err := part.NewUint64(str)
+				if err != nil {
+					return Version{}, xerrors.Errorf("error parsing version: %w", err)
+				}
+
+				release = append(release, val)
+			}
+		case "pre_l":
+			preL = part.String(preReleaseAliases[strings.ToLower(m)])
+		case "pre_n":
+			preN, err = part.NewUint64(m)
+		case "post_l":
+			postL = part.String(postReleaseAliases[strings.ToLower(m)])
+		case "post_n1", "post_n2":
+			// https://github.com/pypa/packaging/blob/a6407e3a7e19bd979e93f58cfc7f6641a7378c46/packaging/version.py#L469-L472
+			if postL == "" {
+				postL = "post"
+			}
+			postN, err = part.NewUint64(m)
+		case "dev_l":
+			devL = part.String(strings.ToLower(m))
+		case "dev_n":
+			devN, err = part.NewUint64(m)
+		case "local":
+			local = strings.ToLower(m)
+		}
+		if err != nil {
+			return Version{}, xerrors.Errorf("failed to parse version (%s): %w", v, err)
+		}
+	}
+
+	pre := letterNumber{
+		letter: preL,
+		number: preN,
+	}
+	post := letterNumber{
+		letter: postL,
+		number: postN,
+	}
+	dev := letterNumber{
+		letter: devL,
+		number: devN,
+	}
+
+	return Version{
+		epoch:    epoch,
+		release:  release,
+		pre:      pre,
+		post:     post,
+		dev:      dev,
+		local:    local,
+		key:      cmpkey(epoch, release, pre, post, dev, local),
+		original: v,
+	}, nil
+}
+
+// ref. https://github.com/pypa/packaging/blob/a6407e3a7e19bd979e93f58cfc7f6641a7378c46/packaging/version.py#L495
+func cmpkey(epoch part.Uint64, release []part.Uint64, pre, post, dev letterNumber, local string) key {
+	// Set default values
+	k := key{
+		epoch: epoch,
+		pre:   part.Parts{pre.letter, pre.number},
+		post:  part.Parts{post.letter, post.number},
+		dev:   part.Parts{dev.letter, dev.number},
+		local: part.NegativeInfinity,
+	}
+
+	// Remove trailing zeros
+	k.release = part.Uint64SliceToParts(release).Normalize()
+
+	// https://github.com/pypa/packaging/blob/a6407e3a7e19bd979e93f58cfc7f6641a7378c46/packaging/version.py#L514-L517
+	if pre.isNull() && post.isNull() && !dev.isNull() {
+		k.pre = part.NegativeInfinity
+	} else if pre.isNull() {
+		k.pre = part.Infinity
+	}
+
+	// Versions without a post segment should sort before those with one.
+	if post.isNull() {
+		k.post = part.NegativeInfinity
+	}
+
+	// Versions without a development segment should sort after those with one.
+	if dev.isNull() {
+		k.dev = part.Infinity
+	}
+
+	// Versions with a local segment need that segment parsed to implement the sorting rules in PEP440.
+	//   - Alpha numeric segments sort before numeric segments
+	//   - Alpha numeric segments sort lexicographically
+	//   - Numeric segments sort numerically
+	//   - Shorter versions sort before longer versions when the prefixes match exactly
+	if local != "" {
+		var parts part.Parts
+		for _, l := range strings.Split(local, ".") {
+			if p, err := part.NewUint64(l); err == nil {
+				parts = append(parts, p)
+			} else {
+				parts = append(parts, part.NewPreString(l))
+			}
+		}
+		k.local = parts
+	}
+
+	return k
+}
+
+// Compare compares this version to another version. This
+// returns -1, 0, or 1 if this version is smaller, equal,
+// or larger than the other version, respectively.
+func (v Version) Compare(other Version) int {
+	// A quick, efficient equality check
+	if v.String() == other.String() {
+		return 0
+	}
+
+	k1 := v.key
+	k2 := other.key
+
+	k1.release = k1.release.Padding(len(k2.release), part.Zero)
+	k2.release = k2.release.Padding(len(k1.release), part.Zero)
+
+	return k1.compare(k2)
+}
+
+// Equal tests if two versions are equal.
+func (v Version) Equal(o Version) bool {
+	return v.Compare(o) == 0
+}
+
+// GreaterThan tests if this version is greater than another version.
+func (v Version) GreaterThan(o Version) bool {
+	return v.Compare(o) > 0
+}
+
+// GreaterThanOrEqual tests if this version is greater than or equal to another version.
+func (v Version) GreaterThanOrEqual(o Version) bool {
+	return v.Compare(o) >= 0
+}
+
+// LessThan tests if this version is less than another version.
+func (v Version) LessThan(o Version) bool {
+	return v.Compare(o) < 0
+}
+
+// LessThanOrEqual tests if this version is less than or equal to another version.
+func (v Version) LessThanOrEqual(o Version) bool {
+	return v.Compare(o) <= 0
+}
+
+// String returns the full version string, including pre-release
+// and metadata information.
+func (v Version) String() string {
+	var buf bytes.Buffer
+
+	// Epoch
+	if v.epoch != 0 {
+		fmt.Fprintf(&buf, "%d!", v.epoch)
+	}
+
+	// Release segment
+	fmt.Fprintf(&buf, "%d", v.release[0])
+	for _, r := range v.release[1:len(v.release)] {
+		fmt.Fprintf(&buf, ".%d", r)
+	}
+
+	// Pre-release
+	if !v.pre.isNull() {
+		fmt.Fprintf(&buf, "%s%d", v.pre.letter, v.pre.number)
+	}
+
+	// Post-release
+	if !v.post.isNull() {
+		fmt.Fprintf(&buf, ".post%d", v.post.number)
+	}
+
+	// Development release
+	if !v.dev.isNull() {
+		fmt.Fprintf(&buf, ".dev%d", v.dev.number)
+	}
+
+	// Local version segment
+	if v.local != "" {
+		fmt.Fprintf(&buf, "+%s", v.local)
+	}
+
+	return buf.String()
+}
+
+// BaseVersion returns the base version
+func (v Version) BaseVersion() string {
+	var buf bytes.Buffer
+
+	// Epoch
+	if v.epoch != 0 {
+		fmt.Fprintf(&buf, "%d!", v.epoch)
+	}
+
+	// Release segment
+	fmt.Fprintf(&buf, "%d", v.release[0])
+	for _, r := range v.release[1:len(v.release)] {
+		fmt.Fprintf(&buf, ".%d", r)
+	}
+
+	return buf.String()
+}
+
+// Original returns the original parsed version as-is, including any
+// potential whitespace, `v` prefix, etc.
+func (v Version) Original() string {
+	return v.original
+}
+
+// Local returns the local version
+func (v Version) Local() string {
+	return v.local
+}
+
+// Public returns the public version
+func (v Version) Public() string {
+	return strings.SplitN(v.String(), "+", 2)[0]
+}
+
+// IsPreRelease reports whether the version is a pre-release
+func (v Version) IsPreRelease() bool {
+	if v.preReleaseIncluded {
+		return false
+	}
+	return !v.pre.isNull() || !v.dev.isNull()
+}
+
+// IsPostRelease reports whether the version is a post-release
+func (v Version) IsPostRelease() bool {
+	return !v.post.isNull()
+}
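Not part of the upstream patch, but as a quick orientation to the API this vendored file exposes, here is a minimal usage sketch. It assumes only the import path shown in the diff above (the package is named `version`, aliased here to `pep440`):

```go
package main

import (
	"fmt"

	pep440 "github.com/aquasecurity/go-pep440-version"
)

func main() {
	// Post-releases sort after their base version.
	a := pep440.MustParse("1.0.post1")
	b := pep440.MustParse("1.0")
	fmt.Println(a.GreaterThan(b))  // true
	fmt.Println(a.IsPostRelease()) // true

	// Pre-release and local segments are parsed into separate fields.
	c, err := pep440.Parse("1.1rc1+ubuntu.1")
	if err != nil {
		panic(err)
	}
	fmt.Println(c.IsPreRelease()) // true
	fmt.Println(c.Public())       // 1.1rc1
	fmt.Println(c.Local())        // ubuntu.1
}
```

Note that `Compare` normalizes trailing zeros in the release segment, so `MustParse("1.0.0").Equal(MustParse("1"))` reports true, matching PEP 440 semantics.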
diff --git a/vendor/github.com/aquasecurity/go-version/LICENSE b/vendor/github.com/aquasecurity/go-version/LICENSE
new file mode 100644
index 00000000..261eeb9e
--- /dev/null
+++ b/vendor/github.com/aquasecurity/go-version/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/aquasecurity/go-version/pkg/part/any.go b/vendor/github.com/aquasecurity/go-version/pkg/part/any.go
new file mode 100644
index 00000000..c9536bc6
--- /dev/null
+++ b/vendor/github.com/aquasecurity/go-version/pkg/part/any.go
@@ -0,0 +1,33 @@
+package part
+
+import (
+	"golang.org/x/xerrors"
+)
+
+type Any bool
+
+func NewAny(s string) (Any, error) {
+	if s == "*" || s == "x" || s == "X" {
+		return true, nil
+	}
+	return false, xerrors.New("not wildcard")
+}
+
+func (s Any) Compare(other Part) int {
+	if s {
+		return 0
+	}
+	return -1
+}
+
+func (s Any) IsNull() bool {
+	return false
+}
+
+func (s Any) IsAny() bool {
+	return bool(s)
+}
+
+func (s Any) IsEmpty() bool {
+	return false
+}
diff --git a/vendor/github.com/aquasecurity/go-version/pkg/part/empty.go b/vendor/github.com/aquasecurity/go-version/pkg/part/empty.go
new file mode 100644
index 00000000..4af30162
--- /dev/null
+++ b/vendor/github.com/aquasecurity/go-version/pkg/part/empty.go
@@ -0,0 +1,28 @@
+package part
+
+type Empty struct {
+	any bool
+}
+
+func NewEmpty(any bool) Empty {
+	return Empty{any: any}
+}
+
+func (s Empty) Compare(other Part) int {
+	if s.IsAny() {
+		return 0
+	}
+	return Uint64(0).Compare(other)
+}
+
+func (s Empty) IsNull() bool {
+	return false
+}
+
+func (s Empty) IsAny() bool {
+	return s.any
+}
+
+func (s Empty) IsEmpty() bool {
+	return true
+}
diff --git a/vendor/github.com/aquasecurity/go-version/pkg/part/infinity.go b/vendor/github.com/aquasecurity/go-version/pkg/part/infinity.go
new file mode 100644
index 00000000..7bfe30c6
--- /dev/null
+++ b/vendor/github.com/aquasecurity/go-version/pkg/part/infinity.go
@@ -0,0 +1,51 @@
+package part
+
+var Infinity = InfinityType{}
+
+type InfinityType struct{}
+
+func (InfinityType) Compare(other Part) int {
+	switch other.(type) {
+	case InfinityType:
+		return 0
+	default:
+		return 1
+	}
+}
+
+func (InfinityType) IsNull() bool {
+	return false
+}
+
+func (InfinityType) IsAny() bool {
+	return false
+}
+
+func (InfinityType) IsEmpty() bool {
+	return false
+}
+
+var NegativeInfinity = NegativeInfinityType{}
+
+type NegativeInfinityType struct{}
+
+func (NegativeInfinityType) Compare(other Part) int {
+	switch other.(type) {
+	case NegativeInfinityType:
+		return 0
+	default:
+		return -1
+	}
+}
+
+func (NegativeInfinityType) IsNull() bool {
+	return false
+}
+
+func (NegativeInfinityType) IsAny() bool {
+	return false
+}
+
+func (NegativeInfinityType) IsEmpty() bool {
+	return false
+}
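These two sentinels are what `cmpkey` in the go-pep440-version file above leans on: an absent pre-release is replaced by `Infinity` so that `1.0` sorts above `1.0rc1`, while an absent post-release becomes `NegativeInfinity` so that `1.0` sorts below `1.0.post1`. An illustrative sketch of the comparison semantics, not part of the patch:

```go
package main

import (
	"fmt"

	"github.com/aquasecurity/go-version/pkg/part"
)

func main() {
	// Infinity compares greater than any non-Infinity part...
	fmt.Println(part.Infinity.Compare(part.Uint64(42))) // 1
	// ...and NegativeInfinity compares less than any other part.
	fmt.Println(part.NegativeInfinity.Compare(part.Uint64(0))) // -1
	// Each sentinel is only equal to itself.
	fmt.Println(part.Infinity.Compare(part.Infinity)) // 0
}
```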
diff --git a/vendor/github.com/aquasecurity/go-version/pkg/part/int.go b/vendor/github.com/aquasecurity/go-version/pkg/part/int.go
new file mode 100644
index 00000000..04c2a1bc
--- /dev/null
+++ b/vendor/github.com/aquasecurity/go-version/pkg/part/int.go
@@ -0,0 +1,58 @@
+package part
+
+import (
+	"strconv"
+)
+
+const Zero = Uint64(0)
+
+type Uint64 uint64
+
+func NewUint64(s string) (Uint64, error) {
+	n, err := strconv.ParseUint(s, 10, 64)
+	if err != nil {
+		return 0, err
+	}
+	return Uint64(n), nil
+}
+
+func (s Uint64) Compare(other Part) int {
+	if other == nil {
+		return 1
+	} else if s == other {
+		return 0
+	}
+
+	switch o := other.(type) {
+	case Uint64:
+		if s < o {
+			return -1
+		}
+		return 1
+	case String:
+		return -1
+	case PreString:
+		return 1
+	case Any:
+		return 0
+	case Empty:
+		if o.IsAny() {
+			return 0
+		}
+		return s.Compare(Uint64(0))
+	default:
+		panic("unknown type")
+	}
+}
+
+func (s Uint64) IsNull() bool {
+	return s == 0
+}
+
+func (s Uint64) IsAny() bool {
+	return false
+}
+
+func (s Uint64) IsEmpty() bool {
+	return false
+}
diff --git a/vendor/github.com/aquasecurity/go-version/pkg/part/list.go b/vendor/github.com/aquasecurity/go-version/pkg/part/list.go
new file mode 100644
index 00000000..2796b5b9
--- /dev/null
+++ b/vendor/github.com/aquasecurity/go-version/pkg/part/list.go
@@ -0,0 +1,149 @@
+package part
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+)
+
+type Parts []Part
+
+func NewParts(s string) Parts {
+	var parts []Part
+	if s == "" {
+		return parts
+	}
+
+	for _, p := range strings.Split(s, ".") {
+		parts = append(parts, NewPart(p))
+	}
+	return parts
+}
+
+func (parts Parts) Normalize() Parts {
+	ret := make(Parts, len(parts))
+	copy(ret, parts)
+
+	for i := len(ret) - 1; i >= 0; i-- {
+		lastItem := ret[i]
+		if lastItem.IsNull() {
+			ret = ret[:i]
+			continue
+		}
+		break
+	}
+	return ret
+}
+
+func (parts Parts) Padding(size int, padding Part) Parts {
+	diff := size - len(parts)
+	if diff <= 0 {
+		return parts
+	}
+
+	padded := parts
+	for i := 0; i < diff; i++ {
+		padded = append(padded, padding)
+	}
+	return padded
+}
+
+func (parts Parts) Compare(other Part) int {
+	if other == nil {
+		return 1
+	} else if other.IsAny() {
+		return 0
+	}
+
+	var o Parts
+	switch t := other.(type) {
+	case InfinityType:
+		return -1
+	case NegativeInfinityType:
+		return 1
+	case Parts:
+		o = t
+	default:
+		return -1
+	}
+
+	if reflect.DeepEqual(parts, o) {
+		return 0
+	}
+
+	iter := parts.Zip(o)
+	for tuple := iter(); tuple != nil; tuple = iter() {
+		var l, r = tuple.Left, tuple.Right
+		if l == nil {
+			return -1
+		}
+		if r == nil {
+			return 1
+		}
+
+		if l.IsAny() || r.IsAny() {
+			return 0
+		}
+
+		if result := l.Compare(r); result != 0 {
+			return result
+		}
+	}
+	return 0
+}
+
+func (parts Parts) IsNull() bool {
+	return parts.IsAny() || len(parts) == 0
+}
+
+func (parts Parts) IsAny() bool {
+	for _, p := range parts {
+		if p.IsAny() {
+			return true
+		}
+	}
+	return false
+}
+
+func (parts Parts) IsEmpty() bool {
+	return false
+}
+
+func (parts Parts) String() string {
+	s := make([]string, len(parts))
+	for i, p := range parts {
+		s[i] = fmt.Sprint(p)
+	}
+	return strings.Join(s, ".")
+}
+
+type ZipTuple struct {
+	Left  Part
+	Right Part
+}
+
+func (parts Parts) Zip(other Parts) func() *ZipTuple {
+	i := 0
+	return func() *ZipTuple {
+		var part1, part2 Part
+		if i < len(parts) {
+			part1 = parts[i]
+		}
+		if i < len(other) {
+			part2 = other[i]
+		}
+		if part1 == nil && part2 == nil {
+			return nil
+		}
+		i++
+		return &ZipTuple{Left: part1, Right: part2}
+	}
+}
+
+func Uint64SliceToParts(uint64Parts []Uint64) Parts {
+	parts := make(Parts, len(uint64Parts))
+	for i, u := range uint64Parts {
+		parts[i] = u
+	}
+	return parts
+}
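`Normalize` and `Padding` are the two helpers the PEP 440 code above uses to make release segments of different lengths comparable. A short sketch of their behavior, illustrative only and not part of the patch:

```go
package main

import (
	"fmt"

	"github.com/aquasecurity/go-version/pkg/part"
)

func main() {
	// Normalize drops trailing zero parts, so "1.0.0" reduces to "1".
	p := part.NewParts("1.0.0").Normalize()
	fmt.Println(p.String()) // 1

	// Padding right-pads with a given part up to the requested size.
	q := part.NewParts("1.2").Padding(4, part.Zero)
	fmt.Println(q.String()) // 1.2.0.0

	// Without padding, a longer Parts with a matching prefix compares
	// greater, which is why Version.Compare pads both releases to a
	// common length before calling key.compare.
	fmt.Println(part.NewParts("1.2.0").Compare(part.NewParts("1.2"))) // 1
}
```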
diff --git a/vendor/github.com/aquasecurity/go-version/pkg/part/part.go b/vendor/github.com/aquasecurity/go-version/pkg/part/part.go
new file mode 100644
index 00000000..8cc3e72b
--- /dev/null
+++ b/vendor/github.com/aquasecurity/go-version/pkg/part/part.go
@@ -0,0 +1,21 @@
+package part
+
+type Part interface {
+	Compare(Part) int
+	IsNull() bool
+	IsAny() bool
+	IsEmpty() bool
+}
+
+func NewPart(s string) Part {
+	var p Part
+	p, err := NewUint64(s)
+	if err == nil {
+		return p
+	}
+	p, err = NewAny(s)
+	if err == nil {
+		return p
+	}
+	return NewString(s)
+}
diff --git a/vendor/github.com/aquasecurity/go-version/pkg/part/string.go b/vendor/github.com/aquasecurity/go-version/pkg/part/string.go
new file mode 100644
index 00000000..0cb6b914
--- /dev/null
+++ b/vendor/github.com/aquasecurity/go-version/pkg/part/string.go
@@ -0,0 +1,94 @@
+package part
+
+import (
+	"strings"
+)
+
+type String string
+
+func NewString(s string) String {
+	return String(s)
+}
+
+func (s String) Compare(other Part) int {
+	if other == nil {
+		return 1
+	} else if s == other {
+		return 0
+	}
+
+	switch o := other.(type) {
+	case Uint64:
+		return 1
+	case String:
+		return strings.Compare(string(s), string(o))
+	case PreString:
+		return strings.Compare(string(s), string(o))
+	case Any:
+		return 0
+	case Empty:
+		if o.IsAny() {
+			return 0
+		}
+		return s.Compare(Uint64(0))
+	}
+	return 0
+}
+
+func (s String) IsNull() bool {
+	return s == ""
+}
+
+func (s String) IsAny() bool {
+	return false
+}
+
+func (s String) IsEmpty() bool {
+	return false
+}
+
+// PreString is less than the number
+// e.g. a < 1
+type PreString string
+
+func NewPreString(s string) PreString {
+	return PreString(s)
+}
+
+func (s PreString) Compare(other Part) int {
+	if other == nil {
+		return 1
+	} else if s == other {
+		return 0
+	}
+
+	switch o := other.(type) {
+	case Uint64:
+		return -1
+	case String:
+		return strings.Compare(string(s), string(o))
+	case PreString:
+		return strings.Compare(string(s), string(o))
+	case Any:
+		return 0
+	case Empty:
+		if o.IsAny() {
+			return 0
+		}
+
+		return s.Compare(Uint64(0))
+	}
+	return 0
+}
+
+func (s PreString) IsNull() bool {
+	return s == ""
+}
+
+func (s PreString) IsAny() bool {
+	return false
+}
+
+func (s PreString) IsEmpty() bool {
+	return false
+}
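Taken together, the `part` types implement PEP 440's local-segment ordering: numeric segments compare numerically, plain strings sort above numbers, and `PreString` is used where a string must sort below them. An illustrative sketch, not part of the patch:

```go
package main

import (
	"fmt"

	"github.com/aquasecurity/go-version/pkg/part"
)

func main() {
	// NewPart picks a concrete type: digits become Uint64,
	// "*"/"x"/"X" become Any, everything else becomes String.
	fmt.Println(part.NewPart("7").Compare(part.NewPart("10"))) // -1: numeric, not lexicographic
	fmt.Println(part.NewPart("beta").Compare(part.NewPart("1"))) // 1: String sorts above numbers

	// PreString inverts that rule ("a" < 1), which is how cmpkey builds
	// local version segments where alphanumeric parts sort first.
	fmt.Println(part.NewPreString("a").Compare(part.Uint64(1))) // -1
}
```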
diff --git a/vendor/github.com/docker/cli/cli/config/configfile/file.go b/vendor/github.com/docker/cli/cli/config/configfile/file.go
index 609a88c2..5db7f8b8 100644
--- a/vendor/github.com/docker/cli/cli/config/configfile/file.go
+++ b/vendor/github.com/docker/cli/cli/config/configfile/file.go
@@ -37,7 +37,6 @@ type ConfigFile struct {
 	PruneFilters         []string                     `json:"pruneFilters,omitempty"`
 	Proxies              map[string]ProxyConfig       `json:"proxies,omitempty"`
 	Experimental         string                       `json:"experimental,omitempty"`
-	StackOrchestrator    string                       `json:"stackOrchestrator,omitempty"` // Deprecated: swarm is now the default orchestrator, and this option is ignored.
 	CurrentContext       string                       `json:"currentContext,omitempty"`
 	CLIPluginsExtraDirs  []string                     `json:"cliPluginsExtraDirs,omitempty"`
 	Plugins              map[string]map[string]string `json:"plugins,omitempty"`
diff --git a/vendor/github.com/docker/cli/cli/connhelper/commandconn/commandconn.go b/vendor/github.com/docker/cli/cli/connhelper/commandconn/commandconn.go
index a0b035c9..202ddb84 100644
--- a/vendor/github.com/docker/cli/cli/connhelper/commandconn/commandconn.go
+++ b/vendor/github.com/docker/cli/cli/connhelper/commandconn/commandconn.go
@@ -32,7 +32,7 @@ import (
 )
 
 // New returns net.Conn
-func New(ctx context.Context, cmd string, args ...string) (net.Conn, error) {
+func New(_ context.Context, cmd string, args ...string) (net.Conn, error) {
 	var (
 		c   commandConn
 		err error
diff --git a/vendor/github.com/docker/cli/cli/connhelper/connhelper.go b/vendor/github.com/docker/cli/cli/connhelper/connhelper.go
index 9ac9d674..397149c3 100644
--- a/vendor/github.com/docker/cli/cli/connhelper/connhelper.go
+++ b/vendor/github.com/docker/cli/cli/connhelper/connhelper.go
@@ -47,7 +47,12 @@ func getConnectionHelper(daemonURL string, sshFlags []string) (*ConnectionHelper
 		}
 		return &ConnectionHelper{
 			Dialer: func(ctx context.Context, network, addr string) (net.Conn, error) {
-				return commandconn.New(ctx, "ssh", append(sshFlags, sp.Args("docker", "system", "dial-stdio")...)...)
+				args := []string{"docker"}
+				if sp.Path != "" {
+					args = append(args, "--host", "unix://"+sp.Path)
+				}
+				args = append(args, "system", "dial-stdio")
+				return commandconn.New(ctx, "ssh", append(sshFlags, sp.Args(args...)...)...)
 			},
 			Host: "http://docker.example.com",
 		}, nil
diff --git a/vendor/github.com/docker/cli/cli/connhelper/ssh/ssh.go b/vendor/github.com/docker/cli/cli/connhelper/ssh/ssh.go
index bde01ae7..fb4c9111 100644
--- a/vendor/github.com/docker/cli/cli/connhelper/ssh/ssh.go
+++ b/vendor/github.com/docker/cli/cli/connhelper/ssh/ssh.go
@@ -30,9 +30,7 @@ func ParseURL(daemonURL string) (*Spec, error) {
 		return nil, errors.Errorf("no host specified")
 	}
 	sp.Port = u.Port()
-	if u.Path != "" {
-		return nil, errors.Errorf("extra path after the host: %q", u.Path)
-	}
+	sp.Path = u.Path
 	if u.RawQuery != "" {
 		return nil, errors.Errorf("extra query after the host: %q", u.RawQuery)
 	}
@@ -47,6 +45,7 @@ type Spec struct {
 	User string
 	Host string
 	Port string
+	Path string
 }
 
 // Args returns args except "ssh" itself combined with optional additional command args
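The two hunks above work together: `ParseURL` now carries an optional socket path in `Spec.Path`, and the connection helper turns it into a `--host unix://...` flag for the remote `docker system dial-stdio` command. A rough sketch of the new behavior from the caller's side (illustrative only, assuming the vendored import path shown in the diff):

```go
package main

import (
	"fmt"

	"github.com/docker/cli/cli/connhelper/ssh"
)

func main() {
	// Previously this URL was rejected with "extra path after the host";
	// with this change the path is preserved on the Spec.
	sp, err := ssh.ParseURL("ssh://me@example.com:22/var/run/docker.sock")
	if err != nil {
		panic(err)
	}
	fmt.Println(sp.User, sp.Host, sp.Port, sp.Path)
	// me example.com 22 /var/run/docker.sock

	// Args composes the remote command that the SSH dialer will run,
	// mirroring what getConnectionHelper does above.
	fmt.Println(sp.Args("docker", "--host", "unix://"+sp.Path, "system", "dial-stdio"))
}
```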
diff --git a/vendor/github.com/docker/docker/api/swagger.yaml b/vendor/github.com/docker/docker/api/swagger.yaml
index c2943888..a820f996 100644
--- a/vendor/github.com/docker/docker/api/swagger.yaml
+++ b/vendor/github.com/docker/docker/api/swagger.yaml
@@ -5162,42 +5162,8 @@ definitions:
       ServerVersion:
         description: |
           Version string of the daemon.
-
-          > **Note**: the [standalone Swarm API](https://docs.docker.com/swarm/swarm-api/)
-          > returns the Swarm version instead of the daemon  version, for example
-          > `swarm/1.2.8`.
-        type: "string"
-        example: "17.06.0-ce"
-      ClusterStore:
-        description: |
-          URL of the distributed storage backend.
-
-
-          The storage backend is used for multihost networking (to store
-          network and endpoint information) and by the node discovery mechanism.
-
-          <p><br /></p>
-
-          > **Deprecated**: This field is only propagated when using standalone Swarm
-          > mode, and overlay networking using an external k/v store. Overlay
-          > networks with Swarm mode enabled use the built-in raft store, and
-          > this field will be empty.
-        type: "string"
-        example: "consul://consul.corp.example.com:8600/some/path"
-      ClusterAdvertise:
-        description: |
-          The network endpoint that the Engine advertises for the purpose of
-          node discovery. ClusterAdvertise is a `host:port` combination on which
-          the daemon is reachable by other hosts.
-
-          <p><br /></p>
-
-          > **Deprecated**: This field is only propagated when using standalone Swarm
-          > mode, and overlay networking using an external k/v store. Overlay
-          > networks with Swarm mode enabled use the built-in raft store, and
-          > this field will be empty.
         type: "string"
-        example: "node5.corp.example.com:8000"
+        example: "24.0.2"
       Runtimes:
         description: |
           List of [OCI compliant](https://github.com/opencontainers/runtime-spec)
@@ -9930,7 +9896,9 @@ paths:
               Id: "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30"
               Warning: ""
         403:
-          description: "operation not supported for pre-defined networks"
+          description: |
+            Forbidden operation. This happens when trying to create a network named after a pre-defined network,
+            or when trying to create an overlay network on a daemon which is not part of a Swarm cluster.
           schema:
             $ref: "#/definitions/ErrorResponse"
         404:
@@ -10393,6 +10361,12 @@ paths:
             default if omitted.
           required: true
           type: "string"
+        - name: "force"
+          in: "query"
+          description: |
+            Force disable a plugin even if still in use.
+          required: false
+          type: "boolean"
       tags: ["Plugin"]
   /plugins/{name}/upgrade:
     post:
diff --git a/vendor/github.com/docker/docker/api/types/configs.go b/vendor/github.com/docker/docker/api/types/configs.go
index 7689f38b..7d5930bb 100644
--- a/vendor/github.com/docker/docker/api/types/configs.go
+++ b/vendor/github.com/docker/docker/api/types/configs.go
@@ -3,7 +3,7 @@ package types // import "github.com/docker/docker/api/types"
 import (
 	"github.com/docker/docker/api/types/container"
 	"github.com/docker/docker/api/types/network"
-	specs "github.com/opencontainers/image-spec/specs-go/v1"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 )
 
 // configs holds structs used for internal communication between the
@@ -16,7 +16,7 @@ type ContainerCreateConfig struct {
 	Config           *container.Config
 	HostConfig       *container.HostConfig
 	NetworkingConfig *network.NetworkingConfig
-	Platform         *specs.Platform
+	Platform         *ocispec.Platform
 	AdjustCPUShares  bool
 }
diff --git a/vendor/github.com/docker/docker/api/types/image/opts.go b/vendor/github.com/docker/docker/api/types/image/opts.go
index a24f9059..3cefecb0 100644
--- a/vendor/github.com/docker/docker/api/types/image/opts.go
+++ b/vendor/github.com/docker/docker/api/types/image/opts.go
@@ -1,9 +1,9 @@
 package image
 
-import specs "github.com/opencontainers/image-spec/specs-go/v1"
+import ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 
 // GetImageOpts holds parameters to inspect an image.
 type GetImageOpts struct {
-	Platform *specs.Platform
+	Platform *ocispec.Platform
 	Details  bool
 }
diff --git a/vendor/github.com/docker/docker/api/types/registry/registry.go b/vendor/github.com/docker/docker/api/types/registry/registry.go
index 62a88f5b..b83f5d7b 100644
--- a/vendor/github.com/docker/docker/api/types/registry/registry.go
+++ b/vendor/github.com/docker/docker/api/types/registry/registry.go
@@ -4,7 +4,7 @@ import (
 	"encoding/json"
 	"net"
 
-	v1 "github.com/opencontainers/image-spec/specs-go/v1"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 )
 
 // ServiceConfig stores daemon registry services configuration.
@@ -113,8 +113,8 @@ type SearchResults struct {
 type DistributionInspect struct {
 	// Descriptor contains information about the manifest, including
 	// the content addressable digest
-	Descriptor v1.Descriptor
+	Descriptor ocispec.Descriptor
 	// Platforms contains the list of platforms supported by the image,
 	// obtained by parsing the manifest
-	Platforms []v1.Platform
+	Platforms []ocispec.Platform
 }
diff --git a/vendor/github.com/docker/docker/client/client.go b/vendor/github.com/docker/docker/client/client.go
index 1c081a51..54fa36cc 100644
--- a/vendor/github.com/docker/docker/client/client.go
+++ b/vendor/github.com/docker/docker/client/client.go
@@ -56,6 +56,36 @@ import (
 	"github.com/pkg/errors"
 )
 
+// DummyHost is a hostname used for local communication.
+//
+// It acts as a valid formatted hostname for local connections (such as "unix://"
+// or "npipe://") which do not require a hostname. It should never be resolved,
+// but uses the special-purpose ".localhost" TLD (as defined in [RFC 2606, Section 2]
+// and [RFC 6761, Section 6.3]).
+//
+// [RFC 7230, Section 5.4] defines that an empty header must be used for such
+// cases:
+//
+//	If the authority component is missing or undefined for the target URI,
+//	then a client MUST send a Host header field with an empty field-value.
+//
+// However, [Go stdlib] enforces the semantics of HTTP(S) over TCP, does not
+// allow an empty header to be used, and requires req.URL.Scheme to be either
+// "http" or "https".
+//
+// For further details, refer to:
+//
+//   - https://github.com/docker/engine-api/issues/189
+//   - https://github.com/golang/go/issues/13624
+//   - https://github.com/golang/go/issues/61076
+//   - https://github.com/moby/moby/issues/45935
+//
+// [RFC 2606, Section 2]: https://www.rfc-editor.org/rfc/rfc2606.html#section-2
+// [RFC 6761, Section 6.3]: https://www.rfc-editor.org/rfc/rfc6761#section-6.3
+// [RFC 7230, Section 5.4]: https://datatracker.ietf.org/doc/html/rfc7230#section-5.4
+// [Go stdlib]: https://github.com/golang/go/blob/6244b1946bc2101b01955468f1be502dbadd6807/src/net/http/transport.go#L558-L569
+const DummyHost = "api.moby.localhost"
+
 // ErrRedirect is the error returned by checkRedirect when the request is non-GET.
 var ErrRedirect = errors.New("unexpected redirect in response")
diff --git a/vendor/github.com/docker/docker/client/container_create.go b/vendor/github.com/docker/docker/client/container_create.go
index f82420b6..193a2bb5 100644
--- a/vendor/github.com/docker/docker/client/container_create.go
+++ b/vendor/github.com/docker/docker/client/container_create.go
@@ -9,7 +9,7 @@ import (
 	"github.com/docker/docker/api/types/container"
 	"github.com/docker/docker/api/types/network"
 	"github.com/docker/docker/api/types/versions"
-	specs "github.com/opencontainers/image-spec/specs-go/v1"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 )
 
 type configWrapper struct {
@@ -20,7 +20,7 @@ type configWrapper struct {
 
 // ContainerCreate creates a new container based on the given configuration.
 // It can be associated with a name, but it's not mandatory.
-func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, platform *specs.Platform, containerName string) (container.CreateResponse, error) {
+func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, platform *ocispec.Platform, containerName string) (container.CreateResponse, error) {
 	var response container.CreateResponse
 
 	if err := cli.NewVersionError("1.25", "stop timeout"); config != nil && config.StopTimeout != nil && err != nil {
@@ -75,7 +75,7 @@ func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config
 // Similar to containerd's platforms.Format(), but does allow components to be
 // omitted (e.g. pass "architecture" only, without "os":
 // https://github.com/containerd/containerd/blob/v1.5.2/platforms/platforms.go#L243-L263
-func formatPlatform(platform *specs.Platform) string {
+func formatPlatform(platform *ocispec.Platform) string {
 	if platform == nil {
 		return ""
 	}
diff --git a/vendor/github.com/docker/docker/client/hijack.go b/vendor/github.com/docker/docker/client/hijack.go
index 6bdacab1..7e84865f 100644
--- a/vendor/github.com/docker/docker/client/hijack.go
+++ b/vendor/github.com/docker/docker/client/hijack.go
@@ -23,14 +23,10 @@ func (cli *Client) postHijacked(ctx context.Context, path string, query url.Valu
 	if err != nil {
 		return types.HijackedResponse{}, err
 	}
-
-	apiPath := cli.getAPIPath(ctx, path, query)
-	req, err := http.NewRequest(http.MethodPost, apiPath, bodyEncoded)
+	req, err := cli.buildRequest(http.MethodPost, cli.getAPIPath(ctx, path, query), bodyEncoded, headers)
 	if err != nil {
 		return types.HijackedResponse{}, err
 	}
-	req = cli.addHeaders(req, headers)
-
 	conn, mediaType, err := cli.setupHijackConn(ctx, req, "tcp")
 	if err != nil {
 		return types.HijackedResponse{}, err
@@ -64,7 +60,6 @@ func fallbackDial(proto, addr string, tlsConfig *tls.Config) (net.Conn, error) {
 }
 
 func (cli *Client) setupHijackConn(ctx context.Context, req *http.Request, proto string) (net.Conn, string, error) {
-	req.Host = cli.addr
 	req.Header.Set("Connection", "Upgrade")
 	req.Header.Set("Upgrade", proto)
 
@@ -80,8 +75,8 @@ func (cli *Client) setupHijackConn(ctx context.Context, req *http.Request, proto
 	// state. Setting TCP KeepAlive on the socket connection will prohibit
 	// ECONNTIMEOUT unless the socket connection truly is broken
 	if tcpConn, ok := conn.(*net.TCPConn); ok {
-		tcpConn.SetKeepAlive(true)
-		tcpConn.SetKeepAlivePeriod(30 * time.Second)
+		_ = tcpConn.SetKeepAlive(true)
+		_ = tcpConn.SetKeepAlivePeriod(30 * time.Second)
 	}
 
 	clientconn := httputil.NewClientConn(conn, nil)
@@ -96,7 +91,7 @@ func (cli *Client) setupHijackConn(ctx context.Context, req *http.Request, proto
 		return nil, "", err
 	}
 	if resp.StatusCode != http.StatusSwitchingProtocols {
-		resp.Body.Close()
+		_ = resp.Body.Close()
 		return nil, "", fmt.Errorf("unable to upgrade to %s, received %d", proto, resp.StatusCode)
 	}
 }
diff --git a/vendor/github.com/docker/docker/client/interface.go b/vendor/github.com/docker/docker/client/interface.go
index 64877d16..7993c5a4 100644
--- a/vendor/github.com/docker/docker/client/interface.go
+++ b/vendor/github.com/docker/docker/client/interface.go
@@ -15,7 +15,7 @@ import (
 	"github.com/docker/docker/api/types/registry"
 	"github.com/docker/docker/api/types/swarm"
 	"github.com/docker/docker/api/types/volume"
-	specs "github.com/opencontainers/image-spec/specs-go/v1"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 )
 
 // CommonAPIClient is the common methods between stable and experimental versions of APIClient.
@@ -47,7 +47,7 @@ type CommonAPIClient interface {
 type ContainerAPIClient interface {
 	ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error)
 	ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.IDResponse, error)
-	ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, platform *specs.Platform, containerName string) (container.CreateResponse, error)
+	ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, platform *ocispec.Platform, containerName string) (container.CreateResponse, error)
 	ContainerDiff(ctx context.Context, container string) ([]container.FilesystemChange, error)
 	ContainerExecAttach(ctx context.Context, execID string, config types.ExecStartCheck) (types.HijackedResponse, error)
 	ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.IDResponse, error)
diff --git a/vendor/github.com/docker/docker/client/request.go b/vendor/github.com/docker/docker/client/request.go
index c799095c..bcedcf3b 100644
--- a/vendor/github.com/docker/docker/client/request.go
+++ b/vendor/github.com/docker/docker/client/request.go
@@ -96,16 +96,14 @@ func (cli *Client) buildRequest(method, path string, body io.Reader, headers hea
 		return nil, err
 	}
 	req = cli.addHeaders(req, headers)
+	req.URL.Scheme = cli.scheme
+	req.URL.Host = cli.addr
 
 	if cli.proto == "unix" || cli.proto == "npipe" {
-		// For local communications, it doesn't matter what the host is. We just
-		// need a valid and meaningful host name. (See #189)
-		req.Host = "docker"
+		// Override host header for non-tcp connections.
+		req.Host = DummyHost
 	}
 
-	req.URL.Host = cli.addr
-	req.URL.Scheme = cli.scheme
-
 	if expectedPayload && req.Header.Get("Content-Type") == "" {
 		req.Header.Set("Content-Type", "text/plain")
 	}
 }
diff --git a/vendor/github.com/edsrzf/mmap-go/.gitignore b/vendor/github.com/edsrzf/mmap-go/.gitignore
new file mode 100644
index 00000000..6c694e4b
--- /dev/null
+++ b/vendor/github.com/edsrzf/mmap-go/.gitignore
@@ -0,0 +1,11 @@
+*.out
+*.5
+*.6
+*.8
+*.swp
+_obj
+_test
+testdata
+/.idea
+*.iml
+/notes.txt
diff --git a/vendor/github.com/edsrzf/mmap-go/LICENSE b/vendor/github.com/edsrzf/mmap-go/LICENSE
new file mode 100644
index 00000000..8f05f338
--- /dev/null
+++ b/vendor/github.com/edsrzf/mmap-go/LICENSE
@@ -0,0 +1,25 @@
+Copyright (c) 2011, Evan Shaw <edsrzf@gmail.com>
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in the
+      documentation and/or other materials provided with the distribution.
+    * Neither the name of the copyright holder nor the
+      names of its contributors may be used to endorse or promote products
+      derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/vendor/github.com/edsrzf/mmap-go/README.md b/vendor/github.com/edsrzf/mmap-go/README.md
new file mode 100644
index 00000000..1ac39f7e
--- /dev/null
+++ b/vendor/github.com/edsrzf/mmap-go/README.md
@@ -0,0 +1,14 @@
+mmap-go
+=======
+![Build Status](https://github.com/edsrzf/mmap-go/actions/workflows/build-test.yml/badge.svg)
+[![Go Reference](https://pkg.go.dev/badge/github.com/edsrzf/mmap-go.svg)](https://pkg.go.dev/github.com/edsrzf/mmap-go)
+
+mmap-go is a portable mmap package for the [Go programming language](http://golang.org).
+
+Operating System Support
+========================
+This package is tested using GitHub Actions on Linux, macOS, and Windows. It should also work on other Unix-like platforms, but hasn't been tested with them. I'm interested to hear about the results.
+
+I haven't been able to add more features without adding significant complexity, so mmap-go doesn't support `mprotect`, `mincore`, and maybe a few other things. If you're running on a Unix-like platform and need some of these features, I suggest Gustavo Niemeyer's [gommap](http://labix.org/gommap).
+
+This package compiles on Plan 9, but its functions always return errors.
diff --git a/vendor/github.com/edsrzf/mmap-go/mmap.go b/vendor/github.com/edsrzf/mmap-go/mmap.go
new file mode 100644
index 00000000..29655bd2
--- /dev/null
+++ b/vendor/github.com/edsrzf/mmap-go/mmap.go
@@ -0,0 +1,117 @@
+// Copyright 2011 Evan Shaw. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file defines the common package interface and contains a little bit of
+// factored out logic.
+
+// Package mmap allows mapping files into memory. It tries to provide a simple, reasonably portable interface,
+// but doesn't go out of its way to abstract away every little platform detail.
+// This specifically means:
+//	* forked processes may or may not inherit mappings
+//	* a file's timestamp may or may not be updated by writes through mappings
+//	* specifying a size larger than the file's actual size can increase the file's size
+//	* If the mapped file is being modified by another process while your program's running, don't expect consistent results between platforms
+package mmap
+
+import (
+	"errors"
+	"os"
+	"reflect"
+	"unsafe"
+)
+
+const (
+	// RDONLY maps the memory read-only.
+	// Attempts to write to the MMap object will result in undefined behavior.
+	RDONLY = 0
+	// RDWR maps the memory as read-write. Writes to the MMap object will update the
+	// underlying file.
+	RDWR = 1 << iota
+	// COPY maps the memory as copy-on-write. Writes to the MMap object will affect
+	// memory, but the underlying file will remain unchanged.
+	COPY
+	// If EXEC is set, the mapped memory is marked as executable.
+	EXEC
+)
+
+const (
+	// If the ANON flag is set, the mapped memory will not be backed by a file.
+	ANON = 1 << iota
+)
+
+// MMap represents a file mapped into memory.
+type MMap []byte
+
+// Map maps an entire file into memory.
+// If ANON is set in flags, f is ignored.
+func Map(f *os.File, prot, flags int) (MMap, error) {
+	return MapRegion(f, -1, prot, flags, 0)
+}
+
+// MapRegion maps part of a file into memory.
+// The offset parameter must be a multiple of the system's page size.
+// If length < 0, the entire file will be mapped.
+// If ANON is set in flags, f is ignored.
+func MapRegion(f *os.File, length int, prot, flags int, offset int64) (MMap, error) {
+	if offset%int64(os.Getpagesize()) != 0 {
+		return nil, errors.New("offset parameter must be a multiple of the system's page size")
+	}
+
+	var fd uintptr
+	if flags&ANON == 0 {
+		fd = uintptr(f.Fd())
+		if length < 0 {
+			fi, err := f.Stat()
+			if err != nil {
+				return nil, err
+			}
+			length = int(fi.Size())
+		}
+	} else {
+		if length <= 0 {
+			return nil, errors.New("anonymous mapping requires non-zero length")
+		}
+		fd = ^uintptr(0)
+	}
+	return mmap(length, uintptr(prot), uintptr(flags), fd, offset)
+}
+
+func (m *MMap) header() *reflect.SliceHeader {
+	return (*reflect.SliceHeader)(unsafe.Pointer(m))
+}
+
+func (m *MMap) addrLen() (uintptr, uintptr) {
+	header := m.header()
+	return header.Data, uintptr(header.Len)
+}
+
+// Lock keeps the mapped region in physical memory, ensuring that it will not be
+// swapped out.
+func (m MMap) Lock() error {
+	return m.lock()
+}
+
+// Unlock reverses the effect of Lock, allowing the mapped region to potentially
+// be swapped out.
+// If m is already unlocked, an error will result.
+func (m MMap) Unlock() error {
+	return m.unlock()
+}
+
+// Flush synchronizes the mapping's contents to the file's contents on disk.
+func (m MMap) Flush() error {
+	return m.flush()
+}
+
+// Unmap deletes the memory mapped region, flushes any remaining changes, and sets
+// m to nil.
+// Trying to read or write any remaining references to m after Unmap is called will
+// result in undefined behavior.
+// Unmap should only be called on the slice value that was originally returned from
+// a call to Map. Calling Unmap on a derived slice may cause errors.
+func (m *MMap) Unmap() error {
+	err := m.unmap()
+	*m = nil
+	return err
+}
diff --git a/vendor/github.com/edsrzf/mmap-go/mmap_plan9.go b/vendor/github.com/edsrzf/mmap-go/mmap_plan9.go
new file mode 100644
index 00000000..e4c33d39
--- /dev/null
+++ b/vendor/github.com/edsrzf/mmap-go/mmap_plan9.go
@@ -0,0 +1,27 @@
+// Copyright 2020 Evan Shaw. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mmap
+
+import "syscall"
+
+func mmap(len int, inprot, inflags, fd uintptr, off int64) ([]byte, error) {
+	return nil, syscall.EPLAN9
+}
+
+func (m MMap) flush() error {
+	return syscall.EPLAN9
+}
+
+func (m MMap) lock() error {
+	return syscall.EPLAN9
+}
+
+func (m MMap) unlock() error {
+	return syscall.EPLAN9
+}
+
+func (m MMap) unmap() error {
+	return syscall.EPLAN9
+}
diff --git a/vendor/github.com/edsrzf/mmap-go/mmap_unix.go b/vendor/github.com/edsrzf/mmap-go/mmap_unix.go
new file mode 100644
index 00000000..25b13e51
--- /dev/null
+++ b/vendor/github.com/edsrzf/mmap-go/mmap_unix.go
@@ -0,0 +1,51 @@
+// Copyright 2011 Evan Shaw. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux openbsd solaris netbsd
+
+package mmap
+
+import (
+	"golang.org/x/sys/unix"
+)
+
+func mmap(len int, inprot, inflags, fd uintptr, off int64) ([]byte, error) {
+	flags := unix.MAP_SHARED
+	prot := unix.PROT_READ
+	switch {
+	case inprot&COPY != 0:
+		prot |= unix.PROT_WRITE
+		flags = unix.MAP_PRIVATE
+	case inprot&RDWR != 0:
+		prot |= unix.PROT_WRITE
+	}
+	if inprot&EXEC != 0 {
+		prot |= unix.PROT_EXEC
+	}
+	if inflags&ANON != 0 {
+		flags |= unix.MAP_ANON
+	}
+
+	b, err := unix.Mmap(int(fd), off, len, prot, flags)
+	if err != nil {
+		return nil, err
+	}
+	return b, nil
+}
+
+func (m MMap) flush() error {
+	return unix.Msync([]byte(m), unix.MS_SYNC)
+}
+
+func (m MMap) lock() error {
+	return unix.Mlock([]byte(m))
+}
+
+func (m MMap) unlock() error {
+	return unix.Munlock([]byte(m))
+}
+
+func (m MMap) unmap() error {
+	return unix.Munmap([]byte(m))
+}
diff --git a/vendor/github.com/edsrzf/mmap-go/mmap_windows.go b/vendor/github.com/edsrzf/mmap-go/mmap_windows.go
new file mode 100644
index 00000000..e0d986f7
--- /dev/null
+++ b/vendor/github.com/edsrzf/mmap-go/mmap_windows.go
@@ -0,0 +1,154 @@
+// Copyright 2011 Evan Shaw. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mmap
+
+import (
+	"errors"
+	"os"
+	"sync"
+
+	"golang.org/x/sys/windows"
+)
+
+// mmap on Windows is a two-step process.
+// First, we call CreateFileMapping to get a handle.
+// Then, we call MapViewOfFile to get an actual pointer into memory.
+// Because we want to emulate a POSIX-style mmap, we don't want to expose
+// the handle -- only the pointer. We also want to return only a byte slice,
+// not a struct, so it's convenient to manipulate.
+
+// We keep this map so that we can get back the original handle from the memory address.
+
+type addrinfo struct {
+	file     windows.Handle
+	mapview  windows.Handle
+	writable bool
+}
+
+var handleLock sync.Mutex
+var handleMap = map[uintptr]*addrinfo{}
+
+func mmap(len int, prot, flags, hfile uintptr, off int64) ([]byte, error) {
+	flProtect := uint32(windows.PAGE_READONLY)
+	dwDesiredAccess := uint32(windows.FILE_MAP_READ)
+	writable := false
+	switch {
+	case prot&COPY != 0:
+		flProtect = windows.PAGE_WRITECOPY
+		dwDesiredAccess = windows.FILE_MAP_COPY
+		writable = true
+	case prot&RDWR != 0:
+		flProtect = windows.PAGE_READWRITE
+		dwDesiredAccess = windows.FILE_MAP_WRITE
+		writable = true
+	}
+	if prot&EXEC != 0 {
+		flProtect <<= 4
+		dwDesiredAccess |= windows.FILE_MAP_EXECUTE
+	}
+
+	// The maximum size is the area of the file, starting from 0,
+	// that we wish to allow to be mappable. It is the sum of
+	// the length the user requested, plus the offset where that length
+	// is starting from. This does not map the data into memory.
+	maxSizeHigh := uint32((off + int64(len)) >> 32)
+	maxSizeLow := uint32((off + int64(len)) & 0xFFFFFFFF)
+	// TODO: Do we need to set some security attributes? It might help portability.
+	h, errno := windows.CreateFileMapping(windows.Handle(hfile), nil, flProtect, maxSizeHigh, maxSizeLow, nil)
+	if h == 0 {
+		return nil, os.NewSyscallError("CreateFileMapping", errno)
+	}
+
+	// Actually map a view of the data into memory. The view's size
+	// is the length the user requested.
+	fileOffsetHigh := uint32(off >> 32)
+	fileOffsetLow := uint32(off & 0xFFFFFFFF)
+	addr, errno := windows.MapViewOfFile(h, dwDesiredAccess, fileOffsetHigh, fileOffsetLow, uintptr(len))
+	if addr == 0 {
+		windows.CloseHandle(windows.Handle(h))
+		return nil, os.NewSyscallError("MapViewOfFile", errno)
+	}
+	handleLock.Lock()
+	handleMap[addr] = &addrinfo{
+		file:     windows.Handle(hfile),
+		mapview:  h,
+		writable: writable,
+	}
+	handleLock.Unlock()
+
+	m := MMap{}
+	dh := m.header()
+	dh.Data = addr
+	dh.Len = len
+	dh.Cap = dh.Len
+
+	return m, nil
+}
+
+func (m MMap) flush() error {
+	addr, len := m.addrLen()
+	errno := windows.FlushViewOfFile(addr, len)
+	if errno != nil {
+		return os.NewSyscallError("FlushViewOfFile", errno)
+	}
+
+	handleLock.Lock()
+	defer handleLock.Unlock()
+	handle, ok := handleMap[addr]
+	if !ok {
+		// should be impossible; we would've errored above
+		return errors.New("unknown base address")
+	}
+
+	if handle.writable && handle.file != windows.Handle(^uintptr(0)) {
+		if err := windows.FlushFileBuffers(handle.file); err != nil {
+			return os.NewSyscallError("FlushFileBuffers", err)
+		}
+	}
+
+	return nil
+}
+
+func (m MMap) lock() error {
+	addr, len := m.addrLen()
+	errno := windows.VirtualLock(addr, len)
+	return os.NewSyscallError("VirtualLock", errno)
+}
+
+func (m MMap) unlock() error {
+	addr, len := m.addrLen()
+	errno := windows.VirtualUnlock(addr, len)
+	return os.NewSyscallError("VirtualUnlock", errno)
+}
+
+func (m MMap) unmap() error {
+	err := m.flush()
+	if err != nil {
+		return err
+	}
+
+	addr := m.header().Data
+	// Lock the UnmapViewOfFile along with the handleMap deletion.
+	// As soon as we unmap the view, the OS is free to give the
+	// same addr to another new map. We don't want another goroutine
+	// to insert and remove the same addr into handleMap while
+	// we're trying to remove our old addr/handle pair.
+ handleLock.Lock() + defer handleLock.Unlock() + err = windows.UnmapViewOfFile(addr) + if err != nil { + return err + } + + handle, ok := handleMap[addr] + if !ok { + // should be impossible; we would've errored above + return errors.New("unknown base address") + } + delete(handleMap, addr) + + e := windows.CloseHandle(windows.Handle(handle.mapview)) + return os.NewSyscallError("CloseHandle", e) +} diff --git a/vendor/github.com/go-git/go-git/v5/COMPATIBILITY.md b/vendor/github.com/go-git/go-git/v5/COMPATIBILITY.md index 2a72b501..afd4f03b 100644 --- a/vendor/github.com/go-git/go-git/v5/COMPATIBILITY.md +++ b/vendor/github.com/go-git/go-git/v5/COMPATIBILITY.md @@ -1,111 +1,233 @@ -Supported Capabilities -====================== - -Here is a non-comprehensive table of git commands and features whose equivalent -is supported by go-git. - -| Feature | Status | Notes | -|---------------------------------------|--------|-------| -| **config** | -| config | ✔ | Reading and modifying per-repository configuration (`.git/config`) is supported. Global configuration (`$HOME/.gitconfig`) is not. | -| **getting and creating repositories** | -| init | ✔ | Plain init and `--bare` are supported. Flags `--template`, `--separate-git-dir` and `--shared` are not. | -| clone | ✔ | Plain clone and equivalents to `--progress`, `--single-branch`, `--depth`, `--origin`, `--recurse-submodules` are supported. Others are not. | -| **basic snapshotting** | -| add | ✔ | Plain add is supported. Any other flags aren't supported | -| status | ✔ | -| commit | ✔ | -| reset | ✔ | -| rm | ✔ | -| mv | ✔ | -| **branching and merging** | -| branch | ✔ | -| checkout | ✔ | Basic usages of checkout are supported. | -| merge | ✖ | -| mergetool | ✖ | -| stash | ✖ | -| tag | ✔ | -| **sharing and updating projects** | -| fetch | ✔ | -| pull | ✔ | Only supports merges where the merge can be resolved as a fast-forward. | -| push | ✔ | -| remote | ✔ | -| submodule | ✔ | -| **inspection and comparison** | -| show | ✔ | -| log | ✔ | -| shortlog | (see log) | -| describe | | -| **patching** | -| apply | ✖ | -| cherry-pick | ✖ | -| diff | ✔ | Patch object with UnifiedDiff output representation | -| rebase | ✖ | -| revert | ✖ | -| **debugging** | -| bisect | ✖ | -| blame | ✔ | -| grep | ✔ | -| **email** || -| am | ✖ | -| apply | ✖ | -| format-patch | ✖ | -| send-email | ✖ | -| request-pull | ✖ | -| **external systems** | -| svn | ✖ | -| fast-import | ✖ | -| **administration** | -| clean | ✔ | -| gc | ✖ | -| fsck | ✖ | -| reflog | ✖ | -| filter-branch | ✖ | -| instaweb | ✖ | -| archive | ✖ | -| bundle | ✖ | -| prune | ✖ | -| repack | ✖ | -| **server admin** | -| daemon | | -| update-server-info | | -| **advanced** | -| notes | ✖ | -| replace | ✖ | -| worktree | ✖ | -| annotate | (see blame) | -| **gpg** | -| git-verify-commit | ✔ | -| git-verify-tag | ✔ | -| **plumbing commands** | -| cat-file | ✔ | -| check-ignore | | -| commit-tree | | -| count-objects | | -| diff-index | | -| for-each-ref | ✔ | -| hash-object | ✔ | -| ls-files | ✔ | -| merge-base | ✔ | Calculates the merge-base only between two commits, and supports `--independent` and `--is-ancestor` modifiers; Does not support `--fork-point` nor `--octopus` modifiers. 
| -| read-tree | | -| rev-list | ✔ | -| rev-parse | | -| show-ref | ✔ | -| symbolic-ref | ✔ | -| update-index | | -| update-ref | | -| verify-pack | | -| write-tree | | -| **protocols** | -| http(s):// (dumb) | ✖ | -| http(s):// (smart) | ✔ | -| git:// | ✔ | -| ssh:// | ✔ | -| file:// | partial | Warning: this is not pure Golang. This shells out to the `git` binary. | -| custom | ✔ | -| **other features** | -| gitignore | ✔ | -| gitattributes | ✖ | -| index version | | -| packfile version | | -| push-certs | ✖ | +# Supported Features + +Here is a non-comprehensive table of git commands and features and their +compatibility status with go-git. + +## Getting and creating repositories + +| Feature | Sub-feature | Status | Notes | Examples | +|---|---|---|---|---| +| `init` | | ✅ | | | +| `init` | `--bare` | ✅ | | | +| `init` | `--template`
`--separate-git-dir`
`--shared` | ❌ | | | +| `clone` | | ✅ | | - [PlainClone](_examples/clone/main.go) | +| `clone` | Authentication:
- none
- access token
- username + password
- ssh | ✅ | | - [clone ssh](_examples/clone/auth/ssh/main.go)
- [clone access token](_examples/clone/auth/basic/access_token/main.go)
- [clone user + password](_examples/clone/auth/basic/username_password/main.go) | +| `clone` | `--progress`
`--single-branch`
`--depth`
`--origin`
`--recurse-submodules` | ✅ | | - [recurse submodules](_examples/clone/main.go)
- [progress](_examples/progress/main.go) | + +## Basic snapshotting + +| Feature | Sub-feature | Status | Notes | Examples | +|---|---|---|---|---| +| `add` | | ✅ | Plain add is supported. Any other flags aren't supported | | +| `status` | | ✅ | | | +| `commit` | | ✅ | | - [commit](_examples/commit/main.go) | +| `reset` | | ✅ | | | +| `rm` | | ✅ | | | +| `mv` | | ✅ | | | + +## Branching and merging + +| Feature | Sub-feature | Status | Notes | Examples | +|---|---|---|---|---| +| `branch` | | ✅ | | - [branch](_examples/branch/main.go) | +| `checkout` | | ✅ | Basic usages of checkout are supported. | - [checkout](_examples/checkout/main.go) | +| `merge` | | ❌ | | | +| `mergetool` | | ❌ | | | +| `stash` | | ❌ | | | +| `tag` | | ✅ | | - [tag](_examples/tag/main.go)
- [tag create and push](_examples/tag-create-push/main.go) | + +## Sharing and updating projects + +| Feature | Sub-feature | Status | Notes | Examples | +|---|---|---|---|---| +| `fetch` | | ✅ | | | +| `pull` | | ✅ | Only supports merges where the merge can be resolved as a fast-forward. | - [pull](_examples/pull/main.go) | +| `push` | | ✅ | | - [push](_examples/push/main.go) | +| `remote` | | ✅ | | - [remotes](_examples/remotes/main.go) | +| `submodule` | | ✅ | | - [submodule](_examples/submodule/main.go) | +| `submodule` | deinit | ❌ | | | + +## Inspection and comparison + +| Feature | Sub-feature | Status | Notes | Examples | +|---|---|---|---|---| +| `show` | | ✅ | | | +| `log` | | ✅ | | - [log](_examples/log/main.go) | +| `shortlog` | | (see log) | | | +| `describe` | | ❌ | | | + +## Patching + +| Feature | Sub-feature | Status | Notes | Examples | +|---|---|---|---|---| +| `apply` | | ❌ | | | +| `cherry-pick` | | ❌ | | | +| `diff` | | ✅ | Patch object with UnifiedDiff output representation. | | +| `rebase` | | ❌ | | | +| `revert` | | ❌ | | | + +## Debugging + +| Feature | Sub-feature | Status | Notes | Examples | +|---|---|---|---|---| +| `bisect` | | ❌ | | | +| `blame` | | ✅ | | - [blame](_examples/blame/main.go) | +| `grep` | | ✅ | | | + +## Email + +| Feature | Sub-feature | Status | Notes | Examples | +|---|---|---|---|---| +| `am` | | ❌ | | | +| `apply` | | ❌ | | | +| `format-patch` | | ❌ | | | +| `send-email` | | ❌ | | | +| `request-pull` | | ❌ | | | + +## External systems + +| Feature | Sub-feature | Status | Notes | Examples | +|---|---|---|---|---| +| `svn` | | ❌ | | | +| `fast-import` | | ❌ | | | +| `lfs` | | ❌ | | | + +## Administration + +| Feature | Sub-feature | Status | Notes | Examples | +|---|---|---|---|---| +| `clean` | | ✅ | | | +| `gc` | | ❌ | | | +| `fsck` | | ❌ | | | +| `reflog` | | ❌ | | | +| `filter-branch` | | ❌ | | | +| `instaweb` | | ❌ | | | +| `archive` | | ❌ | | | +| `bundle` | | ❌ | | | +| `prune` | | ❌ | | | +| `repack` | | ❌ | | | + +## Server admin + +| Feature | Sub-feature | Status | Notes | Examples | +|---|---|---|---|---| +| `daemon` | | ❌ | | | +| `update-server-info` | | ❌ | | | + +## Advanced + +| Feature | Sub-feature | Status | Notes | Examples | +|---|---|---|---|---| +| `notes` | | ❌ | | | +| `replace` | | ❌ | | | +| `worktree` | | ❌ | | | +| `annotate` | | (see blame) | | | + +## GPG + +| Feature | Sub-feature | Status | Notes | Examples | +|---|---|---|---|---| +| `git-verify-commit` | | ✅ | | | +| `git-verify-tag` | | ✅ | | | + +## Plumbing commands + +| Feature | Sub-feature | Status | Notes | Examples | +|---|---|---|---|---| +| `cat-file` | | ✅ | | | +| `check-ignore` | | ❌ | | | +| `commit-tree` | | ❌ | | | +| `count-objects` | | ❌ | | | +| `diff-index` | | ❌ | | | +| `for-each-ref` | | ✅ | | | +| `hash-object` | | ✅ | | | +| `ls-files` | | ✅ | | | +| `ls-remote` | | ✅ | | - [ls-remote](_examples/ls-remote/main.go) | +| `merge-base` | `--independent`
`--is-ancestor` | ⚠️ (partial) | Calculates the merge-base only between two commits. | - [merge-base](_examples/merge_base/main.go) | +| `merge-base` | `--fork-point`
`--octopus` | ❌ | | | +| `read-tree` | | ❌ | | | +| `rev-list` | | ✅ | | | +| `rev-parse` | | ❌ | | | +| `show-ref` | | ✅ | | | +| `symbolic-ref` | | ✅ | | | +| `update-index` | | ❌ | | | +| `update-ref` | | ❌ | | | +| `verify-pack` | | ❌ | | | +| `write-tree` | | ❌ | | | + +## Indexes and Git Protocols + +| Feature | Version | Status | Notes | +|---|---|---|---| +| index | [v1](https://github.com/git/git/blob/master/Documentation/gitformat-index.txt) | ❌ | | +| index | [v2](https://github.com/git/git/blob/master/Documentation/gitformat-index.txt) | ✅ | | +| index | [v3](https://github.com/git/git/blob/master/Documentation/gitformat-index.txt) | ❌ | | +| pack-protocol | [v1](https://github.com/git/git/blob/master/Documentation/gitprotocol-pack.txt) | ✅ | | +| pack-protocol | [v2](https://github.com/git/git/blob/master/Documentation/gitprotocol-v2.txt) | ❌ | | +| multi-pack-index | [v1](https://github.com/git/git/blob/master/Documentation/gitformat-pack.txt) | ❌ | | +| pack-*.rev files | [v1](https://github.com/git/git/blob/master/Documentation/gitformat-pack.txt) | ❌ | | +| pack-*.mtimes files | [v1](https://github.com/git/git/blob/master/Documentation/gitformat-pack.txt) | ❌ | | +| cruft packs | | ❌ | | + +## Capabilities + +| Feature | Status | Notes | +|---|---|---| +| `multi_ack` | ❌ | | +| `multi_ack_detailed` | ❌ | | +| `no-done` | ❌ | | +| `thin-pack` | ❌ | | +| `side-band` | ⚠️ (partial) | | +| `side-band-64k` | ⚠️ (partial) | | +| `ofs-delta` | ✅ | | +| `agent` | ✅ | | +| `object-format` | ❌ | | +| `symref` | ✅ | | +| `shallow` | ✅ | | +| `deepen-since` | ✅ | | +| `deepen-not` | ❌ | | +| `deepen-relative` | ❌ | | +| `no-progress` | ✅ | | +| `include-tag` | ✅ | | +| `report-status` | ✅ | | +| `report-status-v2` | ❌ | | +| `delete-refs` | ✅ | | +| `quiet` | ❌ | | +| `atomic` | ✅ | | +| `push-options` | ✅ | | +| `allow-tip-sha1-in-want` | ✅ | | +| `allow-reachable-sha1-in-want` | ❌ | | +| `push-cert=` | ❌ | | +| `filter` | ❌ | | +| `session-id=` | ❌ | | + +## Transport Schemes + +| Scheme | Status | Notes | Examples | +|---|---|---|---| +| `http(s)://` (dumb) | ❌ | | | +| `http(s)://` (smart) | ✅ | | | +| `git://` | ✅ | | | +| `ssh://` | ✅ | | | +| `file://` | ⚠️ (partial) | Warning: this is not pure Golang. This shells out to the `git` binary. | | +| Custom | ✅ | All existing schemes can be replaced by custom implementations. | - [custom_http](_examples/custom_http/main.go) | + +## SHA256 + +| Feature | Sub-feature | Status | Notes | Examples | +|---|---|---|---|---| +| `init` | | ✅ | Requires building with tag sha256. | - [init](_examples/sha256/main.go) | +| `commit` | | ✅ | Requires building with tag sha256. | - [commit](_examples/sha256/main.go) | +| `pull` | | ❌ | | | +| `fetch` | | ❌ | | | +| `push` | | ❌ | | | + +## Other features + +| Feature | Sub-feature | Status | Notes | Examples | +|---|---|---|---|---| +| `config` | `--local` | ✅ | Read and write per-repository (`.git/config`). | | +| `config` | `--global`
`--system` | ✅ | Read-only. | |
+| `gitignore` | | ✅ | | |
+| `gitattributes` | | ✅ | | |
+| `git-worktree` | | ❌ | Multiple worktrees are not supported. | |
diff --git a/vendor/github.com/go-git/go-git/v5/EXTENDING.md b/vendor/github.com/go-git/go-git/v5/EXTENDING.md
new file mode 100644
index 00000000..a2778e34
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/EXTENDING.md
@@ -0,0 +1,78 @@
+# Extending go-git
+
+`go-git` was built in a highly extensible manner, which enables some of its functionality to be changed or extended without changing its codebase. Here are the key extensibility features:
+
+## Dot Git Storers
+
+Dot git storers are the components responsible for storing the Git internal files, including objects and references.
+
+The built-in storer implementations include [memory](storage/memory) and [filesystem](storage/filesystem). The `memory` storer stores all the data in memory, and its use looks like this:
+
+```go
+	r, err := git.Init(memory.NewStorage(), nil)
+```
+
+The `filesystem` storer stores the data in the OS filesystem, and can be used as follows:
+
+```go
+	r, err := git.Init(filesystem.NewStorage(osfs.New("/tmp/foo")), nil)
+```
+
+New implementations can be created by implementing the [storage.Storer interface](storage/storer.go#L16).
+
+## Filesystem
+
+Git repository worktrees are managed using a filesystem abstraction based on [go-billy](https://github.com/go-git/go-billy). The Git operations will take place against the specific filesystem implementation. Initialising a repository in memory can be done as follows:
+
+```go
+	fs := memfs.New()
+	r, err := git.Init(memory.NewStorage(), fs)
+```
+
+The same operation can be done against the OS filesystem:
+
+```go
+	fs := osfs.New("/tmp/foo")
+	r, err := git.Init(memory.NewStorage(), fs)
+```
+
+New filesystems (e.g. cloud-based storage) could be created by implementing `go-billy`'s [Filesystem interface](https://github.com/go-git/go-billy/blob/326c59f064021b821a55371d57794fbfb86d4cb3/fs.go#L52).
+
+## Transport Schemes
+
+Git supports various transport schemes, including `http`, `https`, `ssh`, `git` and `file`. `go-git` defines the [transport.Transport interface](plumbing/transport/common.go#L48) to represent them.
+
+The built-in implementations can be replaced by calling `client.InstallProtocol`.
+
+An example of changing the built-in `https` implementation to skip TLS could look like this:
+
+```go
+	customClient := &http.Client{
+		Transport: &http.Transport{
+			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
+		},
+	}
+
+	client.InstallProtocol("https", githttp.NewClient(customClient))
+```
+
+Some internal implementations enable code reuse amongst the different transport implementations. Some of these may be made public in the future (e.g. `plumbing/transport/internal/common`).
+
+## Cache
+
+Several different operations across `go-git` lean on caching of objects in order to achieve optimal performance. The caching functionality is defined by the [cache.Object interface](plumbing/cache/common.go#L17).
+
+Two built-in implementations are `cache.ObjectLRU` and `cache.BufferLRU`. However, the caching functionality can be customized by implementing the `cache.Object` interface.
+
+## Hash
+
+`go-git` uses the standard `crypto.Hash` type to represent hash functions. The built-in implementations are `github.com/pjbgf/sha1cd` for SHA1 and Go's `crypto/sha256`.
+
+The default hash functions can be changed by calling `hash.RegisterHash`.
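+A minimal illustration, assuming `sha1` here is Go's standard `crypto/sha1` package: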
+```go
+	func init() {
+		hash.RegisterHash(crypto.SHA1, sha1.New)
+	}
+```
+
+New `SHA1` or `SHA256` implementations of Go's `hash.Hash` interface can be registered by calling `hash.RegisterHash`.
diff --git a/vendor/github.com/go-git/go-git/v5/SECURITY.md b/vendor/github.com/go-git/go-git/v5/SECURITY.md
new file mode 100644
index 00000000..0d2f8d03
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/SECURITY.md
@@ -0,0 +1,38 @@
+# go-git Security Policy
+
+The purpose of this security policy is to outline `go-git`'s process
+for reporting, handling and disclosing security-sensitive information.
+
+## Supported Versions
+
+The project follows a version support policy where only the latest minor
+release is actively supported. Therefore, only issues that impact the latest
+minor release will be fixed. Users are encouraged to upgrade to the latest
+minor/patch release to benefit from the most up-to-date features, bug fixes,
+and security enhancements.
+
+The supported versions policy applies to both the `go-git` library and its
+associated repositories within the `go-git` org.
+
+## Reporting Security Issues
+
+Please report any security vulnerabilities or potential weaknesses in `go-git`
+privately via go-git-security@googlegroups.com. Do not publicly disclose the
+details of the vulnerability until a fix has been implemented and released.
+
+During the process, the project maintainers will investigate the report, so please
+provide detailed information, including steps to reproduce, affected versions, and any mitigations if known.
+
+The project maintainers will acknowledge receipt of the report and work with
+the reporter to validate and address the issue.
+
+Please note that `go-git` does not have any bounty programs, and therefore does
+not provide financial compensation for disclosures.
+
+## Security Disclosure Process
+
+The project maintainers will make every effort to promptly address security issues.
+
+Once a security vulnerability is fixed, a security advisory will be published to notify users and provide appropriate mitigation measures.
+
+All `go-git` advisories can be found at https://github.com/go-git/go-git/security/advisories.
diff --git a/vendor/github.com/go-git/go-git/v5/blame.go b/vendor/github.com/go-git/go-git/v5/blame.go
index 43634b32..2a877dcd 100644
--- a/vendor/github.com/go-git/go-git/v5/blame.go
+++ b/vendor/github.com/go-git/go-git/v5/blame.go
@@ -2,16 +2,18 @@ package git
 
 import (
 	"bytes"
+	"container/heap"
 	"errors"
 	"fmt"
+	"io"
 	"strconv"
-	"strings"
 	"time"
 	"unicode/utf8"
 
 	"github.com/go-git/go-git/v5/plumbing"
 	"github.com/go-git/go-git/v5/plumbing/object"
 	"github.com/go-git/go-git/v5/utils/diff"
+	"github.com/sergi/go-diff/diffmatchpatch"
 )
 
 // BlameResult represents the result of a Blame operation.
@@ -29,67 +31,86 @@ type BlameResult struct {
 func Blame(c *object.Commit, path string) (*BlameResult, error) {
 	// The file to blame is identified by the input arguments:
 	// commit and path. commit is a Commit object obtained from a Repository. Path
-	// represents a path to a specific file contained into the repository.
+	// represents a path to a specific file contained in the repository.
 	//
-	// Blaming a file is a two step process:
+	// Blaming a file is done by walking the tree in reverse order trying to find where each line was last modified.
 	//
-	// 1. Create a linear history of the commits affecting a file. We use
-	// revlist.New for that.
+	// When a diff is found, it cannot immediately be assumed that the change came from that commit, as it may have
+	// come from one of its parents. It will first try to resolve those diffs from its parents, and only if the change
+	// cannot be found in its parents will it assign the change to itself.
 	//
-	// 2. Then build a graph with a node for every line in every file in
-	// the history of the file.
+	// When encountering two parents that have made the same change to a file, it will choose the parent that was merged
+	// into the current branch first (this is determined by the order of the parents inside the commit).
 	//
-	// Each node is assigned a commit: Start by the nodes in the first
-	// commit. Assign that commit as the creator of all its lines.
-	//
-	// Then jump to the nodes in the next commit, and calculate the diff
-	// between the two files. Newly created lines get
-	// assigned the new commit as its origin. Modified lines also get
-	// this new commit. Untouched lines retain the old commit.
-	//
-	// All this work is done in the assignOrigin function which holds all
-	// the internal relevant data in a "blame" struct, that is not
-	// exported.
-	//
-	// TODO: ways to improve the efficiency of this function:
-	// 1. Improve revlist
-	// 2. Improve how to traverse the history (example a backward traversal will
-	// be much more efficient)
-	//
-	// TODO: ways to improve the function in general:
-	// 1. Add memoization between revlist and assign.
-	// 2. It is using much more memory than needed, see the TODOs below.
+	// This currently works on a line-by-line basis; if performance becomes an issue, it could be changed to work with
+	// hunks rather than lines. When encountering diff hunks, it would then need to split them where necessary.
 
 	b := new(blame)
 	b.fRev = c
 	b.path = path
+	b.q = new(priorityQueue)
 
-	// get all the file revisions
-	if err := b.fillRevs(); err != nil {
+	file, err := b.fRev.File(path)
+	if err != nil {
 		return nil, err
 	}
-
-	// calculate the line tracking graph and fill in
-	// file contents in data.
-	if err := b.fillGraphAndData(); err != nil {
+	finalLines, err := file.Lines()
+	if err != nil {
 		return nil, err
 	}
+	finalLength := len(finalLines)
 
-	file, err := b.fRev.File(b.path)
+	needsMap := make([]lineMap, finalLength)
+	for i := range needsMap {
+		needsMap[i] = lineMap{i, i, nil, -1}
+	}
+	contents, err := file.Contents()
 	if err != nil {
 		return nil, err
 	}
-	finalLines, err := file.Lines()
+	b.q.Push(&queueItem{
+		nil,
+		nil,
+		c,
+		path,
+		contents,
+		needsMap,
+		0,
+		false,
+		0,
+	})
+	items := make([]*queueItem, 0)
+	for {
+		items = items[:0]
+		for {
+			if b.q.Len() == 0 {
+				return nil, errors.New("invalid state: no items left on the blame queue")
+			}
+			item := b.q.Pop()
+			items = append(items, item)
+			next := b.q.Peek()
+			if next == nil || next.Hash != item.Commit.Hash {
+				break
+			}
+		}
+		finished, err := b.addBlames(items)
+		if err != nil {
+			return nil, err
+		}
+		if finished {
+			break
+		}
+	}
 	if err != nil {
 		return nil, err
 	}
 
-	// Each node (line) holds the commit where it was introduced or
-	// last modified. To achieve that we use the FORWARD algorithm
-	// described in Zimmermann, et al. "Mining Version Archives for
-	// Co-changed Lines", in proceedings of the Mining Software
-	// Repositories workshop, Shanghai, May 22-23, 2006.
- lines, err := newLines(finalLines, b.sliceGraph(len(b.graph)-1)) + b.lineToCommit = make([]*object.Commit, finalLength) + for i := range needsMap { + b.lineToCommit[i] = needsMap[i].Commit + } + + lines, err := newLines(finalLines, b.lineToCommit) if err != nil { return nil, err } @@ -105,6 +126,8 @@ func Blame(c *object.Commit, path string) (*BlameResult, error) { type Line struct { // Author is the email address of the last author that modified the line. Author string + // AuthorName is the name of the last author that modified the line. + AuthorName string // Text is the original text of the line. Text string // Date is when the original text of the line was introduced @@ -113,31 +136,21 @@ type Line struct { Hash plumbing.Hash } -func newLine(author, text string, date time.Time, hash plumbing.Hash) *Line { +func newLine(author, authorName, text string, date time.Time, hash plumbing.Hash) *Line { return &Line{ - Author: author, - Text: text, - Hash: hash, - Date: date, + Author: author, + AuthorName: authorName, + Text: text, + Hash: hash, + Date: date, } } func newLines(contents []string, commits []*object.Commit) ([]*Line, error) { - lcontents := len(contents) - lcommits := len(commits) - - if lcontents != lcommits { - if lcontents == lcommits-1 && contents[lcontents-1] != "\n" { - contents = append(contents, "\n") - } else { - return nil, errors.New("contents and commits have different length") - } - } - - result := make([]*Line, 0, lcontents) + result := make([]*Line, 0, len(contents)) for i := range contents { result = append(result, newLine( - commits[i].Author.Email, contents[i], + commits[i].Author.Email, commits[i].Author.Name, contents[i], commits[i].Author.When, commits[i].Hash, )) } @@ -152,151 +165,426 @@ type blame struct { path string // the commit of the final revision of the file to blame fRev *object.Commit - // the chain of revisions affecting the the file to blame - revs []*object.Commit - // the contents of the file across all its revisions - data []string - // the graph of the lines in the file across all the revisions - graph [][]*object.Commit + // resolved lines + lineToCommit []*object.Commit + // queue of commits that need resolving + q *priorityQueue } -// calculate the history of a file "path", starting from commit "from", sorted by commit date. -func (b *blame) fillRevs() error { - var err error - - b.revs, err = references(b.fRev, b.path) - return err +type lineMap struct { + Orig, Cur int + Commit *object.Commit + FromParentNo int } -// build graph of a file from its revision history -func (b *blame) fillGraphAndData() error { - //TODO: not all commits are needed, only the current rev and the prev - b.graph = make([][]*object.Commit, len(b.revs)) - b.data = make([]string, len(b.revs)) // file contents in all the revisions - // for every revision of the file, starting with the first - // one... - for i, rev := range b.revs { +func (b *blame) addBlames(curItems []*queueItem) (bool, error) { + curItem := curItems[0] + + // Simple optimisation to merge paths, there is potential to go a bit further here and check for any duplicates + // not only if they are all the same. 
+ if len(curItems) == 1 { + curItems = nil + } else if curItem.IdenticalToChild { + allSame := true + lenCurItems := len(curItems) + lowestParentNo := curItem.ParentNo + for i := 1; i < lenCurItems; i++ { + if !curItems[i].IdenticalToChild || curItem.Child != curItems[i].Child { + allSame = false + break + } + lowestParentNo = min(lowestParentNo, curItems[i].ParentNo) + } + if allSame { + curItem.Child.numParentsNeedResolving = curItem.Child.numParentsNeedResolving - lenCurItems + 1 + curItems = nil // free the memory + curItem.ParentNo = lowestParentNo + + // Now check if we can remove the parent completely + for curItem.Child.IdenticalToChild && curItem.Child.MergedChildren == nil && curItem.Child.numParentsNeedResolving == 1 { + oldChild := curItem.Child + curItem.Child = oldChild.Child + curItem.ParentNo = oldChild.ParentNo + } + } + } + + // if we have more than 1 item for this commit, create a single needsMap + if len(curItems) > 1 { + curItem.MergedChildren = make([]childToNeedsMap, len(curItems)) + for i, c := range curItems { + curItem.MergedChildren[i] = childToNeedsMap{c.Child, c.NeedsMap, c.IdenticalToChild, c.ParentNo} + } + newNeedsMap := make([]lineMap, 0, len(curItem.NeedsMap)) + newNeedsMap = append(newNeedsMap, curItems[0].NeedsMap...) + + for i := 1; i < len(curItems); i++ { + cur := curItems[i].NeedsMap + n := 0 // position in newNeedsMap + c := 0 // position in current list + for c < len(cur) { + if n == len(newNeedsMap) { + newNeedsMap = append(newNeedsMap, cur[c:]...) + break + } else if newNeedsMap[n].Cur == cur[c].Cur { + n++ + c++ + } else if newNeedsMap[n].Cur < cur[c].Cur { + n++ + } else { + newNeedsMap = append(newNeedsMap, cur[c]) + newPos := len(newNeedsMap) - 1 + for newPos > n { + newNeedsMap[newPos-1], newNeedsMap[newPos] = newNeedsMap[newPos], newNeedsMap[newPos-1] + newPos-- + } + } + } + } + curItem.NeedsMap = newNeedsMap + curItem.IdenticalToChild = false + curItem.Child = nil + curItems = nil // free the memory + } + + parents, err := parentsContainingPath(curItem.path, curItem.Commit) + if err != nil { + return false, err + } + + anyPushed := false + for parnetNo, prev := range parents { + currentHash, err := blobHash(curItem.path, curItem.Commit) + if err != nil { + return false, err + } + prevHash, err := blobHash(prev.Path, prev.Commit) + if err != nil { + return false, err + } + if currentHash == prevHash { + if len(parents) == 1 && curItem.MergedChildren == nil && curItem.IdenticalToChild { + // commit that has 1 parent and 1 child and is the same as both, bypass it completely + b.q.Push(&queueItem{ + Child: curItem.Child, + Commit: prev.Commit, + path: prev.Path, + Contents: curItem.Contents, + NeedsMap: curItem.NeedsMap, // reuse the NeedsMap as we are throwing away this item + IdenticalToChild: true, + ParentNo: curItem.ParentNo, + }) + } else { + b.q.Push(&queueItem{ + Child: curItem, + Commit: prev.Commit, + path: prev.Path, + Contents: curItem.Contents, + NeedsMap: append([]lineMap(nil), curItem.NeedsMap...), // create new slice and copy + IdenticalToChild: true, + ParentNo: parnetNo, + }) + curItem.numParentsNeedResolving++ + } + anyPushed = true + continue + } + // get the contents of the file - file, err := rev.File(b.path) + file, err := prev.Commit.File(prev.Path) if err != nil { - return nil + return false, err } - b.data[i], err = file.Contents() + prevContents, err := file.Contents() if err != nil { - return err + return false, err } - nLines := countLines(b.data[i]) - // create a node for each line - b.graph[i] = 
make([]*object.Commit, nLines) - // assign a commit to each node - // if this is the first revision, then the node is assigned to - // this first commit. - if i == 0 { - for j := 0; j < nLines; j++ { - b.graph[i][j] = b.revs[i] + + hunks := diff.Do(prevContents, curItem.Contents) + prevl := -1 + curl := -1 + need := 0 + getFromParent := make([]lineMap, 0) + out: + for h := range hunks { + hLines := countLines(hunks[h].Text) + for hl := 0; hl < hLines; hl++ { + switch { + case hunks[h].Type == diffmatchpatch.DiffEqual: + prevl++ + curl++ + if curl == curItem.NeedsMap[need].Cur { + // add to needs + getFromParent = append(getFromParent, lineMap{curl, prevl, nil, -1}) + // move to next need + need++ + if need >= len(curItem.NeedsMap) { + break out + } + } + case hunks[h].Type == diffmatchpatch.DiffInsert: + curl++ + if curl == curItem.NeedsMap[need].Cur { + // the line we want is added, it may have been added here (or by another parent), skip it for now + need++ + if need >= len(curItem.NeedsMap) { + break out + } + } + case hunks[h].Type == diffmatchpatch.DiffDelete: + prevl += hLines + continue out + default: + return false, errors.New("invalid state: invalid hunk Type") + } } - } else { - // if this is not the first commit, then assign to the old - // commit or to the new one, depending on what the diff - // says. - b.assignOrigin(i, i-1) + } + + if len(getFromParent) > 0 { + b.q.Push(&queueItem{ + curItem, + nil, + prev.Commit, + prev.Path, + prevContents, + getFromParent, + 0, + false, + parnetNo, + }) + curItem.numParentsNeedResolving++ + anyPushed = true } } - return nil -} -// sliceGraph returns a slice of commits (one per line) for a particular -// revision of a file (0=first revision). -func (b *blame) sliceGraph(i int) []*object.Commit { - fVs := b.graph[i] - result := make([]*object.Commit, 0, len(fVs)) - for _, v := range fVs { - c := *v - result = append(result, &c) + curItem.Contents = "" // no longer need, free the memory + + if !anyPushed { + return finishNeeds(curItem) } - return result + + return false, nil } -// Assigns origin to vertexes in current (c) rev from data in its previous (p) -// revision -func (b *blame) assignOrigin(c, p int) { - // assign origin based on diff info - hunks := diff.Do(b.data[p], b.data[c]) - sl := -1 // source line - dl := -1 // destination line - for h := range hunks { - hLines := countLines(hunks[h].Text) - for hl := 0; hl < hLines; hl++ { - switch { - case hunks[h].Type == 0: - sl++ - dl++ - b.graph[c][dl] = b.graph[p][sl] - case hunks[h].Type == 1: - dl++ - b.graph[c][dl] = b.revs[c] - case hunks[h].Type == -1: - sl++ - default: - panic("unreachable") +func finishNeeds(curItem *queueItem) (bool, error) { + // any needs left in the needsMap must have come from this revision + for i := range curItem.NeedsMap { + if curItem.NeedsMap[i].Commit == nil { + curItem.NeedsMap[i].Commit = curItem.Commit + curItem.NeedsMap[i].FromParentNo = -1 + } + } + + if curItem.Child == nil && curItem.MergedChildren == nil { + return true, nil + } + + if curItem.MergedChildren == nil { + return applyNeeds(curItem.Child, curItem.NeedsMap, curItem.IdenticalToChild, curItem.ParentNo) + } + + for _, ctn := range curItem.MergedChildren { + m := 0 // position in merged needs map + p := 0 // position in parent needs map + for p < len(ctn.NeedsMap) { + if ctn.NeedsMap[p].Cur == curItem.NeedsMap[m].Cur { + ctn.NeedsMap[p].Commit = curItem.NeedsMap[m].Commit + m++ + p++ + } else if ctn.NeedsMap[p].Cur < curItem.NeedsMap[m].Cur { + p++ + } else { + m++ } } + finished, err 
:= applyNeeds(ctn.Child, ctn.NeedsMap, ctn.IdenticalToChild, ctn.ParentNo) + if finished || err != nil { + return finished, err + } } -} -// GoString prints the results of a Blame using git-blame's style. -func (b *blame) GoString() string { - var buf bytes.Buffer + return false, nil +} - file, err := b.fRev.File(b.path) - if err != nil { - panic("PrettyPrint: internal error in repo.Data") +func applyNeeds(child *queueItem, needsMap []lineMap, identicalToChild bool, parentNo int) (bool, error) { + if identicalToChild { + for i := range child.NeedsMap { + l := &child.NeedsMap[i] + if l.Cur != needsMap[i].Cur || l.Orig != needsMap[i].Orig { + return false, errors.New("needsMap isn't the same? Why not??") + } + if l.Commit == nil || parentNo < l.FromParentNo { + l.Commit = needsMap[i].Commit + l.FromParentNo = parentNo + } + } + } else { + i := 0 + out: + for j := range child.NeedsMap { + l := &child.NeedsMap[j] + for needsMap[i].Orig < l.Cur { + i++ + if i == len(needsMap) { + break out + } + } + if l.Cur == needsMap[i].Orig { + if l.Commit == nil || parentNo < l.FromParentNo { + l.Commit = needsMap[i].Commit + l.FromParentNo = parentNo + } + } + } } - contents, err := file.Contents() - if err != nil { - panic("PrettyPrint: internal error in repo.Data") + child.numParentsNeedResolving-- + if child.numParentsNeedResolving == 0 { + finished, err := finishNeeds(child) + if finished || err != nil { + return finished, err + } } - lines := strings.Split(contents, "\n") + return false, nil +} + +// String prints the results of a Blame using git-blame's style. +func (b BlameResult) String() string { + var buf bytes.Buffer + // max line number length - mlnl := len(strconv.Itoa(len(lines))) + mlnl := len(strconv.Itoa(len(b.Lines))) // max author length mal := b.maxAuthorLength() - format := fmt.Sprintf("%%s (%%-%ds %%%dd) %%s\n", - mal, mlnl) + format := fmt.Sprintf("%%s (%%-%ds %%s %%%dd) %%s\n", mal, mlnl) - fVs := b.graph[len(b.graph)-1] - for ln, v := range fVs { - fmt.Fprintf(&buf, format, v.Hash.String()[:8], - prettyPrintAuthor(fVs[ln]), ln+1, lines[ln]) + for ln := range b.Lines { + _, _ = fmt.Fprintf(&buf, format, b.Lines[ln].Hash.String()[:8], + b.Lines[ln].AuthorName, b.Lines[ln].Date.Format("2006-01-02 15:04:05 -0700"), ln+1, b.Lines[ln].Text) } return buf.String() } -// utility function to pretty print the author. -func prettyPrintAuthor(c *object.Commit) string { - return fmt.Sprintf("%s %s", c.Author.Name, c.Author.When.Format("2006-01-02")) -} - // utility function to calculate the number of runes needed // to print the longest author name in the blame of a file. 
-func (b *blame) maxAuthorLength() int {
-	memo := make(map[plumbing.Hash]struct{}, len(b.graph)-1)
-	fVs := b.graph[len(b.graph)-1]
+func (b BlameResult) maxAuthorLength() int {
 	m := 0
-	for ln := range fVs {
-		if _, ok := memo[fVs[ln].Hash]; ok {
-			continue
-		}
-		memo[fVs[ln].Hash] = struct{}{}
-		m = max(m, utf8.RuneCountInString(prettyPrintAuthor(fVs[ln])))
+	for ln := range b.Lines {
+		m = max(m, utf8.RuneCountInString(b.Lines[ln].AuthorName))
 	}
 	return m
 }
 
+func min(a, b int) int {
+	if a < b {
+		return a
+	}
+	return b
+}
+
 func max(a, b int) int {
 	if a > b {
 		return a
 	}
 	return b
 }
+
+type childToNeedsMap struct {
+	Child            *queueItem
+	NeedsMap         []lineMap
+	IdenticalToChild bool
+	ParentNo         int
+}
+
+type queueItem struct {
+	Child                   *queueItem
+	MergedChildren          []childToNeedsMap
+	Commit                  *object.Commit
+	path                    string
+	Contents                string
+	NeedsMap                []lineMap
+	numParentsNeedResolving int
+	IdenticalToChild        bool
+	ParentNo                int
+}
+
+type priorityQueueImp []*queueItem
+
+func (pq *priorityQueueImp) Len() int { return len(*pq) }
+func (pq *priorityQueueImp) Less(i, j int) bool {
+	return !(*pq)[i].Commit.Less((*pq)[j].Commit)
+}
+func (pq *priorityQueueImp) Swap(i, j int) { (*pq)[i], (*pq)[j] = (*pq)[j], (*pq)[i] }
+func (pq *priorityQueueImp) Push(x any)    { *pq = append(*pq, x.(*queueItem)) }
+func (pq *priorityQueueImp) Pop() any {
+	n := len(*pq)
+	ret := (*pq)[n-1]
+	(*pq)[n-1] = nil // avoid memory leak
+	*pq = (*pq)[0 : n-1]
+
+	return ret
+}
+func (pq *priorityQueueImp) Peek() *object.Commit {
+	if len(*pq) == 0 {
+		return nil
+	}
+	return (*pq)[0].Commit
+}
+
+type priorityQueue priorityQueueImp
+
+func (pq *priorityQueue) Init()    { heap.Init((*priorityQueueImp)(pq)) }
+func (pq *priorityQueue) Len() int { return (*priorityQueueImp)(pq).Len() }
+func (pq *priorityQueue) Push(c *queueItem) {
+	heap.Push((*priorityQueueImp)(pq), c)
+}
+func (pq *priorityQueue) Pop() *queueItem {
+	return heap.Pop((*priorityQueueImp)(pq)).(*queueItem)
+}
+func (pq *priorityQueue) Peek() *object.Commit { return (*priorityQueueImp)(pq).Peek() }
+
+type parentCommit struct {
+	Commit *object.Commit
+	Path   string
+}
+
+func parentsContainingPath(path string, c *object.Commit) ([]parentCommit, error) {
+	// TODO: benchmark this method making git.object.Commit.parent public instead of using
+	// an iterator
+	var result []parentCommit
+	iter := c.Parents()
+	for {
+		parent, err := iter.Next()
+		if err == io.EOF {
+			return result, nil
+		}
+		if err != nil {
+			return nil, err
+		}
+		if _, err := parent.File(path); err == nil {
+			result = append(result, parentCommit{parent, path})
+		} else {
+			// look for renames
+			patch, err := parent.Patch(c)
+			if err != nil {
+				return nil, err
+			} else if patch != nil {
+				for _, fp := range patch.FilePatches() {
+					from, to := fp.Files()
+					if from != nil && to != nil && to.Path() == path {
+						result = append(result, parentCommit{parent, from.Path()})
+						break
+					}
+				}
+			}
+		}
+	}
+}
+
+func blobHash(path string, commit *object.Commit) (plumbing.Hash, error) {
+	file, err := commit.File(path)
+	if err != nil {
+		return plumbing.ZeroHash, err
+	}
+	return file.Hash, nil
+}
diff --git a/vendor/github.com/go-git/go-git/v5/internal/path_util/path_util.go b/vendor/github.com/go-git/go-git/v5/internal/path_util/path_util.go
new file mode 100644
index 00000000..48e4a3d0
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/internal/path_util/path_util.go
@@ -0,0 +1,29 @@
+package path_util
+
+import (
+	"os"
+	"os/user"
+	"strings"
+)
+
+func ReplaceTildeWithHome(path string) (string, error) {
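+	// Illustrative behaviour (a sketch, assuming the current user's home is /home/alice):
+	//   "~/repo"    -> "/home/alice/repo"      (via os.UserHomeDir)
+	//   "~bob/src"  -> bob's home dir + "/src" (via user.Lookup)
+	//   "plain/dir" -> returned unchanged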
+	if strings.HasPrefix(path, "~") {
+		firstSlash := strings.Index(path, "/")
+		if firstSlash == 1 {
+			home, err := os.UserHomeDir()
+			if err != nil {
+				return path, err
+			}
+			return strings.Replace(path, "~", home, 1), nil
+		} else if firstSlash > 1 {
+			username := path[1:firstSlash]
+			userAccount, err := user.Lookup(username)
+			if err != nil {
+				return path, err
+			}
+			return strings.Replace(path, path[:firstSlash], userAccount.HomeDir, 1), nil
+		}
+	}
+
+	return path, nil
+}
diff --git a/vendor/github.com/go-git/go-git/v5/options.go b/vendor/github.com/go-git/go-git/v5/options.go
index d607b307..6d802d16 100644
--- a/vendor/github.com/go-git/go-git/v5/options.go
+++ b/vendor/github.com/go-git/go-git/v5/options.go
@@ -62,6 +62,9 @@ type CloneOptions struct {
 	// within, using their default settings. This option is ignored if the
 	// cloned repository does not have a worktree.
 	RecurseSubmodules SubmoduleRescursivity
+	// ShallowSubmodules limits the clone of submodules to a depth of 1 level.
+	// It matches the git flag --shallow-submodules.
+	ShallowSubmodules bool
 	// Progress is where the human readable information sent by the server is
 	// stored, if nil nothing is stored and the capability (if supported)
 	// no-progress, is sent to the server to avoid send this information.
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/dir.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/dir.go
index bb786557..d8fb30c1 100644
--- a/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/dir.go
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/dir.go
@@ -8,6 +8,7 @@ import (
 	"strings"
 
 	"github.com/go-git/go-billy/v5"
+	"github.com/go-git/go-git/v5/internal/path_util"
 	"github.com/go-git/go-git/v5/plumbing/format/config"
 	gioutil "github.com/go-git/go-git/v5/utils/ioutil"
 )
@@ -25,6 +26,9 @@ const (
 
 // readIgnoreFile reads a specific git ignore file.
 func readIgnoreFile(fs billy.Filesystem, path []string, ignoreFile string) (ps []Pattern, err error) {
+
+	ignoreFile, _ = path_util.ReplaceTildeWithHome(ignoreFile)
+
 	f, err := fs.Open(fs.Join(append(path, ignoreFile)...))
 	if err == nil {
 		defer f.Close()
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/pattern.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/pattern.go
index 098cb502..450b3cdf 100644
--- a/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/pattern.go
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/pattern.go
@@ -39,6 +39,8 @@ type pattern struct {
 
 // ParsePattern parses a gitignore pattern string into the Pattern structure.
 func ParsePattern(p string, domain []string) Pattern {
+	// storing domain, copy it to ensure it isn't changed externally
+	domain = append([]string(nil), domain...)
res := pattern{domain: domain} if strings.HasPrefix(p, inclusionPrefix) { diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/commit.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/commit.go index d2f71840..8a0f35c7 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/object/commit.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/object/commit.go @@ -376,6 +376,17 @@ func (c *Commit) Verify(armoredKeyRing string) (*openpgp.Entity, error) { return openpgp.CheckArmoredDetachedSignature(keyring, er, signature, nil) } +// Less defines a compare function to determine which commit is 'earlier' by: +// - First use Committer.When +// - If Committer.When are equal then use Author.When +// - If Author.When also equal then compare the string value of the hash +func (c *Commit) Less(rhs *Commit) bool { + return c.Committer.When.Before(rhs.Committer.When) || + (c.Committer.When.Equal(rhs.Committer.When) && + (c.Author.When.Before(rhs.Author.When) || + (c.Author.When.Equal(rhs.Author.When) && bytes.Compare(c.Hash[:], rhs.Hash[:]) < 0))) +} + func indent(t string) string { var output []string for _, line := range strings.Split(t, "\n") { diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/advrefs_decode.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/advrefs_decode.go index 63bbe5ab..f8d26a28 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/advrefs_decode.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/advrefs_decode.go @@ -133,6 +133,7 @@ func decodeFirstHash(p *advRefsDecoder) decoderStateFn { return nil } + // TODO: Use object-format (when available) for hash size. Git 2.41+ if len(p.line) < hashSize { p.error("cannot read hash, pkt-line too short") return nil diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/uppackreq.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/uppackreq.go index de2206b3..48f44385 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/uppackreq.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/uppackreq.go @@ -38,10 +38,10 @@ func NewUploadPackRequestFromCapabilities(adv *capability.List) *UploadPackReque } } -// IsEmpty a request if empty if Haves are contained in the Wants, or if Wants -// length is zero +// IsEmpty returns whether a request is empty - it is empty if Haves are contained +// in the Wants, or if Wants length is zero, and we don't have any shallows func (r *UploadPackRequest) IsEmpty() bool { - return isSubset(r.Wants, r.Haves) + return isSubset(r.Wants, r.Haves) && len(r.Shallows) == 0 } func isSubset(needle []plumbing.Hash, haystack []plumbing.Hash) bool { diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/reference.go b/vendor/github.com/go-git/go-git/v5/plumbing/reference.go index aeb4227b..5a67f69e 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/reference.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/reference.go @@ -15,10 +15,11 @@ const ( symrefPrefix = "ref: " ) -// RefRevParseRules are a set of rules to parse references into short names. -// These are the same rules as used by git in shorten_unambiguous_ref. +// RefRevParseRules are a set of rules to parse references into short names, or expand into a full reference. +// These are the same rules as used by git in shorten_unambiguous_ref and expand_ref. 
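+// For example, with these rules an unambiguous "refs/heads/main" shortens to "main",
+// and a short name such as "v1.0" can expand to "refs/tags/v1.0" (illustrative note).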
// See: https://github.com/git/git/blob/e0aaa1b6532cfce93d87af9bc813fb2e7a7ce9d7/refs.c#L417 var RefRevParseRules = []string{ + "%s", "refs/%s", "refs/tags/%s", "refs/heads/%s", @@ -113,7 +114,7 @@ func (r ReferenceName) String() string { func (r ReferenceName) Short() string { s := string(r) res := s - for _, format := range RefRevParseRules { + for _, format := range RefRevParseRules[1:] { _, err := fmt.Sscanf(s, format, &res) if err == nil { continue diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/common.go b/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/common.go index f9b7a0e5..a7cdc1e1 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/common.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/common.go @@ -73,6 +73,17 @@ func advertisedReferences(ctx context.Context, s *session, serviceName string) ( return nil, err } + // Git 2.41+ returns a zero-id plus capabilities when an empty + // repository is being cloned. This skips the existing logic within + // advrefs_decode.decodeFirstHash, which expects a flush-pkt instead. + // + // This logic aligns with plumbing/transport/internal/common/common.go. + if ar.IsEmpty() && + // Empty repositories are valid for git-receive-pack. + transport.ReceivePackServiceName != serviceName { + return nil, transport.ErrEmptyRemoteRepository + } + transport.FilterUnsupportedCapabilities(ar.Capabilities) s.advRefs = ar diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/transport/internal/common/common.go b/vendor/github.com/go-git/go-git/v5/plumbing/transport/internal/common/common.go index 99e0850f..5fdf4250 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/transport/internal/common/common.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/transport/internal/common/common.go @@ -232,7 +232,7 @@ func (s *session) handleAdvRefDecodeError(err error) error { // UploadPack performs a request to the server to fetch a packfile. A reader is // returned with the packfile content. The reader must be closed after reading. func (s *session) UploadPack(ctx context.Context, req *packp.UploadPackRequest) (*packp.UploadPackResponse, error) { - if req.IsEmpty() && len(req.Shallows) == 0 { + if req.IsEmpty() { return nil, transport.ErrEmptyUploadPackRequest } diff --git a/vendor/github.com/go-git/go-git/v5/references.go b/vendor/github.com/go-git/go-git/v5/references.go deleted file mode 100644 index 6d96035a..00000000 --- a/vendor/github.com/go-git/go-git/v5/references.go +++ /dev/null @@ -1,264 +0,0 @@ -package git - -import ( - "io" - "sort" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/object" - "github.com/go-git/go-git/v5/utils/diff" - - "github.com/sergi/go-diff/diffmatchpatch" -) - -// References returns a slice of Commits for the file at "path", starting from -// the commit provided that contains the file from the provided path. The last -// commit into the returned slice is the commit where the file was created. -// If the provided commit does not contains the specified path, a nil slice is -// returned. The commits are sorted in commit order, newer to older. -// -// Caveats: -// -// - Moves and copies are not currently supported. -// -// - Cherry-picks are not detected unless there are no commits between them and -// therefore can appear repeated in the list. (see git path-id for hints on how -// to fix this). 
-func references(c *object.Commit, path string) ([]*object.Commit, error) { - var result []*object.Commit - seen := make(map[plumbing.Hash]struct{}) - if err := walkGraph(&result, &seen, c, path); err != nil { - return nil, err - } - - // TODO result should be returned without ordering - sortCommits(result) - - // for merges of identical cherry-picks - return removeComp(path, result, equivalent) -} - -type commitSorterer struct { - l []*object.Commit -} - -func (s commitSorterer) Len() int { - return len(s.l) -} - -func (s commitSorterer) Less(i, j int) bool { - return s.l[i].Committer.When.Before(s.l[j].Committer.When) || - s.l[i].Committer.When.Equal(s.l[j].Committer.When) && - s.l[i].Author.When.Before(s.l[j].Author.When) -} - -func (s commitSorterer) Swap(i, j int) { - s.l[i], s.l[j] = s.l[j], s.l[i] -} - -// SortCommits sorts a commit list by commit date, from older to newer. -func sortCommits(l []*object.Commit) { - s := &commitSorterer{l} - sort.Sort(s) -} - -// Recursive traversal of the commit graph, generating a linear history of the -// path. -func walkGraph(result *[]*object.Commit, seen *map[plumbing.Hash]struct{}, current *object.Commit, path string) error { - // check and update seen - if _, ok := (*seen)[current.Hash]; ok { - return nil - } - (*seen)[current.Hash] = struct{}{} - - // if the path is not in the current commit, stop searching. - if _, err := current.File(path); err != nil { - return nil - } - - // optimization: don't traverse branches that does not - // contain the path. - parents, err := parentsContainingPath(path, current) - if err != nil { - return err - } - switch len(parents) { - // if the path is not found in any of its parents, the path was - // created by this commit; we must add it to the revisions list and - // stop searching. This includes the case when current is the - // initial commit. - case 0: - *result = append(*result, current) - return nil - case 1: // only one parent contains the path - // if the file contents has change, add the current commit - different, err := differentContents(path, current, parents) - if err != nil { - return err - } - if len(different) == 1 { - *result = append(*result, current) - } - // in any case, walk the parent - return walkGraph(result, seen, parents[0], path) - default: // more than one parent contains the path - // TODO: detect merges that had a conflict, because they must be - // included in the result here. - for _, p := range parents { - err := walkGraph(result, seen, p, path) - if err != nil { - return err - } - } - } - return nil -} - -func parentsContainingPath(path string, c *object.Commit) ([]*object.Commit, error) { - // TODO: benchmark this method making git.object.Commit.parent public instead of using - // an iterator - var result []*object.Commit - iter := c.Parents() - for { - parent, err := iter.Next() - if err == io.EOF { - return result, nil - } - if err != nil { - return nil, err - } - if _, err := parent.File(path); err == nil { - result = append(result, parent) - } - } -} - -// Returns an slice of the commits in "cs" that has the file "path", but with different -// contents than what can be found in "c". 
-func differentContents(path string, c *object.Commit, cs []*object.Commit) ([]*object.Commit, error) { - result := make([]*object.Commit, 0, len(cs)) - h, found := blobHash(path, c) - if !found { - return nil, object.ErrFileNotFound - } - for _, cx := range cs { - if hx, found := blobHash(path, cx); found && h != hx { - result = append(result, cx) - } - } - return result, nil -} - -// blobHash returns the hash of a path in a commit -func blobHash(path string, commit *object.Commit) (hash plumbing.Hash, found bool) { - file, err := commit.File(path) - if err != nil { - var empty plumbing.Hash - return empty, found - } - return file.Hash, true -} - -type contentsComparatorFn func(path string, a, b *object.Commit) (bool, error) - -// Returns a new slice of commits, with duplicates removed. Expects a -// sorted commit list. Duplication is defined according to "comp". It -// will always keep the first commit of a series of duplicated commits. -func removeComp(path string, cs []*object.Commit, comp contentsComparatorFn) ([]*object.Commit, error) { - result := make([]*object.Commit, 0, len(cs)) - if len(cs) == 0 { - return result, nil - } - result = append(result, cs[0]) - for i := 1; i < len(cs); i++ { - equals, err := comp(path, cs[i], cs[i-1]) - if err != nil { - return nil, err - } - if !equals { - result = append(result, cs[i]) - } - } - return result, nil -} - -// Equivalent commits are commits whose patch is the same. -func equivalent(path string, a, b *object.Commit) (bool, error) { - numParentsA := a.NumParents() - numParentsB := b.NumParents() - - // the first commit is not equivalent to anyone - // and "I think" merges can not be equivalent to anything - if numParentsA != 1 || numParentsB != 1 { - return false, nil - } - - diffsA, err := patch(a, path) - if err != nil { - return false, err - } - diffsB, err := patch(b, path) - if err != nil { - return false, err - } - - return sameDiffs(diffsA, diffsB), nil -} - -func patch(c *object.Commit, path string) ([]diffmatchpatch.Diff, error) { - // get contents of the file in the commit - file, err := c.File(path) - if err != nil { - return nil, err - } - content, err := file.Contents() - if err != nil { - return nil, err - } - - // get contents of the file in the first parent of the commit - var contentParent string - iter := c.Parents() - parent, err := iter.Next() - if err != nil { - return nil, err - } - file, err = parent.File(path) - if err != nil { - contentParent = "" - } else { - contentParent, err = file.Contents() - if err != nil { - return nil, err - } - } - - // compare the contents of parent and child - return diff.Do(content, contentParent), nil -} - -func sameDiffs(a, b []diffmatchpatch.Diff) bool { - if len(a) != len(b) { - return false - } - for i := range a { - if !sameDiff(a[i], b[i]) { - return false - } - } - return true -} - -func sameDiff(a, b diffmatchpatch.Diff) bool { - if a.Type != b.Type { - return false - } - switch a.Type { - case 0: - return countLines(a.Text) == countLines(b.Text) - case 1, -1: - return a.Text == b.Text - default: - panic("unreachable") - } -} diff --git a/vendor/github.com/go-git/go-git/v5/remote.go b/vendor/github.com/go-git/go-git/v5/remote.go index 7b2741a4..679e0af2 100644 --- a/vendor/github.com/go-git/go-git/v5/remote.go +++ b/vendor/github.com/go-git/go-git/v5/remote.go @@ -224,11 +224,13 @@ func (r *Remote) PushContext(ctx context.Context, o *PushOptions) (err error) { return err } - if err = rs.Error(); err != nil { - return err + if rs != nil { + if err = rs.Error(); err != nil { + 
return err + } } - return r.updateRemoteReferenceStorage(req, rs) + return r.updateRemoteReferenceStorage(req) } func (r *Remote) useRefDeltas(ar *packp.AdvRefs) bool { @@ -347,7 +349,6 @@ func (r *Remote) newReferenceUpdateRequest( func (r *Remote) updateRemoteReferenceStorage( req *packp.ReferenceUpdateRequest, - result *packp.ReportStatus, ) error { for _, spec := range r.c.Fetch { @@ -445,7 +446,7 @@ func (r *Remote) fetch(ctx context.Context, o *FetchOptions) (sto storer.Referen return nil, err } - refs, err := calculateRefs(o.RefSpecs, remoteRefs, o.Tags) + refs, specToRefs, err := calculateRefs(o.RefSpecs, remoteRefs, o.Tags) if err != nil { return nil, err } @@ -457,9 +458,9 @@ func (r *Remote) fetch(ctx context.Context, o *FetchOptions) (sto storer.Referen } } - req.Wants, err = getWants(r.s, refs) + req.Wants, err = getWants(r.s, refs, o.Depth) if len(req.Wants) > 0 { - req.Haves, err = getHaves(localRefs, remoteRefs, r.s) + req.Haves, err = getHaves(localRefs, remoteRefs, r.s, o.Depth) if err != nil { return nil, err } @@ -469,7 +470,7 @@ func (r *Remote) fetch(ctx context.Context, o *FetchOptions) (sto storer.Referen } } - updated, err := r.updateLocalReferenceStorage(o.RefSpecs, refs, remoteRefs, o.Tags, o.Force) + updated, err := r.updateLocalReferenceStorage(o.RefSpecs, refs, remoteRefs, specToRefs, o.Tags, o.Force) if err != nil { return nil, err } @@ -837,6 +838,7 @@ func getHavesFromRef( remoteRefs map[plumbing.Hash]bool, s storage.Storer, haves map[plumbing.Hash]bool, + depth int, ) error { h := ref.Hash() if haves[h] { @@ -862,7 +864,13 @@ func getHavesFromRef( // commits from the history of each ref. walker := object.NewCommitPreorderIter(commit, haves, nil) toVisit := maxHavesToVisitPerRef - return walker.ForEach(func(c *object.Commit) error { + // But only need up to the requested depth + if depth > 0 && depth < maxHavesToVisitPerRef { + toVisit = depth + } + // It is safe to ignore any error here as we are just trying to find the references that we already have + // An example of a legitimate failure is we have a shallow clone and don't have the previous commit(s) + _ = walker.ForEach(func(c *object.Commit) error { haves[c.Hash] = true toVisit-- // If toVisit starts out at 0 (indicating there is no @@ -873,12 +881,15 @@ func getHavesFromRef( } return nil }) + + return nil } func getHaves( localRefs []*plumbing.Reference, remoteRefStorer storer.ReferenceStorer, s storage.Storer, + depth int, ) ([]plumbing.Hash, error) { haves := map[plumbing.Hash]bool{} @@ -899,7 +910,7 @@ func getHaves( continue } - err = getHavesFromRef(ref, remoteRefs, s, haves) + err = getHavesFromRef(ref, remoteRefs, s, haves, depth) if err != nil { return nil, err } @@ -919,42 +930,41 @@ func calculateRefs( spec []config.RefSpec, remoteRefs storer.ReferenceStorer, tagMode TagMode, -) (memory.ReferenceStorage, error) { +) (memory.ReferenceStorage, [][]*plumbing.Reference, error) { if tagMode == AllTags { spec = append(spec, refspecAllTags) } refs := make(memory.ReferenceStorage) - for _, s := range spec { - if err := doCalculateRefs(s, remoteRefs, refs); err != nil { - return nil, err + // list of references matched for each spec + specToRefs := make([][]*plumbing.Reference, len(spec)) + for i := range spec { + var err error + specToRefs[i], err = doCalculateRefs(spec[i], remoteRefs, refs) + if err != nil { + return nil, nil, err } } - return refs, nil + return refs, specToRefs, nil } func doCalculateRefs( s config.RefSpec, remoteRefs storer.ReferenceStorer, refs memory.ReferenceStorage, -) 
error { - iter, err := remoteRefs.IterReferences() - if err != nil { - return err - } +) ([]*plumbing.Reference, error) { + var refList []*plumbing.Reference if s.IsExactSHA1() { ref := plumbing.NewHashReference(s.Dst(""), plumbing.NewHash(s.Src())) - return refs.SetReference(ref) + + refList = append(refList, ref) + return refList, refs.SetReference(ref) } var matched bool - err = iter.ForEach(func(ref *plumbing.Reference) error { - if !s.Match(ref.Name()) { - return nil - } - + onMatched := func(ref *plumbing.Reference) error { if ref.Type() == plumbing.SymbolicReference { target, err := storer.ResolveReference(remoteRefs, ref.Name()) if err != nil { @@ -969,28 +979,47 @@ func doCalculateRefs( } matched = true - if err := refs.SetReference(ref); err != nil { - return err - } + refList = append(refList, ref) + return refs.SetReference(ref) + } - if !s.IsWildcard() { - return storer.ErrStop + var ret error + if s.IsWildcard() { + iter, err := remoteRefs.IterReferences() + if err != nil { + return nil, err } + ret = iter.ForEach(func(ref *plumbing.Reference) error { + if !s.Match(ref.Name()) { + return nil + } - return nil - }) + return onMatched(ref) + }) + } else { + var resolvedRef *plumbing.Reference + src := s.Src() + resolvedRef, ret = expand_ref(remoteRefs, plumbing.ReferenceName(src)) + if ret == nil { + ret = onMatched(resolvedRef) + } + } if !matched && !s.IsWildcard() { - return NoMatchingRefSpecError{refSpec: s} + return nil, NoMatchingRefSpecError{refSpec: s} } - return err + return refList, ret } -func getWants(localStorer storage.Storer, refs memory.ReferenceStorage) ([]plumbing.Hash, error) { +func getWants(localStorer storage.Storer, refs memory.ReferenceStorage, depth int) ([]plumbing.Hash, error) { + // If depth is anything other than 1 and the repo has shallow commits then just because we have the commit + // at the reference doesn't mean that we don't still need to fetch the parents shallow := false - if s, _ := localStorer.Shallow(); len(s) > 0 { - shallow = true + if depth != 1 { + if s, _ := localStorer.Shallow(); len(s) > 0 { + shallow = true + } } wants := map[plumbing.Hash]bool{} @@ -1144,27 +1173,28 @@ func buildSidebandIfSupported(l *capability.List, reader io.Reader, p sideband.P func (r *Remote) updateLocalReferenceStorage( specs []config.RefSpec, fetchedRefs, remoteRefs memory.ReferenceStorage, + specToRefs [][]*plumbing.Reference, tagMode TagMode, force bool, ) (updated bool, err error) { isWildcard := true forceNeeded := false - for _, spec := range specs { + for i, spec := range specs { if !spec.IsWildcard() { isWildcard = false } - for _, ref := range fetchedRefs { - if !spec.Match(ref.Name()) && !spec.IsExactSHA1() { - continue - } - + for _, ref := range specToRefs[i] { if ref.Type() != plumbing.HashReference { continue } localName := spec.Dst(ref.Name()) + // If localName doesn't start with "refs/" then treat as a branch. 
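As an aside on the mapping above: a go-git refspec decides where a fetched reference lands locally, and NewBranchReferenceName is what qualifies a bare name into refs/heads/. A minimal sketch against go-git v5's public config and plumbing API (the refspec and ref names are examples):

	package main

	import (
		"fmt"

		"github.com/go-git/go-git/v5/config"
		"github.com/go-git/go-git/v5/plumbing"
	)

	func main() {
		// Classic fetch refspec: remote branches land under refs/remotes/origin/.
		spec := config.RefSpec("+refs/heads/*:refs/remotes/origin/*")
		name := plumbing.ReferenceName("refs/heads/main")

		fmt.Println(spec.Match(name)) // true
		fmt.Println(spec.Dst(name))   // refs/remotes/origin/main

		// NewBranchReferenceName qualifies a bare name, as the hunk above does
		// for destinations that lack a "refs/" prefix.
		fmt.Println(plumbing.NewBranchReferenceName("main")) // refs/heads/main
	}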
+	if !strings.HasPrefix(localName.String(), "refs/") {
+		localName = plumbing.NewBranchReferenceName(localName.String())
+	}
 		old, _ := storer.ResolveReference(r.s, localName)
 		new := plumbing.NewHashReference(localName, ref.Hash())
diff --git a/vendor/github.com/go-git/go-git/v5/repository.go b/vendor/github.com/go-git/go-git/v5/repository.go
index f3540c63..3154ac01 100644
--- a/vendor/github.com/go-git/go-git/v5/repository.go
+++ b/vendor/github.com/go-git/go-git/v5/repository.go
@@ -14,11 +14,13 @@ import (
 	"strings"
 	"time"
 
+	"dario.cat/mergo"
 	"github.com/ProtonMail/go-crypto/openpgp"
 	"github.com/go-git/go-billy/v5"
 	"github.com/go-git/go-billy/v5/osfs"
 	"github.com/go-git/go-billy/v5/util"
 	"github.com/go-git/go-git/v5/config"
+	"github.com/go-git/go-git/v5/internal/path_util"
 	"github.com/go-git/go-git/v5/internal/revision"
 	"github.com/go-git/go-git/v5/plumbing"
 	"github.com/go-git/go-git/v5/plumbing/cache"
@@ -31,7 +33,6 @@ import (
 	"github.com/go-git/go-git/v5/storage/filesystem"
 	"github.com/go-git/go-git/v5/storage/filesystem/dotgit"
 	"github.com/go-git/go-git/v5/utils/ioutil"
-	"github.com/imdario/mergo"
 )
 
 // GitDirName this is a special folder where all the git stuff is.
@@ -322,6 +323,11 @@ func PlainOpenWithOptions(path string, o *PlainOpenOptions) (*Repository, error)
 }
 
 func dotGitToOSFilesystems(path string, detect bool) (dot, wt billy.Filesystem, err error) {
+	path, err = path_util.ReplaceTildeWithHome(path)
+	if err != nil {
+		return nil, nil, err
+	}
+
 	if path, err = filepath.Abs(path); err != nil {
 		return nil, nil, err
 	}
@@ -916,7 +922,13 @@ func (r *Repository) clone(ctx context.Context, o *CloneOptions) error {
 	if o.RecurseSubmodules != NoRecurseSubmodules {
 		if err := w.updateSubmodules(&SubmoduleUpdateOptions{
 			RecurseSubmodules: o.RecurseSubmodules,
-			Auth:              o.Auth,
+			Depth: func() int {
+				if o.ShallowSubmodules {
+					return 1
+				}
+				return 0
+			}(),
+			Auth: o.Auth,
 		}); err != nil {
 			return err
 		}
@@ -967,7 +979,6 @@ func (r *Repository) cloneRefSpec(o *CloneOptions) []config.RefSpec {
 	case o.SingleBranch && o.ReferenceName == plumbing.HEAD:
 		return []config.RefSpec{
 			config.RefSpec(fmt.Sprintf(refspecSingleBranchHEAD, o.RemoteName)),
-			config.RefSpec(fmt.Sprintf(refspecSingleBranch, plumbing.Master.Short(), o.RemoteName)),
 		}
 	case o.SingleBranch:
 		return []config.RefSpec{
@@ -1029,21 +1040,9 @@ func (r *Repository) fetchAndUpdateReferences(
 		return nil, err
 	}
 
-	var resolvedRef *plumbing.Reference
-	// return error from checking the raw ref passed in
-	var rawRefError error
-	for _, rule := range append([]string{"%s"}, plumbing.RefRevParseRules...) {
-		resolvedRef, err = storer.ResolveReference(remoteRefs, plumbing.ReferenceName(fmt.Sprintf(rule, ref)))
-
-		if err == nil {
-			break
-		} else if rawRefError == nil {
-			rawRefError = err
-		}
-	}
-
+	resolvedRef, err := expand_ref(remoteRefs, ref)
 	if err != nil {
-		return nil, rawRefError
+		return nil, err
 	}
 
 	refsUpdated, err := r.updateReferences(remote.c.Fetch, resolvedRef)
@@ -1489,6 +1488,23 @@ func (r *Repository) Worktree() (*Worktree, error) {
 	return &Worktree{r: r, Filesystem: r.wt}, nil
 }
 
+func expand_ref(s storer.ReferenceStorer, ref plumbing.ReferenceName) (*plumbing.Reference, error) {
+	// To improve troubleshooting, this preserves the error for the provided `ref`,
+	// and returns the error for that specific ref in case all parse rules fail.
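The loop below keeps only the first error so that, when every candidate form misses, the user sees the failure for the ref exactly as they typed it. A generic sketch of that error-preserving lookup pattern (resolveFirst and the map-backed lookup are illustrative, not part of go-git):

	package main

	import (
		"errors"
		"fmt"
	)

	// resolveFirst tries each candidate in order and returns the first hit;
	// if everything misses, it reports the error for the first (raw) form,
	// which is the most useful one to surface to the user.
	func resolveFirst(lookup func(string) (string, error), candidates []string) (string, error) {
		var firstErr error
		for _, c := range candidates {
			if v, err := lookup(c); err == nil {
				return v, nil
			} else if firstErr == nil {
				firstErr = err
			}
		}
		return "", firstErr
	}

	func main() {
		refs := map[string]string{"refs/heads/main": "abc123"}
		lookup := func(name string) (string, error) {
			if h, ok := refs[name]; ok {
				return h, nil
			}
			return "", errors.New("reference not found: " + name)
		}
		fmt.Println(resolveFirst(lookup, []string{"main", "refs/main", "refs/tags/main", "refs/heads/main"}))
	}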
+ var ret error + for _, rule := range plumbing.RefRevParseRules { + resolvedRef, err := storer.ResolveReference(s, plumbing.ReferenceName(fmt.Sprintf(rule, ref))) + + if err == nil { + return resolvedRef, nil + } else if ret == nil { + ret = err + } + } + + return nil, ret +} + // ResolveRevision resolves revision to corresponding hash. It will always // resolve to a commit hash, not a tree or annotated tag. // @@ -1518,13 +1534,9 @@ func (r *Repository) ResolveRevision(in plumbing.Revision) (*plumbing.Hash, erro tryHashes = append(tryHashes, r.resolveHashPrefix(string(revisionRef))...) - for _, rule := range append([]string{"%s"}, plumbing.RefRevParseRules...) { - ref, err := storer.ResolveReference(r.Storer, plumbing.ReferenceName(fmt.Sprintf(rule, revisionRef))) - - if err == nil { - tryHashes = append(tryHashes, ref.Hash()) - break - } + ref, err := expand_ref(r.Storer, plumbing.ReferenceName(revisionRef)) + if err == nil { + tryHashes = append(tryHashes, ref.Hash()) } // in ambiguous cases, `git rev-parse` will emit a warning, but diff --git a/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/dotgit.go b/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/dotgit.go index 19d70263..e02e6ddf 100644 --- a/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/dotgit.go +++ b/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/dotgit.go @@ -582,7 +582,9 @@ func (d *DotGit) hasIncomingObjects() bool { directoryContents, err := d.fs.ReadDir(objectsPath) if err == nil { for _, file := range directoryContents { - if strings.HasPrefix(file.Name(), "incoming-") && file.IsDir() { + if file.IsDir() && (strings.HasPrefix(file.Name(), "tmp_objdir-incoming-") || + // Before Git 2.35 incoming commits directory had another prefix + strings.HasPrefix(file.Name(), "incoming-")) { d.incomingDirName = file.Name() } } diff --git a/vendor/github.com/go-git/go-git/v5/submodule.go b/vendor/github.com/go-git/go-git/v5/submodule.go index b0c41696..84f020dc 100644 --- a/vendor/github.com/go-git/go-git/v5/submodule.go +++ b/vendor/github.com/go-git/go-git/v5/submodule.go @@ -5,13 +5,13 @@ import ( "context" "errors" "fmt" - "net/url" "path" "github.com/go-git/go-billy/v5" "github.com/go-git/go-git/v5/config" "github.com/go-git/go-git/v5/plumbing" "github.com/go-git/go-git/v5/plumbing/format/index" + "github.com/go-git/go-git/v5/plumbing/transport" ) var ( @@ -133,29 +133,29 @@ func (s *Submodule) Repository() (*Repository, error) { return nil, err } - moduleURL, err := url.Parse(s.c.URL) + moduleEndpoint, err := transport.NewEndpoint(s.c.URL) if err != nil { return nil, err } - if !path.IsAbs(moduleURL.Path) { + if !path.IsAbs(moduleEndpoint.Path) && moduleEndpoint.Protocol == "file" { remotes, err := s.w.r.Remotes() if err != nil { return nil, err } - rootURL, err := url.Parse(remotes[0].c.URLs[0]) + rootEndpoint, err := transport.NewEndpoint(remotes[0].c.URLs[0]) if err != nil { return nil, err } - rootURL.Path = path.Join(rootURL.Path, moduleURL.Path) - *moduleURL = *rootURL + rootEndpoint.Path = path.Join(rootEndpoint.Path, moduleEndpoint.Path) + *moduleEndpoint = *rootEndpoint } _, err = r.CreateRemote(&config.RemoteConfig{ Name: DefaultRemoteName, - URLs: []string{moduleURL.String()}, + URLs: []string{moduleEndpoint.String()}, }) return r, err diff --git a/vendor/github.com/go-git/go-git/v5/worktree_status.go b/vendor/github.com/go-git/go-git/v5/worktree_status.go index a26c9e51..61bb6f75 100644 --- a/vendor/github.com/go-git/go-git/v5/worktree_status.go +++ 
b/vendor/github.com/go-git/go-git/v5/worktree_status.go @@ -281,8 +281,10 @@ func (w *Worktree) doAddDirectory(idx *index.Index, s Status, directory string, } } + directory = filepath.ToSlash(filepath.Clean(directory)) + for name := range s { - if !isPathInDirectory(name, filepath.ToSlash(filepath.Clean(directory))) { + if !isPathInDirectory(name, directory) { continue } @@ -292,32 +294,14 @@ func (w *Worktree) doAddDirectory(idx *index.Index, s Status, directory string, return } - if !added && a { - added = true - } + added = added || a } return } func isPathInDirectory(path, directory string) bool { - ps := strings.Split(path, "/") - ds := strings.Split(directory, "/") - - if len(ds) == 1 && ds[0] == "." { - return true - } - - if len(ps) < len(ds) { - return false - } - - for i := 0; i < len(ds); i++ { - if ps[i] != ds[i] { - return false - } - } - return true + return directory == "." || strings.HasPrefix(path, directory+"/") } // AddWithOptions file contents to the index, updates the index using the diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/layout/write.go b/vendor/github.com/google/go-containerregistry/pkg/v1/layout/write.go index 906b12ae..d6e35c39 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/layout/write.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/layout/write.go @@ -196,6 +196,7 @@ func (l Path) WriteBlob(hash v1.Hash, r io.ReadCloser) error { } func (l Path) writeBlob(hash v1.Hash, size int64, rc io.ReadCloser, renamer func() (v1.Hash, error)) error { + defer rc.Close() if hash.Hex == "" && renamer == nil { panic("writeBlob called an invalid hash and no renamer") } diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/mutate.go b/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/mutate.go index e4a0e527..1a24b10d 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/mutate.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/mutate.go @@ -402,7 +402,9 @@ func Time(img v1.Image, t time.Time) (v1.Image, error) { historyIdx++ break } - addendums[addendumIdx].Layer = newLayer + if addendumIdx < len(addendums) { + addendums[addendumIdx].Layer = newLayer + } } // add all leftover History entries diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/descriptor.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/descriptor.go index 61f28f4c..fafe910e 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/descriptor.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/descriptor.go @@ -16,6 +16,7 @@ package remote import ( "context" + "errors" "fmt" "github.com/google/go-containerregistry/pkg/logs" @@ -33,20 +34,11 @@ var allManifestMediaTypes = append(append([]types.MediaType{ // ErrSchema1 indicates that we received a schema1 manifest from the registry. // This library doesn't have plans to support this legacy image format: // https://github.com/google/go-containerregistry/issues/377 -type ErrSchema1 struct { - schema string -} +var ErrSchema1 = errors.New("see https://github.com/google/go-containerregistry/issues/377") // newErrSchema1 returns an ErrSchema1 with the unexpected MediaType. func newErrSchema1(schema types.MediaType) error { - return &ErrSchema1{ - schema: string(schema), - } -} - -// Error implements error. 
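This hunk's rewrite replaces the ErrSchema1 struct with a sentinel error wrapped via %w, so callers can detect schema-1 manifests with errors.Is no matter which MediaType was interpolated into the message. A minimal self-contained sketch of the pattern (upstream uses types.MediaType for the schema; a plain string keeps the sketch dependency-free, and main is illustrative):

	package main

	import (
		"errors"
		"fmt"
	)

	var ErrSchema1 = errors.New("see https://github.com/google/go-containerregistry/issues/377")

	func newErrSchema1(schema string) error {
		// %w chains the sentinel into the dynamic message.
		return fmt.Errorf("unsupported MediaType: %q, %w", schema, ErrSchema1)
	}

	func main() {
		err := newErrSchema1("application/vnd.docker.distribution.manifest.v1+json")
		fmt.Println(errors.Is(err, ErrSchema1)) // true, despite the interpolated message
	}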
-func (e *ErrSchema1) Error() string { - return fmt.Sprintf("unsupported MediaType: %q, see https://github.com/google/go-containerregistry/issues/377", e.schema) + return fmt.Errorf("unsupported MediaType: %q, %w", schema, ErrSchema1) } // Descriptor provides access to metadata about remote artifact and accessors diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/fetcher.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/fetcher.go index b671f836..4e61002b 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/fetcher.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/fetcher.go @@ -32,6 +32,12 @@ import ( "github.com/google/go-containerregistry/pkg/v1/types" ) +const ( + kib = 1024 + mib = 1024 * kib + manifestLimit = 100 * mib +) + // fetcher implements methods for reading from a registry. type fetcher struct { target resource @@ -130,7 +136,7 @@ func (f *fetcher) fetchManifest(ctx context.Context, ref name.Reference, accepta return nil, nil, err } - manifest, err := io.ReadAll(resp.Body) + manifest, err := io.ReadAll(io.LimitReader(resp.Body, manifestLimit)) if err != nil { return nil, nil, err } diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/options.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/options.go index a722c2ca..99a2bb2e 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/options.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/options.go @@ -96,7 +96,8 @@ var defaultRetryStatusCodes = []int{ http.StatusBadGateway, http.StatusServiceUnavailable, http.StatusGatewayTimeout, - 499, + 499, // nginx-specific, client closed request + 522, // Cloudflare-specific, connection timeout } const ( diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/bearer.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/bearer.go index ea07ff6a..cb156749 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/bearer.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/bearer.go @@ -32,6 +32,71 @@ import ( "github.com/google/go-containerregistry/pkg/name" ) +type Token struct { + Token string `json:"token"` + AccessToken string `json:"access_token,omitempty"` + RefreshToken string `json:"refresh_token"` + ExpiresIn int `json:"expires_in"` +} + +// Exchange requests a registry Token with the given scopes. +func Exchange(ctx context.Context, reg name.Registry, auth authn.Authenticator, t http.RoundTripper, scopes []string, pr *Challenge) (*Token, error) { + if strings.ToLower(pr.Scheme) != "bearer" { + // TODO: Pretend token for basic? + return nil, fmt.Errorf("challenge scheme %q is not bearer", pr.Scheme) + } + bt, err := fromChallenge(reg, auth, t, pr, scopes...) + if err != nil { + return nil, err + } + authcfg, err := auth.Authorization() + if err != nil { + return nil, err + } + tok, err := bt.Refresh(ctx, authcfg) + if err != nil { + return nil, err + } + return tok, nil +} + +// FromToken returns a transport given a Challenge + Token. 
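Taken together with the exported Ping further down, Exchange and FromToken let a client run the bearer handshake once and reuse the resulting token. A usage sketch assuming this bump's exported transport API; the registry and scope strings are examples:

	package main

	import (
		"context"
		"fmt"
		"net/http"

		"github.com/google/go-containerregistry/pkg/authn"
		"github.com/google/go-containerregistry/pkg/name"
		"github.com/google/go-containerregistry/pkg/v1/remote/transport"
	)

	func main() {
		ctx := context.Background()
		reg, _ := name.NewRegistry("registry-1.docker.io")

		// Discover the auth challenge, then trade credentials for a token.
		pr, err := transport.Ping(ctx, reg, http.DefaultTransport)
		if err != nil {
			panic(err)
		}
		scopes := []string{"repository:library/busybox:pull"} // example scope
		tok, err := transport.Exchange(ctx, reg, authn.Anonymous, http.DefaultTransport, scopes, pr)
		if err != nil {
			panic(err)
		}

		// Build a RoundTripper that reuses the token instead of re-authenticating.
		rt, err := transport.FromToken(reg, authn.Anonymous, http.DefaultTransport, pr, tok)
		if err != nil {
			panic(err)
		}
		fmt.Printf("transport ready: %T, token expires in %ds\n", rt, tok.ExpiresIn)
	}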
+func FromToken(reg name.Registry, auth authn.Authenticator, t http.RoundTripper, pr *Challenge, tok *Token) (http.RoundTripper, error) { + if strings.ToLower(pr.Scheme) != "bearer" { + return &Wrapper{&basicTransport{inner: t, auth: auth, target: reg.RegistryStr()}}, nil + } + bt, err := fromChallenge(reg, auth, t, pr) + if err != nil { + return nil, err + } + if tok.Token != "" { + bt.bearer.RegistryToken = tok.Token + } + return &Wrapper{bt}, nil +} + +func fromChallenge(reg name.Registry, auth authn.Authenticator, t http.RoundTripper, pr *Challenge, scopes ...string) (*bearerTransport, error) { + // We require the realm, which tells us where to send our Basic auth to turn it into Bearer auth. + realm, ok := pr.Parameters["realm"] + if !ok { + return nil, fmt.Errorf("malformed www-authenticate, missing realm: %v", pr.Parameters) + } + service := pr.Parameters["service"] + scheme := "https" + if pr.Insecure { + scheme = "http" + } + return &bearerTransport{ + inner: t, + basic: auth, + realm: realm, + registry: reg, + service: service, + scopes: scopes, + scheme: scheme, + }, nil +} + type bearerTransport struct { // Wrapped by bearerTransport. inner http.RoundTripper @@ -73,7 +138,7 @@ func (bt *bearerTransport) RoundTrip(in *http.Request) (*http.Response, error) { // we are redirected, only set it when the authorization header matches // the registry with which we are interacting. // In case of redirect http.Client can use an empty Host, check URL too. - if matchesHost(bt.registry, in, bt.scheme) { + if matchesHost(bt.registry.RegistryStr(), in, bt.scheme) { hdr := fmt.Sprintf("Bearer %s", bt.bearer.RegistryToken) in.Header.Set("Authorization", hdr) } @@ -135,7 +200,36 @@ func (bt *bearerTransport) refresh(ctx context.Context) error { return nil } - var content []byte + response, err := bt.Refresh(ctx, auth) + if err != nil { + return err + } + + // Some registries set access_token instead of token. See #54. + if response.AccessToken != "" { + response.Token = response.AccessToken + } + + // Find a token to turn into a Bearer authenticator + if response.Token != "" { + bt.bearer.RegistryToken = response.Token + } + + // If we obtained a refresh token from the oauth flow, use that for refresh() now. + if response.RefreshToken != "" { + bt.basic = authn.FromConfig(authn.AuthConfig{ + IdentityToken: response.RefreshToken, + }) + } + + return nil +} + +func (bt *bearerTransport) Refresh(ctx context.Context, auth *authn.AuthConfig) (*Token, error) { + var ( + content []byte + err error + ) if auth.IdentityToken != "" { // If the secret being stored is an identity token, // the Username should be set to , which indicates @@ -152,48 +246,25 @@ func (bt *bearerTransport) refresh(ctx context.Context) error { content, err = bt.refreshBasic(ctx) } if err != nil { - return err - } - - // Some registries don't have "token" in the response. See #54. - type tokenResponse struct { - Token string `json:"token"` - AccessToken string `json:"access_token"` - RefreshToken string `json:"refresh_token"` - // TODO: handle expiry? + return nil, err } - var response tokenResponse + var response Token if err := json.Unmarshal(content, &response); err != nil { - return err - } - - // Some registries set access_token instead of token. 
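Every token response is now decoded into the Token struct above; since some registries return access_token rather than token, callers normalize after decoding. A tiny sketch of that normalization (the JSON payload is a made-up example):

	package main

	import (
		"encoding/json"
		"fmt"
	)

	type Token struct {
		Token        string `json:"token"`
		AccessToken  string `json:"access_token,omitempty"`
		RefreshToken string `json:"refresh_token"`
		ExpiresIn    int    `json:"expires_in"`
	}

	func main() {
		// Some registries set access_token instead of token (see issue #54 upstream).
		raw := []byte(`{"access_token":"abc123","expires_in":300}`)

		var tok Token
		if err := json.Unmarshal(raw, &tok); err != nil {
			panic(err)
		}
		if tok.Token == "" {
			tok.Token = tok.AccessToken
		}
		fmt.Println(tok.Token, tok.ExpiresIn) // abc123 300
	}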
- if response.AccessToken != "" { - response.Token = response.AccessToken - } - - // Find a token to turn into a Bearer authenticator - if response.Token != "" { - bt.bearer.RegistryToken = response.Token - } else { - return fmt.Errorf("no token in bearer response:\n%s", content) + return nil, err } - // If we obtained a refresh token from the oauth flow, use that for refresh() now. - if response.RefreshToken != "" { - bt.basic = authn.FromConfig(authn.AuthConfig{ - IdentityToken: response.RefreshToken, - }) + if response.Token == "" && response.AccessToken == "" { + return &response, fmt.Errorf("no token in bearer response:\n%s", content) } - return nil + return &response, nil } -func matchesHost(reg name.Registry, in *http.Request, scheme string) bool { +func matchesHost(host string, in *http.Request, scheme string) bool { canonicalHeaderHost := canonicalAddress(in.Host, scheme) canonicalURLHost := canonicalAddress(in.URL.Host, scheme) - canonicalRegistryHost := canonicalAddress(reg.RegistryStr(), scheme) + canonicalRegistryHost := canonicalAddress(host, scheme) return canonicalHeaderHost == canonicalRegistryHost || canonicalURLHost == canonicalRegistryHost } diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/ping.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/ping.go index d852ef84..799c7ea0 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/ping.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/ping.go @@ -28,33 +28,22 @@ import ( "github.com/google/go-containerregistry/pkg/name" ) -type challenge string - -const ( - anonymous challenge = "anonymous" - basic challenge = "basic" - bearer challenge = "bearer" -) - // 300ms is the default fallback period for go's DNS dialer but we could make this configurable. var fallbackDelay = 300 * time.Millisecond -type pingResp struct { - challenge challenge +type Challenge struct { + Scheme string // Following the challenge there are often key/value pairs // e.g. Bearer service="gcr.io",realm="https://auth.gcr.io/v36/tokenz" - parameters map[string]string + Parameters map[string]string - // The registry's scheme to use. Communicates whether we fell back to http. - scheme string + // Whether we had to use http to complete the Ping. + Insecure bool } -func (c challenge) Canonical() challenge { - return challenge(strings.ToLower(string(c))) -} - -func ping(ctx context.Context, reg name.Registry, t http.RoundTripper) (*pingResp, error) { +// Ping does a GET /v2/ against the registry and returns the response. +func Ping(ctx context.Context, reg name.Registry, t http.RoundTripper) (*Challenge, error) { // This first attempts to use "https" for every request, falling back to http // if the registry matches our localhost heuristic or if it is intentionally // set to insecure via name.NewInsecureRegistry. 
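Since Ping and Challenge are now exported, a caller can inspect a registry's auth challenge directly. A sketch (the registry name is an example; fields per the new Challenge struct):

	package main

	import (
		"context"
		"fmt"
		"net/http"

		"github.com/google/go-containerregistry/pkg/name"
		"github.com/google/go-containerregistry/pkg/v1/remote/transport"
	)

	func main() {
		reg, _ := name.NewRegistry("ghcr.io")
		ch, err := transport.Ping(context.Background(), reg, http.DefaultTransport)
		if err != nil {
			panic(err)
		}
		// Scheme is "Bearer" for token auth and empty for anonymous registries;
		// Parameters typically carry realm= and service=; Insecure reports an http fallback.
		fmt.Println(ch.Scheme, ch.Parameters["realm"], ch.Insecure)
	}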
@@ -68,9 +57,9 @@ func ping(ctx context.Context, reg name.Registry, t http.RoundTripper) (*pingRes return pingParallel(ctx, reg, t, schemes) } -func pingSingle(ctx context.Context, reg name.Registry, t http.RoundTripper, scheme string) (*pingResp, error) { +func pingSingle(ctx context.Context, reg name.Registry, t http.RoundTripper, scheme string) (*Challenge, error) { client := http.Client{Transport: t} - url := fmt.Sprintf("%s://%s/v2/", scheme, reg.Name()) + url := fmt.Sprintf("%s://%s/v2/", scheme, reg.RegistryStr()) req, err := http.NewRequest(http.MethodGet, url, nil) if err != nil { return nil, err @@ -86,27 +75,28 @@ func pingSingle(ctx context.Context, reg name.Registry, t http.RoundTripper, sch resp.Body.Close() }() + insecure := scheme == "http" + switch resp.StatusCode { case http.StatusOK: // If we get a 200, then no authentication is needed. - return &pingResp{ - challenge: anonymous, - scheme: scheme, + return &Challenge{ + Insecure: insecure, }, nil case http.StatusUnauthorized: if challenges := authchallenge.ResponseChallenges(resp); len(challenges) != 0 { // If we hit more than one, let's try to find one that we know how to handle. wac := pickFromMultipleChallenges(challenges) - return &pingResp{ - challenge: challenge(wac.Scheme).Canonical(), - parameters: wac.Parameters, - scheme: scheme, + return &Challenge{ + Scheme: wac.Scheme, + Parameters: wac.Parameters, + Insecure: insecure, }, nil } // Otherwise, just return the challenge without parameters. - return &pingResp{ - challenge: challenge(resp.Header.Get("WWW-Authenticate")).Canonical(), - scheme: scheme, + return &Challenge{ + Scheme: resp.Header.Get("WWW-Authenticate"), + Insecure: insecure, }, nil default: return nil, CheckError(resp, http.StatusOK, http.StatusUnauthorized) @@ -114,12 +104,12 @@ func pingSingle(ctx context.Context, reg name.Registry, t http.RoundTripper, sch } // Based on the golang happy eyeballs dialParallel impl in net/dial.go. 
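pingParallel, next, races the https probe against a delayed http fallback, the same way net.Dialer's happy-eyeballs code races address families. A stripped-down, generic sketch of that pattern (raceProbes is illustrative, not part of this package):

	package main

	import (
		"context"
		"errors"
		"fmt"
		"time"
	)

	// raceProbes runs the primary probe immediately, starts the fallback after
	// a short delay (or as soon as the primary fails), and returns the first
	// success, or both errors combined if everything fails.
	func raceProbes(ctx context.Context, primary, fallback func(context.Context) error, delay time.Duration) error {
		results := make(chan error, 2)
		launched := 1
		go func() { results <- primary(ctx) }()

		timer := time.NewTimer(delay)
		defer timer.Stop()

		var errs []error
		for {
			select {
			case <-timer.C:
				if launched == 1 { // primary still pending: start the fallback
					launched++
					go func() { results <- fallback(ctx) }()
				}
			case err := <-results:
				if err == nil {
					return nil
				}
				errs = append(errs, err)
				if launched == 1 { // primary failed before the timer: start the fallback now
					launched++
					go func() { results <- fallback(ctx) }()
				} else if len(errs) == launched {
					return errors.Join(errs...)
				}
			}
		}
	}

	func main() {
		slow := func(ctx context.Context) error { time.Sleep(50 * time.Millisecond); return errors.New("https failed") }
		fast := func(ctx context.Context) error { return nil }
		fmt.Println(raceProbes(context.Background(), slow, fast, 10*time.Millisecond)) // <nil>
	}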
-func pingParallel(ctx context.Context, reg name.Registry, t http.RoundTripper, schemes []string) (*pingResp, error) { +func pingParallel(ctx context.Context, reg name.Registry, t http.RoundTripper, schemes []string) (*Challenge, error) { returned := make(chan struct{}) defer close(returned) type pingResult struct { - *pingResp + *Challenge error primary bool done bool @@ -130,7 +120,7 @@ func pingParallel(ctx context.Context, reg name.Registry, t http.RoundTripper, s startRacer := func(ctx context.Context, scheme string) { pr, err := pingSingle(ctx, reg, t, scheme) select { - case results <- pingResult{pingResp: pr, error: err, primary: scheme == "https", done: true}: + case results <- pingResult{Challenge: pr, error: err, primary: scheme == "https", done: true}: case <-returned: if pr != nil { logs.Debug.Printf("%s lost race", scheme) @@ -156,7 +146,7 @@ func pingParallel(ctx context.Context, reg name.Registry, t http.RoundTripper, s case res := <-results: if res.error == nil { - return res.pingResp, nil + return res.Challenge, nil } if res.primary { primary = res @@ -164,7 +154,7 @@ func pingParallel(ctx context.Context, reg name.Registry, t http.RoundTripper, s fallback = res } if primary.done && fallback.done { - return nil, multierrs([]error{primary.error, fallback.error}) + return nil, multierrs{primary.error, fallback.error} } if res.primary && fallbackTimer.Stop() { // Primary failed and we haven't started the fallback, diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/schemer.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/schemer.go index d70b6a85..05844db1 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/schemer.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/schemer.go @@ -37,7 +37,7 @@ func (st *schemeTransport) RoundTrip(in *http.Request) (*http.Response, error) { // based on which scheme was successful. That is only valid for the // registry server and not e.g. a separate token server or blob storage, // so we should only override the scheme if the host is the registry. - if matchesHost(st.registry, in, st.scheme) { + if matchesHost(st.registry.String(), in, st.scheme) { in.URL.Scheme = st.scheme } return st.inner.RoundTrip(in) diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/transport.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/transport.go index 01fe1fa8..bd539b44 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/transport.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/transport.go @@ -16,8 +16,8 @@ package transport import ( "context" - "fmt" "net/http" + "strings" "github.com/google/go-containerregistry/pkg/authn" "github.com/google/go-containerregistry/pkg/name" @@ -59,7 +59,7 @@ func NewWithContext(ctx context.Context, reg name.Registry, auth authn.Authentic // First we ping the registry to determine the parameters of the authentication handshake // (if one is even necessary). - pr, err := ping(ctx, reg, t) + pr, err := Ping(ctx, reg, t) if err != nil { return nil, err } @@ -69,39 +69,32 @@ func NewWithContext(ctx context.Context, reg name.Registry, auth authn.Authentic t = NewUserAgent(t, "") } + scheme := "https" + if pr.Insecure { + scheme = "http" + } + // Wrap t in a transport that selects the appropriate scheme based on the ping response. 
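For callers, none of this restructuring changes the entry point: NewWithContext still takes a registry, an authenticator, and scopes, and returns a ready http.RoundTripper. A usage sketch (the repository name is a placeholder):

	package main

	import (
		"context"
		"fmt"
		"net/http"

		"github.com/google/go-containerregistry/pkg/authn"
		"github.com/google/go-containerregistry/pkg/name"
		"github.com/google/go-containerregistry/pkg/v1/remote/transport"
	)

	func main() {
		repo, _ := name.NewRepository("ghcr.io/example/app") // placeholder repository
		scopes := []string{repo.Scope("pull")}

		rt, err := transport.NewWithContext(context.Background(), repo.Registry, authn.Anonymous, http.DefaultTransport, scopes)
		if err != nil {
			panic(err)
		}
		client := &http.Client{Transport: rt}
		resp, err := client.Get("https://ghcr.io/v2/")
		if err != nil {
			panic(err)
		}
		defer resp.Body.Close()
		fmt.Println(resp.Status)
	}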
t = &schemeTransport{ - scheme: pr.scheme, + scheme: scheme, registry: reg, inner: t, } - switch pr.challenge.Canonical() { - case anonymous, basic: + if strings.ToLower(pr.Scheme) != "bearer" { return &Wrapper{&basicTransport{inner: t, auth: auth, target: reg.RegistryStr()}}, nil - case bearer: - // We require the realm, which tells us where to send our Basic auth to turn it into Bearer auth. - realm, ok := pr.parameters["realm"] - if !ok { - return nil, fmt.Errorf("malformed www-authenticate, missing realm: %v", pr.parameters) - } - service := pr.parameters["service"] - bt := &bearerTransport{ - inner: t, - basic: auth, - realm: realm, - registry: reg, - service: service, - scopes: scopes, - scheme: pr.scheme, - } - if err := bt.refresh(ctx); err != nil { - return nil, err - } - return &Wrapper{bt}, nil - default: - return nil, fmt.Errorf("unrecognized challenge: %s", pr.challenge) } + + bt, err := fromChallenge(reg, auth, t, pr) + if err != nil { + return nil, err + } + bt.scopes = scopes + + if err := bt.refresh(ctx); err != nil { + return nil, err + } + return &Wrapper{bt}, nil } // Wrapper results in *not* wrapping supplied transport with additional logic such as retries, useragent and debug logging diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/write.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/write.go index f4369e2a..6bfce75e 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/write.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/write.go @@ -210,7 +210,7 @@ func (w *writer) initiateUpload(ctx context.Context, from, mount, origin string) req.Header.Set("Content-Type", "application/json") resp, err := w.client.Do(req.WithContext(ctx)) if err != nil { - if origin != "" && origin != w.repo.RegistryStr() { + if from != "" { // https://github.com/google/go-containerregistry/issues/1679 logs.Warn.Printf("retrying without mount: %v", err) return w.initiateUpload(ctx, "", "", "") @@ -220,7 +220,7 @@ func (w *writer) initiateUpload(ctx context.Context, from, mount, origin string) defer resp.Body.Close() if err := transport.CheckError(resp, http.StatusCreated, http.StatusAccepted); err != nil { - if origin != "" && origin != w.repo.RegistryStr() { + if from != "" { // https://github.com/google/go-containerregistry/issues/1404 logs.Warn.Printf("retrying without mount: %v", err) return w.initiateUpload(ctx, "", "", "") @@ -360,8 +360,16 @@ func (w *writer) uploadOne(ctx context.Context, l v1.Layer) error { if err := w.maybeUpdateScopes(ctx, ml); err != nil { return err } + from = ml.Reference.Context().RepositoryStr() origin = ml.Reference.Context().RegistryStr() + + // This keeps breaking with DockerHub. 
+	// https://github.com/google/go-containerregistry/issues/1741
+	if w.repo.RegistryStr() == name.DefaultRegistry && origin != w.repo.RegistryStr() {
+		from = ""
+		origin = ""
+	}
 	}
 
 	location, mounted, err := w.initiateUpload(ctx, from, mount, origin)
diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/attestations.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/attestations.go
new file mode 100644
index 00000000..73aafe7e
--- /dev/null
+++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/attestations.go
@@ -0,0 +1,99 @@
+package in_toto
+
+import (
+	"github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common"
+	slsa01 "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.1"
+	slsa02 "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2"
+	slsa1 "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v1"
+)
+
+const (
+	// StatementInTotoV01 is the statement type for the generalized link format
+	// containing statements. This is constant for all predicate types.
+	StatementInTotoV01 = "https://in-toto.io/Statement/v0.1"
+	// PredicateSPDX represents an SBOM using the SPDX standard.
+	// SPDX mandates the 'spdxVersion' field, so the predicate type can omit
+	// the version.
+	PredicateSPDX = "https://spdx.dev/Document"
+	// PredicateCycloneDX represents a CycloneDX SBOM
+	PredicateCycloneDX = "https://cyclonedx.org/bom"
+	// PredicateLinkV1 represents an in-toto 0.9 link.
+	PredicateLinkV1 = "https://in-toto.io/Link/v1"
+)
+
+// Subject describes the set of software artifacts the statement applies to.
+type Subject struct {
+	Name   string           `json:"name"`
+	Digest common.DigestSet `json:"digest"`
+}
+
+// StatementHeader defines the common fields for all statements
+type StatementHeader struct {
+	Type          string    `json:"_type"`
+	PredicateType string    `json:"predicateType"`
+	Subject       []Subject `json:"subject"`
+}
+
+/*
+Statement binds the attestation to a particular subject and identifies the
+type of the predicate. This struct represents a generic statement.
+*/
+type Statement struct {
+	StatementHeader
+	// Predicate contains type-specific metadata.
+	Predicate interface{} `json:"predicate"`
+}
+
+// ProvenanceStatementSLSA01 is the definition for an entire provenance statement with SLSA 0.1 predicate.
+type ProvenanceStatementSLSA01 struct {
+	StatementHeader
+	Predicate slsa01.ProvenancePredicate `json:"predicate"`
+}
+
+// ProvenanceStatementSLSA02 is the definition for an entire provenance statement with SLSA 0.2 predicate.
+type ProvenanceStatementSLSA02 struct {
+	StatementHeader
+	Predicate slsa02.ProvenancePredicate `json:"predicate"`
+}
+
+// ProvenanceStatementSLSA1 is the definition for an entire provenance statement with SLSA 1.0 predicate.
+type ProvenanceStatementSLSA1 struct {
+	StatementHeader
+	Predicate slsa1.ProvenancePredicate `json:"predicate"`
+}
+
+// ProvenanceStatement is the definition for an entire provenance statement with SLSA 0.2 predicate.
+// Deprecated: Only version-specific provenance structs will be maintained (ProvenanceStatementSLSA01, ProvenanceStatementSLSA02).
+type ProvenanceStatement struct {
+	StatementHeader
+	Predicate slsa02.ProvenancePredicate `json:"predicate"`
+}
+
+// LinkStatement is the definition for an entire link statement.
+type LinkStatement struct {
+	StatementHeader
+	Predicate Link `json:"predicate"`
+}
+
+/*
+SPDXStatement is the definition for an entire SPDX statement.
+This is currently not implemented.
Some tooling exists here: +https://github.com/spdx/tools-golang, but this software is still in +early state. +This struct is the same as the generic Statement struct but is added for +completeness +*/ +type SPDXStatement struct { + StatementHeader + Predicate interface{} `json:"predicate"` +} + +/* +CycloneDXStatement defines a cyclonedx sbom in the predicate. It is not +currently serialized just as its SPDX counterpart. It is an empty +interface, like the generic Statement. +*/ +type CycloneDXStatement struct { + StatementHeader + Predicate interface{} `json:"predicate"` +} diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/envelope.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/envelope.go new file mode 100644 index 00000000..2c8afff1 --- /dev/null +++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/envelope.go @@ -0,0 +1,166 @@ +package in_toto + +import ( + "context" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "os" + + "github.com/secure-systems-lab/go-securesystemslib/cjson" + "github.com/secure-systems-lab/go-securesystemslib/dsse" + "github.com/secure-systems-lab/go-securesystemslib/signerverifier" +) + +// PayloadType is the payload type used for links and layouts. +const PayloadType = "application/vnd.in-toto+json" + +// ErrInvalidPayloadType indicates that the envelope used an unknown payload type +var ErrInvalidPayloadType = errors.New("unknown payload type") + +type Envelope struct { + envelope *dsse.Envelope + payload any +} + +func loadEnvelope(env *dsse.Envelope) (*Envelope, error) { + e := &Envelope{envelope: env} + + contentBytes, err := env.DecodeB64Payload() + if err != nil { + return nil, err + } + + payload, err := loadPayload(contentBytes) + if err != nil { + return nil, err + } + e.payload = payload + + return e, nil +} + +func (e *Envelope) SetPayload(payload any) error { + encodedBytes, err := cjson.EncodeCanonical(payload) + if err != nil { + return err + } + + e.payload = payload + e.envelope = &dsse.Envelope{ + Payload: base64.StdEncoding.EncodeToString(encodedBytes), + PayloadType: PayloadType, + } + + return nil +} + +func (e *Envelope) GetPayload() any { + return e.payload +} + +func (e *Envelope) VerifySignature(key Key) error { + verifier, err := getSignerVerifierFromKey(key) + if err != nil { + return err + } + + ev, err := dsse.NewEnvelopeVerifier(verifier) + if err != nil { + return err + } + + _, err = ev.Verify(context.Background(), e.envelope) + return err +} + +func (e *Envelope) Sign(key Key) error { + signer, err := getSignerVerifierFromKey(key) + if err != nil { + return err + } + + es, err := dsse.NewEnvelopeSigner(signer) + if err != nil { + return err + } + + payload, err := e.envelope.DecodeB64Payload() + if err != nil { + return err + } + + env, err := es.SignPayload(context.Background(), e.envelope.PayloadType, payload) + if err != nil { + return err + } + + e.envelope = env + return nil +} + +func (e *Envelope) Sigs() []Signature { + sigs := []Signature{} + for _, s := range e.envelope.Signatures { + sigs = append(sigs, Signature{ + KeyID: s.KeyID, + Sig: s.Sig, + }) + } + return sigs +} + +func (e *Envelope) GetSignatureForKeyID(keyID string) (Signature, error) { + for _, s := range e.Sigs() { + if s.KeyID == keyID { + return s, nil + } + } + + return Signature{}, fmt.Errorf("no signature found for key '%s'", keyID) +} + +func (e *Envelope) Dump(path string) error { + jsonBytes, err := json.MarshalIndent(e.envelope, "", " ") + if err != nil { + return err + } + + // Write JSON bytes to the passed path 
with permissions (-rw-r--r--) + err = os.WriteFile(path, jsonBytes, 0644) + if err != nil { + return err + } + + return nil +} + +func getSignerVerifierFromKey(key Key) (dsse.SignerVerifier, error) { + sslibKey := getSSLibKeyFromKey(key) + + switch sslibKey.KeyType { + case signerverifier.RSAKeyType: + return signerverifier.NewRSAPSSSignerVerifierFromSSLibKey(&sslibKey) + case signerverifier.ED25519KeyType: + return signerverifier.NewED25519SignerVerifierFromSSLibKey(&sslibKey) + case signerverifier.ECDSAKeyType: + return signerverifier.NewECDSASignerVerifierFromSSLibKey(&sslibKey) + } + + return nil, ErrUnsupportedKeyType +} + +func getSSLibKeyFromKey(key Key) signerverifier.SSLibKey { + return signerverifier.SSLibKey{ + KeyType: key.KeyType, + KeyIDHashAlgorithms: key.KeyIDHashAlgorithms, + KeyID: key.KeyID, + Scheme: key.Scheme, + KeyVal: signerverifier.KeyVal{ + Public: key.KeyVal.Public, + Private: key.KeyVal.Private, + Certificate: key.KeyVal.Certificate, + }, + } +} diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/keylib.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/keylib.go index 7de48282..52429ca4 100644 --- a/vendor/github.com/in-toto/in-toto-golang/in_toto/keylib.go +++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/keylib.go @@ -13,7 +13,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "os" "strings" @@ -325,7 +324,7 @@ func (k *Key) LoadKeyReader(r io.Reader, scheme string, KeyIDHashAlgorithms []st return ErrNoPEMBlock } // Read key bytes - pemBytes, err := ioutil.ReadAll(r) + pemBytes, err := io.ReadAll(r) if err != nil { return err } @@ -344,7 +343,7 @@ func (k *Key) LoadKeyReaderDefaults(r io.Reader) error { return ErrNoPEMBlock } // Read key bytes - pemBytes, err := ioutil.ReadAll(r) + pemBytes, err := io.ReadAll(r) if err != nil { return err } @@ -366,7 +365,7 @@ func (k *Key) LoadKeyReaderDefaults(r io.Reader) error { func getDefaultKeyScheme(key interface{}) (scheme string, keyIDHashAlgorithms []string, err error) { keyIDHashAlgorithms = []string{"sha256", "sha512"} - switch key.(type) { + switch k := key.(type) { case *rsa.PublicKey, *rsa.PrivateKey: scheme = rsassapsssha256Scheme case ed25519.PrivateKey, ed25519.PublicKey: @@ -374,7 +373,7 @@ func getDefaultKeyScheme(key interface{}) (scheme string, keyIDHashAlgorithms [] case *ecdsa.PrivateKey, *ecdsa.PublicKey: scheme = ecdsaSha2nistp256 case *x509.Certificate: - return getDefaultKeyScheme(key.(*x509.Certificate).PublicKey) + return getDefaultKeyScheme(k.PublicKey) default: err = ErrUnsupportedKeyType } @@ -382,11 +381,10 @@ func getDefaultKeyScheme(key interface{}) (scheme string, keyIDHashAlgorithms [] return scheme, keyIDHashAlgorithms, err } -func (k *Key) loadKey(key interface{}, pemData *pem.Block, scheme string, keyIDHashAlgorithms []string) error { - - switch key.(type) { +func (k *Key) loadKey(keyObj interface{}, pemData *pem.Block, scheme string, keyIDHashAlgorithms []string) error { + switch key := keyObj.(type) { case *rsa.PublicKey: - pubKeyBytes, err := x509.MarshalPKIXPublicKey(key.(*rsa.PublicKey)) + pubKeyBytes, err := x509.MarshalPKIXPublicKey(key) if err != nil { return err } @@ -396,7 +394,7 @@ func (k *Key) loadKey(key interface{}, pemData *pem.Block, scheme string, keyIDH case *rsa.PrivateKey: // Note: RSA Public Keys will get stored as X.509 SubjectPublicKeyInfo (RFC5280) // This behavior is consistent to the securesystemslib - pubKeyBytes, err := x509.MarshalPKIXPublicKey(key.(*rsa.PrivateKey).Public()) + pubKeyBytes, err := x509.MarshalPKIXPublicKey(key.Public()) if 
err != nil { return err } @@ -404,16 +402,16 @@ func (k *Key) loadKey(key interface{}, pemData *pem.Block, scheme string, keyIDH return err } case ed25519.PublicKey: - if err := k.setKeyComponents(key.(ed25519.PublicKey), []byte{}, ed25519KeyType, scheme, keyIDHashAlgorithms); err != nil { + if err := k.setKeyComponents(key, []byte{}, ed25519KeyType, scheme, keyIDHashAlgorithms); err != nil { return err } case ed25519.PrivateKey: - pubKeyBytes := key.(ed25519.PrivateKey).Public() - if err := k.setKeyComponents(pubKeyBytes.(ed25519.PublicKey), key.(ed25519.PrivateKey), ed25519KeyType, scheme, keyIDHashAlgorithms); err != nil { + pubKeyBytes := key.Public() + if err := k.setKeyComponents(pubKeyBytes.(ed25519.PublicKey), key, ed25519KeyType, scheme, keyIDHashAlgorithms); err != nil { return err } case *ecdsa.PrivateKey: - pubKeyBytes, err := x509.MarshalPKIXPublicKey(key.(*ecdsa.PrivateKey).Public()) + pubKeyBytes, err := x509.MarshalPKIXPublicKey(key.Public()) if err != nil { return err } @@ -421,7 +419,7 @@ func (k *Key) loadKey(key interface{}, pemData *pem.Block, scheme string, keyIDH return err } case *ecdsa.PublicKey: - pubKeyBytes, err := x509.MarshalPKIXPublicKey(key.(*ecdsa.PublicKey)) + pubKeyBytes, err := x509.MarshalPKIXPublicKey(key) if err != nil { return err } @@ -429,7 +427,7 @@ func (k *Key) loadKey(key interface{}, pemData *pem.Block, scheme string, keyIDH return err } case *x509.Certificate: - err := k.loadKey(key.(*x509.Certificate).PublicKey, pemData, scheme, keyIDHashAlgorithms) + err := k.loadKey(key.PublicKey, pemData, scheme, keyIDHashAlgorithms) if err != nil { return err } diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/model.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/model.go index e22b79da..f56b784e 100644 --- a/vendor/github.com/in-toto/in-toto-golang/in_toto/model.go +++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/model.go @@ -7,7 +7,6 @@ import ( "encoding/json" "errors" "fmt" - "io/ioutil" "os" "reflect" "regexp" @@ -15,10 +14,6 @@ import ( "strings" "time" - "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common" - slsa01 "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.1" - slsa02 "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2" - "github.com/secure-systems-lab/go-securesystemslib/cjson" "github.com/secure-systems-lab/go-securesystemslib/dsse" ) @@ -30,7 +25,7 @@ and private keys in PEM format stored as strings. For public keys the Private field may be an empty string. */ type KeyVal struct { - Private string `json:"private"` + Private string `json:"private,omitempty"` Public string `json:"public"` Certificate string `json:"certificate,omitempty"` } @@ -48,9 +43,6 @@ type Key struct { Scheme string `json:"scheme"` } -// PayloadType is the payload type used for links and layouts. -const PayloadType = "application/vnd.in-toto+json" - // ErrEmptyKeyField will be thrown if a field in our Key struct is empty. var ErrEmptyKeyField = errors.New("empty field in key") @@ -73,23 +65,6 @@ var ErrNoPublicKey = errors.New("the given key is not a public key") // for example: curve size = "521" and scheme = "ecdsa-sha2-nistp224" var ErrCurveSizeSchemeMismatch = errors.New("the scheme does not match the curve size") -const ( - // StatementInTotoV01 is the statement type for the generalized link format - // containing statements. This is constant for all predicate types. - StatementInTotoV01 = "https://in-toto.io/Statement/v0.1" - // PredicateSPDX represents a SBOM using the SPDX standard. 
- // The SPDX mandates 'spdxVersion' field, so predicate type can omit - // version. - PredicateSPDX = "https://spdx.dev/Document" - // PredicateCycloneDX represents a CycloneDX SBOM - PredicateCycloneDX = "https://cyclonedx.org/bom" - // PredicateLinkV1 represents an in-toto 0.9 link. - PredicateLinkV1 = "https://in-toto.io/Link/v1" -) - -// ErrInvalidPayloadType indicates that the envelope used an unkown payload type -var ErrInvalidPayloadType = errors.New("unknown payload type") - /* matchEcdsaScheme checks if the scheme suffix, matches the ecdsa key curve size. We do not need a full regex match here, because @@ -702,6 +677,67 @@ func validateLayout(layout Layout) error { return nil } +type Metadata interface { + Sign(Key) error + VerifySignature(Key) error + GetPayload() any + Sigs() []Signature + GetSignatureForKeyID(string) (Signature, error) + Dump(string) error +} + +func LoadMetadata(path string) (Metadata, error) { + jsonBytes, err := os.ReadFile(path) + if err != nil { + return nil, err + } + + var rawData map[string]*json.RawMessage + if err := json.Unmarshal(jsonBytes, &rawData); err != nil { + return nil, err + } + + if _, ok := rawData["payloadType"]; ok { + dsseEnv := &dsse.Envelope{} + if rawData["payload"] == nil || rawData["signatures"] == nil { + return nil, fmt.Errorf("in-toto metadata envelope requires 'payload' and 'signatures' parts") + } + + if err := json.Unmarshal(jsonBytes, dsseEnv); err != nil { + return nil, err + } + + if dsseEnv.PayloadType != PayloadType { + return nil, ErrInvalidPayloadType + } + + return loadEnvelope(dsseEnv) + } + + mb := &Metablock{} + + // Error out on missing `signed` or `signatures` field or if + // one of them has a `null` value, which would lead to a nil pointer + // dereference in Unmarshal below. + if rawData["signed"] == nil || rawData["signatures"] == nil { + return nil, fmt.Errorf("in-toto metadata requires 'signed' and 'signatures' parts") + } + + // Fully unmarshal signatures part + if err := json.Unmarshal(*rawData["signatures"], &mb.Signatures); err != nil { + return nil, err + } + + payload, err := loadPayload(*rawData["signed"]) + if err != nil { + return nil, err + } + + mb.Signed = payload + + return mb, nil +} + /* Metablock is a generic container for signable in-toto objects such as Layout or Link. It has two fields, one that contains the signable object and one that @@ -767,17 +803,13 @@ func checkRequiredJSONFields(obj map[string]interface{}, Load parses JSON formatted metadata at the passed path into the Metablock object on which it was called. It returns an error if it cannot parse a valid JSON formatted Metablock that contains a Link or Layout. + +Deprecated: Use LoadMetadata for a signature wrapper agnostic way to load an +envelope. 
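A migration sketch (pubKey is assumed to be a Key loaded elsewhere):

	md, err := LoadMetadata("demo.layout")
	if err != nil {
		return err
	}
	if err := md.VerifySignature(pubKey); err != nil {
		return err
	}
	layout, ok := md.GetPayload().(Layout)
	if !ok {
		return errors.New("metadata does not wrap a layout")
	}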
*/ func (mb *Metablock) Load(path string) error { - // Open file and close before returning - jsonFile, err := os.Open(path) - if err != nil { - return err - } - defer jsonFile.Close() - // Read entire file - jsonBytes, err := ioutil.ReadAll(jsonFile) + jsonBytes, err := os.ReadFile(path) if err != nil { return err } @@ -803,54 +835,14 @@ func (mb *Metablock) Load(path string) error { return err } - // Temporarily copy signed to opaque map to inspect the `_type` of signed - // and create link or layout accordingly - var signed map[string]interface{} - if err := json.Unmarshal(*rawMb["signed"], &signed); err != nil { + payload, err := loadPayload(*rawMb["signed"]) + if err != nil { return err } - if signed["_type"] == "link" { - var link Link - if err := checkRequiredJSONFields(signed, reflect.TypeOf(link)); err != nil { - return err - } - - data, err := rawMb["signed"].MarshalJSON() - if err != nil { - return err - } - decoder := json.NewDecoder(strings.NewReader(string(data))) - decoder.DisallowUnknownFields() - if err := decoder.Decode(&link); err != nil { - return err - } - mb.Signed = link - - } else if signed["_type"] == "layout" { - var layout Layout - if err := checkRequiredJSONFields(signed, reflect.TypeOf(layout)); err != nil { - return err - } - - data, err := rawMb["signed"].MarshalJSON() - if err != nil { - return err - } - decoder := json.NewDecoder(strings.NewReader(string(data))) - decoder.DisallowUnknownFields() - if err := decoder.Decode(&layout); err != nil { - return err - } - - mb.Signed = layout - - } else { - return fmt.Errorf("the '_type' field of the 'signed' part of in-toto" + - " metadata must be one of 'link' or 'layout'") - } + mb.Signed = payload - return jsonFile.Close() + return nil } /* @@ -866,7 +858,7 @@ func (mb *Metablock) Dump(path string) error { } // Write JSON bytes to the passed path with permissions (-rw-r--r--) - err = ioutil.WriteFile(path, jsonBytes, 0644) + err = os.WriteFile(path, jsonBytes, 0644) if err != nil { return err } @@ -883,6 +875,14 @@ func (mb *Metablock) GetSignableRepresentation() ([]byte, error) { return cjson.EncodeCanonical(mb.Signed) } +func (mb *Metablock) GetPayload() any { + return mb.Signed +} + +func (mb *Metablock) Sigs() []Signature { + return mb.Signatures +} + /* VerifySignature verifies the first signature, corresponding to the passed Key, that it finds in the Signatures field of the Metablock on which it was called. @@ -965,109 +965,3 @@ func (mb *Metablock) Sign(key Key) error { mb.Signatures = append(mb.Signatures, newSignature) return nil } - -// Subject describes the set of software artifacts the statement applies to. -type Subject struct { - Name string `json:"name"` - Digest common.DigestSet `json:"digest"` -} - -// StatementHeader defines the common fields for all statements -type StatementHeader struct { - Type string `json:"_type"` - PredicateType string `json:"predicateType"` - Subject []Subject `json:"subject"` -} - -/* -Statement binds the attestation to a particular subject and identifies the -of the predicate. This struct represents a generic statement. -*/ -type Statement struct { - StatementHeader - // Predicate contains type speficic metadata. - Predicate interface{} `json:"predicate"` -} - -// ProvenanceStatementSLSA01 is the definition for an entire provenance statement with SLSA 0.1 predicate. 
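These statement types are moving, not changing: the same structs now live in attestations.go, added earlier in this patch. A construction sketch with a CycloneDX predicate (the artifact name, digest, and predicate body are examples):

	package main

	import (
		"encoding/json"
		"os"

		"github.com/in-toto/in-toto-golang/in_toto"
		"github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common"
	)

	func main() {
		st := in_toto.Statement{
			StatementHeader: in_toto.StatementHeader{
				Type:          in_toto.StatementInTotoV01,
				PredicateType: in_toto.PredicateCycloneDX,
				Subject: []in_toto.Subject{{
					Name:   "app.tar.gz", // example artifact
					Digest: common.DigestSet{"sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},
				}},
			},
			Predicate: map[string]any{"bomFormat": "CycloneDX"}, // placeholder predicate
		}
		_ = json.NewEncoder(os.Stdout).Encode(st)
	}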
-type ProvenanceStatementSLSA01 struct { - StatementHeader - Predicate slsa01.ProvenancePredicate `json:"predicate"` -} - -// ProvenanceStatementSLSA02 is the definition for an entire provenance statement with SLSA 0.2 predicate. -type ProvenanceStatementSLSA02 struct { - StatementHeader - Predicate slsa02.ProvenancePredicate `json:"predicate"` -} - -// ProvenanceStatement is the definition for an entire provenance statement with SLSA 0.2 predicate. -// Deprecated: Only version-specific provenance structs will be maintained (ProvenanceStatementSLSA01, ProvenanceStatementSLSA02). -type ProvenanceStatement struct { - StatementHeader - Predicate slsa02.ProvenancePredicate `json:"predicate"` -} - -// LinkStatement is the definition for an entire link statement. -type LinkStatement struct { - StatementHeader - Predicate Link `json:"predicate"` -} - -/* -SPDXStatement is the definition for an entire SPDX statement. -This is currently not implemented. Some tooling exists here: -https://github.com/spdx/tools-golang, but this software is still in -early state. -This struct is the same as the generic Statement struct but is added for -completeness -*/ -type SPDXStatement struct { - StatementHeader - Predicate interface{} `json:"predicate"` -} - -/* -CycloneDXStatement defines a cyclonedx sbom in the predicate. It is not -currently serialized just as its SPDX counterpart. It is an empty -interface, like the generic Statement. -*/ -type CycloneDXStatement struct { - StatementHeader - Predicate interface{} `json:"predicate"` -} - -/* -DSSESigner provides signature generation and validation based on the SSL -Signing Spec: https://github.com/secure-systems-lab/signing-spec -as describe by: https://github.com/MarkLodato/ITE/tree/media-type/ITE/5 -It wraps the generic SSL envelope signer and enforces the correct payload -type both during signature generation and validation. -*/ -type DSSESigner struct { - signer *dsse.EnvelopeSigner -} - -func NewDSSESigner(p ...dsse.SignVerifier) (*DSSESigner, error) { - es, err := dsse.NewEnvelopeSigner(p...) - if err != nil { - return nil, err - } - - return &DSSESigner{ - signer: es, - }, nil -} - -func (s *DSSESigner) SignPayload(body []byte) (*dsse.Envelope, error) { - return s.signer.SignPayload(PayloadType, body) -} - -func (s *DSSESigner) Verify(e *dsse.Envelope) error { - if e.PayloadType != PayloadType { - return ErrInvalidPayloadType - } - - _, err := s.signer.Verify(e) - return err -} diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/runlib.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/runlib.go index 80eef3d7..f0a55d82 100644 --- a/vendor/github.com/in-toto/in-toto-golang/in_toto/runlib.go +++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/runlib.go @@ -4,7 +4,7 @@ import ( "bytes" "errors" "fmt" - "io/ioutil" + "io" "os" "os/exec" "path/filepath" @@ -21,6 +21,8 @@ var ErrSymCycle = errors.New("symlink cycle detected") // ErrUnsupportedHashAlgorithm signals a missing hash mapping in getHashMapping var ErrUnsupportedHashAlgorithm = errors.New("unsupported hash algorithm detected") +var ErrEmptyCommandArgs = errors.New("the command args are empty") + // visitedSymlinks is a hashset that contains all paths that we have visited. var visitedSymlinks Set @@ -42,7 +44,7 @@ normalized to Unix-style line separators (LF) before hashing file contents. 
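Hashing a single file with RecordArtifact, sketched (assumes a go.mod in the working directory):

	package main

	import (
		"fmt"

		"github.com/in-toto/in-toto-golang/in_toto"
	)

	func main() {
		// One entry per requested algorithm, keyed by algorithm name.
		artifact, err := in_toto.RecordArtifact("go.mod", []string{"sha256", "sha512"}, false)
		if err != nil {
			panic(err)
		}
		fmt.Println(artifact["sha256"])
	}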
func RecordArtifact(path string, hashAlgorithms []string, lineNormalization bool) (map[string]interface{}, error) { supportedHashMappings := getHashMapping() // Read file from passed path - contents, err := ioutil.ReadFile(path) + contents, err := os.ReadFile(path) hashedContentsMap := make(map[string]interface{}) if err != nil { return nil, err @@ -90,10 +92,10 @@ the following format: If recording an artifact fails the first return value is nil and the second return value is the error. */ -func RecordArtifacts(paths []string, hashAlgorithms []string, gitignorePatterns []string, lStripPaths []string, lineNormalization bool) (evalArtifacts map[string]interface{}, err error) { +func RecordArtifacts(paths []string, hashAlgorithms []string, gitignorePatterns []string, lStripPaths []string, lineNormalization bool, followSymlinkDirs bool) (evalArtifacts map[string]interface{}, err error) { // Make sure to initialize a fresh hashset for every RecordArtifacts call visitedSymlinks = NewSet() - evalArtifacts, err = recordArtifacts(paths, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization) + evalArtifacts, err = recordArtifacts(paths, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization, followSymlinkDirs) // pass result and error through return evalArtifacts, err } @@ -116,7 +118,7 @@ the following format: If recording an artifact fails the first return value is nil and the second return value is the error. */ -func recordArtifacts(paths []string, hashAlgorithms []string, gitignorePatterns []string, lStripPaths []string, lineNormalization bool) (map[string]interface{}, error) { +func recordArtifacts(paths []string, hashAlgorithms []string, gitignorePatterns []string, lStripPaths []string, lineNormalization bool, followSymlinkDirs bool) (map[string]interface{}, error) { artifacts := make(map[string]interface{}) for _, path := range paths { err := filepath.Walk(path, @@ -158,18 +160,35 @@ func recordArtifacts(paths []string, hashAlgorithms []string, gitignorePatterns if err != nil { return err } + info, err := os.Stat(evalSym) + if err != nil { + return err + } + targetIsDir := false + if info.IsDir() { + if !followSymlinkDirs { + // We don't follow symlinked directories + return nil + } + targetIsDir = true + } // add symlink to visitedSymlinks set // this way, we know which link we have visited already // if we visit a symlink twice, we have detected a symlink cycle visitedSymlinks.Add(path) - // We recursively call RecordArtifacts() to follow + // We recursively call recordArtifacts() to follow // the new path. 
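With the new followSymlinkDirs flag, a symlinked directory is walked and its contents recorded under the symlink's own path. The updated call, sketched (paths are examples; trailing flag order per the new signature):

	package main

	import (
		"fmt"

		"github.com/in-toto/in-toto-golang/in_toto"
	)

	func main() {
		artifacts, err := in_toto.RecordArtifacts(
			[]string{"src"},    // paths to record (example)
			[]string{"sha256"}, // hash algorithms
			nil,                // gitignore patterns
			nil,                // lStripPaths
			false,              // line normalization
			true,               // follow symlinked directories (new)
		)
		if err != nil {
			panic(err)
		}
		for name := range artifacts {
			fmt.Println(name)
		}
	}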
- evalArtifacts, evalErr := recordArtifacts([]string{evalSym}, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization) + evalArtifacts, evalErr := recordArtifacts([]string{evalSym}, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization, followSymlinkDirs) if evalErr != nil { return evalErr } for key, value := range evalArtifacts { - artifacts[key] = value + if targetIsDir { + symlinkPath := filepath.Join(path, strings.TrimPrefix(key, evalSym)) + artifacts[symlinkPath] = value + } else { + artifacts[path] = value + } } return nil } @@ -187,8 +206,7 @@ func recordArtifacts(paths []string, hashAlgorithms []string, gitignorePatterns } } // Check if path is unique - _, existingPath := artifacts[path] - if existingPath { + if _, exists := artifacts[path]; exists { return fmt.Errorf("left stripping has resulted in non unique dictionary key: %s", path) } artifacts[path] = artifact @@ -247,6 +265,9 @@ NOTE: Since stdout and stderr are captured, they cannot be seen during the command execution. */ func RunCommand(cmdArgs []string, runDir string) (map[string]interface{}, error) { + if len(cmdArgs) == 0 { + return nil, ErrEmptyCommandArgs + } cmd := exec.Command(cmdArgs[0], cmdArgs[1:]...) @@ -268,8 +289,8 @@ func RunCommand(cmdArgs []string, runDir string) (map[string]interface{}, error) } // TODO: duplicate stdout, stderr - stdout, _ := ioutil.ReadAll(stdoutPipe) - stderr, _ := ioutil.ReadAll(stderrPipe) + stdout, _ := io.ReadAll(stdoutPipe) + stderr, _ := io.ReadAll(stderrPipe) retVal := waitErrToExitCode(cmd.Wait()) @@ -288,27 +309,27 @@ and materials at the passed materialPaths. The returned link is wrapped in a Metablock object. If command execution or artifact recording fails the first return value is an empty Metablock and the second return value is the error. 
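With the new trailing flags, a call might look like (sketch; key is assumed loaded, and the build command is an example):

	md, err := InTotoRun("build", ".", []string{"src"}, []string{"bin"},
		[]string{"go", "build", "./..."}, key,
		[]string{"sha256"}, nil, nil, false, false, true) // last flag selects a DSSE envelope
	if err != nil {
		return err
	}
	return md.Dump("build.link")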
*/ -func InTotoRun(name string, runDir string, materialPaths []string, productPaths []string, - cmdArgs []string, key Key, hashAlgorithms []string, gitignorePatterns []string, - lStripPaths []string, lineNormalization bool) (Metablock, error) { - var linkMb Metablock - - materials, err := RecordArtifacts(materialPaths, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization) +func InTotoRun(name string, runDir string, materialPaths []string, productPaths []string, cmdArgs []string, key Key, hashAlgorithms []string, gitignorePatterns []string, lStripPaths []string, lineNormalization bool, followSymlinkDirs bool, useDSSE bool) (Metadata, error) { + materials, err := RecordArtifacts(materialPaths, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization, followSymlinkDirs) if err != nil { - return linkMb, err + return nil, err } - byProducts, err := RunCommand(cmdArgs, runDir) - if err != nil { - return linkMb, err + // make sure that we only run RunCommand if cmdArgs is not nil or empty + byProducts := map[string]interface{}{} + if len(cmdArgs) != 0 { + byProducts, err = RunCommand(cmdArgs, runDir) + if err != nil { + return nil, err + } } - products, err := RecordArtifacts(productPaths, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization) + products, err := RecordArtifacts(productPaths, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization, followSymlinkDirs) if err != nil { - return linkMb, err + return nil, err } - linkMb.Signed = Link{ + link := Link{ Type: "link", Name: name, Materials: materials, @@ -318,14 +339,25 @@ func InTotoRun(name string, runDir string, materialPaths []string, productPaths Environment: map[string]interface{}{}, } - linkMb.Signatures = []Signature{} - // We use a new feature from Go1.13 here, to check the key struct. - // IsZero() will return True, if the key hasn't been initialized + if useDSSE { + env := &Envelope{} + if err := env.SetPayload(link); err != nil { + return nil, err + } - // with other values than the default ones. + if !reflect.ValueOf(key).IsZero() { + if err := env.Sign(key); err != nil { + return nil, err + } + } + + return env, nil + } + + linkMb := &Metablock{Signed: link, Signatures: []Signature{}} if !reflect.ValueOf(key).IsZero() { if err := linkMb.Sign(key); err != nil { - return linkMb, err + return nil, err } } @@ -338,14 +370,13 @@ in order to provide evidence for supply chain steps that cannot be carried out by a single command. InTotoRecordStart collects the hashes of the materials before any commands are run, signs the unfinished link, and returns the link.
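A minimal sketch of the start/stop flow, assuming a previously loaded Key named k and purely illustrative paths (the trailing true selects DSSE output):

	start, err := InTotoRecordStart("build", []string{"src/"}, k, []string{"sha256"}, nil, nil, false, false, true)
	// ... run the build commands ...
	finished, err := InTotoRecordStop(start, []string{"out/"}, k, []string{"sha256"}, nil, nil, false, false, true)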
*/ -func InTotoRecordStart(name string, materialPaths []string, key Key, hashAlgorithms, gitignorePatterns []string, lStripPaths []string, lineNormalization bool) (Metablock, error) { - var linkMb Metablock - materials, err := RecordArtifacts(materialPaths, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization) +func InTotoRecordStart(name string, materialPaths []string, key Key, hashAlgorithms, gitignorePatterns []string, lStripPaths []string, lineNormalization bool, followSymlinkDirs bool, useDSSE bool) (Metadata, error) { + materials, err := RecordArtifacts(materialPaths, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization, followSymlinkDirs) if err != nil { - return linkMb, err + return nil, err } - linkMb.Signed = Link{ + link := Link{ Type: "link", Name: name, Materials: materials, @@ -355,9 +386,26 @@ func InTotoRecordStart(name string, materialPaths []string, key Key, hashAlgorit Environment: map[string]interface{}{}, } + if useDSSE { + env := &Envelope{} + if err := env.SetPayload(link); err != nil { + return nil, err + } + + if !reflect.ValueOf(key).IsZero() { + if err := env.Sign(key); err != nil { + return nil, err + } + } + + return env, nil + } + + linkMb := &Metablock{Signed: link, Signatures: []Signature{}} + linkMb.Signatures = []Signature{} if !reflect.ValueOf(key).IsZero() { if err := linkMb.Sign(key); err != nil { - return linkMb, err + return nil, err } } @@ -371,25 +419,39 @@ created by InTotoRecordStart and records the hashes of any products created by commands run between InTotoRecordStart and InTotoRecordStop. The resultant finished link metablock is then signed by the provided key and returned. */ -func InTotoRecordStop(prelimLinkMb Metablock, productPaths []string, key Key, hashAlgorithms, gitignorePatterns []string, lStripPaths []string, lineNormalization bool) (Metablock, error) { - var linkMb Metablock - if err := prelimLinkMb.VerifySignature(key); err != nil { - return linkMb, err +func InTotoRecordStop(prelimLinkEnv Metadata, productPaths []string, key Key, hashAlgorithms, gitignorePatterns []string, lStripPaths []string, lineNormalization bool, followSymlinkDirs bool, useDSSE bool) (Metadata, error) { + if err := prelimLinkEnv.VerifySignature(key); err != nil { + return nil, err } - link, ok := prelimLinkMb.Signed.(Link) + link, ok := prelimLinkEnv.GetPayload().(Link) if !ok { - return linkMb, errors.New("invalid metadata block") + return nil, errors.New("invalid metadata block") } - products, err := RecordArtifacts(productPaths, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization) + products, err := RecordArtifacts(productPaths, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization, followSymlinkDirs) if err != nil { - return linkMb, err + return nil, err } link.Products = products - linkMb.Signed = link + if useDSSE { + env := &Envelope{} + if err := env.SetPayload(link); err != nil { + return nil, err + } + + if !reflect.ValueOf(key).IsZero() { + if err := env.Sign(key); err != nil { + return nil, err + } + } + + return env, nil + } + + linkMb := &Metablock{Signed: link, Signatures: []Signature{}} if !reflect.ValueOf(key).IsZero() { if err := linkMb.Sign(key); err != nil { return linkMb, err diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2/provenance.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2/provenance.go index 5fca7abb..40416e29 100644 --- a/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2/provenance.go +++
b/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2/provenance.go @@ -11,6 +11,13 @@ const ( PredicateSLSAProvenance = "https://slsa.dev/provenance/v0.2" ) +// These are type aliases to the common package, to avoid backwards-incompatible changes. +type ( + DigestSet = common.DigestSet + ProvenanceBuilder = common.ProvenanceBuilder + ProvenanceMaterial = common.ProvenanceMaterial +) + // ProvenancePredicate is the provenance predicate definition. type ProvenancePredicate struct { // Builder identifies the entity that executed the invocation, which is trusted to have diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v1/provenance.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v1/provenance.go new file mode 100644 index 00000000..e849731d --- /dev/null +++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v1/provenance.go @@ -0,0 +1,151 @@ +package v1 + +import ( + "time" + + "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common" +) + +const ( + // PredicateSLSAProvenance represents a build provenance for an artifact. + PredicateSLSAProvenance = "https://slsa.dev/provenance/v1" +) + +// ProvenancePredicate is the provenance predicate definition. +type ProvenancePredicate struct { + // The BuildDefinition describes all of the inputs to the build. The + // accuracy and completeness are implied by runDetails.builder.id. + // + // It SHOULD contain all the information necessary and sufficient to + // initialize the build and begin execution. + BuildDefinition ProvenanceBuildDefinition `json:"buildDefinition"` + + // Details specific to this particular execution of the build. + RunDetails ProvenanceRunDetails `json:"runDetails"` +} + +// ProvenanceBuildDefinition describes the inputs to the build. +type ProvenanceBuildDefinition struct { + // Identifies the template for how to perform the build and interpret the + // parameters and dependencies. + + // The URI SHOULD resolve to a human-readable specification that includes: + // overall description of the build type; schema for externalParameters and + // systemParameters; unambiguous instructions for how to initiate the build + // given this BuildDefinition, and a complete example. + BuildType string `json:"buildType"` + + // The parameters that are under external control, such as those set by a + // user or tenant of the build system. They MUST be complete at SLSA Build + // L3, meaning that there is no additional mechanism for an external + // party to influence the build. (At lower SLSA Build levels, the + // completeness MAY be best effort.) + + // The build system SHOULD be designed to minimize the size and complexity + // of externalParameters, in order to reduce fragility and ease + // verification. Consumers SHOULD have an expectation of what “good” looks + // like; the more information that they need to check, the harder that task + // becomes. + ExternalParameters interface{} `json:"externalParameters"` + + // The parameters that are under the control of the entity represented by + // builder.id. The primary intention of this field is for debugging, + // incident response, and vulnerability management. The values here MAY be + // necessary for reproducing the build. There is no need to verify these + // parameters because the build system is already trusted, and in many cases + // it is not practical to do so. + InternalParameters interface{} `json:"internalParameters,omitempty"` + + // Unordered collection of artifacts needed at build time.
Completeness is + // best effort, at least through SLSA Build L3. For example, if the build + // script fetches and executes “example.com/foo.sh”, which in turn fetches + // “example.com/bar.tar.gz”, then both “foo.sh” and “bar.tar.gz” SHOULD be + // listed here. + ResolvedDependencies []ResourceDescriptor `json:"resolvedDependencies,omitempty"` +} + +// ProvenanceRunDetails includes details specific to a particular execution of a +// build. +type ProvenanceRunDetails struct { + // Identifies the entity that executed the invocation, which is trusted to + // have correctly performed the operation and populated this provenance. + // + // This field is REQUIRED for SLSA Build L1 unless id is implicit from the + // attestation envelope. + Builder Builder `json:"builder"` + + // Metadata about this particular execution of the build. + BuildMetadata BuildMetadata `json:"metadata,omitempty"` + + // Additional artifacts generated during the build that are not considered + // the “output” of the build but that might be needed during debugging or + // incident response. For example, this might reference logs generated + // during the build and/or a digest of the fully evaluated build + // configuration. + // + // In most cases, this SHOULD NOT contain all intermediate files generated + // during the build. Instead, this SHOULD only contain files that are + // likely to be useful later and that cannot be easily reproduced. + Byproducts []ResourceDescriptor `json:"byproducts,omitempty"` +} + +// ResourceDescriptor describes a particular software artifact or resource +// (mutable or immutable). +// See https://github.com/in-toto/attestation/blob/main/spec/v1.0/resource_descriptor.md +type ResourceDescriptor struct { + // A URI used to identify the resource or artifact globally. This field is + // REQUIRED unless either digest or content is set. + URI string `json:"uri,omitempty"` + + // A set of cryptographic digests of the contents of the resource or + // artifact. This field is REQUIRED unless either uri or content is set. + Digest common.DigestSet `json:"digest,omitempty"` + + // Machine-readable identifier for distinguishing between descriptors. + Name string `json:"name,omitempty"` + + // The location of the described resource or artifact, if different from the + // uri. + DownloadLocation string `json:"downloadLocation,omitempty"` + + // The MIME Type (i.e., media type) of the described resource or artifact. + MediaType string `json:"mediaType,omitempty"` + + // The contents of the resource or artifact. This field is REQUIRED unless + // either uri or digest is set. + Content []byte `json:"content,omitempty"` + + // This field MAY be used to provide additional information or metadata + // about the resource or artifact that may be useful to the consumer when + // evaluating the attestation against a policy. + Annotations map[string]interface{} `json:"annotations,omitempty"` +} + +// Builder represents the transitive closure of all the entities that are, by +// necessity, trusted to faithfully run the build and record the provenance. +type Builder struct { + // URI indicating the transitive closure of the trusted builder. + ID string `json:"id"` + + // Version numbers of components of the builder. + Version map[string]string `json:"version,omitempty"` + + // Dependencies used by the orchestrator that are not run within the + // workload and that do not affect the build, but might affect the + // provenance generation or security guarantees.
+ BuilderDependencies []ResourceDescriptor `json:"builderDependencies,omitempty"` +} + +type BuildMetadata struct { + // Identifies this particular build invocation, which can be useful for + // finding associated logs or other ad-hoc analysis. The exact meaning and + // format is defined by builder.id; by default it is treated as opaque and + // case-sensitive. The value SHOULD be globally unique. + InvocationID string `json:"invocationID,omitempty"` + + // The timestamp of when the build started. + StartedOn *time.Time `json:"startedOn,omitempty"` + + // The timestamp of when the build completed. + FinishedOn *time.Time `json:"finishedOn,omitempty"` +} diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/util.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/util.go index 59cba86e..5c36dede 100644 --- a/vendor/github.com/in-toto/in-toto-golang/in_toto/util.go +++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/util.go @@ -1,9 +1,15 @@ package in_toto import ( + "encoding/json" + "errors" "fmt" + "reflect" + "strings" ) +var ErrUnknownMetadataType = errors.New("unknown metadata type encountered: not link or layout") + /* Set represents a data structure for set operations. See `NewSet` for how to create a Set, and available Set receivers for useful set operations. @@ -145,3 +151,40 @@ func (s Set) IsSubSet(subset Set) bool { } return true } + +func loadPayload(payloadBytes []byte) (any, error) { + var payload map[string]any + if err := json.Unmarshal(payloadBytes, &payload); err != nil { + return nil, fmt.Errorf("error decoding payload: %w", err) + } + + if payload["_type"] == "link" { + var link Link + if err := checkRequiredJSONFields(payload, reflect.TypeOf(link)); err != nil { + return nil, fmt.Errorf("error decoding payload: %w", err) + } + + decoder := json.NewDecoder(strings.NewReader(string(payloadBytes))) + decoder.DisallowUnknownFields() + if err := decoder.Decode(&link); err != nil { + return nil, fmt.Errorf("error decoding payload: %w", err) + } + + return link, nil + } else if payload["_type"] == "layout" { + var layout Layout + if err := checkRequiredJSONFields(payload, reflect.TypeOf(layout)); err != nil { + return nil, fmt.Errorf("error decoding payload: %w", err) + } + + decoder := json.NewDecoder(strings.NewReader(string(payloadBytes))) + decoder.DisallowUnknownFields() + if err := decoder.Decode(&layout); err != nil { + return nil, fmt.Errorf("error decoding payload: %w", err) + } + + return layout, nil + } + + return nil, ErrUnknownMetadataType +} diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/verifylib.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/verifylib.go index 2302040f..2564bd47 100644 --- a/vendor/github.com/in-toto/in-toto-golang/in_toto/verifylib.go +++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/verifylib.go @@ -12,7 +12,6 @@ import ( "io" "os" "path" - osPath "path" "path/filepath" "reflect" "regexp" @@ -23,6 +22,8 @@ import ( // ErrInspectionRunDirIsSymlink gets thrown if the runDir is a symlink var ErrInspectionRunDirIsSymlink = errors.New("runDir is a symlink. 
This is a security risk") +var ErrNotLayout = errors.New("verification workflow passed a non-layout") + /* RunInspections iteratively executes the command in the Run field of all inspections of the passed layout, creating unsigned link metadata that records @@ -41,8 +42,8 @@ If executing the inspection command fails, or if the executed command has a non-zero exit code, the first return value is an empty Metablock map and the second return value is the error. */ -func RunInspections(layout Layout, runDir string, lineNormalization bool) (map[string]Metablock, error) { - inspectionMetadata := make(map[string]Metablock) +func RunInspections(layout Layout, runDir string, lineNormalization bool, useDSSE bool) (map[string]Metadata, error) { + inspectionMetadata := make(map[string]Metadata) for _, inspection := range layout.Inspect { @@ -51,14 +52,14 @@ func RunInspections(layout Layout, runDir string, lineNormalization bool) (map[s paths = []string{runDir} } - linkMb, err := InTotoRun(inspection.Name, runDir, paths, paths, - inspection.Run, Key{}, []string{"sha256"}, nil, nil, lineNormalization) + linkEnv, err := InTotoRun(inspection.Name, runDir, paths, paths, + inspection.Run, Key{}, []string{"sha256"}, nil, nil, lineNormalization, false, useDSSE) if err != nil { return nil, err } - retVal := linkMb.Signed.(Link).ByProducts["return-value"] + retVal := linkEnv.GetPayload().(Link).ByProducts["return-value"] if retVal != float64(0) { return nil, fmt.Errorf("inspection command '%s' of inspection '%s'"+ " returned a non-zero value: %d", inspection.Run, inspection.Name, @@ -67,11 +68,11 @@ func RunInspections(layout Layout, runDir string, lineNormalization bool) (map[s // Dump inspection link to cwd using the short link name format linkName := fmt.Sprintf(LinkNameFormatShort, inspection.Name) - if err := linkMb.Dump(linkName); err != nil { + if err := linkEnv.Dump(linkName); err != nil { fmt.Printf("JSON serialization or writing failed: %s", err) } - inspectionMetadata[inspection.Name] = linkMb + inspectionMetadata[inspection.Name] = linkEnv } return inspectionMetadata, nil } @@ -80,10 +81,10 @@ func RunInspections(layout Layout, runDir string, lineNormalization bool) (map[s // type MATCH. See VerifyArtifacts for more details. func verifyMatchRule(ruleData map[string]string, srcArtifacts map[string]interface{}, srcArtifactQueue Set, - itemsMetadata map[string]Metablock) Set { + itemsMetadata map[string]Metadata) Set { consumed := NewSet() // Get destination link metadata - dstLinkMb, exists := itemsMetadata[ruleData["dstName"]] + dstLinkEnv, exists := itemsMetadata[ruleData["dstName"]] if !exists { // Destination link does not exist, rule can't consume any // artifacts @@ -94,9 +95,9 @@ func verifyMatchRule(ruleData map[string]string, var dstArtifacts map[string]interface{} switch ruleData["dstType"] { case "materials": - dstArtifacts = dstLinkMb.Signed.(Link).Materials + dstArtifacts = dstLinkEnv.GetPayload().(Link).Materials case "products": - dstArtifacts = dstLinkMb.Signed.(Link).Products + dstArtifacts = dstLinkEnv.GetPayload().(Link).Products } // cleanup paths in pattern and artifact maps @@ -140,7 +141,7 @@ func verifyMatchRule(ruleData map[string]string, // Construct corresponding destination artifact path, i.e. 
// an optional destination prefix plus the source base path - dstPath := path.Clean(osPath.Join(ruleData["dstPrefix"], srcBasePath)) + dstPath := path.Clean(path.Join(ruleData["dstPrefix"], srcBasePath)) // Try to find the corresponding destination artifact dstArtifact, exists := dstArtifacts[dstPath] @@ -180,7 +181,7 @@ DISALLOW rule to fail overall verification, if artifacts are left in the queue that should have been consumed by preceding rules. */ func VerifyArtifacts(items []interface{}, - itemsMetadata map[string]Metablock) error { + itemsMetadata map[string]Metadata) error { // Verify artifact rules for each item in the layout for _, itemI := range items { // The layout item (interface) must be a Link or an Inspection we are only @@ -207,7 +208,7 @@ func VerifyArtifacts(items []interface{}, } // Use the item's name to extract the corresponding link - srcLinkMb, exists := itemsMetadata[itemName] + srcLinkEnv, exists := itemsMetadata[itemName] if !exists { return fmt.Errorf("VerifyArtifacts could not find metadata"+ " for item '%s', got: '%s'", itemName, itemsMetadata) @@ -215,8 +216,8 @@ func VerifyArtifacts(items []interface{}, // Create shortcuts to materials and products (including hashes) reported // by the item's link, required to verify "match" rules - materials := srcLinkMb.Signed.(Link).Materials - products := srcLinkMb.Signed.(Link).Products + materials := srcLinkEnv.GetPayload().(Link).Materials + products := srcLinkEnv.GetPayload().(Link).Products // All other rules only require the material or product paths (without // hashes). We extract them from the corresponding maps and store them as @@ -364,9 +365,9 @@ Products, the first return value is an empty Metablock map and the second return value is the error. */ func ReduceStepsMetadata(layout Layout, - stepsMetadata map[string]map[string]Metablock) (map[string]Metablock, + stepsMetadata map[string]map[string]Metadata) (map[string]Metadata, error) { - stepsMetadataReduced := make(map[string]Metablock) + stepsMetadataReduced := make(map[string]Metadata) for _, step := range layout.Steps { linksPerStep, ok := stepsMetadata[step.Name] @@ -379,16 +380,16 @@ func ReduceStepsMetadata(layout Layout, // Get the first link (could be any link) for the current step, which will // serve as reference link for below comparisons var referenceKeyID string - var referenceLinkMb Metablock - for keyID, linkMb := range linksPerStep { - referenceLinkMb = linkMb + var referenceLinkEnv Metadata + for keyID, linkEnv := range linksPerStep { + referenceLinkEnv = linkEnv referenceKeyID = keyID break } // Only one link, nothing to reduce, take the reference link if len(linksPerStep) == 1 { - stepsMetadataReduced[step.Name] = referenceLinkMb + stepsMetadataReduced[step.Name] = referenceLinkEnv // Multiple links, reduce but first check } else { @@ -396,11 +397,11 @@ func ReduceStepsMetadata(layout Layout, // TODO: What should we do if there are more links, than the // threshold requires, but not all of them are equal? Right now we would // also error. 
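+ // Before reducing, every remaining link for the step must agree with
+ // the reference link on both materials and products; a single
+ // deviation aborts verification with an error.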
- for keyID, linkMb := range linksPerStep { - if !reflect.DeepEqual(linkMb.Signed.(Link).Materials, - referenceLinkMb.Signed.(Link).Materials) || - !reflect.DeepEqual(linkMb.Signed.(Link).Products, - referenceLinkMb.Signed.(Link).Products) { + for keyID, linkEnv := range linksPerStep { + if !reflect.DeepEqual(linkEnv.GetPayload().(Link).Materials, + referenceLinkEnv.GetPayload().(Link).Materials) || + !reflect.DeepEqual(linkEnv.GetPayload().(Link).Products, + referenceLinkEnv.GetPayload().(Link).Products) { return nil, fmt.Errorf("link '%s' and '%s' have different"+ " artifacts", fmt.Sprintf(LinkNameFormat, step.Name, referenceKeyID), @@ -408,7 +409,7 @@ } } // We haven't errored out, so we can reduce (i.e take the reference link) - stepsMetadataReduced[step.Name] = referenceLinkMb + stepsMetadataReduced[step.Name] = referenceLinkEnv } } return stepsMetadataReduced, nil @@ -421,7 +422,7 @@ command, as per the layout. Soft verification means that, in case a command does not align, a warning is issued. */ func VerifyStepCommandAlignment(layout Layout, - stepsMetadata map[string]map[string]Metablock) { + stepsMetadata map[string]map[string]Metadata) { for _, step := range layout.Steps { linksPerStep, ok := stepsMetadata[step.Name] // We should never get here, layout verification must fail earlier if !ok { panic("Could not get links for step '" + step.Name + "', no link metadata found.") } - for signerKeyID, linkMb := range linksPerStep { + for signerKeyID, linkEnv := range linksPerStep { expectedCommandS := strings.Join(step.ExpectedCommand, " ") - executedCommandS := strings.Join(linkMb.Signed.(Link).Command, " ") + executedCommandS := strings.Join(linkEnv.GetPayload().(Link).Command, " ") if expectedCommandS != executedCommandS { linkName := fmt.Sprintf(LinkNameFormat, step.Name, signerKeyID) @@ -502,11 +503,11 @@ return value is an empty map of Metablock maps and the second return value is the error. */ func VerifyLinkSignatureThesholds(layout Layout, - stepsMetadata map[string]map[string]Metablock, rootCertPool, intermediateCertPool *x509.CertPool) ( - map[string]map[string]Metablock, error) { + stepsMetadata map[string]map[string]Metadata, rootCertPool, intermediateCertPool *x509.CertPool) ( + map[string]map[string]Metadata, error) { // This will store links with valid signature from an authorized functionary // for all steps - stepsMetadataVerified := make(map[string]map[string]Metablock) + stepsMetadataVerified := make(map[string]map[string]Metadata) // Try to find enough (>= threshold) links each with a valid signature from // distinct authorized functionaries for each step @@ -515,7 +516,7 @@ func VerifyLinkSignatureThesholds(layout Layout, // This will store links with valid signature from an authorized // functionary for the given step - linksPerStepVerified := make(map[string]Metablock) + linksPerStepVerified := make(map[string]Metadata) // Check if there are any links at all for the given step linksPerStep, ok := stepsMetadata[step.Name] @@ -528,12 +529,12 @@ // verification passes. Only good links are stored, to verify thresholds // below.
isAuthorizedSignature := false - for signerKeyID, linkMb := range linksPerStep { + for signerKeyID, linkEnv := range linksPerStep { for _, authorizedKeyID := range step.PubKeys { if signerKeyID == authorizedKeyID { if verifierKey, ok := layout.Keys[authorizedKeyID]; ok { - if err := linkMb.VerifySignature(verifierKey); err == nil { - linksPerStepVerified[signerKeyID] = linkMb + if err := linkEnv.VerifySignature(verifierKey); err == nil { + linksPerStepVerified[signerKeyID] = linkEnv isAuthorizedSignature = true break } @@ -544,7 +545,7 @@ func VerifyLinkSignatureThesholds(layout Layout, // If the signer's key wasn't in our step's pubkeys array, check the cert pool to // see if the key is known to us. if !isAuthorizedSignature { - sig, err := linkMb.GetSignatureForKeyID(signerKeyID) + sig, err := linkEnv.GetSignatureForKeyID(signerKeyID) if err != nil { stepErr = err continue @@ -563,13 +564,13 @@ func VerifyLinkSignatureThesholds(layout Layout, continue } - err = linkMb.VerifySignature(cert) + err = linkEnv.VerifySignature(cert) if err != nil { stepErr = err continue } - linksPerStepVerified[signerKeyID] = linkMb + linksPerStepVerified[signerKeyID] = linkEnv } } @@ -614,30 +615,30 @@ ignored. Only a preliminary threshold check is performed, that is, if there aren't at least Threshold links for any given step, the first return value is an empty map of Metablock maps and the second return value is the error. */ -func LoadLinksForLayout(layout Layout, linkDir string) (map[string]map[string]Metablock, error) { - stepsMetadata := make(map[string]map[string]Metablock) +func LoadLinksForLayout(layout Layout, linkDir string) (map[string]map[string]Metadata, error) { + stepsMetadata := make(map[string]map[string]Metadata) for _, step := range layout.Steps { - linksPerStep := make(map[string]Metablock) + linksPerStep := make(map[string]Metadata) // Since we can verify against certificates belonging to a CA, we need to // load any possible links - linkFiles, err := filepath.Glob(osPath.Join(linkDir, fmt.Sprintf(LinkGlobFormat, step.Name))) + linkFiles, err := filepath.Glob(path.Join(linkDir, fmt.Sprintf(LinkGlobFormat, step.Name))) if err != nil { return nil, err } for _, linkPath := range linkFiles { - var linkMb Metablock - if err := linkMb.Load(linkPath); err != nil { + linkEnv, err := LoadMetadata(linkPath) + if err != nil { continue } // To get the full key from the metadata's signatures, we have to check // for one with the same short id... signerShortKeyID := strings.TrimSuffix(strings.TrimPrefix(filepath.Base(linkPath), step.Name+"."), ".link") - for _, sig := range linkMb.Signatures { + for _, sig := range linkEnv.Sigs() { if strings.HasPrefix(sig.KeyID, signerShortKeyID) { - linksPerStep[sig.KeyID] = linkMb + linksPerStep[sig.KeyID] = linkEnv break } } @@ -677,14 +678,14 @@ Signatures and keys are associated by key id. If the key map is empty, or the Metablock's Signature field does not have a signature for one or more of the passed keys, or a matching signature is invalid, an error is returned. 
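Verification is delegated to the passed Metadata container, so layouts wrapped either as a classic Metablock or as a DSSE Envelope are handled uniformly.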
*/ -func VerifyLayoutSignatures(layoutMb Metablock, +func VerifyLayoutSignatures(layoutEnv Metadata, layoutKeys map[string]Key) error { if len(layoutKeys) < 1 { return fmt.Errorf("layout verification requires at least one key") } for _, key := range layoutKeys { - if err := layoutMb.VerifySignature(key); err != nil { + if err := layoutEnv.VerifySignature(key); err != nil { return err } } @@ -700,29 +701,35 @@ NOTE: The assumption is that the steps mentioned in the layout are to be performed sequentially. So, the first step mentioned in the layout denotes what comes into the supply chain and the last step denotes what goes out. */ -func GetSummaryLink(layout Layout, stepsMetadataReduced map[string]Metablock, - stepName string) (Metablock, error) { +func GetSummaryLink(layout Layout, stepsMetadataReduced map[string]Metadata, + stepName string, useDSSE bool) (Metadata, error) { var summaryLink Link - var result Metablock if len(layout.Steps) > 0 { firstStepLink := stepsMetadataReduced[layout.Steps[0].Name] lastStepLink := stepsMetadataReduced[layout.Steps[len(layout.Steps)-1].Name] - summaryLink.Materials = firstStepLink.Signed.(Link).Materials + summaryLink.Materials = firstStepLink.GetPayload().(Link).Materials summaryLink.Name = stepName - summaryLink.Type = firstStepLink.Signed.(Link).Type + summaryLink.Type = firstStepLink.GetPayload().(Link).Type - summaryLink.Products = lastStepLink.Signed.(Link).Products - summaryLink.ByProducts = lastStepLink.Signed.(Link).ByProducts + summaryLink.Products = lastStepLink.GetPayload().(Link).Products + summaryLink.ByProducts = lastStepLink.GetPayload().(Link).ByProducts // Using the last command of the sublayout as the command // of the summary link can be misleading. Is it necessary to // include all the commands executed as part of sublayout? - summaryLink.Command = lastStepLink.Signed.(Link).Command + summaryLink.Command = lastStepLink.GetPayload().(Link).Command } - result.Signed = summaryLink + if useDSSE { + env := &Envelope{} + if err := env.SetPayload(summaryLink); err != nil { + return nil, err + } - return result, nil + return env, nil + } + + return &Metablock{Signed: summaryLink}, nil } /* @@ -731,11 +738,11 @@ so, recursively resolves it and replaces it with a summary link summarizing the steps carried out in the sublayout. */ func VerifySublayouts(layout Layout, - stepsMetadataVerified map[string]map[string]Metablock, - superLayoutLinkPath string, intermediatePems [][]byte, lineNormalization bool) (map[string]map[string]Metablock, error) { + stepsMetadataVerified map[string]map[string]Metadata, + superLayoutLinkPath string, intermediatePems [][]byte, lineNormalization bool) (map[string]map[string]Metadata, error) { for stepName, linkData := range stepsMetadataVerified { for keyID, metadata := range linkData { - if _, ok := metadata.Signed.(Layout); ok { + if _, ok := metadata.GetPayload().(Layout); ok { layoutKeys := make(map[string]Key) layoutKeys[keyID] = layout.Keys[keyID] @@ -861,55 +868,60 @@ Metablock object. NOTE: Artifact rules of type "create", "modify" and "delete" are currently not supported. 
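If the layout arrives as a DSSE Envelope, DSSE use is inferred from the container type, and the inspection links as well as the summary link are produced in DSSE form too.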
*/ -func InTotoVerify(layoutMb Metablock, layoutKeys map[string]Key, +func InTotoVerify(layoutEnv Metadata, layoutKeys map[string]Key, linkDir string, stepName string, parameterDictionary map[string]string, intermediatePems [][]byte, lineNormalization bool) ( - Metablock, error) { - - var summaryLink Metablock - var err error + Metadata, error) { // Verify root signatures - if err := VerifyLayoutSignatures(layoutMb, layoutKeys); err != nil { - return summaryLink, err + if err := VerifyLayoutSignatures(layoutEnv, layoutKeys); err != nil { + return nil, err + } + + useDSSE := false + if _, ok := layoutEnv.(*Envelope); ok { + useDSSE = true } - // Extract the layout from its Metablock container (for further processing) - layout := layoutMb.Signed.(Layout) + // Extract the layout from its Metadata container (for further processing) + layout, ok := layoutEnv.GetPayload().(Layout) + if !ok { + return nil, ErrNotLayout + } // Verify layout expiration if err := VerifyLayoutExpiration(layout); err != nil { - return summaryLink, err + return nil, err } // Substitute parameters in layout - layout, err = SubstituteParameters(layout, parameterDictionary) + layout, err := SubstituteParameters(layout, parameterDictionary) if err != nil { - return summaryLink, err + return nil, err } rootCertPool, intermediateCertPool, err := LoadLayoutCertificates(layout, intermediatePems) if err != nil { - return summaryLink, err + return nil, err } // Load links for layout stepsMetadata, err := LoadLinksForLayout(layout, linkDir) if err != nil { - return summaryLink, err + return nil, err } // Verify link signatures stepsMetadataVerified, err := VerifyLinkSignatureThesholds(layout, stepsMetadata, rootCertPool, intermediateCertPool) if err != nil { - return summaryLink, err + return nil, err } // Verify and resolve sublayouts stepsSublayoutVerified, err := VerifySublayouts(layout, stepsMetadataVerified, linkDir, intermediatePems, lineNormalization) if err != nil { - return summaryLink, err + return nil, err } // Verify command alignment (WARNING only) @@ -922,18 +934,18 @@ func InTotoVerify(layoutMb Metablock, layoutKeys map[string]Key, stepsMetadataReduced, err := ReduceStepsMetadata(layout, stepsSublayoutVerified) if err != nil { - return summaryLink, err + return nil, err } // Verify artifact rules if err = VerifyArtifacts(layout.stepsAsInterfaceSlice(), stepsMetadataReduced); err != nil { - return summaryLink, err + return nil, err } - inspectionMetadata, err := RunInspections(layout, "", lineNormalization) + inspectionMetadata, err := RunInspections(layout, "", lineNormalization, useDSSE) if err != nil { - return summaryLink, err + return nil, err } // Add steps metadata to inspection metadata, because inspection artifact @@ -944,51 +956,48 @@ func InTotoVerify(layoutMb Metablock, layoutKeys map[string]Key, if err = VerifyArtifacts(layout.inspectAsInterfaceSlice(), inspectionMetadata); err != nil { - return summaryLink, err + return nil, err } - summaryLink, err = GetSummaryLink(layout, stepsMetadataReduced, stepName) + summaryLink, err := GetSummaryLink(layout, stepsMetadataReduced, stepName, useDSSE) if err != nil { - return summaryLink, err + return nil, err } return summaryLink, nil } /* -InTotoVerifyWithDirectory provides the same functionality as IntotoVerify, but +InTotoVerifyWithDirectory provides the same functionality as InTotoVerify, but adds the possibility to select a local directory from where the inspections are run. 
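Before any verification work, runDir is sanity-checked: it must exist, must not be a symlink, and must be writable (see the checks at the top of the function body).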
*/ -func InTotoVerifyWithDirectory(layoutMb Metablock, layoutKeys map[string]Key, +func InTotoVerifyWithDirectory(layoutEnv Metadata, layoutKeys map[string]Key, linkDir string, runDir string, stepName string, parameterDictionary map[string]string, intermediatePems [][]byte, lineNormalization bool) ( - Metablock, error) { - - var summaryLink Metablock - var err error + Metadata, error) { // runDir sanity checks // check if path exists info, err := os.Stat(runDir) if err != nil { - return Metablock{}, err + return nil, err } // check if runDir is a symlink if info.Mode()&os.ModeSymlink == os.ModeSymlink { - return Metablock{}, ErrInspectionRunDirIsSymlink + return nil, ErrInspectionRunDirIsSymlink } // check if runDir is writable and a directory err = isWritable(runDir) if err != nil { - return Metablock{}, err + return nil, err } // check if runDir is empty (we do not want to overwrite files) // We abuse File.Readdirnames for this action. f, err := os.Open(runDir) if err != nil { - return Metablock{}, err + return nil, err } defer f.Close() // We use Readdirnames(1) for performance reasons, one child node @@ -996,55 +1005,63 @@ func InTotoVerifyWithDirectory(layoutMb Metablock, layoutKeys map[string]Key, _, err = f.Readdirnames(1) // if io.EOF gets returned as error the directory is empty if err == io.EOF { - return Metablock{}, err + return nil, err } err = f.Close() if err != nil { - return Metablock{}, err + return nil, err } // Verify root signatures - if err := VerifyLayoutSignatures(layoutMb, layoutKeys); err != nil { - return summaryLink, err + if err := VerifyLayoutSignatures(layoutEnv, layoutKeys); err != nil { + return nil, err } - // Extract the layout from its Metablock container (for further processing) - layout := layoutMb.Signed.(Layout) + useDSSE := false + if _, ok := layoutEnv.(*Envelope); ok { + useDSSE = true + } + + // Extract the layout from its Metadata container (for further processing) + layout, ok := layoutEnv.GetPayload().(Layout) + if !ok { + return nil, ErrNotLayout + } // Verify layout expiration if err := VerifyLayoutExpiration(layout); err != nil { - return summaryLink, err + return nil, err } // Substitute parameters in layout layout, err = SubstituteParameters(layout, parameterDictionary) if err != nil { - return summaryLink, err + return nil, err } rootCertPool, intermediateCertPool, err := LoadLayoutCertificates(layout, intermediatePems) if err != nil { - return summaryLink, err + return nil, err } // Load links for layout stepsMetadata, err := LoadLinksForLayout(layout, linkDir) if err != nil { - return summaryLink, err + return nil, err } // Verify link signatures stepsMetadataVerified, err := VerifyLinkSignatureThesholds(layout, stepsMetadata, rootCertPool, intermediateCertPool) if err != nil { - return summaryLink, err + return nil, err } // Verify and resolve sublayouts stepsSublayoutVerified, err := VerifySublayouts(layout, stepsMetadataVerified, linkDir, intermediatePems, lineNormalization) if err != nil { - return summaryLink, err + return nil, err } // Verify command alignment (WARNING only) @@ -1057,18 +1074,18 @@ func InTotoVerifyWithDirectory(layoutMb Metablock, layoutKeys map[string]Key, stepsMetadataReduced, err := ReduceStepsMetadata(layout, stepsSublayoutVerified) if err != nil { - return summaryLink, err + return nil, err } // Verify artifact rules if err = VerifyArtifacts(layout.stepsAsInterfaceSlice(), stepsMetadataReduced); err != nil { - return summaryLink, err + return nil, err } - inspectionMetadata, err := RunInspections(layout, 
runDir, lineNormalization) + inspectionMetadata, err := RunInspections(layout, runDir, lineNormalization, useDSSE) if err != nil { - return summaryLink, err + return nil, err } // Add steps metadata to inspection metadata, because inspection artifact @@ -1079,12 +1096,12 @@ func InTotoVerifyWithDirectory(layoutMb Metablock, layoutKeys map[string]Key, if err = VerifyArtifacts(layout.inspectAsInterfaceSlice(), inspectionMetadata); err != nil { - return summaryLink, err + return nil, err } - summaryLink, err = GetSummaryLink(layout, stepsMetadataReduced, stepName) + summaryLink, err := GetSummaryLink(layout, stepsMetadataReduced, stepName, useDSSE) if err != nil { - return summaryLink, err + return nil, err } return summaryLink, nil diff --git a/vendor/github.com/saferwall/pe/.editorconfig b/vendor/github.com/saferwall/pe/.editorconfig new file mode 100644 index 00000000..ddf181f9 --- /dev/null +++ b/vendor/github.com/saferwall/pe/.editorconfig @@ -0,0 +1,23 @@ +; https://editorconfig.org/ + +root = true + +[*] +insert_final_newline = true +charset = utf-8 +trim_trailing_whitespace = true +indent_style = space +indent_size = 2 + +[{Makefile,go.mod,go.sum,*.go,.gitmodules}] +indent_style = tab +indent_size = 4 + +[*.md] +indent_size = 4 +trim_trailing_whitespace = false + +eclint_indent_style = unset + +[Dockerfile] +indent_size = 4 \ No newline at end of file diff --git a/vendor/github.com/saferwall/pe/.gitattributes b/vendor/github.com/saferwall/pe/.gitattributes new file mode 100644 index 00000000..cabbb173 --- /dev/null +++ b/vendor/github.com/saferwall/pe/.gitattributes @@ -0,0 +1,16 @@ +# Treat all files in the Go repo as binary, with no git magic updating +# line endings. This produces predictable results in different environments. +# +# Windows users contributing to Go will need to use a modern version +# of git and editors capable of LF line endings. +# +# Windows .bat files are known to have multiple bugs when run with LF +# endings, and so they are checked in with CRLF endings, with a test +# in test/winbatch.go to catch problems. (See golang.org/issue/37791.) +# +# We'll prevent accidental CRLF line endings from entering the repo +# via the git-codereview gofmt checks and tests. +# +# See golang.org/issue/9281. + +* -text diff --git a/vendor/github.com/saferwall/pe/.gitignore b/vendor/github.com/saferwall/pe/.gitignore new file mode 100644 index 00000000..b4a68901 --- /dev/null +++ b/vendor/github.com/saferwall/pe/.gitignore @@ -0,0 +1,29 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out +coverage + +# Dependency directories (remove the comment below to include it) +vendor/ + +# Code editors configs +.idea/ +.vscode/launch.json + +# Go fuzz artefact +crashers/ +suppressions/ + +# Log files +*.log + +test/testdata/ \ No newline at end of file diff --git a/vendor/github.com/saferwall/pe/CHANGELOG.md b/vendor/github.com/saferwall/pe/CHANGELOG.md new file mode 100644 index 00000000..0e6eb6e2 --- /dev/null +++ b/vendor/github.com/saferwall/pe/CHANGELOG.md @@ -0,0 +1,150 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
+ +## [1.4.0] - Unreleased + +### Added + +- Permit more granular control over which data directories are parsed by [rabbitstack](https://github.com/rabbitstack) [#72](https://github.com/saferwall/pe/pull/72). +- Support parsing the different `retpoline` types: Imported Address, Indirect Branch and Switchable retpoline [#70](https://github.com/saferwall/pe/pull/70). +- Unit tests for load config directory [#70](https://github.com/saferwall/pe/pull/69). +- Unit tests for TLS directory [#69](https://github.com/saferwall/pe/pull/69). +- Unit tests for debug directory [#68](https://github.com/saferwall/pe/pull/68). +- Unit tests for resource directory and add functions to prettify resource (sub)languages [#66](https://github.com/saferwall/pe/pull/66). +- Annotate PE structures with JSON tags during JSON encoding [#64](https://github.com/saferwall/pe/pull/64), [#65](https://github.com/saferwall/pe/pull/65) and [#67](https://github.com/saferwall/pe/pull/67). +- Improve PE dumper to print imports and unit test parsing imports data directory [#63](https://github.com/saferwall/pe/pull/63). +- Improve PE dumper to print section headers [#62](https://github.com/saferwall/pe/pull/62). +- Improve PE dumper to print PE headers [#61](https://github.com/saferwall/pe/pull/61). +- Add `SerialNumber`, `SignatureAlgorithm` and `PubKeyAlgorithm` to the `CertInfo` [#60](https://github.com/saferwall/pe/pull/60). +- Option to disable certificate validation [#59](https://github.com/saferwall/pe/pull/59). +- Improve PE dumper to print exceptions [#57](https://github.com/saferwall/pe/pull/57). +- Unit tests for debug directory [#49](https://github.com/saferwall/pe/pull/49). + +### Fixed + +- Bug while iterating over VolatileInfoRangeTable entries [#70](https://github.com/saferwall/pe/pull/70). +- Bug while iterating (additional padding and loop condition) over DVRT relocation block entries [#70](https://github.com/saferwall/pe/pull/70). +- Bug while appending (twice) Control Flow Guard IAT entries [#70](https://github.com/saferwall/pe/pull/70). +- Bug while parsing `POGO` debug entry types [#68](https://github.com/saferwall/pe/pull/68). +- `Authentihash()` for instances w/o fd thanks to [flanfly](https://github.com/flanfly) [#47](https://github.com/saferwall/pe/pull/47). + +### Changed + +- Some fields have been renamed for consistency: + - `RichHeader.XorKey` -> `RichHeader.XORKey`. + - Any `Rva` substring -> `RVA` and any `Iat` substring -> `IAT`. + - And many more. +- Some fields used internally in imports parsing were changed from a slice of pointers to a simple slice. +- Certificate.Content changed from `*pkcs7.PKCS7` to `pkcs7.PKCS7`. +- `Section.Entropy` changed from `float64` to `*float64` to distinguish between the case when the section entropy is equal to zero and the case when the entropy is equal to nil - meaning that it was never calculated. +- Remove `cobra` dependency from `cmd/pedumper` [#56](https://github.com/saferwall/pe/pull/56). + +## [1.3.0] - 2022-08-04 + +### Added + +- Authenticode signature validation in Windows [#43](https://github.com/saferwall/pe/pull/43). +- File information structure that helps to identify what parts of the PE file we have, such as `HasImports()` [#42](https://github.com/saferwall/pe/pull/42). +- Calculate Rich header hash thanks to [wanglei-coder](https://github.com/wanglei-coder) [#38](https://github.com/saferwall/pe/pull/38). +- PE Overlay thanks to [wanglei-coder](https://github.com/wanglei-coder) [#37](https://github.com/saferwall/pe/pull/37).
+- Unit tests for DOS header parsing. +- Unit tests for CLR directory [#34](https://github.com/saferwall/pe/pull/28). +- Unit tests for Rich header [#33](https://github.com/saferwall/pe/pull/33). + +### Changed + +- Do not return an error when parsing a data directory fails [#45](https://github.com/saferwall/pe/pull/45). +- Remove pointers from fields in the main `File` structure [#44](https://github.com/saferwall/pe/pull/44). + +### Fixed + +- Fix getting section data repeatedly thanks to [wanglei-coder](https://github.com/wanglei-coder) [#41](https://github.com/saferwall/pe/pull/41). +- Fix `adjustSectionAlignment()` thanks to [wanglei-coder](https://github.com/wanglei-coder) [#40](https://github.com/saferwall/pe/pull/40). +- Fix authentihash calculation thanks to [wanglei-coder](https://github.com/wanglei-coder) [#38](https://github.com/saferwall/pe/pull/38). +- Memory leak in `Close()` function that missed a call to `unmap()` thanks to [Mamba24L8](https://github.com/Mamba24L8). + +## [1.2.0] - 2022-06-12 + +### Added + +- Unit tests for export directory [#28](https://github.com/saferwall/pe/pull/28). +- Add a new option to allow usage of a custom logger [#24](https://github.com/saferwall/pe/pull/24). +- Unit tests for delay imports directory [#23](https://github.com/saferwall/pe/pull/23). +- Allow access to the raw certificates content [#22](https://github.com/saferwall/pe/pull/22). +- Unit tests for security directory [#19](https://github.com/saferwall/pe/pull/19). +- Unit tests for bound imports directory [#18](https://github.com/saferwall/pe/pull/18). + +### Changed + +- Make `GetData()`, `GetRVAFromOffset()` and `GetOffsetFromRva()` helper routines public. +- Keep parsing in exports directories even when anomalies are found [#26](https://github.com/saferwall/pe/pull/26). + +### Fixed + +- Incorrect check for `skipCertVerification` in security directory. +- Null pointer dereference in `GetExportFunctionByRVA()` and out of bounds when calculating `symbolAddress` in export directory [#28](https://github.com/saferwall/pe/pull/28). +- Reading unicode string from resource directory `readUnicodeStringAtRVA()` [#26](https://github.com/saferwall/pe/pull/26). +- Null pointer dereference in resource directory parsing [#25](https://github.com/saferwall/pe/pull/25). +- Imphash calculation [#17](https://github.com/saferwall/pe/pull/17) thanks to [@secDre4mer](https://github.com/secDre4mer). +- Null certificate header in security directory [#19](https://github.com/saferwall/pe/pull/19). + +## [1.1.0] - 2021-12-20 + +### Added + +- Add .editorconfig and .vscode config. +- Add github action CI workflow to test the package. +- Add few badges for the README.md to track build status, coverage and code quality. +- Introduce a new API to parse a file from a byte array. +- Parse .net metadata Module table. +- Parse .net metadata stream headers and metadata tables stream header. +- Add cmd/pedumper to illustrate how to use the library. +- Add unit test for relocation, exception, security, symbol, file, nt header, section and helper files. +- Add an option `New()` to customize max of relocations entries and COFF symbols to parse. + +### Changed + +- Remove unneeded break statements & lowercase error messages and anomalies. +- Make COFF entry in File struct a pointer. +- Remove unsafe pointer usage from resource directory. +- Do not return an error when COFF symbol table is not found. +- License from Apache 2 to MIT. + +### Fixed + +- Probe for invalid Nt Header offset. +- Fix authenticode hash calculation.
+- Compile correctly on 32-bit thanks to @Max Altgelt. +- COFF symbol table `readASCIIStringAtOffset()` out of bounds exception. +- Probe for optional header section alignment != 0. +- Fix infinite loop in exception unwind code parsing. +- Fix last data directory entry is reserved and must be zero. +- Safe read of global pointer register. + +## [1.0.0] - 2021-03-04 (Initial Release) + +- Works with PE32/PE32+ file format. +- Supports Intel x86/AMD64/ARM7/ARM7 Thumb/ARM8-64/IA64/CHPE architectures. +- MS DOS header. +- Rich Header (calculate checksum). +- NT Header (file header + optional header). +- COFF symbol table and string table. +- Sections headers + entropy calculation. +- Data directories: + - Import Table + ImpHash calculation. + - Export Table. + - Resource Table. + - Exceptions Table. + - Security Table + Authentihash calculation. + - Relocations Table. + - Debug Table (CODEVIEW, POGO, VC FEATURE, REPRO, FPO, EXDLL CHARACTERISTICS debug types). + - TLS Table. + - Load Config Directory (SEH, GFID, GIAT, Guard LongJumps, CHPE, Dynamic Value Reloc Table, Enclave Configuration, Volatile Metadata tables). + - Bound Import Table. + - Delay Import Table. + - COM Table (CLR Metadata Header, Metadata Table Streams). + - Report several anomalies. diff --git a/vendor/github.com/saferwall/pe/CODE_OF_CONDUCT.md b/vendor/github.com/saferwall/pe/CODE_OF_CONDUCT.md new file mode 100644 index 00000000..f16913ed --- /dev/null +++ b/vendor/github.com/saferwall/pe/CODE_OF_CONDUCT.md @@ -0,0 +1,46 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community.
Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at report@saferwall.com. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/saferwall/pe/LICENSE b/vendor/github.com/saferwall/pe/LICENSE new file mode 100644 index 00000000..98b9f513 --- /dev/null +++ b/vendor/github.com/saferwall/pe/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2021 Saferwall + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
\ No newline at end of file diff --git a/vendor/github.com/saferwall/pe/README.md b/vendor/github.com/saferwall/pe/README.md new file mode 100644 index 00000000..5abb1fe0 --- /dev/null +++ b/vendor/github.com/saferwall/pe/README.md @@ -0,0 +1,265 @@ +Saferwall logo + +# Portable Executable Parser + +[![GoDoc](http://godoc.org/github.com/saferwall/pe?status.svg)](https://pkg.go.dev/github.com/saferwall/pe) ![Go Version](https://img.shields.io/badge/go%20version-%3E=1.15-61CFDD.svg) [![Report Card](https://goreportcard.com/badge/github.com/saferwall/pe)](https://goreportcard.com/report/github.com/saferwall/pe) [![codecov](https://codecov.io/gh/saferwall/pe/branch/main/graph/badge.svg?token=W7WTOUZLRY)](https://codecov.io/gh/saferwall/pe) ![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/saferwall/pe/ci.yaml?branch=main) + + +**pe** is a go package for parsing the [portable executable](https://docs.microsoft.com/en-us/windows/win32/debug/pe-format) file format. This package was designed with malware analysis in mind, and to be resistant to PE malformations. + +## Table of content + +- [Portable Executable Parser](#portable-executable-parser) + - [Table of content](#table-of-content) + - [Features](#features) + - [Installing](#installing) + - [Using the library](#using-the-library) + - [PE Header](#pe-header) + - [Rich Header](#rich-header) + - [Iterating over sections](#iterating-over-sections) + - [Roadmap](#roadmap) + - [Fuzz Testing](#fuzz-testing) + - [References](#references) + +## Features + +- Works with PE32/PE32+ file format. +- Supports Intel x86/AMD64/ARM7/ARM7 Thumb/ARM8-64/IA64/CHPE architectures. +- MS DOS header. +- Rich Header (calculate checksum and hash). +- NT Header (file header + optional header). +- COFF symbol table and string table. +- Sections headers + entropy calculation. +- Data directories + - Import Table + ImpHash calculation. + - Export Table + - Resource Table + - Exceptions Table + - Security Table + Authentihash calculation. + - Relocations Table + - Debug Table (CODEVIEW, POGO, VC FEATURE, REPRO, FPO, EXDLL CHARACTERISTICS debug types). + - TLS Table + - Load Config Directory (SEH, GFID, GIAT, Guard LongJumps, CHPE, Dynamic Value Reloc Table, Enclave Configuration, Volatile Metadata tables). + - Bound Import Table + - Delay Import Table + - COM Table (CLR Metadata Header, Metadata Table Streams) +- Report several anomalies + +## Installing + +Using this go package is easy. First, use `go get` to install the latest version of the library. This command will install the `pedumper` executable along with the library and its dependencies: + + go get -u github.com/saferwall/pe + +Next, include `pe` package in your application: + +```go +import "github.com/saferwall/pe" +``` + +## Using the library + +```go +package main + +import ( + "log" + + peparser "github.com/saferwall/pe" +) + +func main() { + filename := "C:\\Binaries\\notepad.exe" + pe, err := peparser.New(filename, &peparser.Options{}) + if err != nil { + log.Fatalf("Error while opening file: %s, reason: %v", filename, err) + } + + err = pe.Parse() + if err != nil { + log.Fatalf("Error while parsing file: %s, reason: %v", filename, err) + } +} +``` + +Start by instantiating a pe object by calling the `New()` method, which takes the path of the file to be parsed and some optional options. + +Afterwards, a call to the `Parse()` method will give you access to all the different parts of the PE format, directly accessible for use.
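For instance, once `Parse()` has returned, the parsed parts can be read straight off the `File` value. A minimal sketch, using only fields from the struct definition shown below:

```go
fmt.Printf("64-bit image: %v\n", pe.Is64)
fmt.Printf("sections: %d, imports: %d\n", len(pe.Sections), len(pe.Imports))
for _, anomaly := range pe.Anomalies {
	fmt.Println("anomaly:", anomaly)
}
```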
+Here is the definition of the struct:
+
+```go
+type File struct {
+	DOSHeader    ImageDOSHeader
+	RichHeader   RichHeader
+	NtHeader     ImageNtHeader
+	COFF         COFF
+	Sections     []Section
+	Imports      []Import
+	Export       Export
+	Debugs       []DebugEntry
+	Relocations  []Relocation
+	Resources    ResourceDirectory
+	TLS          TLSDirectory
+	LoadConfig   LoadConfig
+	Exceptions   []Exception
+	Certificates Certificate
+	DelayImports []DelayImport
+	BoundImports []BoundImportDescriptorData
+	GlobalPtr    uint32
+	CLR          CLRData
+	IAT          []IATEntry
+	Header       []byte
+	data         mmap.MMap
+	closer       io.Closer
+	Is64         bool
+	Is32         bool
+	Anomalies    []string
+	size         uint32
+	f            *os.File
+	opts         *Options
+}
+```
+
+### PE Header
+
+As mentioned before, all members of the struct are directly accessible (no getters). Additionally, the field types are preserved as the spec defines them, which means that if you need to show the prettified version of an `int` field, you have to call the corresponding helper function.
+
+```go
+fmt.Printf("Magic is: 0x%x\n", pe.DOSHeader.Magic)
+fmt.Printf("Signature is: 0x%x\n", pe.NtHeader.Signature)
+fmt.Printf("Machine is: 0x%x, Meaning: %s\n", pe.NtHeader.FileHeader.Machine, pe.PrettyMachineType())
+```
+
+Output:
+```
+Magic is: 0x5a4d
+Signature is: 0x4550
+Machine is: 0x8664, Meaning: x64
+```
+
+### Rich Header
+
+Example:
+```go
+richHeader, _ := json.Marshal(pe.RichHeader)
+fmt.Print(prettyPrint(richHeader))
+```
+
+Output:
+```json
+{
+    "XorKey": 2796214951,
+    "CompIDs": [
+        {
+            "MinorCV": 27412,
+            "ProdID": 257,
+            "Count": 4,
+            "Unmasked": 16870164
+        },
+        {
+            "MinorCV": 30729,
+            "ProdID": 147,
+            "Count": 193,
+            "Unmasked": 9664521
+        },
+        {
+            "MinorCV": 0,
+            "ProdID": 1,
+            "Count": 1325,
+            "Unmasked": 65536
+        },
+        {
+            "MinorCV": 27412,
+            "ProdID": 260,
+            "Count": 9,
+            "Unmasked": 17066772
+        },
+        {
+            "MinorCV": 27412,
+            "ProdID": 259,
+            "Count": 3,
+            "Unmasked": 17001236
+        },
+        {
+            "MinorCV": 27412,
+            "ProdID": 256,
+            "Count": 1,
+            "Unmasked": 16804628
+        },
+        {
+            "MinorCV": 27412,
+            "ProdID": 269,
+            "Count": 209,
+            "Unmasked": 17656596
+        },
+        {
+            "MinorCV": 27412,
+            "ProdID": 255,
+            "Count": 1,
+            "Unmasked": 16739092
+        },
+        {
+            "MinorCV": 27412,
+            "ProdID": 258,
+            "Count": 1,
+            "Unmasked": 16935700
+        }
+    ],
+    "DansOffset": 128,
+    "Raw": "47vE9afaqqan2qqmp9qqprOxq6ej2qqmrqI5pmbaqqan2qumit+qprOxrqeu2qqms7Gpp6TaqqazsaqnptqqprOxp6d22qqms7FVpqbaqqazsainptqqplJpY2in2qqm"
+}
+
+```
+
+### Iterating over sections
+
+```go
+for _, sec := range pe.Sections {
+	fmt.Printf("Section Name : %s\n", sec.NameString())
+	fmt.Printf("Section VirtualSize : %x\n", sec.Header.VirtualSize)
+	fmt.Printf("Section Flags : %x, Meaning: %v\n\n",
+		sec.Header.Characteristics, sec.PrettySectionFlags())
+}
+```
+
+Output:
+
+```
+Section Name : .text
+Section VirtualSize : 2ea58
+Section Flags : 60500060, Meaning: [Align8Bytes Readable Align16Bytes Executable Contains Code Initialized Data Align1Bytes]
+
+Section Name : .data
+Section VirtualSize : 58
+Section Flags : c0500040, Meaning: [Readable Initialized Data Writable Align1Bytes Align16Bytes Align8Bytes]
+
+Section Name : .rdata
+Section VirtualSize : 18d0
+Section Flags : 40600040, Meaning: [Align2Bytes Align8Bytes Readable Initialized Data Align32Bytes]
+
+...
+``` + +## Roadmap + +- imports MS-styled names demangling +- PE: VB5 and VB6 typical structures: project info, DLLCall-imports, referenced modules, object table + +## Fuzz Testing + +To validate the parser we use the [go-fuzz](https://github.com/dvyukov/go-fuzz) and a corpus of known malformed and tricky PE files from [corkami](https://github.com/corkami/pocs/tree/master/PE). + +## Projects Using This Library + + + Fibratus + + +[Fibratus](https://github.com/rabbitstack/fibratus) A modern tool for Windows kernel exploration and tracing with a focus on security. + +## References + +- [Peering Inside the PE: A Tour of the Win32 Portable Executable File Format by Matt Pietrek](http://bytepointer.com/resources/pietrek_peering_inside_pe.htm) +- [An In-Depth Look into the Win32 Portable Executable File Format - Part 1 by Matt Pietrek](http://www.delphibasics.info/home/delphibasicsarticles/anin-depthlookintothewin32portableexecutablefileformat-part1) +- [An In-Depth Look into the Win32 Portable Executable File Format - Part 2 by Matt Pietrek](http://www.delphibasics.info/home/delphibasicsarticles/anin-depthlookintothewin32portableexecutablefileformat-part2) +- [Portable Executable File Format](https://blog.kowalczyk.info/articles/pefileformat.html) +- [PE Format MSDN spec](https://docs.microsoft.com/en-us/windows/win32/debug/pe-format) +- [DotNET format](https://www.ntcore.com/files/dotnetformat.htm) +- [BlackHat 2011 - CONSTANT INSECURITY: (PECOFF) Portable Executable FIle Format](https://www.youtube.com/watch?v=uoQL3CE24ls) diff --git a/vendor/github.com/saferwall/pe/anomaly.go b/vendor/github.com/saferwall/pe/anomaly.go new file mode 100644 index 00000000..ff1303b6 --- /dev/null +++ b/vendor/github.com/saferwall/pe/anomaly.go @@ -0,0 +1,218 @@ +// Copyright 2021 Saferwall. All rights reserved. +// Use of this source code is governed by Apache v2 license +// license that can be found in the LICENSE file. + +package pe + +import ( + "encoding/binary" + "time" +) + +// Anomalies found in a PE +var ( + + // AnoPEHeaderOverlapDOSHeader is reported when the PE headers overlaps with the DOS header. + AnoPEHeaderOverlapDOSHeader = "PE header overlaps with DOS header" + + // AnoPETimeStampNull is reported when the file header timestamp is 0. + AnoPETimeStampNull = "file header timestamp set to 0" + + // AnoPETimeStampFuture is reported when the file header timestamp is more + // than one day ahead of the current date timestamp. + AnoPETimeStampFuture = "file header timestamp set to 0" + + // NumberOfSections is reported when number of sections is larger or equal than 10. + AnoNumberOfSections10Plus = "number of sections is 10+" + + // AnoNumberOfSectionsNull is reported when sections count's is 0. + AnoNumberOfSectionsNull = "number of sections is 0" + + // AnoSizeOfOptionalHeaderNull is reported when size of optional header is 0. + AnoSizeOfOptionalHeaderNull = "size of optional header is 0" + + // AnoUncommonSizeOfOptionalHeader32 is reported when size of optional + // header for PE32 is larger than 0xE0. + AnoUncommonSizeOfOptionalHeader32 = "size of optional header is larger than 0xE0 (PE32)" + + // AnoUncommonSizeOfOptionalHeader64 is reported when size of optional + // header for PE32+ is larger than 0xF0. + AnoUncommonSizeOfOptionalHeader64 = "size of optional header is larger than 0xF0 (PE32+)" + + // AnoAddressOfEntryPointNull is reported when address of entry point is 0. 
+	AnoAddressOfEntryPointNull = "address of entry point is 0"
+
+	// AnoAddressOfEPLessSizeOfHeaders is reported when the address of entry point
+	// is smaller than the size of headers; the file cannot run under Windows 8.
+	AnoAddressOfEPLessSizeOfHeaders = "address of entry point is smaller than size of headers, " +
+		"the file cannot run under Windows 8"
+
+	// AnoImageBaseNull is reported when the image base is null.
+	AnoImageBaseNull = "image base is 0"
+
+	// AnoDanSMagicOffset is reported when the `DanS` magic offset is different than 0x80.
+	AnoDanSMagicOffset = "`DanS` magic offset is different than 0x80"
+
+	// ErrInvalidFileAlignment is reported when the file alignment is larger than
+	// 0x200 and not a power of 2.
+	ErrInvalidFileAlignment = "FileAlignment larger than 0x200 and not a power of 2"
+
+	// ErrInvalidSectionAlignment is reported when the file alignment is less
+	// than 0x200 and different from the section alignment.
+	ErrInvalidSectionAlignment = "FileAlignment lesser than 0x200 and different from section alignment"
+
+	// AnoMajorSubsystemVersion is reported when MajorSubsystemVersion has a
+	// value outside the standard 3 to 6 range.
+	AnoMajorSubsystemVersion = "MajorSubsystemVersion is outside 3<-->6 boundary"
+
+	// AnonWin32VersionValue is reported when Win32VersionValue is different than 0.
+	AnonWin32VersionValue = "Win32VersionValue is a reserved field, must be set to zero"
+
+	// AnoInvalidPEChecksum is reported when the optional header checksum field
+	// is different from what it should normally be.
+	AnoInvalidPEChecksum = "optional header checksum is invalid"
+
+	// AnoNumberOfRvaAndSizes is reported when NumberOfRvaAndSizes is different than 16.
+	AnoNumberOfRvaAndSizes = "optional header NumberOfRvaAndSizes != 16"
+
+	// AnoReservedDataDirectoryEntry is reported when the last data directory entry is not zero.
+	AnoReservedDataDirectoryEntry = "last data directory entry is a reserved field, must be set to zero"
+
+	// AnoCOFFSymbolsCount is reported when the number of COFF symbols is absurdly high.
+	AnoCOFFSymbolsCount = "COFF symbols count is absurdly high"
+
+	// AnoRelocationEntriesCount is reported when the number of relocation entries is absurdly high.
+	AnoRelocationEntriesCount = "relocation entries count is absurdly high"
+)
+
+// GetAnomalies reports anomalies found in a PE binary.
+// These anomalies do not prevent the Windows loader from loading the file,
+// but they are interesting signals for malware analysis.
+func (pe *File) GetAnomalies() error {
+
+	// ******************** Anomalies in File header ************************
+	// An application for Windows NT typically has the nine predefined sections
+	// named: .text, .bss, .rdata, .data, .rsrc, .edata, .idata, .pdata, and
+	// .debug. Some applications do not need all of these sections, while
+	// others may define still more sections to suit their specific needs.
+	// NumberOfSections can be up to 96 under XP.
+	// NumberOfSections can be up to 65535 under Vista and later.
+	if pe.NtHeader.FileHeader.NumberOfSections >= 10 {
+		pe.Anomalies = append(pe.Anomalies, AnoNumberOfSections10Plus)
+	}
+
+	// File header timestamp set to 0.
+	if pe.NtHeader.FileHeader.TimeDateStamp == 0 {
+		pe.Anomalies = append(pe.Anomalies, AnoPETimeStampNull)
+	}
+
+	// File header timestamp set to the future.
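+	// Anything more than 24 hours ahead of the scan time is flagged;
+	// compilers normally stamp the actual build time.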
+	now := time.Now()
+	future := uint32(now.Add(24 * time.Hour).Unix())
+	if pe.NtHeader.FileHeader.TimeDateStamp > future {
+		pe.Anomalies = append(pe.Anomalies, AnoPETimeStampFuture)
+	}
+
+	// NumberOfSections can be null with low-alignment PEs; in this case,
+	// the value is just checked but not really used (under XP).
+	if pe.NtHeader.FileHeader.NumberOfSections == 0 {
+		pe.Anomalies = append(pe.Anomalies, AnoNumberOfSectionsNull)
+	}
+
+	// SizeOfOptionalHeader is not the size of the optional header, but the delta
+	// between the top of the optional header and the start of the section table.
+	// Thus, it can be null: the section table will then overlap the optional
+	// header, for example when no sections are present.
+	if pe.NtHeader.FileHeader.SizeOfOptionalHeader == 0 {
+		pe.Anomalies = append(pe.Anomalies, AnoSizeOfOptionalHeaderNull)
+	}
+
+	// SizeOfOptionalHeader can be bigger than the file (the section table will
+	// be in virtual space, full of zeroes), but it can't be negative.
+	oh32 := ImageOptionalHeader32{}
+	oh64 := ImageOptionalHeader64{}
+
+	// SizeOfOptionalHeader standard value is 0xE0 for PE32.
+	if pe.Is32 &&
+		pe.NtHeader.FileHeader.SizeOfOptionalHeader > uint16(binary.Size(oh32)) {
+		pe.Anomalies = append(pe.Anomalies, AnoUncommonSizeOfOptionalHeader32)
+	}
+
+	// SizeOfOptionalHeader standard value is 0xF0 for PE32+.
+	if pe.Is64 &&
+		pe.NtHeader.FileHeader.SizeOfOptionalHeader > uint16(binary.Size(oh64)) {
+		pe.Anomalies = append(pe.Anomalies, AnoUncommonSizeOfOptionalHeader64)
+	}
+
+	// ***************** Anomalies in Optional header *********************
+	// Under Windows 8, AddressOfEntryPoint is not allowed to be smaller than
+	// SizeOfHeaders, except if it's null.
+	switch pe.Is64 {
+	case true:
+		oh64 = pe.NtHeader.OptionalHeader.(ImageOptionalHeader64)
+	case false:
+		oh32 = pe.NtHeader.OptionalHeader.(ImageOptionalHeader32)
+	}
+
+	// Use oh for fields which are common to both structures. For PE32+ files,
+	// copy the fields used below from the 64-bit header; otherwise oh would
+	// stay zero-valued.
+	oh := oh32
+	if pe.Is64 {
+		oh.AddressOfEntryPoint = oh64.AddressOfEntryPoint
+		oh.SizeOfHeaders = oh64.SizeOfHeaders
+		oh.SectionAlignment = oh64.SectionAlignment
+		oh.SizeOfImage = oh64.SizeOfImage
+		oh.MajorSubsystemVersion = oh64.MajorSubsystemVersion
+		oh.Win32VersionValue = oh64.Win32VersionValue
+		oh.CheckSum = oh64.CheckSum
+	}
+	if oh.AddressOfEntryPoint != 0 && oh.AddressOfEntryPoint < oh.SizeOfHeaders {
+		pe.Anomalies = append(pe.Anomalies, AnoAddressOfEPLessSizeOfHeaders)
+	}
+
+	// AddressOfEntryPoint can be null in DLLs: in this case,
+	// DllMain is just not called.
+	if oh.AddressOfEntryPoint == 0 {
+		pe.Anomalies = append(pe.Anomalies, AnoAddressOfEntryPointNull)
+	}
+
+	// ImageBase can be null under XP.
+	// In this case, the binary will be relocated to 10000h.
+	if (pe.Is64 && oh64.ImageBase == 0) ||
+		(pe.Is32 && oh32.ImageBase == 0) {
+		pe.Anomalies = append(pe.Anomalies, AnoImageBaseNull)
+	}
+
+	// MSDN states that SizeOfImage must be a multiple of the section
+	// alignment. This is not a hard requirement though, so it is only
+	// reported as an anomaly.
+	// TODO: raise an anomaly when SectionAlignment is null?
+	if oh.SectionAlignment != 0 && oh.SizeOfImage%oh.SectionAlignment != 0 {
+		pe.Anomalies = append(pe.Anomalies, AnoInvalidSizeOfImage)
+	}
+
+	// For DLLs, MajorSubsystemVersion is ignored until Windows 8. It can have
+	// any value. Under Windows 8, it needs a standard value (between 3.10 and 6.30).
+	if oh.MajorSubsystemVersion < 3 || oh.MajorSubsystemVersion > 6 {
+		pe.Anomalies = append(pe.Anomalies, AnoMajorSubsystemVersion)
+	}
+
+	// Win32VersionValue is officially defined as `reserved` and should be null.
+	// If non-null, it overrides the MajorVersion/MinorVersion/BuildNumber/PlatformId
+	// OperatingSystem version values located in the PEB after loading.
+	if oh.Win32VersionValue != 0 {
+		pe.Anomalies = append(pe.Anomalies, AnonWin32VersionValue)
+	}
+
+	// Checksums are required for kernel-mode drivers and some system DLLs.
+	// Otherwise, this field can be 0.
+	if pe.Checksum() != oh.CheckSum && oh.CheckSum != 0 {
+		pe.Anomalies = append(pe.Anomalies, AnoInvalidPEChecksum)
+	}
+
+	// This field contains the number of IMAGE_DATA_DIRECTORY entries.
+	// This field has been 16 since the earliest releases of Windows NT.
+	if (pe.Is64 && oh64.NumberOfRvaAndSizes != 0x10) ||
+		(pe.Is32 && oh32.NumberOfRvaAndSizes != 0x10) {
+		pe.Anomalies = append(pe.Anomalies, AnoNumberOfRvaAndSizes)
+	}
+
+	return nil
+}
+
+// addAnomaly appends the given anomaly to the list of anomalies.
+func (pe *File) addAnomaly(anomaly string) {
+	if !stringInSlice(anomaly, pe.Anomalies) {
+		pe.Anomalies = append(pe.Anomalies, anomaly)
+	}
+}
diff --git a/vendor/github.com/saferwall/pe/arch.go b/vendor/github.com/saferwall/pe/arch.go
new file mode 100644
index 00000000..c9ccfa75
--- /dev/null
+++ b/vendor/github.com/saferwall/pe/arch.go
@@ -0,0 +1,11 @@
+// Copyright 2022 Saferwall. All rights reserved.
+// Use of this source code is governed by an Apache v2 license
+// that can be found in the LICENSE file.
+
+package pe
+
+// Architecture-specific data. This data directory is not used
+// (set to all zeros) for the I386, IA64, or AMD64 architecture.
+func (pe *File) parseArchitectureDirectory(rva, size uint32) error {
+	return nil
+}
diff --git a/vendor/github.com/saferwall/pe/boundimports.go b/vendor/github.com/saferwall/pe/boundimports.go
new file mode 100644
index 00000000..3fbd13fc
--- /dev/null
+++ b/vendor/github.com/saferwall/pe/boundimports.go
@@ -0,0 +1,154 @@
+// Copyright 2018 Saferwall. All rights reserved.
+// Use of this source code is governed by an Apache v2 license
+// that can be found in the LICENSE file.
+
+package pe
+
+import (
+	"encoding/binary"
+)
+
+const (
+	// MaxStringLength represents the maximum length of a string to be retrieved
+	// from the file. It's there to prevent loading massive amounts of data from
+	// memory mapped files. Strings longer than 0x100 bytes should be rather rare.
+	MaxStringLength = uint32(0x100)
+)
+
+// ImageBoundImportDescriptor represents the IMAGE_BOUND_IMPORT_DESCRIPTOR.
+type ImageBoundImportDescriptor struct {
+	// TimeDateStamp is just the value from the Exports information of the DLL
+	// which is being imported from.
+	TimeDateStamp uint32 `json:"time_date_stamp"`
+	// Offset of the DLL name counted from the beginning of the BOUND_IMPORT table.
+	OffsetModuleName uint16 `json:"offset_module_name"`
+	// Number of forwarder refs.
+	NumberOfModuleForwarderRefs uint16 `json:"number_of_module_forwarder_refs"`
+	// An array of zero or more IMAGE_BOUND_FORWARDER_REF follows.
+}
+
+// ImageBoundForwardedRef represents the IMAGE_BOUND_FORWARDER_REF.
+type ImageBoundForwardedRef struct {
+	TimeDateStamp    uint32 `json:"time_date_stamp"`
+	OffsetModuleName uint16 `json:"offset_module_name"`
+	Reserved         uint16 `json:"reserved"`
+}
+
+// BoundImportDescriptorData represents the descriptor in addition to forwarded refs.
+type BoundImportDescriptorData struct {
+	Struct        ImageBoundImportDescriptor `json:"struct"`
+	Name          string                     `json:"name"`
+	ForwardedRefs []BoundForwardedRefData    `json:"forwarded_refs"`
+}
+
+// BoundForwardedRefData represents the struct in addition to the DLL name.
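+// The Name is resolved from OffsetModuleName, counted from the start of the
+// bound import table.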
+type BoundForwardedRefData struct {
+	Struct ImageBoundForwardedRef `json:"struct"`
+	Name   string                 `json:"name"`
+}
+
+// This table is an array of bound import descriptors, each of which describes
+// a DLL this image was bound with at the time of the image creation.
+// The descriptors also carry the time stamps of the bindings, and if the
+// bindings are up-to-date, the OS loader uses these bindings as a “shortcut”
+// for API import. Otherwise, the loader ignores the bindings and resolves the
+// imported APIs through the Import tables.
+func (pe *File) parseBoundImportDirectory(rva, size uint32) (err error) {
+	var sectionsAfterOffset []uint32
+	var safetyBoundary uint32
+	var start = rva
+
+	for {
+		bndDesc := ImageBoundImportDescriptor{}
+		bndDescSize := uint32(binary.Size(bndDesc))
+		err = pe.structUnpack(&bndDesc, rva, bndDescSize)
+		// If the RVA is invalid, everything would blow up. Some EXEs seem to
+		// be specially nasty and have an invalid RVA.
+		if err != nil {
+			return err
+		}
+
+		// If the structure is all zeros, we reached the end of the list.
+		if bndDesc == (ImageBoundImportDescriptor{}) {
+			break
+		}
+
+		rva += bndDescSize
+		sectionsAfterOffset = nil
+
+		fileOffset := pe.GetOffsetFromRva(rva)
+		section := pe.getSectionByRva(rva)
+		if section == nil {
+			safetyBoundary = pe.size - fileOffset
+			for _, section := range pe.Sections {
+				if section.Header.PointerToRawData > fileOffset {
+					sectionsAfterOffset = append(
+						sectionsAfterOffset, section.Header.PointerToRawData)
+				}
+			}
+			if len(sectionsAfterOffset) > 0 {
+				// Find the first section starting at a later offset than that
+				// specified by 'rva'.
+				firstSectionAfterOffset := Min(sectionsAfterOffset)
+				section = pe.getSectionByOffset(firstSectionAfterOffset)
+				if section != nil {
+					safetyBoundary = section.Header.PointerToRawData - fileOffset
+				}
+			}
+		} else {
+			sectionLen := uint32(len(section.Data(0, 0, pe)))
+			safetyBoundary = (section.Header.PointerToRawData + sectionLen) - fileOffset
+		}
+
+		if section == nil {
+			pe.logger.Warnf("RVA of IMAGE_BOUND_IMPORT_DESCRIPTOR points to an invalid address: 0x%x", rva)
+			return nil
+		}
+
+		bndFrwdRef := ImageBoundForwardedRef{}
+		bndFrwdRefSize := uint32(binary.Size(bndFrwdRef))
+		count := min(uint32(bndDesc.NumberOfModuleForwarderRefs), safetyBoundary/bndFrwdRefSize)
+
+		var forwarderRefs []BoundForwardedRefData
+		for i := uint32(0); i < count; i++ {
+			err = pe.structUnpack(&bndFrwdRef, rva, bndFrwdRefSize)
+			if err != nil {
+				return err
+			}
+
+			rva += bndFrwdRefSize
+
+			offset := start + uint32(bndFrwdRef.OffsetModuleName)
+			DllName := string(pe.GetStringFromData(0, pe.data[offset:offset+MaxStringLength]))
+
+			// OffsetModuleName points to a DLL name. These shouldn't be too long.
+			// Anything longer than a safety length of 256 will be taken to indicate
+			// a corrupt entry and abort the processing of these entries.
+			// Non-printable names are rejected as well.
+ if DllName != "" && (len(DllName) > 256 || !IsPrintable(DllName)) { + break + } + + forwarderRefs = append(forwarderRefs, BoundForwardedRefData{ + Struct: bndFrwdRef, Name: DllName}) + } + + offset := start + uint32(bndDesc.OffsetModuleName) + DllNameBuff := pe.GetStringFromData(0, pe.data[offset:offset+MaxStringLength]) + DllName := string(DllNameBuff) + if DllName != "" && (len(DllName) > 256 || !IsPrintable(DllName)) { + break + } + + pe.BoundImports = append(pe.BoundImports, BoundImportDescriptorData{ + Struct: bndDesc, + Name: DllName, + ForwardedRefs: forwarderRefs}) + } + + if len(pe.BoundImports) > 0 { + pe.HasBoundImp = true + } + return nil +} diff --git a/vendor/github.com/saferwall/pe/debug.go b/vendor/github.com/saferwall/pe/debug.go new file mode 100644 index 00000000..73458060 --- /dev/null +++ b/vendor/github.com/saferwall/pe/debug.go @@ -0,0 +1,772 @@ +// Copyright 2018 Saferwall. All rights reserved. +// Use of this source code is governed by Apache v2 license +// license that can be found in the LICENSE file. + +package pe + +import ( + "encoding/binary" + "errors" + "fmt" +) + +// The following values are defined for the Type field of the debug directory entry: +const ( + // An unknown value that is ignored by all tools. + ImageDebugTypeUnknown = 0 + + // The COFF debug information (line numbers, symbol table, and string table). + // This type of debug information is also pointed to by fields in the file headers. + ImageDebugTypeCOFF = 1 + + // The Visual C++ debug information. + ImageDebugTypeCodeView = 2 + + // The frame pointer omission (FPO) information. This information tells the + // debugger how to interpret nonstandard stack frames, which use the EBP + // register for a purpose other than as a frame pointer. + ImageDebugTypeFPO = 3 + + // The location of DBG file. + ImageDebugTypeMisc = 4 + + // A copy of .pdata section. + ImageDebugTypeException = 5 + + // Reserved. + ImageDebugTypeFixup = 6 + + // The mapping from an RVA in image to an RVA in source image. + ImageDebugTypeOMAPToSrc = 7 + + // The mapping from an RVA in source image to an RVA in image. + ImageDebugTypeOMAPFromSrc = 8 + + // Reserved for Borland. + ImageDebugTypeBorland = 9 + + // Reserved. + ImageDebugTypeReserved = 10 + + // Reserved. + ImageDebugTypeCLSID = 11 + + // Visual C++ features (/GS counts /sdl counts and guardN counts). + ImageDebugTypeVCFeature = 12 + + // Pogo aka PGO aka Profile Guided Optimization. + ImageDebugTypePOGO = 13 + + // Incremental Link Time Code Generation (iLTCG). + ImageDebugTypeILTCG = 14 + + // Intel MPX. + ImageDebugTypeMPX = 15 + + // PE determinism or reproducibility. + ImageDebugTypeRepro = 16 + + // Extended DLL characteristics bits. + ImageDebugTypeExDllCharacteristics = 20 +) + +const ( + // CVSignatureRSDS represents the CodeView signature 'SDSR'. + CVSignatureRSDS = 0x53445352 + + // CVSignatureNB10 represents the CodeView signature 'NB10'. + CVSignatureNB10 = 0x3031424e +) + +const ( + // FrameFPO indicates a frame of type FPO. + FrameFPO = 0x0 + + // FrameTrap indicates a frame of type Trap. + FrameTrap = 0x1 + + // FrameTSS indicates a frame of type TSS. + FrameTSS = 0x2 + + // FrameNonFPO indicates a frame of type Non-FPO. + FrameNonFPO = 0x3 +) + +// DllCharacteristicsExType represents a DLL Characteristics type. +type DllCharacteristicsExType uint32 + +const ( + // ImageDllCharacteristicsExCETCompat indicates that the image is CET + // compatible. 
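+	// (CET: Intel Control-flow Enforcement Technology, e.g. shadow stacks.)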
+ ImageDllCharacteristicsExCETCompat = 0x0001 +) + +const ( + // POGOTypePGU represents a signature for an undocumented PGO sub type. + POGOTypePGU = 0x50475500 + // POGOTypePGI represents a signature for an undocumented PGO sub type. + POGOTypePGI = 0x50474900 + // POGOTypePGO represents a signature for an undocumented PGO sub type. + POGOTypePGO = 0x50474F00 + // POGOTypeLTCG represents a signature for an undocumented PGO sub type. + POGOTypeLTCG = 0x4c544347 +) + +// ImageDebugDirectoryType represents the type of a debug directory. +type ImageDebugDirectoryType uint32 + +// ImageDebugDirectory represents the IMAGE_DEBUG_DIRECTORY structure. +// This directory indicates what form of debug information is present +// and where it is. This directory consists of an array of debug directory +// entries whose location and size are indicated in the image optional header. +type ImageDebugDirectory struct { + // Reserved, must be 0. + Characteristics uint32 `json:"characteristics"` + + // The time and date that the debug data was created. + TimeDateStamp uint32 `json:"time_date_stamp"` + + // The major version number of the debug data format. + MajorVersion uint16 `json:"major_version"` + + // The minor version number of the debug data format. + MinorVersion uint16 `json:"minor_version"` + + // The format of debugging information. This field enables support of + // multiple debuggers. + Type ImageDebugDirectoryType `json:"type"` + + // The size of the debug data (not including the debug directory itself). + SizeOfData uint32 `json:"size_of_data"` + + //The address of the debug data when loaded, relative to the image base. + AddressOfRawData uint32 `json:"address_of_raw_data"` + + // The file pointer to the debug data. + PointerToRawData uint32 `json:"pointer_to_raw_data"` +} + +// DebugEntry wraps ImageDebugDirectory to include debug directory type. +type DebugEntry struct { + // Points to the image debug entry structure. + Struct ImageDebugDirectory `json:"struct"` + + // Holds specific information about the debug type entry. + Info interface{} `json:"info"` + + // Type of the debug entry. + Type string `json:"type"` +} + +// GUID is a 128-bit value consisting of one group of 8 hexadecimal digits, +// followed by three groups of 4 hexadecimal digits each, followed by one +// group of 12 hexadecimal digits. +type GUID struct { + Data1 uint32 + Data2 uint16 + Data3 uint16 + Data4 [8]byte +} + +// CVSignature represents a CodeView signature. +type CVSignature uint32 + +// CVInfoPDB70 represents the the CodeView data block of a PDB 7.0 file. +type CVInfoPDB70 struct { + // CodeView signature, equal to `RSDS`. + CVSignature CVSignature `json:"cv_signature"` + + // A unique identifier, which changes with every rebuild of the executable and PDB file. + Signature GUID `json:"signature"` + + // Ever-incrementing value, which is initially set to 1 and incremented every + // time when a part of the PDB file is updated without rewriting the whole file. + Age uint32 `json:"age"` + + // Null-terminated name of the PDB file. It can also contain full or partial + // path to the file. + PDBFileName string `json:"pdb_file_name"` +} + +// CVHeader represents the the CodeView header struct to the PDB 2.0 file. +type CVHeader struct { + // CodeView signature, equal to `NB10`. + Signature CVSignature `json:"signature"` + + // CodeView offset. Set to 0, because debug information is stored in a + // separate file. 
+ Offset uint32 `json:"offset"` +} + +// CVInfoPDB20 represents the the CodeView data block of a PDB 2.0 file. +type CVInfoPDB20 struct { + // Points to the CodeView header structure. + CVHeader CVHeader `json:"cv_header"` + + // The time when debug information was created (in seconds since 01.01.1970). + Signature uint32 `json:"signature"` + + // Ever-incrementing value, which is initially set to 1 and incremented every + // time when a part of the PDB file is updated without rewriting the whole file. + Age uint32 `json:"age"` + + // Null-terminated name of the PDB file. It can also contain full or partial + // path to the file. + PDBFileName string `json:"pdb_file_name"` +} + +// FPOFrameType represents the type of a FPO frame. +type FPOFrameType uint8 + +// FPOData represents the stack frame layout for a function on an x86 computer when +// frame pointer omission (FPO) optimization is used. The structure is used to locate +// the base of the call frame. +type FPOData struct { + // The offset of the first byte of the function code. + OffsetStart uint32 `json:"offset_start"` + + // The number of bytes in the function. + ProcSize uint32 `json:"proc_size"` + + // The number of local variables. + NumLocals uint32 `json:"num_locals"` + + // The size of the parameters, in DWORDs. + ParamsSize uint16 `json:"params_size"` + + // The number of bytes in the function prolog code. + PrologLength uint8 `json:"prolog_length"` + + // The number of registers saved. + SavedRegsCount uint8 `json:"saved_regs_count"` + + // A variable that indicates whether the function uses structured exception handling. + HasSEH uint8 `json:"has_seh"` + + // A variable that indicates whether the EBP register has been allocated. + UseBP uint8 `json:"use_bp"` + + // Reserved for future use. + Reserved uint8 `json:"reserved"` + + // A variable that indicates the frame type. + FrameType FPOFrameType `json:"frame_type"` +} + +// ImagePGOItem represents the _IMAGE_POGO_INFO structure. +type ImagePGOItem struct { + RVA uint32 `json:"rva"` + Size uint32 `json:"size"` + Name string `json:"name"` +} + +// POGOType represents a POGO type. +type POGOType uint32 + +// POGO structure contains information related to the Profile Guided Optimization. +// PGO is an approach to optimization where the compiler uses profile information +// to make better optimization decisions for the program. +type POGO struct { + // Signature represents the PGO sub type. + Signature POGOType `json:"signature"` + Entries []ImagePGOItem `json:"entries"` +} + +type VCFeature struct { + PreVC11 uint32 `json:"pre_vc11"` + CCpp uint32 `json:"C/C++"` + Gs uint32 `json:"/GS"` + Sdl uint32 `json:"/sdl"` + GuardN uint32 `json:"guardN"` +} + +type REPRO struct { + Size uint32 `json:"size"` + Hash []byte `json:"hash"` +} + +// ImageDebugMisc represents the IMAGE_DEBUG_MISC structure. +type ImageDebugMisc struct { + // The type of data carried in the `Data` field. + DataType uint32 `json:"data_type"` + + // The length of this structure in bytes, including the entire Data field + // and its NUL terminator (rounded to four byte multiple.) + Length uint32 `json:"length"` + + // The encoding of the Data field. True if data is unicode string. + Unicode bool `json:"unicode"` + + // Reserved. + Reserved [3]byte `json:"reserved"` + + // Actual data. + Data string `json:"data"` +} + +// Image files contain an optional debug directory that indicates what form of +// debug information is present and where it is. 
This directory consists of an +// array of debug directory entries whose location and size are indicated in the +// image optional header. The debug directory can be in a discardable .debug +// section (if one exists), or it can be included in any other section in the +// image file, or not be in a section at all. +func (pe *File) parseDebugDirectory(rva, size uint32) error { + + debugEntry := DebugEntry{} + debugDir := ImageDebugDirectory{} + errorMsg := fmt.Sprintf("Invalid debug information. Can't read data at RVA: 0x%x", rva) + debugDirSize := uint32(binary.Size(debugDir)) + debugDirsCount := size / debugDirSize + + for i := uint32(0); i < debugDirsCount; i++ { + offset := pe.GetOffsetFromRva(rva + debugDirSize*i) + err := pe.structUnpack(&debugDir, offset, debugDirSize) + if err != nil { + return errors.New(errorMsg) + } + + switch debugDir.Type { + case ImageDebugTypeCodeView: + debugSignature, err := pe.ReadUint32(debugDir.PointerToRawData) + if err != nil { + continue + } + + if debugSignature == CVSignatureRSDS { + // PDB 7.0 + pdb := CVInfoPDB70{CVSignature: CVSignatureRSDS} + + // Extract the GUID. + offset := debugDir.PointerToRawData + 4 + guidSize := uint32(binary.Size(pdb.Signature)) + err = pe.structUnpack(&pdb.Signature, offset, guidSize) + if err != nil { + continue + } + + // Extract the age. + offset += guidSize + pdb.Age, err = pe.ReadUint32(offset) + if err != nil { + continue + } + offset += 4 + + // PDB file name. + pdbFilenameSize := debugDir.SizeOfData - 24 - 1 + + // pdbFileName_size can be negative here, as seen in the malware + // sample with MD5 hash: 7c297600870d026c014d42596bb9b5fd + // Checking for positive size here to ensure proper parsing. + if pdbFilenameSize > 0 { + pdbFilename := make([]byte, pdbFilenameSize) + err = pe.structUnpack(&pdbFilename, offset, pdbFilenameSize) + if err != nil { + continue + } + pdb.PDBFileName = string(pdbFilename) + } + + // Include these extra information. + debugEntry.Info = pdb + + } else if debugSignature == CVSignatureNB10 { + // PDB 2.0. + cvHeader := CVHeader{} + offset := debugDir.PointerToRawData + err = pe.structUnpack(&cvHeader, offset, size) + if err != nil { + continue + } + + pdb := CVInfoPDB20{CVHeader: cvHeader} + + // Extract the signature. + pdb.Signature, err = pe.ReadUint32(offset + 8) + if err != nil { + continue + } + + // Extract the age. + pdb.Age, err = pe.ReadUint32(offset + 12) + if err != nil { + continue + } + offset += 16 + + pdbFilenameSize := debugDir.SizeOfData - 16 - 1 + if pdbFilenameSize > 0 { + pdbFilename := make([]byte, pdbFilenameSize) + err = pe.structUnpack(&pdbFilename, offset, pdbFilenameSize) + if err != nil { + continue + } + pdb.PDBFileName = string(pdbFilename) + } + + // Include these extra information. + debugEntry.Info = pdb + } + case ImageDebugTypePOGO: + pogoSignature, err := pe.ReadUint32(debugDir.PointerToRawData) + if err != nil { + continue + } + + pogo := POGO{} + + switch pogoSignature { + case 0x0, POGOTypePGU, POGOTypePGI, POGOTypePGO, POGOTypeLTCG: + // TODO: Some files like 00da1a2a9d9ebf447508bf6550f05f466f8eabb4ed6c4f2a524c0769b2d75bc1 + // have a POGO signature of 0x0. To be reverse engineered. 
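+				// Each POGO entry that follows the signature is laid out as a
+				// uint32 RVA, a uint32 size, and a NUL-terminated section name
+				// padded to a 4-byte boundary; the loop below walks SizeOfData
+				// accordingly.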
+ pogo.Signature = POGOType(pogoSignature) + offset = debugDir.PointerToRawData + 4 + c := uint32(0) + for c < debugDir.SizeOfData-4 { + + pogoEntry := ImagePGOItem{} + pogoEntry.RVA, err = pe.ReadUint32(offset) + if err != nil { + break + } + offset += 4 + + pogoEntry.Size, err = pe.ReadUint32(offset) + if err != nil { + break + } + offset += 4 + + pogoEntry.Name = string(pe.GetStringFromData(0, pe.data[offset:offset+64])) + + pogo.Entries = append(pogo.Entries, pogoEntry) + offset += uint32(len(pogoEntry.Name)) + + // Make sure offset is aligned to 4 bytes. + padding := 4 - (offset % 4) + c += 4 + 4 + uint32(len(pogoEntry.Name)) + padding + offset += padding + } + + debugEntry.Info = pogo + } + case ImageDebugTypeVCFeature: + vcf := VCFeature{} + size := uint32(binary.Size(vcf)) + err := pe.structUnpack(&vcf, debugDir.PointerToRawData, size) + if err != nil { + continue + } + debugEntry.Info = vcf + case ImageDebugTypeRepro: + repro := REPRO{} + offset := debugDir.PointerToRawData + + // Extract the size. + repro.Size, err = pe.ReadUint32(offset) + if err != nil { + continue + } + + // Extract the hash. + repro.Hash, err = pe.ReadBytesAtOffset(offset+4, repro.Size) + if err != nil { + continue + } + debugEntry.Info = repro + case ImageDebugTypeFPO: + offset := debugDir.PointerToRawData + size := uint32(16) + fpoEntries := []FPOData{} + c := uint32(0) + for c < debugDir.SizeOfData { + fpo := FPOData{} + fpo.OffsetStart, err = pe.ReadUint32(offset) + if err != nil { + break + } + + fpo.ProcSize, err = pe.ReadUint32(offset + 4) + if err != nil { + break + } + + fpo.NumLocals, err = pe.ReadUint32(offset + 8) + if err != nil { + break + } + + fpo.ParamsSize, err = pe.ReadUint16(offset + 12) + if err != nil { + break + } + + fpo.PrologLength, err = pe.ReadUint8(offset + 14) + if err != nil { + break + } + + attributes, err := pe.ReadUint16(offset + 15) + if err != nil { + break + } + + // + // UChar cbRegs :3; /* # regs saved */ + // UChar fHasSEH:1; /* Structured Exception Handling */ + // UChar fUseBP :1; /* EBP has been used */ + // UChar reserved:1; + // UChar cbFrame:2; /* frame type */ + // + + // The lowest 3 bits + fpo.SavedRegsCount = uint8(attributes & 0x7) + + // The next bit. + fpo.HasSEH = uint8(attributes & 0x8 >> 3) + + // The next bit. + fpo.UseBP = uint8(attributes & 0x10 >> 4) + + // The next bit. + fpo.Reserved = uint8(attributes & 0x20 >> 5) + + // The next 2 bits. + fpo.FrameType = FPOFrameType(attributes & 0xC0 >> 6) + + fpoEntries = append(fpoEntries, fpo) + c += size + offset += 16 + } + debugEntry.Info = fpoEntries + case ImageDebugTypeExDllCharacteristics: + exDllChar, err := pe.ReadUint32(debugDir.PointerToRawData) + if err != nil { + continue + } + + debugEntry.Info = DllCharacteristicsExType(exDllChar) + } + + debugEntry.Struct = debugDir + debugEntry.Type = debugDir.Type.String() + pe.Debugs = append(pe.Debugs, debugEntry) + } + + if len(pe.Debugs) > 0 { + pe.HasDebug = true + } + + return nil +} + +// SectionAttributeDescription maps a section attribute to a friendly name. 
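+// For example, looking up ".gfids" yields "CFG Functions Table"; unknown
+// section names return an empty string.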
+func SectionAttributeDescription(section string) string { + sectionNameMap := map[string]string{ + ".00cfg": "CFG Check Functions Pointers", + ".bss$00": "Uninit.data in phaseN of Pri7", + ".bss$dk00": "PGI: Uninit.data may be not const", + ".bss$dk01": "PGI: Uninit.data may be not const", + ".bss$pr00": "PGI: Uninit.data only for read", + ".bss$pr03": "PGI: Uninit.data only for read", + ".bss$zz": "PGO: Dead uninit.data", + ".CRT$XCA": "First C++ Initializer", + ".CRT$XCZ": "Last C++ Initializer", + ".xdata$x": "EH data", + ".gfids$y": "CFG Functions table", + ".CRT$XCAA": "Startup C++ Initializer", + ".CRT$XCC": "Global initializer: init_seg(compiler)", + ".CRT$XCL": "Global initializer: init_seg(lib)", + ".CRT$XCU": "Global initializer: init_seg(user)", + ".CRT$XDA": "First Dynamic TLS Initializer", + ".CRT$XDZ": "Last Dynamic TLS Initializer", + ".CRT$XIA": "First C Initializer", + ".CRT$XIAA": "Startup C Initializer", + ".CRT$XIAB": "PGO C Initializer", + ".CRT$XIAC": "Post-PGO C Initializer", + ".CRT$XIC": "CRT C Initializers", + ".CRT$XIYA": "VCCorLib Threading Model Initializer", + ".CRT$XIYAA": "XAML Designer Threading Model Override Initializer", + ".CRT$XIYB": "VCCorLib Main Initializer", + ".CRT$XIZ": "Last C Initializer", + ".CRT$XLA": "First Loader TLS Callback", + ".CRT$XLC": "CRT TLS Constructor", + ".CRT$XLD": "CRT TLS Terminator", + ".CRT$XLZ": "Last Loader TLS Callback", + ".CRT$XPA": "First Pre-Terminator", + ".CRT$XPB": "CRT ConcRT Pre-Terminator", + ".CRT$XPX": "CRT Pre-Terminators", + ".CRT$XPXA": "CRT stdio Pre-Terminator", + ".CRT$XPZ": "Last Pre-Terminator", + ".CRT$XTA": "First Terminator", + ".CRT$XTZ": "Last Terminator", + ".CRTMA$XCA": "First Managed C++ Initializer", + ".CRTMA$XCZ": "Last Managed C++ Initializer", + ".CRTVT$XCA": "First Managed VTable Initializer", + ".CRTVT$XCZ": "Last Managed VTable Initializer", + ".data$00": "Init.data in phaseN of Pri7", + ".data$dk00": "PGI: Init.data may be not const", + ".data$dk00$brc": "PGI: Init.data may be not const", + ".data$pr00": "PGI: Init.data only for read", + ".data$r": "RTTI Type Descriptors", + ".data$zz": "PGO: Dead init.data", + ".data$zz$brc": "PGO: Dead init.data", + ".didat$2": "Delay Import Descriptors", + ".didat$3": "Delay Import Final NULL Entry", + ".didat$4": "Delay Import INT", + ".didat$5": "Delay Import IAT", + ".didat$6": "Delay Import Symbol Names", + ".didat$7": "Delay Import Bound IAT", + ".edata": "Export Table", + ".gehcont": "CFG EHCont Table", + ".gfids": "CFG Functions Table", + ".giats": "CFG IAT Table", + ".idata$2": "Import Descriptors", + ".idata$3": "Import Final NULL Entry", + ".idata$4": "Import Names Table", + ".idata$5": "Import Addresses Table", + ".idata$6": "Import Symbol and DLL Names", + ".pdata": "Procedure data", + ".rdata$00": "Readonly data in phaseN of Pri7", + ".rdata$00$brc": "Readonly data in phaseN of Pri7", + ".rdata$09": "Readonly data in phaseN of Pri7", + ".rdata$brc": "BaseRelocation Clustering", + ".rdata$r": "RTTI Data", + ".rdata$sxdata": "Safe SEH", + ".rdata$T": "TLS Header", + ".rdata$zETW0": "ETW Metadata Header", + ".rdata$zETW1": "ETW Events Metadata", + ".rdata$zETW2": "ETW Providers Metadata", + ".rdata$zETW9": "ETW Metadata Footer", + ".rdata$zz": "PGO: Dead Readonly Data", + ".rdata$zz$brc": "PGO: Dead Readonly Data", + ".rdata$zzzdbg": "Debug directory data", + ".rsrc$01": "Resources Header", + ".rsrc$02": "Resources Data", + ".rtc$IAA": "First RTC Initializer", + ".rtc$IZZ": "Last RTC Initializer", + ".rtc$TAA": "First RTC Terminator", + 
".rtc$TZZ": "Last RTC Terminator", + ".text$di": "MSVC Dynamic Initializers", + ".text$lp00kernel32.dll!20_pri7": "PGO: LoaderPhaseN warm-to-hot code", + ".text$lp01kernel32.dll!20_pri7": "PGO: LoaderPhaseN warm-to-hot code", + ".text$lp03kernel32.dll!30_clientonly": "PGO: LoaderPhaseN warm-to-hot code", + ".text$lp04kernel32.dll!30_clientonly": "PGO: LoaderPhaseN warm-to-hot code", + ".text$lp08kernel32.dll!40_serveronly": "PGO: LoaderPhaseN warm-to-hot code", + ".text$lp09kernel32.dll!40_serveronly": "PGO: LoaderPhaseN warm-to-hot code", + ".text$lp10kernel32.dll!40_serveronly": "PGO: LoaderPhaseN warm-to-hot code", + ".text$mn": "Contains EP", + ".text$mn$00": "CFG Dispatching", + ".text$np": "PGO: __asm or disabled via pragma", + ".text$x": "EH Filters", + ".text$yd": "MSVC Destructors", + ".text$zy": "PGO: Dead Code Blocks", + ".text$zz": "PGO: Dead Whole Functions", + ".xdata": "Unwind data", + } + + if val, ok := sectionNameMap[section]; ok { + return val + } + + return "" +} + +// String returns a string interpretation of the FPO frame type. +func (ft FPOFrameType) String() string { + frameTypeMap := map[FPOFrameType]string{ + FrameFPO: "FPO", + FrameTrap: "Trap", + FrameTSS: "TSS", + FrameNonFPO: "Non FPO", + } + + v, ok := frameTypeMap[ft] + if ok { + return v + } + + return "?" +} + +// String returns the string representation of a GUID. +func (g GUID) String() string { + return fmt.Sprintf("{%06X-%04X-%04X-%04X-%X}", g.Data1, g.Data2, g.Data3, g.Data4[0:2], g.Data4[2:]) +} + +// String returns the string representation of a debug entry type. +func (t ImageDebugDirectoryType) String() string { + + debugTypeMap := map[ImageDebugDirectoryType]string{ + ImageDebugTypeUnknown: "Unknown", + ImageDebugTypeCOFF: "COFF", + ImageDebugTypeCodeView: "CodeView", + ImageDebugTypeFPO: "FPO", + ImageDebugTypeMisc: "Misc", + ImageDebugTypeException: "Exception", + ImageDebugTypeFixup: "Fixup", + ImageDebugTypeOMAPToSrc: "OMAP To Src", + ImageDebugTypeOMAPFromSrc: "OMAP From Src", + ImageDebugTypeBorland: "Borland", + ImageDebugTypeReserved: "Reserved", + ImageDebugTypeVCFeature: "VC Feature", + ImageDebugTypePOGO: "POGO", + ImageDebugTypeILTCG: "iLTCG", + ImageDebugTypeMPX: "MPX", + ImageDebugTypeRepro: "REPRO", + ImageDebugTypeExDllCharacteristics: "Ex.DLL Characteristics", + } + + v, ok := debugTypeMap[t] + if ok { + return v + } + + return "?" +} + +// String returns a string interpretation of a POGO type. +func (p POGOType) String() string { + pogoTypeMap := map[POGOType]string{ + POGOTypePGU: "PGU", + POGOTypePGI: "PGI", + POGOTypePGO: "PGO", + POGOTypeLTCG: "LTCG", + } + + v, ok := pogoTypeMap[p] + if ok { + return v + } + + return "?" +} + +// String returns a string interpretation of a CodeView signature. +func (s CVSignature) String() string { + cvSignatureMap := map[CVSignature]string{ + CVSignatureRSDS: "RSDS", + CVSignatureNB10: "NB10", + } + + v, ok := cvSignatureMap[s] + if ok { + return v + } + + return "?" +} + +// String returns a string interpretation of Dll Characteristics Ex. +func (flag DllCharacteristicsExType) String() string { + dllCharacteristicsExTypeMap := map[DllCharacteristicsExType]string{ + ImageDllCharacteristicsExCETCompat: "CET Compatible", + } + + v, ok := dllCharacteristicsExTypeMap[flag] + if ok { + return v + } + + return "?" 
+}
diff --git a/vendor/github.com/saferwall/pe/delayimports.go b/vendor/github.com/saferwall/pe/delayimports.go
new file mode 100644
index 00000000..364fb75e
--- /dev/null
+++ b/vendor/github.com/saferwall/pe/delayimports.go
@@ -0,0 +1,155 @@
+// Copyright 2018 Saferwall. All rights reserved.
+// Use of this source code is governed by an Apache v2 license
+// that can be found in the LICENSE file.
+
+package pe
+
+import (
+	"encoding/binary"
+)
+
+// ImageDelayImportDescriptor represents the _IMAGE_DELAYLOAD_DESCRIPTOR structure.
+type ImageDelayImportDescriptor struct {
+	// As yet, no attribute flags are defined. The linker sets this field to zero
+	// in the image. This field can be used to extend the record by indicating
+	// the presence of new fields, or it can be used to indicate behaviors to
+	// the delay or unload helper functions.
+	Attributes uint32
+
+	// The name of the DLL to be delay-loaded resides in the read-only data
+	// section of the image. It is referenced through the szName field.
+	Name uint32
+
+	// The handle of the DLL to be delay-loaded is in the data section of the
+	// image. The phmod field points to the handle. The supplied delay-load
+	// helper uses this location to store the handle to the loaded DLL.
+	ModuleHandleRVA uint32
+
+	// The delay import address table (IAT) is referenced by the delay import
+	// descriptor through the pIAT field. The delay-load helper updates these
+	// pointers with the real entry points so that the thunks are no longer in
+	// the calling loop.
+	ImportAddressTableRVA uint32
+
+	// The delay import name table (INT) contains the names of the imports that
+	// might require loading. They are ordered in the same fashion as the
+	// function pointers in the IAT.
+	ImportNameTableRVA uint32
+
+	// The delay bound import address table (BIAT) is an optional table of
+	// IMAGE_THUNK_DATA items that is used along with the timestamp field
+	// of the delay-load directory table by a post-process binding phase.
+	BoundImportAddressTableRVA uint32
+
+	// The delay unload import address table (UIAT) is an optional table of
+	// IMAGE_THUNK_DATA items that the unload code uses to handle an explicit
+	// unload request. It consists of initialized data in the read-only section
+	// that is an exact copy of the original IAT that referred the code to the
+	// delay-load thunks. On the unload request, the library can be freed,
+	// the *phmod cleared, and the UIAT written over the IAT to restore
+	// everything to its preload state.
+	UnloadInformationTableRVA uint32
+
+	// 0 if not bound, otherwise, date/time of the target DLL.
+	TimeDateStamp uint32
+}
+
+// DelayImport represents an entry in the delay import table.
+type DelayImport struct {
+	Offset     uint32
+	Name       string
+	Functions  []ImportFunction
+	Descriptor ImageDelayImportDescriptor
+}
+
+// Delay-load import tables were added to the image format to support a uniform
+// mechanism for applications to delay the loading of a DLL until the first call
+// into that DLL. The delay-load directory table is the counterpart to the
+// import directory table.
+func (pe *File) parseDelayImportDirectory(rva, size uint32) error {
+	for {
+		importDelayDesc := ImageDelayImportDescriptor{}
+		fileOffset := pe.GetOffsetFromRva(rva)
+		importDescSize := uint32(binary.Size(importDelayDesc))
+		err := pe.structUnpack(&importDelayDesc, fileOffset, importDescSize)
+
+		// If the RVA is invalid, everything would blow up. Some EXEs seem to
+		// be specially nasty and have an invalid RVA.
+ if err != nil { + return err + } + + // If the structure is all zeros, we reached the end of the list. + if importDelayDesc == (ImageDelayImportDescriptor{}) { + break + } + + rva += importDescSize + + // If the array of thunks is somewhere earlier than the import + // descriptor we can set a maximum length for the array. Otherwise + // just set a maximum length of the size of the file + maxLen := uint32(len(pe.data)) - fileOffset + if rva > importDelayDesc.ImportNameTableRVA || + rva > importDelayDesc.ImportAddressTableRVA { + if rva < importDelayDesc.ImportNameTableRVA { + maxLen = rva - importDelayDesc.ImportAddressTableRVA + } else if rva < importDelayDesc.ImportAddressTableRVA { + maxLen = rva - importDelayDesc.ImportNameTableRVA + } else { + maxLen = Max(rva-importDelayDesc.ImportNameTableRVA, + rva-importDelayDesc.ImportAddressTableRVA) + } + } + + var importedFunctions []ImportFunction + if pe.Is64 { + importedFunctions, err = pe.parseImports64(&importDelayDesc, maxLen) + } else { + importedFunctions, err = pe.parseImports32(&importDelayDesc, maxLen) + } + if err != nil { + return err + } + + nameRVA := uint32(0) + if importDelayDesc.Attributes == 0 { + nameRVA = importDelayDesc.Name - + pe.NtHeader.OptionalHeader.(ImageOptionalHeader32).ImageBase + } else { + nameRVA = importDelayDesc.Name + } + dllName := pe.getStringAtRVA(nameRVA, maxLen) + if !IsValidDosFilename(dllName) { + dllName = "*invalid*" + continue + } + + pe.DelayImports = append(pe.DelayImports, DelayImport{ + Offset: fileOffset, + Name: string(dllName), + Functions: importedFunctions, + Descriptor: importDelayDesc, + }) + } + + if len(pe.DelayImports) > 0 { + pe.HasDelayImp = true + } + + return nil +} + +// GetDelayImportEntryInfoByRVA return an import function + index of the entry given +// an RVA. +func (pe *File) GetDelayImportEntryInfoByRVA(rva uint32) (DelayImport, int) { + for _, imp := range pe.DelayImports { + for i, entry := range imp.Functions { + if entry.ThunkRVA == rva { + return imp, i + } + } + } + + return DelayImport{}, 0 +} diff --git a/vendor/github.com/saferwall/pe/dosheader.go b/vendor/github.com/saferwall/pe/dosheader.go new file mode 100644 index 00000000..60717b93 --- /dev/null +++ b/vendor/github.com/saferwall/pe/dosheader.go @@ -0,0 +1,108 @@ +// Copyright 2018 Saferwall. All rights reserved. +// Use of this source code is governed by Apache v2 license +// license that can be found in the LICENSE file. + +package pe + +import ( + "encoding/binary" +) + +// ImageDOSHeader represents the DOS stub of a PE. +type ImageDOSHeader struct { + // Magic number. + Magic uint16 `json:"magic"` + + // Bytes on last page of file. + BytesOnLastPageOfFile uint16 `json:"bytes_on_last_page_of_file"` + + // Pages in file. + PagesInFile uint16 `json:"pages_in_file"` + + // Relocations. + Relocations uint16 `json:"relocations"` + + // Size of header in paragraphs. + SizeOfHeader uint16 `json:"size_of_header"` + + // Minimum extra paragraphs needed. + MinExtraParagraphsNeeded uint16 `json:"min_extra_paragraphs_needed"` + + // Maximum extra paragraphs needed. + MaxExtraParagraphsNeeded uint16 `json:"max_extra_paragraphs_needed"` + + // Initial (relative) SS value. + InitialSS uint16 `json:"initial_ss"` + + // Initial SP value. + InitialSP uint16 `json:"initial_sp"` + + // Checksum. + Checksum uint16 `json:"checksum"` + + // Initial IP value. + InitialIP uint16 `json:"initial_ip"` + + // Initial (relative) CS value. + InitialCS uint16 `json:"initial_cs"` + + // File address of relocation table. 
+	AddressOfRelocationTable uint16 `json:"address_of_relocation_table"`
+
+	// Overlay number.
+	OverlayNumber uint16 `json:"overlay_number"`
+
+	// Reserved words.
+	ReservedWords1 [4]uint16 `json:"reserved_words_1"`
+
+	// OEM identifier.
+	OEMIdentifier uint16 `json:"oem_identifier"`
+
+	// OEM information.
+	OEMInformation uint16 `json:"oem_information"`
+
+	// Reserved words.
+	ReservedWords2 [10]uint16 `json:"reserved_words_2"`
+
+	// File address of the new exe header (e_lfanew).
+	AddressOfNewEXEHeader uint32 `json:"address_of__new_exe_header"`
+}
+
+// ParseDOSHeader parses the DOS header stub. Every PE file begins with a small
+// MS-DOS stub. The need for this arose in the early days of Windows, before a
+// significant number of consumers were running it. When executed on a machine
+// without Windows, the program could at least print out a message saying that
+// Windows was required to run the executable.
+func (pe *File) ParseDOSHeader() (err error) {
+	offset := uint32(0)
+	size := uint32(binary.Size(pe.DOSHeader))
+	err = pe.structUnpack(&pe.DOSHeader, offset, size)
+	if err != nil {
+		return err
+	}
+
+	// The magic can be ZM on a (non-PE) EXE.
+	// These executables still work under XP via ntvdm.
+	if pe.DOSHeader.Magic != ImageDOSSignature &&
+		pe.DOSHeader.Magic != ImageDOSZMSignature {
+		return ErrDOSMagicNotFound
+	}
+
+	// `e_lfanew` is the only required element (besides the signature) of the
+	// DOS header needed to turn the EXE into a PE. It is a relative offset to
+	// the NT headers. It can't be null (signatures would overlap).
+	// It can be 4 at minimum.
+	if pe.DOSHeader.AddressOfNewEXEHeader < 4 ||
+		pe.DOSHeader.AddressOfNewEXEHeader > pe.size {
+		return ErrInvalidElfanewValue
+	}
+
+	// A tiny PE has an e_lfanew of 4, which means the NT headers overlap
+	// the DOS header.
+	if pe.DOSHeader.AddressOfNewEXEHeader <= 0x3c {
+		pe.Anomalies = append(pe.Anomalies, AnoPEHeaderOverlapDOSHeader)
+	}
+
+	pe.HasDOSHdr = true
+	return nil
+}
diff --git a/vendor/github.com/saferwall/pe/dotnet.go b/vendor/github.com/saferwall/pe/dotnet.go
new file mode 100644
index 00000000..5b5022d1
--- /dev/null
+++ b/vendor/github.com/saferwall/pe/dotnet.go
@@ -0,0 +1,773 @@
+// Copyright 2018 Saferwall. All rights reserved.
+// Use of this source code is governed by an Apache v2 license
+// that can be found in the LICENSE file.
+
+package pe
+
+import (
+	"encoding/binary"
+)
+
+// References:
+// https://www.ntcore.com/files/dotnetformat.htm
+
+// COMImageFlagsType represents a COM+ header entry point flag type.
+type COMImageFlagsType uint32
+
+// COM+ Header entry point flags.
+const (
+	// The image file contains IL code only, with no embedded native unmanaged
+	// code except the start-up stub (which simply executes an indirect jump to
+	// the CLR entry point).
+	COMImageFlagsILOnly = 0x00000001
+
+	// The image file can be loaded only into a 32-bit process.
+	COMImageFlags32BitRequired = 0x00000002
+
+	// This flag is obsolete and should not be set. Setting it—as the IL
+	// assembler allows, using the .corflags directive—will render your module
+	// un-loadable.
+	COMImageFlagILLibrary = 0x00000004
+
+	// The image file is protected with a strong name signature.
+	COMImageFlagsStrongNameSigned = 0x00000008
+
+	// The executable’s entry point is an unmanaged method. The EntryPointToken/
+	// EntryPointRVA field of the CLR header contains the RVA of this native
+	// method. This flag was introduced in version 2.0 of the CLR.
+ COMImageFlagsNativeEntrypoint = 0x00000010 + + // The CLR loader and the JIT compiler are required to track debug + // information about the methods. This flag is not used. + COMImageFlagsTrackDebugData = 0x00010000 + + // The image file can be loaded into any process, but preferably into a + // 32-bit process. This flag can be only set together with flag + // COMIMAGE_FLAGS_32BITREQUIRED. When set, these two flags mean the image + // is platformneutral, but prefers to be loaded as 32-bit when possible. + // This flag was introduced in CLR v4.0 + COMImageFlags32BitPreferred = 0x00020000 +) + +// V-table constants. +const ( + // V-table slots are 32-bits in size. + CORVTable32Bit = 0x01 + + // V-table slots are 64-bits in size. + CORVTable64Bit = 0x02 + + // The thunk created by the common language runtime must provide data + // marshaling between managed and unmanaged code. + CORVTableFromUnmanaged = 0x04 + + // The thunk created by the common language runtime must provide data + // marshaling between managed and unmanaged code. Current appdomain should + // be selected to dispatch the call. + CORVTableFromUnmanagedRetainAppDomain = 0x08 + + // Call most derived method described by + CORVTableCallMostDerived = 0x10 +) + +// Metadata Tables constants. +const ( + // The current module descriptor. + Module = 0 + // Class reference descriptors. + TypeRef = 1 + // Class or interface definition descriptors. + TypeDef = 2 + // A class-to-fields lookup table, which does not exist in optimized + // metadata (#~ stream). + FieldPtr = 3 + // Field definition descriptors. + Field = 4 + // A class-to-methods lookup table, which does not exist in + // optimized metadata (#~ stream). + MethodPtr = 5 + // Method definition descriptors. + Method = 6 + // A method-to-parameters lookup table, which does not exist in optimized + // metadata (#~ stream). + ParamPtr = 7 + // Parameter definition descriptors. + Param = 8 + // Interface implementation descriptors. + InterfaceImpl = 9 + // Member (field or method) reference descriptors. + MemberRef = 10 + // Constant value descriptors that map the default values stored in the + // #Blob stream to respective fields, parameters, and properties. + Constant = 11 + // Custom attribute descriptors. + CustomAttribute = 12 + // Field or parameter marshaling descriptors for managed/unmanaged + // inter-operations. + FieldMarshal = 13 + // Security descriptors. + DeclSecurity = 14 + // Class layout descriptors that hold information about how the loader + // should lay out respective classes. + ClassLayout = 15 + // Field layout descriptors that specify the offset or ordinal of + // individual fields. + FieldLayout = 16 + // Stand-alone signature descriptors. Signatures per se are used in two + // capacities: as composite signatures of local variables of methods and as + // parameters of the call indirect (calli) IL instruction. + StandAloneSig = 17 + // A class-to-events mapping table. This is not an intermediate lookup + // table, and it does exist in optimized metadata. + EventMap = 18 + // An event map–to–events lookup table, which does not exist in optimized + // metadata (#~ stream). + EventPtr = 19 + // Event descriptors. + Event = 20 + // A class-to-properties mapping table. This is not an intermediate lookup + // table, and it does exist in optimized metadata. + PropertyMap = 21 + // A property map–to–properties lookup table, which does not exist in + // optimized metadata (#~ stream). + PropertyPtr = 22 + // Property descriptors. 
+ Property = 23 + // Method semantics descriptors that hold information about which method is + // associated with a specific property or event and in what capacity. + MethodSemantics = 24 + // Method implementation descriptors. + MethodImpl = 25 + // Module reference descriptors. + ModuleRef = 26 + // Type specification descriptors. + TypeSpec = 27 + // Implementation map descriptors used for the platform invocation + // (P/Invoke) type of managed/unmanaged code inter-operation. + ImplMap = 28 + // Field-to-data mapping descriptors. + FieldRVA = 29 + // Edit-and-continue log descriptors that hold information about what + // changes have been made to specific metadata items during in-memory + // editing. This table does not exist in optimized metadata (#~ stream) + ENCLog = 30 + // Edit-and-continue mapping descriptors. This table does not exist in + // optimized metadata (#~ stream). + ENCMap = 31 + // The current assembly descriptor, which should appear only in the prime + // module metadata. + Assembly = 32 + // This table is unused. + AssemblyProcessor = 33 + // This table is unused. + AssemblyOS = 34 + // Assembly reference descriptors. + AssemblyRef = 35 + // This table is unused. + AssemblyRefProcessor = 36 + // This table is unused. + AssemblyRefOS = 37 + // File descriptors that contain information about other files in the + // current assembly. + FileMD = 38 + // Exported type descriptors that contain information about public classes + // exported by the current assembly, which are declared in other modules of + // the assembly. Only the prime module of the assembly should carry this + // table. + ExportedType = 39 + // Managed resource descriptors. + ManifestResource = 40 + // Nested class descriptors that provide mapping of nested classes to their + // respective enclosing classes. + NestedClass = 41 + // Type parameter descriptors for generic (parameterized) classes and + // methods. + GenericParam = 42 + // Generic method instantiation descriptors. + MethodSpec = 43 + // Descriptors of constraints specified for type parameters of generic + // classes and methods + GenericParamConstraint = 44 +) + +// Heaps Streams Bit Positions. +const ( + StringStream = 0 + GUIDStream = 1 + BlobStream = 2 +) + +// MetadataTableIndexToString returns the string representation of the metadata +// table index. 
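+//
+// For example, given the table constants above (a small usage sketch):
+//
+//	MetadataTableIndexToString(Module) // "Module"
+//	MetadataTableIndexToString(FileMD) // "File"
+//	MetadataTableIndexToString(99)     // "" for an unknown index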
+func MetadataTableIndexToString(k int) string { + metadataTablesMap := map[int]string{ + Module: "Module", + TypeRef: "TypeRef", + TypeDef: "TypeDef", + FieldPtr: "FieldPtr", + Field: "Field", + MethodPtr: "MethodPtr", + Method: "Method", + ParamPtr: "ParamPtr", + Param: "Param", + InterfaceImpl: "InterfaceImpl", + MemberRef: "MemberRef", + Constant: "Constant", + CustomAttribute: "CustomAttribute", + FieldMarshal: "FieldMarshal", + DeclSecurity: "DeclSecurity", + ClassLayout: "ClassLayout", + FieldLayout: "FieldLayout", + StandAloneSig: "StandAloneSig", + EventMap: "EventMap", + EventPtr: "EventPtr", + Event: "Event", + PropertyMap: "PropertyMap", + PropertyPtr: "PropertyPtr", + Property: "Property", + MethodSemantics: "MethodSemantics", + MethodImpl: "MethodImpl", + ModuleRef: "ModuleRef", + TypeSpec: "TypeSpec", + ImplMap: "ImplMap", + FieldRVA: "FieldRVA", + ENCLog: "ENCLog", + ENCMap: "ENCMap", + Assembly: "Assembly", + AssemblyProcessor: "AssemblyProcessor", + AssemblyOS: "AssemblyOS", + AssemblyRef: "AssemblyRef", + AssemblyRefProcessor: "AssemblyRefProcessor", + AssemblyRefOS: "AssemblyRefOS", + FileMD: "File", + ExportedType: "ExportedType", + ManifestResource: "ManifestResource", + NestedClass: "NestedClass", + GenericParam: "GenericParam", + MethodSpec: "MethodSpec", + GenericParamConstraint: "GenericParamConstraint", + } + + if value, ok := metadataTablesMap[k]; ok { + return value + } + return "" +} + +// GetMetadataStreamIndexSize returns the size of indexes to read into a +// particular heap. +func (pe *File) GetMetadataStreamIndexSize(BitPosition int) int { + // The `Heaps` field is a bit vector that encodes how wide indexes into the + // various heaps are: + // - If bit 0 is set, indexes into the "#String" heap are 4 bytes wide; + // - if bit 1 is set, indexes into the "#GUID" heap are 4 bytes wide; + // - if bit 2 is set, indexes into the "#Blob" heap are 4 bytes wide. + heaps := pe.CLR.MetadataTablesStreamHeader.Heaps + if IsBitSet(uint64(heaps), BitPosition) { + return 4 + } + // Conversely, if the HeapSizes bit for a particular heap is not set, + // indexes into that heap are 2 bytes wide. + return 2 +} + +// ImageDataDirectory represents the directory format. +type ImageDataDirectory struct { + + // The relative virtual address of the table. + VirtualAddress uint32 `json:"virtual_address"` + + // The size of the table, in bytes. + Size uint32 `json:"size"` +} + +// ImageCOR20Header represents the CLR 2.0 header structure. +type ImageCOR20Header struct { + + // Size of the header in bytes. + Cb uint32 `json:"cb"` + + // Major number of the minimum version of the runtime required to run the + // program. + MajorRuntimeVersion uint16 `json:"major_runtime_version"` + + // Minor number of the version of the runtime required to run the program. + MinorRuntimeVersion uint16 `json:"minor_runtime_version"` + + // RVA and size of the metadata. + MetaData ImageDataDirectory `json:"meta_data"` + + // Bitwise flags indicating attributes of this executable. + Flags COMImageFlagsType `json:"flags"` + + // Metadata identifier (token) of the entry point for the image file; can + // be 0 for DLL images. This field identifies a method belonging to this + // module or a module containing the entry point method. + // In images of version 2.0 and newer, this field may contain RVA of the + // embedded native entry point method. + // union { + // + // If COMIMAGE_FLAGS_NATIVE_ENTRYPOINT is not set, + // EntryPointToken represents a managed entrypoint. 
+	//    DWORD EntryPointToken;
+	//
+	//    If COMIMAGE_FLAGS_NATIVE_ENTRYPOINT is set,
+	//    EntryPointRVA represents an RVA to a native entrypoint.
+	//    DWORD EntryPointRVA;
+	// };
+	EntryPointRVAorToken uint32 `json:"entry_point_rva_or_token"`
+
+	// This is the blob of managed resources. Fetched using
+	// code:AssemblyNative.GetResource and code:PEFile.GetResource and accessible
+	// from managed code from System.Assembly.GetManifestResourceStream. The
+	// metadata has a table that maps names to offsets into this blob, so
+	// logically the blob is a set of resources.
+	Resources ImageDataDirectory `json:"resources"`
+
+	// RVA and size of the hash data for this PE file, used by the loader for
+	// binding and versioning. IL assemblies can be signed with a public-private
+	// key to validate who created it. The signature goes here if this feature
+	// is used.
+	StrongNameSignature ImageDataDirectory `json:"strong_name_signature"`
+
+	// RVA and size of the Code Manager table. In the existing releases of the
+	// runtime, this field is reserved and must be set to 0.
+	CodeManagerTable ImageDataDirectory `json:"code_manager_table"`
+
+	// RVA and size in bytes of an array of virtual table (v-table) fixups.
+	// Among current managed compilers, only the VC++ linker and the IL
+	// assembler can produce this array.
+	VTableFixups ImageDataDirectory `json:"vtable_fixups"`
+
+	// RVA and size of an array of addresses of jump thunks. Among managed
+	// compilers, only the VC++ of versions pre-8.0 could produce this table,
+	// which allows the export of unmanaged native methods embedded in the
+	// managed PE file. In v2.0+ of CLR this entry is obsolete and must be set
+	// to 0.
+	ExportAddressTableJumps ImageDataDirectory `json:"export_address_table_jumps"`
+
+	// Reserved for precompiled images; set to 0. In NGEN images it points at
+	// a code:CORCOMPILE_HEADER structure.
+	ManagedNativeHeader ImageDataDirectory `json:"managed_native_header"`
+}
+
+// ImageCORVTableFixup defines the v-table fixups that contain the
+// initializing information necessary for the runtime to create the thunks.
+// Non VOS v-table entries. Define an array of these pointed to by
+// IMAGE_COR20_HEADER.VTableFixups. Each entry describes a contiguous array of
+// v-table slots. The slots start out initialized to the metadata token value
+// for the method they need to call. At image load time, the CLR Loader will
+// turn each entry into a pointer to machine code for the CPU, which can be
+// called directly.
+type ImageCORVTableFixup struct {
+	RVA   uint32 `json:"rva"`   // Offset of v-table array in image.
+	Count uint16 `json:"count"` // How many entries at location.
+	Type  uint16 `json:"type"`  // COR_VTABLE_xxx type of entries.
+}
+
+// MetadataHeader consists of a storage signature and a storage header.
+type MetadataHeader struct {
+
+	// The storage signature, which must be 4-byte aligned:
+	// ”Magic” signature for physical metadata, currently 0x424A5342, or, read
+	// as characters, BSJB—the initials of the four “founding fathers” Brian
+	// Harry, Susan Radke-Sproull, Jason Zander, and Bill Evans, who started
+	// the runtime development in 1998.
+	Signature uint32 `json:"signature"`
+
+	// Major version.
+	MajorVersion uint16 `json:"major_version"`
+
+	// Minor version.
+	MinorVersion uint16 `json:"minor_version"`
+
+	// Reserved; set to 0.
+	ExtraData uint32 `json:"extra_data"`
+
+	// Length of the version string.
+	VersionString uint32 `json:"version_string"`
+
+	// Version string.
+	Version string `json:"version"`
+
+	// The storage header follows the storage signature, aligned on a 4-byte
+	// boundary.
+
+	// Reserved; set to 0.
+	Flags uint8 `json:"flags"`
+
+	// Another byte is used for padding.
+
+	// Number of streams.
+	Streams uint16 `json:"streams"`
+}
+
+// MetadataStreamHeader represents a Metadata Stream Header Structure.
+type MetadataStreamHeader struct {
+	// Offset in the file for this stream.
+	Offset uint32 `json:"offset"`
+
+	// Size of the stream in bytes.
+	Size uint32 `json:"size"`
+
+	// Name of the stream; a zero-terminated ASCII string no longer than 31
+	// characters (plus zero terminator). The name might be shorter, in which
+	// case the size of the stream header is correspondingly reduced, padded to
+	// the 4-byte boundary.
+	Name string `json:"name"`
+}
+
+// MetadataTableStreamHeader represents the Metadata Table Stream Header Structure.
+type MetadataTableStreamHeader struct {
+	// Reserved; set to 0.
+	Reserved uint32 `json:"reserved"`
+
+	// Major version of the table schema (1 for v1.0 and v1.1; 2 for v2.0 or later).
+	MajorVersion uint8 `json:"major_version"`
+
+	// Minor version of the table schema (0 for all versions).
+	MinorVersion uint8 `json:"minor_version"`
+
+	// Binary flags that indicate the offset sizes to be used within the heaps.
+	// A 4-byte unsigned integer offset is indicated by:
+	// - 0x01 for a string heap, 0x02 for a GUID heap, and 0x04 for a blob heap.
+	// If a flag is not set, the respective heap offset is a 2-byte unsigned integer.
+	// A #- stream can also have special flags set:
+	// - flag 0x20, indicating that the stream contains only changes made
+	// during an edit-and-continue session, and;
+	// - flag 0x80, indicating that the metadata might contain items marked as
+	// deleted.
+	Heaps uint8 `json:"heaps"`
+
+	// Bit width of the maximal record index to all tables of the metadata;
+	// calculated at run time (during the metadata stream initialization).
+	RID uint8 `json:"rid"`
+
+	// Bit vector of present tables, each bit representing one table (1 if
+	// present).
+	MaskValid uint64 `json:"mask_valid"`
+
+	// Bit vector of sorted tables, each bit representing a respective table (1
+	// if sorted).
+	Sorted uint64 `json:"sorted"`
+}
+
+// MetadataTable represents the content of a particular table in the metadata.
+// The metadata schema defines 45 tables.
+type MetadataTable struct {
+	// The name of the table.
+	Name string `json:"name"`
+
+	// Number of rows in the table.
+	CountCols uint32 `json:"count_cols"`
+
+	// Every table has a different layout, defined in the ECMA-335 spec.
+	// Content abstracts the type each table points to.
+	Content interface{} `json:"content"`
+}
+
+// ModuleTableRow represents a row in the `Module` metadata table, which
+// contains a single record that provides the identification of the current
+// module. The column structure of the table is as follows:
+type ModuleTableRow struct {
+	// Used only at run time, in edit-and-continue mode.
+	Generation uint16 `json:"generation"`
+
+	// (offset in the #Strings stream) The module name, which is the same as
+	// the name of the executable file with its extension but without a path.
+	// The length should not exceed 512 bytes in UTF-8 encoding, counting the
+	// zero terminator.
+	Name uint32 `json:"name"`
+
+	// (offset in the #GUID stream) A globally unique identifier, assigned
+	// to the module as it is generated.
+	Mvid uint32 `json:"mvid"`
+
+	// (offset in the #GUID stream): Used only at run time, in
+	// edit-and-continue mode.
+	EncID uint32 `json:"enc_id"`
+
+	// (offset in the #GUID stream): Used only at run time, in edit-and-continue mode.
+	EncBaseID uint32 `json:"enc_base_id"`
+}
+
+// CLRData embeds the Common Language Runtime Header structure as well as the
+// Metadata header structure.
+type CLRData struct {
+	CLRHeader                  ImageCOR20Header          `json:"clr_header"`
+	MetadataHeader             MetadataHeader            `json:"metadata_header"`
+	MetadataStreamHeaders      []MetadataStreamHeader    `json:"metadata_stream_headers"`
+	MetadataStreams            map[string][]byte         `json:"-"`
+	MetadataTablesStreamHeader MetadataTableStreamHeader `json:"metadata_tables_stream_header"`
+	MetadataTables             map[int]*MetadataTable    `json:"metadata_tables"`
+	StringStreamIndexSize      int                       `json:"-"`
+	GUIDStreamIndexSize        int                       `json:"-"`
+	BlobStreamIndexSize        int                       `json:"-"`
+}
+
+func (pe *File) readFromMetadataStream(stream int, off uint32, out *uint32) (uint32, error) {
+	var indexSize int
+	switch stream {
+	case StringStream:
+		indexSize = pe.CLR.StringStreamIndexSize
+	case GUIDStream:
+		indexSize = pe.CLR.GUIDStreamIndexSize
+	case BlobStream:
+		indexSize = pe.CLR.BlobStreamIndexSize
+	}
+
+	var data uint32
+	var err error
+	switch indexSize {
+	case 2:
+		d, err := pe.ReadUint16(off)
+		if err != nil {
+			return 0, err
+		}
+		data = uint32(d)
+	case 4:
+		data, err = pe.ReadUint32(off)
+		if err != nil {
+			return 0, err
+		}
+	}
+
+	*out = data
+	return uint32(indexSize), nil
+}
+
+func (pe *File) parseMetadataStream(off, size uint32) (MetadataTableStreamHeader, error) {
+
+	mdTableStreamHdr := MetadataTableStreamHeader{}
+	if size == 0 {
+		return mdTableStreamHdr, nil
+	}
+
+	mdTableStreamHdrSize := uint32(binary.Size(mdTableStreamHdr))
+	err := pe.structUnpack(&mdTableStreamHdr, off, mdTableStreamHdrSize)
+	if err != nil {
+		return mdTableStreamHdr, err
+	}
+
+	return mdTableStreamHdr, nil
+}
+
+func (pe *File) parseMetadataHeader(offset, size uint32) (MetadataHeader, error) {
+	var err error
+	mh := MetadataHeader{}
+
+	if mh.Signature, err = pe.ReadUint32(offset); err != nil {
+		return mh, err
+	}
+	if mh.MajorVersion, err = pe.ReadUint16(offset + 4); err != nil {
+		return mh, err
+	}
+	if mh.MinorVersion, err = pe.ReadUint16(offset + 6); err != nil {
+		return mh, err
+	}
+	if mh.ExtraData, err = pe.ReadUint32(offset + 8); err != nil {
+		return mh, err
+	}
+	if mh.VersionString, err = pe.ReadUint32(offset + 12); err != nil {
+		return mh, err
+	}
+	mh.Version, err = pe.getStringAtOffset(offset+16, mh.VersionString)
+	if err != nil {
+		return mh, err
+	}
+
+	offset += 16 + mh.VersionString
+	if mh.Flags, err = pe.ReadUint8(offset); err != nil {
+		return mh, err
+	}
+
+	if mh.Streams, err = pe.ReadUint16(offset + 2); err != nil {
+		return mh, err
+	}
+
+	return mh, err
+}
+
+func (pe *File) parseMetadataModuleTable(off uint32) (ModuleTableRow, error) {
+	var err error
+	var indexSize uint32
+	modTableRow := ModuleTableRow{}
+
+	if modTableRow.Generation, err = pe.ReadUint16(off); err != nil {
+		return ModuleTableRow{}, err
+	}
+	off += 2
+
+	if indexSize, err = pe.readFromMetadataStream(StringStream, off, &modTableRow.Name); err != nil {
+		return ModuleTableRow{}, err
+	}
+	off += indexSize
+
+	if indexSize, err = pe.readFromMetadataStream(GUIDStream, off, &modTableRow.Mvid); err != nil {
+		return ModuleTableRow{}, err
+	}
+	off += indexSize
+
+	if indexSize, err = pe.readFromMetadataStream(GUIDStream, off, &modTableRow.EncID); err != nil {
+		return ModuleTableRow{}, err
+	}
+	off += indexSize
+
+	if _, err = pe.readFromMetadataStream(GUIDStream, off, &modTableRow.EncBaseID); err != nil {
+		return ModuleTableRow{}, err
+	}
+
+	return modTableRow, nil
+}
+
+// The 15th directory entry of the PE header contains the RVA and size of the
+// runtime header in the image file. The runtime header, which contains all of
+// the runtime-specific data entries and other information, should reside in a
+// read-only section of the image file. The IL assembler puts the common
+// language runtime header in the .text section.
+func (pe *File) parseCLRHeaderDirectory(rva, size uint32) error {
+
+	clrHeader := ImageCOR20Header{}
+	offset := pe.GetOffsetFromRva(rva)
+	err := pe.structUnpack(&clrHeader, offset, size)
+	if err != nil {
+		return err
+	}
+
+	pe.CLR.CLRHeader = clrHeader
+	if clrHeader.MetaData.VirtualAddress == 0 || clrHeader.MetaData.Size == 0 {
+		return nil
+	}
+
+	// If we get a CLR header, we assume that this is enough to say we have
+	// CLR data to show, even if parsing other structures fails later.
+	pe.HasCLR = true
+
+	offset = pe.GetOffsetFromRva(clrHeader.MetaData.VirtualAddress)
+	mh, err := pe.parseMetadataHeader(offset, clrHeader.MetaData.Size)
+	if err != nil {
+		return err
+	}
+	pe.CLR.MetadataHeader = mh
+	pe.CLR.MetadataStreams = make(map[string][]byte)
+	offset += 16 + mh.VersionString + 4
+
+	// Immediately following the MetadataHeader is a series of Stream Headers.
+	// A “stream” is to the metadata what a “section” is to the assembly. The
+	// NumberOfStreams property indicates how many StreamHeaders to read.
+	mdStreamHdrOff := uint32(0)
+	mdStreamHdrSize := uint32(0)
+	for i := uint16(0); i < mh.Streams; i++ {
+		sh := MetadataStreamHeader{}
+		if sh.Offset, err = pe.ReadUint32(offset); err != nil {
+			return err
+		}
+		if sh.Size, err = pe.ReadUint32(offset + 4); err != nil {
+			return err
+		}
+
+		// The name requires special treatment: it is a zero-terminated ASCII
+		// string, padded to the next 4-byte boundary.
+		offset += 8
+		for j := uint32(0); j <= 32; j++ {
+			var c uint8
+			if c, err = pe.ReadUint8(offset); err != nil {
+				return err
+			}
+
+			offset++
+			if c == 0 && (j+1)%4 == 0 {
+				break
+			}
+			if c != 0 {
+				sh.Name += string(c)
+			}
+		}
+
+		// The streams #~ and #- are mutually exclusive; that is, the metadata
+		// structure of the module is either optimized or un-optimized; it
+		// cannot be both at the same time or be something in between.
+		if sh.Name == "#~" || sh.Name == "#-" {
+			mdStreamHdrOff = sh.Offset
+			mdStreamHdrSize = sh.Size
+		}
+
+		// Save the stream contents into a map keyed by stream name.
+		rva = clrHeader.MetaData.VirtualAddress + sh.Offset
+		start := pe.GetOffsetFromRva(rva)
+		pe.CLR.MetadataStreams[sh.Name] = pe.data[start : start+sh.Size]
+		pe.CLR.MetadataStreamHeaders = append(pe.CLR.MetadataStreamHeaders, sh)
+	}
+
+	// Get the Metadata Table Stream.
+	if mdStreamHdrSize == 0 {
+		return nil
+	}
+	// The offset indicated by the stream header is relative to the metadata
+	// directory address in the CLR header.
+	rva = clrHeader.MetaData.VirtualAddress + mdStreamHdrOff
+	offset = pe.GetOffsetFromRva(rva)
+	mdTableStreamHdr, err := pe.parseMetadataStream(offset, mdStreamHdrSize)
+	if err != nil {
+		return err
+	}
+	pe.CLR.MetadataTablesStreamHeader = mdTableStreamHdr
+
+	// Get the size of the indexes into the "#String", "#GUID" and "#Blob" streams.
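+	// For example, a Heaps value of 0x07 (all three flags set) makes every
+	// index below 4 bytes wide, while a value of 0x00 keeps them at 2 bytes.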
+	pe.CLR.StringStreamIndexSize = pe.GetMetadataStreamIndexSize(StringStream)
+	pe.CLR.GUIDStreamIndexSize = pe.GetMetadataStreamIndexSize(GUIDStream)
+	pe.CLR.BlobStreamIndexSize = pe.GetMetadataStreamIndexSize(BlobStream)
+
+	// This header is followed by a sequence of 4-byte unsigned integers
+	// indicating the number of records in each table marked 1 in the MaskValid
+	// bit vector.
+	offset += uint32(binary.Size(mdTableStreamHdr))
+	pe.CLR.MetadataTables = make(map[int]*MetadataTable)
+	for i := 0; i <= GenericParamConstraint; i++ {
+		if IsBitSet(mdTableStreamHdr.MaskValid, i) {
+			mdTable := MetadataTable{}
+			mdTable.Name = MetadataTableIndexToString(i)
+			mdTable.CountCols, err = pe.ReadUint32(offset)
+			if err != nil {
+				break
+			}
+			offset += 4
+			pe.CLR.MetadataTables[i] = &mdTable
+		}
+	}
+
+	// Parse the metadata tables.
+	for tableIdx, table := range pe.CLR.MetadataTables {
+		switch tableIdx {
+		case Module:
+			table.Content, err = pe.parseMetadataModuleTable(offset)
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+// String returns a string interpretation of a COMImageFlags type.
+func (flags COMImageFlagsType) String() []string {
+	COMImageFlags := map[COMImageFlagsType]string{
+		COMImageFlagsILOnly:           "IL Only",
+		COMImageFlags32BitRequired:    "32-Bit Required",
+		COMImageFlagILLibrary:         "IL Library",
+		COMImageFlagsStrongNameSigned: "Strong Name Signed",
+		COMImageFlagsNativeEntrypoint: "Native Entrypoint",
+		COMImageFlagsTrackDebugData:   "Track Debug Data",
+		COMImageFlags32BitPreferred:   "32-Bit Preferred",
+	}
+
+	var values []string
+	for k, v := range COMImageFlags {
+		if (k & flags) == k {
+			values = append(values, v)
+		}
+	}
+
+	return values
+}
diff --git a/vendor/github.com/saferwall/pe/exception.go b/vendor/github.com/saferwall/pe/exception.go
new file mode 100644
index 00000000..01697fbe
--- /dev/null
+++ b/vendor/github.com/saferwall/pe/exception.go
@@ -0,0 +1,598 @@
+// Copyright 2018 Saferwall. All rights reserved.
+// Use of this source code is governed by an Apache v2 license
+// that can be found in the LICENSE file.
+
+package pe
+
+import (
+	"encoding/binary"
+	"strconv"
+)
+
+const (
+	// Unwind information flags.
+
+	// UnwFlagNHandler - The function has no handler.
+	UnwFlagNHandler = uint8(0x0)
+
+	// UnwFlagEHandler - The function has an exception handler that should
+	// be called when looking for functions that need to examine exceptions.
+	UnwFlagEHandler = uint8(0x1)
+
+	// UnwFlagUHandler - The function has a termination handler that should
+	// be called when unwinding an exception.
+	UnwFlagUHandler = uint8(0x2)
+
+	// UnwFlagChainInfo - This unwind info structure is not the primary one
+	// for the procedure. Instead, the chained unwind info entry is the contents
+	// of a previous RUNTIME_FUNCTION entry. For information, see Chained unwind
+	// info structures. If this flag is set, then the UNW_FLAG_EHANDLER and
+	// UNW_FLAG_UHANDLER flags must be cleared. Also, the frame register and
+	// fixed-stack allocation field must have the same values as in the primary
+	// unwind info.
+	UnwFlagChainInfo = uint8(0x4)
+)
+
+// The meaning of the operation info bits depends upon the operation code.
+// To encode a general-purpose (integer) register, this mapping is used:
+const (
+	rax = iota
+	rcx
+	rdx
+	rbx
+	rsp
+	rbp
+	rsi
+	rdi
+	r8
+	r9
+	r10
+	r11
+	r12
+	r13
+	r14
+	r15
+)
+
+// OpInfoRegisters maps registers to their string representation.
+var OpInfoRegisters = map[uint8]string{ + rax: "RAX", + rcx: "RCX", + rdx: "RDX", + rbx: "RBX", + rsp: "RSP", + rbp: "RBP", + rsi: "RSI", + rdi: "RDI", + r8: "R8", + r9: "R9", + r10: "R10", + r11: "R11", + r12: "R12", + r13: "R13", + r14: "R14", + r15: "R15", +} + +// UnwindOpType represents the type of an unwind opcode. +type UnwindOpType uint8 + +// _UNWIND_OP_CODES +const ( + // Push a nonvolatile integer register, decrementing RSP by 8. The + // operation info is the number of the register. Because of the constraints + // on epilogs, UWOP_PUSH_NONVOL unwind codes must appear first in the + // prolog and correspondingly, last in the unwind code array. This relative + // ordering applies to all other unwind codes except UWOP_PUSH_MACHFRAME. + UwOpPushNonVol = UnwindOpType(0) + + // Allocate a large-sized area on the stack. There are two forms. If the + // operation info equals 0, then the size of the allocation divided by 8 is + // recorded in the next slot, allowing an allocation up to 512K - 8. If the + // operation info equals 1, then the unscaled size of the allocation is + // recorded in the next two slots in little-endian format, allowing + // allocations up to 4GB - 8. + UwOpAllocLarge = UnwindOpType(1) + + // Allocate a small-sized area on the stack. The size of the allocation is + // the operation info field * 8 + 8, allowing allocations from 8 to 128 + // bytes. + UwOpAllocSmall = UnwindOpType(2) + + // Establish the frame pointer register by setting the register to some + // offset of the current RSP. The offset is equal to the Frame Register + // offset (scaled) field in the UNWIND_INFO * 16, allowing offsets from 0 + // to 240. The use of an offset permits establishing a frame pointer that + // points to the middle of the fixed stack allocation, helping code density + // by allowing more accesses to use short instruction forms. The operation + // info field is reserved and shouldn't be used. + UwOpSetFpReg = UnwindOpType(3) + + // Save a nonvolatile integer register on the stack using a MOV instead of + // a PUSH. This code is primarily used for shrink-wrapping, where a + // nonvolatile register is saved to the stack in a position that was + // previously allocated. The operation info is the number of the register. + // The scaled-by-8 stack offset is recorded in the next unwind operation + // code slot, as described in the note above. + UwOpSaveNonVol = UnwindOpType(4) + + // Save a nonvolatile integer register on the stack with a long offset, + // using a MOV instead of a PUSH. This code is primarily used for + // shrink-wrapping, where a nonvolatile register is saved to the stack in a + // position that was previously allocated. The operation info is the number + // of the register. The unscaled stack offset is recorded in the next two + // unwind operation code slots, as described in the note above. + UwOpSaveNonVolFar = UnwindOpType(5) + + // For version 1 of the UNWIND_INFO structure, this code was called + // UWOP_SAVE_XMM and occupied 2 records, it retained the lower 64 bits of + // the XMM register, but was later removed and is now skipped. In practice, + // this code has never been used. + // For version 2 of the UNWIND_INFO structure, this code is called + // UWOP_EPILOG, takes 2 entries, and describes the function epilogue. 
+	UwOpEpilog = UnwindOpType(6)
+
+	// For version 1 of the UNWIND_INFO structure, this code was called
+	// UWOP_SAVE_XMM_FAR and occupied 3 records, it saved the lower 64 bits of
+	// the XMM register, but was later removed and is now skipped. In practice,
+	// this code has never been used.
+	// For version 2 of the UNWIND_INFO structure, this code is called
+	// UWOP_SPARE_CODE, takes 3 entries, and makes no sense.
+	UwOpSpareCode = UnwindOpType(7)
+
+	// Save all 128 bits of a nonvolatile XMM register on the stack. The
+	// operation info is the number of the register. The scaled-by-16 stack
+	// offset is recorded in the next slot.
+	UwOpSaveXmm128 = UnwindOpType(8)
+
+	// Save all 128 bits of a nonvolatile XMM register on the stack with a long
+	// offset. The operation info is the number of the register. The unscaled
+	// stack offset is recorded in the next two slots.
+	UwOpSaveXmm128Far = UnwindOpType(9)
+
+	// Push a machine frame. This unwind code is used to record the effect of a
+	// hardware interrupt or exception.
+	UwOpPushMachFrame = UnwindOpType(10)
+
+	// UWOP_SET_FPREG_LARGE is a CLR Unix-only extension to the Windows AMD64
+	// unwind codes. It is not part of the standard Windows AMD64 unwind codes
+	// specification. UWOP_SET_FPREG allows for a maximum of a 240 byte offset
+	// between RSP and the frame pointer, when the frame pointer is
+	// established. UWOP_SET_FPREG_LARGE has a 32-bit range scaled by 16. When
+	// UWOP_SET_FPREG_LARGE is used, UNWIND_INFO.FrameRegister must be set to
+	// the frame pointer register, and UNWIND_INFO.FrameOffset must be set to
+	// 15 (its maximum value). UWOP_SET_FPREG_LARGE is followed by two
+	// UNWIND_CODEs that are combined to form a 32-bit offset (the same as
+	// UWOP_SAVE_NONVOL_FAR). This offset is then scaled by 16. The result must
+	// be less than 2^32 (that is, the top 4 bits of the unscaled 32-bit number
+	// must be zero). This result is used as the frame pointer register offset
+	// from RSP at the time the frame pointer is established. Either
+	// UWOP_SET_FPREG or UWOP_SET_FPREG_LARGE can be used, but not both.
+	UwOpSetFpRegLarge = UnwindOpType(11)
+)
+
+// ImageRuntimeFunctionEntry represents an entry in the function table on 64-bit
+// Windows (IMAGE_RUNTIME_FUNCTION_ENTRY). Table-based exception handling requires
+// a table entry for all functions that allocate stack space or call another
+// function (for example, non-leaf functions).
+type ImageRuntimeFunctionEntry struct {
+	// The address of the start of the function.
+	BeginAddress uint32 `json:"begin_address"`
+
+	// The address of the end of the function.
+	EndAddress uint32 `json:"end_address"`
+
+	// The unwind data info structure is used to record the effects a function
+	// has on the stack pointer, and where the nonvolatile registers are saved
+	// on the stack.
+	UnwindInfoAddress uint32 `json:"unwind_info_address"`
+}
+
+// ImageARMRuntimeFunctionEntry represents the function table entry for the ARM
+// platform.
+type ImageARMRuntimeFunctionEntry struct {
+	// Function Start RVA is the 32-bit RVA of the start of the function. If
+	// the function contains thumb code, the low bit of this address must be set.
+	BeginAddress uint32 `bitfield:",functionstart"`
+
+	// Flag is a 2-bit field that indicates how to interpret the remaining
+	// 30 bits of the second .pdata word. If Flag is 0, then the remaining bits
+	// form an Exception Information RVA (with the low two bits implicitly 0).
+	// If Flag is non-zero, then the remaining bits form a Packed Unwind Data
+	// structure.
+	Flag uint8
+
+	/* Exception Information RVA or Packed Unwind Data.
+
+	Exception Information RVA is the address of the variable-length exception
+	information structure, stored in the .xdata section.
+	This data must be 4-byte aligned.
+
+	Packed Unwind Data is a compressed description of the operations required
+	to unwind from a function, assuming a canonical form. In this case, no
+	.xdata record is required. */
+	ExceptionFlag uint32
+}
+
+// UnwindCode is used to record the sequence of operations in the prolog that
+// affect the nonvolatile registers and RSP. Each code item has this format:
+/* typedef union _UNWIND_CODE {
+    struct {
+        UCHAR CodeOffset;
+        UCHAR UnwindOp : 4;
+        UCHAR OpInfo   : 4;
+    } DUMMYUNIONNAME;
+
+    struct {
+        UCHAR OffsetLow;
+        UCHAR UnwindOp   : 4;
+        UCHAR OffsetHigh : 4;
+    } EpilogueCode;
+
+    USHORT FrameOffset;
+} UNWIND_CODE, *PUNWIND_CODE;*/
+//
+// It provides information about the amount of stack space allocated, the location
+// of saved non-volatile registers, and whether or not a frame register is used
+// and what relation it has to the rest of the stack.
+type UnwindCode struct {
+	// Offset (from the beginning of the prolog) of the end of the instruction
+	// that performs this operation, plus 1 (that is, the offset of the start
+	// of the next instruction).
+	CodeOffset uint8 `json:"code_offset"`
+
+	// The unwind operation code.
+	UnwindOp UnwindOpType `json:"unwind_op"`
+
+	// Operation info.
+	OpInfo uint8 `json:"op_info"`
+
+	// Allocation size.
+	Operand     string `json:"operand"`
+	FrameOffset uint16 `json:"frame_offset"`
+}
+
+// UnwindInfo represents the _UNWIND_INFO structure. It is used to record the
+// effects a function has on the stack pointer, and where the nonvolatile
+// registers are saved on the stack.
+type UnwindInfo struct {
+	// (3 bits) Version number of the unwind data, currently 1 and 2.
+	Version uint8 `json:"version"`
+
+	// (5 bits) Three flags are currently defined above.
+	Flags uint8 `json:"flags"`
+
+	// Length of the function prolog in bytes.
+	SizeOfProlog uint8 `json:"size_of_prolog"`
+
+	// The number of slots in the unwind codes array. Some unwind codes,
+	// for example, UWOP_SAVE_NONVOL, require more than one slot in the array.
+	CountOfCodes uint8 `json:"count_of_codes"`
+
+	// If nonzero, then the function uses a frame pointer (FP), and this field
+	// is the number of the nonvolatile register used as the frame pointer,
+	// using the same encoding for the operation info field of UNWIND_CODE nodes.
+	FrameRegister uint8 `json:"frame_register"`
+
+	// If the frame register field is nonzero, this field is the scaled offset
+	// from RSP that is applied to the FP register when it's established. The
+	// actual FP register is set to RSP + 16 * this number, allowing offsets
+	// from 0 to 240. This offset permits pointing the FP register into the
+	// middle of the local stack allocation for dynamic stack frames, allowing
+	// better code density through shorter instructions. (That is, more
+	// instructions can use the 8-bit signed offset form.)
+	FrameOffset uint8 `json:"frame_offset"`
+
+	// An array of items that explains the effect of the prolog on the
+	// nonvolatile registers and RSP. See the section on UNWIND_CODE for the
+	// meanings of individual items. For alignment purposes, this array always
+	// has an even number of entries, and the final entry is potentially
+	// unused. In that case, the array is one longer than indicated by the
+	// count of unwind codes field.
+	UnwindCodes []UnwindCode `json:"unwind_codes"`
+
+	// Address of the exception handler when UNW_FLAG_EHANDLER is set.
+	ExceptionHandler uint32 `json:"exception_handler"`
+
+	// If flag UNW_FLAG_CHAININFO is set, then the UNWIND_INFO structure ends
+	// with three UWORDs. These UWORDs represent the RUNTIME_FUNCTION
+	// information for the function of the chained unwind.
+	FunctionEntry ImageRuntimeFunctionEntry `json:"function_entry"`
+}
+
+// The unwind codes are followed by an optional DWORD-aligned field that
+// contains the exception handler address or the address of chained unwind
+// information. If an exception handler address is specified, then it is
+// followed by the language-specific exception handler data.
+//
+// union {
+//     ULONG ExceptionHandler;
+//     ULONG FunctionEntry;
+// };
+//
+// ULONG ExceptionData[];
+
+// ScopeRecord describes a single __try/__except construct within a function.
+type ScopeRecord struct {
+	// This value indicates the offset of the first instruction within a __try
+	// block located in the function.
+	BeginAddress uint32 `json:"begin_address"`
+
+	// This value indicates the offset to the instruction after the last
+	// instruction within the __try block (conceptually the __except statement).
+	EndAddress uint32 `json:"end_address"`
+
+	// This value indicates the offset to the function located within the
+	// parentheses of the __except() statement. In the documentation you'll
+	// find this routine called the "exception handler" or "exception filter".
+	HandlerAddress uint32 `json:"handler_address"`
+
+	// This value indicates the offset to the first instruction in the __except
+	// block associated with the __try block.
+	JumpTarget uint32 `json:"jump_target"`
+}
+
+// ScopeTable represents a variable-length structure containing a count followed
+// by Count "scope records". While the RUNTIME_FUNCTION describes the entire range
+// of a function that contains SEH, the SCOPE_TABLE describes each of the individual
+// __try/__except blocks within the function.
+type ScopeTable struct {
+	// The count of scope records.
+	Count uint32 `json:"count"`
+
+	// An array of scope records.
+	ScopeRecords []ScopeRecord `json:"scope_records"`
+}
+
+//  typedef struct _SCOPE_TABLE {
+//      ULONG Count;
+//      struct
+//      {
+//          ULONG BeginAddress;
+//          ULONG EndAddress;
+//          ULONG HandlerAddress;
+//          ULONG JumpTarget;
+//      } ScopeRecord[1];
+//  } SCOPE_TABLE, *PSCOPE_TABLE;
+
+// Exception represents an entry in the function table.
+type Exception struct {
+	RuntimeFunction ImageRuntimeFunctionEntry `json:"runtime_function"`
+	UnwindInfo      UnwindInfo                `json:"unwind_info"`
+}
+
+func (pe *File) parseUnwindCode(offset uint32, version uint8) (UnwindCode, int) {
+
+	unwindCode := UnwindCode{}
+	advanceBy := 0
+
+	// Read the unwind code at offset (2 bytes).
+	uc, err := pe.ReadUint16(offset)
+	if err != nil {
+		return unwindCode, advanceBy
+	}
+
+	unwindCode.CodeOffset = uint8(uc & 0xff)
+	unwindCode.UnwindOp = UnwindOpType(uc & 0xf00 >> 8)
+	unwindCode.OpInfo = uint8(uc & 0xf000 >> 12)
+
+	switch unwindCode.UnwindOp {
+	case UwOpAllocSmall:
+		size := int(unwindCode.OpInfo*8 + 8)
+		unwindCode.Operand = "Size=" + strconv.Itoa(size)
+		advanceBy++
+	case UwOpAllocLarge:
+		if unwindCode.OpInfo == 0 {
+			size := int(binary.LittleEndian.Uint16(pe.data[offset+2:]) * 8)
+			unwindCode.Operand = "Size=" + strconv.Itoa(size)
+			advanceBy += 2
+		} else {
+			// The unscaled 32-bit size spans the next two slots.
+			size := int(binary.LittleEndian.Uint32(pe.data[offset+2:]))
+			unwindCode.Operand = "Size=" + strconv.Itoa(size)
+			advanceBy += 3
+		}
+	case UwOpSetFpReg:
+		unwindCode.Operand = "Register=" + OpInfoRegisters[unwindCode.OpInfo]
+		advanceBy++
+	case UwOpPushNonVol:
+		unwindCode.Operand = "Register=" + OpInfoRegisters[unwindCode.OpInfo]
+		advanceBy++
+	case UwOpSaveNonVol:
+		fo := binary.LittleEndian.Uint16(pe.data[offset+2:])
+		unwindCode.FrameOffset = fo * 8
+		unwindCode.Operand = "Register=" + OpInfoRegisters[unwindCode.OpInfo] +
+			", Offset=" + strconv.Itoa(int(unwindCode.FrameOffset))
+		advanceBy += 2
+	case UwOpSaveNonVolFar:
+		fo := binary.LittleEndian.Uint32(pe.data[offset+2:])
+		unwindCode.FrameOffset = uint16(fo * 8)
+		unwindCode.Operand = "Register=" + OpInfoRegisters[unwindCode.OpInfo] +
+			", Offset=" + strconv.Itoa(int(unwindCode.FrameOffset))
+		advanceBy += 3
+	case UwOpSaveXmm128:
+		fo := binary.LittleEndian.Uint16(pe.data[offset+2:])
+		unwindCode.FrameOffset = fo * 16
+		unwindCode.Operand = "Register=XMM" + strconv.Itoa(int(unwindCode.OpInfo)) +
+			", Offset=" + strconv.Itoa(int(unwindCode.FrameOffset))
+		advanceBy += 2
+	case UwOpSaveXmm128Far:
+		fo := binary.LittleEndian.Uint32(pe.data[offset+2:])
+		unwindCode.FrameOffset = uint16(fo)
+		unwindCode.Operand = "Register=XMM" + strconv.Itoa(int(unwindCode.OpInfo)) +
+			", Offset=" + strconv.Itoa(int(unwindCode.FrameOffset))
+		advanceBy += 3
+	case UwOpSetFpRegLarge:
+		unwindCode.Operand = "Register=" + OpInfoRegisters[unwindCode.OpInfo]
+		advanceBy += 2
+	case UwOpPushMachFrame:
+		advanceBy++
+	case UwOpEpilog:
+		if version == 2 {
+			unwindCode.Operand = "Flags=" + strconv.Itoa(int(unwindCode.OpInfo)) +
+				", Size=" + strconv.Itoa(int(unwindCode.CodeOffset))
+		}
+		advanceBy += 2
+	case UwOpSpareCode:
+		advanceBy += 3
+	default:
+		advanceBy++ // advance anyway, so the caller's loop can terminate
+		pe.logger.Warnf("Unknown unwind opcode %d", unwindCode.UnwindOp)
+	}
+
+	return unwindCode, advanceBy
+}
+
+func (pe *File) parseUnwindInfo(unwindInfo uint32) UnwindInfo {
+
+	ui := UnwindInfo{}
+
+	offset := pe.GetOffsetFromRva(unwindInfo)
+	v, err := pe.ReadUint32(offset)
+	if err != nil {
+		return ui
+	}
+
+	// The lowest 3 bits.
+	ui.Version = uint8(v & 0x7)
+
+	// The next 5 bits.
+	ui.Flags = uint8(v & 0xf8 >> 3)
+
+	// The next byte.
+	ui.SizeOfProlog = uint8(v & 0xff00 >> 8)
+
+	// The next byte.
+	ui.CountOfCodes = uint8(v & 0xff0000 >> 16)
+
+	// The next 4 bits.
+	ui.FrameRegister = uint8(v & 0xf000000 >> 24)
+
+	// The next 4 bits.
+	ui.FrameOffset = uint8(v & 0xf0000000 >> 28)
+
+	// Each unwind code struct is 2 bytes wide.
+	offset += 4
+	i := 0
+	for i < int(ui.CountOfCodes) {
+		ucOffset := offset + 2*uint32(i)
+		unwindCode, advanceBy := pe.parseUnwindCode(ucOffset, ui.Version)
+		if advanceBy == 0 {
+			return ui
+		}
+		ui.UnwindCodes = append(ui.UnwindCodes, unwindCode)
+		i += advanceBy
+	}
+
+	if ui.CountOfCodes&1 == 1 {
+		offset += 2
+	}
+
+	// An image-relative pointer to either the function's language-specific
+	// exception handler or termination handler, if flag UNW_FLAG_CHAININFO is
+	// clear and one of the flags UNW_FLAG_EHANDLER or UNW_FLAG_UHANDLER is set.
+	if ui.Flags&UnwFlagEHandler != 0 || ui.Flags&UnwFlagUHandler != 0 {
+		if ui.Flags&UnwFlagChainInfo == 0 {
+			handlerOffset := offset + 2*uint32(i)
+			ui.ExceptionHandler = binary.LittleEndian.Uint32(pe.data[handlerOffset:])
+		}
+	}
+
+	// If the UNW_FLAG_CHAININFO flag is set, then an unwind info structure
+	// is a secondary one, and the shared exception-handler/chained-info
+	// address field contains the primary unwind information. This sample
+	// code retrieves the primary unwind information, assuming that unwindInfo
+	// is the structure that has the UNW_FLAG_CHAININFO flag set.
+	if ui.Flags&UnwFlagChainInfo != 0 {
+		chainOffset := offset + 2*uint32(i)
+		rf := ImageRuntimeFunctionEntry{}
+		size := uint32(binary.Size(ImageRuntimeFunctionEntry{}))
+		err := pe.structUnpack(&rf, chainOffset, size)
+		if err != nil {
+			return ui
+		}
+		ui.FunctionEntry = rf
+	}
+
+	return ui
+}
+
+// The exception directory contains an array of function table entries that
+// are used for exception handling.
+func (pe *File) parseExceptionDirectory(rva, size uint32) error {
+
+	// The target platform determines which format of the function table entry
+	// to use.
+	var exceptions []Exception
+	fileOffset := pe.GetOffsetFromRva(rva)
+
+	entrySize := uint32(binary.Size(ImageRuntimeFunctionEntry{}))
+	entriesCount := size / entrySize
+
+	for i := uint32(0); i < entriesCount; i++ {
+		functionEntry := ImageRuntimeFunctionEntry{}
+		offset := fileOffset + (entrySize * i)
+		err := pe.structUnpack(&functionEntry, offset, entrySize)
+		if err != nil {
+			return err
+		}
+
+		exception := Exception{RuntimeFunction: functionEntry}
+
+		if pe.Is64 {
+			exception.UnwindInfo = pe.parseUnwindInfo(functionEntry.UnwindInfoAddress)
+		}
+
+		exceptions = append(exceptions, exception)
+	}
+
+	pe.Exceptions = exceptions
+	if len(exceptions) > 0 {
+		pe.HasException = true
+	}
+	return nil
+}
+
+// PrettyUnwindInfoHandlerFlags returns the string representation of the
+// `flags` field of the unwind info structure.
+func PrettyUnwindInfoHandlerFlags(flags uint8) []string {
+	var values []string
+
+	unwFlagHandlerMap := map[uint8]string{
+		UnwFlagNHandler:  "No Handler",
+		UnwFlagEHandler:  "Exception",
+		UnwFlagUHandler:  "Termination",
+		UnwFlagChainInfo: "Chain",
+	}
+
+	for k, s := range unwFlagHandlerMap {
+		if k&flags != 0 {
+			values = append(values, s)
+		}
+	}
+	return values
+}
+
+// String returns the string representation of an unwind opcode.
+func (uo UnwindOpType) String() string {
+
+	unOpToString := map[UnwindOpType]string{
+		UwOpPushNonVol:    "UWOP_PUSH_NONVOL",
+		UwOpAllocLarge:    "UWOP_ALLOC_LARGE",
+		UwOpAllocSmall:    "UWOP_ALLOC_SMALL",
+		UwOpSetFpReg:      "UWOP_SET_FPREG",
+		UwOpSaveNonVol:    "UWOP_SAVE_NONVOL",
+		UwOpSaveNonVolFar: "UWOP_SAVE_NONVOL_FAR",
+		UwOpEpilog:        "UWOP_EPILOG",
+		UwOpSpareCode:     "UWOP_SPARE_CODE",
+		UwOpSaveXmm128:    "UWOP_SAVE_XMM128",
+		UwOpSaveXmm128Far: "UWOP_SAVE_XMM128_FAR",
+		UwOpPushMachFrame: "UWOP_PUSH_MACHFRAME",
+		UwOpSetFpRegLarge: "UWOP_SET_FPREG_LARGE",
+	}
+
+	if val, ok := unOpToString[uo]; ok {
+		return val
+	}
+
+	return "?"
+}
diff --git a/vendor/github.com/saferwall/pe/exports.go b/vendor/github.com/saferwall/pe/exports.go
new file mode 100644
index 00000000..dfe6ef56
--- /dev/null
+++ b/vendor/github.com/saferwall/pe/exports.go
@@ -0,0 +1,329 @@
+// Copyright 2022 Saferwall. All rights reserved.
+// Use of this source code is governed by an Apache v2 license
+// that can be found in the LICENSE file.
+
+package pe
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+)
+
+const (
+	maxExportedSymbols = 0x2000
+)
+
+var (
+	ErrExportMaxOrdEntries       = "Export directory contains more than max ordinal entries"
+	ErrExportManyRepeatedEntries = "Export directory contains many repeated entries"
+	AnoNullNumberOfFunctions     = "Export directory contains zero number of functions"
+	AnoNullAddressOfFunctions    = "Export directory contains zero address of functions"
+)
+
+// ImageExportDirectory represents the IMAGE_EXPORT_DIRECTORY structure.
+// The export directory table contains address information that is used
+// to resolve imports to the entry points within this image.
+type ImageExportDirectory struct {
+	// Reserved, must be 0.
+	Characteristics uint32
+
+	// The time and date that the export data was created.
+	TimeDateStamp uint32
+
+	// The major version number.
+	// The major and minor version numbers can be set by the user.
+	MajorVersion uint16
+
+	// The minor version number.
+	MinorVersion uint16
+
+	// The address of the ASCII string that contains the name of the DLL.
+	// This address is relative to the image base.
+	Name uint32
+
+	// The starting ordinal number for exports in this image. This field
+	// specifies the starting ordinal number for the export address table.
+	// It is usually set to 1.
+	Base uint32
+
+	// The number of entries in the export address table.
+	NumberOfFunctions uint32
+
+	// The number of entries in the name pointer table. This is also the number
+	// of entries in the ordinal table.
+	NumberOfNames uint32
+
+	// The address of the export address table, relative to the image base.
+	AddressOfFunctions uint32
+
+	// The address of the export name pointer table, relative to the image base.
+	// The table size is given by the Number of Name Pointers field.
+	AddressOfNames uint32
+
+	// The address of the ordinal table, relative to the image base.
+	AddressOfNameOrdinals uint32
+}
+
+// ExportFunction represents an exported function in the export table.
+type ExportFunction struct {
+	Ordinal      uint32
+	FunctionRVA  uint32
+	NameOrdinal  uint32
+	NameRVA      uint32
+	Name         string
+	Forwarder    string
+	ForwarderRVA uint32
+}
+
+// Export represents the export table.
+type Export struct {
+	Functions []ExportFunction
+	Struct    ImageExportDirectory
+	Name      string
+}
+
+/*
+A few notes learned from `Corkami` about parsing the export directory:
+- like many data directories, the export directory's size is not necessary,
+  except for forwarding.
+- Characteristics, TimeDateStamp, MajorVersion and MinorVersion are not necessary.
+- the export name is not necessary, and can be anything.
+- AddressOfNames is lexicographically-ordered.
+- export names can have any value (even null or more than 65536 characters long,
+  with unprintable characters), just null terminated.
+- an EXE can have exports (no need of relocation nor DLL flag), and can use
+  them normally.
+- exports can be unused for execution and serve only to document internal code.
+- the number of functions will differ from the number of names when the file
+  exports some functions by ordinal.
+*/
+func (pe *File) parseExportDirectory(rva, size uint32) error {
+
+	// Define some vars.
+	exp := Export{}
+	exportDir := ImageExportDirectory{}
+	errorMsg := fmt.Sprintf("Error parsing export directory at RVA: 0x%x", rva)
+
+	fileOffset := pe.GetOffsetFromRva(rva)
+	exportDirSize := uint32(binary.Size(exportDir))
+	err := pe.structUnpack(&exportDir, fileOffset, exportDirSize)
+	if err != nil {
+		return errors.New(errorMsg)
+	}
+	exp.Struct = exportDir
+
+	// We keep track of the bytes left in the file and use it to set an upper
+	// bound on the number of items that can be read from the different arrays.
+	lengthUntilEOF := func(rva uint32) uint32 {
+		return pe.size - pe.GetOffsetFromRva(rva)
+	}
+	var length uint32
+	var addressOfNames []byte
+
+	// Some DLLs have a null number of functions.
+	if exportDir.NumberOfFunctions == 0 {
+		pe.Anomalies = append(pe.Anomalies, AnoNullNumberOfFunctions)
+	}
+
+	// Some DLLs have a null address of functions.
+	if exportDir.AddressOfFunctions == 0 {
+		pe.Anomalies = append(pe.Anomalies, AnoNullAddressOfFunctions)
+	}
+
+	length = min(lengthUntilEOF(exportDir.AddressOfNames),
+		exportDir.NumberOfNames*4)
+	addressOfNames, err = pe.GetData(exportDir.AddressOfNames, length)
+	if err != nil {
+		return errors.New(errorMsg)
+	}
+
+	length = min(lengthUntilEOF(exportDir.AddressOfNameOrdinals),
+		exportDir.NumberOfNames*4)
+	addressOfNameOrdinals, err := pe.GetData(exportDir.AddressOfNameOrdinals, length)
+	if err != nil {
+		return errors.New(errorMsg)
+	}
+
+	length = min(lengthUntilEOF(exportDir.AddressOfFunctions),
+		exportDir.NumberOfFunctions*4)
+	addressOfFunctions, err := pe.GetData(exportDir.AddressOfFunctions, length)
+	if err != nil {
+		return errors.New(errorMsg)
+	}
+
+	exp.Name = pe.getStringAtRVA(exportDir.Name, 0x100000)
+
+	maxFailedEntries := 10
+	var forwarderStr string
+	var forwarderOffset uint32
+	safetyBoundary := pe.size // overly generous upper bound
+	symbolCounts := make(map[uint32]int)
+	parsingFailed := false
+
+	// Read the image export directory.
+	section := pe.getSectionByRva(exportDir.AddressOfNames)
+	if section != nil {
+		safetyBoundary = (section.Header.VirtualAddress +
+			uint32(len(section.Data(0, 0, pe)))) - exportDir.AddressOfNames
+	}
+
+	numNames := min(exportDir.NumberOfNames, safetyBoundary/4)
+	var symbolAddress uint32
+	for i := uint32(0); i < numNames; i++ {
+
+		defer func() {
+			// Recover from a panic if one occurred. Set err to nil otherwise.
+			if recover() != nil {
+				err = errors.New("array index out of bounds")
+			}
+		}()
+
+		symbolOrdinal := binary.LittleEndian.Uint16(addressOfNameOrdinals[i*2:])
+		symbolAddress = binary.LittleEndian.Uint32(addressOfFunctions[symbolOrdinal*4:])
+		if symbolAddress == 0 {
+			continue
+		}
+
+		// If the function's RVA points within the export directory, it points
+		// to the forwarded symbol's name string instead of the function start
+		// address.
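+		// (A classic example of such a forwarder: kernel32.dll exports
+		// HeapAlloc as the forwarder string "NTDLL.RtlAllocateHeap".)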
+		if symbolAddress >= rva && symbolAddress < rva+size {
+			forwarderStr = pe.getStringAtRVA(symbolAddress, 0x100000)
+			forwarderOffset = pe.GetOffsetFromRva(symbolAddress)
+		} else {
+			forwarderStr = ""
+			forwarderOffset = 0
+		}
+
+		symbolNameAddress := binary.LittleEndian.Uint32(addressOfNames[i*4:])
+		if symbolNameAddress == 0 {
+			maxFailedEntries--
+			if maxFailedEntries <= 0 {
+				parsingFailed = true
+				break
+			}
+		}
+		symbolName := pe.getStringAtRVA(symbolNameAddress, 0x100000)
+		if !IsValidFunctionName(symbolName) {
+			parsingFailed = true
+			break
+		}
+
+		symbolNameOffset := pe.GetOffsetFromRva(symbolNameAddress)
+		if symbolNameOffset == 0 {
+			maxFailedEntries--
+			if maxFailedEntries <= 0 {
+				parsingFailed = true
+				break
+			}
+		}
+
+		// File 0b1d3d3664915577ab9a32188d29bbf3542b86c7b9ce333e245496c3018819f1
+		// was being parsed as potentially containing millions of exports.
+		// Checking for duplicates addresses the issue.
+		symbolCounts[symbolAddress]++
+		if symbolCounts[symbolAddress] > 10 {
+			if !stringInSlice(ErrExportManyRepeatedEntries, pe.Anomalies) {
+				pe.Anomalies = append(pe.Anomalies, ErrExportManyRepeatedEntries)
+			}
+		}
+		if len(symbolCounts) > maxExportedSymbols {
+			if !stringInSlice(ErrExportMaxOrdEntries, pe.Anomalies) {
+				pe.Anomalies = append(pe.Anomalies, ErrExportMaxOrdEntries)
+			}
+		}
+		newExport := ExportFunction{
+			Name:         symbolName,
+			NameRVA:      symbolNameAddress,
+			NameOrdinal:  uint32(symbolOrdinal),
+			Ordinal:      exportDir.Base + uint32(symbolOrdinal),
+			FunctionRVA:  symbolAddress,
+			Forwarder:    forwarderStr,
+			ForwarderRVA: forwarderOffset,
+		}
+
+		exp.Functions = append(exp.Functions, newExport)
+	}
+
+	if parsingFailed {
+		fmt.Printf("RVA AddressOfNames in the export directory points to an "+
+			"invalid address: 0x%x\n", exportDir.AddressOfNames)
+	}
+
+	maxFailedEntries = 10
+	section = pe.getSectionByRva(exportDir.AddressOfFunctions)
+
+	// Overly generous upper bound.
+	safetyBoundary = pe.size
+	if section != nil {
+		safetyBoundary = section.Header.VirtualAddress +
+			uint32(len(section.Data(0, 0, pe))) - exportDir.AddressOfFunctions
+	}
+	parsingFailed = false
+	ordinals := make(map[uint32]bool)
+	for _, export := range exp.Functions {
+		ordinals[export.Ordinal] = true
+	}
+	numNames = min(exportDir.NumberOfFunctions, safetyBoundary/4)
+	for i := uint32(0); i < numNames; i++ {
+		value := i + exportDir.Base
+		if ordinals[value] {
+			continue
+		}
+
+		if len(addressOfFunctions) >= int(i*4)+4 {
+			symbolAddress = binary.LittleEndian.Uint32(addressOfFunctions[i*4:])
+		}
+		if symbolAddress == 0 {
+			continue
+		}
+
+		// Checking for forwarders again.
+		if symbolAddress >= rva && symbolAddress < rva+size {
+			forwarderStr = pe.getStringAtRVA(symbolAddress, 0x100000)
+			forwarderOffset = pe.GetOffsetFromRva(symbolAddress)
+		} else {
+			forwarderStr = ""
+			forwarderOffset = 0
+		}
+
+		// File 0b1d3d3664915577ab9a32188d29bbf3542b86c7b9ce333e245496c3018819f1
+		// was being parsed as potentially containing millions of exports.
+		// Checking for duplicates addresses the issue.
+		symbolCounts[symbolAddress]++
+		if symbolCounts[symbolAddress] > 10 {
+			if !stringInSlice(ErrExportManyRepeatedEntries, pe.Anomalies) {
+				pe.Anomalies = append(pe.Anomalies, ErrExportManyRepeatedEntries)
+			}
+		}
+		if len(symbolCounts) > maxExportedSymbols {
+			if !stringInSlice(ErrExportMaxOrdEntries, pe.Anomalies) {
+				pe.Anomalies = append(pe.Anomalies, ErrExportMaxOrdEntries)
+			}
+		}
+		newExport := ExportFunction{
+			Ordinal:      exportDir.Base + i,
+			FunctionRVA:  symbolAddress,
+			Forwarder:    forwarderStr,
+			ForwarderRVA: forwarderOffset,
+		}
+
+		exp.Functions = append(exp.Functions, newExport)
+	}
+
+	pe.Export = exp
+	pe.HasExport = true
+	return nil
+}
+
+// GetExportFunctionByRVA returns an export function given an RVA.
+func (pe *File) GetExportFunctionByRVA(rva uint32) ExportFunction {
+	for _, exp := range pe.Export.Functions {
+		if exp.FunctionRVA == rva {
+			return exp
+		}
+	}
+
+	return ExportFunction{}
+}
diff --git a/vendor/github.com/saferwall/pe/file.go b/vendor/github.com/saferwall/pe/file.go
new file mode 100644
index 00000000..aff9c6c9
--- /dev/null
+++ b/vendor/github.com/saferwall/pe/file.go
@@ -0,0 +1,385 @@
+// Copyright 2018 Saferwall. All rights reserved.
+// Use of this source code is governed by an Apache v2 license
+// that can be found in the LICENSE file.
+
+package pe
+
+import (
+	"errors"
+	"os"
+
+	mmap "github.com/edsrzf/mmap-go"
+	"github.com/saferwall/pe/log"
+)
+
+// A File represents an open PE file.
+type File struct {
+	DOSHeader    ImageDOSHeader              `json:"dos_header,omitempty"`
+	RichHeader   RichHeader                  `json:"rich_header,omitempty"`
+	NtHeader     ImageNtHeader               `json:"nt_header,omitempty"`
+	COFF         COFF                        `json:"coff,omitempty"`
+	Sections     []Section                   `json:"sections,omitempty"`
+	Imports      []Import                    `json:"imports,omitempty"`
+	Export       Export                      `json:"export,omitempty"`
+	Debugs       []DebugEntry                `json:"debugs,omitempty"`
+	Relocations  []Relocation                `json:"relocations,omitempty"`
+	Resources    ResourceDirectory           `json:"resources,omitempty"`
+	TLS          TLSDirectory                `json:"tls,omitempty"`
+	LoadConfig   LoadConfig                  `json:"load_config,omitempty"`
+	Exceptions   []Exception                 `json:"exceptions,omitempty"`
+	Certificates Certificate                 `json:"certificates,omitempty"`
+	DelayImports []DelayImport               `json:"delay_imports,omitempty"`
+	BoundImports []BoundImportDescriptorData `json:"bound_imports,omitempty"`
+	GlobalPtr    uint32                      `json:"global_ptr,omitempty"`
+	CLR          CLRData                     `json:"clr,omitempty"`
+	IAT          []IATEntry                  `json:"iat,omitempty"`
+	Anomalies    []string                    `json:"anomalies,omitempty"`
+	Header       []byte
+	data         mmap.MMap
+	FileInfo
+	size          uint32
+	OverlayOffset int64
+	f             *os.File
+	opts          *Options
+	logger        *log.Helper
+}
+
+// Options that influence the PE parsing behaviour.
+type Options struct {
+
+	// Parse only the PE header and do not parse data directories, by default (false).
+	Fast bool
+
+	// Includes section entropy, by default (false).
+	SectionEntropy bool
+
+	// Maximum COFF symbols to parse, by default (MaxDefaultCOFFSymbolsCount).
+	MaxCOFFSymbolsCount uint32
+
+	// Maximum relocations to parse, by default (MaxDefaultRelocEntriesCount).
+	MaxRelocEntriesCount uint32
+
+	// Disable certificate validation, by default (false).
+	DisableCertValidation bool
+
+	// A custom logger.
+	Logger log.Logger
+
+	// OmitExportDirectory determines if export directory parsing is skipped, by default (false).
+	OmitExportDirectory bool
+
+	// OmitImportDirectory determines if import directory parsing is skipped, by default (false).
+	OmitImportDirectory bool
+
+	// OmitExceptionDirectory determines if exception directory parsing is skipped, by default (false).
+	OmitExceptionDirectory bool
+
+	// OmitResourceDirectory determines if resource directory parsing is skipped, by default (false).
+	OmitResourceDirectory bool
+
+	// OmitSecurityDirectory determines if security directory parsing is skipped, by default (false).
+	OmitSecurityDirectory bool
+
+	// OmitRelocDirectory determines if relocation directory parsing is skipped, by default (false).
+	OmitRelocDirectory bool
+
+	// OmitDebugDirectory determines if debug directory parsing is skipped, by default (false).
+	OmitDebugDirectory bool
+
+	// OmitArchitectureDirectory determines if architecture directory parsing is skipped, by default (false).
+	OmitArchitectureDirectory bool
+
+	// OmitGlobalPtrDirectory determines if global pointer directory parsing is skipped, by default (false).
+	OmitGlobalPtrDirectory bool
+
+	// OmitTLSDirectory determines if TLS directory parsing is skipped, by default (false).
+	OmitTLSDirectory bool
+
+	// OmitLoadConfigDirectory determines if load config directory parsing is skipped, by default (false).
+	OmitLoadConfigDirectory bool
+
+	// OmitBoundImportDirectory determines if bound import directory parsing is skipped, by default (false).
+	OmitBoundImportDirectory bool
+
+	// OmitIATDirectory determines if IAT directory parsing is skipped, by default (false).
+	OmitIATDirectory bool
+
+	// OmitDelayImportDirectory determines if delay import directory parsing is skipped, by default (false).
+	OmitDelayImportDirectory bool
+
+	// OmitCLRHeaderDirectory determines if CLR header directory parsing is skipped, by default (false).
+	OmitCLRHeaderDirectory bool
+}
+
+// New instantiates a file instance with options given a file name.
+func New(name string, opts *Options) (*File, error) {
+
+	f, err := os.Open(name)
+	if err != nil {
+		return nil, err
+	}
+
+	// Memory map the file instead of using read/write.
+	data, err := mmap.Map(f, mmap.RDONLY, 0)
+	if err != nil {
+		f.Close()
+		return nil, err
+	}
+
+	file := File{}
+	if opts != nil {
+		file.opts = opts
+	} else {
+		file.opts = &Options{}
+	}
+
+	if file.opts.MaxCOFFSymbolsCount == 0 {
+		file.opts.MaxCOFFSymbolsCount = MaxDefaultCOFFSymbolsCount
+	}
+	if file.opts.MaxRelocEntriesCount == 0 {
+		file.opts.MaxRelocEntriesCount = MaxDefaultRelocEntriesCount
+	}
+
+	// Use file.opts here so that a nil opts argument cannot be dereferenced.
+	var logger log.Logger
+	if file.opts.Logger == nil {
+		logger = log.NewStdLogger(os.Stdout)
+		file.logger = log.NewHelper(log.NewFilter(logger,
+			log.FilterLevel(log.LevelError)))
+	} else {
+		file.logger = log.NewHelper(file.opts.Logger)
+	}
+
+	file.data = data
+	file.size = uint32(len(file.data))
+	file.f = f
+	return &file, nil
+}
+
+// NewBytes instantiates a file instance with options given a memory buffer.
+func NewBytes(data []byte, opts *Options) (*File, error) {
+
+	file := File{}
+	if opts != nil {
+		file.opts = opts
+	} else {
+		file.opts = &Options{}
+	}
+
+	if file.opts.MaxCOFFSymbolsCount == 0 {
+		file.opts.MaxCOFFSymbolsCount = MaxDefaultCOFFSymbolsCount
+	}
+	if file.opts.MaxRelocEntriesCount == 0 {
+		file.opts.MaxRelocEntriesCount = MaxDefaultRelocEntriesCount
+	}
+
+	// Use file.opts here so that a nil opts argument cannot be dereferenced.
+	var logger log.Logger
+	if file.opts.Logger == nil {
+		logger = log.NewStdLogger(os.Stdout)
+		file.logger = log.NewHelper(log.NewFilter(logger,
+			log.FilterLevel(log.LevelError)))
+	} else {
+		file.logger = log.NewHelper(file.opts.Logger)
+	}
+
+	file.data = data
+	file.size = uint32(len(file.data))
+	return &file, nil
+}
+
+// Close closes the File.
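+//
+// A typical lifecycle, sketched here with error handling elided (the path
+// "sample.exe" is only a placeholder):
+//
+//	pe, _ := New("sample.exe", &Options{})
+//	defer pe.Close()
+//	_ = pe.Parse()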
+func (pe *File) Close() error {
+	if pe.data != nil {
+		_ = pe.data.Unmap()
+	}
+
+	if pe.f != nil {
+		return pe.f.Close()
+	}
+	return nil
+}
+
+// Parse performs the file parsing for a PE binary.
+func (pe *File) Parse() error {
+
+	// Check for the smallest PE size.
+	if len(pe.data) < TinyPESize {
+		return ErrInvalidPESize
+	}
+
+	// Parse the DOS header.
+	err := pe.ParseDOSHeader()
+	if err != nil {
+		return err
+	}
+
+	// Parse the Rich header.
+	err = pe.ParseRichHeader()
+	if err != nil {
+		pe.logger.Errorf("rich header parsing failed: %v", err)
+	}
+
+	// Parse the NT header.
+	err = pe.ParseNTHeader()
+	if err != nil {
+		return err
+	}
+
+	// Parse COFF symbol table.
+	err = pe.ParseCOFFSymbolTable()
+	if err != nil {
+		pe.logger.Debugf("coff symbols parsing failed: %v", err)
+	}
+
+	// Parse the Section Header.
+	err = pe.ParseSectionHeader()
+	if err != nil {
+		return err
+	}
+
+	// In fast mode, do not parse data directories.
+	if pe.opts.Fast {
+		return nil
+	}
+
+	// Parse the Data Directory entries.
+	return pe.ParseDataDirectories()
+}
+
+// String stringifies the data directory entry.
+func (entry ImageDirectoryEntry) String() string {
+	dataDirMap := map[ImageDirectoryEntry]string{
+		ImageDirectoryEntryExport:       "Export",
+		ImageDirectoryEntryImport:       "Import",
+		ImageDirectoryEntryResource:     "Resource",
+		ImageDirectoryEntryException:    "Exception",
+		ImageDirectoryEntryCertificate:  "Security",
+		ImageDirectoryEntryBaseReloc:    "Relocation",
+		ImageDirectoryEntryDebug:        "Debug",
+		ImageDirectoryEntryArchitecture: "Architecture",
+		ImageDirectoryEntryGlobalPtr:    "GlobalPtr",
+		ImageDirectoryEntryTLS:          "TLS",
+		ImageDirectoryEntryLoadConfig:   "LoadConfig",
+		ImageDirectoryEntryBoundImport:  "BoundImport",
+		ImageDirectoryEntryIAT:          "IAT",
+		ImageDirectoryEntryDelayImport:  "DelayImport",
+		ImageDirectoryEntryCLR:          "CLR",
+		ImageDirectoryEntryReserved:     "Reserved",
+	}
+
+	return dataDirMap[entry]
+}
+
+// ParseDataDirectories parses the data directories. The DataDirectory is an
+// array of 16 structures. Each array entry has a predefined meaning for what
+// it refers to.
+func (pe *File) ParseDataDirectories() error {
+
+	foundErr := false
+	oh32 := ImageOptionalHeader32{}
+	oh64 := ImageOptionalHeader64{}
+
+	switch pe.Is64 {
+	case true:
+		oh64 = pe.NtHeader.OptionalHeader.(ImageOptionalHeader64)
+	case false:
+		oh32 = pe.NtHeader.OptionalHeader.(ImageOptionalHeader32)
+	}
+
+	// Maps data directory index to the function which parses that directory.
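+	// (Editorial note: a directory is skipped simply by leaving its parser
+	// out of this map. For example, with Options{OmitDebugDirectory: true}
+	// no entry is registered for ImageDirectoryEntryDebug, so the dispatch
+	// loop below never invokes a debug parser for that slot.)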
+	funcMaps := make(map[ImageDirectoryEntry]func(uint32, uint32) error)
+	if !pe.opts.OmitExportDirectory {
+		funcMaps[ImageDirectoryEntryExport] = pe.parseExportDirectory
+	}
+	if !pe.opts.OmitImportDirectory {
+		funcMaps[ImageDirectoryEntryImport] = pe.parseImportDirectory
+	}
+	if !pe.opts.OmitExceptionDirectory {
+		funcMaps[ImageDirectoryEntryException] = pe.parseExceptionDirectory
+	}
+	if !pe.opts.OmitResourceDirectory {
+		funcMaps[ImageDirectoryEntryResource] = pe.parseResourceDirectory
+	}
+	if !pe.opts.OmitSecurityDirectory {
+		funcMaps[ImageDirectoryEntryCertificate] = pe.parseSecurityDirectory
+	}
+	if !pe.opts.OmitRelocDirectory {
+		funcMaps[ImageDirectoryEntryBaseReloc] = pe.parseRelocDirectory
+	}
+	if !pe.opts.OmitDebugDirectory {
+		funcMaps[ImageDirectoryEntryDebug] = pe.parseDebugDirectory
+	}
+	if !pe.opts.OmitArchitectureDirectory {
+		funcMaps[ImageDirectoryEntryArchitecture] = pe.parseArchitectureDirectory
+	}
+	if !pe.opts.OmitGlobalPtrDirectory {
+		funcMaps[ImageDirectoryEntryGlobalPtr] = pe.parseGlobalPtrDirectory
+	}
+	if !pe.opts.OmitTLSDirectory {
+		funcMaps[ImageDirectoryEntryTLS] = pe.parseTLSDirectory
+	}
+	if !pe.opts.OmitLoadConfigDirectory {
+		funcMaps[ImageDirectoryEntryLoadConfig] = pe.parseLoadConfigDirectory
+	}
+	if !pe.opts.OmitBoundImportDirectory {
+		funcMaps[ImageDirectoryEntryBoundImport] = pe.parseBoundImportDirectory
+	}
+	if !pe.opts.OmitIATDirectory {
+		funcMaps[ImageDirectoryEntryIAT] = pe.parseIATDirectory
+	}
+	if !pe.opts.OmitDelayImportDirectory {
+		funcMaps[ImageDirectoryEntryDelayImport] = pe.parseDelayImportDirectory
+	}
+	if !pe.opts.OmitCLRHeaderDirectory {
+		funcMaps[ImageDirectoryEntryCLR] = pe.parseCLRHeaderDirectory
+	}
+
+	// Iterate over data directories and call the appropriate function.
+	for entryIndex := ImageDirectoryEntry(0); entryIndex < ImageNumberOfDirectoryEntries; entryIndex++ {
+
+		var va, size uint32
+		switch pe.Is64 {
+		case true:
+			dirEntry := oh64.DataDirectory[entryIndex]
+			va = dirEntry.VirtualAddress
+			size = dirEntry.Size
+		case false:
+			dirEntry := oh32.DataDirectory[entryIndex]
+			va = dirEntry.VirtualAddress
+			size = dirEntry.Size
+		}
+
+		if va != 0 {
+			func() {
+				// Keep parsing data directories even if some entries fail.
+				defer func() {
+					if e := recover(); e != nil {
+						pe.logger.Errorf("unhandled exception when parsing data directory %s, reason: %v",
+							entryIndex.String(), e)
+						foundErr = true
+					}
+				}()
+
+				// The last entry in the data directories is reserved and must be zero.
+				if entryIndex == ImageDirectoryEntryReserved {
+					pe.Anomalies = append(pe.Anomalies, AnoReservedDataDirectoryEntry)
+					return
+				}
+
+				parseDirectory, ok := funcMaps[entryIndex]
+				if !ok {
+					return
+				}
+				err := parseDirectory(va, size)
+				if err != nil {
+					pe.logger.Warnf("failed to parse data directory %s, reason: %v",
+						entryIndex.String(), err)
+				}
+			}()
+		}
+	}
+
+	if foundErr {
+		return errors.New("data directory parsing failed")
+	}
+	return nil
+}
diff --git a/vendor/github.com/saferwall/pe/globalptr.go b/vendor/github.com/saferwall/pe/globalptr.go
new file mode 100644
index 00000000..a3b754b2
--- /dev/null
+++ b/vendor/github.com/saferwall/pe/globalptr.go
@@ -0,0 +1,36 @@
+// Copyright 2022 Saferwall. All rights reserved.
+// Use of this source code is governed by an Apache v2
+// license that can be found in the LICENSE file.
+
+package pe
+
+const (
+	// AnoInvalidGlobalPtrReg is reported when the global pointer register offset is outside the image.
+	AnoInvalidGlobalPtrReg = "Global pointer register offset outside of PE image"
+)
+
+// parseGlobalPtrDirectory parses the global pointer data directory: the RVA
+// of the value to be stored in the global pointer register. The size must
+// be set to 0. This data directory is set to all zeros if the target
+// architecture (for example, I386 or AMD64) does not use the concept of a
+// global pointer.
+func (pe *File) parseGlobalPtrDirectory(rva, size uint32) error {
+
+	var err error
+
+	// RVA of the value to be stored in the global pointer register.
+	offset := pe.GetOffsetFromRva(rva)
+	if offset == ^uint32(0) {
+		// Fake global pointer data directory.
+		// sample: 0101f36de484fbc7bfbe6cb942a1ecf6fac0c3acd9f65b88b19400582d7e7007
+		pe.Anomalies = append(pe.Anomalies, AnoInvalidGlobalPtrReg)
+		return nil
+	}
+
+	pe.GlobalPtr, err = pe.ReadUint32(offset)
+	if err != nil {
+		return err
+	}
+
+	pe.HasGlobalPtr = true
+	return nil
+}
diff --git a/vendor/github.com/saferwall/pe/helper.go b/vendor/github.com/saferwall/pe/helper.go
new file mode 100644
index 00000000..fb9d5c5d
--- /dev/null
+++ b/vendor/github.com/saferwall/pe/helper.go
@@ -0,0 +1,697 @@
+// Copyright 2018 Saferwall. All rights reserved.
+// Use of this source code is governed by an Apache v2
+// license that can be found in the LICENSE file.
+
+package pe
+
+import (
+	"bytes"
+	"encoding/binary"
+	"errors"
+	"path"
+	"path/filepath"
+	"runtime"
+	"strings"
+
+	"golang.org/x/text/encoding/unicode"
+)
+
+const (
+	// TinyPESize On Windows XP (x32) the smallest PE executable is 97 bytes.
+	TinyPESize = 97
+
+	// FileAlignmentHardcodedValue represents the value which PointerToRawData
+	// should be at least equal to or bigger than, or it will be rounded to zero.
+	// According to http://corkami.blogspot.com/2010/01/parce-que-la-planche-aura-brule.html
+	// if PointerToRawData is less than 0x200 it's rounded to zero.
+	FileAlignmentHardcodedValue = 0x200
+)
+
+// Errors
+var (
+
+	// ErrInvalidPESize is returned when the file size is less than the
+	// smallest PE file size possible.
+	ErrInvalidPESize = errors.New("not a PE file, smaller than tiny PE")
+
+	// ErrDOSMagicNotFound is returned when the file is potentially a ZM executable.
+	ErrDOSMagicNotFound = errors.New("DOS Header magic not found")
+
+	// ErrInvalidElfanewValue is returned when e_lfanew is larger than the file size.
+	ErrInvalidElfanewValue = errors.New("invalid e_lfanew value. Probably not a PE file")
+
+	// ErrInvalidNtHeaderOffset is returned when the NT Header offset is beyond
+	// the image file.
+	ErrInvalidNtHeaderOffset = errors.New(
+		"invalid NT Header Offset. NT Header Signature not found")
+
+	// ErrImageOS2SignatureFound is returned when the signature is for a NE file.
+	ErrImageOS2SignatureFound = errors.New(
+		"not a valid PE signature. Probably a NE file")
+
+	// ErrImageOS2LESignatureFound is returned when the signature is for an LE file.
+	ErrImageOS2LESignatureFound = errors.New(
+		"not a valid PE signature. Probably an LE file")
+
+	// ErrImageVXDSignatureFound is returned when the signature is for an LX file.
+	ErrImageVXDSignatureFound = errors.New(
+		"not a valid PE signature. Probably an LX file")
+
+	// ErrImageTESignatureFound is returned when the signature is for a TE file.
+	ErrImageTESignatureFound = errors.New(
+		"not a valid PE signature. Probably a TE file")
+
+	// ErrImageNtSignatureNotFound is returned when the PE magic signature is not found.
+	ErrImageNtSignatureNotFound = errors.New(
+		"not a valid PE signature. Magic not found")
+
+	// ErrImageNtOptionalHeaderMagicNotFound is returned when the optional header
+	// magic is different from PE32/PE32+.
+	ErrImageNtOptionalHeaderMagicNotFound = errors.New(
+		"not a valid PE signature. Optional Header magic not found")
+
+	// ErrImageBaseNotAligned is reported when the image base is not aligned to 64K.
+	ErrImageBaseNotAligned = errors.New(
+		"corrupt PE file. Image base not aligned to 64 K")
+
+	// AnoImageBaseOverflow is reported when the image base + SizeOfImage is
+	// larger than 80000000h/FFFF080000000000h in PE32/PE32+.
+	AnoImageBaseOverflow = "Image base beyond allowed address"
+
+	// ErrInvalidSectionFileAlignment is reported when section alignment is less than a
+	// PAGE_SIZE and section alignment != file alignment.
+	ErrInvalidSectionFileAlignment = errors.New("corrupt PE file. Section " +
+		"alignment is less than a PAGE_SIZE and section alignment != file alignment")
+
+	// AnoInvalidSizeOfImage is reported when SizeOfImage is not a multiple of
+	// SectionAlignment.
+	AnoInvalidSizeOfImage = "Invalid SizeOfImage value, should be multiple " +
+		"of SectionAlignment"
+
+	// ErrOutsideBoundary is reported when attempting to read an address beyond
+	// file image limits.
+	ErrOutsideBoundary = errors.New("reading data outside boundary")
+)
+
+// Max returns the larger of x or y.
+func Max(x, y uint32) uint32 {
+	if x < y {
+		return y
+	}
+	return x
+}
+
+// min returns the smaller of a or b.
+func min(a, b uint32) uint32 {
+	if a < b {
+		return a
+	}
+	return b
+}
+
+// Min returns the smallest number in a slice.
+func Min(values []uint32) uint32 {
+	min := values[0]
+	for _, v := range values {
+		if v < min {
+			min = v
+		}
+	}
+	return min
+}
+
+// IsValidDosFilename returns true if the DLL name is likely to be valid.
+// Valid FAT32 8.3 short filename characters according to:
+// http://en.wikipedia.org/wiki/8.3_filename
+// The filename length is not checked because a DLL's filename
+// can be longer than 8.3.
+func IsValidDosFilename(filename string) bool {
+	alphabet := "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
+	numerals := "0123456789"
+	special := "!#$%&'()-@^_`{}~+,.;=[]\\/"
+	charset := alphabet + numerals + special
+	for _, c := range filename {
+		if !strings.Contains(charset, string(c)) {
+			return false
+		}
+	}
+	return true
+}
+
+// IsValidFunctionName checks if an imported name uses the valid accepted
+// characters expected in mangled function names. If the symbol's characters
+// don't fall within this charset we will assume the name is invalid.
+func IsValidFunctionName(functionName string) bool {
+	alphabet := "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
+	numerals := "0123456789"
+	special := "_?@$()<>"
+	charset := alphabet + numerals + special
+	// Reject any character of the candidate name outside the accepted charset.
+	for _, c := range functionName {
+		if !strings.Contains(charset, string(c)) {
+			return false
+		}
+	}
+	return true
+}
+
+// IsPrintable checks whether a string is printable.
+func IsPrintable(s string) bool {
+	alphabet := "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
+	numerals := "0123456789"
+	whitespace := " \t\n\r\v\f"
+	special := "!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~"
+	charset := alphabet + numerals + special + whitespace
+	// Reject any character of the input string outside the printable charset.
+	for _, c := range s {
+		if !strings.Contains(charset, string(c)) {
+			return false
+		}
+	}
+	return true
+}
+
+// getSectionByRva returns the section containing the given address.
+func (pe *File) getSectionByRva(rva uint32) *Section {
+	for _, section := range pe.Sections {
+		if section.Contains(rva, pe) {
+			return &section
+		}
+	}
+	return nil
+}
+
+// getSectionNameByRva returns the name of the section containing the given
+// address.
+func (pe *File) getSectionNameByRva(rva uint32) string {
+	for _, section := range pe.Sections {
+		if section.Contains(rva, pe) {
+			return section.String()
+		}
+	}
+	return ""
+}
+
+func (pe *File) getSectionByOffset(offset uint32) *Section {
+	for _, section := range pe.Sections {
+		if section.Header.PointerToRawData == 0 {
+			continue
+		}
+
+		adjustedPointer := pe.adjustFileAlignment(
+			section.Header.PointerToRawData)
+		if adjustedPointer <= offset &&
+			offset < (adjustedPointer+section.Header.SizeOfRawData) {
+			return &section
+		}
+	}
+	return nil
+}
+
+// GetOffsetFromRva returns the file offset corresponding to this RVA.
+func (pe *File) GetOffsetFromRva(rva uint32) uint32 {
+
+	// Given a RVA, this method will find the section where the
+	// data lies and return the offset within the file.
+	section := pe.getSectionByRva(rva)
+	if section == nil {
+		if rva < uint32(len(pe.data)) {
+			return rva
+		}
+		return ^uint32(0)
+	}
+	sectionAlignment := pe.adjustSectionAlignment(section.Header.VirtualAddress)
+	fileAlignment := pe.adjustFileAlignment(section.Header.PointerToRawData)
+	return rva - sectionAlignment + fileAlignment
+}
+
+// GetRVAFromOffset returns an RVA given an offset.
+func (pe *File) GetRVAFromOffset(offset uint32) uint32 {
+	section := pe.getSectionByOffset(offset)
+	minAddr := ^uint32(0)
+	if section == nil {
+
+		if len(pe.Sections) == 0 {
+			return offset
+		}
+
+		for _, section := range pe.Sections {
+			vaddr := pe.adjustSectionAlignment(section.Header.VirtualAddress)
+			if vaddr < minAddr {
+				minAddr = vaddr
+			}
+		}
+		// Assume that the offset lies within the headers.
+		// The case illustrating this behavior can be found at:
+		// http://corkami.blogspot.com/2010/01/hey-hey-hey-whats-in-your-head.html
+		// where the import table is not contained by any section,
+		// hence the RVA needs to be resolved to a raw offset.
+		if offset < minAddr {
+			return offset
+		}
+
+		pe.logger.Warn("data at Offset can't be fetched. Corrupt header?")
+		return ^uint32(0)
+	}
+	sectionAlignment := pe.adjustSectionAlignment(section.Header.VirtualAddress)
+	fileAlignment := pe.adjustFileAlignment(section.Header.PointerToRawData)
+	return offset - fileAlignment + sectionAlignment
+}
+
+func (pe *File) getSectionByName(secName string) (section *ImageSectionHeader) {
+	for _, section := range pe.Sections {
+		if section.String() == secName {
+			return &section.Header
+		}
+	}
+	return nil
+}
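+
+// Worked example (editorial, not upstream commentary): for a section whose
+// adjusted VirtualAddress is 0x2000 and whose adjusted PointerToRawData is
+// 0x800, an RVA of 0x2010 maps to file offset
+// 0x2010 - 0x2000 + 0x800 = 0x810, which is exactly what GetOffsetFromRva
+// computes; GetRVAFromOffset performs the inverse computation.
+
+// getStringAtRVA returns an ASCII string located at the given address.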
+func (pe *File) getStringAtRVA(rva, maxLen uint32) string { + if rva == 0 { + return "" + } + + section := pe.getSectionByRva(rva) + if section == nil { + if rva > pe.size { + return "" + } + + end := rva + maxLen + if end > pe.size { + end = pe.size + } + s := pe.GetStringFromData(0, pe.data[rva:end]) + return string(s) + } + s := pe.GetStringFromData(0, section.Data(rva, maxLen, pe)) + return string(s) +} + +func (pe *File) readUnicodeStringAtRVA(rva uint32, maxLength uint32) string { + str := "" + offset := pe.GetOffsetFromRva(rva) + i := uint32(0) + for i = 0; i < maxLength; i += 2 { + if offset+i >= pe.size || pe.data[offset+i] == 0 { + break + } + + str += string(pe.data[offset+i]) + } + return str +} + +func (pe *File) readASCIIStringAtOffset(offset, maxLength uint32) (uint32, string) { + str := "" + i := uint32(0) + + for i = 0; i < maxLength; i++ { + if offset+i >= pe.size || pe.data[offset+i] == 0 { + break + } + + str += string(pe.data[offset+i]) + } + return i, str +} + +// GetStringFromData returns ASCII string from within the data. +func (pe *File) GetStringFromData(offset uint32, data []byte) []byte { + + dataSize := uint32(len(data)) + if dataSize == 0 { + return nil + } + + if offset > dataSize { + return nil + } + + end := offset + for end < dataSize { + if data[end] == 0 { + break + } + end++ + } + return data[offset:end] +} + +// getStringAtOffset returns a string given an offset. +func (pe *File) getStringAtOffset(offset, size uint32) (string, error) { + if offset+size > pe.size { + return "", ErrOutsideBoundary + } + + str := string(pe.data[offset : offset+size]) + return strings.Replace(str, "\x00", "", -1), nil +} + +// GetData returns the data given an RVA regardless of the section where it +// lies on. +func (pe *File) GetData(rva, length uint32) ([]byte, error) { + + // Given a RVA and the size of the chunk to retrieve, this method + // will find the section where the data lies and return the data. + section := pe.getSectionByRva(rva) + + var end uint32 + if length > 0 { + end = rva + length + } else { + end = 0 + } + + if section == nil { + if rva < uint32(len(pe.Header)) { + return pe.Header[rva:end], nil + } + + // Before we give up we check whether the file might contain the data + // anyway. There are cases of PE files without sections that rely on + // windows loading the first 8291 bytes into memory and assume the data + // will be there. A functional file with these characteristics is: + // MD5: 0008892cdfbc3bda5ce047c565e52295 + // SHA-1: c7116b9ff950f86af256defb95b5d4859d4752a9 + + if rva < uint32(len(pe.data)) { + return pe.data[rva:end], nil + } + + return nil, errors.New("data at RVA can't be fetched. Corrupt header?") + } + return section.Data(rva, length, pe), nil +} + +// The alignment factor (in bytes) that is used to align the raw data of sections +// in the image file. The value should be a power of 2 between 512 and 64 K, +// inclusive. The default is 512. If the SectionAlignment is less than the +// architecture's page size, then FileAlignment must match SectionAlignment. 
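+// Editorial worked example: with the default FileAlignment of 0x200, the
+// body below rounds a PointerToRawData of 0x3ff down to 0x200 and a value
+// of 0x1 down to 0, matching the loader behaviour described in the
+// references.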
+func (pe *File) adjustFileAlignment(va uint32) uint32 {
+
+	var fileAlignment uint32
+	switch pe.Is64 {
+	case true:
+		fileAlignment = pe.NtHeader.OptionalHeader.(ImageOptionalHeader64).FileAlignment
+	case false:
+		fileAlignment = pe.NtHeader.OptionalHeader.(ImageOptionalHeader32).FileAlignment
+	}
+
+	if fileAlignment > FileAlignmentHardcodedValue && fileAlignment%2 != 0 {
+		pe.Anomalies = append(pe.Anomalies, ErrInvalidFileAlignment)
+	}
+
+	if fileAlignment < FileAlignmentHardcodedValue {
+		return va
+	}
+
+	// Round it down to 0x200 if not a power of 2.
+	// According to https://github.com/corkami/docs/blob/master/PE/PE.md
+	// if PointerToRawData is less than 0x200 it's rounded to zero. Loading the
+	// test file in a debugger it's easy to verify that the PointerToRawData
+	// value of 1 is rounded to zero. Hence we reproduce the behavior.
+	return (va / 0x200) * 0x200
+}
+
+// The alignment (in bytes) of sections when they are loaded into memory.
+// It must be greater than or equal to FileAlignment. The default is the
+// page size for the architecture.
+func (pe *File) adjustSectionAlignment(va uint32) uint32 {
+	var fileAlignment, sectionAlignment uint32
+
+	switch pe.Is64 {
+	case true:
+		fileAlignment = pe.NtHeader.OptionalHeader.(ImageOptionalHeader64).FileAlignment
+		sectionAlignment = pe.NtHeader.OptionalHeader.(ImageOptionalHeader64).SectionAlignment
+	case false:
+		fileAlignment = pe.NtHeader.OptionalHeader.(ImageOptionalHeader32).FileAlignment
+		sectionAlignment = pe.NtHeader.OptionalHeader.(ImageOptionalHeader32).SectionAlignment
+	}
+
+	if fileAlignment < FileAlignmentHardcodedValue &&
+		fileAlignment != sectionAlignment {
+		pe.Anomalies = append(pe.Anomalies, ErrInvalidSectionAlignment)
+	}
+
+	if sectionAlignment < 0x1000 { // page size
+		sectionAlignment = fileAlignment
+	}
+
+	// 0x200 is the minimum valid FileAlignment according to the documentation,
+	// although ntoskrnl.exe has an alignment of 0x80 in some Windows versions.
+	if sectionAlignment != 0 && va%sectionAlignment != 0 {
+		return sectionAlignment * (va / sectionAlignment)
+	}
+	return va
+}
+
+// alignDword aligns the offset on a 32-bit boundary.
+func alignDword(offset, base uint32) uint32 {
+	return ((offset + base + 3) & 0xfffffffc) - (base & 0xfffffffc)
+}
+
+// stringInSlice checks whether a string exists in a slice of strings.
+func stringInSlice(a string, list []string) bool {
+	for _, b := range list {
+		if b == a {
+			return true
+		}
+	}
+	return false
+}
+
+// intInSlice checks whether a uint32 exists in a slice of uint32.
+func intInSlice(a uint32, list []uint32) bool {
+	for _, b := range list {
+		if b == a {
+			return true
+		}
+	}
+	return false
+}
+
+// IsDriver returns true if the PE file is a Windows driver.
+func (pe *File) IsDriver() bool {
+
+	// Checking that the ImageBase field of the OptionalHeader is above or
+	// equal to 0x80000000 (that is, whether it lies in the upper 2GB of
+	// the address space, normally belonging to the kernel) is not a
+	// reliable enough indicator. For instance, PEs that play the invalid
+	// ImageBase trick to get relocated could be incorrectly assumed to be
+	// drivers.
+
+	// Checking if any section characteristics have the IMAGE_SCN_MEM_NOT_PAGED
+	// flag set is not reliable either.
+
+	// If there's still no import directory (the PE doesn't have one or it's
+	// malformed), give up.
+	if len(pe.Imports) == 0 {
+		return false
+	}
+
+	// DIRECTORY_ENTRY_IMPORT will now exist, although it may be empty.
+	// If it imports from "ntoskrnl.exe" or other kernel components it should
+	// be a driver.
+	systemDLLs := []string{"ntoskrnl.exe", "hal.dll", "ndis.sys",
+		"bootvid.dll", "kdcom.dll"}
+	for _, dll := range pe.Imports {
+		if stringInSlice(strings.ToLower(dll.Name), systemDLLs) {
+			return true
+		}
+	}
+
+	// If we still can't tell, check for the common driver section names in
+	// combination with IMAGE_SUBSYSTEM_NATIVE or IMAGE_SUBSYSTEM_NATIVE_WINDOWS.
+	subsystem := ImageOptionalHeaderSubsystemType(0)
+	oh32 := ImageOptionalHeader32{}
+	oh64 := ImageOptionalHeader64{}
+	switch pe.Is64 {
+	case true:
+		oh64 = pe.NtHeader.OptionalHeader.(ImageOptionalHeader64)
+		subsystem = oh64.Subsystem
+	case false:
+		oh32 = pe.NtHeader.OptionalHeader.(ImageOptionalHeader32)
+		subsystem = oh32.Subsystem
+	}
+	commonDriverSectionNames := []string{"page", "paged", "nonpage", "init"}
+	for _, section := range pe.Sections {
+		s := strings.ToLower(section.String())
+		if stringInSlice(s, commonDriverSectionNames) &&
+			(subsystem&ImageSubsystemNativeWindows != 0 ||
+				subsystem&ImageSubsystemNative != 0) {
+			return true
+		}
+	}
+
+	return false
+}
+
+// IsDLL returns true if the PE file is a standard DLL.
+func (pe *File) IsDLL() bool {
+	return pe.NtHeader.FileHeader.Characteristics&ImageFileDLL != 0
+}
+
+// IsEXE returns true if the PE file is a standard executable.
+func (pe *File) IsEXE() bool {
+
+	// Returns true only if the file has the IMAGE_FILE_EXECUTABLE_IMAGE flag set,
+	// the IMAGE_FILE_DLL flag not set, and the file does not appear to be a driver either.
+	if pe.IsDLL() || pe.IsDriver() {
+		return false
+	}
+
+	if pe.NtHeader.FileHeader.Characteristics&ImageFileExecutableImage == 0 {
+		return false
+	}
+
+	return true
+}
+
+// Checksum calculates the PE checksum as generated by CheckSumMappedFile().
+func (pe *File) Checksum() uint32 {
+	var checksum uint64 = 0
+	var max uint64 = 0x100000000
+	currentDword := uint32(0)
+
+	// Get the Checksum offset.
+	optionalHeaderOffset := pe.DOSHeader.AddressOfNewEXEHeader + 4 +
+		uint32(binary.Size(pe.NtHeader.FileHeader))
+
+	// `CheckSum` field position in optional PE headers is always 64 for PE and PE+.
+	checksumOffset := optionalHeaderOffset + 64
+
+	// Verify the data is DWORD-aligned and add padding if needed.
+	remainder := pe.size % 4
+	dataLen := pe.size
+	if remainder > 0 {
+		dataLen = pe.size + (4 - remainder)
+		paddedBytes := make([]byte, 4-remainder)
+		pe.data = append(pe.data, paddedBytes...)
+	}
+
+	for i := uint32(0); i < dataLen; i += 4 {
+		// Skip the checksum field.
+		if i == checksumOffset {
+			continue
+		}
+
+		// Read DWORD from file.
+		currentDword = binary.LittleEndian.Uint32(pe.data[i:])
+
+		// Calculate checksum.
+		checksum = (checksum & 0xffffffff) + uint64(currentDword) + (checksum >> 32)
+		if checksum > max {
+			checksum = (checksum & 0xffffffff) + (checksum >> 32)
+		}
+	}
+
+	checksum = (checksum & 0xffff) + (checksum >> 16)
+	checksum = checksum + (checksum >> 16)
+	checksum = checksum & 0xffff
+
+	// The length is the one of the original data, not the padded one.
+	checksum += uint64(pe.size)
+
+	return uint32(checksum)
+}
+
+// ReadUint64 reads a uint64 from a buffer.
+func (pe *File) ReadUint64(offset uint32) (uint64, error) {
+	if offset+8 > pe.size {
+		return 0, ErrOutsideBoundary
+	}
+
+	return binary.LittleEndian.Uint64(pe.data[offset:]), nil
+}
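+
+// Editorial sketch (hypothetical caller code, assuming the optional header
+// struct exposes a CheckSum field): the computed checksum can be compared
+// against the value stored in the optional header; a mismatch commonly
+// indicates tampering or a rebuilt binary:
+//
+//	computed := f.Checksum()
+//	stored := f.NtHeader.OptionalHeader.(ImageOptionalHeader32).CheckSum
+//	tampered := computed != stored
+
+// ReadUint32 reads a uint32 from a buffer.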
+func (pe *File) ReadUint32(offset uint32) (uint32, error) {
+	if offset > pe.size-4 {
+		return 0, ErrOutsideBoundary
+	}
+
+	return binary.LittleEndian.Uint32(pe.data[offset:]), nil
+}
+
+// ReadUint16 reads a uint16 from a buffer.
+func (pe *File) ReadUint16(offset uint32) (uint16, error) {
+	if offset > pe.size-2 {
+		return 0, ErrOutsideBoundary
+	}
+
+	return binary.LittleEndian.Uint16(pe.data[offset:]), nil
+}
+
+// ReadUint8 reads a uint8 from a buffer.
+func (pe *File) ReadUint8(offset uint32) (uint8, error) {
+	if offset+1 > pe.size {
+		return 0, ErrOutsideBoundary
+	}
+
+	return pe.data[offset], nil
+}
+
+func (pe *File) structUnpack(iface interface{}, offset, size uint32) (err error) {
+	// Boundary check.
+	totalSize := offset + size
+
+	// Integer overflow.
+	if (totalSize > offset) != (size > 0) {
+		return ErrOutsideBoundary
+	}
+
+	if offset >= pe.size || totalSize > pe.size {
+		return ErrOutsideBoundary
+	}
+
+	buf := bytes.NewReader(pe.data[offset : offset+size])
+	err = binary.Read(buf, binary.LittleEndian, iface)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// ReadBytesAtOffset returns a byte array from offset.
+func (pe *File) ReadBytesAtOffset(offset, size uint32) ([]byte, error) {
+	// Boundary check.
+	totalSize := offset + size
+
+	// Integer overflow.
+	if (totalSize > offset) != (size > 0) {
+		return nil, ErrOutsideBoundary
+	}
+
+	if offset >= pe.size || totalSize > pe.size {
+		return nil, ErrOutsideBoundary
+	}
+
+	return pe.data[offset : offset+size], nil
+}
+
+// DecodeUTF16String decodes the UTF16 string from the byte slice.
+func DecodeUTF16String(b []byte) (string, error) {
+	n := bytes.Index(b, []byte{0, 0})
+	if n == 0 {
+		return "", nil
+	}
+	decoder := unicode.UTF16(unicode.LittleEndian, unicode.UseBOM).NewDecoder()
+	s, err := decoder.Bytes(b[0 : n+1])
+	if err != nil {
+		return "", err
+	}
+	return string(s), nil
+}
+
+// IsBitSet returns true when a bit on a particular position is set.
+func IsBitSet(n uint64, pos int) bool {
+	val := n & (1 << pos)
+	return (val > 0)
+}
+
+func getAbsoluteFilePath(testfile string) string {
+	_, p, _, _ := runtime.Caller(0)
+	return path.Join(filepath.Dir(p), testfile)
+}
diff --git a/vendor/github.com/saferwall/pe/iat.go b/vendor/github.com/saferwall/pe/iat.go
new file mode 100644
index 00000000..ae148e04
--- /dev/null
+++ b/vendor/github.com/saferwall/pe/iat.go
@@ -0,0 +1,67 @@
+// Copyright 2022 Saferwall. All rights reserved.
+// Use of this source code is governed by an Apache v2
+// license that can be found in the LICENSE file.
+
+package pe
+
+// IATEntry represents an entry inside the IAT.
+type IATEntry struct {
+	Index   uint32
+	Rva     uint32
+	Value   interface{}
+	Meaning string
+}
+
+// The structure and content of the import address table are identical to those
+// of the import lookup table, until the file is bound. During binding, the
+// entries in the import address table are overwritten with the 32-bit (for
+// PE32) or 64-bit (for PE32+) addresses of the symbols that are being imported.
+// These addresses are the actual memory addresses of the symbols, although
+// technically they are still called “virtual addresses.” The loader typically
+// processes the binding.
+//
+// The Import Address Table is there to only trigger Copy On Write for as
+// few pages as possible (those being the actual Import Address Table pages
+// themselves).
+// This is, in part, the reason there's that extra level of indirection in the
+// PE to begin with.
+func (pe *File) parseIATDirectory(rva, size uint32) error {
+
+	var entries []IATEntry
+	var index uint32
+	var err error
+
+	startRva := rva
+
+	for startRva+size > rva {
+		ie := IATEntry{}
+		offset := pe.GetOffsetFromRva(rva)
+		if pe.Is64 {
+			ie.Value, err = pe.ReadUint64(offset)
+			if err != nil {
+				break
+			}
+			ie.Rva = rva
+			rva += 8
+		} else {
+			ie.Value, err = pe.ReadUint32(offset)
+			if err != nil {
+				break
+			}
+			ie.Rva = rva
+			rva += 4
+		}
+		ie.Index = index
+		imp, i := pe.GetImportEntryInfoByRVA(rva)
+		if len(imp.Functions) != 0 {
+			ie.Meaning = imp.Name + "!" + imp.Functions[i].Name
+		}
+		entries = append(entries, ie)
+		index++
+	}
+
+	pe.IAT = entries
+	pe.HasIAT = true
+	return nil
+}
diff --git a/vendor/github.com/saferwall/pe/imports.go b/vendor/github.com/saferwall/pe/imports.go
new file mode 100644
index 00000000..a7d6cf9a
--- /dev/null
+++ b/vendor/github.com/saferwall/pe/imports.go
@@ -0,0 +1,797 @@
+// Copyright 2018 Saferwall. All rights reserved.
+// Use of this source code is governed by an Apache v2
+// license that can be found in the LICENSE file.
+
+package pe
+
+import (
+	"crypto/md5"
+	"encoding/binary"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+const (
+	imageOrdinalFlag32   = uint32(0x80000000)
+	imageOrdinalFlag64   = uint64(0x8000000000000000)
+	maxRepeatedAddresses = uint32(0xF)
+	maxAddressSpread     = uint32(0x8000000)
+	addressMask32        = uint32(0x7fffffff)
+	addressMask64        = uint64(0x7fffffffffffffff)
+	maxDllLength         = 0x200
+	maxImportNameLength  = 0x200
+)
+
+var (
+	// AnoInvalidThunkAddressOfData is reported when the thunk AddressOfData is
+	// too spread out.
+	AnoInvalidThunkAddressOfData = "Thunk Address Of Data too spread out"
+
+	// AnoManyRepeatedEntries is reported when the import directory contains
+	// many entries that have the same RVA.
+	AnoManyRepeatedEntries = "Import directory contains many repeated entries"
+
+	// AnoAddressOfDataBeyondLimits is reported when the thunk AddressOfData
+	// goes beyond limits.
+	AnoAddressOfDataBeyondLimits = "Thunk AddressOfData beyond limits"
+
+	// AnoImportNoNameNoOrdinal is reported when an import entry has neither
+	// a name nor an ordinal; most probably malformed data.
+	AnoImportNoNameNoOrdinal = "Must have either an ordinal or a name in an import"
+
+	// ErrDamagedImportTable is reported when the IAT and ILT table length is 0.
+	ErrDamagedImportTable = errors.New(
+		"damaged Import Table information. ILT and/or IAT appear to be broken")
+)
+
+// ImageImportDescriptor describes the remainder of the import information.
+// The import directory table contains address information that is used to
+// resolve fixup references to the entry points within a DLL image.
+// It consists of an array of import directory entries, one entry for each DLL
+// to which the image refers. The last directory entry is empty (filled with
+// null values), which indicates the end of the directory table.
+type ImageImportDescriptor struct {
+	// The RVA of the import lookup/name table (INT). This table contains a name
+	// or ordinal for each import. The INT is an array of IMAGE_THUNK_DATA structs.
+	OriginalFirstThunk uint32 `json:"original_first_thunk"`
+
+	// The stamp that is set to zero until the image is bound. After the image
+	// is bound, this field is set to the time/data stamp of the DLL.
+	TimeDateStamp uint32 `json:"time_date_stamp"`
+
+	// The index of the first forwarder reference (-1 if no forwarders).
+	ForwarderChain uint32 `json:"forwarder_chain"`
+
+	// The address of an ASCII string that contains the name of the DLL.
+	// This address is relative to the image base.
+	Name uint32 `json:"name"`
+
+	// The RVA of the import address table (IAT). The contents of this table are
+	// identical to the contents of the import lookup table until the image is bound.
+	FirstThunk uint32 `json:"first_thunk"`
+}
+
+// ImageThunkData32 corresponds to one imported function from the executable.
+// The entries are an array of 32-bit numbers for PE32 or an array of 64-bit
+// numbers for PE32+. The ends of both arrays are indicated by an
+// IMAGE_THUNK_DATA element with a value of zero.
+// The IMAGE_THUNK_DATA union is a DWORD with these interpretations:
+// DWORD Function;       // Memory address of the imported function
+// DWORD Ordinal;        // Ordinal value of imported API
+// DWORD AddressOfData;  // RVA to an IMAGE_IMPORT_BY_NAME with the imported API name
+// DWORD ForwarderString;// RVA to a forwarder string
+type ImageThunkData32 struct {
+	AddressOfData uint32
+}
+
+// ImageThunkData64 is the PE32+ version of IMAGE_THUNK_DATA.
+type ImageThunkData64 struct {
+	AddressOfData uint64
+}
+
+// ThunkData32 pairs a 32-bit image thunk with the RVA it was read from.
+type ThunkData32 struct {
+	ImageThunkData ImageThunkData32
+	Offset         uint32
+}
+
+// ThunkData64 pairs a 64-bit image thunk with the RVA it was read from.
+type ThunkData64 struct {
+	ImageThunkData ImageThunkData64
+	Offset         uint32
+}
+
+// ImportFunction represents an imported function in the import table.
+type ImportFunction struct {
+	// An ASCII string that contains the name to import. This is the string that
+	// must be matched to the public name in the DLL. This string is case
+	// sensitive and terminated by a null byte.
+	Name string `json:"name"`
+
+	// An index into the export name pointer table. A match is attempted first
+	// with this value. If it fails, a binary search is performed on the DLL's
+	// export name pointer table.
+	Hint uint16 `json:"hint"`
+
+	// If this is true, import by ordinal. Otherwise, import by name.
+	ByOrdinal bool `json:"by_ordinal"`
+
+	// A 16-bit ordinal number. This field is used only if the Ordinal/Name Flag
+	// bit field is 1 (import by ordinal). Bits 30-15 or 62-15 must be 0.
+	Ordinal uint32 `json:"ordinal"`
+
+	// Name Thunk Value (OFT).
+	OriginalThunkValue uint64 `json:"original_thunk_value"`
+
+	// Address Thunk Value (FT).
+	ThunkValue uint64 `json:"thunk_value"`
+
+	// Address Thunk RVA.
+	ThunkRVA uint32 `json:"thunk_rva"`
+
+	// Name Thunk RVA.
+	OriginalThunkRVA uint32 `json:"original_thunk_rva"`
+}
+
+// Import represents an entry in the import table.
+type Import struct {
+	Offset     uint32                `json:"offset"`
+	Name       string                `json:"name"`
+	Functions  []ImportFunction      `json:"functions"`
+	Descriptor ImageImportDescriptor `json:"descriptor"`
+}
+
+func (pe *File) parseImportDirectory(rva, size uint32) (err error) {
+
+	for {
+		importDesc := ImageImportDescriptor{}
+		fileOffset := pe.GetOffsetFromRva(rva)
+		importDescSize := uint32(binary.Size(importDesc))
+		err := pe.structUnpack(&importDesc, fileOffset, importDescSize)
+
+		// If the RVA is invalid all would blow up. Some EXEs seem to be
+		// especially nasty and have an invalid RVA.
+		if err != nil {
+			return err
+		}
+
+		// If the structure is all zeros, we reached the end of the list.
+		if importDesc == (ImageImportDescriptor{}) {
+			break
+		}
+
+		rva += importDescSize
+
+		// If the array of thunks is somewhere earlier than the import
+		// descriptor we can set a maximum length for the array. Otherwise
+		// just set a maximum length equal to the size of the file.
+		maxLen := uint32(len(pe.data)) - fileOffset
+		if rva > importDesc.OriginalFirstThunk || rva > importDesc.FirstThunk {
+			if rva < importDesc.OriginalFirstThunk {
+				maxLen = rva - importDesc.FirstThunk
+			} else if rva < importDesc.FirstThunk {
+				maxLen = rva - importDesc.OriginalFirstThunk
+			} else {
+				maxLen = Max(rva-importDesc.OriginalFirstThunk,
+					rva-importDesc.FirstThunk)
+			}
+		}
+
+		var importedFunctions []ImportFunction
+		if pe.Is64 {
+			importedFunctions, err = pe.parseImports64(&importDesc, maxLen)
+		} else {
+			importedFunctions, err = pe.parseImports32(&importDesc, maxLen)
+		}
+		if err != nil {
+			return err
+		}
+
+		dllName := pe.getStringAtRVA(importDesc.Name, maxDllLength)
+		if !IsValidDosFilename(dllName) {
+			dllName = "*invalid*"
+			continue
+		}
+
+		pe.Imports = append(pe.Imports, Import{
+			Offset:     fileOffset,
+			Name:       dllName,
+			Functions:  importedFunctions,
+			Descriptor: importDesc,
+		})
+	}
+
+	if len(pe.Imports) > 0 {
+		pe.HasImport = true
+	}
+
+	return nil
+}
+
+func (pe *File) getImportTable32(rva uint32, maxLen uint32,
+	isOldDelayImport bool) ([]ThunkData32, error) {
+
+	// Setup variables.
+	thunkTable := make(map[uint32]*ImageThunkData32)
+	retVal := []ThunkData32{}
+	minAddressOfData := ^uint32(0)
+	maxAddressOfData := uint32(0)
+	repeatedAddress := uint32(0)
+	var size uint32 = 4
+	addressesOfData := make(map[uint32]bool)
+
+	startRVA := rva
+
+	if rva == 0 {
+		return nil, nil
+	}
+
+	for {
+		if rva >= startRVA+maxLen {
+			pe.logger.Warnf("Error parsing the import table. Entries go beyond bounds.")
+			break
+		}
+
+		// If we see the same entry too many times, we assume the table could
+		// contain bogus data (with malicious intent or otherwise).
+		if repeatedAddress >= maxRepeatedAddresses {
+			if !stringInSlice(AnoManyRepeatedEntries, pe.Anomalies) {
+				pe.Anomalies = append(pe.Anomalies, AnoManyRepeatedEntries)
+			}
+		}
+
+		// If the addresses point somewhere but the difference between the
+		// highest and lowest address is larger than maxAddressSpread, we assume
+		// a bogus table, as the addresses should be contained within a module.
+		if maxAddressOfData-minAddressOfData > maxAddressSpread {
+			if !stringInSlice(AnoInvalidThunkAddressOfData, pe.Anomalies) {
+				pe.Anomalies = append(pe.Anomalies, AnoInvalidThunkAddressOfData)
+			}
+		}
+
+		// In its original incarnation in Visual C++ 6.0, all ImgDelayDescr
+		// fields containing addresses used virtual addresses, rather than RVAs.
+		// That is, they contained actual addresses where the delayload data
+		// could be found. These fields are DWORDs, the size of a pointer on the x86.
+		// Now fast-forward to IA-64 support. All of a sudden, 4 bytes isn't
+		// enough to hold a complete address. At this point, Microsoft did the
+		// correct thing and changed the fields containing addresses to RVAs.
+		offset := uint32(0)
+		if isOldDelayImport {
+			oh32 := pe.NtHeader.OptionalHeader.(ImageOptionalHeader32)
+			newRVA := rva - oh32.ImageBase
+			offset = pe.GetOffsetFromRva(newRVA)
+			if offset == ^uint32(0) {
+				return nil, nil
+			}
+		} else {
+			offset = pe.GetOffsetFromRva(rva)
+			if offset == ^uint32(0) {
+				return nil, nil
+			}
+		}
+
+		// Read the image thunk data.
+		thunk := ImageThunkData32{}
+		err := pe.structUnpack(&thunk, offset, size)
+		if err != nil {
+			// pe.logger.Warnf("Error parsing the import table. " +
" + + // "Invalid data at RVA: 0x%x", rva) + return nil, nil + } + + if thunk == (ImageThunkData32{}) { + break + } + + // Check if the AddressOfData lies within the range of RVAs that it's + // being scanned, abort if that is the case, as it is very unlikely + // to be legitimate data. + // Seen in PE with SHA256: + // 5945bb6f0ac879ddf61b1c284f3b8d20c06b228e75ae4f571fa87f5b9512902c + if thunk.AddressOfData >= startRVA && thunk.AddressOfData <= rva { + pe.logger.Warnf("Error parsing the import table. "+ + "AddressOfData overlaps with THUNK_DATA for THUNK at: "+ + "RVA 0x%x", rva) + break + } + + if thunk.AddressOfData&imageOrdinalFlag32 > 0 { + // If the entry looks like could be an ordinal. + if thunk.AddressOfData&0x7fffffff > 0xffff { + // but its value is beyond 2^16, we will assume it's a + // corrupted and ignore it altogether + if !stringInSlice(AnoAddressOfDataBeyondLimits, pe.Anomalies) { + pe.Anomalies = append(pe.Anomalies, AnoAddressOfDataBeyondLimits) + } + } + } else { + // and if it looks like it should be an RVA keep track of the RVAs seen + // and store them to study their properties. When certain non-standard + // features are detected the parsing will be aborted + _, ok := addressesOfData[thunk.AddressOfData] + if ok { + repeatedAddress++ + } else { + addressesOfData[thunk.AddressOfData] = true + } + + if thunk.AddressOfData > maxAddressOfData { + maxAddressOfData = thunk.AddressOfData + } + + if thunk.AddressOfData < minAddressOfData { + minAddressOfData = thunk.AddressOfData + } + } + + thunkTable[rva] = &thunk + thunkData := ThunkData32{ImageThunkData: thunk, Offset: rva} + retVal = append(retVal, thunkData) + rva += size + } + return retVal, nil +} + +func (pe *File) getImportTable64(rva uint32, maxLen uint32, + isOldDelayImport bool) ([]ThunkData64, error) { + + // Setup variables + thunkTable := make(map[uint32]*ImageThunkData64) + retVal := []ThunkData64{} + minAddressOfData := ^uint64(0) + maxAddressOfData := uint64(0) + repeatedAddress := uint64(0) + var size uint32 = 8 + addressesOfData := make(map[uint64]bool) + + startRVA := rva + + if rva == 0 { + return nil, nil + } + + for { + if rva >= startRVA+maxLen { + pe.logger.Warnf("Error parsing the import table. Entries go beyond bounds.") + break + } + + // if we see too many times the same entry we assume it could be + // a table containing bogus data (with malicious intent or otherwise) + if repeatedAddress >= uint64(maxRepeatedAddresses) { + if !stringInSlice(AnoManyRepeatedEntries, pe.Anomalies) { + pe.Anomalies = append(pe.Anomalies, AnoManyRepeatedEntries) + } + } + + // if the addresses point somewhere but the difference between the highest + // and lowest address is larger than maxAddressSpread we assume a bogus + // table as the addresses should be contained within a module + if maxAddressOfData-minAddressOfData > uint64(maxAddressSpread) { + if !stringInSlice(AnoInvalidThunkAddressOfData, pe.Anomalies) { + pe.Anomalies = append(pe.Anomalies, AnoInvalidThunkAddressOfData) + } + } + + // In its original incarnation in Visual C++ 6.0, all ImgDelayDescr + // fields containing addresses used virtual addresses, rather than RVAs. + // That is, they contained actual addresses where the delayload data + // could be found. These fields are DWORDs, the size of a pointer on the x86. + // Now fast-forward to IA-64 support. All of a sudden, 4 bytes isn't + // enough to hold a complete address. At this point, Microsoft did the + // correct thing and changed the fields containing addresses to RVAs. 
+		offset := uint32(0)
+		if isOldDelayImport {
+			oh64 := pe.NtHeader.OptionalHeader.(ImageOptionalHeader64)
+			newRVA := rva - uint32(oh64.ImageBase)
+			offset = pe.GetOffsetFromRva(newRVA)
+			if offset == ^uint32(0) {
+				return nil, nil
+			}
+		} else {
+			offset = pe.GetOffsetFromRva(rva)
+			if offset == ^uint32(0) {
+				return nil, nil
+			}
+		}
+
+		// Read the image thunk data.
+		thunk := ImageThunkData64{}
+		err := pe.structUnpack(&thunk, offset, size)
+		if err != nil {
+			// pe.logger.Warnf("Error parsing the import table. " +
+			// 	"Invalid data at RVA: 0x%x", rva)
+			return nil, nil
+		}
+
+		if thunk == (ImageThunkData64{}) {
+			break
+		}
+
+		// Check if the AddressOfData lies within the range of RVAs that is
+		// being scanned; abort if that is the case, as it is very unlikely
+		// to be legitimate data.
+		// Seen in PE with SHA256:
+		// 5945bb6f0ac879ddf61b1c284f3b8d20c06b228e75ae4f571fa87f5b9512902c
+		if thunk.AddressOfData >= uint64(startRVA) &&
+			thunk.AddressOfData <= uint64(rva) {
+			pe.logger.Warnf("Error parsing the import table. "+
+				"AddressOfData overlaps with THUNK_DATA for THUNK at: "+
+				"RVA 0x%x", rva)
+			break
+		}
+
+		// If the entry looks like it could be an ordinal...
+		if thunk.AddressOfData&imageOrdinalFlag64 > 0 {
+			// ...but its value is beyond 2^16, we will assume it is
+			// corrupted and ignore it altogether.
+			if thunk.AddressOfData&0x7fffffff > 0xffff {
+				if !stringInSlice(AnoAddressOfDataBeyondLimits, pe.Anomalies) {
+					pe.Anomalies = append(pe.Anomalies, AnoAddressOfDataBeyondLimits)
+				}
+			}
+			// And if it looks like it should be an RVA...
+		} else {
+			// ...keep track of the RVAs seen and store them to study their
+			// properties. When certain non-standard features are detected
+			// the parsing will be aborted.
+			_, ok := addressesOfData[thunk.AddressOfData]
+			if ok {
+				repeatedAddress++
+			} else {
+				addressesOfData[thunk.AddressOfData] = true
+			}
+
+			if thunk.AddressOfData > maxAddressOfData {
+				maxAddressOfData = thunk.AddressOfData
+			}
+
+			if thunk.AddressOfData < minAddressOfData {
+				minAddressOfData = thunk.AddressOfData
+			}
+		}
+
+		thunkTable[rva] = &thunk
+		thunkData := ThunkData64{ImageThunkData: thunk, Offset: rva}
+		retVal = append(retVal, thunkData)
+		rva += size
+	}
+	return retVal, nil
+}
+
+func (pe *File) parseImports32(importDesc interface{}, maxLen uint32) (
+	[]ImportFunction, error) {
+
+	var OriginalFirstThunk uint32
+	var FirstThunk uint32
+	var isOldDelayImport bool
+
+	switch desc := importDesc.(type) {
+	case *ImageImportDescriptor:
+		OriginalFirstThunk = desc.OriginalFirstThunk
+		FirstThunk = desc.FirstThunk
+	case *ImageDelayImportDescriptor:
+		OriginalFirstThunk = desc.ImportNameTableRVA
+		FirstThunk = desc.ImportAddressTableRVA
+		if desc.Attributes == 0 {
+			isOldDelayImport = true
+		}
+	}
+
+	// Import Lookup Table (OFT). Contains ordinals or pointers to strings.
+	ilt, err := pe.getImportTable32(OriginalFirstThunk, maxLen, isOldDelayImport)
+	if err != nil {
+		return nil, err
+	}
+
+	// Import Address Table (FT). May have identical content to the ILT if the
+	// PE file is not bound. It will contain the address of the imported symbols
+	// once the binary is loaded or if it is already bound.
+	iat, err := pe.getImportTable32(FirstThunk, maxLen, isOldDelayImport)
+	if err != nil {
+		return nil, err
+	}
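+
+	// Editorial sketch: the by-ordinal/by-name branching further below follows
+	// the thunk encoding. For a 32-bit thunk value v (illustrative only):
+	//
+	//	if v&imageOrdinalFlag32 != 0 {
+	//		ordinal := v & 0xffff            // import by ordinal
+	//	} else {
+	//		hintNameRVA := v & addressMask32 // RVA of the hint/name entry
+	//	}
+
+	// Some DLLs have an IAT or ILT with nil type.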
+ if len(iat) == 0 && len(ilt) == 0 { + return nil, ErrDamagedImportTable + } + + var table []ThunkData32 + if len(ilt) > 0 { + table = ilt + } else if len(iat) > 0 { + table = iat + } else { + return nil, err + } + + importedFunctions := []ImportFunction{} + numInvalid := uint32(0) + for idx := uint32(0); idx < uint32(len(table)); idx++ { + imp := ImportFunction{} + if table[idx].ImageThunkData.AddressOfData > 0 { + // If imported by ordinal, we will append the ordinal number + if table[idx].ImageThunkData.AddressOfData&imageOrdinalFlag32 > 0 { + imp.ByOrdinal = true + imp.Ordinal = table[idx].ImageThunkData.AddressOfData & uint32(0xffff) + + // Original Thunk + if uint32(len(ilt)) > idx { + imp.OriginalThunkValue = uint64(ilt[idx].ImageThunkData.AddressOfData) + imp.OriginalThunkRVA = ilt[idx].Offset + } + + // Thunk + if uint32(len(iat)) > idx { + imp.ThunkValue = uint64(iat[idx].ImageThunkData.AddressOfData) + imp.ThunkRVA = iat[idx].Offset + } + + imp.Name = "#" + strconv.Itoa(int(imp.Ordinal)) + } else { + imp.ByOrdinal = false + if isOldDelayImport { + table[idx].ImageThunkData.AddressOfData -= + pe.NtHeader.OptionalHeader.(ImageOptionalHeader32).ImageBase + } + + // Original Thunk + if uint32(len(ilt)) > idx { + imp.OriginalThunkValue = uint64(ilt[idx].ImageThunkData.AddressOfData & addressMask32) + imp.OriginalThunkRVA = ilt[idx].Offset + } + + // Thunk + if uint32(len(iat)) > idx { + imp.ThunkValue = uint64(iat[idx].ImageThunkData.AddressOfData & addressMask32) + imp.ThunkRVA = iat[idx].Offset + } + + // Thunk + hintNameTableRva := table[idx].ImageThunkData.AddressOfData & addressMask32 + off := pe.GetOffsetFromRva(hintNameTableRva) + imp.Hint, err = pe.ReadUint16(off) + if err != nil { + imp.Hint = ^uint16(0) + } + imp.Name = pe.getStringAtRVA(table[idx].ImageThunkData.AddressOfData+2, + maxImportNameLength) + if !IsValidFunctionName(imp.Name) { + imp.Name = "*invalid*" + } + } + } + + // This file bfe97192e8107d52dd7b4010d12b2924 has an invalid table built + // in a way that it's parsable but contains invalid entries that lead + // pefile to take extremely long amounts of time to parse. It also leads + // to extreme memory consumption. To prevent similar cases, if invalid + // entries are found in the middle of a table the parsing will be aborted. + hasName := len(imp.Name) > 0 + if imp.Ordinal == 0 && !hasName { + if !stringInSlice(AnoImportNoNameNoOrdinal, pe.Anomalies) { + pe.Anomalies = append(pe.Anomalies, AnoImportNoNameNoOrdinal) + } + } + + // Some PEs appear to interleave valid and invalid imports. Instead of + // aborting the parsing altogether we will simply skip the invalid entries. + // Although if we see 1000 invalid entries and no legit ones, we abort. 
+ if imp.Name == "*invalid*" { + if numInvalid > 1000 && numInvalid == idx { + return nil, errors.New( + `too many invalid names, aborting parsing`) + } + numInvalid++ + continue + } + + importedFunctions = append(importedFunctions, imp) + } + + return importedFunctions, nil +} + +func (pe *File) parseImports64(importDesc interface{}, maxLen uint32) ([]ImportFunction, error) { + + var OriginalFirstThunk uint32 + var FirstThunk uint32 + var isOldDelayImport bool + + switch desc := importDesc.(type) { + case *ImageImportDescriptor: + OriginalFirstThunk = desc.OriginalFirstThunk + FirstThunk = desc.FirstThunk + case *ImageDelayImportDescriptor: + OriginalFirstThunk = desc.ImportNameTableRVA + FirstThunk = desc.ImportAddressTableRVA + if desc.Attributes == 0 { + isOldDelayImport = true + } + } + + // Import Lookup Table. Contains ordinals or pointers to strings. + ilt, err := pe.getImportTable64(OriginalFirstThunk, maxLen, isOldDelayImport) + if err != nil { + return nil, err + } + + // Import Address Table. May have identical content to ILT if PE file is + // not bound. It will contain the address of the imported symbols once + // the binary is loaded or if it is already bound. + iat, err := pe.getImportTable64(FirstThunk, maxLen, isOldDelayImport) + if err != nil { + return nil, err + } + + // Would crash if IAT or ILT had nil type + if len(iat) == 0 && len(ilt) == 0 { + return nil, ErrDamagedImportTable + } + + var table []ThunkData64 + if len(ilt) > 0 { + table = ilt + } else if len(iat) > 0 { + table = iat + } else { + return nil, err + } + + importedFunctions := []ImportFunction{} + numInvalid := uint32(0) + for idx := uint32(0); idx < uint32(len(table)); idx++ { + imp := ImportFunction{} + if table[idx].ImageThunkData.AddressOfData > 0 { + + // If imported by ordinal, we will append the ordinal number + if table[idx].ImageThunkData.AddressOfData&imageOrdinalFlag64 > 0 { + imp.ByOrdinal = true + imp.Ordinal = uint32(table[idx].ImageThunkData.AddressOfData) & uint32(0xffff) + + // Original Thunk + if uint32(len(ilt)) > idx { + imp.OriginalThunkValue = + ilt[idx].ImageThunkData.AddressOfData + imp.OriginalThunkRVA = ilt[idx].Offset + } + + // Thunk + if uint32(len(iat)) > idx { + imp.ThunkValue = iat[idx].ImageThunkData.AddressOfData + imp.ThunkRVA = iat[idx].Offset + } + + imp.Name = "#" + strconv.Itoa(int(imp.Ordinal)) + + } else { + + imp.ByOrdinal = false + + if isOldDelayImport { + table[idx].ImageThunkData.AddressOfData -= + pe.NtHeader.OptionalHeader.(ImageOptionalHeader64).ImageBase + } + + // Original Thunk + if uint32(len(ilt)) > idx { + imp.OriginalThunkValue = + ilt[idx].ImageThunkData.AddressOfData & addressMask64 + imp.OriginalThunkRVA = ilt[idx].Offset + } + + // Thunk + if uint32(len(iat)) > idx { + imp.ThunkValue = iat[idx].ImageThunkData.AddressOfData & addressMask64 + imp.ThunkRVA = iat[idx].Offset + } + + hintNameTableRva := table[idx].ImageThunkData.AddressOfData & addressMask64 + off := pe.GetOffsetFromRva(uint32(hintNameTableRva)) + imp.Hint = binary.LittleEndian.Uint16(pe.data[off:]) + imp.Name = pe.getStringAtRVA(uint32(table[idx].ImageThunkData.AddressOfData+2), + maxImportNameLength) + if !IsValidFunctionName(imp.Name) { + imp.Name = "*invalid*" + } + } + } + + // This file bfe97192e8107d52dd7b4010d12b2924 has an invalid table built + // in a way that it's parsable but contains invalid entries that lead + // pefile to take extremely long amounts of time to parse. It also leads + // to extreme memory consumption. 
To prevent similar cases, if invalid
+		// entries are found in the middle of a table the parsing will be aborted.
+		hasName := len(imp.Name) > 0
+		if imp.Ordinal == 0 && !hasName {
+			if !stringInSlice(AnoImportNoNameNoOrdinal, pe.Anomalies) {
+				pe.Anomalies = append(pe.Anomalies, AnoImportNoNameNoOrdinal)
+			}
+		}
+		// Some PEs appear to interleave valid and invalid imports. Instead of
+		// aborting the parsing altogether we will simply skip the invalid entries.
+		// Although if we see 1000 invalid entries and no legit ones, we abort.
+		if imp.Name == "*invalid*" {
+			if numInvalid > 1000 && numInvalid == idx {
+				return nil, errors.New(
+					`too many invalid names, aborting parsing`)
+			}
+			numInvalid++
+			continue
+		}
+
+		importedFunctions = append(importedFunctions, imp)
+	}
+
+	return importedFunctions, nil
+}
+
+// GetImportEntryInfoByRVA returns an import entry and the index of the
+// matching function within it, given an RVA.
+func (pe *File) GetImportEntryInfoByRVA(rva uint32) (Import, int) {
+	for _, imp := range pe.Imports {
+		for i, entry := range imp.Functions {
+			if entry.ThunkRVA == rva {
+				return imp, i
+			}
+		}
+	}
+
+	return Import{}, 0
+}
+
+// md5hash hashes a string using the MD5 algorithm.
+func md5hash(text string) string {
+	h := md5.New()
+	h.Write([]byte(text))
+	return hex.EncodeToString(h.Sum(nil))
+}
+
+// ImpHash calculates the import hash.
+// Algorithm:
+// Resolving ordinals to function names when they appear
+// Converting both DLL names and function names to all lowercase
+// Removing the file extensions from imported module names
+// Building and storing the lowercased string libname.funcname in an ordered list
+// Generating the MD5 hash of the ordered list
+func (pe *File) ImpHash() (string, error) {
+	if len(pe.Imports) == 0 {
+		return "", errors.New("no imports found")
+	}
+
+	extensions := []string{"ocx", "sys", "dll"}
+	var impStrs []string
+
+	for _, imp := range pe.Imports {
+		var libName string
+		parts := strings.Split(imp.Name, ".")
+		if len(parts) == 2 && stringInSlice(strings.ToLower(parts[1]), extensions) {
+			libName = parts[0]
+		} else {
+			libName = imp.Name
+		}
+
+		libName = strings.ToLower(libName)
+
+		for _, function := range imp.Functions {
+			var funcName string
+			if function.ByOrdinal {
+				funcName = OrdLookup(imp.Name, uint64(function.Ordinal), true)
+			} else {
+				funcName = function.Name
+			}
+
+			if funcName == "" {
+				continue
+			}
+
+			impStr := fmt.Sprintf("%s.%s", libName, strings.ToLower(funcName))
+			impStrs = append(impStrs, impStr)
+		}
+	}
+
+	hash := md5hash(strings.Join(impStrs, ","))
+	return hash, nil
+}
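+
+// Editorial sketch (illustrative, assuming ws2_32.dll ordinal 1 resolves to
+// "accept" via OrdLookup): a binary importing kernel32.dll!GetProcAddress and
+// ws2_32.dll ordinal 1 would contribute the ordered, lowercased list
+//
+//	"kernel32.getprocaddress,ws2_32.accept"
+//
+// and its import hash is the MD5 of that comma-joined string.
diff --git a/vendor/github.com/saferwall/pe/loadconfig.go b/vendor/github.com/saferwall/pe/loadconfig.go
new file mode 100644
index 00000000..40ba7538
--- /dev/null
+++ b/vendor/github.com/saferwall/pe/loadconfig.go
@@ -0,0 +1,1521 @@
+// Copyright 2018 Saferwall. All rights reserved.
+// Use of this source code is governed by an Apache v2
+// license that can be found in the LICENSE file.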
+
+// References:
+// https://www.virtualbox.org/svn/vbox/trunk/include/iprt/formats/pecoff.h
+// https://github.com/hdoc/llvm-project/blob/release/15.x/llvm/include/llvm/Object/COFF.h
+// https://ffri.github.io/ProjectChameleon/new_reloc_chpev2/
+// https://blogs.blackberry.com/en/2019/09/teardown-windows-10-on-arm-x86-emulation
+// DVRT: https://www.alex-ionescu.com/?p=323
+// https://xlab.tencent.com/en/2016/11/02/return-flow-guard/
+// https://denuvosoftwaresolutions.github.io/DVRT/dvrt.html
+// BlueHat v18 || Retpoline: The anti-Spectre type 2 mitigation in Windows: https://www.youtube.com/watch?v=ZfxXjDQRpsU
+
+package pe
+
+import (
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	"reflect"
+)
+
+// ImageGuardFlagType represents the type for load configuration image guard flags.
+type ImageGuardFlagType uint8
+
+// GFIDS table entry flags.
+const (
+	// ImageGuardFlagFIDSuppressed indicates that the call target is explicitly
+	// suppressed (do not treat it as valid for purposes of CFG).
+	ImageGuardFlagFIDSuppressed = 0x1
+
+	// ImageGuardFlagExportSuppressed indicates that the call target is export
+	// suppressed. See Export suppression for more details.
+	ImageGuardFlagExportSuppressed = 0x2
+)
+
+// The GuardFlags field contains a combination of one or more of the
+// following flags and subfields:
+const (
+	// ImageGuardCfInstrumented indicates that the module performs control flow
+	// integrity checks using system-supplied support.
+	ImageGuardCfInstrumented = 0x00000100
+
+	// ImageGuardCfWInstrumented indicates that the module performs control
+	// flow and write integrity checks.
+	ImageGuardCfWInstrumented = 0x00000200
+
+	// ImageGuardCfFunctionTablePresent indicates that the module contains
+	// valid control flow target metadata.
+	ImageGuardCfFunctionTablePresent = 0x00000400
+
+	// ImageGuardSecurityCookieUnused indicates that the module does not make
+	// use of the /GS security cookie.
+	ImageGuardSecurityCookieUnused = 0x00000800
+
+	// ImageGuardProtectDelayLoadIAT indicates that the module supports a read
+	// only delay load IAT.
+	ImageGuardProtectDelayLoadIAT = 0x00001000
+
+	// ImageGuardDelayLoadIATInItsOwnSection indicates that the delayload
+	// import table is in its own .didat section (with nothing else in it)
+	// and can be freely reprotected.
+	ImageGuardDelayLoadIATInItsOwnSection = 0x00002000
+
+	// ImageGuardCfExportSuppressionInfoPresent indicates that the module
+	// contains suppressed export information. This also implies that the
+	// address taken IAT table is present in the load config.
+	ImageGuardCfExportSuppressionInfoPresent = 0x00004000
+
+	// ImageGuardCfEnableExportSuppression indicates that the module enables
+	// suppression of exports.
+	ImageGuardCfEnableExportSuppression = 0x00008000
+
+	// ImageGuardCfLongJumpTablePresent indicates that the module contains
+	// long jmp target information.
+	ImageGuardCfLongJumpTablePresent = 0x00010000
+)
+
+const (
+	// ImageGuardCfFunctionTableSizeMask is the mask for the subfield that
+	// contains the stride of Control Flow Guard function table entries
+	// (that is, the additional count of bytes per table entry).
+	ImageGuardCfFunctionTableSizeMask = 0xF0000000
+
+	// ImageGuardCfFunctionTableSizeShift is the shift to right-justify the
+	// Guard CF function table stride.
+	ImageGuardCfFunctionTableSizeShift = 28
+)
+
+const (
+	ImageDynamicRelocationGuardRfPrologue = 0x00000001
+	ImageDynamicRelocationGuardREpilogue  = 0x00000002
+)
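+
+// Editorial sketch (hypothetical caller code): the per-entry stride of the
+// GFIDS table is carried in the top nibble of GuardFlags and is extracted
+// with the two constants above:
+//
+//	stride := (guardFlags & ImageGuardCfFunctionTableSizeMask) >>
+//		ImageGuardCfFunctionTableSizeShift
+
+// Software enclave information.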
+const ( + ImageEnclaveLongIDLength = 32 + ImageEnclaveShortIDLength = 16 +) + +const ( + // ImageEnclaveImportMatchNone indicates that none of the identifiers of the + // image need to match the value in the import record. + ImageEnclaveImportMatchNone = 0x00000000 + + // ImageEnclaveImportMatchUniqueId indicates that the value of the enclave + // unique identifier of the image must match the value in the import record. + // Otherwise, loading of the image fails. + ImageEnclaveImportMatchUniqueID = 0x00000001 + + // ImageEnclaveImportMatchAuthorId indicates that the value of the enclave + // author identifier of the image must match the value in the import record. + // Otherwise, loading of the image fails. If this flag is set and the import + // record indicates an author identifier of all zeros, the imported image + // must be part of the Windows installation. + ImageEnclaveImportMatchAuthorID = 0x00000002 + + // ImageEnclaveImportMatchFamilyId indicates that the value of the enclave + // family identifier of the image must match the value in the import record. + // Otherwise, loading of the image fails. + ImageEnclaveImportMatchFamilyID = 0x00000003 + + // ImageEnclaveImportMatchImageId indicates that the value of the enclave + // image identifier must match the value in the import record. Otherwise, + // loading of the image fails. + ImageEnclaveImportMatchImageID = 0x00000004 +) + +// ImageLoadConfigDirectory32 Contains the load configuration data of an image for x86 binaries. +type ImageLoadConfigDirectory32 struct { + // The actual size of the structure inclusive. May differ from the size + // given in the data directory for Windows XP and earlier compatibility. + Size uint32 `json:"size"` + + // Date and time stamp value. + TimeDateStamp uint32 `json:"time_date_stamp"` + + // Major version number. + MajorVersion uint16 `json:"major_version"` + + // Minor version number. + MinorVersion uint16 `json:"minor_version"` + + // The global loader flags to clear for this process as the loader starts + // the process. + GlobalFlagsClear uint32 `json:"global_flags_clear"` + + // The global loader flags to set for this process as the loader starts the + // process. + GlobalFlagsSet uint32 `json:"global_flags_set"` + + // The default timeout value to use for this process's critical sections + // that are abandoned. + CriticalSectionDefaultTimeout uint32 `json:"critical_section_default_timeout"` + + // Memory that must be freed before it is returned to the system, in bytes. + DeCommitFreeBlockThreshold uint32 `json:"de_commit_free_block_threshold"` + + // Total amount of free memory, in bytes. + DeCommitTotalFreeThreshold uint32 `json:"de_commit_total_free_threshold"` + + // [x86 only] The VA of a list of addresses where the LOCK prefix is used so + // that they can be replaced with NOP on single processor machines. + LockPrefixTable uint32 `json:"lock_prefix_table"` + + // Maximum allocation size, in bytes. + MaximumAllocationSize uint32 `json:"maximum_allocation_size"` + + // Maximum virtual memory size, in bytes. + VirtualMemoryThreshold uint32 `json:"virtual_memory_threshold"` + + // Process heap flags that correspond to the first argument of the HeapCreate + // function. These flags apply to the process heap that is created during + // process startup. 
+ ProcessHeapFlags uint32 `json:"process_heap_flags"` + + // Setting this field to a non-zero value is equivalent to calling + // SetProcessAffinityMask with this value during process startup (.exe only) + ProcessAffinityMask uint32 `json:"process_affinity_mask"` + + // The service pack version identifier. + CSDVersion uint16 `json:"csd_version"` + + // Must be zero. + DependentLoadFlags uint16 `json:"dependent_load_flags"` + + // Reserved for use by the system. + EditList uint32 `json:"edit_list"` + + // A pointer to a cookie that is used by Visual C++ or GS implementation. + SecurityCookie uint32 `json:"security_cookie"` + + // [x86 only] The VA of the sorted table of RVAs of each valid, unique SE + // handler in the image. + SEHandlerTable uint32 `json:"se_handler_table"` + + // [x86 only] The count of unique handlers in the table. + SEHandlerCount uint32 `json:"se_handler_count"` + + // The VA where Control Flow Guard check-function pointer is stored. + GuardCFCheckFunctionPointer uint32 `json:"guard_cf_check_function_pointer"` + + // The VA where Control Flow Guard dispatch-function pointer is stored. + GuardCFDispatchFunctionPointer uint32 `json:"guard_cf_dispatch_function_pointer"` + + // The VA of the sorted table of RVAs of each Control Flow Guard function in + // the image. + GuardCFFunctionTable uint32 `json:"guard_cf_function_table"` + + // The count of unique RVAs in the above table. + GuardCFFunctionCount uint32 `json:"guard_cf_function_count"` + + // Control Flow Guard related flags. + GuardFlags uint32 `json:"guard_flags"` + + // Code integrity information. + CodeIntegrity ImageLoadConfigCodeIntegrity `json:"code_integrity"` + + // The VA where Control Flow Guard address taken IAT table is stored. + GuardAddressTakenIATEntryTable uint32 `json:"guard_address_taken_iat_entry_table"` + + // The count of unique RVAs in the above table. + GuardAddressTakenIATEntryCount uint32 `json:"guard_address_taken_iat_entry_count"` + + // The VA where Control Flow Guard long jump target table is stored. + GuardLongJumpTargetTable uint32 `json:"guard_long_jump_target_table"` + + // The count of unique RVAs in the above table. + GuardLongJumpTargetCount uint32 `json:"guard_long_jump_target_count"` + + DynamicValueRelocTable uint32 `json:"dynamic_value_reloc_table"` + + // Not sure when this was renamed from HybridMetadataPointer. 
+ CHPEMetadataPointer uint32 `json:"chpe_metadata_pointer"` + + GuardRFFailureRoutine uint32 `json:"guard_rf_failure_routine"` + GuardRFFailureRoutineFunctionPointer uint32 `json:"guard_rf_failure_routine_function_pointer"` + DynamicValueRelocTableOffset uint32 `json:"dynamic_value_reloc_table_offset"` + DynamicValueRelocTableSection uint16 `json:"dynamic_value_reloc_table_section"` + Reserved2 uint16 `json:"reserved_2"` + GuardRFVerifyStackPointerFunctionPointer uint32 `json:"guard_rf_verify_stack_pointer_function_pointer"` + HotPatchTableOffset uint32 `json:"hot_patch_table_offset"` + Reserved3 uint32 `json:"reserved_3"` + EnclaveConfigurationPointer uint32 `json:"enclave_configuration_pointer"` + VolatileMetadataPointer uint32 `json:"volatile_metadata_pointer"` + GuardEHContinuationTable uint32 `json:"guard_eh_continuation_table"` + GuardEHContinuationCount uint32 `json:"guard_eh_continuation_count"` + GuardXFGCheckFunctionPointer uint32 `json:"guard_xfg_check_function_pointer"` + GuardXFGDispatchFunctionPointer uint32 `json:"guard_xfg_dispatch_function_pointer"` + GuardXFGTableDispatchFunctionPointer uint32 `json:"guard_xfg_table_dispatch_function_pointer"` + CastGuardOSDeterminedFailureMode uint32 `json:"cast_guard_os_determined_failure_mode"` + GuardMemcpyFunctionPointer uint32 `json:"guard_memcpy_function_pointer"` +} + +// ImageLoadConfigDirectory64 Contains the load configuration data of an image for x64 binaries. +type ImageLoadConfigDirectory64 struct { + // The actual size of the structure inclusive. May differ from the size + // given in the data directory for Windows XP and earlier compatibility. + Size uint32 `json:"size"` + + // Date and time stamp value. + TimeDateStamp uint32 `json:"time_date_stamp"` + + // Major version number. + MajorVersion uint16 `json:"major_version"` + + // Minor version number. + MinorVersion uint16 `json:"minor_version"` + + // The global loader flags to clear for this process as the loader starts + // the process. + GlobalFlagsClear uint32 `json:"global_flags_clear"` + + // The global loader flags to set for this process as the loader starts the + // process. + GlobalFlagsSet uint32 `json:"global_flags_set"` + + // The default timeout value to use for this process's critical sections + // that are abandoned. + CriticalSectionDefaultTimeout uint32 `json:"critical_section_default_timeout"` + + // Memory that must be freed before it is returned to the system, in bytes. + DeCommitFreeBlockThreshold uint64 `json:"de_commit_free_block_threshold"` + + // Total amount of free memory, in bytes. + DeCommitTotalFreeThreshold uint64 `json:"de_commit_total_free_threshold"` + + // [x86 only] The VA of a list of addresses where the LOCK prefix is used so + // that they can be replaced with NOP on single processor machines. + LockPrefixTable uint64 `json:"lock_prefix_table"` + + // Maximum allocation size, in bytes. + MaximumAllocationSize uint64 `json:"maximum_allocation_size"` + + // Maximum virtual memory size, in bytes. + VirtualMemoryThreshold uint64 `json:"virtual_memory_threshold"` + + // Setting this field to a non-zero value is equivalent to calling + // SetProcessAffinityMask with this value during process startup (.exe only) + ProcessAffinityMask uint64 `json:"process_affinity_mask"` + + // Process heap flags that correspond to the first argument of the HeapCreate + // function. These flags apply to the process heap that is created during + // process startup. 
+ ProcessHeapFlags uint32 `json:"process_heap_flags"` + + // The service pack version identifier. + CSDVersion uint16 `json:"csd_version"` + + // Must be zero. + DependentLoadFlags uint16 `json:"dependent_load_flags"` + + // Reserved for use by the system. + EditList uint64 `json:"edit_list"` + + // A pointer to a cookie that is used by Visual C++ or GS implementation. + SecurityCookie uint64 `json:"security_cookie"` + + // [x86 only] The VA of the sorted table of RVAs of each valid, unique SE + // handler in the image. + SEHandlerTable uint64 `json:"se_handler_table"` + + // [x86 only] The count of unique handlers in the table. + SEHandlerCount uint64 `json:"se_handler_count"` + + // The VA where Control Flow Guard check-function pointer is stored. + GuardCFCheckFunctionPointer uint64 `json:"guard_cf_check_function_pointer"` + + // The VA where Control Flow Guard dispatch-function pointer is stored. + GuardCFDispatchFunctionPointer uint64 `json:"guard_cf_dispatch_function_pointer"` + + // The VA of the sorted table of RVAs of each Control Flow Guard function in + // the image. + GuardCFFunctionTable uint64 `json:"guard_cf_function_table"` + + // The count of unique RVAs in the above table. + GuardCFFunctionCount uint64 `json:"guard_cf_function_count"` + + // Control Flow Guard related flags. + GuardFlags uint32 `json:"guard_flags"` + + // Code integrity information. + CodeIntegrity ImageLoadConfigCodeIntegrity `json:"code_integrity"` + + // The VA where Control Flow Guard address taken IAT table is stored. + GuardAddressTakenIATEntryTable uint64 `json:"guard_address_taken_iat_entry_table"` + + // The count of unique RVAs in the above table. + GuardAddressTakenIATEntryCount uint64 `json:"guard_address_taken_iat_entry_count"` + + // The VA where Control Flow Guard long jump target table is stored. + GuardLongJumpTargetTable uint64 `json:"guard_long_jump_target_table"` + + // The count of unique RVAs in the above table. + GuardLongJumpTargetCount uint64 `json:"guard_long_jump_target_count"` + + DynamicValueRelocTable uint64 `json:"dynamic_value_reloc_table"` + + // Not sure when this was renamed from HybridMetadataPointer. 
+	CHPEMetadataPointer uint64 `json:"chpe_metadata_pointer"`
+
+	GuardRFFailureRoutine                    uint64 `json:"guard_rf_failure_routine"`
+	GuardRFFailureRoutineFunctionPointer     uint64 `json:"guard_rf_failure_routine_function_pointer"`
+	DynamicValueRelocTableOffset             uint32 `json:"dynamic_value_reloc_table_offset"`
+	DynamicValueRelocTableSection            uint16 `json:"dynamic_value_reloc_table_section"`
+	Reserved2                                uint16 `json:"reserved_2"`
+	GuardRFVerifyStackPointerFunctionPointer uint64 `json:"guard_rf_verify_stack_pointer_function_pointer"`
+	HotPatchTableOffset                      uint32 `json:"hot_patch_table_offset"`
+	Reserved3                                uint32 `json:"reserved_3"`
+	EnclaveConfigurationPointer              uint64 `json:"enclave_configuration_pointer"`
+	VolatileMetadataPointer                  uint64 `json:"volatile_metadata_pointer"`
+	GuardEHContinuationTable                 uint64 `json:"guard_eh_continuation_table"`
+	GuardEHContinuationCount                 uint64 `json:"guard_eh_continuation_count"`
+	GuardXFGCheckFunctionPointer             uint64 `json:"guard_xfg_check_function_pointer"`
+	GuardXFGDispatchFunctionPointer          uint64 `json:"guard_xfg_dispatch_function_pointer"`
+	GuardXFGTableDispatchFunctionPointer     uint64 `json:"guard_xfg_table_dispatch_function_pointer"`
+	CastGuardOSDeterminedFailureMode         uint64 `json:"cast_guard_os_determined_failure_mode"`
+	GuardMemcpyFunctionPointer               uint64 `json:"guard_memcpy_function_pointer"`
+}
+
+// ImageCHPEMetadataX86 represents the IMAGE_CHPE_METADATA_X86 structure.
+type ImageCHPEMetadataX86 struct {
+	Version                                  uint32 `json:"version"`
+	CHPECodeAddressRangeOffset               uint32 `json:"chpe_code_address_range_offset"`
+	CHPECodeAddressRangeCount                uint32 `json:"chpe_code_address_range_count"`
+	WoWA64ExceptionHandlerFunctionPtr        uint32 `json:"WoW_a64_exception_handler_function_ptr"`
+	WoWA64DispatchCallFunctionPtr            uint32 `json:"WoW_a64_dispatch_call_function_ptr"`
+	WoWA64DispatchIndirectCallFunctionPtr    uint32 `json:"WoW_a64_dispatch_indirect_call_function_ptr"`
+	WoWA64DispatchIndirectCallCfgFunctionPtr uint32 `json:"WoW_a64_dispatch_indirect_call_cfg_function_ptr"`
+	WoWA64DispatchRetFunctionPtr             uint32 `json:"WoW_a64_dispatch_ret_function_ptr"`
+	WoWA64DispatchRetLeafFunctionPtr         uint32 `json:"WoW_a64_dispatch_ret_leaf_function_ptr"`
+	WoWA64DispatchJumpFunctionPtr            uint32 `json:"WoW_a64_dispatch_jump_function_ptr"`
+	CompilerIATPointer                       uint32 `json:"compiler_iat_pointer"`     // Present if Version >= 2
+	WoWA64RDTSCFunctionPtr                   uint32 `json:"WoW_a64_rdtsc_function_ptr"` // Present if Version >= 3
+}
+
+type CodeRange struct {
+	Begin   uint32 `json:"begin"`
+	Length  uint32 `json:"length"`
+	Machine uint8  `json:"machine"`
+}
+
+type CompilerIAT struct {
+	RVA         uint32 `json:"rva"`
+	Value       uint32 `json:"value"`
+	Description string `json:"description"`
+}
+
+type HybridPE struct {
+	CHPEMetadata interface{}   `json:"chpe_metadata"`
+	CodeRanges   []CodeRange   `json:"code_ranges"`
+	CompilerIAT  []CompilerIAT `json:"compiler_iat"`
+}
+
+// ImageDynamicRelocationTable represents the DVRT header.
+type ImageDynamicRelocationTable struct {
+	// So far, there is only one version of the DVRT header (1).
+	Version uint32 `json:"version"`
+	// Size represents the number of bytes after the header that contain
+	// retpoline information.
+	Size uint32 `json:"size"`
+	// IMAGE_DYNAMIC_RELOCATION DynamicRelocations[0];
+}
+
+// Dynamic value relocation entries following IMAGE_DYNAMIC_RELOCATION_TABLE.
+// Each block starts with the header.
+
+// ImageDynamicRelocation32 represents the 32-bit version of a reloc entry.
+type ImageDynamicRelocation32 struct { + // Symbol field identifies one of the existing types of dynamic relocations + // so far (values 3, 4 and 5). + Symbol uint32 `json:"symbol"` + + // Then, for each page, there is a block that starts with a relocation entry. + // BaseRelocSize represents the size of the block. + BaseRelocSize uint32 `json:"base_reloc_size"` + // IMAGE_BASE_RELOCATION BaseRelocations[0]; +} + +// ImageDynamicRelocation64 represents the 64-bit version of a reloc entry. +type ImageDynamicRelocation64 struct { + // Symbol field identifies one of the existing types of dynamic relocations + // so far (values 3, 4 and 5). + Symbol uint64 `json:"symbol"` + + // Then, for each page, there is a block that starts with a relocation entry. + // BaseRelocSize represents the size of the block. + BaseRelocSize uint32 `json:"base_reloc_size"` + // IMAGE_BASE_RELOCATION BaseRelocations[0]; +} + +type ImageDynamicRelocation32v2 struct { + HeaderSize uint32 `json:"header_size"` + FixupInfoSize uint32 `json:"fixup_info_size"` + Symbol uint32 `json:"symbol"` + SymbolGroup uint32 `json:"symbol_group"` + Flags uint32 `json:"flags"` + // ... variable length header fields + // UCHAR FixupInfo[FixupInfoSize] +} + +type ImageDynamicRelocation64v2 struct { + HeaderSize uint32 `json:"header_size"` + FixupInfoSize uint32 `json:"fixup_info_size"` + Symbol uint64 `json:"symbol"` + SymbolGroup uint32 `json:"symbol_group"` + Flags uint32 `json:"flags"` + // ... variable length header fields + // UCHAR FixupInfo[FixupInfoSize] +} + +type ImagePrologueDynamicRelocationHeader struct { + PrologueByteCount uint8 `json:"prologue_byte_count"` + // UCHAR PrologueBytes[PrologueByteCount]; +} + +type ImageEpilogueDynamicRelocationHeader struct { + EpilogueCount uint32 `json:"epilogue_count"` + EpilogueByteCount uint8 `json:"epilogue_byte_count"` + BranchDescriptorElementSize uint8 `json:"branch_descriptor_element_size"` + BranchDescriptorCount uint8 `json:"branch_descriptor_count"` + // UCHAR BranchDescriptors[...]; + // UCHAR BranchDescriptorBitMap[...]; +} + +type CFGFunction struct { + // RVA of the target CFG call. + RVA uint32 `json:"rva"` + + // Flags attached to each GFIDS entry if any call targets have metadata. + Flags ImageGuardFlagType `json:"flags"` + Description string `json:"description"` +} + +type CFGIATEntry struct { + RVA uint32 `json:"rva"` + IATValue uint32 `json:"iat_value"` + INTValue uint32 `json:"int_value"` + Description string `json:"description"` +} + +type RelocBlock struct { + ImgBaseReloc ImageBaseRelocation `json:"img_base_reloc"` + TypeOffsets []interface{} `json:"type_offsets"` +} +type RelocEntry struct { + // Could be ImageDynamicRelocation32{} or ImageDynamicRelocation64{} + ImageDynamicRelocation interface{} `json:"image_dynamic_relocation"` + RelocBlocks []RelocBlock `json:"reloc_blocks"` +} + +// ImageImportControlTransferDynamicRelocation represents the Imported Address +// Retpoline (type 3), size = 4 bytes. +type ImageImportControlTransferDynamicRelocation struct { + PageRelativeOffset uint16 // (12 bits) + // 1 - the opcode is a CALL + // 0 - the opcode is a JMP. + IndirectCall uint16 // (1 bit) + IATIndex uint32 // (19 bits) +} + +// ImageIndirectControlTransferDynamicRelocation represents the Indirect Branch +// Retpoline (type 4), size = 2 bytes. 
+type ImageIndirectControlTransferDynamicRelocation struct { + PageRelativeOffset uint16 // (12 bits) + IndirectCall uint8 // (1 bit) + RexWPrefix uint8 // (1 bit) + CfgCheck uint8 // (1 bit) + Reserved uint8 // (1 bit) +} + +// ImageSwitchableBranchDynamicRelocation represents the Switchable Retpoline +// (type 5), size = 2 bytes. +type ImageSwitchableBranchDynamicRelocation struct { + PageRelativeOffset uint16 // (12 bits) + RegisterNumber uint16 // (4 bits) +} + +// DVRT represents the Dynamic Value Relocation Table. +// The DVRT was originally introduced back in the Windows 10 Creators Update to +// improve kernel address space layout randomization (KASLR). It allowed the +// memory manager’s page frame number (PFN) database and page table self-map to +// be assigned dynamic addresses at runtime. The DVRT is stored directly in the +// binary and contains a series of relocation entries for each symbol (i.e. +// address) that is to be relocated. The relocation entries are themselves +// arranged in a hierarchical fashion grouped first by symbol and then by +// containing page to allow for a compact description of all locations in the +// binary that reference a relocatable symbol. +// Reference: https://techcommunity.microsoft.com/t5/windows-os-platform-blog/mitigating-spectre-variant-2-with-retpoline-on-windows/ba-p/295618 +type DVRT struct { + ImageDynamicRelocationTable `json:"image_dynamic_relocation_table"` + Entries []RelocEntry `json:"entries"` +} + +type Enclave struct { + + // Points to either ImageEnclaveConfig32{} or ImageEnclaveConfig64{}. + Config interface{} `json:"config"` + + Imports []ImageEnclaveImport `json:"imports"` +} + +type RangeTableEntry struct { + RVA uint32 `json:"rva"` + Size uint32 `json:"size"` +} + +type VolatileMetadata struct { + Struct ImageVolatileMetadata `json:"struct"` + AccessRVATable []uint32 `json:"access_rva_table"` + InfoRangeTable []RangeTableEntry `json:"info_range_table"` +} +type LoadConfig struct { + Struct interface{} `json:"struct"` + SEH []uint32 `json:"seh"` + GFIDS []CFGFunction `json:"gfids"` + CFGIAT []CFGIATEntry `json:"cfgiat"` + CFGLongJump []uint32 `json:"cfg_long_jump"` + CHPE *HybridPE `json:"chpe"` + DVRT *DVRT `json:"dvrt"` + Enclave *Enclave `json:"enclave"` + VolatileMetadata *VolatileMetadata `json:"volatile_metadata"` +} + +// ImageLoadConfigCodeIntegrity Code Integrity in load config (CI). +type ImageLoadConfigCodeIntegrity struct { + // Flags to indicate if CI information is available, etc. + Flags uint16 `json:"flags"` + // 0xFFFF means not available + Catalog uint16 `json:"catalog"` + CatalogOffset uint32 `json:"catalog_offset"` + // Additional bitmask to be defined later + Reserved uint32 `json:"reserved"` +} + +type ImageEnclaveConfig32 struct { + + // The size of the IMAGE_ENCLAVE_CONFIG32 structure, in bytes. + Size uint32 `json:"size"` + + // The minimum size of the IMAGE_ENCLAVE_CONFIG32 structure that the image + // loader must be able to process in order for the enclave to be usable. + // This member allows an enclave to inform an earlier version of the image + // loader that the image loader can safely load the enclave and ignore optional + // members added to IMAGE_ENCLAVE_CONFIG32 for later versions of the enclave. + + // If the size of IMAGE_ENCLAVE_CONFIG32 that the image loader can process is + // less than MinimumRequiredConfigSize, the enclave cannot be run securely. 
+	// If MinimumRequiredConfigSize is zero, the minimum size of the
+	// IMAGE_ENCLAVE_CONFIG32 structure that the image loader must be able to
+	// process in order for the enclave to be usable is assumed to be the size
+	// of the structure through and including the MinimumRequiredConfigSize member.
+	MinimumRequiredConfigSize uint32 `json:"minimum_required_config_size"`
+
+	// A flag that indicates whether the enclave permits debugging.
+	PolicyFlags uint32 `json:"policy_flags"`
+
+	// The number of images in the array of images that the ImportList member
+	// points to.
+	NumberOfImports uint32 `json:"number_of_imports"`
+
+	// The relative virtual address of the array of images that the enclave
+	// image may import, with identity information for each image.
+	ImportList uint32 `json:"import_list"`
+
+	// The size of each image in the array of images that the ImportList member
+	// points to.
+	ImportEntrySize uint32 `json:"import_entry_size"`
+
+	// The family identifier that the author of the enclave assigned to the enclave.
+	FamilyID [ImageEnclaveShortIDLength]uint8 `json:"family_id"`
+
+	// The image identifier that the author of the enclave assigned to the enclave.
+	ImageID [ImageEnclaveShortIDLength]uint8 `json:"image_id"`
+
+	// The version number that the author of the enclave assigned to the enclave.
+	ImageVersion uint32 `json:"image_version"`
+
+	// The security version number that the author of the enclave assigned to
+	// the enclave.
+	SecurityVersion uint32 `json:"security_version"`
+
+	// The expected virtual size of the private address range for the enclave,
+	// in bytes.
+	EnclaveSize uint32 `json:"enclave_size"`
+
+	// The maximum number of threads that can be created within the enclave.
+	NumberOfThreads uint32 `json:"number_of_threads"`
+
+	// A flag that indicates whether the image is suitable for use as the
+	// primary image in the enclave.
+	EnclaveFlags uint32 `json:"enclave_flags"`
+}
+
+type ImageEnclaveConfig64 struct {
+
+	// The size of the IMAGE_ENCLAVE_CONFIG64 structure, in bytes.
+	Size uint32 `json:"size"`
+
+	// The minimum size of the IMAGE_ENCLAVE_CONFIG64 structure that the image
+	// loader must be able to process in order for the enclave to be usable.
+	// This member allows an enclave to inform an earlier version of the image
+	// loader that the image loader can safely load the enclave and ignore
+	// optional members added to IMAGE_ENCLAVE_CONFIG64 for later versions of
+	// the enclave.
+
+	// If the size of IMAGE_ENCLAVE_CONFIG64 that the image loader can process
+	// is less than MinimumRequiredConfigSize, the enclave cannot be run securely.
+	// If MinimumRequiredConfigSize is zero, the minimum size of the
+	// IMAGE_ENCLAVE_CONFIG64 structure that the image loader must be able to
+	// process in order for the enclave to be usable is assumed to be the size
+	// of the structure through and including the MinimumRequiredConfigSize member.
+	MinimumRequiredConfigSize uint32 `json:"minimum_required_config_size"`
+
+	// A flag that indicates whether the enclave permits debugging.
+	PolicyFlags uint32 `json:"policy_flags"`
+
+	// The number of images in the array of images that the ImportList member
+	// points to.
+	NumberOfImports uint32 `json:"number_of_imports"`
+
+	// The relative virtual address of the array of images that the enclave
+	// image may import, with identity information for each image.
+	ImportList uint32 `json:"import_list"`
+
+	// The size of each image in the array of images that the ImportList member
+	// points to.
+	ImportEntrySize uint32 `json:"import_entry_size"`
+
+	// The family identifier that the author of the enclave assigned to the enclave.
+	FamilyID [ImageEnclaveShortIDLength]uint8 `json:"family_id"`
+
+	// The image identifier that the author of the enclave assigned to the enclave.
+	ImageID [ImageEnclaveShortIDLength]uint8 `json:"image_id"`
+
+	// The version number that the author of the enclave assigned to the enclave.
+	ImageVersion uint32 `json:"image_version"`
+
+	// The security version number that the author of the enclave assigned to the enclave.
+	SecurityVersion uint32 `json:"security_version"`
+
+	// The expected virtual size of the private address range for the enclave, in bytes.
+	EnclaveSize uint64 `json:"enclave_size"`
+
+	// The maximum number of threads that can be created within the enclave.
+	NumberOfThreads uint32 `json:"number_of_threads"`
+
+	// A flag that indicates whether the image is suitable for use as the primary
+	// image in the enclave.
+	EnclaveFlags uint32 `json:"enclave_flags"`
+}
+
+// ImageEnclaveImport defines an entry in the array of images that an enclave can import.
+type ImageEnclaveImport struct {
+
+	// The type of identifier of the image that must match the value in the import record.
+	MatchType uint32 `json:"match_type"`
+
+	// The minimum enclave security version that each image must have for the
+	// image to be imported successfully. The image is rejected unless its
+	// enclave security version is equal to or greater than the minimum value in
+	// the import record. Set the value in the import record to zero to turn off
+	// the security version check.
+	MinimumSecurityVersion uint32 `json:"minimum_security_version"`
+
+	// The unique identifier of the primary module for the enclave, if the
+	// MatchType member is IMAGE_ENCLAVE_IMPORT_MATCH_UNIQUE_ID. Otherwise,
+	// the author identifier of the primary module for the enclave.
+	UniqueOrAuthorID [ImageEnclaveLongIDLength]uint8 `json:"unique_or_author_id"`
+
+	// The family identifier of the primary module for the enclave.
+	FamilyID [ImageEnclaveShortIDLength]uint8 `json:"family_id"`
+
+	// The image identifier of the primary module for the enclave.
+	ImageID [ImageEnclaveShortIDLength]uint8 `json:"image_id"`
+
+	// The relative virtual address of a NULL-terminated string that contains
+	// the same value found in the import directory for the image.
+	ImportName uint32 `json:"import_name"`
+
+	// Reserved.
+	Reserved uint32 `json:"reserved"`
+}
+
+type ImageVolatileMetadata struct {
+	Size                       uint32
+	Version                    uint32
+	VolatileAccessTable        uint32
+	VolatileAccessTableSize    uint32
+	VolatileInfoRangeTable     uint32
+	VolatileInfoRangeTableSize uint32
+}
+
+// The load configuration structure (IMAGE_LOAD_CONFIG_DIRECTORY) was formerly
+// used in very limited cases in the Windows NT operating system itself to
+// describe various features too difficult or too large to describe in the file
+// header or optional header of the image. Current versions of the Microsoft
+// linker and Windows XP and later versions of Windows use a new version of this
+// structure for 32-bit x86-based systems that include reserved SEH technology.
+// The data directory entry for a pre-reserved SEH load configuration structure
+// must specify a particular size of the load configuration structure because
+// the operating system loader always expects it to be a certain value. In that
+// regard, the size is really only a version check.
For compatibility with
+// Windows XP and earlier versions of Windows, the size must be 64 for x86 images.
+func (pe *File) parseLoadConfigDirectory(rva, size uint32) error {
+
+	// As the load config structure changes over time,
+	// we first read its size to figure out which one we have to cast against.
+	fileOffset := pe.GetOffsetFromRva(rva)
+	structSize, err := pe.ReadUint32(fileOffset)
+	if err != nil {
+		return err
+	}
+
+	// Use this helper function to print struct size.
+	// PrintLoadConfigStruct()
+	var loadCfg interface{}
+
+	// Boundary check
+	totalSize := fileOffset + size
+
+	// Integer overflow
+	if (totalSize > fileOffset) != (size > 0) {
+		return ErrOutsideBoundary
+	}
+
+	if fileOffset >= pe.size || totalSize > pe.size {
+		return ErrOutsideBoundary
+	}
+
+	if pe.Is32 {
+		loadCfg32 := ImageLoadConfigDirectory32{}
+		imgLoadConfigDirectory := make([]byte, binary.Size(loadCfg32))
+		copy(imgLoadConfigDirectory, pe.data[fileOffset:fileOffset+structSize])
+		buf := bytes.NewReader(imgLoadConfigDirectory)
+		err = binary.Read(buf, binary.LittleEndian, &loadCfg32)
+		loadCfg = loadCfg32
+	} else {
+		loadCfg64 := ImageLoadConfigDirectory64{}
+		imgLoadConfigDirectory := make([]byte, binary.Size(loadCfg64))
+		copy(imgLoadConfigDirectory, pe.data[fileOffset:fileOffset+structSize])
+		buf := bytes.NewReader(imgLoadConfigDirectory)
+		err = binary.Read(buf, binary.LittleEndian, &loadCfg64)
+		loadCfg = loadCfg64
+	}
+
+	if err != nil {
+		return err
+	}
+
+	// Save the load config struct.
+	pe.HasLoadCFG = true
+	pe.LoadConfig.Struct = loadCfg
+
+	// Retrieve SEH handlers if there are any.
+	if pe.Is32 {
+		handlers := pe.getSEHHandlers()
+		pe.LoadConfig.SEH = handlers
+	}
+
+	// Retrieve Control Flow Guard function targets if there are any.
+	pe.LoadConfig.GFIDS = pe.getControlFlowGuardFunctions()
+
+	// Retrieve Control Flow Guard IAT entries if there are any.
+	pe.LoadConfig.CFGIAT = pe.getControlFlowGuardIAT()
+
+	// Retrieve long jump target functions if there are any.
+	pe.LoadConfig.CFGLongJump = pe.getLongJumpTargetTable()
+
+	// Retrieve compiled hybrid PE metadata if there is any.
+	pe.LoadConfig.CHPE = pe.getHybridPE()
+
+	// Retrieve the dynamic value relocation table if there is one.
+	pe.LoadConfig.DVRT = pe.getDynamicValueRelocTable()
+
+	// Retrieve the enclave configuration if there is one.
+	pe.LoadConfig.Enclave = pe.getEnclaveConfiguration()
+
+	// Retrieve the volatile metadata table if there is one.
+	pe.LoadConfig.VolatileMetadata = pe.getVolatileMetadata()
+
+	return nil
+}
+
+// StringifyGuardFlags returns a list of strings describing the GuardFlags.
+func StringifyGuardFlags(flags uint32) []string {
+	var values []string
+	guardFlagMap := map[uint32]string{
+		ImageGuardCfInstrumented:                 "Instrumented",
+		ImageGuardCfWInstrumented:                "WriteInstrumented",
+		ImageGuardCfFunctionTablePresent:         "TargetMetadata",
+		ImageGuardSecurityCookieUnused:           "SecurityCookieUnused",
+		ImageGuardProtectDelayLoadIAT:            "DelayLoadIAT",
+		ImageGuardDelayLoadIATInItsOwnSection:    "DelayLoadIATInItsOwnSection",
+		ImageGuardCfExportSuppressionInfoPresent: "ExportSuppressionInfoPresent",
+		ImageGuardCfEnableExportSuppression:      "EnableExportSuppression",
+		ImageGuardCfLongJumpTablePresent:         "LongJumpTablePresent",
+	}
+
+	for k, s := range guardFlagMap {
+		if k&flags != 0 {
+			values = append(values, s)
+		}
+	}
+	return values
+}
+
+func (pe *File) getSEHHandlers() []uint32 {
+
+	var handlers []uint32
+	v := reflect.ValueOf(pe.LoadConfig.Struct)
+
+	// SEHandlerCount is found in index 19 of the struct.
+	SEHandlerCount := uint32(v.Field(19).Uint())
+	if SEHandlerCount > 0 {
+		SEHandlerTable := uint32(v.Field(18).Uint())
+		imageBase := pe.NtHeader.OptionalHeader.(ImageOptionalHeader32).ImageBase
+		rva := SEHandlerTable - imageBase
+		for i := uint32(0); i < SEHandlerCount; i++ {
+			offset := pe.GetOffsetFromRva(rva + i*4)
+			handler, err := pe.ReadUint32(offset)
+			if err != nil {
+				return handlers
+			}
+
+			handlers = append(handlers, handler)
+		}
+	}
+
+	return handlers
+}
+
+func (pe *File) getControlFlowGuardFunctions() []CFGFunction {
+
+	v := reflect.ValueOf(pe.LoadConfig.Struct)
+	var GFIDS []CFGFunction
+	var err error
+
+	// The GFIDS table is an array of 4 + n bytes, where n is given by:
+	// ((GuardFlags & IMAGE_GUARD_CF_FUNCTION_TABLE_SIZE_MASK) >>
+	// IMAGE_GUARD_CF_FUNCTION_TABLE_SIZE_SHIFT).
+
+	// This allows for extra metadata to be attached to CFG call targets in
+	// the future. The only currently defined metadata is an optional 1-byte
+	// extra flags field (“GFIDS flags”) that is attached to each GFIDS
+	// entry if any call targets have metadata.
+	GuardFlags := v.Field(24).Uint()
+	n := (GuardFlags & ImageGuardCfFunctionTableSizeMask) >>
+		ImageGuardCfFunctionTableSizeShift
+	GuardCFFunctionCount := v.Field(23).Uint()
+	if GuardCFFunctionCount > 0 {
+		if pe.Is32 {
+			GuardCFFunctionTable := uint32(v.Field(22).Uint())
+			imageBase := pe.NtHeader.OptionalHeader.(ImageOptionalHeader32).ImageBase
+			rva := GuardCFFunctionTable - imageBase
+			offset := pe.GetOffsetFromRva(rva)
+			for i := uint32(1); i <= uint32(GuardCFFunctionCount); i++ {
+				cfgFunction := CFGFunction{}
+				var cfgFlags uint8
+				cfgFunction.RVA, err = pe.ReadUint32(offset)
+				if err != nil {
+					return GFIDS
+				}
+				if n > 0 {
+					err = pe.structUnpack(&cfgFlags, offset+4, uint32(n))
+					if err != nil {
+						return GFIDS
+					}
+					cfgFunction.Flags = ImageGuardFlagType(cfgFlags)
+					if cfgFlags == ImageGuardFlagFIDSuppressed ||
+						cfgFlags == ImageGuardFlagExportSuppressed {
+						exportName := pe.GetExportFunctionByRVA(cfgFunction.RVA)
+						cfgFunction.Description = exportName.Name
+					}
+				}
+
+				GFIDS = append(GFIDS, cfgFunction)
+				offset += 4 + uint32(n)
+			}
+		} else {
+			GuardCFFunctionTable := v.Field(22).Uint()
+			imageBase := pe.NtHeader.OptionalHeader.(ImageOptionalHeader64).ImageBase
+			rva := uint32(GuardCFFunctionTable - imageBase)
+			offset := pe.GetOffsetFromRva(rva)
+			for i := uint64(1); i <= GuardCFFunctionCount; i++ {
+				var cfgFlags uint8
+				cfgFunction := CFGFunction{}
+				cfgFunction.RVA, err = pe.ReadUint32(offset)
+				if err != nil {
+					return GFIDS
+				}
+				if n > 0 {
+					err = pe.structUnpack(&cfgFlags, offset+4, uint32(n))
+					if err != nil {
+						return GFIDS
+					}
+					cfgFunction.Flags = ImageGuardFlagType(cfgFlags)
+					if cfgFlags == ImageGuardFlagFIDSuppressed ||
+						cfgFlags == ImageGuardFlagExportSuppressed {
+						exportName := pe.GetExportFunctionByRVA(cfgFunction.RVA)
+						cfgFunction.Description = exportName.Name
+					}
+				}
+
+				GFIDS = append(GFIDS, cfgFunction)
+				offset += 4 + uint32(n)
+			}
+		}
+	}
+	return GFIDS
+}
+
+func (pe *File) getControlFlowGuardIAT() []CFGIATEntry {
+
+	v := reflect.ValueOf(pe.LoadConfig.Struct)
+	var GFGIAT []CFGIATEntry
+	var err error
+
+	// GuardAddressTakenIatEntryCount is found in index 27 of the struct.
+	// An image that supports CFG ES includes a GuardAddressTakenIatEntryTable
+	// whose count is provided by the GuardAddressTakenIatEntryCount as part
+	// of its load configuration directory. This table is structurally
+	// formatted the same as the GFIDS table.
It uses the same GuardFlags + // IMAGE_GUARD_CF_FUNCTION_TABLE_SIZE_MASK mechanism to encode extra + // optional metadata bytes in the address taken IAT table, though all + // metadata bytes must be zero for the address taken IAT table and are + // reserved. + GuardFlags := v.Field(24).Uint() + n := (GuardFlags & ImageGuardCfFunctionTableSizeMask) >> + ImageGuardCfFunctionTableSizeShift + GuardAddressTakenIatEntryCount := v.Field(27).Uint() + if GuardAddressTakenIatEntryCount > 0 { + if pe.Is32 { + GuardAddressTakenIatEntryTable := uint32(v.Field(26).Uint()) + imageBase := pe.NtHeader.OptionalHeader.(ImageOptionalHeader32).ImageBase + rva := GuardAddressTakenIatEntryTable - imageBase + offset := pe.GetOffsetFromRva(rva) + for i := uint32(1); i <= uint32(GuardAddressTakenIatEntryCount); i++ { + cfgIATEntry := CFGIATEntry{} + cfgIATEntry.RVA, err = pe.ReadUint32(offset) + if err != nil { + return GFGIAT + } + imp, index := pe.GetImportEntryInfoByRVA(cfgIATEntry.RVA) + if len(imp.Functions) != 0 { + cfgIATEntry.INTValue = uint32(imp.Functions[index].OriginalThunkValue) + cfgIATEntry.IATValue = uint32(imp.Functions[index].ThunkValue) + cfgIATEntry.Description = imp.Name + "!" + imp.Functions[index].Name + } + GFGIAT = append(GFGIAT, cfgIATEntry) + offset += 4 + uint32(n) + } + } else { + GuardAddressTakenIatEntryTable := v.Field(26).Uint() + imageBase := pe.NtHeader.OptionalHeader.(ImageOptionalHeader64).ImageBase + rva := uint32(GuardAddressTakenIatEntryTable - imageBase) + offset := pe.GetOffsetFromRva(rva) + for i := uint64(1); i <= GuardAddressTakenIatEntryCount; i++ { + cfgIATEntry := CFGIATEntry{} + cfgIATEntry.RVA, err = pe.ReadUint32(offset) + if err != nil { + return GFGIAT + } + imp, index := pe.GetImportEntryInfoByRVA(cfgIATEntry.RVA) + if len(imp.Functions) != 0 { + cfgIATEntry.INTValue = uint32(imp.Functions[index].OriginalThunkValue) + cfgIATEntry.IATValue = uint32(imp.Functions[index].ThunkValue) + cfgIATEntry.Description = imp.Name + "!" + imp.Functions[index].Name + } + + GFGIAT = append(GFGIAT, cfgIATEntry) + offset += 4 + uint32(n) + } + } + + } + return GFGIAT +} + +func (pe *File) getLongJumpTargetTable() []uint32 { + + v := reflect.ValueOf(pe.LoadConfig.Struct) + var longJumpTargets []uint32 + + // The long jump table represents a sorted array of RVAs that are valid + // long jump targets. If a long jump target module sets + // IMAGE_GUARD_CF_LONGJUMP_TABLE_PRESENT in its GuardFlags field, then + // all long jump targets must be enumerated in the LongJumpTargetTable. + GuardFlags := v.Field(24).Uint() + n := (GuardFlags & ImageGuardCfFunctionTableSizeMask) >> + ImageGuardCfFunctionTableSizeShift + + // GuardLongJumpTargetCount is found in index 29 of the struct. 
+ GuardLongJumpTargetCount := v.Field(29).Uint() + if GuardLongJumpTargetCount > 0 { + if pe.Is32 { + GuardLongJumpTargetTable := uint32(v.Field(28).Uint()) + imageBase := pe.NtHeader.OptionalHeader.(ImageOptionalHeader32).ImageBase + rva := GuardLongJumpTargetTable - imageBase + offset := pe.GetOffsetFromRva(rva) + for i := uint32(1); i <= uint32(GuardLongJumpTargetCount); i++ { + target, err := pe.ReadUint32(offset) + if err != nil { + return longJumpTargets + } + longJumpTargets = append(longJumpTargets, target) + offset += 4 + uint32(n) + } + } else { + GuardLongJumpTargetTable := v.Field(28).Uint() + imageBase := pe.NtHeader.OptionalHeader.(ImageOptionalHeader64).ImageBase + rva := uint32(GuardLongJumpTargetTable - imageBase) + offset := pe.GetOffsetFromRva(rva) + for i := uint64(1); i <= GuardLongJumpTargetCount; i++ { + target, err := pe.ReadUint32(offset) + if err != nil { + return longJumpTargets + } + longJumpTargets = append(longJumpTargets, target) + offset += 4 + uint32(n) + } + } + + } + return longJumpTargets +} + +func (pe *File) getHybridPE() *HybridPE { + v := reflect.ValueOf(pe.LoadConfig.Struct) + + // CHPEMetadataPointer is found in index 31 of the struct. + CHPEMetadataPointer := v.Field(31).Uint() + if CHPEMetadataPointer == 0 { + return nil + } + var rva uint32 + if pe.Is32 { + imageBase := pe.NtHeader.OptionalHeader.(ImageOptionalHeader32).ImageBase + rva = uint32(CHPEMetadataPointer) - imageBase + } else { + imageBase := pe.NtHeader.OptionalHeader.(ImageOptionalHeader64).ImageBase + rva = uint32(CHPEMetadataPointer - imageBase) + } + + // As the image CHPE metadata structure changes over time, + // we first read its version to figure out which one we have to + // cast against. + fileOffset := pe.GetOffsetFromRva(rva) + version, err := pe.ReadUint32(fileOffset) + if err != nil { + return nil + } + + structSize := uint32(0) + imgCHPEMetaX86 := ImageCHPEMetadataX86{} + + switch version { + case 0x1: + structSize = uint32(binary.Size(imgCHPEMetaX86) - 8) + case 0x2: + structSize = uint32(binary.Size(imgCHPEMetaX86) - 4) + case 0x3: + structSize = uint32(binary.Size(imgCHPEMetaX86)) + default: + // This should be a newer version, default to the latest CHPE version. 
+ structSize = uint32(binary.Size(imgCHPEMetaX86)) + } + + // Boundary check + totalSize := fileOffset + structSize + + // Integer overflow + if (totalSize > fileOffset) != (structSize > 0) { + pe.logger.Debug("encountered an outside read boundary when reading CHPE structure") + return nil + } + + if fileOffset >= pe.size || totalSize > pe.size { + pe.logger.Debug("encountered an outside read boundary when reading CHPE structure") + return nil + } + + imgCHPEMeta := make([]byte, binary.Size(imgCHPEMetaX86)) + copy(imgCHPEMeta, pe.data[fileOffset:fileOffset+structSize]) + buf := bytes.NewReader(imgCHPEMeta) + err = binary.Read(buf, binary.LittleEndian, &imgCHPEMetaX86) + if err != nil { + pe.logger.Debug("encountered an error while unpacking image CHPE Meta") + return nil + } + + hybridPE := HybridPE{} + hybridPE.CHPEMetadata = imgCHPEMetaX86 + + // Code Ranges + + /* + typedef struct _IMAGE_CHPE_RANGE_ENTRY { + union { + ULONG StartOffset; + struct { + ULONG NativeCode : 1; + ULONG AddressBits : 31; + } DUMMYSTRUCTNAME; + } DUMMYUNIONNAME; + + ULONG Length; + } IMAGE_CHPE_RANGE_ENTRY, *PIMAGE_CHPE_RANGE_ENTRY; + */ + + rva = imgCHPEMetaX86.CHPECodeAddressRangeOffset + for i := 0; i < int(imgCHPEMetaX86.CHPECodeAddressRangeCount); i++ { + + codeRange := CodeRange{} + fileOffset := pe.GetOffsetFromRva(rva) + begin, err := pe.ReadUint32(fileOffset) + if err != nil { + break + } + + if begin&1 == 1 { + codeRange.Machine = 1 + begin = uint32(int(begin) & ^1) + } + codeRange.Begin = begin + + fileOffset += 4 + size, err := pe.ReadUint32(fileOffset) + if err != nil { + break + } + codeRange.Length = size + + hybridPE.CodeRanges = append(hybridPE.CodeRanges, codeRange) + rva += 8 + } + + // Compiler IAT + if imgCHPEMetaX86.CompilerIATPointer != 0 { + rva := imgCHPEMetaX86.CompilerIATPointer + for i := 0; i < 1024; i++ { + compilerIAT := CompilerIAT{} + compilerIAT.RVA = rva + fileOffset = pe.GetOffsetFromRva(rva) + compilerIAT.Value, err = pe.ReadUint32(fileOffset) + if err != nil { + break + } + + impFunc, _ := pe.GetImportEntryInfoByRVA(compilerIAT.RVA) + compilerIAT.Description = impFunc.Name + hybridPE.CompilerIAT = append( + hybridPE.CompilerIAT, compilerIAT) + rva += 4 + } + } + return &hybridPE +} + +func (pe *File) getDynamicValueRelocTable() *DVRT { + + var structSize uint32 + var imgDynRelocSize uint32 + var retpolineType uint8 + dvrt := DVRT{} + imgDynRelocTable := ImageDynamicRelocationTable{} + + v := reflect.ValueOf(pe.LoadConfig.Struct) + DynamicValueRelocTableOffset := v.Field(34).Uint() + DynamicValueRelocTableSection := v.Field(35).Uint() + if DynamicValueRelocTableOffset == 0 || DynamicValueRelocTableSection == 0 { + return nil + } + + section := pe.getSectionByName(".reloc") + if section == nil { + return nil + } + + // Get the dynamic value relocation table header. + rva := section.VirtualAddress + uint32(DynamicValueRelocTableOffset) + offset := pe.GetOffsetFromRva(rva) + structSize = uint32(binary.Size(imgDynRelocTable)) + err := pe.structUnpack(&imgDynRelocTable, offset, structSize) + if err != nil { + return nil + } + + dvrt.ImageDynamicRelocationTable = imgDynRelocTable + offset += structSize + + // Get dynamic relocation entries according to version. + switch imgDynRelocTable.Version { + case 1: + relocTableIt := uint32(0) + baseBlockSize := uint32(0) + + // Iterate over our dynamic reloc table entries. + for relocTableIt < imgDynRelocTable.Size { + + relocEntry := RelocEntry{} + + // Each block starts with the header. 
+ if pe.Is32 { + imgDynReloc := ImageDynamicRelocation32{} + imgDynRelocSize = uint32(binary.Size(imgDynReloc)) + err = pe.structUnpack(&imgDynReloc, offset, imgDynRelocSize) + if err != nil { + return nil + } + relocEntry.ImageDynamicRelocation = imgDynReloc + baseBlockSize = imgDynReloc.BaseRelocSize + retpolineType = uint8(imgDynReloc.Symbol) + } else { + imgDynReloc := ImageDynamicRelocation64{} + imgDynRelocSize = uint32(binary.Size(imgDynReloc)) + err = pe.structUnpack(&imgDynReloc, offset, imgDynRelocSize) + if err != nil { + return nil + } + relocEntry.ImageDynamicRelocation = imgDynReloc + baseBlockSize = imgDynReloc.BaseRelocSize + retpolineType = uint8(imgDynReloc.Symbol) + } + offset += imgDynRelocSize + relocTableIt += imgDynRelocSize + + // Then, for each page, there is a block that starts with a relocation entry: + blockIt := uint32(0) + for blockIt <= baseBlockSize-imgDynRelocSize { + relocBlock := RelocBlock{} + + baseReloc := ImageBaseRelocation{} + structSize = uint32(binary.Size(baseReloc)) + err = pe.structUnpack(&baseReloc, offset, structSize) + if err != nil { + return nil + } + + relocBlock.ImgBaseReloc = baseReloc + offset += structSize + + // After that there are entries for all of the places which need + // to be overwritten by the retpoline jump. The structure used + // for those entries depends on the type (symbol) that was used + // above. There are three types of retpoline so far. Entry for + //each of them will contain pageRelativeOffset. The kernel uses + // that entry to apply the proper replacement under + // virtualAddress + pageRelativeOffset address. + branchIt := uint32(0) + switch retpolineType { + case 3: + for branchIt < (baseReloc.SizeOfBlock-structSize)/4 { + imgImpCtrlTransDynReloc := ImageImportControlTransferDynamicRelocation{} + + dword, err := pe.ReadUint32(offset) + if err != nil { + return nil + } + + imgImpCtrlTransDynReloc.PageRelativeOffset = uint16(dword) & 0xfff + imgImpCtrlTransDynReloc.IndirectCall = uint16(dword) & 0x1000 >> 12 + imgImpCtrlTransDynReloc.IATIndex = dword & 0xFFFFE000 >> 13 + + offset += 4 + branchIt += 1 + relocBlock.TypeOffsets = append(relocBlock.TypeOffsets, imgImpCtrlTransDynReloc) + } + case 4: + for branchIt < (baseReloc.SizeOfBlock-structSize)/2 { + imgIndirCtrlTransDynReloc := ImageIndirectControlTransferDynamicRelocation{} + + word, err := pe.ReadUint16(offset) + if err != nil { + return nil + } + imgIndirCtrlTransDynReloc.PageRelativeOffset = word & 0xfff + imgIndirCtrlTransDynReloc.IndirectCall = uint8(word & 0x1000 >> 12) + imgIndirCtrlTransDynReloc.RexWPrefix = uint8(word & 0x2000 >> 13) + imgIndirCtrlTransDynReloc.CfgCheck = uint8(word & 0x4000 >> 14) + imgIndirCtrlTransDynReloc.Reserved = uint8(word & 0x8000 >> 15) + + branchIt += 1 + offset += 2 + + // Padding might be added at the end of the block. + if (ImageIndirectControlTransferDynamicRelocation{}) == imgIndirCtrlTransDynReloc { + continue + } + relocBlock.TypeOffsets = append(relocBlock.TypeOffsets, imgIndirCtrlTransDynReloc) + } + case 5: + for branchIt < (baseReloc.SizeOfBlock-structSize)/2 { + imgSwitchBranchDynReloc := ImageSwitchableBranchDynamicRelocation{} + + word, err := pe.ReadUint16(offset) + if err != nil { + return nil + } + imgSwitchBranchDynReloc.PageRelativeOffset = word & 0xfff + imgSwitchBranchDynReloc.RegisterNumber = word & 0xf000 >> 12 + + offset += 2 + branchIt += 1 + + // Padding might be added at the end of the block. 
+ if (ImageSwitchableBranchDynamicRelocation{}) == imgSwitchBranchDynReloc { + continue + } + relocBlock.TypeOffsets = append(relocBlock.TypeOffsets, imgSwitchBranchDynReloc) + } + } + + blockIt += baseReloc.SizeOfBlock + relocEntry.RelocBlocks = append(relocEntry.RelocBlocks, relocBlock) + } + + dvrt.Entries = append(dvrt.Entries, relocEntry) + relocTableIt += baseBlockSize + } + case 2: + fmt.Print("Got version 2 !") + } + + return &dvrt +} + +func (pe *File) getEnclaveConfiguration() *Enclave { + + enclave := Enclave{} + + v := reflect.ValueOf(pe.LoadConfig.Struct) + EnclaveConfigurationPointer := v.Field(40).Uint() + if EnclaveConfigurationPointer == 0 { + return nil + } + + if pe.Is32 { + imgEnclaveCfg := ImageEnclaveConfig32{} + imgEnclaveCfgSize := uint32(binary.Size(imgEnclaveCfg)) + imageBase := pe.NtHeader.OptionalHeader.(ImageOptionalHeader32).ImageBase + rva := uint32(EnclaveConfigurationPointer) - imageBase + offset := pe.GetOffsetFromRva(rva) + err := pe.structUnpack(&imgEnclaveCfg, offset, imgEnclaveCfgSize) + if err != nil { + return nil + } + enclave.Config = imgEnclaveCfg + } else { + imgEnclaveCfg := ImageEnclaveConfig64{} + imgEnclaveCfgSize := uint32(binary.Size(imgEnclaveCfg)) + imageBase := pe.NtHeader.OptionalHeader.(ImageOptionalHeader64).ImageBase + rva := uint32(EnclaveConfigurationPointer - imageBase) + offset := pe.GetOffsetFromRva(rva) + err := pe.structUnpack(&imgEnclaveCfg, offset, imgEnclaveCfgSize) + if err != nil { + return nil + } + enclave.Config = imgEnclaveCfg + } + + // Get the array of images that an enclave can import. + val := reflect.ValueOf(enclave.Config) + ImportListRVA := val.FieldByName("ImportList").Interface().(uint32) + NumberOfImports := val.FieldByName("NumberOfImports").Interface().(uint32) + ImportEntrySize := val.FieldByName("ImportEntrySize").Interface().(uint32) + + offset := pe.GetOffsetFromRva(ImportListRVA) + for i := uint32(0); i < NumberOfImports; i++ { + imgEncImp := ImageEnclaveImport{} + imgEncImpSize := uint32(binary.Size(imgEncImp)) + err := pe.structUnpack(&imgEncImp, offset, imgEncImpSize) + if err != nil { + return nil + } + + offset += ImportEntrySize + enclave.Imports = append(enclave.Imports, imgEncImp) + } + + return &enclave +} + +func (pe *File) getVolatileMetadata() *VolatileMetadata { + + volatileMeta := VolatileMetadata{} + imgVolatileMeta := ImageVolatileMetadata{} + rva := uint32(0) + + v := reflect.ValueOf(pe.LoadConfig.Struct) + if v.NumField() <= 41 { + return nil + } + + VolatileMetadataPointer := v.Field(41).Uint() + if VolatileMetadataPointer == 0 { + return nil + } + + if pe.Is32 { + imageBase := pe.NtHeader.OptionalHeader.(ImageOptionalHeader32).ImageBase + rva = uint32(VolatileMetadataPointer) - imageBase + } else { + imageBase := pe.NtHeader.OptionalHeader.(ImageOptionalHeader64).ImageBase + rva = uint32(VolatileMetadataPointer - imageBase) + } + + offset := pe.GetOffsetFromRva(rva) + imgVolatileMetaSize := uint32(binary.Size(imgVolatileMeta)) + err := pe.structUnpack(&imgVolatileMeta, offset, imgVolatileMetaSize) + if err != nil { + return nil + } + volatileMeta.Struct = imgVolatileMeta + + if imgVolatileMeta.VolatileAccessTable != 0 && + imgVolatileMeta.VolatileAccessTableSize != 0 { + offset := pe.GetOffsetFromRva(imgVolatileMeta.VolatileAccessTable) + for i := uint32(0); i < imgVolatileMeta.VolatileAccessTableSize/4; i++ { + accessRVA, err := pe.ReadUint32(offset) + if err != nil { + break + } + + volatileMeta.AccessRVATable = append(volatileMeta.AccessRVATable, accessRVA) + offset += 4 + } + } 
+
+	if imgVolatileMeta.VolatileInfoRangeTable != 0 && imgVolatileMeta.VolatileInfoRangeTableSize != 0 {
+		offset := pe.GetOffsetFromRva(imgVolatileMeta.VolatileInfoRangeTable)
+		rangeEntrySize := uint32(binary.Size(RangeTableEntry{}))
+		for i := uint32(0); i < imgVolatileMeta.VolatileInfoRangeTableSize/rangeEntrySize; i++ {
+			entry := RangeTableEntry{}
+			err := pe.structUnpack(&entry, offset, rangeEntrySize)
+			if err != nil {
+				break
+			}
+
+			volatileMeta.InfoRangeTable = append(volatileMeta.InfoRangeTable, entry)
+			offset += rangeEntrySize
+		}
+	}
+
+	return &volatileMeta
+}
+
+// String returns a string interpretation of the load config directory image
+// guard flag.
+func (flag ImageGuardFlagType) String() string {
+	imageGuardFlagTypeMap := map[ImageGuardFlagType]string{
+		ImageGuardFlagFIDSuppressed:    "FID Suppressed",
+		ImageGuardFlagExportSuppressed: "Export Suppressed",
+	}
+
+	v, ok := imageGuardFlagTypeMap[flag]
+	if ok {
+		return v
+	}
+
+	return "?"
+}
diff --git a/vendor/github.com/saferwall/pe/log/README.md b/vendor/github.com/saferwall/pe/log/README.md
new file mode 100644
index 00000000..3b357d2a
--- /dev/null
+++ b/vendor/github.com/saferwall/pe/log/README.md
@@ -0,0 +1,42 @@
+# Logger
+
+This code was taken from the Go microservice framework [kratos](https://github.com/go-kratos/kratos).
+
+## Usage
+
+### Structured logging
+
+```go
+logger := log.NewStdLogger(os.Stdout)
+// fields & valuer
+logger = log.With(logger,
+	"service.name", "helloworld",
+	"service.version", "v1.0.0",
+	"ts", log.DefaultTimestamp,
+	"caller", log.DefaultCaller,
+)
+logger.Log(log.LevelInfo, "key", "value")
+
+// helper
+helper := log.NewHelper(logger)
+helper.Log(log.LevelInfo, "key", "value")
+helper.Info("info message")
+helper.Infof("info %s", "message")
+helper.Infow("key", "value")
+
+// filter
+log := log.NewHelper(log.NewFilter(logger,
+	log.FilterLevel(log.LevelInfo),
+	log.FilterKey("foo"),
+	log.FilterValue("bar"),
+	log.FilterFunc(customFilter),
+))
+log.Debug("debug log")
+log.Info("info log")
+log.Warn("warn log")
+log.Error("error log")
+```
+
+## Third party log library
+
+If you need to implement a third party logging library like `zap`, have a look at this [url](https://github.com/go-kratos/kratos/tree/main/contrib/log).
\ No newline at end of file
diff --git a/vendor/github.com/saferwall/pe/log/filter.go b/vendor/github.com/saferwall/pe/log/filter.go
new file mode 100644
index 00000000..9ef87ddf
--- /dev/null
+++ b/vendor/github.com/saferwall/pe/log/filter.go
@@ -0,0 +1,96 @@
+package log
+
+// FilterOption is a filter option.
+type FilterOption func(*Filter)
+
+const fuzzyStr = "***"
+
+// FilterLevel sets the filter level.
+func FilterLevel(level Level) FilterOption {
+	return func(opts *Filter) {
+		opts.level = level
+	}
+}
+
+// FilterKey sets the filter keys.
+func FilterKey(key ...string) FilterOption {
+	return func(o *Filter) {
+		for _, v := range key {
+			o.key[v] = struct{}{}
+		}
+	}
+}
+
+// FilterValue sets the filter values.
+func FilterValue(value ...string) FilterOption {
+	return func(o *Filter) {
+		for _, v := range value {
+			o.value[v] = struct{}{}
+		}
+	}
+}
+
+// FilterFunc sets the filter func.
+func FilterFunc(f func(level Level, keyvals ...interface{}) bool) FilterOption {
+	return func(o *Filter) {
+		o.filter = f
+	}
+}
+
+// Filter is a logger filter.
+type Filter struct {
+	logger Logger
+	level  Level
+	key    map[interface{}]struct{}
+	value  map[interface{}]struct{}
+	filter func(level Level, keyvals ...interface{}) bool
+}
+
+// NewFilter creates a new logger filter.
+func NewFilter(logger Logger, opts ...FilterOption) *Filter {
+	options := Filter{
+		logger: logger,
+		key:    make(map[interface{}]struct{}),
+		value:  make(map[interface{}]struct{}),
+	}
+	for _, o := range opts {
+		o(&options)
+	}
+	return &options
+}
+
+// Log prints a log by level and keyvals.
+func (f *Filter) Log(level Level, keyvals ...interface{}) error {
+	if level < f.level {
+		return nil
+	}
+	// fkv is used to provide a slice that contains both logger.prefix and
+	// keyvals for the filter.
+	var fkv []interface{}
+	if l, ok := f.logger.(*logger); ok {
+		if len(l.prefix) > 0 {
+			fkv = make([]interface{}, 0, len(l.prefix)+len(keyvals))
+			fkv = append(fkv, l.prefix...)
+			fkv = append(fkv, keyvals...)
+		}
+	} else {
+		fkv = keyvals
+	}
+	if f.filter != nil && f.filter(level, fkv...) {
+		return nil
+	}
+	if len(f.key) > 0 || len(f.value) > 0 {
+		for i := 0; i < len(keyvals); i += 2 {
+			v := i + 1
+			if v >= len(keyvals) {
+				continue
+			}
+			if _, ok := f.key[keyvals[i]]; ok {
+				keyvals[v] = fuzzyStr
+			}
+			if _, ok := f.value[keyvals[v]]; ok {
+				keyvals[v] = fuzzyStr
+			}
+		}
+	}
+	return f.logger.Log(level, keyvals...)
+}
diff --git a/vendor/github.com/saferwall/pe/log/global.go b/vendor/github.com/saferwall/pe/log/global.go
new file mode 100644
index 00000000..0e98bae5
--- /dev/null
+++ b/vendor/github.com/saferwall/pe/log/global.go
@@ -0,0 +1,122 @@
+package log
+
+import (
+	"sync"
+)
+
+// global is designed as a global logger in the current process.
+var global = &loggerAppliance{}
+
+// loggerAppliance is a proxy for `Logger`, so that a logger change
+// affects all sub-loggers.
+type loggerAppliance struct {
+	lock sync.Mutex
+	Logger
+	helper *Helper
+}
+
+func init() {
+	global.SetLogger(DefaultLogger)
+}
+
+func (a *loggerAppliance) SetLogger(in Logger) {
+	a.lock.Lock()
+	defer a.lock.Unlock()
+	a.Logger = in
+	a.helper = NewHelper(a.Logger)
+}
+
+func (a *loggerAppliance) GetLogger() Logger {
+	return a.Logger
+}
+
+// SetLogger should be called before any other log call.
+// And it is NOT THREAD SAFE.
+func SetLogger(logger Logger) {
+	global.SetLogger(logger)
+}
+
+// GetLogger returns the global logger appliance as the logger in the current process.
+func GetLogger() Logger {
+	return global
+}
+
+// Log prints a log by level and keyvals.
+func Log(level Level, keyvals ...interface{}) {
+	global.helper.Log(level, keyvals...)
+}
+
+// Debug logs a message at debug level.
+func Debug(a ...interface{}) {
+	global.helper.Debug(a...)
+}
+
+// Debugf logs a message at debug level.
+func Debugf(format string, a ...interface{}) {
+	global.helper.Debugf(format, a...)
+}
+
+// Debugw logs a message at debug level.
+func Debugw(keyvals ...interface{}) {
+	global.helper.Debugw(keyvals...)
+}
+
+// Info logs a message at info level.
+func Info(a ...interface{}) {
+	global.helper.Info(a...)
+}
+
+// Infof logs a message at info level.
+func Infof(format string, a ...interface{}) {
+	global.helper.Infof(format, a...)
+}
+
+// Infow logs a message at info level.
+func Infow(keyvals ...interface{}) {
+	global.helper.Infow(keyvals...)
+}
+
+// Warn logs a message at warn level.
+func Warn(a ...interface{}) {
+	global.helper.Warn(a...)
+}
+
+// Warnf logs a message at warn level.
+func Warnf(format string, a ...interface{}) {
+	global.helper.Warnf(format, a...)
+}
+
+// Warnw logs a message at warn level.
+func Warnw(keyvals ...interface{}) {
+	global.helper.Warnw(keyvals...)
+}
+
+// Error logs a message at error level.
+func Error(a ...interface{}) {
+	global.helper.Error(a...)
+}
+
+// Errorf logs a message at error level.
+func Errorf(format string, a ...interface{}) {
+	global.helper.Errorf(format, a...)
+}
+
+// Errorw logs a message at error level.
+func Errorw(keyvals ...interface{}) {
+	global.helper.Errorw(keyvals...)
+}
+
+// Fatal logs a message at fatal level.
+func Fatal(a ...interface{}) {
+	global.helper.Fatal(a...)
+}
+
+// Fatalf logs a message at fatal level.
+func Fatalf(format string, a ...interface{}) {
+	global.helper.Fatalf(format, a...)
+}
+
+// Fatalw logs a message at fatal level.
+func Fatalw(keyvals ...interface{}) {
+	global.helper.Fatalw(keyvals...)
+}
diff --git a/vendor/github.com/saferwall/pe/log/helper.go b/vendor/github.com/saferwall/pe/log/helper.go
new file mode 100644
index 00000000..b01c947a
--- /dev/null
+++ b/vendor/github.com/saferwall/pe/log/helper.go
@@ -0,0 +1,130 @@
+package log
+
+import (
+	"context"
+	"fmt"
+	"os"
+)
+
+// DefaultMessageKey is the default message key.
+var DefaultMessageKey = "msg"
+
+// Option is a Helper option.
+type Option func(*Helper)
+
+// Helper is a logger helper.
+type Helper struct {
+	logger Logger
+	msgKey string
+}
+
+// WithMessageKey sets the message key.
+func WithMessageKey(k string) Option {
+	return func(opts *Helper) {
+		opts.msgKey = k
+	}
+}
+
+// NewHelper creates a logger helper.
+func NewHelper(logger Logger, opts ...Option) *Helper {
+	options := &Helper{
+		msgKey: DefaultMessageKey, // default message key
+		logger: logger,
+	}
+	for _, o := range opts {
+		o(options)
+	}
+	return options
+}
+
+// WithContext returns a shallow copy of h with its context changed
+// to ctx. The provided ctx must be non-nil.
+func (h *Helper) WithContext(ctx context.Context) *Helper {
+	return &Helper{
+		msgKey: h.msgKey,
+		logger: WithContext(ctx, h.logger),
+	}
+}
+
+// Log prints a log at the given level with the given keyvals.
+func (h *Helper) Log(level Level, keyvals ...interface{}) {
+	_ = h.logger.Log(level, keyvals...)
+}
+
+// Debug logs a message at debug level.
+func (h *Helper) Debug(a ...interface{}) {
+	_ = h.logger.Log(LevelDebug, h.msgKey, fmt.Sprint(a...))
+}
+
+// Debugf logs a message at debug level.
+func (h *Helper) Debugf(format string, a ...interface{}) {
+	_ = h.logger.Log(LevelDebug, h.msgKey, fmt.Sprintf(format, a...))
+}
+
+// Debugw logs a message at debug level.
+func (h *Helper) Debugw(keyvals ...interface{}) {
+	_ = h.logger.Log(LevelDebug, keyvals...)
+}
+
+// Info logs a message at info level.
+func (h *Helper) Info(a ...interface{}) {
+	_ = h.logger.Log(LevelInfo, h.msgKey, fmt.Sprint(a...))
+}
+
+// Infof logs a message at info level.
+func (h *Helper) Infof(format string, a ...interface{}) {
+	_ = h.logger.Log(LevelInfo, h.msgKey, fmt.Sprintf(format, a...))
+}
+
+// Infow logs a message at info level.
+func (h *Helper) Infow(keyvals ...interface{}) {
+	_ = h.logger.Log(LevelInfo, keyvals...)
+}
+
+// Warn logs a message at warn level.
+func (h *Helper) Warn(a ...interface{}) {
+	_ = h.logger.Log(LevelWarn, h.msgKey, fmt.Sprint(a...))
+}
+
+// Warnf logs a message at warn level.
+func (h *Helper) Warnf(format string, a ...interface{}) {
+	_ = h.logger.Log(LevelWarn, h.msgKey, fmt.Sprintf(format, a...))
+}
+
+// Warnw logs a message at warn level.
+func (h *Helper) Warnw(keyvals ...interface{}) {
+	_ = h.logger.Log(LevelWarn, keyvals...)
+}
+
+// Error logs a message at error level.
+func (h *Helper) Error(a ...interface{}) {
+	_ = h.logger.Log(LevelError, h.msgKey, fmt.Sprint(a...))
+}
+
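As an aside, a small sketch of how the helper's message key plays out; the `message` key name here is an arbitrary choice for illustration. Note that the `*w` variants bypass the message key entirely and take raw keyvals:

```go
package main

import (
	"os"

	"github.com/saferwall/pe/log"
)

func main() {
	h := log.NewHelper(log.NewStdLogger(os.Stdout),
		log.WithMessageKey("message")) // default would be "msg"
	h.Infof("hello %s", "world") // expected: INFO message=hello world
	h.Infow("k", "v")            // expected: INFO k=v (no message key)
}
```

+// Errorf logs a message at error level.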
+func (h *Helper) Errorf(format string, a ...interface{}) {
+	_ = h.logger.Log(LevelError, h.msgKey, fmt.Sprintf(format, a...))
+}
+
+// Errorw logs a message at error level.
+func (h *Helper) Errorw(keyvals ...interface{}) {
+	_ = h.logger.Log(LevelError, keyvals...)
+}
+
+// Fatal logs a message at fatal level.
+func (h *Helper) Fatal(a ...interface{}) {
+	_ = h.logger.Log(LevelFatal, h.msgKey, fmt.Sprint(a...))
+	os.Exit(1)
+}
+
+// Fatalf logs a message at fatal level.
+func (h *Helper) Fatalf(format string, a ...interface{}) {
+	_ = h.logger.Log(LevelFatal, h.msgKey, fmt.Sprintf(format, a...))
+	os.Exit(1)
+}
+
+// Fatalw logs a message at fatal level.
+func (h *Helper) Fatalw(keyvals ...interface{}) {
+	_ = h.logger.Log(LevelFatal, keyvals...)
+	os.Exit(1)
+}
diff --git a/vendor/github.com/saferwall/pe/log/level.go b/vendor/github.com/saferwall/pe/log/level.go
new file mode 100644
index 00000000..22f41c78
--- /dev/null
+++ b/vendor/github.com/saferwall/pe/log/level.go
@@ -0,0 +1,56 @@
+package log
+
+import "strings"
+
+// Level is a logger level.
+type Level int8
+
+// LevelKey is the logger level key.
+const LevelKey = "level"
+
+const (
+	// LevelDebug is logger debug level.
+	LevelDebug Level = iota - 1
+	// LevelInfo is logger info level.
+	LevelInfo
+	// LevelWarn is logger warn level.
+	LevelWarn
+	// LevelError is logger error level.
+	LevelError
+	// LevelFatal is logger fatal level.
+	LevelFatal
+)
+
+func (l Level) String() string {
+	switch l {
+	case LevelDebug:
+		return "DEBUG"
+	case LevelInfo:
+		return "INFO"
+	case LevelWarn:
+		return "WARN"
+	case LevelError:
+		return "ERROR"
+	case LevelFatal:
+		return "FATAL"
+	default:
+		return ""
+	}
+}
+
+// ParseLevel parses a level string into a logger Level value.
+func ParseLevel(s string) Level {
+	switch strings.ToUpper(s) {
+	case "DEBUG":
+		return LevelDebug
+	case "INFO":
+		return LevelInfo
+	case "WARN":
+		return LevelWarn
+	case "ERROR":
+		return LevelError
+	case "FATAL":
+		return LevelFatal
+	}
+	return LevelInfo
+}
diff --git a/vendor/github.com/saferwall/pe/log/log.go b/vendor/github.com/saferwall/pe/log/log.go
new file mode 100644
index 00000000..1b612f8b
--- /dev/null
+++ b/vendor/github.com/saferwall/pe/log/log.go
@@ -0,0 +1,71 @@
+package log
+
+import (
+	"context"
+	"log"
+)
+
+// DefaultLogger is the default logger.
+var DefaultLogger = NewStdLogger(log.Writer())
+
+// Logger is a logger interface.
+type Logger interface {
+	Log(level Level, keyvals ...interface{}) error
+}
+
+type logger struct {
+	logs      []Logger
+	prefix    []interface{}
+	hasValuer bool
+	ctx       context.Context
+}
+
+func (c *logger) Log(level Level, keyvals ...interface{}) error {
+	kvs := make([]interface{}, 0, len(c.prefix)+len(keyvals))
+	kvs = append(kvs, c.prefix...)
+	if c.hasValuer {
+		bindValues(c.ctx, kvs)
+	}
+	kvs = append(kvs, keyvals...)
+	for _, l := range c.logs {
+		if err := l.Log(level, kvs...); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// With returns a logger that attaches the given key-value fields.
+func With(l Logger, kv ...interface{}) Logger {
+	if c, ok := l.(*logger); ok {
+		kvs := make([]interface{}, 0, len(c.prefix)+len(kv))
+		kvs = append(kvs, kv...)
+		kvs = append(kvs, c.prefix...)
+		return &logger{
+			logs:      c.logs,
+			prefix:    kvs,
+			hasValuer: containsValuer(kvs),
+			ctx:       c.ctx,
+		}
+	}
+	return &logger{logs: []Logger{l}, prefix: kv, hasValuer: containsValuer(kv)}
+}
+
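A tiny round-trip of the level parsing above; note that unknown strings deliberately fall back to `LevelInfo`:

```go
package main

import (
	"fmt"

	"github.com/saferwall/pe/log"
)

func main() {
	fmt.Println(log.ParseLevel("warn"))  // parsing is case-insensitive: WARN
	fmt.Println(log.ParseLevel("nope"))  // unknown -> the LevelInfo fallback: INFO
	fmt.Println(log.LevelDebug.String()) // DEBUG
}
```

+// WithContext returns a shallow copy of l with its context changed
+// to ctx. The provided ctx must be non-nil.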
+func WithContext(ctx context.Context, l Logger) Logger {
+	if c, ok := l.(*logger); ok {
+		return &logger{
+			logs:      c.logs,
+			prefix:    c.prefix,
+			hasValuer: c.hasValuer,
+			ctx:       ctx,
+		}
+	}
+	return &logger{logs: []Logger{l}, ctx: ctx}
+}
+
+// MultiLogger wraps multiple loggers into one.
+func MultiLogger(logs ...Logger) Logger {
+	return &logger{logs: logs}
+}
diff --git a/vendor/github.com/saferwall/pe/log/std.go b/vendor/github.com/saferwall/pe/log/std.go
new file mode 100644
index 00000000..8d79f5c5
--- /dev/null
+++ b/vendor/github.com/saferwall/pe/log/std.go
@@ -0,0 +1,47 @@
+package log
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"log"
+	"sync"
+)
+
+var _ Logger = (*stdLogger)(nil)
+
+type stdLogger struct {
+	log  *log.Logger
+	pool *sync.Pool
+}
+
+// NewStdLogger creates a logger that writes to w.
+func NewStdLogger(w io.Writer) Logger {
+	return &stdLogger{
+		log: log.New(w, "", 0),
+		pool: &sync.Pool{
+			New: func() interface{} {
+				return new(bytes.Buffer)
+			},
+		},
+	}
+}
+
+// Log prints the key-value pairs as a single log line.
+func (l *stdLogger) Log(level Level, keyvals ...interface{}) error {
+	if len(keyvals) == 0 {
+		return nil
+	}
+	if (len(keyvals) & 1) == 1 {
+		keyvals = append(keyvals, "KEYVALS UNPAIRED")
+	}
+	buf := l.pool.Get().(*bytes.Buffer)
+	buf.WriteString(level.String())
+	for i := 0; i < len(keyvals); i += 2 {
+		_, _ = fmt.Fprintf(buf, " %s=%v", keyvals[i], keyvals[i+1])
+	}
+	_ = l.log.Output(4, buf.String()) //nolint:gomnd
+	buf.Reset()
+	l.pool.Put(buf)
+	return nil
+}
diff --git a/vendor/github.com/saferwall/pe/log/value.go b/vendor/github.com/saferwall/pe/log/value.go
new file mode 100644
index 00000000..86e91559
--- /dev/null
+++ b/vendor/github.com/saferwall/pe/log/value.go
@@ -0,0 +1,71 @@
+package log
+
+import (
+	"context"
+	"runtime"
+	"strconv"
+	"strings"
+	"time"
+)
+
+var (
+	defaultDepth = 3
+	// DefaultCaller is a Valuer that returns the file and line.
+	DefaultCaller = Caller(defaultDepth)
+
+	// DefaultTimestamp is a Valuer that returns the current wallclock time.
+	DefaultTimestamp = Timestamp(time.RFC3339)
+)
+
+// Valuer returns a log value.
+type Valuer func(ctx context.Context) interface{}
+
+// Value evaluates v if it is a Valuer; otherwise it returns v unchanged.
+func Value(ctx context.Context, v interface{}) interface{} {
+	if v, ok := v.(Valuer); ok {
+		return v(ctx)
+	}
+	return v
+}
+
+// Caller returns a Valuer that returns a pkg/file:line description of the caller.
+func Caller(depth int) Valuer {
+	return func(context.Context) interface{} {
+		d := depth
+		_, file, line, _ := runtime.Caller(d)
+		if strings.LastIndex(file, "/log/filter.go") > 0 {
+			d++
+			_, file, line, _ = runtime.Caller(d)
+		}
+		if strings.LastIndex(file, "/log/helper.go") > 0 {
+			d++
+			_, file, line, _ = runtime.Caller(d)
+		}
+		idx := strings.LastIndexByte(file, '/')
+		return file[idx+1:] + ":" + strconv.Itoa(line)
+	}
+}
+
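The point of a `Valuer` is late binding. A sketch, reusing the README's pattern, where the timestamp and caller are recomputed by `bindValues` on every `Log` call rather than frozen at `With` time:

```go
package main

import (
	"os"

	"github.com/saferwall/pe/log"
)

func main() {
	logger := log.With(log.NewStdLogger(os.Stdout),
		"ts", log.DefaultTimestamp, // Valuer: resolved at log time
		"caller", log.DefaultCaller,
	)
	_ = logger.Log(log.LevelInfo, "msg", "first")
	_ = logger.Log(log.LevelInfo, "msg", "second") // fresh timestamp here
}
```

+// Timestamp returns a timestamp Valuer with a custom time format.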
+func Timestamp(layout string) Valuer {
+	return func(context.Context) interface{} {
+		return time.Now().Format(layout)
+	}
+}
+
+func bindValues(ctx context.Context, keyvals []interface{}) {
+	for i := 1; i < len(keyvals); i += 2 {
+		if v, ok := keyvals[i].(Valuer); ok {
+			keyvals[i] = v(ctx)
+		}
+	}
+}
+
+func containsValuer(keyvals []interface{}) bool {
+	for i := 1; i < len(keyvals); i += 2 {
+		if _, ok := keyvals[i].(Valuer); ok {
+			return true
+		}
+	}
+	return false
+}
diff --git a/vendor/github.com/saferwall/pe/ntheader.go b/vendor/github.com/saferwall/pe/ntheader.go
new file mode 100644
index 00000000..c053b00b
--- /dev/null
+++ b/vendor/github.com/saferwall/pe/ntheader.go
@@ -0,0 +1,602 @@
+// Copyright 2018 Saferwall. All rights reserved.
+// Use of this source code is governed by Apache v2 license
+// license that can be found in the LICENSE file.
+
+package pe
+
+import (
+	"encoding/binary"
+)
+
+// ImageFileHeaderMachineType represents the type of the image file header `Machine` field.
+type ImageFileHeaderMachineType uint16
+
+// ImageFileHeaderCharacteristicsType represents the type of the image file header
+// `Characteristics` field.
+type ImageFileHeaderCharacteristicsType uint16
+
+// ImageOptionalHeaderSubsystemType represents the type of the optional header `Subsystem` field.
+type ImageOptionalHeaderSubsystemType uint16
+
+// ImageOptionalHeaderDllCharacteristicsType represents the type of the optional header `DllCharacteristics` field.
+type ImageOptionalHeaderDllCharacteristicsType uint16
+
+// ImageNtHeader represents the PE header and is the general term for a structure
+// named IMAGE_NT_HEADERS.
+type ImageNtHeader struct {
+	// Signature is a DWORD containing the value 50h, 45h, 00h, 00h.
+	Signature uint32 `json:"signature"`
+
+	// IMAGE_NT_HEADERS provides a standard COFF header. It is located
+	// immediately after the PE signature. The COFF header provides the most
+	// general characteristics of a PE/COFF file, applicable to both object and
+	// executable files. It is represented by the IMAGE_FILE_HEADER structure.
+	FileHeader ImageFileHeader `json:"file_header"`
+
+	// OptionalHeader is of type *OptionalHeader32 or *OptionalHeader64.
+	OptionalHeader interface{} `json:"optional_header"`
+}
+
+// ImageFileHeader contains information about the physical layout and
+// properties of the file.
+type ImageFileHeader struct {
+	// The number that identifies the type of target machine.
+	Machine ImageFileHeaderMachineType `json:"machine"`
+
+	// The number of sections. This indicates the size of the section table,
+	// which immediately follows the headers.
+	NumberOfSections uint16 `json:"number_of_sections"`
+
+	// The low 32 bits of the number of seconds since 00:00 January 1, 1970
+	// (a C run-time time_t value), that indicates when the file was created.
+	TimeDateStamp uint32 `json:"time_date_stamp"`
+
+	// The file offset of the COFF symbol table, or zero if no COFF symbol
+	// table is present. This value should be zero for an image because COFF
+	// debugging information is deprecated.
+	PointerToSymbolTable uint32 `json:"pointer_to_symbol_table"`
+
+	// The number of entries in the symbol table. This data can be used to
+	// locate the string table, which immediately follows the symbol table.
+	// This value should be zero for an image because COFF debugging information
+	// is deprecated.
+	NumberOfSymbols uint32 `json:"number_of_symbols"`
+
+	// The size of the optional header, which is required for executable files
+	// but not for object files.
This value should be zero for an object file. + SizeOfOptionalHeader uint16 `json:"size_of_optional_header"` + + // The flags that indicate the attributes of the file. + Characteristics ImageFileHeaderCharacteristicsType `json:"characteristics"` +} + +// ImageOptionalHeader32 represents the PE32 format structure of the optional header. +// PE32 contains this additional field, which is absent in PE32+. +type ImageOptionalHeader32 struct { + + // The unsigned integer that identifies the state of the image file. + // The most common number is 0x10B, which identifies it as a normal + // executable file. 0x107 identifies it as a ROM image, and 0x20B identifies + // it as a PE32+ executable. + Magic uint16 `json:"magic"` + + // Linker major version number. The VC++ linker sets this field to current + // version of Visual Studio. + MajorLinkerVersion uint8 `json:"major_linker_version"` + + // The linker minor version number. + MinorLinkerVersion uint8 `json:"minor_linker_version"` + + // The size of the code (text) section, or the sum of all code sections + // if there are multiple sections. + SizeOfCode uint32 `json:"size_of_code"` + + // The size of the initialized data section (held in the field SizeOfRawData + // of the respective section header), or the sum of all such sections if + // there are multiple data sections. + SizeOfInitializedData uint32 `json:"size_of_initialized_data"` + + // The size of the uninitialized data section (BSS), or the sum of all + // such sections if there are multiple BSS sections. This data is not part + // of the disk file and does not have specific values, but the OS loader + // commits memory space for this data when the file is loaded. + SizeOfUninitializedData uint32 `json:"size_of_uninitialized_data"` + + // The address of the entry point relative to the image base when the + // executable file is loaded into memory. For program images, this is the + // starting address. For device drivers, this is the address of the + // initialization function. An entry point is optional for DLLs. When no + // entry point is present, this field must be zero. For managed PE files, + // this value always points to the common language runtime invocation stub. + AddressOfEntryPoint uint32 `json:"address_of_entrypoint"` + + // The address that is relative to the image base of the beginning-of-code + // section when it is loaded into memory. + BaseOfCode uint32 `json:"base_of_code"` + + // The address that is relative to the image base of the beginning-of-data + // section when it is loaded into memory. This entry doesn’t exist in the + // 64-bit Optional header. + BaseOfData uint32 `json:"base_of_data"` + + // The preferred address of the first byte of image when loaded into memory; + // must be a multiple of 64 K. The default for DLLs is 0x10000000. The + // default for Windows CE EXEs is 0x00010000. The default for Windows NT, + // Windows 2000, Windows XP, Windows 95, Windows 98, and Windows Me is + // 0x00400000. + ImageBase uint32 `json:"image_base"` + + // The alignment (in bytes) of sections when they are loaded into memory. + // It must be greater than or equal to FileAlignment. The default is the + // page size for the architecture. + SectionAlignment uint32 `json:"section_alignment"` + + // The alignment factor (in bytes) that is used to align the raw data of + // sections in the image file. The value should be a power of 2 between 512 + // and 64 K, inclusive. The default is 512. 
If the SectionAlignment is less + // than the architecture's page size, then FileAlignment must match + // SectionAlignment. + FileAlignment uint32 `json:"file_alignment"` + + // The major version number of the required operating system. + MajorOperatingSystemVersion uint16 `json:"major_os_version"` + + // The minor version number of the required operating system. + MinorOperatingSystemVersion uint16 `json:"minor_os_version"` + + // The major version number of the image. + MajorImageVersion uint16 `json:"major_image_version"` + + // The minor version number of the image. + MinorImageVersion uint16 `json:"minor_image_version"` + + // The major version number of the subsystem. + MajorSubsystemVersion uint16 `json:"major_subsystem_version"` + + // The minor version number of the subsystem. + MinorSubsystemVersion uint16 `json:"minor_subsystem_version"` + + // Reserved, must be zero. + Win32VersionValue uint32 `json:"win32_version_value"` + + // The size (in bytes) of the image, including all headers, as the image + // is loaded in memory. It must be a multiple of SectionAlignment. + SizeOfImage uint32 `json:"size_of_image"` + + // The combined size of an MS-DOS stub, PE header, and section headers + // rounded up to a multiple of FileAlignment. + SizeOfHeaders uint32 `json:"size_of_headers"` + + // The image file checksum. The algorithm for computing the checksum is + // incorporated into IMAGHELP.DLL. The following are checked for validation + // at load time: all drivers, any DLL loaded at boot time, and any DLL + // that is loaded into a critical Windows process. + CheckSum uint32 `json:"checksum"` + + // The subsystem that is required to run this image. + Subsystem ImageOptionalHeaderSubsystemType `json:"subsystem"` + + // For more information, see DLL Characteristics later in this specification. + DllCharacteristics ImageOptionalHeaderDllCharacteristicsType `json:"dll_characteristics"` + + // Size of virtual memory to reserve for the initial thread’s stack. Only + // the SizeOfStackCommit field is committed; the rest is available in + // one-page increments. The default is 1MB for 32-bit images and 4MB for + // 64-bit images. + SizeOfStackReserve uint32 `json:"size_of_stack_reserve"` + + // Size of virtual memory initially committed for the initial thread’s + // stack. The default is one page (4KB) for 32-bit images and 16KB for + // 64-bit images. + SizeOfStackCommit uint32 `json:"size_of_stack_commit"` + + // size of the local heap space to reserve. Only SizeOfHeapCommit is + // committed; the rest is made available one page at a time until the + // reserve size is reached. The default is 1MB for both 32-bit and 64-bit + // images. + SizeOfHeapReserve uint32 `json:"size_of_heap_reserve"` + + // Size of virtual memory initially committed for the process heap. The + // default is 4KB (one operating system memory page) for 32-bit images and + // 16KB for 64-bit images. + SizeOfHeapCommit uint32 `json:"size_of_heap_commit"` + + // Reserved, must be zero. + LoaderFlags uint32 `json:"loader_flags"` + + // Number of entries in the DataDirectory array; at least 16. Although it + // is theoretically possible to emit more than 16 data directories, all + // existing managed compilers emit exactly 16 data directories, with the + // 16th (last) data directory never used (reserved). + NumberOfRvaAndSizes uint32 `json:"number_of_rva_and_sizes"` + + // An array of 16 IMAGE_DATA_DIRECTORY structures. 
+ DataDirectory [16]DataDirectory `json:"data_directories"` +} + +// ImageOptionalHeader64 represents the PE32+ format structure of the optional header. +type ImageOptionalHeader64 struct { + // The unsigned integer that identifies the state of the image file. + // The most common number is 0x10B, which identifies it as a normal + // executable file. 0x107 identifies it as a ROM image, and 0x20B identifies + // it as a PE32+ executable. + Magic uint16 `json:"magic"` + + // Linker major version number. The VC++ linker sets this field to current + // version of Visual Studio. + MajorLinkerVersion uint8 `json:"major_linker_version"` + + // The linker minor version number. + MinorLinkerVersion uint8 `json:"minor_linker_version"` + + // The size of the code (text) section, or the sum of all code sections + // if there are multiple sections. + SizeOfCode uint32 `json:"size_of_code"` + + // The size of the initialized data section (held in the field SizeOfRawData + // of the respective section header), or the sum of all such sections if + // there are multiple data sections. + SizeOfInitializedData uint32 `json:"size_of_initialized_data"` + + // The size of the uninitialized data section (BSS), or the sum of all + // such sections if there are multiple BSS sections. This data is not part + // of the disk file and does not have specific values, but the OS loader + // commits memory space for this data when the file is loaded. + SizeOfUninitializedData uint32 `json:"size_of_uninitialized_data"` + + // The address of the entry point relative to the image base when the + // executable file is loaded into memory. For program images, this is the + // starting address. For device drivers, this is the address of the + // initialization function. An entry point is optional for DLLs. When no + // entry point is present, this field must be zero. For managed PE files, + // this value always points to the common language runtime invocation stub. + AddressOfEntryPoint uint32 `json:"address_of_entrypoint"` + + // The address that is relative to the image base of the beginning-of-code + // section when it is loaded into memory. + BaseOfCode uint32 `json:"base_of_code"` + + // In PE+, ImageBase is 8 bytes size. + ImageBase uint64 `json:"image_base"` + + // The alignment (in bytes) of sections when they are loaded into memory. + // It must be greater than or equal to FileAlignment. The default is the + // page size for the architecture. + SectionAlignment uint32 `json:"section_alignment"` + + // The alignment factor (in bytes) that is used to align the raw data of + // sections in the image file. The value should be a power of 2 between 512 + // and 64 K, inclusive. The default is 512. If the SectionAlignment is less + // than the architecture's page size, then FileAlignment must match SectionAlignment. + FileAlignment uint32 `json:"file_alignment"` + + // The major version number of the required operating system. + MajorOperatingSystemVersion uint16 `json:"major_os_version"` + + // The minor version number of the required operating system. + MinorOperatingSystemVersion uint16 `json:"minor_os_version"` + + // The major version number of the image. + MajorImageVersion uint16 `json:"major_image_version"` + + // The minor version number of the image. + MinorImageVersion uint16 `json:"minor_image_version"` + + // The major version number of the subsystem. + MajorSubsystemVersion uint16 `json:"major_subsystem_version"` + + // The minor version number of the subsystem. 
+	MinorSubsystemVersion uint16 `json:"minor_subsystem_version"`
+
+	// Reserved, must be zero.
+	Win32VersionValue uint32 `json:"win32_version_value"`
+
+	// The size (in bytes) of the image, including all headers, as the image
+	// is loaded in memory. It must be a multiple of SectionAlignment.
+	SizeOfImage uint32 `json:"size_of_image"`
+
+	// The combined size of an MS-DOS stub, PE header, and section headers
+	// rounded up to a multiple of FileAlignment.
+	SizeOfHeaders uint32 `json:"size_of_headers"`
+
+	// The image file checksum. The algorithm for computing the checksum is
+	// incorporated into IMAGEHLP.DLL. The following are checked for validation
+	// at load time: all drivers, any DLL loaded at boot time, and any DLL
+	// that is loaded into a critical Windows process.
+	CheckSum uint32 `json:"checksum"`
+
+	// The subsystem that is required to run this image.
+	Subsystem ImageOptionalHeaderSubsystemType `json:"subsystem"`
+
+	// For more information, see DLL Characteristics later in this specification.
+	DllCharacteristics ImageOptionalHeaderDllCharacteristicsType `json:"dll_characteristics"`
+
+	// Size of virtual memory to reserve for the initial thread’s stack. Only
+	// the SizeOfStackCommit field is committed; the rest is available in
+	// one-page increments. The default is 1MB for 32-bit images and 4MB for
+	// 64-bit images.
+	SizeOfStackReserve uint64 `json:"size_of_stack_reserve"`
+
+	// Size of virtual memory initially committed for the initial thread’s
+	// stack. The default is one page (4KB) for 32-bit images and 16KB for
+	// 64-bit images.
+	SizeOfStackCommit uint64 `json:"size_of_stack_commit"`
+
+	// Size of the local heap space to reserve. Only SizeOfHeapCommit is
+	// committed; the rest is made available one page at a time until the
+	// reserve size is reached. The default is 1MB for both 32-bit and 64-bit
+	// images.
+	SizeOfHeapReserve uint64 `json:"size_of_heap_reserve"`
+
+	// Size of virtual memory initially committed for the process heap. The
+	// default is 4KB (one operating system memory page) for 32-bit images and
+	// 16KB for 64-bit images.
+	SizeOfHeapCommit uint64 `json:"size_of_heap_commit"`
+
+	// Reserved, must be zero.
+	LoaderFlags uint32 `json:"loader_flags"`
+
+	// Number of entries in the DataDirectory array; at least 16. Although it
+	// is theoretically possible to emit more than 16 data directories, all
+	// existing managed compilers emit exactly 16 data directories, with the
+	// 16th (last) data directory never used (reserved).
+	NumberOfRvaAndSizes uint32 `json:"number_of_rva_and_sizes"`
+
+	// An array of 16 IMAGE_DATA_DIRECTORY structures.
+	DataDirectory [16]DataDirectory `json:"data_directories"`
+}
+
+// DataDirectory represents an array of 16 IMAGE_DATA_DIRECTORY structures,
+// 8 bytes apiece, each relating to an important data structure in the PE file.
+// The data directory table starts at offset 96 in a 32-bit PE header and at
+// offset 112 in a 64-bit PE header. Each entry in the data directory table
+// contains the RVA and size of a table or a string that this particular
+// directory entry describes; this information is used by the operating system.
+type DataDirectory struct {
+	VirtualAddress uint32 // The RVA of the data structure.
+	Size           uint32 // The size in bytes of the data structure referred to.
+}
+
+// ParseNTHeader parses the PE NT header structure, referred to as IMAGE_NT_HEADERS.
+// Its offset is given by the e_lfanew field in the IMAGE_DOS_HEADER at the
+// beginning of the file.
+func (pe *File) ParseNTHeader() (err error) {
+	ntHeaderOffset := pe.DOSHeader.AddressOfNewEXEHeader
+	signature, err := pe.ReadUint32(ntHeaderOffset)
+	if err != nil {
+		return ErrInvalidNtHeaderOffset
+	}
+
+	// Probe for PE signature.
+	if signature&0xFFFF == ImageOS2Signature {
+		return ErrImageOS2SignatureFound
+	}
+	if signature&0xFFFF == ImageOS2LESignature {
+		return ErrImageOS2LESignatureFound
+	}
+	if signature&0xFFFF == ImageVXDSignature {
+		return ErrImageVXDSignatureFound
+	}
+	if signature&0xFFFF == ImageTESignature {
+		return ErrImageTESignatureFound
+	}
+
+	// This is the smallest requirement for a valid PE.
+	if signature != ImageNTSignature {
+		return ErrImageNtSignatureNotFound
+	}
+	pe.NtHeader.Signature = signature
+
+	// The file header structure contains some basic information about the file;
+	// most importantly, a field describing the size of the optional data that
+	// follows it.
+	fileHeaderSize := uint32(binary.Size(pe.NtHeader.FileHeader))
+	fileHeaderOffset := ntHeaderOffset + 4
+	err = pe.structUnpack(&pe.NtHeader.FileHeader, fileHeaderOffset, fileHeaderSize)
+	if err != nil {
+		return err
+	}
+
+	// The PE header, which immediately follows the COFF header, provides
+	// information for the OS loader. Although this header is referred to as
+	// the optional header, it is optional only in the sense that object files
+	// usually don’t contain it. For PE files, this header is mandatory.
+	// The size of the PE header is not fixed. It depends on the number of data
+	// directories defined in the header and is specified in the
+	// SizeOfOptionalHeader field of the COFF header.
+	// The optional header could be either for a PE or PE+ file.
+	oh32 := ImageOptionalHeader32{}
+	oh64 := ImageOptionalHeader64{}
+
+	optHeaderOffset := ntHeaderOffset + (fileHeaderSize + 4)
+	magic, err := pe.ReadUint16(optHeaderOffset)
+	if err != nil {
+		return err
+	}
+
+	// Probe for the PE32/PE32+ optional header magic.
+	if magic != ImageNtOptionalHeader32Magic &&
+		magic != ImageNtOptionalHeader64Magic {
+		return ErrImageNtOptionalHeaderMagicNotFound
+	}
+
+	// Are we dealing with a PE64 optional header?
+	switch magic {
+	case ImageNtOptionalHeader64Magic:
+		size := uint32(binary.Size(oh64))
+		err = pe.structUnpack(&oh64, optHeaderOffset, size)
+		if err != nil {
+			return err
+		}
+		pe.Is64 = true
+		pe.NtHeader.OptionalHeader = oh64
+	case ImageNtOptionalHeader32Magic:
+		size := uint32(binary.Size(oh32))
+		err = pe.structUnpack(&oh32, optHeaderOffset, size)
+		if err != nil {
+			return err
+		}
+		pe.Is32 = true
+		pe.NtHeader.OptionalHeader = oh32
+	}
+
+	// ImageBase should be a multiple of 10000h.
+	if (pe.Is64 && oh64.ImageBase%0x10000 != 0) || (pe.Is32 && oh32.ImageBase%0x10000 != 0) {
+		return ErrImageBaseNotAligned
+	}
+
+	// ImageBase can be any value as long as:
+	// ImageBase + SizeOfImage < 80000000h for PE32.
+	// ImageBase + SizeOfImage < 0xffff080000000000 for PE32+.
+	if (pe.Is32 && oh32.ImageBase+oh32.SizeOfImage >= 0x80000000) || (pe.Is64 && oh64.ImageBase+uint64(oh64.SizeOfImage) >= 0xffff080000000000) {
+		pe.Anomalies = append(pe.Anomalies, AnoImageBaseOverflow)
+	}
+
+	pe.HasNTHdr = true
+	return nil
+}
+
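For context, a hypothetical driver for the parser above. `New`, `Options`, `Close` and `Parse` are the library's usual public entry points; they are assumptions here, since they are not part of this hunk:

```go
package main

import (
	"fmt"

	peparser "github.com/saferwall/pe"
)

func main() {
	// New/Parse/Close are assumed from the library's public API.
	f, err := peparser.New("C:\\Windows\\System32\\kernel32.dll", &peparser.Options{})
	if err != nil {
		panic(err)
	}
	defer f.Close()
	if err := f.Parse(); err != nil { // runs ParseNTHeader along the way
		panic(err)
	}
	fmt.Println(f.NtHeader.FileHeader.Machine) // e.g. "x64"
}
```

+// String returns the string representation of the `Machine` field of the IMAGE_FILE_HEADER.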
+func (t ImageFileHeaderMachineType) String() string { + machineType := map[ImageFileHeaderMachineType]string{ + ImageFileMachineUnknown: "Unknown", + ImageFileMachineAM33: "Matsushita AM33", + ImageFileMachineAMD64: "x64", + ImageFileMachineARM: "ARM little endian", + ImageFileMachineARM64: "ARM64 little endian", + ImageFileMachineARMNT: "ARM Thumb-2 little endian", + ImageFileMachineEBC: "EFI byte code", + ImageFileMachineI386: "Intel 386 or later / compatible processors", + ImageFileMachineIA64: "Intel Itanium processor family", + ImageFileMachineM32R: "Mitsubishi M32R little endian", + ImageFileMachineMIPS16: "MIPS16", + ImageFileMachineMIPSFPU: "MIPS with FPU", + ImageFileMachineMIPSFPU16: "MIPS16 with FPU", + ImageFileMachinePowerPC: "Power PC little endian", + ImageFileMachinePowerPCFP: "Power PC with floating point support", + ImageFileMachineR4000: "MIPS little endian", + ImageFileMachineRISCV32: "RISC-V 32-bit address space", + ImageFileMachineRISCV64: "RISC-V 64-bit address space", + ImageFileMachineRISCV128: "RISC-V 128-bit address space", + ImageFileMachineSH3: "Hitachi SH3", + ImageFileMachineSH3DSP: "Hitachi SH3 DSP", + ImageFileMachineSH4: "Hitachi SH4", + ImageFileMachineSH5: "Hitachi SH5", + ImageFileMachineTHUMB: "Thumb", + ImageFileMachineWCEMIPSv2: "MIPS little-endian WCE v2", + } + + if val, ok := machineType[t]; ok { + return val + } + return "?" +} + +// String returns the string representations of the `Characteristics` field of the IMAGE_FILE_HEADER. +func (t ImageFileHeaderCharacteristicsType) String() []string { + var values []string + fileHeaderCharacteristics := map[ImageFileHeaderCharacteristicsType]string{ + ImageFileRelocsStripped: "RelocsStripped", + ImageFileExecutableImage: "ExecutableImage", + ImageFileLineNumsStripped: "LineNumsStripped", + ImageFileLocalSymsStripped: "LocalSymsStripped", + ImageFileAggressiveWSTrim: "AgressibeWsTrim", + ImageFileLargeAddressAware: "LargeAddressAware", + ImageFileBytesReservedLow: "BytesReservedLow", + ImageFile32BitMachine: "32BitMachine", + ImageFileDebugStripped: "DebugStripped", + ImageFileRemovableRunFromSwap: "RemovableRunFromSwap", + ImageFileSystem: "FileSystem", + ImageFileDLL: "DLL", + ImageFileUpSystemOnly: "UpSystemOnly", + ImageFileBytesReservedHigh: "BytesReservedHigh", + } + + for k, s := range fileHeaderCharacteristics { + if k&t != 0 { + values = append(values, s) + } + } + + return values +} + +// String returns the string representations of the `DllCharacteristics` field of ImageOptionalHeader. +func (t ImageOptionalHeaderDllCharacteristicsType) String() []string { + var values []string + + imgDllCharacteristics := map[ImageOptionalHeaderDllCharacteristicsType]string{ + ImageDllCharacteristicsHighEntropyVA: "HighEntropyVA", + ImageDllCharacteristicsDynamicBase: "DynamicBase", + ImageDllCharacteristicsForceIntegrity: "ForceIntegrity", + ImageDllCharacteristicsNXCompact: "NXCompact", + ImageDllCharacteristicsNoIsolation: "NoIsolation", + ImageDllCharacteristicsNoSEH: "NoSEH", + ImageDllCharacteristicsNoBind: "NoBind", + ImageDllCharacteristicsAppContainer: "AppContainer", + ImageDllCharacteristicsWdmDriver: "WdmDriver", + ImageDllCharacteristicsGuardCF: "GuardCF", + ImageDllCharacteristicsTerminalServiceAware: "TerminalServiceAware", + } + + for k, s := range imgDllCharacteristics { + if k&t != 0 { + values = append(values, s) + } + } + + return values +} + +// String returns the string representations of the `Subsystem` field +// of ImageOptionalHeader. 
+func (subsystem ImageOptionalHeaderSubsystemType) String() string {
+	subsystemMap := map[ImageOptionalHeaderSubsystemType]string{
+		ImageSubsystemUnknown:                "Unknown",
+		ImageSubsystemNative:                 "Native",
+		ImageSubsystemWindowsGUI:             "Windows GUI",
+		ImageSubsystemWindowsCUI:             "Windows CUI",
+		ImageSubsystemOS2CUI:                 "OS/2 character",
+		ImageSubsystemPosixCUI:               "POSIX character",
+		ImageSubsystemNativeWindows:          "Native Win9x driver",
+		ImageSubsystemWindowsCEGUI:           "Windows CE GUI",
+		ImageSubsystemEFIApplication:         "EFI Application",
+		ImageSubsystemEFIBootServiceDriver:   "EFI Boot Service Driver",
+		ImageSubsystemEFIRuntimeDriver:       "EFI Runtime Driver",
+		ImageSubsystemEFIRom:                 "EFI ROM image",
+		ImageSubsystemXBOX:                   "XBOX",
+		ImageSubsystemWindowsBootApplication: "Windows boot application",
+	}
+
+	if val, ok := subsystemMap[subsystem]; ok {
+		return val
+	}
+
+	return "?"
+}
+
+// PrettyOptionalHeaderMagic returns the string representation of the
+// `Magic` field of ImageOptionalHeader.
+func (pe *File) PrettyOptionalHeaderMagic() string {
+	var magic uint16
+
+	if pe.Is64 {
+		magic = pe.NtHeader.OptionalHeader.(ImageOptionalHeader64).Magic
+	} else {
+		magic = pe.NtHeader.OptionalHeader.(ImageOptionalHeader32).Magic
+	}
+
+	switch magic {
+	case ImageNtOptionalHeader32Magic:
+		return "PE32"
+	case ImageNtOptionalHeader64Magic:
+		return "PE64"
+	case ImageROMOptionalHeaderMagic:
+		return "ROM"
+	default:
+		return "?"
+	}
+}
diff --git a/vendor/github.com/saferwall/pe/ordlookup.go b/vendor/github.com/saferwall/pe/ordlookup.go
new file mode 100644
index 00000000..d94a6121
--- /dev/null
+++ b/vendor/github.com/saferwall/pe/ordlookup.go
@@ -0,0 +1,554 @@
+// Copyright 2021 Saferwall. All rights reserved.
+// Use of this source code is governed by Apache v2 license
+// license that can be found in the LICENSE file.
+
+package pe
+
+import (
+	"fmt"
+	"strings"
+)
+
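A short sketch of these stringers in action; the subsystem value is made up for illustration (`ImageSubsystemWindowsGUI` is 2 in the constants defined in pe.go):

```go
package main

import (
	"fmt"

	peparser "github.com/saferwall/pe"
)

func main() {
	// The constants are untyped, so convert before using the Stringer.
	subsystem := peparser.ImageOptionalHeaderSubsystemType(2)
	fmt.Println(subsystem) // Windows GUI
}
```

After a successful parse, `PrettyOptionalHeaderMagic` can be called on the `File` the same way to get "PE32", "PE64", or "ROM".

+// WS232OrdNames maps ordinals to names.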
+var WS232OrdNames = map[uint64]string{ + 1: "accept", + 2: "bind", + 3: "closesocket", + 4: "connect", + 5: "getpeername", + 6: "getsockname", + 7: "getsockopt", + 8: "htonl", + 9: "htons", + 10: "ioctlsocket", + 11: "inet_addr", + 12: "inet_ntoa", + 13: "listen", + 14: "ntohl", + 15: "ntohs", + 16: "recv", + 17: "recvfrom", + 18: "select", + 19: "send", + 20: "sendto", + 21: "setsockopt", + 22: "shutdown", + 23: "socket", + 24: "GetAddrInfoW", + 25: "GetNameInfoW", + 26: "WSApSetPostRoutine", + 27: "FreeAddrInfoW", + 28: "WPUCompleteOverlappedRequest", + 29: "WSAAccept", + 30: "WSAAddressToStringA", + 31: "WSAAddressToStringW", + 32: "WSACloseEvent", + 33: "WSAConnect", + 34: "WSACreateEvent", + 35: "WSADuplicateSocketA", + 36: "WSADuplicateSocketW", + 37: "WSAEnumNameSpaceProvidersA", + 38: "WSAEnumNameSpaceProvidersW", + 39: "WSAEnumNetworkEvents", + 40: "WSAEnumProtocolsA", + 41: "WSAEnumProtocolsW", + 42: "WSAEventSelect", + 43: "WSAGetOverlappedResult", + 44: "WSAGetQOSByName", + 45: "WSAGetServiceClassInfoA", + 46: "WSAGetServiceClassInfoW", + 47: "WSAGetServiceClassNameByClassIdA", + 48: "WSAGetServiceClassNameByClassIdW", + 49: "WSAHtonl", + 50: "WSAHtons", + 51: "gethostbyaddr", + 52: "gethostbyname", + 53: "getprotobyname", + 54: "getprotobynumber", + 55: "getservbyname", + 56: "getservbyport", + 57: "gethostname", + 58: "WSAInstallServiceClassA", + 59: "WSAInstallServiceClassW", + 60: "WSAIoctl", + 61: "WSAJoinLeaf", + 62: "WSALookupServiceBeginA", + 63: "WSALookupServiceBeginW", + 64: "WSALookupServiceEnd", + 65: "WSALookupServiceNextA", + 66: "WSALookupServiceNextW", + 67: "WSANSPIoctl", + 68: "WSANtohl", + 69: "WSANtohs", + 70: "WSAProviderConfigChange", + 71: "WSARecv", + 72: "WSARecvDisconnect", + 73: "WSARecvFrom", + 74: "WSARemoveServiceClass", + 75: "WSAResetEvent", + 76: "WSASend", + 77: "WSASendDisconnect", + 78: "WSASendTo", + 79: "WSASetEvent", + 80: "WSASetServiceA", + 81: "WSASetServiceW", + 82: "WSASocketA", + 83: "WSASocketW", + 84: "WSAStringToAddressA", + 85: "WSAStringToAddressW", + 86: "WSAWaitForMultipleEvents", + 87: "WSCDeinstallProvider", + 88: "WSCEnableNSProvider", + 89: "WSCEnumProtocols", + 90: "WSCGetProviderPath", + 91: "WSCInstallNameSpace", + 92: "WSCInstallProvider", + 93: "WSCUnInstallNameSpace", + 94: "WSCUpdateProvider", + 95: "WSCWriteNameSpaceOrder", + 96: "WSCWriteProviderOrder", + 97: "freeaddrinfo", + 98: "getaddrinfo", + 99: "getnameinfo", + 101: "WSAAsyncSelect", + 102: "WSAAsyncGetHostByAddr", + 103: "WSAAsyncGetHostByName", + 104: "WSAAsyncGetProtoByNumber", + 105: "WSAAsyncGetProtoByName", + 106: "WSAAsyncGetServByPort", + 107: "WSAAsyncGetServByName", + 108: "WSACancelAsyncRequest", + 109: "WSASetBlockingHook", + 110: "WSAUnhookBlockingHook", + 111: "WSAGetLastError", + 112: "WSASetLastError", + 113: "WSACancelBlockingCall", + 114: "WSAIsBlocking", + 115: "WSAStartup", + 116: "WSACleanup", + 151: "__WSAFDIsSet", + 500: "WEP", +} + +// OleAut32OrdNames maps ordinals to names. 
+var OleAut32OrdNames = map[uint64]string{ + 2: "SysAllocString", + 3: "SysReAllocString", + 4: "SysAllocStringLen", + 5: "SysReAllocStringLen", + 6: "SysFreeString", + 7: "SysStringLen", + 8: "VariantInit", + 9: "VariantClear", + 10: "VariantCopy", + 11: "VariantCopyInd", + 12: "VariantChangeType", + 13: "VariantTimeToDosDateTime", + 14: "DosDateTimeToVariantTime", + 15: "SafeArrayCreate", + 16: "SafeArrayDestroy", + 17: "SafeArrayGetDim", + 18: "SafeArrayGetElemsize", + 19: "SafeArrayGetUBound", + 20: "SafeArrayGetLBound", + 21: "SafeArrayLock", + 22: "SafeArrayUnlock", + 23: "SafeArrayAccessData", + 24: "SafeArrayUnaccessData", + 25: "SafeArrayGetElement", + 26: "SafeArrayPutElement", + 27: "SafeArrayCopy", + 28: "DispGetParam", + 29: "DispGetIDsOfNames", + 30: "DispInvoke", + 31: "CreateDispTypeInfo", + 32: "CreateStdDispatch", + 33: "RegisterActiveObject", + 34: "RevokeActiveObject", + 35: "GetActiveObject", + 36: "SafeArrayAllocDescriptor", + 37: "SafeArrayAllocData", + 38: "SafeArrayDestroyDescriptor", + 39: "SafeArrayDestroyData", + 40: "SafeArrayRedim", + 41: "SafeArrayAllocDescriptorEx", + 42: "SafeArrayCreateEx", + 43: "SafeArrayCreateVectorEx", + 44: "SafeArraySetRecordInfo", + 45: "SafeArrayGetRecordInfo", + 46: "VarParseNumFromStr", + 47: "VarNumFromParseNum", + 48: "VarI2FromUI1", + 49: "VarI2FromI4", + 50: "VarI2FromR4", + 51: "VarI2FromR8", + 52: "VarI2FromCy", + 53: "VarI2FromDate", + 54: "VarI2FromStr", + 55: "VarI2FromDisp", + 56: "VarI2FromBool", + 57: "SafeArraySetIID", + 58: "VarI4FromUI1", + 59: "VarI4FromI2", + 60: "VarI4FromR4", + 61: "VarI4FromR8", + 62: "VarI4FromCy", + 63: "VarI4FromDate", + 64: "VarI4FromStr", + 65: "VarI4FromDisp", + 66: "VarI4FromBool", + 67: "SafeArrayGetIID", + 68: "VarR4FromUI1", + 69: "VarR4FromI2", + 70: "VarR4FromI4", + 71: "VarR4FromR8", + 72: "VarR4FromCy", + 73: "VarR4FromDate", + 74: "VarR4FromStr", + 75: "VarR4FromDisp", + 76: "VarR4FromBool", + 77: "SafeArrayGetVartype", + 78: "VarR8FromUI1", + 79: "VarR8FromI2", + 80: "VarR8FromI4", + 81: "VarR8FromR4", + 82: "VarR8FromCy", + 83: "VarR8FromDate", + 84: "VarR8FromStr", + 85: "VarR8FromDisp", + 86: "VarR8FromBool", + 87: "VarFormat", + 88: "VarDateFromUI1", + 89: "VarDateFromI2", + 90: "VarDateFromI4", + 91: "VarDateFromR4", + 92: "VarDateFromR8", + 93: "VarDateFromCy", + 94: "VarDateFromStr", + 95: "VarDateFromDisp", + 96: "VarDateFromBool", + 97: "VarFormatDateTime", + 98: "VarCyFromUI1", + 99: "VarCyFromI2", + 100: "VarCyFromI4", + 101: "VarCyFromR4", + 102: "VarCyFromR8", + 103: "VarCyFromDate", + 104: "VarCyFromStr", + 105: "VarCyFromDisp", + 106: "VarCyFromBool", + 107: "VarFormatNumber", + 108: "VarBstrFromUI1", + 109: "VarBstrFromI2", + 110: "VarBstrFromI4", + 111: "VarBstrFromR4", + 112: "VarBstrFromR8", + 113: "VarBstrFromCy", + 114: "VarBstrFromDate", + 115: "VarBstrFromDisp", + 116: "VarBstrFromBool", + 117: "VarFormatPercent", + 118: "VarBoolFromUI1", + 119: "VarBoolFromI2", + 120: "VarBoolFromI4", + 121: "VarBoolFromR4", + 122: "VarBoolFromR8", + 123: "VarBoolFromDate", + 124: "VarBoolFromCy", + 125: "VarBoolFromStr", + 126: "VarBoolFromDisp", + 127: "VarFormatCurrency", + 128: "VarWeekdayName", + 129: "VarMonthName", + 130: "VarUI1FromI2", + 131: "VarUI1FromI4", + 132: "VarUI1FromR4", + 133: "VarUI1FromR8", + 134: "VarUI1FromCy", + 135: "VarUI1FromDate", + 136: "VarUI1FromStr", + 137: "VarUI1FromDisp", + 138: "VarUI1FromBool", + 139: "VarFormatFromTokens", + 140: "VarTokenizeFormatString", + 141: "VarAdd", + 142: "VarAnd", + 143: "VarDiv", + 144: "DllCanUnloadNow", 
+ 145: "DllGetClassObject", + 146: "DispCallFunc", + 147: "VariantChangeTypeEx", + 148: "SafeArrayPtrOfIndex", + 149: "SysStringByteLen", + 150: "SysAllocStringByteLen", + 151: "DllRegisterServer", + 152: "VarEqv", + 153: "VarIdiv", + 154: "VarImp", + 155: "VarMod", + 156: "VarMul", + 157: "VarOr", + 158: "VarPow", + 159: "VarSub", + 160: "CreateTypeLib", + 161: "LoadTypeLib", + 162: "LoadRegTypeLib", + 163: "RegisterTypeLib", + 164: "QueryPathOfRegTypeLib", + 165: "LHashValOfNameSys", + 166: "LHashValOfNameSysA", + 167: "VarXor", + 168: "VarAbs", + 169: "VarFix", + 170: "OaBuildVersion", + 171: "ClearCustData", + 172: "VarInt", + 173: "VarNeg", + 174: "VarNot", + 175: "VarRound", + 176: "VarCmp", + 177: "VarDecAdd", + 178: "VarDecDiv", + 179: "VarDecMul", + 180: "CreateTypeLib2", + 181: "VarDecSub", + 182: "VarDecAbs", + 183: "LoadTypeLibEx", + 184: "SystemTimeToVariantTime", + 185: "VariantTimeToSystemTime", + 186: "UnRegisterTypeLib", + 187: "VarDecFix", + 188: "VarDecInt", + 189: "VarDecNeg", + 190: "VarDecFromUI1", + 191: "VarDecFromI2", + 192: "VarDecFromI4", + 193: "VarDecFromR4", + 194: "VarDecFromR8", + 195: "VarDecFromDate", + 196: "VarDecFromCy", + 197: "VarDecFromStr", + 198: "VarDecFromDisp", + 199: "VarDecFromBool", + 200: "GetErrorInfo", + 201: "SetErrorInfo", + 202: "CreateErrorInfo", + 203: "VarDecRound", + 204: "VarDecCmp", + 205: "VarI2FromI1", + 206: "VarI2FromUI2", + 207: "VarI2FromUI4", + 208: "VarI2FromDec", + 209: "VarI4FromI1", + 210: "VarI4FromUI2", + 211: "VarI4FromUI4", + 212: "VarI4FromDec", + 213: "VarR4FromI1", + 214: "VarR4FromUI2", + 215: "VarR4FromUI4", + 216: "VarR4FromDec", + 217: "VarR8FromI1", + 218: "VarR8FromUI2", + 219: "VarR8FromUI4", + 220: "VarR8FromDec", + 221: "VarDateFromI1", + 222: "VarDateFromUI2", + 223: "VarDateFromUI4", + 224: "VarDateFromDec", + 225: "VarCyFromI1", + 226: "VarCyFromUI2", + 227: "VarCyFromUI4", + 228: "VarCyFromDec", + 229: "VarBstrFromI1", + 230: "VarBstrFromUI2", + 231: "VarBstrFromUI4", + 232: "VarBstrFromDec", + 233: "VarBoolFromI1", + 234: "VarBoolFromUI2", + 235: "VarBoolFromUI4", + 236: "VarBoolFromDec", + 237: "VarUI1FromI1", + 238: "VarUI1FromUI2", + 239: "VarUI1FromUI4", + 240: "VarUI1FromDec", + 241: "VarDecFromI1", + 242: "VarDecFromUI2", + 243: "VarDecFromUI4", + 244: "VarI1FromUI1", + 245: "VarI1FromI2", + 246: "VarI1FromI4", + 247: "VarI1FromR4", + 248: "VarI1FromR8", + 249: "VarI1FromDate", + 250: "VarI1FromCy", + 251: "VarI1FromStr", + 252: "VarI1FromDisp", + 253: "VarI1FromBool", + 254: "VarI1FromUI2", + 255: "VarI1FromUI4", + 256: "VarI1FromDec", + 257: "VarUI2FromUI1", + 258: "VarUI2FromI2", + 259: "VarUI2FromI4", + 260: "VarUI2FromR4", + 261: "VarUI2FromR8", + 262: "VarUI2FromDate", + 263: "VarUI2FromCy", + 264: "VarUI2FromStr", + 265: "VarUI2FromDisp", + 266: "VarUI2FromBool", + 267: "VarUI2FromI1", + 268: "VarUI2FromUI4", + 269: "VarUI2FromDec", + 270: "VarUI4FromUI1", + 271: "VarUI4FromI2", + 272: "VarUI4FromI4", + 273: "VarUI4FromR4", + 274: "VarUI4FromR8", + 275: "VarUI4FromDate", + 276: "VarUI4FromCy", + 277: "VarUI4FromStr", + 278: "VarUI4FromDisp", + 279: "VarUI4FromBool", + 280: "VarUI4FromI1", + 281: "VarUI4FromUI2", + 282: "VarUI4FromDec", + 283: "BSTR_UserSize", + 284: "BSTR_UserMarshal", + 285: "BSTR_UserUnmarshal", + 286: "BSTR_UserFree", + 287: "VARIANT_UserSize", + 288: "VARIANT_UserMarshal", + 289: "VARIANT_UserUnmarshal", + 290: "VARIANT_UserFree", + 291: "LPSAFEARRAY_UserSize", + 292: "LPSAFEARRAY_UserMarshal", + 293: "LPSAFEARRAY_UserUnmarshal", + 294: "LPSAFEARRAY_UserFree", + 
295: "LPSAFEARRAY_Size", + 296: "LPSAFEARRAY_Marshal", + 297: "LPSAFEARRAY_Unmarshal", + 298: "VarDecCmpR8", + 299: "VarCyAdd", + 300: "DllUnregisterServer", + 301: "OACreateTypeLib2", + 303: "VarCyMul", + 304: "VarCyMulI4", + 305: "VarCySub", + 306: "VarCyAbs", + 307: "VarCyFix", + 308: "VarCyInt", + 309: "VarCyNeg", + 310: "VarCyRound", + 311: "VarCyCmp", + 312: "VarCyCmpR8", + 313: "VarBstrCat", + 314: "VarBstrCmp", + 315: "VarR8Pow", + 316: "VarR4CmpR8", + 317: "VarR8Round", + 318: "VarCat", + 319: "VarDateFromUdateEx", + 322: "GetRecordInfoFromGuids", + 323: "GetRecordInfoFromTypeInfo", + 325: "SetVarConversionLocaleSetting", + 326: "GetVarConversionLocaleSetting", + 327: "SetOaNoCache", + 329: "VarCyMulI8", + 330: "VarDateFromUdate", + 331: "VarUdateFromDate", + 332: "GetAltMonthNames", + 333: "VarI8FromUI1", + 334: "VarI8FromI2", + 335: "VarI8FromR4", + 336: "VarI8FromR8", + 337: "VarI8FromCy", + 338: "VarI8FromDate", + 339: "VarI8FromStr", + 340: "VarI8FromDisp", + 341: "VarI8FromBool", + 342: "VarI8FromI1", + 343: "VarI8FromUI2", + 344: "VarI8FromUI4", + 345: "VarI8FromDec", + 346: "VarI2FromI8", + 347: "VarI2FromUI8", + 348: "VarI4FromI8", + 349: "VarI4FromUI8", + 360: "VarR4FromI8", + 361: "VarR4FromUI8", + 362: "VarR8FromI8", + 363: "VarR8FromUI8", + 364: "VarDateFromI8", + 365: "VarDateFromUI8", + 366: "VarCyFromI8", + 367: "VarCyFromUI8", + 368: "VarBstrFromI8", + 369: "VarBstrFromUI8", + 370: "VarBoolFromI8", + 371: "VarBoolFromUI8", + 372: "VarUI1FromI8", + 373: "VarUI1FromUI8", + 374: "VarDecFromI8", + 375: "VarDecFromUI8", + 376: "VarI1FromI8", + 377: "VarI1FromUI8", + 378: "VarUI2FromI8", + 379: "VarUI2FromUI8", + 401: "OleLoadPictureEx", + 402: "OleLoadPictureFileEx", + 411: "SafeArrayCreateVector", + 412: "SafeArrayCopyData", + 413: "VectorFromBstr", + 414: "BstrFromVector", + 415: "OleIconToCursor", + 416: "OleCreatePropertyFrameIndirect", + 417: "OleCreatePropertyFrame", + 418: "OleLoadPicture", + 419: "OleCreatePictureIndirect", + 420: "OleCreateFontIndirect", + 421: "OleTranslateColor", + 422: "OleLoadPictureFile", + 423: "OleSavePictureFile", + 424: "OleLoadPicturePath", + 425: "VarUI4FromI8", + 426: "VarUI4FromUI8", + 427: "VarI8FromUI8", + 428: "VarUI8FromI8", + 429: "VarUI8FromUI1", + 430: "VarUI8FromI2", + 431: "VarUI8FromR4", + 432: "VarUI8FromR8", + 433: "VarUI8FromCy", + 434: "VarUI8FromDate", + 435: "VarUI8FromStr", + 436: "VarUI8FromDisp", + 437: "VarUI8FromBool", + 438: "VarUI8FromI1", + 439: "VarUI8FromUI2", + 440: "VarUI8FromUI4", + 441: "VarUI8FromDec", + 442: "RegisterTypeLibForUser", + 443: "UnRegisterTypeLibForUser", +} + +// OrdNames maps the dll names to ordinal names. +var OrdNames = map[string]map[uint64]string{ + "ws2_32.dll": WS232OrdNames, + "wsock32.dll": WS232OrdNames, + "oleaut32.dll": OleAut32OrdNames, +} + +// OrdLookup returns API name given an ordinal. +func OrdLookup(libname string, ord uint64, makeName bool) string { + names, ok := OrdNames[strings.ToLower(libname)] + if ok { + if name, ok := names[ord]; ok { + return name + } + } + if makeName { + return fmt.Sprintf("ord%d", ord) + } + return "" +} diff --git a/vendor/github.com/saferwall/pe/overlay.go b/vendor/github.com/saferwall/pe/overlay.go new file mode 100644 index 00000000..22bdd171 --- /dev/null +++ b/vendor/github.com/saferwall/pe/overlay.go @@ -0,0 +1,44 @@ +// Copyright 2022 Saferwall. All rights reserved. +// Use of this source code is governed by Apache v2 license +// license that can be found in the LICENSE file. 
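Before the overlay parser below, a quick sketch of the ordinal lookup defined in ordlookup.go above; this uses only the exported `OrdLookup` shown in this hunk:

```go
package main

import (
	"fmt"

	peparser "github.com/saferwall/pe"
)

func main() {
	fmt.Println(peparser.OrdLookup("WS2_32.dll", 23, false)) // "socket" (lookup is case-insensitive)
	fmt.Println(peparser.OrdLookup("foo.dll", 7, true))      // no table for foo.dll -> "ord7"
	fmt.Println(peparser.OrdLookup("foo.dll", 7, false))     // "" when makeName is false
}
```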
+ +package pe + +import ( + "errors" + "io" +) + +// error +var ( + ErrNoOverlayFound = errors.New("pe does not have overlay data") +) + +// NewOverlayReader returns a new ReadSeeker reading the PE overlay data. +func (pe *File) NewOverlayReader() (*io.SectionReader, error) { + if pe.data == nil { + return nil, errors.New("pe: file reader is nil") + } + return io.NewSectionReader(pe.f, pe.OverlayOffset, 1<<63-1), nil +} + +// Overlay returns the overlay of the PE file. +func (pe *File) Overlay() ([]byte, error) { + sr, err := pe.NewOverlayReader() + if err != nil { + return nil, err + } + + overlay := make([]byte, int64(pe.size)-pe.OverlayOffset) + n, err := sr.ReadAt(overlay, 0) + if n == len(overlay) { + pe.HasOverlay = true + err = nil + } + + return overlay, err +} + +func (pe *File) OverlayLength() int64 { + return int64(pe.size) - pe.OverlayOffset +} diff --git a/vendor/github.com/saferwall/pe/pe.go b/vendor/github.com/saferwall/pe/pe.go new file mode 100644 index 00000000..7d9c1efd --- /dev/null +++ b/vendor/github.com/saferwall/pe/pe.go @@ -0,0 +1,229 @@ +// Copyright 2018 Saferwall. All rights reserved. +// Use of this source code is governed by Apache v2 license +// license that can be found in the LICENSE file. + +package pe + +// Image executable types +const ( + + // The DOS MZ executable format is the executable file format used + // for .EXE files in DOS. + ImageDOSSignature = 0x5A4D // MZ + ImageDOSZMSignature = 0x4D5A // ZM + + // The New Executable (abbreviated NE or NewEXE) is a 16-bit .exe file + // format, a successor to the DOS MZ executable format. It was used in + // Windows 1.0–3.x, multitasking MS-DOS 4.0, OS/2 1.x, and the OS/2 subset + // of Windows NT up to version 5.0 (Windows 2000). A NE is also called a + // segmented executable. + ImageOS2Signature = 0x454E + + // Linear Executable is an executable file format in the EXE family. + // It was used by 32-bit OS/2, by some DOS extenders, and by Microsoft + // Windows VxD files. It is an extension of MS-DOS EXE, and a successor + // to NE (New Executable). + ImageOS2LESignature = 0x454C + + // There are two main varieties of LE executables: + // LX (32-bit), and LE (mixed 16/32-bit). + ImageVXDSignature = 0x584C + + // Terse Executables have a 'VZ' signature. + ImageTESignature = 0x5A56 + + // The Portable Executable (PE) format is a file format for executables, + // object code, DLLs and others used in 32-bit and 64-bit versions of + // Windows operating systems. 
+ ImageNTSignature = 0x00004550 // PE00 +) + +// Optional Header magic +const ( + ImageNtOptionalHeader32Magic = 0x10b + ImageNtOptionalHeader64Magic = 0x20b + ImageROMOptionalHeaderMagic = 0x10 +) + +// Image file machine types +const ( + ImageFileMachineUnknown = ImageFileHeaderMachineType(0x0) // The contents of this field are assumed to be applicable to any machine type + ImageFileMachineAM33 = ImageFileHeaderMachineType(0x1d3) // Matsushita AM33 + ImageFileMachineAMD64 = ImageFileHeaderMachineType(0x8664) // x64 + ImageFileMachineARM = ImageFileHeaderMachineType(0x1c0) // ARM little endian + ImageFileMachineARM64 = ImageFileHeaderMachineType(0xaa64) // ARM64 little endian + ImageFileMachineARMNT = ImageFileHeaderMachineType(0x1c4) // ARM Thumb-2 little endian + ImageFileMachineEBC = ImageFileHeaderMachineType(0xebc) // EFI byte code + ImageFileMachineI386 = ImageFileHeaderMachineType(0x14c) // Intel 386 or later processors and compatible processors + ImageFileMachineIA64 = ImageFileHeaderMachineType(0x200) // Intel Itanium processor family + ImageFileMachineM32R = ImageFileHeaderMachineType(0x9041) // Mitsubishi M32R little endian + ImageFileMachineMIPS16 = ImageFileHeaderMachineType(0x266) // MIPS16 + ImageFileMachineMIPSFPU = ImageFileHeaderMachineType(0x366) // MIPS with FPU + ImageFileMachineMIPSFPU16 = ImageFileHeaderMachineType(0x466) // MIPS16 with FPU + ImageFileMachinePowerPC = ImageFileHeaderMachineType(0x1f0) // Power PC little endian + ImageFileMachinePowerPCFP = ImageFileHeaderMachineType(0x1f1) // Power PC with floating point support + ImageFileMachineR4000 = ImageFileHeaderMachineType(0x166) // MIPS little endian + ImageFileMachineRISCV32 = ImageFileHeaderMachineType(0x5032) // RISC-V 32-bit address space + ImageFileMachineRISCV64 = ImageFileHeaderMachineType(0x5064) // RISC-V 64-bit address space + ImageFileMachineRISCV128 = ImageFileHeaderMachineType(0x5128) // RISC-V 128-bit address space + ImageFileMachineSH3 = ImageFileHeaderMachineType(0x1a2) // Hitachi SH3 + ImageFileMachineSH3DSP = ImageFileHeaderMachineType(0x1a3) // Hitachi SH3 DSP + ImageFileMachineSH4 = ImageFileHeaderMachineType(0x1a6) // Hitachi SH4 + ImageFileMachineSH5 = ImageFileHeaderMachineType(0x1a8) // Hitachi SH5 + ImageFileMachineTHUMB = ImageFileHeaderMachineType(0x1c2) // Thumb + ImageFileMachineWCEMIPSv2 = ImageFileHeaderMachineType(0x169) // MIPS little-endian WCE v2 +) + +// The Characteristics field contains flags that indicate attributes of the object or image file. +const ( + // Image file only. This flag indicates that the file contains no base + // relocations and must be loaded at its preferred base address. In the + // case of base address conflict, the OS loader reports an error. This flag + // should not be set for managed PE files. + ImageFileRelocsStripped = 0x0001 + + // Flag indicates that the file is an image file (EXE or DLL). This flag + // should be set for managed PE files. If it is not set, this generally + // indicates a linker error (i.e. no unresolved external references). + ImageFileExecutableImage = 0x0002 + + // COFF line numbers have been removed. This flag should be set for managed + // PE files because they do not use the debug information embedded in the + // PE file itself. Instead, the debug information is saved in accompanying + // program database (PDB) files. + ImageFileLineNumsStripped = 0x0004 + + // COFF symbol table entries for local symbols have been removed. 
This flag + // should be set for managed PE files, for the reason given in the preceding + // entry. + ImageFileLocalSymsStripped = 0x0008 + + // Aggressively trim the working set. + ImageFileAggressiveWSTrim = 0x0010 + + // Application can handle addresses beyond the 2GB range. This flag should + // not be set for pure-IL managed PE files of versions 1.0 and 1.1 but can + // be set for v2.0+ files. + ImageFileLargeAddressAware = 0x0020 + + // Little endian. + ImageFileBytesReservedLow = 0x0080 + + // Machine is based on 32-bit architecture. This flag is usually set by + // the current versions of code generators producing managed PE files. + // Version 2.0 and newer, however, can produce 64-bit specific images, + // which don’t have this flag set. + ImageFile32BitMachine = 0x0100 + + // Debug information has been removed from the image file. + ImageFileDebugStripped = 0x0200 + + // If the image file is on removable media, copy and run it from the swap + // file. + ImageFileRemovableRunFromSwap = 0x0400 + + // If the image file is on a network, copy and run it from the swap file. + ImageFileNetRunFromSwap = 0x0800 + + // The image file is a system file (for example, a device driver). This flag + ImageFileSystem = 0x1000 + + // The image file is a DLL rather than an EXE. It cannot be directly run. + ImageFileDLL = 0x2000 + + // The image file should be run on a uniprocessor machine only. + ImageFileUpSystemOnly = 0x4000 + + // Big endian. + ImageFileBytesReservedHigh = 0x8000 +) + +// Subsystem values of an OptionalHeader. +const ( + ImageSubsystemUnknown = 0 // An unknown subsystem. + ImageSubsystemNative = 1 // Device drivers and native Windows processes + ImageSubsystemWindowsGUI = 2 // The Windows graphical user interface (GUI) subsystem. + ImageSubsystemWindowsCUI = 3 // The Windows character subsystem + ImageSubsystemOS2CUI = 5 // The OS/2 character subsystem. + ImageSubsystemPosixCUI = 7 // The Posix character subsystem. + ImageSubsystemNativeWindows = 8 // Native Win9x driver + ImageSubsystemWindowsCEGUI = 9 // Windows CE + ImageSubsystemEFIApplication = 10 // An Extensible Firmware Interface (EFI) application + ImageSubsystemEFIBootServiceDriver = 11 // An EFI driver with boot services + ImageSubsystemEFIRuntimeDriver = 12 // An EFI driver with run-time services + ImageSubsystemEFIRom = 13 // An EFI ROM image . + ImageSubsystemXBOX = 14 // XBOX. + ImageSubsystemWindowsBootApplication = 16 // Windows boot application. +) + +// DllCharacteristics values of an OptionalHeader +const ( + ImageDllCharacteristicsReserved1 = 0x0001 // Reserved, must be zero. + ImageDllCharacteristicsReserved2 = 0x0002 // Reserved, must be zero. + ImageDllCharacteristicsReserved4 = 0x0004 // Reserved, must be zero. + ImageDllCharacteristicsReserved8 = 0x0008 // Reserved, must be zero. + ImageDllCharacteristicsHighEntropyVA = 0x0020 // Image can handle a high entropy 64-bit virtual address space + ImageDllCharacteristicsDynamicBase = 0x0040 // DLL can be relocated at load time. + ImageDllCharacteristicsForceIntegrity = 0x0080 // Code Integrity checks are enforced. + ImageDllCharacteristicsNXCompact = 0x0100 // Image is NX compatible. + ImageDllCharacteristicsNoIsolation = 0x0200 // Isolation aware, but do not isolate the image. + ImageDllCharacteristicsNoSEH = 0x0400 // Does not use structured exception (SE) handling. No SE handler may be called in this image. + ImageDllCharacteristicsNoBind = 0x0800 // Do not bind the image. 
+	ImageDllCharacteristicsAppContainer         = 0x1000 // Image must execute in an AppContainer.
+	ImageDllCharacteristicsWdmDriver            = 0x2000 // A WDM driver.
+	ImageDllCharacteristicsGuardCF              = 0x4000 // Image supports Control Flow Guard.
+	ImageDllCharacteristicsTerminalServiceAware = 0x8000 // Terminal Server aware.
+)
+
+// ImageDirectoryEntry represents an entry inside the data directories.
+type ImageDirectoryEntry int
+
+// DataDirectory entries of an OptionalHeader.
+const (
+	ImageDirectoryEntryExport ImageDirectoryEntry = iota // Export Table
+	ImageDirectoryEntryImport                            // Import Table
+	ImageDirectoryEntryResource                          // Resource Table
+	ImageDirectoryEntryException                         // Exception Table
+	ImageDirectoryEntryCertificate                       // Certificate Directory
+	ImageDirectoryEntryBaseReloc                         // Base Relocation Table
+	ImageDirectoryEntryDebug                             // Debug
+	ImageDirectoryEntryArchitecture                      // Architecture Specific Data
+	ImageDirectoryEntryGlobalPtr                         // The RVA of the value to be stored in the global pointer register.
+	ImageDirectoryEntryTLS                               // The thread local storage (TLS) table
+	ImageDirectoryEntryLoadConfig                        // The load configuration table
+	ImageDirectoryEntryBoundImport                       // The bound import table
+	ImageDirectoryEntryIAT                               // Import Address Table
+	ImageDirectoryEntryDelayImport                       // Delay Import Descriptor
+	ImageDirectoryEntryCLR                               // CLR Runtime Header
+	ImageDirectoryEntryReserved                          // Must be zero
+	ImageNumberOfDirectoryEntries                        // Tables count.
+)
+
+// FileInfo represents the PE file information struct.
+type FileInfo struct {
+	Is32           bool
+	Is64           bool
+	HasDOSHdr      bool
+	HasRichHdr     bool
+	HasCOFF        bool
+	HasNTHdr       bool
+	HasSections    bool
+	HasExport      bool
+	HasImport      bool
+	HasResource    bool
+	HasException   bool
+	HasCertificate bool
+	HasReloc       bool
+	HasDebug       bool
+	HasArchitect   bool
+	HasGlobalPtr   bool
+	HasTLS         bool
+	HasLoadCFG     bool
+	HasBoundImp    bool
+	HasIAT         bool
+	HasDelayImp    bool
+	HasCLR         bool
+	HasOverlay     bool
+	IsSigned       bool
+}
diff --git a/vendor/github.com/saferwall/pe/reloc.go b/vendor/github.com/saferwall/pe/reloc.go
new file mode 100644
index 00000000..3a242c4d
--- /dev/null
+++ b/vendor/github.com/saferwall/pe/reloc.go
@@ -0,0 +1,257 @@
+// Copyright 2018 Saferwall. All rights reserved.
+// Use of this source code is governed by an Apache v2
+// license that can be found in the LICENSE file.
+
+package pe
+
+import (
+	"encoding/binary"
+	"errors"
+)
+
+var (
+	// ErrInvalidBaseRelocVA is reported when a base relocation lies outside of the image.
+	ErrInvalidBaseRelocVA = errors.New("invalid relocation information." +
+		" Base Relocation VirtualAddress is outside of PE Image")
+
+	// ErrInvalidBasicRelocSizeOfBloc is reported when a base relocation block is too large.
+	ErrInvalidBasicRelocSizeOfBloc = errors.New("invalid relocation " +
+		"information. Base Relocation SizeOfBlock too large")
+)
+
+// ImageBaseRelocationEntryType represents the type of an image base relocation entry.
+type ImageBaseRelocationEntryType uint8
+
+// The Type field of the relocation record indicates what kind of relocation
+// should be performed. Different relocation types are defined for each type
+// of machine.
+const (
+	// The base relocation is skipped. This type can be used to pad a block.
+	ImageRelBasedAbsolute = 0
+
+	// The base relocation adds the high 16 bits of the difference to the 16-bit
+	// field at offset. The 16-bit field represents the high value of a 32-bit word.
+	ImageRelBasedHigh = 1
+
+	// The base relocation adds the low 16 bits of the difference to the 16-bit
+	// field at offset. The 16-bit field represents the low half of a 32-bit word.
+	ImageRelBasedLow = 2
+
+	// The base relocation applies all 32 bits of the difference to the 32-bit
+	// field at offset.
+	ImageRelBasedHighLow = 3
+
+	// The base relocation adds the high 16 bits of the difference to the 16-bit
+	// field at offset. The 16-bit field represents the high value of a 32-bit
+	// word. The low 16 bits of the 32-bit value are stored in the 16-bit word
+	// that follows this base relocation. This means that this base relocation
+	// occupies two slots.
+	ImageRelBasedHighAdj = 4
+
+	// The relocation interpretation is dependent on the machine type.
+	// When the machine type is MIPS, the base relocation applies to a MIPS jump
+	// instruction.
+	ImageRelBasedMIPSJmpAddr = 5
+
+	// This relocation is meaningful only when the machine type is ARM or Thumb.
+	// The base relocation applies the 32-bit address of a symbol across a
+	// consecutive MOVW/MOVT instruction pair.
+	ImageRelBasedARMMov32 = 5
+
+	// This relocation is only meaningful when the machine type is RISC-V. The
+	// base relocation applies to the high 20 bits of a 32-bit absolute address.
+	ImageRelBasedRISCVHigh20 = 5
+
+	// Reserved, must be zero.
+	ImageRelReserved = 6
+
+	// This relocation is meaningful only when the machine type is Thumb.
+	// The base relocation applies the 32-bit address of a symbol to a
+	// consecutive MOVW/MOVT instruction pair.
+	ImageRelBasedThumbMov32 = 7
+
+	// This relocation is only meaningful when the machine type is RISC-V.
+	// The base relocation applies to the low 12 bits of a 32-bit absolute
+	// address formed in RISC-V I-type instruction format.
+	ImageRelBasedRISCVLow12i = 7
+
+	// This relocation is only meaningful when the machine type is RISC-V.
+	// The base relocation applies to the low 12 bits of a 32-bit absolute
+	// address formed in RISC-V S-type instruction format.
+	ImageRelBasedRISCVLow12s = 8
+
+	// The relocation is only meaningful when the machine type is MIPS.
+	// The base relocation applies to a MIPS16 jump instruction.
+	ImageRelBasedMIPSJmpAddr16 = 9
+
+	// The base relocation applies the difference to the 64-bit field at offset.
+	ImageRelBasedDir64 = 10
+)
+
+const (
+	// MaxDefaultRelocEntriesCount represents the default maximum number of
+	// relocation entries to parse. Some malware uses fake, huge relocation
+	// entry counts that can significantly slow down the parser.
+	// Example: 01008963d32f5cc17b64c31446386ee5b36a7eab6761df87a2989ba9394d8f3d
+	MaxDefaultRelocEntriesCount = 0x1000
+)
+
+// ImageBaseRelocation represents the IMAGE_BASE_RELOCATION structure.
+// Each chunk of base relocation data begins with an IMAGE_BASE_RELOCATION structure.
+type ImageBaseRelocation struct {
+	// The image base plus the page RVA is added to each offset to create the
+	// VA where the base relocation must be applied.
+	VirtualAddress uint32
+
+	// The total number of bytes in the base relocation block, including the
+	// Page RVA and Block Size fields and the Type/Offset fields that follow.
+	SizeOfBlock uint32
+}
+
+// ImageBaseRelocationEntry represents an image base relocation entry.
+type ImageBaseRelocationEntry struct {
+	// The raw Type/Offset word as read from the file: the high 4 bits encode
+	// the relocation type and the low 12 bits encode the offset. The decoded
+	// values are stored in the Type and Offset fields below.
+	Data uint16
+
+	// The offset of the relocation. This value plus the VirtualAddress
+	// in IMAGE_BASE_RELOCATION is the complete RVA.
+	Offset uint16
+
+	// A value that indicates the kind of relocation that should be performed.
+	// Valid relocation types depend on machine type.
+	Type ImageBaseRelocationEntryType
+}
+
+// Relocation represents the relocation table which holds the data that needs to
+// be relocated.
+type Relocation struct {
+	// Points to the ImageBaseRelocation structure.
+	Data ImageBaseRelocation
+
+	// Holds the list of entries for each chunk.
+	Entries []ImageBaseRelocationEntry
+}
+
+func (pe *File) parseRelocations(dataRVA, rva, size uint32) ([]ImageBaseRelocationEntry, error) {
+	var relocEntries []ImageBaseRelocationEntry
+
+	relocEntriesCount := size / 2
+	if relocEntriesCount > pe.opts.MaxRelocEntriesCount {
+		pe.Anomalies = append(pe.Anomalies, AnoAddressOfDataBeyondLimits)
+	}
+
+	offset := pe.GetOffsetFromRva(dataRVA)
+	var err error
+	for i := uint32(0); i < relocEntriesCount; i++ {
+		entry := ImageBaseRelocationEntry{}
+		entry.Data, err = pe.ReadUint16(offset + (i * 2))
+		if err != nil {
+			break
+		}
+		entry.Type = ImageBaseRelocationEntryType(entry.Data >> 12)
+		entry.Offset = entry.Data & 0x0fff
+		relocEntries = append(relocEntries, entry)
+	}
+
+	return relocEntries, nil
+}
+
+func (pe *File) parseRelocDirectory(rva, size uint32) error {
+	var sizeOfImage uint32
+	switch pe.Is64 {
+	case true:
+		sizeOfImage = pe.NtHeader.OptionalHeader.(ImageOptionalHeader64).SizeOfImage
+	case false:
+		sizeOfImage = pe.NtHeader.OptionalHeader.(ImageOptionalHeader32).SizeOfImage
+	}
+
+	relocSize := uint32(binary.Size(ImageBaseRelocation{}))
+	end := rva + size
+	for rva < end {
+		baseReloc := ImageBaseRelocation{}
+		offset := pe.GetOffsetFromRva(rva)
+		err := pe.structUnpack(&baseReloc, offset, relocSize)
+		if err != nil {
+			return err
+		}
+
+		// VirtualAddress must lie within the Image.
+		if baseReloc.VirtualAddress > sizeOfImage {
+			return ErrInvalidBaseRelocVA
+		}
+
+		// SizeOfBlock must be less than or equal to the size of the image.
+		// It's a rather loose sanity test.
+		if baseReloc.SizeOfBlock > sizeOfImage {
+			return ErrInvalidBasicRelocSizeOfBloc
+		}
+
+		relocEntries, err := pe.parseRelocations(rva+relocSize,
+			baseReloc.VirtualAddress, baseReloc.SizeOfBlock-relocSize)
+		if err != nil {
+			return err
+		}
+
+		pe.Relocations = append(pe.Relocations, Relocation{
+			Data:    baseReloc,
+			Entries: relocEntries,
+		})
+
+		if baseReloc.SizeOfBlock == 0 {
+			break
+		}
+		rva += baseReloc.SizeOfBlock
+	}
+
+	if len(pe.Relocations) > 0 {
+		pe.HasReloc = true
+	}
+
+	return nil
+}
+
+// String returns the string representation of the `Type` field of a base reloc entry.
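+//
+// Because several numeric values are overloaded across architectures (5 and 7
+// above), the method takes the owning File so it can consult the machine type.
+// A minimal usage sketch, illustrative only and assuming a fully parsed *File
+// named f:
+//
+//	for _, reloc := range f.Relocations {
+//		for _, entry := range reloc.Entries {
+//			fmt.Printf("0x%04x %s\n", entry.Offset, entry.Type.String(f))
+//		}
+//	}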
+func (t ImageBaseRelocationEntryType) String(pe *File) string {
+	relocTypesMap := map[ImageBaseRelocationEntryType]string{
+		ImageRelBasedAbsolute:      "Absolute",
+		ImageRelBasedHigh:          "High",
+		ImageRelBasedLow:           "Low",
+		ImageRelBasedHighLow:       "HighLow",
+		ImageRelBasedHighAdj:       "HighAdj",
+		ImageRelReserved:           "Reserved",
+		ImageRelBasedRISCVLow12s:   "RISC-V Low12s",
+		ImageRelBasedMIPSJmpAddr16: "MIPS Jmp Addr16",
+		ImageRelBasedDir64:         "DIR64",
+	}
+
+	if value, ok := relocTypesMap[t]; ok {
+		return value
+	}
+
+	switch pe.NtHeader.FileHeader.Machine {
+	case ImageFileMachineMIPS16, ImageFileMachineMIPSFPU, ImageFileMachineMIPSFPU16, ImageFileMachineWCEMIPSv2:
+		if t == ImageRelBasedMIPSJmpAddr {
+			return "MIPS JMP Addr"
+		}
+
+	case ImageFileMachineARM, ImageFileMachineARM64, ImageFileMachineARMNT:
+		if t == ImageRelBasedARMMov32 {
+			return "ARM MOV 32"
+		}
+
+		if t == ImageRelBasedThumbMov32 {
+			return "Thumb MOV 32"
+		}
+	case ImageFileMachineRISCV32, ImageFileMachineRISCV64, ImageFileMachineRISCV128:
+		if t == ImageRelBasedRISCVHigh20 {
+			return "RISC-V High 20"
+		}
+
+		if t == ImageRelBasedRISCVLow12i {
+			return "RISC-V Low 12"
+		}
+	}
+
+	return "?"
+}
diff --git a/vendor/github.com/saferwall/pe/resource.go b/vendor/github.com/saferwall/pe/resource.go
new file mode 100644
index 00000000..e28a60c7
--- /dev/null
+++ b/vendor/github.com/saferwall/pe/resource.go
@@ -0,0 +1,2233 @@
+// Copyright 2018 Saferwall. All rights reserved.
+// Use of this source code is governed by an Apache v2
+// license that can be found in the LICENSE file.
+
+package pe
+
+import (
+	"encoding/binary"
+)
+
+// ResourceType represents a resource type.
+type ResourceType int
+
+// ResourceLang represents a resource language.
+type ResourceLang uint32
+
+// ResourceSubLang represents a resource sub language.
+type ResourceSubLang uint32
+
+// https://learn.microsoft.com/en-us/openspecs/windows_protocols/ms-lcid/70feba9f-294e-491e-b6eb-56532684c37f
+
+// Special resource (sub)language identifiers.
+const (
+	LangNeutral       ResourceLang = 0x00 // Default custom (MUI) locale language
+	LangUserDefault   ResourceLang = 0x01 // User default locale language
+	LangSystemDefault ResourceLang = 0x02 // System default locale language
+	LangInvariant     ResourceLang = 0x7F // Invariant locale language
+
+	SubLangNeutral           ResourceSubLang = 0x00 // Neutral sub-language
+	SubLangInvariant         ResourceSubLang = 0x00 // Invariant sub-language
+	SubLangDefault           ResourceSubLang = 0x01 // User default sub-language
+	SubLangSysDefault        ResourceSubLang = 0x02 // System default sub-language
+	SubLangCustomDefault     ResourceSubLang = 0x03 // Default custom sub-language
+	SubLangCustomUnspecified ResourceSubLang = 0x04 // Unspecified custom sub-language
+	SubLangMUICustomDefault  ResourceSubLang = 0x05 // Default custom MUI sub-language
+)
+
+// All resource language identifiers.
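+//
+// At the language level of the resource tree, the directory entry's Name field
+// holds a numeric LANGID rather than a string offset. An illustrative sketch
+// of how the parser below splits it (the same masks appear later in
+// doParseResourceDirectory); 0x0409 is just an example LANGID, the one Windows
+// uses for English (United States):
+//
+//	langID := uint32(0x0409)
+//	lang := ResourceLang(langID & 0x3ff)     // 0x0009 -> LangEnglish
+//	subLang := ResourceSubLang(langID >> 10) // 0x01, SUBLANG_ENGLISH_US in the Windows headers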
+const ( + // Afrikaans (af) + LangAfrikaans ResourceLang = 0x0036 + // Albanian (sq) + LangAlbanian ResourceLang = 0x001C + // Alsatian (gsw) + LangAlsatian ResourceLang = 0x0084 + // Amharic (am) + LangAmharic ResourceLang = 0x005E + // Arabic (ar) + LangArabic ResourceLang = 0x0001 + // Armenian (hy) + LangArmenian ResourceLang = 0x002B + // Assamese (as) + LangAssamese ResourceLang = 0x004D + // Azerbaijani (Latin) (az) + LangAzerbaijaniLatin ResourceLang = 0x002C + // Bangla (bn) + LangBangla ResourceLang = 0x0045 + // Bashkir (ba) + LangBashkir ResourceLang = 0x006D + // Basque (eu) + LangBasque ResourceLang = 0x002D + // Belarusian (be) + LangBelarusian ResourceLang = 0x0023 + // Bosnian (Latin) (bs) + LangBosnianLatin ResourceLang = 0x781A + // Breton (br) + LangBreton ResourceLang = 0x007E + // Bulgarian (bg) + LangBulgarian ResourceLang = 0x0002 + // Burmese (my) + LangBurmese ResourceLang = 0x0055 + // Catalan (ca) + LangCatalan ResourceLang = 0x0003 + // Central Kurdish (ku) + LangCentralKurdish ResourceLang = 0x0092 + // Cherokee (chr) + LangCherokee ResourceLang = 0x005C + // Chinese (Simplified) (zh) + LangChineseSimplified ResourceLang = 0x7804 + // Corsican (co) + LangCorsican ResourceLang = 0x0083 + // Croatian (hr) + LangCroatian ResourceLang = 0x001A + // Czech (cs) + LangCzech ResourceLang = 0x0005 + // Danish (da) + LangDanish ResourceLang = 0x0006 + // Dari (prs) + LangDari ResourceLang = 0x008C + // Divehi (dv) + LangDivehi ResourceLang = 0x0065 + // Dutch (nl) + LangDutch ResourceLang = 0x0013 + // English (en) + LangEnglish ResourceLang = 0x0009 + // Estonian (et) + LangEstonian ResourceLang = 0x0025 + // Faroese (fo) + LangFaroese ResourceLang = 0x0038 + // Filipino (fil) + LangFilipino ResourceLang = 0x0064 + // Finnish (fi) + LangFinnish ResourceLang = 0x000B + // French (fr) + LangFrench ResourceLang = 0x000C + // Frisian (fy) + LangFrisian ResourceLang = 0x0062 + // Fulah (ff) + LangFulah ResourceLang = 0x0067 + // Fulah (Latin) (ff-Latn) + LangFulahLatin ResourceLang = 0x7C67 + // Galician (gl) + LangGalician ResourceLang = 0x0056 + // Georgian (ka) + LangGeorgian ResourceLang = 0x0037 + // German (de) + LangGerman ResourceLang = 0x0007 + // Greek (el) + LangGreek ResourceLang = 0x0008 + // Greenlandic (kl) + LangGreenlandic ResourceLang = 0x006F + // Guarani (gn) + LangGuarani ResourceLang = 0x0074 + // Gujarati (gu) + LangGujarati ResourceLang = 0x0047 + // Hausa (Latin) (ha) + LangHausaLatin ResourceLang = 0x0068 + // Hawaiian (haw) + LangHawaiian ResourceLang = 0x0075 + // Hebrew (he) + LangHebrew ResourceLang = 0x000D + // Hindi (hi) + LangHindi ResourceLang = 0x0039 + // Hungarian (hu) + LangHungarian ResourceLang = 0x000E + // Icelandic (is) + LangIcelandic ResourceLang = 0x000F + // Igbo (ig) + LangIgbo ResourceLang = 0x0070 + // Indonesian (id) + LangIndonesian ResourceLang = 0x0021 + // Inuktitut (Latin) (iu) + LangInuktitutLatin ResourceLang = 0x005D + // Irish (ga) + LangIrish ResourceLang = 0x003C + // Italian (it) + LangItalian ResourceLang = 0x0010 + // Japanese (ja) + LangJapanese ResourceLang = 0x0011 + // Kannada (kn) + LangKannada ResourceLang = 0x004B + // Kashmiri (ks) + LangKashmiri ResourceLang = 0x0060 + // Kazakh (kk) + LangKazakh ResourceLang = 0x003F + // Khmer (km) + LangKhmer ResourceLang = 0x0053 + // K'iche (quc) + LangKiche ResourceLang = 0x0086 + // Kinyarwanda (rw) + LangKinyarwanda ResourceLang = 0x0087 + // Kiswahili (sw) + LangKiswahili ResourceLang = 0x0041 + // Konkani (kok) + LangKonkani ResourceLang = 0x0057 + // 
Korean (ko) + LangKorean ResourceLang = 0x0012 + // Kyrgyz (ky) + LangKyrgyz ResourceLang = 0x0040 + // Lao (lo) + LangLao ResourceLang = 0x0054 + // Latvian (lv) + LangLatvian ResourceLang = 0x0026 + // Lithuanian (lt) + LangLithuanian ResourceLang = 0x0027 + // Lower Sorbian (dsb) + LangLowerSorbian ResourceLang = 0x7C2E + // Luxembourgish (lb) + LangLuxembourgish ResourceLang = 0x006E + // Macedonian (mk) + LangMacedonian ResourceLang = 0x002F + // Malay (ms) + LangMalay ResourceLang = 0x003E + // Malayalam (ml) + LangMalayalam ResourceLang = 0x004C + // Maltese (mt) + LangMaltese ResourceLang = 0x003A + // Maori (mi) + LangMaori ResourceLang = 0x0081 + // Mapudungun (arn) + LangMapudungun ResourceLang = 0x007A + // Marathi (mr) + LangMarathi ResourceLang = 0x004E + // Mohawk (moh) + LangMohawk ResourceLang = 0x007C + // Mongolian (Cyrillic) (mn) + LangMongolianCyrillic ResourceLang = 0x0050 + // Nepali (ne) + LangNepali ResourceLang = 0x0061 + // Norwegian (Bokmal) (no) + LangNorwegianBokmalNo ResourceLang = 0x0014 + // Norwegian (Bokmal) (nb) + LangNorwegianBokmal ResourceLang = 0x7C14 + // Norwegian (Nynorsk) (nn) + LangNorwegianNynorsk ResourceLang = 0x7814 + // Occitan (oc) + LangOccitan ResourceLang = 0x0082 + // Odia (or) + LangOdia ResourceLang = 0x0048 + // Oromo (om) + LangOromo ResourceLang = 0x0072 + // Pashto (ps) + LangPashto ResourceLang = 0x0063 + // Persian (fa) + LangPersian ResourceLang = 0x0029 + // Polish (pl) + LangPolish ResourceLang = 0x0015 + // Portuguese (pt) + LangPortuguese ResourceLang = 0x0016 + // Punjabi (pa) + LangPunjabi ResourceLang = 0x0046 + // Quechua (quz) + LangQuechua ResourceLang = 0x006B + // Romanian (ro) + LangRomanian ResourceLang = 0x0018 + // Romansh (rm) + LangRomansh ResourceLang = 0x0017 + // Russian (ru) + LangRussian ResourceLang = 0x0019 + // Sakha (sah) + LangSakha ResourceLang = 0x0085 + // Sami (Inari) (smn) + LangSamiInari ResourceLang = 0x703B + // Sami (Lule) (smj) + LangSamiLule ResourceLang = 0x7C3B + // Sami (Northern) (se) + LangSamiNorthern ResourceLang = 0x003B + // Sami (Skolt) (sms) + LangSamiSkolt ResourceLang = 0x743B + // Sami (Southern) (sma) + LangSamiSouthern ResourceLang = 0x783B + // Sanskrit (sa) + LangSanskrit ResourceLang = 0x004F + // Scottish Gaelic (gd) + LangScottishGaelic ResourceLang = 0x0091 + // Serbian (Latin) (sr) + LangSerbianLatin ResourceLang = 0x7C1A + // Sesotho Sa Leboa (nso) + LangSesothoSaLeboa ResourceLang = 0x006C + // Setswana (tn) + LangSetswana ResourceLang = 0x0032 + // Sindhi (sd) + LangSindhi ResourceLang = 0x0059 + // Sinhala (si) + LangSinhala ResourceLang = 0x005B + // Slovak (sk) + LangSlovak ResourceLang = 0x001B + // Slovenian (sl) + LangSlovenian ResourceLang = 0x0024 + // Somali (so) + LangSomali ResourceLang = 0x0077 + // Sotho (st) + LangSotho ResourceLang = 0x0030 + // Spanish (es) + LangSpanish ResourceLang = 0x000A + // Swedish (sv) + LangSwedish ResourceLang = 0x001D + // Syriac (syr) + LangSyriac ResourceLang = 0x005A + // Tajik (Cyrillic) (tg) + LangTajikCyrillic ResourceLang = 0x0028 + // Tamazight (Latin) (tzm) + LangTamazightLatin ResourceLang = 0x005F + // Tamil (ta) + LangTamil ResourceLang = 0x0049 + // Tatar (tt) + LangTatar ResourceLang = 0x0044 + // Telugu (te) + LangTelugu ResourceLang = 0x004A + // Thai (th) + LangThai ResourceLang = 0x001E + // Tibetan (bo) + LangTibetan ResourceLang = 0x0051 + // Tigrinya (ti) + LangTigrinya ResourceLang = 0x0073 + // Tsonga (ts) + LangTsonga ResourceLang = 0x0031 + // Turkish (tr) + LangTurkish ResourceLang = 0x001F + 
// Turkmen (tk) + LangTurkmen ResourceLang = 0x0042 + // Ukrainian (uk) + LangUkrainian ResourceLang = 0x0022 + // Upper Sorbian (hsb) + LangUpperSorbian ResourceLang = 0x002E + // Urdu (ur) + LangUrdu ResourceLang = 0x0020 + // Uyghur (ug) + LangUyghur ResourceLang = 0x0080 + // Uzbek (Latin) (uz) + LangUzbekLatin ResourceLang = 0x0043 + // Venda (ve) + LangVenda ResourceLang = 0x0033 + // Vietnamese (vi) + LangVietnamese ResourceLang = 0x002A + // Welsh (cy) + LangWelsh ResourceLang = 0x0052 + // Wolof (wo) + LangWolof ResourceLang = 0x0088 + // Xhosa (xh) + LangXhosa ResourceLang = 0x0034 + // Yi (ii) + LangYi ResourceLang = 0x0078 + // Yoruba (yo) + LangYoruba ResourceLang = 0x006A + // Zulu (zu) + LangZulu ResourceLang = 0x0035 +) + +// All resource sub-language identifiers. +const ( + // Afrikaans South Africa (af-ZA) + SubLangAfrikaansSouthAfrica ResourceSubLang = iota + // Albanian Albania (sq-AL) + SubLangAlbanianAlbania + // Alsatian France (gsw-FR) + SubLangAlsatianFrance + // Amharic Ethiopia (am-ET) + SubLangAmharicEthiopia + // Arabic Algeria (ar-DZ) + SubLangArabicAlgeria + // Arabic Bahrain (ar-BH) + SubLangArabicBahrain + // Arabic Egypt (ar-EG) + SubLangArabicEgypt + // Arabic Iraq (ar-IQ) + SubLangArabicIraq + // Arabic Jordan (ar-JO) + SubLangArabicJordan + // Arabic Kuwait (ar-KW) + SubLangArabicKuwait + // Arabic Lebanon (ar-LB) + SubLangArabicLebanon + // Arabic Libya (ar-LY) + SubLangArabicLibya + // Arabic Morocco (ar-MA) + SubLangArabicMorocco + // Arabic Oman (ar-OM) + SubLangArabicOman + // Arabic Qatar (ar-QA) + SubLangArabicQatar + // Arabic Saudi Arabia (ar-SA) + SubLangArabicSaudiArabia + // Arabic Syria (ar-SY) + SubLangArabicSyria + // Arabic Tunisia (ar-TN) + SubLangArabicTunisia + // Arabic U.a.e. (ar-AE) + SubLangArabicUae + // Arabic Yemen (ar-YE) + SubLangArabicYemen + // Armenian Armenia (hy-AM) + SubLangArmenianArmenia + // Assamese India (as-IN) + SubLangAssameseIndia + // Azerbaijani (Cyrillic) (az-Cyrl) + SubLangAzerbaijaniCyrillic + // Azerbaijani (Cyrillic) Azerbaijan (az-Cyrl-AZ) + SubLangAzerbaijaniCyrillicAzerbaijan + // Azerbaijani (Latin) (az-Latn) + SubLangAzerbaijaniLatin + // Azerbaijani (Latin) Azerbaijan (az-Latn-AZ) + SubLangAzerbaijaniLatinAzerbaijan + // Bangla Bangladesh (bn-BD) + SubLangBanglaBangladesh + // Bangla India (bn-IN) + SubLangBanglaIndia + // Bashkir Russia (ba-RU) + SubLangBashkirRussia + // Basque Spain (eu-ES) + SubLangBasqueSpain + // Belarusian Belarus (be-BY) + SubLangBelarusianBelarus + // Bosnian (Cyrillic) (bs-Cyrl) + SubLangBosnianCyrillic + // Bosnian (Cyrillic) Bosnia And Herzegovina (bs-Cyrl-BA) + SubLangBosnianCyrillicBosniaAndHerzegovina + // Bosnian (Latin) (bs-Latn) + SubLangBosnianLatin + // Bosnian (Latin) Bosnia And Herzegovina (bs-Latn-BA) + SubLangBosnianLatinBosniaAndHerzegovina + // Breton France (br-FR) + SubLangBretonFrance + // Bulgarian Bulgaria (bg-BG) + SubLangBulgarianBulgaria + // Burmese Myanmar (my-MM) + SubLangBurmeseMyanmar + // Catalan Spain (ca-ES) + SubLangCatalanSpain + // Central Atlas Tamazight (Arabic) Morocco (tzm-ArabMA) + SubLangCentralAtlasTamazightArabicMorocco + // Central Kurdish (ku-Arab) + SubLangCentralKurdish + // Central Kurdish Iraq (ku-Arab-IQ) + SubLangCentralKurdishIraq + // Cherokee (chr-Cher) + SubLangCherokee + // Cherokee United States (chr-Cher-US) + SubLangCherokeeUnitedStates + // Chinese (Simplified) (zh-Hans) + SubLangChineseSimplified + // Chinese (Simplified) People's Republic Of China (zh-CN) + SubLangChineseSimplifiedPeoplesRepublicOfChina + // 
Chinese (Simplified) Singapore (zh-SG) + SubLangChineseSimplifiedSingapore + // Chinese (Traditional) (zh-Hant) + SubLangChineseTraditional + // Chinese (Traditional) Hong Kong S.a.r. (zh-HK) + SubLangChineseTraditionalHongKongSar + // Chinese (Traditional) Macao S.a.r. (zh-MO) + SubLangChineseTraditionalMacaoSar + // Chinese (Traditional) Taiwan (zh-TW) + SubLangChineseTraditionalTaiwan + // Corsican France (co-FR) + SubLangCorsicanFrance + // Croatian Croatia (hr-HR) + SubLangCroatianCroatia + // Croatian (Latin) Bosnia And Herzegovina (hr-BA) + SubLangCroatianLatinBosniaAndHerzegovina + // Czech Czech Republic (cs-CZ) + SubLangCzechCzechRepublic + // Danish Denmark (da-DK) + SubLangDanishDenmark + // Dari Afghanistan (prs-AF) + SubLangDariAfghanistan + // Divehi Maldives (dv-MV) + SubLangDivehiMaldives + // Dutch Belgium (nl-BE) + SubLangDutchBelgium + // Dutch Netherlands (nl-NL) + SubLangDutchNetherlands + // Dzongkha Bhutan (dz-BT) + SubLangDzongkhaBhutan + // English Australia (en-AU) + SubLangEnglishAustralia + // English Belize (en-BZ) + SubLangEnglishBelize + // English Canada (en-CA) + SubLangEnglishCanada + // English Caribbean (en-029) + SubLangEnglishCaribbean + // English Hong Kong (en-HK) + SubLangEnglishHongKong + // English India (en-IN) + SubLangEnglishIndia + // English Ireland (en-IE) + SubLangEnglishIreland + // English Jamaica (en-JM) + SubLangEnglishJamaica + // English Malaysia (en-MY) + SubLangEnglishMalaysia + // English New Zealand (en-NZ) + SubLangEnglishNewZealand + // English Republic Of The Philippines (en-PH) + SubLangEnglishRepublicOfThePhilippines + // English Singapore (en-SG) + SubLangEnglishSingapore + // English South Africa (en-ZA) + SubLangEnglishSouthAfrica + // English Trinidad And Tobago (en-TT) + SubLangEnglishTrinidadAndTobago + // English United Arab Emirates (en-AE) + SubLangEnglishUnitedArabEmirates + // English United Kingdom (en-GB) + SubLangEnglishUnitedKingdom + // English United States (en-US) + SubLangEnglishUnitedStates + // English Zimbabwe (en-ZW) + SubLangEnglishZimbabwe + // Estonian Estonia (et-EE) + SubLangEstonianEstonia + // Faroese Faroe Islands (fo-FO) + SubLangFaroeseFaroeIslands + // Filipino Philippines (fil-PH) + SubLangFilipinoPhilippines + // Finnish Finland (fi-FI) + SubLangFinnishFinland + // French Belgium (fr-BE) + SubLangFrenchBelgium + // French Cameroon (fr-CM) + SubLangFrenchCameroon + // French Canada (fr-CA) + SubLangFrenchCanada + // French Caribbean (fr-029) + SubLangFrenchCaribbean + // French Congo, Drc (fr-CD) + SubLangFrenchCongoDrc + // French Côte D'ivoire (fr-CI) + SubLangFrenchCôteDivoire + // French France (fr-FR) + SubLangFrenchFrance + // French Haiti (fr-HT) + SubLangFrenchHaiti + // French Luxembourg (fr-LU) + SubLangFrenchLuxembourg + // French Mali (fr-ML) + SubLangFrenchMali + // French Morocco (fr-MA) + SubLangFrenchMorocco + // French Principality Of Monaco (fr-MC) + SubLangFrenchPrincipalityOfMonaco + // French Reunion (fr-RE) + SubLangFrenchReunion + // French Senegal (fr-SN) + SubLangFrenchSenegal + // French Switzerland (fr-CH) + SubLangFrenchSwitzerland + // Frisian Netherlands (fy-NL) + SubLangFrisianNetherlands + // Fulah Nigeria (ff-NG) + SubLangFulahNigeria + // Fulah (Latin) Nigeria (ff-Latn-NG) + SubLangFulahLatinNigeria + // Fulah Senegal (ff-Latn-SN) + SubLangFulahSenegal + // Galician Spain (gl-ES) + SubLangGalicianSpain + // Georgian Georgia (ka-GE) + SubLangGeorgianGeorgia + // German Austria (de-AT) + SubLangGermanAustria + // German Germany (de-DE) + SubLangGermanGermany 
+ // German Liechtenstein (de-LI) + SubLangGermanLiechtenstein + // German Luxembourg (de-LU) + SubLangGermanLuxembourg + // German Switzerland (de-CH) + SubLangGermanSwitzerland + // Greek Greece (el-GR) + SubLangGreekGreece + // Greenlandic Greenland (kl-GL) + SubLangGreenlandicGreenland + // Guarani Paraguay (gn-PY) + SubLangGuaraniParaguay + // Gujarati India (gu-IN) + SubLangGujaratiIndia + // Hausa (Latin) (ha-Latn) + SubLangHausaLatin + // Hausa (Latin) Nigeria (ha-Latn-NG) + SubLangHausaLatinNigeria + // Hawaiian United States (haw-US) + SubLangHawaiianUnitedStates + // Hebrew Israel (he-IL) + SubLangHebrewIsrael + // Hindi India (hi-IN) + SubLangHindiIndia + // Hungarian Hungary (hu-HU) + SubLangHungarianHungary + // Icelandic Iceland (is-IS) + SubLangIcelandicIceland + // Igbo Nigeria (ig-NG) + SubLangIgboNigeria + // Indonesian Indonesia (id-ID) + SubLangIndonesianIndonesia + // Inuktitut (Latin) (iu-Latn) + SubLangInuktitutLatin + // Inuktitut (Latin) Canada (iu-Latn-CA) + SubLangInuktitutLatinCanada + // Inuktitut (Syllabics) (iu-Cans) + SubLangInuktitutSyllabics + // Inuktitut (Syllabics) Canada (iu-Cans-CA) + SubLangInuktitutSyllabicsCanada + // Irish Ireland (ga-IE) + SubLangIrishIreland + // Italian Italy (it-IT) + SubLangItalianItaly + // Italian Switzerland (it-CH) + SubLangItalianSwitzerland + // Japanese Japan (ja-JP) + SubLangJapaneseJapan + // Kannada India (kn-IN) + SubLangKannadaIndia + // Kanuri (Latin) Nigeria (kr-Latn-NG) + SubLangKanuriLatinNigeria + // Kashmiri Perso-Arabic (ks-Arab) + SubLangKashmiriPersoArabic + // Kashmiri (Devanagari) India (ks-Deva-IN) + SubLangKashmiriDevanagariIndia + // Kazakh Kazakhstan (kk-KZ) + SubLangKazakhKazakhstan + // Khmer Cambodia (km-KH) + SubLangKhmerCambodia + // K'iche Guatemala (quc-Latn-GT) + SubLangKicheGuatemala + // Kinyarwanda Rwanda (rw-RW) + SubLangKinyarwandaRwanda + // Kiswahili Kenya (sw-KE) + SubLangKiswahiliKenya + // Konkani India (kok-IN) + SubLangKonkaniIndia + // Korean Korea (ko-KR) + SubLangKoreanKorea + // Kyrgyz Kyrgyzstan (ky-KG) + SubLangKyrgyzKyrgyzstan + // Lao Lao P.d.r. 
(lo-LA) + SubLangLaoLaoPdr + // Latin Vatican City (la-VA) + SubLangLatinVaticanCity + // Latvian Latvia (lv-LV) + SubLangLatvianLatvia + // Lithuanian Lithuania (lt-LT) + SubLangLithuanianLithuania + // Lower Sorbian Germany (dsb-DE) + SubLangLowerSorbianGermany + // Luxembourgish Luxembourg (lb-LU) + SubLangLuxembourgishLuxembourg + // Macedonian North Macedonia (mk-MK) + SubLangMacedonianNorthMacedonia + // Malay Brunei Darussalam (ms-BN) + SubLangMalayBruneiDarussalam + // Malay Malaysia (ms-MY) + SubLangMalayMalaysia + // Malayalam India (ml-IN) + SubLangMalayalamIndia + // Maltese Malta (mt-MT) + SubLangMalteseMalta + // Maori New Zealand (mi-NZ) + SubLangMaoriNewZealand + // Mapudungun Chile (arn-CL) + SubLangMapudungunChile + // Marathi India (mr-IN) + SubLangMarathiIndia + // Mohawk Canada (moh-CA) + SubLangMohawkCanada + // Mongolian (Cyrillic) (mn-Cyrl) + SubLangMongolianCyrillic + // Mongolian (Cyrillic) Mongolia (mn-MN) + SubLangMongolianCyrillicMongolia + // Mongolian (Traditional Mongolian) (mn-Mong) + SubLangMongolianTraditionalMongolian + // Mongolian (Traditional Mongolian) People's Republic Of China (mn-MongCN) + SubLangMongolianTraditionalMongolianPeoplesRepublicOfChina + // Mongolian (Traditional Mongolian) Mongolia (mn-MongMN) + SubLangMongolianTraditionalMongolianMongolia + // Nepali India (ne-IN) + SubLangNepaliIndia + // Nepali Nepal (ne-NP) + SubLangNepaliNepal + // Norwegian (Bokmal) Norway (nb-NO) + SubLangNorwegianBokmalNorway + // Norwegian (Nynorsk) Norway (nn-NO) + SubLangNorwegianNynorskNorway + // Occitan France (oc-FR) + SubLangOccitanFrance + // Odia India (or-IN) + SubLangOdiaIndia + // Oromo Ethiopia (om-ET) + SubLangOromoEthiopia + // Pashto Afghanistan (ps-AF) + SubLangPashtoAfghanistan + // Persian Iran (fa-IR) + SubLangPersianIran + // Polish Poland (pl-PL) + SubLangPolishPoland + // Portuguese Brazil (pt-BR) + SubLangPortugueseBrazil + // Portuguese Portugal (pt-PT) + SubLangPortuguesePortugal + // Pseudo Language Pseudo Locale For East Asian/Complex Script Localization Testing (qps-ploca) + SubLangPseudoLanguagePseudoLocaleForEastAsianComplexScriptLocalizationTesting + // Pseudo Language Pseudo Locale Used For Localization Testing (qps-ploc) + SubLangPseudoLanguagePseudoLocaleUsedForLocalizationTesting + // Pseudo Language Pseudo Locale Used For Localization Testing Of Mirrored Locales (qps-plocm) + SubLangPseudoLanguagePseudoLocaleUsedForLocalizationTestingOfMirroredLocales + // Punjabi (pa-Arab) + SubLangPunjabi + // Punjabi India (pa-IN) + SubLangPunjabiIndia + // Punjabi Islamic Republic Of Pakistan (pa-Arab-PK) + SubLangPunjabiIslamicRepublicOfPakistan + // Quechua Bolivia (quz-BO) + SubLangQuechuaBolivia + // Quechua Ecuador (quz-EC) + SubLangQuechuaEcuador + // Quechua Peru (quz-PE) + SubLangQuechuaPeru + // Romanian Moldova (ro-MD) + SubLangRomanianMoldova + // Romanian Romania (ro-RO) + SubLangRomanianRomania + // Romansh Switzerland (rm-CH) + SubLangRomanshSwitzerland + // Russian Moldova (ru-MD) + SubLangRussianMoldova + // Russian Russia (ru-RU) + SubLangRussianRussia + // Sakha Russia (sah-RU) + SubLangSakhaRussia + // Sami (Inari) Finland (smn-FI) + SubLangSamiInariFinland + // Sami (Lule) Norway (smj-NO) + SubLangSamiLuleNorway + // Sami (Lule) Sweden (smj-SE) + SubLangSamiLuleSweden + // Sami (Northern) Finland (se-FI) + SubLangSamiNorthernFinland + // Sami (Northern) Norway (se-NO) + SubLangSamiNorthernNorway + // Sami (Northern) Sweden (se-SE) + SubLangSamiNorthernSweden + // Sami (Skolt) Finland (sms-FI) + 
SubLangSamiSkoltFinland + // Sami (Southern) Norway (sma-NO) + SubLangSamiSouthernNorway + // Sami (Southern) Sweden (sma-SE) + SubLangSamiSouthernSweden + // Sanskrit India (sa-IN) + SubLangSanskritIndia + // Scottish Gaelic United Kingdom (gd-GB) + SubLangScottishGaelicUnitedKingdom + // Serbian (Cyrillic) (sr-Cyrl) + SubLangSerbianCyrillic + // Serbian (Cyrillic) Bosnia And Herzegovina (sr-Cyrl-BA) + SubLangSerbianCyrillicBosniaAndHerzegovina + // Serbian (Cyrillic) Montenegro (sr-Cyrl-ME) + SubLangSerbianCyrillicMontenegro + // Serbian (Cyrillic) Serbia (sr-Cyrl-RS) + SubLangSerbianCyrillicSerbia + // Serbian (Cyrillic) Serbia And Montenegro (Former) (sr-Cyrl-CS) + SubLangSerbianCyrillicSerbiaAndMontenegroFormer + // Serbian (Latin) (sr-Latn) + SubLangSerbianLatin + // Serbian (Latin) Bosnia And Herzegovina (sr-Latn-BA) + SubLangSerbianLatinBosniaAndHerzegovina + // Serbian (Latin) Montenegro (sr-Latn-ME) + SubLangSerbianLatinMontenegro + // Serbian (Latin) Serbia (sr-Latn-RS) + SubLangSerbianLatinSerbia + // Serbian (Latin) Serbia And Montenegro (Former) (sr-Latn-CS) + SubLangSerbianLatinSerbiaAndMontenegroFormer + // Sesotho Sa Leboa South Africa (nso-ZA) + SubLangSesothoSaLeboaSouthAfrica + // Setswana Botswana (tn-BW) + SubLangSetswanaBotswana + // Setswana South Africa (tn-ZA) + SubLangSetswanaSouthAfrica + // Sindhi (sd-Arab) + SubLangSindhi + // Sindhi Islamic Republic Of Pakistan (sd-Arab-PK) + SubLangSindhiIslamicRepublicOfPakistan + // Sinhala Sri Lanka (si-LK) + SubLangSinhalaSriLanka + // Slovak Slovakia (sk-SK) + SubLangSlovakSlovakia + // Slovenian Slovenia (sl-SI) + SubLangSlovenianSlovenia + // Somali Somalia (so-SO) + SubLangSomaliSomalia + // Sotho South Africa (st-ZA) + SubLangSothoSouthAfrica + // Spanish Argentina (es-AR) + SubLangSpanishArgentina + // Spanish Bolivarian Republic Of Venezuela (es-VE) + SubLangSpanishBolivarianRepublicOfVenezuela + // Spanish Bolivia (es-BO) + SubLangSpanishBolivia + // Spanish Chile (es-CL) + SubLangSpanishChile + // Spanish Colombia (es-CO) + SubLangSpanishColombia + // Spanish Costa Rica (es-CR) + SubLangSpanishCostaRica + // Spanish Cuba (es-CU) + SubLangSpanishCuba + // Spanish Dominican Republic (es-DO) + SubLangSpanishDominicanRepublic + // Spanish Ecuador (es-EC) + SubLangSpanishEcuador + // Spanish El Salvador (es-SV) + SubLangSpanishElSalvador + // Spanish Guatemala (es-GT) + SubLangSpanishGuatemala + // Spanish Honduras (es-HN) + SubLangSpanishHonduras + // Spanish Latin America (es-419) + SubLangSpanishLatinAmerica + // Spanish Mexico (es-MX) + SubLangSpanishMexico + // Spanish Nicaragua (es-NI) + SubLangSpanishNicaragua + // Spanish Panama (es-PA) + SubLangSpanishPanama + // Spanish Paraguay (es-PY) + SubLangSpanishParaguay + // Spanish Peru (es-PE) + SubLangSpanishPeru + // Spanish Puerto Rico (es-PR) + SubLangSpanishPuertoRico + // Spanish Spain (es-ES_tradnl) + SubLangSpanishSpainTraditional + // Spanish Spain (es-ES) + SubLangSpanishSpain + // Spanish United States (es-US) + SubLangSpanishUnitedStates + // Spanish Uruguay (es-UY) + SubLangSpanishUruguay + // Swedish Finland (sv-FI) + SubLangSwedishFinland + // Swedish Sweden (sv-SE) + SubLangSwedishSweden + // Syriac Syria (syr-SY) + SubLangSyriacSyria + // Tajik (Cyrillic) (tg-Cyrl) + SubLangTajikCyrillic + // Tajik (Cyrillic) Tajikistan (tg-Cyrl-TJ) + SubLangTajikCyrillicTajikistan + // Tamazight (Latin) (tzm-Latn) + SubLangTamazightLatin + // Tamazight (Latin) Algeria (tzm-Latn-DZ) + SubLangTamazightLatinAlgeria + // Tamil India (ta-IN) + SubLangTamilIndia + // 
Tamil Sri Lanka (ta-LK) + SubLangTamilSriLanka + // Tatar Russia (tt-RU) + SubLangTatarRussia + // Telugu India (te-IN) + SubLangTeluguIndia + // Thai Thailand (th-TH) + SubLangThaiThailand + // Tibetan People's Republic Of China (bo-CN) + SubLangTibetanPeoplesRepublicOfChina + // Tigrinya Eritrea (ti-ER) + SubLangTigrinyaEritrea + // Tigrinya Ethiopia (ti-ET) + SubLangTigrinyaEthiopia + // Tsonga South Africa (ts-ZA) + SubLangTsongaSouthAfrica + // Turkish Turkey (tr-TR) + SubLangTurkishTurkey + // Turkmen Turkmenistan (tk-TM) + SubLangTurkmenTurkmenistan + // Ukrainian Ukraine (uk-UA) + SubLangUkrainianUkraine + // Upper Sorbian Germany (hsb-DE) + SubLangUpperSorbianGermany + // Urdu India (ur-IN) + SubLangUrduIndia + // Urdu Islamic Republic Of Pakistan (ur-PK) + SubLangUrduIslamicRepublicOfPakistan + // Uyghur People's Republic Of China (ug-CN) + SubLangUyghurPeoplesRepublicOfChina + // Uzbek (Cyrillic) (uz-Cyrl) + SubLangUzbekCyrillic + // Uzbek (Cyrillic) Uzbekistan (uz-Cyrl-UZ) + SubLangUzbekCyrillicUzbekistan + // Uzbek (Latin) (uz-Latn) + SubLangUzbekLatin + // Uzbek (Latin) Uzbekistan (uz-Latn-UZ) + SubLangUzbekLatinUzbekistan + // Valencian Spain (ca-ESvalencia) + SubLangValencianSpain + // Venda South Africa (ve-ZA) + SubLangVendaSouthAfrica + // Vietnamese Vietnam (vi-VN) + SubLangVietnameseVietnam + // Welsh United Kingdom (cy-GB) + SubLangWelshUnitedKingdom + // Wolof Senegal (wo-SN) + SubLangWolofSenegal + // Xhosa South Africa (xh-ZA) + SubLangXhosaSouthAfrica + // Yi People's Republic Of China (ii-CN) + SubLangYiPeoplesRepublicOfChina + // Yiddish World (yi-001) + SubLangYiddishWorld + // Yoruba Nigeria (yo-NG) + SubLangYorubaNigeria + // Zulu South Africa (zu-ZA) + SubLangZuluSouthAfrica +) + +const ( + maxAllowedEntries = 0x1000 +) + +// Predefined Resource Types. +const ( + RTCursor ResourceType = iota + 1 // Hardware-dependent cursor resource. + RTBitmap = 2 // Bitmap resource. + RTIcon = 3 // Hardware-dependent icon resource. + RTMenu = 4 // Menu resource. + RTDialog = 5 // Dialog box. + RTString = 6 // String-table entry. + RTFontDir = 7 // Font directory resource. + RTFont = 8 // Font resource. + RTAccelerator = 9 // Accelerator table. + RTRCdata = 10 // Application-defined resource (raw data). + RTMessageTable = 11 // Message-table entry. + RTGroupCursor = RTCursor + 11 // Hardware-independent cursor resource. + RTGroupIcon = RTIcon + 11 // Hardware-independent icon resource. + RTVersion = 16 // Version resource. + RTDlgInclude = 17 // Dialog include entry. + RTPlugPlay = 19 // Plug and Play resource. + RTVxD = 20 // VXD. + RTAniCursor = 21 // Animated cursor. + RTAniIcon = 22 // Animated icon. + RTHtml = 23 // HTML resource. + RTManifest = 24 // Side-by-Side Assembly Manifest. +) + +// ImageResourceDirectory represents the IMAGE_RESOURCE_DIRECTORY. +// This data structure should be considered the heading of a table because the +// table actually consists of directory entries. +type ImageResourceDirectory struct { + // Resource flags. This field is reserved for future use. It is currently + // set to zero. + Characteristics uint32 `json:"characteristics"` + + // The time that the resource data was created by the resource compiler. + TimeDateStamp uint32 `json:"time_date_stamp"` + + // The major version number, set by the user. + MajorVersion uint16 `json:"major_version"` + + // The minor version number, set by the user. 
+	MinorVersion uint16 `json:"minor_version"`
+
+	// The number of directory entries immediately following the table that use
+	// strings to identify Type, Name, or Language entries (depending on the
+	// level of the table).
+	NumberOfNamedEntries uint16 `json:"number_of_named_entries"`
+
+	// The number of directory entries immediately following the Name entries
+	// that use numeric IDs for Type, Name, or Language entries.
+	NumberOfIDEntries uint16 `json:"number_of_id_entries"`
+}
+
+// ImageResourceDirectoryEntry represents an entry in the resource directory
+// entries.
+type ImageResourceDirectoryEntry struct {
+	// Name is used to identify either a type of resource, a resource name, or a
+	// resource's language ID.
+	Name uint32 `json:"name"`
+
+	// OffsetToData is always used to point to a sibling in the tree, either a
+	// directory node or a leaf node.
+	OffsetToData uint32 `json:"offset_to_data"`
+}
+
+// ImageResourceDataEntry describes an actual unit of raw data in the
+// Resource Data area.
+type ImageResourceDataEntry struct {
+	// The address of a unit of resource data in the Resource Data area.
+	OffsetToData uint32 `json:"offset_to_data"`
+
+	// The size, in bytes, of the resource data that is pointed to by the Data
+	// RVA field.
+	Size uint32 `json:"size"`
+
+	// The code page that is used to decode code point values within the
+	// resource data. Typically, the code page would be the Unicode code page.
+	CodePage uint32 `json:"code_page"`
+
+	// Reserved, must be 0.
+	Reserved uint32 `json:"reserved"`
+}
+
+// ResourceDirectory represents resource directory information.
+type ResourceDirectory struct {
+	// IMAGE_RESOURCE_DIRECTORY structure.
+	Struct ImageResourceDirectory `json:"struct"`
+
+	// List of entries.
+	Entries []ResourceDirectoryEntry `json:"entries"`
+}
+
+// ResourceDirectoryEntry represents a resource directory entry.
+type ResourceDirectoryEntry struct {
+	// IMAGE_RESOURCE_DIRECTORY_ENTRY structure.
+	Struct ImageResourceDirectoryEntry `json:"struct"`
+
+	// If the resource is identified by name this attribute will contain the
+	// name string. Empty string otherwise. If identified by id, the id is
+	// available at `ID` field.
+	Name string `json:"name"`
+
+	// The resource identifier.
+	ID uint32 `json:"id"`
+
+	// IsResourceDir tells us if the entry is pointing to a resource directory
+	// or a resource data entry.
+	IsResourceDir bool `json:"is_resource_dir"`
+
+	// If this entry has a lower level directory this attribute will point to
+	// the ResourceDirData instance representing it.
+	Directory ResourceDirectory `json:"directory"`
+
+	// If this entry has no further lower directories and points to the actual
+	// resource data, this attribute will reference the corresponding
+	// ResourceDataEntry instance.
+	Data ResourceDataEntry `json:"data"`
+}
+
+// ResourceDataEntry represents a resource data entry.
+type ResourceDataEntry struct {
+	// IMAGE_RESOURCE_DATA_ENTRY structure.
+	Struct ImageResourceDataEntry `json:"struct"`
+
+	// Primary language ID.
+	Lang ResourceLang `json:"lang"`
+
+	// Sub language ID.
+ SubLang ResourceSubLang `json:"sub_lang"` +} + +func (pe *File) parseResourceDataEntry(rva uint32) ImageResourceDataEntry { + dataEntry := ImageResourceDataEntry{} + dataEntrySize := uint32(binary.Size(dataEntry)) + offset := pe.GetOffsetFromRva(rva) + err := pe.structUnpack(&dataEntry, offset, dataEntrySize) + if err != nil { + pe.logger.Warnf("Error parsing a resource directory data entry, the RVA is invalid") + } + return dataEntry +} + +func (pe *File) parseResourceDirectoryEntry(rva uint32) *ImageResourceDirectoryEntry { + resource := ImageResourceDirectoryEntry{} + resourceSize := uint32(binary.Size(resource)) + offset := pe.GetOffsetFromRva(rva) + err := pe.structUnpack(&resource, offset, resourceSize) + if err != nil { + return nil + } + + if resource == (ImageResourceDirectoryEntry{}) { + return nil + } + + // resource.NameOffset = resource.Name & 0x7FFFFFFF + + // resource.__pad = resource.Name & 0xFFFF0000 + // resource.Id = resource.Name & 0x0000FFFF + + // resource.DataIsDirectory = (resource.OffsetToData & 0x80000000) >> 31 + // resource.OffsetToDirectory = resource.OffsetToData & 0x7FFFFFFF + + return &resource +} + +// Navigating the resource directory hierarchy is like navigating a hard disk. +// There's a master directory (the root directory), which has subdirectories. +// The subdirectories have subdirectories of their own that may point to the +// raw resource data for things like dialog templates. +func (pe *File) doParseResourceDirectory(rva, size, baseRVA, level uint32, + dirs []uint32) (ResourceDirectory, error) { + + resourceDir := ImageResourceDirectory{} + resourceDirSize := uint32(binary.Size(resourceDir)) + offset := pe.GetOffsetFromRva(rva) + err := pe.structUnpack(&resourceDir, offset, resourceDirSize) + if err != nil { + return ResourceDirectory{}, err + } + + if baseRVA == 0 { + baseRVA = rva + } + + if len(dirs) == 0 { + dirs = append(dirs, rva) + } + + // Advance the RVA to the position immediately following the directory + // table header and pointing to the first entry in the table. + rva += resourceDirSize + + numberOfEntries := int(resourceDir.NumberOfNamedEntries + + resourceDir.NumberOfIDEntries) + var dirEntries []ResourceDirectoryEntry + + // Set a hard limit on the maximum reasonable number of entries. + if numberOfEntries > maxAllowedEntries { + pe.logger.Warnf(`Error parsing the resources directory. + The directory contains %d entries`, numberOfEntries) + return ResourceDirectory{}, nil + } + + for i := 0; i < numberOfEntries; i++ { + res := pe.parseResourceDirectoryEntry(rva) + if res == nil { + pe.logger.Warn("Error parsing a resource directory entry, the RVA is invalid") + break + } + + nameIsString := (res.Name & 0x80000000) >> 31 + entryName := "" + entryID := uint32(0) + if nameIsString == 0 { + entryID = res.Name + } else { + nameOffset := res.Name & 0x7FFFFFFF + uStringOffset := pe.GetOffsetFromRva(baseRVA + nameOffset) + maxLen, err := pe.ReadUint16(uStringOffset) + if err != nil { + break + } + entryName = pe.readUnicodeStringAtRVA(baseRVA+nameOffset+2, + uint32(maxLen*2)) + } + + // A directory entry points to either another resource directory or to + // the data for an individual resource. When the directory entry points + // to another resource directory, the high bit of the second DWORD in + // the structure is set and the remaining 31 bits are an offset to the + // resource directory. 
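+		// For example (illustrative): a raw OffsetToData of 0x80000148 has the
+		// high bit set, so the entry points to a subdirectory located 0x148
+		// bytes from the start of the resource section, whereas 0x00000148
+		// would point directly at an IMAGE_RESOURCE_DATA_ENTRY at that offset.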
+		dataIsDirectory := (res.OffsetToData & 0x80000000) >> 31
+
+		// The offset is relative to the beginning of the resource section,
+		// not an RVA.
+		OffsetToDirectory := res.OffsetToData & 0x7FFFFFFF
+		if dataIsDirectory > 0 {
+			// One trick malware can do is to recursively reference
+			// the next directory. This causes hilarity to ensue when
+			// trying to parse everything correctly.
+			// If the original RVA given to this function is equal to
+			// the next one to parse, we assume that it's a trick.
+			// Instead of raising a PEFormatError this would skip some
+			// reasonable data so we just break.
+			// 9ee4d0a0caf095314fd7041a3e4404dc is the offending sample.
+			if intInSlice(baseRVA+OffsetToDirectory, dirs) {
+				break
+			}
+
+			level++
+			dirs = append(dirs, baseRVA+OffsetToDirectory)
+			directoryEntry, _ := pe.doParseResourceDirectory(
+				baseRVA+OffsetToDirectory,
+				size-(rva-baseRVA),
+				baseRVA,
+				level,
+				dirs)
+
+			dirEntries = append(dirEntries, ResourceDirectoryEntry{
+				Struct:        *res,
+				Name:          entryName,
+				ID:            entryID,
+				IsResourceDir: true,
+				Directory:     directoryEntry})
+		} else {
+			// The entry is a data entry rather than a subdirectory.
+			dataEntryStruct := pe.parseResourceDataEntry(baseRVA +
+				OffsetToDirectory)
+			entryData := ResourceDataEntry{
+				Struct:  dataEntryStruct,
+				Lang:    ResourceLang(res.Name & 0x3ff),
+				SubLang: ResourceSubLang(res.Name >> 10),
+			}
+
+			dirEntries = append(dirEntries, ResourceDirectoryEntry{
+				Struct:        *res,
+				Name:          entryName,
+				ID:            entryID,
+				IsResourceDir: false,
+				Data:          entryData})
+		}
+
+		rva += uint32(binary.Size(res))
+	}
+
+	return ResourceDirectory{
+		Struct:  resourceDir,
+		Entries: dirEntries,
+	}, nil
+}
+
+// The resource directory contains resources like dialog templates, icons,
+// and bitmaps. The resources are found in a section called .rsrc.
+func (pe *File) parseResourceDirectory(rva, size uint32) error {
+	var dirs []uint32
+	Resources, err := pe.doParseResourceDirectory(rva, size, 0, 0, dirs)
+	if err != nil {
+		return err
+	}
+
+	pe.Resources = Resources
+	pe.HasResource = true
+	return err
+}
+
+// String stringifies the resource type.
+func (rt ResourceType) String() string {
+
+	rsrcTypeMap := map[ResourceType]string{
+		RTCursor:       "Cursor",
+		RTBitmap:       "Bitmap",
+		RTIcon:         "Icon",
+		RTMenu:         "Menu",
+		RTDialog:       "Dialog box",
+		RTString:       "String",
+		RTFontDir:      "Font directory",
+		RTFont:         "Font",
+		RTAccelerator:  "Accelerator",
+		RTRCdata:       "RC Data",
+		RTMessageTable: "Message Table",
+		RTGroupCursor:  "Group Cursor",
+		RTGroupIcon:    "Group Icon",
+		RTVersion:      "Version",
+		RTDlgInclude:   "Dialog Include",
+		RTPlugPlay:     "Plug & Play",
+		RTVxD:          "VxD",
+		RTAniCursor:    "Animated Cursor",
+		RTAniIcon:      "Animated Icon",
+		RTHtml:         "HTML",
+		RTManifest:     "Manifest",
+	}
+
+	if val, ok := rsrcTypeMap[rt]; ok {
+		return val
+	}
+
+	return "?"
+}
+
+// String stringifies the resource language.
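+//
+// For example (illustrative): ResourceLang(0x0009).String() returns
+// "English (en)", and any value missing from the map below falls through
+// to "?".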
+func (lang ResourceLang) String() string { + + rsrcLangMap := map[ResourceLang]string{ + LangAfrikaans: "Afrikaans (af)", + LangAlbanian: "Albanian (sq)", + LangAlsatian: "Alsatian (gsw)", + LangAmharic: "Amharic (am)", + LangArabic: "Arabic (ar)", + LangArmenian: "Armenian (hy)", + LangAssamese: "Assamese (as)", + LangAzerbaijaniLatin: "Azerbaijani (Latin) (az)", + LangBangla: "Bangla (bn)", + LangBashkir: "Bashkir (ba)", + LangBasque: "Basque (eu)", + LangBelarusian: "Belarusian (be)", + LangBosnianLatin: "Bosnian (Latin) (bs)", + LangBreton: "Breton (br)", + LangBulgarian: "Bulgarian (bg)", + LangBurmese: "Burmese (my)", + LangCatalan: "Catalan (ca)", + LangCentralKurdish: "Central Kurdish (ku)", + LangCherokee: "Cherokee (chr)", + LangChineseSimplified: "Chinese (Simplified) (zh)", + LangCorsican: "Corsican (co)", + LangCroatian: "Croatian (hr)", + LangCzech: "Czech (cs)", + LangDanish: "Danish (da)", + LangDari: "Dari (prs)", + LangDivehi: "Divehi (dv)", + LangDutch: "Dutch (nl)", + LangEnglish: "English (en)", + LangEstonian: "Estonian (et)", + LangFaroese: "Faroese (fo)", + LangFilipino: "Filipino (fil)", + LangFinnish: "Finnish (fi)", + LangFrench: "French (fr)", + LangFrisian: "Frisian (fy)", + LangFulah: "Fulah (ff)", + LangFulahLatin: "Fulah (Latin) (ff-Latn)", + LangGalician: "Galician (gl)", + LangGeorgian: "Georgian (ka)", + LangGerman: "German (de)", + LangGreek: "Greek (el)", + LangGreenlandic: "Greenlandic (kl)", + LangGuarani: "Guarani (gn)", + LangGujarati: "Gujarati (gu)", + LangHausaLatin: "Hausa (Latin) (ha)", + LangHawaiian: "Hawaiian (haw)", + LangHebrew: "Hebrew (he)", + LangHindi: "Hindi (hi)", + LangHungarian: "Hungarian (hu)", + LangIcelandic: "Icelandic (is)", + LangIgbo: "Igbo (ig)", + LangIndonesian: "Indonesian (id)", + LangInuktitutLatin: "Inuktitut (Latin) (iu)", + LangIrish: "Irish (ga)", + LangItalian: "Italian (it)", + LangJapanese: "Japanese (ja)", + LangKannada: "Kannada (kn)", + LangKashmiri: "Kashmiri (ks)", + LangKazakh: "Kazakh (kk)", + LangKhmer: "Khmer (km)", + LangKiche: "K'iche (quc)", + LangKinyarwanda: "Kinyarwanda (rw)", + LangKiswahili: "Kiswahili (sw)", + LangKonkani: "Konkani (kok)", + LangKorean: "Korean (ko)", + LangKyrgyz: "Kyrgyz (ky)", + LangLao: "Lao (lo)", + LangLatvian: "Latvian (lv)", + LangLithuanian: "Lithuanian (lt)", + LangLowerSorbian: "Lower Sorbian (dsb)", + LangLuxembourgish: "Luxembourgish (lb)", + LangMacedonian: "Macedonian (mk)", + LangMalay: "Malay (ms)", + LangMalayalam: "Malayalam (ml)", + LangMaltese: "Maltese (mt)", + LangMaori: "Maori (mi)", + LangMapudungun: "Mapudungun (arn)", + LangMarathi: "Marathi (mr)", + LangMohawk: "Mohawk (moh)", + LangMongolianCyrillic: "Mongolian (Cyrillic) (mn)", + LangNepali: "Nepali (ne)", + LangNorwegianBokmalNo: "Norwegian (Bokmal) (no)", + LangNorwegianBokmal: "Norwegian (Bokmal) (nb)", + LangNorwegianNynorsk: "Norwegian (Nynorsk) (nn)", + LangOccitan: "Occitan (oc)", + LangOdia: "Odia (or)", + LangOromo: "Oromo (om)", + LangPashto: "Pashto (ps)", + LangPersian: "Persian (fa)", + LangPolish: "Polish (pl)", + LangPortuguese: "Portuguese (pt)", + LangPunjabi: "Punjabi (pa)", + LangQuechua: "Quechua (quz)", + LangRomanian: "Romanian (ro)", + LangRomansh: "Romansh (rm)", + LangRussian: "Russian (ru)", + LangSakha: "Sakha (sah)", + LangSamiInari: "Sami (Inari) (smn)", + LangSamiLule: "Sami (Lule) (smj)", + LangSamiNorthern: "Sami (Northern) (se)", + LangSamiSkolt: "Sami (Skolt) (sms)", + LangSamiSouthern: "Sami (Southern) (sma)", + LangSanskrit: "Sanskrit (sa)", + 
LangScottishGaelic: "Scottish Gaelic (gd)", + LangSerbianLatin: "Serbian (Latin) (sr)", + LangSesothoSaLeboa: "Sesotho Sa Leboa (nso)", + LangSetswana: "Setswana (tn)", + LangSindhi: "Sindhi (sd)", + LangSinhala: "Sinhala (si)", + LangSlovak: "Slovak (sk)", + LangSlovenian: "Slovenian (sl)", + LangSomali: "Somali (so)", + LangSotho: "Sotho (st)", + LangSpanish: "Spanish (es)", + LangSwedish: "Swedish (sv)", + LangSyriac: "Syriac (syr)", + LangTajikCyrillic: "Tajik (Cyrillic) (tg)", + LangTamazightLatin: "Tamazight (Latin) (tzm)", + LangTamil: "Tamil (ta)", + LangTatar: "Tatar (tt)", + LangTelugu: "Telugu (te)", + LangThai: "Thai (th)", + LangTibetan: "Tibetan (bo)", + LangTigrinya: "Tigrinya (ti)", + LangTsonga: "Tsonga (ts)", + LangTurkish: "Turkish (tr)", + LangTurkmen: "Turkmen (tk)", + LangUkrainian: "Ukrainian (uk)", + LangUpperSorbian: "Upper Sorbian (hsb)", + LangUrdu: "Urdu (ur)", + LangUyghur: "Uyghur (ug)", + LangUzbekLatin: "Uzbek (Latin) (uz)", + LangVenda: "Venda (ve)", + LangVietnamese: "Vietnamese (vi)", + LangWelsh: "Welsh (cy)", + LangWolof: "Wolof (wo)", + LangXhosa: "Xhosa (xh)", + LangYi: "Yi (ii)", + LangYoruba: "Yoruba (yo)", + LangZulu: "Zulu (zu)", + } + + if val, ok := rsrcLangMap[lang]; ok { + return val + } + + return "?" +} + +// String stringify the resource sub language. +func (subLang ResourceSubLang) String() string { + + rsrcSubLangMap := map[ResourceSubLang]string{ + SubLangAfrikaansSouthAfrica: "Afrikaans South Africa (af-ZA)", + SubLangAlbanianAlbania: "Albanian Albania (sq-AL)", + SubLangAlsatianFrance: "Alsatian France (gsw-FR)", + SubLangAmharicEthiopia: "Amharic Ethiopia (am-ET)", + SubLangArabicAlgeria: "Arabic Algeria (ar-DZ)", + SubLangArabicBahrain: "Arabic Bahrain (ar-BH)", + SubLangArabicEgypt: "Arabic Egypt (ar-EG)", + SubLangArabicIraq: "Arabic Iraq (ar-IQ)", + SubLangArabicJordan: "Arabic Jordan (ar-JO)", + SubLangArabicKuwait: "Arabic Kuwait (ar-KW)", + SubLangArabicLebanon: "Arabic Lebanon (ar-LB)", + SubLangArabicLibya: "Arabic Libya (ar-LY)", + SubLangArabicMorocco: "Arabic Morocco (ar-MA)", + SubLangArabicOman: "Arabic Oman (ar-OM)", + SubLangArabicQatar: "Arabic Qatar (ar-QA)", + SubLangArabicSaudiArabia: "Arabic Saudi Arabia (ar-SA)", + SubLangArabicSyria: "Arabic Syria (ar-SY)", + SubLangArabicTunisia: "Arabic Tunisia (ar-TN)", + SubLangArabicUae: "Arabic U.a.e. 
(ar-AE)", + SubLangArabicYemen: "Arabic Yemen (ar-YE)", + SubLangArmenianArmenia: "Armenian Armenia (hy-AM)", + SubLangAssameseIndia: "Assamese India (as-IN)", + SubLangAzerbaijaniCyrillic: "Azerbaijani (Cyrillic) (az-Cyrl)", + SubLangAzerbaijaniCyrillicAzerbaijan: "Azerbaijani (Cyrillic) Azerbaijan (az-Cyrl-AZ)", + SubLangAzerbaijaniLatin: "Azerbaijani (Latin) (az-Latn)", + SubLangAzerbaijaniLatinAzerbaijan: "Azerbaijani (Latin) Azerbaijan (az-Latn-AZ)", + SubLangBanglaBangladesh: "Bangla Bangladesh (bn-BD)", + SubLangBanglaIndia: "Bangla India (bn-IN)", + SubLangBashkirRussia: "Bashkir Russia (ba-RU)", + SubLangBasqueSpain: "Basque Spain (eu-ES)", + SubLangBelarusianBelarus: "Belarusian Belarus (be-BY)", + SubLangBosnianCyrillic: "Bosnian (Cyrillic) (bs-Cyrl)", + SubLangBosnianCyrillicBosniaAndHerzegovina: "Bosnian (Cyrillic) Bosnia And Herzegovina (bs-Cyrl-BA)", + SubLangBosnianLatin: "Bosnian (Latin) (bs-Latn)", + SubLangBosnianLatinBosniaAndHerzegovina: "Bosnian (Latin) Bosnia And Herzegovina (bs-Latn-BA)", + SubLangBretonFrance: "Breton France (br-FR)", + SubLangBulgarianBulgaria: "Bulgarian Bulgaria (bg-BG)", + SubLangBurmeseMyanmar: "Burmese Myanmar (my-MM)", + SubLangCatalanSpain: "Catalan Spain (ca-ES)", + SubLangCentralAtlasTamazightArabicMorocco: "Central Atlas Tamazight (Arabic) Morocco (tzm-ArabMA)", + SubLangCentralKurdish: "Central Kurdish (ku-Arab)", + SubLangCentralKurdishIraq: "Central Kurdish Iraq (ku-Arab-IQ)", + SubLangCherokee: "Cherokee (chr-Cher)", + SubLangCherokeeUnitedStates: "Cherokee United States (chr-Cher-US)", + SubLangChineseSimplified: "Chinese (Simplified) (zh-Hans)", + SubLangChineseSimplifiedPeoplesRepublicOfChina: "Chinese (Simplified) People's Republic Of China (zh-CN)", + SubLangChineseSimplifiedSingapore: "Chinese (Simplified) Singapore (zh-SG)", + SubLangChineseTraditional: "Chinese (Traditional) (zh-Hant)", + SubLangChineseTraditionalHongKongSar: "Chinese (Traditional) Hong Kong S.a.r. (zh-HK)", + SubLangChineseTraditionalMacaoSar: "Chinese (Traditional) Macao S.a.r. 
(zh-MO)", + SubLangChineseTraditionalTaiwan: "Chinese (Traditional) Taiwan (zh-TW)", + SubLangCorsicanFrance: "Corsican France (co-FR)", + SubLangCroatianCroatia: "Croatian Croatia (hr-HR)", + SubLangCroatianLatinBosniaAndHerzegovina: "Croatian (Latin) Bosnia And Herzegovina (hr-BA)", + SubLangCzechCzechRepublic: "Czech Czech Republic (cs-CZ)", + SubLangDanishDenmark: "Danish Denmark (da-DK)", + SubLangDariAfghanistan: "Dari Afghanistan (prs-AF)", + SubLangDivehiMaldives: "Divehi Maldives (dv-MV)", + SubLangDutchBelgium: "Dutch Belgium (nl-BE)", + SubLangDutchNetherlands: "Dutch Netherlands (nl-NL)", + SubLangDzongkhaBhutan: "Dzongkha Bhutan (dz-BT)", + SubLangEnglishAustralia: "English Australia (en-AU)", + SubLangEnglishBelize: "English Belize (en-BZ)", + SubLangEnglishCanada: "English Canada (en-CA)", + SubLangEnglishCaribbean: "English Caribbean (en-029)", + SubLangEnglishHongKong: "English Hong Kong (en-HK)", + SubLangEnglishIndia: "English India (en-IN)", + SubLangEnglishIreland: "English Ireland (en-IE)", + SubLangEnglishJamaica: "English Jamaica (en-JM)", + SubLangEnglishMalaysia: "English Malaysia (en-MY)", + SubLangEnglishNewZealand: "English New Zealand (en-NZ)", + SubLangEnglishRepublicOfThePhilippines: "English Republic Of The Philippines (en-PH)", + SubLangEnglishSingapore: "English Singapore (en-SG)", + SubLangEnglishSouthAfrica: "English South Africa (en-ZA)", + SubLangEnglishTrinidadAndTobago: "English Trinidad And Tobago (en-TT)", + SubLangEnglishUnitedArabEmirates: "English United Arab Emirates (en-AE)", + SubLangEnglishUnitedKingdom: "English United Kingdom (en-GB)", + SubLangEnglishUnitedStates: "English United States (en-US)", + SubLangEnglishZimbabwe: "English Zimbabwe (en-ZW)", + SubLangEstonianEstonia: "Estonian Estonia (et-EE)", + SubLangFaroeseFaroeIslands: "Faroese Faroe Islands (fo-FO)", + SubLangFilipinoPhilippines: "Filipino Philippines (fil-PH)", + SubLangFinnishFinland: "Finnish Finland (fi-FI)", + SubLangFrenchBelgium: "French Belgium (fr-BE)", + SubLangFrenchCameroon: "French Cameroon (fr-CM)", + SubLangFrenchCanada: "French Canada (fr-CA)", + SubLangFrenchCaribbean: "French Caribbean (fr-029)", + SubLangFrenchCongoDrc: "French Congo, Drc (fr-CD)", + SubLangFrenchCôteDivoire: "French Côte D'ivoire (fr-CI)", + SubLangFrenchFrance: "French France (fr-FR)", + SubLangFrenchHaiti: "French Haiti (fr-HT)", + SubLangFrenchLuxembourg: "French Luxembourg (fr-LU)", + SubLangFrenchMali: "French Mali (fr-ML)", + SubLangFrenchMorocco: "French Morocco (fr-MA)", + SubLangFrenchPrincipalityOfMonaco: "French Principality Of Monaco (fr-MC)", + SubLangFrenchReunion: "French Reunion (fr-RE)", + SubLangFrenchSenegal: "French Senegal (fr-SN)", + SubLangFrenchSwitzerland: "French Switzerland (fr-CH)", + SubLangFrisianNetherlands: "Frisian Netherlands (fy-NL)", + SubLangFulahNigeria: "Fulah Nigeria (ff-NG)", + SubLangFulahLatinNigeria: "Fulah (Latin) Nigeria (ff-Latn-NG)", + SubLangFulahSenegal: "Fulah Senegal (ff-Latn-SN)", + SubLangGalicianSpain: "Galician Spain (gl-ES)", + SubLangGeorgianGeorgia: "Georgian Georgia (ka-GE)", + SubLangGermanAustria: "German Austria (de-AT)", + SubLangGermanGermany: "German Germany (de-DE)", + SubLangGermanLiechtenstein: "German Liechtenstein (de-LI)", + SubLangGermanLuxembourg: "German Luxembourg (de-LU)", + SubLangGermanSwitzerland: "German Switzerland (de-CH)", + SubLangGreekGreece: "Greek Greece (el-GR)", + SubLangGreenlandicGreenland: "Greenlandic Greenland (kl-GL)", + SubLangGuaraniParaguay: "Guarani Paraguay (gn-PY)", + 
SubLangGujaratiIndia: "Gujarati India (gu-IN)", + SubLangHausaLatin: "Hausa (Latin) (ha-Latn)", + SubLangHausaLatinNigeria: "Hausa (Latin) Nigeria (ha-Latn-NG)", + SubLangHawaiianUnitedStates: "Hawaiian United States (haw-US)", + SubLangHebrewIsrael: "Hebrew Israel (he-IL)", + SubLangHindiIndia: "Hindi India (hi-IN)", + SubLangHungarianHungary: "Hungarian Hungary (hu-HU)", + SubLangIcelandicIceland: "Icelandic Iceland (is-IS)", + SubLangIgboNigeria: "Igbo Nigeria (ig-NG)", + SubLangIndonesianIndonesia: "Indonesian Indonesia (id-ID)", + SubLangInuktitutLatin: "Inuktitut (Latin) (iu-Latn)", + SubLangInuktitutLatinCanada: "Inuktitut (Latin) Canada (iu-Latn-CA)", + SubLangInuktitutSyllabics: "Inuktitut (Syllabics) (iu-Cans)", + SubLangInuktitutSyllabicsCanada: "Inuktitut (Syllabics) Canada (iu-Cans-CA)", + SubLangIrishIreland: "Irish Ireland (ga-IE)", + SubLangItalianItaly: "Italian Italy (it-IT)", + SubLangItalianSwitzerland: "Italian Switzerland (it-CH)", + SubLangJapaneseJapan: "Japanese Japan (ja-JP)", + SubLangKannadaIndia: "Kannada India (kn-IN)", + SubLangKanuriLatinNigeria: "Kanuri (Latin) Nigeria (kr-Latn-NG)", + SubLangKashmiriPersoArabic: "Kashmiri Perso-Arabic (ks-Arab)", + SubLangKashmiriDevanagariIndia: "Kashmiri (Devanagari) India (ks-Deva-IN)", + SubLangKazakhKazakhstan: "Kazakh Kazakhstan (kk-KZ)", + SubLangKhmerCambodia: "Khmer Cambodia (km-KH)", + SubLangKicheGuatemala: "K'iche Guatemala (quc-Latn-GT)", + SubLangKinyarwandaRwanda: "Kinyarwanda Rwanda (rw-RW)", + SubLangKiswahiliKenya: "Kiswahili Kenya (sw-KE)", + SubLangKonkaniIndia: "Konkani India (kok-IN)", + SubLangKoreanKorea: "Korean Korea (ko-KR)", + SubLangKyrgyzKyrgyzstan: "Kyrgyz Kyrgyzstan (ky-KG)", + SubLangLaoLaoPdr: "Lao Lao P.d.r. (lo-LA)", + SubLangLatinVaticanCity: "Latin Vatican City (la-VA)", + SubLangLatvianLatvia: "Latvian Latvia (lv-LV)", + SubLangLithuanianLithuania: "Lithuanian Lithuania (lt-LT)", + SubLangLowerSorbianGermany: "Lower Sorbian Germany (dsb-DE)", + SubLangLuxembourgishLuxembourg: "Luxembourgish Luxembourg (lb-LU)", + SubLangMacedonianNorthMacedonia: "Macedonian North Macedonia (mk-MK)", + SubLangMalayBruneiDarussalam: "Malay Brunei Darussalam (ms-BN)", + SubLangMalayMalaysia: "Malay Malaysia (ms-MY)", + SubLangMalayalamIndia: "Malayalam India (ml-IN)", + SubLangMalteseMalta: "Maltese Malta (mt-MT)", + SubLangMaoriNewZealand: "Maori New Zealand (mi-NZ)", + SubLangMapudungunChile: "Mapudungun Chile (arn-CL)", + SubLangMarathiIndia: "Marathi India (mr-IN)", + SubLangMohawkCanada: "Mohawk Canada (moh-CA)", + SubLangMongolianCyrillic: "Mongolian (Cyrillic) (mn-Cyrl)", + SubLangMongolianCyrillicMongolia: "Mongolian (Cyrillic) Mongolia (mn-MN)", + SubLangMongolianTraditionalMongolian: "Mongolian (Traditional Mongolian) (mn-Mong)", + SubLangMongolianTraditionalMongolianPeoplesRepublicOfChina: "Mongolian (Traditional Mongolian) People's Republic Of China (mn-MongCN)", + SubLangMongolianTraditionalMongolianMongolia: "Mongolian (Traditional Mongolian) Mongolia (mn-MongMN)", + SubLangNepaliIndia: "Nepali India (ne-IN)", + SubLangNepaliNepal: "Nepali Nepal (ne-NP)", + SubLangNorwegianBokmalNorway: "Norwegian (Bokmal) Norway (nb-NO)", + SubLangNorwegianNynorskNorway: "Norwegian (Nynorsk) Norway (nn-NO)", + SubLangOccitanFrance: "Occitan France (oc-FR)", + SubLangOdiaIndia: "Odia India (or-IN)", + SubLangOromoEthiopia: "Oromo Ethiopia (om-ET)", + SubLangPashtoAfghanistan: "Pashto Afghanistan (ps-AF)", + SubLangPersianIran: "Persian Iran (fa-IR)", + SubLangPolishPoland: "Polish Poland (pl-PL)", + 
SubLangPortugueseBrazil: "Portuguese Brazil (pt-BR)", + SubLangPortuguesePortugal: "Portuguese Portugal (pt-PT)", + SubLangPseudoLanguagePseudoLocaleForEastAsianComplexScriptLocalizationTesting: "Pseudo Language Pseudo Locale For East Asian/Complex Script Localization Testing (qps-ploca)", + SubLangPseudoLanguagePseudoLocaleUsedForLocalizationTesting: "Pseudo Language Pseudo Locale Used For Localization Testing (qps-ploc)", + SubLangPseudoLanguagePseudoLocaleUsedForLocalizationTestingOfMirroredLocales: "Pseudo Language Pseudo Locale Used For Localization Testing Of Mirrored Locales (qps-plocm)", + SubLangPunjabi: "Punjabi (pa-Arab)", + SubLangPunjabiIndia: "Punjabi India (pa-IN)", + SubLangPunjabiIslamicRepublicOfPakistan: "Punjabi Islamic Republic Of Pakistan (pa-Arab-PK)", + SubLangQuechuaBolivia: "Quechua Bolivia (quz-BO)", + SubLangQuechuaEcuador: "Quechua Ecuador (quz-EC)", + SubLangQuechuaPeru: "Quechua Peru (quz-PE)", + SubLangRomanianMoldova: "Romanian Moldova (ro-MD)", + SubLangRomanianRomania: "Romanian Romania (ro-RO)", + SubLangRomanshSwitzerland: "Romansh Switzerland (rm-CH)", + SubLangRussianMoldova: "Russian Moldova (ru-MD)", + SubLangRussianRussia: "Russian Russia (ru-RU)", + SubLangSakhaRussia: "Sakha Russia (sah-RU)", + SubLangSamiInariFinland: "Sami (Inari) Finland (smn-FI)", + SubLangSamiLuleNorway: "Sami (Lule) Norway (smj-NO)", + SubLangSamiLuleSweden: "Sami (Lule) Sweden (smj-SE)", + SubLangSamiNorthernFinland: "Sami (Northern) Finland (se-FI)", + SubLangSamiNorthernNorway: "Sami (Northern) Norway (se-NO)", + SubLangSamiNorthernSweden: "Sami (Northern) Sweden (se-SE)", + SubLangSamiSkoltFinland: "Sami (Skolt) Finland (sms-FI)", + SubLangSamiSouthernNorway: "Sami (Southern) Norway (sma-NO)", + SubLangSamiSouthernSweden: "Sami (Southern) Sweden (sma-SE)", + SubLangSanskritIndia: "Sanskrit India (sa-IN)", + SubLangScottishGaelicUnitedKingdom: "Scottish Gaelic United Kingdom (gd-GB)", + SubLangSerbianCyrillic: "Serbian (Cyrillic) (sr-Cyrl)", + SubLangSerbianCyrillicBosniaAndHerzegovina: "Serbian (Cyrillic) Bosnia And Herzegovina (sr-Cyrl-BA)", + SubLangSerbianCyrillicMontenegro: "Serbian (Cyrillic) Montenegro (sr-Cyrl-ME)", + SubLangSerbianCyrillicSerbia: "Serbian (Cyrillic) Serbia (sr-Cyrl-RS)", + SubLangSerbianCyrillicSerbiaAndMontenegroFormer: "Serbian (Cyrillic) Serbia And Montenegro (Former) (sr-Cyrl-CS)", + SubLangSerbianLatin: "Serbian (Latin) (sr-Latn)", + SubLangSerbianLatinBosniaAndHerzegovina: "Serbian (Latin) Bosnia And Herzegovina (sr-Latn-BA)", + SubLangSerbianLatinMontenegro: "Serbian (Latin) Montenegro (sr-Latn-ME)", + SubLangSerbianLatinSerbia: "Serbian (Latin) Serbia (sr-Latn-RS)", + SubLangSerbianLatinSerbiaAndMontenegroFormer: "Serbian (Latin) Serbia And Montenegro (Former) (sr-Latn-CS)", + SubLangSesothoSaLeboaSouthAfrica: "Sesotho Sa Leboa South Africa (nso-ZA)", + SubLangSetswanaBotswana: "Setswana Botswana (tn-BW)", + SubLangSetswanaSouthAfrica: "Setswana South Africa (tn-ZA)", + SubLangSindhi: "Sindhi (sd-Arab)", + SubLangSindhiIslamicRepublicOfPakistan: "Sindhi Islamic Republic Of Pakistan (sd-Arab-PK)", + SubLangSinhalaSriLanka: "Sinhala Sri Lanka (si-LK)", + SubLangSlovakSlovakia: "Slovak Slovakia (sk-SK)", + SubLangSlovenianSlovenia: "Slovenian Slovenia (sl-SI)", + SubLangSomaliSomalia: "Somali Somalia (so-SO)", + SubLangSothoSouthAfrica: "Sotho South Africa (st-ZA)", + SubLangSpanishArgentina: "Spanish Argentina (es-AR)", + SubLangSpanishBolivarianRepublicOfVenezuela: "Spanish Bolivarian Republic Of Venezuela (es-VE)", + 
SubLangSpanishBolivia: "Spanish Bolivia (es-BO)", + SubLangSpanishChile: "Spanish Chile (es-CL)", + SubLangSpanishColombia: "Spanish Colombia (es-CO)", + SubLangSpanishCostaRica: "Spanish Costa Rica (es-CR)", + SubLangSpanishCuba: "Spanish Cuba (es-CU)", + SubLangSpanishDominicanRepublic: "Spanish Dominican Republic (es-DO)", + SubLangSpanishEcuador: "Spanish Ecuador (es-EC)", + SubLangSpanishElSalvador: "Spanish El Salvador (es-SV)", + SubLangSpanishGuatemala: "Spanish Guatemala (es-GT)", + SubLangSpanishHonduras: "Spanish Honduras (es-HN)", + SubLangSpanishLatinAmerica: "Spanish Latin America (es-419)", + SubLangSpanishMexico: "Spanish Mexico (es-MX)", + SubLangSpanishNicaragua: "Spanish Nicaragua (es-NI)", + SubLangSpanishPanama: "Spanish Panama (es-PA)", + SubLangSpanishParaguay: "Spanish Paraguay (es-PY)", + SubLangSpanishPeru: "Spanish Peru (es-PE)", + SubLangSpanishPuertoRico: "Spanish Puerto Rico (es-PR)", + SubLangSpanishSpainTraditional: "Spanish Spain (es-ES_tradnl)", + SubLangSpanishSpain: "Spanish Spain (es-ES)", + SubLangSpanishUnitedStates: "Spanish United States (es-US)", + SubLangSpanishUruguay: "Spanish Uruguay (es-UY)", + SubLangSwedishFinland: "Swedish Finland (sv-FI)", + SubLangSwedishSweden: "Swedish Sweden (sv-SE)", + SubLangSyriacSyria: "Syriac Syria (syr-SY)", + SubLangTajikCyrillic: "Tajik (Cyrillic) (tg-Cyrl)", + SubLangTajikCyrillicTajikistan: "Tajik (Cyrillic) Tajikistan (tg-Cyrl-TJ)", + SubLangTamazightLatin: "Tamazight (Latin) (tzm-Latn)", + SubLangTamazightLatinAlgeria: "Tamazight (Latin) Algeria (tzm-Latn-DZ)", + SubLangTamilIndia: "Tamil India (ta-IN)", + SubLangTamilSriLanka: "Tamil Sri Lanka (ta-LK)", + SubLangTatarRussia: "Tatar Russia (tt-RU)", + SubLangTeluguIndia: "Telugu India (te-IN)", + SubLangThaiThailand: "Thai Thailand (th-TH)", + SubLangTibetanPeoplesRepublicOfChina: "Tibetan People's Republic Of China (bo-CN)", + SubLangTigrinyaEritrea: "Tigrinya Eritrea (ti-ER)", + SubLangTigrinyaEthiopia: "Tigrinya Ethiopia (ti-ET)", + SubLangTsongaSouthAfrica: "Tsonga South Africa (ts-ZA)", + SubLangTurkishTurkey: "Turkish Turkey (tr-TR)", + SubLangTurkmenTurkmenistan: "Turkmen Turkmenistan (tk-TM)", + SubLangUkrainianUkraine: "Ukrainian Ukraine (uk-UA)", + SubLangUpperSorbianGermany: "Upper Sorbian Germany (hsb-DE)", + SubLangUrduIndia: "Urdu India (ur-IN)", + SubLangUrduIslamicRepublicOfPakistan: "Urdu Islamic Republic Of Pakistan (ur-PK)", + SubLangUyghurPeoplesRepublicOfChina: "Uyghur People's Republic Of China (ug-CN)", + SubLangUzbekCyrillic: "Uzbek (Cyrillic) (uz-Cyrl)", + SubLangUzbekCyrillicUzbekistan: "Uzbek (Cyrillic) Uzbekistan (uz-Cyrl-UZ)", + SubLangUzbekLatin: "Uzbek (Latin) (uz-Latn)", + SubLangUzbekLatinUzbekistan: "Uzbek (Latin) Uzbekistan (uz-Latn-UZ)", + SubLangValencianSpain: "Valencian Spain (ca-ESvalencia)", + SubLangVendaSouthAfrica: "Venda South Africa (ve-ZA)", + SubLangVietnameseVietnam: "Vietnamese Vietnam (vi-VN)", + SubLangWelshUnitedKingdom: "Welsh United Kingdom (cy-GB)", + SubLangWolofSenegal: "Wolof Senegal (wo-SN)", + SubLangXhosaSouthAfrica: "Xhosa South Africa (xh-ZA)", + SubLangYiPeoplesRepublicOfChina: "Yi People's Republic Of China (ii-CN)", + SubLangYiddishWorld: "Yiddish World (yi-001)", + SubLangYorubaNigeria: "Yoruba Nigeria (yo-NG)", + SubLangZuluSouthAfrica: "Zulu South Africa (zu-ZA)", + } + + if val, ok := rsrcSubLangMap[subLang]; ok { + return val + } + + return "?" +} + +// PrettyResourceLang prettifies the resource lang and sub lang. 
+func PrettyResourceLang(lang ResourceLang, subLang int) string { + m := map[ResourceLang]map[int]ResourceSubLang{ + LangAfrikaans: { + 0x1: SubLangAfrikaansSouthAfrica, + }, + LangAlbanian: { + 0x1: SubLangAlbanianAlbania, + }, + LangAlsatian: { + 0x1: SubLangAlsatianFrance, + }, + LangAmharic: { + 0x1: SubLangAmharicEthiopia, + }, + LangArabic: { + 0x5: SubLangArabicAlgeria, + 0xf: SubLangArabicBahrain, + 0x3: SubLangArabicEgypt, + 0x2: SubLangArabicIraq, + 0xb: SubLangArabicJordan, + 0xd: SubLangArabicKuwait, + 0xc: SubLangArabicLebanon, + 0x4: SubLangArabicLibya, + 0x6: SubLangArabicMorocco, + 0x8: SubLangArabicOman, + 0x10: SubLangArabicQatar, + 0x1: SubLangArabicSaudiArabia, + 0xa: SubLangArabicSyria, + 0x7: SubLangArabicTunisia, + 0xe: SubLangArabicUae, + 0x9: SubLangArabicYemen, + }, + LangArmenian: { + 0x1: SubLangArmenianArmenia, + }, + LangAssamese: { + 0x1: SubLangAssameseIndia, + 0x1d: SubLangAzerbaijaniCyrillic, + 0x2: SubLangAzerbaijaniCyrillicAzerbaijan, + }, + LangAzerbaijaniLatin: { + 0x1e: SubLangAzerbaijaniLatin, + 0x1: SubLangAzerbaijaniLatinAzerbaijan, + }, + LangBangla: { + 0x2: SubLangBanglaBangladesh, + 0x1: SubLangBanglaIndia, + }, + LangBashkir: { + 0x1: SubLangBashkirRussia, + }, + LangBasque: { + 0x1: SubLangBasqueSpain, + }, + LangBelarusian: { + 0x1: SubLangBelarusianBelarus, + 0x19: SubLangBosnianCyrillic, + 0x8: SubLangBosnianCyrillicBosniaAndHerzegovina, + 0x1a: SubLangBosnianLatin, + }, + LangBosnianLatin: { + 0x5: SubLangBosnianLatinBosniaAndHerzegovina, + }, + LangBreton: { + 0x1: SubLangBretonFrance, + }, + LangBulgarian: { + 0x1: SubLangBulgarianBulgaria, + }, + LangBurmese: { + 0x1: SubLangBurmeseMyanmar, + }, + LangCatalan: { + 0x1: SubLangCatalanSpain, + }, + LangCentralKurdish: { + 0x1f: SubLangCentralKurdish, + 0x1: SubLangCentralKurdishIraq, + }, + LangCherokee: { + 0x1f: SubLangCherokee, + 0x1: SubLangCherokeeUnitedStates, + 0x0: SubLangChineseSimplified, + }, + LangChineseSimplified: { + 0x2: SubLangChineseSimplifiedPeoplesRepublicOfChina, + 0x4: SubLangChineseSimplifiedSingapore, + 0x1f: SubLangChineseTraditional, + 0x3: SubLangChineseTraditionalHongKongSar, + 0x5: SubLangChineseTraditionalMacaoSar, + 0x1: SubLangChineseTraditionalTaiwan, + }, + LangCorsican: { + 0x1: SubLangCorsicanFrance, + }, + LangCroatian: { + 0x1: SubLangCroatianCroatia, + 0x4: SubLangCroatianLatinBosniaAndHerzegovina, + }, + LangCzech: { + 0x1: SubLangCzechCzechRepublic, + }, + LangDanish: { + 0x1: SubLangDanishDenmark, + }, + LangDari: { + 0x1: SubLangDariAfghanistan, + }, + LangDivehi: { + 0x1: SubLangDivehiMaldives, + }, + LangDutch: { + 0x2: SubLangDutchBelgium, + 0x1: SubLangDutchNetherlands, + 0x3: SubLangDzongkhaBhutan, + }, + LangEnglish: { + 0x3: SubLangEnglishAustralia, + 0xa: SubLangEnglishBelize, + 0x4: SubLangEnglishCanada, + 0x9: SubLangEnglishCaribbean, + 0xf: SubLangEnglishHongKong, + 0x10: SubLangEnglishIndia, + 0x6: SubLangEnglishIreland, + 0x8: SubLangEnglishJamaica, + 0x11: SubLangEnglishMalaysia, + 0x5: SubLangEnglishNewZealand, + 0xd: SubLangEnglishRepublicOfThePhilippines, + 0x12: SubLangEnglishSingapore, + 0x7: SubLangEnglishSouthAfrica, + 0xb: SubLangEnglishTrinidadAndTobago, + 0x13: SubLangEnglishUnitedArabEmirates, + 0x2: SubLangEnglishUnitedKingdom, + 0x1: SubLangEnglishUnitedStates, + 0xc: SubLangEnglishZimbabwe, + }, + LangEstonian: { + 0x1: SubLangEstonianEstonia, + }, + LangFaroese: { + 0x1: SubLangFaroeseFaroeIslands, + }, + LangFilipino: { + 0x1: SubLangFilipinoPhilippines, + }, + LangFinnish: { + 0x1: SubLangFinnishFinland, + }, + 
LangFrench: { + 0x2: SubLangFrenchBelgium, + 0xb: SubLangFrenchCameroon, + 0x3: SubLangFrenchCanada, + 0x7: SubLangFrenchCaribbean, + 0x9: SubLangFrenchCongoDrc, + 0xc: SubLangFrenchCôteDivoire, + 0x1: SubLangFrenchFrance, + 0xf: SubLangFrenchHaiti, + 0x5: SubLangFrenchLuxembourg, + 0xd: SubLangFrenchMali, + 0xe: SubLangFrenchMorocco, + 0x6: SubLangFrenchPrincipalityOfMonaco, + 0x8: SubLangFrenchReunion, + 0xa: SubLangFrenchSenegal, + 0x4: SubLangFrenchSwitzerland, + }, + LangFrisian: { + 0x1: SubLangFrisianNetherlands, + }, + LangFulah: { + 0x1: SubLangFulahNigeria, + 0x2: SubLangFulahSenegal, + }, + LangFulahLatin: { + 0x1: SubLangFulahLatinNigeria, + }, + LangGalician: { + 0x1: SubLangGalicianSpain, + }, + LangGeorgian: { + 0x1: SubLangGeorgianGeorgia, + }, + LangGerman: { + 0x3: SubLangGermanAustria, + 0x1: SubLangGermanGermany, + 0x5: SubLangGermanLiechtenstein, + 0x4: SubLangGermanLuxembourg, + 0x2: SubLangGermanSwitzerland, + }, + LangGreek: { + 0x1: SubLangGreekGreece, + }, + LangGreenlandic: { + 0x1: SubLangGreenlandicGreenland, + }, + LangGuarani: { + 0x1: SubLangGuaraniParaguay, + }, + LangGujarati: { + 0x1: SubLangGujaratiIndia, + }, + LangHausaLatin: { + 0x1f: SubLangHausaLatin, + 0x1: SubLangHausaLatinNigeria, + }, + LangHawaiian: { + 0x1: SubLangHawaiianUnitedStates, + }, + LangHebrew: { + 0x1: SubLangHebrewIsrael, + }, + LangHindi: { + 0x1: SubLangHindiIndia, + }, + LangHungarian: { + 0x1: SubLangHungarianHungary, + }, + LangIcelandic: { + 0x1: SubLangIcelandicIceland, + }, + LangIgbo: { + 0x1: SubLangIgboNigeria, + }, + LangIndonesian: { + 0x1: SubLangIndonesianIndonesia, + }, + LangInuktitutLatin: { + 0x1f: SubLangInuktitutLatin, + 0x2: SubLangInuktitutLatinCanada, + 0x1e: SubLangInuktitutSyllabics, + 0x1: SubLangInuktitutSyllabicsCanada, + }, + LangIrish: { + 0x2: SubLangIrishIreland, + }, + LangItalian: { + 0x1: SubLangItalianItaly, + 0x2: SubLangItalianSwitzerland, + }, + LangJapanese: { + 0x1: SubLangJapaneseJapan, + }, + LangKannada: { + 0x1: SubLangKannadaIndia, + }, + LangKashmiri: { + 0x1: SubLangKashmiriPersoArabic, + 0x2: SubLangKashmiriDevanagariIndia, + }, + LangKazakh: { + 0x1: SubLangKazakhKazakhstan, + }, + LangKhmer: { + 0x1: SubLangKhmerCambodia, + }, + LangKiche: { + 0x1: SubLangKicheGuatemala, + }, + LangKinyarwanda: { + 0x1: SubLangKinyarwandaRwanda, + }, + LangKiswahili: { + 0x1: SubLangKiswahiliKenya, + }, + LangKonkani: { + 0x1: SubLangKonkaniIndia, + }, + LangKorean: { + 0x1: SubLangKoreanKorea, + }, + LangKyrgyz: { + 0x1: SubLangKyrgyzKyrgyzstan, + }, + LangLao: { + 0x1: SubLangLaoLaoPdr, + }, + LangLatvian: { + 0x1: SubLangLatvianLatvia, + }, + LangLithuanian: { + 0x1: SubLangLithuanianLithuania, + }, + LangLowerSorbian: { + 0x2: SubLangLowerSorbianGermany, + }, + LangLuxembourgish: { + 0x1: SubLangLuxembourgishLuxembourg, + }, + LangMacedonian: { + 0x1: SubLangMacedonianNorthMacedonia, + }, + LangMalay: { + 0x2: SubLangMalayBruneiDarussalam, + 0x1: SubLangMalayMalaysia, + }, + LangMalayalam: { + 0x1: SubLangMalayalamIndia, + }, + LangMaltese: { + 0x1: SubLangMalteseMalta, + }, + LangMaori: { + 0x1: SubLangMaoriNewZealand, + }, + LangMapudungun: { + 0x1: SubLangMapudungunChile, + }, + LangMarathi: { + 0x1: SubLangMarathiIndia, + }, + LangMohawk: { + 0x1: SubLangMohawkCanada, + }, + LangMongolianCyrillic: { + 0x1e: SubLangMongolianCyrillic, + 0x1: SubLangMongolianCyrillicMongolia, + 0x1f: SubLangMongolianTraditionalMongolian, + 0x2: SubLangMongolianTraditionalMongolianPeoplesRepublicOfChina, + 0x3: SubLangMongolianTraditionalMongolianMongolia, + 
}, + LangNepali: { + 0x2: SubLangNepaliIndia, + 0x1: SubLangNepaliNepal, + }, + LangNorwegianBokmalNo: {}, + LangNorwegianBokmal: { + 0x1: SubLangNorwegianBokmalNorway, + }, + LangNorwegianNynorsk: { + 0x2: SubLangNorwegianNynorskNorway, + }, + LangOccitan: { + 0x1: SubLangOccitanFrance, + }, + LangOdia: { + 0x1: SubLangOdiaIndia, + }, + LangOromo: { + 0x1: SubLangOromoEthiopia, + }, + LangPashto: { + 0x1: SubLangPashtoAfghanistan, + }, + LangPersian: { + 0x1: SubLangPersianIran, + }, + LangPolish: { + 0x1: SubLangPolishPoland, + }, + LangPortuguese: { + 0x1: SubLangPortugueseBrazil, + 0x2: SubLangPortuguesePortugal, + }, + LangPunjabi: { + 0x1f: SubLangPunjabi, + 0x1: SubLangPunjabiIndia, + 0x2: SubLangPunjabiIslamicRepublicOfPakistan, + }, + LangQuechua: { + 0x1: SubLangQuechuaBolivia, + 0x2: SubLangQuechuaEcuador, + 0x3: SubLangQuechuaPeru, + }, + LangRomanian: { + 0x2: SubLangRomanianMoldova, + 0x1: SubLangRomanianRomania, + }, + LangRomansh: { + 0x1: SubLangRomanshSwitzerland, + }, + LangRussian: { + 0x2: SubLangRussianMoldova, + 0x1: SubLangRussianRussia, + }, + LangSakha: { + 0x1: SubLangSakhaRussia, + }, + LangSamiInari: { + 0x9: SubLangSamiInariFinland, + }, + LangSamiLule: { + 0x4: SubLangSamiLuleNorway, + 0x5: SubLangSamiLuleSweden, + }, + LangSamiNorthern: { + 0x3: SubLangSamiNorthernFinland, + 0x1: SubLangSamiNorthernNorway, + 0x2: SubLangSamiNorthernSweden, + }, + LangSamiSkolt: { + 0x8: SubLangSamiSkoltFinland, + }, + LangSamiSouthern: { + 0x6: SubLangSamiSouthernNorway, + 0x7: SubLangSamiSouthernSweden, + }, + LangSanskrit: { + 0x1: SubLangSanskritIndia, + }, + LangScottishGaelic: { + 0x1: SubLangScottishGaelicUnitedKingdom, + 0x1b: SubLangSerbianCyrillic, + 0x7: SubLangSerbianCyrillicBosniaAndHerzegovina, + 0xc: SubLangSerbianCyrillicMontenegro, + 0xa: SubLangSerbianCyrillicSerbia, + 0x3: SubLangSerbianCyrillicSerbiaAndMontenegroFormer, + 0x1c: SubLangSerbianLatin, + }, + LangSerbianLatin: { + 0x6: SubLangSerbianLatinBosniaAndHerzegovina, + 0xb: SubLangSerbianLatinMontenegro, + 0x9: SubLangSerbianLatinSerbia, + 0x2: SubLangSerbianLatinSerbiaAndMontenegroFormer, + }, + LangSesothoSaLeboa: { + 0x1: SubLangSesothoSaLeboaSouthAfrica, + }, + LangSetswana: { + 0x2: SubLangSetswanaBotswana, + 0x1: SubLangSetswanaSouthAfrica, + }, + LangSindhi: { + 0x1f: SubLangSindhi, + 0x2: SubLangSindhiIslamicRepublicOfPakistan, + }, + LangSinhala: { + 0x1: SubLangSinhalaSriLanka, + }, + LangSlovak: { + 0x1: SubLangSlovakSlovakia, + }, + LangSlovenian: { + 0x1: SubLangSlovenianSlovenia, + }, + LangSomali: { + 0x1: SubLangSomaliSomalia, + }, + LangSotho: { + 0x1: SubLangSothoSouthAfrica, + }, + LangSpanish: { + 0xb: SubLangSpanishArgentina, + 0x8: SubLangSpanishBolivarianRepublicOfVenezuela, + 0x10: SubLangSpanishBolivia, + 0xd: SubLangSpanishChile, + 0x9: SubLangSpanishColombia, + 0x5: SubLangSpanishCostaRica, + 0x17: SubLangSpanishCuba, + 0x7: SubLangSpanishDominicanRepublic, + 0xc: SubLangSpanishEcuador, + 0x11: SubLangSpanishElSalvador, + 0x4: SubLangSpanishGuatemala, + 0x12: SubLangSpanishHonduras, + 0x16: SubLangSpanishLatinAmerica, + 0x2: SubLangSpanishMexico, + 0x13: SubLangSpanishNicaragua, + 0x6: SubLangSpanishPanama, + 0xf: SubLangSpanishParaguay, + 0xa: SubLangSpanishPeru, + 0x14: SubLangSpanishPuertoRico, + 0x1: SubLangSpanishSpain, + 0x3: SubLangSpanishSpain, + 0x15: SubLangSpanishUnitedStates, + 0xe: SubLangSpanishUruguay, + }, + LangSwedish: { + 0x2: SubLangSwedishFinland, + 0x1: SubLangSwedishSweden, + }, + LangSyriac: { + 0x1: SubLangSyriacSyria, + }, + LangTajikCyrillic: { + 
0x1f: SubLangTajikCyrillic, + 0x1: SubLangTajikCyrillicTajikistan, + }, + LangTamazightLatin: { + 0x1f: SubLangTamazightLatin, + 0x2: SubLangTamazightLatinAlgeria, + }, + LangTamil: { + 0x1: SubLangTamilIndia, + 0x2: SubLangTamilSriLanka, + }, + LangTatar: { + 0x1: SubLangTatarRussia, + }, + LangTelugu: { + 0x1: SubLangTeluguIndia, + }, + LangThai: { + 0x1: SubLangThaiThailand, + }, + LangTibetan: { + 0x1: SubLangTibetanPeoplesRepublicOfChina, + }, + LangTigrinya: { + 0x2: SubLangTigrinyaEritrea, + 0x1: SubLangTigrinyaEthiopia, + }, + LangTsonga: { + 0x1: SubLangTsongaSouthAfrica, + }, + LangTurkish: { + 0x1: SubLangTurkishTurkey, + }, + LangTurkmen: { + 0x1: SubLangTurkmenTurkmenistan, + }, + LangUkrainian: { + 0x1: SubLangUkrainianUkraine, + }, + LangUpperSorbian: { + 0x1: SubLangUpperSorbianGermany, + }, + LangUrdu: { + 0x2: SubLangUrduIndia, + 0x1: SubLangUrduIslamicRepublicOfPakistan, + }, + LangUyghur: { + 0x1: SubLangUyghurPeoplesRepublicOfChina, + 0x1e: SubLangUzbekCyrillic, + 0x2: SubLangUzbekCyrillicUzbekistan, + }, + LangUzbekLatin: { + 0x1f: SubLangUzbekLatin, + 0x1: SubLangUzbekLatinUzbekistan, + 0x2: SubLangValencianSpain, + }, + LangVenda: { + 0x1: SubLangVendaSouthAfrica, + }, + LangVietnamese: { + 0x1: SubLangVietnameseVietnam, + }, + LangWelsh: { + 0x1: SubLangWelshUnitedKingdom, + }, + LangWolof: { + 0x1: SubLangWolofSenegal, + }, + LangXhosa: { + 0x1: SubLangXhosaSouthAfrica, + }, + LangYi: { + 0x1: SubLangYiPeoplesRepublicOfChina, + }, + LangYoruba: { + 0x1: SubLangYorubaNigeria, + }, + LangZulu: { + 0x1: SubLangZuluSouthAfrica, + }, + } + + if val, ok := m[lang][subLang]; ok { + return val.String() + } + + return "?" +} diff --git a/vendor/github.com/saferwall/pe/richheader.go b/vendor/github.com/saferwall/pe/richheader.go new file mode 100644 index 00000000..f35dd053 --- /dev/null +++ b/vendor/github.com/saferwall/pe/richheader.go @@ -0,0 +1,529 @@ +// Copyright 2018 Saferwall. All rights reserved. +// Use of this source code is governed by an Apache v2 +// license that can be found in the LICENSE file. + +package pe + +import ( + "bytes" + "crypto/md5" + "encoding/binary" + "fmt" +) + +const ( + // DansSignature ('DanS' as dword) is where the rich header struct starts. + DansSignature = 0x536E6144 + + // RichSignature ('Rich' as dword, 0x68636952) is where the rich header struct ends. + RichSignature = "Rich" + + // AnoDansSigNotFound is reported when the rich header signature was found, + // but the DanS signature could not be located. + AnoDansSigNotFound = "Rich Header found, but could not locate DanS " + "signature" + + // AnoPaddingDwordNotZero is reported when the rich header signature's leading + // padding DWORDs are not equal to 0. + AnoPaddingDwordNotZero = "Rich header found: 3 leading padding DWORDs " + "not found after DanS signature" +) + +// CompID represents the `@comp.id` structure. +type CompID struct { + // The minor version information for the compiler used when building the product. + MinorCV uint16 `json:"minor_compiler_version"` + + // Provides information about the identity or type of the objects used to + // build the PE32. + ProdID uint16 `json:"product_id"` + + // Indicates how often the object identified by the former two fields is + // referenced by this PE32 file. + Count uint32 `json:"count"` + + // The raw @comp.id structure (unmasked). + Unmasked uint32 `json:"unmasked"` +}
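+
+// NOTE: the following helper is an illustrative sketch added for this review,
+// not part of the upstream saferwall/pe file. It shows how one masked
+// @comp.id entry (two encrypted DWORDs plus the XOR key) can be decoded by
+// hand: the decrypted first DWORD packs MinorCV into bits 0-15 and ProdID
+// into bits 16-31, and the decrypted second DWORD is the use count.
+func decodeCompID(encComp, encCount, xorKey uint32) CompID {
+	unmasked := encComp ^ xorKey
+	return CompID{
+		MinorCV:  uint16(unmasked & 0xFFFF),
+		ProdID:   uint16(unmasked >> 16),
+		Count:    encCount ^ xorKey,
+		Unmasked: unmasked,
+	}
+}
+
+// RichHeader is a structure that is written right after the MZ DOS header.
+// It consists of pairs of 4-byte integers and is
+// encrypted with a simple XOR operation, using the checksum as the key.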
+// The data between the magic values encodes the ‘bill of materials’ that were +// collected by the linker to produce the binary. +type RichHeader struct { + XORKey uint32 `json:"xor_key"` + CompIDs []CompID `json:"comp_ids"` + DansOffset int `json:"dans_offset"` + Raw []byte `json:"raw"` +} + +// ParseRichHeader parses the rich header struct. +func (pe *File) ParseRichHeader() error { + + rh := RichHeader{} + ntHeaderOffset := pe.DOSHeader.AddressOfNewEXEHeader + richSigOffset := bytes.Index(pe.data[:ntHeaderOffset], []byte(RichSignature)) + + // For example, .NET executable files do not use the MSVC linker and these + // executables do not contain a detectable Rich Header. + if richSigOffset < 0 { + return nil + } + + // The DWORD following the "Rich" sequence is the XOR key stored by and + // calculated by the linker. It is actually a checksum of the DOS header with + // the e_lfanew zeroed out, and additionally includes the values of the + // unencrypted "Rich" array. Using a checksum with encryption will not only + // obfuscate the values, but it also serves as a rudimentary digital + // signature. If the checksum is calculated from scratch once the values + // have been decrypted, but doesn't match the stored key, it can be assumed + // the structure had been tampered with. For those that go the extra step to + // recalculate the checksum/key, this simple protection mechanism can be bypassed. + rh.XORKey = binary.LittleEndian.Uint32(pe.data[richSigOffset+4:]) + + // To decrypt the array, start with the DWORD just prior to the `Rich` sequence + // and XOR it with the key. Continue the loop backwards, 4 bytes at a time, + // until the sequence `DanS` is decrypted. + var decRichHeader []uint32 + dansSigOffset := -1 + estimatedBeginDans := richSigOffset - 4 - binary.Size(ImageDOSHeader{}) + for it := 0; it < estimatedBeginDans; it += 4 { + buff := binary.LittleEndian.Uint32(pe.data[richSigOffset-4-it:]) + res := buff ^ rh.XORKey + if res == DansSignature { + dansSigOffset = richSigOffset - it - 4 + break + } + + decRichHeader = append(decRichHeader, res) + } + + // Verify that we successfully found the `DanS` magic. + if dansSigOffset == -1 { + pe.Anomalies = append(pe.Anomalies, AnoDansSigNotFound) + return nil + } + + // Anomaly check: dansSigOffset is usually found at offset 0x80. + if dansSigOffset != 0x80 { + pe.Anomalies = append(pe.Anomalies, AnoDanSMagicOffset) + } + + rh.DansOffset = dansSigOffset + rh.Raw = pe.data[dansSigOffset : richSigOffset+8] + + // Reverse the decrypted rich header. + for i, j := 0, len(decRichHeader)-1; i < j; i, j = i+1, j-1 { + decRichHeader[i], decRichHeader[j] = decRichHeader[j], decRichHeader[i] + } + + // After the `DanS` signature, there are some zero padding DWORDs. In practice, + // Microsoft seems to have wanted the entries to begin on a 16-byte + // (paragraph) boundary, so the 3 leading padding DWORDs can be safely + // skipped as not belonging to the data. + if decRichHeader[0] != 0 || decRichHeader[1] != 0 || decRichHeader[2] != 0 { + pe.Anomalies = append(pe.Anomalies, AnoPaddingDwordNotZero) + } + + // The array stores entries that are 8 bytes each, broken into 3 members. + // Each entry represents either a tool that was employed as part of building + // the executable or a statistic. + // The @comp.id array should occupy a multiple of 8 bytes; some malformed PE + // files have an incorrect number of entries.
+ var lenCompIDs int + if (len(decRichHeader)-3)%2 != 0 { + lenCompIDs = len(decRichHeader) - 1 + } else { + lenCompIDs = len(decRichHeader) + } + + for i := 3; i < lenCompIDs; i += 2 { + cid := CompID{} + compid := make([]byte, binary.Size(cid)) + binary.LittleEndian.PutUint32(compid, decRichHeader[i]) + binary.LittleEndian.PutUint32(compid[4:], decRichHeader[i+1]) + buf := bytes.NewReader(compid) + err := binary.Read(buf, binary.LittleEndian, &cid) + if err != nil { + return err + } + cid.Unmasked = binary.LittleEndian.Uint32(compid) + rh.CompIDs = append(rh.CompIDs, cid) + } + + pe.RichHeader = rh + pe.HasRichHdr = true + + checksum := pe.RichHeaderChecksum() + if checksum != rh.XORKey { + pe.Anomalies = append(pe.Anomalies, "Invalid rich header checksum") + } + + return nil +} + +// RichHeaderChecksum calculates the Rich Header checksum. +func (pe *File) RichHeaderChecksum() uint32 { + + checksum := uint32(pe.RichHeader.DansOffset) + + // First, sum the DOS header bytes, each rotated left by its position + // relative to the start of the DOS header, e.g. the second byte is rotated + // left by 2 using a rol operation. + for i := 0; i < pe.RichHeader.DansOffset; i++ { + // Skip over the DOS e_lfanew field at offset 0x3C. + if i >= 0x3C && i < 0x40 { + continue + } + b := uint32(pe.data[i]) + checksum += ((b << (i % 32)) | (b>>(32-(i%32)))&0xff) + checksum &= 0xFFFFFFFF + } + + // Next, add each Rich header entry: its ProductId and BuildNumber combined + // into a single 32-bit number and rotated left by its count. + for _, compid := range pe.RichHeader.CompIDs { + checksum += (compid.Unmasked<<(compid.Count%32) | + compid.Unmasked>>(32-(compid.Count%32))) + checksum &= 0xFFFFFFFF + } + + return checksum +} + +// RichHeaderHash calculates the Rich Header hash. +func (pe *File) RichHeaderHash() string { + if !pe.HasRichHdr { + return "" + } + + richIndex := bytes.Index(pe.RichHeader.Raw, []byte(RichSignature)) + if richIndex == -1 { + return "" + } + + key := make([]byte, 4) + binary.LittleEndian.PutUint32(key, pe.RichHeader.XORKey) + + rawData := pe.RichHeader.Raw[:richIndex] + clearData := make([]byte, len(rawData)) + for idx, val := range rawData { + clearData[idx] = val ^ key[idx%len(key)] + } + return fmt.Sprintf("%x", md5.Sum(clearData)) +}
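+
+// NOTE: illustrative usage sketch added for this review, not part of the
+// upstream saferwall/pe file. It assumes a *File that has already been
+// constructed and parsed, and shows how a caller can walk the decoded rich
+// header using the helpers defined in this file.
+func exampleRichHeaderUsage(file *File) {
+	if !file.HasRichHdr {
+		return
+	}
+	// Print the stored XOR key and the MD5 hash of the clear-text header.
+	fmt.Printf("XOR key: 0x%x, hash: %s\n", file.RichHeader.XORKey, file.RichHeaderHash())
+	for _, c := range file.RichHeader.CompIDs {
+		// Map each @comp.id to its MS-internal tool name and VS version.
+		fmt.Printf("%s (%s) used %d times\n",
+			ProdIDtoStr(c.ProdID), ProdIDtoVSversion(c.ProdID), c.Count)
+	}
+}
+
+// ProdIDtoStr maps product ids to MS internal names.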
+// list from: https://github.com/kirschju/richheader +func ProdIDtoStr(prodID uint16) string { + + prodIDtoStrMap := map[uint16]string{ + 0x0000: "Unknown", + 0x0001: "Import0", + 0x0002: "Linker510", + 0x0003: "Cvtomf510", + 0x0004: "Linker600", + 0x0005: "Cvtomf600", + 0x0006: "Cvtres500", + 0x0007: "Utc11_Basic", + 0x0008: "Utc11_C", + 0x0009: "Utc12_Basic", + 0x000a: "Utc12_C", + 0x000b: "Utc12_CPP", + 0x000c: "AliasObj60", + 0x000d: "VisualBasic60", + 0x000e: "Masm613", + 0x000f: "Masm710", + 0x0010: "Linker511", + 0x0011: "Cvtomf511", + 0x0012: "Masm614", + 0x0013: "Linker512", + 0x0014: "Cvtomf512", + 0x0015: "Utc12_C_Std", + 0x0016: "Utc12_CPP_Std", + 0x0017: "Utc12_C_Book", + 0x0018: "Utc12_CPP_Book", + 0x0019: "Implib700", + 0x001a: "Cvtomf700", + 0x001b: "Utc13_Basic", + 0x001c: "Utc13_C", + 0x001d: "Utc13_CPP", + 0x001e: "Linker610", + 0x001f: "Cvtomf610", + 0x0020: "Linker601", + 0x0021: "Cvtomf601", + 0x0022: "Utc12_1_Basic", + 0x0023: "Utc12_1_C", + 0x0024: "Utc12_1_CPP", + 0x0025: "Linker620", + 0x0026: "Cvtomf620", + 0x0027: "AliasObj70", + 0x0028: "Linker621", + 0x0029: "Cvtomf621", + 0x002a: "Masm615", + 0x002b: "Utc13_LTCG_C", + 0x002c: "Utc13_LTCG_CPP", + 0x002d: "Masm620", + 0x002e: "ILAsm100", + 0x002f: "Utc12_2_Basic", + 0x0030: "Utc12_2_C", + 0x0031: "Utc12_2_CPP", + 0x0032: "Utc12_2_C_Std", + 0x0033: "Utc12_2_CPP_Std", + 0x0034: "Utc12_2_C_Book", + 0x0035: "Utc12_2_CPP_Book", + 0x0036: "Implib622", + 0x0037: "Cvtomf622", + 0x0038: "Cvtres501", + 0x0039: "Utc13_C_Std", + 0x003a: "Utc13_CPP_Std", + 0x003b: "Cvtpgd1300", + 0x003c: "Linker622", + 0x003d: "Linker700", + 0x003e: "Export622", + 0x003f: "Export700", + 0x0040: "Masm700", + 0x0041: "Utc13_POGO_I_C", + 0x0042: "Utc13_POGO_I_CPP", + 0x0043: "Utc13_POGO_O_C", + 0x0044: "Utc13_POGO_O_CPP", + 0x0045: "Cvtres700", + 0x0046: "Cvtres710p", + 0x0047: "Linker710p", + 0x0048: "Cvtomf710p", + 0x0049: "Export710p", + 0x004a: "Implib710p", + 0x004b: "Masm710p", + 0x004c: "Utc1310p_C", + 0x004d: "Utc1310p_CPP", + 0x004e: "Utc1310p_C_Std", + 0x004f: "Utc1310p_CPP_Std", + 0x0050: "Utc1310p_LTCG_C", + 0x0051: "Utc1310p_LTCG_CPP", + 0x0052: "Utc1310p_POGO_I_C", + 0x0053: "Utc1310p_POGO_I_CPP", + 0x0054: "Utc1310p_POGO_O_C", + 0x0055: "Utc1310p_POGO_O_CPP", + 0x0056: "Linker624", + 0x0057: "Cvtomf624", + 0x0058: "Export624", + 0x0059: "Implib624", + 0x005a: "Linker710", + 0x005b: "Cvtomf710", + 0x005c: "Export710", + 0x005d: "Implib710", + 0x005e: "Cvtres710", + 0x005f: "Utc1310_C", + 0x0060: "Utc1310_CPP", + 0x0061: "Utc1310_C_Std", + 0x0062: "Utc1310_CPP_Std", + 0x0063: "Utc1310_LTCG_C", + 0x0064: "Utc1310_LTCG_CPP", + 0x0065: "Utc1310_POGO_I_C", + 0x0066: "Utc1310_POGO_I_CPP", + 0x0067: "Utc1310_POGO_O_C", + 0x0068: "Utc1310_POGO_O_CPP", + 0x0069: "AliasObj710", + 0x006a: "AliasObj710p", + 0x006b: "Cvtpgd1310", + 0x006c: "Cvtpgd1310p", + 0x006d: "Utc1400_C", + 0x006e: "Utc1400_CPP", + 0x006f: "Utc1400_C_Std", + 0x0070: "Utc1400_CPP_Std", + 0x0071: "Utc1400_LTCG_C", + 0x0072: "Utc1400_LTCG_CPP", + 0x0073: "Utc1400_POGO_I_C", + 0x0074: "Utc1400_POGO_I_CPP", + 0x0075: "Utc1400_POGO_O_C", + 0x0076: "Utc1400_POGO_O_CPP", + 0x0077: "Cvtpgd1400", + 0x0078: "Linker800", + 0x0079: "Cvtomf800", + 0x007a: "Export800", + 0x007b: "Implib800", + 0x007c: "Cvtres800", + 0x007d: "Masm800", + 0x007e: "AliasObj800", + 0x007f: "PhoenixPrerelease", + 0x0080: "Utc1400_CVTCIL_C", + 0x0081: "Utc1400_CVTCIL_CPP", + 0x0082: "Utc1400_LTCG_MSIL", + 0x0083: "Utc1500_C", + 0x0084: "Utc1500_CPP", + 0x0085: "Utc1500_C_Std", + 0x0086: "Utc1500_CPP_Std", + 
0x0087: "Utc1500_CVTCIL_C", + 0x0088: "Utc1500_CVTCIL_CPP", + 0x0089: "Utc1500_LTCG_C", + 0x008a: "Utc1500_LTCG_CPP", + 0x008b: "Utc1500_LTCG_MSIL", + 0x008c: "Utc1500_POGO_I_C", + 0x008d: "Utc1500_POGO_I_CPP", + 0x008e: "Utc1500_POGO_O_C", + 0x008f: "Utc1500_POGO_O_CPP", + 0x0090: "Cvtpgd1500", + 0x0091: "Linker900", + 0x0092: "Export900", + 0x0093: "Implib900", + 0x0094: "Cvtres900", + 0x0095: "Masm900", + 0x0096: "AliasObj900", + 0x0097: "Resource", + 0x0098: "AliasObj1000", + 0x0099: "Cvtpgd1600", + 0x009a: "Cvtres1000", + 0x009b: "Export1000", + 0x009c: "Implib1000", + 0x009d: "Linker1000", + 0x009e: "Masm1000", + 0x009f: "Phx1600_C", + 0x00a0: "Phx1600_CPP", + 0x00a1: "Phx1600_CVTCIL_C", + 0x00a2: "Phx1600_CVTCIL_CPP", + 0x00a3: "Phx1600_LTCG_C", + 0x00a4: "Phx1600_LTCG_CPP", + 0x00a5: "Phx1600_LTCG_MSIL", + 0x00a6: "Phx1600_POGO_I_C", + 0x00a7: "Phx1600_POGO_I_CPP", + 0x00a8: "Phx1600_POGO_O_C", + 0x00a9: "Phx1600_POGO_O_CPP", + 0x00aa: "Utc1600_C", + 0x00ab: "Utc1600_CPP", + 0x00ac: "Utc1600_CVTCIL_C", + 0x00ad: "Utc1600_CVTCIL_CPP", + 0x00ae: "Utc1600_LTCG_C", + 0x00af: "Utc1600_LTCG_CPP", + 0x00b0: "Utc1600_LTCG_MSIL", + 0x00b1: "Utc1600_POGO_I_C", + 0x00b2: "Utc1600_POGO_I_CPP", + 0x00b3: "Utc1600_POGO_O_C", + 0x00b4: "Utc1600_POGO_O_CPP", + 0x00b5: "AliasObj1010", + 0x00b6: "Cvtpgd1610", + 0x00b7: "Cvtres1010", + 0x00b8: "Export1010", + 0x00b9: "Implib1010", + 0x00ba: "Linker1010", + 0x00bb: "Masm1010", + 0x00bc: "Utc1610_C", + 0x00bd: "Utc1610_CPP", + 0x00be: "Utc1610_CVTCIL_C", + 0x00bf: "Utc1610_CVTCIL_CPP", + 0x00c0: "Utc1610_LTCG_C", + 0x00c1: "Utc1610_LTCG_CPP", + 0x00c2: "Utc1610_LTCG_MSIL", + 0x00c3: "Utc1610_POGO_I_C", + 0x00c4: "Utc1610_POGO_I_CPP", + 0x00c5: "Utc1610_POGO_O_C", + 0x00c6: "Utc1610_POGO_O_CPP", + 0x00c7: "AliasObj1100", + 0x00c8: "Cvtpgd1700", + 0x00c9: "Cvtres1100", + 0x00ca: "Export1100", + 0x00cb: "Implib1100", + 0x00cc: "Linker1100", + 0x00cd: "Masm1100", + 0x00ce: "Utc1700_C", + 0x00cf: "Utc1700_CPP", + 0x00d0: "Utc1700_CVTCIL_C", + 0x00d1: "Utc1700_CVTCIL_CPP", + 0x00d2: "Utc1700_LTCG_C", + 0x00d3: "Utc1700_LTCG_CPP", + 0x00d4: "Utc1700_LTCG_MSIL", + 0x00d5: "Utc1700_POGO_I_C", + 0x00d6: "Utc1700_POGO_I_CPP", + 0x00d7: "Utc1700_POGO_O_C", + 0x00d8: "Utc1700_POGO_O_CPP", + 0x00d9: "AliasObj1200", + 0x00da: "Cvtpgd1800", + 0x00db: "Cvtres1200", + 0x00dc: "Export1200", + 0x00dd: "Implib1200", + 0x00de: "Linker1200", + 0x00df: "Masm1200", + 0x00e0: "Utc1800_C", + 0x00e1: "Utc1800_CPP", + 0x00e2: "Utc1800_CVTCIL_C", + 0x00e3: "Utc1800_CVTCIL_CPP", + 0x00e4: "Utc1800_LTCG_C", + 0x00e5: "Utc1800_LTCG_CPP", + 0x00e6: "Utc1800_LTCG_MSIL", + 0x00e7: "Utc1800_POGO_I_C", + 0x00e8: "Utc1800_POGO_I_CPP", + 0x00e9: "Utc1800_POGO_O_C", + 0x00ea: "Utc1800_POGO_O_CPP", + 0x00eb: "AliasObj1210", + 0x00ec: "Cvtpgd1810", + 0x00ed: "Cvtres1210", + 0x00ee: "Export1210", + 0x00ef: "Implib1210", + 0x00f0: "Linker1210", + 0x00f1: "Masm1210", + 0x00f2: "Utc1810_C", + 0x00f3: "Utc1810_CPP", + 0x00f4: "Utc1810_CVTCIL_C", + 0x00f5: "Utc1810_CVTCIL_CPP", + 0x00f6: "Utc1810_LTCG_C", + 0x00f7: "Utc1810_LTCG_CPP", + 0x00f8: "Utc1810_LTCG_MSIL", + 0x00f9: "Utc1810_POGO_I_C", + 0x00fa: "Utc1810_POGO_I_CPP", + 0x00fb: "Utc1810_POGO_O_C", + 0x00fc: "Utc1810_POGO_O_CPP", + 0x00fd: "AliasObj1400", + 0x00fe: "Cvtpgd1900", + 0x00ff: "Cvtres1400", + 0x0100: "Export1400", + 0x0101: "Implib1400", + 0x0102: "Linker1400", + 0x0103: "Masm1400", + 0x0104: "Utc1900_C", + 0x0105: "Utc1900_CPP", + 0x0106: "Utc1900_CVTCIL_C", + 0x0107: "Utc1900_CVTCIL_CPP", + 0x0108: "Utc1900_LTCG_C", + 0x0109: 
"Utc1900_LTCG_CPP", + 0x010a: "Utc1900_LTCG_MSIL", + 0x010b: "Utc1900_POGO_I_C", + 0x010c: "Utc1900_POGO_I_CPP", + 0x010d: "Utc1900_POGO_O_C", + 0x010e: "Utc1900_POGO_O_CPP", + } + + if val, ok := prodIDtoStrMap[prodID]; ok { + return val + } + + return "?" +} + +// ProdIDtoVSversion retrieves the Visual Studio version from product id. +// list from: https://github.com/kirschju/richheader +func ProdIDtoVSversion(prodID uint16) string { + if prodID > 0x010e { + return "" + } else if prodID >= 0x00fd && prodID < 0x010e+1 { + return "Visual Studio 2015 14.00" + } else if prodID >= 0x00eb && prodID < 0x00fd { + return "Visual Studio 2013 12.10" + } else if prodID >= 0x00d9 && prodID < 0x00eb { + return "Visual Studio 2013 12.00" + } else if prodID >= 0x00c7 && prodID < 0x00d9 { + return "Visual Studio 2012 11.00" + } else if prodID >= 0x00b5 && prodID < 0x00c7 { + return "Visual Studio 2010 10.10" + } else if prodID >= 0x0098 && prodID < 0x00b5 { + return "Visual Studio 2010 10.00" + } else if prodID >= 0x0083 && prodID < 0x0098 { + return "Visual Studio 2008 09.00" + } else if prodID >= 0x006d && prodID < 0x0083 { + return "Visual Studio 2005 08.00" + } else if prodID >= 0x005a && prodID < 0x006d { + return "Visual Studio 2003 07.10" + } else if prodID == 1 { + return "Visual Studio" + } else { + return "" + } +} diff --git a/vendor/github.com/saferwall/pe/section.go b/vendor/github.com/saferwall/pe/section.go new file mode 100644 index 00000000..8f4b5b1b --- /dev/null +++ b/vendor/github.com/saferwall/pe/section.go @@ -0,0 +1,568 @@ +// Copyright 2018 Saferwall. All rights reserved. +// Use of this source code is governed by Apache v2 license +// license that can be found in the LICENSE file. + +package pe + +import ( + "encoding/binary" + "math" + "sort" + "strings" +) + +const ( + // ImageSectionReserved1 for future use. + ImageSectionReserved1 = 0x00000000 + + // ImageSectionReserved2 for future use. + ImageSectionReserved2 = 0x00000001 + + // ImageSectionReserved3 for future use. + ImageSectionReserved3 = 0x00000002 + + // ImageSectionReserved4 for future use. + ImageSectionReserved4 = 0x00000004 + + // ImageSectionTypeNoPad indicates the section should not be padded to the next + // boundary. This flag is obsolete and is replaced by ImageSectionAlign1Bytes. + // This is valid only for object files. + ImageSectionTypeNoPad = 0x00000008 + + // ImageSectionReserved5 for future use. + ImageSectionReserved5 = 0x00000010 + + // ImageSectionCntCode indicates the section contains executable code. + ImageSectionCntCode = 0x00000020 + + // ImageSectionCntInitializedData indicates the section contains initialized + // data. + ImageSectionCntInitializedData = 0x00000040 + + // ImageSectionCntUninitializedData indicates the section contains uninitialized + // data. + ImageSectionCntUninitializedData = 0x00000080 + + // ImageSectionLnkOther is reserved for future use. + ImageSectionLnkOther = 0x00000100 + + // ImageSectionLnkInfo indicates the section contains comments or other + // information. The .drectve section has this type. This is valid for + // object files only. + ImageSectionLnkInfo = 0x00000200 + + // ImageSectionReserved6 for future use. + ImageSectionReserved6 = 0x00000400 + + // ImageSectionLnkRemove indicates the section will not become part of the image + // This is valid only for object files. + ImageSectionLnkRemove = 0x00000800 + + // ImageSectionLnkComdat indicates the section contains COMDAT data. For more + // information, see COMDAT Sections (Object Only). 
This is valid only for + // object files. + ImageSectionLnkCOMDAT = 0x00001000 + + // ImageSectionGpRel indicates the section contains data referenced through the + // global pointer (GP). + ImageSectionGpRel = 0x00008000 + + // ImageSectionMemPurgeable is reserved for future use. + ImageSectionMemPurgeable = 0x00020000 + + // ImageSectionMem16Bit is reserved for future use. + ImageSectionMem16Bit = 0x00020000 + + // ImageSectionMemLocked is reserved for future use. + ImageSectionMemLocked = 0x00040000 + + // ImageSectionMemPreload is reserved for future use. + ImageSectionMemPreload = 0x00080000 + + // ImageSectionAlign1Bytes indicates to align data on a 1-byte boundary. + // Valid only for object files. + ImageSectionAlign1Bytes = 0x00100000 + + // ImageSectionAlign2Bytes indicates to align data on a 2-byte boundary. + // Valid only for object files. + ImageSectionAlign2Bytes = 0x00200000 + + // ImageSectionAlign4Bytes indicates to align data on a 4-byte boundary. + // Valid only for object files. + ImageSectionAlign4Bytes = 0x00300000 + + // ImageSectionAlign8Bytes indicates to align data on an 8-byte boundary. + // Valid only for object files. + ImageSectionAlign8Bytes = 0x00400000 + + // ImageSectionAlign16Bytes indicates to align data on a 16-byte boundary. + // Valid only for object files. + ImageSectionAlign16Bytes = 0x00500000 + + // ImageSectionAlign32Bytes indicates to align data on a 32-byte boundary. + // Valid only for object files. + ImageSectionAlign32Bytes = 0x00600000 + + // ImageSectionAlign64Bytes indicates to align data on a 64-byte boundary. + // Valid only for object files. + ImageSectionAlign64Bytes = 0x00700000 + + // ImageSectionAlign128Bytes indicates to align data on a 128-byte boundary. + // Valid only for object files. + ImageSectionAlign128Bytes = 0x00800000 + + // ImageSectionAlign256Bytes indicates to align data on a 256-byte boundary. + // Valid only for object files. + ImageSectionAlign256Bytes = 0x00900000 + + // ImageSectionAlign512Bytes indicates to align data on a 512-byte boundary. + // Valid only for object files. + ImageSectionAlign512Bytes = 0x00A00000 + + // ImageSectionAlign1024Bytes indicates to align data on a 1024-byte boundary. + // Valid only for object files. + ImageSectionAlign1024Bytes = 0x00B00000 + + // ImageSectionAlign2048Bytes indicates to align data on a 2048-byte boundary. + // Valid only for object files. + ImageSectionAlign2048Bytes = 0x00C00000 + + // ImageSectionAlign4096Bytes indicates to align data on a 4096-byte boundary. + // Valid only for object files. + ImageSectionAlign4096Bytes = 0x00D00000 + + // ImageSectionAlign8192Bytes indicates to align data on an 8192-byte boundary. + // Valid only for object files. + ImageSectionAlign8192Bytes = 0x00E00000 + + // ImageSectionLnkMRelocOvfl indicates the section contains extended + // relocations. + ImageSectionLnkMRelocOvfl = 0x01000000 + + // ImageSectionMemDiscardable indicates the section can be discarded as needed. + ImageSectionMemDiscardable = 0x02000000 + + // ImageSectionMemNotCached indicates the section cannot be cached. + ImageSectionMemNotCached = 0x04000000 + + // ImageSectionMemNotPaged indicates the section is not pageable. + ImageSectionMemNotPaged = 0x08000000 + + // ImageSectionMemShared indicates the section can be shared in memory. + ImageSectionMemShared = 0x10000000 + + // ImageSectionMemExecute indicates the section can be executed as code. + ImageSectionMemExecute = 0x20000000 + + // ImageSectionMemRead indicates the section can be read.
+ ImageSectionMemRead = 0x40000000 + + // ImageSectionMemWrite indicates the section can be written to. + ImageSectionMemWrite = 0x80000000 +) + +// ImageSectionHeader is part of the section table. In fact, the section table +// is an array of ImageSectionHeader structures, each describing one section of +// the file (its attributes, virtual offset, and so on). The array size is the +// number of sections in the file. +// Binary spec: each struct is 40 bytes, with no padding. +type ImageSectionHeader struct { + + // An 8-byte, null-padded UTF-8 encoded string. If the string is exactly 8 + // characters long, there is no terminating null. For longer names, this + // field contains a slash (/) that is followed by an ASCII representation of + // a decimal number that is an offset into the string table. Executable + // images do not use a string table and do not support section names longer + // than 8 characters. Long names in object files are truncated if they are + // emitted to an executable file. + Name [8]uint8 `json:"name"` + + // The total size of the section when loaded into memory. If this value is + // greater than SizeOfRawData, the section is zero-padded. This field is + // valid only for executable images and should be set to zero for object files. + VirtualSize uint32 `json:"virtual_size"` + + // For executable images, the address of the first byte of the section + // relative to the image base when the section is loaded into memory. + // For object files, this field is the address of the first byte before + // relocation is applied; for simplicity, compilers should set this to zero. + // Otherwise, it is an arbitrary value that is subtracted from offsets during + // relocation. + VirtualAddress uint32 `json:"virtual_address"` + + // The size of the section (for object files) or the size of the initialized + // data on disk (for image files). For executable images, this must be a + // multiple of FileAlignment from the optional header. If this is less than + // VirtualSize, the remainder of the section is zero-filled. Because the + // SizeOfRawData field is rounded but the VirtualSize field is not, it is + // possible for SizeOfRawData to be greater than VirtualSize as well. When + // a section contains only uninitialized data, this field should be zero. + SizeOfRawData uint32 `json:"size_of_raw_data"` + + // The file pointer to the first page of the section within the COFF file. + // For executable images, this must be a multiple of FileAlignment from the + // optional header. For object files, the value should be aligned on a + // 4-byte boundary for best performance. When a section contains only + // uninitialized data, this field should be zero. + PointerToRawData uint32 `json:"pointer_to_raw_data"` + + // The file pointer to the beginning of relocation entries for the section. + // This is set to zero for executable images or if there are no relocations. + PointerToRelocations uint32 `json:"pointer_to_relocations"` + + // The file pointer to the beginning of line-number entries for the section. + // This is set to zero if there are no COFF line numbers. This value should + // be zero for an image because COFF debugging information is deprecated. + PointerToLineNumbers uint32 `json:"pointer_to_line_numbers"` + + // The number of relocation entries for the section. + // This is set to zero for executable images. + NumberOfRelocations uint16 `json:"number_of_relocations"` + + // The number of line-number entries for the section.
This value should be + // zero for an image because COFF debugging information is deprecated. + NumberOfLineNumbers uint16 `json:"number_of_line_numbers"` + + // The flags that describe the characteristics of the section. + Characteristics uint32 `json:"characteristics"` +} + +// Section represents a PE section header, plus additional data like entropy. +type Section struct { + Header ImageSectionHeader `json:"header"` + // Entropy represents the section entropy. This field is not always populated, + // depending on whether entropy calculation is enabled. The reason for + // using a float64 pointer instead of a float64 is to distinguish between + // the case when the section entropy is equal to zero and the case when the + // entropy is nil, meaning that it was never calculated. + Entropy *float64 `json:"entropy,omitempty"` +} + +// ParseSectionHeader parses the PE section headers. Each row of the section +// table is, in effect, a section header. It must immediately follow the PE +// header. +func (pe *File) ParseSectionHeader() (err error) { + + // Get the first section offset. + optionalHeaderOffset := pe.DOSHeader.AddressOfNewEXEHeader + 4 + + uint32(binary.Size(pe.NtHeader.FileHeader)) + offset := optionalHeaderOffset + + uint32(pe.NtHeader.FileHeader.SizeOfOptionalHeader) + + // Track invalid/suspicious values while parsing sections. + maxErr := 3 + + secHeader := ImageSectionHeader{} + numberOfSections := pe.NtHeader.FileHeader.NumberOfSections + secHeaderSize := uint32(binary.Size(secHeader)) + + // The section header indexing in the table is one-based, with the order of + // the sections defined by the linker. The sections follow one another + // contiguously in the order defined by the section header table, with + // starting RVAs aligned by the value of the SectionAlignment field of the + // PE header.
+ for i := uint16(0); i < numberOfSections; i++ { + err := pe.structUnpack(&secHeader, offset, secHeaderSize) + if err != nil { + return err + } + + if secEnd := int64(secHeader.PointerToRawData) + int64(secHeader.SizeOfRawData); secEnd > pe.OverlayOffset { + pe.OverlayOffset = secEnd + } + + countErr := 0 + sec := Section{Header: secHeader} + secName := sec.String() + + if (ImageSectionHeader{}) == secHeader { + pe.Anomalies = append(pe.Anomalies, "Section `"+secName+"` Contents are null-bytes") + countErr++ + } + + if secHeader.SizeOfRawData+secHeader.PointerToRawData > pe.size { + pe.Anomalies = append(pe.Anomalies, "Section `"+secName+ "` SizeOfRawData is larger than file") + countErr++ + } + + if pe.adjustFileAlignment(secHeader.PointerToRawData) > pe.size { + pe.Anomalies = append(pe.Anomalies, "Section `"+secName+ "` PointerToRawData points beyond the end of the file") + countErr++ + } + + if secHeader.VirtualSize > 0x10000000 { + pe.Anomalies = append(pe.Anomalies, "Section `"+secName+ "` VirtualSize is extremely large > 256MiB") + countErr++ + } + + if pe.adjustSectionAlignment(secHeader.VirtualAddress) > 0x10000000 { + pe.Anomalies = append(pe.Anomalies, "Section `"+secName+ "` VirtualAddress is beyond 0x10000000") + countErr++ + } + + var fileAlignment uint32 + switch pe.Is64 { + case true: + fileAlignment = pe.NtHeader.OptionalHeader.(ImageOptionalHeader64).FileAlignment + case false: + fileAlignment = pe.NtHeader.OptionalHeader.(ImageOptionalHeader32).FileAlignment + } + if fileAlignment != 0 && secHeader.PointerToRawData%fileAlignment != 0 { + pe.Anomalies = append(pe.Anomalies, "Section `"+secName+ "` PointerToRawData is not multiple of FileAlignment") + countErr++ + } + + if countErr >= maxErr { + break + } + + // Append to the list of sections. + if pe.opts.SectionEntropy { + entropy := sec.CalculateEntropy(pe) + sec.Entropy = &entropy + } + pe.Sections = append(pe.Sections, sec) + + offset += secHeaderSize + } + + // Sort the sections by their VirtualAddress. This allows checking + // for potentially overlapping sections in badly constructed PEs. + sort.Sort(byVirtualAddress(pe.Sections)) + + if pe.NtHeader.FileHeader.NumberOfSections > 0 && len(pe.Sections) > 0 { + offset += secHeaderSize * uint32(pe.NtHeader.FileHeader.NumberOfSections) + } + + // There could be a problem if there are no raw data sections + // greater than 0. Example: fc91013eb72529da005110a3403541b6. + // Should this throw an exception if the minimum header offset + // can't be found? + var rawDataPointers []uint32 + for _, sec := range pe.Sections { + if sec.Header.PointerToRawData > 0 { + rawDataPointers = append( + rawDataPointers, pe.adjustFileAlignment( + sec.Header.PointerToRawData)) + } + } + + var lowestSectionOffset uint32 + if len(rawDataPointers) > 0 { + lowestSectionOffset = Min(rawDataPointers) + } else { + lowestSectionOffset = 0 + } + + if lowestSectionOffset == 0 || lowestSectionOffset < offset { + if offset <= pe.size { + pe.Header = pe.data[:offset] + } + } else { + if lowestSectionOffset <= pe.size { + pe.Header = pe.data[:lowestSectionOffset] + } + } + + pe.HasSections = true + return nil +} + +// String stringifies the section name. +func (section *Section) String() string { + return strings.Replace(string(section.Header.Name[:]), "\x00", "", -1) +}
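+
+// NOTE: illustrative sketch added for this review, not part of the upstream
+// saferwall/pe file. After ParseSectionHeader has run, a caller can walk the
+// parsed sections; each entry below pairs a section name with its decoded
+// characteristic flags.
+func exampleWalkSections(file *File) []string {
+	var out []string
+	for _, sec := range file.Sections {
+		out = append(out, sec.String()+": "+strings.Join(sec.PrettySectionFlags(), "|"))
+	}
+	return out
+}
+
+// NextHeaderAddr returns the VirtualAddress of the next section.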
+func (section *Section) NextHeaderAddr(pe *File) uint32 { + for i, currentSection := range pe.Sections { + if i == len(pe.Sections)-1 { + return 0 + } + + if section.Header == currentSection.Header { + return pe.Sections[i+1].Header.VirtualAddress + } + } + + return 0 +} + +// Contains checks whether the section contains a given RVA. +func (section *Section) Contains(rva uint32, pe *File) bool { + + // Check if the SizeOfRawData is realistic. If it's bigger than the size of + // the whole PE file minus the start address of the section it could be + // either truncated or the SizeOfRawData contains a misleading value. + // In either of those cases we take the VirtualSize. + + var size uint32 + adjustedPointer := pe.adjustFileAlignment(section.Header.PointerToRawData) + if uint32(len(pe.data))-adjustedPointer < section.Header.SizeOfRawData { + size = section.Header.VirtualSize + } else { + size = Max(section.Header.SizeOfRawData, section.Header.VirtualSize) + } + vaAdj := pe.adjustSectionAlignment(section.Header.VirtualAddress) + + // Check whether there's any section after the current one that starts before + // the calculated end for the current one. If so, cut the current section's + // size to fit in the range up to where the next section starts. + if section.NextHeaderAddr(pe) != 0 && + section.NextHeaderAddr(pe) > section.Header.VirtualAddress && + vaAdj+size > section.NextHeaderAddr(pe) { + size = section.NextHeaderAddr(pe) - vaAdj + } + + return vaAdj <= rva && rva < vaAdj+size +} + +// Data returns a data chunk from a section. +func (section *Section) Data(start, length uint32, pe *File) []byte { + + pointerToRawDataAdj := pe.adjustFileAlignment( + section.Header.PointerToRawData) + virtualAddressAdj := pe.adjustSectionAlignment( + section.Header.VirtualAddress) + + var offset uint32 + if start == 0 { + offset = pointerToRawDataAdj + } else { + offset = (start - virtualAddressAdj) + pointerToRawDataAdj + } + + if offset > pe.size { + return nil + } + + var end uint32 + if length != 0 { + end = offset + length + } else { + end = offset + section.Header.SizeOfRawData + } + + // PointerToRawData is not adjusted here as we might want to read any possible + // extra bytes that might get cut off by aligning the start (and hence cutting + // something off the end). + if end > section.Header.PointerToRawData+section.Header.SizeOfRawData && + section.Header.PointerToRawData+section.Header.SizeOfRawData > offset { + end = section.Header.PointerToRawData + section.Header.SizeOfRawData + } + + if end > pe.size { + end = pe.size + } + + return pe.data[offset:end] +} + +// CalculateEntropy calculates section entropy. +func (section *Section) CalculateEntropy(pe *File) float64 { + sectionData := section.Data(0, 0, pe) + if sectionData == nil { + return 0.0 + } + + sectionSize := float64(len(sectionData)) + if sectionSize == 0.0 { + return 0.0 + } + + var frequencies [256]uint64 + for _, v := range sectionData { + frequencies[v]++ + } + + var entropy float64 + for _, p := range frequencies { + if p > 0 { + freq := float64(p) / sectionSize + entropy += freq * math.Log2(freq) + } + } + + return -entropy +}
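+
+// NOTE: illustrative standalone sketch added for this review, not part of
+// the upstream saferwall/pe file. It spells out the Shannon entropy formula
+// that CalculateEntropy implements above: H = -sum(p_i * log2(p_i)) over the
+// 256 possible byte values, ranging from 0.0 (constant data) to 8.0
+// (uniformly random bytes).
+func shannonEntropy(data []byte) float64 {
+	if len(data) == 0 {
+		return 0.0
+	}
+	// Count how often each byte value occurs.
+	var freq [256]uint64
+	for _, b := range data {
+		freq[b]++
+	}
+	var h float64
+	n := float64(len(data))
+	for _, c := range freq {
+		if c > 0 {
+			p := float64(c) / n
+			h -= p * math.Log2(p)
+		}
+	}
+	return h
+}
+
+// byVirtualAddress sorts all sections by Virtual Address.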
+type byVirtualAddress []Section + +func (s byVirtualAddress) Len() int { return len(s) } +func (s byVirtualAddress) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s byVirtualAddress) Less(i, j int) bool { + return s[i].Header.VirtualAddress < s[j].Header.VirtualAddress +} + +// byPointerToRawData sorts all sections by PointerToRawData. +type byPointerToRawData []Section + +func (s byPointerToRawData) Len() int { return len(s) } +func (s byPointerToRawData) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s byPointerToRawData) Less(i, j int) bool { + return s[i].Header.PointerToRawData < s[j].Header.PointerToRawData +} + +// PrettySectionFlags returns the string representations of the `Flags` field +// of the section header. +func (section *Section) PrettySectionFlags() []string { + var values []string + + sectionFlags := map[uint32]string{ + //ImageSectionReserved1: "Reserved1", + ImageSectionReserved2: "Reserved2", + ImageSectionReserved3: "Reserved3", + ImageSectionReserved4: "Reserved4", + ImageSectionTypeNoPad: "No Pad", + ImageSectionReserved5: "Reserved5", + ImageSectionCntCode: "Contains Code", + ImageSectionCntInitializedData: "Initialized Data", + ImageSectionCntUninitializedData: "Uninitialized Data", + ImageSectionLnkOther: "Lnk Other", + ImageSectionLnkInfo: "Lnk Info", + ImageSectionReserved6: "Reserved6", + ImageSectionLnkRemove: "LnkRemove", + ImageSectionLnkCOMDAT: "LnkCOMDAT", + ImageSectionGpRel: "GpReferenced", + ImageSectionMemPurgeable: "Purgeable", + ImageSectionMemLocked: "Locked", + ImageSectionMemPreload: "Preload", + ImageSectionAlign1Bytes: "Align1Bytes", + ImageSectionAlign2Bytes: "Align2Bytes", + ImageSectionAlign4Bytes: "Align4Bytes", + ImageSectionAlign8Bytes: "Align8Bytes", + ImageSectionAlign16Bytes: "Align16Bytes", + ImageSectionAlign32Bytes: "Align32Bytes", + ImageSectionAlign64Bytes: "Align64Bytes", + ImageSectionAlign128Bytes: "Align128Bytes", + ImageSectionAlign256Bytes: "Align256Bytes", + ImageSectionAlign512Bytes: "Align512Bytes", + ImageSectionAlign1024Bytes: "Align1024Bytes", + ImageSectionAlign2048Bytes: "Align2048Bytes", + ImageSectionAlign4096Bytes: "Align4096Bytes", + ImageSectionAlign8192Bytes: "Align8192Bytes", + ImageSectionLnkMRelocOvfl: "ExtendedReloc", + ImageSectionMemDiscardable: "Discardable", + ImageSectionMemNotCached: "NotCached", + ImageSectionMemNotPaged: "NotPaged", + ImageSectionMemShared: "Shared", + ImageSectionMemExecute: "Executable", + ImageSectionMemRead: "Readable", + ImageSectionMemWrite: "Writable", + } + + flags := section.Header.Characteristics + for k, v := range sectionFlags { + if (k & flags) == k { + values = append(values, v) + } + } + + return values +} diff --git a/vendor/github.com/saferwall/pe/security.go b/vendor/github.com/saferwall/pe/security.go new file mode 100644 index 00000000..a3ae9431 --- /dev/null +++ b/vendor/github.com/saferwall/pe/security.go @@ -0,0 +1,503 @@ +// Copyright 2018 Saferwall. All rights reserved. +// Use of this source code is governed by an Apache v2 +// license that can be found in the LICENSE file. + +package pe + +import ( + "bytes" + "crypto" + "crypto/x509" + "encoding/binary" + "encoding/hex" + "errors" + "fmt" + "hash" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "reflect" + "runtime" + "sort" + "strings" + "time" + + "go.mozilla.org/pkcs7" +)
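+
+// NOTE: illustrative sketch added for this review, not part of the upstream
+// saferwall/pe file. Each entry in the PE attribute certificate table begins
+// with an 8-byte WIN_CERTIFICATE header: Length (dword), Revision (word) and
+// CertificateType (word), all little-endian, followed by the certificate
+// bytes themselves. A minimal hand-decoder for that header could look like:
+func readWinCertHeader(b []byte) (WinCertificate, error) {
+	var hdr WinCertificate
+	if len(b) < 8 {
+		// Too short to hold the fixed-size WIN_CERTIFICATE prefix.
+		return hdr, ErrSecurityDataDirInvalid
+	}
+	hdr.Length = binary.LittleEndian.Uint32(b[0:4])
+	hdr.Revision = binary.LittleEndian.Uint16(b[4:6])
+	hdr.CertificateType = binary.LittleEndian.Uint16(b[6:8])
+	return hdr, nil
+}
+
+// The options for the WIN_CERTIFICATE Revision member include
+// (but are not limited to) the following.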
+const ( + // WinCertRevision1_0 represents the WIN_CERT_REVISION_1_0 Version 1, + // legacy version of the Win_Certificate structure. + // It is supported only for purposes of verifying legacy Authenticode + // signatures + WinCertRevision1_0 = 0x0100 + + // WinCertRevision2_0 represents the WIN_CERT_REVISION_2_0. Version 2 + // is the current version of the Win_Certificate structure. + WinCertRevision2_0 = 0x0200 +) + +// The options for the WIN_CERTIFICATE CertificateType member include +// (but are not limited to) the items in the following table. Note that some +// values are not currently supported. +const ( + // Certificate contains an X.509 Certificate (Not Supported) + WinCertTypeX509 = 0x0001 + + // Certificate contains a PKCS#7 SignedData structure. + WinCertTypePKCSSignedData = 0x0002 + + // Reserved. + WinCertTypeReserved1 = 0x0003 + + // Terminal Server Protocol Stack Certificate signing (Not Supported). + WinCertTypeTSStackSigned = 0x0004 +) + +var ( + // ErrSecurityDataDirInvalidCertHeader is reported when the certificate + // header in the security directory is invalid. + ErrSecurityDataDirInvalid = errors.New( + `invalid certificate header in security directory`) +) + +// Certificate directory. +type Certificate struct { + Header WinCertificate `json:"header"` + Content pkcs7.PKCS7 `json:"-"` + Raw []byte `json:"-"` + Info CertInfo `json:"info"` + Verified bool `json:"verified"` +} + +// WinCertificate encapsulates a signature used in verifying executable files. +type WinCertificate struct { + // Specifies the length, in bytes, of the signature. + Length uint32 `json:"length"` + + // Specifies the certificate revision. + Revision uint16 `json:"revision"` + + // Specifies the type of certificate. + CertificateType uint16 `json:"certificate_type"` +} + +// CertInfo wraps the important fields of the pkcs7 structure. +// This is what we what keep in JSON marshalling. +type CertInfo struct { + // The certificate authority (CA) that charges customers to issue + // certificates for them. + Issuer string `json:"issuer"` + + // The subject of the certificate is the entity its public key is associated + // with (i.e. the "owner" of the certificate). + Subject string `json:"subject"` + + // The certificate won't be valid before this timestamp. + NotBefore time.Time `json:"not_before"` + + // The certificate won't be valid after this timestamp. + NotAfter time.Time `json:"not_after"` + + // The serial number MUST be a positive integer assigned by the CA to each + // certificate. It MUST be unique for each certificate issued by a given CA + // (i.e., the issuer name and serial number identify a unique certificate). + // CAs MUST force the serialNumber to be a non-negative integer. + // For convenience, we convert the big int to string. + SerialNumber string `json:"serial_number"` + + // The identifier for the cryptographic algorithm used by the CA to sign + // this certificate. + SignatureAlgorithm x509.SignatureAlgorithm `json:"signature_algorithm"` + + // The Public Key Algorithm refers to the public key inside the certificate. + // This certificate is used together with the matching private key to prove + // the identity of the peer. 
+	PublicKeyAlgorithm x509.PublicKeyAlgorithm `json:"public_key_algorithm"`
+}
+
+type RelRange struct {
+	Start  uint32
+	Length uint32
+}
+
+type byStart []RelRange
+
+func (s byStart) Len() int      { return len(s) }
+func (s byStart) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s byStart) Less(i, j int) bool {
+	return s[i].Start < s[j].Start
+}
+
+type Range struct {
+	Start uint32
+	End   uint32
+}
+
+func (pe *File) parseLocations() (map[string]*RelRange, error) {
+	location := make(map[string]*RelRange, 3)
+
+	fileHdrSize := uint32(binary.Size(pe.NtHeader.FileHeader))
+	optionalHeaderOffset := pe.DOSHeader.AddressOfNewEXEHeader + 4 + fileHdrSize
+
+	var (
+		oh32 ImageOptionalHeader32
+		oh64 ImageOptionalHeader64
+
+		optionalHeaderSize uint32
+	)
+
+	switch pe.Is64 {
+	case true:
+		oh64 = pe.NtHeader.OptionalHeader.(ImageOptionalHeader64)
+		optionalHeaderSize = oh64.SizeOfHeaders
+	case false:
+		oh32 = pe.NtHeader.OptionalHeader.(ImageOptionalHeader32)
+		optionalHeaderSize = oh32.SizeOfHeaders
+	}
+
+	if optionalHeaderSize > pe.size-optionalHeaderOffset {
+		msgF := "the optional header exceeds the file length (%d + %d > %d)"
+		return nil, fmt.Errorf(msgF, optionalHeaderSize, optionalHeaderOffset, pe.size)
+	}
+
+	if optionalHeaderSize < 68 {
+		msgF := "the optional header size is %d < 68, which is insufficient for authenticode"
+		return nil, fmt.Errorf(msgF, optionalHeaderSize)
+	}
+
+	// The location of the checksum
+	location["checksum"] = &RelRange{optionalHeaderOffset + 64, 4}
+
+	var rvaBase, certBase, numberOfRvaAndSizes uint32
+	switch pe.Is64 {
+	case true:
+		rvaBase = optionalHeaderOffset + 108
+		certBase = optionalHeaderOffset + 144
+		numberOfRvaAndSizes = oh64.NumberOfRvaAndSizes
+	case false:
+		rvaBase = optionalHeaderOffset + 92
+		certBase = optionalHeaderOffset + 128
+		numberOfRvaAndSizes = oh32.NumberOfRvaAndSizes
+	}
+
+	if optionalHeaderOffset+optionalHeaderSize < rvaBase+4 {
+		pe.logger.Debug("The PE Optional Header size cannot accommodate the NumberOfRvaAndSizes field")
+		return location, nil
+	}
+
+	if numberOfRvaAndSizes < uint32(5) {
+		pe.logger.Debugf("The PE Optional Header does not have a Certificate Table entry in its "+
+			"Data Directory; NumberOfRvaAndSizes = %d", numberOfRvaAndSizes)
+		return location, nil
+	}
+
+	if optionalHeaderOffset+optionalHeaderSize < certBase+8 {
+		pe.logger.Debug("The PE Optional Header size cannot accommodate a Certificate Table " +
+			"entry in its Data Directory")
+		return location, nil
+	}
+
+	// The location of the entry of the Certificate Table in the Data Directory
+	location["datadir_certtable"] = &RelRange{certBase, 8}
+
+	var address, size uint32
+	switch pe.Is64 {
+	case true:
+		dirEntry := oh64.DataDirectory[ImageDirectoryEntryCertificate]
+		address = dirEntry.VirtualAddress
+		size = dirEntry.Size
+	case false:
+		dirEntry := oh32.DataDirectory[ImageDirectoryEntryCertificate]
+		address = dirEntry.VirtualAddress
+		size = dirEntry.Size
+	}
+
+	if size == 0 {
+		pe.logger.Debug("The Certificate Table is empty")
+		return location, nil
+	}
+
+	if int64(address) < int64(optionalHeaderSize)+int64(optionalHeaderOffset) ||
+		int64(address)+int64(size) > int64(pe.size) {
+		pe.logger.Debugf("The location of the Certificate Table in the binary makes no sense and "+
+			"is either beyond the boundaries of the file, or in the middle of the PE header; "+
+			"VirtualAddress: %x, Size: %x", address, size)
+		return location, nil
+	}
+
+	// The location of the Certificate Table
+	location["certtable"] = &RelRange{address, size}
+	return location, nil
+}
+
+// Authentihash generates the SHA-256 PE image file hash.
+// The relevant sections to exclude during hashing are:
+//	- The location of the checksum
+//	- The location of the entry of the Certificate Table in the Data Directory
+//	- The location of the Certificate Table.
+func (pe *File) Authentihash() []byte {
+	return pe.AuthentihashExt(crypto.SHA256.New())[0]
+}
+
+// AuthentihashExt generates PE image file hashes using the given hashers.
+// The relevant sections to exclude during hashing are:
+//	- The location of the checksum
+//	- The location of the entry of the Certificate Table in the Data Directory
+//	- The location of the Certificate Table.
+func (pe *File) AuthentihashExt(hashers ...hash.Hash) [][]byte {
+
+	locationMap, err := pe.parseLocations()
+	if err != nil {
+		return nil
+	}
+
+	locationSlice := make([]RelRange, 0, len(locationMap))
+	for k, v := range locationMap {
+		if stringInSlice(k, []string{"checksum", "datadir_certtable", "certtable"}) {
+			locationSlice = append(locationSlice, *v)
+		}
+	}
+	sort.Sort(byStart(locationSlice))
+
+	ranges := make([]*Range, 0, len(locationSlice))
+	start := uint32(0)
+	for _, r := range locationSlice {
+		ranges = append(ranges, &Range{Start: start, End: r.Start})
+		start = r.Start + r.Length
+	}
+	ranges = append(ranges, &Range{Start: start, End: pe.size})
+
+	var rd io.ReaderAt
+	if pe.f != nil {
+		rd = pe.f
+	} else {
+		rd = bytes.NewReader(pe.data)
+	}
+
+	for _, v := range ranges {
+		for _, hasher := range hashers {
+			sr := io.NewSectionReader(rd, int64(v.Start), int64(v.End)-int64(v.Start))
+			io.Copy(hasher, sr)
+			sr.Seek(0, io.SeekStart)
+		}
+	}
+
+	var ret [][]byte
+	for _, hasher := range hashers {
+		ret = append(ret, hasher.Sum(nil))
+	}
+
+	return ret
+}
+
+// The security directory contains the authenticode signature, which is a digital
+// signature format that is used, among other purposes, to determine the origin
+// and integrity of software binaries. Authenticode is based on the Public-Key
+// Cryptography Standards (PKCS) #7 standard and uses X.509 v3 certificates to
+// bind an Authenticode-signed file to the identity of a software publisher.
+// This data is not loaded into memory as part of the image file.
+func (pe *File) parseSecurityDirectory(rva, size uint32) error {
+
+	var pkcs *pkcs7.PKCS7
+	var isValid bool
+	certInfo := CertInfo{}
+	certHeader := WinCertificate{}
+	certSize := uint32(binary.Size(certHeader))
+	var certContent []byte
+
+	// The virtual address value from the Certificate Table entry in the
+	// Optional Header Data Directory is a file offset to the first attribute
+	// certificate entry.
+	fileOffset := rva
+
+	// A PE file can be dual-signed by applying multiple signatures, which is
+	// strongly recommended when using deprecated hashing algorithms such as MD5.
+	for {
+		err := pe.structUnpack(&certHeader, fileOffset, certSize)
+		if err != nil {
+			return ErrOutsideBoundary
+		}
+
+		if fileOffset+certHeader.Length > pe.size {
+			return ErrOutsideBoundary
+		}
+
+		if certHeader.Length == 0 {
+			return ErrSecurityDataDirInvalid
+		}
+
+		certContent = pe.data[fileOffset+certSize : fileOffset+certHeader.Length]
+		pkcs, err = pkcs7.Parse(certContent)
+		if err != nil {
+			pe.Certificates = Certificate{Header: certHeader, Raw: certContent}
+			pe.HasCertificate = true
+			return err
+		}
+
+		// The pkcs7.PKCS7 structure contains many fields that we are not
+		// interested in, so create another structure, similar to the _CERT_INFO
+		// structure, which contains only the important information.
+		serialNumber := pkcs.Signers[0].IssuerAndSerialNumber.SerialNumber
+		for _, cert := range pkcs.Certificates {
+			if !reflect.DeepEqual(cert.SerialNumber, serialNumber) {
+				continue
+			}
+
+			certInfo.SerialNumber = hex.EncodeToString(cert.SerialNumber.Bytes())
+			certInfo.PublicKeyAlgorithm = cert.PublicKeyAlgorithm
+			certInfo.SignatureAlgorithm = cert.SignatureAlgorithm
+
+			certInfo.NotAfter = cert.NotAfter
+			certInfo.NotBefore = cert.NotBefore
+
+			// Issuer infos
+			if len(cert.Issuer.Country) > 0 {
+				certInfo.Issuer = cert.Issuer.Country[0]
+			}
+
+			if len(cert.Issuer.Province) > 0 {
+				certInfo.Issuer += ", " + cert.Issuer.Province[0]
+			}
+
+			if len(cert.Issuer.Locality) > 0 {
+				certInfo.Issuer += ", " + cert.Issuer.Locality[0]
+			}
+
+			certInfo.Issuer += ", " + cert.Issuer.CommonName
+
+			// Subject infos
+			if len(cert.Subject.Country) > 0 {
+				certInfo.Subject = cert.Subject.Country[0]
+			}
+
+			if len(cert.Subject.Province) > 0 {
+				certInfo.Subject += ", " + cert.Subject.Province[0]
+			}
+
+			if len(cert.Subject.Locality) > 0 {
+				certInfo.Subject += ", " + cert.Subject.Locality[0]
+			}
+
+			if len(cert.Subject.Organization) > 0 {
+				certInfo.Subject += ", " + cert.Subject.Organization[0]
+			}
+
+			certInfo.Subject += ", " + cert.Subject.CommonName
+
+			break
+		}
+
+		// Let's mark the file as signed, then we verify if the
+		// signature is valid.
+		pe.IsSigned = true
+
+		// Let's load the system root certs.
+		if !pe.opts.DisableCertValidation {
+			var certPool *x509.CertPool
+			if runtime.GOOS == "windows" {
+				certPool, err = loadSystemRoots()
+			} else {
+				certPool, err = x509.SystemCertPool()
+			}
+
+			// Verify the signature. This will also verify the chain of trust of
+			// the end-entity signer cert to one of the roots in the trust store.
+			if err == nil {
+				err = pkcs.VerifyWithChain(certPool)
+				if err == nil {
+					isValid = true
+				} else {
+					isValid = false
+				}
+			}
+		}
+
+		// Subsequent entries are accessed by advancing that entry's dwLength
+		// bytes, rounded up to an 8-byte multiple, from the start of the
+		// current attribute certificate entry.
+		nextOffset := certHeader.Length + fileOffset
+		nextOffset = ((nextOffset + 8 - 1) / 8) * 8
+
+		// Check if we walked the entire table.
+		if nextOffset == fileOffset+size {
+			break
+		}
+
+		fileOffset = nextOffset
+	}
+
+	pe.Certificates = Certificate{Header: certHeader, Content: *pkcs,
+		Raw: certContent, Info: certInfo, Verified: isValid}
+	pe.HasCertificate = true
+	return nil
+}
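[Editor's note] parseSecurityDirectory above advances through WIN_CERTIFICATE entries by rounding each entry's end offset up to an 8-byte multiple, via `((n + 8 - 1) / 8) * 8`. A minimal standalone sketch of that alignment step (`align8` is a hypothetical helper, not in the package):

```go
package main

import "fmt"

// align8 rounds an offset up to the next 8-byte boundary, the same
// arithmetic the certificate walk uses between attribute certificate
// entries. (x + 7) &^ 7 is equivalent to ((x + 8 - 1) / 8) * 8.
func align8(off uint32) uint32 {
	return (off + 7) &^ 7
}

func main() {
	// A 0x10C-byte WIN_CERTIFICATE starting at 0x4000 ends at 0x410C,
	// so the next entry begins at the aligned offset 0x4110.
	fmt.Printf("0x%X\n", align8(0x4000+0x10C)) // 0x4110
}
```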
+
+// loadSystemRoots manually downloads all the trusted root certificates
+// in Windows by spawning certutil, then adding root certs individually
+// to the cert pool. Initially, when running on Windows, Go's SystemCertPool()
+// used to enumerate all the certificates in the Windows store using
+// (CertEnumCertificatesInStore). Unfortunately, Windows does not ship
+// with all of its root certificates installed. Instead, it downloads them
+// on-demand. As a consequence, this behavior leads to non-deterministic
+// results. The Go team then disabled loading Windows root certs.
+func loadSystemRoots() (*x509.CertPool, error) {
+
+	needSync := true
+	roots := x509.NewCertPool()
+
+	// Create a temporary dir in the OS temp folder
+	// if it does not exist.
+	dir := filepath.Join(os.TempDir(), "certs")
+	info, err := os.Stat(dir)
+	if os.IsNotExist(err) {
+		if err = os.Mkdir(dir, 0755); err != nil {
+			return roots, err
+		}
+	} else {
+		now := time.Now()
+		modTime := info.ModTime()
+		diff := now.Sub(modTime).Hours()
+		if diff < 24 {
+			needSync = false
+		}
+	}
+
+	// Use certutil to download all the root certs.
+	if needSync {
+		cmd := exec.Command("certutil", "-syncWithWU", dir)
+		out, err := cmd.Output()
+		if err != nil {
+			return roots, err
+		}
+		if !strings.Contains(string(out), "command completed successfully") {
+			return roots, err
+		}
+	}
+
+	files, err := ioutil.ReadDir(dir)
+	if err != nil {
+		return roots, err
+	}
+
+	for _, f := range files {
+		if !strings.HasSuffix(f.Name(), ".crt") {
+			continue
+		}
+		certPath := filepath.Join(dir, f.Name())
+		certData, err := ioutil.ReadFile(certPath)
+		if err != nil {
+			return roots, err
+		}
+
+		if crt, err := x509.ParseCertificate(certData); err == nil {
+			roots.AddCert(crt)
+		}
+	}
+
+	return roots, nil
+}
diff --git a/vendor/github.com/saferwall/pe/staticcheck.conf b/vendor/github.com/saferwall/pe/staticcheck.conf
new file mode 100644
index 00000000..8c311179
--- /dev/null
+++ b/vendor/github.com/saferwall/pe/staticcheck.conf
@@ -0,0 +1 @@
+checks = ["all", "-ST1000", "-U1000", "-SA1019"]
\ No newline at end of file
diff --git a/vendor/github.com/saferwall/pe/symbol.go b/vendor/github.com/saferwall/pe/symbol.go
new file mode 100644
index 00000000..738bd6be
--- /dev/null
+++ b/vendor/github.com/saferwall/pe/symbol.go
@@ -0,0 +1,459 @@
+// Copyright 2018 Saferwall. All rights reserved.
+// Use of this source code is governed by Apache v2 license
+// license that can be found in the LICENSE file.
+
+package pe
+
+import (
+	"bytes"
+	"encoding/binary"
+	"errors"
+	"strings"
+)
+
+const (
+
+	// MaxDefaultCOFFSymbolsCount represents the default maximum number of COFF
+	// symbols to parse. Some malware uses a fake huge NumberOfSymbols that
+	// can cause an OOM exception.
+	// Example: 0000e876c5b712b6b7b3ce97f757ddd918fb3dbdc5a3938e850716fbd841309f
+	MaxDefaultCOFFSymbolsCount = 0x10000
+
+	// MaxCOFFSymStrLength represents the maximum string length of a COFF symbol
+	// to read.
+	MaxCOFFSymStrLength = 0x50
+
+	//
+	// Type Representation
+	//
+
+	// ImageSymTypeNull indicates no type information or unknown base type.
+	// Microsoft tools use this setting.
+	ImageSymTypeNull = 0
+
+	// ImageSymTypeVoid indicates no valid type; used with void pointers and
+	// functions.
+	ImageSymTypeVoid = 1
+
+	// ImageSymTypeChar indicates a character (signed byte).
+	ImageSymTypeChar = 2
+
+	// ImageSymTypeShort indicates a 2-byte signed integer.
+	ImageSymTypeShort = 3
+
+	// ImageSymTypeInt indicates a natural integer type (normally 4 bytes in
+	// Windows).
+	ImageSymTypeInt = 4
+
+	// ImageSymTypeLong indicates a 4-byte signed integer.
+	ImageSymTypeLong = 5
+
+	// ImageSymTypeFloat indicates a 4-byte floating-point number.
+	ImageSymTypeFloat = 6
+
+	// ImageSymTypeDouble indicates an 8-byte floating-point number.
+	ImageSymTypeDouble = 7
+
+	// ImageSymTypeStruct indicates a structure.
+	ImageSymTypeStruct = 8
+
+	// ImageSymTypeUnion indicates a union.
+	ImageSymTypeUnion = 9
+
+	// ImageSymTypeEnum indicates an enumerated type.
+	ImageSymTypeEnum = 10
+
+	// ImageSymTypeMoe indicates a member of an enumeration (a specific value).
+	ImageSymTypeMoe = 11
+
+	// ImageSymTypeByte indicates a byte; unsigned 1-byte integer.
+	ImageSymTypeByte = 12
+
+	// ImageSymTypeWord indicates a word; unsigned 2-byte integer.
+	ImageSymTypeWord = 13
+
+	// ImageSymTypeUint indicates an unsigned integer of natural size
+	// (normally, 4 bytes).
+	ImageSymTypeUint = 14
+
+	// ImageSymTypeDword indicates an unsigned 4-byte integer.
+	ImageSymTypeDword = 15
+
+	//
+	// Storage Class
+	//
+
+	// ImageSymClassEndOfFunction indicates a special symbol that represents
+	// the end of function, for debugging purposes.
+	ImageSymClassEndOfFunction = 0xff
+
+	// ImageSymClassNull indicates no assigned storage class.
+	ImageSymClassNull = 0
+
+	// ImageSymClassAutomatic indicates automatic (stack) variable. The Value
+	// field specifies the stack frame offset.
+	ImageSymClassAutomatic = 1
+
+	// ImageSymClassExternal indicates a value that Microsoft tools use for
+	// external symbols. The Value field indicates the size if the section
+	// number is IMAGE_SYM_UNDEFINED (0). If the section number is not zero,
+	// then the Value field specifies the offset within the section.
+	ImageSymClassExternal = 2
+
+	// ImageSymClassStatic indicates the offset of the symbol within the
+	// section. If the Value field is zero, then the symbol represents a
+	// section name.
+	ImageSymClassStatic = 3
+
+	// ImageSymClassRegister indicates a register variable. The Value field
+	// specifies the register number.
+	ImageSymClassRegister = 4
+
+	// ImageSymClassExternalDef indicates a symbol that is defined externally.
+	ImageSymClassExternalDef = 5
+
+	// ImageSymClassLabel indicates a code label that is defined within the
+	// module. The Value field specifies the offset of the symbol within the
+	// section.
+	ImageSymClassLabel = 6
+
+	// ImageSymClassUndefinedLabel indicates a reference to a code label that
+	// is not defined.
+	ImageSymClassUndefinedLabel = 7
+
+	// ImageSymClassMemberOfStruct indicates the structure member. The Value
+	// field specifies the nth member.
+	ImageSymClassMemberOfStruct = 8
+
+	// ImageSymClassArgument indicates a formal argument (parameter) of a
+	// function. The Value field specifies the nth argument.
+	ImageSymClassArgument = 9
+
+	// ImageSymClassStructTag indicates the structure tag-name entry.
+	ImageSymClassStructTag = 10
+
+	// ImageSymClassMemberOfUnion indicates a union member. The Value field
+	// specifies the nth member.
+	ImageSymClassMemberOfUnion = 11
+
+	// ImageSymClassUnionTag indicates the union tag-name entry.
+	ImageSymClassUnionTag = 12
+
+	// ImageSymClassTypeDefinition indicates a typedef entry.
+	ImageSymClassTypeDefinition = 13
+
+	// ImageSymClassUndefinedStatic indicates a static data declaration.
+	ImageSymClassUndefinedStatic = 14
+
+	// ImageSymClassEnumTag indicates an enumerated type tag-name entry.
+	ImageSymClassEnumTag = 15
+
+	// ImageSymClassMemberOfEnum indicates a member of an enumeration. The
+	// Value field specifies the nth member.
+	ImageSymClassMemberOfEnum = 16
+
+	// ImageSymClassRegisterParam indicates a register parameter.
+	ImageSymClassRegisterParam = 17
+
+	// ImageSymClassBitField indicates a bit-field reference. The Value field
+	// specifies the nth bit in the bit field.
+	ImageSymClassBitField = 18
+
+	// ImageSymClassBlock indicates a .bb (beginning of block) or .eb (end of
+	// block) record. The Value field is the relocatable address of the code
+	// location.
+	ImageSymClassBlock = 100
+
+	// ImageSymClassFunction indicates a value that Microsoft tools use for
+	// symbol records that define the extent of a function: begin function
+	// (.bf), end function (.ef), and lines in function (.lf). For .lf
+	// records, the Value field gives the number of source lines in the
+	// function. For .ef records, the Value field gives the size of the
+	// function code.
+	ImageSymClassFunction = 101
+
+	// ImageSymClassEndOfStruct indicates an end-of-structure entry.
+	ImageSymClassEndOfStruct = 102
+
+	// ImageSymClassFile indicates a value that Microsoft tools, as well as
+	// traditional COFF format, use for the source-file symbol record. The
+	// symbol is followed by auxiliary records that name the file.
+	ImageSymClassFile = 103
+
+	// ImageSymClassSsection indicates a definition of a section (Microsoft
+	// tools use STATIC storage class instead).
+	ImageSymClassSsection = 104
+
+	// ImageSymClassWeakExternal indicates a weak external. For more
+	// information, see Auxiliary Format 3: Weak Externals.
+	ImageSymClassWeakExternal = 24
+
+	// ImageSymClassClrToken indicates a CLR token symbol. The name is an ASCII
+	// string that consists of the hexadecimal value of the token. For more
+	// information, see CLR Token Definition (Object Only).
+	ImageSymClassClrToken = 25
+
+	//
+	// Section Number Values.
+	//
+
+	// ImageSymUndefined indicates that the symbol record is not yet assigned a
+	// section. A value of zero indicates that a reference to an external
+	// symbol is defined elsewhere. A value of non-zero is a common symbol with
+	// a size that is specified by the value.
+	ImageSymUndefined = 0
+
+	// ImageSymAbsolute indicates that the symbol has an absolute
+	// (non-relocatable) value and is not an address.
+	ImageSymAbsolute = -1
+
+	// ImageSymDebug indicates that the symbol provides general type or
+	// debugging information but does not correspond to a section. Microsoft
+	// tools use this setting along with .file records (storage class FILE).
+	ImageSymDebug = -2
+)
+
+var (
+	errCOFFTableNotPresent = errors.New(
+		"PE image does not contain a COFF symbol table")
+	errNoCOFFStringInTable = errors.New(
+		"PE image has a PointerToSymbolTable but no string in the COFF string table")
+	errCOFFSymbolOutOfBounds = errors.New(
+		"COFF symbol offset out of bounds")
+	errCOFFSymbolsTooHigh = errors.New(
+		"COFF symbols count is absurdly high")
+)
+
+// COFFSymbol represents an entry in the COFF symbol table, which is an
+// array of records, each 18 bytes long. Each record is either a standard or
+// auxiliary symbol-table record. A standard record defines a symbol or name
+// and has the following format.
+type COFFSymbol struct {
+	// The name of the symbol, represented by a union of three structures. An
+	// array of 8 bytes is used if the name is not more than 8 bytes long.
+	// union {
+	//     BYTE ShortName[8];
+	//     struct {
+	//         DWORD Short; // if 0, use LongName
+	//         DWORD Long;  // offset into string table
+	//     } Name;
+	//     DWORD LongName[2]; // PBYTE [2]
+	// } N;
+	Name [8]byte `json:"name"`
+
+	// The value that is associated with the symbol. The interpretation of this
+	// field depends on SectionNumber and StorageClass. A typical meaning is
+	// the relocatable address.
+	Value uint32 `json:"value"`
+
+	// The signed integer that identifies the section, using a one-based index
+	// into the section table. Some values have special meaning.
+	// See "Section Number Values."
+	SectionNumber int16 `json:"section_number"`
+
+	// A number that represents type. Microsoft tools set this field to
+	// 0x20 (function) or 0x0 (not a function). For more information,
+	// see Type Representation.
+	Type uint16 `json:"type"`
+
+	// An enumerated value that represents storage class.
+	// For more information, see Storage Class.
+	StorageClass uint8 `json:"storage_class"`
+
+	// The number of auxiliary symbol table entries that follow this record.
+	NumberOfAuxSymbols uint8 `json:"number_of_aux_symbols"`
+}
+
+// COFF holds properties related to the COFF format.
+type COFF struct {
+	SymbolTable       []COFFSymbol `json:"symbol_table"`
+	StringTable       []string     `json:"string_table"`
+	StringTableOffset uint32       `json:"string_table_offset"`
+	// Map the symbol offset => symbol name.
+	StringTableM map[uint32]string `json:"-"`
+}
+
+// ParseCOFFSymbolTable parses the COFF symbol table. The symbol table is
+// inherited from the traditional COFF format. It is distinct from Microsoft
+// Visual C++ debug information. A file can contain both a COFF symbol table
+// and Visual C++ debug information, and the two are kept separate. Some
+// Microsoft tools use the symbol table for limited but important purposes,
+// such as communicating COMDAT information to the linker. Section names and
+// file names, as well as code and data symbols, are listed in the symbol table.
+func (pe *File) ParseCOFFSymbolTable() error {
+	pointerToSymbolTable := pe.NtHeader.FileHeader.PointerToSymbolTable
+	if pointerToSymbolTable == 0 {
+		return errCOFFTableNotPresent
+	}
+
+	symCount := pe.NtHeader.FileHeader.NumberOfSymbols
+	if symCount == 0 {
+		return nil
+	}
+	if symCount > pe.opts.MaxCOFFSymbolsCount {
+		pe.addAnomaly(AnoCOFFSymbolsCount)
+		return errCOFFSymbolsTooHigh
+	}
+
+	// The location of the symbol table is indicated in the COFF header.
+	offset := pe.NtHeader.FileHeader.PointerToSymbolTable
+
+	// The symbol table is an array of records, each 18 bytes long.
+	size := uint32(binary.Size(COFFSymbol{}))
+	symbols := make([]COFFSymbol, symCount)
+
+	// Each record is either a standard or auxiliary symbol-table record.
+	// A standard record defines a symbol or name and has the COFFSymbol struct format.
+	for i := uint32(0); i < symCount; i++ {
+		err := pe.structUnpack(&symbols[i], offset, size)
+		if err != nil {
+			return err
+		}
+		offset += size
+	}
+
+	pe.COFF.SymbolTable = symbols
+
+	// Get the COFF string table.
+	pe.COFFStringTable()
+
+	pe.HasCOFF = true
+	return nil
+}
+
+// COFFStringTable retrieves the list of strings in the COFF string table if
+// any.
+func (pe *File) COFFStringTable() error {
+	m := make(map[uint32]string)
+	pointerToSymbolTable := pe.NtHeader.FileHeader.PointerToSymbolTable
+	if pointerToSymbolTable == 0 {
+		return errCOFFTableNotPresent
+	}
+
+	symCount := pe.NtHeader.FileHeader.NumberOfSymbols
+	if symCount == 0 {
+		return nil
+	}
+	if symCount > pe.opts.MaxCOFFSymbolsCount {
+		pe.addAnomaly(AnoCOFFSymbolsCount)
+		return errCOFFSymbolsTooHigh
+	}
+
+	// The COFF string table immediately follows the COFF symbol table. The
+	// position of this table is found by taking the symbol table address in
+	// the COFF header and adding the number of symbols multiplied by the size
+	// of a symbol.
+	size := uint32(binary.Size(COFFSymbol{}))
+	offset := pointerToSymbolTable + (size * symCount)
+
+	// At the beginning of the COFF string table are 4 bytes that contain the
+	// total size (in bytes) of the rest of the string table. This size
+	// includes the size field itself, so that the value in this location would
+	// be 4 if no strings were present.
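[Editor's note] The code that follows records this offset and walks the table. As a standalone illustration of the same on-disk layout (a hypothetical helper over a raw byte slice, not this package's API): the table starts with a uint32 total size that counts itself, followed by NUL-terminated strings that symbols reference by offset from the table start.

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// readStringTable parses a raw COFF string table blob into an
// offset -> string map, mirroring the layout described above.
func readStringTable(tbl []byte) map[uint32]string {
	out := make(map[uint32]string)
	if len(tbl) < 4 {
		return out
	}
	size := binary.LittleEndian.Uint32(tbl[:4])
	if size <= 4 || int(size) > len(tbl) {
		return out // empty or truncated table
	}
	off := uint32(4)
	for off < size {
		end := bytes.IndexByte(tbl[off:size], 0)
		if end < 0 {
			break
		}
		out[off] = string(tbl[off : off+uint32(end)])
		off += uint32(end) + 1
	}
	return out
}

func main() {
	// Size field = 4 (itself) + len("long_name\x00") = 14.
	tbl := append([]byte{14, 0, 0, 0}, []byte("long_name\x00")...)
	fmt.Println(readStringTable(tbl)[4]) // long_name
}
```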
+ pe.COFF.StringTableOffset = offset + strTableSize, err := pe.ReadUint32(offset) + if err != nil { + return err + } + if strTableSize <= 4 { + return errNoCOFFStringInTable + } + offset += 4 + + // Following the size are null-terminated strings that are pointed to by + // symbols in the COFF symbol table. We create a map to map offset to + // string. + end := offset + strTableSize - 4 + for offset < end { + len, str := pe.readASCIIStringAtOffset(offset, MaxCOFFSymStrLength) + if len == 0 { + break + } + m[offset] = str + offset += len + 1 + pe.COFF.StringTable = append(pe.COFF.StringTable, str) + } + + pe.COFF.StringTableM = m + return nil +} + +// String returns the representation of the symbol name. +func (symbol *COFFSymbol) String(pe *File) (string, error) { + var short, long uint32 + + // The ShortName field in a symbol table consists of 8 bytes + // that contain the name itself, if it is not more than 8 + // bytes long, or the ShortName field gives an offset into + // the string table. + highDw := bytes.NewBuffer(symbol.Name[4:]) + lowDw := bytes.NewBuffer(symbol.Name[:4]) + errl := binary.Read(lowDw, binary.LittleEndian, &short) + errh := binary.Read(highDw, binary.LittleEndian, &long) + if errl != nil || errh != nil { + return "", errCOFFSymbolOutOfBounds + } + + // To determine whether the name itself or an offset is given, + // test the first 4 bytes for equality to zero. + if short != 0 { + name := strings.Replace(string(symbol.Name[:]), "\x00", "", -1) + return name, nil + } + + // Long name offset to the string table. + strOff := pe.COFF.StringTableOffset + long + name := pe.COFF.StringTableM[strOff] + return name, nil +} + +// SectionNumberName returns the name of the section corresponding to a section +// symbol number if any. +func (symbol *COFFSymbol) SectionNumberName(pe *File) string { + + // Normally, the Section Value field in a symbol table entry is a one-based + // index into the section table. However, this field is a signed integer + // and can take negative values. The following values, less than one, have + // special meanings. + if symbol.SectionNumber > 0 && symbol.SectionNumber < int16(len(pe.Sections)) { + return pe.Sections[symbol.SectionNumber-1].String() + } + + switch symbol.SectionNumber { + case ImageSymUndefined: + return "Undefined" + case ImageSymAbsolute: + return "Absolute" + case ImageSymDebug: + return "Debug" + } + + return "?" +} + +// PrettyCOFFTypeRepresentation returns the string representation of the `Type` +// field of a COFF table entry. +func (pe *File) PrettyCOFFTypeRepresentation(k uint16) string { + coffSymTypeMap := map[uint16]string{ + ImageSymTypeNull: "Null", + ImageSymTypeVoid: "Void", + ImageSymTypeChar: "Char", + ImageSymTypeShort: "Short", + ImageSymTypeInt: "Int", + ImageSymTypeLong: "Long", + ImageSymTypeFloat: "Float", + ImageSymTypeDouble: "Double", + ImageSymTypeStruct: "Struct", + ImageSymTypeUnion: "Union", + ImageSymTypeEnum: "Enum", + ImageSymTypeMoe: "Moe", + ImageSymTypeByte: "Byte", + ImageSymTypeWord: "Word", + ImageSymTypeUint: "Uint", + ImageSymTypeDword: "Dword", + } + + if value, ok := coffSymTypeMap[k]; ok { + return value + } + return "" +} diff --git a/vendor/github.com/saferwall/pe/tls.go b/vendor/github.com/saferwall/pe/tls.go new file mode 100644 index 00000000..8cccc2a6 --- /dev/null +++ b/vendor/github.com/saferwall/pe/tls.go @@ -0,0 +1,189 @@ +// Copyright 2018 Saferwall. All rights reserved. 
+// Use of this source code is governed by Apache v2 license +// license that can be found in the LICENSE file. + +package pe + +import ( + "encoding/binary" +) + +// TLSDirectoryCharacteristicsType represents the type of a TLS directory +// Characteristics. +type TLSDirectoryCharacteristicsType uint32 + +// TLSDirectory represents tls directory information with callback entries. +type TLSDirectory struct { + + // of type *IMAGE_TLS_DIRECTORY32 or *IMAGE_TLS_DIRECTORY64 structure. + Struct interface{} `json:"struct"` + + // of type []uint32 or []uint64. + Callbacks interface{} `json:"callbacks"` +} + +// ImageTLSDirectory32 represents the IMAGE_TLS_DIRECTORY32 structure. +// It Points to the Thread Local Storage initialization section. +type ImageTLSDirectory32 struct { + + // The starting address of the TLS template. The template is a block of data + // that is used to initialize TLS data. + StartAddressOfRawData uint32 `json:"start_address_of_raw_data"` + + // The address of the last byte of the TLS, except for the zero fill. + // As with the Raw Data Start VA field, this is a VA, not an RVA. + EndAddressOfRawData uint32 `json:"end_address_of_raw_data"` + + // The location to receive the TLS index, which the loader assigns. This + // location is in an ordinary data section, so it can be given a symbolic + // name that is accessible to the program. + AddressOfIndex uint32 `json:"address_of_index"` + + // The pointer to an array of TLS callback functions. The array is + // null-terminated, so if no callback function is supported, this field + // points to 4 bytes set to zero. + AddressOfCallBacks uint32 `json:"address_of_callbacks"` + + // The size in bytes of the template, beyond the initialized data delimited + // by the Raw Data Start VA and Raw Data End VA fields. The total template + // size should be the same as the total size of TLS data in the image file. + // The zero fill is the amount of data that comes after the initialized + // nonzero data. + SizeOfZeroFill uint32 `json:"size_of_zero_fill"` + + // The four bits [23:20] describe alignment info. Possible values are those + // defined as IMAGE_SCN_ALIGN_*, which are also used to describe alignment + // of section in object files. The other 28 bits are reserved for future use. + Characteristics TLSDirectoryCharacteristicsType `json:"characteristics"` +} + +// ImageTLSDirectory64 represents the IMAGE_TLS_DIRECTORY64 structure. +// It Points to the Thread Local Storage initialization section. +type ImageTLSDirectory64 struct { + // The starting address of the TLS template. The template is a block of data + // that is used to initialize TLS data. + StartAddressOfRawData uint64 `json:"start_address_of_raw_data"` + + // The address of the last byte of the TLS, except for the zero fill. As + // with the Raw Data Start VA field, this is a VA, not an RVA. + EndAddressOfRawData uint64 `json:"end_address_of_raw_data"` + + // The location to receive the TLS index, which the loader assigns. This + // location is in an ordinary data section, so it can be given a symbolic + // name that is accessible to the program. + AddressOfIndex uint64 `json:"address_of_index"` + + // The pointer to an array of TLS callback functions. The array is + // null-terminated, so if no callback function is supported, this field + // points to 4 bytes set to zero. + AddressOfCallBacks uint64 `json:"address_of_callbacks"` + + // The size in bytes of the template, beyond the initialized data delimited + // by the Raw Data Start VA and Raw Data End VA fields. 
The total template
+	// size should be the same as the total size of TLS data in the image file.
+	// The zero fill is the amount of data that comes after the initialized
+	// nonzero data.
+	SizeOfZeroFill uint32 `json:"size_of_zero_fill"`
+
+	// The four bits [23:20] describe alignment info. Possible values are those
+	// defined as IMAGE_SCN_ALIGN_*, which are also used to describe alignment
+	// of section in object files. The other 28 bits are reserved for future use.
+	Characteristics TLSDirectoryCharacteristicsType `json:"characteristics"`
+}
+
+// TLS provides direct PE and COFF support for static thread local storage (TLS).
+// TLS is a special storage class that Windows supports in which a data object
+// is not an automatic (stack) variable, yet is local to each individual thread
+// that runs the code. Thus, each thread can maintain a different value for a
+// variable declared by using TLS.
+func (pe *File) parseTLSDirectory(rva, size uint32) error {
+
+	tls := TLSDirectory{}
+
+	if pe.Is64 {
+		tlsDir := ImageTLSDirectory64{}
+		tlsSize := uint32(binary.Size(tlsDir))
+		fileOffset := pe.GetOffsetFromRva(rva)
+		err := pe.structUnpack(&tlsDir, fileOffset, tlsSize)
+		if err != nil {
+			return err
+		}
+		tls.Struct = tlsDir
+
+		if tlsDir.AddressOfCallBacks != 0 {
+			var callbacks []uint64
+			rvaAddressOfCallBacks := uint32(tlsDir.AddressOfCallBacks -
+				pe.NtHeader.OptionalHeader.(ImageOptionalHeader64).ImageBase)
+			offset := pe.GetOffsetFromRva(rvaAddressOfCallBacks)
+			for {
+				c, err := pe.ReadUint64(offset)
+				if err != nil || c == 0 {
+					break
+				}
+				callbacks = append(callbacks, c)
+				offset += 8
+			}
+			tls.Callbacks = callbacks
+		}
+	} else {
+		tlsDir := ImageTLSDirectory32{}
+		tlsSize := uint32(binary.Size(tlsDir))
+		fileOffset := pe.GetOffsetFromRva(rva)
+		err := pe.structUnpack(&tlsDir, fileOffset, tlsSize)
+		if err != nil {
+			return err
+		}
+		tls.Struct = tlsDir
+
+		// 94a9dc17d47b03f6fb01cb639e25503b37761b452e7c07ec6b6c2280635f1df9
+		// Callbacks may be empty.
+		if tlsDir.AddressOfCallBacks != 0 {
+			var callbacks []uint32
+			rvaAddressOfCallBacks := tlsDir.AddressOfCallBacks -
+				pe.NtHeader.OptionalHeader.(ImageOptionalHeader32).ImageBase
+			offset := pe.GetOffsetFromRva(rvaAddressOfCallBacks)
+			for {
+				c, err := pe.ReadUint32(offset)
+				if err != nil || c == 0 {
+					break
+				}
+				callbacks = append(callbacks, c)
+				offset += 4
+			}
+			tls.Callbacks = callbacks
+		}
+	}
+
+	pe.TLS = tls
+	pe.HasTLS = true
+	return nil
+}
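[Editor's note] The callback arrays read above are null-terminated arrays of pointers; parseTLSDirectory first converts AddressOfCallBacks from a VA to an RVA by subtracting ImageBase, then reads entries until it hits a zero. A standalone sketch of that walk over raw bytes (illustrative, not part of the package):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// readCallbacks walks a null-terminated array of little-endian 64-bit
// pointers, the on-disk shape of a 64-bit TLS callback array.
func readCallbacks(data []byte) []uint64 {
	var cbs []uint64
	for off := 0; off+8 <= len(data); off += 8 {
		v := binary.LittleEndian.Uint64(data[off:])
		if v == 0 {
			break // terminating NULL entry
		}
		cbs = append(cbs, v)
	}
	return cbs
}

func main() {
	raw := make([]byte, 24)
	binary.LittleEndian.PutUint64(raw[0:], 0x140001000)
	binary.LittleEndian.PutUint64(raw[8:], 0x140002000)
	// raw[16:24] stays zero: the terminator.
	fmt.Printf("%#x\n", readCallbacks(raw)) // [0x140001000 0x140002000]
}
```

TLS callbacks run before the executable's entry point, which is why malware analysis tooling such as this parser surfaces them explicitly.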
+
+// String returns the string representations of the `Characteristics` field of
+// the TLS directory.
+func (characteristics TLSDirectoryCharacteristicsType) String() string {
+
+	m := map[TLSDirectoryCharacteristicsType]string{
+		ImageSectionAlign1Bytes:    "Align 1-Byte",
+		ImageSectionAlign2Bytes:    "Align 2-Bytes",
+		ImageSectionAlign4Bytes:    "Align 4-Bytes",
+		ImageSectionAlign8Bytes:    "Align 8-Bytes",
+		ImageSectionAlign16Bytes:   "Align 16-Bytes",
+		ImageSectionAlign32Bytes:   "Align 32-Bytes",
+		ImageSectionAlign64Bytes:   "Align 64-Bytes",
+		ImageSectionAlign128Bytes:  "Align 128-Bytes",
+		ImageSectionAlign256Bytes:  "Align 256-Bytes",
+		ImageSectionAlign512Bytes:  "Align 512-Bytes",
+		ImageSectionAlign1024Bytes: "Align 1024-Bytes",
+		ImageSectionAlign2048Bytes: "Align 2048-Bytes",
+		ImageSectionAlign4096Bytes: "Align 4096-Bytes",
+		ImageSectionAlign8192Bytes: "Align 8192-Bytes",
+	}
+
+	v, ok := m[characteristics]
+	if ok {
+		return v
+	}
+
+	return "?"
+}
diff --git a/vendor/github.com/saferwall/pe/version.go b/vendor/github.com/saferwall/pe/version.go
new file mode 100644
index 00000000..a21239e9
--- /dev/null
+++ b/vendor/github.com/saferwall/pe/version.go
@@ -0,0 +1,380 @@
+// Copyright 2018 Saferwall. All rights reserved.
+// Use of this source code is governed by Apache v2 license
+// license that can be found in the LICENSE file.
+
+package pe
+
+import (
+	"bytes"
+	"encoding/binary"
+	"fmt"
+)
+
+const (
+	// VersionResourceType identifies the version resource type in the resource directory
+	VersionResourceType = 16
+
+	// VsVersionInfoString is the UTF16-encoded string that identifies the VS_VERSION_INFO block
+	VsVersionInfoString = "VS_VERSION_INFO"
+
+	// VsFileInfoSignature is the file info signature
+	VsFileInfoSignature uint32 = 0xFEEF04BD
+
+	// StringFileInfoString is the UTF16-encoded string that identifies the StringFileInfo block
+	StringFileInfoString = "StringFileInfo"
+	// VarFileInfoString is the UTF16-encoded string that identifies the VarFileInfo block
+	VarFileInfoString = "VarFileInfo"
+
+	// VsVersionInfoStringLength specifies the length of the VS_VERSION_INFO structure
+	VsVersionInfoStringLength uint32 = 6
+	// StringFileInfoLength specifies the length of the StringFileInfo structure
+	StringFileInfoLength uint32 = 6
+	// StringTableLength specifies the length of the StringTable structure
+	StringTableLength uint32 = 6
+	// StringLength specifies the length of the String structure
+	StringLength uint32 = 6
+	// LangIDLength specifies the length of the language identifier string.
+	// It is represented as an 8-digit hexadecimal number stored as a Unicode string.
+	LangIDLength uint32 = 8*2 + 1
+)
+
+// VsVersionInfo represents the organization of data in
+// a file-version resource. It is the root structure that
+// contains all other file-version information structures.
+type VsVersionInfo struct {
+	// Length is the length, in bytes, of the VS_VERSIONINFO structure.
+	// This length does not include any padding that aligns any
+	// subsequent version resource data on a 32-bit boundary.
+	Length uint16
+	// ValueLength is the length, in bytes, of arbitrary data associated
+	// with the VS_VERSIONINFO structure.
+	// This value is zero if there is no data associated with the
+	// current version structure.
+	ValueLength uint16
+	// Type specifies the type of data in the version resource: 1 if the
+	// resource contains text data and 0 if it contains binary data.
+	Type uint16
+}
+
+func (pe *File) parseVersionInfo(e ResourceDirectoryEntry) (*VsVersionInfo, error) {
+	offset := pe.GetOffsetFromRva(e.Data.Struct.OffsetToData)
+	b, err := pe.ReadBytesAtOffset(offset, e.Data.Struct.Size)
+	if err != nil {
+		return nil, err
+	}
+	var v VsVersionInfo
+	if err := binary.Read(bytes.NewBuffer(b), binary.LittleEndian, &v); err != nil {
+		return nil, err
+	}
+	b, err = pe.ReadBytesAtOffset(offset+VsVersionInfoStringLength, uint32(v.ValueLength))
+	if err != nil {
+		return nil, err
+	}
+	vsVersionString, err := DecodeUTF16String(b)
+	if err != nil {
+		return nil, err
+	}
+	if vsVersionString != VsVersionInfoString {
+		return nil, fmt.Errorf("invalid VS_VERSION_INFO block. %s", vsVersionString)
+	}
+	return &v, nil
+}
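[Editor's note] The VsFixedFileInfo structure defined just after this note packs a four-part version number into two 32-bit halves: major.minor live in the high and low words of FileVersionMS, and build.revision in FileVersionLS. A small sketch of the decoding (hypothetical helper, not in the package):

```go
package main

import "fmt"

// formatFileVersion turns the MS/LS halves of a VS_FIXEDFILEINFO binary
// version number into the familiar dotted form using HIWORD/LOWORD splits.
func formatFileVersion(ms, ls uint32) string {
	return fmt.Sprintf("%d.%d.%d.%d", ms>>16, ms&0xFFFF, ls>>16, ls&0xFFFF)
}

func main() {
	// 10.0.19041.1 would be encoded as MS=0x000A0000, LS=0x4A610001.
	fmt.Println(formatFileVersion(0x000A0000, 0x4A610001))
}
```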
+
+// VsFixedFileInfo contains version information for a file.
+// This information is language and code page independent.
+type VsFixedFileInfo struct {
+	// Signature contains the value 0xFEEF04BD. This is used
+	// with the `key` member of the VS_VERSIONINFO structure
+	// when searching a file for the VS_FIXEDFILEINFO structure.
+	Signature uint32
+	// StructVer is the binary version number of this structure.
+	// The high-order word of this member contains the major version
+	// number, and the low-order word contains the minor version number.
+	StructVer uint32
+	// FileVersionMS denotes the most significant 32 bits of the file's
+	// binary version number.
+	FileVersionMS uint32
+	// FileVersionLS denotes the least significant 32 bits of the file's
+	// binary version number.
+	FileVersionLS uint32
+	// ProductVersionMS represents the most significant 32 bits of the
+	// binary version number of the product with which this file was distributed.
+	ProductVersionMS uint32
+	// ProductVersionLS represents the least significant 32 bits of the
+	// binary version number of the product with which this file was distributed.
+	ProductVersionLS uint32
+	// FileFlagMask contains a bitmask that specifies the valid bits in FileFlags.
+	// A bit is valid only if it was defined when the file was created.
+	FileFlagMask uint32
+	// FileFlags contains a bitmask that specifies the Boolean attributes of the file.
+	// For example, the file contains debugging information or is compiled with debugging
+	// features enabled if FileFlags is equal to 0x00000001L (VS_FF_DEBUG).
+	FileFlags uint32
+	// FileOS represents the operating system for which this file was designed.
+	FileOS uint32
+	// FileType describes the general type of file.
+	FileType uint32
+	// FileSubtype specifies the function of the file. The possible values depend on the value of FileType.
+	FileSubtype uint32
+	// FileDateMS are the most significant 32 bits of the file's 64-bit binary creation date and time stamp.
+	FileDateMS uint32
+	// FileDateLS are the least significant 32 bits of the file's 64-bit binary creation date and time stamp.
+	FileDateLS uint32
+}
+
+// Size returns the size of this structure in bytes.
+func (f *VsFixedFileInfo) Size() uint32 { return uint32(binary.Size(f)) }
+
+func (f *VsFixedFileInfo) GetStringFileInfoOffset(e ResourceDirectoryEntry) uint32 {
+	return alignDword(VsVersionInfoStringLength+uint32(2*len(VsVersionInfoString)+1)+f.Size(), e.Data.Struct.OffsetToData)
+}
+
+func (f *VsFixedFileInfo) GetOffset(e ResourceDirectoryEntry, pe *File) uint32 {
+	offset := pe.GetOffsetFromRva(e.Data.Struct.OffsetToData) + VsVersionInfoStringLength
+	offset += uint32(2*len(VsVersionInfoString)) + 1
+	return alignDword(offset, e.Data.Struct.OffsetToData)
+}
+
+func (pe *File) parseFixedFileInfo(e ResourceDirectoryEntry) (*VsFixedFileInfo, error) {
+	var f VsFixedFileInfo
+	offset := f.GetOffset(e, pe)
+	b, err := pe.ReadBytesAtOffset(offset, f.Size())
+	if err != nil {
+		return nil, err
+	}
+	if err := binary.Read(bytes.NewBuffer(b), binary.LittleEndian, &f); err != nil {
+		return nil, err
+	}
+	if f.Signature != VsFileInfoSignature {
+		return nil, fmt.Errorf("invalid file info signature %d", f.Signature)
+	}
+	return &f, nil
+}
+
+// StringFileInfo represents the organization of data in a
+// file-version resource. It contains version information
+// that can be displayed for a particular language and code page.
+type StringFileInfo struct {
+	Length      uint16
+	ValueLength uint16
+	Type        uint16
+}
+
+func (s *StringFileInfo) GetStringTableOffset(offset uint32) uint32 {
+	return offset + StringFileInfoLength + uint32(2*len(StringFileInfoString)) + 1
+}
+
+func (s *StringFileInfo) GetOffset(rva uint32, e ResourceDirectoryEntry, pe *File) uint32 {
+	offset := pe.GetOffsetFromRva(e.Data.Struct.OffsetToData) + rva
+	return alignDword(offset, e.Data.Struct.OffsetToData)
+}
+
+func (pe *File) parseStringFileInfo(rva uint32, e ResourceDirectoryEntry) (*StringFileInfo, string, error) {
+	var s StringFileInfo
+	offset := s.GetOffset(rva, e, pe)
+	b, err := pe.ReadBytesAtOffset(offset, StringFileInfoLength)
+	if err != nil {
+		return nil, "", err
+	}
+	if err := binary.Read(bytes.NewBuffer(b), binary.LittleEndian, &s); err != nil {
+		return nil, "", err
+	}
+	b, err = pe.ReadBytesAtOffset(offset+StringFileInfoLength, uint32(len(StringFileInfoString)*2)+1)
+	if err != nil {
+		return nil, "", err
+	}
+	str, err := DecodeUTF16String(b)
+	return &s, str, err
+}
+
+// StringTable represents the organization of data in a
+// file-version resource. It contains language and code
+// page formatting information for the version strings.
+type StringTable struct {
+	Length      uint16
+	ValueLength uint16
+	Type        uint16
+}
+
+func (s *StringTable) GetStringOffset(offset uint32, e ResourceDirectoryEntry) uint32 {
+	return alignDword(offset+StringTableLength+LangIDLength, e.Data.Struct.OffsetToData)
+}
+
+func (s *StringTable) GetOffset(rva uint32, e ResourceDirectoryEntry, pe *File) uint32 {
+	offset := pe.GetOffsetFromRva(e.Data.Struct.OffsetToData) + rva
+	return alignDword(offset, e.Data.Struct.OffsetToData)
+}
+
+func (pe *File) parseStringTable(rva uint32, e ResourceDirectoryEntry) (*StringTable, error) {
+	var s StringTable
+	offset := s.GetOffset(rva, e, pe)
+	b, err := pe.ReadBytesAtOffset(offset, StringTableLength)
+	if err != nil {
+		return nil, err
+	}
+	if err := binary.Read(bytes.NewBuffer(b), binary.LittleEndian, &s); err != nil {
+		return nil, err
+	}
+	// Read the 8-digit hexadecimal number stored as a Unicode string.
+	// The four most significant digits represent the language identifier.
+	// The four least significant digits represent the code page for which
+	// the data is formatted.
+	b, err = pe.ReadBytesAtOffset(offset+StringTableLength, (8*2)+1)
+	if err != nil {
+		return nil, err
+	}
+	langID, err := DecodeUTF16String(b)
+	if err != nil {
+		return nil, err
+	}
+	if len(langID) != int(LangIDLength/2) {
+		return nil, fmt.Errorf("invalid language identifier length. Expected: %d, Got: %d",
+			LangIDLength/2,
+			len(langID))
+	}
+	return &s, nil
+}
+
+// String represents the organization of data in a
+// file-version resource. It contains a string that
+// describes a specific aspect of a file, for example,
+// a file's version, its copyright notices, or its trademarks.
+type String struct {
+	Length      uint16
+	ValueLength uint16
+	Type        uint16
+}
+
+func (s *String) GetOffset(rva uint32, e ResourceDirectoryEntry, pe *File) uint32 {
+	offset := pe.GetOffsetFromRva(e.Data.Struct.OffsetToData) + rva
+	return alignDword(offset, e.Data.Struct.OffsetToData)
+}
+
+// getOffsetAndPadding is a variant of GetOffset which also returns the number
+// of bytes that were added to achieve 32-bit alignment. The padding value
+// needs to be added to the string length to figure out the offset of the next
+// string.
+func (s *String) getOffsetAndPadding(rva uint32, e ResourceDirectoryEntry, pe *File) (uint32, uint16) {
+	unalignedOffset := pe.GetOffsetFromRva(e.Data.Struct.OffsetToData) + rva
+	alignedOffset := alignDword(unalignedOffset, e.Data.Struct.OffsetToData)
+	return alignedOffset, uint16(alignedOffset - unalignedOffset)
+}
+
+func (pe *File) parseString(rva uint32, e ResourceDirectoryEntry) (string, string, uint16, error) {
+	var s String
+	offset, padding := s.getOffsetAndPadding(rva, e, pe)
+	b, err := pe.ReadBytesAtOffset(offset, StringLength)
+	if err != nil {
+		return "", "", 0, err
+	}
+	if err := binary.Read(bytes.NewBuffer(b), binary.LittleEndian, &s); err != nil {
+		return "", "", 0, err
+	}
+	const maxKeySize = 100
+	b, err = pe.ReadBytesAtOffset(offset+StringLength, maxKeySize)
+	if err != nil {
+		return "", "", 0, err
+	}
+	key, err := DecodeUTF16String(b)
+	if err != nil {
+		return "", "", 0, err
+	}
+	valueOffset := alignDword(uint32(2*(len(key)+1))+offset+StringLength, e.Data.Struct.OffsetToData)
+	b, err = pe.ReadBytesAtOffset(valueOffset, uint32(s.Length))
+	if err != nil {
+		return "", "", 0, err
+	}
+	value, err := DecodeUTF16String(b)
+	if err != nil {
+		return "", "", 0, err
+	}
+	// The caller of this function uses the string length as an offset to find
+	// the next string in the file. We need to add the alignment padding here
+	// since the caller is unaware of the byte alignment, and will add the
+	// string length to the unaligned offset to get the address of the next
+	// string.
+	totalLength := s.Length + padding
+	return key, value, totalLength, nil
+}
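[Editor's note] For context, a minimal consumer of ParseVersionResources (defined next) might look like the following. Only ParseVersionResources is confirmed by this file; the New/Parse entry points are quoted from memory of the saferwall/pe README and should be checked against the package docs:

```go
package main

import (
	"fmt"
	"log"

	peparser "github.com/saferwall/pe"
)

func main() {
	// Assumed entry points: New opens and maps the file, Parse walks the
	// headers and data directories, including the resource directory.
	f, err := peparser.New(`C:\Windows\System32\kernel32.dll`, &peparser.Options{})
	if err != nil {
		log.Fatal(err)
	}
	if err := f.Parse(); err != nil {
		log.Fatal(err)
	}

	// ParseVersionResources returns a flat key/value map of the version
	// strings, e.g. CompanyName, FileVersion, ProductName.
	vers, err := f.ParseVersionResources()
	if err != nil {
		log.Fatal(err)
	}
	for k, v := range vers {
		fmt.Printf("%s = %s\n", k, v)
	}
}
```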
+
+// ParseVersionResources parses file version strings from the version resource
+// directory. This directory contains several structures starting with VS_VERSION_INFO
+// with references to child StringFileInfo structures. In addition, StringFileInfo
+// contains the StringTable structure with String entries describing the name and value
+// of each file version string.
+func (pe *File) ParseVersionResources() (map[string]string, error) {
+	vers := make(map[string]string)
+	if pe.opts.OmitResourceDirectory {
+		return vers, nil
+	}
+	for _, e := range pe.Resources.Entries {
+		if e.ID != VersionResourceType {
+			continue
+		}
+
+		directory := e.Directory.Entries[0].Directory
+
+		for _, e := range directory.Entries {
+			ver, err := pe.parseVersionInfo(e)
+			if err != nil {
+				return vers, err
+			}
+			ff, err := pe.parseFixedFileInfo(e)
+			if err != nil {
+				return vers, err
+			}
+
+			offset := ff.GetStringFileInfoOffset(e)
+
+			for {
+				f, n, err := pe.parseStringFileInfo(offset, e)
+				if err != nil || f.Length == 0 {
+					break
+				}
+
+				switch n {
+				case StringFileInfoString:
+					tableOffset := f.GetStringTableOffset(offset)
+					for {
+						table, err := pe.parseStringTable(tableOffset, e)
+						if err != nil {
+							break
+						}
+						stringOffset := table.GetStringOffset(tableOffset, e)
+						for stringOffset < tableOffset+uint32(table.Length) {
+							k, v, l, err := pe.parseString(stringOffset, e)
+							if err != nil {
+								break
+							}
+							vers[k] = v
+							if l == 0 {
+								stringOffset = tableOffset + uint32(table.Length)
+							} else {
+								stringOffset = stringOffset + uint32(l)
+							}
+						}
+						// handle potential infinite loops
+						if uint32(table.Length)+tableOffset > tableOffset {
+							break
+						}
+						if tableOffset > uint32(f.Length) {
+							break
+						}
+					}
+				case VarFileInfoString:
+					break
+				default:
+					break
+				}
+
+				offset += uint32(f.Length)
+
+				// StringFileInfo/VarFileInfo structs consumed?
+				if offset >= uint32(ver.Length) {
+					break
+				}
+			}
+		}
+	}
+	return vers, nil
+}
diff --git a/vendor/github.com/secure-systems-lab/go-securesystemslib/cjson/canonicaljson.go b/vendor/github.com/secure-systems-lab/go-securesystemslib/cjson/canonicaljson.go
index fb1d5918..abc860a4 100644
--- a/vendor/github.com/secure-systems-lab/go-securesystemslib/cjson/canonicaljson.go
+++ b/vendor/github.com/secure-systems-lab/go-securesystemslib/cjson/canonicaljson.go
@@ -6,8 +6,8 @@ import (
 	"errors"
 	"fmt"
 	"reflect"
-	"regexp"
 	"sort"
+	"strings"
 )
 
 /*
@@ -18,8 +18,12 @@ escaping backslashes ("\") and double quotes (") and wrapping the resulting
 string in double quotes (").
 */
 func encodeCanonicalString(s string) string {
-	re := regexp.MustCompile(`([\"\\])`)
-	return fmt.Sprintf("\"%s\"", re.ReplaceAllString(s, "\\$1"))
+	// Escape backslashes
+	s = strings.ReplaceAll(s, "\\", "\\\\")
+	// Escape double quotes
+	s = strings.ReplaceAll(s, "\"", "\\\"")
+	// Wrap with double quotes
+	return fmt.Sprintf("\"%s\"", s)
 }
 
 /*
@@ -28,16 +32,7 @@ object according to the OLPC canonical JSON specification (see
 http://wiki.laptop.org/go/Canonical_JSON) and write it to the passed
 *bytes.Buffer.  If canonicalization fails it returns an error.
 */
-func encodeCanonical(obj interface{}, result *bytes.Buffer) (err error) {
-	// Since this function is called recursively, we use panic if an error occurs
-	// and recover in a deferred function, which is always called before
-	// returning. There we set the error that is returned eventually.
-	defer func() {
-		if r := recover(); r != nil {
-			err = errors.New(r.(string))
-		}
-	}()
-
+func encodeCanonical(obj interface{}, result *strings.Builder) (err error) {
 	switch objAsserted := obj.(type) {
 	case string:
 		result.WriteString(encodeCanonicalString(objAsserted))
@@ -90,10 +85,9 @@ func encodeCanonical(obj interface{}, result *bytes.Buffer) (err error) {
 
 		// Canonicalize map
 		for i, key := range mapKeys {
-			// Note: `key` must be a `string` (see `case map[string]interface{}`) and
-			// canonicalization of strings cannot err out (see `case string`), thus
-			// no error handling is needed here.
-			encodeCanonical(key, result)
+			if err := encodeCanonical(key, result); err != nil {
+				return err
+			}
 
 			result.WriteString(":")
 
 			if err := encodeCanonical(objAsserted[key], result); err != nil {
@@ -120,7 +114,16 @@ slice. It uses the OLPC canonical JSON specification (see
 http://wiki.laptop.org/go/Canonical_JSON).  If canonicalization fails the byte
 slice is nil and the second return value contains the error.
 */
-func EncodeCanonical(obj interface{}) ([]byte, error) {
+func EncodeCanonical(obj interface{}) (out []byte, err error) {
+	// We use panic if an error occurs and recover in a deferred function,
+	// which is always called before returning.
+	// There we set the error that is returned eventually.
+	defer func() {
+		if r := recover(); r != nil {
+			err = errors.New(r.(string))
+		}
+	}()
+
 	// FIXME: Terrible hack to turn the passed struct into a map, converting
 	// the struct's variable names to the json key names defined in the struct
 	data, err := json.Marshal(obj)
@@ -136,10 +139,13 @@ func EncodeCanonical(obj interface{}) ([]byte, error) {
 	}
 
 	// Create a buffer and write the canonicalized JSON bytes to it
-	var result bytes.Buffer
+	var result strings.Builder
+	// Allocate output result buffer with the input size.
+	result.Grow(len(data))
+	// Recursively encode the jsonmap
 	if err := encodeCanonical(jsonMap, &result); err != nil {
 		return nil, err
 	}
 
-	return result.Bytes(), nil
+	return []byte(result.String()), nil
 }
diff --git a/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/envelope.go b/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/envelope.go
new file mode 100644
index 00000000..ed223e90
--- /dev/null
+++ b/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/envelope.go
@@ -0,0 +1,64 @@
+package dsse
+
+import (
+	"encoding/base64"
+	"fmt"
+)
+
+/*
+Envelope captures an envelope as described by the DSSE specification. See here:
+https://github.com/secure-systems-lab/dsse/blob/master/envelope.md
+*/
+type Envelope struct {
+	PayloadType string      `json:"payloadType"`
+	Payload     string      `json:"payload"`
+	Signatures  []Signature `json:"signatures"`
+}
+
+/*
+DecodeB64Payload returns the serialized body, decoded from the envelope's
+payload field. A flexible decoder is used, first trying standard base64, then
+URL-encoded base64.
+*/
+func (e *Envelope) DecodeB64Payload() ([]byte, error) {
+	return b64Decode(e.Payload)
+}
+
+/*
+Signature represents a generic in-toto signature that contains the identifier
+of the key which was used to create the signature.
+The used signature scheme has to be agreed upon by the signer and verifier
+out of band.
+The signature is a base64 encoding of the raw bytes from the signature
+algorithm.
+*/
+type Signature struct {
+	KeyID string `json:"keyid"`
+	Sig   string `json:"sig"`
+}
+
+/*
+PAE implements the DSSE Pre-Authentic Encoding
+https://github.com/secure-systems-lab/dsse/blob/master/protocol.md#signature-definition
+*/
+func PAE(payloadType string, payload []byte) []byte {
+	return []byte(fmt.Sprintf("DSSEv1 %d %s %d %s",
+		len(payloadType), payloadType,
+		len(payload), payload))
+}
+
+/*
+Both standard and url encoding are allowed:
+https://github.com/secure-systems-lab/dsse/blob/master/envelope.md
+*/
+func b64Decode(s string) ([]byte, error) {
+	b, err := base64.StdEncoding.DecodeString(s)
+	if err != nil {
+		b, err = base64.URLEncoding.DecodeString(s)
+		if err != nil {
+			return nil, fmt.Errorf("unable to base64 decode payload (is payload in the right format?)")
+		}
+	}
+
+	return b, nil
+}
diff --git a/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/sign.go b/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/sign.go
index 3dc05a42..85aed102 100644
--- a/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/sign.go
+++ b/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/sign.go
@@ -5,111 +5,35 @@ https://github.com/secure-systems-lab/dsse
 package dsse
 
 import (
+	"context"
 	"encoding/base64"
 	"errors"
-	"fmt"
 )
 
-// ErrUnknownKey indicates that the implementation does not recognize the
-// key.
-var ErrUnknownKey = errors.New("unknown key")
-
-// ErrNoSignature indicates that an envelope did not contain any signatures.
-var ErrNoSignature = errors.New("no signature found")
-
 // ErrNoSigners indicates that no signer was provided.
 var ErrNoSigners = errors.New("no signers provided")
 
-/*
-Envelope captures an envelope as described by the Secure Systems Lab
-Signing Specification. See here:
-https://github.com/secure-systems-lab/signing-spec/blob/master/envelope.md
-*/
-type Envelope struct {
-	PayloadType string      `json:"payloadType"`
-	Payload     string      `json:"payload"`
-	Signatures  []Signature `json:"signatures"`
-}
-
-/*
-DecodeB64Payload returns the serialized body, decoded
-from the envelope's payload field. A flexible
-decoder is used, first trying standard base64, then
-URL-encoded base64.
-*/
-func (e *Envelope) DecodeB64Payload() ([]byte, error) {
-	return b64Decode(e.Payload)
-}
-
-/*
-Signature represents a generic in-toto signature that contains the identifier
-of the key which was used to create the signature.
-The used signature scheme has to be agreed upon by the signer and verifer
-out of band.
-The signature is a base64 encoding of the raw bytes from the signature
-algorithm.
-*/
-type Signature struct {
-	KeyID string `json:"keyid"`
-	Sig   string `json:"sig"`
-}
-
-/*
-PAE implementes the DSSE Pre-Authentic Encoding
-https://github.com/secure-systems-lab/dsse/blob/master/protocol.md#signature-definition
-*/
-func PAE(payloadType string, payload []byte) []byte {
-	return []byte(fmt.Sprintf("DSSEv1 %d %s %d %s",
-		len(payloadType), payloadType,
-		len(payload), payload))
-}
-
-/*
-Signer defines the interface for an abstract signing algorithm.
-The Signer interface is used to inject signature algorithm implementations
-into the EnevelopeSigner. This decoupling allows for any signing algorithm
-and key management system can be used.
-The full message is provided as the parameter. If the signature algorithm
-depends on hashing of the message prior to signature calculation, the
-implementor of this interface must perform such hashing.
diff --git a/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/sign.go b/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/sign.go
index 3dc05a42..85aed102 100644
--- a/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/sign.go
+++ b/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/sign.go
@@ -5,111 +5,35 @@ https://github.com/secure-systems-lab/dsse
 package dsse
 
 import (
+	"context"
 	"encoding/base64"
 	"errors"
-	"fmt"
 )
 
-// ErrUnknownKey indicates that the implementation does not recognize the
-// key.
-var ErrUnknownKey = errors.New("unknown key")
-
-// ErrNoSignature indicates that an envelope did not contain any signatures.
-var ErrNoSignature = errors.New("no signature found")
-
 // ErrNoSigners indicates that no signer was provided.
 var ErrNoSigners = errors.New("no signers provided")
 
-/*
-Envelope captures an envelope as described by the Secure Systems Lab
-Signing Specification. See here:
-https://github.com/secure-systems-lab/signing-spec/blob/master/envelope.md
-*/
-type Envelope struct {
-	PayloadType string      `json:"payloadType"`
-	Payload     string      `json:"payload"`
-	Signatures  []Signature `json:"signatures"`
-}
-
-/*
-DecodeB64Payload returns the serialized body, decoded
-from the envelope's payload field. A flexible
-decoder is used, first trying standard base64, then
-URL-encoded base64.
-*/
-func (e *Envelope) DecodeB64Payload() ([]byte, error) {
-	return b64Decode(e.Payload)
-}
-
-/*
-Signature represents a generic in-toto signature that contains the identifier
-of the key which was used to create the signature.
-The used signature scheme has to be agreed upon by the signer and verifer
-out of band.
-The signature is a base64 encoding of the raw bytes from the signature
-algorithm.
-*/
-type Signature struct {
-	KeyID string `json:"keyid"`
-	Sig   string `json:"sig"`
-}
-
-/*
-PAE implementes the DSSE Pre-Authentic Encoding
-https://github.com/secure-systems-lab/dsse/blob/master/protocol.md#signature-definition
-*/
-func PAE(payloadType string, payload []byte) []byte {
-	return []byte(fmt.Sprintf("DSSEv1 %d %s %d %s",
-		len(payloadType), payloadType,
-		len(payload), payload))
-}
-
-/*
-Signer defines the interface for an abstract signing algorithm.
-The Signer interface is used to inject signature algorithm implementations
-into the EnevelopeSigner. This decoupling allows for any signing algorithm
-and key management system can be used.
-The full message is provided as the parameter. If the signature algorithm
-depends on hashing of the message prior to signature calculation, the
-implementor of this interface must perform such hashing.
-The function must return raw bytes representing the calculated signature
-using the current algorithm, and the key used (if applicable).
-For an example see EcdsaSigner in sign_test.go.
-*/
-type Signer interface {
-	Sign(data []byte) ([]byte, error)
-	KeyID() (string, error)
-}
-
-// SignVerifer provides both the signing and verification interface.
-type SignVerifier interface {
-	Signer
-	Verifier
-}
-
 // EnvelopeSigner creates signed Envelopes.
 type EnvelopeSigner struct {
-	providers []SignVerifier
-	ev        *EnvelopeVerifier
+	providers []SignerVerifier
 }
 
 /*
-NewEnvelopeSigner creates an EnvelopeSigner that uses 1+ Signer
-algorithms to sign the data.
-Creates a verifier with threshold=1, at least one of the providers must validate signitures successfully.
+NewEnvelopeSigner creates an EnvelopeSigner that uses 1+ Signer algorithms to
+sign the data. Creates a verifier with threshold=1; at least one of the
+providers must validate signatures successfully.
 */
-func NewEnvelopeSigner(p ...SignVerifier) (*EnvelopeSigner, error) {
+func NewEnvelopeSigner(p ...SignerVerifier) (*EnvelopeSigner, error) {
 	return NewMultiEnvelopeSigner(1, p...)
 }
 
 /*
 NewMultiEnvelopeSigner creates an EnvelopeSigner that uses 1+ Signer
-algorithms to sign the data.
-Creates a verifier with threshold.
-threashold indicates the amount of providers that must validate the envelope.
+algorithms to sign the data. Creates a verifier with threshold. Threshold
+indicates the number of providers that must validate the envelope.
 */
-func NewMultiEnvelopeSigner(threshold int, p ...SignVerifier) (*EnvelopeSigner, error) {
-	var providers []SignVerifier
+func NewMultiEnvelopeSigner(threshold int, p ...SignerVerifier) (*EnvelopeSigner, error) {
+	var providers []SignerVerifier
 
 	for _, sv := range p {
 		if sv != nil {
@@ -121,19 +45,8 @@ func NewMultiEnvelopeSigner(threshold int, p ...SignVerifier) (*EnvelopeSigner,
 		return nil, ErrNoSigners
 	}
 
-	evps := []Verifier{}
-	for _, p := range providers {
-		evps = append(evps, p.(Verifier))
-	}
-
-	ev, err := NewMultiEnvelopeVerifier(threshold, evps...)
-	if err != nil {
-		return nil, err
-	}
-
 	return &EnvelopeSigner{
 		providers: providers,
-		ev:        ev,
 	}, nil
 }
 
@@ -143,7 +56,7 @@ Returned is an envelope as defined here:
 https://github.com/secure-systems-lab/dsse/blob/master/envelope.md
 One signature will be added for each Signer in the EnvelopeSigner.
 */
-func (es *EnvelopeSigner) SignPayload(payloadType string, body []byte) (*Envelope, error) {
+func (es *EnvelopeSigner) SignPayload(ctx context.Context, payloadType string, body []byte) (*Envelope, error) {
 	var e = Envelope{
 		Payload:     base64.StdEncoding.EncodeToString(body),
 		PayloadType: payloadType,
@@ -152,7 +65,7 @@ func (es *EnvelopeSigner) SignPayload(payloadType string, body []byte) (*Envelop
 	paeEnc := PAE(payloadType, body)
 
 	for _, signer := range es.providers {
-		sig, err := signer.Sign(paeEnc)
+		sig, err := signer.Sign(ctx, paeEnc)
 		if err != nil {
 			return nil, err
 		}
@@ -169,29 +82,3 @@ func (es *EnvelopeSigner) SignPayload(payloadType string, body []byte) (*Envelop
 
 	return &e, nil
 }
-
-/*
-Verify decodes the payload and verifies the signature.
-Any domain specific validation such as parsing the decoded body and
-validating the payload type is left out to the caller.
-Verify returns a list of accepted keys each including a keyid, public and signiture of the accepted provider keys.
-*/
-func (es *EnvelopeSigner) Verify(e *Envelope) ([]AcceptedKey, error) {
-	return es.ev.Verify(e)
-}
-
-/*
-Both standard and url encoding are allowed:
-https://github.com/secure-systems-lab/dsse/blob/master/envelope.md
-*/
-func b64Decode(s string) ([]byte, error) {
-	b, err := base64.StdEncoding.DecodeString(s)
-	if err != nil {
-		b, err = base64.URLEncoding.DecodeString(s)
-		if err != nil {
-			return nil, err
-		}
-	}
-
-	return b, nil
-}
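With this change SignPayload is context-aware and the signer no longer constructs its own verifier. A minimal sketch of the new call shape (signExample is a hypothetical helper; sv is any implementation of the SignerVerifier interface introduced in the next file):

```go
package example

import (
	"context"

	"github.com/secure-systems-lab/go-securesystemslib/dsse"
)

// signExample is a hypothetical helper showing the context-aware API.
func signExample(ctx context.Context, sv dsse.SignerVerifier) (*dsse.Envelope, error) {
	signer, err := dsse.NewEnvelopeSigner(sv)
	if err != nil {
		return nil, err
	}
	// One Signature entry is appended to the envelope per provider.
	return signer.SignPayload(ctx, "application/vnd.in-toto+json", []byte(`{"hello":"dsse"}`))
}
```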
diff --git a/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/signerverifier.go b/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/signerverifier.go
new file mode 100644
index 00000000..99d03c7d
--- /dev/null
+++ b/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/signerverifier.go
@@ -0,0 +1,43 @@
+package dsse
+
+import (
+	"context"
+	"crypto"
+)
+
+/*
+Signer defines the interface for an abstract signing algorithm. The Signer
+interface is used to inject signature algorithm implementations into the
+EnvelopeSigner. This decoupling allows any signing algorithm and key
+management system to be used. The full message is provided as the parameter.
+If the signature algorithm depends on hashing of the message prior to signature
+calculation, the implementor of this interface must perform such hashing. The
+function must return raw bytes representing the calculated signature using the
+current algorithm, and the key used (if applicable).
+*/
+type Signer interface {
+	Sign(ctx context.Context, data []byte) ([]byte, error)
+	KeyID() (string, error)
+}
+
+/*
+Verifier verifies a complete message against a signature and key. If the message
+was hashed prior to signature generation, the verifier must perform the same
+steps. If KeyID returns successfully, only signatures matching the key ID will be
+verified.
+*/
+type Verifier interface {
+	Verify(ctx context.Context, data, sig []byte) error
+	KeyID() (string, error)
+	Public() crypto.PublicKey
+}
+
+// SignerVerifier provides both the signing and verification interface.
+type SignerVerifier interface {
+	Signer
+	Verifier
+}
+
+// Deprecated: switch to renamed SignerVerifier. This is currently aliased for
+// backwards compatibility.
+type SignVerifier = SignerVerifier
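To make the split between these interfaces and their implementations concrete, here is a toy in-memory ed25519 SignerVerifier and a full sign/verify round trip. memSV is hypothetical and for illustration only; the signerverifier package added later in this patch provides the real, file-backed implementations:

```go
package main

import (
	"context"
	"crypto"
	"crypto/ed25519"
	"crypto/rand"
	"errors"
	"fmt"

	"github.com/secure-systems-lab/go-securesystemslib/dsse"
)

// memSV implements dsse.SignerVerifier with an in-memory ed25519 key.
type memSV struct{ priv ed25519.PrivateKey }

func (s *memSV) Sign(ctx context.Context, data []byte) ([]byte, error) {
	return ed25519.Sign(s.priv, data), nil
}

func (s *memSV) Verify(ctx context.Context, data, sig []byte) error {
	if !ed25519.Verify(s.priv.Public().(ed25519.PublicKey), data, sig) {
		return errors.New("signature verification failed")
	}
	return nil
}

func (s *memSV) KeyID() (string, error)   { return "mem-key", nil }
func (s *memSV) Public() crypto.PublicKey { return s.priv.Public() }

func main() {
	_, priv, _ := ed25519.GenerateKey(rand.Reader)
	sv := &memSV{priv: priv}

	signer, _ := dsse.NewEnvelopeSigner(sv)
	env, _ := signer.SignPayload(context.Background(), "text/plain", []byte("hello"))

	verifier, _ := dsse.NewEnvelopeVerifier(sv)
	accepted, err := verifier.Verify(context.Background(), env)
	fmt.Println(len(accepted), err) // 1 <nil>
}
```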
diff --git a/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/verify.go b/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/verify.go
index ead1c32c..a36146b8 100644
--- a/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/verify.go
+++ b/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/verify.go
@@ -1,6 +1,7 @@
 package dsse
 
 import (
+	"context"
 	"crypto"
 	"errors"
 	"fmt"
@@ -8,17 +9,8 @@ import (
 	"golang.org/x/crypto/ssh"
 )
 
-/*
-Verifier verifies a complete message against a signature and key.
-If the message was hashed prior to signature generation, the verifier
-must perform the same steps.
-If KeyID returns successfully, only signature matching the key ID will be verified.
-*/
-type Verifier interface {
-	Verify(data, sig []byte) error
-	KeyID() (string, error)
-	Public() crypto.PublicKey
-}
+// ErrNoSignature indicates that an envelope did not contain any signatures.
+var ErrNoSignature = errors.New("no signature found")
 
 type EnvelopeVerifier struct {
 	providers []Verifier
@@ -31,7 +23,7 @@ type AcceptedKey struct {
 	Sig    Signature
 }
 
-func (ev *EnvelopeVerifier) Verify(e *Envelope) ([]AcceptedKey, error) {
+func (ev *EnvelopeVerifier) Verify(ctx context.Context, e *Envelope) ([]AcceptedKey, error) {
 	if e == nil {
 		return nil, errors.New("cannot verify a nil envelope")
 	}
@@ -78,7 +70,7 @@ func (ev *EnvelopeVerifier) Verify(e *Envelope) ([]AcceptedKey, error) {
 			continue
 		}
 
-		err = v.Verify(paeEnc, sig)
+		err = v.Verify(ctx, paeEnc, sig)
 		if err != nil {
 			continue
 		}
@@ -104,11 +96,11 @@ func (ev *EnvelopeVerifier) Verify(e *Envelope) ([]AcceptedKey, error) {
 	// Sanity if with some reflect magic this happens.
 	if ev.threshold <= 0 || ev.threshold > len(ev.providers) {
-		return nil, errors.New("Invalid threshold")
+		return nil, errors.New("invalid threshold")
 	}
 
 	if len(usedKeyids) < ev.threshold {
-		return acceptedKeys, errors.New(fmt.Sprintf("Accepted signatures do not match threshold, Found: %d, Expected %d", len(acceptedKeys), ev.threshold))
+		return acceptedKeys, fmt.Errorf("accepted signatures do not match threshold, found: %d, expected: %d", len(acceptedKeys), ev.threshold)
 	}
 
 	return acceptedKeys, nil
@@ -119,15 +111,15 @@ func NewEnvelopeVerifier(v ...Verifier) (*EnvelopeVerifier, error) {
 }
 
 func NewMultiEnvelopeVerifier(threshold int, p ...Verifier) (*EnvelopeVerifier, error) {
-
 	if threshold <= 0 || threshold > len(p) {
-		return nil, errors.New("Invalid threshold")
+		return nil, errors.New("invalid threshold")
 	}
 
 	ev := EnvelopeVerifier{
 		providers: p,
 		threshold: threshold,
 	}
+
 	return &ev, nil
 }
diff --git a/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/ecdsa.go b/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/ecdsa.go
new file mode 100644
index 00000000..578d6a54
--- /dev/null
+++ b/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/ecdsa.go
@@ -0,0 +1,111 @@
+package signerverifier
+
+import (
+	"context"
+	"crypto"
+	"crypto/ecdsa"
+	"crypto/rand"
+	"crypto/sha256"
+	"crypto/sha512"
+	"fmt"
+	"os"
+)
+
+const ECDSAKeyType = "ecdsa"
+
+// ECDSASignerVerifier is a dsse.SignerVerifier compliant interface to sign and
+// verify signatures using ECDSA keys.
+type ECDSASignerVerifier struct {
+	keyID     string
+	curveSize int
+	private   *ecdsa.PrivateKey
+	public    *ecdsa.PublicKey
+}
+
+// NewECDSASignerVerifierFromSSLibKey creates an ECDSASignerVerifier from an
+// SSLibKey.
+func NewECDSASignerVerifierFromSSLibKey(key *SSLibKey) (*ECDSASignerVerifier, error) {
+	if len(key.KeyVal.Public) == 0 {
+		return nil, ErrInvalidKey
+	}
+
+	_, publicParsedKey, err := decodeAndParsePEM([]byte(key.KeyVal.Public))
+	if err != nil {
+		return nil, fmt.Errorf("unable to create ECDSA signerverifier: %w", err)
+	}
+
+	sv := &ECDSASignerVerifier{
+		keyID:     key.KeyID,
+		curveSize: publicParsedKey.(*ecdsa.PublicKey).Params().BitSize,
+		public:    publicParsedKey.(*ecdsa.PublicKey),
+		private:   nil,
+	}
+
+	if len(key.KeyVal.Private) > 0 {
+		_, privateParsedKey, err := decodeAndParsePEM([]byte(key.KeyVal.Private))
+		if err != nil {
+			return nil, fmt.Errorf("unable to create ECDSA signerverifier: %w", err)
+		}
+
+		sv.private = privateParsedKey.(*ecdsa.PrivateKey)
+	}
+
+	return sv, nil
+}
+
+// Sign creates a signature for `data`.
+func (sv *ECDSASignerVerifier) Sign(ctx context.Context, data []byte) ([]byte, error) { + if sv.private == nil { + return nil, ErrNotPrivateKey + } + + hashedData := getECDSAHashedData(data, sv.curveSize) + + return ecdsa.SignASN1(rand.Reader, sv.private, hashedData) +} + +// Verify verifies the `sig` value passed in against `data`. +func (sv *ECDSASignerVerifier) Verify(ctx context.Context, data []byte, sig []byte) error { + hashedData := getECDSAHashedData(data, sv.curveSize) + + if ok := ecdsa.VerifyASN1(sv.public, hashedData, sig); !ok { + return ErrSignatureVerificationFailed + } + + return nil +} + +// KeyID returns the identifier of the key used to create the +// ECDSASignerVerifier instance. +func (sv *ECDSASignerVerifier) KeyID() (string, error) { + return sv.keyID, nil +} + +// Public returns the public portion of the key used to create the +// ECDSASignerVerifier instance. +func (sv *ECDSASignerVerifier) Public() crypto.PublicKey { + return sv.public +} + +// LoadECDSAKeyFromFile returns an SSLibKey instance for an ECDSA key stored in +// a file in the custom securesystemslib format. +func LoadECDSAKeyFromFile(path string) (*SSLibKey, error) { + contents, err := os.ReadFile(path) + if err != nil { + return nil, fmt.Errorf("unable to load ECDSA key from file: %w", err) + } + + return loadKeyFromSSLibBytes(contents) +} + +func getECDSAHashedData(data []byte, curveSize int) []byte { + switch { + case curveSize <= 256: + return hashBeforeSigning(data, sha256.New()) + case 256 < curveSize && curveSize <= 384: + return hashBeforeSigning(data, sha512.New384()) + case curveSize > 384: + return hashBeforeSigning(data, sha512.New()) + } + return []byte{} +} diff --git a/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/ed25519.go b/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/ed25519.go new file mode 100644 index 00000000..c71d313a --- /dev/null +++ b/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/ed25519.go @@ -0,0 +1,98 @@ +package signerverifier + +import ( + "context" + "crypto" + "crypto/ed25519" + "encoding/hex" + "fmt" + "os" +) + +const ED25519KeyType = "ed25519" + +// ED25519SignerVerifier is a dsse.SignerVerifier compliant interface to sign +// and verify signatures using ED25519 keys. +type ED25519SignerVerifier struct { + keyID string + private ed25519.PrivateKey + public ed25519.PublicKey +} + +// NewED25519SignerVerifierFromSSLibKey creates an Ed25519SignerVerifier from an +// SSLibKey. +func NewED25519SignerVerifierFromSSLibKey(key *SSLibKey) (*ED25519SignerVerifier, error) { + if len(key.KeyVal.Public) == 0 { + return nil, ErrInvalidKey + } + + public, err := hex.DecodeString(key.KeyVal.Public) + if err != nil { + return nil, fmt.Errorf("unable to create ED25519 signerverifier: %w", err) + } + + var private []byte + if len(key.KeyVal.Private) > 0 { + private, err = hex.DecodeString(key.KeyVal.Private) + if err != nil { + return nil, fmt.Errorf("unable to create ED25519 signerverifier: %w", err) + } + + // python-securesystemslib provides an interface to generate ed25519 + // keys but it differs slightly in how it serializes the key to disk. + // Specifically, the keyval.private field includes _only_ the private + // portion of the key while libraries such as crypto/ed25519 also expect + // the public portion. So, if the private portion is half of what we + // expect, we append the public portion as well. + if len(private) == ed25519.PrivateKeySize/2 { + private = append(private, public...) 
+ } + } + + return &ED25519SignerVerifier{ + keyID: key.KeyID, + public: ed25519.PublicKey(public), + private: ed25519.PrivateKey(private), + }, nil +} + +// Sign creates a signature for `data`. +func (sv *ED25519SignerVerifier) Sign(ctx context.Context, data []byte) ([]byte, error) { + if len(sv.private) == 0 { + return nil, ErrNotPrivateKey + } + + signature := ed25519.Sign(sv.private, data) + return signature, nil +} + +// Verify verifies the `sig` value passed in against `data`. +func (sv *ED25519SignerVerifier) Verify(ctx context.Context, data []byte, sig []byte) error { + if ok := ed25519.Verify(sv.public, data, sig); ok { + return nil + } + return ErrSignatureVerificationFailed +} + +// KeyID returns the identifier of the key used to create the +// ED25519SignerVerifier instance. +func (sv *ED25519SignerVerifier) KeyID() (string, error) { + return sv.keyID, nil +} + +// Public returns the public portion of the key used to create the +// ED25519SignerVerifier instance. +func (sv *ED25519SignerVerifier) Public() crypto.PublicKey { + return sv.public +} + +// LoadED25519KeyFromFile returns an SSLibKey instance for an ED25519 key stored +// in a file in the custom securesystemslib format. +func LoadED25519KeyFromFile(path string) (*SSLibKey, error) { + contents, err := os.ReadFile(path) + if err != nil { + return nil, fmt.Errorf("unable to load ED25519 key from file: %w", err) + } + + return loadKeyFromSSLibBytes(contents) +} diff --git a/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/rsa.go b/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/rsa.go new file mode 100644 index 00000000..3612f28a --- /dev/null +++ b/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/rsa.go @@ -0,0 +1,141 @@ +package signerverifier + +import ( + "context" + "crypto" + "crypto/rand" + "crypto/rsa" + "crypto/sha256" + "crypto/x509" + "fmt" + "os" + "strings" +) + +const ( + RSAKeyType = "rsa" + RSAKeyScheme = "rsassa-pss-sha256" + RSAPrivateKeyPEM = "RSA PRIVATE KEY" +) + +// RSAPSSSignerVerifier is a dsse.SignerVerifier compliant interface to sign and +// verify signatures using RSA keys following the RSA-PSS scheme. +type RSAPSSSignerVerifier struct { + keyID string + private *rsa.PrivateKey + public *rsa.PublicKey +} + +// NewRSAPSSSignerVerifierFromSSLibKey creates an RSAPSSSignerVerifier from an +// SSLibKey. +func NewRSAPSSSignerVerifierFromSSLibKey(key *SSLibKey) (*RSAPSSSignerVerifier, error) { + if len(key.KeyVal.Public) == 0 { + return nil, ErrInvalidKey + } + + _, publicParsedKey, err := decodeAndParsePEM([]byte(key.KeyVal.Public)) + if err != nil { + return nil, fmt.Errorf("unable to create RSA-PSS signerverifier: %w", err) + } + + if len(key.KeyVal.Private) > 0 { + _, privateParsedKey, err := decodeAndParsePEM([]byte(key.KeyVal.Private)) + if err != nil { + return nil, fmt.Errorf("unable to create RSA-PSS signerverifier: %w", err) + } + + return &RSAPSSSignerVerifier{ + keyID: key.KeyID, + public: publicParsedKey.(*rsa.PublicKey), + private: privateParsedKey.(*rsa.PrivateKey), + }, nil + } + + return &RSAPSSSignerVerifier{ + keyID: key.KeyID, + public: publicParsedKey.(*rsa.PublicKey), + private: nil, + }, nil +} + +// Sign creates a signature for `data`. 
+func (sv *RSAPSSSignerVerifier) Sign(ctx context.Context, data []byte) ([]byte, error) { + if sv.private == nil { + return nil, ErrNotPrivateKey + } + + hashedData := hashBeforeSigning(data, sha256.New()) + + return rsa.SignPSS(rand.Reader, sv.private, crypto.SHA256, hashedData, &rsa.PSSOptions{SaltLength: sha256.Size, Hash: crypto.SHA256}) +} + +// Verify verifies the `sig` value passed in against `data`. +func (sv *RSAPSSSignerVerifier) Verify(ctx context.Context, data []byte, sig []byte) error { + hashedData := hashBeforeSigning(data, sha256.New()) + + if err := rsa.VerifyPSS(sv.public, crypto.SHA256, hashedData, sig, &rsa.PSSOptions{SaltLength: sha256.Size, Hash: crypto.SHA256}); err != nil { + return ErrSignatureVerificationFailed + } + + return nil +} + +// KeyID returns the identifier of the key used to create the +// RSAPSSSignerVerifier instance. +func (sv *RSAPSSSignerVerifier) KeyID() (string, error) { + return sv.keyID, nil +} + +// Public returns the public portion of the key used to create the +// RSAPSSSignerVerifier instance. +func (sv *RSAPSSSignerVerifier) Public() crypto.PublicKey { + return sv.public +} + +// LoadRSAPSSKeyFromFile returns an SSLibKey instance for an RSA key stored in a +// file. +func LoadRSAPSSKeyFromFile(path string) (*SSLibKey, error) { + contents, err := os.ReadFile(path) + if err != nil { + return nil, fmt.Errorf("unable to load RSA key from file: %w", err) + } + + pemData, keyObj, err := decodeAndParsePEM(contents) + if err != nil { + return nil, fmt.Errorf("unable to load RSA key from file: %w", err) + } + + key := &SSLibKey{ + KeyType: RSAKeyType, + Scheme: RSAKeyScheme, + KeyIDHashAlgorithms: KeyIDHashAlgorithms, + KeyVal: KeyVal{}, + } + + switch k := keyObj.(type) { + case *rsa.PublicKey: + pubKeyBytes, err := x509.MarshalPKIXPublicKey(k) + if err != nil { + return nil, fmt.Errorf("unable to load RSA key from file: %w", err) + } + key.KeyVal.Public = strings.TrimSpace(string(generatePEMBlock(pubKeyBytes, PublicKeyPEM))) + + case *rsa.PrivateKey: + pubKeyBytes, err := x509.MarshalPKIXPublicKey(k.Public()) + if err != nil { + return nil, fmt.Errorf("unable to load RSA key from file: %w", err) + } + key.KeyVal.Public = strings.TrimSpace(string(generatePEMBlock(pubKeyBytes, PublicKeyPEM))) + key.KeyVal.Private = strings.TrimSpace(string(generatePEMBlock(pemData.Bytes, RSAPrivateKeyPEM))) + } + + if len(key.KeyID) == 0 { + keyID, err := calculateKeyID(key) + if err != nil { + return nil, fmt.Errorf("unable to load RSA key from file: %w", err) + } + key.KeyID = keyID + } + + return key, nil +} diff --git a/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/signerverifier.go b/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/signerverifier.go new file mode 100644 index 00000000..5f510f7b --- /dev/null +++ b/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/signerverifier.go @@ -0,0 +1,34 @@ +package signerverifier + +import ( + "errors" +) + +var KeyIDHashAlgorithms = []string{"sha256", "sha512"} + +var ( + ErrNotPrivateKey = errors.New("loaded key is not a private key") + ErrSignatureVerificationFailed = errors.New("failed to verify signature") + ErrUnknownKeyType = errors.New("unknown key type") + ErrInvalidThreshold = errors.New("threshold is either less than 1 or greater than number of provided public keys") + ErrInvalidKey = errors.New("key object has no value") +) + +const ( + PublicKeyPEM = "PUBLIC KEY" + PrivateKeyPEM = "PRIVATE KEY" +) + +type SSLibKey struct { + 
KeyIDHashAlgorithms []string `json:"keyid_hash_algorithms"`
+	KeyType             string   `json:"keytype"`
+	KeyVal              KeyVal   `json:"keyval"`
+	Scheme              string   `json:"scheme"`
+	KeyID               string   `json:"keyid"`
+}
+
+type KeyVal struct {
+	Private     string `json:"private,omitempty"`
+	Public      string `json:"public"`
+	Certificate string `json:"certificate,omitempty"`
+}
diff --git a/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/utils.go b/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/utils.go
new file mode 100644
index 00000000..73aaa77d
--- /dev/null
+++ b/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/utils.go
@@ -0,0 +1,150 @@
+package signerverifier
+
+import (
+	"crypto/sha256"
+	"crypto/x509"
+	"encoding/hex"
+	"encoding/json"
+	"encoding/pem"
+	"errors"
+	"hash"
+	"testing"
+
+	"github.com/secure-systems-lab/go-securesystemslib/cjson"
+)
+
+/*
+Credits: Parts of this file were originally authored for in-toto-golang.
+*/
+
+var (
+	// ErrNoPEMBlock gets triggered when there is no PEM block in the provided file
+	ErrNoPEMBlock = errors.New("failed to decode the data as PEM block (are you sure this is a pem file?)")
+	// ErrFailedPEMParsing gets returned when PKCS1, PKCS8 or PKIX key parsing fails
+	ErrFailedPEMParsing = errors.New("failed parsing the PEM block: unsupported PEM type")
+)
+
+// loadKeyFromSSLibBytes returns a pointer to a Key instance created from the
+// contents of the bytes. The key contents are expected to be in the custom
+// securesystemslib format.
+func loadKeyFromSSLibBytes(contents []byte) (*SSLibKey, error) {
+	var key *SSLibKey
+	if err := json.Unmarshal(contents, &key); err != nil {
+		return nil, err
+	}
+
+	if len(key.KeyID) == 0 {
+		keyID, err := calculateKeyID(key)
+		if err != nil {
+			return nil, err
+		}
+		key.KeyID = keyID
+	}
+
+	return key, nil
+}
+
+func calculateKeyID(k *SSLibKey) (string, error) {
+	key := map[string]any{
+		"keytype":               k.KeyType,
+		"scheme":                k.Scheme,
+		"keyid_hash_algorithms": k.KeyIDHashAlgorithms,
+		"keyval": map[string]string{
+			"public": k.KeyVal.Public,
+		},
+	}
+	canonical, err := cjson.EncodeCanonical(key)
+	if err != nil {
+		return "", err
+	}
+	digest := sha256.Sum256(canonical)
+	return hex.EncodeToString(digest[:]), nil
+}
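calculateKeyID above is what gives keys loaded through this package stable identifiers: the public portion is canonicalized with cjson and hashed with SHA-256. A short usage sketch through the public API (the key path is hypothetical; the file must be in the custom securesystemslib format):

```go
package main

import (
	"fmt"

	"github.com/secure-systems-lab/go-securesystemslib/signerverifier"
)

func main() {
	// Hypothetical path to an ed25519 key in the securesystemslib format.
	key, err := signerverifier.LoadED25519KeyFromFile("testdata/ed25519-key.json")
	if err != nil {
		panic(err)
	}
	// If the file carried no keyid, it has been derived from the SHA-256 of
	// the canonicalized public key material while loading.
	fmt.Println(key.KeyID)

	sv, err := signerverifier.NewED25519SignerVerifierFromSSLibKey(key)
	if err != nil {
		panic(err)
	}
	fmt.Println(sv.KeyID()) // same keyid, nil error
}
```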
+
+/*
+generatePEMBlock creates a PEM block from scratch via the keyBytes and the pemType.
+If successful it returns a PEM block as []byte slice. This function should always
+succeed; if keyBytes is empty, the PEM block will have an empty byte block, so
+only the header and footer will exist.
+*/
+func generatePEMBlock(keyBytes []byte, pemType string) []byte {
+	// construct PEM block
+	pemBlock := &pem.Block{
+		Type:    pemType,
+		Headers: nil,
+		Bytes:   keyBytes,
+	}
+	return pem.EncodeToMemory(pemBlock)
+}
+
+/*
+decodeAndParsePEM receives potential PEM bytes, decodes them via pem.Decode
+and pushes them to parsePEMKey. If any error occurs during this process,
+the function will return nil and an error (either ErrFailedPEMParsing
+or ErrNoPEMBlock). On success it will return the decoded pemData, the
+key object interface and nil as error. We need the decoded pemData,
+because LoadKey relies on decoded pemData for operating system
+interoperability.
+*/
+func decodeAndParsePEM(pemBytes []byte) (*pem.Block, any, error) {
+	// pem.Decode returns the parsed pem block and a rest.
+	// The rest is everything that could not be parsed as a PEM block.
+	// Therefore, we can drop it by using the blank identifier "_".
+	data, _ := pem.Decode(pemBytes)
+	if data == nil {
+		return nil, nil, ErrNoPEMBlock
+	}
+
+	// Try to load a private key; if this fails, try to load
+	// the key as a public key.
+	key, err := parsePEMKey(data.Bytes)
+	if err != nil {
+		return nil, nil, err
+	}
+	return data, key, nil
+}
+
+/*
+parsePEMKey tries to parse a PEM []byte slice using the following standards
+in the given order:
+
+  - PKCS8
+  - PKCS1
+  - PKIX
+  - EC (SEC 1)
+
+On success it returns the parsed key and nil.
+On failure it returns nil and the error ErrFailedPEMParsing.
+*/
+func parsePEMKey(data []byte) (any, error) {
+	key, err := x509.ParsePKCS8PrivateKey(data)
+	if err == nil {
+		return key, nil
+	}
+	key, err = x509.ParsePKCS1PrivateKey(data)
+	if err == nil {
+		return key, nil
+	}
+	key, err = x509.ParsePKIXPublicKey(data)
+	if err == nil {
+		return key, nil
+	}
+	key, err = x509.ParseECPrivateKey(data)
+	if err == nil {
+		return key, nil
+	}
+	return nil, ErrFailedPEMParsing
+}
+
+func hashBeforeSigning(data []byte, h hash.Hash) []byte {
+	h.Write(data)
+	return h.Sum(nil)
+}
+
+func hexDecode(t *testing.T, data string) []byte {
+	t.Helper()
+	b, err := hex.DecodeString(data)
+	if err != nil {
+		t.Fatal(err)
+	}
+	return b
+}
diff --git a/vendor/github.com/skeema/knownhosts/NOTICE b/vendor/github.com/skeema/knownhosts/NOTICE
index 794c676a..619a5a7e 100644
--- a/vendor/github.com/skeema/knownhosts/NOTICE
+++ b/vendor/github.com/skeema/knownhosts/NOTICE
@@ -1,4 +1,4 @@
-Copyright 2023 Skeema LLC and the Skeema authors
+Copyright 2023 Skeema LLC and the Skeema Knownhosts authors
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/skeema/knownhosts/README.md b/vendor/github.com/skeema/knownhosts/README.md
index 3740039f..85339bc0 100644
--- a/vendor/github.com/skeema/knownhosts/README.md
+++ b/vendor/github.com/skeema/knownhosts/README.md
@@ -13,11 +13,12 @@ Go provides excellent functionality for OpenSSH known_hosts files in its
 external package [golang.org/x/crypto/ssh/knownhosts](https://pkg.go.dev/golang.org/x/crypto/ssh/knownhosts).
 However, that package is somewhat low-level, making it difficult to implement full known_hosts management similar to command-line `ssh`'s behavior for `StrictHostKeyChecking=no` configuration.
 
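A quick sketch of the normalization behavior this version adds (the Normalize function itself appears further down in this patch; the expected outputs are inferred from its implementation):

```go
package main

import (
	"fmt"

	"github.com/skeema/knownhosts"
)

func main() {
	fmt.Println(knownhosts.Normalize("example.com"))        // example.com
	fmt.Println(knownhosts.Normalize("example.com:2222"))   // [example.com]:2222
	fmt.Println(knownhosts.Normalize("[2001:db8::1]:22"))   // 2001:db8::1 (no brackets on port 22)
	fmt.Println(knownhosts.Normalize("[2001:db8::1]:2222")) // [2001:db8::1]:2222
}
```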
-This repo ([github.com/skeema/knownhosts](https://github.com/skeema/knownhosts)) is a thin wrapper package around [golang.org/x/crypto/ssh/knownhosts](https://pkg.go.dev/golang.org/x/crypto/ssh/knownhosts), adding functions which provide the following functionality: +This repo ([github.com/skeema/knownhosts](https://github.com/skeema/knownhosts)) is a thin wrapper package around [golang.org/x/crypto/ssh/knownhosts](https://pkg.go.dev/golang.org/x/crypto/ssh/knownhosts), adding the following functionality: * Look up known_hosts public keys for any given host -* Auto-populate ssh.ClientConfig.HostKeyAlgorithms easily based on known_hosts +* Auto-populate ssh.ClientConfig.HostKeyAlgorithms easily based on known_hosts, providing a solution for [golang/go#29286](https://github.com/golang/go/issues/29286) * Write new known_hosts entries to an io.Writer +* Properly format/normalize new known_hosts entries containing ipv6 addresses, providing a solution for [golang/go#53463](https://github.com/golang/go/issues/53463) * Determine if an ssh.HostKeyCallback's error corresponds to a host whose key has changed (indicating potential MitM attack) vs a host that just isn't known yet ## How host key lookup works @@ -99,7 +100,7 @@ config := &ssh.ClientConfig{ ## License -**Source code copyright 2023 Skeema LLC and the Skeema authors** +**Source code copyright 2023 Skeema LLC and the Skeema Knownhosts authors** ```text Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/vendor/github.com/skeema/knownhosts/knownhosts.go b/vendor/github.com/skeema/knownhosts/knownhosts.go index be3047c6..c460031b 100644 --- a/vendor/github.com/skeema/knownhosts/knownhosts.go +++ b/vendor/github.com/skeema/knownhosts/knownhosts.go @@ -3,10 +3,12 @@ package knownhosts import ( + "encoding/base64" "errors" "io" "net" "sort" + "strings" "golang.org/x/crypto/ssh" xknownhosts "golang.org/x/crypto/ssh/knownhosts" @@ -42,9 +44,7 @@ func (hkcb HostKeyCallback) HostKeys(hostWithPort string) (keys []ssh.PublicKey) placeholderPubKey := &fakePublicKey{} var kkeys []xknownhosts.KnownKey if hkcbErr := hkcb(hostWithPort, placeholderAddr, placeholderPubKey); errors.As(hkcbErr, &keyErr) { - for _, knownKey := range keyErr.Want { - kkeys = append(kkeys, knownKey) - } + kkeys = append(kkeys, keyErr.Want...) knownKeyLess := func(i, j int) bool { if kkeys[i].Filename < kkeys[j].Filename { return true @@ -98,6 +98,40 @@ func IsHostUnknown(err error) bool { return errors.As(err, &keyErr) && len(keyErr.Want) == 0 } +// Normalize normalizes an address into the form used in known_hosts. This +// implementation includes a fix for https://github.com/golang/go/issues/53463 +// and will omit brackets around ipv6 addresses on standard port 22. +func Normalize(address string) string { + host, port, err := net.SplitHostPort(address) + if err != nil { + host = address + port = "22" + } + entry := host + if port != "22" { + entry = "[" + entry + "]:" + port + } else if strings.HasPrefix(host, "[") && strings.HasSuffix(host, "]") { + entry = entry[1 : len(entry)-1] + } + return entry +} + +// Line returns a line to append to the known_hosts files. This implementation +// uses the local patched implementation of Normalize in order to solve +// https://github.com/golang/go/issues/53463. 
+func Line(addresses []string, key ssh.PublicKey) string { + var trimmed []string + for _, a := range addresses { + trimmed = append(trimmed, Normalize(a)) + } + + return strings.Join([]string{ + strings.Join(trimmed, ","), + key.Type(), + base64.StdEncoding.EncodeToString(key.Marshal()), + }, " ") +} + // WriteKnownHost writes a known_hosts line to writer for the supplied hostname, // remote, and key. This is useful when writing a custom hostkey callback which // wraps a callback obtained from knownhosts.New to provide additional @@ -108,11 +142,11 @@ func WriteKnownHost(w io.Writer, hostname string, remote net.Addr, key ssh.Publi // and doesn't normalize to the same string as hostname. addresses := []string{hostname} remoteStr := remote.String() - remoteStrNormalized := xknownhosts.Normalize(remoteStr) - if remoteStrNormalized != "[0.0.0.0]:0" && remoteStrNormalized != xknownhosts.Normalize(hostname) { + remoteStrNormalized := Normalize(remoteStr) + if remoteStrNormalized != "[0.0.0.0]:0" && remoteStrNormalized != Normalize(hostname) { addresses = append(addresses, remoteStr) } - line := xknownhosts.Line(addresses, key) + "\n" + line := Line(addresses, key) + "\n" _, err := w.Write([]byte(line)) return err } diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2/common/package.go b/vendor/github.com/spdx/tools-golang/spdx/v2/common/package.go index de5a0752..349cef25 100644 --- a/vendor/github.com/spdx/tools-golang/spdx/v2/common/package.go +++ b/vendor/github.com/spdx/tools-golang/spdx/v2/common/package.go @@ -70,15 +70,13 @@ func (o *Originator) UnmarshalJSON(data []byte) error { return nil } - originatorFields := strings.SplitN(originatorStr, ": ", 2) - + originatorFields := strings.SplitN(originatorStr, ":", 2) if len(originatorFields) != 2 { return fmt.Errorf("failed to parse Originator '%s'", originatorStr) } o.OriginatorType = originatorFields[0] - o.Originator = originatorFields[1] - + o.Originator = strings.TrimLeft(originatorFields[1], " \t") return nil } diff --git a/vendor/github.com/sylabs/sif/v2/LICENSE.md b/vendor/github.com/sylabs/sif/v2/LICENSE.md index dea3e409..9e825f69 100644 --- a/vendor/github.com/sylabs/sif/v2/LICENSE.md +++ b/vendor/github.com/sylabs/sif/v2/LICENSE.md @@ -1,6 +1,6 @@ # LICENSE -Copyright (c) 2018-2022, Sylabs Inc. All rights reserved. +Copyright (c) 2018-2023, Sylabs Inc. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/create.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/create.go index e65bdb74..0e1f9103 100644 --- a/vendor/github.com/sylabs/sif/v2/pkg/sif/create.go +++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/create.go @@ -1,4 +1,4 @@ -// Copyright (c) 2018-2021, Sylabs Inc. All rights reserved. +// Copyright (c) 2018-2023, Sylabs Inc. All rights reserved. // Copyright (c) 2017, SingularityWare, LLC. All rights reserved. // Copyright (c) 2017, Yannick Cote All rights reserved. // This software is licensed under a 3-clause BSD license. Please consult the @@ -69,7 +69,7 @@ func (f *FileImage) writeDataObject(i int, di DescriptorInput, t time.Time) erro // If this is a primary partition, verify there isn't another primary partition, and update the // architecture in the global header. 
- if p, ok := di.opts.extra.(partition); ok && p.Parttype == PartPrimSys { + if p, ok := di.opts.md.(partition); ok && p.Parttype == PartPrimSys { if ds, err := f.GetDescriptors(WithPartitionType(PartPrimSys)); err == nil && len(ds) > 0 { return errPrimaryPartition } @@ -104,7 +104,7 @@ func (f *FileImage) writeDescriptors() error { return binary.Write(f.rw, binary.LittleEndian, f.rds) } -// writeHeader writes the the global header in f to backing storage. +// writeHeader writes the global header in f to backing storage. func (f *FileImage) writeHeader() error { if _, err := f.rw.Seek(0, io.SeekStart); err != nil { return err @@ -251,7 +251,7 @@ func createContainer(rw ReadWriter, co createOpts) (*FileImage, error) { // By default, the image ID is set to a randomly generated value. To override this, consider using // OptCreateDeterministic or OptCreateWithID. // -// By default, the image creation time is set to time.Now(). To override this, consider using +// By default, the image creation time is set to the current time. To override this, consider using // OptCreateDeterministic or OptCreateWithTime. // // By default, the image will support a maximum of 48 descriptors. To change this, consider using @@ -296,7 +296,7 @@ func CreateContainer(rw ReadWriter, opts ...CreateOpt) (*FileImage, error) { // By default, the image ID is set to a randomly generated value. To override this, consider using // OptCreateDeterministic or OptCreateWithID. // -// By default, the image creation time is set to time.Now(). To override this, consider using +// By default, the image creation time is set to the current time. To override this, consider using // OptCreateDeterministic or OptCreateWithTime. // // By default, the image will support a maximum of 48 descriptors. To change this, consider using @@ -393,11 +393,13 @@ func OptAddWithTime(t time.Time) AddOpt { // AddObject adds a new data object and its descriptor into the specified SIF file. // -// By default, the image modification time is set to the current time. To override this, consider -// using OptAddDeterministic or OptAddWithTime. +// By default, the image modification time is set to the current time for non-deterministic images, +// and unset otherwise. To override this, consider using OptAddDeterministic or OptAddWithTime. func (f *FileImage) AddObject(di DescriptorInput, opts ...AddOpt) error { - ao := addOpts{ - t: time.Now(), + ao := addOpts{} + + if !f.isDeterministic() { + ao.t = time.Now() } for _, opt := range opts { @@ -449,11 +451,7 @@ func (f *FileImage) isLast(d *rawDescriptor) bool { func (f *FileImage) truncateAt(d *rawDescriptor) error { start := d.Offset + d.Size - d.SizeWithPadding - if err := f.rw.Truncate(start); err != nil { - return err - } - - return nil + return f.rw.Truncate(start) } // deleteOpts accumulates object deletion options. @@ -506,11 +504,14 @@ var errCompactNotImplemented = errors.New("compact not implemented for non-last // To zero the data region of the deleted object, use OptDeleteZero. To compact the file following // object deletion, use OptDeleteCompact. // -// By default, the image modification time is set to time.Now(). To override this, consider using -// OptDeleteDeterministic or OptDeleteWithTime. +// By default, the image modification time is set to the current time for non-deterministic images, +// and unset otherwise. To override this, consider using OptDeleteDeterministic or +// OptDeleteWithTime. 
func (f *FileImage) DeleteObject(id uint32, opts ...DeleteOpt) error { - do := deleteOpts{ - t: time.Now(), + do := deleteOpts{} + + if !f.isDeterministic() { + do.t = time.Now() } for _, opt := range opts { @@ -596,11 +597,14 @@ var ( // SetPrimPart sets the specified system partition to be the primary one. // -// By default, the image/object modification times are set to time.Now(). To override this, -// consider using OptSetDeterministic or OptSetWithTime. +// By default, the image/object modification times are set to the current time for +// non-deterministic images, and unset otherwise. To override this, consider using +// OptSetDeterministic or OptSetWithTime. func (f *FileImage) SetPrimPart(id uint32, opts ...SetOpt) error { - so := setOpts{ - t: time.Now(), + so := setOpts{} + + if !f.isDeterministic() { + so.t = time.Now() } for _, opt := range opts { @@ -618,58 +622,52 @@ func (f *FileImage) SetPrimPart(id uint32, opts ...SetOpt) error { return fmt.Errorf("%w", errNotPartition) } - fs, pt, arch, err := descr.getPartitionMetadata() - if err != nil { + var p partition + if err := descr.getExtra(binaryUnmarshaler{&p}); err != nil { return fmt.Errorf("%w", err) } // if already primary system partition, nothing to do - if pt == PartPrimSys { + if p.Parttype == PartPrimSys { return nil } - if pt != PartSystem { + if p.Parttype != PartSystem { return fmt.Errorf("%w", errNotSystem) } - olddescr, err := f.getDescriptor(WithPartitionType(PartPrimSys)) - if err != nil && !errors.Is(err, ErrObjectNotFound) { - return fmt.Errorf("%w", err) - } + // If there is currently a primary system partition, update it. + if d, err := f.getDescriptor(WithPartitionType(PartPrimSys)); err == nil { + var p partition + if err := d.getExtra(binaryUnmarshaler{&p}); err != nil { + return fmt.Errorf("%w", err) + } - f.h.Arch = getSIFArch(arch) + p.Parttype = PartSystem - extra := partition{ - Fstype: fs, - Parttype: PartPrimSys, - } - copy(extra.Arch[:], arch) + if err := d.setExtra(p); err != nil { + return fmt.Errorf("%w", err) + } - if err := descr.setExtra(extra); err != nil { + d.ModifiedAt = so.t.Unix() + } else if !errors.Is(err, ErrObjectNotFound) { return fmt.Errorf("%w", err) } - if olddescr != nil { - oldfs, _, oldarch, err := olddescr.getPartitionMetadata() - if err != nil { - return fmt.Errorf("%w", err) - } + // Update the descriptor of the new primary system partition. + p.Parttype = PartPrimSys - oldextra := partition{ - Fstype: oldfs, - Parttype: PartSystem, - Arch: getSIFArch(oldarch), - } - - if err := olddescr.setExtra(oldextra); err != nil { - return fmt.Errorf("%w", err) - } + if err := descr.setExtra(p); err != nil { + return fmt.Errorf("%w", err) } + descr.ModifiedAt = so.t.Unix() + if err := f.writeDescriptors(); err != nil { return fmt.Errorf("%w", err) } + f.h.Arch = p.Arch f.h.ModifiedAt = so.t.Unix() if err := f.writeHeader(); err != nil { diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor.go index 03ed2b04..19272824 100644 --- a/vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor.go +++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor.go @@ -1,4 +1,4 @@ -// Copyright (c) 2018-2022, Sylabs Inc. All rights reserved. +// Copyright (c) 2018-2023, Sylabs Inc. All rights reserved. // Copyright (c) 2017, SingularityWare, LLC. All rights reserved. // Copyright (c) 2017, Yannick Cote All rights reserved. // This software is licensed under a 3-clause BSD license. 
Please consult the @@ -10,6 +10,7 @@ package sif import ( "bytes" "crypto" + "encoding" "encoding/binary" "errors" "fmt" @@ -44,6 +45,11 @@ type partition struct { Arch archType } +// MarshalBinary encodes p into binary format. +func (p partition) MarshalBinary() ([]byte, error) { + return binaryMarshaler{p}.MarshalBinary() +} + // signature represents the SIF signature data object descriptor. type signature struct { Hashtype hashType @@ -61,6 +67,26 @@ type sbom struct { Format SBOMFormat } +// The binaryMarshaler type is an adapter that allows a type suitable for use with the +// encoding/binary package to be used as an encoding.BinaryMarshaler. +type binaryMarshaler struct{ any } + +// MarshalBinary encodes m into binary format. +func (m binaryMarshaler) MarshalBinary() ([]byte, error) { + var b bytes.Buffer + err := binary.Write(&b, binary.LittleEndian, m.any) + return b.Bytes(), err +} + +// The binaryUnmarshaler type is an adapter that allows a type suitable for use with the +// encoding/binary package to be used as an encoding.BinaryUnmarshaler. +type binaryUnmarshaler struct{ any } + +// UnmarshalBinary decodes b into u. +func (u binaryUnmarshaler) UnmarshalBinary(b []byte) error { + return binary.Read(bytes.NewReader(b), binary.LittleEndian, u.any) +} + var errNameTooLarge = errors.New("name value too large") // setName encodes name into the name field of d. @@ -78,28 +104,33 @@ func (d *rawDescriptor) setName(name string) error { var errExtraTooLarge = errors.New("extra value too large") -// setExtra encodes v into the extra field of d. -func (d *rawDescriptor) setExtra(v interface{}) error { - if v == nil { +// setExtra marshals metadata from md into the "extra" field of d. +func (d *rawDescriptor) setExtra(md encoding.BinaryMarshaler) error { + if md == nil { return nil } - if binary.Size(v) > len(d.Extra) { - return errExtraTooLarge + extra, err := md.MarshalBinary() + if err != nil { + return err } - b := new(bytes.Buffer) - if err := binary.Write(b, binary.LittleEndian, v); err != nil { - return err + if len(extra) > len(d.Extra) { + return errExtraTooLarge } - for i := copy(d.Extra[:], b.Bytes()); i < len(d.Extra); i++ { + for i := copy(d.Extra[:], extra); i < len(d.Extra); i++ { d.Extra[i] = 0 } return nil } +// getExtra unmarshals metadata from the "extra" field of d into md. +func (d *rawDescriptor) getExtra(md encoding.BinaryUnmarshaler) error { + return md.UnmarshalBinary(d.Extra[:]) +} + // getPartitionMetadata gets metadata for a partition data object. func (d rawDescriptor) getPartitionMetadata() (FSType, PartType, string, error) { if got, want := d.DataType, DataPartition; got != want { @@ -108,9 +139,8 @@ func (d rawDescriptor) getPartitionMetadata() (FSType, PartType, string, error) var p partition - b := bytes.NewReader(d.Extra[:]) - if err := binary.Read(b, binary.LittleEndian, &p); err != nil { - return 0, 0, "", fmt.Errorf("%w", err) + if err := d.getExtra(binaryUnmarshaler{&p}); err != nil { + return 0, 0, "", err } return p.Fstype, p.Parttype, p.Arch.GoArch(), nil @@ -168,11 +198,23 @@ func (d Descriptor) ModifiedAt() time.Time { return time.Unix(d.raw.ModifiedAt, // Name returns the name of the data object. func (d Descriptor) Name() string { return strings.TrimRight(string(d.raw.Name[:]), "\000") } +// GetMetadata unmarshals metadata from the "extra" field of d into md. 
+func (d Descriptor) GetMetadata(md encoding.BinaryUnmarshaler) error { + if err := d.raw.getExtra(md); err != nil { + return fmt.Errorf("%w", err) + } + return nil +} + // PartitionMetadata gets metadata for a partition data object. // //nolint:nonamedreturns // Named returns effective as documentation. func (d Descriptor) PartitionMetadata() (fs FSType, pt PartType, arch string, err error) { - return d.raw.getPartitionMetadata() + fs, pt, arch, err = d.raw.getPartitionMetadata() + if err != nil { + return 0, 0, "", fmt.Errorf("%w", err) + } + return fs, pt, arch, err } var errHashUnsupported = errors.New("hash algorithm unsupported") @@ -204,8 +246,7 @@ func (d Descriptor) SignatureMetadata() (ht crypto.Hash, fp []byte, err error) { var s signature - b := bytes.NewReader(d.raw.Extra[:]) - if err := binary.Read(b, binary.LittleEndian, &s); err != nil { + if err := d.raw.getExtra(binaryUnmarshaler{&s}); err != nil { return ht, fp, fmt.Errorf("%w", err) } @@ -214,6 +255,11 @@ func (d Descriptor) SignatureMetadata() (ht crypto.Hash, fp []byte, err error) { } fp = make([]byte, 20) + + if bytes.Equal(s.Entity[:len(fp)], fp) { + return ht, nil, nil // Fingerprint not present. + } + copy(fp, s.Entity[:]) return ht, fp, nil @@ -227,8 +273,7 @@ func (d Descriptor) CryptoMessageMetadata() (FormatType, MessageType, error) { var m cryptoMessage - b := bytes.NewReader(d.raw.Extra[:]) - if err := binary.Read(b, binary.LittleEndian, &m); err != nil { + if err := d.raw.getExtra(binaryUnmarshaler{&m}); err != nil { return 0, 0, fmt.Errorf("%w", err) } @@ -243,8 +288,7 @@ func (d Descriptor) SBOMMetadata() (SBOMFormat, error) { var s sbom - b := bytes.NewReader(d.raw.Extra[:]) - if err := binary.Read(b, binary.LittleEndian, &s); err != nil { + if err := d.raw.getExtra(binaryUnmarshaler{&s}); err != nil { return 0, fmt.Errorf("%w", err) } diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor_input.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor_input.go index 3e81c394..3cfe5c65 100644 --- a/vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor_input.go +++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor_input.go @@ -1,4 +1,4 @@ -// Copyright (c) 2021-2022, Sylabs Inc. All rights reserved. +// Copyright (c) 2021-2023, Sylabs Inc. All rights reserved. // This software is licensed under a 3-clause BSD license. Please consult the // LICENSE file distributed with the sources of this project regarding your // rights to use or distribute this software. @@ -7,6 +7,7 @@ package sif import ( "crypto" + "encoding" "errors" "fmt" "io" @@ -19,7 +20,7 @@ type descriptorOpts struct { linkID uint32 alignment int name string - extra interface{} + md encoding.BinaryMarshaler t time.Time } @@ -92,6 +93,14 @@ func OptObjectTime(t time.Time) DescriptorInputOpt { } } +// OptMetadata marshals metadata from md into the "extra" field of d. 
+func OptMetadata(md encoding.BinaryMarshaler) DescriptorInputOpt { + return func(t DataType, opts *descriptorOpts) error { + opts.md = md + return nil + } +} + type unexpectedDataTypeError struct { got DataType want []DataType @@ -155,7 +164,7 @@ func OptCryptoMessageMetadata(ft FormatType, mt MessageType) DescriptorInputOpt Messagetype: mt, } - opts.extra = m + opts.md = binaryMarshaler{m} return nil } } @@ -184,7 +193,7 @@ func OptPartitionMetadata(fs FSType, pt PartType, arch string) DescriptorInputOp Arch: sifarch, } - opts.extra = p + opts.md = p return nil } } @@ -221,7 +230,7 @@ func OptSignatureMetadata(ht crypto.Hash, fp []byte) DescriptorInputOpt { } copy(s.Entity[:], fp) - opts.extra = s + opts.md = binaryMarshaler{s} return nil } } @@ -239,7 +248,7 @@ func OptSBOMMetadata(f SBOMFormat) DescriptorInputOpt { Format: f, } - opts.extra = s + opts.md = binaryMarshaler{s} return nil } } @@ -259,7 +268,8 @@ const DefaultObjectGroup = 1 // // It is possible (and often necessary) to store additional metadata related to certain types of // data objects. Consider supplying options such as OptCryptoMessageMetadata, OptPartitionMetadata, -// OptSignatureMetadata, and OptSBOMMetadata for this purpose. +// OptSignatureMetadata, and OptSBOMMetadata for this purpose. To set custom metadata, use +// OptMetadata. // // By default, the data object will be placed in the default data object group (1). To override // this behavior, use OptNoGroup or OptGroupID. To link this data object, use OptLinkedID or @@ -317,5 +327,5 @@ func (di DescriptorInput) fillDescriptor(t time.Time, d *rawDescriptor) error { return err } - return d.setExtra(di.opts.extra) + return d.setExtra(di.opts.md) } diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/sif.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/sif.go index 2d1c2091..74ff1007 100644 --- a/vendor/github.com/sylabs/sif/v2/pkg/sif/sif.go +++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/sif.go @@ -1,4 +1,4 @@ -// Copyright (c) 2018-2022, Sylabs Inc. All rights reserved. +// Copyright (c) 2018-2023, Sylabs Inc. All rights reserved. // Copyright (c) 2017, SingularityWare, LLC. All rights reserved. // Copyright (c) 2017, Yannick Cote All rights reserved. // This software is licensed under a 3-clause BSD license. Please consult the @@ -402,3 +402,9 @@ func (f *FileImage) DataSize() int64 { return f.h.DataSize } func (f *FileImage) GetHeaderIntegrityReader() io.Reader { return f.h.GetIntegrityReader() } + +// isDeterministic returns true if the UUID and timestamps in the header of f are set to +// deterministic values. 
+func (f *FileImage) isDeterministic() bool { + return f.h.ID == uuid.Nil && f.CreatedAt().IsZero() && f.ModifiedAt().IsZero() +} diff --git a/vendor/github.com/vifraa/gopom/example_pom.go b/vendor/github.com/vifraa/gopom/example_pom.go index de3ed3e7..dbd8b71e 100644 --- a/vendor/github.com/vifraa/gopom/example_pom.go +++ b/vendor/github.com/vifraa/gopom/example_pom.go @@ -325,7 +325,11 @@ var examplePom = ` goal inherited - configuration + + value + value2 + value3 + @@ -350,7 +354,11 @@ var examplePom = ` goal inherited - configuration + + value + value2 + value3 + @@ -368,7 +376,11 @@ var examplePom = ` goal inherited - configuration + + value + value2 + value3 + @@ -393,7 +405,11 @@ var examplePom = ` goal inherited - configuration + + value + value2 + value3 + @@ -414,11 +430,19 @@ var examplePom = ` report inherited - configuration + + value + value2 + value3 + inherited - configuration + + value + value2 + value3 + @@ -492,7 +516,11 @@ var examplePom = ` goal inherited - configuration + + value + value2 + value3 + @@ -517,7 +545,11 @@ var examplePom = ` goal inherited - configuration + + value + value2 + value3 + @@ -535,7 +567,11 @@ var examplePom = ` goal inherited - configuration + + value + value2 + value3 + @@ -560,7 +596,11 @@ var examplePom = ` goal inherited - configuration + + value + value2 + value3 + @@ -719,11 +759,19 @@ var examplePom = ` report inherited - configuration + + value + value2 + value3 + inherited - configuration + + value + value2 + value3 + diff --git a/vendor/github.com/vifraa/gopom/gopom.go b/vendor/github.com/vifraa/gopom/gopom.go index b8db8822..ffe3b6c6 100644 --- a/vendor/github.com/vifraa/gopom/gopom.go +++ b/vendor/github.com/vifraa/gopom/gopom.go @@ -36,36 +36,36 @@ func ParseFromReader(reader io.Reader) (*Project, error) { } type Project struct { - XMLName xml.Name `xml:"project"` - ModelVersion string `xml:"modelVersion"` - Parent Parent `xml:"parent"` - GroupID string `xml:"groupId"` - ArtifactID string `xml:"artifactId"` - Version string `xml:"version"` - Packaging string `xml:"packaging"` - Name string `xml:"name"` - Description string `xml:"description"` - URL string `xml:"url"` - InceptionYear string `xml:"inceptionYear"` - Organization Organization `xml:"organization"` - Licenses []License `xml:"licenses>license"` - Developers []Developer `xml:"developers>developer"` - Contributors []Contributor `xml:"contributors>contributor"` - MailingLists []MailingList `xml:"mailingLists>mailingList"` - Prerequisites Prerequisites `xml:"prerequisites"` - Modules []string `xml:"modules>module"` - SCM Scm `xml:"scm"` - IssueManagement IssueManagement `xml:"issueManagement"` - CIManagement CIManagement `xml:"ciManagement"` - DistributionManagement DistributionManagement `xml:"distributionManagement"` - DependencyManagement DependencyManagement `xml:"dependencyManagement"` - Dependencies []Dependency `xml:"dependencies>dependency"` - Repositories []Repository `xml:"repositories>repository"` - PluginRepositories []PluginRepository `xml:"pluginRepositories>pluginRepository"` - Build Build `xml:"build"` - Reporting Reporting `xml:"reporting"` - Profiles []Profile `xml:"profiles>profile"` - Properties Properties `xml:"properties"` + XMLName *xml.Name `xml:"project,omitempty"` + ModelVersion *string `xml:"modelVersion,omitempty"` + Parent *Parent `xml:"parent,omitempty"` + GroupID *string `xml:"groupId,omitempty"` + ArtifactID *string `xml:"artifactId,omitempty"` + Version *string `xml:"version,omitempty"` + Packaging *string `xml:"packaging,omitempty"` + 
Name                   *string                 `xml:"name,omitempty"`
+	Description            *string                 `xml:"description,omitempty"`
+	URL                    *string                 `xml:"url,omitempty"`
+	InceptionYear          *string                 `xml:"inceptionYear,omitempty"`
+	Organization           *Organization           `xml:"organization,omitempty"`
+	Licenses               *[]License              `xml:"licenses>license,omitempty"`
+	Developers             *[]Developer            `xml:"developers>developer,omitempty"`
+	Contributors           *[]Contributor          `xml:"contributors>contributor,omitempty"`
+	MailingLists           *[]MailingList          `xml:"mailingLists>mailingList,omitempty"`
+	Prerequisites          *Prerequisites          `xml:"prerequisites,omitempty"`
+	Modules                *[]string               `xml:"modules>module,omitempty"`
+	SCM                    *Scm                    `xml:"scm,omitempty"`
+	IssueManagement        *IssueManagement        `xml:"issueManagement,omitempty"`
+	CIManagement           *CIManagement           `xml:"ciManagement,omitempty"`
+	DistributionManagement *DistributionManagement `xml:"distributionManagement,omitempty"`
+	DependencyManagement   *DependencyManagement   `xml:"dependencyManagement,omitempty"`
+	Dependencies           *[]Dependency           `xml:"dependencies>dependency,omitempty"`
+	Repositories           *[]Repository           `xml:"repositories>repository,omitempty"`
+	PluginRepositories     *[]PluginRepository     `xml:"pluginRepositories>pluginRepository,omitempty"`
+	Build                  *Build                  `xml:"build,omitempty"`
+	Reporting              *Reporting              `xml:"reporting,omitempty"`
+	Profiles               *[]Profile              `xml:"profiles>profile,omitempty"`
+	Properties             *Properties             `xml:"properties,omitempty"`
 }
 
 type Properties struct {
@@ -114,267 +114,271 @@ func (p Properties) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
 }
 
 type Parent struct {
-	GroupID      string `xml:"groupId"`
-	ArtifactID   string `xml:"artifactId"`
-	Version      string `xml:"version"`
-	RelativePath string `xml:"relativePath"`
+	GroupID      *string `xml:"groupId,omitempty"`
+	ArtifactID   *string `xml:"artifactId,omitempty"`
+	Version      *string `xml:"version,omitempty"`
+	RelativePath *string `xml:"relativePath,omitempty"`
 }
 
 type Organization struct {
-	Name string `xml:"name"`
-	URL  string `xml:"url"`
+	Name *string `xml:"name,omitempty"`
+	URL  *string `xml:"url,omitempty"`
 }
 
 type License struct {
-	Name         string `xml:"name"`
-	URL          string `xml:"url"`
-	Distribution string `xml:"distribution"`
-	Comments     string `xml:"comments"`
+	Name         *string `xml:"name,omitempty"`
+	URL          *string `xml:"url,omitempty"`
+	Distribution *string `xml:"distribution,omitempty"`
+	Comments     *string `xml:"comments,omitempty"`
 }
 
 type Developer struct {
-	ID              string     `xml:"id"`
-	Name            string     `xml:"name"`
-	Email           string     `xml:"email"`
-	URL             string     `xml:"url"`
-	Organization    string     `xml:"organization"`
-	OrganizationURL string     `xml:"organizationUrl"`
-	Roles           []string   `xml:"roles>role"`
-	Timezone        string     `xml:"timezone"`
-	Properties      Properties `xml:"properties"`
+	ID              *string     `xml:"id,omitempty"`
+	Name            *string     `xml:"name,omitempty"`
+	Email           *string     `xml:"email,omitempty"`
+	URL             *string     `xml:"url,omitempty"`
+	Organization    *string     `xml:"organization,omitempty"`
+	OrganizationURL *string     `xml:"organizationUrl,omitempty"`
+	Roles           *[]string   `xml:"roles>role,omitempty"`
+	Timezone        *string     `xml:"timezone,omitempty"`
+	Properties      *Properties `xml:"properties,omitempty"`
 }
 
 type Contributor struct {
-	Name            string     `xml:"name"`
-	Email           string     `xml:"email"`
-	URL             string     `xml:"url"`
-	Organization    string     `xml:"organization"`
-	OrganizationURL string     `xml:"organizationUrl"`
-	Roles           []string   `xml:"roles>role"`
-	Timezone        string     `xml:"timezone"`
-	Properties      Properties `xml:"properties"`
+	Name            *string     `xml:"name,omitempty"`
+	Email           *string     `xml:"email,omitempty"`
+	URL             *string     `xml:"url,omitempty"`
+	Organization    *string     `xml:"organization,omitempty"`
+	OrganizationURL *string     `xml:"organizationUrl,omitempty"`
+	Roles           *[]string   `xml:"roles>role,omitempty"`
+	Timezone        *string     `xml:"timezone,omitempty"`
+	Properties      *Properties `xml:"properties,omitempty"`
 }
 
 type MailingList struct {
-	Name          string   `xml:"name"`
-	Subscribe     string   `xml:"subscribe"`
-	Unsubscribe   string   `xml:"unsubscribe"`
-	Post          string   `xml:"post"`
-	Archive       string   `xml:"archive"`
-	OtherArchives []string `xml:"otherArchives>otherArchive"`
+	Name          *string   `xml:"name,omitempty"`
+	Subscribe     *string   `xml:"subscribe,omitempty"`
+	Unsubscribe   *string   `xml:"unsubscribe,omitempty"`
+	Post          *string   `xml:"post,omitempty"`
+	Archive       *string   `xml:"archive,omitempty"`
+	OtherArchives *[]string `xml:"otherArchives>otherArchive,omitempty"`
 }
 
 type Prerequisites struct {
-	Maven string `xml:"maven"`
+	Maven *string `xml:"maven,omitempty"`
 }
 
 type Scm struct {
-	Connection          string `xml:"connection"`
-	DeveloperConnection string `xml:"developerConnection"`
-	Tag                 string `xml:"tag"`
-	URL                 string `xml:"url"`
+	Connection          *string `xml:"connection,omitempty"`
+	DeveloperConnection *string `xml:"developerConnection,omitempty"`
+	Tag                 *string `xml:"tag,omitempty"`
+	URL                 *string `xml:"url,omitempty"`
 }
 
 type IssueManagement struct {
-	System string `xml:"system"`
-	URL    string `xml:"url"`
+	System *string `xml:"system,omitempty"`
+	URL    *string `xml:"url,omitempty"`
 }
 
 type CIManagement struct {
-	System    string     `xml:"system"`
-	URL       string     `xml:"url"`
-	Notifiers []Notifier `xml:"notifiers>notifier"`
+	System    *string     `xml:"system,omitempty"`
+	URL       *string     `xml:"url,omitempty"`
+	Notifiers *[]Notifier `xml:"notifiers>notifier,omitempty"`
 }
 
 type Notifier struct {
-	Type          string     `xml:"type"`
-	SendOnError   bool       `xml:"sendOnError"`
-	SendOnFailure bool       `xml:"sendOnFailure"`
-	SendOnSuccess bool       `xml:"sendOnSuccess"`
-	SendOnWarning bool       `xml:"sendOnWarning"`
-	Address       string     `xml:"address"`
-	Configuration Properties `xml:"configuration"`
+	Type          *string     `xml:"type,omitempty"`
+	SendOnError   *bool       `xml:"sendOnError,omitempty"`
+	SendOnFailure *bool       `xml:"sendOnFailure,omitempty"`
+	SendOnSuccess *bool       `xml:"sendOnSuccess,omitempty"`
+	SendOnWarning *bool       `xml:"sendOnWarning,omitempty"`
+	Address       *string     `xml:"address,omitempty"`
+	Configuration *Properties `xml:"configuration,omitempty"`
 }
 
 type DistributionManagement struct {
-	Repository         Repository `xml:"repository"`
-	SnapshotRepository Repository `xml:"snapshotRepository"`
-	Site               Site       `xml:"site"`
-	DownloadURL        string     `xml:"downloadUrl"`
-	Relocation         Relocation `xml:"relocation"`
-	Status             string     `xml:"status"`
+	Repository         *Repository `xml:"repository,omitempty"`
+	SnapshotRepository *Repository `xml:"snapshotRepository,omitempty"`
+	Site               *Site       `xml:"site,omitempty"`
+	DownloadURL        *string     `xml:"downloadUrl,omitempty"`
+	Relocation         *Relocation `xml:"relocation,omitempty"`
+	Status             *string     `xml:"status,omitempty"`
 }
 
 type Site struct {
-	ID   string `xml:"id"`
-	Name string `xml:"name"`
-	URL  string `xml:"url"`
+	ID   *string `xml:"id,omitempty"`
+	Name *string `xml:"name,omitempty"`
+	URL  *string `xml:"url,omitempty"`
 }
 
 type Relocation struct {
-	GroupID    string `xml:"groupId"`
-	ArtifactID string `xml:"artifactId"`
-	Version    string `xml:"version"`
-	Message    string `xml:"message"`
+	GroupID    *string `xml:"groupId,omitempty"`
+	ArtifactID *string `xml:"artifactId,omitempty"`
+	Version    *string `xml:"version,omitempty"`
+	Message    *string `xml:"message,omitempty"`
 }
 
 type DependencyManagement struct {
-	Dependencies []Dependency `xml:"dependencies>dependency"`
+	Dependencies *[]Dependency `xml:"dependencies>dependency,omitempty"`
 }
 
 type Dependency struct {
-	GroupID    string      `xml:"groupId"`
-	ArtifactID string      `xml:"artifactId"`
-	Version    string      `xml:"version"`
-	Type       string      `xml:"type"`
-	Classifier string      `xml:"classifier"`
-	Scope      string      `xml:"scope"`
-	SystemPath string      `xml:"systemPath"`
-	Exclusions []Exclusion `xml:"exclusions>exclusion"`
-	Optional   string      `xml:"optional"`
+	GroupID    *string      `xml:"groupId,omitempty"`
+	ArtifactID *string      `xml:"artifactId,omitempty"`
+	Version    *string      `xml:"version,omitempty"`
+	Type       *string      `xml:"type,omitempty"`
+	Classifier *string      `xml:"classifier,omitempty"`
+	Scope      *string      `xml:"scope,omitempty"`
+	SystemPath *string      `xml:"systemPath,omitempty"`
+	Exclusions *[]Exclusion `xml:"exclusions>exclusion,omitempty"`
+	Optional   *string      `xml:"optional,omitempty"`
 }
 
 type Exclusion struct {
-	ArtifactID string `xml:"artifactId"`
-	GroupID    string `xml:"groupId"`
+	ArtifactID *string `xml:"artifactId,omitempty"`
+	GroupID    *string `xml:"groupId,omitempty"`
 }
 
 type Repository struct {
-	UniqueVersion bool             `xml:"uniqueVersion"`
-	Releases      RepositoryPolicy `xml:"releases"`
-	Snapshots     RepositoryPolicy `xml:"snapshots"`
-	ID            string           `xml:"id"`
-	Name          string           `xml:"name"`
-	URL           string           `xml:"url"`
-	Layout        string           `xml:"layout"`
+	UniqueVersion *bool             `xml:"uniqueVersion,omitempty"`
+	Releases      *RepositoryPolicy `xml:"releases,omitempty"`
+	Snapshots     *RepositoryPolicy `xml:"snapshots,omitempty"`
+	ID            *string           `xml:"id,omitempty"`
+	Name          *string           `xml:"name,omitempty"`
+	URL           *string           `xml:"url,omitempty"`
+	Layout        *string           `xml:"layout,omitempty"`
 }
 
 type RepositoryPolicy struct {
-	Enabled        string `xml:"enabled"`
-	UpdatePolicy   string `xml:"updatePolicy"`
-	ChecksumPolicy string `xml:"checksumPolicy"`
+	Enabled        *string `xml:"enabled,omitempty"`
+	UpdatePolicy   *string `xml:"updatePolicy,omitempty"`
+	ChecksumPolicy *string `xml:"checksumPolicy,omitempty"`
 }
 
 type PluginRepository struct {
-	Releases  RepositoryPolicy `xml:"releases"`
-	Snapshots RepositoryPolicy `xml:"snapshots"`
-	ID        string           `xml:"id"`
-	Name      string           `xml:"name"`
-	URL       string           `xml:"url"`
-	Layout    string           `xml:"layout"`
+	Releases  *RepositoryPolicy `xml:"releases,omitempty"`
+	Snapshots *RepositoryPolicy `xml:"snapshots,omitempty"`
+	ID        *string           `xml:"id,omitempty"`
+	Name      *string           `xml:"name,omitempty"`
+	URL       *string           `xml:"url,omitempty"`
+	Layout    *string           `xml:"layout,omitempty"`
 }
 
 type BuildBase struct {
-	DefaultGoal      string           `xml:"defaultGoal"`
-	Resources        []Resource       `xml:"resources>resource"`
-	TestResources    []Resource       `xml:"testResources>testResource"`
-	Directory        string           `xml:"directory"`
-	FinalName        string           `xml:"finalName"`
-	Filters          []string         `xml:"filters>filter"`
-	PluginManagement PluginManagement `xml:"pluginManagement"`
-	Plugins          []Plugin         `xml:"plugins>plugin"`
+	DefaultGoal      *string           `xml:"defaultGoal,omitempty"`
+	Resources        *[]Resource       `xml:"resources>resource,omitempty"`
+	TestResources    *[]Resource       `xml:"testResources>testResource,omitempty"`
+	Directory        *string           `xml:"directory,omitempty"`
+	FinalName        *string           `xml:"finalName,omitempty"`
+	Filters          *[]string         `xml:"filters>filter,omitempty"`
+	PluginManagement *PluginManagement `xml:"pluginManagement,omitempty"`
+	Plugins          *[]Plugin         `xml:"plugins>plugin,omitempty"`
 }
 
 type Build struct {
-	SourceDirectory       string      `xml:"sourceDirectory"`
-	ScriptSourceDirectory string      `xml:"scriptSourceDirectory"`
-	TestSourceDirectory   string      `xml:"testSourceDirectory"`
-	OutputDirectory       string      `xml:"outputDirectory"`
-	TestOutputDirectory   string      `xml:"testOutputDirectory"`
-	Extensions            []Extension `xml:"extensions>extension"`
+	SourceDirectory       *string      `xml:"sourceDirectory,omitempty"`
+	ScriptSourceDirectory *string      `xml:"scriptSourceDirectory,omitempty"`
+	TestSourceDirectory   *string      `xml:"testSourceDirectory,omitempty"`
+	OutputDirectory       *string      `xml:"outputDirectory,omitempty"`
+	TestOutputDirectory   *string      `xml:"testOutputDirectory,omitempty"`
+	Extensions            *[]Extension `xml:"extensions>extension,omitempty"`
 	BuildBase
 }
 
 type Extension struct {
-	GroupID    string `xml:"groupId"`
-	ArtifactID string `xml:"artifactId"`
-	Version    string `xml:"version"`
+	GroupID    *string `xml:"groupId,omitempty"`
+	ArtifactID *string `xml:"artifactId,omitempty"`
+	Version    *string `xml:"version,omitempty"`
 }
 
 type Resource struct {
-	TargetPath string   `xml:"targetPath"`
-	Filtering  string   `xml:"filtering"`
-	Directory  string   `xml:"directory"`
-	Includes   []string `xml:"includes>include"`
-	Excludes   []string `xml:"excludes>exclude"`
+	TargetPath *string   `xml:"targetPath,omitempty"`
+	Filtering  *string   `xml:"filtering,omitempty"`
+	Directory  *string   `xml:"directory,omitempty"`
+	Includes   *[]string `xml:"includes>include,omitempty"`
+	Excludes   *[]string `xml:"excludes>exclude,omitempty"`
 }
 
 type PluginManagement struct {
-	Plugins []Plugin `xml:"plugins>plugin"`
+	Plugins *[]Plugin `xml:"plugins>plugin,omitempty"`
 }
 
 type Plugin struct {
-	GroupID      string            `xml:"groupId"`
-	ArtifactID   string            `xml:"artifactId"`
-	Version      string            `xml:"version"`
-	Extensions   string            `xml:"extensions"`
-	Executions   []PluginExecution `xml:"executions>execution"`
-	Dependencies []Dependency      `xml:"dependencies>dependency"`
-	Inherited    string            `xml:"inherited"`
+	GroupID       *string            `xml:"groupId,omitempty"`
+	ArtifactID    *string            `xml:"artifactId,omitempty"`
+	Version       *string            `xml:"version,omitempty"`
+	Extensions    *string            `xml:"extensions,omitempty"`
+	Executions    *[]PluginExecution `xml:"executions>execution,omitempty"`
+	Dependencies  *[]Dependency      `xml:"dependencies>dependency,omitempty"`
+	Inherited     *string            `xml:"inherited,omitempty"`
+	Configuration *Properties        `xml:"configuration,omitempty"`
 }
 
 type PluginExecution struct {
-	ID        string   `xml:"id"`
-	Phase     string   `xml:"phase"`
-	Goals     []string `xml:"goals>goal"`
-	Inherited string   `xml:"inherited"`
+	ID            *string     `xml:"id,omitempty"`
+	Phase         *string     `xml:"phase,omitempty"`
+	Goals         *[]string   `xml:"goals>goal,omitempty"`
+	Inherited     *string     `xml:"inherited,omitempty"`
+	Configuration *Properties `xml:"configuration,omitempty"`
 }
 
 type Reporting struct {
-	ExcludeDefaults string            `xml:"excludeDefaults"`
-	OutputDirectory string            `xml:"outputDirectory"`
-	Plugins         []ReportingPlugin `xml:"plugins>plugin"`
+	ExcludeDefaults *string            `xml:"excludeDefaults,omitempty"`
+	OutputDirectory *string            `xml:"outputDirectory,omitempty"`
+	Plugins         *[]ReportingPlugin `xml:"plugins>plugin,omitempty"`
 }
 
 type ReportingPlugin struct {
-	GroupID    string      `xml:"groupId"`
-	ArtifactID string      `xml:"artifactId"`
-	Version    string      `xml:"version"`
-	Inherited  string      `xml:"inherited"`
-	ReportSets []ReportSet `xml:"reportSets>reportSet"`
+	GroupID       *string      `xml:"groupId,omitempty"`
+	ArtifactID    *string      `xml:"artifactId,omitempty"`
+	Version       *string      `xml:"version,omitempty"`
+	Inherited     *string      `xml:"inherited,omitempty"`
+	ReportSets    *[]ReportSet `xml:"reportSets>reportSet,omitempty"`
+	Configuration *Properties  `xml:"configuration,omitempty"`
 }
 
 type ReportSet struct {
-	ID        string   `xml:"id"`
-	Reports   []string `xml:"reports>report"`
-	Inherited string   `xml:"inherited"`
+	ID            *string     `xml:"id,omitempty"`
+	Reports       *[]string   `xml:"reports>report,omitempty"`
+	Inherited     *string     `xml:"inherited,omitempty"`
+	Configuration *Properties `xml:"configuration,omitempty"`
 }
 
 type Profile struct {
-	ID                     string                 `xml:"id"`
-	Activation             Activation             `xml:"activation"`
-	Build                  BuildBase              `xml:"build"`
-	Modules                []string               `xml:"modules>module"`
-	DistributionManagement DistributionManagement `xml:"distributionManagement"`
-	Properties             Properties             `xml:"properties"`
-	DependencyManagement   DependencyManagement   `xml:"dependencyManagement"`
-	Dependencies           []Dependency           `xml:"dependencies>dependency"`
-	Repositories           []Repository           `xml:"repositories>repository"`
-	PluginRepositories     []PluginRepository     `xml:"pluginRepositories>pluginRepository"`
-	Reporting              Reporting              `xml:"reporting"`
+	ID                     *string                 `xml:"id,omitempty"`
+	Activation             *Activation             `xml:"activation,omitempty"`
+	Build                  *BuildBase              `xml:"build,omitempty"`
+	Modules                *[]string               `xml:"modules>module,omitempty"`
+	DistributionManagement *DistributionManagement `xml:"distributionManagement,omitempty"`
+	Properties             *Properties             `xml:"properties,omitempty"`
+	DependencyManagement   *DependencyManagement   `xml:"dependencyManagement,omitempty"`
+	Dependencies           *[]Dependency           `xml:"dependencies>dependency,omitempty"`
+	Repositories           *[]Repository           `xml:"repositories>repository,omitempty"`
+	PluginRepositories     *[]PluginRepository     `xml:"pluginRepositories>pluginRepository,omitempty"`
+	Reporting              *Reporting              `xml:"reporting,omitempty"`
 }
 
 type Activation struct {
-	ActiveByDefault bool               `xml:"activeByDefault"`
-	JDK             string             `xml:"jdk"`
-	OS              ActivationOS       `xml:"os"`
-	Property        ActivationProperty `xml:"property"`
-	File            ActivationFile     `xml:"file"`
+	ActiveByDefault *bool               `xml:"activeByDefault,omitempty"`
+	JDK             *string             `xml:"jdk,omitempty"`
+	OS              *ActivationOS       `xml:"os,omitempty"`
+	Property        *ActivationProperty `xml:"property,omitempty"`
+	File            *ActivationFile     `xml:"file,omitempty"`
 }
 
 type ActivationOS struct {
-	Name    string `xml:"name"`
-	Family  string `xml:"family"`
-	Arch    string `xml:"arch"`
-	Version string `xml:"version"`
+	Name    *string `xml:"name,omitempty"`
+	Family  *string `xml:"family,omitempty"`
+	Arch    *string `xml:"arch,omitempty"`
+	Version *string `xml:"version,omitempty"`
 }
 
 type ActivationProperty struct {
-	Name  string `xml:"name"`
-	Value string `xml:"value"`
+	Name  *string `xml:"name,omitempty"`
+	Value *string `xml:"value,omitempty"`
 }
 
 type ActivationFile struct {
-	Missing string `xml:"missing"`
-	Exists  string `xml:"exists"`
+	Missing *string `xml:"missing,omitempty"`
+	Exists  *string `xml:"exists,omitempty"`
 }
diff --git a/vendor/go.mozilla.org/pkcs7/.gitignore b/vendor/go.mozilla.org/pkcs7/.gitignore
new file mode 100644
index 00000000..daf913b1
--- /dev/null
+++ b/vendor/go.mozilla.org/pkcs7/.gitignore
@@ -0,0 +1,24 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
diff --git a/vendor/go.mozilla.org/pkcs7/LICENSE b/vendor/go.mozilla.org/pkcs7/LICENSE
new file mode 100644
index 00000000..75f32090
--- /dev/null
+++ b/vendor/go.mozilla.org/pkcs7/LICENSE
@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Andrew Smith
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
diff --git a/vendor/go.mozilla.org/pkcs7/Makefile b/vendor/go.mozilla.org/pkcs7/Makefile
new file mode 100644
index 00000000..47c73b86
--- /dev/null
+++ b/vendor/go.mozilla.org/pkcs7/Makefile
@@ -0,0 +1,20 @@
+all: vet staticcheck test
+
+test:
+	go test -covermode=count -coverprofile=coverage.out .
+
+showcoverage: test
+	go tool cover -html=coverage.out
+
+vet:
+	go vet .
+
+lint:
+	golint .
+
+staticcheck:
+	staticcheck .
+
+gettools:
+	go get -u honnef.co/go/tools/...
+	go get -u golang.org/x/lint/golint
diff --git a/vendor/go.mozilla.org/pkcs7/README.md b/vendor/go.mozilla.org/pkcs7/README.md
new file mode 100644
index 00000000..a55d117c
--- /dev/null
+++ b/vendor/go.mozilla.org/pkcs7/README.md
@@ -0,0 +1,69 @@
+# pkcs7
+
+[![GoDoc](https://godoc.org/go.mozilla.org/pkcs7?status.svg)](https://godoc.org/go.mozilla.org/pkcs7)
+[![Build Status](https://github.com/mozilla-services/pkcs7/workflows/CI/badge.svg?branch=master&event=push)](https://github.com/mozilla-services/pkcs7/actions/workflows/ci.yml?query=branch%3Amaster+event%3Apush)
+
+pkcs7 implements parsing and creating signed and enveloped messages.
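+
+A signing and verification example follows below. First, a minimal sketch of
+the enveloped-message side, built from the `Encrypt`, `Parse`, and `Decrypt`
+functions in this package (`cert` and `privkey` are assumed to be loaded by
+the caller):
+
+```go
+func encryptAndDecrypt(content []byte, cert *x509.Certificate, privkey *rsa.PrivateKey) ([]byte, error) {
+	// Pick an AEAD content-encryption algorithm instead of the DES-CBC default.
+	pkcs7.ContentEncryptionAlgorithm = pkcs7.EncryptionAlgorithmAES128GCM
+
+	// Encrypt wraps the random content key for each recipient certificate.
+	enveloped, err := pkcs7.Encrypt(content, []*x509.Certificate{cert})
+	if err != nil {
+		return nil, err
+	}
+
+	// Parse the DER blob and unwrap it with the matching private key.
+	p7, err := pkcs7.Parse(enveloped)
+	if err != nil {
+		return nil, err
+	}
+	return p7.Decrypt(cert, privkey)
+}
+```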
+
+```go
+package main
+
+import (
+	"bytes"
+	"crypto/rsa"
+	"crypto/x509"
+	"encoding/pem"
+	"fmt"
+	"os"
+
+	"go.mozilla.org/pkcs7"
+)
+
+func SignAndDetach(content []byte, cert *x509.Certificate, privkey *rsa.PrivateKey) (signed []byte, err error) {
+	toBeSigned, err := NewSignedData(content)
+	if err != nil {
+		err = fmt.Errorf("Cannot initialize signed data: %s", err)
+		return
+	}
+	if err = toBeSigned.AddSigner(cert, privkey, SignerInfoConfig{}); err != nil {
+		err = fmt.Errorf("Cannot add signer: %s", err)
+		return
+	}
+
+	// Detach signature, omit if you want an embedded signature
+	toBeSigned.Detach()
+
+	signed, err = toBeSigned.Finish()
+	if err != nil {
+		err = fmt.Errorf("Cannot finish signing data: %s", err)
+		return
+	}
+
+	// Verify the signature
+	pem.Encode(os.Stdout, &pem.Block{Type: "PKCS7", Bytes: signed})
+	p7, err := pkcs7.Parse(signed)
+	if err != nil {
+		err = fmt.Errorf("Cannot parse our signed data: %s", err)
+		return
+	}
+
+	// since the signature was detached, reattach the content here
+	p7.Content = content
+
+	if bytes.Compare(content, p7.Content) != 0 {
+		err = fmt.Errorf("Our content was not in the parsed data:\n\tExpected: %s\n\tActual: %s", content, p7.Content)
+		return
+	}
+	if err = p7.Verify(); err != nil {
+		err = fmt.Errorf("Cannot verify our signed data: %s", err)
+		return
+	}
+
+	return signed, nil
+}
+```
+
+
+
+## Credits
+This is a fork of [fullsailor/pkcs7](https://github.com/fullsailor/pkcs7)
diff --git a/vendor/go.mozilla.org/pkcs7/ber.go b/vendor/go.mozilla.org/pkcs7/ber.go
new file mode 100644
index 00000000..73da024a
--- /dev/null
+++ b/vendor/go.mozilla.org/pkcs7/ber.go
@@ -0,0 +1,271 @@
+package pkcs7
+
+import (
+	"bytes"
+	"errors"
+)
+
+var encodeIndent = 0
+
+type asn1Object interface {
+	EncodeTo(writer *bytes.Buffer) error
+}
+
+type asn1Structured struct {
+	tagBytes []byte
+	content  []asn1Object
+}
+
+func (s asn1Structured) EncodeTo(out *bytes.Buffer) error {
+	//fmt.Printf("%s--> tag: % X\n", strings.Repeat("| ", encodeIndent), s.tagBytes)
+	encodeIndent++
+	inner := new(bytes.Buffer)
+	for _, obj := range s.content {
+		err := obj.EncodeTo(inner)
+		if err != nil {
+			return err
+		}
+	}
+	encodeIndent--
+	out.Write(s.tagBytes)
+	encodeLength(out, inner.Len())
+	out.Write(inner.Bytes())
+	return nil
+}
+
+type asn1Primitive struct {
+	tagBytes []byte
+	length   int
+	content  []byte
+}
+
+func (p asn1Primitive) EncodeTo(out *bytes.Buffer) error {
+	_, err := out.Write(p.tagBytes)
+	if err != nil {
+		return err
+	}
+	if err = encodeLength(out, p.length); err != nil {
+		return err
+	}
+	//fmt.Printf("%s--> tag: % X length: %d\n", strings.Repeat("| ", encodeIndent), p.tagBytes, p.length)
+	//fmt.Printf("%s--> content length: %d\n", strings.Repeat("| ", encodeIndent), len(p.content))
+	out.Write(p.content)
+
+	return nil
+}
+
+func ber2der(ber []byte) ([]byte, error) {
+	if len(ber) == 0 {
+		return nil, errors.New("ber2der: input ber is empty")
+	}
+	//fmt.Printf("--> ber2der: Transcoding %d bytes\n", len(ber))
+	out := new(bytes.Buffer)
+
+	obj, _, err := readObject(ber, 0)
+	if err != nil {
+		return nil, err
+	}
+	obj.EncodeTo(out)
+
+	// if offset < len(ber) {
+	//	return nil, fmt.Errorf("ber2der: Content longer than expected. Got %d, expected %d", offset, len(ber))
+	//}
+
+	return out.Bytes(), nil
+}
+
+// encodes lengths that are longer than 127 into a string of bytes
+func marshalLongLength(out *bytes.Buffer, i int) (err error) {
+	n := lengthLength(i)
+
+	for ; n > 0; n-- {
+		err = out.WriteByte(byte(i >> uint((n-1)*8)))
+		if err != nil {
+			return
+		}
+	}
+
+	return nil
+}
+
+// computes the byte length of an encoded length value
+func lengthLength(i int) (numBytes int) {
+	numBytes = 1
+	for i > 255 {
+		numBytes++
+		i >>= 8
+	}
+	return
+}
+
+// encodes the length in DER format
+// If the length fits in 7 bits, the value is encoded directly.
+//
+// Otherwise, the number of bytes to encode the length is first determined.
+// This number is likely to be 4 or less for a 32bit length. This number is
+// added to 0x80, and the length itself follows, encoded big endian.
+//
+// Examples:
+//  length | byte 1 | bytes n
+//  0      | 0x00   | -
+//  120    | 0x78   | -
+//  200    | 0x81   | 0xC8
+//  500    | 0x82   | 0x01 0xF4
+//
+func encodeLength(out *bytes.Buffer, length int) (err error) {
+	if length >= 128 {
+		l := lengthLength(length)
+		err = out.WriteByte(0x80 | byte(l))
+		if err != nil {
+			return
+		}
+		err = marshalLongLength(out, length)
+		if err != nil {
+			return
+		}
+	} else {
+		err = out.WriteByte(byte(length))
+		if err != nil {
+			return
+		}
+	}
+	return
+}
+
+func readObject(ber []byte, offset int) (asn1Object, int, error) {
+	berLen := len(ber)
+	if offset >= berLen {
+		return nil, 0, errors.New("ber2der: offset is after end of ber data")
+	}
+	tagStart := offset
+	b := ber[offset]
+	offset++
+	if offset >= berLen {
+		return nil, 0, errors.New("ber2der: cannot move offset forward, end of ber data reached")
+	}
+	tag := b & 0x1F // last 5 bits
+	if tag == 0x1F {
+		tag = 0
+		for ber[offset] >= 0x80 {
+			tag = tag*128 + ber[offset] - 0x80
+			offset++
+			if offset > berLen {
+				return nil, 0, errors.New("ber2der: cannot move offset forward, end of ber data reached")
+			}
+		}
+		// jvehent 20170227: this doesn't appear to be used anywhere...
+		//tag = tag*128 + ber[offset] - 0x80
+		offset++
+		if offset > berLen {
+			return nil, 0, errors.New("ber2der: cannot move offset forward, end of ber data reached")
+		}
+	}
+	tagEnd := offset
+
+	kind := b & 0x20
+	if kind == 0 {
+		debugprint("--> Primitive\n")
+	} else {
+		debugprint("--> Constructed\n")
+	}
+	// read length
+	var length int
+	l := ber[offset]
+	offset++
+	if offset > berLen {
+		return nil, 0, errors.New("ber2der: cannot move offset forward, end of ber data reached")
+	}
+	indefinite := false
+	if l > 0x80 {
+		numberOfBytes := (int)(l & 0x7F)
+		if numberOfBytes > 4 { // int is only guaranteed to be 32bit
+			return nil, 0, errors.New("ber2der: BER tag length too long")
+		}
+		if numberOfBytes == 4 && (int)(ber[offset]) > 0x7F {
+			return nil, 0, errors.New("ber2der: BER tag length is negative")
+		}
+		if (int)(ber[offset]) == 0x0 {
+			return nil, 0, errors.New("ber2der: BER tag length has leading zero")
+		}
+		debugprint("--> (compute length) indicator byte: %x\n", l)
+		debugprint("--> (compute length) length bytes: % X\n", ber[offset:offset+numberOfBytes])
+		for i := 0; i < numberOfBytes; i++ {
+			length = length*256 + (int)(ber[offset])
+			offset++
+			if offset > berLen {
+				return nil, 0, errors.New("ber2der: cannot move offset forward, end of ber data reached")
+			}
+		}
+	} else if l == 0x80 {
+		indefinite = true
+	} else {
+		length = (int)(l)
+	}
+	if length < 0 {
+		return nil, 0, errors.New("ber2der: invalid negative value found in BER tag length")
+	}
+	//fmt.Printf("--> length        : %d\n", length)
+	contentEnd := offset + length
+	if contentEnd > len(ber) {
+		return nil, 0, errors.New("ber2der: BER tag length is more than available data")
+	}
+	debugprint("--> content start : %d\n", offset)
+	debugprint("--> content end   : %d\n", contentEnd)
+	debugprint("--> content       : % X\n", ber[offset:contentEnd])
+	var obj asn1Object
+	if indefinite && kind == 0 {
+		return nil, 0, errors.New("ber2der: Indefinite form tag must have constructed encoding")
+	}
+	if kind == 0 {
+		obj = asn1Primitive{
+			tagBytes: ber[tagStart:tagEnd],
+			length:   length,
+			content:  ber[offset:contentEnd],
+		}
+	} else {
+		var subObjects []asn1Object
+		for (offset < contentEnd) || indefinite {
+			var subObj asn1Object
+			var err error
+			subObj, offset, err = readObject(ber, offset)
+			if err != nil {
+				return nil, 0, err
+			}
+			subObjects = append(subObjects, subObj)
+
+			if indefinite {
+				terminated, err := isIndefiniteTermination(ber, offset)
+				if err != nil {
+					return nil, 0, err
+				}
+
+				if terminated {
+					break
+				}
+			}
+		}
+		obj = asn1Structured{
+			tagBytes: ber[tagStart:tagEnd],
+			content:  subObjects,
+		}
+	}
+
+	// Apply indefinite form length with 0x0000 terminator.
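+	// The indefinite form is terminated by a two-byte end-of-contents marker
+	// (0x00 0x00), so the returned end offset skips past it. DER forbids the
+	// indefinite form, which is why ber2der re-encodes every object with a
+	// definite length computed above.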
+	if indefinite {
+		contentEnd = offset + 2
+	}
+
+	return obj, contentEnd, nil
+}
+
+func isIndefiniteTermination(ber []byte, offset int) (bool, error) {
+	if len(ber)-offset < 2 {
+		return false, errors.New("ber2der: Invalid BER format")
+	}
+
+	return bytes.Index(ber[offset:], []byte{0x0, 0x0}) == 0, nil
+}
+
+func debugprint(format string, a ...interface{}) {
+	//fmt.Printf(format, a)
+}
diff --git a/vendor/go.mozilla.org/pkcs7/decrypt.go b/vendor/go.mozilla.org/pkcs7/decrypt.go
new file mode 100644
index 00000000..0d088d62
--- /dev/null
+++ b/vendor/go.mozilla.org/pkcs7/decrypt.go
@@ -0,0 +1,177 @@
+package pkcs7
+
+import (
+	"bytes"
+	"crypto"
+	"crypto/aes"
+	"crypto/cipher"
+	"crypto/des"
+	"crypto/rand"
+	"crypto/rsa"
+	"crypto/x509"
+	"encoding/asn1"
+	"errors"
+	"fmt"
+)
+
+// ErrUnsupportedAlgorithm tells you when our quick dev assumptions have failed
+var ErrUnsupportedAlgorithm = errors.New("pkcs7: cannot decrypt data: only RSA, DES, DES-EDE3, AES-256-CBC and AES-128-GCM supported")
+
+// ErrNotEncryptedContent is returned when attempting to Decrypt data that is not encrypted data
+var ErrNotEncryptedContent = errors.New("pkcs7: content data is not a decryptable data type")
+
+// Decrypt decrypts encrypted content info for recipient cert and private key
+func (p7 *PKCS7) Decrypt(cert *x509.Certificate, pkey crypto.PrivateKey) ([]byte, error) {
+	data, ok := p7.raw.(envelopedData)
+	if !ok {
+		return nil, ErrNotEncryptedContent
+	}
+	recipient := selectRecipientForCertificate(data.RecipientInfos, cert)
+	if recipient.EncryptedKey == nil {
+		return nil, errors.New("pkcs7: no enveloped recipient for provided certificate")
+	}
+	switch pkey := pkey.(type) {
+	case *rsa.PrivateKey:
+		var contentKey []byte
+		contentKey, err := rsa.DecryptPKCS1v15(rand.Reader, pkey, recipient.EncryptedKey)
+		if err != nil {
+			return nil, err
+		}
+		return data.EncryptedContentInfo.decrypt(contentKey)
+	}
+	return nil, ErrUnsupportedAlgorithm
+}
+
+// DecryptUsingPSK decrypts encrypted data using caller provided
+// pre-shared secret
+func (p7 *PKCS7) DecryptUsingPSK(key []byte) ([]byte, error) {
+	data, ok := p7.raw.(encryptedData)
+	if !ok {
+		return nil, ErrNotEncryptedContent
+	}
+	return data.EncryptedContentInfo.decrypt(key)
+}
+
+func (eci encryptedContentInfo) decrypt(key []byte) ([]byte, error) {
+	alg := eci.ContentEncryptionAlgorithm.Algorithm
+	if !alg.Equal(OIDEncryptionAlgorithmDESCBC) &&
+		!alg.Equal(OIDEncryptionAlgorithmDESEDE3CBC) &&
+		!alg.Equal(OIDEncryptionAlgorithmAES256CBC) &&
+		!alg.Equal(OIDEncryptionAlgorithmAES128CBC) &&
+		!alg.Equal(OIDEncryptionAlgorithmAES128GCM) &&
+		!alg.Equal(OIDEncryptionAlgorithmAES256GCM) {
+		fmt.Printf("Unsupported Content Encryption Algorithm: %s\n", alg)
+		return nil, ErrUnsupportedAlgorithm
+	}
+
+	// EncryptedContent can either be constructed of multiple OCTET STRINGs
+	// or _be_ a tagged OCTET STRING
+	var cyphertext []byte
+	if eci.EncryptedContent.IsCompound {
+		// Complex case to concat all of the children OCTET STRINGs
+		var buf bytes.Buffer
+		cypherbytes := eci.EncryptedContent.Bytes
+		for {
+			var part []byte
+			cypherbytes, _ = asn1.Unmarshal(cypherbytes, &part)
+			buf.Write(part)
+			if cypherbytes == nil {
+				break
+			}
+		}
+		cyphertext = buf.Bytes()
+	} else {
+		// Simple case, the bytes _are_ the cyphertext
+		cyphertext = eci.EncryptedContent.Bytes
+	}
+
+	var block cipher.Block
+	var err error
+
+	switch {
+	case alg.Equal(OIDEncryptionAlgorithmDESCBC):
+		block, err = des.NewCipher(key)
+	case alg.Equal(OIDEncryptionAlgorithmDESEDE3CBC):
+		block, err = des.NewTripleDESCipher(key)
+	case alg.Equal(OIDEncryptionAlgorithmAES256CBC), alg.Equal(OIDEncryptionAlgorithmAES256GCM):
+		fallthrough
+	case alg.Equal(OIDEncryptionAlgorithmAES128GCM), alg.Equal(OIDEncryptionAlgorithmAES128CBC):
+		block, err = aes.NewCipher(key)
+	}
+
+	if err != nil {
+		return nil, err
+	}
+
+	if alg.Equal(OIDEncryptionAlgorithmAES128GCM) || alg.Equal(OIDEncryptionAlgorithmAES256GCM) {
+		params := aesGCMParameters{}
+		paramBytes := eci.ContentEncryptionAlgorithm.Parameters.Bytes
+
+		_, err := asn1.Unmarshal(paramBytes, &params)
+		if err != nil {
+			return nil, err
+		}
+
+		gcm, err := cipher.NewGCM(block)
+		if err != nil {
+			return nil, err
+		}
+
+		if len(params.Nonce) != gcm.NonceSize() {
+			return nil, errors.New("pkcs7: encryption algorithm parameters are incorrect")
+		}
+		if params.ICVLen != gcm.Overhead() {
+			return nil, errors.New("pkcs7: encryption algorithm parameters are incorrect")
+		}
+
+		plaintext, err := gcm.Open(nil, params.Nonce, cyphertext, nil)
+		if err != nil {
+			return nil, err
+		}
+
+		return plaintext, nil
+	}
+
+	iv := eci.ContentEncryptionAlgorithm.Parameters.Bytes
+	if len(iv) != block.BlockSize() {
+		return nil, errors.New("pkcs7: encryption algorithm parameters are malformed")
+	}
+	mode := cipher.NewCBCDecrypter(block, iv)
+	plaintext := make([]byte, len(cyphertext))
+	mode.CryptBlocks(plaintext, cyphertext)
+	if plaintext, err = unpad(plaintext, mode.BlockSize()); err != nil {
+		return nil, err
+	}
+	return plaintext, nil
+}
+
+func unpad(data []byte, blocklen int) ([]byte, error) {
+	if blocklen < 1 {
+		return nil, fmt.Errorf("invalid blocklen %d", blocklen)
+	}
+	if len(data)%blocklen != 0 || len(data) == 0 {
+		return nil, fmt.Errorf("invalid data len %d", len(data))
+	}
+
+	// the last byte is the length of padding
+	padlen := int(data[len(data)-1])
+
+	// check padding integrity, all bytes should be the same
+	pad := data[len(data)-padlen:]
+	for _, padbyte := range pad {
+		if padbyte != byte(padlen) {
+			return nil, errors.New("invalid padding")
+		}
+	}
+
+	return data[:len(data)-padlen], nil
+}
+
+func selectRecipientForCertificate(recipients []recipientInfo, cert *x509.Certificate) recipientInfo {
+	for _, recp := range recipients {
+		if isCertMatchForIssuerAndSerial(cert, recp.IssuerAndSerialNumber) {
+			return recp
+		}
+	}
+	return recipientInfo{}
+}
diff --git a/vendor/go.mozilla.org/pkcs7/encrypt.go b/vendor/go.mozilla.org/pkcs7/encrypt.go
new file mode 100644
index 00000000..6b265570
--- /dev/null
+++ b/vendor/go.mozilla.org/pkcs7/encrypt.go
@@ -0,0 +1,399 @@
+package pkcs7
+
+import (
+	"bytes"
+	"crypto/aes"
+	"crypto/cipher"
+	"crypto/des"
+	"crypto/rand"
+	"crypto/rsa"
+	"crypto/x509"
+	"crypto/x509/pkix"
+	"encoding/asn1"
+	"errors"
+	"fmt"
+)
+
+type envelopedData struct {
+	Version              int
+	RecipientInfos       []recipientInfo `asn1:"set"`
+	EncryptedContentInfo encryptedContentInfo
+}
+
+type encryptedData struct {
+	Version              int
+	EncryptedContentInfo encryptedContentInfo
+}
+
+type recipientInfo struct {
+	Version                int
+	IssuerAndSerialNumber  issuerAndSerial
+	KeyEncryptionAlgorithm pkix.AlgorithmIdentifier
+	EncryptedKey           []byte
+}
+
+type encryptedContentInfo struct {
+	ContentType                asn1.ObjectIdentifier
+	ContentEncryptionAlgorithm pkix.AlgorithmIdentifier
+	EncryptedContent           asn1.RawValue `asn1:"tag:0,optional"`
+}
+
+const (
+	// EncryptionAlgorithmDESCBC is the DES CBC encryption algorithm
+	EncryptionAlgorithmDESCBC = iota
+
+	// EncryptionAlgorithmAES128CBC is the AES 128 bits with CBC encryption algorithm
+	// Avoid this algorithm unless required for interoperability; use AES GCM instead.
+	EncryptionAlgorithmAES128CBC
+
+	// EncryptionAlgorithmAES256CBC is the AES 256 bits with CBC encryption algorithm
+	// Avoid this algorithm unless required for interoperability; use AES GCM instead.
+	EncryptionAlgorithmAES256CBC
+
+	// EncryptionAlgorithmAES128GCM is the AES 128 bits with GCM encryption algorithm
+	EncryptionAlgorithmAES128GCM
+
+	// EncryptionAlgorithmAES256GCM is the AES 256 bits with GCM encryption algorithm
+	EncryptionAlgorithmAES256GCM
+)
+
+// ContentEncryptionAlgorithm determines the algorithm used to encrypt the
+// plaintext message. Change the value of this variable to change which
+// algorithm is used in the Encrypt() function.
+var ContentEncryptionAlgorithm = EncryptionAlgorithmDESCBC
+
+// ErrUnsupportedEncryptionAlgorithm is returned when attempting to encrypt
+// content with an unsupported algorithm.
+var ErrUnsupportedEncryptionAlgorithm = errors.New("pkcs7: cannot encrypt content: only DES-CBC, AES-CBC, and AES-GCM supported")
+
+// ErrPSKNotProvided is returned when attempting to encrypt
+// using a PSK without actually providing the PSK.
+var ErrPSKNotProvided = errors.New("pkcs7: cannot encrypt content: PSK not provided")
+
+const nonceSize = 12
+
+type aesGCMParameters struct {
+	Nonce  []byte `asn1:"tag:4"`
+	ICVLen int
+}
+
+func encryptAESGCM(content []byte, key []byte) ([]byte, *encryptedContentInfo, error) {
+	var keyLen int
+	var algID asn1.ObjectIdentifier
+	switch ContentEncryptionAlgorithm {
+	case EncryptionAlgorithmAES128GCM:
+		keyLen = 16
+		algID = OIDEncryptionAlgorithmAES128GCM
+	case EncryptionAlgorithmAES256GCM:
+		keyLen = 32
+		algID = OIDEncryptionAlgorithmAES256GCM
+	default:
+		return nil, nil, fmt.Errorf("invalid ContentEncryptionAlgorithm in encryptAESGCM: %d", ContentEncryptionAlgorithm)
+	}
+	if key == nil {
+		// Create AES key
+		key = make([]byte, keyLen)
+
+		_, err := rand.Read(key)
+		if err != nil {
+			return nil, nil, err
+		}
+	}
+
+	// Create nonce
+	nonce := make([]byte, nonceSize)
+
+	_, err := rand.Read(nonce)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// Encrypt content
+	block, err := aes.NewCipher(key)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	gcm, err := cipher.NewGCM(block)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	ciphertext := gcm.Seal(nil, nonce, content, nil)
+
+	// Prepare ASN.1 Encrypted Content Info
+	paramSeq := aesGCMParameters{
+		Nonce:  nonce,
+		ICVLen: gcm.Overhead(),
+	}
+
+	paramBytes, err := asn1.Marshal(paramSeq)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	eci := encryptedContentInfo{
+		ContentType: OIDData,
+		ContentEncryptionAlgorithm: pkix.AlgorithmIdentifier{
+			Algorithm: algID,
+			Parameters: asn1.RawValue{
+				Tag:   asn1.TagSequence,
+				Bytes: paramBytes,
+			},
+		},
+		EncryptedContent: marshalEncryptedContent(ciphertext),
+	}
+
+	return key, &eci, nil
+}
+
+func encryptDESCBC(content []byte, key []byte) ([]byte, *encryptedContentInfo, error) {
+	if key == nil {
+		// Create DES key
+		key = make([]byte, 8)
+
+		_, err := rand.Read(key)
+		if err != nil {
+			return nil, nil, err
+		}
+	}
+
+	// Create CBC IV
+	iv := make([]byte, des.BlockSize)
+	_, err := rand.Read(iv)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// Encrypt padded content
+	block, err := des.NewCipher(key)
+	if err != nil {
+		return nil, nil, err
+	}
+	mode := cipher.NewCBCEncrypter(block, iv)
+	plaintext, err := pad(content, mode.BlockSize())
+	if err != nil {
+		return nil, nil, err
+	}
+	cyphertext := make([]byte, len(plaintext))
+	mode.CryptBlocks(cyphertext, plaintext)
+
+	// Prepare ASN.1 Encrypted Content Info
+	eci := encryptedContentInfo{
+		ContentType: OIDData,
+		ContentEncryptionAlgorithm: pkix.AlgorithmIdentifier{
+			Algorithm:  OIDEncryptionAlgorithmDESCBC,
+			Parameters: asn1.RawValue{Tag: 4, Bytes: iv},
+		},
+		EncryptedContent: marshalEncryptedContent(cyphertext),
+	}
+
+	return key, &eci, nil
+}
+
+func encryptAESCBC(content []byte, key []byte) ([]byte, *encryptedContentInfo, error) {
+	var keyLen int
+	var algID asn1.ObjectIdentifier
+	switch ContentEncryptionAlgorithm {
+	case EncryptionAlgorithmAES128CBC:
+		keyLen = 16
+		algID = OIDEncryptionAlgorithmAES128CBC
+	case EncryptionAlgorithmAES256CBC:
+		keyLen = 32
+		algID = OIDEncryptionAlgorithmAES256CBC
+	default:
+		return nil, nil, fmt.Errorf("invalid ContentEncryptionAlgorithm in encryptAESCBC: %d", ContentEncryptionAlgorithm)
+	}
+
+	if key == nil {
+		// Create AES key
+		key = make([]byte, keyLen)
+
+		_, err := rand.Read(key)
+		if err != nil {
+			return nil, nil, err
+		}
+	}
+
+	// Create CBC IV
+	iv := make([]byte, aes.BlockSize)
+	_, err := rand.Read(iv)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// Encrypt padded content
+	block, err := aes.NewCipher(key)
+	if err != nil {
+		return nil, nil, err
+	}
+	mode := cipher.NewCBCEncrypter(block, iv)
+	plaintext, err := pad(content, mode.BlockSize())
+	if err != nil {
+		return nil, nil, err
+	}
+	cyphertext := make([]byte, len(plaintext))
+	mode.CryptBlocks(cyphertext, plaintext)
+
+	// Prepare ASN.1 Encrypted Content Info
+	eci := encryptedContentInfo{
+		ContentType: OIDData,
+		ContentEncryptionAlgorithm: pkix.AlgorithmIdentifier{
+			Algorithm:  algID,
+			Parameters: asn1.RawValue{Tag: 4, Bytes: iv},
+		},
+		EncryptedContent: marshalEncryptedContent(cyphertext),
+	}
+
+	return key, &eci, nil
+}
+
+// Encrypt creates and returns an envelope data PKCS7 structure with encrypted
+// recipient keys for each recipient public key.
+//
+// The algorithm used to perform encryption is determined by the current value
+// of the global ContentEncryptionAlgorithm package variable. By default, the
+// value is EncryptionAlgorithmDESCBC. To use a different algorithm, change the
+// value before calling Encrypt(). For example:
+//
+//	ContentEncryptionAlgorithm = EncryptionAlgorithmAES128GCM
+//
+// TODO(fullsailor): Add support for encrypting content with other algorithms
+func Encrypt(content []byte, recipients []*x509.Certificate) ([]byte, error) {
+	var eci *encryptedContentInfo
+	var key []byte
+	var err error
+
+	// Apply chosen symmetric encryption method
+	switch ContentEncryptionAlgorithm {
+	case EncryptionAlgorithmDESCBC:
+		key, eci, err = encryptDESCBC(content, nil)
+	case EncryptionAlgorithmAES128CBC:
+		fallthrough
+	case EncryptionAlgorithmAES256CBC:
+		key, eci, err = encryptAESCBC(content, nil)
+	case EncryptionAlgorithmAES128GCM:
+		fallthrough
+	case EncryptionAlgorithmAES256GCM:
+		key, eci, err = encryptAESGCM(content, nil)
+
+	default:
+		return nil, ErrUnsupportedEncryptionAlgorithm
+	}
+
+	if err != nil {
+		return nil, err
+	}
+
+	// Prepare each recipient's encrypted cipher key
+	recipientInfos := make([]recipientInfo, len(recipients))
+	for i, recipient := range recipients {
+		encrypted, err := encryptKey(key, recipient)
+		if err != nil {
+			return nil, err
+		}
+		ias, err := cert2issuerAndSerial(recipient)
+		if err != nil {
+			return nil, err
+		}
+		info := recipientInfo{
+			Version:               0,
+			IssuerAndSerialNumber: ias,
+			KeyEncryptionAlgorithm: pkix.AlgorithmIdentifier{
+				Algorithm: OIDEncryptionAlgorithmRSA,
+			},
+			EncryptedKey: encrypted,
+		}
+		recipientInfos[i] = info
+	}
+
+	// Prepare envelope content
+	envelope := envelopedData{
+		EncryptedContentInfo: *eci,
+		Version:              0,
+		RecipientInfos:       recipientInfos,
+	}
+	innerContent, err := asn1.Marshal(envelope)
+	if err != nil {
+		return nil, err
+	}
+
+	// Prepare outer payload structure
+	wrapper := contentInfo{
+		ContentType: OIDEnvelopedData,
+		Content:     asn1.RawValue{Class: 2, Tag: 0, IsCompound: true, Bytes: innerContent},
+	}
+
+	return asn1.Marshal(wrapper)
+}
+
+// EncryptUsingPSK creates and returns an encrypted data PKCS7 structure,
+// encrypted using caller provided pre-shared secret.
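+//
+// A minimal round-trip sketch (assuming AES-256-GCM and a caller-supplied
+// 32-byte key):
+//
+//	ContentEncryptionAlgorithm = EncryptionAlgorithmAES256GCM
+//	blob, err := EncryptUsingPSK(plaintext, key)
+//	// ...
+//	p7, err := Parse(blob)
+//	recovered, err := p7.DecryptUsingPSK(key)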
+func EncryptUsingPSK(content []byte, key []byte) ([]byte, error) {
+	var eci *encryptedContentInfo
+	var err error
+
+	if key == nil {
+		return nil, ErrPSKNotProvided
+	}
+
+	// Apply chosen symmetric encryption method
+	switch ContentEncryptionAlgorithm {
+	case EncryptionAlgorithmDESCBC:
+		_, eci, err = encryptDESCBC(content, key)
+
+	case EncryptionAlgorithmAES128GCM:
+		fallthrough
+	case EncryptionAlgorithmAES256GCM:
+		_, eci, err = encryptAESGCM(content, key)
+
+	default:
+		return nil, ErrUnsupportedEncryptionAlgorithm
+	}
+
+	if err != nil {
+		return nil, err
+	}
+
+	// Prepare encrypted-data content
+	ed := encryptedData{
+		Version:              0,
+		EncryptedContentInfo: *eci,
+	}
+	innerContent, err := asn1.Marshal(ed)
+	if err != nil {
+		return nil, err
+	}
+
+	// Prepare outer payload structure
+	wrapper := contentInfo{
+		ContentType: OIDEncryptedData,
+		Content:     asn1.RawValue{Class: 2, Tag: 0, IsCompound: true, Bytes: innerContent},
+	}
+
+	return asn1.Marshal(wrapper)
+}
+
+func marshalEncryptedContent(content []byte) asn1.RawValue {
+	asn1Content, _ := asn1.Marshal(content)
+	return asn1.RawValue{Tag: 0, Class: 2, Bytes: asn1Content, IsCompound: true}
+}
+
+func encryptKey(key []byte, recipient *x509.Certificate) ([]byte, error) {
+	if pub := recipient.PublicKey.(*rsa.PublicKey); pub != nil {
+		return rsa.EncryptPKCS1v15(rand.Reader, pub, key)
+	}
+	return nil, ErrUnsupportedAlgorithm
+}
+
+func pad(data []byte, blocklen int) ([]byte, error) {
+	if blocklen < 1 {
+		return nil, fmt.Errorf("invalid blocklen %d", blocklen)
+	}
+	padlen := blocklen - (len(data) % blocklen)
+	if padlen == 0 {
+		padlen = blocklen
+	}
+	pad := bytes.Repeat([]byte{byte(padlen)}, padlen)
+	return append(data, pad...), nil
+}
diff --git a/vendor/go.mozilla.org/pkcs7/pkcs7.go b/vendor/go.mozilla.org/pkcs7/pkcs7.go
new file mode 100644
index 00000000..ccc6cc6d
--- /dev/null
+++ b/vendor/go.mozilla.org/pkcs7/pkcs7.go
@@ -0,0 +1,291 @@
+// Package pkcs7 implements parsing and generation of some PKCS#7 structures.
+package pkcs7
+
+import (
+	"bytes"
+	"crypto"
+	"crypto/dsa"
+	"crypto/ecdsa"
+	"crypto/rsa"
+	"crypto/x509"
+	"crypto/x509/pkix"
+	"encoding/asn1"
+	"errors"
+	"fmt"
+	"sort"
+
+	_ "crypto/sha1" // for crypto.SHA1
+)
+
+// PKCS7 Represents a PKCS7 structure
+type PKCS7 struct {
+	Content      []byte
+	Certificates []*x509.Certificate
+	CRLs         []pkix.CertificateList
+	Signers      []signerInfo
+	raw          interface{}
+}
+
+type contentInfo struct {
+	ContentType asn1.ObjectIdentifier
+	Content     asn1.RawValue `asn1:"explicit,optional,tag:0"`
+}
+
+// ErrUnsupportedContentType is returned when a PKCS7 content is not supported.
+// Currently only Data (1.2.840.113549.1.7.1), Signed Data (1.2.840.113549.1.7.2),
+// and Enveloped Data (1.2.840.113549.1.7.3) are supported.
+var ErrUnsupportedContentType = errors.New("pkcs7: cannot parse data: unimplemented content type")
+
+type unsignedData []byte
+
+var (
+	// Signed Data OIDs
+	OIDData                   = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 7, 1}
+	OIDSignedData             = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 7, 2}
+	OIDEnvelopedData          = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 7, 3}
+	OIDEncryptedData          = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 7, 6}
+	OIDAttributeContentType   = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 9, 3}
+	OIDAttributeMessageDigest = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 9, 4}
+	OIDAttributeSigningTime   = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 9, 5}
+
+	// Digest Algorithms
+	OIDDigestAlgorithmSHA1   = asn1.ObjectIdentifier{1, 3, 14, 3, 2, 26}
+	OIDDigestAlgorithmSHA256 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 1}
+	OIDDigestAlgorithmSHA384 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 2}
+	OIDDigestAlgorithmSHA512 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 3}
+
+	OIDDigestAlgorithmDSA     = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 1}
+	OIDDigestAlgorithmDSASHA1 = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 3}
+
+	OIDDigestAlgorithmECDSASHA1   = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 1}
+	OIDDigestAlgorithmECDSASHA256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 2}
+	OIDDigestAlgorithmECDSASHA384 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 3}
+	OIDDigestAlgorithmECDSASHA512 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 4}
+
+	// Signature Algorithms
+	OIDEncryptionAlgorithmRSA       = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 1}
+	OIDEncryptionAlgorithmRSASHA1   = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 5}
+	OIDEncryptionAlgorithmRSASHA256 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 11}
+	OIDEncryptionAlgorithmRSASHA384 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 12}
+	OIDEncryptionAlgorithmRSASHA512 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 13}
+
+	OIDEncryptionAlgorithmECDSAP256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 3, 1, 7}
+	OIDEncryptionAlgorithmECDSAP384 = asn1.ObjectIdentifier{1, 3, 132, 0, 34}
+	OIDEncryptionAlgorithmECDSAP521 = asn1.ObjectIdentifier{1, 3, 132, 0, 35}
+
+	// Encryption Algorithms
+	OIDEncryptionAlgorithmDESCBC     = asn1.ObjectIdentifier{1, 3, 14, 3, 2, 7}
+	OIDEncryptionAlgorithmDESEDE3CBC = asn1.ObjectIdentifier{1, 2, 840, 113549, 3, 7}
+	OIDEncryptionAlgorithmAES256CBC  = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 42}
+	OIDEncryptionAlgorithmAES128GCM  = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 6}
+	OIDEncryptionAlgorithmAES128CBC  = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 2}
+	OIDEncryptionAlgorithmAES256GCM  = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 46}
+)
+
+func getHashForOID(oid asn1.ObjectIdentifier) (crypto.Hash, error) {
+	switch {
+	case oid.Equal(OIDDigestAlgorithmSHA1), oid.Equal(OIDDigestAlgorithmECDSASHA1),
+		oid.Equal(OIDDigestAlgorithmDSA), oid.Equal(OIDDigestAlgorithmDSASHA1),
+		oid.Equal(OIDEncryptionAlgorithmRSA):
+		return crypto.SHA1, nil
+	case oid.Equal(OIDDigestAlgorithmSHA256), oid.Equal(OIDDigestAlgorithmECDSASHA256):
+		return crypto.SHA256, nil
+	case oid.Equal(OIDDigestAlgorithmSHA384), oid.Equal(OIDDigestAlgorithmECDSASHA384):
+		return crypto.SHA384, nil
+	case oid.Equal(OIDDigestAlgorithmSHA512), oid.Equal(OIDDigestAlgorithmECDSASHA512):
+		return crypto.SHA512, nil
+	}
+	return crypto.Hash(0), ErrUnsupportedAlgorithm
+}
+
+// getDigestOIDForSignatureAlgorithm takes an x509.SignatureAlgorithm
+// and returns the corresponding OID digest algorithm
+func getDigestOIDForSignatureAlgorithm(digestAlg x509.SignatureAlgorithm) (asn1.ObjectIdentifier, error) {
+	switch digestAlg {
+	case x509.SHA1WithRSA, x509.ECDSAWithSHA1:
+		return OIDDigestAlgorithmSHA1, nil
+	case x509.SHA256WithRSA, x509.ECDSAWithSHA256:
+		return OIDDigestAlgorithmSHA256, nil
+	case x509.SHA384WithRSA, x509.ECDSAWithSHA384:
+		return OIDDigestAlgorithmSHA384, nil
+	case x509.SHA512WithRSA, x509.ECDSAWithSHA512:
+		return OIDDigestAlgorithmSHA512, nil
+	}
+	return nil, fmt.Errorf("pkcs7: cannot convert hash to oid, unknown hash algorithm")
+}
+
+// getOIDForEncryptionAlgorithm takes the private key type of the signer and
+// the OID of a digest algorithm to return the appropriate signerInfo.DigestEncryptionAlgorithm
+func getOIDForEncryptionAlgorithm(pkey crypto.PrivateKey, OIDDigestAlg asn1.ObjectIdentifier) (asn1.ObjectIdentifier, error) {
+	switch pkey.(type) {
+	case *rsa.PrivateKey:
+		switch {
+		default:
+			return OIDEncryptionAlgorithmRSA, nil
+		case OIDDigestAlg.Equal(OIDEncryptionAlgorithmRSA):
+			return OIDEncryptionAlgorithmRSA, nil
+		case OIDDigestAlg.Equal(OIDDigestAlgorithmSHA1):
+			return OIDEncryptionAlgorithmRSASHA1, nil
+		case OIDDigestAlg.Equal(OIDDigestAlgorithmSHA256):
+			return OIDEncryptionAlgorithmRSASHA256, nil
+		case OIDDigestAlg.Equal(OIDDigestAlgorithmSHA384):
+			return OIDEncryptionAlgorithmRSASHA384, nil
+		case OIDDigestAlg.Equal(OIDDigestAlgorithmSHA512):
+			return OIDEncryptionAlgorithmRSASHA512, nil
+		}
+	case *ecdsa.PrivateKey:
+		switch {
+		case OIDDigestAlg.Equal(OIDDigestAlgorithmSHA1):
+			return OIDDigestAlgorithmECDSASHA1, nil
+		case OIDDigestAlg.Equal(OIDDigestAlgorithmSHA256):
+			return OIDDigestAlgorithmECDSASHA256, nil
+		case OIDDigestAlg.Equal(OIDDigestAlgorithmSHA384):
+			return OIDDigestAlgorithmECDSASHA384, nil
+		case OIDDigestAlg.Equal(OIDDigestAlgorithmSHA512):
+			return OIDDigestAlgorithmECDSASHA512, nil
+		}
+	case *dsa.PrivateKey:
+		return OIDDigestAlgorithmDSA, nil
+	}
+	return nil, fmt.Errorf("pkcs7: cannot convert encryption algorithm to oid, unknown private key type %T", pkey)
+}
+
+// Parse decodes a DER encoded PKCS7 package
+func Parse(data []byte) (p7 *PKCS7, err error) {
+	if len(data) == 0 {
+		return nil, errors.New("pkcs7: input data is empty")
+	}
+	var info contentInfo
+	der, err := ber2der(data)
+	if err != nil {
+		return nil, err
+	}
+	rest, err := asn1.Unmarshal(der, &info)
+	if len(rest) > 0 {
+		err = asn1.SyntaxError{Msg: "trailing data"}
+		return
+	}
+	if err != nil {
+		return
+	}
+
+	// fmt.Printf("--> Content Type: %s", info.ContentType)
+	switch {
+	case info.ContentType.Equal(OIDSignedData):
+		return parseSignedData(info.Content.Bytes)
+	case info.ContentType.Equal(OIDEnvelopedData):
+		return parseEnvelopedData(info.Content.Bytes)
+	case info.ContentType.Equal(OIDEncryptedData):
+		return parseEncryptedData(info.Content.Bytes)
+	}
+	return nil, ErrUnsupportedContentType
+}
+
+func parseEnvelopedData(data []byte) (*PKCS7, error) {
+	var ed envelopedData
+	if _, err := asn1.Unmarshal(data, &ed); err != nil {
+		return nil, err
+	}
+	return &PKCS7{
+		raw: ed,
+	}, nil
+}
+
+func parseEncryptedData(data []byte) (*PKCS7, error) {
+	var ed encryptedData
+	if _, err := asn1.Unmarshal(data, &ed); err != nil {
+		return nil, err
+	}
+	return &PKCS7{
+		raw: ed,
+	}, nil
+}
+
+func (raw rawCertificates) Parse() ([]*x509.Certificate, error) {
+	if len(raw.Raw) == 0 {
+		return nil, nil
+	}
+
+	var val asn1.RawValue
+	if _, err := asn1.Unmarshal(raw.Raw, &val); err != nil {
+		return nil, err
+	}
+
+	return x509.ParseCertificates(val.Bytes)
+}
+
+func isCertMatchForIssuerAndSerial(cert *x509.Certificate, ias issuerAndSerial) bool {
+	return cert.SerialNumber.Cmp(ias.SerialNumber) == 0 && bytes.Equal(cert.RawIssuer, ias.IssuerName.FullBytes)
+}
+
+// Attribute represents a key value pair attribute. Value must be marshalable
+// by `encoding/asn1`
+type Attribute struct {
+	Type  asn1.ObjectIdentifier
+	Value interface{}
+}
+
+type attributes struct {
+	types  []asn1.ObjectIdentifier
+	values []interface{}
+}
+
+// Add adds the attribute, maintaining insertion order
+func (attrs *attributes) Add(attrType asn1.ObjectIdentifier, value interface{}) {
+	attrs.types = append(attrs.types, attrType)
+	attrs.values = append(attrs.values, value)
+}
+
+type sortableAttribute struct {
+	SortKey   []byte
+	Attribute attribute
+}
+
+type attributeSet []sortableAttribute
+
+func (sa attributeSet) Len() int {
+	return len(sa)
+}
+
+func (sa attributeSet) Less(i, j int) bool {
+	return bytes.Compare(sa[i].SortKey, sa[j].SortKey) < 0
+}
+
+func (sa attributeSet) Swap(i, j int) {
+	sa[i], sa[j] = sa[j], sa[i]
+}
+
+func (sa attributeSet) Attributes() []attribute {
+	attrs := make([]attribute, len(sa))
+	for i, attr := range sa {
+		attrs[i] = attr.Attribute
+	}
+	return attrs
+}
+
+func (attrs *attributes) ForMarshalling() ([]attribute, error) {
+	sortables := make(attributeSet, len(attrs.types))
+	for i := range sortables {
+		attrType := attrs.types[i]
+		attrValue := attrs.values[i]
+		asn1Value, err := asn1.Marshal(attrValue)
+		if err != nil {
+			return nil, err
+		}
+		attr := attribute{
+			Type:  attrType,
+			Value: asn1.RawValue{Tag: 17, IsCompound: true, Bytes: asn1Value}, // 17 == SET tag
+		}
+		encoded, err := asn1.Marshal(attr)
+		if err != nil {
+			return nil, err
+		}
+		sortables[i] = sortableAttribute{
+			SortKey:   encoded,
+			Attribute: attr,
+		}
+	}
+	sort.Sort(sortables)
+	return sortables.Attributes(), nil
+}
diff --git a/vendor/go.mozilla.org/pkcs7/sign.go b/vendor/go.mozilla.org/pkcs7/sign.go
new file mode 100644
index 00000000..31c3654c
--- /dev/null
+++ b/vendor/go.mozilla.org/pkcs7/sign.go
@@ -0,0 +1,429 @@
+package pkcs7
+
+import (
+	"bytes"
+	"crypto"
+	"crypto/dsa"
+	"crypto/rand"
+	"crypto/x509"
+	"crypto/x509/pkix"
+	"encoding/asn1"
+	"errors"
+	"fmt"
+	"math/big"
+	"time"
+)
+
+// SignedData is an opaque data structure for creating signed data payloads
+type SignedData struct {
+	sd                  signedData
+	certs               []*x509.Certificate
+	data, messageDigest []byte
+	digestOid           asn1.ObjectIdentifier
+	encryptionOid       asn1.ObjectIdentifier
+}
+
+// NewSignedData takes data and initializes a PKCS7 SignedData struct that is
+// ready to be signed via AddSigner. The digest algorithm is set to SHA1 by default
+// and can be changed by calling SetDigestAlgorithm.
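+//
+// A short sketch of switching digests before signing (cert and key are
+// assumed to be loaded by the caller):
+//
+//	sd, _ := NewSignedData(content)
+//	sd.SetDigestAlgorithm(OIDDigestAlgorithmSHA256)
+//	_ = sd.AddSigner(cert, key, SignerInfoConfig{})
+//	der, _ := sd.Finish()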
+func NewSignedData(data []byte) (*SignedData, error) {
+	content, err := asn1.Marshal(data)
+	if err != nil {
+		return nil, err
+	}
+	ci := contentInfo{
+		ContentType: OIDData,
+		Content:     asn1.RawValue{Class: 2, Tag: 0, Bytes: content, IsCompound: true},
+	}
+	sd := signedData{
+		ContentInfo: ci,
+		Version:     1,
+	}
+	return &SignedData{sd: sd, data: data, digestOid: OIDDigestAlgorithmSHA1}, nil
+}
+
+// SignerInfoConfig are optional values to include when adding a signer
+type SignerInfoConfig struct {
+	ExtraSignedAttributes   []Attribute
+	ExtraUnsignedAttributes []Attribute
+}
+
+type signedData struct {
+	Version                    int                        `asn1:"default:1"`
+	DigestAlgorithmIdentifiers []pkix.AlgorithmIdentifier `asn1:"set"`
+	ContentInfo                contentInfo
+	Certificates               rawCertificates        `asn1:"optional,tag:0"`
+	CRLs                       []pkix.CertificateList `asn1:"optional,tag:1"`
+	SignerInfos                []signerInfo           `asn1:"set"`
+}
+
+type signerInfo struct {
+	Version                   int `asn1:"default:1"`
+	IssuerAndSerialNumber     issuerAndSerial
+	DigestAlgorithm           pkix.AlgorithmIdentifier
+	AuthenticatedAttributes   []attribute `asn1:"optional,omitempty,tag:0"`
+	DigestEncryptionAlgorithm pkix.AlgorithmIdentifier
+	EncryptedDigest           []byte
+	UnauthenticatedAttributes []attribute `asn1:"optional,omitempty,tag:1"`
+}
+
+type attribute struct {
+	Type  asn1.ObjectIdentifier
+	Value asn1.RawValue `asn1:"set"`
+}
+
+func marshalAttributes(attrs []attribute) ([]byte, error) {
+	encodedAttributes, err := asn1.Marshal(struct {
+		A []attribute `asn1:"set"`
+	}{A: attrs})
+	if err != nil {
+		return nil, err
+	}
+
+	// Remove the leading sequence octets
+	var raw asn1.RawValue
+	asn1.Unmarshal(encodedAttributes, &raw)
+	return raw.Bytes, nil
+}
+
+type rawCertificates struct {
+	Raw asn1.RawContent
+}
+
+type issuerAndSerial struct {
+	IssuerName   asn1.RawValue
+	SerialNumber *big.Int
+}
+
+// SetDigestAlgorithm sets the digest algorithm to be used in the signing process.
+//
+// This should be called before adding signers
+func (sd *SignedData) SetDigestAlgorithm(d asn1.ObjectIdentifier) {
+	sd.digestOid = d
+}
+
+// SetEncryptionAlgorithm sets the encryption algorithm to be used in the signing process.
+//
+// This should be called before adding signers
+func (sd *SignedData) SetEncryptionAlgorithm(d asn1.ObjectIdentifier) {
+	sd.encryptionOid = d
+}
+
+// AddSigner is a wrapper around AddSignerChain() that adds a signer without any parent.
+func (sd *SignedData) AddSigner(ee *x509.Certificate, pkey crypto.PrivateKey, config SignerInfoConfig) error {
+	var parents []*x509.Certificate
+	return sd.AddSignerChain(ee, pkey, parents, config)
+}
+
+// AddSignerChain signs attributes about the content and adds certificates
+// and signer infos to the Signed Data. The certificate and private key
+// of the end-entity signer are used to issue the signature, and any
+// parents of that end-entity that need to be added to the list of
+// certificates can be specified in the parents slice.
+//
+// The signature algorithm used to hash the data is the one of the end-entity
+// certificate.
+func (sd *SignedData) AddSignerChain(ee *x509.Certificate, pkey crypto.PrivateKey, parents []*x509.Certificate, config SignerInfoConfig) error {
+	// Following RFC 2315, 9.2 SignerInfo type, the distinguished name of
+	// the issuer of the end-entity signer is stored in the issuerAndSerialNumber
+	// section of the SignedData.SignerInfo, alongside the serial number of
+	// the end-entity.
+	var ias issuerAndSerial
+	ias.SerialNumber = ee.SerialNumber
+	if len(parents) == 0 {
+		// no parent, the issuer is the end-entity cert itself
+		ias.IssuerName = asn1.RawValue{FullBytes: ee.RawIssuer}
+	} else {
+		err := verifyPartialChain(ee, parents)
+		if err != nil {
+			return err
+		}
+		// the first parent is the issuer
+		ias.IssuerName = asn1.RawValue{FullBytes: parents[0].RawSubject}
+	}
+	sd.sd.DigestAlgorithmIdentifiers = append(sd.sd.DigestAlgorithmIdentifiers,
+		pkix.AlgorithmIdentifier{Algorithm: sd.digestOid},
+	)
+	hash, err := getHashForOID(sd.digestOid)
+	if err != nil {
+		return err
+	}
+	h := hash.New()
+	h.Write(sd.data)
+	sd.messageDigest = h.Sum(nil)
+	encryptionOid, err := getOIDForEncryptionAlgorithm(pkey, sd.digestOid)
+	if err != nil {
+		return err
+	}
+	attrs := &attributes{}
+	attrs.Add(OIDAttributeContentType, sd.sd.ContentInfo.ContentType)
+	attrs.Add(OIDAttributeMessageDigest, sd.messageDigest)
+	attrs.Add(OIDAttributeSigningTime, time.Now().UTC())
+	for _, attr := range config.ExtraSignedAttributes {
+		attrs.Add(attr.Type, attr.Value)
+	}
+	finalAttrs, err := attrs.ForMarshalling()
+	if err != nil {
+		return err
+	}
+	unsignedAttrs := &attributes{}
+	for _, attr := range config.ExtraUnsignedAttributes {
+		unsignedAttrs.Add(attr.Type, attr.Value)
+	}
+	finalUnsignedAttrs, err := unsignedAttrs.ForMarshalling()
+	if err != nil {
+		return err
+	}
+	// create signature of signed attributes
+	signature, err := signAttributes(finalAttrs, pkey, hash)
+	if err != nil {
+		return err
+	}
+	signer := signerInfo{
+		AuthenticatedAttributes:   finalAttrs,
+		UnauthenticatedAttributes: finalUnsignedAttrs,
+		DigestAlgorithm:           pkix.AlgorithmIdentifier{Algorithm: sd.digestOid},
+		DigestEncryptionAlgorithm: pkix.AlgorithmIdentifier{Algorithm: encryptionOid},
+		IssuerAndSerialNumber:     ias,
+		EncryptedDigest:           signature,
+		Version:                   1,
+	}
+	sd.certs = append(sd.certs, ee)
+	if len(parents) > 0 {
+		sd.certs = append(sd.certs, parents...)
+	}
+	sd.sd.SignerInfos = append(sd.sd.SignerInfos, signer)
+	return nil
+}
+
+// SignWithoutAttr issues a signature on the content of the pkcs7 SignedData.
+// Unlike AddSigner/AddSignerChain, it calculates the digest on the data alone
+// and does not include any signed attributes like timestamp and so on.
+//
+// This function is needed to sign old Android APKs, something you probably
+// shouldn't do unless you're maintaining backward compatibility for old
+// applications.
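+//
+// A rough usage sketch (cert and key are assumed to come from the caller;
+// the digest defaults to SHA1 unless SetDigestAlgorithm is called first):
+//
+//	sd, _ := NewSignedData(content)
+//	if err := sd.SignWithoutAttr(cert, key, SignerInfoConfig{}); err != nil {
+//		return err
+//	}
+//	der, err := sd.Finish()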
+func (sd *SignedData) SignWithoutAttr(ee *x509.Certificate, pkey crypto.PrivateKey, config SignerInfoConfig) error { + var signature []byte + sd.sd.DigestAlgorithmIdentifiers = append(sd.sd.DigestAlgorithmIdentifiers, pkix.AlgorithmIdentifier{Algorithm: sd.digestOid}) + hash, err := getHashForOID(sd.digestOid) + if err != nil { + return err + } + h := hash.New() + h.Write(sd.data) + sd.messageDigest = h.Sum(nil) + switch pkey := pkey.(type) { + case *dsa.PrivateKey: + // dsa doesn't implement crypto.Signer so we make a special case + // https://github.com/golang/go/issues/27889 + r, s, err := dsa.Sign(rand.Reader, pkey, sd.messageDigest) + if err != nil { + return err + } + signature, err = asn1.Marshal(dsaSignature{r, s}) + if err != nil { + return err + } + default: + key, ok := pkey.(crypto.Signer) + if !ok { + return errors.New("pkcs7: private key does not implement crypto.Signer") + } + signature, err = key.Sign(rand.Reader, sd.messageDigest, hash) + if err != nil { + return err + } + } + var ias issuerAndSerial + ias.SerialNumber = ee.SerialNumber + // no parent, the issue is the end-entity cert itself + ias.IssuerName = asn1.RawValue{FullBytes: ee.RawIssuer} + if sd.encryptionOid == nil { + // if the encryption algorithm wasn't set by SetEncryptionAlgorithm, + // infer it from the digest algorithm + sd.encryptionOid, err = getOIDForEncryptionAlgorithm(pkey, sd.digestOid) + } + if err != nil { + return err + } + signer := signerInfo{ + DigestAlgorithm: pkix.AlgorithmIdentifier{Algorithm: sd.digestOid}, + DigestEncryptionAlgorithm: pkix.AlgorithmIdentifier{Algorithm: sd.encryptionOid}, + IssuerAndSerialNumber: ias, + EncryptedDigest: signature, + Version: 1, + } + // create signature of signed attributes + sd.certs = append(sd.certs, ee) + sd.sd.SignerInfos = append(sd.sd.SignerInfos, signer) + return nil +} + +func (si *signerInfo) SetUnauthenticatedAttributes(extraUnsignedAttrs []Attribute) error { + unsignedAttrs := &attributes{} + for _, attr := range extraUnsignedAttrs { + unsignedAttrs.Add(attr.Type, attr.Value) + } + finalUnsignedAttrs, err := unsignedAttrs.ForMarshalling() + if err != nil { + return err + } + + si.UnauthenticatedAttributes = finalUnsignedAttrs + + return nil +} + +// AddCertificate adds the certificate to the payload. Useful for parent certificates +func (sd *SignedData) AddCertificate(cert *x509.Certificate) { + sd.certs = append(sd.certs, cert) +} + +// Detach removes content from the signed data struct to make it a detached signature. 
+// This must be called right before Finish() +func (sd *SignedData) Detach() { + sd.sd.ContentInfo = contentInfo{ContentType: OIDData} +} + +// GetSignedData returns the private Signed Data +func (sd *SignedData) GetSignedData() *signedData { + return &sd.sd +} + +// Finish marshals the content and its signers +func (sd *SignedData) Finish() ([]byte, error) { + sd.sd.Certificates = marshalCertificates(sd.certs) + inner, err := asn1.Marshal(sd.sd) + if err != nil { + return nil, err + } + outer := contentInfo{ + ContentType: OIDSignedData, + Content: asn1.RawValue{Class: 2, Tag: 0, Bytes: inner, IsCompound: true}, + } + return asn1.Marshal(outer) +} + +// RemoveAuthenticatedAttributes removes authenticated attributes from signedData +// similar to OpenSSL's PKCS7_NOATTR or -noattr flags +func (sd *SignedData) RemoveAuthenticatedAttributes() { + for i := range sd.sd.SignerInfos { + sd.sd.SignerInfos[i].AuthenticatedAttributes = nil + } +} + +// RemoveUnauthenticatedAttributes removes unauthenticated attributes from signedData +func (sd *SignedData) RemoveUnauthenticatedAttributes() { + for i := range sd.sd.SignerInfos { + sd.sd.SignerInfos[i].UnauthenticatedAttributes = nil + } +} + +// verifyPartialChain checks that a given cert is issued by the first parent in the list, +// then continue down the path. It doesn't require the last parent to be a root CA, +// or to be trusted in any truststore. It simply verifies that the chain provided, albeit +// partial, makes sense. +func verifyPartialChain(cert *x509.Certificate, parents []*x509.Certificate) error { + if len(parents) == 0 { + return fmt.Errorf("pkcs7: zero parents provided to verify the signature of certificate %q", cert.Subject.CommonName) + } + err := cert.CheckSignatureFrom(parents[0]) + if err != nil { + return fmt.Errorf("pkcs7: certificate signature from parent is invalid: %v", err) + } + if len(parents) == 1 { + // there is no more parent to check, return + return nil + } + return verifyPartialChain(parents[0], parents[1:]) +} + +func cert2issuerAndSerial(cert *x509.Certificate) (issuerAndSerial, error) { + var ias issuerAndSerial + // The issuer RDNSequence has to match exactly the sequence in the certificate + // We cannot use cert.Issuer.ToRDNSequence() here since it mangles the sequence + ias.IssuerName = asn1.RawValue{FullBytes: cert.RawIssuer} + ias.SerialNumber = cert.SerialNumber + + return ias, nil +} + +// signs the DER encoded form of the attributes with the private key +func signAttributes(attrs []attribute, pkey crypto.PrivateKey, digestAlg crypto.Hash) ([]byte, error) { + attrBytes, err := marshalAttributes(attrs) + if err != nil { + return nil, err + } + h := digestAlg.New() + h.Write(attrBytes) + hash := h.Sum(nil) + + // dsa doesn't implement crypto.Signer so we make a special case + // https://github.com/golang/go/issues/27889 + switch pkey := pkey.(type) { + case *dsa.PrivateKey: + r, s, err := dsa.Sign(rand.Reader, pkey, hash) + if err != nil { + return nil, err + } + return asn1.Marshal(dsaSignature{r, s}) + } + + key, ok := pkey.(crypto.Signer) + if !ok { + return nil, errors.New("pkcs7: private key does not implement crypto.Signer") + } + return key.Sign(rand.Reader, hash, digestAlg) +} + +type dsaSignature struct { + R, S *big.Int +} + +// concats and wraps the certificates in the RawValue structure +func marshalCertificates(certs []*x509.Certificate) rawCertificates { + var buf bytes.Buffer + for _, cert := range certs { + buf.Write(cert.Raw) + } + rawCerts, _ := marshalCertificateBytes(buf.Bytes()) + 
	return rawCerts
+}
+
+// Even though the tag & length are stripped out during marshalling the
+// RawContent, we have to encode them into the RawContent. If they're missing,
+// then `asn1.Marshal()` will strip out the certificate wrapper instead.
+func marshalCertificateBytes(certs []byte) (rawCertificates, error) {
+	var val = asn1.RawValue{Bytes: certs, Class: 2, Tag: 0, IsCompound: true}
+	b, err := asn1.Marshal(val)
+	if err != nil {
+		return rawCertificates{}, err
+	}
+	return rawCertificates{Raw: b}, nil
+}
+
+// DegenerateCertificate creates a signed data structure containing only the
+// provided certificate or certificate chain.
+func DegenerateCertificate(cert []byte) ([]byte, error) {
+	rawCert, err := marshalCertificateBytes(cert)
+	if err != nil {
+		return nil, err
+	}
+	emptyContent := contentInfo{ContentType: OIDData}
+	sd := signedData{
+		Version:      1,
+		ContentInfo:  emptyContent,
+		Certificates: rawCert,
+		CRLs:         []pkix.CertificateList{},
+	}
+	content, err := asn1.Marshal(sd)
+	if err != nil {
+		return nil, err
+	}
+	signedContent := contentInfo{
+		ContentType: OIDSignedData,
+		Content:     asn1.RawValue{Class: 2, Tag: 0, Bytes: content, IsCompound: true},
+	}
+	return asn1.Marshal(signedContent)
+}
diff --git a/vendor/go.mozilla.org/pkcs7/verify.go b/vendor/go.mozilla.org/pkcs7/verify.go
new file mode 100644
index 00000000..f09e2724
--- /dev/null
+++ b/vendor/go.mozilla.org/pkcs7/verify.go
@@ -0,0 +1,343 @@
+package pkcs7
+
+import (
+	"crypto/subtle"
+	"crypto/x509"
+	"crypto/x509/pkix"
+	"encoding/asn1"
+	"errors"
+	"fmt"
+	"time"
+)
+
+// Verify is a wrapper around VerifyWithChain() that initializes an empty
+// trust store, effectively disabling certificate verification when validating
+// a signature.
+func (p7 *PKCS7) Verify() (err error) {
+	return p7.VerifyWithChain(nil)
+}
+
+// VerifyWithChain checks the signatures of a PKCS7 object.
+//
+// If truststore is not nil, it also verifies the chain of trust of
+// the end-entity signer cert to one of the roots in the
+// truststore. When the PKCS7 object includes the signing-time
+// authenticated attribute, the chain is verified at that time;
+// otherwise it is verified at the current UTC time.
+func (p7 *PKCS7) VerifyWithChain(truststore *x509.CertPool) (err error) {
+	if len(p7.Signers) == 0 {
+		return errors.New("pkcs7: Message has no signers")
+	}
+	for _, signer := range p7.Signers {
+		if err := verifySignature(p7, signer, truststore); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// VerifyWithChainAtTime checks the signatures of a PKCS7 object.
+//
+// If truststore is not nil, it also verifies the chain of trust of
+// the end-entity signer cert to a root in the truststore at
+// currentTime. It does not use the signing time authenticated
+// attribute.
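+//
+// A minimal usage sketch; der and roots are assumed to be a DER-encoded
+// SignedData blob and an *x509.CertPool holding the expected root:
+//
+//	p7, _ := Parse(der)
+//	err := p7.VerifyWithChainAtTime(roots, time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC))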
+func (p7 *PKCS7) VerifyWithChainAtTime(truststore *x509.CertPool, currentTime time.Time) (err error) { + if len(p7.Signers) == 0 { + return errors.New("pkcs7: Message has no signers") + } + for _, signer := range p7.Signers { + if err := verifySignatureAtTime(p7, signer, truststore, currentTime); err != nil { + return err + } + } + return nil +} + +func verifySignatureAtTime(p7 *PKCS7, signer signerInfo, truststore *x509.CertPool, currentTime time.Time) (err error) { + signedData := p7.Content + ee := getCertFromCertsByIssuerAndSerial(p7.Certificates, signer.IssuerAndSerialNumber) + if ee == nil { + return errors.New("pkcs7: No certificate for signer") + } + if len(signer.AuthenticatedAttributes) > 0 { + // TODO(fullsailor): First check the content type match + var ( + digest []byte + signingTime time.Time + ) + err := unmarshalAttribute(signer.AuthenticatedAttributes, OIDAttributeMessageDigest, &digest) + if err != nil { + return err + } + hash, err := getHashForOID(signer.DigestAlgorithm.Algorithm) + if err != nil { + return err + } + h := hash.New() + h.Write(p7.Content) + computed := h.Sum(nil) + if subtle.ConstantTimeCompare(digest, computed) != 1 { + return &MessageDigestMismatchError{ + ExpectedDigest: digest, + ActualDigest: computed, + } + } + signedData, err = marshalAttributes(signer.AuthenticatedAttributes) + if err != nil { + return err + } + err = unmarshalAttribute(signer.AuthenticatedAttributes, OIDAttributeSigningTime, &signingTime) + if err == nil { + // signing time found, performing validity check + if signingTime.After(ee.NotAfter) || signingTime.Before(ee.NotBefore) { + return fmt.Errorf("pkcs7: signing time %q is outside of certificate validity %q to %q", + signingTime.Format(time.RFC3339), + ee.NotBefore.Format(time.RFC3339), + ee.NotAfter.Format(time.RFC3339)) + } + } + } + if truststore != nil { + _, err = verifyCertChain(ee, p7.Certificates, truststore, currentTime) + if err != nil { + return err + } + } + sigalg, err := getSignatureAlgorithm(signer.DigestEncryptionAlgorithm, signer.DigestAlgorithm) + if err != nil { + return err + } + return ee.CheckSignature(sigalg, signedData, signer.EncryptedDigest) +} + +func verifySignature(p7 *PKCS7, signer signerInfo, truststore *x509.CertPool) (err error) { + signedData := p7.Content + ee := getCertFromCertsByIssuerAndSerial(p7.Certificates, signer.IssuerAndSerialNumber) + if ee == nil { + return errors.New("pkcs7: No certificate for signer") + } + signingTime := time.Now().UTC() + if len(signer.AuthenticatedAttributes) > 0 { + // TODO(fullsailor): First check the content type match + var digest []byte + err := unmarshalAttribute(signer.AuthenticatedAttributes, OIDAttributeMessageDigest, &digest) + if err != nil { + return err + } + hash, err := getHashForOID(signer.DigestAlgorithm.Algorithm) + if err != nil { + return err + } + h := hash.New() + h.Write(p7.Content) + computed := h.Sum(nil) + if subtle.ConstantTimeCompare(digest, computed) != 1 { + return &MessageDigestMismatchError{ + ExpectedDigest: digest, + ActualDigest: computed, + } + } + signedData, err = marshalAttributes(signer.AuthenticatedAttributes) + if err != nil { + return err + } + err = unmarshalAttribute(signer.AuthenticatedAttributes, OIDAttributeSigningTime, &signingTime) + if err == nil { + // signing time found, performing validity check + if signingTime.After(ee.NotAfter) || signingTime.Before(ee.NotBefore) { + return fmt.Errorf("pkcs7: signing time %q is outside of certificate validity %q to %q", + signingTime.Format(time.RFC3339), + 
					ee.NotBefore.Format(time.RFC3339),
+					ee.NotAfter.Format(time.RFC3339))
+			}
+		}
+	}
+	if truststore != nil {
+		_, err = verifyCertChain(ee, p7.Certificates, truststore, signingTime)
+		if err != nil {
+			return err
+		}
+	}
+	sigalg, err := getSignatureAlgorithm(signer.DigestEncryptionAlgorithm, signer.DigestAlgorithm)
+	if err != nil {
+		return err
+	}
+	return ee.CheckSignature(sigalg, signedData, signer.EncryptedDigest)
+}
+
+// GetOnlySigner returns an x509.Certificate for the first signer of the signed
+// data payload. If there is not exactly one signer, nil is returned
+func (p7 *PKCS7) GetOnlySigner() *x509.Certificate {
+	if len(p7.Signers) != 1 {
+		return nil
+	}
+	signer := p7.Signers[0]
+	return getCertFromCertsByIssuerAndSerial(p7.Certificates, signer.IssuerAndSerialNumber)
+}
+
+// UnmarshalSignedAttribute decodes a single attribute from the signer info
+func (p7 *PKCS7) UnmarshalSignedAttribute(attributeType asn1.ObjectIdentifier, out interface{}) error {
+	sd, ok := p7.raw.(signedData)
+	if !ok {
+		return errors.New("pkcs7: payload is not signedData content")
+	}
+	if len(sd.SignerInfos) < 1 {
+		return errors.New("pkcs7: payload has no signers")
+	}
+	attributes := sd.SignerInfos[0].AuthenticatedAttributes
+	return unmarshalAttribute(attributes, attributeType, out)
+}
+
+func parseSignedData(data []byte) (*PKCS7, error) {
+	var sd signedData
+	asn1.Unmarshal(data, &sd)
+	certs, err := sd.Certificates.Parse()
+	if err != nil {
+		return nil, err
+	}
+	// fmt.Printf("--> Signed Data Version %d\n", sd.Version)
+
+	var compound asn1.RawValue
+	var content unsignedData
+
+	// The Content.Bytes may be empty on PKI responses.
+	if len(sd.ContentInfo.Content.Bytes) > 0 {
+		if _, err := asn1.Unmarshal(sd.ContentInfo.Content.Bytes, &compound); err != nil {
+			return nil, err
+		}
+	}
+	// Compound octet string
+	if compound.IsCompound {
+		if compound.Tag == 4 {
+			if _, err = asn1.Unmarshal(compound.Bytes, &content); err != nil {
+				return nil, err
+			}
+		} else {
+			content = compound.Bytes
+		}
+	} else {
+		// assuming this is tag 04
+		content = compound.Bytes
+	}
+	return &PKCS7{
+		Content:      content,
+		Certificates: certs,
+		CRLs:         sd.CRLs,
+		Signers:      sd.SignerInfos,
+		raw:          sd}, nil
+}
+
+// verifyCertChain takes an end-entity cert, a list of potential intermediates and a
+// truststore, and builds all potential chains between the EE and a trusted root.
+//
+// When verifying chains that may have expired, currentTime can be set to a past date
+// to allow the verification to pass. If unset, currentTime is set to the current UTC time.
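+//
+// Note that KeyUsages is set to x509.ExtKeyUsageAny below, so extended key
+// usage constraints in the presented certificates are not enforced here.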
+func verifyCertChain(ee *x509.Certificate, certs []*x509.Certificate, truststore *x509.CertPool, currentTime time.Time) (chains [][]*x509.Certificate, err error) { + intermediates := x509.NewCertPool() + for _, intermediate := range certs { + intermediates.AddCert(intermediate) + } + verifyOptions := x509.VerifyOptions{ + Roots: truststore, + Intermediates: intermediates, + KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageAny}, + CurrentTime: currentTime, + } + chains, err = ee.Verify(verifyOptions) + if err != nil { + return chains, fmt.Errorf("pkcs7: failed to verify certificate chain: %v", err) + } + return +} + +// MessageDigestMismatchError is returned when the signer data digest does not +// match the computed digest for the contained content +type MessageDigestMismatchError struct { + ExpectedDigest []byte + ActualDigest []byte +} + +func (err *MessageDigestMismatchError) Error() string { + return fmt.Sprintf("pkcs7: Message digest mismatch\n\tExpected: %X\n\tActual : %X", err.ExpectedDigest, err.ActualDigest) +} + +func getSignatureAlgorithm(digestEncryption, digest pkix.AlgorithmIdentifier) (x509.SignatureAlgorithm, error) { + switch { + case digestEncryption.Algorithm.Equal(OIDDigestAlgorithmECDSASHA1): + return x509.ECDSAWithSHA1, nil + case digestEncryption.Algorithm.Equal(OIDDigestAlgorithmECDSASHA256): + return x509.ECDSAWithSHA256, nil + case digestEncryption.Algorithm.Equal(OIDDigestAlgorithmECDSASHA384): + return x509.ECDSAWithSHA384, nil + case digestEncryption.Algorithm.Equal(OIDDigestAlgorithmECDSASHA512): + return x509.ECDSAWithSHA512, nil + case digestEncryption.Algorithm.Equal(OIDEncryptionAlgorithmRSA), + digestEncryption.Algorithm.Equal(OIDEncryptionAlgorithmRSASHA1), + digestEncryption.Algorithm.Equal(OIDEncryptionAlgorithmRSASHA256), + digestEncryption.Algorithm.Equal(OIDEncryptionAlgorithmRSASHA384), + digestEncryption.Algorithm.Equal(OIDEncryptionAlgorithmRSASHA512): + switch { + case digest.Algorithm.Equal(OIDDigestAlgorithmSHA1): + return x509.SHA1WithRSA, nil + case digest.Algorithm.Equal(OIDDigestAlgorithmSHA256): + return x509.SHA256WithRSA, nil + case digest.Algorithm.Equal(OIDDigestAlgorithmSHA384): + return x509.SHA384WithRSA, nil + case digest.Algorithm.Equal(OIDDigestAlgorithmSHA512): + return x509.SHA512WithRSA, nil + default: + return -1, fmt.Errorf("pkcs7: unsupported digest %q for encryption algorithm %q", + digest.Algorithm.String(), digestEncryption.Algorithm.String()) + } + case digestEncryption.Algorithm.Equal(OIDDigestAlgorithmDSA), + digestEncryption.Algorithm.Equal(OIDDigestAlgorithmDSASHA1): + switch { + case digest.Algorithm.Equal(OIDDigestAlgorithmSHA1): + return x509.DSAWithSHA1, nil + case digest.Algorithm.Equal(OIDDigestAlgorithmSHA256): + return x509.DSAWithSHA256, nil + default: + return -1, fmt.Errorf("pkcs7: unsupported digest %q for encryption algorithm %q", + digest.Algorithm.String(), digestEncryption.Algorithm.String()) + } + case digestEncryption.Algorithm.Equal(OIDEncryptionAlgorithmECDSAP256), + digestEncryption.Algorithm.Equal(OIDEncryptionAlgorithmECDSAP384), + digestEncryption.Algorithm.Equal(OIDEncryptionAlgorithmECDSAP521): + switch { + case digest.Algorithm.Equal(OIDDigestAlgorithmSHA1): + return x509.ECDSAWithSHA1, nil + case digest.Algorithm.Equal(OIDDigestAlgorithmSHA256): + return x509.ECDSAWithSHA256, nil + case digest.Algorithm.Equal(OIDDigestAlgorithmSHA384): + return x509.ECDSAWithSHA384, nil + case digest.Algorithm.Equal(OIDDigestAlgorithmSHA512): + return x509.ECDSAWithSHA512, nil + default: + return -1, 
fmt.Errorf("pkcs7: unsupported digest %q for encryption algorithm %q", + digest.Algorithm.String(), digestEncryption.Algorithm.String()) + } + default: + return -1, fmt.Errorf("pkcs7: unsupported algorithm %q", + digestEncryption.Algorithm.String()) + } +} + +func getCertFromCertsByIssuerAndSerial(certs []*x509.Certificate, ias issuerAndSerial) *x509.Certificate { + for _, cert := range certs { + if isCertMatchForIssuerAndSerial(cert, ias) { + return cert + } + } + return nil +} + +func unmarshalAttribute(attrs []attribute, attributeType asn1.ObjectIdentifier, out interface{}) error { + for _, attr := range attrs { + if attr.Type.Equal(attributeType) { + _, err := asn1.Unmarshal(attr.Value.Bytes, out) + return err + } + } + return errors.New("pkcs7: attribute type not in attributes") +} diff --git a/vendor/go.mozilla.org/pkcs7/verify_test_dsa.go b/vendor/go.mozilla.org/pkcs7/verify_test_dsa.go new file mode 100644 index 00000000..1eb05bc3 --- /dev/null +++ b/vendor/go.mozilla.org/pkcs7/verify_test_dsa.go @@ -0,0 +1,182 @@ +// +build go1.11 go1.12 go1.13 go1.14 go1.15 + +package pkcs7 + +import ( + "crypto/x509" + "encoding/pem" + "fmt" + "io/ioutil" + "os" + "os/exec" + "testing" +) + +func TestVerifyEC2(t *testing.T) { + fixture := UnmarshalDSATestFixture(EC2IdentityDocumentFixture) + p7, err := Parse(fixture.Input) + if err != nil { + t.Errorf("Parse encountered unexpected error: %v", err) + } + p7.Certificates = []*x509.Certificate{fixture.Certificate} + if err := p7.Verify(); err != nil { + t.Errorf("Verify failed with error: %v", err) + } +} + +var EC2IdentityDocumentFixture = ` +-----BEGIN PKCS7----- +MIAGCSqGSIb3DQEHAqCAMIACAQExCzAJBgUrDgMCGgUAMIAGCSqGSIb3DQEHAaCA +JIAEggGmewogICJwcml2YXRlSXAiIDogIjE3Mi4zMC4wLjI1MiIsCiAgImRldnBh +eVByb2R1Y3RDb2RlcyIgOiBudWxsLAogICJhdmFpbGFiaWxpdHlab25lIiA6ICJ1 +cy1lYXN0LTFhIiwKICAidmVyc2lvbiIgOiAiMjAxMC0wOC0zMSIsCiAgImluc3Rh +bmNlSWQiIDogImktZjc5ZmU1NmMiLAogICJiaWxsaW5nUHJvZHVjdHMiIDogbnVs +bCwKICAiaW5zdGFuY2VUeXBlIiA6ICJ0Mi5taWNybyIsCiAgImFjY291bnRJZCIg +OiAiMTIxNjU5MDE0MzM0IiwKICAiaW1hZ2VJZCIgOiAiYW1pLWZjZTNjNjk2IiwK +ICAicGVuZGluZ1RpbWUiIDogIjIwMTYtMDQtMDhUMDM6MDE6MzhaIiwKICAiYXJj +aGl0ZWN0dXJlIiA6ICJ4ODZfNjQiLAogICJrZXJuZWxJZCIgOiBudWxsLAogICJy +YW1kaXNrSWQiIDogbnVsbCwKICAicmVnaW9uIiA6ICJ1cy1lYXN0LTEiCn0AAAAA +AAAxggEYMIIBFAIBATBpMFwxCzAJBgNVBAYTAlVTMRkwFwYDVQQIExBXYXNoaW5n +dG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6b24gV2Vi +IFNlcnZpY2VzIExMQwIJAJa6SNnlXhpnMAkGBSsOAwIaBQCgXTAYBgkqhkiG9w0B +CQMxCwYJKoZIhvcNAQcBMBwGCSqGSIb3DQEJBTEPFw0xNjA0MDgwMzAxNDRaMCMG +CSqGSIb3DQEJBDEWBBTuUc28eBXmImAautC+wOjqcFCBVjAJBgcqhkjOOAQDBC8w +LQIVAKA54NxGHWWCz5InboDmY/GHs33nAhQ6O/ZI86NwjA9Vz3RNMUJrUPU5tAAA +AAAAAA== +-----END PKCS7----- +-----BEGIN CERTIFICATE----- +MIIC7TCCAq0CCQCWukjZ5V4aZzAJBgcqhkjOOAQDMFwxCzAJBgNVBAYTAlVTMRkw +FwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYD +VQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAeFw0xMjAxMDUxMjU2MTJaFw0z +ODAxMDUxMjU2MTJaMFwxCzAJBgNVBAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9u +IFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNl +cnZpY2VzIExMQzCCAbcwggEsBgcqhkjOOAQBMIIBHwKBgQCjkvcS2bb1VQ4yt/5e +ih5OO6kK/n1Lzllr7D8ZwtQP8fOEpp5E2ng+D6Ud1Z1gYipr58Kj3nssSNpI6bX3 +VyIQzK7wLclnd/YozqNNmgIyZecN7EglK9ITHJLP+x8FtUpt3QbyYXJdmVMegN6P +hviYt5JH/nYl4hh3Pa1HJdskgQIVALVJ3ER11+Ko4tP6nwvHwh6+ERYRAoGBAI1j +k+tkqMVHuAFcvAGKocTgsjJem6/5qomzJuKDmbJNu9Qxw3rAotXau8Qe+MBcJl/U +hhy1KHVpCGl9fueQ2s6IL0CaO/buycU1CiYQk40KNHCcHfNiZbdlx1E9rpUp7bnF +lRa2v1ntMX3caRVDdbtPEWmdxSCYsYFDk4mZrOLBA4GEAAKBgEbmeve5f8LIE/Gf 
+MNmP9CM5eovQOGx5ho8WqD+aTebs+k2tn92BBPqeZqpWRa5P/+jrdKml1qx4llHW +MXrs3IgIb6+hUIB+S8dz8/mmO0bpr76RoZVCXYab2CZedFut7qc3WUH9+EUAH5mw +vSeDCOUMYQR7R9LINYwouHIziqQYMAkGByqGSM44BAMDLwAwLAIUWXBlk40xTwSw +7HX32MxXYruse9ACFBNGmdX2ZBrVNGrN9N2f6ROk0k9K +-----END CERTIFICATE-----` + +func TestDSASignWithOpenSSLAndVerify(t *testing.T) { + content := []byte(` +A ship in port is safe, +but that's not what ships are built for. +-- Grace Hopper`) + // write the content to a temp file + tmpContentFile, err := ioutil.TempFile("", "TestDSASignWithOpenSSLAndVerify_content") + if err != nil { + t.Fatal(err) + } + ioutil.WriteFile(tmpContentFile.Name(), content, 0755) + + // write the signer cert to a temp file + tmpSignerCertFile, err := ioutil.TempFile("", "TestDSASignWithOpenSSLAndVerify_signer") + if err != nil { + t.Fatal(err) + } + ioutil.WriteFile(tmpSignerCertFile.Name(), dsaPublicCert, 0755) + + // write the signer key to a temp file + tmpSignerKeyFile, err := ioutil.TempFile("", "TestDSASignWithOpenSSLAndVerify_key") + if err != nil { + t.Fatal(err) + } + ioutil.WriteFile(tmpSignerKeyFile.Name(), dsaPrivateKey, 0755) + + tmpSignedFile, err := ioutil.TempFile("", "TestDSASignWithOpenSSLAndVerify_signature") + if err != nil { + t.Fatal(err) + } + // call openssl to sign the content + opensslCMD := exec.Command("openssl", "smime", "-sign", "-nodetach", "-md", "sha1", + "-in", tmpContentFile.Name(), "-out", tmpSignedFile.Name(), + "-signer", tmpSignerCertFile.Name(), "-inkey", tmpSignerKeyFile.Name(), + "-certfile", tmpSignerCertFile.Name(), "-outform", "PEM") + out, err := opensslCMD.CombinedOutput() + if err != nil { + t.Fatalf("openssl command failed with %s: %s", err, out) + } + + // verify the signed content + pemSignature, err := ioutil.ReadFile(tmpSignedFile.Name()) + if err != nil { + t.Fatal(err) + } + fmt.Printf("%s\n", pemSignature) + derBlock, _ := pem.Decode(pemSignature) + if derBlock == nil { + t.Fatalf("failed to read DER block from signature PEM %s", tmpSignedFile.Name()) + } + p7, err := Parse(derBlock.Bytes) + if err != nil { + t.Fatalf("Parse encountered unexpected error: %v", err) + } + if err := p7.Verify(); err != nil { + t.Fatalf("Verify failed with error: %v", err) + } + os.Remove(tmpSignerCertFile.Name()) // clean up + os.Remove(tmpSignerKeyFile.Name()) // clean up + os.Remove(tmpContentFile.Name()) // clean up +} + +var dsaPrivateKey = []byte(`-----BEGIN PRIVATE KEY----- +MIIBSwIBADCCASwGByqGSM44BAEwggEfAoGBAP1/U4EddRIpUt9KnC7s5Of2EbdS +PO9EAMMeP4C2USZpRV1AIlH7WT2NWPq/xfW6MPbLm1Vs14E7gB00b/JmYLdrmVCl +pJ+f6AR7ECLCT7up1/63xhv4O1fnxqimFQ8E+4P208UewwI1VBNaFpEy9nXzrith +1yrv8iIDGZ3RSAHHAhUAl2BQjxUjC8yykrmCouuEC/BYHPUCgYEA9+GghdabPd7L +vKtcNrhXuXmUr7v6OuqC+VdMCz0HgmdRWVeOutRZT+ZxBxCBgLRJFnEj6EwoFhO3 +zwkyjMim4TwWeotUfI0o4KOuHiuzpnWRbqN/C/ohNWLx+2J6ASQ7zKTxvqhRkImo +g9/hWuWfBpKLZl6Ae1UlZAFMO/7PSSoEFgIUfW4aPdQBn9gJZp2KuNpzgHzvfsE= +-----END PRIVATE KEY-----`) + +var dsaPublicCert = []byte(`-----BEGIN CERTIFICATE----- +MIIDOjCCAvWgAwIBAgIEPCY/UDANBglghkgBZQMEAwIFADBsMRAwDgYDVQQGEwdV +bmtub3duMRAwDgYDVQQIEwdVbmtub3duMRAwDgYDVQQHEwdVbmtub3duMRAwDgYD +VQQKEwdVbmtub3duMRAwDgYDVQQLEwdVbmtub3duMRAwDgYDVQQDEwdVbmtub3du +MB4XDTE4MTAyMjEzNDMwN1oXDTQ2MDMwOTEzNDMwN1owbDEQMA4GA1UEBhMHVW5r +bm93bjEQMA4GA1UECBMHVW5rbm93bjEQMA4GA1UEBxMHVW5rbm93bjEQMA4GA1UE +ChMHVW5rbm93bjEQMA4GA1UECxMHVW5rbm93bjEQMA4GA1UEAxMHVW5rbm93bjCC +AbgwggEsBgcqhkjOOAQBMIIBHwKBgQD9f1OBHXUSKVLfSpwu7OTn9hG3UjzvRADD +Hj+AtlEmaUVdQCJR+1k9jVj6v8X1ujD2y5tVbNeBO4AdNG/yZmC3a5lQpaSfn+gE 
+exAiwk+7qdf+t8Yb+DtX58aophUPBPuD9tPFHsMCNVQTWhaRMvZ1864rYdcq7/Ii +Axmd0UgBxwIVAJdgUI8VIwvMspK5gqLrhAvwWBz1AoGBAPfhoIXWmz3ey7yrXDa4 +V7l5lK+7+jrqgvlXTAs9B4JnUVlXjrrUWU/mcQcQgYC0SRZxI+hMKBYTt88JMozI +puE8FnqLVHyNKOCjrh4rs6Z1kW6jfwv6ITVi8ftiegEkO8yk8b6oUZCJqIPf4Vrl +nwaSi2ZegHtVJWQBTDv+z0kqA4GFAAKBgQDCriMPbEVBoRK4SOUeFwg7+VRf4TTp +rcOQC9IVVoCjXzuWEGrp3ZI7YWJSpFnSch4lk29RH8O0HpI/NOzKnOBtnKr782pt +1k/bJVMH9EaLd6MKnAVjrCDMYBB0MhebZ8QHY2elZZCWoqDYAcIDOsEx+m4NLErT +ypPnjS5M0jm1PKMhMB8wHQYDVR0OBBYEFC0Yt5XdM0Kc95IX8NQ8XRssGPx7MA0G +CWCGSAFlAwQDAgUAAzAAMC0CFQCIgQtrZZ9hdZG1ROhR5hc8nYEmbgIUAIlgC688 +qzy/7yePTlhlpj+ahMM= +-----END CERTIFICATE-----`) + +type DSATestFixture struct { + Input []byte + Certificate *x509.Certificate +} + +func UnmarshalDSATestFixture(testPEMBlock string) DSATestFixture { + var result DSATestFixture + var derBlock *pem.Block + var pemBlock = []byte(testPEMBlock) + for { + derBlock, pemBlock = pem.Decode(pemBlock) + if derBlock == nil { + break + } + switch derBlock.Type { + case "PKCS7": + result.Input = derBlock.Bytes + case "CERTIFICATE": + result.Certificate, _ = x509.ParseCertificate(derBlock.Bytes) + } + } + + return result +} diff --git a/vendor/golang.org/x/crypto/ssh/common.go b/vendor/golang.org/x/crypto/ssh/common.go index 9ba6e10a..b419c761 100644 --- a/vendor/golang.org/x/crypto/ssh/common.go +++ b/vendor/golang.org/x/crypto/ssh/common.go @@ -49,7 +49,8 @@ var supportedKexAlgos = []string{ // P384 and P521 are not constant-time yet, but since we don't // reuse ephemeral keys, using them for ECDH should be OK. kexAlgoECDH256, kexAlgoECDH384, kexAlgoECDH521, - kexAlgoDH14SHA256, kexAlgoDH14SHA1, kexAlgoDH1SHA1, + kexAlgoDH14SHA256, kexAlgoDH16SHA512, kexAlgoDH14SHA1, + kexAlgoDH1SHA1, } // serverForbiddenKexAlgos contains key exchange algorithms, that are forbidden @@ -59,8 +60,9 @@ var serverForbiddenKexAlgos = map[string]struct{}{ kexAlgoDHGEXSHA256: {}, // server half implementation is only minimal to satisfy the automated tests } -// preferredKexAlgos specifies the default preference for key-exchange algorithms -// in preference order. +// preferredKexAlgos specifies the default preference for key-exchange +// algorithms in preference order. The diffie-hellman-group16-sha512 algorithm +// is disabled by default because it is a bit slower than the others. var preferredKexAlgos = []string{ kexAlgoCurve25519SHA256, kexAlgoCurve25519SHA256LibSSH, kexAlgoECDH256, kexAlgoECDH384, kexAlgoECDH521, @@ -70,12 +72,12 @@ var preferredKexAlgos = []string{ // supportedHostKeyAlgos specifies the supported host-key algorithms (i.e. methods // of authenticating servers) in preference order. var supportedHostKeyAlgos = []string{ - CertAlgoRSASHA512v01, CertAlgoRSASHA256v01, + CertAlgoRSASHA256v01, CertAlgoRSASHA512v01, CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoED25519v01, KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, - KeyAlgoRSASHA512, KeyAlgoRSASHA256, + KeyAlgoRSASHA256, KeyAlgoRSASHA512, KeyAlgoRSA, KeyAlgoDSA, KeyAlgoED25519, @@ -85,7 +87,7 @@ var supportedHostKeyAlgos = []string{ // This is based on RFC 4253, section 6.4, but with hmac-md5 variants removed // because they have reached the end of their useful life. 
var supportedMACs = []string{ - "hmac-sha2-512-etm@openssh.com", "hmac-sha2-256-etm@openssh.com", "hmac-sha2-256", "hmac-sha2-512", "hmac-sha1", "hmac-sha1-96", + "hmac-sha2-256-etm@openssh.com", "hmac-sha2-512-etm@openssh.com", "hmac-sha2-256", "hmac-sha2-512", "hmac-sha1", "hmac-sha1-96", } var supportedCompressions = []string{compressionNone} @@ -119,6 +121,13 @@ func algorithmsForKeyFormat(keyFormat string) []string { } } +// isRSA returns whether algo is a supported RSA algorithm, including certificate +// algorithms. +func isRSA(algo string) bool { + algos := algorithmsForKeyFormat(KeyAlgoRSA) + return contains(algos, underlyingAlgo(algo)) +} + // supportedPubKeyAuthAlgos specifies the supported client public key // authentication algorithms. Note that this doesn't include certificate types // since those use the underlying algorithm. This list is sent to the client if @@ -262,16 +271,16 @@ type Config struct { // unspecified, a size suitable for the chosen cipher is used. RekeyThreshold uint64 - // The allowed key exchanges algorithms. If unspecified then a - // default set of algorithms is used. + // The allowed key exchanges algorithms. If unspecified then a default set + // of algorithms is used. Unsupported values are silently ignored. KeyExchanges []string - // The allowed cipher algorithms. If unspecified then a sensible - // default is used. + // The allowed cipher algorithms. If unspecified then a sensible default is + // used. Unsupported values are silently ignored. Ciphers []string - // The allowed MAC algorithms. If unspecified then a sensible default - // is used. + // The allowed MAC algorithms. If unspecified then a sensible default is + // used. Unsupported values are silently ignored. MACs []string } @@ -288,7 +297,7 @@ func (c *Config) SetDefaults() { var ciphers []string for _, c := range c.Ciphers { if cipherModes[c] != nil { - // reject the cipher if we have no cipherModes definition + // Ignore the cipher if we have no cipherModes definition. ciphers = append(ciphers, c) } } @@ -297,10 +306,26 @@ func (c *Config) SetDefaults() { if c.KeyExchanges == nil { c.KeyExchanges = preferredKexAlgos } + var kexs []string + for _, k := range c.KeyExchanges { + if kexAlgoMap[k] != nil { + // Ignore the KEX if we have no kexAlgoMap definition. + kexs = append(kexs, k) + } + } + c.KeyExchanges = kexs if c.MACs == nil { c.MACs = supportedMACs } + var macs []string + for _, m := range c.MACs { + if macModes[m] != nil { + // Ignore the MAC if we have no macModes definition. + macs = append(macs, m) + } + } + c.MACs = macs if c.RekeyThreshold == 0 { // cipher specific default diff --git a/vendor/golang.org/x/crypto/ssh/kex.go b/vendor/golang.org/x/crypto/ssh/kex.go index 927a90cd..8a05f799 100644 --- a/vendor/golang.org/x/crypto/ssh/kex.go +++ b/vendor/golang.org/x/crypto/ssh/kex.go @@ -23,6 +23,7 @@ const ( kexAlgoDH1SHA1 = "diffie-hellman-group1-sha1" kexAlgoDH14SHA1 = "diffie-hellman-group14-sha1" kexAlgoDH14SHA256 = "diffie-hellman-group14-sha256" + kexAlgoDH16SHA512 = "diffie-hellman-group16-sha512" kexAlgoECDH256 = "ecdh-sha2-nistp256" kexAlgoECDH384 = "ecdh-sha2-nistp384" kexAlgoECDH521 = "ecdh-sha2-nistp521" @@ -430,6 +431,17 @@ func init() { hashFunc: crypto.SHA256, } + // This is the group called diffie-hellman-group16-sha512 in RFC + // 8268 and Oakley Group 16 in RFC 3526. 
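+	// (The hex constant below is that group's 4096-bit MODP prime with
+	// generator 2. Since this KEX is not in preferredKexAlgos, a caller has
+	// to opt in explicitly; a sketch, assuming a client-side ssh.ClientConfig
+	// named cfg:
+	//
+	//	cfg.KeyExchanges = append(cfg.KeyExchanges, "diffie-hellman-group16-sha512")
+	//
+	// Unsupported names in Config.KeyExchanges are now silently filtered out
+	// by SetDefaults.)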
+ p, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199FFFFFFFFFFFFFFFF", 16) + + kexAlgoMap[kexAlgoDH16SHA512] = &dhGroup{ + g: new(big.Int).SetInt64(2), + p: p, + pMinus1: new(big.Int).Sub(p, bigOne), + hashFunc: crypto.SHA512, + } + kexAlgoMap[kexAlgoECDH521] = &ecdh{elliptic.P521()} kexAlgoMap[kexAlgoECDH384] = &ecdh{elliptic.P384()} kexAlgoMap[kexAlgoECDH256] = &ecdh{elliptic.P256()} diff --git a/vendor/golang.org/x/crypto/ssh/server.go b/vendor/golang.org/x/crypto/ssh/server.go index 9e387029..b21322af 100644 --- a/vendor/golang.org/x/crypto/ssh/server.go +++ b/vendor/golang.org/x/crypto/ssh/server.go @@ -370,6 +370,25 @@ func gssExchangeToken(gssapiConfig *GSSAPIWithMICConfig, firstToken []byte, s *c return authErr, perms, nil } +// isAlgoCompatible checks if the signature format is compatible with the +// selected algorithm taking into account edge cases that occur with old +// clients. +func isAlgoCompatible(algo, sigFormat string) bool { + // Compatibility for old clients. + // + // For certificate authentication with OpenSSH 7.2-7.7 signature format can + // be rsa-sha2-256 or rsa-sha2-512 for the algorithm + // ssh-rsa-cert-v01@openssh.com. + // + // With gpg-agent < 2.2.6 the algorithm can be rsa-sha2-256 or rsa-sha2-512 + // for signature format ssh-rsa. + if isRSA(algo) && isRSA(sigFormat) { + return true + } + // Standard case: the underlying algorithm must match the signature format. + return underlyingAlgo(algo) == sigFormat +} + // ServerAuthError represents server authentication errors and is // sometimes returned by NewServerConn. It appends any authentication // errors that may occur, and is returned if all of the authentication @@ -567,7 +586,7 @@ userAuthLoop: authErr = fmt.Errorf("ssh: algorithm %q not accepted", sig.Format) break } - if underlyingAlgo(algo) != sig.Format { + if !isAlgoCompatible(algo, sig.Format) { authErr = fmt.Errorf("ssh: signature %q not compatible with selected algorithm %q", sig.Format, algo) break } diff --git a/vendor/golang.org/x/net/html/render.go b/vendor/golang.org/x/net/html/render.go index 8b280319..e8c12334 100644 --- a/vendor/golang.org/x/net/html/render.go +++ b/vendor/golang.org/x/net/html/render.go @@ -194,9 +194,8 @@ func render1(w writer, n *Node) error { } } - // Render any child nodes. 
- switch n.Data { - case "iframe", "noembed", "noframes", "noscript", "plaintext", "script", "style", "xmp": + // Render any child nodes + if childTextNodesAreLiteral(n) { for c := n.FirstChild; c != nil; c = c.NextSibling { if c.Type == TextNode { if _, err := w.WriteString(c.Data); err != nil { @@ -213,7 +212,7 @@ func render1(w writer, n *Node) error { // last element in the file, with no closing tag. return plaintextAbort } - default: + } else { for c := n.FirstChild; c != nil; c = c.NextSibling { if err := render1(w, c); err != nil { return err @@ -231,6 +230,27 @@ func render1(w writer, n *Node) error { return w.WriteByte('>') } +func childTextNodesAreLiteral(n *Node) bool { + // Per WHATWG HTML 13.3, if the parent of the current node is a style, + // script, xmp, iframe, noembed, noframes, or plaintext element, and the + // current node is a text node, append the value of the node's data + // literally. The specification is not explicit about it, but we only + // enforce this if we are in the HTML namespace (i.e. when the namespace is + // ""). + // NOTE: we also always include noscript elements, although the + // specification states that they should only be rendered as such if + // scripting is enabled for the node (which is not something we track). + if n.Namespace != "" { + return false + } + switch n.Data { + case "iframe", "noembed", "noframes", "noscript", "plaintext", "script", "style", "xmp": + return true + default: + return false + } +} + // writeQuoted writes s to w surrounded by quotes. Normally it will use double // quotes, but if s contains a double quote, it will use single quotes. // It is used for writing the identifiers in a doctype declaration. diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index 0c4d1492..8f775faf 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -624,7 +624,7 @@ ccflags="$@" $2 ~ /^MEM/ || $2 ~ /^WG/ || $2 ~ /^FIB_RULE_/ || - $2 ~ /^BLK[A-Z]*(GET$|SET$|BUF$|PART$|SIZE)/ {printf("\t%s = C.%s\n", $2, $2)} + $2 ~ /^BLK[A-Z]*(GET$|SET$|BUF$|PART$|SIZE|IOMIN$|IOOPT$|ALIGNOFF$|DISCARD|ROTATIONAL$|ZEROOUT$|GETDISKSEQ$)/ {printf("\t%s = C.%s\n", $2, $2)} $2 ~ /^__WCOREFLAG$/ {next} $2 ~ /^__W[A-Z0-9]+$/ {printf("\t%s = C.%s\n", substr($2,3), $2)} diff --git a/vendor/golang.org/x/sys/unix/mmap_nomremap.go b/vendor/golang.org/x/sys/unix/mmap_nomremap.go new file mode 100644 index 00000000..ca051363 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/mmap_nomremap.go @@ -0,0 +1,14 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build aix || darwin || dragonfly || freebsd || openbsd || solaris +// +build aix darwin dragonfly freebsd openbsd solaris + +package unix + +var mapper = &mmapper{ + active: make(map[*byte][]byte), + mmap: mmap, + munmap: munmap, +} diff --git a/vendor/golang.org/x/sys/unix/mremap.go b/vendor/golang.org/x/sys/unix/mremap.go index 86213c05..fa93d0aa 100644 --- a/vendor/golang.org/x/sys/unix/mremap.go +++ b/vendor/golang.org/x/sys/unix/mremap.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-//go:build linux -// +build linux +//go:build linux || netbsd +// +build linux netbsd package unix @@ -14,8 +14,17 @@ type mremapMmapper struct { mremap func(oldaddr uintptr, oldlength uintptr, newlength uintptr, flags int, newaddr uintptr) (xaddr uintptr, err error) } +var mapper = &mremapMmapper{ + mmapper: mmapper{ + active: make(map[*byte][]byte), + mmap: mmap, + munmap: munmap, + }, + mremap: mremap, +} + func (m *mremapMmapper) Mremap(oldData []byte, newLength int, flags int) (data []byte, err error) { - if newLength <= 0 || len(oldData) == 0 || len(oldData) != cap(oldData) || flags&MREMAP_FIXED != 0 { + if newLength <= 0 || len(oldData) == 0 || len(oldData) != cap(oldData) || flags&mremapFixed != 0 { return nil, EINVAL } @@ -32,9 +41,13 @@ func (m *mremapMmapper) Mremap(oldData []byte, newLength int, flags int) (data [ } bNew := unsafe.Slice((*byte)(unsafe.Pointer(newAddr)), newLength) pNew := &bNew[cap(bNew)-1] - if flags&MREMAP_DONTUNMAP == 0 { + if flags&mremapDontunmap == 0 { delete(m.active, pOld) } m.active[pNew] = bNew return bNew, nil } + +func Mremap(oldData []byte, newLength int, flags int) (data []byte, err error) { + return mapper.Mremap(oldData, newLength, flags) +} diff --git a/vendor/golang.org/x/sys/unix/syscall_aix.go b/vendor/golang.org/x/sys/unix/syscall_aix.go index c406ae00..9a6e5aca 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix.go @@ -535,21 +535,6 @@ func Fsync(fd int) error { //sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) = nsendmsg //sys munmap(addr uintptr, length uintptr) (err error) - -var mapper = &mmapper{ - active: make(map[*byte][]byte), - mmap: mmap, - munmap: munmap, -} - -func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) { - return mapper.Mmap(fd, offset, length, prot, flags) -} - -func Munmap(b []byte) (err error) { - return mapper.Munmap(b) -} - //sys Madvise(b []byte, advice int) (err error) //sys Mprotect(b []byte, prot int) (err error) //sys Mlock(b []byte) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_bsd.go b/vendor/golang.org/x/sys/unix/syscall_bsd.go index 7705c327..4217de51 100644 --- a/vendor/golang.org/x/sys/unix/syscall_bsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_bsd.go @@ -601,20 +601,6 @@ func Poll(fds []PollFd, timeout int) (n int, err error) { // Gethostuuid(uuid *byte, timeout *Timespec) (err error) // Ptrace(req int, pid int, addr uintptr, data int) (ret uintptr, err error) -var mapper = &mmapper{ - active: make(map[*byte][]byte), - mmap: mmap, - munmap: munmap, -} - -func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) { - return mapper.Mmap(fd, offset, length, prot, flags) -} - -func Munmap(b []byte) (err error) { - return mapper.Munmap(b) -} - //sys Madvise(b []byte, behav int) (err error) //sys Mlock(b []byte) (err error) //sys Mlockall(flags int) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index 20692150..135cc3cd 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -510,30 +510,36 @@ func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) { return nil, err } - // Find size. 
- n := uintptr(0) - if err := sysctl(mib, nil, &n, nil, 0); err != nil { - return nil, err - } - if n == 0 { - return nil, nil - } - if n%SizeofKinfoProc != 0 { - return nil, fmt.Errorf("sysctl() returned a size of %d, which is not a multiple of %d", n, SizeofKinfoProc) - } + for { + // Find size. + n := uintptr(0) + if err := sysctl(mib, nil, &n, nil, 0); err != nil { + return nil, err + } + if n == 0 { + return nil, nil + } + if n%SizeofKinfoProc != 0 { + return nil, fmt.Errorf("sysctl() returned a size of %d, which is not a multiple of %d", n, SizeofKinfoProc) + } - // Read into buffer of that size. - buf := make([]KinfoProc, n/SizeofKinfoProc) - if err := sysctl(mib, (*byte)(unsafe.Pointer(&buf[0])), &n, nil, 0); err != nil { - return nil, err - } - if n%SizeofKinfoProc != 0 { - return nil, fmt.Errorf("sysctl() returned a size of %d, which is not a multiple of %d", n, SizeofKinfoProc) - } + // Read into buffer of that size. + buf := make([]KinfoProc, n/SizeofKinfoProc) + if err := sysctl(mib, (*byte)(unsafe.Pointer(&buf[0])), &n, nil, 0); err != nil { + if err == ENOMEM { + // Process table grew. Try again. + continue + } + return nil, err + } + if n%SizeofKinfoProc != 0 { + return nil, fmt.Errorf("sysctl() returned a size of %d, which is not a multiple of %d", n, SizeofKinfoProc) + } - // The actual call may return less than the original reported required - // size so ensure we deal with that. - return buf[:n/SizeofKinfoProc], nil + // The actual call may return less than the original reported required + // size so ensure we deal with that. + return buf[:n/SizeofKinfoProc], nil + } } //sys sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index 39de5f14..a730878e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -1885,7 +1885,7 @@ func Getpgrp() (pid int) { //sys PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) //sys PivotRoot(newroot string, putold string) (err error) = SYS_PIVOT_ROOT //sys Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) -//sys Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) = SYS_PSELECT6 +//sys pselect6(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *sigset_argpack) (n int, err error) //sys read(fd int, p []byte) (n int, err error) //sys Removexattr(path string, attr string) (err error) //sys Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) @@ -2125,28 +2125,6 @@ func writevRacedetect(iovecs []Iovec, n int) { // mmap varies by architecture; see syscall_linux_*.go. 
//sys munmap(addr uintptr, length uintptr) (err error) //sys mremap(oldaddr uintptr, oldlength uintptr, newlength uintptr, flags int, newaddr uintptr) (xaddr uintptr, err error) - -var mapper = &mremapMmapper{ - mmapper: mmapper{ - active: make(map[*byte][]byte), - mmap: mmap, - munmap: munmap, - }, - mremap: mremap, -} - -func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) { - return mapper.Mmap(fd, offset, length, prot, flags) -} - -func Munmap(b []byte) (err error) { - return mapper.Munmap(b) -} - -func Mremap(oldData []byte, newLength int, flags int) (data []byte, err error) { - return mapper.Mremap(oldData, newLength, flags) -} - //sys Madvise(b []byte, advice int) (err error) //sys Mprotect(b []byte, prot int) (err error) //sys Mlock(b []byte) (err error) @@ -2155,6 +2133,12 @@ func Mremap(oldData []byte, newLength int, flags int) (data []byte, err error) { //sys Munlock(b []byte) (err error) //sys Munlockall() (err error) +const ( + mremapFixed = MREMAP_FIXED + mremapDontunmap = MREMAP_DONTUNMAP + mremapMaymove = MREMAP_MAYMOVE +) + // Vmsplice splices user pages from a slice of Iovecs into a pipe specified by fd, // using the specified flags. func Vmsplice(fd int, iovs []Iovec, flags int) (int, error) { @@ -2454,6 +2438,39 @@ func Getresgid() (rgid, egid, sgid int) { return int(r), int(e), int(s) } +// Pselect is a wrapper around the Linux pselect6 system call. +// This version does not modify the timeout argument. +func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + // Per https://man7.org/linux/man-pages/man2/select.2.html#NOTES, + // The Linux pselect6() system call modifies its timeout argument. + // [Not modifying the argument] is the behavior required by POSIX.1-2001. + var mutableTimeout *Timespec + if timeout != nil { + mutableTimeout = new(Timespec) + *mutableTimeout = *timeout + } + + // The final argument of the pselect6() system call is not a + // sigset_t * pointer, but is instead a structure + var kernelMask *sigset_argpack + if sigmask != nil { + wordBits := 32 << (^uintptr(0) >> 63) // see math.intSize + + // A sigset stores one bit per signal, + // offset by 1 (because signal 0 does not exist). + // So the number of words needed is ⌈__C_NSIG - 1 / wordBits⌉. 
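+		// For example, on linux/amd64 _C__NSIG is 65 and wordBits is 64, so
+		// sigsetWords = (64 + 63) / 64 = 1 and sigsetBytes = 8, matching the
+		// sigset size the kernel expects on that platform.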
+ sigsetWords := (_C__NSIG - 1 + wordBits - 1) / (wordBits) + + sigsetBytes := uintptr(sigsetWords * (wordBits / 8)) + kernelMask = &sigset_argpack{ + ss: sigmask, + ssLen: sigsetBytes, + } + } + + return pselect6(nfd, r, w, e, mutableTimeout, kernelMask) +} + /* * Unimplemented */ diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go index 5b21fcfd..70601ce3 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go @@ -40,7 +40,7 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err if timeout != nil { ts = &Timespec{Sec: timeout.Sec, Nsec: timeout.Usec * 1000} } - return Pselect(nfd, r, w, e, ts, nil) + return pselect6(nfd, r, w, e, ts, nil) } //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go index a81f5742..f5266689 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go @@ -33,7 +33,7 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err if timeout != nil { ts = &Timespec{Sec: timeout.Sec, Nsec: timeout.Usec * 1000} } - return Pselect(nfd, r, w, e, ts, nil) + return pselect6(nfd, r, w, e, ts, nil) } //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go b/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go index 69d2d7c3..f6ab02ec 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go @@ -28,7 +28,7 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err if timeout != nil { ts = &Timespec{Sec: timeout.Sec, Nsec: timeout.Usec * 1000} } - return Pselect(nfd, r, w, e, ts, nil) + return pselect6(nfd, r, w, e, ts, nil) } //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go index 76d56409..93fe59d2 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go @@ -31,7 +31,7 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err if timeout != nil { ts = &Timespec{Sec: timeout.Sec, Nsec: timeout.Usec * 1000} } - return Pselect(nfd, r, w, e, ts, nil) + return pselect6(nfd, r, w, e, ts, nil) } //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go index 35851ef7..5e6ceee1 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go @@ -32,7 +32,7 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err if timeout != nil { ts = &Timespec{Sec: timeout.Sec, Nsec: timeout.Usec * 1000} } - return Pselect(nfd, r, w, e, ts, nil) + return pselect6(nfd, r, w, e, ts, nil) } //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) @@ -177,3 +177,14 @@ func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error } return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, 
cmdline, flags) } + +//sys riscvHWProbe(pairs []RISCVHWProbePairs, cpuCount uintptr, cpus *CPUSet, flags uint) (err error) + +func RISCVHWProbe(pairs []RISCVHWProbePairs, set *CPUSet, flags uint) (err error) { + var setSize uintptr + + if set != nil { + setSize = uintptr(unsafe.Sizeof(*set)) + } + return riscvHWProbe(pairs, setSize, set, flags) +} diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd.go b/vendor/golang.org/x/sys/unix/syscall_netbsd.go index 018d7d47..ddd1ac85 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd.go @@ -360,6 +360,18 @@ func Statvfs(path string, buf *Statvfs_t) (err error) { //sys writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE //sys utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) +const ( + mremapFixed = MAP_FIXED + mremapDontunmap = 0 + mremapMaymove = 0 +) + +//sys mremapNetBSD(oldp uintptr, oldsize uintptr, newp uintptr, newsize uintptr, flags int) (xaddr uintptr, err error) = SYS_MREMAP + +func mremap(oldaddr uintptr, oldlength uintptr, newlength uintptr, flags int, newaddr uintptr) (uintptr, error) { + return mremapNetBSD(oldaddr, oldlength, newaddr, newlength, flags) +} + /* * Unimplemented */ @@ -564,7 +576,6 @@ func Statvfs(path string, buf *Statvfs_t) (err error) { // mq_timedreceive // mq_timedsend // mq_unlink -// mremap // msgget // msgrcv // msgsnd diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go index b600a289..72d23575 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -716,20 +716,6 @@ func writelen(fd int, buf *byte, nbuf int) (n int, err error) { return } -var mapper = &mmapper{ - active: make(map[*byte][]byte), - mmap: mmap, - munmap: munmap, -} - -func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) { - return mapper.Mmap(fd, offset, length, prot, flags) -} - -func Munmap(b []byte) (err error) { - return mapper.Munmap(b) -} - // Event Ports type fileObjCookie struct { diff --git a/vendor/golang.org/x/sys/unix/syscall_unix.go b/vendor/golang.org/x/sys/unix/syscall_unix.go index 8e48c29e..8bb30e7c 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix.go @@ -147,6 +147,14 @@ func (m *mmapper) Munmap(data []byte) (err error) { return nil } +func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) { + return mapper.Mmap(fd, offset, length, prot, flags) +} + +func Munmap(b []byte) (err error) { + return mapper.Munmap(b) +} + func Read(fd int, p []byte) (n int, err error) { n, err = read(fd, p) if raceenabled { diff --git a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go index d3d49ec3..44e72edb 100644 --- a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go @@ -285,25 +285,11 @@ func Close(fd int) (err error) { return } -var mapper = &mmapper{ - active: make(map[*byte][]byte), - mmap: mmap, - munmap: munmap, -} - // Dummy function: there are no semantics for Madvise on z/OS func Madvise(b []byte, advice int) (err error) { return } -func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) { - return mapper.Mmap(fd, offset, length, prot, flags) -} - -func Munmap(b []byte) (err error) { - return mapper.Munmap(b) -} - //sys Gethostname(buf []byte) (err 
error) = SYS___GETHOSTNAME_A //sysnb Getegid() (egid int) //sysnb Geteuid() (uid int) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index a46df0f1..cfb14300 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -27,22 +27,31 @@ const ( B57600 = 0x1001 B576000 = 0x1006 B921600 = 0x1007 + BLKALIGNOFF = 0x127a BLKBSZGET = 0x80041270 BLKBSZSET = 0x40041271 + BLKDISCARD = 0x1277 + BLKDISCARDZEROES = 0x127c BLKFLSBUF = 0x1261 BLKFRAGET = 0x1265 BLKFRASET = 0x1264 + BLKGETDISKSEQ = 0x80081280 BLKGETSIZE = 0x1260 BLKGETSIZE64 = 0x80041272 + BLKIOMIN = 0x1278 + BLKIOOPT = 0x1279 BLKPBSZGET = 0x127b BLKRAGET = 0x1263 BLKRASET = 0x1262 BLKROGET = 0x125e BLKROSET = 0x125d + BLKROTATIONAL = 0x127e BLKRRPART = 0x125f + BLKSECDISCARD = 0x127d BLKSECTGET = 0x1267 BLKSECTSET = 0x1266 BLKSSZGET = 0x1268 + BLKZEROOUT = 0x127f BOTHER = 0x1000 BS1 = 0x2000 BSDLY = 0x2000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index 6cd4a3ea..df64f2d5 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -27,22 +27,31 @@ const ( B57600 = 0x1001 B576000 = 0x1006 B921600 = 0x1007 + BLKALIGNOFF = 0x127a BLKBSZGET = 0x80081270 BLKBSZSET = 0x40081271 + BLKDISCARD = 0x1277 + BLKDISCARDZEROES = 0x127c BLKFLSBUF = 0x1261 BLKFRAGET = 0x1265 BLKFRASET = 0x1264 + BLKGETDISKSEQ = 0x80081280 BLKGETSIZE = 0x1260 BLKGETSIZE64 = 0x80081272 + BLKIOMIN = 0x1278 + BLKIOOPT = 0x1279 BLKPBSZGET = 0x127b BLKRAGET = 0x1263 BLKRASET = 0x1262 BLKROGET = 0x125e BLKROSET = 0x125d + BLKROTATIONAL = 0x127e BLKRRPART = 0x125f + BLKSECDISCARD = 0x127d BLKSECTGET = 0x1267 BLKSECTSET = 0x1266 BLKSSZGET = 0x1268 + BLKZEROOUT = 0x127f BOTHER = 0x1000 BS1 = 0x2000 BSDLY = 0x2000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index c7ebee24..3025cd5b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -27,22 +27,31 @@ const ( B57600 = 0x1001 B576000 = 0x1006 B921600 = 0x1007 + BLKALIGNOFF = 0x127a BLKBSZGET = 0x80041270 BLKBSZSET = 0x40041271 + BLKDISCARD = 0x1277 + BLKDISCARDZEROES = 0x127c BLKFLSBUF = 0x1261 BLKFRAGET = 0x1265 BLKFRASET = 0x1264 + BLKGETDISKSEQ = 0x80081280 BLKGETSIZE = 0x1260 BLKGETSIZE64 = 0x80041272 + BLKIOMIN = 0x1278 + BLKIOOPT = 0x1279 BLKPBSZGET = 0x127b BLKRAGET = 0x1263 BLKRASET = 0x1262 BLKROGET = 0x125e BLKROSET = 0x125d + BLKROTATIONAL = 0x127e BLKRRPART = 0x125f + BLKSECDISCARD = 0x127d BLKSECTGET = 0x1267 BLKSECTSET = 0x1266 BLKSSZGET = 0x1268 + BLKZEROOUT = 0x127f BOTHER = 0x1000 BS1 = 0x2000 BSDLY = 0x2000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 12a9a138..09e1ffbe 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -27,22 +27,31 @@ const ( B57600 = 0x1001 B576000 = 0x1006 B921600 = 0x1007 + BLKALIGNOFF = 0x127a BLKBSZGET = 0x80081270 BLKBSZSET = 0x40081271 + BLKDISCARD = 0x1277 + BLKDISCARDZEROES = 0x127c BLKFLSBUF = 0x1261 BLKFRAGET = 0x1265 BLKFRASET = 0x1264 + BLKGETDISKSEQ = 0x80081280 BLKGETSIZE = 0x1260 BLKGETSIZE64 = 0x80081272 + BLKIOMIN = 0x1278 + BLKIOOPT = 0x1279 BLKPBSZGET = 0x127b BLKRAGET = 0x1263 BLKRASET = 0x1262 BLKROGET = 0x125e BLKROSET 
= 0x125d + BLKROTATIONAL = 0x127e BLKRRPART = 0x125f + BLKSECDISCARD = 0x127d BLKSECTGET = 0x1267 BLKSECTSET = 0x1266 BLKSSZGET = 0x1268 + BLKZEROOUT = 0x127f BOTHER = 0x1000 BS1 = 0x2000 BSDLY = 0x2000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index f26a164f..a4572354 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -27,22 +27,31 @@ const ( B57600 = 0x1001 B576000 = 0x1006 B921600 = 0x1007 + BLKALIGNOFF = 0x127a BLKBSZGET = 0x80081270 BLKBSZSET = 0x40081271 + BLKDISCARD = 0x1277 + BLKDISCARDZEROES = 0x127c BLKFLSBUF = 0x1261 BLKFRAGET = 0x1265 BLKFRASET = 0x1264 + BLKGETDISKSEQ = 0x80081280 BLKGETSIZE = 0x1260 BLKGETSIZE64 = 0x80081272 + BLKIOMIN = 0x1278 + BLKIOOPT = 0x1279 BLKPBSZGET = 0x127b BLKRAGET = 0x1263 BLKRASET = 0x1262 BLKROGET = 0x125e BLKROSET = 0x125d + BLKROTATIONAL = 0x127e BLKRRPART = 0x125f + BLKSECDISCARD = 0x127d BLKSECTGET = 0x1267 BLKSECTSET = 0x1266 BLKSSZGET = 0x1268 + BLKZEROOUT = 0x127f BOTHER = 0x1000 BS1 = 0x2000 BSDLY = 0x2000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 890bc3c9..fee7dfb8 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -27,22 +27,31 @@ const ( B57600 = 0x1001 B576000 = 0x1006 B921600 = 0x1007 + BLKALIGNOFF = 0x2000127a BLKBSZGET = 0x40041270 BLKBSZSET = 0x80041271 + BLKDISCARD = 0x20001277 + BLKDISCARDZEROES = 0x2000127c BLKFLSBUF = 0x20001261 BLKFRAGET = 0x20001265 BLKFRASET = 0x20001264 + BLKGETDISKSEQ = 0x40081280 BLKGETSIZE = 0x20001260 BLKGETSIZE64 = 0x40041272 + BLKIOMIN = 0x20001278 + BLKIOOPT = 0x20001279 BLKPBSZGET = 0x2000127b BLKRAGET = 0x20001263 BLKRASET = 0x20001262 BLKROGET = 0x2000125e BLKROSET = 0x2000125d + BLKROTATIONAL = 0x2000127e BLKRRPART = 0x2000125f + BLKSECDISCARD = 0x2000127d BLKSECTGET = 0x20001267 BLKSECTSET = 0x20001266 BLKSSZGET = 0x20001268 + BLKZEROOUT = 0x2000127f BOTHER = 0x1000 BS1 = 0x2000 BSDLY = 0x2000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 549f26ac..a5b2373a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -27,22 +27,31 @@ const ( B57600 = 0x1001 B576000 = 0x1006 B921600 = 0x1007 + BLKALIGNOFF = 0x2000127a BLKBSZGET = 0x40081270 BLKBSZSET = 0x80081271 + BLKDISCARD = 0x20001277 + BLKDISCARDZEROES = 0x2000127c BLKFLSBUF = 0x20001261 BLKFRAGET = 0x20001265 BLKFRASET = 0x20001264 + BLKGETDISKSEQ = 0x40081280 BLKGETSIZE = 0x20001260 BLKGETSIZE64 = 0x40081272 + BLKIOMIN = 0x20001278 + BLKIOOPT = 0x20001279 BLKPBSZGET = 0x2000127b BLKRAGET = 0x20001263 BLKRASET = 0x20001262 BLKROGET = 0x2000125e BLKROSET = 0x2000125d + BLKROTATIONAL = 0x2000127e BLKRRPART = 0x2000125f + BLKSECDISCARD = 0x2000127d BLKSECTGET = 0x20001267 BLKSECTSET = 0x20001266 BLKSSZGET = 0x20001268 + BLKZEROOUT = 0x2000127f BOTHER = 0x1000 BS1 = 0x2000 BSDLY = 0x2000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index e0365e32..5dde82c9 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -27,22 +27,31 @@ const ( B57600 = 0x1001 B576000 = 0x1006 B921600 = 0x1007 + BLKALIGNOFF = 0x2000127a BLKBSZGET = 0x40081270 
BLKBSZSET = 0x80081271 + BLKDISCARD = 0x20001277 + BLKDISCARDZEROES = 0x2000127c BLKFLSBUF = 0x20001261 BLKFRAGET = 0x20001265 BLKFRASET = 0x20001264 + BLKGETDISKSEQ = 0x40081280 BLKGETSIZE = 0x20001260 BLKGETSIZE64 = 0x40081272 + BLKIOMIN = 0x20001278 + BLKIOOPT = 0x20001279 BLKPBSZGET = 0x2000127b BLKRAGET = 0x20001263 BLKRASET = 0x20001262 BLKROGET = 0x2000125e BLKROSET = 0x2000125d + BLKROTATIONAL = 0x2000127e BLKRRPART = 0x2000125f + BLKSECDISCARD = 0x2000127d BLKSECTGET = 0x20001267 BLKSECTSET = 0x20001266 BLKSSZGET = 0x20001268 + BLKZEROOUT = 0x2000127f BOTHER = 0x1000 BS1 = 0x2000 BSDLY = 0x2000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index fdccce15..2e80ea6b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -27,22 +27,31 @@ const ( B57600 = 0x1001 B576000 = 0x1006 B921600 = 0x1007 + BLKALIGNOFF = 0x2000127a BLKBSZGET = 0x40041270 BLKBSZSET = 0x80041271 + BLKDISCARD = 0x20001277 + BLKDISCARDZEROES = 0x2000127c BLKFLSBUF = 0x20001261 BLKFRAGET = 0x20001265 BLKFRASET = 0x20001264 + BLKGETDISKSEQ = 0x40081280 BLKGETSIZE = 0x20001260 BLKGETSIZE64 = 0x40041272 + BLKIOMIN = 0x20001278 + BLKIOOPT = 0x20001279 BLKPBSZGET = 0x2000127b BLKRAGET = 0x20001263 BLKRASET = 0x20001262 BLKROGET = 0x2000125e BLKROSET = 0x2000125d + BLKROTATIONAL = 0x2000127e BLKRRPART = 0x2000125f + BLKSECDISCARD = 0x2000127d BLKSECTGET = 0x20001267 BLKSECTSET = 0x20001266 BLKSSZGET = 0x20001268 + BLKZEROOUT = 0x2000127f BOTHER = 0x1000 BS1 = 0x2000 BSDLY = 0x2000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index b2205c83..a65dcd7c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -27,22 +27,31 @@ const ( B57600 = 0x10 B576000 = 0x15 B921600 = 0x16 + BLKALIGNOFF = 0x2000127a BLKBSZGET = 0x40041270 BLKBSZSET = 0x80041271 + BLKDISCARD = 0x20001277 + BLKDISCARDZEROES = 0x2000127c BLKFLSBUF = 0x20001261 BLKFRAGET = 0x20001265 BLKFRASET = 0x20001264 + BLKGETDISKSEQ = 0x40081280 BLKGETSIZE = 0x20001260 BLKGETSIZE64 = 0x40041272 + BLKIOMIN = 0x20001278 + BLKIOOPT = 0x20001279 BLKPBSZGET = 0x2000127b BLKRAGET = 0x20001263 BLKRASET = 0x20001262 BLKROGET = 0x2000125e BLKROSET = 0x2000125d + BLKROTATIONAL = 0x2000127e BLKRRPART = 0x2000125f + BLKSECDISCARD = 0x2000127d BLKSECTGET = 0x20001267 BLKSECTSET = 0x20001266 BLKSSZGET = 0x20001268 + BLKZEROOUT = 0x2000127f BOTHER = 0x1f BS1 = 0x8000 BSDLY = 0x8000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 81aa5ad0..cbd34e3d 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -27,22 +27,31 @@ const ( B57600 = 0x10 B576000 = 0x15 B921600 = 0x16 + BLKALIGNOFF = 0x2000127a BLKBSZGET = 0x40081270 BLKBSZSET = 0x80081271 + BLKDISCARD = 0x20001277 + BLKDISCARDZEROES = 0x2000127c BLKFLSBUF = 0x20001261 BLKFRAGET = 0x20001265 BLKFRASET = 0x20001264 + BLKGETDISKSEQ = 0x40081280 BLKGETSIZE = 0x20001260 BLKGETSIZE64 = 0x40081272 + BLKIOMIN = 0x20001278 + BLKIOOPT = 0x20001279 BLKPBSZGET = 0x2000127b BLKRAGET = 0x20001263 BLKRASET = 0x20001262 BLKROGET = 0x2000125e BLKROSET = 0x2000125d + BLKROTATIONAL = 0x2000127e BLKRRPART = 0x2000125f + BLKSECDISCARD = 0x2000127d BLKSECTGET = 0x20001267 BLKSECTSET = 0x20001266 BLKSSZGET = 0x20001268 + BLKZEROOUT 
= 0x2000127f BOTHER = 0x1f BS1 = 0x8000 BSDLY = 0x8000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index 76807a1f..e4afa7a3 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -27,22 +27,31 @@ const ( B57600 = 0x10 B576000 = 0x15 B921600 = 0x16 + BLKALIGNOFF = 0x2000127a BLKBSZGET = 0x40081270 BLKBSZSET = 0x80081271 + BLKDISCARD = 0x20001277 + BLKDISCARDZEROES = 0x2000127c BLKFLSBUF = 0x20001261 BLKFRAGET = 0x20001265 BLKFRASET = 0x20001264 + BLKGETDISKSEQ = 0x40081280 BLKGETSIZE = 0x20001260 BLKGETSIZE64 = 0x40081272 + BLKIOMIN = 0x20001278 + BLKIOOPT = 0x20001279 BLKPBSZGET = 0x2000127b BLKRAGET = 0x20001263 BLKRASET = 0x20001262 BLKROGET = 0x2000125e BLKROSET = 0x2000125d + BLKROTATIONAL = 0x2000127e BLKRRPART = 0x2000125f + BLKSECDISCARD = 0x2000127d BLKSECTGET = 0x20001267 BLKSECTSET = 0x20001266 BLKSSZGET = 0x20001268 + BLKZEROOUT = 0x2000127f BOTHER = 0x1f BS1 = 0x8000 BSDLY = 0x8000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index d4a5ab9e..44f45a03 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -27,22 +27,31 @@ const ( B57600 = 0x1001 B576000 = 0x1006 B921600 = 0x1007 + BLKALIGNOFF = 0x127a BLKBSZGET = 0x80081270 BLKBSZSET = 0x40081271 + BLKDISCARD = 0x1277 + BLKDISCARDZEROES = 0x127c BLKFLSBUF = 0x1261 BLKFRAGET = 0x1265 BLKFRASET = 0x1264 + BLKGETDISKSEQ = 0x80081280 BLKGETSIZE = 0x1260 BLKGETSIZE64 = 0x80081272 + BLKIOMIN = 0x1278 + BLKIOOPT = 0x1279 BLKPBSZGET = 0x127b BLKRAGET = 0x1263 BLKRASET = 0x1262 BLKROGET = 0x125e BLKROSET = 0x125d + BLKROTATIONAL = 0x127e BLKRRPART = 0x125f + BLKSECDISCARD = 0x127d BLKSECTGET = 0x1267 BLKSECTSET = 0x1266 BLKSSZGET = 0x1268 + BLKZEROOUT = 0x127f BOTHER = 0x1000 BS1 = 0x2000 BSDLY = 0x2000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 66e65db9..74733e26 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -27,22 +27,31 @@ const ( B57600 = 0x1001 B576000 = 0x1006 B921600 = 0x1007 + BLKALIGNOFF = 0x127a BLKBSZGET = 0x80081270 BLKBSZSET = 0x40081271 + BLKDISCARD = 0x1277 + BLKDISCARDZEROES = 0x127c BLKFLSBUF = 0x1261 BLKFRAGET = 0x1265 BLKFRASET = 0x1264 + BLKGETDISKSEQ = 0x80081280 BLKGETSIZE = 0x1260 BLKGETSIZE64 = 0x80081272 + BLKIOMIN = 0x1278 + BLKIOOPT = 0x1279 BLKPBSZGET = 0x127b BLKRAGET = 0x1263 BLKRASET = 0x1262 BLKROGET = 0x125e BLKROSET = 0x125d + BLKROTATIONAL = 0x127e BLKRRPART = 0x125f + BLKSECDISCARD = 0x127d BLKSECTGET = 0x1267 BLKSECTSET = 0x1266 BLKSSZGET = 0x1268 + BLKZEROOUT = 0x127f BOTHER = 0x1000 BS1 = 0x2000 BSDLY = 0x2000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index 48984202..f5f3934b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -30,22 +30,31 @@ const ( B57600 = 0x1001 B576000 = 0x1006 B921600 = 0x1007 + BLKALIGNOFF = 0x2000127a BLKBSZGET = 0x40081270 BLKBSZSET = 0x80081271 + BLKDISCARD = 0x20001277 + BLKDISCARDZEROES = 0x2000127c BLKFLSBUF = 0x20001261 BLKFRAGET = 0x20001265 BLKFRASET = 0x20001264 + BLKGETDISKSEQ = 0x40081280 BLKGETSIZE = 0x20001260 BLKGETSIZE64 = 0x40081272 + 
BLKIOMIN = 0x20001278 + BLKIOOPT = 0x20001279 BLKPBSZGET = 0x2000127b BLKRAGET = 0x20001263 BLKRASET = 0x20001262 BLKROGET = 0x2000125e BLKROSET = 0x2000125d + BLKROTATIONAL = 0x2000127e BLKRRPART = 0x2000125f + BLKSECDISCARD = 0x2000127d BLKSECTGET = 0x20001267 BLKSECTSET = 0x20001266 BLKSSZGET = 0x20001268 + BLKZEROOUT = 0x2000127f BOTHER = 0x1000 BS1 = 0x2000 BSDLY = 0x2000 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index 7ceec233..a07321be 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -1356,7 +1356,7 @@ func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) ( // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { +func pselect6(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *sigset_argpack) (n int, err error) { r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask))) n = int(r0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go index 0b292395..0ab4f2ed 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go @@ -531,3 +531,19 @@ func kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, f } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func riscvHWProbe(pairs []RISCVHWProbePairs, cpuCount uintptr, cpus *CPUSet, flags uint) (err error) { + var _p0 unsafe.Pointer + if len(pairs) > 0 { + _p0 = unsafe.Pointer(&pairs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_RISCV_HWPROBE, uintptr(_p0), uintptr(len(pairs)), uintptr(cpuCount), uintptr(unsafe.Pointer(cpus)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go index cdb2af5a..35f499b3 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go @@ -1858,3 +1858,14 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mremapNetBSD(oldp uintptr, oldsize uintptr, newp uintptr, newsize uintptr, flags int) (xaddr uintptr, err error) { + r0, _, e1 := Syscall6(SYS_MREMAP, uintptr(oldp), uintptr(oldsize), uintptr(newp), uintptr(newsize), uintptr(flags), 0) + xaddr = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go index 9d25f76b..3cda65b0 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go @@ -1858,3 +1858,14 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mremapNetBSD(oldp uintptr, oldsize uintptr, newp uintptr, newsize uintptr, flags int) (xaddr uintptr, err error) { + r0, _, e1 := Syscall6(SYS_MREMAP, uintptr(oldp), 
uintptr(oldsize), uintptr(newp), uintptr(newsize), uintptr(flags), 0) + xaddr = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go index d3f80351..1e1fea90 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go @@ -1858,3 +1858,14 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mremapNetBSD(oldp uintptr, oldsize uintptr, newp uintptr, newsize uintptr, flags int) (xaddr uintptr, err error) { + r0, _, e1 := Syscall6(SYS_MREMAP, uintptr(oldp), uintptr(oldsize), uintptr(newp), uintptr(newsize), uintptr(flags), 0) + xaddr = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go index 887188a5..3b77da11 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go @@ -1858,3 +1858,14 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mremapNetBSD(oldp uintptr, oldsize uintptr, newp uintptr, newsize uintptr, flags int) (xaddr uintptr, err error) { + r0, _, e1 := Syscall6(SYS_MREMAP, uintptr(oldp), uintptr(oldsize), uintptr(newp), uintptr(newsize), uintptr(flags), 0) + xaddr = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index 3e594a8c..ef285c56 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -251,6 +251,8 @@ const ( SYS_ACCEPT4 = 242 SYS_RECVMMSG = 243 SYS_ARCH_SPECIFIC_SYSCALL = 244 + SYS_RISCV_HWPROBE = 258 + SYS_RISCV_FLUSH_ICACHE = 259 SYS_WAIT4 = 260 SYS_PRLIMIT64 = 261 SYS_FANOTIFY_INIT = 262 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 02e2462c..26ef52aa 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -866,6 +866,11 @@ const ( POLLNVAL = 0x20 ) +type sigset_argpack struct { + ss *Sigset_t + ssLen uintptr +} + type SignalfdSiginfo struct { Signo uint32 Errno int32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index 9ea54b7b..83c69c11 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -718,3 +718,26 @@ type SysvShmDesc struct { _ uint64 _ uint64 } + +type RISCVHWProbePairs struct { + Key int64 + Value uint64 +} + +const ( + RISCV_HWPROBE_KEY_MVENDORID = 0x0 + RISCV_HWPROBE_KEY_MARCHID = 0x1 + RISCV_HWPROBE_KEY_MIMPID = 0x2 + RISCV_HWPROBE_KEY_BASE_BEHAVIOR = 0x3 + RISCV_HWPROBE_BASE_BEHAVIOR_IMA = 0x1 + RISCV_HWPROBE_KEY_IMA_EXT_0 = 0x4 + RISCV_HWPROBE_IMA_FD = 0x1 + RISCV_HWPROBE_IMA_C = 0x2 + RISCV_HWPROBE_KEY_CPUPERF_0 = 0x5 + RISCV_HWPROBE_MISALIGNED_UNKNOWN = 0x0 + RISCV_HWPROBE_MISALIGNED_EMULATED = 0x1 + RISCV_HWPROBE_MISALIGNED_SLOW = 0x2 + RISCV_HWPROBE_MISALIGNED_FAST = 0x3 + RISCV_HWPROBE_MISALIGNED_UNSUPPORTED = 0x4 + RISCV_HWPROBE_MISALIGNED_MASK = 0x7 +) 
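The BLK* additions above expose the kernel's block-device ioctl request codes (discard, zero-out, rotational flag, I/O geometry, disk sequence number) in the generated per-architecture constant tables; as the hunks show, the numeric encodings differ by GOARCH, so callers should always use the generated constants rather than hard-coded values. As a minimal sketch (not part of this patch) of how such a constant might be used with golang.org/x/sys/unix — the device path is a hypothetical example and opening it typically requires root:

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	// Hypothetical device path used only for illustration.
	fd, err := unix.Open("/dev/sda", unix.O_RDONLY, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(fd)

	// BLKIOMIN and BLKIOOPT report the device's minimum and optimal
	// I/O sizes in bytes, mirroring the kernel block-device ioctls.
	ioMin, err := unix.IoctlGetUint32(fd, unix.BLKIOMIN)
	if err != nil {
		log.Fatal(err)
	}
	ioOpt, err := unix.IoctlGetUint32(fd, unix.BLKIOOPT)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("minimum I/O: %d bytes, optimal I/O: %d bytes\n", ioMin, ioOpt)
}

Because the request codes are architecture-specific, going through the x/sys/unix constants for the target GOARCH is the only portable way to issue these ioctls from Go.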
diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 96459007..373d1638 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -135,14 +135,14 @@ func Getpagesize() int { return 4096 } // NewCallback converts a Go function to a function pointer conforming to the stdcall calling convention. // This is useful when interoperating with Windows code requiring callbacks. -// The argument is expected to be a function with with one uintptr-sized result. The function must not have arguments with size larger than the size of uintptr. +// The argument is expected to be a function with one uintptr-sized result. The function must not have arguments with size larger than the size of uintptr. func NewCallback(fn interface{}) uintptr { return syscall.NewCallback(fn) } // NewCallbackCDecl converts a Go function to a function pointer conforming to the cdecl calling convention. // This is useful when interoperating with Windows code requiring callbacks. -// The argument is expected to be a function with with one uintptr-sized result. The function must not have arguments with size larger than the size of uintptr. +// The argument is expected to be a function with one uintptr-sized result. The function must not have arguments with size larger than the size of uintptr. func NewCallbackCDecl(fn interface{}) uintptr { return syscall.NewCallbackCDecl(fn) } diff --git a/vendor/golang.org/x/text/language/match.go b/vendor/golang.org/x/text/language/match.go index ee45f494..1153baf2 100644 --- a/vendor/golang.org/x/text/language/match.go +++ b/vendor/golang.org/x/text/language/match.go @@ -434,7 +434,7 @@ func newMatcher(supported []Tag, options []MatchOption) *matcher { // (their canonicalization simply substitutes a different language code, but // nothing else), the match confidence is Exact, otherwise it is High. for i, lm := range language.AliasMap { - // If deprecated codes match and there is no fiddling with the script or + // If deprecated codes match and there is no fiddling with the script // or region, we consider it an exact match. conf := Exact if language.AliasTypes[i] != language.Macro { diff --git a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go index be8f5a86..aa7dfacc 100644 --- a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go +++ b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go @@ -113,6 +113,20 @@ const ( opObj = 'O' // .Obj() (Named, TypeParam) ) +// For is equivalent to new(Encoder).For(obj). +// +// It may be more efficient to reuse a single Encoder across several calls. +func For(obj types.Object) (Path, error) { + return new(Encoder).For(obj) +} + +// An Encoder amortizes the cost of encoding the paths of multiple objects. +// The zero value of an Encoder is ready to use. +type Encoder struct { + scopeNamesMemo map[*types.Scope][]string // memoization of Scope.Names() + namedMethodsMemo map[*types.Named][]*types.Func // memoization of namedMethods() +} + // For returns the path to an object relative to its package, // or an error if the object is not accessible from the package's Scope. // @@ -145,24 +159,7 @@ const ( // .Type().Field(0) (field Var X) // // where p is the package (*types.Package) to which X belongs. 
-func For(obj types.Object) (Path, error) { - return newEncoderFor()(obj) -} - -// An encoder amortizes the cost of encoding the paths of multiple objects. -// Nonexported pending approval of proposal 58668. -type encoder struct { - scopeNamesMemo map[*types.Scope][]string // memoization of Scope.Names() - namedMethodsMemo map[*types.Named][]*types.Func // memoization of namedMethods() -} - -// Exposed to gopls via golang.org/x/tools/internal/typesinternal -// pending approval of proposal 58668. -// -//go:linkname newEncoderFor -func newEncoderFor() func(types.Object) (Path, error) { return new(encoder).For } - -func (enc *encoder) For(obj types.Object) (Path, error) { +func (enc *Encoder) For(obj types.Object) (Path, error) { pkg := obj.Pkg() // This table lists the cases of interest. @@ -341,7 +338,7 @@ func appendOpArg(path []byte, op byte, arg int) []byte { // This function is just an optimization that avoids the general scope walking // approach. You are expected to fall back to the general approach if this // function fails. -func (enc *encoder) concreteMethod(meth *types.Func) (Path, bool) { +func (enc *Encoder) concreteMethod(meth *types.Func) (Path, bool) { // Concrete methods can only be declared on package-scoped named types. For // that reason we can skip the expensive walk over the package scope: the // path will always be package -> named type -> method. We can trivially get @@ -421,7 +418,13 @@ func (enc *encoder) concreteMethod(meth *types.Func) (Path, bool) { } } - panic(fmt.Sprintf("couldn't find method %s on type %s", meth, named)) + // Due to golang/go#59944, go/types fails to associate the receiver with + // certain methods on cgo types. + // + // TODO(rfindley): replace this panic once golang/go#59944 is fixed in all Go + // versions gopls supports. + return "", false + // panic(fmt.Sprintf("couldn't find method %s on type %s; methods: %#v", meth, named, enc.namedMethods(named))) } // find finds obj within type T, returning the path to it, or nil if not found. @@ -730,23 +733,8 @@ func namedMethods(named *types.Named) []*types.Func { return methods } -// scopeNames is a memoization of scope.Names. Callers must not modify the result. -func (enc *encoder) scopeNames(scope *types.Scope) []string { - m := enc.scopeNamesMemo - if m == nil { - m = make(map[*types.Scope][]string) - enc.scopeNamesMemo = m - } - names, ok := m[scope] - if !ok { - names = scope.Names() // allocates and sorts - m[scope] = names - } - return names -} - // namedMethods is a memoization of the namedMethods function. Callers must not modify the result. -func (enc *encoder) namedMethods(named *types.Named) []*types.Func { +func (enc *Encoder) namedMethods(named *types.Named) []*types.Func { m := enc.namedMethodsMemo if m == nil { m = make(map[*types.Named][]*types.Func) @@ -758,5 +746,19 @@ func (enc *encoder) namedMethods(named *types.Named) []*types.Func { m[named] = methods } return methods +} +// scopeNames is a memoization of scope.Names. Callers must not modify the result. 
+func (enc *Encoder) scopeNames(scope *types.Scope) []string { + m := enc.scopeNamesMemo + if m == nil { + m = make(map[*types.Scope][]string) + enc.scopeNamesMemo = m + } + names, ok := m[scope] + if !ok { + names = scope.Names() // allocates and sorts + m[scope] = names + } + return names } diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke.go b/vendor/golang.org/x/tools/internal/gocommand/invoke.go index d5055169..3c0afe72 100644 --- a/vendor/golang.org/x/tools/internal/gocommand/invoke.go +++ b/vendor/golang.org/x/tools/internal/gocommand/invoke.go @@ -8,10 +8,12 @@ package gocommand import ( "bytes" "context" + "errors" "fmt" "io" "log" "os" + "reflect" "regexp" "runtime" "strconv" @@ -215,6 +217,18 @@ func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error { cmd := exec.Command("go", goArgs...) cmd.Stdout = stdout cmd.Stderr = stderr + + // cmd.WaitDelay was added only in go1.20 (see #50436). + if waitDelay := reflect.ValueOf(cmd).Elem().FieldByName("WaitDelay"); waitDelay.IsValid() { + // https://go.dev/issue/59541: don't wait forever copying stderr + // after the command has exited. + // After CL 484741 we copy stdout manually, so we'll stop reading that as + // soon as ctx is done. However, we also don't want to wait around forever + // for stderr. Give a much-longer-than-reasonable delay and then assume that + // something has wedged in the kernel or runtime. + waitDelay.Set(reflect.ValueOf(30 * time.Second)) + } + // On darwin the cwd gets resolved to the real path, which breaks anything that // expects the working directory to keep the original path, including the // go command when dealing with modules. @@ -229,6 +243,7 @@ func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error { cmd.Env = append(cmd.Env, "PWD="+i.WorkingDir) cmd.Dir = i.WorkingDir } + defer func(start time.Time) { log("%s for %v", time.Since(start), cmdDebugStr(cmd)) }(time.Now()) return runCmdContext(ctx, cmd) @@ -242,10 +257,85 @@ var DebugHangingGoCommands = false // runCmdContext is like exec.CommandContext except it sends os.Interrupt // before os.Kill. -func runCmdContext(ctx context.Context, cmd *exec.Cmd) error { - if err := cmd.Start(); err != nil { +func runCmdContext(ctx context.Context, cmd *exec.Cmd) (err error) { + // If cmd.Stdout is not an *os.File, the exec package will create a pipe and + // copy it to the Writer in a goroutine until the process has finished and + // either the pipe reaches EOF or the command's WaitDelay expires. + // + // However, the output from 'go list' can be quite large, and we don't want to + // keep reading (and allocating buffers) if we've already decided we don't + // care about the output. We don't want to wait for the process to finish, and + // we don't want to wait for the WaitDelay to expire either. + // + // Instead, if cmd.Stdout requires a copying goroutine we explicitly replace + // it with a pipe (which is an *os.File), which we can close in order to stop + // copying output as soon as we realize we don't care about it. + var stdoutW *os.File + if cmd.Stdout != nil { + if _, ok := cmd.Stdout.(*os.File); !ok { + var stdoutR *os.File + stdoutR, stdoutW, err = os.Pipe() + if err != nil { + return err + } + prevStdout := cmd.Stdout + cmd.Stdout = stdoutW + + stdoutErr := make(chan error, 1) + go func() { + _, err := io.Copy(prevStdout, stdoutR) + if err != nil { + err = fmt.Errorf("copying stdout: %w", err) + } + stdoutErr <- err + }() + defer func() { + // We started a goroutine to copy a stdout pipe.
+ // Wait for it to finish, or terminate it if need be. + var err2 error + select { + case err2 = <-stdoutErr: + stdoutR.Close() + case <-ctx.Done(): + stdoutR.Close() + // Per https://pkg.go.dev/os#File.Close, the call to stdoutR.Close + // should cause the Read call in io.Copy to unblock and return + // immediately, but we still need to receive from stdoutErr to confirm + // that that has happened. + <-stdoutErr + err2 = ctx.Err() + } + if err == nil { + err = err2 + } + }() + + // Per https://pkg.go.dev/os/exec#Cmd, “If Stdout and Stderr are the + // same writer, and have a type that can be compared with ==, at most + // one goroutine at a time will call Write.” + // + // Since we're starting a goroutine that writes to cmd.Stdout, we must + // also update cmd.Stderr so that that still holds. + func() { + defer func() { recover() }() + if cmd.Stderr == prevStdout { + cmd.Stderr = cmd.Stdout + } + }() + } + } + + err = cmd.Start() + if stdoutW != nil { + // The child process has inherited the pipe file, + // so close the copy held in this process. + stdoutW.Close() + stdoutW = nil + } + if err != nil { return err } + resChan := make(chan error, 1) go func() { resChan <- cmd.Wait() @@ -253,11 +343,14 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) error { // If we're interested in debugging hanging Go commands, stop waiting after a // minute and panic with interesting information. - if DebugHangingGoCommands { + debug := DebugHangingGoCommands + if debug { + timer := time.NewTimer(1 * time.Minute) + defer timer.Stop() select { case err := <-resChan: return err - case <-time.After(1 * time.Minute): + case <-timer.C: HandleHangingGoCommand(cmd.Process) case <-ctx.Done(): } @@ -270,30 +363,25 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) error { } // Cancelled. Interrupt and see if it ends voluntarily. - cmd.Process.Signal(os.Interrupt) - select { - case err := <-resChan: - return err - case <-time.After(time.Second): + if err := cmd.Process.Signal(os.Interrupt); err == nil { + // (We used to wait only 1s but this proved + // fragile on loaded builder machines.) + timer := time.NewTimer(5 * time.Second) + defer timer.Stop() + select { + case err := <-resChan: + return err + case <-timer.C: + } } // Didn't shut down in response to interrupt. Kill it hard. // TODO(rfindley): per advice from bcmills@, it may be better to send SIGQUIT // on certain platforms, such as unix. - if err := cmd.Process.Kill(); err != nil && DebugHangingGoCommands { - // Don't panic here as this reliably fails on windows with EINVAL. + if err := cmd.Process.Kill(); err != nil && !errors.Is(err, os.ErrProcessDone) && debug { log.Printf("error killing the Go command: %v", err) } - // See above: don't wait indefinitely if we're debugging hanging Go commands. 
- if DebugHangingGoCommands { - select { - case err := <-resChan: - return err - case <-time.After(10 * time.Second): // a shorter wait as resChan should return quickly following Kill - HandleHangingGoCommand(cmd.Process) - } - } return <-resChan } diff --git a/vendor/golang.org/x/tools/internal/gocommand/version.go b/vendor/golang.org/x/tools/internal/gocommand/version.go index 307a76d4..446c5846 100644 --- a/vendor/golang.org/x/tools/internal/gocommand/version.go +++ b/vendor/golang.org/x/tools/internal/gocommand/version.go @@ -23,21 +23,11 @@ import ( func GoVersion(ctx context.Context, inv Invocation, r *Runner) (int, error) { inv.Verb = "list" inv.Args = []string{"-e", "-f", `{{context.ReleaseTags}}`, `--`, `unsafe`} - inv.Env = append(append([]string{}, inv.Env...), "GO111MODULE=off") - // Unset any unneeded flags, and remove them from BuildFlags, if they're - // present. - inv.ModFile = "" + inv.BuildFlags = nil // This is not a build command. inv.ModFlag = "" - var buildFlags []string - for _, flag := range inv.BuildFlags { - // Flags can be prefixed by one or two dashes. - f := strings.TrimPrefix(strings.TrimPrefix(flag, "-"), "-") - if strings.HasPrefix(f, "mod=") || strings.HasPrefix(f, "modfile=") { - continue - } - buildFlags = append(buildFlags, flag) - } - inv.BuildFlags = buildFlags + inv.ModFile = "" + inv.Env = append(inv.Env[:len(inv.Env):len(inv.Env)], "GO111MODULE=off") + stdoutBytes, err := r.Run(ctx, inv) if err != nil { return 0, err diff --git a/vendor/modules.txt b/vendor/modules.txt index 40f81be3..f786bfbf 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,3 +1,6 @@ +# dario.cat/mergo v1.0.0 +## explicit; go 1.13 +dario.cat/mergo # github.com/CycloneDX/cyclonedx-go v0.7.1 ## explicit; go 1.17 github.com/CycloneDX/cyclonedx-go @@ -23,7 +26,7 @@ github.com/Microsoft/go-winio/internal/fs github.com/Microsoft/go-winio/internal/socket github.com/Microsoft/go-winio/internal/stringbuffer github.com/Microsoft/go-winio/pkg/guid -# github.com/ProtonMail/go-crypto v0.0.0-20230518184743-7afd39499903 +# github.com/ProtonMail/go-crypto v0.0.0-20230717121422-5aa5874ade95 ## explicit; go 1.13 github.com/ProtonMail/go-crypto/bitcurves github.com/ProtonMail/go-crypto/brainpool @@ -64,7 +67,7 @@ github.com/anchore/go-struct-converter # github.com/anchore/packageurl-go v0.1.1-0.20230104203445-02e0a6721501 ## explicit; go 1.17 github.com/anchore/packageurl-go -# github.com/anchore/stereoscope v0.0.0-20230627195312-cd49355d934e +# github.com/anchore/stereoscope v0.0.0-20230727211946-d1f3d766295e ## explicit; go 1.19 github.com/anchore/stereoscope github.com/anchore/stereoscope/internal/bus @@ -81,7 +84,7 @@ github.com/anchore/stereoscope/pkg/image/oci github.com/anchore/stereoscope/pkg/image/sif github.com/anchore/stereoscope/pkg/tree github.com/anchore/stereoscope/pkg/tree/node -# github.com/anchore/syft v0.85.0 +# github.com/anchore/syft v0.87.1 ## explicit; go 1.19 github.com/anchore/syft/internal github.com/anchore/syft/internal/bus @@ -121,6 +124,7 @@ github.com/anchore/syft/syft/pkg/cataloger/alpm github.com/anchore/syft/syft/pkg/cataloger/apkdb github.com/anchore/syft/syft/pkg/cataloger/binary github.com/anchore/syft/syft/pkg/cataloger/common/cpe +github.com/anchore/syft/syft/pkg/cataloger/common/cpe/dictionary github.com/anchore/syft/syft/pkg/cataloger/cpp github.com/anchore/syft/syft/pkg/cataloger/dart github.com/anchore/syft/syft/pkg/cataloger/deb @@ -150,6 +154,12 @@ github.com/anchore/syft/syft/source # github.com/andybalholm/brotli v1.0.4 ## explicit; go 1.12 
github.com/andybalholm/brotli +# github.com/aquasecurity/go-pep440-version v0.0.0-20210121094942-22b2f8951d46 +## explicit; go 1.15 +github.com/aquasecurity/go-pep440-version +# github.com/aquasecurity/go-version v0.0.0-20210121072130-637058cfe492 +## explicit; go 1.15 +github.com/aquasecurity/go-version/pkg/part # github.com/becheran/wildmatch-go v1.0.0 ## explicit; go 1.15 github.com/becheran/wildmatch-go @@ -182,7 +192,7 @@ github.com/containerd/stargz-snapshotter/estargz/errorutil github.com/deitch/magic/pkg/magic github.com/deitch/magic/pkg/magic/internal github.com/deitch/magic/pkg/magic/parser -# github.com/docker/cli v23.0.5+incompatible +# github.com/docker/cli v24.0.0+incompatible ## explicit github.com/docker/cli/cli/config github.com/docker/cli/cli/config/configfile @@ -196,7 +206,7 @@ github.com/docker/cli/cli/connhelper/ssh github.com/docker/distribution/digestset github.com/docker/distribution/reference github.com/docker/distribution/registry/client/auth/challenge -# github.com/docker/docker v24.0.2+incompatible +# github.com/docker/docker v24.0.5+incompatible ## explicit github.com/docker/docker/api github.com/docker/docker/api/types @@ -240,6 +250,9 @@ github.com/dsnet/compress/internal/prefix # github.com/dustin/go-humanize v1.0.1 ## explicit; go 1.16 github.com/dustin/go-humanize +# github.com/edsrzf/mmap-go v1.1.0 +## explicit; go 1.17 +github.com/edsrzf/mmap-go # github.com/emirpasic/gods v1.18.1 ## explicit; go 1.2 github.com/emirpasic/gods/containers @@ -274,10 +287,11 @@ github.com/go-git/go-billy/v5/helper/polyfill github.com/go-git/go-billy/v5/memfs github.com/go-git/go-billy/v5/osfs github.com/go-git/go-billy/v5/util -# github.com/go-git/go-git/v5 v5.7.0 +# github.com/go-git/go-git/v5 v5.8.1 ## explicit; go 1.18 github.com/go-git/go-git/v5 github.com/go-git/go-git/v5/config +github.com/go-git/go-git/v5/internal/path_util github.com/go-git/go-git/v5/internal/revision github.com/go-git/go-git/v5/internal/url github.com/go-git/go-git/v5/plumbing @@ -348,7 +362,7 @@ github.com/google/go-cmp/cmp/internal/diff github.com/google/go-cmp/cmp/internal/flags github.com/google/go-cmp/cmp/internal/function github.com/google/go-cmp/cmp/internal/value -# github.com/google/go-containerregistry v0.15.2 +# github.com/google/go-containerregistry v0.16.1 ## explicit; go 1.18 github.com/google/go-containerregistry/internal/and github.com/google/go-containerregistry/internal/compression @@ -393,12 +407,13 @@ github.com/huandu/xstrings # github.com/imdario/mergo v0.3.15 ## explicit; go 1.13 github.com/imdario/mergo -# github.com/in-toto/in-toto-golang v0.4.1-0.20221018183522-731d0640b65f -## explicit; go 1.17 +# github.com/in-toto/in-toto-golang v0.9.0 +## explicit; go 1.20 github.com/in-toto/in-toto-golang/in_toto github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.1 github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2 +github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v1 # github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 ## explicit github.com/jbenet/go-context/io @@ -498,6 +513,10 @@ github.com/pkg/errors # github.com/rivo/uniseg v0.2.0 ## explicit; go 1.12 github.com/rivo/uniseg +# github.com/saferwall/pe v1.4.4 +## explicit; go 1.15 +github.com/saferwall/pe +github.com/saferwall/pe/log # github.com/sassoftware/go-rpmutils v0.2.0 ## explicit; go 1.13 github.com/sassoftware/go-rpmutils @@ -507,10 +526,11 @@ github.com/sassoftware/go-rpmutils/fileutil ## explicit 
github.com/scylladb/go-set/iset github.com/scylladb/go-set/strset -# github.com/secure-systems-lab/go-securesystemslib v0.4.0 -## explicit; go 1.17 +# github.com/secure-systems-lab/go-securesystemslib v0.6.0 +## explicit; go 1.20 github.com/secure-systems-lab/go-securesystemslib/cjson github.com/secure-systems-lab/go-securesystemslib/dsse +github.com/secure-systems-lab/go-securesystemslib/signerverifier # github.com/sergi/go-diff v1.3.1 ## explicit; go 1.12 github.com/sergi/go-diff/diffmatchpatch @@ -523,10 +543,10 @@ github.com/shopspring/decimal # github.com/sirupsen/logrus v1.9.3 ## explicit; go 1.13 github.com/sirupsen/logrus -# github.com/skeema/knownhosts v1.1.1 +# github.com/skeema/knownhosts v1.2.0 ## explicit; go 1.17 github.com/skeema/knownhosts -# github.com/spdx/tools-golang v0.5.2 +# github.com/spdx/tools-golang v0.5.3 ## explicit; go 1.13 github.com/spdx/tools-golang/convert github.com/spdx/tools-golang/json @@ -552,8 +572,8 @@ github.com/spf13/afero/mem # github.com/spf13/cast v1.5.1 ## explicit; go 1.18 github.com/spf13/cast -# github.com/sylabs/sif/v2 v2.8.1 -## explicit; go 1.18 +# github.com/sylabs/sif/v2 v2.11.5 +## explicit; go 1.19 github.com/sylabs/sif/v2/pkg/sif # github.com/sylabs/squashfs v0.6.1 ## explicit; go 1.19 @@ -581,7 +601,7 @@ github.com/vbatts/go-mtree/xattr # github.com/vbatts/tar-split v0.11.3 ## explicit; go 1.15 github.com/vbatts/tar-split/archive/tar -# github.com/vifraa/gopom v0.2.1 +# github.com/vifraa/gopom v1.0.0 ## explicit; go 1.15 github.com/vifraa/gopom # github.com/wagoodman/go-partybus v0.0.0-20230516145632-8ccac152c651 @@ -596,9 +616,12 @@ github.com/xanzy/ssh-agent # github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 ## explicit github.com/xi2/xz +# go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 +## explicit; go 1.11 +go.mozilla.org/pkcs7 # go.uber.org/goleak v1.2.0 ## explicit; go 1.18 -# golang.org/x/crypto v0.11.0 +# golang.org/x/crypto v0.12.0 ## explicit; go 1.17 golang.org/x/crypto/argon2 golang.org/x/crypto/bcrypt @@ -637,7 +660,7 @@ golang.org/x/mod/internal/lazyregexp golang.org/x/mod/modfile golang.org/x/mod/module golang.org/x/mod/semver -# golang.org/x/net v0.12.0 +# golang.org/x/net v0.14.0 ## explicit; go 1.17 golang.org/x/net/context golang.org/x/net/html @@ -645,10 +668,10 @@ golang.org/x/net/html/atom golang.org/x/net/html/charset golang.org/x/net/internal/socks golang.org/x/net/proxy -# golang.org/x/sync v0.1.0 +# golang.org/x/sync v0.2.0 ## explicit golang.org/x/sync/errgroup -# golang.org/x/sys v0.10.0 +# golang.org/x/sys v0.11.0 ## explicit; go 1.17 golang.org/x/sys/cpu golang.org/x/sys/execabs @@ -656,10 +679,10 @@ golang.org/x/sys/internal/unsafeheader golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows -# golang.org/x/term v0.10.0 +# golang.org/x/term v0.11.0 ## explicit; go 1.17 golang.org/x/term -# golang.org/x/text v0.11.0 +# golang.org/x/text v0.12.0 ## explicit; go 1.17 golang.org/x/text/encoding golang.org/x/text/encoding/charmap @@ -679,7 +702,7 @@ golang.org/x/text/language golang.org/x/text/runes golang.org/x/text/transform golang.org/x/text/unicode/norm -# golang.org/x/tools v0.8.0 +# golang.org/x/tools v0.9.1 ## explicit; go 1.18 golang.org/x/tools/cmd/stringer golang.org/x/tools/go/gcexportdata From ec7a64a37bdd0ee71e720ddcdf1b7615ff4f3626 Mon Sep 17 00:00:00 2001 From: Justin Chadwell Date: Thu, 17 Aug 2023 10:13:04 +0100 Subject: [PATCH 2/2] examples: remove digest from golang example Signed-off-by: Justin Chadwell --- examples/golang/checks/sbom-base.spdx.json | 5 +---- 
1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/examples/golang/checks/sbom-base.spdx.json b/examples/golang/checks/sbom-base.spdx.json index 119bd7dc..9ab433a0 100644 --- a/examples/golang/checks/sbom-base.spdx.json +++ b/examples/golang/checks/sbom-base.spdx.json @@ -3,10 +3,7 @@ "predicateType": "https://spdx.dev/Document", "subject": [ { - "name": "bin/app", - "digest": { - "sha256": "137e71592242436360271294fa4b9e969480fc99ae2ccbb87166d7ffa29c1386" - } + "name": "bin/app" } ], "predicate": {