diff --git a/.allstar/binary_artifacts.yaml b/.allstar/binary_artifacts.yaml
new file mode 100644
index 000000000..7176b965c
--- /dev/null
+++ b/.allstar/binary_artifacts.yaml
@@ -0,0 +1,63 @@
+# Ignore reason: These artifacts are used in unit tests or they are tools
+# used by the build. It's impractical to make changes at this stage of the
+# project to generate the tools from source.
+ignorePaths:
+- omaha/internal/tools/ApplyTag.exe
+- omaha/recovery/lib/bin/dbg/google_update_recovery.lib
+- omaha/recovery/lib/bin/opt/google_update_recovery.lib
+- omaha/testing/unittest_support/CodeRed.crx3
+- omaha/testing/unittest_support/GoogleUpdateHelper.msi
+- omaha/testing/unittest_support/GoogleUpdate_corrupted.exe
+- omaha/testing/unittest_support/GoogleUpdate_now_expired_cert.exe
+- omaha/testing/unittest_support/GoogleUpdate_old_signature.exe
+- omaha/testing/unittest_support/LongRunning.exe
+- omaha/testing/unittest_support/LongRunningSilent.exe
+- omaha/testing/unittest_support/Omaha_1.2.x_resources/goopdateres_ar.dll
+- omaha/testing/unittest_support/Omaha_1.2.x_resources/goopdateres_bg.dll
+- omaha/testing/unittest_support/Omaha_1.2.x_resources/goopdateres_ca.dll
+- omaha/testing/unittest_support/SaveArguments.exe
+- omaha/testing/unittest_support/SaveArguments_OmahaTestSigned.exe
+- omaha/testing/unittest_support/SaveArguments_different_ou.exe
+- omaha/testing/unittest_support/SaveArguments_multiple_cn.exe
+- omaha/testing/unittest_support/SaveArguments_no_cn.exe
+- omaha/testing/unittest_support/SaveArguments_unsigned_no_resources.exe
+- omaha/testing/unittest_support/SaveArguments_unsigned_wrong_markup_size.exe
+- omaha/testing/unittest_support/SaveArguments_unsigned_wrong_markup_value.exe
+- omaha/testing/unittest_support/SaveArguments_unsigned_wrong_resource_name.exe
+- omaha/testing/unittest_support/SaveArguments_wrong_cn.exe
+- omaha/testing/unittest_support/Sha1_4c40dba5f988fae57a57d6457495f98b.exe
+- omaha/testing/unittest_support/Sha1_4c40dba5f988fae57a57d6457495f98b_and_sha2_2a9c21acaaa63a3c58a7b9322bee948d.exe
+- omaha/testing/unittest_support/chrome_certificate_09E28B26DB593EC4E73286B66499C370.dll
+- omaha/testing/unittest_support/chrome_certificate_2912C70C9A2B8A3EF6F6074662D68B8D.dll
+- omaha/testing/unittest_support/chrome_setup.exe
+- omaha/testing/unittest_support/download_cache_test/{7101D597-3481-4971-AD23-455542964072}/livelysetup.exe
+- omaha/testing/unittest_support/download_cache_test/{89640431-FE64-4da8-9860-1A1085A60E13}/gears-win32-opt.msi
+- omaha/testing/unittest_support/download_cache_test/{C5CC8735-9BE0-45c5-804C-F117E96047C7}/GoogleUpdateSetup.exe
+- omaha/testing/unittest_support/old_google_certificate.dll
+- omaha/testing/unittest_support/omaha_1.0.x/GoogleUpdate.exe
+- omaha/testing/unittest_support/omaha_1.0.x/goopdate.dll
+- omaha/testing/unittest_support/omaha_1.1.x/GoogleUpdate.exe
+- omaha/testing/unittest_support/omaha_1.1.x/goopdate.dll
+- omaha/testing/unittest_support/omaha_1.1.x/goopdateres_en.dll
+- omaha/testing/unittest_support/omaha_1.2.131.7_shell/GoogleUpdate.exe
+- omaha/testing/unittest_support/omaha_1.2.183.9_shell/GoogleUpdate.exe
+- omaha/testing/unittest_support/omaha_1.2.x/GoogleUpdate.exe
+- omaha/testing/unittest_support/omaha_1.2.x/goopdate.dll
+- omaha/testing/unittest_support/omaha_1.2.x/goopdateres_en.dll
+- omaha/testing/unittest_support/omaha_1.2.x_newer/GoogleUpdate.exe
+- omaha/testing/unittest_support/omaha_1.3.x/GoogleUpdate.exe
+- omaha/testing/unittest_support/omaha_1.3.x/goopdate.dll
+- omaha/testing/unittest_support/omaha_1.3.x/goopdateres_en.dll
+- omaha/testing/unittest_support/sha1_06aea76bac46a9e8cfe6d29e45aaf033.sys
+- omaha/testing/unittest_support/sha1_14F8FDD167F92402B1570B5DC495C815.sys
+- omaha/testing/unittest_support/sha2_0c15be4a15bb0903c901b1d6c265302f.msi
+- omaha/testing/unittest_support/sha2_0e4418e2dede36dd2974c3443afb5ce5.msi
+- omaha/testing/unittest_support/sha2_2a9c21acaaa63a3c58a7b9322bee948d.exe
+- omaha/testing/unittest_support/unsigned.crx3
+- omaha/testing/unittest_support/valid.crx2
+- omaha/testing/unittest_support/valid_no_publisher.crx3
+- omaha/testing/unittest_support/valid_publisher.crx3
+- omaha/tools/MsiTagger.exe
+- omaha/tools/resmerge.exe
+- third_party/lzma/files/7zr.exe
+- third_party/lzma/files/lzma.exe
diff --git a/.clang-format b/.clang-format
new file mode 100644
index 000000000..b73030536
--- /dev/null
+++ b/.clang-format
@@ -0,0 +1,28 @@
+# Defines the Chromium style for automatic reformatting.
+# http://clang.llvm.org/docs/ClangFormatStyleOptions.html
+BasedOnStyle: Chromium
+# This defaults to 'Auto'. Explicitly set it for a while, so that
+# 'vector<vector<int> >' in existing files gets formatted to
+# 'vector<vector<int>>'. ('Auto' means that clang-format will only use
+# 'int>>' if the file already contains at least one such instance.)
+Standard: Cpp11
+
+# Make sure code like:
+# IPC_BEGIN_MESSAGE_MAP()
+# IPC_MESSAGE_HANDLER(WidgetHostViewHost_Update, OnUpdate)
+# IPC_END_MESSAGE_MAP()
+# gets correctly indented.
+MacroBlockBegin: "^\
+BEGIN_COM_MAP|\
+BEGIN_MSG_MAP|\
+BEGIN_OBJECT_MAP|\
+BEGIN_PROP_MAP|\
+BEGIN_REGISTRY_MAP|\
+BEGIN_SERVICE_MAP$"
+MacroBlockEnd: "^\
+END_COM_MAP|\
+END_MSG_MAP|\
+END_OBJECT_MAP|\
+END_PROP_MAP|\
+END_REGISTRY_MAP|\
+END_SERVICE_MAP$"
\ No newline at end of file
diff --git a/.github/workflows/scorecard.yml b/.github/workflows/scorecard.yml
new file mode 100644
index 000000000..412e0f411
--- /dev/null
+++ b/.github/workflows/scorecard.yml
@@ -0,0 +1,73 @@
+# This workflow uses actions that are not certified by GitHub. They are provided
+# by a third-party and are governed by separate terms of service, privacy
+# policy, and support documentation.
+
+name: Scorecard supply-chain security
+on:
+ # For Branch-Protection check. Only the default branch is supported. See
+ # https://github.com/ossf/scorecard/blob/main/docs/checks.md#branch-protection
+ branch_protection_rule:
+ # To guarantee Maintained check is occasionally updated. See
+ # https://github.com/ossf/scorecard/blob/main/docs/checks.md#maintained
+ schedule:
+ - cron: '35 20 * * 0'
+ push:
+ branches: [ "main" ]
+ workflow_dispatch:
+
+# Declare default permissions as read only.
+permissions: read-all
+
+jobs:
+ analysis:
+ name: Scorecard analysis
+ runs-on: ubuntu-latest
+ permissions:
+ # Needed to upload the results to code-scanning dashboard.
+ security-events: write
+ # Needed to publish results and get a badge (see publish_results below).
+ id-token: write
+ # Uncomment the permissions below if installing in a private repository.
+ # contents: read
+ # actions: read
+
+ steps:
+ - name: "Checkout code"
+ uses: actions/checkout@v4
+ with:
+ persist-credentials: false
+
+ - name: "Run analysis"
+ uses: ossf/scorecard-action@v2.3.1
+ with:
+ results_file: results.sarif
+ results_format: sarif
+ # (Optional) "write" PAT token. Uncomment the `repo_token` line below if:
+ # - you want to enable the Branch-Protection check on a *public* repository, or
+ # - you are installing Scorecard on a *private* repository
+ # To create the PAT, follow the steps in https://github.com/ossf/scorecard-action#authentication-with-pat.
+ # repo_token: ${{ secrets.SCORECARD_TOKEN }}
+
+ # Public repositories:
+ # - Publish results to OpenSSF REST API for easy access by consumers
+ # - Allows the repository to include the Scorecard badge.
+ # - See https://github.com/ossf/scorecard-action#publishing-results.
+ # For private repositories:
+ # - `publish_results` will always be set to `false`, regardless
+ # of the value entered here.
+ publish_results: true
+
+ # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
+ # format to the repository Actions tab.
+ - name: "Upload artifact"
+ uses: actions/upload-artifact@v4
+ with:
+ name: SARIF file
+ path: results.sarif
+ retention-days: 5
+
+ # Upload the results to GitHub's code scanning dashboard.
+ - name: "Upload to code-scanning"
+ uses: github/codeql-action/upload-sarif@v3.24.10
+ with:
+ sarif_file: results.sarif
diff --git a/.gitignore b/.gitignore
index 7b66acb4a..6b4540c91 100644
--- a/.gitignore
+++ b/.gitignore
@@ -16,11 +16,9 @@
*.idb
*.pdb
*.pyc
+.vs/
.vscode/
omaha/common/omaha_customization_proxy_clsid.h
omaha/proxy_clsids.txt
-omaha/scons-out/**
-third_party/breakpad/**
-third_party/googletest/**
-third_party/libzip/**
-third_party/zlib/**
+omaha/scons-out/
+third_party/lzma/files/**/obj/
diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 000000000..08b70f182
--- /dev/null
+++ b/.gitmodules
@@ -0,0 +1,12 @@
+[submodule "third_party/breakpad"]
+ path = third_party/breakpad
+ url = https://github.com/google/breakpad.git
+[submodule "third_party/googletest"]
+ path = third_party/googletest
+ url = https://github.com/google/googletest.git
+[submodule "third_party/libzip"]
+ path = third_party/libzip
+ url = https://github.com/nih-at/libzip.git
+[submodule "third_party/zlib"]
+ path = third_party/zlib
+ url = https://github.com/madler/zlib.git
diff --git a/CHANGELOG.txt b/CHANGELOG.txt
index 2759653a0..36acb9c6c 100644
--- a/CHANGELOG.txt
+++ b/CHANGELOG.txt
@@ -1,3 +1,6 @@
+# CHANGELOG.txt is not going to be updated beyond this point
+# To see what is being changed, please inspect the git log.
+
## 2019-06-05 @247089589,251531419
### Changes
diff --git a/README.md b/README.md
index 4084e4148..44c7f70e8 100644
--- a/README.md
+++ b/README.md
@@ -2,8 +2,8 @@
## This is not an official Google product.
-Omaha is the open-source version of Google Update, a program to install requested software and keep it up to date. The Google-branded version of Omaha is used to support software patching (both background updating, and on-demand update checks) for Google Chrome, Earth, and a variety of other Google products on Windows.
+Omaha is the open-source version of Google Update, a program to install requested software and keep it up to date. The Google-branded version of Omaha is used to support software patching (both background updating, and on-demand update checks) for Google Chrome, Earth, and a variety of other Google products on Windows 7, 8, and 10.
-We know that keeping software updated is both important and hard, and so by open-sourcing this project, our hope is that perhaps we can help others solve this problem. So, if you'd like to get involved, or even use Omaha to support your own software projects, then just follow the instructions in the [Developer Setup Guide](https://github.com/google/omaha/blob/master/doc/DeveloperSetupGuide.md), and you'll be good to go!
+For a quick overview of how Omaha works, you can see [this unofficial tutorial](https://fman.io/blog/google-omaha-tutorial/). Please note that it was written by a third party so we cannot guarantee its availability, accuracy or safety.
-There is also an unofficial [tutorial](https://fman.io/blog/google-omaha-tutorial/). Please note that it was written by a third party so we cannot guarantee its availability, accuracy or safety.
+We know that keeping software updated is both important and hard, and so by open-sourcing this project, our hope is that perhaps we can help others solve this problem. So, if you'd like to get involved, or even use Omaha to support your own software projects, then just follow the instructions in the [Developer Setup Guide](https://github.com/google/omaha/blob/master/doc/DeveloperSetupGuide.md), and you'll be good to go!
diff --git a/common/certificate_tag/README b/common/certificate_tag/README
index 02007c809..2b5aa02a1 100644
--- a/common/certificate_tag/README
+++ b/common/certificate_tag/README
@@ -13,3 +13,9 @@ arbitrary data in extensions. Since they are also not hashed when verifying
signatures, that data can also be changed without invalidating it.
More details are here: http://b/12236017
+
+The tool was updated in 2020 to support MSI files: b/172261939, b/165818147.
+
+The test file is integrated from google3, but is modified here to make it
+easier to run outside of google3; see the comment near the beginning of
+certificate_tag_test.go
diff --git a/common/certificate_tag/certificate_tag.go b/common/certificate_tag/certificate_tag.go
index aba87523d..b5c29e97c 100644
--- a/common/certificate_tag/certificate_tag.go
+++ b/common/certificate_tag/certificate_tag.go
@@ -13,7 +13,7 @@
// limitations under the License.
// ========================================================================
//
-// certificate_tag.go is a tool for manipulating "tags" in Authenticode-signed,
+// Program certificate_tag manipulates "tags" in Authenticode-signed
// Windows binaries.
//
// Traditionally we have inserted tag data after the PKCS#7 blob in the file
@@ -26,7 +26,8 @@
// certificates, inserted into the PKCS#7 certificate chain, that can contain
// arbitrary data in extensions. Since they are also not hashed when verifying
// signatures, that data can also be changed without invalidating it.
-
+//
+// The tool supports PE32 exe files and MSI files.
package main
import (
@@ -247,6 +248,150 @@ func getAttributeCertificates(bin []byte) (offset, size, sizeOffset int, err err
return
}
+func lengthAsn1(asn1 []byte) (asn1Length int, err error) {
+ // Read the ASN.1 length of the object.
+ if asn1[1]&0x80 == 0 {
+ // Short form length.
+ asn1Length = int(asn1[1]) + 2
+ } else {
+ numBytes := int(asn1[1] & 0x7f)
+ if numBytes == 0 || numBytes > 2 {
+ err = fmt.Errorf("bad number of bytes in ASN.1 length: %d", numBytes)
+ return
+ }
+ if len(asn1) < numBytes+2 {
+ err = errors.New("ASN.1 structure truncated")
+ return
+ }
+ asn1Length = int(asn1[2])
+ if numBytes == 2 {
+ asn1Length <<= 8
+ asn1Length |= int(asn1[3])
+ }
+ asn1Length += 2 + numBytes
+ }
+ return
+}
+
+func parseSignedData(asn1Data []byte) (*signedData, error) {
+ var signedData signedData
+ if _, err := asn1.Unmarshal(asn1Data, &signedData); err != nil {
+ return nil, errors.New("authenticodetag: error while parsing SignedData structure: " + err.Error())
+ }
+
+ der, err := asn1.Marshal(signedData)
+ if err != nil {
+ return nil, errors.New("authenticodetag: error while marshaling SignedData structure: " + err.Error())
+ }
+
+ if !bytes.Equal(der, asn1Data) {
+ return nil, errors.New("authenticodetag: ASN.1 parse/unparse test failed")
+ }
+ return &signedData, nil
+}
+
+func getSuperfluousCert(signedData *signedData) (cert *x509.Certificate, index int, err error) {
+ n := len(signedData.PKCS7.Certs)
+ if n == 0 {
+ return nil, -1, nil
+ }
+
+ for index, certASN1 := range signedData.PKCS7.Certs {
+ if cert, err = x509.ParseCertificate(certASN1.FullBytes); err != nil {
+ return nil, -1, err
+ }
+
+ for _, ext := range cert.Extensions {
+ if !ext.Critical && ext.Id.Equal(oidChromeTag) {
+ return cert, index, nil
+ }
+ }
+ }
+
+ return nil, -1, nil
+}
+
+// SetSuperfluousCertTag modifies signedData, adding the superfluous cert with the given tag.
+// It returns the asn1 serialization of the modified signedData.
+func SetSuperfluousCertTag(signedData *signedData, tag []byte) ([]byte, error) {
+ cert, index, err := getSuperfluousCert(signedData)
+ if err != nil {
+ return nil, fmt.Errorf("couldn't identity if any existing certificates are superfluous because of parse error: %w", err)
+ }
+
+ if cert != nil {
+ pkcs7 := &signedData.PKCS7
+ certs := pkcs7.Certs
+
+ var newCerts []asn1.RawValue
+ newCerts = append(newCerts, certs[:index]...)
+ newCerts = append(newCerts, certs[index+1:]...)
+ pkcs7.Certs = newCerts
+ }
+
+ notBefore := parseUnixTimeOrDie(notBeforeTime)
+ notAfter := parseUnixTimeOrDie(notAfterTime)
+
+ priv, err := rsa.GenerateKey(rand.Reader, rsaKeyBits)
+ if err != nil {
+ return nil, err
+ }
+
+ issuerTemplate := x509.Certificate{
+ SerialNumber: new(big.Int).SetInt64(1),
+ Subject: pkix.Name{
+ CommonName: "Unknown issuer",
+ },
+ NotBefore: notBefore,
+ NotAfter: notAfter,
+ KeyUsage: x509.KeyUsageCertSign,
+ ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageAny},
+ SignatureAlgorithm: x509.SHA1WithRSA,
+ BasicConstraintsValid: true,
+ IsCA: true,
+ }
+
+ template := x509.Certificate{
+ SerialNumber: new(big.Int).SetInt64(1),
+ Subject: pkix.Name{
+ CommonName: "Dummy certificate",
+ },
+ Issuer: pkix.Name{
+ CommonName: "Unknown issuer",
+ },
+ NotBefore: notBefore,
+ NotAfter: notAfter,
+ KeyUsage: x509.KeyUsageCertSign,
+ ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageAny},
+ SignatureAlgorithm: x509.SHA1WithRSA,
+ BasicConstraintsValid: true,
+ IsCA: false,
+ ExtraExtensions: []pkix.Extension{
+ {
+ // This includes the tag in an extension in the
+ // certificate.
+ Id: oidChromeTag,
+ Value: tag,
+ },
+ },
+ }
+
+ derBytes, err := x509.CreateCertificate(rand.Reader, &template, &issuerTemplate, &priv.PublicKey, priv)
+ if err != nil {
+ return nil, err
+ }
+
+ signedData.PKCS7.Certs = append(signedData.PKCS7.Certs, asn1.RawValue{
+ FullBytes: derBytes,
+ })
+
+ asn1Bytes, err := asn1.Marshal(*signedData)
+ if err != nil {
+ return nil, err
+ }
+ return asn1Bytes, nil
+}
+
// Certificate constants. See
// http://msdn.microsoft.com/en-us/library/ms920091.aspx.
const (
@@ -293,29 +438,10 @@ func processAttributeCertificates(certs []byte) (asn1, appendedTag []byte, err e
return
}
- // Read the ASN.1 length of the object.
- var asn1Length int
- if asn1[1]&0x80 == 0 {
- // Short form length.
- asn1Length = int(asn1[1]) + 2
- } else {
- numBytes := int(asn1[1] & 0x7f)
- if numBytes == 0 || numBytes > 2 {
- err = fmt.Errorf("bad number of bytes in ASN.1 length: %d", numBytes)
- return
- }
- if len(asn1) < numBytes+2 {
- err = errors.New("ASN.1 structure truncated")
- return
- }
- asn1Length = int(asn1[2])
- if numBytes == 2 {
- asn1Length <<= 8
- asn1Length |= int(asn1[3])
- }
- asn1Length += 2 + numBytes
+ asn1Length, err := lengthAsn1(asn1)
+ if err != nil {
+ return
}
-
appendedTag = asn1[asn1Length:]
asn1 = asn1[:asn1Length]
@@ -334,19 +460,44 @@ type signedData struct {
} `asn1:"explicit,tag:0"`
}
-// Binary represents a PE binary.
-type Binary struct {
+// Binary represents a taggable binary of any format.
+type Binary interface {
+ AppendedTag() (data []byte, ok bool)
+ asn1Data() []byte
+ buildBinary(asn1Data, tag []byte) ([]byte, error) // the tag argument is a legacy-style tag.
+ RemoveAppendedTag() (contents []byte, err error)
+ SetAppendedTag(tagContents []byte) (contents []byte, err error)
+ getSuperfluousCert() (cert *x509.Certificate, index int, err error)
+ SetSuperfluousCertTag(tag []byte) (contents []byte, err error)
+ certificateOffset() int64
+}
+
+// PE32Binary represents a PE binary.
+type PE32Binary struct {
contents []byte // the full file
attrCertOffset int // the offset to the attribute certificates table
certSizeOffset int // the offset to the size of the attribute certificates table
- asn1Data []byte // the PKCS#7, SignedData in DER form.
+ asn1Bytes []byte // the PKCS#7, SignedData in DER form.
appendedTag []byte // the appended tag, if any.
- signedData *signedData // the parsed, SignedData structure.
+ signedData *signedData // the parsed SignedData structure.
+}
+
+// NewBinary returns a Binary that contains details of the PE32 or MSI binary given in |contents|.
+// |contents| is modified if it is an MSI file.
+func NewBinary(contents []byte) (Binary, error) {
+ pe, peErr := NewPE32Binary(contents)
+ if peErr == nil {
+ return pe, peErr
+ }
+ msi, msiErr := NewMSIBinary(contents)
+ if msiErr == nil {
+ return msi, msiErr
+ }
+ return nil, errors.New("Could not parse input as either PE32 or MSI:\nPE32: " + peErr.Error() + "\nMSI: " + msiErr.Error())
}
-// NewBinary returns a Binary that contains details of the PE binary given in
-// contents.
-func NewBinary(contents []byte) (*Binary, error) {
+// NewPE32Binary returns a Binary that contains details of the PE32 binary given in contents.
+func NewPE32Binary(contents []byte) (*PE32Binary, error) {
offset, size, certSizeOffset, err := getAttributeCertificates(contents)
if err != nil {
return nil, errors.New("authenticodetag: error parsing headers: " + err.Error())
@@ -358,32 +509,31 @@ func NewBinary(contents []byte) (*Binary, error) {
return nil, errors.New("authenticodetag: error parsing attribute certificate section: " + err.Error())
}
- var signedData signedData
- if _, err := asn1.Unmarshal(asn1Data, &signedData); err != nil {
- return nil, errors.New("authenticodetag: error while parsing SignedData structure: " + err.Error())
- }
-
- der, err := asn1.Marshal(signedData)
+ signedData, err := parseSignedData(asn1Data)
if err != nil {
- return nil, errors.New("authenticodetag: error while marshaling SignedData structure: " + err.Error())
- }
-
- if !bytes.Equal(der, asn1Data) {
- return nil, errors.New("authenticodetag: ASN.1 parse/unparse test failed: " + err.Error())
+ return nil, err
}
- return &Binary{
+ return &PE32Binary{
contents: contents,
attrCertOffset: offset,
certSizeOffset: certSizeOffset,
- asn1Data: asn1Data,
+ asn1Bytes: asn1Data,
appendedTag: appendedTag,
- signedData: &signedData,
+ signedData: signedData,
}, nil
}
+func (bin *PE32Binary) certificateOffset() int64 {
+ return int64(bin.attrCertOffset)
+}
+
+func (bin *PE32Binary) asn1Data() []byte {
+ return bin.asn1Bytes
+}
+
// AppendedTag returns the appended tag, if any.
-func (bin *Binary) AppendedTag() (data []byte, ok bool) {
+func (bin *PE32Binary) AppendedTag() (data []byte, ok bool) {
isAllZero := true
for _, b := range bin.appendedTag {
if b != 0 {
@@ -400,7 +550,7 @@ func (bin *Binary) AppendedTag() (data []byte, ok bool) {
// buildBinary builds a PE binary based on bin but with the given SignedData
// and appended tag.
-func (bin *Binary) buildBinary(asn1Data, tag []byte) (contents []byte) {
+func (bin *PE32Binary) buildBinary(asn1Data, tag []byte) (contents []byte, err error) {
contents = append(contents, bin.contents[:bin.certSizeOffset]...)
for (len(asn1Data)+len(tag))&7 > 0 {
tag = append(tag, 0)
@@ -417,42 +567,35 @@ func (bin *Binary) buildBinary(asn1Data, tag []byte) (contents []byte) {
binary.LittleEndian.PutUint16(header[6:], attributeCertificateTypePKCS7SignedData)
contents = append(contents, header[:]...)
contents = append(contents, asn1Data...)
- return append(contents, tag...)
+ return append(contents, tag...), nil
}
-func (bin *Binary) RemoveAppendedTag() (contents []byte, err error) {
+// RemoveAppendedTag removes a legacy-style tag from the end of the signedData container.
+func (bin *PE32Binary) RemoveAppendedTag() (contents []byte, err error) {
if _, ok := bin.AppendedTag(); !ok {
return nil, errors.New("authenticodetag: no appended tag found")
}
- return bin.buildBinary(bin.asn1Data, nil), nil
+ return bin.buildBinary(bin.asn1Data(), nil)
}
-func (bin *Binary) SetAppendedTag(tagContents []byte) (contents []byte, err error) {
- return bin.buildBinary(bin.asn1Data, tagContents), nil
+// SetAppendedTag adds a legacy-style tag at the end of the signedData container.
+func (bin *PE32Binary) SetAppendedTag(tagContents []byte) (contents []byte, err error) {
+ return bin.buildBinary(bin.asn1Data(), tagContents)
}
// oidChromeTag is an OID that we use for the extension in the superfluous
// certificate. It's in the Google arc, but not officially assigned.
var oidChromeTag = asn1.ObjectIdentifier([]int{1, 3, 6, 1, 4, 1, 11129, 2, 1, 9999})
-func (bin *Binary) getSuperfluousCert() (cert *x509.Certificate, err error) {
- n := len(bin.signedData.PKCS7.Certs)
- if n == 0 {
- return nil, nil
- }
-
- if cert, err = x509.ParseCertificate(bin.signedData.PKCS7.Certs[n-1].FullBytes); err != nil {
- return nil, err
- }
+// oidChromeTagSearchBytes is used to find the final location of the tag buffer.
+// This is followed by the 2-byte length of the buffer, and then the buffer itself.
+// x060b - OID and length; 11 bytes of OID; x0482 - Octet string, 2-byte length prefix.
+// (In practice our tags are 8206 bytes, so the size fits in two bytes.)
+var oidChromeTagSearchBytes = []byte{0x06, 0x0b, 0x2b, 0x06, 0x01, 0x04, 0x01, 0xd6, 0x79, 0x02, 0x01, 0xce, 0x0f, 0x04, 0x82}
- for _, ext := range cert.Extensions {
- if !ext.Critical && ext.Id.Equal(oidChromeTag) {
- return cert, nil
- }
- }
-
- return nil, nil
+func (bin *PE32Binary) getSuperfluousCert() (cert *x509.Certificate, index int, err error) {
+ return getSuperfluousCert(bin.signedData)
}
func parseUnixTimeOrDie(unixTime string) time.Time {
@@ -465,75 +608,610 @@ func parseUnixTimeOrDie(unixTime string) time.Time {
// SetSuperfluousCertTag returns a PE binary based on bin, but where the
// superfluous certificate contains the given tag data.
-func (bin *Binary) SetSuperfluousCertTag(tag []byte) (contents []byte, err error) {
- cert, err := bin.getSuperfluousCert()
- if cert != nil {
- pkcs7 := &bin.signedData.PKCS7
- pkcs7.Certs = pkcs7.Certs[:len(pkcs7.Certs)-1]
+// The (parsed) bin.signedData is modified; but bin.asn1Bytes, which contains
+// the raw original bytes, is not.
+func (bin *PE32Binary) SetSuperfluousCertTag(tag []byte) (contents []byte, err error) {
+ asn1Bytes, err := SetSuperfluousCertTag(bin.signedData, tag)
+ if err != nil {
+ return nil, err
}
- notBefore := parseUnixTimeOrDie(notBeforeTime)
- notAfter := parseUnixTimeOrDie(notAfterTime)
+ return bin.buildBinary(asn1Bytes, bin.appendedTag)
+}
- priv, err := rsa.GenerateKey(rand.Reader, rsaKeyBits)
+// Variables now defined as secT and offT were initially hardcoded as |int| for simplicity,
+// but this produced errors when run on a Windows machine, which defaulted to a 32-bit arch.
+// See b/172261939.
+
+// secT is the type of a sector ID, or an index into the FAT (which describes what is in
+// that sector), or a number of sectors.
+type secT uint32
+
+// offT is the type of an offset into the MSI file contents, or a number of bytes.
+type offT uint64
+
+// MSIBinary represents an MSI binary.
+// |headerBytes| and |contents| are non-overlapping slices of the same backing array.
+type MSIBinary struct {
+ headerBytes []byte // the header (512 bytes).
+ header *MSIHeader // the parsed msi header.
+ sector SectorFormat // sector parameters.
+ contents []byte // the file content (no header), with SignedData removed.
+ sigDirOffset offT // the offset of the signedData stream directory in |contents|.
+ sigDirEntry *MSIDirEntry // the parsed contents of the signedData stream directory.
+ signedDataBytes []byte // the PKCS#7, SignedData in asn1 DER form.
+ signedData *signedData // the parsed SignedData structure.
+ fatEntries []secT // a copy of the FAT entries in one list.
+ difatEntries []secT // a copy of the DIFAT entries in one list.
+ difatSectors []secT // a list of the dedicated DIFAT sectors (if any), for convenience.
+}
+
+// MSIHeader represents a parsed MSI header.
+type MSIHeader struct {
+ Magic [8]byte
+ Clsid [16]byte
+ MinorVersion uint16
+ DllVersion uint16
+ ByteOrder uint16
+ SectorShift uint16
+ MiniSectorShift uint16
+ Reserved [6]byte
+ NumDirSectors uint32
+ NumFatSectors uint32
+ FirstDirSector uint32
+ TransactionSignatureNumber uint32
+ MiniStreamCutoffSize uint32
+ FirstMiniFatSector uint32
+ NumMiniFatSectors uint32
+ FirstDifatSector uint32
+ NumDifatSectors uint32
+}
+
+// MSIDirEntry represents a parsed MSI directory entry for a stream.
+type MSIDirEntry struct {
+ Name [64]byte
+ NumNameBytes uint16
+ ObjectType uint8
+ ColorFlag uint8
+ Left uint32
+ Right uint32
+ Child uint32
+ Clsid [16]byte
+ StateFlags uint32
+ CreateTime uint64
+ ModifyTime uint64
+ StreamFirstSector uint32
+ StreamSize uint64
+}
+
+// SectorFormat represents parameters of an MSI file sector.
+type SectorFormat struct {
+ Size offT // the size of a sector in bytes; 512 for dll v3 and 4096 for v4.
+ Ints int // the number of int32s in a sector.
+}
+
+const (
+ numHeaderContentBytes = 76
+ numHeaderTotalBytes = 512
+ numDifatHeaderEntries = 109
+ numDirEntryBytes = 128
+ miniStreamSectorSize = 64
+ miniStreamCutoffSize = 4096
+ // Constants and names from https://docs.microsoft.com/en-us/openspecs/windows_protocols/ms-cfb/
+ fatFreesect = 0xFFFFFFFF // An unallocated sector (used in the FAT or DIFAT).
+ fatEndofchain = 0xFFFFFFFE // End of a linked chain (in the FAT); or end of DIFAT sector chain.
+ fatFatsect = 0xFFFFFFFD // A FAT sector (used in the FAT).
+ fatDifsect = 0xFFFFFFFC // A DIFAT sector (used in the FAT).
+ fatReserved = 0xFFFFFFFB // Reserved value.
+)
+
+func newSectorFormat(sectorShift uint16) (format SectorFormat, err error) {
+ sectorSize := offT(1) << sectorShift
+ if sectorSize != 4096 && sectorSize != 512 {
+ return format, fmt.Errorf("unexpected msi sector shift, wanted sector size 4096 or 512, got %d", sectorSize)
+ }
+ return SectorFormat{
+ Size: sectorSize,
+ Ints: int(sectorSize / 4),
+ }, nil
+}
+
+// isLastInSector returns whether the index into difatEntries corresponds to the last entry in
+// a sector.
+//
+// The last entry in each difat sector is a pointer to the next difat sector.
+// (Or is an end-of-chain marker.)
+// This does not apply to the last entry stored in the MSI header.
+func (format SectorFormat) isLastInSector(index int) bool {
+ return index > numDifatHeaderEntries && (index-numDifatHeaderEntries+1)%format.Ints == 0
+}
+
+// readStream reads the stream starting at the given start sector. The name is optional,
+// it is only used for error reporting.
+func (bin *MSIBinary) readStream(name string, start secT, streamSize offT, forceFAT, freeData bool) (stream []byte, err error) {
+ var sectorSize offT
+ var fatEntries []secT // May be FAT or mini FAT.
+ var contents []byte // May be file contents or mini stream.
+ if forceFAT || streamSize >= miniStreamCutoffSize {
+ fatEntries = bin.fatEntries
+ contents = bin.contents
+ sectorSize = bin.sector.Size
+ } else {
+ // Load the mini FAT.
+ s, err := bin.readStream("mini FAT", secT(bin.header.FirstMiniFatSector), offT(bin.header.NumMiniFatSectors)*bin.sector.Size, true, false)
+ if err != nil {
+ return nil, err
+ }
+ for offset := 0; offset < len(s); offset += 4 {
+ fatEntries = append(fatEntries, secT(binary.LittleEndian.Uint32(s[offset:])))
+ }
+ // Load the mini stream. (root directory's stream, root must be dir entry zero)
+ root := &MSIDirEntry{}
+ offset := offT(bin.header.FirstDirSector) * bin.sector.Size
+ binary.Read(bytes.NewBuffer(bin.contents[offset:]), binary.LittleEndian, root)
+ contents, err = bin.readStream("mini stream", secT(root.StreamFirstSector), offT(root.StreamSize), true, false)
+ if err != nil {
+ return nil, err
+ }
+ sectorSize = miniStreamSectorSize
+ }
+ sector := start
+ size := streamSize
+ for size > 0 {
+ if sector == fatEndofchain || sector == fatFreesect {
+ return nil, fmt.Errorf("msi readStream: ran out of sectors in copying stream %q", name)
+ }
+ n := size
+ if n > sectorSize {
+ n = sectorSize
+ }
+ offset := sectorSize * offT(sector)
+ stream = append(stream, contents[offset:offset+n]...)
+ size -= n
+
+ // Zero out the existing stream bytes, if requested.
+ // For example, new signedData will be written at the end of
+ // the file (which may be where the existing stream is, but this works regardless).
+ // The stream bytes could be left as unused junk, but unused bytes in an MSI file are
+ // typically zeroed.
+
+ // Set the data in the sector to zero.
+ if freeData {
+ for i := offT(0); i < n; i++ {
+ contents[offset+i] = 0
+ }
+ }
+ // Find the next sector, then free the FAT entry of the current sector.
+ old := sector
+ sector = fatEntries[sector]
+ if freeData {
+ fatEntries[old] = fatFreesect
+ }
+ }
+ return stream, nil
+}
+
+// Parse-time functionality is broken out into populate*() methods for clarity.
+
+// populateFatEntries does what it says and should only be called from NewMSIBinary().
+func (bin *MSIBinary) populateFatEntries() error {
+ var fatEntries []secT
+ for i, sector := range bin.difatEntries {
+ // The last entry in a difat sector is a chaining entry.
+ isLastInSector := bin.sector.isLastInSector(i)
+ if sector == fatFreesect || sector == fatEndofchain || isLastInSector {
+ continue
+ }
+ offset := offT(sector) * bin.sector.Size
+ for i := 0; i < bin.sector.Ints; i++ {
+ fatEntries = append(fatEntries, secT(binary.LittleEndian.Uint32(bin.contents[offset+offT(i)*4:])))
+ }
+ }
+ bin.fatEntries = fatEntries
+ return nil
+}
+
+// populateDifatEntries does what it says and should only be called from NewMSIBinary().
+func (bin *MSIBinary) populateDifatEntries() error {
+ // Copy the difat entries and make a list of difat sectors (if any).
+ // The first 109 difat entries must exist and are read from the MSI header, the rest come from
+ // optional additional sectors.
+ difatEntries := make([]secT, numDifatHeaderEntries, numDifatHeaderEntries+int(bin.header.NumDifatSectors)*bin.sector.Ints)
+ for i := 0; i < numDifatHeaderEntries; i++ {
+ difatEntries[i] = secT(binary.LittleEndian.Uint32(bin.headerBytes[numHeaderContentBytes+i*4:]))
+ }
+
+ // Code (here and elsewhere) that manages additional difat sectors probably won't run in prod,
+ // but is implemented to avoid a hidden scaling limit.
+ // (109 difat sector entries) x (1024 fat sector entries/difat sector) x (4096 bytes/ fat sector)
+ // => files up to ~457 MB in size don't require additional difat sectors.
+ var difatSectors []secT
+ for i := 0; i < int(bin.header.NumDifatSectors); i++ {
+ var sector secT
+ if i == 0 {
+ sector = secT(bin.header.FirstDifatSector)
+ } else {
+ sector = difatEntries[len(difatEntries)-1]
+ }
+ difatSectors = append(difatSectors, sector)
+ start := offT(sector) * bin.sector.Size
+ for j := 0; j < bin.sector.Ints; j++ {
+ difatEntries = append(difatEntries, secT(binary.LittleEndian.Uint32(bin.contents[start+offT(j)*4:])))
+ }
+ }
+ bin.difatEntries = difatEntries
+ bin.difatSectors = difatSectors
+ return nil
+}
+
+var (
+ // signatureName is the MSI directory-entry name of the Authenticode
+ // signature stream: UTF-16LE for "\05DigitalSignature" (the leading 0x05
+ // marks it as a special stream), including the trailing NUL.
+ signatureName = []byte{0x05, 0x00, 0x44, 0x00, 0x69, 0x00, 0x67, 0x00, 0x69, 0x00, 0x74, 0x00, 0x61, 0x00, 0x6c, 0x00, 0x53, 0x00, 0x69, 0x00, 0x67, 0x00, 0x6e, 0x00, 0x61, 0x00, 0x74, 0x00, 0x75, 0x00, 0x72, 0x00, 0x65, 0x00, 0x00, 0x00}
+)
+
+// signedDataDirFromSector returns the directory entry for the signedData stream,
+// if it exists in the given sector.
+func (bin *MSIBinary) signedDataDirFromSector(dirSector secT) (sigDirEntry *MSIDirEntry, offset offT, found bool) {
+ sigDirEntry = &MSIDirEntry{}
+ // Fixed 128 byte directory entry size.
+ for i := offT(0); i < bin.sector.Size/numDirEntryBytes; i++ {
+ offset = offT(dirSector)*bin.sector.Size + i*numDirEntryBytes
+ binary.Read(bytes.NewBuffer(bin.contents[offset:]), binary.LittleEndian, sigDirEntry)
+ if bytes.Equal(sigDirEntry.Name[:sigDirEntry.NumNameBytes], signatureName) {
+ return sigDirEntry, offset, true
+ }
+ }
+ return
+}
+
+// populateSignatureDirEntry does what it says and should only be called from NewMSIBinary().
+func (bin *MSIBinary) populateSignatureDirEntry() error {
+ dirSector := secT(bin.header.FirstDirSector)
+ for {
+ if sigDirEntry, sigDirOffset, found := bin.signedDataDirFromSector(dirSector); found {
+ bin.sigDirEntry = sigDirEntry
+ bin.sigDirOffset = sigDirOffset
+ return nil
+ }
+ // Did not find the entry, go to the next directory sector.
+ // This is run on MSIs that Google creates, so don't worry about a malicious infinite loop
+ // in the entries.
+ dirSector = bin.fatEntries[dirSector]
+ if dirSector == fatEndofchain {
+ return errors.New("did not find signature stream in MSI file")
+ }
+ }
+}
+
+// populateSignedData does what it says and should only be called from NewMSIBinary().
+func (bin *MSIBinary) populateSignedData() (err error) {
+ sector := secT(bin.sigDirEntry.StreamFirstSector)
+ size := offT(bin.sigDirEntry.StreamSize)
+ if bin.header.DllVersion == 3 {
+ size = size & 0x7FFFFFFF
+ }
+ stream, err := bin.readStream("signedData", sector, size, false, true)
if err != nil {
- return nil, err
+ return err
+ }
+ bin.signedDataBytes = stream
+ bin.signedData, err = parseSignedData(bin.signedDataBytes)
+ if err != nil {
+ return err
}
+ return nil
+}
- issuerTemplate := x509.Certificate{
- SerialNumber: new(big.Int).SetInt64(1),
- Subject: pkix.Name{
- CommonName: "Unknown issuer",
- },
- NotBefore: notBefore,
- NotAfter: notAfter,
- KeyUsage: x509.KeyUsageCertSign,
- ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageAny},
- SignatureAlgorithm: x509.SHA1WithRSA,
- BasicConstraintsValid: true,
- IsCA: true,
+var (
+ msiHeaderSignature = []byte{0xd0, 0xcf, 0x11, 0xe0, 0xa1, 0xb1, 0x1a, 0xe1}
+ msiHeaderClsid = make([]byte, 16)
+)
+
+// NewMSIBinary returns a Binary that contains details of the MSI binary given in |contents|.
+// |contents| is modified; the region occupied by the cert section is zeroed out.
+func NewMSIBinary(fileContents []byte) (*MSIBinary, error) {
+ // Parses the MSI header, the directory entry for the SignedData, and the SignedData itself.
+ // Makes copies of the list of FAT and DIFAT entries, for easier manipulation.
+ // Zeroes out the SignedData stream in |contents|, as it may move.
+ // When writing, the elements: (header, dir entry, SignedData, FAT and DIFAT entries)
+ // are considered dirty (modified), and written back into fileContents.
+ if len(fileContents) < numHeaderTotalBytes {
+ return nil, fmt.Errorf("msi file is too short to contain header, want >= %d bytes got %d bytes", numHeaderTotalBytes, len(fileContents))
}
- template := x509.Certificate{
- SerialNumber: new(big.Int).SetInt64(1),
- Subject: pkix.Name{
- CommonName: "Dummy certificate",
- },
- Issuer: pkix.Name{
- CommonName: "Unknown issuer",
- },
- NotBefore: notBefore,
- NotAfter: notAfter,
- KeyUsage: x509.KeyUsageCertSign,
- ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageAny},
- SignatureAlgorithm: x509.SHA1WithRSA,
- BasicConstraintsValid: true,
- IsCA: false,
- ExtraExtensions: []pkix.Extension{
- {
- // This includes the tag in an extension in the
- // certificate.
- Id: oidChromeTag,
- Value: tag,
- },
- },
+ // Parse the header.
+ headerBytes := fileContents[:numHeaderTotalBytes]
+ var header MSIHeader
+ binary.Read(bytes.NewBuffer(headerBytes[:numHeaderContentBytes]), binary.LittleEndian, &header)
+ if !bytes.Equal(header.Magic[:], msiHeaderSignature) || !bytes.Equal(header.Clsid[:], msiHeaderClsid) {
+ return nil, fmt.Errorf("msi file is not an msi file: either the header signature is missing or the clsid is not zero as required")
}
- derBytes, err := x509.CreateCertificate(rand.Reader, &template, &issuerTemplate, &priv.PublicKey, priv)
+ format, err := newSectorFormat(header.SectorShift)
if err != nil {
return nil, err
}
+ if offT(len(fileContents)) < format.Size {
+ return nil, fmt.Errorf("msi file is too short to contain a full header sector, want >= %d bytes got %d bytes", format.Size, len(fileContents))
+ }
+ contents := fileContents[format.Size:]
- bin.signedData.PKCS7.Certs = append(bin.signedData.PKCS7.Certs, asn1.RawValue{
- FullBytes: derBytes,
- })
+ bin := &MSIBinary{
+ headerBytes: headerBytes,
+ header: &header,
+ sector: format,
+ contents: contents,
+ }
+
+ // The difat entries must be populated before the fat entries.
+ if err := bin.populateDifatEntries(); err != nil {
+ return nil, err
+ }
+ if err := bin.populateFatEntries(); err != nil {
+ return nil, err
+ }
+ // The signature dir entry must be populated before the signed data.
+ if err := bin.populateSignatureDirEntry(); err != nil {
+ return nil, err
+ }
+ if err := bin.populateSignedData(); err != nil {
+ return nil, err
+ }
+ return bin, nil
+}
+
+// firstFreeFatEntry returns the index of the first free entry at the end of a slice of fat entries.
+// It returns one past the end of list if there are no free entries at the end.
+// firstFreeFatEntry returns the index of the first free entry at the end of a slice of fat entries.
+// It returns one past the end of list if there are no free entries at the end,
+// and 0 if every entry is free (or the slice is empty).
+func firstFreeFatEntry(entries []secT) secT {
+ firstFreeIndex := secT(len(entries))
+ // Bound the scan at 0: without the guard an empty or entirely-free slice
+ // would index entries[-1] and panic.
+ for firstFreeIndex > 0 && entries[firstFreeIndex-1] == fatFreesect {
+ firstFreeIndex--
+ }
+ return firstFreeIndex
+}
+
+// firstFreeFatEntry returns the index of the first free FAT entry at the end
+// of bin's FAT list (one past the end if none are free).
+func (bin *MSIBinary) firstFreeFatEntry() secT {
+ return firstFreeFatEntry(bin.fatEntries)
+}
+
+// ensureFreeFatEntries ensures there are at least n free entries at the end of the FAT list,
+// and returns the first free entry.
+//
+// The bin.fatEntry slice may be modified, any local references to the slice are invalidated.
+// bin.fatEntry elements may be assigned, so any local references to entries (such as the
+// first free index) are also invalidated.
+// The function is re-entrant.
+func (bin *MSIBinary) ensureFreeFatEntries(n secT) secT {
+ sizeFat := secT(len(bin.fatEntries))
+ firstFreeIndex := bin.firstFreeFatEntry() // Is past end of slice if there are no free entries.
+ if sizeFat-firstFreeIndex >= n {
+ // Nothing to do, there were already enough free sectors.
+ return firstFreeIndex
+ }
+ // Append another FAT sector.
+ for i := 0; i < bin.sector.Ints; i++ {
+ bin.fatEntries = append(bin.fatEntries, fatFreesect)
+ }
+ // firstFreeIndex is free; assign it to the created FAT sector.
+ // (Do not change the order of these calls; assignDifatEntry() could invalidate firstFreeIndex.)
+ bin.fatEntries[firstFreeIndex] = fatFatsect
+ bin.assignDifatEntry(firstFreeIndex)
+
+ // Update the MSI header.
+ bin.header.NumFatSectors++
+
+ // If n is large enough, it's possible adding an additional sector was insufficient.
+ // This won't happen for our use case; but the call to verify or fix it is cheap.
+ // (Recursive call; assignDifatEntry() above may itself have consumed FAT
+ // entries, so re-check rather than assume one new sector sufficed.)
+ bin.ensureFreeFatEntries(n)
+
+ // Recompute: the slice and its free region may have moved during recursion.
+ return bin.firstFreeFatEntry()
+}
+
+// assignDifatEntries assigns an entry (the sector# of a FAT sector) to the end of the difat list.
+//
+// The bin.fatEntry slice may be modified, any local references to the slice are invalidated.
+// bin.fatEntry elements may be assigned, so any local references to entries (such as the
+// first free index) are also invalidated.
+func (bin *MSIBinary) assignDifatEntry(fatSector secT) {
+ // Guarantee a free slot exists before searching for it.
+ bin.ensureFreeDifatEntry()
+ // Find first free entry at end of list.
+ i := len(bin.difatEntries) - 1
+
+ // If there are sectors, i could be pointing to a fatEndofchain marker, but in that case
+ // it is guaranteed (by ensureFreeDifatEntry()) that the prior element is a free sector,
+ // and the following loop works.
+
+ // As long as the prior element is a free sector, decrement i.
+ // If the prior element is at the end of a difat sector, skip over it.
+ for bin.difatEntries[i-1] == fatFreesect ||
+ (bin.sector.isLastInSector(i-1) && bin.difatEntries[i-2] == fatFreesect) {
+ i--
+ }
+ // i now indexes the first free (non-chaining) slot; record the FAT sector.
+ bin.difatEntries[i] = fatSector
+}
+
+// ensureFreeDifatEntry ensures there is at least one free entry at the end of the DIFAT list.
+//
+// The bin.fatEntry slice may be modified, any local references to the slice are invalidated.
+// bin.fatEntry elements may be assigned, so any local references to entries (such as the
+// first free index) are also invalidated.
+func (bin *MSIBinary) ensureFreeDifatEntry() {
+ // By construction, difatEntries is at least numDifatHeaderEntries (109) long.
+ i := len(bin.difatEntries) - 1
+ // Skip a trailing end-of-chain marker (present when difat sectors exist).
+ if bin.difatEntries[i] == fatEndofchain {
+ i--
+ }
+ if bin.difatEntries[i] == fatFreesect {
+ return // There is at least one free entry.
+ }
+
+ // Remember where the old tail was, so the chaining pointer can be written
+ // there after the slice grows.
+ oldDifatTail := len(bin.difatEntries) - 1
+
+ // Allocate another sector of difat entries.
+ for i := 0; i < bin.sector.Ints; i++ {
+ bin.difatEntries = append(bin.difatEntries, fatFreesect)
+ }
+ // The last entry of every difat sector is a chaining entry; terminate it.
+ bin.difatEntries[len(bin.difatEntries)-1] = fatEndofchain
+
+ // Assign the new difat sector in the FAT.
+ // (Re-entrant call into ensureFreeFatEntries(); it may in turn call back
+ // into this method via assignDifatEntry.)
+ sector := bin.ensureFreeFatEntries(1)
+ bin.fatEntries[sector] = fatDifsect
+
+ // Assign the "next sector" pointer in the previous sector or header.
+ if bin.header.NumDifatSectors == 0 {
+ bin.header.FirstDifatSector = uint32(sector)
+ } else {
+ bin.difatEntries[oldDifatTail] = sector
+ }
+ bin.header.NumDifatSectors++
+ bin.difatSectors = append(bin.difatSectors, sector) // A helper slice.
+}
- asn1Bytes, err := asn1.Marshal(*bin.signedData)
+// AppendedTag is not supported for MSI files.
+// AppendedTag is not supported for MSI files; it always returns (nil, false).
+func (bin *MSIBinary) AppendedTag() (data []byte, ok bool) {
+ return nil, false
+}
+
+// asn1Data returns the raw bytes of the original signedData stream as read
+// from the file (not re-serialized from the parsed form).
+func (bin *MSIBinary) asn1Data() []byte {
+ return bin.signedDataBytes
+}
+
+// buildBinary builds an MSI binary based on bin but with the given SignedData and appended tag.
+// Appended tag is not supported for MSI.
+// buildBinary may add free sectors to |bin|, but otherwise does not modify it.
+// buildBinary builds an MSI binary based on bin but with the given SignedData and appended tag.
+// Appended tag is not supported for MSI.
+// buildBinary may add free sectors to |bin|, but otherwise does not modify it.
+func (bin *MSIBinary) buildBinary(signedData, tag []byte) ([]byte, error) {
+ if len(tag) > 0 {
+ return nil, errors.New("appended tags not supported in MSI files")
+ }
+ // Writing to the mini FAT is not supported, so the new stream must be at
+ // least the regular-stream cutoff size.
+ // Fix: report the actual cutoff in the message; previously it printed
+ // len(signedData) as the threshold, making the error self-contradictory.
+ if len(signedData) < miniStreamCutoffSize {
+ return nil, fmt.Errorf("writing SignedData less than %d bytes is not supported, got %d bytes", miniStreamCutoffSize, len(signedData))
+ }
+ // Ensure enough free FAT entries for the signedData.
+ numSignedDataSectors := secT((offT(len(signedData))-1)/bin.sector.Size) + 1
+ firstSignedDataSector := bin.ensureFreeFatEntries(numSignedDataSectors)
+
+ // Allocate sectors for the signedData, in a copy of the FAT entries.
+ // The sectors are contiguous, chained, and terminated with end-of-chain.
+ newFatEntries := make([]secT, len(bin.fatEntries))
+ copy(newFatEntries, bin.fatEntries)
+ for i := secT(0); i < numSignedDataSectors-1; i++ {
+ newFatEntries[firstSignedDataSector+i] = firstSignedDataSector + i + 1
+ }
+ newFatEntries[firstSignedDataSector+numSignedDataSectors-1] = fatEndofchain
+
+ // Update the signedData stream's directory entry (location and size), in copy of dir entry.
+ newSigDirEntry := *bin.sigDirEntry
+ newSigDirEntry.StreamFirstSector = uint32(firstSignedDataSector)
+ newSigDirEntry.StreamSize = uint64(len(signedData))
+
+ // Write out the...
+ // ...header,
+ headerSectorBytes := make([]byte, bin.sector.Size)
+ out := new(bytes.Buffer)
+ binary.Write(out, binary.LittleEndian, bin.header)
+ copy(headerSectorBytes[:], out.Bytes())
+ // The 109 header DIFAT entries live directly after the fixed header fields.
+ for i := 0; i < numDifatHeaderEntries; i++ {
+ binary.LittleEndian.PutUint32(headerSectorBytes[numHeaderContentBytes+i*4:], uint32(bin.difatEntries[i]))
+ }
+ // ...content,
+ // Make a copy of the content bytes, since new data will be overlaid on it.
+ // The new content slice should accommodate the new content size.
+ firstFreeSector := firstFreeFatEntry(newFatEntries)
+ contents := make([]byte, bin.sector.Size*offT(firstFreeSector)) // zero-based sector counting.
+ copy(contents, bin.contents)
+
+ // ...signedData directory entry (from local modified copy),
+ out.Reset()
+ binary.Write(out, binary.LittleEndian, &newSigDirEntry)
+ copy(contents[bin.sigDirOffset:], out.Bytes())
+
+ // ...difat entries,
+ // They might have been modified, although usually not.
+ for i, sector := range bin.difatSectors {
+ index := numDifatHeaderEntries + i*bin.sector.Ints
+ offset := offT(sector) * bin.sector.Size
+ for j := 0; j < bin.sector.Ints; j++ {
+ binary.LittleEndian.PutUint32(contents[offset+offT(j)*4:], uint32(bin.difatEntries[index+j]))
+ }
+ }
+ // ...fat entries (from local modified copy),
+ index := 0
+ for i, sector := range bin.difatEntries {
+ // The last entry in each difat sector is a pointer to the next difat sector.
+ // This does not apply to the header entries.
+ isLastInSector := bin.sector.isLastInSector(i)
+ if sector != fatFreesect && sector != fatEndofchain && !isLastInSector {
+ offset := offT(sector) * bin.sector.Size
+ for i := 0; i < bin.sector.Ints; i++ {
+ binary.LittleEndian.PutUint32(contents[offset+offT(i)*4:], uint32(newFatEntries[index+i]))
+ }
+ index += bin.sector.Ints
+ }
+ }
+ // ...signedData
+ // |contents| was zero-initialized, so no need to add padding to end of sector.
+ // The sectors allocated for signedData were guaranteed contiguous.
+ copy(contents[offT(firstSignedDataSector)*bin.sector.Size:], signedData)
+
+ return append(headerSectorBytes, contents...), nil
+}
+
+// RemoveAppendedTag is not supported for MSI files.
+func (bin *MSIBinary) RemoveAppendedTag() (contents []byte, err error) {
+ return nil, errors.New("authenticodetag: appended tags not supported in MSI files")
+}
+
+// SetAppendedTag is not supported for MSI files.
+func (bin *MSIBinary) SetAppendedTag(tagContents []byte) (contents []byte, err error) {
+ return nil, errors.New("authenticodetag: appended tags not supported in MSI files")
+}
+
+// getSuperfluousCert delegates to the package-level getSuperfluousCert for
+// bin's parsed signedData.
+func (bin *MSIBinary) getSuperfluousCert() (cert *x509.Certificate, index int, err error) {
+ return getSuperfluousCert(bin.signedData)
+}
+
+// SetSuperfluousCertTag returns an MSI binary based on bin, but where the
+// superfluous certificate contains the given tag data.
+// The (parsed) bin.signedData is modified; but bin.signedDataBytes, which contains
+// the raw original bytes, is not.
+func (bin *MSIBinary) SetSuperfluousCertTag(tag []byte) (contents []byte, err error) {
+ asn1Bytes, err := SetSuperfluousCertTag(bin.signedData, tag)
if err != nil {
return nil, err
}
- return bin.buildBinary(asn1Bytes, bin.appendedTag), nil
+ return bin.buildBinary(asn1Bytes, nil)
+}
+
+// certificateOffset returns the byte offset (relative to bin.contents) at
+// which a newly written signedData stream will start.
+func (bin *MSIBinary) certificateOffset() int64 {
+ // The signedData will be written at the first free sector at the end of file.
+ return int64(offT(bin.firstFreeFatEntry()) * bin.sector.Size)
+}
+
+// findTag returns the offset of the superfluous-cert tag in |contents|, or (-1, 0) if not found.
+// The caller should restrict the search to the certificate section of the contents, if known.
+func findTag(contents []byte, start int64) (offset, length int64, err error) {
+ // An MSI can have a tagged Omaha inside of it, but that is the wrong tag -- it should be the
+ // one on the outermost container, or none.
+ contents = contents[start:]
+ lenContents := int64(len(contents))
+
+ // Find the oidChromeTag in the contents. The search string includes everything up to the
+ // asn1 length specification right before the Omaha2.0 marker.
+ offset = int64(bytes.LastIndex(contents, oidChromeTagSearchBytes))
+ if offset < 0 { // Not an error, simply not found.
+ return -1, 0, nil
+ }
+ offset += int64(len(oidChromeTagSearchBytes))
+ if offset > lenContents-2 {
+ return -1, 0, fmt.Errorf("failed in findTag, want offset plus tag size bytes to fit in file size %d, but offset %d is too large", lenContents, offset)
+ }
+ length = int64(binary.BigEndian.Uint16(contents[offset:]))
+ offset += 2
+ if offset+length > lenContents {
+ return -1, 0, fmt.Errorf("failed in findTag, want tag buffer to fit in file size %d, but offset (%d) plus length (%d) is %d", lenContents, offset, length, offset+length)
+ }
+ return start + offset, length, nil
}
var (
@@ -544,6 +1222,7 @@ var (
paddedLength *int = flag.Int("padded-length", 0, "A superfluous cert tag will be padded with zeros to at least this number of bytes")
savePKCS7 *string = flag.String("save-pkcs7", "", "If set to a filename, the PKCS7 data from the original binary will be written to that file.")
outFilename *string = flag.String("out", "", "If set, the updated binary is written to this file. Otherwise the binary is updated in place.")
+ printTagDetails *bool = flag.Bool("print-tag-details", false, "IF set, print to stdout the location and size of the superfluous cert's Gact2.0 marker plus buffer.")
)
func main() {
@@ -569,10 +1248,11 @@ func main() {
os.Exit(1)
}
+ var finalContents []byte
didSomething := false
if len(*savePKCS7) > 0 {
- if err := ioutil.WriteFile(*savePKCS7, bin.asn1Data, 0644); err != nil {
+ if err := ioutil.WriteFile(*savePKCS7, bin.asn1Data(), 0644); err != nil {
fmt.Fprintf(os.Stderr, "Error while writing file: %s\n", err)
os.Exit(1)
}
@@ -599,6 +1279,7 @@ func main() {
fmt.Fprintf(os.Stderr, "Error while writing updated file: %s\n", err)
os.Exit(1)
}
+ finalContents = contents
didSomething = true
}
@@ -609,10 +1290,15 @@ func main() {
os.Exit(1)
}
contents, err := bin.SetAppendedTag(tagContents)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Error while setting appended tag: %s\n", err)
+ os.Exit(1)
+ }
if err := ioutil.WriteFile(*outFilename, contents, 0644); err != nil {
fmt.Fprintf(os.Stderr, "Error while writing updated file: %s\n", err)
os.Exit(1)
}
+ finalContents = contents
didSomething = true
}
@@ -620,7 +1306,7 @@ func main() {
var tagContents []byte
if strings.HasPrefix(*setSuperfluousCertTag, "0x") {
- tagContents, err = hex.DecodeString(*setSuperfluousCertTag)
+ tagContents, err = hex.DecodeString((*setSuperfluousCertTag)[2:])
if err != nil {
fmt.Fprintf(os.Stderr, "Failed to parse tag contents from command line: %s\n", err)
os.Exit(1)
@@ -632,6 +1318,15 @@ func main() {
for len(tagContents) < *paddedLength {
tagContents = append(tagContents, 0)
}
+ // print-tag-details only works if the length requires 2 bytes to specify. (The length bytes
+ // length is part of the search string.)
+ // Lorry only tags properly (aside from tag-in-zip) if the length is 8206 or more. b/173139534
+ // Omaha may or may not have a practical buffer size limit; 8206 is known to work.
+ if len(tagContents) < 0x100 || len(tagContents) > 0xffff {
+ fmt.Fprintf(os.Stderr, "Want final tag length in range [256, 65535], got %d\n", len(tagContents))
+ os.Exit(1)
+ }
+
contents, err := bin.SetSuperfluousCertTag(tagContents)
if err != nil {
fmt.Fprintf(os.Stderr, "Error while setting superfluous certificate tag: %s\n", err)
@@ -641,6 +1336,24 @@ func main() {
fmt.Fprintf(os.Stderr, "Error while writing updated file: %s\n", err)
os.Exit(1)
}
+ finalContents = contents
+ didSomething = true
+ }
+
+ if *printTagDetails {
+ if finalContents == nil {
+ // Re-read the input, as NewBinary() may modify it.
+ finalContents, err = ioutil.ReadFile(inFilename)
+ if err != nil {
+ panic(err)
+ }
+ }
+ offset, length, err := findTag(finalContents, bin.certificateOffset())
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Error while searching for tag in file bytes: %s\n", err)
+ os.Exit(1)
+ }
+ fmt.Printf("Omaha Tag offset, length: (%d, %d)\n", offset, length)
didSomething = true
}
diff --git a/common/certificate_tag/certificate_tag_test.go b/common/certificate_tag/certificate_tag_test.go
index 5c19d66bf..84b22ef0f 100644
--- a/common/certificate_tag/certificate_tag_test.go
+++ b/common/certificate_tag/certificate_tag_test.go
@@ -17,47 +17,73 @@ package main
import (
"bytes"
+ "encoding/binary"
+ "flag"
+ "fmt"
"io/ioutil"
"os/exec"
"path/filepath"
"strings"
"testing"
-
- "google3/testing/gobase/googletest"
)
-const directory = "google3/googleclient/installer/tools"
+// Modified from the google3/googleclient/tools version so it can run outside of google3.
+// Build certificate_tag separately, and point flag tag-binary-dir to the build location.
+//
+// Here is an example of testing the 32-bit version on Linux:
+//
+// $ GOARCH=386 CC=gcc go build -o /tmp/certificate_tag common/certificate_tag/certificate_tag.go
+// $ GOARCH=386 CC=gcc go test common/certificate_tag/certificate_tag_test.go common/certificate_tag/certificate_tag.go
+//
+// Here is an example of testing the 32-bit version on Windows 10.
+//
+// $ go build -o C:/tmp/certificate_tag common/certificate_tag/certificate_tag.go
+// $ go test common/certificate_tag/certificate_tag_test.go common/certificate_tag/certificate_tag.go -tag-binary-dir "C:/tmp"
var (
+ tagBinaryDir *string = flag.String("tag-binary-dir", "/tmp", "Path to directory with the tag binary.")
// tagBinary contains the path to the certificate_tag program.
tagBinary string
// sourceExe contains the path to a Chrome installer exe file.
sourceExe string
+ // sourceMSI* contains the path to a signed MSI file.
+ sourceMSI1, sourceMSI2, sourceMSI3, sourceMSI4 string
)
-func init() {
- tagBinary = filepath.Join(googletest.TestSrcDir, directory, "certificate_tag")
- sourceExe = filepath.Join(googletest.TestSrcDir, directory, "testdata/ChromeSetup.exe")
+// existingTagSubstring is a segment of the superfluous-cert tag that's already
+// in ChromeSetup.exe.
+const existingTagSubstring = ".....Gact.?omah"
+
+// TestMain resolves the tag binary and testdata paths from flags before
+// running the suite (replaces the google3-specific init()).
+func TestMain(m *testing.M) {
+ flag.Parse()
+
+ tagBinary = filepath.Join(*tagBinaryDir, "certificate_tag")
+ sourceExe = filepath.Join("testdata/ChromeSetup.exe")
+ sourceMSI1 = filepath.Join("testdata/googlechromestandaloneenterprise.msi")
+ sourceMSI2 = filepath.Join("testdata/test7zSigned.msi")
+ sourceMSI3 = filepath.Join("testdata/OmahaTestSigned.msi")
+ sourceMSI4 = filepath.Join("testdata/test7zSigned-smallcert.msi")
+
+ m.Run()
+}
func TestPrintAppendedTag(t *testing.T) {
cmd := exec.Command(tagBinary, "--dump-appended-tag", sourceExe)
output, err := cmd.CombinedOutput()
if err != nil {
- t.Fatal(err)
+ t.Fatalf("Error executing %q: %v; output:\n%s", tagBinary, err, output)
}
- const expected = ".....Gact.?omah"
- if out := string(output); !strings.Contains(out, expected) {
- t.Errorf("Output of --dump-appended-tag didn't contain %s, as expected. Got:\n%s", expected, out)
+ if out := string(output); !strings.Contains(out, existingTagSubstring) {
+ t.Errorf("Output of --dump-appended-tag didn't contain %s, as expected. Got:\n%s", existingTagSubstring, out)
}
}
// tempFileName returns a path that can be used as temp file. This is only safe
// because we know that only our process can write in the test's temp
// directory.
-func tempFileName() string {
- f, err := ioutil.TempFile(googletest.TestTmpDir, "certificate_tag_test")
+func tempFileName(t *testing.T) string {
+ f, err := ioutil.TempFile(t.TempDir(), "certificate_tag_test")
if err != nil {
panic(err)
}
@@ -66,34 +92,638 @@ func tempFileName() string {
return path
}
-func TestSetSuperfluousCertTag(t *testing.T) {
- out := tempFileName()
+func SetSuperfluousCertTagHelper(t *testing.T, source string) {
+ out := tempFileName(t)
- const expected = "34cf251b916a54dc9351b832bb0ac7ce"
- cmd := exec.Command(tagBinary, "--out", out, "--set-superfluous-cert-tag", expected, sourceExe)
- if err := cmd.Run(); err != nil {
- t.Fatal(err)
+ expected := "34cf251b916a54dc9351b832bb0ac7ce" + strings.Repeat(" ", 256)
+ cmd := exec.Command(tagBinary, "--out", out, "--set-superfluous-cert-tag", expected, source)
+ if output, err := cmd.CombinedOutput(); err != nil {
+ t.Fatalf("Test input %s, error executing %q: %v; output:\n%s", source, tagBinary, err, output)
}
contents, err := ioutil.ReadFile(out)
if err != nil {
- t.Fatalf("Failed to read output file: %s", err)
+ t.Fatalf("Test input %s, failed to read output file: %s", source, err)
}
if !bytes.Contains(contents, []byte(expected)) {
- t.Error("Output doesn't contain expected bytes")
+ t.Errorf("Test input %s, output doesn't contain expected bytes", source)
+ }
+ if bytes.Contains(contents, []byte(existingTagSubstring)) {
+ t.Errorf("Test input %s, output still contains old tag that should have been replaced", source)
}
- cmd = exec.Command(tagBinary, "--out", out, "--set-superfluous-cert-tag", expected, "--padded-length", "256", sourceExe)
- if err = cmd.Run(); err != nil {
- t.Fatal(err)
+ cmd = exec.Command(tagBinary, "--out", out, "--set-superfluous-cert-tag", expected, "--padded-length", "512", source)
+ if output, err := cmd.CombinedOutput(); err != nil {
+ t.Fatalf("Test input %s, error executing %q: %v; output:\n%s", source, tagBinary, err, output)
}
contents, err = ioutil.ReadFile(out)
if err != nil {
- t.Fatalf("Failed to read output file: %s", err)
+ t.Fatalf("Test input %s, failed to read output file: %s", source, err)
}
var zeros [16]byte
if !bytes.Contains(contents, append([]byte(expected), zeros[:]...)) {
- t.Error("Output doesn't contain expected bytes with padding")
+ t.Errorf("Test input %s, output doesn't contain expected bytes with padding", source)
+ }
+}
+
+// TestSetSuperfluousCertTag runs the tagging round-trip helper against the
+// exe and each signed MSI test input.
+func TestSetSuperfluousCertTag(t *testing.T) {
+ expect := []struct {
+ infile string
+ }{
+ {sourceExe},
+ {sourceMSI1},
+ {sourceMSI2},
+ {sourceMSI3},
+ }
+ for _, e := range expect {
+ SetSuperfluousCertTagHelper(t, e.infile)
+ }
+}
+
+// TestIsLastInSector checks sectorFormat.isLastInSector for both 4096-byte
+// (shift 12) and 512-byte (shift 9) sectors, including indices inside the
+// header region (always false) and at sector-chaining positions (true).
+func TestIsLastInSector(t *testing.T) {
+ expect := []struct {
+ in int
+ shift uint16 // 12 for 4096-byte sectors, 9 for 512-byte sectors.
+ want bool
+ }{
+ {0, 12, false},
+ {1, 12, false},
+ {107, 12, false},
+ {108, 12, false},
+ {109, 12, false},
+ {1131, 12, false},
+ {1132, 12, true},
+ {1133, 12, false},
+ {2156, 12, true},
+ {0, 9, false},
+ {1, 9, false},
+ {107, 9, false},
+ {108, 9, false},
+ {109, 9, false},
+ {236, 9, true},
+ {364, 9, true},
+ }
+ for _, e := range expect {
+ format, _ := newSectorFormat(e.shift)
+ got := format.isLastInSector(e.in)
+ if got != e.want {
+ t.Errorf("Arguments (%d, %d): got %t, want %t", e.in, e.shift, got, e.want)
+ }
+ }
+}
+
+// TestFirstFreeFatEntry checks that firstFreeFatEntry returns one past the
+// last used entry when the tail of the slice is free.
+func TestFirstFreeFatEntry(t *testing.T) {
+ expect := []struct {
+ in int
+ want secT
+ }{
+ {1023, 1024},
+ {1000, 1001},
+ {10, 11},
+ {0, 1},
+ }
+ for _, e := range expect {
+ // All entries free except a single used entry at index e.in.
+ entries := make([]secT, 0, 1024)
+ for i := 0; i < 1024; i++ {
+ entries = append(entries, fatFreesect)
+ }
+ entries[e.in] = 1
+ got := firstFreeFatEntry(entries)
+ if got != e.want {
+ t.Errorf("Argument %d, got %d, want %d", e.in, got, e.want)
+ }
+ }
+}
+
+// getFat returns a synthetic FAT slice covering |sectors| sectors (1024
+// entries each) whose trailing |free| entries are free; used entries are zero.
+func getFat(sectors, free int) []secT {
+ // Zero is a valid sector. In a valid file, though, the sector number wouldn't repeat like this.
+ used := 1024*sectors - free // 1024 int32 entries per sector.
+ entries := make([]secT, used, used+free)
+
+ // Set a non-contiguous free sector before the end, which shouldn't affect anything.
+ if used > 2 {
+ entries[used-2] = fatFreesect
+ }
+ for i := 0; i < free; i++ {
+ entries = append(entries, fatFreesect)
+ }
+ return entries
+}
+
+// getDifat returns a synthetic DIFAT slice: 109 header entries plus |sectors|
+// full sectors, with |free| free entries distributed at the end while keeping
+// each sector's final chaining entry (sector number or fatEndofchain) intact.
+func getDifat(sectors, free int) []secT {
+ // Similar to getFat, but there are always 109 (non-sector) elements from the header;
+ // and the last element of any sectors should be fatEndofchain or a sector #.
+ entries := make([]secT, 109, 109+sectors*1024)
+ sentinel := secT(123) // Some sector number
+ for ; sectors > 0; sectors-- {
+ new := make([]secT, 1024)
+ if sectors == 1 {
+ new[1023] = fatEndofchain
+ } else {
+ new[1023] = sentinel
+ }
+ entries = append(entries, new...)
+ }
+ // Walk backwards, marking entries free but skipping chaining entries.
+ i := len(entries) - 1
+ for free > 0 {
+ if entries[i] != fatEndofchain && entries[i] != sentinel {
+ entries[i] = fatFreesect
+ free--
+ }
+ i--
+ }
+ return entries
+}
+
+// getBin builds a minimal MSIBinary (dll version 4, 4096-byte sectors) around
+// copies of the given FAT/DIFAT slices, so tests can compare state before and
+// after method calls without the inputs aliasing bin's slices.
+func getBin(fatEntries, difatEntries []secT) *MSIBinary {
+ // Uses dll version 4, 4096-byte sectors.
+ // There are difat sectors only if len(difatEntries) > 109.
+ n := 0
+ if len(difatEntries) > numDifatHeaderEntries {
+ n = (len(difatEntries)-numDifatHeaderEntries-1)/1024 + 1
+ }
+ // If n>0, zero is a fine sector number.
+ difatSectors := make([]secT, n)
+
+ header := &MSIHeader{
+ DllVersion: 4,
+ SectorShift: 12,
+ NumDifatSectors: uint32(n),
+ }
+
+ // Make copies so we can compare before and after method calls.
+ fat := make([]secT, len(fatEntries))
+ copy(fat, fatEntries)
+ difat := make([]secT, len(difatEntries))
+ copy(difat, difatEntries)
+
+ format, _ := newSectorFormat(12)
+ return &MSIBinary{
+ headerBytes: nil,
+ header: header,
+ sector: format,
+ contents: nil,
+ sigDirOffset: 0,
+ sigDirEntry: nil,
+ signedDataBytes: nil,
+ signedData: nil,
+ fatEntries: fat,
+ difatEntries: difat,
+ difatSectors: difatSectors,
+ }
+}
+
+// verifyAndRemoveDifatChaining asserts that every DIFAT chaining entry (the
+// last entry of each 4096-byte sector) is well-formed — fatEndofchain at the
+// very end, a sector number (< fatReserved) elsewhere — then strips those
+// entries out so callers can compare the remaining entries positionally.
+func verifyAndRemoveDifatChaining(t *testing.T, entries []secT, which, name string, id int) []secT {
+ format, _ := newSectorFormat(12)
+ // Iterate backwards so removals don't shift unvisited indices.
+ for i := len(entries) - 1; i >= 0; i-- {
+ if format.isLastInSector(i) {
+ if i == len(entries)-1 && entries[i] != fatEndofchain {
+ t.Errorf("%s end of chain %s was modified, case %d, i %d: wanted %d (fatEndofchain), got %d", which, name, id, i, secT(fatEndofchain), entries[i])
+ }
+ if i != len(entries)-1 && entries[i] >= fatReserved {
+ t.Errorf("%s %s entries weren't chained, case %d: wanted (< %d) (fatReserved), got %d", which, name, id, secT(fatReserved), entries[i])
+ }
+ if i == len(entries)-1 {
+ entries = entries[:i]
+ } else {
+ entries = append(entries[:i], entries[i+1:]...)
+ }
+ }
+ }
+ return entries
+}
+
+// verifyEntries checks |new| against |old| after a method call: exactly
+// |added| entries appended, the |changed| values written starting at old's
+// first free index, everything before that unchanged, and everything after
+// the changed region free. For DIFAT slices (|isDifat|) chaining entries are
+// validated and removed first so the comparison is positional.
+func verifyEntries(t *testing.T, name string, id, added int, changed, old, new []secT, isDifat bool) {
+ if len(new)-len(old) != added {
+ t.Errorf("Wrong num added %s entries, case %d: wanted %d, got %d", name, id, added, len(new)-len(old))
+ }
+ // If this is difat, check and remove the chaining entries. This simplifies the checks below.
+ if isDifat {
+ // If there is an error in "old", the test case wasn't set up correctly.
+ old = verifyAndRemoveDifatChaining(t, old, "old", name, id)
+ new = verifyAndRemoveDifatChaining(t, new, "new", name, id)
+ }
+ firstFree := len(old) // Can be past end of slice.
+ for firstFree > 0 && old[firstFree-1] == fatFreesect {
+ firstFree--
+ }
+ // Partition |new| into: unchanged prefix, changed region, free tail.
+ same := new[:firstFree]
+ diff := new[firstFree : firstFree+len(changed)]
+ free := new[firstFree+len(changed):]
+ for i := 0; i < len(same); i++ {
+ if old[i] != same[i] {
+ t.Errorf("Entry in %s should not be changed, case %d, i %d: wanted %d, got %d", name, id, i, old[i], same[i])
+ }
+ }
+ for i := 0; i < len(diff); i++ {
+ if changed[i] != diff[i] {
+ t.Errorf("Entry in %s is not changed or not changed to correct value, case %d, offset %d, i %d: wanted %d, got %d", name, id, firstFree, i, changed[i], diff[i])
+ }
+ }
+ for i := 0; i < len(free); i++ {
+ if free[i] != fatFreesect {
+ t.Errorf("Entry in %s should be free but isn't, case %d, offset %d, i %d: wanted %d (fatFreesect), got %d", name, id, firstFree+len(changed), i, secT(fatFreesect), free[i])
+ }
+ }
+}
+
+func TestEnsureFreeDifatEntry(t *testing.T) {
+ expect := []struct {
+ id int // case id
+ difatSectors int // in: # difat sectors
+ difatFree int // in: # free difat entries
+ changedDifat []secT // expect: value of changed difat entries
+ addedDifat int // expect: # difat entries added
+ fatSectors int // in: # fat sectors
+ fatFree int // in: # free fat entries
+ changedFat []secT // expect: value of changed fat entries
+ addedFat int // expect: # fat entries added
+ }{
+ // Note: The number of difat used entries should imply the # of fat sectors.
+ // But that inconsistency doesn't affect these tests.
+
+ // Free difat entry in header, no change.
+ {0, 0, 108, []secT{}, 0, 1, 40, []secT{}, 0},
+ // No free difat entry, add a difat sector (1024 entries).
+ {1, 0, 0, []secT{}, 1024, 1, 40, []secT{fatDifsect}, 0},
+ // Free difat entry in sector, no change.
+ {2, 1, 1, []secT{}, 0, 1, 40, []secT{}, 0},
+ // No free difat entry, add a difat sector.
+ {3, 1, 0, []secT{}, 1024, 1, 40, []secT{fatDifsect}, 0},
+ // Additional sector is completely empty, no change.
+ {4, 1, 1023, []secT{}, 0, 1, 40, []secT{}, 0},
+ // Free difat entry; No free fat entry. No change to either.
+ {5, 0, 10, []secT{}, 0, 1, 0, []secT{}, 0},
+ // No free difat entry; add a difat sector. No free fat entry; add a fat sector.
+ {6, 0, 0, []secT{1024}, 1024, 1, 0, []secT{fatFatsect, fatDifsect}, 1024},
+ {7, 1, 0, []secT{1024}, 1024, 1, 0, []secT{fatFatsect, fatDifsect}, 1024},
+ }
+
+ for _, e := range expect {
+ fat := getFat(e.fatSectors, e.fatFree)
+ difat := getDifat(e.difatSectors, e.difatFree)
+ bin := getBin(fat, difat)
+ bin.ensureFreeDifatEntry()
+
+ // Check added entries.
+ verifyEntries(t, "difat", e.id, e.addedDifat, e.changedDifat, difat, bin.difatEntries, true)
+ verifyEntries(t, "fat", e.id, e.addedFat, e.changedFat, fat, bin.fatEntries, false)
+ }
+}
+
+func TestEnsureFreeFatEntries(t *testing.T) {
+ expect := []struct {
+ id int // case id
+ difatSectors int // in: # difat sectors
+ difatFree int // in: # free difat entries
+ changedDifat []secT // expect: value of changed difat entries
+ addedDifat int // expect: # difat entries added
+ fatSectors int // in: # fat sectors
+ fatFree int // in: # free fat entries available
+ fatRequest secT // in: # free fat entries requested
+ changedFat []secT // expect: value of changed fat entries
+ addedFat int // expect: # fat entries added
+ }{
+ // Note: The number of difat used entries should imply the # of fat sectors.
+ // But that inconsistency doesn't affect these tests.
+
+ {0, 0, 1, []secT{}, 0, 1, 2, 2, []secT{}, 0},
+ {1, 0, 0, []secT{}, 0, 1, 2, 2, []secT{}, 0},
+ {2, 0, 1, []secT{1022}, 0, 1, 2, 4, []secT{fatFatsect}, 1024},
+ {3, 0, 0, []secT{1022}, 1024, 1, 2, 4, []secT{fatFatsect, fatDifsect}, 1024},
+ {4, 0, 1, []secT{1024}, 0, 1, 0, 4, []secT{fatFatsect}, 1024},
+ {5, 0, 0, []secT{1024}, 1024, 1, 0, 4, []secT{fatFatsect, fatDifsect}, 1024},
+ {6, 1, 1, []secT{1022}, 0, 1, 2, 4, []secT{fatFatsect}, 1024},
+ {7, 1, 0, []secT{1022}, 1024, 1, 2, 4, []secT{fatFatsect, fatDifsect}, 1024},
+ {8, 2, 1, []secT{2046}, 0, 2, 2, 4, []secT{fatFatsect}, 1024},
+ {9, 2, 0, []secT{2046}, 1024, 2, 2, 4, []secT{fatFatsect, fatDifsect}, 1024},
+
+ // These are unlikely cases, but they should work.
+ // Request exactly one more sector free. (The difat sector will consume a fat entry as well.)
+ {10, 0, 1, []secT{1022}, 0, 1, 2, 1025, []secT{fatFatsect}, 1024},
+ // Request more than one more sector.
+ {11, 0, 2, []secT{1022, 1023}, 0, 1, 2, 1026, []secT{fatFatsect, fatFatsect}, 2048},
+ // Request more than one sector because of additional difat sector.
+ {12, 0, 0, []secT{1022, 1024}, 1024, 1, 2, 1025, []secT{fatFatsect, fatDifsect, fatFatsect}, 2048},
+ }
+
+ for _, e := range expect {
+ fat := getFat(e.fatSectors, e.fatFree)
+ difat := getDifat(e.difatSectors, e.difatFree)
+ bin := getBin(fat, difat)
+ bin.ensureFreeFatEntries(e.fatRequest)
+
+ // Check added entries.
+ verifyEntries(t, "difat", e.id, e.addedDifat, e.changedDifat, difat, bin.difatEntries, true)
+ verifyEntries(t, "fat", e.id, e.addedFat, e.changedFat, fat, bin.fatEntries, false)
+ }
+}
+
+func TestAssignDifatEntry(t *testing.T) {
+ expect := []struct {
+ id int // case id
+ difatSectors int // in: # difat sectors
+ difatFree int // in: # free difat entries
+ assignedIndex int // expect: which difat index assigned
+ assignedValue secT // in/expect: value assigned
+ fatSectors int // in: # fat sectors
+ fatFree int // in: # free fat entries
+ }{
+ {1, 0, 1, 108, 1000, 1, 23},
+ {2, 0, 0, 109, 1000, 1, 23},
+ {3, 1, 1, 1131, 1000, 1, 23},
+ {4, 1, 0, 1133, 1000, 1, 23},
+ }
+ for _, e := range expect {
+ fat := getFat(e.fatSectors, e.fatFree)
+ difat := getDifat(e.difatSectors, e.difatFree)
+ bin := getBin(fat, difat)
+ bin.assignDifatEntry(e.assignedValue)
+
+ if len(bin.difatEntries) < e.assignedIndex+1 {
+ t.Errorf("Slice too short, index not valid, case %d. Wanted index %d, got slice length %d", e.id, e.assignedIndex, len(bin.difatEntries))
+ } else {
+ if bin.difatEntries[e.assignedIndex] != e.assignedValue {
+ t.Errorf("Wrong index assigned, case %d. At index %d, wanted %d, got %d", e.id, e.assignedIndex, e.assignedValue, bin.difatEntries[e.assignedIndex])
+ }
+ }
+ }
+}
+
+// Validate returns an error if the MSI doesn't pass internal consistency checks.
+// If another MSIBinary is provided, Validate checks that data streams are bitwise identical.
+// It also returns whether the dummy certificate was found.
+func (bin MSIBinary) Validate(other *MSIBinary) (bool, error) {
+ // Check that fat sectors are marked as such in the fat.
+ for i, s := range bin.difatEntries {
+ if s != fatFreesect && !bin.sector.isLastInSector(i) && bin.fatEntries[s] != fatFatsect {
+ return false, fmt.Errorf("fat sector %d (index %d) is not marked as such in the fat", s, i)
+ }
+ }
+ // Check that difat sectors are marked as such in the fat.
+ s := secT(bin.header.FirstDifatSector)
+ i := numDifatHeaderEntries - 1
+ num := 0
+ for s != fatEndofchain {
+ if bin.fatEntries[s] != fatDifsect {
+ return false, fmt.Errorf("difat sector %d (offset %d in chain) is not marked as such in the fat", s, num)
+ }
+ i += int(bin.sector.Ints)
+ s = bin.difatEntries[i]
+ num++
+ }
+ if num != int(bin.header.NumDifatSectors) {
+ return false, fmt.Errorf("wrong number of difat sectors found, wanted %d got %d", bin.header.NumDifatSectors, num)
+ }
+
+ // Enumerate the directory entries.
+ // 1) Validate streams in the fat: Walk the chain, validate the stream length,
+ // and mark sectors in a copy of the fat so we can tell if any sectors are re-used.
+ // 2) Compare bytes in the data streams, to validate none of them changed.
+ // In principle we should match stream names, but in practice the directory entries are not
+ // reordered and the streams are not moved.
+ fatEntries := make([]secT, len(bin.fatEntries))
+ copy(fatEntries, bin.fatEntries)
+ dirSector := secT(bin.header.FirstDirSector)
+ var entry MSIDirEntry
+ for {
+ // Fixed 128 byte directory entry size.
+ for i := offT(0); i < bin.sector.Size/numDirEntryBytes; i++ {
+ offset := offT(dirSector)*bin.sector.Size + i*numDirEntryBytes
+ binary.Read(bytes.NewBuffer(bin.contents[offset:]), binary.LittleEndian, &entry)
+
+ // The mini fat hasn't been parsed, so skip those. The size check also skips non-stream
+ // entries. The signature stream has been freed, so skip that one too.
+ if entry.StreamSize < miniStreamCutoffSize ||
+ bytes.Equal(entry.Name[:entry.NumNameBytes], signatureName) {
+ continue
+ }
+ allocatedSize := offT(0)
+ sector := secT(entry.StreamFirstSector)
+ for {
+ allocatedSize += bin.sector.Size
+ if fatEntries[sector] != fatEndofchain && fatEntries[sector] >= fatReserved {
+ return false, fmt.Errorf("Found bad/reused fat entry at sector %d; wanted value < %d (fatReserved), got %d", sector, secT(fatReserved), fatEntries[sector])
+ }
+ // Technically we need not check beyond the end of stream data, but these sectors
+ // should not be modified at all.
+ if other != nil {
+ offset := offT(sector) * bin.sector.Size
+ if !bytes.Equal(bin.contents[offset:offset+bin.sector.Size], other.contents[offset:offset+bin.sector.Size]) {
+ return false, fmt.Errorf("Found difference in streams at sector %d", sector)
+ }
+ }
+ next := fatEntries[sector]
+ fatEntries[sector] = fatReserved // Detect if this is re-used.
+ if next == fatEndofchain {
+ break
+ }
+ sector = next
+ }
+ if uint64(allocatedSize) < entry.StreamSize {
+ return false, fmt.Errorf("Found stream with size greater than allocation, starting sector %d", entry.StreamFirstSector)
+ }
+ }
+ // Go to the next directory sector.
+ dirSector = bin.fatEntries[dirSector]
+ if dirSector == fatEndofchain {
+ break
+ }
+ }
+
+ // Compare certs and signatures (other than dummy).
+ cert, index, err := getSuperfluousCert(bin.signedData)
+ if err != nil {
+ return false, fmt.Errorf("parse error in bin.signedData: %w", err)
+ }
+ if other != nil {
+ _, index2, err := getSuperfluousCert(other.signedData)
+ if err != nil {
+ return false, fmt.Errorf("parse error in other.signedData: %w", err)
+ }
+ pkcs7 := bin.signedData.PKCS7
+ pkcs7Other := other.signedData.PKCS7
+ i := 0
+ i2 := 0
+ for {
+ if i == index {
+ i++
+ }
+ if i2 == index2 {
+ i2++
+ }
+ if i >= len(pkcs7.Certs) || i2 >= len(pkcs7Other.Certs) {
+ if i < len(pkcs7.Certs) || i2 < len(pkcs7Other.Certs) {
+ return false, fmt.Errorf("number of certs mismatch, compare other %d vs this %d (possibly including dummy cert)", len(pkcs7Other.Certs), len(pkcs7.Certs))
+ }
+ break
+ }
+ if !bytes.Equal(pkcs7.Certs[i].FullBytes, pkcs7Other.Certs[i2].FullBytes) {
+ return false, fmt.Errorf("cert contents mismatch, other cert index %d vs this cert index %d", i2, i)
+ }
+ i++
+ i2++
+ }
+ }
+
+ return cert != nil, nil
+}
+
+func TestMsiSuperfluousCert(t *testing.T) {
+ const tag = "258c 6320 e4c4 0258 169b 481a def0 8856" // Random data
+ expect := []struct {
+ infile string
+ }{
+ {sourceMSI1},
+ {sourceMSI2},
+ {sourceMSI3},
+ }
+ for _, e := range expect {
+ contents, err := ioutil.ReadFile(e.infile)
+ if err != nil {
+ t.Fatalf("Error reading test input %s: %v", e.infile, err)
+ }
+
+ bin, err := NewBinary(contents)
+ if err != nil {
+ t.Fatalf("Error creating MSIBinary from test input %s: %v", e.infile, err)
+ }
+ msiBin := bin.(*MSIBinary)
+ hasDummy, err := msiBin.Validate(nil)
+ if err != nil {
+ t.Errorf("Input binary doesn't validate, created from test input %s: %v", e.infile, err)
+ } else if hasDummy {
+ t.Errorf("Input binary has the dummy cert (it shouldn't), created from test input %s", e.infile)
+ }
+
+ // Note this adds the dummy cert to |bin|.
+ contents, err = bin.SetSuperfluousCertTag([]byte(tag))
+ if err != nil {
+ t.Errorf("Error tagging test input %s: %v", e.infile, err)
+ continue
+ }
+ binTagged, err := NewBinary(contents)
+ if err != nil {
+ t.Errorf("Error parsing tagged binary from test input %s: %v", e.infile, err)
+ continue
+ }
+
+ msiBinTagged := binTagged.(*MSIBinary)
+ hasDummy, err = msiBinTagged.Validate(msiBin)
+ if err != nil {
+ t.Errorf("Tagged binary doesn't validate, created from test input %s: %v", e.infile, err)
+ } else if !hasDummy {
+ t.Errorf("Tagged binary doesn't have the dummy cert (it should), created from test input %s", e.infile)
+ }
+ }
+}
+
+func TestFindTag(t *testing.T) {
+ oid := []byte{
+ 0x06, 0x0b, 0x2b, 0x06, 0x01, 0x04, 0x01, 0xd6, 0x79, 0x02, 0x01, 0xce, 0x0f, 0x04, 0x82}
+ oidStr := string(oid)
+ oidSize := int64(len(oid) + 2) // includes size bytes
+ marker := []byte{0x47, 0x61, 0x63, 0x74, 0x32, 0x2e, 0x30, 0x4f, 0x6d, 0x61, 0x68, 0x61}
+
+ // Create test strings.
+ expect := []struct {
+ name string
+ in string
+ start int64
+ offset int64
+ length int64
+ hasErr bool
+ }{
+ {"no padding", oidStr + "\x00\x10" + strings.Repeat("0", 16), 0, oidSize, 16, false},
+ {"start padding", "1111" + oidStr + "\x00\x10" + strings.Repeat("1", 16), 0, 4 + oidSize, 16, false},
+ {"start and end padding", "2222" + oidStr + "\x00\x10" + strings.Repeat("2", 20), 0, 4 + oidSize, 16, false},
+ {"no tag", "3333" + "\x00\x10" + strings.Repeat("3", 20), 0, -1, 0, false},
+ {"tag prior to search start", "4444" + oidStr + "\x00\x10" + strings.Repeat("4", 20), 10, -1, 0, false},
+ {"error no length bytes", "5555" + oidStr, 0, -1, 0, true},
+ {"error length too long", "6666" + oidStr + "\x00\x10" + strings.Repeat("6", 15), 0, -1, 0, true},
+ }
+
+ for _, e := range expect {
+ offset, length, err := findTag([]byte(e.in), e.start)
+ if offset != e.offset {
+ t.Errorf("test %s, got offset %d, want %d", e.name, offset, e.offset)
+ }
+ if length != e.length {
+ t.Errorf("test %s, got length %d, want %d", e.name, length, e.length)
+ }
+ if (err != nil) != e.hasErr {
+ t.Errorf("test %s, got error %v, want %v", e.name, err, e.hasErr)
+ }
+ }
+
+ // Test end-to-end with testdata files.
+ expect2 := []struct {
+ infile string
+ size int64
+ }{
+ {sourceExe, 2048},
+ {sourceExe, 1000},
+ {sourceExe, 256},
+ {sourceMSI1, 2048},
+ {sourceMSI2, 2048},
+ {sourceMSI2, 1000},
+ {sourceMSI2, 256},
+ {sourceMSI3, 2048},
+ {sourceMSI4, 2048},
+ }
+ for i, e := range expect2 {
+ contents, err := ioutil.ReadFile(e.infile)
+ if err != nil {
+ t.Fatalf("Case %d, error reading test input %s: %v", i, e.infile, err)
+ }
+ bin, err := NewBinary(contents)
+ if err != nil {
+ t.Fatalf("Case %d, error creating MSIBinary from test input %s: %v", i, e.infile, err)
+ }
+ // NewBinary may modify |contents|.
+ contents, err = ioutil.ReadFile(e.infile)
+ if err != nil {
+ t.Fatalf("Case %d, error reading test input %s: %v", i, e.infile, err)
+ }
+
+ // No tag before tagging.
+ offset, _, err := findTag(contents, bin.certificateOffset())
+ if err != nil {
+ t.Errorf("Case %d, error in findTag for untagged source %s: %v", i, e.infile, err)
+ }
+ if offset != -1 {
+ t.Errorf("Case %d, found tag in untagged input %s, want offset -1 got %d", i, e.infile, offset)
+ }
+
+ // Apply a tag; find tag in contents; verify it's at marker with right size.
+ tag := make([]byte, e.size)
+ copy(tag[:], marker)
+ contents, err = bin.SetSuperfluousCertTag(tag)
+ if err != nil {
+ t.Fatalf("Case %d, error tagging source %s: %v", i, e.infile, err)
+ }
+ offset, length, err := findTag(contents, bin.certificateOffset())
+ if err != nil {
+ t.Errorf("Case %d, error in findTag for source %s: %v", i, e.infile, err)
+ }
+ if length != e.size {
+ t.Errorf("Case %d, error in findTag for source %s: wanted returned length %d, got %d", i, e.infile, e.size, length)
+ }
+ if offset < 0 {
+ t.Errorf("Case %d, error in findTag for source %s: wanted returned offset >=0, got %d", i, e.infile, offset)
+ } else {
+ // Expect to find size bytes just prior to offset.
+ size := int64(binary.BigEndian.Uint16(contents[offset-2:]))
+ if size != e.size {
+ // Either the size is wrong, or (more likely) we found the wrong offset.
+ t.Errorf("Case %d, error in findTag for source %s, offset %d: wanted embedded size %d, got %d", i, e.infile, offset, e.size, size)
+ }
+ // Expect to find the marker at |offset|
+ idx := bytes.Index(contents[offset:], marker)
+ if idx != 0 {
+ t.Errorf("Case %d, error in findTag for source %s: after offset %d, wanted to find marker at idx 0, got %d", i, e.infile, offset, idx)
+ }
+ }
}
}
diff --git a/common/certificate_tag/testdata/ChromeSetup.exe b/common/certificate_tag/testdata/ChromeSetup.exe
index 716a9e282..57940628f 100644
Binary files a/common/certificate_tag/testdata/ChromeSetup.exe and b/common/certificate_tag/testdata/ChromeSetup.exe differ
diff --git a/common/certificate_tag/testdata/OmahaTestSigned.msi b/common/certificate_tag/testdata/OmahaTestSigned.msi
new file mode 100644
index 000000000..bc1b91388
Binary files /dev/null and b/common/certificate_tag/testdata/OmahaTestSigned.msi differ
diff --git a/common/certificate_tag/testdata/googlechromestandaloneenterprise.msi b/common/certificate_tag/testdata/googlechromestandaloneenterprise.msi
new file mode 100644
index 000000000..bcbe4f216
Binary files /dev/null and b/common/certificate_tag/testdata/googlechromestandaloneenterprise.msi differ
diff --git a/common/certificate_tag/testdata/test7zSigned-smallcert.msi b/common/certificate_tag/testdata/test7zSigned-smallcert.msi
new file mode 100644
index 000000000..66f134d63
Binary files /dev/null and b/common/certificate_tag/testdata/test7zSigned-smallcert.msi differ
diff --git a/common/certificate_tag/testdata/test7zSigned.msi b/common/certificate_tag/testdata/test7zSigned.msi
new file mode 100644
index 000000000..ba081c66a
Binary files /dev/null and b/common/certificate_tag/testdata/test7zSigned.msi differ
diff --git a/doc/ClientLog.md b/doc/ClientLog.md
index dee260cec..d7634f684 100644
--- a/doc/ClientLog.md
+++ b/doc/ClientLog.md
@@ -27,7 +27,6 @@ Non-opt builds (dbg-win and coverage-win) allow provide much more logging and ha
[LoggingLevel]
LC_CORE=5
LC_NET=4
-LC_PLUGIN=3
LC_SERVICE=3
LC_SETUP=3
LC_SHELL=3
@@ -38,7 +37,6 @@ LC_REPORT=3
[LoggingSettings]
EnableLogging=1
-LogFilePath="C:\foo\GoogleUpdate.log"
MaxLogFileSize=10000000
ShowTime=1
@@ -53,4 +51,4 @@ NoSendDumpToServer=1
NoSendStackToServer=1
```
# Log Size Limits #
-Omaha tries to archive the log when the log size is greater than 10 MB. When the log is in use by more than one instance of Omaha the archiving operation will fail. However, there is a 100 MB limit to how big the log can be to prevent overfilling the hard drive. When this limit is reached the log file is cleared and the logging starts from the beginning.
\ No newline at end of file
+Omaha tries to archive the log when the log size is greater than 10 MB. When the log is in use by more than one instance of Omaha the archiving operation will fail. However, there is a 100 MB limit to how big the log can be to prevent overfilling the hard drive. When this limit is reached the log file is cleared and the logging starts from the beginning.
diff --git a/doc/ClientUpdateProtocolEcdsa.md b/doc/ClientUpdateProtocolEcdsa.md
index 4d91b49a4..c4b29d809 100644
--- a/doc/ClientUpdateProtocolEcdsa.md
+++ b/doc/ClientUpdateProtocolEcdsa.md
@@ -39,7 +39,7 @@ The server publishes an elliptic curve field/equation and a public key curve poi
For each request, the client assembles three components:
* The message body (the update request to be sent to the server).
- * A small random number to be used as a client nonce for freshness (at least 32 bits).
+ * A small random number to be used as a client nonce for freshness (at least 256 bits).
* A code to identify the public key the client will use to verify this request.
The client converts the public key id and nonce to a string: the public key is converted to decimal, and the nonce to hexadecimal (lowercase a-f).
@@ -50,9 +50,13 @@ The server receives an update request XML, public key id, and nonce; it performs
The server attempts to find a matching ECDSA private key for the specified public key id, returning an HTTP error if no such private key exists. Finally, it assembles the update response.
-Before sending, the server stores the update response XML (also in UTF-8) in a buffer. It appends the computed SHA-256 hash of the request body+keyid+nonce to the buffer. It then calculates an ECDSA signature over that combined buffer, using the server’s private key. It sends the ECDSA signature and the response body + client hash back to the user.
+Before sending, the server stores the SHA-256 hash of the request body in a buffer. It appends the SHA-256 hash of the response body, then the cup2key query value (%d:%u, where the first parameter is the keypair id, and the second is the client freshness nonce). It then calculates an ECDSA signature over the SHA-256 hash of that buffer, using the server's private key. It sends the ECDSA signature and the client hash (i.e. hash of the request body) back to the user.
-The client receives the response XML, observed client hash, and ECDSA signature. It concatenates its copy of the request hash to the response XML, and attempts to verify the ECDSA signature using its public key. If the signature does not match, the client recognizes that the server response has been tampered in transit, and rejects the exchange.
+
+
+The client receives the response XML, observed client hash, and ECDSA signature. It creates a buffer containing the SHA-256 hash of the request body. It then appends the SHA-256 hash of the response body, then the cup2key query value (see above). It then tests whether the received ECDSA signature can be verified to match the SHA-256 hash of this buffer using the public key. If the signature does not match, the client recognizes that the server response has been tampered in transit, and rejects the exchange.
+
+
The client then compares the SHA-256 hash in the response to the original hash of the request. If the hashes do not match, the client recognizes that the request has been tampered in transit, and rejects the exchange.
@@ -74,4 +78,4 @@ The server should return the ECDSA signature and client SHA-256 hash in the **ET
* The signature consists of two 256-bit integers (“R†and “Sâ€), in a ASN.1 sequence, encoded in DER; the hash is 256 bits.
* Convert the DER-encoded signature to lowercase hex. The SHA-256 hash will be standard hex representation.
- * Concatenate them with a colon as a delimiter: “signature:hashâ€. The final ETag value will max out at 194 characters (plus \n), which is a bit long, but shouldn’t be risking the 8k limit on HTTP headers.
\ No newline at end of file
+ * Concatenate them with a colon as a delimiter: “signature:hashâ€. The final ETag value will max out at 194 characters (plus \n), which is a bit long, but shouldn’t be risking the 8k limit on HTTP headers.
diff --git a/doc/CustomizingOmaha.md b/doc/CustomizingOmaha.md
index 82dd951f9..bc7679142 100644
--- a/doc/CustomizingOmaha.md
+++ b/doc/CustomizingOmaha.md
@@ -20,7 +20,7 @@ The following items **MUST** be changed before releasing a fork of Omaha. Prefe
> Modify **`kGlobalPrefix`** at the top of the file to contain your company name.
- * **`omaha\base\const_goopdate.h`**
+ * **`omaha\common\const_goopdate.h`**
> Modify the names of the service names (examples: **`omaha_task_name_c`**, **`omaham_service_name`**, etc.) to contain your product's name.
@@ -32,7 +32,6 @@ The following items **MUST** be changed before releasing a fork of Omaha. Prefe
> Generate new GUIDs for every interface and coclass. Changing the descriptive names for them isn't a bad idea either. (Do not, however, change code-level names such as `IAppBundle` or `GoogleUpdate3UserClass`.)
- * **`omaha\plugins\update\activex\update_control_idl.idl`**
> Generate new GUIDs for every interface and coclass.
@@ -62,4 +61,4 @@ We strongly recommend making these changes before you release:
The version number stored in all outputs is set in the file **`omaha\VERSION`**. Omaha has some functionality in it from Google Update related to bug workarounds when upgrading from prior versions, so don't set the VERSION to any lower than 1.3.23.0.
-When releasing your fork of Omaha, we recommend starting the version at 1.3.25.0. Remember to bump the version up whenever releasing an updated version.
\ No newline at end of file
+When releasing your fork of Omaha, we recommend starting the version at 1.3.25.0. Remember to bump the version up whenever releasing an updated version.
diff --git a/doc/DeveloperSetupGuide.md b/doc/DeveloperSetupGuide.md
index d7b530ec1..e685eaa84 100644
--- a/doc/DeveloperSetupGuide.md
+++ b/doc/DeveloperSetupGuide.md
@@ -4,47 +4,50 @@ These instructions are intended to assist the would-be Omaha developer with sett
We are striving to make the code build with the latest Windows toolchain from Microsoft. Since there is no continuous integration for this project, the code may not build using previous versions of the toolchain.
-#### Currently, the supported toolchain is Visual Studio 2019 Update 16.1.1 and Windows SDK 10.0.17763.0. ####
+#### Currently, the supported toolchain is Visual Studio 2022 Update 17.8.3 and Windows SDK 10.0.22621.0. ####
-Visual Studio 2017 Update 15.9.12 should work too.
+The updater runs on Windows 7, 8, and 10. Windows XP is not supported in the current build configuration due to a number of issues, such as thread-safe initializing of static local variables, etc.
# Required Downloads/Tools #
The following packages are required to build Omaha:
* A copy of the Omaha source code. This can be done by cloning this repository.
- * Microsoft Visual Studio 2017 or 2019. The free Visual Studio Community edition is sufficient to build.
+ * Microsoft Visual Studio 2022. The free Visual Studio Community edition is sufficient to build.
* Download [here](https://visualstudio.microsoft.com/downloads)
- * ATL Server headers
- * Download [here](http://atlserver.codeplex.com). Omaha needs this library for regular expression support.
* Windows 10 SDK.
- * Download Windows 10 SDK [here](https://dev.windows.com/en-us/downloads/windows-10-sdk).
- * Microsoft .NET Framework 2.0
- * This should be pre-installed on Windows Vista and Windows 7. This old version of SDK is needed for click-once compatibility with Windows XP systems.
- * To verify, see if the file %WINDIR%\Microsoft.NET\Framework\v2.0.50727\csc.exe exists on your system.
- * Download [here](https://www.microsoft.com/en-us/download/details.aspx?id=19988).
- * The Windows Template Library (WTL)
+ * Visual Studio copy of Windows 10 SDK is sufficient to build with, if desired.
+ * Optionally, download and install Windows 10 SDK [here](https://dev.windows.com/en-us/downloads/windows-10-sdk).
+ * The Windows Template Library (WTL) - WTL 10.0.10320 Release
* Download WTL [here](http://sourceforge.net/projects/wtl/).
+ * hammer.bat has `OMAHA_WTL_DIR` set to `C:\wtl\files`. Change this if you unpacked to a different location.
* The Windows Install XML (WiX) Toolkit, version 3.0 or later.
* Download any of the v3 binaries packages [here](http://wix.sourceforge.net/).
+ * Set the `WIX` environment variable to the directory where you unpacked WiX.
* Python 2.7.x
* Download Python [here](https://www.python.org/downloads/release/python-2716). It can coexist with newer Python installs on a system.
* You'll also need the pywin32 (Python for Windows) extensions for Python 2.7.
- You can install with pip: `> python -m pip install pywin32` - assuming `python` is added to your `PATH` environmental variable.
- It can also be downloaded [here](https://github.com/mhammond/pywin32/releases/download/b224/pywin32-224.win-amd64-py2.7.exe).
+ * The `OMAHA_PYTHON_DIR` is set to `C:\Python27`. Change this if you installed to a different location.
* SCons 1.3.x (Be sure to use **1.3**, the 2.0 series is not backwards-compatible!)
* Download SCons [here](http://sourceforge.net/projects/scons/files/scons/1.3.1/).
+ * Change this line in hammer.bat if you installed to a different location: `SCONS_DIR=C:\Python27\scons-1.3.1`.
* Google Software Construction Toolkit
- * Get the SCT source [here](http://code.google.com/p/swtoolkit/), either via direct download or via SVN checkout.
+ * Get the SCT source [here](https://code.google.com/archive/p/swtoolkit/downloads), either via direct download or via SVN checkout.
+ * Change this line in hammer.bat if you installed to a different location: `set SCT_DIR=C:\swtoolkit`.
* The GO programming language
* Download [here](https://golang.org/dl/)
- * Google Protocol Buffers (3.6.0 or higher) [here](https://github.com/google/protobuf/releases).
- * From the [release page](https://github.com/google/protobuf/releases), download the zip file protoc-$VERSION-win32.zip. It contains the protoc binary. Unzip the contents under C:\protobuf. After that, download the zip file protobuf-cpp-$VERSION.zip. Unzip the "src" sub-directory contents to C:\protobuf\src. If other directory is used, please edit the environment variables in the hammer.bat, specifically, OMAHA_PROTOBUF_BIN_DIR and OMAHA_PROTOBUF_SRC_DIR.
+ * Change this line in hammer.bat if you installed to a different location: `set GOROOT=C:\go`.
+ * Google Protocol Buffers (currently tested with v3.17.3) [here](https://github.com/protocolbuffers/protobuf/releases).
+ * From the [release page](https://github.com/protocolbuffers/protobuf/releases), download the zip file `protoc-$VERSION-win32.zip`. It contains the protoc binary. Unzip the contents under `C:\protobuf`. After that, download the zip file `protobuf-cpp-$VERSION.zip`. Unzip the `src` sub-directory contents to `C:\protobuf\src`. If other directory is used, please edit the environment variables in the hammer.bat, specifically, `OMAHA_PROTOBUF_BIN_DIR` and `OMAHA_PROTOBUF_SRC_DIR`.
* Third-party dependencies:
- * breakpad. Source code [here](https://code.google.com/p/google-breakpad/source/checkout)
- * googletest. Source code [here](https://github.com/google/googletest). This includes both gtest and gmock frameworks.
- * Use git clone, git svn clone, or other way to get the source code for these projects into the third_party directory in the root of this repository.
- * libzip 1.5.2. Source code [here](https://libzip.org/download/libzip-1.5.2.tar.xz). Unzip the contents of libzip-1.5.2.tar.gz\libzip-1.5.2.tar\libzip-1.5.2\ into the directory googleclient\third_party\libzip. The Omaha repository contains two generated configuration files in `base\libzip`, or one could build the libzip library and generate the files. A change has been made to config.h to disable zip crypto `#undef HAVE_CRYPTO`, or else the zip code won't build because of a compile time bug.
- * zlib 1.2.11. Source code [here](https://zlib.net/zlib-1.2.11.tar.gz). Unzip the contents of zlib-1.2.11.tar.gz\zlib-1.2.11.tar\zlib-1.2.11\ into the directory googleclient\third_party\zlib\v1_2_11.
+ * breakpad. Download [here](https://github.com/google/breakpad/archive/refs/heads/main.zip). Tested with commit [11ec9c](https://github.com/google/breakpad/commit/11ec9c32888c06665b8838f709bd66c0be9789a6) from Dec 11, 2023.
+ - Unzip everything inside `breakpad-master.zip\breakpad-master` to `third_party\breakpad`.
+ * googletest. Download [here](https://github.com/google/googletest/archive/refs/heads/master.zip). Tested with commit
+ [96eadf](https://github.com/google/googletest/commit/96eadf659fb75ecda943bd97413c71d4c17c4f43) from Dec 22, 2023. This includes both gtest and gmock frameworks.
+ - Unzip everything inside `googletest-master.zip\googletest-master` to `third_party\googletest`.
+ * libzip 1.7.3. Source code [here](https://libzip.org/download/libzip-1.7.3.tar.xz). Unzip the contents of `libzip-1.7.3.tar.gz\libzip-1.7.3.tar\libzip-1.7.3\` into the directory `third_party\libzip`. The Omaha repository contains two generated configuration files in `base\libzip`, or one could build the libzip library and generate the files. A change has been made to config.h to disable zip crypto `#undef HAVE_CRYPTO`, or else the zip code won't build because of a compile time bug.
+ * zlib 1.2.11. Source code [here](https://zlib.net/zlib-1.2.11.tar.gz). Unzip the contents of `zlib-1.2.11.tar.gz\zlib-1.2.11.tar\zlib-1.2.11\` into the directory `third_party\zlib`.
To run the unit tests, one more package is needed. Download the Windows Sysinternals PSTools suite [here](https://technet.microsoft.com/en-us/sysinternals/bb897553) and save psexec.exe somewhere. Then, set a system environment variable named OMAHA_PSEXEC_DIR to the directory containing psexec.exe.
@@ -66,10 +69,11 @@ To run the unit tests, one more package is needed. Download the Windows Sysinter
d---rwx---+ 1 sorin Domain Users 0 Jun 30 17:58 third_party
d:\src\omahaopensource\omaha>ls -l third_party
- total 16
- d---rwx---+ 1 sorin Domain Users 0 Jul 14 12:52 breakpad
- drwxrwx---+ 1 Administrators Domain Users 0 Sep 1 11:52 googletest
- d---rwx---+ 1 sorin Domain Users 0 Aug 7 18:58 lzma
+ drwxrwxrwx 1 sorin sorin 4096 Mar  1 19:37 breakpad
+ drwxrwxrwx 1 sorin sorin 4096 Mar  1 19:41 googletest
+ drwxrwxrwx 1 sorin sorin 4096 Mar  1 19:58 libzip
+ drwxrwxrwx 1 sorin sorin 4096 Mar  1 16:30 lzma
+ drwxrwxrwx 1 sorin sorin 4096 Mar  1 20:07 zlib
```
## Environment Variables ##
@@ -91,7 +95,7 @@ A larger suite of unit tests is also included in the Omaha source.
## Running Unit Tests ##
-The Omaha build proces includes building an automated unit test suite, based on the [GTest](https://github.com/google/googletest) framework. In order to run it, there are two pieces of preparation you must do:
+The Omaha build process includes building an automated unit test suite, based on the [GTest](https://github.com/google/googletest) framework. In order to run it, there are two pieces of preparation you must do:
* Create the following registry key: `HKEY_LOCAL_MACHINE\SOFTWARE\OmahaCompanyName\UpdateDev`. Then, add a string value named `TestSource` with the value `ossdev`. (Note: If you are on 64 bit Windows and are using `regedit` to create the value then you need to place it in `HKEY_LOCAL_MACHINE\SOFTWARE\Wow6432Node\OmahaCompanyName\UpdateDev`. [This allows 32 bit processes to read it.](https://support.microsoft.com/en-us/kb/305097)).
* Download the Windows Sysinternals PSTools suite (available [here](http://technet.microsoft.com/en-us/sysinternals/bb897553)) and save `psexec.exe` somewhere. Then, set an environment variable named `OMAHA_PSEXEC_DIR` to the directory containing `psexec.exe`.
diff --git a/doc/Omaha3SourceOrganization.md b/doc/Omaha3SourceOrganization.md
index 8e9004e9b..d18e6d4fd 100644
--- a/doc/Omaha3SourceOrganization.md
+++ b/doc/Omaha3SourceOrganization.md
@@ -31,7 +31,6 @@ omaha\
mi_exe_stub\ Produces a stub EXE, mi_exe_stub.exe, that will be combined
with a TAR to produce the untagged meta-installer. (The script to
actually do the merge lives in installers\, mentioned below.)
-plugins\ Produces the browser plugin, npGoogleUpdate3.dll.
recovery\ Produces tools for "Code Red" - a mechanism that the apps being
managed by Omaha can use to check Omaha’s integrity, and
restore it if it appears broken.
diff --git a/doc/Omaha3Walkthrough.md b/doc/Omaha3Walkthrough.md
index 484ab5894..fa03aeb17 100644
--- a/doc/Omaha3Walkthrough.md
+++ b/doc/Omaha3Walkthrough.md
@@ -35,7 +35,7 @@ The Omaha Client always operates at user privilege levels and owns the UI of Oma
* Setup - Create or update a permanent Omaha install, of either user or machine variety.
* Install - Invoke the COM server to create a state machine object, fill it out with apps to be managed, and call a suitable function on it such as `checkForUpdate()`, `download()`, or `install()`. From that point onwards, poll the state object as the COM server does the work for you, and update the UI as the states advance.
-In general, when referring to “the clientâ€, we’re referring to the official Google Update client, which happens to live in the same executable as the COM Server; the role that the executable plays is decided simply by which command line is passed to it. However, there are other clients that may access the COM server; some of them we own (the web browser plugins), and some we do not own (partner applications which access our COM APIs directly). The server must stay as secure as possible, and sanitize all input.
+In general, when referring to "the client", we’re referring to the official Google Update client, which happens to live in the same executable as the COM Server; the role that the executable plays is decided simply by which command line is passed to it. However, there are other clients that may access the COM server; the server must stay as secure as possible, and sanitize all input.
## Example Code Flow ##
@@ -50,7 +50,6 @@ appname=Google%20Chrome&needsadmin=False&lang=en"
* We check the machine to see if there’s already a user Omaha installed with a version newer than or equal to ours. (If it’s equal to ours, we will do some supplementary checking to make sure that the installed copy is sane and working properly, and if not, we over-install.) Let’s assume that there is no user Omaha installed. We will create the direct directory in `AppData`, copy over the files, and then make entries in the Registry to do the following:
* Register our COM servers
* Create scheduled tasks to check for an update every five hours
- * Expose our web browser plugins to IE/Firefox/Chrome/Safari/Opera
* Store initial configuration/state for Omaha itself in the Registry
* Register Omaha itself as an Omaha-managed application, so it can check for updates for itself
* The client then starts a new copy of itself in its permanent installed location, modifying the command line from `/install` to `/handoff`. Once again, the constant shell loads Goopdate and passes the command line along - this time, however, we’re using the constant shell in the newly-created permanent install of Omaha, rather than the one in the temp directory.
@@ -68,25 +67,26 @@ A crucial thing to pick up here is that, since one file (goopdate.dll) does many
So, what files are actually in a permanent install of Omaha once it’s completed?
-| `GoogleUpdate.exe` | The Constant Shell. Just takes the command line given to it and passes it to goopdate.dll; if necessary, it will validate that goopdate has an intact digital signature from Google. |
+| Filename | Description |
|:-------------------|:--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| `GoogleCrashHandler.exe` | A copy of the constant shell, renamed for Omaha2 compatibility reasons. Expected to always be started with /crashhandler. |
-| `GoogleUpdateBroker.exe` GoogleUpdateOnDemand.exe
COM Forwarders. Both of these are small EXEs whose sole purpose is to take their own command line, append a command line switch to the end, and pass it to the Constant Shell.
-
goopdate.dll
The central Omaha3 binary.
-
goopdateres_*.dll
Resource-only DLLs, one per language, containing localized strings. As part of its startup, Goopdate will read the "lang" extra-args parameter if one exists (or the Registry) and select a language to load.
-
npGoogleUpdate3.dll
Our web browser plugin. (It actually contains two plugins: ActiveX plugins for IE, and an NPAPI plugin for Firefox, Chrome, and other browsers that use that.) Allows Javascript on selected subdomains of google.com to access and use the COM Server.
-
psmachine.dll psuser.dll
Custom marshaling stubs used by the COM Server. Used in order to work around some Windows bugs that are triggered by having both Machine and User Omaha installed simultaneously.
-
-The directory tree typically looks like this:
-
-
Google\
- Update\
- 1.3.21.53\ The install location for the current version of Omaha.
- ... the files listed above ...
- Download\ Temp area for installers currently being downloaded.
- Install\ Temp area for installers that are verified and about to be launched.
- GoogleUpdate.exe A copy of the the constant shell. This will look into the registry for
- the most recently successfully installed version of Omaha, and
- use the goopdate.dll there.
-
+| `GoogleUpdate.exe` | **The Constant Shell.** Just takes the command line given to it and passes it to goopdate.dll; if necessary, it will validate that goopdate has an intact digital signature from Google. |
+| `GoogleCrashHandler.exe` | A copy of the constant shell, renamed for Omaha2 compatibility reasons. Expected to always be started with /crashhandler. |
+| `GoogleUpdateBroker.exe` `GoogleUpdateOnDemand.exe` | **COM Forwarders**. Both of these are small EXEs whose sole purpose is to take their own command line, append a command line switch to the end, and pass it to the Constant Shell. |
+| `goopdate.dll` | The central Omaha3 binary. |
+| `goopdateres_*.dll` | **Resource-only DLLs, one per language, containing localized strings**. As part of its startup, Goopdate will read the "lang" extra-args parameter if one exists (or the Registry) and select a language to load. |
+| `psmachine.dll` `psuser.dll` | **Custom marshaling stubs used by the COM Server**. Used in order to work around some Windows bugs that are triggered by having both Machine and User Omaha installed simultaneously.|
+
+
+The directory tree typically looks like this:
+```
+Google\
+ Update\
+ 1.3.21.53\ The install location for the current version of Omaha.
+ ... the files listed above ...
+ Download\ Temp area for installers currently being downloaded.
+ Install\ Temp area for installers that are verified and about to be launched.
+ GoogleUpdate.exe A copy of the constant shell. This will look into the registry for
+ the most recently successfully installed version of Omaha, and
+ use the goopdate.dll there.
+```
At this point, the value of the Constant Shell becomes obvious - we can modify or change the location of goopdate.dll, without having to touch GoogleUpdate.exe in most cases. This means that minor changes or bugfixes in Omaha can be pushed out, in the form of an update to goopdate.dll, without triggering a prompt from firewalls, virus scanners, or process whitelisters that may be in place on a machine.
diff --git a/doc/OmahaOverview.html b/doc/OmahaOverview.html
index bc0aedfe2..20b858204 100644
--- a/doc/OmahaOverview.html
+++ b/doc/OmahaOverview.html
@@ -1143,7 +1143,7 @@
The Google Update server is not part of the Omaha open source project. Providing updates for applications requires a server that implements the
-Omaha Server Protocol.