Skip to content
This repository has been archived by the owner on Nov 7, 2024. It is now read-only.

Support non-/usr content if ostree configured with overlayfs #570

Merged
merged 1 commit into from
Dec 11, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion lib/src/cli.rs
Original file line number Diff line number Diff line change
Expand Up @@ -842,7 +842,7 @@ async fn testing(opts: &TestingOpts) -> Result<()> {
TestingOpts::Run => crate::integrationtest::run_tests(),
TestingOpts::RunIMA => crate::integrationtest::test_ima(),
TestingOpts::FilterTar => {
crate::tar::filter_tar(std::io::stdin(), std::io::stdout()).map(|_| {})
crate::tar::filter_tar(std::io::stdin(), std::io::stdout(), false).map(|_| {})
}
}
}
Expand Down
14 changes: 14 additions & 0 deletions lib/src/container/store.rs
Original file line number Diff line number Diff line change
Expand Up @@ -853,6 +853,16 @@ impl ImageImporter {
let target_imgref = self.target_imgref.as_ref().unwrap_or(&self.imgref);
let base_commit = import.ostree_commit_layer.commit.clone().unwrap();

let root_is_transient = {
let rootf = self
.repo
.read_commit(&base_commit, gio::Cancellable::NONE)?
.0;
let rootf = rootf.downcast_ref::<ostree::RepoFile>().unwrap();
crate::ostree_prepareroot::overlayfs_root_enabled(rootf)?
};
tracing::debug!("Base rootfs is transient: {root_is_transient}");

let ostree_ref = ref_for_image(&target_imgref.imgref)?;

let mut layer_commits = Vec::new();
Expand Down Expand Up @@ -881,6 +891,7 @@ impl ImageImporter {
let opts = crate::tar::WriteTarOptions {
base: Some(base_commit.clone()),
selinux: true,
allow_nonusr: root_is_transient,
};
let r =
crate::tar::write_tar(&self.repo, blob, layer.ostree_ref.as_str(), Some(opts));
Expand All @@ -890,7 +901,10 @@ impl ImageImporter {
layer_commits.push(r.commit);
if !r.filtered.is_empty() {
let filtered = HashMap::from_iter(r.filtered.into_iter());
tracing::debug!("Found {} filtered toplevels", filtered.len());
layer_filtered_content.insert(layer.digest().to_string(), filtered);
} else {
tracing::debug!("No filtered content");
}
if let Some(p) = self.layer_progress.as_ref() {
p.send(ImportProgress::DerivedLayerCompleted(layer.layer.clone()))
Expand Down
4 changes: 4 additions & 0 deletions lib/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -39,6 +39,7 @@ pub mod ima;
pub mod keyfileext;
pub(crate) mod logging;
pub mod mountutil;
pub mod ostree_prepareroot;
pub mod refescape;
#[doc(hidden)]
pub mod repair;
Expand All @@ -54,6 +55,9 @@ pub mod objectsource;
pub(crate) mod objgv;
#[cfg(feature = "internal-testing-api")]
pub mod ostree_manual;
#[cfg(not(feature = "internal-testing-api"))]
pub(crate) mod ostree_manual;

pub(crate) mod statistics;

mod utils;
Expand Down
57 changes: 57 additions & 0 deletions lib/src/ostree_prepareroot.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,57 @@
//! Logic related to parsing the ostree-prepare-root.conf configuration
//! file, e.g. to determine whether the target root is configured to use
//! a transient overlayfs or composefs.
// SPDX-License-Identifier: Apache-2.0 OR MIT

use anyhow::{Context, Result};
use camino::Utf8Path;
use glib::Cast;
use ostree::prelude::FileExt;
use ostree::{gio, glib};

use crate::keyfileext::KeyFileExt;
use crate::ostree_manual;

pub(crate) const CONF_PATH: &str = "ostree/prepare-root.conf";

/// Load `ostree/prepare-root.conf` from the target root, preferring the
/// copy under `/etc` over the one under `/usr/lib` (matching the runtime
/// lookup order of ostree-prepare-root itself).
///
/// Returns `Ok(None)` when neither location contains the file; returns an
/// error if a file exists but cannot be read or parsed as a keyfile.
pub(crate) fn load_config(root: &ostree::RepoFile) -> Result<Option<glib::KeyFile>> {
    let cancellable = gio::Cancellable::NONE;
    let keyfile = glib::KeyFile::new();
    for prefix in ["etc", "usr/lib"] {
        let confpath = Utf8Path::new(prefix).join(CONF_PATH);
        let child = root.resolve_relative_path(&confpath);
        if !child.query_exists(cancellable) {
            // Not present at this prefix; try the next one.
            continue;
        }
        // Files inside an ostree commit are RepoFile instances; the
        // downcast cannot fail for a child resolved from a RepoFile root.
        let child = child.downcast_ref::<ostree::RepoFile>().unwrap();
        let contents = ostree_manual::repo_file_read_to_string(child)?;
        keyfile
            .load_from_data(&contents, glib::KeyFileFlags::NONE)
            .with_context(|| format!("Parsing {confpath}"))?;
        tracing::debug!("Loaded {confpath}");
        return Ok(Some(keyfile));
    }
    tracing::debug!("No {CONF_PATH} found");
    Ok(None)
}

/// Query whether the target root is configured for a writable overlay
/// (either `root.transient` or composefs) in its prepare-root.conf.
///
/// Returns `Ok(false)` when no prepare-root.conf exists in the root.
pub(crate) fn overlayfs_root_enabled(root: &ostree::RepoFile) -> Result<bool> {
    match load_config(root)? {
        Some(config) => overlayfs_enabled_in_config(&config),
        None => Ok(false),
    }
}

/// Query whether the config uses an overlayfs model (composefs or plain overlayfs).
pub fn overlayfs_enabled_in_config(config: &glib::KeyFile) -> Result<bool> {
    // `[root] transient = true` requests a plain transient overlayfs root.
    let transient_root = config.optional_bool("root", "transient")?.unwrap_or(false);
    // `[composefs] enabled = yes` likewise implies an overlayfs-backed root.
    // Only the exact string "yes" counts; absence or any other value does not.
    let composefs_enabled = matches!(
        config.optional_string("composefs", "enabled")?.as_deref(),
        Some("yes")
    );
    Ok(transient_root || composefs_enabled)
}
78 changes: 54 additions & 24 deletions lib/src/tar/write.rs
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,10 @@ use std::sync::Arc;
use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite};
use tracing::instrument;

// Exclude things in https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems/
// from being placed in the rootfs.
const EXCLUDED_TOPLEVEL_PATHS: &[&str] = &["run", "tmp", "proc", "sys", "dev"];

/// Copy a tar entry to a new tar archive, optionally using a different filesystem path.
pub(crate) fn copy_entry(
entry: tar::Entry<impl std::io::Read>,
Expand Down Expand Up @@ -62,6 +66,8 @@ pub struct WriteTarOptions {
/// Enable SELinux labeling from the base commit
/// Requires the `base` option.
pub selinux: bool,
/// Allow content not in /usr; this should be paired with ostree rootfs.transient = true
pub allow_nonusr: bool,
}

/// The result of writing a tar stream.
Expand Down Expand Up @@ -97,13 +103,16 @@ fn sepolicy_from_base(repo: &ostree::Repo, base: &str) -> Result<tempfile::TempD
Ok(tempdir)
}

#[derive(Debug)]
#[derive(Debug, PartialEq, Eq)]
enum NormalizedPathResult<'a> {
Filtered(&'a str),
Normal(Utf8PathBuf),
}

fn normalize_validate_path(path: &Utf8Path) -> Result<NormalizedPathResult<'_>> {
fn normalize_validate_path(
path: &Utf8Path,
allow_nonusr: bool,
) -> Result<NormalizedPathResult<'_>> {
// This converts e.g. `foo//bar/./baz` into `foo/bar/baz`.
let mut components = path
.components()
Expand Down Expand Up @@ -140,7 +149,13 @@ fn normalize_validate_path(path: &Utf8Path) -> Result<NormalizedPathResult<'_>>
"var" => {
ret.push("usr/share/factory/var");
}
o => return Ok(NormalizedPathResult::Filtered(o)),
o if EXCLUDED_TOPLEVEL_PATHS.contains(&o) => {
return Ok(NormalizedPathResult::Filtered(part));
}
_ if allow_nonusr => ret.push(part),
_ => {
return Ok(NormalizedPathResult::Filtered(part));
}
}
} else {
ret.push(part);
Expand All @@ -165,6 +180,7 @@ fn normalize_validate_path(path: &Utf8Path) -> Result<NormalizedPathResult<'_>>
pub(crate) fn filter_tar(
src: impl std::io::Read,
dest: impl std::io::Write,
allow_nonusr: bool,
) -> Result<BTreeMap<String, u32>> {
let src = std::io::BufReader::new(src);
let mut src = tar::Archive::new(src);
Expand All @@ -174,6 +190,8 @@ pub(crate) fn filter_tar(

let ents = src.entries()?;

tracing::debug!("Filtering tar; allow_nonusr={allow_nonusr}");

// Lookaside data for dealing with hardlinked files into /sysroot; see below.
let mut changed_sysroot_objects = HashMap::new();
let mut new_sysroot_link_targets = HashMap::<Utf8PathBuf, Utf8PathBuf>::new();
Expand Down Expand Up @@ -241,8 +259,9 @@ pub(crate) fn filter_tar(
}
}

let normalized = match normalize_validate_path(path)? {
let normalized = match normalize_validate_path(path, allow_nonusr)? {
NormalizedPathResult::Filtered(path) => {
tracing::trace!("Filtered: {path}");
if let Some(v) = filtered.get_mut(path) {
*v += 1;
} else {
Expand All @@ -263,14 +282,15 @@ pub(crate) fn filter_tar(
async fn filter_tar_async(
src: impl AsyncRead + Send + 'static,
mut dest: impl AsyncWrite + Send + Unpin,
allow_nonusr: bool,
) -> Result<BTreeMap<String, u32>> {
let (tx_buf, mut rx_buf) = tokio::io::duplex(8192);
// The source must be moved to the heap so we know it is stable for passing to the worker thread
let src = Box::pin(src);
let tar_transformer = tokio::task::spawn_blocking(move || {
let mut src = tokio_util::io::SyncIoBridge::new(src);
let dest = tokio_util::io::SyncIoBridge::new(tx_buf);
let r = filter_tar(&mut src, dest);
let r = filter_tar(&mut src, dest, allow_nonusr);
// Pass ownership of the input stream back to the caller - see below.
(r, src)
});
Expand Down Expand Up @@ -345,7 +365,7 @@ pub async fn write_tar(
let mut child_stdout = r.stdout.take().unwrap();
let mut child_stderr = r.stderr.take().unwrap();
// Copy the filtered tar stream to child stdin
let filtered_result = filter_tar_async(src, child_stdin);
let filtered_result = filter_tar_async(src, child_stdin, options.allow_nonusr);
let output_copier = async move {
// Gather stdout/stderr to buffers
let mut child_stdout_buf = String::new();
Expand Down Expand Up @@ -401,39 +421,49 @@ mod tests {

#[test]
fn test_normalize_path() {
let valid = &[
let valid_all = &[
("/usr/bin/blah", "./usr/bin/blah"),
("usr/bin/blah", "./usr/bin/blah"),
("usr///share/.//blah", "./usr/share/blah"),
("./", "."),
("var/lib/blah", "./usr/share/factory/var/lib/blah"),
("./var/lib/blah", "./usr/share/factory/var/lib/blah"),
("./", "."),
];
for &(k, v) in valid {
let r = normalize_validate_path(k.into()).unwrap();
let valid_nonusr = &[("boot", "./boot"), ("opt/puppet/blah", "./opt/puppet/blah")];
for &(k, v) in valid_all {
let r = normalize_validate_path(k.into(), false).unwrap();
let r2 = normalize_validate_path(k.into(), true).unwrap();
assert_eq!(r, r2);
match r {
NormalizedPathResult::Filtered(o) => {
panic!("Case {} should not be filtered as {}", k, o)
}
NormalizedPathResult::Normal(p) => {
assert_eq!(v, p.as_str());
}
NormalizedPathResult::Normal(r) => assert_eq!(r, v),
NormalizedPathResult::Filtered(o) => panic!("Should not have filtered {o}"),
}
}
let filtered = &[("/boot/vmlinuz", "boot")];
for &(k, v) in filtered {
match normalize_validate_path(k.into()).unwrap() {
NormalizedPathResult::Filtered(f) => {
assert_eq!(v, f);
}
for &(k, v) in valid_nonusr {
let strict = normalize_validate_path(k.into(), false).unwrap();
assert!(
matches!(strict, NormalizedPathResult::Filtered(_)),
"Incorrect filter for {k}"
);
let nonusr = normalize_validate_path(k.into(), true).unwrap();
match nonusr {
NormalizedPathResult::Normal(r) => assert_eq!(r, v),
NormalizedPathResult::Filtered(o) => panic!("Should not have filtered {o}"),
}
}
let filtered = &["/run/blah", "/sys/foo", "/dev/somedev"];
for &k in filtered {
match normalize_validate_path(k.into(), true).unwrap() {
NormalizedPathResult::Filtered(_) => {}
NormalizedPathResult::Normal(_) => {
panic!("{} should be filtered", k)
}
}
}
let errs = &["usr/foo/../../bar"];
for &k in errs {
assert!(normalize_validate_path(k.into()).is_err());
assert!(normalize_validate_path(k.into(), true).is_err());
assert!(normalize_validate_path(k.into(), false).is_err());
}
}

Expand All @@ -451,7 +481,7 @@ mod tests {
let _ = rootfs_tar.into_inner()?;
let mut dest = Vec::new();
let src = tokio::io::BufReader::new(tokio::fs::File::open(rootfs_tar_path).await?);
filter_tar_async(src, &mut dest).await?;
filter_tar_async(src, &mut dest, false).await?;
let dest = dest.as_slice();
let mut final_tar = tar::Archive::new(Cursor::new(dest));
let destdir = &tempd.path().join("destdir");
Expand Down
5 changes: 3 additions & 2 deletions lib/tests/it/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -377,7 +377,8 @@ async fn test_tar_write() -> Result<()> {
let tmpvarlog = tmproot.open_dir("var/log")?;
tmpvarlog.write("foo.log", "foolog")?;
tmpvarlog.write("bar.log", "barlog")?;
tmproot.create_dir("boot")?;
tmproot.create_dir("run")?;
tmproot.write("run/somefile", "somestate")?;
let tmptar = "testlayer.tar";
cmd!(sh, "tar cf {tmptar} -C tmproot .").run()?;
let src = fixture.dir.open(tmptar)?;
Expand All @@ -393,7 +394,7 @@ async fn test_tar_write() -> Result<()> {
.run()?;
assert_eq!(r.filtered.len(), 1);
assert!(r.filtered.get("var").is_none());
assert_eq!(*r.filtered.get("boot").unwrap(), 1);
assert_eq!(*r.filtered.get("run").unwrap(), 2);

Ok(())
}
Expand Down
Loading