diff --git a/doc/README.mld b/doc/README.mld new file mode 100644 index 00000000..016046e6 --- /dev/null +++ b/doc/README.mld @@ -0,0 +1,342 @@ +{0 OBuilder} + +\[![GitHub CI][github-shield]\][github-ci] +[![docs][docs-shield]][docs] +[![OCaml-CI Build Status][ocaml-ci-shield]][ocaml-ci] + +OBuilder takes a build script \(similar to a Dockerfile\) and performs the steps in it in a sandboxed environment. + +After each step, OBuilder uses the snapshot feature of the filesystem \(ZFS or Btrfs\) to store the state of the build. There is also an Rsync backend that copies the build state. On Linux, it uses [runc] to sandbox the build steps, but any system that can run a command safely in a chroot could be used. Repeating a build will reuse the cached results where possible. + +OBuilder can also use Docker as a backend \(fully replacing of [runc] and the snapshotting filesystem\) on any system supported by Docker \(Linux, Windows, …\). + +OBuilder stores the log output of each build step. This is useful for CI, where you may still want to see the output even if the result was cached from some other build. + +At present, the initial base image is fetched from Docker Hub using [docker pull] on Linux and then snapshotted into the store. Other systems use a conceptually similar process with the implementation in each platform section. + +{1 Usage} + +OBuilder is designed to be used as a component of a build scheduler such as [OCluster][]. However, there is also a command-line interface for testing. + +To check that the system is working correctly, you can run a healthcheck. This checks that Docker is running and then does a simple test build \(pulling the [busybox] image if not already present\): + +{[ +$ obuilder healthcheck --store=zfs:tank +Healthcheck passed +]} +To build [example.spec] \(which builds OBuilder itself\) using the ZFS pool [tank] to cache the build results: + +{[ +$ obuilder build -f example.spec . 
--store=zfs:tank +]} +To use Btrfs directory [/mnt/btrfs] for the build cache, use [--store=btrfs:/mnt/btrfs] or specify a directory for Rsync to use [--store=rsync:/rsync]. + +{1 Notes} + +Some operations \(such as deleting btrfs snapshots\) require root access. +OBuilder currently uses [sudo] as necessary for such operations. + +You should only run one instance of the command-line client at a time with +a given store. OBuilder does support concurrent builds, but they must be +performed using a single builder object: + +{ul +{- If you try to perform an operation that is already being performed by another + build, it will just attach to the existing build. + +} +{- The new client will get the logs so far, and then stream new log data as it + arrives. + +} +{- If a client cancels, it just stops following the log. + The operation itself is cancelled if all its clients cancel. + +} +} + +OBuilder calculates a digest of the input files to decide whether a [copy] step +needs to be repeated. However, if it decides to copy the file to the build sandbox, +it does not check the digest again. Also, it only checks that it is not following +symlinks during the initial scan. Therefore, you must not modify the input +files while a build is in progress. + +Failed build steps are not cached. + +Files and directories in the store may have owners and groups that only make sense +in the context of some container. The store should therefore be configured so +that other processes on the host \(which might have the same IDs by coincidence\) +cannot reach them, e.g. by [chmod go-rwx /path/to/store]. + +Sync operations can be very slow, especially on btrfs. They're also +unnecessary, since if the computer crashes then we'll just discard the whole +build and start again. If you have runc version [v1.0.0-rc92] or later, you can +pass the [--fast-sync] option, which installs a seccomp filter that skips all +sync syscalls. 
However, if you attempt to use this with an earlier version of +runc then sync operations will instead fail with [EPERM]. + +{1 The build specification language} + +The spec files are loosely based on the Dockerfile format. +The main difference is that the format uses S-expressions rather than a custom format, +which should make it easier to generate and consume it automatically. + +When performing a build, the user gives OBuilder a specification file \(as described below\), +and a source directory, containing files which may be copied into the image using [copy]. + +{[ +((from BASE) OP…) +]} +Example: + +{[ +((from busybox@sha256:d366a4665ab44f0648d7a00ae3fae139d55e32f9712c67accd604bb55df9d05a) + (shell /bin/sh -c) + (run (shell "echo hello world"))) +]} +[BASE] identifies a Docker image, which will be fetched using [docker pull] and imported into the OBuilder cache. +OBuilder will not check for updates, so [BASE] should include a digest identifying the exact image, as shown above. + +The operations are performed in order. Each operation gets a build context and a filesystem snapshot, and may produce +a new context and a new snapshot. The initial filesystem snapshot is [BASE]. [run] and [copy] operations create new snapshots. + +The initial context is supplied by the user \(see {{: https://github.com/ocurrent/obuilder/blob/master/lib/build.mli} build.mli} for details\). +By default: + +{ul +{- The environment is taken from the Docker configuration of [BASE].} +{- The user is [(uid 0) (gid 0)] on Linux, [(name ContainerAdministrator)] on Windows.} +{- The workdir is [/], [C:/] on Windows.} +{- The shell is [bash -c], [C:\Windows\System32\cmd.exe /S /C] on Windows.} +} + +{2 Multi-stage builds} + +You can define nested builds and use the output from them in [copy] operations. 
+For example: + +{[ +((build dev + ((from ocaml/opam:alpine-3.18-ocaml-5.0) + (user (uid 1000) (gid 1000)) + (workdir /home/opam) + (run (shell "echo 'print_endline {|Hello, world!|}' > main.ml")) + (run (shell "opam exec -- ocamlopt -ccopt -static -o hello main.ml")))) + (from alpine:3.18) + (shell /bin/sh -c) + (copy (from (build dev)) + (src /home/opam/hello) + (dst /usr/local/bin/hello)) + (run (shell "hello"))) +]} +At the moment, the [(build …)] items must appear before the [(from …)] line. + +{2 workdir} + +{[ +(workdir DIR) +]} +Example: + +{[ +(workdir /usr/local) +]} +This operation sets the current working directory used for the following commands, until the next [workdir] operation. +If the path given is relative, it is combined with the previous setting. {b WARNING} Workdir implementation is idiosyncratic +on macOS, use with care. + +{2 shell} + +{[ +(shell ARG…) +]} +Example: + +{[ +(shell /bin/bash -c) +]} +This sets the shell used for future [(run (shell COMMAND))] operations. +The command run will be this list of arguments followed by the single argument [COMMAND]. + +{2 run} + +{[ +(run + (cache CACHE…)? + (network NETWORK…)? + (secrets SECRET…)? + (shell COMMAND)) +]} +Examples: + +{[ +(run (shell "echo hello")) + +(run + (cache (opam-archives (target /home/opam/.opam/download-cache))) + (network host) + (secrets (password (target /secrets/password))) + (shell "opam install utop")) +]} +Runs the single argument [COMMAND] using the values in the current context \(set by [workdir] and [shell]\). + +The [(cache CACHE…)] field can be used to mount one or more persistent caches for the command. +Each [CACHE] takes the form [(NAME (target PATH))], where [NAME] uniquely identifies the cache to use +and [PATH] is the mount point within the container. + +If the cache [NAME] does not yet exist then it is first created as an empty directory, +owned by the user in the build context. +A mutable copy of the cache is created for the command. 
When the command finishes \(whether successful or not\) +this copy becomes the new version of the cache, unless some other command updated the same cache first, in +which case this one is discarded. + +The [(network NETWORK…)] field specifies which network\(s\) the container will be connected to. +[(network host)] is a special value which runs the container in the host's network namespace. +Otherwise, a fresh network namespace is created for the container, with interfaces for the given +networks \(if any\). + +Currently, no other networks can be used, so the only options are [host] or an isolated private network. + +The [(secrets SECRET…)] field can be used to request values for chosen keys, mounted as read-only files in +the image. Each [SECRET] entry is under the form [(ID (target PATH))], where [ID] selects the secret, and +[PATH] is the location of the mounted secret file within the container. +The sandbox context API contains a [secrets] parameter to provide values to the runtime. +If a requested secret isn't provided with a value, the runtime fails. +Use the [--secret ID:PATH] option to provide the path of the file containing the +secret for [ID]. +When used with Docker, make sure to use the {b BuildKit} syntax, as only BuildKit supports a [--secret] option. +\(See {{: https://docs.docker.com/develop/develop-images/build_enhancements/#new-docker-build-secret-information} https://docs.docker.com/develop/develop-images/build_enhancements/#new-docker-build-secret-information}\) + +{2 copy} + +{[ +(copy + (from …)? + (src SRC…) + (dst DST) + (exclude EXCL…)?) +]} +Examples: + +{[ +(copy + (src .) + (dst build/) + (exclude .git _build)) + +(copy + (src platform.ml.linux) + (dst platform.ml)) +]} +This copies files, directories and symlinks from the source directory \(provided by the user when building\) into +the image. If [DST] does not start with [/] then it is relative to the current workdir. 
+ +It has two forms: + +{ul +{- If [DST] ends with [/] then it copies each item in [SRC] to the directory [DST].} +{- Otherwise, it copies the single item [SRC] as [DST].} +} + +Files whose basenames are listed in [exclude] are ignored. +If [exclude] is not given, the empty list is used. +At present, glob patterns or full paths cannot be used here. + +If [(from (build NAME))] is given then the source directory is the root directory of the named nested build. +Otherwise, it is the source directory provided by the user. + +Notes: + +{ul +{- Unlike Docker's [COPY] operation, OBuilder copies the files using the current + user and group IDs, as set with [(user …)]. + +} +{- Both [SRC] and [DST] use [/] as the directory separator on all platforms. + +} +{- The copy is currently done by running [tar] inside the container to receive + the files. Therefore, the filesystem must have a working [tar] binary. On + Windows when using the Docker backend, OBuilder provides a [tar] binary. + +} +{- On Windows, copying from a build step image based on [Nano Server][nanoserver] + isn't supported. + +} +} + +{2 user} + +{[ +(user (uid UID) (gid GID)) +(user (name NAME)) ; on Windows and FreeBSD +]} +Example: + +{[ +(user (uid 1000) (gid 1000)) +]} +This updates the build context to set the user and group IDs used for the following [copy] and [run] commands. +Note that only numeric IDs are supported. + +{2 env} + +{[ +(env NAME VALUE) +]} +Example: + +{[ +(env OPTIONS "-O2 -Wall") +]} +Updates the build context so that the environment variable [NAME] has the value [VALUE] in future [run] operations. + +{1 Convert to Dockerfile format} + +You can convert an OBuilder spec to a Dockerfile like this: + +{[ +$ obuilder dockerfile -f example.spec > Dockerfile +]} +The dockerfile should work the same way as the spec file, except for these limitations: + +{ul +{- In [(copy (excludes …) …)] the excludes part is ignored. + You will need to ensure you have a suitable [.dockerignore] file instead. 
+ +} +{- If you want to include caches or to use secrets, use [--buildkit] to output in the extended BuildKit syntax. + +} +{- All [(network …)] fields are ignored, as Docker does not allow per-step control of + networking. + +} +} + +{1 Other Platforms} + +OBuilder abstracts over a fetching mechanism for the Docker base image, the sandboxing for +the execution of build steps and the store for the cache. +This makes OBuilder extremely portable and there exists FreeBSD, macOS and Windows backends. +The FreeBSD backend uses jails and ZFS, the macOS backend re-uses ZFS and user isolation, and +the Windows backend currently requires Docker for Windows installed. + +{1 Licensing} + +OBuilder is licensed under the Apache License, Version 2.0. +See [LICENSE][] for the full license text. + + +Dockerfile: {{: https://docs.docker.com/engine/reference/builder/} https://docs.docker.com/engine/reference/builder/} +LICENSE: {{: https://github.com/ocurrent/obuilder/blob/master/LICENSE} https://github.com/ocurrent/obuilder/blob/master/LICENSE} +OCluster: {{: https://github.com/ocurrent/ocluster} https://github.com/ocurrent/ocluster} +docs: {{: https://ocurrent.github.io/obuilder/} https://ocurrent.github.io/obuilder/} +docs-shield: {{: https://img.shields.io/badge/doc-online-blue.svg} https://img.shields.io/badge/doc-online-blue.svg} +github-ci: {{: https://github.com/ocurrent/obuilder/actions/workflows/main.yml} https://github.com/ocurrent/obuilder/actions/workflows/main.yml} +github-shield: {{: https://github.com/ocurrent/obuilder/actions/workflows/main.yml/badge.svg} https://github.com/ocurrent/obuilder/actions/workflows/main.yml/badge.svg} +nanoserver: {{: https://hub.docker.com/_/microsoft-windows-nanoserver} https://hub.docker.com/_/microsoft-windows-nanoserver} +ocaml-ci: {{: https://ci.ocamllabs.io/github/ocurrent/obuilder} https://ci.ocamllabs.io/github/ocurrent/obuilder} +ocaml-ci-shield: {{: 
https://img.shields.io/endpoint?url=https%3A%2F%2Fci.ocamllabs.io%2Fbadge%2Focurrent%2Fobuilder%2Fmaster&logo=ocaml} https://img.shields.io/endpoint?url=https%3A%2F%2Fci.ocamllabs.io%2Fbadge%2Focurrent%2Fobuilder%2Fmaster&logo=ocaml} + diff --git a/doc/freebsd.md b/doc/freeBSD.mld similarity index 64% rename from doc/freebsd.md rename to doc/freeBSD.mld index eceb1b44..d57a3317 100644 --- a/doc/freebsd.md +++ b/doc/freeBSD.mld @@ -1,75 +1,77 @@ -# Porting OBuilder to FreeBSD +{0 Porting OBuilder to FreeBSD} -## The problem +{1 The problem} OBuilder is a tool used to perform arbitrary, reproduceable builds of OCaml-related software within a sandboxed environment. It has been written for Linux, with support for Windows and MacOS systems being added later. Porting to FreeBSD is the next logical step, since FreeBSD -(at least on amd64 and arm64 hardware) is a Tier 1 platform in the OCaml +\(at least on amd64 and arm64 hardware\) is a Tier 1 platform in the OCaml ecosystem. -## The challenge +{1 The challenge} Being initially Linux-centric, OBuilder is architected around three major requirements: -- initial build environments are `docker` images. -- sandboxing is performed using the Open Container Initiative tool `runc`. -- a filesystem with snapshot capabilities is needed, and acts as a cache of - identical build steps. +{ul +{- initial build environments are [docker] images.} +{- sandboxing is performed using the Open Container Initiative tool [runc].} +{- a filesystem with snapshot capabilities is needed, and acts as a cache of + identical build steps.} +} -Neither of the first two items are available under FreeBSD (a `docker` client -is available but quite useless as there is no native `docker` server), +Neither of the first two items are available under FreeBSD \(a [docker] client +is available but quite useless as there is no native [docker] server\), therefore alternative solutions must be found. 
As for the filesystem requirement, FreeBSD has been supporting Sun's ZFS filesystem out of the box for many releases now. Fortunately, the existing archicture in OBuilder encapsulates these needs as -`Fetcher`, `Sandbox` and `Store` modules, respectively, so the only work -required would be write FreeBSD-specific `Fetcher` and `Sandbox` modules. +[Fetcher], [Sandbox] and [Store] modules, respectively, so the only work +required would be write FreeBSD-specific [Fetcher] and [Sandbox] modules. -## Porting to FreeBSD +{1 Porting to FreeBSD} -### The fetcher +{2 The fetcher} -An initial attempt was made to fetch Docker images without using the `docker` -command. An existing script, `download-frozen-image`, can be found in the -`moby` Github project (the open source parts of `docker`) to that effect. +An initial attempt was made to fetch Docker images without using the [docker] +command. An existing script, [download-frozen-image], can be found in the +[moby] Github project \(the open source parts of [docker]\) to that effect. -However, although using that script to fetch the various layers of the `docker` +However, although using that script to fetch the various layers of the [docker] image and apply them in order, the result will be useless, from a FreeBSD -perspective, as all the `docker` images available are filled with Linux +perspective, as all the [docker] images available are filled with Linux binaries, which can run under FreeBSD with the help of the compatibility module, but would mislead the OCaml toolchain into believing it is running under Linux, and thus would build Linux binaries. -Until `docker` is available under FreeBSD, there won't be a repository of +Until [docker] is available under FreeBSD, there won't be a repository of FreeBSD images suitable for use for OBuilder. Such images will, at least in the beginning, be built locally in the Tarides CI network. 
It therefore -makes sense to expect `.tar.gz` archives to be available from the CI network, -and simply download and extract them to implement the `Fetcher` module. -Moreover, FreeBSD provides its own `fetch` command which is able to download -files over `http` and `https`, and can also use `file://` URIs, which turned +makes sense to expect [.tar.gz] archives to be available from the CI network, +and simply download and extract them to implement the [Fetcher] module. +Moreover, FreeBSD provides its own [fetch] command which is able to download +files over [http] and [https], and can also use [file://] URIs, which turned out to be very helpful during development. There is currently no attempt to support aliases or canonical names, so all the -`(from ...)` stanza in OBuilder command files will need to be adjusted for use +[(from ...)] stanza in OBuilder command files will need to be adjusted for use with FreeBSD. This limitation can be overcome by pre-populating the OBuilder cache with the most used images under their expected names on the OBuilder worker systems. -### The sandbox +{2 The sandbox} -FreeBSD comes with its own sandboxing mechanism, named `jail`, since the late +FreeBSD comes with its own sandboxing mechanism, named [jail], since the late 1990s. In addition to only having access to a subset of the filesystem, jails can also be denied network access, which fits the OBuilder usage pattern, where network access is only allowed to fetch build dependencies. -In order to start a jail, the `jail` command is invoked with either a plain text +In order to start a jail, the [jail] command is invoked with either a plain text configuration file providing its configuration, or with the configuration -parameters (in the "name=value" form) on its commandline. +parameters \(in the "name=value" form\) on its commandline. In order to keep things simple in OBuilder, and since the jail configuration will only need a few parameters, they are all passed on the commandline. 
This @@ -78,48 +80,52 @@ in the OBuilder command file, reach the FreeBSD command line size limit, but as this limit is a few hundred kilobytes, this does not seem to be a serious concern. -The `jail` invocation will provide: +The [jail] invocation will provide: -- a unique jail name. -- the absolute path of the jail filesystem. -- the command (or shell script) to run in the jail. -- the user on behalf of which the command will be run. This requires the user to - exist within the jail filesystem (`/etc/passwd` and `/etc/group` entries). +{ul +{- a unique jail name.} +{- the absolute path of the jail filesystem.} +{- the command \(or shell script\) to run in the jail.} +{- the user on behalf of which the command will be run. This requires the user to + exist within the jail filesystem \([/etc/passwd] and [/etc/group] entries\).} +} More options may be used to allow for network access, or specify commands to run on the host or within the jail at various states of the jail lifecycle. Also, for processes running under the jail to behave correctly, a stripped-down -`devfs` pseudo-filesystem needs to be mounted on the `/dev` directory within -the jail, and while this can be done automatically by `jail(8)` using the proper -`mount.devfs` option, care must be taken to correctly unmount this directory +[devfs] pseudo-filesystem needs to be mounted on the [/dev] directory within +the jail, and while this can be done automatically by [jail(8)] using the proper +[mount.devfs] option, care must be taken to correctly unmount this directory after the command run within the jail has exited. In order to be sure there will -be no leftover `devfs` mounts, which would prevent removal of the jail -filesystem at cleanup time, an `umount` command is run unconditionally by -OBuilder after the `jail` command exits. 
+be no leftover [devfs] mounts, which would prevent removal of the jail +filesystem at cleanup time, an [umount] command is run unconditionally by +OBuilder after the [jail] command exits. -Lastly, since most, if not all, OBuilder commands will expect a proper `opam` +Lastly, since most, if not all, OBuilder commands will expect a proper [opam] environment configuration, it is necessary to run the commands within a login shell, and such a shell can only be run as root. Therefore the command which will run within the jail is: -``` +{[ /usr/bin/su -l obuilder_user_name -c "cd obuilder_directory && obuilder_command" -``` +]} -# Solving the chicken-and-egg problem +{1 Solving the chicken-and-egg problem} With the fetcher and the sandbox modules written, a complete OBuilder run can be attempted. But in order to do this, two more pieces are needed: -- a base FreeBSD image, to be used by OBuilder runs. -- a native FreeBSD build of OBuilder. +{ul +{- a base FreeBSD image, to be used by OBuilder runs.} +{- a native FreeBSD build of OBuilder.} +} -The base FreeBSD image will require a proper `opam switch` to be created. +The base FreeBSD image will require a proper [opam switch] to be created. The commands used to create it can also be turned into an OBuilder command file, in order to be able to quickly build any particular switch version. -### FreeBSD setup +{2 FreeBSD setup} The process of setting up a FreeBSD system, combined with the use of zfs snapshots, allows various stages of the FreeBSD setup to be archived and @@ -127,158 +133,169 @@ used as starting point for OBuilder operations. We can take snapshots at the following points: -- after an initial simple FreeBSD installation, with a few external packages - required by OBuilder itself added, and an `opam` user created. This will be - known henceforth as "stage0". -- after `opam` has been installed and an initial `opam switch` created. 
This - setup can be directly used as a starting point for OBuilder operations, and - will be known henceforth as "stage1". +{ul +{- after an initial simple FreeBSD installation, with a few external packages + required by OBuilder itself added, and an [opam] user created. This will be + known henceforth as "stage0".} +{- after [opam] has been installed and an initial [opam switch] created. This + setup can be directly used as a starting point for OBuilder operations, and + will be known henceforth as "stage1".} +} -### Optional: setting up a FreeBSD Virtual Machine +{2 Optional: setting up a FreeBSD Virtual Machine} -If no physical machine is available to install FreeBSD on, one may use `qemu` +If no physical machine is available to install FreeBSD on, one may use [qemu] to run a virtual machine instead. Simply download the installation dvd, and setup a disk image: -``` +{[ qemu-img create -f qcow2 da0.qcow2 10G -``` +]} Then launch a virtual machine with a few CPUs and a few gigabytes of memory: -``` +{[ qemu-system-x86_64 \ -smp 4 \ -m 4G \ -drive file=da0.qcow2,format=qcow2 \ -drive file=FreeBSD-13.2-RELEASE-amd64-dvd1.iso,media=cdrom -``` +]} Since the disk image has been freshly created, this will boot into the installation dvd. -### Stage 0 +{2 Stage 0} FreeBSD installation is straightforward. There is nothing special to choose within the installer, except for the use of ZFS as the default filesystem -(which is the default choice nowadays) and, of course, proper network -configuration (although the defaults of DHCP for IPv4 and SLAAC for IPv6 ought -to work in most networks). +\(which is the default choice nowadays\) and, of course, proper network +configuration \(although the defaults of DHCP for IPv4 and SLAAC for IPv6 ought +to work in most networks\). The default ZFS setup creates one single pool for the disk, and separate filesystems in it. 
OBuilder will however require its own work pool, so the -default ZFS settings ("guided root on zfs" disk layout) in the installer can't +default ZFS settings \("guided root on zfs" disk layout\) in the installer can't be used. It will be easier to build base images anyway if there is no separate -zfs pool for `/usr/home`. +zfs pool for [/usr/home]. In order to save space, one might want to disable all optional system components when asked which ones to install. After the installation completes and the system reboots, one may log in as root. -A quick check of `/etc/rc.conf` should confirm that: +A quick check of [/etc/rc.conf] should confirm that: -- there is a `zfs_enable="YES"` line. -- network configuration is similar to - ``` +{ul +{- there is a [zfs_enable="YES"] line.} +{- network configuration is similar to + {[ ifconfig_DEFAULT="DHCP inet6 accept_rtadv" - ``` - or - ``` + +]} + + or + {[ ifconfig_em0="DHCP" ifconfig_em0_ipv6="inet6 accept_rtadv" - ``` + +]} +} +} -At this point, it is possible to add an `opam` user with `adduser`: +At this point, it is possible to add an [opam] user with [adduser]: -``` +{[ echo 'opam:1000:::::::/bin/sh:' | adduser -q -w random -f - -``` -(that's seven colons between the numerical uid and the shell.) +]} +\(that's seven colons between the numerical uid and the shell.\) -Note that the assigned random password of the `opam` user won't be displayed, +Note that the assigned random password of the [opam] user won't be displayed, but this does not really matter since all uses of this account will be performed -through `/usr/bin/su -l opam`. +through [/usr/bin/su -l opam]. A few packages need to be installed at this point: -``` +{[ pkg install -y bash curl git gmake patch rsync sudo zstd -``` +]} -Note that `zstd` is an optional dependency when building an OCaml 4 switch, +Note that [zstd] is an optional dependency when building an OCaml 4 switch, but a required dependency when building an OCaml 5 switch. 
-Now that `sudo` has been installed, it should be configured to let the `opam` -user be able to use `sudo` for any command without entering any password, as +Now that [sudo] has been installed, it should be configured to let the [opam] +user be able to use [sudo] for any command without entering any password, as OBuilder depends on this: -``` +{[ echo "opam ALL=(ALL:ALL) NOPASSWD: ALL" > /usr/local/etc/sudoers.d/opam chmod 440 /usr/local/etc/sudoers.d/opam -``` +]} It is then time to take a snapshot of the system. Assuming the name of the -zfs pool for the root directory is `zroot`, run: +zfs pool for the root directory is [zroot], run: -``` +{[ zfs snapshot zroot@stage0 -``` +]} The first snapshot can be cloned in order to build a filesystem archive of -`stage0`, suitable to be used by OBuilder. +[stage0], suitable to be used by OBuilder. -``` +{[ zfs clone zroot@stage0 zroot/clone -``` +]} However, some of the files have been made immutable during the installation -(for security reasons), and would cause errors while attempting to clean up, +\(for security reasons\), and would cause errors while attempting to clean up, so it is preferrable to remove these flags: -``` +{[ chflags -R 0 /zroot/clone -``` +]} Then, the image can be made significantly smaller by removing several file sets: -- rescue binaries and kernel modules (won't be used by OBuilder) -- profiling libraries -- manual pages -``` +{ul +{- rescue binaries and kernel modules \(won't be used by OBuilder\)} +{- profiling libraries} +{- manual pages} +} + +{[ cd /zroot/clone rm -fR rescue boot usr/share/games rm usr/bin/fortune usr/lib/lib*_p.a find usr/share/man -type f -delete cd / -``` +]} -The last step consists of setting the usual permissions on the `/tmp` directory, +The last step consists of setting the usual permissions on the [/tmp] directory, since there will be no specific filesystem mounted there. 
-``` +{[ chmod 1777 /zroot/clone/tmp -``` +]} -It is now possible to create the `stage0` archive and destroy the snapshot +It is now possible to create the [stage0] archive and destroy the snapshot clone: -``` +{[ mkdir /archive tar -C /zroot/clone -czf /archive/stage0.tar.gz . zfs destroy zroot/clone -``` +]} -The warnings regarding the inability to archive sockets in `/var` can be safely +The warnings regarding the inability to archive sockets in [/var] can be safely ignored. -### Stage 1 +{2 Stage 1} -Once an archive of `stage0` is available, it is possible to install `opam` and +Once an archive of [stage0] is available, it is possible to install [opam] and build an initial switch with the following OBuilder command file: -``` +{[ ((build dev ((from file:///path/to/stage0.tar.gz) (workdir /home/opam) @@ -307,18 +324,18 @@ build an initial switch with the following OBuilder command file: ; nothing to do after build, but a valid from stanza is required (from file:///path/to/stage0.tar.gz) ) -``` +]} The first time, of course, OBuilder is not available yet, so these commands need to be run manually: -``` +{[ /usr/bin/su -l opam -``` +]} Then: -``` +{[ fetch -q https://github.com/ocaml/opam/releases/download/2.1.4/opam-full-2.1.4.tar.gz && tar xzf opam-full-2.1.4.tar.gz && cd opam-full-2.1.4 && @@ -331,13 +348,13 @@ opam init -y -a --bare . ~/.opam/opam-init/init.sh opam switch create 4.14.1 exit -``` +]} Once this is done, stage1 is complete; a zfs snapshot can be created, then a new archive can be built; one may consider removing sources in order to make the archive smaller. -``` +{[ rm /usr/home/opam/opam-full-2.1.4.tar.gz rm -fR /usr/home/opam/opam-full-2.1.4 rm -fR /usr/home/opam/.opam/*/.opam-switch/sources @@ -350,14 +367,14 @@ tar -C /usr/home -cf - opam | tar -C ./usr/home -xpf - tar czf /archive/stage1.tar.gz . 
cd /archive rm -fR stage1 -``` +]} -### Building OBuilder under FreeBSD +{2 Building OBuilder under FreeBSD} -This step is quite straightforward (and will be even simpler once the FreeBSD -support bits are merged into the main repository): +This step is quite straightforward \(and will be even simpler once the FreeBSD +support bits are merged into the main repository\): -``` +{[ pkg install -y pkgconf sqlite3 /usr/bin/su -l opam git clone https://github.com/dustanddreams/obuilder.git @@ -368,12 +385,12 @@ opam install -y dune opam install -y --deps-only -t obuilder opam install -y crunch extunix fpath # missed by the above step dune build && dune install -``` +]} -or, using an existing OBuilder setup, adapted from `obuilder.spec` found in +or, using an existing OBuilder setup, adapted from [obuilder.spec] found in the OBuilder source repository: -``` +{[ ((build dev ((from file:///path/to/stage1.tar.gz) (workdir /src) @@ -405,33 +422,36 @@ the OBuilder source repository: (src /src/_build/default/main.exe) (dst /usr/local/bin/obuilder)) (run (shell "obuilder --help"))) -``` +]} -## Integrating with OCluster +{1 Integrating with OCluster} OCluster is a larger framework which processes build requests on a cluster of systems, each running OBuilder. In order to make the FreeBSD systems compatible with OCluster needs, a few more adjustments are necessary: -- a `docker` client needs to be installed on the OBuilder machine, even if it - will not be used, as part of OCluster checks. Fortunately the `docker` client - is available as a FreeBSD package, `pkg install -y docker` will do. -- an `obuilder` zfs pool needs to be created (on a separate disk or a separate - partition). +{ul +{- a [docker] client needs to be installed on the OBuilder machine, even if it + will not be used, as part of OCluster checks. 
Fortunately the [docker] client + is available as a FreeBSD package, [pkg install -y docker] will do.} +{- an [obuilder] zfs pool needs to be created \(on a separate disk or a separate + partition\).} +} -In addition to this, the `stage0` and `stage1` snapshots can be used to set up -an initial image cache on each OBuilder worker machine, using `zfs send` on the -source machine and `zfs recv` on the build machine, to make the `stage0` and -`stage1` snapshots available as `/obuilder/base-image/busybox` and -`/obuilder/base-image/ocaml-4.14.1` respectively. +In addition to this, the [stage0] and [stage1] snapshots can be used to set up +an initial image cache on each OBuilder worker machine, using [zfs send] on the +source machine and [zfs recv] on the build machine, to make the [stage0] and +[stage1] snapshots available as [/obuilder/base-image/busybox] and +[/obuilder/base-image/ocaml-4.14.1] respectively. Since the cache will take precedence over the fetcher action, this will allow OBuilder spec files to keep referring to the names from the docker registry. - -## Conclusion +{1 Conclusion} The modular design of OBuilder has allowed for it to be easily adapted to run under FreeBSD. A few FreeBSD systems are currently being set up as OBuilder workers within the OCluster orchestrator used by Tarides for automated OCaml package testing, and will hopefully benefit the OCaml FreeBSD community. + + diff --git a/doc/macOS.mld b/doc/macOS.mld new file mode 100644 index 00000000..1a81e2b7 --- /dev/null +++ b/doc/macOS.mld @@ -0,0 +1,59 @@ +{0 Experimental macOS Support} + +The macOS backend uses the "user" as the unit of abstraction for sandboxing. That is, for each build a new user is created. +This user inherits a home-directory from the store which may come from previous builds using the storage backend. + +A macOS base image is really just a home directory and only requires one file to work, an [.obuilder_profile.sh]. 
+This is sourced every time a command is run and can be useful for setting paths for a given type of build. + +For [spec]s that only need local, per-user access this is great but for quite a few builds we also need external system dependencies. +On macOS a vast majority of users do this using homebrew. Homebrew installs system dependencies into [/usr/local] using +pre-built binaries \(a.k.a bottles\). It can be placed elsewhere but will often then build from source. + +For OBuilder this means our per-user builds will break if they are all fighting over the global homebrew, so instead OBuilder does the following: + +{ul +{- On macOS we require a scoreboard directory in which we record a symlink that associates a users identifier \([uid]\) to the same user's current home directory.} +{- Another tool, {{: https://github.com/patricoferris/obuilder-fs} obuilderfs}, provides a [FUSE][] filesystem that rewrites access to a given + directory \(here [/usr/local]\) to where the symlink points to in the scoreboard directory.} +{- A set of {{: https://github.com/patricoferris/macos-infra/tree/main/scripts} scripts} allows us to initialise homebrew in a base image and use + this in the [(from )] stage of our builds.} +} + +The goal of the experimental macOS backend was to see how far we get without introducing any virtualisation. It is not intended to be used like the +runc-Linux backend because it requires a lot more manual setup and care to be taken. + +{1 Running the macOS backend with ZFS} + +In order to run the macOS backend to build a very simple [spec] \(one that doesn't require the FUSE filesystem\) you will need to: + +{ul +{- Install [openZFSonOSX][] and it should be [2.0] or later \(this is when automatic snapshot mounting was added\).} +{- Create the "base image" as a directory in [/Users] i.e [sudo mkdir /Users/empty] and add an empty [.obuilder_profile.sh] to that directory. 
Note this is if you are using the [User_temp] fetcher module as opposed to the [Docker] one.} +{- To get a ZFS pool quickly without partitioning your disk you can run [mkfile 128m ] and then [sudo zpool create tank ].} +{- Create a dummy [obuilderfs] binary that is in your [PATH], this can just be an empty shell-script.} +{- From the root of this project run: [sudo dune exec -- obuilder macos . --store=zfs:/Volumes/tank -f example.macos.spec --uid=705 --fallback=/tmp --scoreboard=/tmp]. + Because we are not running the FUSE filesystem the [fallback] and [scoreboard] directories should be somewhere you don't mind being written to but they won't + actually be used.} +} + +{1 Running the macOS backend with rsync} + +This requires much less setup at the cost of it being very slow in comparison to snap-shotting filesystems. All you need to do is create a directory somewhere for the "snapshots" to be written and pass this as [--rsync:/path/to/dir]. Of course, you will need [rsync] installed. + +{1 Docker images for macOS} + +As mentioned elsewhere, OBuilder also abstracts the initial [FETCHER] whose job it is, is to interpret the [(from )] stage and setup the base image in a given directory. When run on Linux this uses docker and exports the image and untars the filesystem into the directory. The same can be done for macOS! You can create a directory locally and then use the following dockerfile + +{[ +FROM scratch +COPY / +CMD [ "/bin/bash" ] +]} + +Note the [CMD] is important for the internal [docker create] call to not fail. 
+ + +FUSE: {{: https://osxfuse.github.io/} https://osxfuse.github.io/} +openZFSonOSX: {{: https://openzfsonosx.org/wiki/Downloads#2.1.0} https://openzfsonosx.org/wiki/Downloads#2.1.0} + diff --git a/doc/windows.mld b/doc/windows.mld new file mode 100644 index 00000000..c0f93996 --- /dev/null +++ b/doc/windows.mld @@ -0,0 +1,247 @@ +{0 OBuilder's Docker backend} + +> OBuilder takes a build script \(similar to a Dockerfile\) and performs +> the steps in it in a sandboxed environment. +> +> After each step, OBuilder uses the snapshot feature of the +> filesystem to store the state of the build. […] Repeating a build +> will reuse the cached results where possible. +> +> +> BSD tar: {{: https://ss64.com/nt/tar.html} https://ss64.com/nt/tar.html} +> Esperanto: {{: https://github.com/dinosaure/esperanto} https://github.com/dinosaure/esperanto} +> VSS: {{: https://learn.microsoft.com/en-us/windows-server/storage/file-server/volume-shadow-copy-service} https://learn.microsoft.com/en-us/windows-server/storage/file-server/volume-shadow-copy-service} +> WinBtrfs: {{: https://github.com/maharmstone/btrfs} https://github.com/maharmstone/btrfs} +> ^1: {{: maybe} maybe} +> docker cp: {{: https://docs.docker.com/engine/reference/commandline/cp/} https://docs.docker.com/engine/reference/commandline/cp/} +> mounting volumes: {{: https://docs.docker.com/storage/volumes/} https://docs.docker.com/storage/volumes/} +> runc: {{: https://github.com/opencontainers/runc} https://github.com/opencontainers/runc} +> windowscontainers: {{: https://learn.microsoft.com/en-us/virtualization/windowscontainers/about/} https://learn.microsoft.com/en-us/virtualization/windowscontainers/about/} + +{1 Motivation} + +Windows offers [native containers][windowscontainers] for sandboxing. +Finding a snapshotting filesystem might be more involved, there's +[Volume Shadow Copy Service (VSS)][VSS] and [WinBtrfs][]. 
There's
+however no direct API for these services, they're not stable yet and
+have few users, and composing them seems a rash endeavour.
+
+The choice was made to use Docker as a sandboxing and storage solution
+for OBuilder. Docker has considerably more users, and hides some of the
+complexity of interfacing with the operating system.
+
+Docker being a portable system \(with some caveats\), the OBuilder
+Docker backend can itself be run in theory over any system Docker
+supports. Using native components, wherever available, should be
+preferred. On Windows, Docker can run sandboxed applications using
+either containerization or virtual machines \(VM\) with Hyper-V. On
+macOS Docker currently works using virtual machines.
+The virtualization layer makes it more costly to run code, compared to
+containerization under Linux. Virtual machines provide more effective
+isolation and stability under Windows, prompting OBuilder to default
+to VMs.
+
+{1 Comparing Docker and native backends}
+
+The distinction between the sandboxing engine and the storage layer in
+OBuilder doesn't exactly map to the Docker backend, as both sandbox
+and store are provided by Docker and can't be swapped out for another
+implementation. As such, OBuilder will now differentiate between a
+{e native} sandboxing solution, such as [runc] under Linux, coupled
+with a storage engine, and the Docker backend, providing all-in-one
+sandbox and storage.
+
+The underlying [Store] and [Sandbox] modules can't be as decoupled
+either with the Docker backend, they need to share more information.
+This distinction is however useful enough for modularity that it is
+retained.
+
+The main difference resides in the fact that with the usual native
+sandbox and storage solution, OBuilder is totally in charge: it
+creates its own build identifiers, manages the filesystem, and spawns
+containers. With the Docker backend however, Docker is the second
+player of the game. 
Docker has its own view of the global state,
+assigns its own identifiers to images and containers. Extra care must
+be taken to ensure that OBuilder and Docker have a consistent view of
+objects they're tracking.
+
+Objects residing in the file system are "namespaced" using OBuilder's
+state directory; with the Docker backend a small unique identifier for
+each running OBuilder process is computed based on the instance
+working directory, and makes up a prefix for all Docker objects
+managed by this instance. This allows tracking objects more easily,
+and making a clean sweep of any left-overs.
+
+Another notable difference is that with runc and traditional
+filesystems, OBuilder can use tools from the host filesystem, for
+instance to copy or compress files, as well as tools from inside the
+guest filesystem, choosing or not to run them in the sandbox. With the
+Docker backend, guest data isn't accessible from the host, thus tools
+must be present in the guest image, or mounted in volumes in running
+containers to operate on data in guest images.
+
+{1 OBuilder operation}
+
+Using volumes is oftentimes problematic as standard users of the host
+don't have read/write permissions on them by default, which involves
+some system administration to set up. It's difficult to retain the
+settings, and difficult to port, which is why OBuilder's Docker
+backend tries to refrain from using Docker volumes as much as
+possible, or only interact with them whilst mounted in containers.
+
+{1 Copying files in & out of guests}
+
+There are two modes of operation for copying files in OBuilder: from the
+context \(the host filesystem\), or from a previous build stage. As
+Docker images are not directly writable from the host filesystem, this
+involves communicating the data to a running container. The data could
+either be given through a mounted volume, with [docker cp],
+or with a container executing [tar]. 
Volume management is hard,
+[docker cp] fails on some files in Windows. For stability, we prefer
+using a container and tar files \(preserving permissions, file
+attributes, and allowing easy rewrite of paths\). In some cases, it is
+necessary to back up the permissions of the destination directory to
+restore them after the tarball's extraction.
+
+Creating a tar file in OBuilder involves creating a {e manifest}. It's
+a tree data structure describing the file hierarchy, with node types
+\(file, directory, symlink\), names, and checksum for file content. The
+manifest is generated in a fully-reproducible way, so that its
+checksum can uniquely identify the data being copied, in order to
+cache the copy step.
+
+Copying files from a previous build step is a bit more involved, as
+once again the host doesn't have direct read access to the content of
+Docker images. A solution could have been to run the manifest creation
+code of OBuilder itself in a container, by mounting a volume
+containing a simple OCaml executable with this code. It would sadly
+be difficult to accomplish[^1] in the general case, as the OCaml
+executable would need to correspond to the Docker image \(arch, glibc\).
+The choice was made instead to port the manifest creation code,
+originally in OCaml, to bash. It produces the same output and errors.
+It is assumed that Linux distributions ship a bash interpreter, and
+tar. For Windows, OBuilder starts by creating a volume, nicknamed
+{e obuilder-libexec}, in which it copies the shell script, and necessary
+tools from Cygwin to execute it \(the shell executable, some coreutils,
+tar\). OBuilder can then run a container based on the source image,
+with the {e libexec} volume mounted read-only, to create and output the
+manifest. 
After the manifest is created, OBuilder calls [tar] in the
+same fashion to extract data from the previous image, rewrites the tar
+headers with the correct destination on-the-fly, and pipes the result
+to the destination container, running tar in extraction mode, reading
+from stdin.
+
+Windows 10 ships [BSD tar], but it doesn't understand symlinks.
+
+\(Perhaps this could be worked around with [Esperanto]?\)
+
+{1 OBuilder's snapshots and caches}
+
+When OBuilder executes a build step {e B} for the first time with a
+snapshotting filesystem, it'll first look up or fetch the base image
+{e A} of {e B}. OBuilder then creates a snapshot {e B'} of {e A}, and executes
+the build using {e B'}. If the build step succeeds, {e B'} is promoted as
+{e B}; if not, {e B'} is discarded.
+
+Using the Docker backend, this resolves to checking whether a Docker
+image {e B} associated with an OBuilder build exists. If not, tag {e A} as
+{e tmp-B}, and run the build {e B} in a Docker container with the tag
+{e tmp-B}. If it succeeds, {e tmp-B} can be committed as the Docker image
+{e B}, then {e tmp-B} is condemned to {e damnatio memoriae}. Special care
+must be taken as committing the container replaces the {e entrypoint}
+and {e cmd} fields of the Docker image by the commands given to run the
+container. This is usually not intended, so these fields are retrieved
+and restored from the base image.
+
+Below is a sample build script and OBuilder logs, run on Windows.
+
+{[
+((from mcr.microsoft.com/windows/servercore:ltsc2022) ; A
+ (run (shell "echo hello > world")) ; B
+ (run (shell "type world"))) ; C
+]}
+
+{[
+$ obuilder build -f simple.spec --store=docker:./var --docker-cpus=8 --docker-memory=4g -v . 
+]} + +{[ +obuilder.exe: [INFO] Exec "docker" "container" "ls" "--all" "--filter" "name=^obuilder-3b98949" "-q" +obuilder.exe: [INFO] Removing left-over Docker images +obuilder.exe: [INFO] Exec "docker" "images" "--format={{ .Repository }}" "obuilder-3b98949-image-tmp-*" +obuilder.exe: [INFO] Removing left-over Docker volumes +obuilder.exe: [INFO] Exec "docker" "volume" "ls" "--quiet" "--filter" "name=^obuilder-3b98949-cache-tmp-" +obuilder.exe: [INFO] Exec "docker" "volume" "inspect" "--" "obuilder-3b98949-libexec" +(from mcr.microsoft.com/windows/servercore:ltsc2022) +obuilder.exe: [INFO] Exec "docker" "inspect" "--type=image" "--" "obuilder-3b98949-image-bc3bc8408e84c12c2b5f24aa91b444894b55e26069b66e8034890634b08aef1d" +Error: No such image: obuilder-3b98949-image-bc3bc8408e84c12c2b5f24aa91b444894b55e26069b66e8034890634b08aef1d +obuilder.exe: [INFO] Base image not present; importing "mcr.microsoft.com/windows/servercore:ltsc2022"… +obuilder.exe: [INFO] Exec "docker" "pull" "mcr.microsoft.com/windows/servercore:ltsc2022" +ltsc2022: Pulling from windows/servercore +Digest: sha256:3949614905ddf2c4451b18894563c36f0c0aa93ab0e17ea6f8ca3791313e4e4f +Status: Image is up to date for mcr.microsoft.com/windows/servercore:ltsc2022 +mcr.microsoft.com/windows/servercore:ltsc2022 +obuilder.exe: [INFO] Exec "docker" "tag" "mcr.microsoft.com/windows/servercore:ltsc2022" "obuilder-3b98949-image-bc3bc8408e84c12c2b5f24aa91b444894b55e26069b66e8034890634b08aef1d" +---> saved as "bc3bc8408e84c12c2b5f24aa91b444894b55e26069b66e8034890634b08aef1d" +C:/: (run (shell "echo hello > world")) +obuilder.exe: [INFO] Exec "docker" "inspect" "--type=image" "--" "obuilder-3b98949-image-ac4488b2ca69de829c9a8bbcd9efa2ddff493a3b5888a53ec20a1343ea34b2bd" +Error: No such image: obuilder-3b98949-image-ac4488b2ca69de829c9a8bbcd9efa2ddff493a3b5888a53ec20a1343ea34b2bd +obuilder.exe: [INFO] Exec "docker" "tag" "obuilder-3b98949-image-bc3bc8408e84c12c2b5f24aa91b444894b55e26069b66e8034890634b08aef1d" 
"obuilder-3b98949-image-tmp-ac4488b2ca69de829c9a8bbcd9efa2ddff493a3b5888a53ec20a1343ea34b2bd" +obuilder.exe: [INFO] Exec "docker" "inspect" "--type=container" "--" "obuilder-3b98949-container-ac4488b2ca69de829c9a8bbcd9efa2ddff493a3b5888a53ec20a1343ea34b2bd" +Error: No such container: obuilder-3b98949-container-ac4488b2ca69de829c9a8bbcd9efa2ddff493a3b5888a53ec20a1343ea34b2bd +obuilder.exe: [INFO] Exec "docker" "run" "-i" "--name" "obuilder-3b98949-container-ac4488b2ca69de829c9a8bbcd9efa2ddff493a3b5888a53ec20a1343ea34b2bd" "--cpus" "8.000000" "--isolation" "hyperv" "--hostname" "builder" "--workdir" "C:/" "--entrypoint" "cmd" "--memory" "4g" "--user" "ContainerAdministrator" "obuilder-3b98949-image-tmp-ac4488b2ca69de829c9a8bbcd9efa2ddff493a3b5888a53ec20a1343ea34b2bd" "/S" "/C" "echo hello > world" +obuilder.exe: [INFO] Exec "docker" "inspect" "--type=image" "--format={{json .Config.Entrypoint }}" "--" "obuilder-3b98949-image-tmp-ac4488b2ca69de829c9a8bbcd9efa2ddff493a3b5888a53ec20a1343ea34b2bd" +obuilder.exe: [INFO] Exec "docker" "inspect" "--type=image" "--format={{json .Config.Cmd }}" "--" "obuilder-3b98949-image-tmp-ac4488b2ca69de829c9a8bbcd9efa2ddff493a3b5888a53ec20a1343ea34b2bd" +obuilder.exe: [INFO] Exec "docker" "commit" "--change=CMD ["c:\\windows\\system32\\cmd.exe"]" "--" "obuilder-3b98949-container-ac4488b2ca69de829c9a8bbcd9efa2ddff493a3b5888a53ec20a1343ea34b2bd" "obuilder-3b98949-image-ac4488b2ca69de829c9a8bbcd9efa2ddff493a3b5888a53ec20a1343ea34b2bd" +obuilder.exe: [INFO] Exec "docker" "rm" "--force" "--" "obuilder-3b98949-container-ac4488b2ca69de829c9a8bbcd9efa2ddff493a3b5888a53ec20a1343ea34b2bd" +sha256:31d1fcc968e21a34fca97a73b56500b0e0208df9c8be60f5eed8369f107878ab +obuilder.exe: [INFO] Exec "docker" "image" "rm" "obuilder-3b98949-image-tmp-ac4488b2ca69de829c9a8bbcd9efa2ddff493a3b5888a53ec20a1343ea34b2bd" +obuilder-3b98949-container-ac4488b2ca69de829c9a8bbcd9efa2ddff493a3b5888a53ec20a1343ea34b2bd +Untagged: 
obuilder-3b98949-image-tmp-ac4488b2ca69de829c9a8bbcd9efa2ddff493a3b5888a53ec20a1343ea34b2bd:latest +---> saved as "ac4488b2ca69de829c9a8bbcd9efa2ddff493a3b5888a53ec20a1343ea34b2bd" +C:/: (run (shell "type world")) +obuilder.exe: [INFO] Exec "docker" "inspect" "--type=image" "--" "obuilder-3b98949-image-7332a0565a4047bdd2c0b778bf3a9175518218879547eb8ddde6832d57861153" +Error: No such image: obuilder-3b98949-image-7332a0565a4047bdd2c0b778bf3a9175518218879547eb8ddde6832d57861153 +obuilder.exe: [INFO] Exec "docker" "tag" "obuilder-3b98949-image-ac4488b2ca69de829c9a8bbcd9efa2ddff493a3b5888a53ec20a1343ea34b2bd" "obuilder-3b98949-image-tmp-7332a0565a4047bdd2c0b778bf3a9175518218879547eb8ddde6832d57861153" +obuilder.exe: [INFO] Exec "docker" "inspect" "--type=container" "--" "obuilder-3b98949-container-7332a0565a4047bdd2c0b778bf3a9175518218879547eb8ddde6832d57861153" +Error: No such container: obuilder-3b98949-container-7332a0565a4047bdd2c0b778bf3a9175518218879547eb8ddde6832d57861153 +obuilder.exe: [INFO] Exec "docker" "run" "-i" "--name" "obuilder-3b98949-container-7332a0565a4047bdd2c0b778bf3a9175518218879547eb8ddde6832d57861153" "--cpus" "8.000000" "--isolation" "hyperv" "--hostname" "builder" "--workdir" "C:/" "--entrypoint" "cmd" "--memory" "4g" "--user" "ContainerAdministrator" "obuilder-3b98949-image-tmp-7332a0565a4047bdd2c0b778bf3a9175518218879547eb8ddde6832d57861153" "/S" "/C" "type world" +hello +obuilder.exe: [INFO] Exec "docker" "inspect" "--type=image" "--format={{json .Config.Entrypoint }}" "--" "obuilder-3b98949-image-tmp-7332a0565a4047bdd2c0b778bf3a9175518218879547eb8ddde6832d57861153" +obuilder.exe: [INFO] Exec "docker" "inspect" "--type=image" "--format={{json .Config.Cmd }}" "--" "obuilder-3b98949-image-tmp-7332a0565a4047bdd2c0b778bf3a9175518218879547eb8ddde6832d57861153" +obuilder.exe: [INFO] Exec "docker" "commit" "--change=CMD ["c:\\windows\\system32\\cmd.exe"]" "--change=ENTRYPOINT ["cmd"]" "--" 
"obuilder-3b98949-container-7332a0565a4047bdd2c0b778bf3a9175518218879547eb8ddde6832d57861153" "obuilder-3b98949-image-7332a0565a4047bdd2c0b778bf3a9175518218879547eb8ddde6832d57861153" +obuilder.exe: [INFO] Exec "docker" "rm" "--force" "--" "obuilder-3b98949-container-7332a0565a4047bdd2c0b778bf3a9175518218879547eb8ddde6832d57861153" +sha256:fa67558f979026a08c63c56215253bec59da6d7dff67bf083fb580f96fe1a820 +obuilder-3b98949-container-7332a0565a4047bdd2c0b778bf3a9175518218879547eb8ddde6832d57861153 +obuilder.exe: [INFO] Exec "docker" "image" "rm" "obuilder-3b98949-image-tmp-7332a0565a4047bdd2c0b778bf3a9175518218879547eb8ddde6832d57861153" +Untagged: obuilder-3b98949-image-tmp-7332a0565a4047bdd2c0b778bf3a9175518218879547eb8ddde6832d57861153:latest +---> saved as "7332a0565a4047bdd2c0b778bf3a9175518218879547eb8ddde6832d57861153" +Got: "7332a0565a4047bdd2c0b778bf3a9175518218879547eb8ddde6832d57861153" +]} + +There's also the shared build cache which can be used to mount one or +more persistent caches for the command. It is also usually implemented +with the snapshotting filesystem. With Docker, this feature is +implemented by [mounting volumes][] in Docker containers. They have +the major disadvantage that there's no copy-on-write or snapshotting +available for volumes. They first have to be copied, and testing has +proved that copying on the host is unreliable because of permissions, +so the source volume is tar'ed in a container, and the tar is streamed +into a second container extracting it to the destination volume. + +A piece of advice: if you try to implement any feature with Docker on +Windows, make sure it works first in a shell script, if possible. 
+ +{e Mettez Docker à l'ouvrage!} + + +BSD tar: {{: https://ss64.com/nt/tar.html} https://ss64.com/nt/tar.html} +Esperanto: {{: https://github.com/dinosaure/esperanto} https://github.com/dinosaure/esperanto} +VSS: {{: https://learn.microsoft.com/en-us/windows-server/storage/file-server/volume-shadow-copy-service} https://learn.microsoft.com/en-us/windows-server/storage/file-server/volume-shadow-copy-service} +WinBtrfs: {{: https://github.com/maharmstone/btrfs} https://github.com/maharmstone/btrfs} +^1: {{: maybe} maybe} +docker cp: {{: https://docs.docker.com/engine/reference/commandline/cp/} https://docs.docker.com/engine/reference/commandline/cp/} +mounting volumes: {{: https://docs.docker.com/storage/volumes/} https://docs.docker.com/storage/volumes/} +runc: {{: https://github.com/opencontainers/runc} https://github.com/opencontainers/runc} +windowscontainers: {{: https://learn.microsoft.com/en-us/virtualization/windowscontainers/about/} https://learn.microsoft.com/en-us/virtualization/windowscontainers/about/} + diff --git a/dune b/dune index 7d3a7c3c..c94366f5 100644 --- a/dune +++ b/dune @@ -5,17 +5,18 @@ (preprocess (pps ppx_deriving.show)) (libraries lwt lwt.unix fmt fmt.cli fmt.tty tar-unix obuilder cmdliner logs.fmt logs.cli)) -(rule - (targets README.mld macOS.mld windows.mld freebsd.mld) - (deps README.md doc/macOS.md doc/windows.md doc/freebsd.md) - (action - (progn - (with-stdout-to README.mld (run md2mld README.md)) - (with-stdout-to macOS.mld (run md2mld doc/macOS.md)) - (with-stdout-to freebsd.mld (run md2mld doc/freebsd.md)) - (with-stdout-to windows.mld (run md2mld doc/windows.md))))) -(copy_files doc/index.mld) +; (rule +; (targets README.mld macOS.mld windows.mld freebsd.mld) +; (deps README.md doc/macOS.md doc/windows.md doc/freebsd.md) +; (action +; (progn +; (with-stdout-to README.mld (run md2mld README.md)) +; (with-stdout-to macOS.mld (run md2mld doc/macOS.md)) +; (with-stdout-to freebsd.mld (run md2mld doc/freebsd.md)) +; (with-stdout-to 
windows.mld (run md2mld doc/windows.md))))) + +(copy_files doc/*.mld) (documentation (package obuilder) diff --git a/dune-project b/dune-project index 20678f20..f0bf3753 100644 --- a/dune-project +++ b/dune-project @@ -31,7 +31,7 @@ logs (cmdliner (>= 1.2.0)) (tar-unix (>= 2.4.0)) - (yojson (>= "1.6.0")) + (yojson (>= 1.6.0)) sexplib ppx_deriving ppx_sexp_conv @@ -42,7 +42,6 @@ fpath (extunix (>= 0.4.0)) (ocaml (>= 4.14.1)) - (md2mld (>= 0.7.0)) (alcotest-lwt (and (>= 1.7.0) :with-test)))) (package diff --git a/obuilder.opam b/obuilder.opam index 81c7eccd..d9e94f7d 100644 --- a/obuilder.opam +++ b/obuilder.opam @@ -42,7 +42,6 @@ depends: [ "fpath" "extunix" {>= "0.4.0"} "ocaml" {>= "4.14.1"} - "md2mld" {>= "0.7.0"} "alcotest-lwt" {>= "1.7.0" & with-test} "odoc" {with-doc} ]