diff --git a/cli/clitest/cli.go b/cli/clitest/cli.go index f6ff8cf..49f74a6 100644 --- a/cli/clitest/cli.go +++ b/cli/clitest/cli.go @@ -56,7 +56,7 @@ func New(t *testing.T, cmd string, args ...string) (context.Context, *cobra.Comm ctx = ctx(t, fs, execer, mnt, client) ) - root := cli.Root() + root := cli.Root(nil) // This is the one thing that isn't really mocked for the tests. // I cringe at the thought of introducing yet another mock so // let's avoid it for now. diff --git a/cli/docker.go b/cli/docker.go index 40d433e..928a7c2 100644 --- a/cli/docker.go +++ b/cli/docker.go @@ -7,6 +7,7 @@ import ( "io" "net/url" "os" + "os/exec" "path" "path/filepath" "sort" @@ -145,7 +146,7 @@ type flags struct { ethlink string } -func dockerCmd() *cobra.Command { +func dockerCmd(ch chan func() error) *cobra.Command { var flags flags cmd := &cobra.Command{ @@ -284,7 +285,7 @@ func dockerCmd() *cobra.Command { return xerrors.Errorf("wait for dockerd: %w", err) } - err = runDockerCVM(ctx, log, client, blog, flags) + err = runDockerCVM(ctx, log, client, blog, ch, flags) if err != nil { // It's possible we failed because we ran out of disk while // pulling the image. 
We should restart the daemon and use @@ -313,7 +314,7 @@ func dockerCmd() *cobra.Command { }() log.Debug(ctx, "reattempting container creation") - err = runDockerCVM(ctx, log, client, blog, flags) + err = runDockerCVM(ctx, log, client, blog, ch, flags) } if err != nil { blog.Errorf("Failed to run envbox: %v", err) @@ -356,7 +357,7 @@ func dockerCmd() *cobra.Command { return cmd } -func runDockerCVM(ctx context.Context, log slog.Logger, client dockerutil.DockerClient, blog buildlog.Logger, flags flags) error { +func runDockerCVM(ctx context.Context, log slog.Logger, client dockerutil.DockerClient, blog buildlog.Logger, shutdownCh chan func() error, flags flags) error { fs := xunix.GetFS(ctx) // Set our OOM score to something really unfavorable to avoid getting killed @@ -676,31 +677,71 @@ func runDockerCVM(ctx context.Context, log slog.Logger, client dockerutil.Docker } blog.Info("Envbox startup complete!") - - // The bootstrap script doesn't return since it execs the agent - // meaning that it can get pretty noisy if we were to log by default. - // In order to allow users to discern issues getting the bootstrap script - // to complete successfully we pipe the output to stdout if - // CODER_DEBUG=true. - debugWriter := io.Discard - if flags.debug { - debugWriter = os.Stdout - } - // Bootstrap the container if a script has been provided. - blog.Infof("Bootstrapping workspace...") - err = dockerutil.BootstrapContainer(ctx, client, dockerutil.BootstrapConfig{ - ContainerID: containerID, - User: imgMeta.UID, - Script: flags.boostrapScript, - // We set this because the default behavior is to download the agent - // to /tmp/coder.XXXX. This causes a race to happen where we finish - // downloading the binary but before we can execute systemd remounts - // /tmp. 
- Env: []string{fmt.Sprintf("BINARY_DIR=%s", bootDir)}, - StdOutErr: debugWriter, + if flags.boostrapScript == "" { + return nil + } + + bootstrapExec, err := client.ContainerExecCreate(ctx, containerID, dockertypes.ExecConfig{ + User: imgMeta.UID, + Cmd: []string{"/bin/sh", "-s"}, + Env: []string{fmt.Sprintf("BINARY_DIR=%s", bootDir)}, + AttachStdin: true, + AttachStdout: true, + AttachStderr: true, + Detach: true, }) if err != nil { - return xerrors.Errorf("boostrap container: %w", err) + return xerrors.Errorf("create exec: %w", err) + } + + resp, err := client.ContainerExecAttach(ctx, bootstrapExec.ID, dockertypes.ExecStartCheck{}) + if err != nil { + return xerrors.Errorf("attach exec: %w", err) + } + + _, err = io.Copy(resp.Conn, strings.NewReader(flags.boostrapScript)) + if err != nil { + return xerrors.Errorf("copy stdin: %w", err) + } + err = resp.CloseWrite() + if err != nil { + return xerrors.Errorf("close write: %w", err) + } + + go func() { + defer resp.Close() + rd := io.LimitReader(resp.Reader, 1<<10) + _, err := io.Copy(blog, rd) + if err != nil { + log.Error(ctx, "copy bootstrap output", slog.Error(err)) + } + }() + + // We can't just call ExecInspect because there's a race where the cmd + // hasn't been assigned a PID yet. + bootstrapPID, err := dockerutil.GetExecPID(ctx, client, bootstrapExec.ID) + if err != nil { + return xerrors.Errorf("exec inspect: %w", err) + } + + shutdownCh <- func() error { + log.Debug(ctx, "killing container", slog.F("bootstrap_pid", bootstrapPID)) + + // The PID returned is the PID _outside_ the container... 
+ //nolint:gosec + out, err := exec.Command("kill", "-TERM", strconv.Itoa(bootstrapPID)).CombinedOutput() + if err != nil { + return xerrors.Errorf("kill bootstrap process (%s): %w", out, err) + } + + log.Debug(ctx, "sent kill signal waiting for process to exit") + err = dockerutil.WaitForExit(ctx, client, bootstrapExec.ID) + if err != nil { + return xerrors.Errorf("wait for exit: %w", err) + } + + log.Debug(ctx, "bootstrap process successfully exited") + return nil } return nil diff --git a/cli/root.go b/cli/root.go index 0656520..8a03644 100644 --- a/cli/root.go +++ b/cli/root.go @@ -4,7 +4,7 @@ import ( "github.com/spf13/cobra" ) -func Root() *cobra.Command { +func Root(ch chan func() error) *cobra.Command { cmd := &cobra.Command{ Use: "envbox", SilenceErrors: true, @@ -15,6 +15,6 @@ func Root() *cobra.Command { }, } - cmd.AddCommand(dockerCmd()) + cmd.AddCommand(dockerCmd(ch)) return cmd } diff --git a/cmd/envbox/main.go b/cmd/envbox/main.go index 2dfdb94..813e62b 100644 --- a/cmd/envbox/main.go +++ b/cmd/envbox/main.go @@ -1,20 +1,44 @@ package main import ( - "fmt" + "context" "os" + "os/signal" "runtime" + "syscall" + "cdr.dev/slog" + "cdr.dev/slog/sloggers/slogjson" "github.com/coder/envbox/cli" ) func main() { - _, err := cli.Root().ExecuteC() + ch := make(chan func() error, 1) + sigs := make(chan os.Signal, 1) + signal.Notify(sigs, syscall.SIGTERM, syscall.SIGINT, syscall.SIGWINCH) + go func() { + log := slog.Make(slogjson.Sink(os.Stderr)) + ctx := context.Background() + log.Info(ctx, "waiting for signal") + <-sigs + log.Info(ctx, "got signal") + select { + case fn := <-ch: + log.Info(ctx, "running shutdown function") + err := fn() + if err != nil { + log.Error(ctx, "shutdown function failed", slog.Error(err)) + os.Exit(1) + } + default: + log.Info(ctx, "no shutdown function") + } + log.Info(ctx, "exiting") + os.Exit(0) + }() + _, err := cli.Root(ch).ExecuteC() if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err.Error()) os.Exit(1) } - - // We exit the 
main thread while keepin all the other procs goin strong. runtime.Goexit() } diff --git a/dockerutil/container.go b/dockerutil/container.go index a10a371..1d27d4e 100644 --- a/dockerutil/container.go +++ b/dockerutil/container.go @@ -113,7 +113,7 @@ func BootstrapContainer(ctx context.Context, client DockerClient, conf Bootstrap var err error for r, n := retry.New(time.Second, time.Second*2), 0; r.Wait(ctx) && n < 10; n++ { - var out []byte + var out io.Reader out, err = ExecContainer(ctx, client, ExecConfig{ ContainerID: conf.ContainerID, User: conf.User, @@ -122,9 +122,16 @@ func BootstrapContainer(ctx context.Context, client DockerClient, conf Bootstrap Stdin: strings.NewReader(conf.Script), Env: conf.Env, StdOutErr: conf.StdOutErr, + Detach: conf.Detach, }) if err != nil { - err = xerrors.Errorf("boostrap container (%s): %w", out, err) + output, rerr := io.ReadAll(out) + if rerr != nil { + err = xerrors.Errorf("read all: %w", rerr) + continue + } + + err = xerrors.Errorf("boostrap container (%s): %w", output, err) continue } break diff --git a/dockerutil/dockerfake/client.go b/dockerutil/dockerfake/client.go index f735f1c..3706cd8 100644 --- a/dockerutil/dockerfake/client.go +++ b/dockerutil/dockerfake/client.go @@ -162,7 +162,9 @@ func (m MockClient) ContainerExecCreate(ctx context.Context, name string, config func (m MockClient) ContainerExecInspect(ctx context.Context, id string) (dockertypes.ContainerExecInspect, error) { if m.ContainerExecInspectFn == nil { - return dockertypes.ContainerExecInspect{}, nil + return dockertypes.ContainerExecInspect{ + Pid: 1, + }, nil } return m.ContainerExecInspectFn(ctx, id) diff --git a/dockerutil/exec.go b/dockerutil/exec.go index 6f78822..47423ed 100644 --- a/dockerutil/exec.go +++ b/dockerutil/exec.go @@ -4,11 +4,13 @@ import ( "bytes" "context" "io" + "time" dockertypes "github.com/docker/docker/api/types" "golang.org/x/xerrors" "github.com/coder/envbox/xio" + "github.com/coder/retry" ) type ExecConfig struct { @@ 
-24,9 +26,9 @@ type ExecConfig struct { // ExecContainer runs a command in a container. It returns the output and any error. // If an error occurs during the execution of the command, the output is appended to the error. -func ExecContainer(ctx context.Context, client DockerClient, config ExecConfig) ([]byte, error) { +func ExecContainer(ctx context.Context, client DockerClient, config ExecConfig) (io.Reader, error) { exec, err := client.ContainerExecCreate(ctx, config.ContainerID, dockertypes.ExecConfig{ - Detach: true, + Detach: config.Detach, Cmd: append([]string{config.Cmd}, config.Args...), User: config.User, AttachStderr: true, @@ -90,5 +92,41 @@ func ExecContainer(ctx context.Context, client DockerClient, config ExecConfig) return nil, xerrors.Errorf("%s: exit code %d", buf.Bytes(), inspect.ExitCode) } - return buf.Bytes(), nil + return &buf, nil +} + +func GetExecPID(ctx context.Context, client DockerClient, execID string) (int, error) { + for r := retry.New(time.Second, time.Second); r.Wait(ctx); { + inspect, err := client.ContainerExecInspect(ctx, execID) + if err != nil { + return 0, xerrors.Errorf("exec inspect: %w", err) + } + + if inspect.Pid == 0 { + continue + } + return inspect.Pid, nil + } + + return 0, ctx.Err() +} + +func WaitForExit(ctx context.Context, client DockerClient, execID string) error { + for r := retry.New(time.Second, time.Second); r.Wait(ctx); { + inspect, err := client.ContainerExecInspect(ctx, execID) + if err != nil { + return xerrors.Errorf("exec inspect: %w", err) + } + + if inspect.Running { + continue + } + + if inspect.ExitCode > 0 { + return xerrors.Errorf("exit code %d", inspect.ExitCode) + } + + return nil + } + return ctx.Err() } diff --git a/dockerutil/image.go b/dockerutil/image.go index fb1cfaf..5d545d2 100644 --- a/dockerutil/image.go +++ b/dockerutil/image.go @@ -1,7 +1,6 @@ package dockerutil import ( - "bytes" "context" "encoding/json" "fmt" @@ -208,7 +207,7 @@ func GetImageMetadata(ctx context.Context, client 
DockerClient, image, username return ImageMetadata{}, xerrors.Errorf("get /etc/passwd entry for %s: %w", username, err) } - users, err := xunix.ParsePasswd(bytes.NewReader(out)) + users, err := xunix.ParsePasswd(out) if err != nil { return ImageMetadata{}, xerrors.Errorf("parse passwd entry for (%s): %w", out, err) } diff --git a/integration/docker_test.go b/integration/docker_test.go index fc1b7af..d043e76 100644 --- a/integration/docker_test.go +++ b/integration/docker_test.go @@ -272,6 +272,60 @@ func TestDocker(t *testing.T) { require.NoError(t, err) require.Equal(t, expectedHostname, strings.TrimSpace(string(hostname))) }) + + t.Run("HandleSignals", func(t *testing.T) { + t.Parallel() + + pool, err := dockertest.NewPool("") + require.NoError(t, err) + + var ( + tmpdir = integrationtest.TmpDir(t) + binds = integrationtest.DefaultBinds(t, tmpdir) + ) + homeDir := filepath.Join(tmpdir, "home") + err = os.MkdirAll(homeDir, 0o777) + require.NoError(t, err) + + binds = append(binds, bindMount(homeDir, "/home/coder", false)) + + envs := []string{fmt.Sprintf("%s=%s:%s", cli.EnvMounts, "/home/coder", "/home/coder")} + // Run the envbox container. + resource := integrationtest.RunEnvbox(t, pool, &integrationtest.CreateDockerCVMConfig{ + Image: integrationtest.UbuntuImage, + Username: "root", + Envs: envs, + Binds: binds, + BootstrapScript: sigtrapScript, + }) + + _, err = integrationtest.ExecInnerContainer(t, pool, integrationtest.ExecConfig{ + ContainerID: resource.Container.ID, + Cmd: []string{"/bin/sh", "-c", "stat /home/coder/foo"}, + }) + require.Error(t, err) + + // Simulate a shutdown. + integrationtest.StopContainer(t, pool, resource.Container.ID, 30*time.Second) + + err = resource.Close() + require.NoError(t, err) + + t.Logf("envbox %q started successfully, recreating...", resource.Container.ID) + // Run the envbox container. 
+ resource = integrationtest.RunEnvbox(t, pool, &integrationtest.CreateDockerCVMConfig{ + Image: integrationtest.UbuntuImage, + Username: "root", + Envs: envs, + Binds: binds, + BootstrapScript: sigtrapScript, + }) + _, err = integrationtest.ExecInnerContainer(t, pool, integrationtest.ExecConfig{ + ContainerID: resource.Container.ID, + Cmd: []string{"/bin/sh", "-c", "stat /home/coder/foo"}, + }) + require.NoError(t, err) + }) } func requireSliceNoContains(t *testing.T, ss []string, els ...string) { @@ -303,3 +357,17 @@ func bindMount(src, dest string, ro bool) string { } return fmt.Sprintf("%s:%s", src, dest) } + +const sigtrapScript = `#!/bin/bash +cleanup() { + echo "HANDLING A SIGNAL!" && touch /home/coder/foo && echo "touched file" + exit 0 +} + +trap 'cleanup' INT TERM + +while true; do + echo "Working..." + sleep 1 +done +` diff --git a/integration/integrationtest/docker.go b/integration/integrationtest/docker.go index 96ebd4e..088952c 100644 --- a/integration/integrationtest/docker.go +++ b/integration/integrationtest/docker.go @@ -264,6 +264,31 @@ func ExecEnvbox(t *testing.T, pool *dockertest.Pool, conf ExecConfig) ([]byte, e return buf.Bytes(), nil } +func StopContainer(t *testing.T, pool *dockertest.Pool, id string, to time.Duration) { + t.Helper() + + err := pool.Client.KillContainer(docker.KillContainerOptions{ + ID: id, + Signal: docker.SIGTERM, + }) + require.NoError(t, err) + + ctx, cancel := context.WithTimeout(context.Background(), to) + defer cancel() + for r := retry.New(time.Second, time.Second); r.Wait(ctx); { + cnt, err := pool.Client.InspectContainer(id) + require.NoError(t, err) + + if cnt.State.Running { + continue + } + + return + } + + t.Fatalf("timed out waiting for container %s to stop", id) +} + // cmdLineEnvs returns args passed to the /envbox command // but using their env var alias. func cmdLineEnvs(c *CreateDockerCVMConfig) []string {