diff --git a/.copier-answers.yml b/.copier-answers.yml
index 7623a748..8e7734a5 100644
--- a/.copier-answers.yml
+++ b/.copier-answers.yml
@@ -5,9 +5,9 @@ description: Common data reduction tools for the ESS facility
 max_python: '3.13'
 min_python: '3.10'
 namespace_package: ess
-nightly_deps: scippnexus,scipp,sciline,cyclebane,scippneutron
+nightly_deps: scippnexus,scipp,sciline,cyclebane,scippneutron,tof
 orgname: scipp
 prettyname: ESSreduce
 projectname: essreduce
 related_projects: ESSdiffraction,ESSimaging,ESSnmx,ESSpolarization,ESSreflectometry,ESSsans,ESSspectroscopy
-year: 2024
+year: 2025
diff --git a/docs/api-reference/index.md b/docs/api-reference/index.md
index 60a0cb6b..50d92e43 100644
--- a/docs/api-reference/index.md
+++ b/docs/api-reference/index.md
@@ -31,6 +31,7 @@
    logging
    nexus
    streaming
+   time_of_flight
    ui
    uncertainty
    widgets
diff --git a/docs/user-guide/index.md b/docs/user-guide/index.md
index e484fcff..ce5235ee 100644
--- a/docs/user-guide/index.md
+++ b/docs/user-guide/index.md
@@ -6,6 +6,7 @@
 ---
 maxdepth: 2
 ---
+tof/index
 widget
 reduction-workflow-guidelines
 ```
diff --git a/docs/user-guide/tof/dream.ipynb b/docs/user-guide/tof/dream.ipynb
new file mode 100644
index 00000000..6209e957
--- /dev/null
+++ b/docs/user-guide/tof/dream.ipynb
@@ -0,0 +1,787 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "id": "0",
+   "metadata": {},
+   "source": [
+    "# The DREAM chopper cascade\n",
+    "\n",
+    "In this notebook, we simulate the beamline of the DREAM instrument and its pulse-shaping choppers.\n",
+    "We then show how to use `essreduce`'s `time_of_flight` module to compute neutron wavelengths from their arrival times at the detectors.\n",
+    "\n",
+    "The case of DREAM is interesting because the pulse-shaping choppers can be used in a number of different modes,\n",
+    "and the number of cutouts the choppers have typically does not equal the number of frames observed at the detectors."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "1",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import plopp as pp\n",
+    "import scipp as sc\n",
+    "import sciline as sl\n",
+    "from scippneutron.chopper import DiskChopper\n",
+    "from ess.reduce import time_of_flight"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "2",
+   "metadata": {},
+   "source": [
+    "## Setting up the beamline\n",
+    "\n",
+    "### Creating the beamline choppers\n",
+    "\n",
+    "We begin by defining the chopper settings for our beamline.\n",
+    "In principle, the chopper setting could simply be read from a NeXus file.\n",
+    "\n",
+    "The DREAM instrument has\n",
+    "\n",
+    "- 2 pulse-shaping choppers (PSC)\n",
+    "- 1 overlap chopper (OC)\n",
+    "- 1 band-control chopper (BCC)\n",
+    "- 1 T0 chopper"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "3",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "psc1 = DiskChopper(\n",
+    "    frequency=sc.scalar(14.0, unit=\"Hz\"),\n",
+    "    beam_position=sc.scalar(0.0, unit=\"deg\"),\n",
+    "    phase=sc.scalar(286 - 180, unit=\"deg\"),\n",
+    "    axle_position=sc.vector(value=[0, 0, 6.145], unit=\"m\"),\n",
+    "    slit_begin=sc.array(\n",
+    "        dims=[\"cutout\"],\n",
+    "        values=[-1.23, 70.49, 84.765, 113.565, 170.29, 271.635, 286.035, 301.17],\n",
+    "        unit=\"deg\",\n",
+    "    ),\n",
+    "    slit_end=sc.array(\n",
+    "        dims=[\"cutout\"],\n",
+    "        values=[1.23, 73.51, 88.035, 116.835, 175.31, 275.565, 289.965, 303.63],\n",
+    "        unit=\"deg\",\n",
+    "    ),\n",
+    "    slit_height=sc.scalar(10.0, unit=\"cm\"),\n",
+    "    radius=sc.scalar(30.0, unit=\"cm\"),\n",
+    ")\n",
+    "\n",
+    "psc2 = DiskChopper(\n",
+    "    frequency=sc.scalar(-14.0, unit=\"Hz\"),\n",
+    "    beam_position=sc.scalar(0.0, unit=\"deg\"),\n",
+    "    phase=sc.scalar(-236, unit=\"deg\"),\n",
+    "    axle_position=sc.vector(value=[0, 0, 6.155], unit=\"m\"),\n",
+    "    slit_begin=sc.array(\n",
+    "        dims=[\"cutout\"],\n",
+    "        values=[-1.23, 27.0, 55.8, 142.385, 156.765, 214.115, 257.23, 315.49],\n",
+    "        unit=\"deg\",\n",
+    "    ),\n",
+    "    slit_end=sc.array(\n",
+    "        dims=[\"cutout\"],\n",
+    "        values=[1.23, 30.6, 59.4, 145.615, 160.035, 217.885, 261.17, 318.11],\n",
+    "        unit=\"deg\",\n",
+    "    ),\n",
+    "    slit_height=sc.scalar(10.0, unit=\"cm\"),\n",
+    "    radius=sc.scalar(30.0, unit=\"cm\"),\n",
+    ")\n",
+    "\n",
+    "oc = DiskChopper(\n",
+    "    frequency=sc.scalar(14.0, unit=\"Hz\"),\n",
+    "    beam_position=sc.scalar(0.0, unit=\"deg\"),\n",
+    "    phase=sc.scalar(297 - 180 - 90, unit=\"deg\"),\n",
+    "    axle_position=sc.vector(value=[0, 0, 6.174], unit=\"m\"),\n",
+    "    slit_begin=sc.array(dims=[\"cutout\"], values=[-27.6 * 0.5], unit=\"deg\"),\n",
+    "    slit_end=sc.array(dims=[\"cutout\"], values=[27.6 * 0.5], unit=\"deg\"),\n",
+    "    slit_height=sc.scalar(10.0, unit=\"cm\"),\n",
+    "    radius=sc.scalar(30.0, unit=\"cm\"),\n",
+    ")\n",
+    "\n",
+    "bcc = DiskChopper(\n",
+    "    frequency=sc.scalar(112.0, unit=\"Hz\"),\n",
+    "    beam_position=sc.scalar(0.0, unit=\"deg\"),\n",
+    "    phase=sc.scalar(240 - 180, unit=\"deg\"),\n",
+    "    axle_position=sc.vector(value=[0, 0, 9.78], unit=\"m\"),\n",
+    "    slit_begin=sc.array(dims=[\"cutout\"], values=[-36.875, 143.125], unit=\"deg\"),\n",
+    "    slit_end=sc.array(dims=[\"cutout\"], values=[36.875, 216.875], unit=\"deg\"),\n",
+    "    slit_height=sc.scalar(10.0, unit=\"cm\"),\n",
+    "    radius=sc.scalar(30.0, unit=\"cm\"),\n",
+    ")\n",
+    "\n",
+    "t0 = DiskChopper(\n",
+    "    frequency=sc.scalar(28.0, unit=\"Hz\"),\n",
+    "    beam_position=sc.scalar(0.0, unit=\"deg\"),\n",
+    "    phase=sc.scalar(280 - 180, unit=\"deg\"),\n",
+    "    axle_position=sc.vector(value=[0, 0, 13.05], unit=\"m\"),\n",
+    "    slit_begin=sc.array(dims=[\"cutout\"], values=[-314.9 * 0.5], unit=\"deg\"),\n",
+    "    slit_end=sc.array(dims=[\"cutout\"], values=[314.9 * 0.5], unit=\"deg\"),\n",
+    "    slit_height=sc.scalar(10.0, unit=\"cm\"),\n",
+    "    radius=sc.scalar(30.0, unit=\"cm\"),\n",
+    ")\n",
+    "\n",
+    "disk_choppers = {\"psc1\": psc1, \"psc2\": psc2, \"oc\": oc, \"bcc\": bcc, \"t0\": t0}"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "4",
+   "metadata": {},
+   "source": [
+    "It is possible to visualize the properties of the choppers by inspecting their `repr`:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "5",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "psc2"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "6",
+   "metadata": {},
+   "source": [
+    "### Adding a detector"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "7",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "Ltotal = sc.scalar(76.55 + 1.125, unit=\"m\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "8",
+   "metadata": {},
+   "source": [
+    "## Creating some neutron events\n",
+    "\n",
+    "We create a semi-realistic set of neutron events based on the ESS pulse."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "9",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from ess.reduce.time_of_flight.fakes import FakeBeamline\n",
+    "\n",
+    "ess_beamline = FakeBeamline(\n",
+    "    choppers=disk_choppers,\n",
+    "    monitors={\"detector\": Ltotal},\n",
+    "    run_length=sc.scalar(1 / 14, unit=\"s\") * 4,\n",
+    "    events_per_pulse=200_000,\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "10",
+   "metadata": {},
+   "source": [
+    "The initial birth times and wavelengths of the generated neutrons can be visualized (for a single pulse):"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "11",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "one_pulse = ess_beamline.source.data[\"pulse\", 0]\n",
+    "one_pulse.hist(time=300).plot() + one_pulse.hist(wavelength=300).plot()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "12",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "ess_beamline.model_result.plot()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "13",
+   "metadata": {},
+   "source": [
+    "From this fake beamline, we extract the raw neutron signal at our detector:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "14",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "raw_data = ess_beamline.get_monitor(\"detector\")[0]\n",
+    "\n",
+    "# Visualize\n",
+    "raw_data.hist(event_time_offset=300).sum(\"pulse\").plot()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "15",
+   "metadata": {},
+   "source": [
+    "The total number of neutrons in our sample data that make it through to the detector is:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "16",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "raw_data.sum().value"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "17",
+   "metadata": {},
+   "source": [
+    "## Computing time-of-flight\n",
+    "\n",
+    "The chopper information is next used to construct a lookup table that provides an estimate of the real time-of-flight as a function of neutron time-of-arrival.\n",
+    "\n",
+    "We use the `tof` module to propagate a pulse of neutrons through the chopper system to the detectors,\n",
+    "and predict the most likely neutron wavelength for a given time-of-arrival and distance from source.\n",
+    "\n",
+    "From this,\n",
+    "we build a lookup table on which bilinear interpolation is used to compute a wavelength (and its corresponding time-of-flight)\n",
+    "for every neutron event.\n",
+    "\n",
+    "### Setting up the workflow"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "18",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "workflow = sl.Pipeline(\n",
+    "    time_of_flight.providers(), params=time_of_flight.default_parameters()\n",
+    ")\n",
+    "workflow[time_of_flight.RawData] = raw_data\n",
+    "workflow[time_of_flight.LtotalRange] = (sc.scalar(75.5, unit='m'),\n",
+    "                                        sc.scalar(78.0, unit='m'))\n",
+    "\n",
+    "workflow.visualize(time_of_flight.TofData)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "19",
+   "metadata": {},
+   "source": [
+    "We can see from the workflow diagram that we are still missing the simulated neutrons that are used to build the lookup table.\n",
+    "\n",
+    "Those are obtained by running a quick `tof` simulation of the beamline:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "20",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "workflow[time_of_flight.SimulationResults] = time_of_flight.simulate_beamline(\n",
+    "    choppers=disk_choppers,\n",
+    "    neutrons=2_000_000\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "21",
+   "metadata": {},
+   "source": [
+    "### Inspecting the lookup table\n",
+    "\n",
+    "The workflow first runs a simulation using the chopper parameters above,\n",
+    "and the result is stored in `SimulationResults` (see graph above).\n",
+    "\n",
+    "From these simulated neutrons, we create figures displaying the neutron wavelengths and time-of-flight,\n",
+    "as a function of arrival time at the detector.\n",
+    "\n",
+    "This is the basis for creating our lookup table."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "22",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "sim = workflow.compute(time_of_flight.SimulationResults)\n",
+    "# Compute time-of-arrival at the detector\n",
+    "tarrival = sim.time_of_arrival + ((Ltotal - sim.distance) / sim.speed).to(unit=\"us\")\n",
+    "# Compute time-of-flight at the detector\n",
+    "tflight = (Ltotal / sim.speed).to(unit=\"us\")\n",
+    "\n",
+    "events = sc.DataArray(\n",
+    "    data=sim.weight,\n",
+    "    coords={\"wavelength\": sim.wavelength, \"toa\": tarrival, \"tof\": tflight},\n",
+    ")\n",
+    "fig1 = events.hist(wavelength=300, toa=300).plot(norm=\"log\")\n",
+    "fig2 = events.hist(tof=300, toa=300).plot(norm=\"log\")\n",
+    "fig1 + fig2"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "23",
+   "metadata": {},
+   "source": [
+    "The lookup table is then obtained by computing the weighted mean of the time-of-flight inside each time-of-arrival bin.\n",
+    "\n",
+    "This is illustrated by the orange line in the figure below:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "24",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "table = workflow.compute(time_of_flight.TimeOfFlightLookupTable)\n",
+    "\n",
+    "# Overlay mean on the figure above\n",
+    "table[\"distance\", 13].plot(ax=fig2.ax, color=\"C1\", ls='-', marker=None)"
+   ]
+  },
+  {
+   "attachments": {},
+   "cell_type": "markdown",
+   "id": "25",
+   "metadata": {},
+   "source": [
+    "### Computing a time-of-flight coordinate\n",
+    "\n",
+    "We will now use our workflow to obtain our event data with a time-of-flight coordinate:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "26",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "tofs = workflow.compute(time_of_flight.TofData)\n",
+    "tofs"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "27",
+   "metadata": {},
+   "source": [
+    "Histogramming the data for a plot should show a profile with 6 bumps that correspond to the frames:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "28",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "tofs.bins.concat().hist(tof=300).plot()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "29",
+   "metadata": {},
+   "source": [
+    "### Converting to wavelength\n",
+    "\n",
+    "We can now convert our new time-of-flight coordinate to a neutron wavelength, using `transform_coords`:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "30",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from scippneutron.conversion.graph.beamline import beamline\n",
+    "from scippneutron.conversion.graph.tof import elastic\n",
+    "\n",
+    "# Perform coordinate transformation\n",
+    "graph = {**beamline(scatter=False), **elastic(\"tof\")}\n",
+    "wav_wfm = tofs.transform_coords(\"wavelength\", graph=graph)\n",
+    "\n",
+    "# Define wavelength bin edges\n",
+    "wavs = sc.linspace(\"wavelength\", 0.8, 4.6, 201, unit=\"angstrom\")\n",
+    "\n",
+    "wav_wfm.hist(wavelength=wavs).sum(\"pulse\").plot()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "31",
+   "metadata": {},
+   "source": [
+    "### Comparing to the ground truth\n",
+    "\n",
+    "As a consistency check, because we actually know the wavelengths of the neutrons we created,\n",
+    "we can compare the true neutron wavelengths to those we computed above."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "32",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "ground_truth = ess_beamline.model_result[\"detector\"].data.flatten(to=\"event\")\n",
+    "ground_truth = ground_truth[~ground_truth.masks[\"blocked_by_others\"]]\n",
+    "\n",
+    "pp.plot(\n",
+    "    {\n",
+    "        \"wfm\": wav_wfm.hist(wavelength=wavs).sum(\"pulse\"),\n",
+    "        \"ground_truth\": ground_truth.hist(wavelength=wavs),\n",
+    "    }\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "33",
+   "metadata": {},
+   "source": [
+    "## Multiple detector pixels\n",
+    "\n",
+    "It is also possible to compute the neutron time-of-flight for multiple detector pixels at once,\n",
+    "where every pixel has different frame bounds\n",
+    "(because every pixel is at a different distance from the source).\n",
+    "\n",
+    "In our setup, we simply propagate the same neutrons to multiple detector pixels,\n",
+    "as if they were not absorbed by the first pixel they meet."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "34",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "Ltotal = sc.array(dims=[\"detector_number\"], values=[77.675, 76.0], unit=\"m\")\n",
+    "monitors = {f\"detector{i}\": ltot for i, ltot in enumerate(Ltotal)}\n",
+    "\n",
+    "ess_beamline = FakeBeamline(\n",
+    "    choppers=disk_choppers,\n",
+    "    monitors=monitors,\n",
+    "    run_length=sc.scalar(1 / 14, unit=\"s\") * 4,\n",
+    "    events_per_pulse=200_000,\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "35",
+   "metadata": {},
+   "source": [
+    "Our raw data has now a `detector_number` dimension of length 2.\n",
+    "\n",
+    "We can plot the neutron `event_time_offset` for the two detector pixels and see that the offsets are shifted to the left for the pixel that is closest to the source."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "36",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "raw_data = sc.concat(\n",
+    "    [ess_beamline.get_monitor(key)[0] for key in monitors.keys()],\n",
+    "    dim=\"detector_number\",\n",
+    ")\n",
+    "\n",
+    "# Visualize\n",
+    "pp.plot(\n",
+    "    sc.collapse(\n",
+    "        raw_data.hist(event_time_offset=300).sum(\"pulse\"), keep=\"event_time_offset\"\n",
+    "    )\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "37",
+   "metadata": {},
+   "source": [
+    "Computing time-of-flight is done in the same way as above.\n",
+    "We need to remember to update our workflow:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "38",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Update workflow\n",
+    "workflow[time_of_flight.RawData] = raw_data\n",
+    "\n",
+    "# Compute tofs and wavelengths\n",
+    "tofs = workflow.compute(time_of_flight.TofData)\n",
+    "wav_wfm = tofs.transform_coords(\"wavelength\", graph=graph)\n",
+    "\n",
+    "# Compare in plot\n",
+    "ground_truth = []\n",
+    "for det in ess_beamline.monitors:\n",
+    "    data = ess_beamline.model_result[det.name].data.flatten(to=\"event\")\n",
+    "    ground_truth.append(data[~data.masks[\"blocked_by_others\"]])\n",
+    "\n",
+    "figs = [\n",
+    "    pp.plot(\n",
+    "        {\n",
+    "            \"wfm\": wav_wfm[\"detector_number\", i].bins.concat().hist(wavelength=wavs),\n",
+    "            \"ground_truth\": ground_truth[i].hist(wavelength=wavs),\n",
+    "        },\n",
+    "        title=f\"Pixel {i+1}\",\n",
+    "    )\n",
+    "    for i in range(len(Ltotal))\n",
+    "]\n",
+    "\n",
+    "figs[0] + figs[1]"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "39",
+   "metadata": {},
+   "source": [
+    "## Handling time overlap between subframes\n",
+    "\n",
+    "In some (relatively rare) cases, where a chopper cascade is slightly ill-defined,\n",
+    "it is sometimes possible for some subframes to overlap in time with other subframes.\n",
+    "\n",
+    "This is basically when neutrons passed through different pulse-shaping chopper openings,\n",
+    "but arrive at the same time at the detector.\n",
+    "\n",
+    "In this case, it is actually not possible to accurately determine the wavelength of the neutrons.\n",
+    "ScippNeutron handles this by masking the overlapping regions and throwing away any neutrons that lie within it.\n",
+    "\n",
+    "To simulate this, we modify slightly the phase and the cutouts of the band-control chopper:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "40",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "disk_choppers[\"bcc\"] = DiskChopper(\n",
+    "    frequency=sc.scalar(112.0, unit=\"Hz\"),\n",
+    "    beam_position=sc.scalar(0.0, unit=\"deg\"),\n",
+    "    phase=sc.scalar(240 - 180, unit=\"deg\"),\n",
+    "    axle_position=sc.vector(value=[0, 0, 9.78], unit=\"m\"),\n",
+    "    slit_begin=sc.array(dims=[\"cutout\"], values=[-36.875, 143.125], unit=\"deg\"),\n",
+    "    slit_end=sc.array(dims=[\"cutout\"], values=[46.875, 216.875], unit=\"deg\"),\n",
+    "    slit_height=sc.scalar(10.0, unit=\"cm\"),\n",
+    "    radius=sc.scalar(30.0, unit=\"cm\"),\n",
+    ")\n",
+    "\n",
+    "# Go back to a single detector pixel\n",
+    "Ltotal = sc.scalar(76.55 + 1.125, unit=\"m\")\n",
+    "\n",
+    "ess_beamline = FakeBeamline(\n",
+    "    choppers=disk_choppers,\n",
+    "    monitors={\"detector\": Ltotal},\n",
+    "    run_length=sc.scalar(1 / 14, unit=\"s\") * 4,\n",
+    "    events_per_pulse=200_000,\n",
+    ")\n",
+    "\n",
+    "ess_beamline.model_result.plot()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "41",
+   "metadata": {},
+   "source": [
+    "We can now see that there is no longer a gap between the two frames at the center of each pulse (green region).\n",
+    "\n",
+    "Another way of looking at this is looking at the wavelength vs time-of-arrival plot,\n",
+    "which also shows overlap in time at the junction between the two frames:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "42",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Update workflow\n",
+    "workflow[time_of_flight.SimulationResults] = time_of_flight.simulate_beamline(\n",
+    "    choppers=disk_choppers,\n",
+    "    neutrons=2_000_000\n",
+    ")\n",
+    "workflow[time_of_flight.RawData] = ess_beamline.get_monitor(\"detector\")[0]\n",
+    "\n",
+    "sim = workflow.compute(time_of_flight.SimulationResults)\n",
+    "# Compute time-of-arrival at the detector\n",
+    "tarrival = sim.time_of_arrival + ((Ltotal - sim.distance) / sim.speed).to(unit=\"us\")\n",
+    "# Compute time-of-flight at the detector\n",
+    "tflight = (Ltotal / sim.speed).to(unit=\"us\")\n",
+    "\n",
+    "events = sc.DataArray(\n",
+    "    data=sim.weight,\n",
+    "    coords={\"wavelength\": sim.wavelength, \"toa\": tarrival, \"tof\": tflight},\n",
+    ")\n",
+    "events.hist(wavelength=300, toa=300).plot(norm=\"log\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "43",
+   "metadata": {},
+   "source": [
+    "The data in the lookup table contains both the mean time-of-flight for each distance and time-of-arrival bin,\n",
+    "but also the variance inside each bin.\n",
+    "\n",
+    "In the regions where there is no time overlap,\n",
+    "the variance is small (the regions are close to a thin line).\n",
+    "However, in the central region where overlap occurs,\n",
+    "we are computing a mean between two regions which have similar 'brightness'.\n",
+    "\n",
+    "This leads to a large variance, and this is visible when plotting the relative standard deviations on a 2D figure."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "44",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "table = workflow.compute(time_of_flight.TimeOfFlightLookupTable)\n",
+    "table.plot() / (sc.stddevs(table) / sc.values(table)).plot(norm=\"log\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "45",
+   "metadata": {},
+   "source": [
+    "The workflow has a parameter which is used to mask out regions where the standard deviation is above a certain threshold.\n",
+    "\n",
+    "It is difficult to automatically detect this threshold,\n",
+    "as it can vary a lot depending on how much signal is received by the detectors,\n",
+    "and how far the detectors are from the source.\n",
+    "It is thus more robust to simply have a user tunable parameter on the workflow."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "46",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "workflow[time_of_flight.LookupTableRelativeErrorThreshold] = 1.0e-2\n",
+    "\n",
+    "workflow.compute(time_of_flight.MaskedTimeOfFlightLookupTable).plot()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "47",
+   "metadata": {},
+   "source": [
+    "We can now see that the central region is masked out.\n",
+    "\n",
+    "The neutrons in that region will be discarded in the time-of-flight calculation\n",
+    "(in practice, they are given a NaN value as a time-of-flight).\n",
+    "\n",
+    "This is visible when comparing to the true neutron wavelengths,\n",
+    "where we see that some counts were lost between the two frames."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "48",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Compute time-of-flight\n",
+    "tofs = workflow.compute(time_of_flight.TofData)\n",
+    "# Compute wavelength\n",
+    "wav_wfm = tofs.transform_coords(\"wavelength\", graph=graph)\n",
+    "\n",
+    "# Compare to the true wavelengths\n",
+    "ground_truth = ess_beamline.model_result[\"detector\"].data.flatten(to=\"event\")\n",
+    "ground_truth = ground_truth[~ground_truth.masks[\"blocked_by_others\"]]\n",
+    "\n",
+    "pp.plot(\n",
+    "    {\n",
+    "        \"wfm\": wav_wfm.hist(wavelength=wavs).sum(\"pulse\"),\n",
+    "        \"ground_truth\": ground_truth.hist(wavelength=wavs),\n",
+    "    }\n",
+    ")"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/docs/user-guide/tof/frame-unwrapping.ipynb b/docs/user-guide/tof/frame-unwrapping.ipynb
new file mode 100644
index 00000000..aca44aae
--- /dev/null
+++ b/docs/user-guide/tof/frame-unwrapping.ipynb
@@ -0,0 +1,617 @@
+{
+ "cells": [
+  {
+   "attachments": {},
+   "cell_type": "markdown",
+   "id": "0",
+   "metadata": {},
+   "source": [
+    "# Frame Unwrapping\n",
+    "\n",
+    "## Context\n",
+    "\n",
+    "At time-of-flight neutron sources recording event-mode, time-stamps of detected neutrons are written to files in an `NXevent_data` group.\n",
+    "This contains two main time components, `event_time_zero` and `event_time_offset`.\n",
+    "The sum of the two would typically yield the absolute detection time of the neutron.\n",
+    "For computation of wavelengths or energies during data-reduction, a time-of-flight is required.\n",
+    "In principle the time-of-flight could be equivalent to `event_time_offset`, and the emission time of the neutron to `event_time_zero`.\n",
+    "Since an actual computation of time-of-flight would require knowledge about chopper settings, detector positions, and whether the scattering of the sample is elastic or inelastic, this may however not be the case in practice.\n",
+    "Instead, the data acquisition system may, e.g., record the time at which the proton pulse hits the target as `event_time_zero`, with `event_time_offset` representing the offset since then.\n",
+    "\n",
+    "We refer to the process of \"unwrapping\" these time stamps into an actual time-of-flight as *frame unwrapping*, since `event_time_offset` \"wraps around\" with the period of the proton pulse and neutrons created by different proton pulses may be recorded with the *same* `event_time_zero`.\n",
+    "The figures in the remainder of this document will clarify this."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "1",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import plopp as pp\n",
+    "import scipp as sc\n",
+    "import sciline as sl\n",
+    "from scippneutron.chopper import DiskChopper\n",
+    "from ess.reduce import time_of_flight\n",
+    "import tof\n",
+    "\n",
+    "Hz = sc.Unit(\"Hz\")\n",
+    "deg = sc.Unit(\"deg\")\n",
+    "meter = sc.Unit(\"m\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "2",
+   "metadata": {},
+   "source": [
+    "## Default mode"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "3",
+   "metadata": {},
+   "source": [
+    "Often there is a 1:1 correspondence between source pulses and neutron pulses propagated to the sample and detectors.\n",
+    "\n",
+    "In this first example:\n",
+    "\n",
+    "- We begin by creating a source of neutrons which mimics the ESS source.\n",
+    "- We set up a single chopper with a single opening\n",
+    "- We place 4 'monitors' along the path of the neutrons (none of which absorb any neutrons)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "4",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "source = tof.Source(facility=\"ess\", pulses=5)\n",
+    "chopper = tof.Chopper(\n",
+    "    frequency=14.0 * Hz,\n",
+    "    open=sc.array(dims=[\"cutout\"], values=[0.0], unit=\"deg\"),\n",
+    "    close=sc.array(dims=[\"cutout\"], values=[3.0], unit=\"deg\"),\n",
+    "    phase=85.0 * deg,\n",
+    "    distance=8.0 * meter,\n",
+    "    name=\"chopper\",\n",
+    ")\n",
+    "detectors = [\n",
+    "    tof.Detector(distance=20.0 * meter, name=\"beam\"),\n",
+    "    tof.Detector(distance=60.0 * meter, name=\"sample\"),\n",
+    "    tof.Detector(distance=80.0 * meter, name=\"monitor\"),\n",
+    "    tof.Detector(distance=108.0 * meter, name=\"detector\"),\n",
+    "]\n",
+    "\n",
+    "model = tof.Model(source=source, choppers=[chopper], detectors=detectors)\n",
+    "results = model.run()\n",
+    "pl = results.plot(cmap=\"viridis_r\")\n",
+    "\n",
+    "for i in range(source.pulses):\n",
+    "    pl.ax.axvline(\n",
+    "        i * (1.0 / source.frequency).to(unit=\"us\").value, color=\"k\", ls=\"dotted\"\n",
+    "    )\n",
+    "    x = [\n",
+    "        results[det.name].toas.data[\"visible\"][f\"pulse:{i}\"].coords[\"toa\"].min().value\n",
+    "        for det in detectors\n",
+    "    ]\n",
+    "    y = [det.distance.value for det in detectors]\n",
+    "    pl.ax.plot(x, y, \"--o\", color=\"magenta\", lw=3)\n",
+    "    if i == 0:\n",
+    "        pl.ax.text(\n",
+    "            x[2], y[2] * 1.05, \"pivot time\", va=\"bottom\", ha=\"right\", color=\"magenta\"\n",
+    "        )"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "5",
+   "metadata": {},
+   "source": [
+    "In the figure above, the dotted vertical lines represent the `event_time_zero` of each pulse,\n",
+    "i.e. the start of a new origin for `event_time_offset` recorded at the various detectors.\n",
+    "\n",
+    "The span between two dotted lines is called a 'frame'.\n",
+    "\n",
+    "The figure gives a good representation of the situation at each detector:\n",
+    "\n",
+    "- **beam** monitor: all the arrival times at the detector are inside the same frame within which the neutrons were created.\n",
+    "- **sample**: all the arrival times are offset by one frame\n",
+    "- **monitor**: most of the neutrons arrive with an offset of two frames, but a small amount of neutrons (shortest wavelengths) only have a 1-frame offset\n",
+    "- **detector**: most of the neutrons arrive with an offset of two frames, but a small amount of neutrons (longest wavelengths) have a 3-frame offset\n",
+    "\n",
+    "We can further illustrate this by making histograms of the `event_time_offset` of the neutrons for each detector:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "6",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "subplots = pp.tiled(2, 2, figsize=(9, 6))\n",
+    "nxevent_data = results.to_nxevent_data()\n",
+    "for i, det in enumerate(detectors):\n",
+    "    data = nxevent_data[\"detector_number\", i]\n",
+    "    subplots[i // 2, i % 2] = (\n",
+    "        data.bins.concat()\n",
+    "        .hist(event_time_offset=200)\n",
+    "        .plot(title=f\"{det.name}={det.distance:c}\", color=f\"C{i}\")\n",
+    "    )\n",
+    "    f = subplots[i // 2, i % 2]\n",
+    "    xpiv = min(\n",
+    "        da.coords[\"toa\"].min() % (1.0 / source.frequency).to(unit=\"us\")\n",
+    "        for da in results[det.name].toas.data[\"visible\"].values()\n",
+    "    ).value\n",
+    "    f.ax.axvline(xpiv, ls=\"dashed\", color=\"magenta\", lw=2)\n",
+    "    f.ax.text(xpiv, 20, \"pivot time\", rotation=90, color=\"magenta\")\n",
+    "    f.canvas.draw()\n",
+    "subplots"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "7",
+   "metadata": {},
+   "source": [
+    "### Pivot time\n",
+    "\n",
+    "To compute the time-of-flight for a neutron, we need to identify which source pulse it originated from.\n",
+    "\n",
+    "In the first figure, the pink lines represent the earliest recorded arrival time at each detector:\n",
+    "we know that within a given frame at a selected detector,\n",
+    "any neutron recorded at a time earlier than this 'pivot' time must come from a previous pulse.\n",
+    "\n",
+    "The position of the pink lines is repeated in the second figure (above).\n",
+    "We can use this knowledge to unwrap the frames and compute the absolute time-of-arrival of the neutrons at the detectors."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "8",
+   "metadata": {},
+   "source": [
+    "### Computing time-of-flight\n",
+    "\n",
+    "The pivot time and the resulting offsets can be computed from the properties of the source pulse and the chopper cascade.\n",
+    "\n",
+    "We describe in this section the workflow that computes time-of-flight,\n",
+    "given `event_time_zero` and `event_time_offset` for neutron events,\n",
+    "as well as the properties of the source pulse and the choppers in the beamline.\n",
+    "\n",
+    "The workflow can be visualized as follows:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "9",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "workflow = sl.Pipeline(\n",
+    "    time_of_flight.providers(), params=time_of_flight.default_parameters()\n",
+    ")\n",
+    "\n",
+    "workflow[time_of_flight.RawData] = nxevent_data\n",
+    "workflow[time_of_flight.LtotalRange] = detectors[0].distance, detectors[-1].distance\n",
+    "workflow[time_of_flight.SimulationResults] = time_of_flight.simulate_beamline(\n",
+    "    choppers={\n",
+    "        \"chopper\": DiskChopper(\n",
+    "            frequency=-chopper.frequency,\n",
+    "            beam_position=sc.scalar(0.0, unit=\"deg\"),\n",
+    "            phase=-chopper.phase,\n",
+    "            axle_position=sc.vector(\n",
+    "                value=[0, 0, chopper.distance.value], unit=chopper.distance.unit\n",
+    "            ),\n",
+    "            slit_begin=chopper.open,\n",
+    "            slit_end=chopper.close,\n",
+    "            slit_height=sc.scalar(10.0, unit=\"cm\"),\n",
+    "            radius=sc.scalar(30.0, unit=\"cm\"),\n",
+    "        )\n",
+    "    }\n",
+    ")\n",
+    "\n",
+    "workflow.visualize(time_of_flight.TofData)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "10",
+   "metadata": {},
+   "source": [
+    "#### Unwrapped neutron time-of-arrival\n",
+    "\n",
+    "The first step that is computed in the workflow is the unwrapped detector arrival time of each neutron.\n",
+    "This is essentially just `event_time_offset + event_time_zero`."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "11",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "da = workflow.compute(time_of_flight.UnwrappedTimeOfArrival)[\n",
+    "    \"detector_number\", 2\n",
+    "]  # Look at a single detector\n",
+    "da.bins.concat().value.hist(time_of_arrival=300).plot()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "12",
+   "metadata": {},
+   "source": [
+    "#### Unwrapped neutron time-of-arrival minus pivot time\n",
+    "\n",
+    "The next step is to subtract the pivot time from the unwrapped arrival times,\n",
+    "to align the times so that they start at zero.\n",
+    "\n",
+    "This allows us to perform a computationally cheap modulo operation on the times below."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "13",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "da = workflow.compute(time_of_flight.UnwrappedTimeOfArrivalMinusPivotTime)[\n",
+    "    \"detector_number\", 2\n",
+    "]\n",
+    "f = da.bins.concat().value.hist(time_of_arrival=300).plot()\n",
+    "for i in range(source.pulses):\n",
+    "    f.ax.axvline(\n",
+    "        i * (1.0 / source.frequency).to(unit=\"us\").value, color=\"k\", ls=\"dotted\"\n",
+    "    )\n",
+    "f"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "14",
+   "metadata": {},
+   "source": [
+    "The vertical dotted lines here represent the frame period.\n",
+    "\n",
+    "#### Unwrapped neutron time-of-arrival modulo the frame period\n",
+    "\n",
+    "We now wrap the arrival times with the frame period to obtain a well-formed (unbroken) set of events.\n",
+    "\n",
+    "We also re-add the pivot time offset we had subtracted earlier (to enable the modulo operation)."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "15",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "da = workflow.compute(time_of_flight.FrameFoldedTimeOfArrival)[\"detector_number\", 2]\n",
+    "da.bins.concat().value.hist(time_of_arrival=200).plot()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "16",
+   "metadata": {},
+   "source": [
+    "#### Create a lookup table\n",
+    "\n",
+    "The chopper information is next used to construct a lookup table that provides an estimate of the real time-of-flight as a function of time-of-arrival.\n",
+    "\n",
+    "The `tof` module can be used to propagate a pulse of neutrons through the chopper system to the detectors,\n",
+    "and predict the most likely neutron wavelength for a given time-of-arrival.\n",
+    "\n",
+    "We typically have hundreds of thousands of pixels in an instrument,\n",
+    "but it is actually not necessary to propagate the neutrons to 10<sup>5</sup> detectors.\n",
+    "\n",
+    "Instead, we make a table that spans the entire range of distances of all the pixels,\n",
+    "with a modest resolution,\n",
+    "and use a linear interpolation for values that lie between the points in the table.\n",
+    "\n",
+    "To create the table, we thus:\n",
+    "\n",
+    "- run a simulation where a pulse of neutrons passes through the choppers and reaches the sample (or any location after the last chopper)\n",
+    "- propagate the neutrons from the sample to a range of distances that span the minimum and maximum pixel distance from the sample (assuming neutron wavelengths do not change)\n",
+    "- bin the neutrons in both distance and time-of-arrival (yielding a 2D binned data array)\n",
+    "- compute the (weighted) mean wavelength inside each bin\n",
+    "- convert the wavelengths to a real time-of-flight to give our final lookup table\n",
+    "\n",
+    "The table can be visualized here:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "17",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "table = workflow.compute(time_of_flight.TimeOfFlightLookupTable)\n",
+    "table.plot()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "18",
+   "metadata": {},
+   "source": [
+    "#### Computing time-of-flight from the lookup\n",
+    "\n",
+    "We now use the above table to perform a bilinear interpolation and compute the time-of-flight of every neutron."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "19",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "tofs = workflow.compute(time_of_flight.TofData)\n",
+    "\n",
+    "tof_hist = tofs.bins.concat(\"pulse\").hist(tof=sc.scalar(500.0, unit=\"us\"))\n",
+    "pp.plot({det.name: tof_hist[\"detector_number\", i] for i, det in enumerate(detectors)})"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "20",
+   "metadata": {},
+   "source": [
+    "### Converting to wavelength\n",
+    "\n",
+    "The time-of-flight of a neutron is commonly used as the fundamental quantity from which one can compute the neutron energy or wavelength.\n",
+    "\n",
+    "Here, we compute the wavelengths from the time-of-flight using Scippneutron's `transform_coords` utility,\n",
+    "and compare our computed wavelengths to the true wavelengths which are known for the simulated neutrons."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "21",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from scippneutron.conversion.graph.beamline import beamline\n",
+    "from scippneutron.conversion.graph.tof import elastic\n",
+    "\n",
+    "# Perform coordinate transformation\n",
+    "graph = {**beamline(scatter=False), **elastic(\"tof\")}\n",
+    "\n",
+    "# Define wavelength bin edges\n",
+    "bins = sc.linspace(\"wavelength\", 6.0, 9.0, 101, unit=\"angstrom\")\n",
+    "\n",
+    "# Compute wavelengths\n",
+    "wav_hist = (\n",
+    "    tofs.transform_coords(\"wavelength\", graph=graph)\n",
+    "    .bins.concat(\"pulse\")\n",
+    "    .hist(wavelength=bins)\n",
+    ")\n",
+    "wavs = {det.name: wav_hist[\"detector_number\", i] for i, det in enumerate(detectors)}\n",
+    "\n",
+    "ground_truth = results[\"detector\"].data.flatten(to=\"event\")\n",
+    "ground_truth = ground_truth[~ground_truth.masks[\"blocked_by_others\"]].hist(\n",
+    "    wavelength=bins\n",
+    ")\n",
+    "\n",
+    "wavs[\"true\"] = ground_truth\n",
+    "pp.plot(wavs)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "22",
+   "metadata": {},
+   "source": [
+    "We see that all detectors agree on the wavelength spectrum,\n",
+    "which is also in very good agreement with the true neutron wavelengths.\n",
+    "\n",
+    "## Pulse-skipping mode\n",
+    "\n",
+    "In some beamline configurations, one wishes to study a wide range of wavelengths at a high flux.\n",
+    "This usually means that the spread of arrival times will spill-over into the next pulse if the detector is placed far enough to yield a good wavelength resolution.\n",
+    "\n",
+    "To avoid the next pulse polluting the data from the current pulse,\n",
+    "it is common practice to use a pulse-skipping chopper which basically blocks all neutrons every other pulse.\n",
+    "This could also be every 3 or 4 pulses for very long instruments.\n",
+    "\n",
+    "The time-distance diagram may look something like:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "23",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "source = tof.Source(facility=\"ess\", pulses=4)\n",
+    "choppers = [\n",
+    "    tof.Chopper(\n",
+    "        frequency=14.0 * Hz,\n",
+    "        open=sc.array(dims=[\"cutout\"], values=[0.0], unit=\"deg\"),\n",
+    "        close=sc.array(dims=[\"cutout\"], values=[33.0], unit=\"deg\"),\n",
+    "        phase=35.0 * deg,\n",
+    "        distance=8.0 * meter,\n",
+    "        name=\"chopper\",\n",
+    "    ),\n",
+    "    tof.Chopper(\n",
+    "        frequency=7.0 * Hz,\n",
+    "        open=sc.array(dims=[\"cutout\"], values=[0.0], unit=\"deg\"),\n",
+    "        close=sc.array(dims=[\"cutout\"], values=[120.0], unit=\"deg\"),\n",
+    "        phase=10.0 * deg,\n",
+    "        distance=15.0 * meter,\n",
+    "        name=\"pulse-skipping\",\n",
+    "    ),\n",
+    "]\n",
+    "detectors = [\n",
+    "    tof.Detector(distance=60.0 * meter, name=\"monitor\"),\n",
+    "    tof.Detector(distance=100.0 * meter, name=\"detector\"),\n",
+    "]\n",
+    "\n",
+    "model = tof.Model(source=source, choppers=choppers, detectors=detectors)\n",
+    "results = model.run()\n",
+    "results.plot(cmap=\"viridis_r\", blocked_rays=5000)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "24",
+   "metadata": {},
+   "source": [
+    "### Computing time-of-flight\n",
+    "\n",
+    "To compute the time-of-flight in pulse skipping mode,\n",
+    "we can use the same workflow as before.\n",
+    "\n",
+    "The only difference is that we set the `PulseStride` to 2 to skip every other pulse."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "25",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "workflow = sl.Pipeline(\n",
+    "    time_of_flight.providers(), params=time_of_flight.default_parameters()\n",
+    ")\n",
+    "\n",
+    "workflow[time_of_flight.PulseStride] = 2\n",
+    "workflow[time_of_flight.RawData] = results.to_nxevent_data()\n",
+    "workflow[time_of_flight.LtotalRange] = detectors[0].distance, detectors[-1].distance\n",
+    "workflow[time_of_flight.SimulationResults] = time_of_flight.simulate_beamline(\n",
+    "    choppers={\n",
+    "        ch.name: DiskChopper(\n",
+    "            frequency=-ch.frequency,\n",
+    "            beam_position=sc.scalar(0.0, unit=\"deg\"),\n",
+    "            phase=-ch.phase,\n",
+    "            axle_position=sc.vector(\n",
+    "                value=[0, 0, ch.distance.value], unit=ch.distance.unit\n",
+    "            ),\n",
+    "            slit_begin=ch.open,\n",
+    "            slit_end=ch.close,\n",
+    "            slit_height=sc.scalar(10.0, unit=\"cm\"),\n",
+    "            radius=sc.scalar(30.0, unit=\"cm\"),\n",
+    "        )\n",
+    "        for ch in choppers\n",
+    "    },\n",
+    "    neutrons=500_000,\n",
+    ")\n",
+    "\n",
+    "workflow[time_of_flight.DistanceResolution] = sc.scalar(0.5, unit=\"m\")\n",
+    "workflow[time_of_flight.LookupTableRelativeErrorThreshold] = 0.1"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "26",
+   "metadata": {},
+   "source": [
+    "If we inspect the time-of-flight lookup table,\n",
+    "we can see that the time-of-arrival (toa) dimension now spans longer than the pulse period of 71 ms."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "27",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "table = workflow.compute(time_of_flight.TimeOfFlightLookupTable)\n",
+    "table.plot()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "28",
+   "metadata": {},
+   "source": [
+    "The time-of-flight profiles are then:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "29",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "tofs = workflow.compute(time_of_flight.TofData)\n",
+    "\n",
+    "tof_hist = tofs.bins.concat(\"pulse\").hist(tof=sc.scalar(500.0, unit=\"us\"))\n",
+    "pp.plot({det.name: tof_hist[\"detector_number\", i] for i, det in enumerate(detectors)})"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "30",
+   "metadata": {},
+   "source": [
+    "### Conversion to wavelength\n",
+    "\n",
+    "We now use the `transform_coords` as above to convert to wavelength."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "31",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Define wavelength bin edges\n",
+    "bins = sc.linspace(\"wavelength\", 1.0, 8.0, 401, unit=\"angstrom\")\n",
+    "\n",
+    "# Compute wavelengths\n",
+    "wav_hist = (\n",
+    "    tofs.transform_coords(\"wavelength\", graph=graph)\n",
+    "    .bins.concat(\"pulse\")\n",
+    "    .hist(wavelength=bins)\n",
+    ")\n",
+    "wavs = {det.name: wav_hist[\"detector_number\", i] for i, det in enumerate(detectors)}\n",
+    "\n",
+    "ground_truth = results[\"detector\"].data.flatten(to=\"event\")\n",
+    "ground_truth = ground_truth[~ground_truth.masks[\"blocked_by_others\"]].hist(\n",
+    "    wavelength=bins\n",
+    ")\n",
+    "\n",
+    "wavs[\"true\"] = ground_truth\n",
+    "pp.plot(wavs)"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/docs/user-guide/tof/index.md b/docs/user-guide/tof/index.md
new file mode 100644
index 00000000..137d5dbd
--- /dev/null
+++ b/docs/user-guide/tof/index.md
@@ -0,0 +1,11 @@
+# Time-of-flight
+
+```{toctree}
+---
+maxdepth: 1
+---
+
+frame-unwrapping
+wfm
+dream
+```
diff --git a/docs/user-guide/tof/wfm.ipynb b/docs/user-guide/tof/wfm.ipynb
new file mode 100644
index 00000000..bce977f1
--- /dev/null
+++ b/docs/user-guide/tof/wfm.ipynb
@@ -0,0 +1,533 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "id": "0",
+   "metadata": {},
+   "source": [
+    "# Wavelength frame multiplication\n",
+    "\n",
+    "Wavelength frame multiplication (WFM) is a technique commonly used at long-pulse facilities to improve the resolution of the results measured at the neutron detectors.\n",
+    "See for example the article by [Schmakat et al. (2020)](https://www.sciencedirect.com/science/article/pii/S0168900220308640) for a description of how WFM works.\n",
+    "\n",
+    "In this notebook, we show how to use `essreduce`'s `time_of_flight` module to compute an accurate time-of-flight coordinate,\n",
+    "from which a wavelength can be computed."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "1",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import numpy as np\n",
+    "import plopp as pp\n",
+    "import scipp as sc\n",
+    "import sciline as sl\n",
+    "from scippneutron.chopper import DiskChopper\n",
+    "from ess.reduce import time_of_flight"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "2",
+   "metadata": {},
+   "source": [
+    "## Setting up the beamline\n",
+    "\n",
+    "### Creating the beamline choppers\n",
+    "\n",
+    "We begin by defining the chopper settings for our beamline.\n",
+    "In principle, the chopper setting could simply be read from a NeXus file.\n",
+    "\n",
+    "For this example, we create choppers modeled on the [V20 ESS beamline at HZB](https://www.sciencedirect.com/science/article/pii/S0168900216309597).\n",
+    "It consists of 5 choppers:\n",
+    "\n",
+    "- 2 WFM choppers\n",
+    "- 2 frame-overlap choppers\n",
+    "- 1 pulse-overlap chopper\n",
+    "\n",
+    "The first 4 choppers have 6 openings (also known as cutouts),\n",
+    "while the last one only has a single opening."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "3",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "wfm1 = DiskChopper(\n",
+    "    frequency=sc.scalar(-70.0, unit=\"Hz\"),\n",
+    "    beam_position=sc.scalar(0.0, unit=\"deg\"),\n",
+    "    phase=sc.scalar(-47.10, unit=\"deg\"),\n",
+    "    axle_position=sc.vector(value=[0, 0, 6.6], unit=\"m\"),\n",
+    "    slit_begin=sc.array(\n",
+    "        dims=[\"cutout\"],\n",
+    "        values=np.array([83.71, 140.49, 193.26, 242.32, 287.91, 330.3]) + 15.0,\n",
+    "        unit=\"deg\",\n",
+    "    ),\n",
+    "    slit_end=sc.array(\n",
+    "        dims=[\"cutout\"],\n",
+    "        values=np.array([94.7, 155.79, 212.56, 265.33, 314.37, 360.0]) + 15.0,\n",
+    "        unit=\"deg\",\n",
+    "    ),\n",
+    "    slit_height=sc.scalar(10.0, unit=\"cm\"),\n",
+    "    radius=sc.scalar(30.0, unit=\"cm\"),\n",
+    ")\n",
+    "\n",
+    "wfm2 = DiskChopper(\n",
+    "    frequency=sc.scalar(-70.0, unit=\"Hz\"),\n",
+    "    beam_position=sc.scalar(0.0, unit=\"deg\"),\n",
+    "    phase=sc.scalar(-76.76, unit=\"deg\"),\n",
+    "    axle_position=sc.vector(value=[0, 0, 7.1], unit=\"m\"),\n",
+    "    slit_begin=sc.array(\n",
+    "        dims=[\"cutout\"],\n",
+    "        values=np.array([65.04, 126.1, 182.88, 235.67, 284.73, 330.32]) + 15.0,\n",
+    "        unit=\"deg\",\n",
+    "    ),\n",
+    "    slit_end=sc.array(\n",
+    "        dims=[\"cutout\"],\n",
+    "        values=np.array([76.03, 141.4, 202.18, 254.97, 307.74, 360.0]) + 15.0,\n",
+    "        unit=\"deg\",\n",
+    "    ),\n",
+    "    slit_height=sc.scalar(10.0, unit=\"cm\"),\n",
+    "    radius=sc.scalar(30.0, unit=\"cm\"),\n",
+    ")\n",
+    "\n",
+    "foc1 = DiskChopper(\n",
+    "    frequency=sc.scalar(-56.0, unit=\"Hz\"),\n",
+    "    beam_position=sc.scalar(0.0, unit=\"deg\"),\n",
+    "    phase=sc.scalar(-62.40, unit=\"deg\"),\n",
+    "    axle_position=sc.vector(value=[0, 0, 8.8], unit=\"m\"),\n",
+    "    slit_begin=sc.array(\n",
+    "        dims=[\"cutout\"],\n",
+    "        values=np.array([74.6, 139.6, 194.3, 245.3, 294.8, 347.2]),\n",
+    "        unit=\"deg\",\n",
+    "    ),\n",
+    "    slit_end=sc.array(\n",
+    "        dims=[\"cutout\"],\n",
+    "        values=np.array([95.2, 162.8, 216.1, 263.1, 310.5, 371.6]),\n",
+    "        unit=\"deg\",\n",
+    "    ),\n",
+    "    slit_height=sc.scalar(10.0, unit=\"cm\"),\n",
+    "    radius=sc.scalar(30.0, unit=\"cm\"),\n",
+    ")\n",
+    "\n",
+    "foc2 = DiskChopper(\n",
+    "    frequency=sc.scalar(-28.0, unit=\"Hz\"),\n",
+    "    beam_position=sc.scalar(0.0, unit=\"deg\"),\n",
+    "    phase=sc.scalar(-12.27, unit=\"deg\"),\n",
+    "    axle_position=sc.vector(value=[0, 0, 15.9], unit=\"m\"),\n",
+    "    slit_begin=sc.array(\n",
+    "        dims=[\"cutout\"],\n",
+    "        values=np.array([98.0, 154.0, 206.8, 255.0, 299.0, 344.65]),\n",
+    "        unit=\"deg\",\n",
+    "    ),\n",
+    "    slit_end=sc.array(\n",
+    "        dims=[\"cutout\"],\n",
+    "        values=np.array([134.6, 190.06, 237.01, 280.88, 323.56, 373.76]),\n",
+    "        unit=\"deg\",\n",
+    "    ),\n",
+    "    slit_height=sc.scalar(10.0, unit=\"cm\"),\n",
+    "    radius=sc.scalar(30.0, unit=\"cm\"),\n",
+    ")\n",
+    "\n",
+    "pol = DiskChopper(\n",
+    "    frequency=sc.scalar(-14.0, unit=\"Hz\"),\n",
+    "    beam_position=sc.scalar(0.0, unit=\"deg\"),\n",
+    "    phase=sc.scalar(0.0, unit=\"deg\"),\n",
+    "    axle_position=sc.vector(value=[0, 0, 17.0], unit=\"m\"),\n",
+    "    slit_begin=sc.array(\n",
+    "        dims=[\"cutout\"],\n",
+    "        values=np.array([40.0]),\n",
+    "        unit=\"deg\",\n",
+    "    ),\n",
+    "    slit_end=sc.array(\n",
+    "        dims=[\"cutout\"],\n",
+    "        values=np.array([240.0]),\n",
+    "        unit=\"deg\",\n",
+    "    ),\n",
+    "    slit_height=sc.scalar(10.0, unit=\"cm\"),\n",
+    "    radius=sc.scalar(30.0, unit=\"cm\"),\n",
+    ")\n",
+    "\n",
+    "disk_choppers = {\"wfm1\": wfm1, \"wfm2\": wfm2, \"foc1\": foc1, \"foc2\": foc2, \"pol\": pol}"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "4",
+   "metadata": {},
+   "source": [
+    "It is possible to visualize the properties of the choppers by inspecting their `repr`:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "5",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "wfm1"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "6",
+   "metadata": {},
+   "source": [
+    "### Adding a detector\n",
+    "\n",
+    "We also have a detector, which we place 26 meters away from the source."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "7",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "Ltotal = sc.scalar(26.0, unit=\"m\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "8",
+   "metadata": {},
+   "source": [
+    "## Creating some neutron events\n",
+    "\n",
+    "We create a semi-realistic set of neutron events based on the ESS pulse."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "9",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from ess.reduce.time_of_flight.fakes import FakeBeamline\n",
+    "\n",
+    "ess_beamline = FakeBeamline(\n",
+    "    choppers=disk_choppers,\n",
+    "    monitors={\"detector\": Ltotal},\n",
+    "    run_length=sc.scalar(1 / 14, unit=\"s\") * 14,\n",
+    "    events_per_pulse=200_000,\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "10",
+   "metadata": {},
+   "source": [
+    "The initial birth times and wavelengths of the generated neutrons can be visualized (for a single pulse):"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "11",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "one_pulse = ess_beamline.source.data[\"pulse\", 0]\n",
+    "one_pulse.hist(time=300).plot() + one_pulse.hist(wavelength=300).plot()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "12",
+   "metadata": {},
+   "source": [
+    "From this fake beamline, we extract the raw neutron signal at our detector:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "13",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "raw_data = ess_beamline.get_monitor(\"detector\")[0]\n",
+    "\n",
+    "# Visualize\n",
+    "raw_data.hist(event_time_offset=300).sum(\"pulse\").plot()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "14",
+   "metadata": {},
+   "source": [
+    "The total number of neutrons in our sample data that make it through to the detector is:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "15",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "raw_data.sum().value"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "16",
+   "metadata": {},
+   "source": [
+    "## Computing time-of-flight\n",
+    "\n",
+    "The chopper information is next used to construct a lookup table that provides an estimate of the real time-of-flight as a function of neutron time-of-arrival.\n",
+    "\n",
+    "We use the `tof` module to propagate a pulse of neutrons through the chopper system to the detectors,\n",
+    "and predict the most likely neutron wavelength for a given time-of-arrival and distance from source.\n",
+    "\n",
+    "From this,\n",
+    "we build a lookup table on which bilinear interpolation is used to compute a wavelength (and its corresponding time-of-flight)\n",
+    "for every neutron event.\n",
+    "\n",
+    "### Setting up the workflow"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "17",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "workflow = sl.Pipeline(\n",
+    "    time_of_flight.providers(), params=time_of_flight.default_parameters()\n",
+    ")\n",
+    "workflow[time_of_flight.RawData] = raw_data\n",
+    "workflow[time_of_flight.LtotalRange] = Ltotal, Ltotal\n",
+    "workflow[time_of_flight.LookupTableRelativeErrorThreshold] = 0.1\n",
+    "\n",
+    "workflow.visualize(time_of_flight.TofData)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "18",
+   "metadata": {},
+   "source": [
+    "We can see from the workflow diagram that we are still missing the simulated neutrons that are used to build the lookup table.\n",
+    "\n",
+    "Those are obtained by running a quick `tof` simulation of the beamline:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "19",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "workflow[time_of_flight.SimulationResults] = time_of_flight.simulate_beamline(\n",
+    "    choppers=disk_choppers,\n",
+    "    neutrons=3_000_000\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "20",
+   "metadata": {},
+   "source": [
+    "### Inspecting the lookup table\n",
+    "\n",
+    "The workflow first runs a simulation using the chopper parameters above,\n",
+    "and the result is stored in `SimulationResults` (see graph above).\n",
+    "\n",
+    "From these simulated neutrons, we create figures displaying the neutron wavelengths and time-of-flight,\n",
+    "as a function of arrival time at the detector.\n",
+    "\n",
+    "This is the basis for creating our lookup table."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "21",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "sim = workflow.compute(time_of_flight.SimulationResults)\n",
+    "# Compute time-of-arrival at the detector\n",
+    "tarrival = sim.time_of_arrival + ((Ltotal - sim.distance) / sim.speed).to(unit=\"us\")\n",
+    "# Compute time-of-flight at the detector\n",
+    "tflight = (Ltotal / sim.speed).to(unit=\"us\")\n",
+    "\n",
+    "events = sc.DataArray(\n",
+    "    data=sim.weight,\n",
+    "    coords={\"wavelength\": sim.wavelength, \"toa\": tarrival, \"tof\": tflight},\n",
+    ")\n",
+    "fig1 = events.hist(wavelength=300, toa=300).plot(norm=\"log\")\n",
+    "fig2 = events.hist(tof=300, toa=300).plot(norm=\"log\")\n",
+    "fig1 + fig2"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "22",
+   "metadata": {},
+   "source": [
+    "The lookup table is then obtained by computing the weighted mean of the time-of-flight inside each time-of-arrival bin.\n",
+    "\n",
+    "This is illustrated by the orange line in the figure below:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "23",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "table = workflow.compute(time_of_flight.TimeOfFlightLookupTable)\n",
+    "\n",
+    "# Overlay mean on the figure above\n",
+    "table[\"distance\", 1].plot(ax=fig2.ax, color=\"C1\", ls=\"-\", marker=None)\n",
+    "\n",
+    "# Zoom in\n",
+    "fig2.canvas.xrange = 40000, 50000\n",
+    "fig2.canvas.yrange = 35000, 50000\n",
+    "fig2"
+   ]
+  },
+  {
+   "attachments": {},
+   "cell_type": "markdown",
+   "id": "24",
+   "metadata": {},
+   "source": [
+    "We can see that the orange lines follow the center of the colored areas.\n",
+    "\n",
+    "We can also see that in regions where there is contamination from other chopper openings (overlapping regions in time),\n",
+    "the error bars on the orange line get larger.\n",
+    "\n",
+    "### Computing a time-of-flight coordinate\n",
+    "\n",
+    "We will now use our workflow to obtain our event data with a time-of-flight coordinate:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "25",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "tofs = workflow.compute(time_of_flight.TofData)\n",
+    "tofs"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "26",
+   "metadata": {},
+   "source": [
+    "Histogramming the data for a plot should show a profile with 6 bumps that correspond to the frames:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "27",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "tofs.bins.concat().hist(tof=300).plot()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "28",
+   "metadata": {},
+   "source": [
+    "### Converting to wavelength\n",
+    "\n",
+    "We can now convert our new time-of-flight coordinate to a neutron wavelength, using `transform_coords`:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "29",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from scippneutron.conversion.graph.beamline import beamline\n",
+    "from scippneutron.conversion.graph.tof import elastic\n",
+    "\n",
+    "# Perform coordinate transformation\n",
+    "graph = {**beamline(scatter=False), **elastic(\"tof\")}\n",
+    "wav_wfm = tofs.transform_coords(\"wavelength\", graph=graph)\n",
+    "\n",
+    "# Define wavelength bin edges\n",
+    "wavs = sc.linspace(\"wavelength\", 2, 10, 301, unit=\"angstrom\")\n",
+    "\n",
+    "wav_wfm.hist(wavelength=wavs).sum(\"pulse\").plot()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "30",
+   "metadata": {},
+   "source": [
+    "### Comparing to the ground truth\n",
+    "\n",
+    "As a consistency check, because we actually know the wavelengths of the neutrons we created,\n",
+    "we can compare the true neutron wavelengths to those we computed above."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "31",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "ground_truth = ess_beamline.model_result[\"detector\"].data.flatten(to=\"event\")\n",
+    "ground_truth = ground_truth[~ground_truth.masks[\"blocked_by_others\"]]\n",
+    "\n",
+    "pp.plot(\n",
+    "    {\n",
+    "        \"wfm\": wav_wfm.hist(wavelength=wavs).sum(\"pulse\"),\n",
+    "        \"ground_truth\": ground_truth.hist(wavelength=wavs),\n",
+    "    }\n",
+    ")"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/pyproject.toml b/pyproject.toml
index 7ce8d6ff..cdc74857 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -32,7 +32,7 @@ requires-python = ">=3.10"
 # Make sure to list one dependency per line.
 dependencies = [
     "sciline>=24.06.2",
-    "scipp>=24.02.0",
+    "scipp>=25.01.0",
     "scippneutron>=24.11.0",
     "scippnexus>=24.11.0",
 ]
@@ -44,6 +44,8 @@ test = [
     "ipywidgets",
     "pooch",
     "pytest",
+    "scipy>=1.7.0",
+    "tof>=25.01.2",
 ]
 
 [project.scripts]
diff --git a/requirements/base.in b/requirements/base.in
index 7a89b7c5..aee29a13 100644
--- a/requirements/base.in
+++ b/requirements/base.in
@@ -3,6 +3,6 @@
 # --- END OF CUSTOM SECTION ---
 # The following was generated by 'tox -e deps', DO NOT EDIT MANUALLY!
 sciline>=24.06.2
-scipp>=24.02.0
+scipp>=25.01.0
 scippneutron>=24.11.0
 scippnexus>=24.11.0
diff --git a/requirements/base.txt b/requirements/base.txt
index 9f0e9f6b..4038854b 100644
--- a/requirements/base.txt
+++ b/requirements/base.txt
@@ -1,4 +1,4 @@
-# SHA1:41ad412be1c3ef8654797c35e9ebb78554531f85
+# SHA1:d18487328be0c30ec0a6929501d1c4a58c71bd48
 #
 # This file is autogenerated by pip-compile-multi
 # To update, run:
@@ -11,15 +11,15 @@ cyclebane==24.10.0
     # via sciline
 cycler==0.12.1
     # via matplotlib
-fonttools==4.55.0
+fonttools==4.55.4
     # via matplotlib
 h5py==3.12.1
     # via
     #   scippneutron
     #   scippnexus
-kiwisolver==1.4.7
+kiwisolver==1.4.8
     # via matplotlib
-matplotlib==3.9.2
+matplotlib==3.10.0
     # via
     #   mpltoolbox
     #   plopp
@@ -27,7 +27,7 @@ mpltoolbox==24.5.1
     # via scippneutron
 networkx==3.4.2
     # via cyclebane
-numpy==2.1.3
+numpy==2.2.2
     # via
     #   contourpy
     #   h5py
@@ -38,11 +38,11 @@ numpy==2.1.3
     #   scipy
 packaging==24.2
     # via matplotlib
-pillow==11.0.0
+pillow==11.1.0
     # via matplotlib
 plopp==24.10.0
     # via scippneutron
-pyparsing==3.2.0
+pyparsing==3.2.1
     # via matplotlib
 python-dateutil==2.9.0.post0
     # via
@@ -55,15 +55,15 @@ scipp==25.1.0
     #   -r base.in
     #   scippneutron
     #   scippnexus
-scippneutron==24.11.0
+scippneutron==25.1.0
     # via -r base.in
-scippnexus==24.11.0
+scippnexus==24.11.1
     # via
     #   -r base.in
     #   scippneutron
-scipy==1.14.1
+scipy==1.15.1
     # via
     #   scippneutron
     #   scippnexus
-six==1.16.0
+six==1.17.0
     # via python-dateutil
diff --git a/requirements/basetest.in b/requirements/basetest.in
index a7f83d23..89e3593a 100644
--- a/requirements/basetest.in
+++ b/requirements/basetest.in
@@ -10,3 +10,5 @@
 ipywidgets
 pooch
 pytest
+scipy>=1.7.0
+tof>=25.01.2
diff --git a/requirements/basetest.txt b/requirements/basetest.txt
index f189f102..c709387a 100644
--- a/requirements/basetest.txt
+++ b/requirements/basetest.txt
@@ -1,31 +1,39 @@
-# SHA1:d69d994ed83cc9e89ad349f490a0f8f41d18d8bf
+# SHA1:985a3a6a9bccb27b4f182f4ada77cde97c24fca1
 #
 # This file is autogenerated by pip-compile-multi
 # To update, run:
 #
 #    pip-compile-multi
 #
-asttokens==2.4.1
+asttokens==3.0.0
     # via stack-data
-certifi==2024.8.30
+certifi==2024.12.14
     # via requests
-charset-normalizer==3.4.0
+charset-normalizer==3.4.1
     # via requests
 comm==0.2.2
     # via ipywidgets
+contourpy==1.3.1
+    # via matplotlib
+cycler==0.12.1
+    # via matplotlib
 decorator==5.1.1
     # via ipython
 exceptiongroup==1.2.2
     # via
     #   ipython
     #   pytest
-executing==2.1.0
+executing==2.2.0
     # via stack-data
+fonttools==4.55.4
+    # via matplotlib
 idna==3.10
     # via requests
+importlib-resources==6.5.2
+    # via tof
 iniconfig==2.0.0
     # via pytest
-ipython==8.29.0
+ipython==8.31.0
     # via ipywidgets
 ipywidgets==8.1.5
     # via -r basetest.in
@@ -33,39 +41,66 @@ jedi==0.19.2
     # via ipython
 jupyterlab-widgets==3.0.13
     # via ipywidgets
+kiwisolver==1.4.8
+    # via matplotlib
+matplotlib==3.10.0
+    # via plopp
 matplotlib-inline==0.1.7
     # via ipython
+numpy==2.2.2
+    # via
+    #   contourpy
+    #   matplotlib
+    #   scipp
+    #   scipy
 packaging==24.2
     # via
+    #   matplotlib
     #   pooch
     #   pytest
 parso==0.8.4
     # via jedi
 pexpect==4.9.0
     # via ipython
+pillow==11.1.0
+    # via matplotlib
 platformdirs==4.3.6
     # via pooch
+plopp==24.10.0
+    # via tof
 pluggy==1.5.0
     # via pytest
 pooch==1.8.2
     # via -r basetest.in
-prompt-toolkit==3.0.48
+prompt-toolkit==3.0.50
     # via ipython
 ptyprocess==0.7.0
     # via pexpect
 pure-eval==0.2.3
     # via stack-data
-pygments==2.18.0
+pygments==2.19.1
     # via ipython
-pytest==8.3.3
+pyparsing==3.2.1
+    # via matplotlib
+pytest==8.3.4
     # via -r basetest.in
+python-dateutil==2.9.0.post0
+    # via matplotlib
 requests==2.32.3
     # via pooch
-six==1.16.0
-    # via asttokens
+scipp==25.1.0
+    # via tof
+scipy==1.15.1
+    # via
+    #   -r basetest.in
+    #   tof
+six==1.17.0
+    # via python-dateutil
 stack-data==0.6.3
     # via ipython
-tomli==2.1.0
+tof==25.1.2
+    # via -r basetest.in
+tomli==2.2.1
     # via pytest
 traitlets==5.14.3
     # via
@@ -75,7 +110,7 @@ traitlets==5.14.3
     #   matplotlib-inline
 typing-extensions==4.12.2
     # via ipython
-urllib3==2.2.3
+urllib3==2.3.0
     # via requests
 wcwidth==0.2.13
     # via prompt-toolkit
diff --git a/requirements/ci.txt b/requirements/ci.txt
index 14921a29..9ffaa1db 100644
--- a/requirements/ci.txt
+++ b/requirements/ci.txt
@@ -5,25 +5,25 @@
 #
 #    pip-compile-multi
 #
-cachetools==5.5.0
+cachetools==5.5.1
     # via tox
-certifi==2024.8.30
+certifi==2024.12.14
     # via requests
 chardet==5.2.0
     # via tox
-charset-normalizer==3.4.0
+charset-normalizer==3.4.1
     # via requests
 colorama==0.4.6
     # via tox
 distlib==0.3.9
     # via virtualenv
-filelock==3.16.1
+filelock==3.17.0
     # via
     #   tox
     #   virtualenv
-gitdb==4.0.11
+gitdb==4.0.12
     # via gitpython
-gitpython==3.1.43
+gitpython==3.1.44
     # via -r ci.in
 idna==3.10
     # via requests
@@ -38,21 +38,21 @@ platformdirs==4.3.6
     #   virtualenv
 pluggy==1.5.0
     # via tox
-pyproject-api==1.8.0
+pyproject-api==1.9.0
     # via tox
 requests==2.32.3
     # via -r ci.in
-smmap==5.0.1
+smmap==5.0.2
     # via gitdb
-tomli==2.1.0
+tomli==2.2.1
     # via
     #   pyproject-api
     #   tox
-tox==4.23.2
+tox==4.24.1
     # via -r ci.in
 typing-extensions==4.12.2
     # via tox
-urllib3==2.2.3
+urllib3==2.3.0
     # via requests
-virtualenv==20.27.1
+virtualenv==20.29.1
     # via tox
diff --git a/requirements/dev.txt b/requirements/dev.txt
index e0ff645a..de09d3ae 100644
--- a/requirements/dev.txt
+++ b/requirements/dev.txt
@@ -14,7 +14,7 @@
 -r wheels.txt
 annotated-types==0.7.0
     # via pydantic
-anyio==4.6.2.post1
+anyio==4.8.0
     # via
     #   httpx
     #   jupyter-server
@@ -28,7 +28,7 @@ async-lru==2.0.4
     # via jupyterlab
 cffi==1.17.1
     # via argon2-cffi-bindings
-click==8.1.7
+click==8.1.8
     # via
     #   pip-compile-multi
     #   pip-tools
@@ -44,13 +44,13 @@ h11==0.14.0
     # via httpcore
 httpcore==1.0.7
     # via httpx
-httpx==0.27.2
+httpx==0.28.1
     # via jupyterlab
 isoduration==20.11.0
     # via jsonschema
 jinja2-ansible-filters==1.3.2
     # via copier
-json5==0.9.28
+json5==0.10.0
     # via jupyterlab-server
 jsonpointer==3.0.0
     # via jsonschema
@@ -59,11 +59,11 @@ jsonschema[format-nongpl]==4.23.0
     #   jupyter-events
     #   jupyterlab-server
     #   nbformat
-jupyter-events==0.10.0
+jupyter-events==0.11.0
     # via jupyter-server
 jupyter-lsp==2.2.5
     # via jupyterlab
-jupyter-server==2.14.2
+jupyter-server==2.15.0
     # via
     #   jupyter-lsp
     #   jupyterlab
@@ -71,7 +71,7 @@ jupyter-server==2.14.2
     #   notebook-shim
 jupyter-server-terminals==0.5.3
     # via jupyter-server
-jupyterlab==4.3.1
+jupyterlab==4.3.4
     # via -r dev.in
 jupyterlab-server==2.27.3
     # via jupyterlab
@@ -81,23 +81,23 @@ overrides==7.7.0
     # via jupyter-server
 pathspec==0.12.1
     # via copier
-pip-compile-multi==2.6.4
+pip-compile-multi==2.7.1
     # via -r dev.in
 pip-tools==7.4.1
     # via pip-compile-multi
 plumbum==1.9.0
     # via copier
-prometheus-client==0.21.0
+prometheus-client==0.21.1
     # via jupyter-server
 pycparser==2.22
     # via cffi
-pydantic==2.9.2
+pydantic==2.10.5
     # via copier
-pydantic-core==2.23.4
+pydantic-core==2.27.2
     # via pydantic
-python-json-logger==2.0.7
+python-json-logger==3.2.1
     # via jupyter-events
-questionary==1.10.0
+questionary==2.1.0
     # via copier
 rfc3339-validator==0.1.4
     # via
@@ -110,16 +110,14 @@ rfc3986-validator==0.1.1
 send2trash==1.8.3
     # via jupyter-server
 sniffio==1.3.1
-    # via
-    #   anyio
-    #   httpx
+    # via anyio
 terminado==0.18.1
     # via
     #   jupyter-server
     #   jupyter-server-terminals
 toposort==1.10
     # via pip-compile-multi
-types-python-dateutil==2.9.0.20241003
+types-python-dateutil==2.9.0.20241206
     # via arrow
 uri-template==1.3.0
     # via jsonschema
@@ -127,7 +125,7 @@ webcolors==24.11.1
     # via jsonschema
 websocket-client==1.8.0
     # via jupyter-server
-wheel==0.45.0
+wheel==0.45.1
     # via pip-tools
 
 # The following packages are considered to be unsafe in a requirements file:
diff --git a/requirements/docs.in b/requirements/docs.in
index 89ee3b2a..50aae713 100644
--- a/requirements/docs.in
+++ b/requirements/docs.in
@@ -3,6 +3,7 @@ ipykernel
 ipython!=8.7.0  # Breaks syntax highlighting in Jupyter code cells.
 myst-parser
 nbsphinx
+plopp
 pydata-sphinx-theme>=0.14
 sphinx
 sphinx-autodoc-typehints
@@ -10,3 +11,4 @@ sphinx-copybutton
 sphinx-design
 graphviz
 ipywidgets
+tof
diff --git a/requirements/docs.txt b/requirements/docs.txt
index 15b223f0..bf65e28f 100644
--- a/requirements/docs.txt
+++ b/requirements/docs.txt
@@ -1,4 +1,4 @@
-# SHA1:1524e6a45a0af2817e810c7aaf8cccbb8f216bb4
+# SHA1:de5c64b312f00d07eca11697a7756051987dfe90
 #
 # This file is autogenerated by pip-compile-multi
 # To update, run:
@@ -10,9 +10,9 @@ accessible-pygments==0.0.5
     # via pydata-sphinx-theme
 alabaster==1.0.0
     # via sphinx
-asttokens==2.4.1
+asttokens==3.0.0
     # via stack-data
-attrs==24.2.0
+attrs==24.3.0
     # via
     #   jsonschema
     #   referencing
@@ -24,17 +24,17 @@ beautifulsoup4==4.12.3
     # via
     #   nbconvert
     #   pydata-sphinx-theme
-bleach==6.2.0
+bleach[css]==6.2.0
     # via nbconvert
-certifi==2024.8.30
+certifi==2024.12.14
     # via requests
-charset-normalizer==3.4.0
+charset-normalizer==3.4.1
     # via requests
 comm==0.2.2
     # via
     #   ipykernel
     #   ipywidgets
-debugpy==1.8.8
+debugpy==1.8.12
     # via ipykernel
 decorator==5.1.1
     # via ipython
@@ -48,9 +48,9 @@ docutils==0.21.2
     #   sphinx
 exceptiongroup==1.2.2
     # via ipython
-executing==2.1.0
+executing==2.2.0
     # via stack-data
-fastjsonschema==2.20.0
+fastjsonschema==2.21.1
     # via nbformat
 graphviz==0.20.3
     # via -r docs.in
@@ -58,9 +58,11 @@ idna==3.10
     # via requests
 imagesize==1.4.1
     # via sphinx
+importlib-resources==6.5.2
+    # via tof
 ipykernel==6.29.5
     # via -r docs.in
-ipython==8.29.0
+ipython==8.31.0
     # via
     #   -r docs.in
     #   ipykernel
@@ -69,7 +71,7 @@ ipywidgets==8.1.5
     # via -r docs.in
 jedi==0.19.2
     # via ipython
-jinja2==3.1.4
+jinja2==3.1.5
     # via
     #   myst-parser
     #   nbconvert
@@ -110,20 +112,20 @@ mdit-py-plugins==0.4.2
     # via myst-parser
 mdurl==0.1.2
     # via markdown-it-py
-mistune==3.0.2
+mistune==3.1.0
     # via nbconvert
 myst-parser==4.0.0
     # via -r docs.in
-nbclient==0.10.0
+nbclient==0.10.2
     # via nbconvert
-nbconvert==7.16.4
+nbconvert==7.16.5
     # via nbsphinx
 nbformat==5.10.4
     # via
     #   nbclient
     #   nbconvert
     #   nbsphinx
-nbsphinx==0.9.5
+nbsphinx==0.9.6
     # via -r docs.in
 nest-asyncio==1.6.0
     # via ipykernel
@@ -135,17 +137,17 @@ pexpect==4.9.0
     # via ipython
 platformdirs==4.3.6
     # via jupyter-core
-prompt-toolkit==3.0.48
+prompt-toolkit==3.0.50
     # via ipython
-psutil==6.1.0
+psutil==6.1.1
     # via ipykernel
 ptyprocess==0.7.0
     # via pexpect
 pure-eval==0.2.3
     # via stack-data
-pydata-sphinx-theme==0.16.0
+pydata-sphinx-theme==0.16.1
     # via -r docs.in
-pygments==2.18.0
+pygments==2.19.1
     # via
     #   accessible-pygments
     #   ipython
@@ -158,13 +160,13 @@ pyzmq==26.2.0
     # via
     #   ipykernel
     #   jupyter-client
-referencing==0.35.1
+referencing==0.36.1
     # via
     #   jsonschema
     #   jsonschema-specifications
 requests==2.32.3
     # via sphinx
-rpds-py==0.21.0
+rpds-py==0.22.3
     # via
     #   jsonschema
     #   referencing
@@ -181,7 +183,7 @@ sphinx==8.1.3
     #   sphinx-autodoc-typehints
     #   sphinx-copybutton
     #   sphinx-design
-sphinx-autodoc-typehints==2.5.0
+sphinx-autodoc-typehints==3.0.1
     # via -r docs.in
 sphinx-copybutton==0.5.2
     # via -r docs.in
@@ -202,10 +204,12 @@ sphinxcontrib-serializinghtml==2.0.0
 stack-data==0.6.3
     # via ipython
 tinycss2==1.4.0
-    # via nbconvert
-tomli==2.1.0
+    # via bleach
+tof==25.1.2
+    # via -r docs.in
+tomli==2.2.1
     # via sphinx
-tornado==6.4.1
+tornado==6.4.2
     # via
     #   ipykernel
     #   jupyter-client
@@ -225,8 +229,10 @@ traitlets==5.14.3
 typing-extensions==4.12.2
     # via
     #   ipython
+    #   mistune
     #   pydata-sphinx-theme
-urllib3==2.2.3
+    #   referencing
+urllib3==2.3.0
     # via requests
 wcwidth==0.2.13
     # via prompt-toolkit
diff --git a/requirements/mypy.txt b/requirements/mypy.txt
index 0b6fa4cc..d7a49e8a 100644
--- a/requirements/mypy.txt
+++ b/requirements/mypy.txt
@@ -6,7 +6,7 @@
 #    pip-compile-multi
 #
 -r test.txt
-mypy==1.13.0
+mypy==1.14.1
     # via -r mypy.in
 mypy-extensions==1.0.0
     # via mypy
diff --git a/requirements/nightly.in b/requirements/nightly.in
index d87643f6..487eb23e 100644
--- a/requirements/nightly.in
+++ b/requirements/nightly.in
@@ -4,8 +4,10 @@
 ipywidgets
 pooch
 pytest
+scipy>=1.7.0
 scippnexus @ git+https://github.com/scipp/scippnexus@main
 scipp @ https://github.com/scipp/scipp/releases/download/nightly/scipp-nightly-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl
 sciline @ git+https://github.com/scipp/sciline@main
 cyclebane @ git+https://github.com/scipp/cyclebane@main
 scippneutron @ git+https://github.com/scipp/scippneutron@main
+tof @ git+https://github.com/scipp/tof@main
diff --git a/requirements/nightly.txt b/requirements/nightly.txt
index 56fe6f52..e4af6e81 100644
--- a/requirements/nightly.txt
+++ b/requirements/nightly.txt
@@ -1,15 +1,15 @@
-# SHA1:99a76dd6422a3c33a952108f215692b2ca32c80e
+# SHA1:c690c87be2259750742e8cf48f070ab7101b1da9
 #
 # This file is autogenerated by pip-compile-multi
 # To update, run:
 #
 #    pip-compile-multi
 #
-asttokens==2.4.1
+asttokens==3.0.0
     # via stack-data
-certifi==2024.8.30
+certifi==2024.12.14
     # via requests
-charset-normalizer==3.4.0
+charset-normalizer==3.4.1
     # via requests
 comm==0.2.2
     # via ipywidgets
@@ -27,9 +27,9 @@ exceptiongroup==1.2.2
     # via
     #   ipython
     #   pytest
-executing==2.1.0
+executing==2.2.0
     # via stack-data
-fonttools==4.55.0
+fonttools==4.55.4
     # via matplotlib
 h5py==3.12.1
     # via
@@ -37,9 +37,11 @@ h5py==3.12.1
     #   scippnexus
 idna==3.10
     # via requests
+importlib-resources==6.5.2
+    # via tof
 iniconfig==2.0.0
     # via pytest
-ipython==8.29.0
+ipython==8.31.0
     # via ipywidgets
 ipywidgets==8.1.5
     # via -r nightly.in
@@ -47,9 +49,9 @@ jedi==0.19.2
     # via ipython
 jupyterlab-widgets==3.0.13
     # via ipywidgets
-kiwisolver==1.4.7
+kiwisolver==1.4.8
     # via matplotlib
-matplotlib==3.9.2
+matplotlib==3.10.0
     # via
     #   mpltoolbox
     #   plopp
@@ -59,7 +61,7 @@ mpltoolbox==24.5.1
     # via scippneutron
 networkx==3.4.2
     # via cyclebane
-numpy==2.1.3
+numpy==2.2.2
     # via
     #   contourpy
     #   h5py
@@ -77,27 +79,29 @@ parso==0.8.4
     # via jedi
 pexpect==4.9.0
     # via ipython
-pillow==11.0.0
+pillow==11.1.0
     # via matplotlib
 platformdirs==4.3.6
     # via pooch
 plopp==24.10.0
-    # via scippneutron
+    # via
+    #   scippneutron
+    #   tof
 pluggy==1.5.0
     # via pytest
 pooch==1.8.2
     # via -r nightly.in
-prompt-toolkit==3.0.48
+prompt-toolkit==3.0.50
     # via ipython
 ptyprocess==0.7.0
     # via pexpect
 pure-eval==0.2.3
     # via stack-data
-pygments==2.18.0
+pygments==2.19.1
     # via ipython
-pyparsing==3.2.0
+pyparsing==3.2.1
     # via matplotlib
-pytest==8.3.3
+pytest==8.3.4
     # via -r nightly.in
 python-dateutil==2.9.0.post0
     # via
@@ -112,23 +116,26 @@ scipp @ https://github.com/scipp/scipp/releases/download/nightly/scipp-nightly-c
     #   -r nightly.in
     #   scippneutron
     #   scippnexus
+    #   tof
 scippneutron @ git+https://github.com/scipp/scippneutron@main
     # via -r nightly.in
 scippnexus @ git+https://github.com/scipp/scippnexus@main
     # via
     #   -r nightly.in
     #   scippneutron
-scipy==1.14.1
+scipy==1.15.1
     # via
+    #   -r nightly.in
     #   scippneutron
     #   scippnexus
-six==1.16.0
-    # via
-    #   asttokens
-    #   python-dateutil
+    #   tof
+six==1.17.0
+    # via python-dateutil
 stack-data==0.6.3
     # via ipython
-tomli==2.1.0
+tof @ git+https://github.com/scipp/tof@main
+    # via -r nightly.in
+tomli==2.2.1
     # via pytest
 traitlets==5.14.3
     # via
@@ -138,7 +145,7 @@ traitlets==5.14.3
     #   matplotlib-inline
 typing-extensions==4.12.2
     # via ipython
-urllib3==2.2.3
+urllib3==2.3.0
     # via requests
 wcwidth==0.2.13
     # via prompt-toolkit
diff --git a/requirements/static.txt b/requirements/static.txt
index e107d915..b7eb0124 100644
--- a/requirements/static.txt
+++ b/requirements/static.txt
@@ -9,17 +9,17 @@ cfgv==3.4.0
     # via pre-commit
 distlib==0.3.9
     # via virtualenv
-filelock==3.16.1
+filelock==3.17.0
     # via virtualenv
-identify==2.6.2
+identify==2.6.6
     # via pre-commit
 nodeenv==1.9.1
     # via pre-commit
 platformdirs==4.3.6
     # via virtualenv
-pre-commit==4.0.1
+pre-commit==4.1.0
     # via -r static.in
 pyyaml==6.0.2
     # via pre-commit
-virtualenv==20.27.1
+virtualenv==20.29.1
     # via pre-commit
diff --git a/requirements/wheels.txt b/requirements/wheels.txt
index 6a1c0600..bfae20bf 100644
--- a/requirements/wheels.txt
+++ b/requirements/wheels.txt
@@ -11,5 +11,5 @@ packaging==24.2
     # via build
 pyproject-hooks==1.2.0
     # via build
-tomli==2.1.0
+tomli==2.2.1
     # via build
diff --git a/src/ess/reduce/__init__.py b/src/ess/reduce/__init__.py
index d2b5a52f..c529dbd8 100644
--- a/src/ess/reduce/__init__.py
+++ b/src/ess/reduce/__init__.py
@@ -4,7 +4,7 @@
 
 import importlib.metadata
 
-from . import nexus, uncertainty
+from . import nexus, uncertainty, time_of_flight
 
 try:
     __version__ = importlib.metadata.version("essreduce")
@@ -13,4 +13,4 @@
 
 del importlib
 
-__all__ = ['nexus', 'uncertainty']
+__all__ = ["nexus", "uncertainty", "time_of_flight"]
diff --git a/src/ess/reduce/time_of_flight/__init__.py b/src/ess/reduce/time_of_flight/__init__.py
new file mode 100644
index 00000000..38080551
--- /dev/null
+++ b/src/ess/reduce/time_of_flight/__init__.py
@@ -0,0 +1,62 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (c) 2023 Scipp contributors (https://github.com/scipp)
+
+"""
+Utilities for computing real neutron time-of-flight from chopper settings and
+neutron time-of-arrival at the detectors.
+"""
+
+from .toa_to_tof import (
+    default_parameters,
+    resample_tof_data,
+    providers,
+    TofWorkflow,
+)
+from .simulation import simulate_beamline
+from .types import (
+    DistanceResolution,
+    FrameFoldedTimeOfArrival,
+    FramePeriod,
+    LookupTableRelativeErrorThreshold,
+    LtotalRange,
+    MaskedTimeOfFlightLookupTable,
+    PivotTimeAtDetector,
+    PulsePeriod,
+    PulseStride,
+    PulseStrideOffset,
+    RawData,
+    ResampledTofData,
+    SimulationResults,
+    TimeOfArrivalMinusPivotTimeModuloPeriod,
+    TimeOfFlightLookupTable,
+    TofData,
+    UnwrappedTimeOfArrival,
+    UnwrappedTimeOfArrivalMinusPivotTime,
+)
+
+
+__all__ = [
+    "DistanceResolution",
+    "FrameFoldedTimeOfArrival",
+    "FramePeriod",
+    "LookupTableRelativeErrorThreshold",
+    "LtotalRange",
+    "MaskedTimeOfFlightLookupTable",
+    "PivotTimeAtDetector",
+    "PulsePeriod",
+    "PulseStride",
+    "PulseStrideOffset",
+    "RawData",
+    "ResampledTofData",
+    "SimulationResults",
+    "TimeOfArrivalMinusPivotTimeModuloPeriod",
+    "TimeOfFlightLookupTable",
+    "TofData",
+    "TofWorkflow",
+    "UnwrappedTimeOfArrival",
+    "UnwrappedTimeOfArrivalMinusPivotTime",
+    "default_parameters",
+    "providers",
+    "resample_tof_data",
+    "simulate_beamline",
+]
diff --git a/src/ess/reduce/time_of_flight/fakes.py b/src/ess/reduce/time_of_flight/fakes.py
new file mode 100644
index 00000000..5679b7d5
--- /dev/null
+++ b/src/ess/reduce/time_of_flight/fakes.py
@@ -0,0 +1,240 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (c) 2025 Scipp contributors (https://github.com/scipp)
+"""
+A fake time-of-flight neutron beamline for documentation and testing.
+
+This provides detector event data in a structure as typically provided in a NeXus file,
+with event_time_offset and event_time_zero information.
+"""
+
+from collections.abc import Callable
+
+import numpy as np
+import scipp as sc
+from scippneutron.chopper import DiskChopper
+
+
class FakeBeamline:
    """
    Fake time-of-flight beamline built on the ``tof`` package.

    Runs a simulation of neutrons travelling from an ESS-like source through a
    chopper cascade to a set of monitors, and exposes the recorded events in a
    structure resembling data loaded from a NeXus file (with
    ``event_time_zero`` and ``event_time_offset`` coordinates).

    Parameters
    ----------
    choppers:
        Mapping from chopper name to ``DiskChopper`` describing the cascade.
    monitors:
        Mapping from monitor name to its distance from the source.
    run_length:
        Total duration of the simulated run; together with the source
        frequency this determines the number of pulses.
    events_per_pulse:
        Number of neutrons to generate per pulse.
    source:
        Optional factory creating a custom ``tof`` source; it is called with a
        ``pulses`` keyword argument. When ``None``, an ESS source is used.
    """

    def __init__(
        self,
        choppers: dict[str, DiskChopper],
        monitors: dict[str, sc.Variable],
        run_length: sc.Variable,
        events_per_pulse: int = 200000,
        source: Callable | None = None,
    ):
        import math

        import tof as tof_pkg
        from tof.facilities.ess_pulse import pulse

        self.frequency = pulse.frequency
        # Number of pulses needed to cover the requested run length.
        self.npulses = math.ceil((run_length * self.frequency).to(unit="").value)
        self.events_per_pulse = events_per_pulse

        # Create a source
        if source is None:
            self.source = tof_pkg.Source(
                facility="ess", neutrons=self.events_per_pulse, pulses=self.npulses
            )
        else:
            self.source = source(pulses=self.npulses)

        # Convert the choppers to tof.Chopper
        self.choppers = [
            tof_pkg.Chopper(
                frequency=abs(ch.frequency),
                # The sign of the frequency encodes the rotation direction.
                direction=tof_pkg.AntiClockwise
                if (ch.frequency.value > 0.0)
                else tof_pkg.Clockwise,
                open=ch.slit_begin,
                close=ch.slit_end,
                phase=abs(ch.phase),
                distance=ch.axle_position.fields.z,
                name=name,
            )
            for name, ch in choppers.items()
        ]

        # Add detectors
        self.monitors = [
            tof_pkg.Detector(distance=distance, name=key)
            for key, distance in monitors.items()
        ]

        #  Propagate the neutrons
        self.model = tof_pkg.Model(
            source=self.source, choppers=self.choppers, detectors=self.monitors
        )
        self.model_result = self.model.run()

    def get_monitor(self, name: str) -> tuple[sc.DataArray, sc.DataArray]:
        """
        Return the events recorded at the given monitor, grouped by pulse.

        Parameters
        ----------
        name:
            Name of the monitor/detector to read out.

        Returns
        -------
        :
            Two data arrays grouped by ``event_time_zero`` and renamed to a
            ``pulse`` dimension: the first formatted like NeXus event data
            (only ``event_time_zero``/``event_time_offset`` coordinates), the
            second retaining the raw simulation coordinates (``toa``, ``tof``,
            ``speed``, ``wavelength``).
        """
        # Create some fake pulse time zero
        start = sc.datetime("2024-01-01T12:00:00.000000")
        period = sc.reciprocal(self.frequency)

        detector = self.model_result.detectors[name]
        raw_data = detector.data.flatten(to="event")
        # Select only the neutrons that make it to the detector
        raw_data = raw_data[~raw_data.masks["blocked_by_others"]].copy()
        raw_data.coords["Ltotal"] = detector.distance

        # Format the data in a way that resembles data loaded from NeXus
        event_data = raw_data.copy(deep=False)
        dt = period.to(unit="us")
        # Assign each event to the start time of the pulse it belongs to.
        event_time_zero = (dt * (event_data.coords["toa"] // dt)).to(dtype=int) + start
        raw_data.coords["event_time_zero"] = event_time_zero
        event_data.coords["event_time_zero"] = event_time_zero
        # NeXus stores arrival time relative to the pulse start.
        event_data.coords["event_time_offset"] = (
            event_data.coords.pop("toa").to(unit="s") % period
        )
        # Strip the simulation-only coordinates from the NeXus-like output.
        del event_data.coords["tof"]
        del event_data.coords["speed"]
        del event_data.coords["time"]
        del event_data.coords["wavelength"]

        return (
            event_data.group("event_time_zero").rename_dims(event_time_zero="pulse"),
            raw_data.group("event_time_zero").rename_dims(event_time_zero="pulse"),
        )
+
+
# Chopper settings modelled after the DREAM chopper cascade.
# Negative frequencies denote clockwise rotation (see the conversion in
# FakeBeamline above, which maps the sign to a tof direction).

# First wavelength-frame-multiplication (pulse-shaping) chopper, 6 cutouts.
wfm1_chopper = DiskChopper(
    frequency=sc.scalar(-70.0, unit="Hz"),
    beam_position=sc.scalar(0.0, unit="deg"),
    phase=sc.scalar(-47.10, unit="deg"),
    axle_position=sc.vector(value=[0, 0, 6.6], unit="m"),
    slit_begin=sc.array(
        dims=["cutout"],
        values=np.array([83.71, 140.49, 193.26, 242.32, 287.91, 330.3]) + 15.0,
        unit="deg",
    ),
    slit_end=sc.array(
        dims=["cutout"],
        values=np.array([94.7, 155.79, 212.56, 265.33, 314.37, 360.0]) + 15.0,
        unit="deg",
    ),
    slit_height=sc.scalar(10.0, unit="cm"),
    radius=sc.scalar(30.0, unit="cm"),
)

# Second wavelength-frame-multiplication chopper, 6 cutouts.
wfm2_chopper = DiskChopper(
    frequency=sc.scalar(-70.0, unit="Hz"),
    beam_position=sc.scalar(0.0, unit="deg"),
    phase=sc.scalar(-76.76, unit="deg"),
    axle_position=sc.vector(value=[0, 0, 7.1], unit="m"),
    slit_begin=sc.array(
        dims=["cutout"],
        values=np.array([65.04, 126.1, 182.88, 235.67, 284.73, 330.32]) + 15.0,
        unit="deg",
    ),
    slit_end=sc.array(
        dims=["cutout"],
        values=np.array([76.03, 141.4, 202.18, 254.97, 307.74, 360.0]) + 15.0,
        unit="deg",
    ),
    slit_height=sc.scalar(10.0, unit="cm"),
    radius=sc.scalar(30.0, unit="cm"),
)

# First frame-overlap (focusing) chopper.
foc1_chopper = DiskChopper(
    frequency=sc.scalar(-56.0, unit="Hz"),
    beam_position=sc.scalar(0.0, unit="deg"),
    phase=sc.scalar(-62.40, unit="deg"),
    axle_position=sc.vector(value=[0, 0, 8.8], unit="m"),
    slit_begin=sc.array(
        dims=["cutout"],
        values=np.array([74.6, 139.6, 194.3, 245.3, 294.8, 347.2]),
        unit="deg",
    ),
    slit_end=sc.array(
        dims=["cutout"],
        values=np.array([95.2, 162.8, 216.1, 263.1, 310.5, 371.6]),
        unit="deg",
    ),
    slit_height=sc.scalar(10.0, unit="cm"),
    radius=sc.scalar(30.0, unit="cm"),
)

# Second frame-overlap (focusing) chopper.
foc2_chopper = DiskChopper(
    frequency=sc.scalar(-28.0, unit="Hz"),
    beam_position=sc.scalar(0.0, unit="deg"),
    phase=sc.scalar(-12.27, unit="deg"),
    axle_position=sc.vector(value=[0, 0, 15.9], unit="m"),
    slit_begin=sc.array(
        dims=["cutout"],
        values=np.array([98.0, 154.0, 206.8, 255.0, 299.0, 344.65]),
        unit="deg",
    ),
    slit_end=sc.array(
        dims=["cutout"],
        values=np.array([134.6, 190.06, 237.01, 280.88, 323.56, 373.76]),
        unit="deg",
    ),
    slit_height=sc.scalar(10.0, unit="cm"),
    radius=sc.scalar(30.0, unit="cm"),
)

# Polarization chopper: a single wide (200 deg) cutout at source frequency.
pol_chopper = DiskChopper(
    frequency=sc.scalar(-14.0, unit="Hz"),
    beam_position=sc.scalar(0.0, unit="deg"),
    phase=sc.scalar(0.0, unit="deg"),
    axle_position=sc.vector(value=[0, 0, 17.0], unit="m"),
    slit_begin=sc.array(
        dims=["cutout"],
        values=np.array([40.0]),
        unit="deg",
    ),
    slit_end=sc.array(
        dims=["cutout"],
        values=np.array([240.0]),
        unit="deg",
    ),
    slit_height=sc.scalar(10.0, unit="cm"),
    radius=sc.scalar(30.0, unit="cm"),
)

# Pulse-skipping chopper: runs at half the source frequency (7 Hz vs 14 Hz)
# so only every other pulse is transmitted.
pulse_skipping = DiskChopper(
    frequency=sc.scalar(-7.0, unit="Hz"),
    beam_position=sc.scalar(0.0, unit="deg"),
    phase=sc.scalar(0.0, unit="deg"),
    axle_position=sc.vector(value=[0, 0, 30.0], unit="m"),
    slit_begin=sc.array(
        dims=["cutout"],
        values=np.array([40.0]),
        unit="deg",
    ),
    slit_end=sc.array(
        dims=["cutout"],
        values=np.array([140.0]),
        unit="deg",
    ),
    slit_height=sc.scalar(10.0, unit="cm"),
    radius=sc.scalar(30.0, unit="cm"),
)
+
+
def wfm_choppers():
    """
    Return the chopper cascade for wavelength-frame-multiplication (WFM) mode.

    Returns
    -------
    :
        Fresh dict mapping chopper name to its :class:`DiskChopper` settings.
    """
    names = ("wfm1", "wfm2", "foc1", "foc2", "pol")
    disks = (wfm1_chopper, wfm2_chopper, foc1_chopper, foc2_chopper, pol_chopper)
    return dict(zip(names, disks, strict=True))
+
+
def psc_choppers():
    """
    Return the chopper cascade for pulse-shaping mode: the same choppers as
    ``wfm_choppers`` but with only the first cutout of each disk kept.
    """
    return {
        name: DiskChopper(
            frequency=ch.frequency,
            beam_position=ch.beam_position,
            phase=ch.phase,
            axle_position=ch.axle_position,
            slit_begin=ch.slit_begin[0:1],
            slit_end=ch.slit_end[0:1],
            # NOTE(review): the WFM choppers define slit_height as a 0-d
            # scalar, and positional slicing of a 0-d scipp variable looks
            # suspicious — confirm this should not be plain ``ch.slit_height``.
            slit_height=ch.slit_height[0:1],
            radius=ch.radius,
        )
        for name, ch in wfm_choppers().items()
    }
diff --git a/src/ess/reduce/time_of_flight/simulation.py b/src/ess/reduce/time_of_flight/simulation.py
new file mode 100644
index 00000000..7bdf79aa
--- /dev/null
+++ b/src/ess/reduce/time_of_flight/simulation.py
@@ -0,0 +1,74 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (c) 2025 Scipp contributors (https://github.com/scipp)
+from collections.abc import Mapping
+
+import scipp as sc
+from scippneutron.chopper import DiskChopper
+
+from .types import SimulationResults
+
+
def simulate_beamline(
    choppers: Mapping[str, DiskChopper],
    neutrons: int = 1_000_000,
    seed: int | None = None,
    facility: str = 'ess',
) -> SimulationResults:
    """
    Propagate a pulse of neutrons through a chopper cascade with the ``tof``
    package (https://tof.readthedocs.io) and return the surviving events.

    Parameters
    ----------
    choppers:
        A dict of DiskChopper objects representing the choppers in the beamline. See
        https://scipp.github.io/scippneutron/user-guide/chopper/processing-nexus-choppers.html#Build-DiskChopper
        for more information.
    neutrons:
        Number of neutrons to simulate.
    seed:
        Seed for the random number generator used in the simulation.
    facility:
        Facility where the experiment is performed.

    Returns
    -------
    :
        Flat table of neutron properties recorded at the last chopper (or at
        the source itself when no choppers are given).
    """
    import tof

    cascade = [
        tof.Chopper(
            frequency=abs(disk.frequency),
            # The sign of the frequency encodes the rotation direction.
            direction=(
                tof.AntiClockwise if disk.frequency.value > 0.0 else tof.Clockwise
            ),
            open=disk.slit_begin,
            close=disk.slit_end,
            phase=abs(disk.phase),
            distance=disk.axle_position.fields.z,
            name=name,
        )
        for name, disk in choppers.items()
    ]
    source = tof.Source(facility=facility, neutrons=neutrons, seed=seed)

    if not cascade:
        # Without choppers the recorded events are simply the source pulse.
        birth = source.data.squeeze()
        return SimulationResults(
            time_of_arrival=birth.coords["time"],
            speed=birth.coords["speed"],
            wavelength=birth.coords["wavelength"],
            weight=birth.data,
            distance=0.0 * sc.units.m,
        )

    results = tof.Model(source=source, choppers=cascade).run()
    # Record the events at the furthest chopper in the cascade.
    last_chopper = max(cascade, key=lambda c: c.distance)
    recorded = results[last_chopper.name].data.squeeze()
    blocked = recorded.masks["blocked_by_others"] | recorded.masks["blocked_by_me"]
    surviving = recorded[~blocked]
    return SimulationResults(
        time_of_arrival=surviving.coords["toa"],
        speed=surviving.coords["speed"],
        wavelength=surviving.coords["wavelength"],
        weight=surviving.data,
        distance=last_chopper.distance,
    )
diff --git a/src/ess/reduce/time_of_flight/to_events.py b/src/ess/reduce/time_of_flight/to_events.py
new file mode 100644
index 00000000..f8fa3350
--- /dev/null
+++ b/src/ess/reduce/time_of_flight/to_events.py
@@ -0,0 +1,104 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (c) 2025 Scipp contributors (https://github.com/scipp)
+
+from functools import reduce
+
+import numpy as np
+import scipp as sc
+
+
def to_events(
    da: sc.DataArray,
    event_dim: str,
    events_per_bin: int = 500,
    rng: np.random.Generator | None = None,
) -> sc.DataArray:
    """
    Convert a histogrammed data array to an event list.
    The generated events have a uniform distribution within each bin.
    Each dimension with a bin-edge coordinate is converted to an event coordinate.
    The contract is that if we re-histogram the event list with the same bin edges,
    we should get the original counts back.
    Masks on non-bin-edge dimensions are preserved.
    If there are masks on bin-edge dimensions, the masked values are zeroed out in the
    original data before the conversion to events.

    Parameters
    ----------
    da:
        DataArray to convert to events.
    event_dim:
        Name of the new event dimension.
    events_per_bin:
        Number of events to generate per bin.
    rng:
        Random number generator used to draw the event positions within each
        bin. A fresh (unseeded) ``numpy.random.default_rng()`` is created when
        not supplied; pass a seeded generator for reproducible output.

    Returns
    -------
    :
        One-dimensional event list along ``event_dim``.

    Raises
    ------
    ValueError
        If ``da`` already contains binned (event) data.
    """
    if da.bins is not None:
        raise ValueError("Cannot convert a binned DataArray to events.")
    if rng is None:
        rng = np.random.default_rng()
    event_coords = {}
    edge_dims = []
    midp_dims = []
    # Separate bin-edge and midpoints coords
    for dim in da.dims:
        if da.coords.is_edges(dim):
            edge_dims.append(dim)
        else:
            midp_dims.append(dim)

    edge_sizes = {dim: da.sizes[dim] for dim in edge_dims}
    for dim in edge_dims:
        coord = da.coords[dim]
        low = sc.broadcast(coord[dim, :-1], sizes=edge_sizes).values
        high = sc.broadcast(coord[dim, 1:], sizes=edge_sizes).values

        # The numpy.random.uniform function below does not support NaNs, so we need to
        # replace them with zeros, and then replace them back after the random numbers
        # have been generated.
        nans = np.isnan(low) | np.isnan(high)
        low = np.where(nans, 0.0, low)
        high = np.where(nans, 0.0, high)

        # In each bin, we generate a number of events with a uniform distribution.
        events = rng.uniform(
            low, high, size=(events_per_bin, *list(edge_sizes.values()))
        )
        events[..., nans] = np.nan
        event_coords[dim] = sc.array(
            dims=[event_dim, *edge_dims], values=events, unit=coord.unit
        )

    # Find and apply masks that are on a bin-edge dimension
    event_masks = {}
    other_masks = {}
    edge_dims_set = set(edge_dims)
    for key, mask in da.masks.items():
        if set(mask.dims) & edge_dims_set:
            event_masks[key] = mask
        else:
            other_masks[key] = mask

    data = da.data
    if event_masks:
        # Zero out masked bins so they contribute nothing when re-histogrammed.
        inv_mask = (~reduce(lambda a, b: a | b, event_masks.values())).to(dtype=int)
        inv_mask.unit = ''
        data = data * inv_mask

    # Create the data counts, which are the original counts divided by the number of
    # events per bin
    sizes = {event_dim: events_per_bin} | da.sizes
    val = sc.broadcast(sc.values(data) / float(events_per_bin), sizes=sizes)
    kwargs = {'dims': list(sizes.keys()), 'values': val.values, 'unit': data.unit}
    if data.variances is not None:
        # Note here that all the events are correlated.
        # If we later histogram the events with different edges than the original
        # histogram, then neighboring bins will be correlated, and the error obtained
        # will be too small. It is however not clear what can be done to improve this.
        kwargs['variances'] = sc.broadcast(
            sc.variances(data) / float(events_per_bin), sizes=sizes
        ).values
    new_data = sc.array(**kwargs)

    new = sc.DataArray(data=new_data, coords=event_coords)
    new = new.transpose((*midp_dims, *edge_dims, event_dim)).flatten(
        dims=[*edge_dims, event_dim], to=event_dim
    )
    return new.assign_coords(
        {dim: da.coords[dim].copy() for dim in midp_dims}
    ).assign_masks({key: mask.copy() for key, mask in other_masks.items()})
diff --git a/src/ess/reduce/time_of_flight/toa_to_tof.py b/src/ess/reduce/time_of_flight/toa_to_tof.py
new file mode 100644
index 00000000..ccbe3d3c
--- /dev/null
+++ b/src/ess/reduce/time_of_flight/toa_to_tof.py
@@ -0,0 +1,545 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (c) 2025 Scipp contributors (https://github.com/scipp)
+"""
+Time-of-flight workflow for unwrapping the time of arrival of the neutron at the
+detector.
+This workflow is used to convert raw detector data with event_time_zero and
+event_time_offset coordinates to data with a time-of-flight coordinate.
+"""
+
+from __future__ import annotations
+
+from collections.abc import Callable
+from typing import Any
+
+import numpy as np
+import scipp as sc
+from scipp._scipp.core import _bins_no_validate
+from scippneutron._utils import elem_unit
+
+from .to_events import to_events
+from .types import (
+    DistanceResolution,
+    FastestNeutron,
+    FrameFoldedTimeOfArrival,
+    FramePeriod,
+    LookupTableRelativeErrorThreshold,
+    Ltotal,
+    LtotalRange,
+    MaskedTimeOfFlightLookupTable,
+    PivotTimeAtDetector,
+    PulsePeriod,
+    PulseStride,
+    PulseStrideOffset,
+    RawData,
+    ResampledTofData,
+    SimulationResults,
+    TimeOfArrivalMinusPivotTimeModuloPeriod,
+    TimeOfArrivalResolution,
+    TimeOfFlightLookupTable,
+    TofData,
+    UnwrappedTimeOfArrival,
+    UnwrappedTimeOfArrivalMinusPivotTime,
+)
+
+
def frame_period(pulse_period: PulsePeriod, pulse_stride: PulseStride) -> FramePeriod:
    """
    Period of one frame: the pulse period multiplied by the pulse stride.

    Parameters
    ----------
    pulse_period:
        Period of the source pulses, i.e., time between consecutive pulse starts.
    pulse_stride:
        Stride of used pulses. Usually 1, but may be a small integer when
        pulse-skipping.
    """
    period = pulse_period * pulse_stride
    return FramePeriod(period)
+
+
def extract_ltotal(da: RawData) -> Ltotal:
    """
    Read the source-to-detector flight path length off the detector data.

    Parameters
    ----------
    da:
        Raw detector data loaded from a NeXus file, e.g., NXdetector containing
        NXevent_data. Must carry an ``Ltotal`` coordinate.
    """
    ltotal = da.coords["Ltotal"]
    return Ltotal(ltotal)
+
+
def compute_tof_lookup_table(
    simulation: SimulationResults,
    ltotal_range: LtotalRange,
    distance_resolution: DistanceResolution,
    toa_resolution: TimeOfArrivalResolution,
) -> TimeOfFlightLookupTable:
    """
    Compute a lookup table for time-of-flight as a function of distance and
    time-of-arrival.

    Parameters
    ----------
    simulation:
        Results of a time-of-flight simulation used to create a lookup table.
        The results should be a flat table with columns for time-of-arrival, speed,
        wavelength, and weight.
    ltotal_range:
        Range of total flight path lengths from the source to the detector.
    distance_resolution:
        Resolution of the distance axis in the lookup table.
    toa_resolution:
        Resolution of the time-of-arrival axis in the lookup table.

    Returns
    -------
    :
        2d table (distance x time-of-arrival) holding the weighted mean
        time-of-flight in each bin; the spread of the contributing values is
        stored in the variances.
    """
    distance_unit = "m"
    res = distance_resolution.to(unit=distance_unit)
    simulation_distance = simulation.distance.to(unit=distance_unit)

    # Distances are taken relative to the position where the simulated
    # neutrons were recorded.
    min_dist, max_dist = (
        x.to(unit=distance_unit) - simulation_distance for x in ltotal_range
    )
    # We need to bin the data below, to compute the weighted mean of the wavelength.
    # This results in data with bin edges.
    # However, the 2d interpolator expects bin centers.
    # We want to give the 2d interpolator a table that covers the requested range,
    # hence we need to extend the range by at least half a resolution in each direction.
    # Then, we make the choice that the resolution in distance is the quantity that
    # should be preserved. Because the difference between min and max distance is
    # not necessarily an integer multiple of the resolution, we need to add a pad to
    # ensure that the last bin is not cut off. We want the upper edge to be higher than
    # the maximum distance, hence we pad with an additional 1.5 x resolution.
    pad = 2.0 * res
    dist_edges = sc.array(
        dims=["distance"],
        values=np.arange((min_dist - pad).value, (max_dist + pad).value, res.value),
        unit=distance_unit,
    )
    distances = sc.midpoints(dist_edges)

    time_unit = simulation.time_of_arrival.unit
    # Project each simulated neutron onto every distance of the table.
    toas = simulation.time_of_arrival + (distances / simulation.speed).to(
        unit=time_unit, copy=False
    )

    # Compute time-of-flight for all neutrons
    wavs = sc.broadcast(simulation.wavelength.to(unit="m"), sizes=toas.sizes).flatten(
        to="event"
    )
    dist = sc.broadcast(distances + simulation_distance, sizes=toas.sizes).flatten(
        to="event"
    )
    # tof = distance * m_n * wavelength / h (de Broglie relation).
    tofs = dist * sc.constants.m_n
    tofs *= wavs
    tofs /= sc.constants.h

    data = sc.DataArray(
        data=sc.broadcast(simulation.weight, sizes=toas.sizes).flatten(to="event"),
        coords={
            "toa": toas.flatten(to="event"),
            "tof": tofs.to(unit=time_unit, copy=False),
            "distance": dist,
        },
    )

    binned = data.bin(distance=dist_edges + simulation_distance, toa=toa_resolution)
    # Weighted mean of tof inside each bin
    mean_tof = (
        binned.bins.data * binned.bins.coords["tof"]
    ).bins.sum() / binned.bins.sum()
    # Compute the variance of the tofs to track regions with large uncertainty
    variance = (
        binned.bins.data * (binned.bins.coords["tof"] - mean_tof) ** 2
    ).bins.sum() / binned.bins.sum()

    mean_tof.variances = variance.values

    # Convert coordinates to midpoints
    mean_tof.coords["toa"] = sc.midpoints(mean_tof.coords["toa"])
    mean_tof.coords["distance"] = sc.midpoints(mean_tof.coords["distance"])

    return TimeOfFlightLookupTable(mean_tof)
+
+
def masked_tof_lookup_table(
    tof_lookup: TimeOfFlightLookupTable,
    error_threshold: LookupTableRelativeErrorThreshold,
) -> MaskedTimeOfFlightLookupTable:
    """
    Mask regions of the lookup table where the variance of the projected time-of-flight
    is larger than a given threshold.

    Parameters
    ----------
    tof_lookup:
        Lookup table giving time-of-flight as a function of distance and
        time-of-arrival.
    error_threshold:
        Threshold for the relative error (stddev / mean) of the projected
        time-of-flight above which regions are masked.
    """
    relative_error = sc.stddevs(tof_lookup.data) / sc.values(tof_lookup.data)
    mask = relative_error > sc.scalar(error_threshold)
    out = tof_lookup.copy()
    # Use numpy for indexing as table is 2D
    # (masked entries are set to NaN instead of using a scipp mask).
    out.values[mask.values] = np.nan
    return MaskedTimeOfFlightLookupTable(out)
+
+
def find_fastest_neutron(simulation: SimulationResults) -> FastestNeutron:
    """
    Pick the neutron with the highest speed out of the simulation results and
    return its properties.
    """
    fastest = simulation.speed.values.argmax()
    return FastestNeutron(
        time_of_arrival=simulation.time_of_arrival[fastest],
        speed=simulation.speed[fastest],
        distance=simulation.distance,
    )
+
+
def pivot_time_at_detector(
    fastest_neutron: FastestNeutron, ltotal: Ltotal
) -> PivotTimeAtDetector:
    """
    Compute the pivot time at the detector, i.e., the time of the start of the frame at
    the detector, by propagating the fastest simulated neutron to the detector.

    The assumption here is that the fastest neutron in the simulation results is the one
    that arrives at the detector first.
    One could have an edge case where a slightly slower neutron which is born earlier
    could arrive at the detector first, but this edge case is most probably uncommon,
    and the difference in arrival times is likely to be small.

    Parameters
    ----------
    fastest_neutron:
        Properties of the fastest neutron in the simulation results.
    ltotal:
        Total length of the flight path from the source to the detector.
    """
    remaining_path = ltotal - fastest_neutron.distance.to(unit=ltotal.unit)
    travel_time = (remaining_path / fastest_neutron.speed).to(
        unit=fastest_neutron.time_of_arrival.unit, copy=False
    )
    return PivotTimeAtDetector(fastest_neutron.time_of_arrival + travel_time)
+
+
def unwrapped_time_of_arrival(
    da: RawData, offset: PulseStrideOffset, pulse_period: PulsePeriod
) -> UnwrappedTimeOfArrival:
    """
    Compute the unwrapped time of arrival of the neutron at the detector.
    For event data, this is essentially ``event_time_offset + event_time_zero``
    (shifted by the pulse-stride offset); for histogrammed monitor data the
    existing time axis is used as-is.

    Parameters
    ----------
    da:
        Raw detector data loaded from a NeXus file, e.g., NXdetector containing
        NXevent_data.
    offset:
        Integer offset of the first pulse in the stride (typically zero unless we are
        using pulse-skipping and the events do not begin with the first pulse in the
        stride).
    pulse_period:
        Period of the source pulses, i.e., time between consecutive pulse starts.
    """
    if da.bins is None:
        # Histogram mode: 'time_of_flight' is the canonical name in NXmonitor,
        # but in some files, it may be called 'tof'.
        key = next(iter(set(da.coords.keys()) & {"time_of_flight", "tof"}))
        return UnwrappedTimeOfArrival(da.coords[key])

    # Event mode: to unwrap the time of arrival, we want to add the
    # event_time_zero to the event_time_offset. However, we do not really care
    # about the exact datetimes, we just want to know the offsets with respect
    # to the start of the run. Hence we use the smallest event_time_zero as
    # the time origin.
    elapsed = da.coords["event_time_zero"] - da.coords["event_time_zero"].min()
    offsets = da.bins.coords["event_time_offset"]
    unit = elem_unit(offsets)
    arrival = (
        offsets
        + elapsed.to(dtype=float, unit=unit, copy=False)
        - (offset * pulse_period).to(unit=unit, copy=False)
    )
    return UnwrappedTimeOfArrival(arrival)
+
+
def unwrapped_time_of_arrival_minus_frame_pivot_time(
    toa: UnwrappedTimeOfArrival, pivot_time: PivotTimeAtDetector
) -> UnwrappedTimeOfArrivalMinusPivotTime:
    """
    Compute the time of arrival of the neutron at the detector, unwrapped at the pulse
    period, minus the start time of the frame.
    We subtract the start time of the frame so that we can use a modulo operation to
    wrap the time of arrival at the frame period in the case of pulse-skipping.

    Parameters
    ----------
    toa:
        Time of arrival of the neutron at the detector, unwrapped at the pulse period.
    pivot_time:
        Pivot time at the detector, i.e., the time of the start of the frame at the
        detector.
    """
    # Order of operation to preserve dimension order: the (possibly binned)
    # ``toa`` is deliberately the right-hand operand.
    return UnwrappedTimeOfArrivalMinusPivotTime(
        -pivot_time.to(unit=elem_unit(toa), copy=False) + toa
    )
+
+
def time_of_arrival_minus_pivot_time_modulo_period(
    toa_minus_pivot_time: UnwrappedTimeOfArrivalMinusPivotTime,
    frame_period: FramePeriod,
) -> TimeOfArrivalMinusPivotTimeModuloPeriod:
    """
    Wrap the pivot-time-relative arrival times at the frame period.

    Parameters
    ----------
    toa_minus_pivot_time:
        Time of arrival of the neutron at the detector, unwrapped at the pulse period,
        minus the start time of the frame.
    frame_period:
        Period of the frame, i.e., time between the start of two consecutive frames.
    """
    period = frame_period.to(unit=elem_unit(toa_minus_pivot_time), copy=False)
    return TimeOfArrivalMinusPivotTimeModuloPeriod(toa_minus_pivot_time % period)
+
+
def time_of_arrival_folded_by_frame(
    toa: TimeOfArrivalMinusPivotTimeModuloPeriod,
    pivot_time: PivotTimeAtDetector,
) -> FrameFoldedTimeOfArrival:
    """
    Fold the time of arrival into a single frame by adding the pivot time back
    onto the wrapped arrival times.

    Parameters
    ----------
    toa:
        Time of arrival of the neutron at the detector, unwrapped at the pulse period,
        minus the start time of the frame, modulo the frame period.
    pivot_time:
        Pivot time at the detector, i.e., the time of the start of the frame at the
        detector.
    """
    folded = toa + pivot_time.to(unit=elem_unit(toa), copy=False)
    return FrameFoldedTimeOfArrival(folded)
+
+
def time_of_flight_data(
    da: RawData,
    lookup: MaskedTimeOfFlightLookupTable,
    ltotal: Ltotal,
    toas: FrameFoldedTimeOfArrival,
) -> TofData:
    """
    Convert the time-of-arrival data to time-of-flight data using a lookup table.
    The output data will have a time-of-flight coordinate.

    Parameters
    ----------
    da:
        Raw detector data loaded from a NeXus file, e.g., NXdetector containing
        NXevent_data.
    lookup:
        Lookup table giving time-of-flight as a function of distance and time of
        arrival.
    ltotal:
        Total length of the flight path from the source to the detector.
    toas:
        Time of arrival of the neutron at the detector, folded by the frame period.

    Returns
    -------
    :
        Input data with an added ``tof`` coordinate; events falling outside
        the lookup table (or in masked regions) get NaN
        (``bounds_error=False`` in the interpolator).
    """
    from scipy.interpolate import RegularGridInterpolator

    # TODO: to make use of multi-threading, we could write our own interpolator.
    # This should be simple enough as we are making the bins linspace, so computing
    # bin indices is fast.
    f = RegularGridInterpolator(
        (
            lookup.coords["toa"].to(unit=elem_unit(toas), copy=False).values,
            lookup.coords["distance"].to(unit=ltotal.unit, copy=False).values,
        ),
        lookup.data.to(unit=elem_unit(toas), copy=False).values.T,
        method="linear",
        bounds_error=False,
    )

    if da.bins is not None:
        # For event data, interpolate on the flat list of events; broadcast
        # ltotal to one value per event first.
        ltotal = sc.bins_like(toas, ltotal).bins.constituents["data"]
        toas = toas.bins.constituents["data"]

    tofs = sc.array(
        dims=toas.dims, values=f((toas.values, ltotal.values)), unit=elem_unit(toas)
    )

    if da.bins is not None:
        # Re-wrap the flat tof values into the original binned structure.
        parts = da.bins.constituents
        parts["data"] = tofs
        out = da.bins.assign_coords(tof=_bins_no_validate(**parts))
    else:
        out = da.assign_coords(tof=tofs)

    return TofData(out)
+
+
def resample_tof_data(da: TofData) -> ResampledTofData:
    """
    Histogrammed data that has been converted to `tof` will typically have
    unsorted bin edges (due to either wrapping of `time_of_flight` or wavelength
    overlap between subframes).
    This function re-histograms the data to ensure that the bin edges are sorted.
    It makes use of the ``to_events`` helper which generates a number of events in each
    bin with a uniform distribution. The new events are then histogrammed using a set of
    sorted bin edges.

    WARNING:
    This function is highly experimental, has limitations and should be used with
    caution. It is a workaround to the issue that rebinning data with unsorted bin
    edges is not supported in scipp.
    As such, this function is not part of the default set of providers, and needs to be
    inserted manually into the workflow.

    Parameters
    ----------
    da:
        Histogrammed data with the time-of-flight coordinate.
    """
    events = to_events(da.rename_dims(time_of_flight="tof"), "event")

    # Histogram with a bin width close to the original one.
    # TODO: this could be a workflow parameter
    edges = da.coords["tof"]
    widths = edges["time_of_flight", 1:] - edges["time_of_flight", :-1]
    rehist = events.hist(tof=widths.nanmedian())

    # Carry over the coordinates that do not depend on time-of-flight.
    carried = {
        key: var
        for key, var in da.coords.items()
        if "time_of_flight" not in var.dims
    }
    return ResampledTofData(rehist.assign_coords(carried))
+
+
def default_parameters() -> dict:
    """
    Default parameters of the time-of-flight workflow.
    """
    source_frequency = sc.scalar(14.0, unit="Hz")  # ESS pulse repetition rate
    return {
        PulsePeriod: 1.0 / source_frequency,
        PulseStride: 1,
        PulseStrideOffset: 0,
        DistanceResolution: sc.scalar(0.1, unit="m"),
        TimeOfArrivalResolution: 500,
        LookupTableRelativeErrorThreshold: 0.1,
    }
+
+
def providers() -> tuple[Callable, ...]:
    """
    Providers of the time-of-flight workflow.

    Returns
    -------
    :
        Tuple of the provider callables making up the default workflow.
        ``resample_tof_data`` is deliberately not included; see its docstring.
    """
    return (
        compute_tof_lookup_table,
        extract_ltotal,
        find_fastest_neutron,
        frame_period,
        masked_tof_lookup_table,
        pivot_time_at_detector,
        time_of_arrival_folded_by_frame,
        time_of_arrival_minus_pivot_time_modulo_period,
        time_of_flight_data,
        unwrapped_time_of_arrival,
        unwrapped_time_of_arrival_minus_frame_pivot_time,
    )
+
+
class TofWorkflow:
    """
    Helper class to build a time-of-flight workflow and cache the expensive part of
    the computation: running the simulation and building the lookup table.

    Parameters
    ----------
    simulated_neutrons:
        Results of a time-of-flight simulation used to create a lookup table.
        The results should be a flat table with columns for time-of-arrival, speed,
        wavelength, and weight.
    ltotal_range:
        Range of total flight path lengths from the source to the detector.
        This is used to create the lookup table to compute the neutron time-of-flight.
        Note that the resulting table will extend slightly beyond this range, as the
        supplied range is not necessarily a multiple of the distance resolution.
    pulse_stride:
        Stride of used pulses. Usually 1, but may be a small integer when
        pulse-skipping.
    pulse_stride_offset:
        Integer offset of the first pulse in the stride (typically zero unless we are
        using pulse-skipping and the events do not begin with the first pulse in the
        stride).
    distance_resolution:
        Resolution of the distance axis in the lookup table.
        Should be a single scalar value with a unit of length.
        This is typically of the order of 1-10 cm.
    toa_resolution:
        Resolution of the time of arrival axis in the lookup table.
        Can be an integer (number of bins) or a sc.Variable (bin width).
    error_threshold:
        Threshold for the variance of the projected time-of-flight above which regions
        are masked.
    """

    def __init__(
        self,
        simulated_neutrons: SimulationResults,
        ltotal_range: LtotalRange,
        pulse_stride: PulseStride | None = None,
        pulse_stride_offset: PulseStrideOffset | None = None,
        distance_resolution: DistanceResolution | None = None,
        toa_resolution: TimeOfArrivalResolution | None = None,
        error_threshold: LookupTableRelativeErrorThreshold | None = None,
    ):
        import sciline as sl

        self.pipeline = sl.Pipeline(providers())
        self.pipeline[SimulationResults] = simulated_neutrons
        self.pipeline[LtotalRange] = ltotal_range

        params = default_parameters()
        self.pipeline[PulsePeriod] = params[PulsePeriod]
        # Use explicit ``is None`` checks rather than ``x or default``: the
        # latter would silently replace legitimate falsy overrides (e.g.
        # ``pulse_stride_offset=0`` or ``error_threshold=0.0``) with defaults.
        self.pipeline[PulseStride] = (
            params[PulseStride] if pulse_stride is None else pulse_stride
        )
        self.pipeline[PulseStrideOffset] = (
            params[PulseStrideOffset]
            if pulse_stride_offset is None
            else pulse_stride_offset
        )
        self.pipeline[DistanceResolution] = (
            params[DistanceResolution]
            if distance_resolution is None
            else distance_resolution
        )
        self.pipeline[TimeOfArrivalResolution] = (
            params[TimeOfArrivalResolution]
            if toa_resolution is None
            else toa_resolution
        )
        self.pipeline[LookupTableRelativeErrorThreshold] = (
            params[LookupTableRelativeErrorThreshold]
            if error_threshold is None
            else error_threshold
        )

    def __getitem__(self, key):
        # Delegate parameter/result access to the underlying sciline pipeline.
        return self.pipeline[key]

    def __setitem__(self, key, value):
        # Delegate parameter assignment to the underlying sciline pipeline.
        self.pipeline[key] = value

    def persist(self) -> None:
        """
        Compute the expensive intermediate results (simulation, masked lookup
        table, fastest neutron) once and store them back on the pipeline so
        subsequent ``compute`` calls reuse them.
        """
        for t in (SimulationResults, MaskedTimeOfFlightLookupTable, FastestNeutron):
            self.pipeline[t] = self.pipeline.compute(t)

    def compute(self, *args, **kwargs) -> Any:
        """Forward to :meth:`sciline.Pipeline.compute`."""
        return self.pipeline.compute(*args, **kwargs)

    def visualize(self, *args, **kwargs) -> Any:
        """Forward to :meth:`sciline.Pipeline.visualize`."""
        return self.pipeline.visualize(*args, **kwargs)

    def copy(self) -> TofWorkflow:
        """
        Return a copy of this workflow with an independent pipeline.

        The previous implementation re-invoked ``__init__`` with keyword
        arguments (``choppers``, ``facility``) that the constructor does not
        accept, which raised ``TypeError``. We bypass ``__init__`` entirely
        and clone the pipeline instead.
        """
        out = self.__class__.__new__(self.__class__)
        out.pipeline = self.pipeline.copy()
        return out
diff --git a/src/ess/reduce/time_of_flight/types.py b/src/ess/reduce/time_of_flight/types.py
new file mode 100644
index 00000000..027c9164
--- /dev/null
+++ b/src/ess/reduce/time_of_flight/types.py
@@ -0,0 +1,176 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (c) 2025 Scipp contributors (https://github.com/scipp)
+
+from dataclasses import dataclass
+from typing import NewType
+
+import scipp as sc
+
Ltotal = NewType("Ltotal", sc.Variable)
"""
Total length of the flight path from the source to the detector.
Typically attached to the detector data as the ``Ltotal`` coordinate
(one value per detector pixel).
"""
+
+
@dataclass
class SimulationResults:
    """
    Results of a time-of-flight simulation used to create a lookup table.

    The results (apart from ``distance``) should be flat lists (1d arrays) of length N
    where N is the number of neutrons, containing the properties of the neutrons in the
    simulation.

    Parameters
    ----------
    time_of_arrival:
        Time of arrival of the neutrons at the position where the events were recorded
        (1d array of size N).
    speed:
        Speed of the neutrons, typically derived from the wavelength of the neutrons
        (1d array of size N).
    wavelength:
        Wavelength of the neutrons (1d array of size N).
    weight:
        Weight/probability of the neutrons (1d array of size N).
    distance:
        Distance from the source to the position where the events were recorded
        (single value; we assume all neutrons were recorded at the same position).
        For a ``tof`` simulation, this is just the position of the detector where the
        events are recorded. For a ``McStas`` simulation, this is the distance between
        the source and the event monitor.
    """

    time_of_arrival: sc.Variable  # 1d array of size N
    speed: sc.Variable  # 1d array of size N
    wavelength: sc.Variable  # 1d array of size N
    weight: sc.Variable  # 1d array of size N
    distance: sc.Variable  # scalar: common recording position of all neutrons
+
+
@dataclass
class FastestNeutron:
    """
    Properties of the fastest neutron in the simulation results.
    """

    time_of_arrival: sc.Variable  # arrival time of the fastest neutron
    speed: sc.Variable  # speed of the fastest neutron
    distance: sc.Variable  # distance at which it was recorded (from the simulation)
+
+
LtotalRange = NewType("LtotalRange", tuple[sc.Variable, sc.Variable])
"""
Range (min, max) of the total length of the flight path from the source to the detector.
This is used to create the lookup table to compute the neutron time-of-flight.
Note that the resulting table will extend slightly beyond this range, as the supplied
range is not necessarily a multiple of the distance resolution.

Note also that the range of total flight paths is supplied manually to the workflow
instead of being read from the input data, as it allows us to compute the expensive part
of the workflow in advance (the lookup table) and does not need to be repeated for each
run, or for new data coming in during live data collection.
"""

DistanceResolution = NewType("DistanceResolution", sc.Variable)
"""
Step size of the distance axis in the lookup table.
Should be a single scalar value with a unit of length.
This is typically of the order of 1-10 cm.
"""

TimeOfArrivalResolution = NewType("TimeOfArrivalResolution", int | sc.Variable)
"""
Resolution of the time of arrival axis in the lookup table.
Can be an integer (number of bins) or a sc.Variable (bin width).
"""

TimeOfFlightLookupTable = NewType("TimeOfFlightLookupTable", sc.DataArray)
"""
Lookup table giving time-of-flight as a function of distance and time of arrival.
"""

MaskedTimeOfFlightLookupTable = NewType("MaskedTimeOfFlightLookupTable", sc.DataArray)
"""
Lookup table giving time-of-flight as a function of distance and time of arrival, with
regions of large uncertainty masked out.
"""

LookupTableRelativeErrorThreshold = NewType("LookupTableRelativeErrorThreshold", float)
"""
Relative error threshold above which values in the lookup table are masked out
(see ``MaskedTimeOfFlightLookupTable``).
"""

FramePeriod = NewType("FramePeriod", sc.Variable)
"""
The period of a frame, a (small) integer multiple of the source period.
"""

UnwrappedTimeOfArrival = NewType("UnwrappedTimeOfArrival", sc.Variable)
"""
Time of arrival of the neutron at the detector, unwrapped at the pulse period.
"""

PivotTimeAtDetector = NewType("PivotTimeAtDetector", sc.Variable)
"""
Pivot time at the detector, i.e., the time of the start of the frame at the detector.
"""

UnwrappedTimeOfArrivalMinusPivotTime = NewType(
    "UnwrappedTimeOfArrivalMinusPivotTime", sc.Variable
)
"""
Time of arrival of the neutron at the detector, unwrapped at the pulse period, minus
the start time of the frame.
"""

TimeOfArrivalMinusPivotTimeModuloPeriod = NewType(
    "TimeOfArrivalMinusPivotTimeModuloPeriod", sc.Variable
)
"""
Time of arrival of the neutron at the detector minus the start time of the frame,
modulo the frame period.
"""

FrameFoldedTimeOfArrival = NewType("FrameFoldedTimeOfArrival", sc.Variable)
"""
Time of arrival of the neutron at the detector, folded by the frame period.
"""


PulsePeriod = NewType("PulsePeriod", sc.Variable)
"""
Period of the source pulses, i.e., time between consecutive pulse starts.
"""

PulseStride = NewType("PulseStride", int)
"""
Stride of used pulses. Usually 1, but may be a small integer when pulse-skipping.
"""

PulseStrideOffset = NewType("PulseStrideOffset", int)
"""
When pulse-skipping, the offset of the first pulse in the stride.
"""

RawData = NewType("RawData", sc.DataArray)
"""
Raw detector data loaded from a NeXus file, e.g., NXdetector containing NXevent_data.
"""

TofData = NewType("TofData", sc.DataArray)
"""
Detector data with time-of-flight coordinate.
"""

ResampledTofData = NewType("ResampledTofData", sc.DataArray)
"""
Histogrammed detector data with time-of-flight coordinate, that has been resampled.

Histogrammed data that has been converted to `tof` will typically have
unsorted bin edges (due to either wrapping of `time_of_flight` or wavelength
overlap between subframes).
We thus resample the data to ensure that the bin edges are sorted.
It makes use of the ``to_events`` helper which generates a number of events in each
bin with a uniform distribution. The new events are then histogrammed using a set of
sorted bin edges to yield a new histogram with sorted bin edges.

WARNING:
This approach is highly experimental, has limitations and should be used with
caution. It is a workaround to the issue that rebinning data with unsorted bin
edges is not supported in scipp.
"""
diff --git a/tests/time_of_flight/to_events_test.py b/tests/time_of_flight/to_events_test.py
new file mode 100644
index 00000000..0ed11a05
--- /dev/null
+++ b/tests/time_of_flight/to_events_test.py
@@ -0,0 +1,111 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (c) 2024 Scipp contributors (https://github.com/scipp)
+
+import pytest
+import scipp as sc
+
+from ess.reduce.time_of_flight.to_events import to_events
+
+
def test_to_events_1d():
    """Histogram -> events -> histogram round-trips the data of a 1d histogram."""
    original = sc.data.table_xyz(1000).hist(x=20)
    unfolded = to_events(original, "event")
    assert "x" not in unfolded.dims
    rehist = unfolded.hist(x=original.coords["x"])
    assert sc.identical(original.coords["x"], rehist.coords["x"])
    assert sc.allclose(original.data, rehist.data)
+
+
def test_to_events_1d_with_group_coord():
    """The round-trip preserves a grouped ('l') dimension alongside the binned one."""
    table = sc.data.table_xyz(1000, coord_max=10)
    table.coords["l"] = table.coords["x"].to(dtype=int)
    original = table.group("l").hist(x=20)
    unfolded = to_events(original, "event")
    assert "x" not in unfolded.dims
    assert "l" in unfolded.dims
    rehist = unfolded.hist(x=original.coords["x"])
    assert sc.identical(original.coords["x"], rehist.coords["x"])
    assert sc.identical(original.coords["l"], rehist.coords["l"])
    assert sc.allclose(original.data, rehist.data)
+
+
def test_to_events_2d():
    """The round-trip also works when two dimensions are histogrammed."""
    original = sc.data.table_xyz(1000).hist(y=20, x=10)
    unfolded = to_events(original, "event")
    assert "x" not in unfolded.dims
    assert "y" not in unfolded.dims
    rehist = unfolded.hist(y=original.coords["y"], x=original.coords["x"])
    assert sc.identical(original.coords["x"], rehist.coords["x"])
    assert sc.identical(original.coords["y"], rehist.coords["y"])
    assert sc.allclose(original.data, rehist.data)
+
+
def test_to_events_2d_with_group_coord():
    """The round-trip preserves the grouped dimension for a 2d histogram."""
    table = sc.data.table_xyz(1000, coord_max=10)
    table.coords["l"] = table.coords["x"].to(dtype=int)
    original = table.group("l").hist(y=20, x=10)
    unfolded = to_events(original, "event")
    assert "x" not in unfolded.dims
    assert "y" not in unfolded.dims
    assert "l" in unfolded.dims
    rehist = unfolded.hist(y=original.coords["y"], x=original.coords["x"])
    assert sc.identical(original.coords["x"], rehist.coords["x"])
    assert sc.identical(original.coords["y"], rehist.coords["y"])
    assert sc.identical(original.coords["l"], rehist.coords["l"])
    assert sc.allclose(original.data, rehist.data)
+
+
def test_to_events_binned_data_input_raises():
    """Binned (event) input is rejected; only dense histograms can be converted."""
    binned = sc.data.table_xyz(1000).bin(x=20)
    with pytest.raises(ValueError, match="Cannot convert a binned DataArray to events"):
        _ = to_events(binned, "event")
+
+
def test_to_events_mask_on_midpoints_dim():
    # A mask along the grouped ('l') dimension is not a bin-edge mask, so it
    # should survive the events round-trip unchanged.
    table = sc.data.table_xyz(1000, coord_max=10)
    table.coords["l"] = table.coords["y"].to(dtype=int)

    hist = table.group("l").hist(x=6)
    hist.masks["m"] = hist.coords["l"] == sc.scalar(3.0, unit="m")
    events = to_events(hist, "event")
    result = events.hist(x=hist.coords["x"])
    assert sc.identical(hist.coords["x"], result.coords["x"])
    assert sc.allclose(hist.data, result.data)
    assert sc.identical(hist.masks["m"], result.masks["m"])
+
+
def test_to_events_mask_on_binedge_dim():
    # A mask along the bin-edge ('x') dimension cannot be carried over: the
    # masked bins contribute no events, so the mask is consumed and the
    # corresponding bins of the re-histogrammed result are empty.
    table = sc.data.table_xyz(1000, coord_max=10)
    table.coords["l"] = table.coords["x"].to(dtype=int)

    hist = table.group("l").hist(x=6)
    hist.masks["m"] = sc.array(
        dims=["x"], values=[False, False, True, True, False, False]
    )
    events = to_events(hist, "event")
    result = events.hist(x=hist.coords["x"])
    assert sc.identical(hist.coords["x"], result.coords["x"])
    # Total (mask-respecting) counts are preserved...
    assert sc.isclose(hist.sum().data, result.sum().data)
    assert "m" not in result.masks
    # ...and the previously-masked bins come back with zero counts.
    assert result["x", 2:4].data.sum() == sc.scalar(0.0, unit=table.unit)
+
+
def test_to_events_two_masks():
    # Mixed masks: the bin-edge ('x') mask is consumed (those bins come back
    # empty), while the midpoints ('l') mask is preserved as a mask.
    table = sc.data.table_xyz(1000, coord_max=10)
    table.coords["l"] = table.coords["x"].to(dtype=int)

    hist = table.group("l").hist(x=6)
    hist.masks["m1"] = sc.array(
        dims=["x"], values=[False, False, True, True, False, False]
    )
    hist.masks["m2"] = hist.coords["l"] == sc.scalar(3.0, unit="m")
    events = to_events(hist, "event")
    result = events.hist(x=hist.coords["x"])
    assert sc.identical(hist.coords["x"], result.coords["x"])
    assert sc.isclose(hist.sum().data, result.sum().data)
    assert "m1" not in result.masks
    assert sc.identical(hist.masks["m2"], result.masks["m2"])
    assert result["x", 2:4].data.sum() == sc.scalar(0.0, unit=table.unit)
diff --git a/tests/time_of_flight/unwrap_test.py b/tests/time_of_flight/unwrap_test.py
new file mode 100644
index 00000000..c44b43af
--- /dev/null
+++ b/tests/time_of_flight/unwrap_test.py
@@ -0,0 +1,328 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (c) 2025 Scipp contributors (https://github.com/scipp)
+import numpy as np
+import pytest
+import scipp as sc
+from scipp.testing import assert_identical
+from scippneutron.conversion.graph.beamline import beamline as beamline_graph
+from scippneutron.conversion.graph.tof import elastic as elastic_graph
+
+from ess.reduce import time_of_flight
+from ess.reduce.time_of_flight import fakes
+
+sl = pytest.importorskip("sciline")
+
+
def test_frame_period_is_pulse_period_if_not_pulse_skipping() -> None:
    """Without pulse-skipping (stride 1), the frame period equals the pulse period."""
    pipeline = sl.Pipeline(time_of_flight.providers())
    pulse_period = sc.scalar(123.0, unit="ms")
    pipeline[time_of_flight.PulsePeriod] = pulse_period
    pipeline[time_of_flight.PulseStride] = 1
    assert_identical(pipeline.compute(time_of_flight.FramePeriod), pulse_period)
+
+
@pytest.mark.parametrize("stride", [1, 2, 3, 4])
def test_frame_period_is_multiple_pulse_period_if_pulse_skipping(stride) -> None:
    """With pulse-skipping, the frame period is the stride times the pulse period."""
    pipeline = sl.Pipeline(time_of_flight.providers())
    pulse_period = sc.scalar(123.0, unit="ms")
    pipeline[time_of_flight.PulsePeriod] = pulse_period
    pipeline[time_of_flight.PulseStride] = stride
    assert_identical(
        pipeline.compute(time_of_flight.FramePeriod), stride * pulse_period
    )
+
+
def test_unwrap_with_no_choppers() -> None:
    """Wavelengths are approximately recovered even without any choppers."""
    # At this small distance the frames are not overlapping (with the given wavelength
    # range), despite not using any choppers.
    distance = sc.scalar(10.0, unit="m")

    beamline = fakes.FakeBeamline(
        choppers={},
        monitors={"detector": distance},
        run_length=sc.scalar(1 / 14, unit="s") * 4,
        events_per_pulse=100_000,
    )

    mon, ref = beamline.get_monitor("detector")

    pl = sl.Pipeline(
        time_of_flight.providers(), params=time_of_flight.default_parameters()
    )

    sim = time_of_flight.simulate_beamline(choppers={}, neutrons=300_000, seed=1234)

    pl[time_of_flight.RawData] = mon
    pl[time_of_flight.SimulationResults] = sim
    pl[time_of_flight.LtotalRange] = distance, distance
    # Permissive threshold: keep the whole lookup table despite the broad
    # arrival-time distribution of an unchopped beam.
    pl[time_of_flight.LookupTableRelativeErrorThreshold] = 1.0

    tofs = pl.compute(time_of_flight.TofData)

    # Convert to wavelength
    graph = {**beamline_graph(scatter=False), **elastic_graph("tof")}
    wavs = tofs.transform_coords("wavelength", graph=graph).bins.concat().value
    ref = ref.bins.concat().value

    diff = abs(
        (wavs.coords["wavelength"] - ref.coords["wavelength"])
        / ref.coords["wavelength"]
    )
    # Most errors should be small
    assert np.nanpercentile(diff.values, 96) < 1.0
+
+
# At 80m, event_time_offset does not wrap around (all events are within the same pulse).
# At 85m, event_time_offset wraps around.
@pytest.mark.parametrize("dist", [80.0, 85.0])
def test_standard_unwrap(dist) -> None:
    """Computed wavelengths match the true wavelengths with the PSC choppers."""
    distance = sc.scalar(dist, unit="m")
    choppers = fakes.psc_choppers()
    beamline = fakes.FakeBeamline(
        choppers=choppers,
        monitors={"detector": distance},
        run_length=sc.scalar(1 / 14, unit="s") * 4,
        events_per_pulse=100_000,
    )
    mon, ref = beamline.get_monitor("detector")

    sim = time_of_flight.simulate_beamline(
        choppers=choppers, neutrons=300_000, seed=1234
    )

    pl = sl.Pipeline(
        time_of_flight.providers(), params=time_of_flight.default_parameters()
    )

    pl[time_of_flight.RawData] = mon
    pl[time_of_flight.SimulationResults] = sim
    pl[time_of_flight.LtotalRange] = distance, distance

    tofs = pl.compute(time_of_flight.TofData)

    # Convert to wavelength
    graph = {**beamline_graph(scatter=False), **elastic_graph("tof")}
    wavs = tofs.transform_coords("wavelength", graph=graph).bins.concat().value
    ref = ref.bins.concat().value

    diff = abs(
        (wavs.coords["wavelength"] - ref.coords["wavelength"])
        / ref.coords["wavelength"]
    )
    # All errors should be small
    assert np.nanpercentile(diff.values, 100) < 0.01
+
+
# At 80m, event_time_offset does not wrap around (all events are within the same pulse).
# At 85m, event_time_offset wraps around.
@pytest.mark.parametrize("dist", [80.0, 85.0])
def test_standard_unwrap_histogram_mode(dist) -> None:
    """Same as ``test_standard_unwrap`` but with histogrammed (dense) input data."""
    distance = sc.scalar(dist, unit="m")
    choppers = fakes.psc_choppers()
    beamline = fakes.FakeBeamline(
        choppers=choppers,
        monitors={"detector": distance},
        run_length=sc.scalar(1 / 14, unit="s") * 4,
        events_per_pulse=100_000,
    )
    mon, ref = beamline.get_monitor("detector")
    # Histogram the events over one pulse period to emulate dense monitor data.
    mon = (
        mon.hist(
            event_time_offset=sc.linspace(
                "event_time_offset", 0.0, 1000.0 / 14, num=1001, unit="ms"
            ).to(unit="s")
        )
        .sum("pulse")
        .rename(event_time_offset="time_of_flight")
    )

    sim = time_of_flight.simulate_beamline(
        choppers=choppers, neutrons=300_000, seed=1234
    )

    # Dense data needs the resampling provider to get sorted bin edges.
    pl = sl.Pipeline(
        (*time_of_flight.providers(), time_of_flight.resample_tof_data),
        params=time_of_flight.default_parameters(),
    )

    pl[time_of_flight.RawData] = mon
    pl[time_of_flight.SimulationResults] = sim
    pl[time_of_flight.LtotalRange] = distance, distance
    tofs = pl.compute(time_of_flight.ResampledTofData)
    graph = {**beamline_graph(scatter=False), **elastic_graph("tof")}
    wavs = tofs.transform_coords("wavelength", graph=graph)
    ref = ref.bins.concat().value.hist(wavelength=wavs.coords["wavelength"])
    # We divide by the maximum to avoid large relative differences at the edges of the
    # frames where the counts are low.
    diff = (wavs - ref) / ref.max()
    assert np.nanpercentile(diff.values, 96.0) < 0.3
+
+
def test_pulse_skipping_unwrap() -> None:
    """Wavelengths are recovered when pulse-skipping (stride 2) is in use."""
    distance = sc.scalar(100.0, unit="m")
    choppers = fakes.psc_choppers()
    choppers["pulse_skipping"] = fakes.pulse_skipping

    beamline = fakes.FakeBeamline(
        choppers=choppers,
        monitors={"detector": distance},
        run_length=sc.scalar(1.0, unit="s"),
        events_per_pulse=100_000,
    )
    mon, ref = beamline.get_monitor("detector")

    sim = time_of_flight.simulate_beamline(
        choppers=choppers, neutrons=300_000, seed=1234
    )

    pl = sl.Pipeline(
        time_of_flight.providers(), params=time_of_flight.default_parameters()
    )

    pl[time_of_flight.RawData] = mon
    pl[time_of_flight.SimulationResults] = sim
    pl[time_of_flight.LtotalRange] = distance, distance
    pl[time_of_flight.PulseStride] = 2

    tofs = pl.compute(time_of_flight.TofData)

    # Convert to wavelength
    graph = {**beamline_graph(scatter=False), **elastic_graph("tof")}
    wavs = tofs.transform_coords("wavelength", graph=graph).bins.concat().value
    ref = ref.bins.concat().value

    diff = abs(
        (wavs.coords["wavelength"] - ref.coords["wavelength"])
        / ref.coords["wavelength"]
    )
    # All errors should be small
    assert np.nanpercentile(diff.values, 100) < 0.01
+
+
def test_pulse_skipping_unwrap_when_all_neutrons_arrive_after_second_pulse() -> None:
    """Pulse-skipping at long flight path: a stride offset of 1 aligns the frames."""
    distance = sc.scalar(150.0, unit="m")
    choppers = fakes.psc_choppers()
    choppers["pulse_skipping"] = fakes.pulse_skipping

    beamline = fakes.FakeBeamline(
        choppers=choppers,
        monitors={"detector": distance},
        run_length=sc.scalar(1.0, unit="s"),
        events_per_pulse=100_000,
    )
    mon, ref = beamline.get_monitor("detector")

    sim = time_of_flight.simulate_beamline(
        choppers=choppers, neutrons=300_000, seed=1234
    )

    pl = sl.Pipeline(
        time_of_flight.providers(), params=time_of_flight.default_parameters()
    )

    pl[time_of_flight.RawData] = mon
    pl[time_of_flight.SimulationResults] = sim
    pl[time_of_flight.LtotalRange] = distance, distance
    pl[time_of_flight.PulseStride] = 2
    pl[time_of_flight.PulseStrideOffset] = 1  # Start the stride at the second pulse

    tofs = pl.compute(time_of_flight.TofData)

    # Convert to wavelength
    graph = {**beamline_graph(scatter=False), **elastic_graph("tof")}
    wavs = tofs.transform_coords("wavelength", graph=graph).bins.concat().value
    ref = ref.bins.concat().value

    diff = abs(
        (wavs.coords["wavelength"] - ref.coords["wavelength"])
        / ref.coords["wavelength"]
    )
    # All errors should be small
    assert np.nanpercentile(diff.values, 100) < 0.01
+
+
def test_pulse_skipping_unwrap_when_first_half_of_first_pulse_is_missing() -> None:
    """Pulse-skipping still works when the recording starts mid-frame."""
    distance = sc.scalar(100.0, unit="m")
    choppers = fakes.psc_choppers()
    choppers["pulse_skipping"] = fakes.pulse_skipping

    beamline = fakes.FakeBeamline(
        choppers=choppers,
        monitors={"detector": distance},
        run_length=sc.scalar(1.0, unit="s"),
        events_per_pulse=100_000,
    )
    mon, ref = beamline.get_monitor("detector")

    sim = time_of_flight.simulate_beamline(
        choppers=choppers, neutrons=300_000, seed=1234
    )

    pl = sl.Pipeline(
        time_of_flight.providers(), params=time_of_flight.default_parameters()
    )

    pl[time_of_flight.RawData] = mon[
        1:
    ].copy()  # Skip first pulse = half of the first frame
    pl[time_of_flight.SimulationResults] = sim
    pl[time_of_flight.LtotalRange] = distance, distance
    pl[time_of_flight.PulseStride] = 2
    pl[time_of_flight.PulseStrideOffset] = 1  # Start the stride at the second pulse

    tofs = pl.compute(time_of_flight.TofData)

    # Convert to wavelength
    graph = {**beamline_graph(scatter=False), **elastic_graph("tof")}
    wavs = tofs.transform_coords("wavelength", graph=graph).bins.concat().value
    # Compare against the reference with the same first pulse removed.
    ref = ref[1:].copy().bins.concat().value

    diff = abs(
        (wavs.coords["wavelength"] - ref.coords["wavelength"])
        / ref.coords["wavelength"]
    )
    # All errors should be small
    assert np.nanpercentile(diff.values, 100) < 0.01
+
+
def test_pulse_skipping_unwrap_histogram_mode() -> None:
    """Pulse-skipping with histogrammed (dense) input data."""
    distance = sc.scalar(100.0, unit="m")
    choppers = fakes.psc_choppers()
    choppers["pulse_skipping"] = fakes.pulse_skipping

    beamline = fakes.FakeBeamline(
        choppers=choppers,
        monitors={"detector": distance},
        run_length=sc.scalar(1.0, unit="s"),
        events_per_pulse=100_000,
    )
    mon, ref = beamline.get_monitor("detector")
    # Histogram the events over one pulse period to emulate dense monitor data.
    mon = (
        mon.hist(
            event_time_offset=sc.linspace(
                "event_time_offset", 0.0, 1000.0 / 14, num=1001, unit="ms"
            ).to(unit="s")
        )
        .sum("pulse")
        .rename(event_time_offset="time_of_flight")
    )

    sim = time_of_flight.simulate_beamline(
        choppers=choppers, neutrons=300_000, seed=1234
    )

    # Dense data needs the resampling provider to get sorted bin edges.
    pl = sl.Pipeline(
        (*time_of_flight.providers(), time_of_flight.resample_tof_data),
        params=time_of_flight.default_parameters(),
    )

    pl[time_of_flight.RawData] = mon
    pl[time_of_flight.SimulationResults] = sim
    pl[time_of_flight.LtotalRange] = distance, distance
    pl[time_of_flight.PulseStride] = 2
    tofs = pl.compute(time_of_flight.ResampledTofData)
    graph = {**beamline_graph(scatter=False), **elastic_graph("tof")}
    wavs = tofs.transform_coords("wavelength", graph=graph)
    ref = ref.bins.concat().value.hist(wavelength=wavs.coords["wavelength"])
    # We divide by the maximum to avoid large relative differences at the edges of the
    # frames where the counts are low.
    diff = (wavs - ref) / ref.max()
    assert np.nanpercentile(diff.values, 96.0) < 0.3
diff --git a/tests/time_of_flight/wfm_test.py b/tests/time_of_flight/wfm_test.py
new file mode 100644
index 00000000..a29948ad
--- /dev/null
+++ b/tests/time_of_flight/wfm_test.py
@@ -0,0 +1,433 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (c) 2024 Scipp contributors (https://github.com/scipp)
+
+from functools import partial
+
+import pytest
+import scipp as sc
+import tof as tof_pkg
+from scippneutron.chopper import DiskChopper
+from scippneutron.conversion.graph.beamline import beamline
+from scippneutron.conversion.graph.tof import elastic
+
+from ess.reduce import time_of_flight
+from ess.reduce.time_of_flight import fakes
+
+sl = pytest.importorskip("sciline")
+
+
def dream_choppers():
    """
    Return a dict of ``DiskChopper``s modelling a DREAM-like chopper cascade:
    two pulse-shaping choppers (``psc1``, ``psc2``), plus ``oc``, ``bcc`` and
    ``t0``.  NOTE(review): settings are approximate test values — confirm
    against the instrument definition if exactness matters.
    """
    psc1 = DiskChopper(
        frequency=sc.scalar(14.0, unit="Hz"),
        beam_position=sc.scalar(0.0, unit="deg"),
        phase=sc.scalar(286 - 180, unit="deg"),
        axle_position=sc.vector(value=[0, 0, 6.145], unit="m"),
        slit_begin=sc.array(
            dims=["cutout"],
            values=[-1.23, 70.49, 84.765, 113.565, 170.29, 271.635, 286.035, 301.17],
            unit="deg",
        ),
        slit_end=sc.array(
            dims=["cutout"],
            values=[1.23, 73.51, 88.035, 116.835, 175.31, 275.565, 289.965, 303.63],
            unit="deg",
        ),
        slit_height=sc.scalar(10.0, unit="cm"),
        radius=sc.scalar(30.0, unit="cm"),
    )

    # Counter-rotating (negative frequency) second pulse-shaping chopper.
    psc2 = DiskChopper(
        frequency=sc.scalar(-14.0, unit="Hz"),
        beam_position=sc.scalar(0.0, unit="deg"),
        phase=sc.scalar(-236, unit="deg"),
        axle_position=sc.vector(value=[0, 0, 6.155], unit="m"),
        slit_begin=sc.array(
            dims=["cutout"],
            values=[-1.23, 27.0, 55.8, 142.385, 156.765, 214.115, 257.23, 315.49],
            unit="deg",
        ),
        slit_end=sc.array(
            dims=["cutout"],
            values=[1.23, 30.6, 59.4, 145.615, 160.035, 217.885, 261.17, 318.11],
            unit="deg",
        ),
        slit_height=sc.scalar(10.0, unit="cm"),
        radius=sc.scalar(30.0, unit="cm"),
    )

    oc = DiskChopper(
        frequency=sc.scalar(14.0, unit="Hz"),
        beam_position=sc.scalar(0.0, unit="deg"),
        phase=sc.scalar(297 - 180 - 90, unit="deg"),
        axle_position=sc.vector(value=[0, 0, 6.174], unit="m"),
        slit_begin=sc.array(dims=["cutout"], values=[-27.6 * 0.5], unit="deg"),
        slit_end=sc.array(dims=["cutout"], values=[27.6 * 0.5], unit="deg"),
        slit_height=sc.scalar(10.0, unit="cm"),
        radius=sc.scalar(30.0, unit="cm"),
    )

    bcc = DiskChopper(
        frequency=sc.scalar(112.0, unit="Hz"),
        beam_position=sc.scalar(0.0, unit="deg"),
        phase=sc.scalar(240 - 180, unit="deg"),
        axle_position=sc.vector(value=[0, 0, 9.78], unit="m"),
        slit_begin=sc.array(dims=["cutout"], values=[-36.875, 143.125], unit="deg"),
        slit_end=sc.array(dims=["cutout"], values=[36.875, 216.875], unit="deg"),
        slit_height=sc.scalar(10.0, unit="cm"),
        radius=sc.scalar(30.0, unit="cm"),
    )

    t0 = DiskChopper(
        frequency=sc.scalar(28.0, unit="Hz"),
        beam_position=sc.scalar(0.0, unit="deg"),
        phase=sc.scalar(280 - 180, unit="deg"),
        axle_position=sc.vector(value=[0, 0, 13.05], unit="m"),
        slit_begin=sc.array(dims=["cutout"], values=[-314.9 * 0.5], unit="deg"),
        slit_end=sc.array(dims=["cutout"], values=[314.9 * 0.5], unit="deg"),
        slit_height=sc.scalar(10.0, unit="cm"),
        radius=sc.scalar(30.0, unit="cm"),
    )

    return {"psc1": psc1, "psc2": psc2, "oc": oc, "bcc": bcc, "t0": t0}
+
+
def dream_choppers_with_frame_overlap():
    """
    Same cascade as ``dream_choppers`` but with the first ``bcc`` cutout widened
    (slit_end 56.875 instead of 36.875 deg) so that neighboring subframes
    overlap in time at the detector.
    """
    out = dream_choppers()
    out["bcc"] = DiskChopper(
        frequency=sc.scalar(112.0, unit="Hz"),
        beam_position=sc.scalar(0.0, unit="deg"),
        phase=sc.scalar(240 - 180, unit="deg"),
        axle_position=sc.vector(value=[0, 0, 9.78], unit="m"),
        slit_begin=sc.array(dims=["cutout"], values=[-36.875, 143.125], unit="deg"),
        slit_end=sc.array(dims=["cutout"], values=[56.875, 216.875], unit="deg"),
        slit_height=sc.scalar(10.0, unit="cm"),
        radius=sc.scalar(30.0, unit="cm"),
    )
    return out
+
+
@pytest.fixture(scope="module")
def simulation_dream_choppers():
    """Module-cached tof simulation through the DREAM-like chopper cascade."""
    results = time_of_flight.simulate_beamline(
        choppers=dream_choppers(), neutrons=100_000, seed=432
    )
    return results
+
+
@pytest.mark.parametrize("npulses", [1, 2])
@pytest.mark.parametrize(
    "ltotal",
    [
        sc.array(dims=["detector_number"], values=[77.675], unit="m"),
        sc.array(dims=["detector_number"], values=[77.675, 76.5], unit="m"),
        sc.array(
            dims=["y", "x"],
            values=[[77.675, 76.1, 78.05], [77.15, 77.3, 77.675]],
            unit="m",
        ),
    ],
)
@pytest.mark.parametrize("time_offset_unit", ["s", "ms", "us", "ns"])
@pytest.mark.parametrize("distance_unit", ["m", "mm"])
def test_dream_wfm(
    simulation_dream_choppers,
    npulses,
    ltotal,
    time_offset_unit,
    distance_unit,
):
    """
    WFM mode: known neutrons sent through the DREAM-like cascade come out of
    the workflow with their true wavelengths, for various detector layouts,
    time-offset units, and distance units.
    """
    monitors = {
        f"detector{i}": ltot for i, ltot in enumerate(ltotal.flatten(to="detector"))
    }

    # Create some neutron events
    wavelengths = sc.array(
        dims=["event"], values=[1.5, 1.6, 1.7, 3.3, 3.4, 3.5], unit="angstrom"
    )
    birth_times = sc.full(sizes=wavelengths.sizes, value=1.5, unit="ms")
    ess_beamline = fakes.FakeBeamline(
        choppers=dream_choppers(),
        monitors=monitors,
        run_length=sc.scalar(1 / 14, unit="s") * npulses,
        events_per_pulse=len(wavelengths),
        source=partial(
            tof_pkg.Source.from_neutrons,
            birth_times=birth_times,
            wavelengths=wavelengths,
            frequency=sc.scalar(14.0, unit="Hz"),
        ),
    )

    # Save the true wavelengths for later
    true_wavelengths = ess_beamline.source.data.coords["wavelength"]

    raw_data = sc.concat(
        [ess_beamline.get_monitor(key)[0] for key in monitors.keys()],
        dim="detector",
    ).fold(dim="detector", sizes=ltotal.sizes)

    # Convert the time offset to the unit requested by the test
    raw_data.bins.coords["event_time_offset"] = raw_data.bins.coords[
        "event_time_offset"
    ].to(unit=time_offset_unit, copy=False)

    raw_data.coords["Ltotal"] = ltotal.to(unit=distance_unit, copy=False)

    # Verify that all 6 neutrons made it through the chopper cascade
    assert sc.identical(
        raw_data.bins.concat("pulse").hist().data,
        sc.array(
            dims=["detector"],
            values=[len(wavelengths) * npulses] * len(monitors),
            unit="counts",
            dtype="float64",
        ).fold(dim="detector", sizes=ltotal.sizes),
    )

    # Set up the workflow
    workflow = sl.Pipeline(
        time_of_flight.providers(), params=time_of_flight.default_parameters()
    )

    workflow[time_of_flight.RawData] = raw_data
    workflow[time_of_flight.SimulationResults] = simulation_dream_choppers
    workflow[time_of_flight.LtotalRange] = ltotal.min(), ltotal.max()

    # Compute time-of-flight
    tofs = workflow.compute(time_of_flight.TofData)
    assert {dim: tofs.sizes[dim] for dim in ltotal.sizes} == ltotal.sizes

    # Convert to wavelength
    graph = {**beamline(scatter=False), **elastic("tof")}
    wav_wfm = tofs.transform_coords("wavelength", graph=graph)

    # Compare the computed wavelengths to the true wavelengths
    for i in range(npulses):
        result = wav_wfm["pulse", i].flatten(to="detector")
        for j in range(len(result)):
            computed_wavelengths = result[j].values.coords["wavelength"]
            assert sc.allclose(
                computed_wavelengths,
                true_wavelengths["pulse", i],
                rtol=sc.scalar(1e-02),
            )
+
+
@pytest.fixture(scope="module")
def simulation_dream_choppers_time_overlap():
    """Module-cached tof simulation through the cascade with frame overlap."""
    results = time_of_flight.simulate_beamline(
        choppers=dream_choppers_with_frame_overlap(), neutrons=100_000, seed=432
    )
    return results
+
+
+@pytest.mark.parametrize("npulses", [1, 2])
+@pytest.mark.parametrize(
+    "ltotal",
+    [
+        sc.array(dims=["detector_number"], values=[77.675], unit="m"),
+        sc.array(dims=["detector_number"], values=[77.675, 76.5], unit="m"),
+        sc.array(
+            dims=["y", "x"],
+            values=[[77.675, 76.1, 78.05], [77.15, 77.3, 77.675]],
+            unit="m",
+        ),
+    ],
+)
+@pytest.mark.parametrize("time_offset_unit", ["s", "ms", "us", "ns"])
+@pytest.mark.parametrize("distance_unit", ["m", "mm"])
+def test_dream_wfm_with_subframe_time_overlap(
+    simulation_dream_choppers_time_overlap,
+    npulses,
+    ltotal,
+    time_offset_unit,
+    distance_unit,
+):
+    """Neutrons whose arrival times fall in the overlap region between
+    subframes cannot be assigned an unambiguous time-of-flight: the lookup
+    table is masked (NaN) where its relative error exceeds the threshold, so
+    those neutrons must yield NaN wavelengths, while all other neutrons are
+    resolved to within 1% of their true wavelengths.
+    """
+    # One fake monitor per detector pixel, keyed by name, at distance Ltotal.
+    monitors = {
+        f"detector{i}": ltot for i, ltot in enumerate(ltotal.flatten(to="detector"))
+    }
+
+    # Create some neutron events
+    wavelengths = [1.5, 1.6, 1.7, 3.3, 3.4, 3.5]
+    birth_times = [1.5] * len(wavelengths)
+
+    # Add two extra neutrons timed to land in the subframe overlap region;
+    # these should end up with NaN computed wavelengths.
+    birth_times.extend([0.0, 3.3])
+    wavelengths.extend([2.6, 2.4])
+
+    wavelengths = sc.array(dims=["event"], values=wavelengths, unit="angstrom")
+    birth_times = sc.array(dims=["event"], values=birth_times, unit="ms")
+
+    # Fake beamline emitting exactly these neutrons every pulse (14 Hz source).
+    ess_beamline = fakes.FakeBeamline(
+        choppers=dream_choppers_with_frame_overlap(),
+        monitors=monitors,
+        run_length=sc.scalar(1 / 14, unit="s") * npulses,
+        events_per_pulse=len(wavelengths),
+        source=partial(
+            tof_pkg.Source.from_neutrons,
+            birth_times=birth_times,
+            wavelengths=wavelengths,
+            frequency=sc.scalar(14.0, unit="Hz"),
+        ),
+    )
+
+    # Save the true wavelengths for later
+    true_wavelengths = ess_beamline.source.data.coords["wavelength"]
+
+    # Stack the per-monitor event data into one array shaped like ltotal.
+    raw_data = sc.concat(
+        [ess_beamline.get_monitor(key)[0] for key in monitors.keys()],
+        dim="detector",
+    ).fold(dim="detector", sizes=ltotal.sizes)
+
+    # Convert the time offset to the unit requested by the test
+    raw_data.bins.coords["event_time_offset"] = raw_data.bins.coords[
+        "event_time_offset"
+    ].to(unit=time_offset_unit, copy=False)
+
+    raw_data.coords["Ltotal"] = ltotal.to(unit=distance_unit, copy=False)
+
+    # Verify that all 8 neutrons (6 regular + 2 overlap) made it through the
+    # chopper cascade (len(wavelengths) is 8 after the extend above).
+    assert sc.identical(
+        raw_data.bins.concat("pulse").hist().data,
+        sc.array(
+            dims=["detector"],
+            values=[len(wavelengths) * npulses] * len(monitors),
+            unit="counts",
+            dtype="float64",
+        ).fold(dim="detector", sizes=ltotal.sizes),
+    )
+
+    # Set up the workflow
+    workflow = sl.Pipeline(
+        time_of_flight.providers(), params=time_of_flight.default_parameters()
+    )
+
+    workflow[time_of_flight.RawData] = raw_data
+    workflow[time_of_flight.SimulationResults] = simulation_dream_choppers_time_overlap
+    # Threshold below which lookup-table entries are considered unambiguous;
+    # entries with larger relative error are masked to NaN.
+    workflow[time_of_flight.LookupTableRelativeErrorThreshold] = 0.01
+    workflow[time_of_flight.LtotalRange] = ltotal.min(), ltotal.max()
+
+    # Make sure some values in the lookup table have been masked (turned to NaNs)
+    original_table = workflow.compute(time_of_flight.TimeOfFlightLookupTable)
+    masked_table = workflow.compute(time_of_flight.MaskedTimeOfFlightLookupTable)
+    assert sc.isnan(masked_table).data.sum() > sc.isnan(original_table).data.sum()
+
+    # Compute time-of-flight
+    tofs = workflow.compute(time_of_flight.TofData)
+    # Output must keep the detector layout of the input (1-D or 2-D).
+    assert {dim: tofs.sizes[dim] for dim in ltotal.sizes} == ltotal.sizes
+
+    # Convert to wavelength
+    graph = {**beamline(scatter=False), **elastic("tof")}
+    wav_wfm = tofs.transform_coords("wavelength", graph=graph)
+
+    # Compare the computed wavelengths to the true wavelengths
+    for i in range(npulses):
+        result = wav_wfm["pulse", i].flatten(to="detector")
+        for j in range(len(result)):
+            computed_wavelengths = result[j].values.coords["wavelength"]
+            # First 6 neutrons: within 1% of the truth.
+            assert sc.allclose(
+                computed_wavelengths[:-2],
+                true_wavelengths["pulse", i][:-2],
+                rtol=sc.scalar(1e-02),
+            )
+            # The two neutrons in the overlap region should have NaN wavelengths
+            assert sc.isnan(computed_wavelengths[-2])
+            assert sc.isnan(computed_wavelengths[-1])
+
+
+@pytest.fixture(scope="module")
+def simulation_v20_choppers():
+    """Module-scoped tof simulation of the V20 WFM chopper cascade;
+    computed once and shared across parametrized tests."""
+    wfm_choppers = fakes.wfm_choppers()
+    return time_of_flight.simulate_beamline(
+        choppers=wfm_choppers, neutrons=300_000, seed=432
+    )
+
+
+@pytest.mark.parametrize("npulses", [1, 2])
+@pytest.mark.parametrize(
+    "ltotal",
+    [
+        sc.array(dims=["detector_number"], values=[26.0], unit="m"),
+        sc.array(dims=["detector_number"], values=[26.0, 25.5], unit="m"),
+        sc.array(
+            dims=["y", "x"], values=[[26.0, 25.1, 26.33], [25.9, 26.0, 25.7]], unit="m"
+        ),
+    ],
+)
+@pytest.mark.parametrize("time_offset_unit", ["s", "ms", "us", "ns"])
+@pytest.mark.parametrize("distance_unit", ["m", "mm"])
+def test_v20_compute_wavelengths_from_wfm(
+    simulation_v20_choppers, npulses, ltotal, time_offset_unit, distance_unit
+):
+    """With the V20 WFM chopper cascade, the time-of-flight workflow must
+    recover each neutron's wavelength to within 1% of its true value, for any
+    detector layout (1-D or 2-D), time-offset unit, and distance unit.
+    """
+    # One fake monitor per detector pixel, keyed by name, at distance Ltotal.
+    monitors = {
+        f"detector{i}": ltot for i, ltot in enumerate(ltotal.flatten(to="detector"))
+    }
+
+    # Create some neutron events
+    wavelengths = sc.array(
+        dims=["event"], values=[2.75, 4.2, 5.4, 6.5, 7.6, 8.75], unit="angstrom"
+    )
+    # All neutrons are born 1.5 ms into the pulse.
+    birth_times = sc.full(sizes=wavelengths.sizes, value=1.5, unit="ms")
+    # Fake beamline emitting exactly these neutrons every pulse (14 Hz source).
+    ess_beamline = fakes.FakeBeamline(
+        choppers=fakes.wfm_choppers(),
+        monitors=monitors,
+        run_length=sc.scalar(1 / 14, unit="s") * npulses,
+        events_per_pulse=len(wavelengths),
+        source=partial(
+            tof_pkg.Source.from_neutrons,
+            birth_times=birth_times,
+            wavelengths=wavelengths,
+            frequency=sc.scalar(14.0, unit="Hz"),
+        ),
+    )
+
+    # Save the true wavelengths for later
+    true_wavelengths = ess_beamline.source.data.coords["wavelength"]
+
+    # Stack the per-monitor event data into one array shaped like ltotal.
+    raw_data = sc.concat(
+        [ess_beamline.get_monitor(key)[0] for key in monitors.keys()],
+        dim="detector",
+    ).fold(dim="detector", sizes=ltotal.sizes)
+
+    # Convert the time offset to the unit requested by the test
+    raw_data.bins.coords["event_time_offset"] = raw_data.bins.coords[
+        "event_time_offset"
+    ].to(unit=time_offset_unit, copy=False)
+
+    raw_data.coords["Ltotal"] = ltotal.to(unit=distance_unit, copy=False)
+
+    # Verify that all 6 neutrons made it through the chopper cascade
+    assert sc.identical(
+        raw_data.bins.concat("pulse").hist().data,
+        sc.array(
+            dims=["detector"],
+            values=[len(wavelengths) * npulses] * len(monitors),
+            unit="counts",
+            dtype="float64",
+        ).fold(dim="detector", sizes=ltotal.sizes),
+    )
+
+    # Set up the workflow
+    workflow = sl.Pipeline(
+        time_of_flight.providers(), params=time_of_flight.default_parameters()
+    )
+
+    workflow[time_of_flight.RawData] = raw_data
+    workflow[time_of_flight.SimulationResults] = simulation_v20_choppers
+    workflow[time_of_flight.LtotalRange] = ltotal.min(), ltotal.max()
+
+    # Compute time-of-flight
+    tofs = workflow.compute(time_of_flight.TofData)
+    # Output must keep the detector layout of the input (1-D or 2-D).
+    assert {dim: tofs.sizes[dim] for dim in ltotal.sizes} == ltotal.sizes
+
+    # Convert to wavelength
+    graph = {**beamline(scatter=False), **elastic("tof")}
+    wav_wfm = tofs.transform_coords("wavelength", graph=graph)
+
+    # Compare the computed wavelengths to the true wavelengths
+    for i in range(npulses):
+        result = wav_wfm["pulse", i].flatten(to="detector")
+        for j in range(len(result)):
+            computed_wavelengths = result[j].values.coords["wavelength"]
+            # All neutrons must be recovered to within 1% of the truth.
+            assert sc.allclose(
+                computed_wavelengths,
+                true_wavelengths["pulse", i],
+                rtol=sc.scalar(1e-02),
+            )
diff --git a/tox.ini b/tox.ini
index d0253400..a16e97b5 100644
--- a/tox.ini
+++ b/tox.ini
@@ -63,5 +63,5 @@ deps =
   tomli
 skip_install = true
 changedir = requirements
-commands = python ./make_base.py --nightly scippnexus,scipp,sciline,cyclebane,scippneutron
+commands = python ./make_base.py --nightly scippnexus,scipp,sciline,cyclebane,scippneutron,tof
            pip-compile-multi -d . --backtracking